repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/connection_utils.py
flax/server/connection_utils.py
from __future__ import annotations import asyncio import random from typing import Any, List, Optional, Tuple from flax.server.ws_connection import WSFlaxConnection async def send_all_first_reply( func: str, arg: Any, peers: List[WSFlaxConnection], timeout=15 ) -> Optional[Tuple[Any, WSFlaxConnection]]: """performs an API request to peers and returns the result of the first response and the peer that sent it.""" async def do_func(peer_x: WSFlaxConnection, func_x: str, arg_x: Any): method_to_call = getattr(peer_x, func_x) result_x = await method_to_call(arg_x) if result_x is not None: return result_x, peer_x else: await asyncio.sleep(timeout) return None tasks = [] for peer in peers: tasks.append(do_func(peer, func, arg)) done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) if len(done) > 0: d = done.pop() result = d.result() if result is None: return None response, peer = result return response, peer else: return None async def send_to_random(func: str, arg: Any, peers: List[WSFlaxConnection]) -> Optional[Tuple[Any, WSFlaxConnection]]: """performs an API request to peers and returns the result of the first response and the peer that sent it.""" async def do_func(peer_x: WSFlaxConnection, func_x: str, arg_x: Any): method_to_call = getattr(peer_x, func_x) result_x = await method_to_call(arg_x) if result_x is not None: return result_x, peer_x else: await asyncio.sleep(15) return None tasks = [] random_peer = random.choice(peers) tasks.append(do_func(random_peer, func, arg)) done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED) if len(done) > 0: d = done.pop() result = d.result() if result is None: return None response, peer = result return response, peer else: return None
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/node_discovery.py
flax/server/node_discovery.py
import asyncio
import math
import time
import traceback
from random import Random
from secrets import randbits
from typing import Dict, Optional, List, Set

import flax.server.ws_connection as ws
import dns.asyncresolver
from flax.protocols import full_node_protocol, introducer_protocol
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.server.address_manager import AddressManager, ExtendedPeerInfo
from flax.server.address_manager_store import AddressManagerStore
from flax.server.address_manager_sqlite_store import create_address_manager_from_db
from flax.server.outbound_message import NodeType, make_msg, Message
from flax.server.peer_store_resolver import PeerStoreResolver
from flax.server.server import FlaxServer
from flax.types.peer_info import PeerInfo, TimestampedPeerInfo
from flax.util.hash import std_hash
from flax.util.ints import uint64

MAX_PEERS_RECEIVED_PER_REQUEST = 1000
MAX_TOTAL_PEERS_RECEIVED = 3000
MAX_CONCURRENT_OUTBOUND_CONNECTIONS = 70

NETWORK_ID_DEFAULT_PORTS = {
    "mainnet": 6888,
    "testnet7": 58444,
    "testnet10": 58444,
    "testnet8": 58445,
}


class FullNodeDiscovery:
    """Maintains the address manager and the outbound-connection machinery
    shared by full nodes and wallets: peer selection, DNS/introducer
    bootstrapping, periodic serialization and cleanup of the peer tables."""

    resolver: Optional[dns.asyncresolver.Resolver]

    def __init__(
        self,
        server: FlaxServer,
        target_outbound_count: int,
        peer_store_resolver: PeerStoreResolver,
        introducer_info: Optional[Dict],
        dns_servers: List[str],
        peer_connect_interval: int,
        selected_network: str,
        default_port: Optional[int],
        log,
    ):
        self.server: FlaxServer = server
        self.message_queue: asyncio.Queue = asyncio.Queue()
        self.is_closed = False
        self.target_outbound_count = target_outbound_count
        self.legacy_peer_db_path = peer_store_resolver.legacy_peer_db_path
        self.legacy_peer_db_migrated = False
        self.peers_file_path = peer_store_resolver.peers_file_path
        self.dns_servers = dns_servers
        if introducer_info is not None:
            self.introducer_info: Optional[PeerInfo] = PeerInfo(
                introducer_info["host"],
                introducer_info["port"],
            )
        else:
            self.introducer_info = None
        self.peer_connect_interval = peer_connect_interval
        self.log = log
        self.relay_queue: Optional[asyncio.Queue] = None
        self.address_manager: Optional[AddressManager] = None
        self.connection_time_pretest: Dict = {}
        self.received_count_from_peers: Dict = {}
        self.lock = asyncio.Lock()
        self.connect_peers_task: Optional[asyncio.Task] = None
        self.serialize_task: Optional[asyncio.Task] = None
        self.cleanup_task: Optional[asyncio.Task] = None
        self.initial_wait: int = 0
        try:
            self.resolver: Optional[dns.asyncresolver.Resolver] = dns.asyncresolver.Resolver()
        except Exception:
            # Best-effort: DNS bootstrapping is optional, introducers remain available.
            self.resolver = None
            self.log.exception("Error initializing asyncresolver")
        self.pending_outbound_connections: Set[str] = set()
        self.pending_tasks: Set[asyncio.Task] = set()
        self.default_port: Optional[int] = default_port
        if default_port is None and selected_network in NETWORK_ID_DEFAULT_PORTS:
            self.default_port = NETWORK_ID_DEFAULT_PORTS[selected_network]

    async def migrate_address_manager_if_necessary(self) -> None:
        """One-shot migration of the legacy sqlite peer DB into the peers file."""
        if (
            self.legacy_peer_db_migrated
            or self.peers_file_path.exists()
            or self.legacy_peer_db_path is None
            or not self.legacy_peer_db_path.exists()
        ):
            # No need for migration if:
            # - we've already migrated
            # - we have a peers file
            # - we don't have a legacy peer db
            return
        try:
            self.log.info(f"Migrating legacy peer database from {self.legacy_peer_db_path}")
            # Attempt to create an AddressManager from the legacy peer database
            address_manager: Optional[AddressManager] = await create_address_manager_from_db(self.legacy_peer_db_path)
            if address_manager is not None:
                self.log.info(f"Writing migrated peer data to {self.peers_file_path}")
                # Write the AddressManager data to the new peers file
                await AddressManagerStore.serialize(address_manager, self.peers_file_path)
                self.legacy_peer_db_migrated = True
        except Exception:
            self.log.exception("Error migrating legacy peer database")

    async def initialize_address_manager(self) -> None:
        """Load (or create) the address manager and hook message timestamps."""
        self.address_manager = await AddressManagerStore.create_address_manager(self.peers_file_path)
        self.server.set_received_message_callback(self.update_peer_timestamp_on_message)

    async def start_tasks(self) -> None:
        """Spawn the long-running connect / serialize / cleanup loops."""
        random = Random()
        self.connect_peers_task = asyncio.create_task(self._connect_to_peers(random))
        self.serialize_task = asyncio.create_task(self._periodically_serialize(random))
        self.cleanup_task = asyncio.create_task(self._periodically_cleanup())

    async def _close_common(self) -> None:
        """Cancel all background and pending connect tasks and wait them out."""
        self.is_closed = True
        self.cancel_task_safe(self.connect_peers_task)
        self.cancel_task_safe(self.serialize_task)
        self.cancel_task_safe(self.cleanup_task)
        for t in self.pending_tasks:
            self.cancel_task_safe(t)
        if len(self.pending_tasks) > 0:
            await asyncio.wait(self.pending_tasks)

    def cancel_task_safe(self, task: Optional[asyncio.Task]):
        """Cancel `task` if present, logging (not raising) any error."""
        if task is not None:
            try:
                task.cancel()
            except Exception as e:
                self.log.error(f"Error while canceling task.{e} {task}")

    def add_message(self, message, data):
        self.message_queue.put_nowait((message, data))

    async def on_connect(self, peer: ws.WSFlaxConnection):
        """Record inbound full-node peers and request peers from outbound ones."""
        if (
            peer.is_outbound is False
            and peer.peer_server_port is not None
            and peer.connection_type is NodeType.FULL_NODE
            and self.server._local_type is NodeType.FULL_NODE
            and self.address_manager is not None
        ):
            timestamped_peer_info = TimestampedPeerInfo(
                peer.peer_host,
                peer.peer_server_port,
                uint64(int(time.time())),
            )
            await self.address_manager.add_to_new_table([timestamped_peer_info], peer.get_peer_info(), 0)
            if self.relay_queue is not None:
                self.relay_queue.put_nowait((timestamped_peer_info, 1))
        if (
            peer.is_outbound
            and peer.peer_server_port is not None
            and peer.connection_type is NodeType.FULL_NODE
            and (self.server._local_type is NodeType.FULL_NODE or self.server._local_type is NodeType.WALLET)
            and self.address_manager is not None
        ):
            msg = make_msg(ProtocolMessageTypes.request_peers, full_node_protocol.RequestPeers())
            await peer.send_message(msg)

    # Updates timestamps each time we receive a message for outbound connections.
    async def update_peer_timestamp_on_message(self, peer: ws.WSFlaxConnection):
        if (
            peer.is_outbound
            and peer.peer_server_port is not None
            and peer.connection_type is NodeType.FULL_NODE
            and self.server._local_type is NodeType.FULL_NODE
            and self.address_manager is not None
        ):
            peer_info = peer.get_peer_info()
            if peer_info is None:
                return None
            if peer_info.host not in self.connection_time_pretest:
                self.connection_time_pretest[peer_info.host] = time.time()
            # Only refresh the address manager entry at most once per 10 minutes.
            if time.time() - self.connection_time_pretest[peer_info.host] > 600:
                self.connection_time_pretest[peer_info.host] = time.time()
                await self.address_manager.connect(peer_info)

    def _num_needed_peers(self) -> int:
        target = self.target_outbound_count
        outgoing = len(self.server.get_full_node_outgoing_connections())
        return max(0, target - outgoing)

    def _poisson_next_send(self, now, avg_interval_seconds, random):
        """
        Uses the Poisson distribution to determine the next time
        when we'll initiate a feeler connection.
        (https://en.wikipedia.org/wiki/Poisson_distribution)
        """
        return now + (
            math.log(random.randrange(1 << 48) * -0.0000000000000035527136788 + 1)
            * avg_interval_seconds
            * -1000000.0
            + 0.5
        )

    async def _introducer_client(self):
        """Ask the configured introducer for peers (no-op if none configured)."""
        if self.introducer_info is None:
            return None

        async def on_connect(peer: ws.WSFlaxConnection):
            msg = make_msg(ProtocolMessageTypes.request_peers_introducer, introducer_protocol.RequestPeersIntroducer())
            await peer.send_message(msg)

        await self.server.start_client(self.introducer_info, on_connect)

    async def _query_dns(self, dns_address):
        """Resolve a DNS seeder (A and AAAA) and feed the results into the
        address manager with a zero timestamp (treated like introducer peers)."""
        try:
            if self.default_port is None:
                self.log.error(
                    "Network id not supported in NETWORK_ID_DEFAULT_PORTS neither in config. Skipping DNS query."
                )
                return
            if self.resolver is None:
                self.log.warning("Skipping DNS query: asyncresolver not initialized.")
                return
            for rdtype in ["A", "AAAA"]:
                peers: List[TimestampedPeerInfo] = []
                result = await self.resolver.resolve(qname=dns_address, rdtype=rdtype, lifetime=30)
                for ip in result:
                    peers.append(
                        TimestampedPeerInfo(
                            ip.to_text(),
                            self.default_port,
                            0,
                        )
                    )
                self.log.info(f"Received {len(peers)} peers from DNS seeder, using rdtype = {rdtype}.")
                if len(peers) > 0:
                    await self._respond_peers_common(full_node_protocol.RespondPeers(peers), None, False)
        except Exception as e:
            self.log.warning(f"querying DNS introducer failed: {e}")

    async def on_connect_callback(self, peer: ws.WSFlaxConnection):
        if self.server.on_connect is not None:
            await self.server.on_connect(peer)
        else:
            await self.on_connect(peer)

    async def start_client_async(self, addr: PeerInfo, is_feeler: bool) -> None:
        """Attempt one outbound connection to `addr`, updating the address
        manager's good/attempt bookkeeping based on the outcome."""
        try:
            if self.address_manager is None:
                return
            self.pending_outbound_connections.add(addr.host)
            client_connected = await self.server.start_client(
                addr,
                on_connect=self.on_connect_callback,
                is_feeler=is_feeler,
            )
            if self.server.is_duplicate_or_self_connection(addr):
                # Mark it as a softer attempt, without counting the failures.
                await self.address_manager.attempt(addr, False)
            else:
                if client_connected is True:
                    await self.address_manager.mark_good(addr)
                    await self.address_manager.connect(addr)
                else:
                    await self.address_manager.attempt(addr, True)
            self.pending_outbound_connections.remove(addr.host)
        except Exception as e:
            if addr.host in self.pending_outbound_connections:
                self.pending_outbound_connections.remove(addr.host)
            self.log.error(f"Exception in create outbound connections: {e}")
            self.log.error(f"Traceback: {traceback.format_exc()}")

    async def _connect_to_peers(self, random) -> None:
        """Main outbound-connection loop: bootstrap from DNS/introducers when
        the address table is empty, otherwise pick candidate peers (one per
        network group), schedule connection attempts, and occasionally make
        feeler connections."""
        next_feeler = self._poisson_next_send(time.time() * 1000 * 1000, 240, random)
        retry_introducers = False
        introducer_attempts: int = 0
        dns_server_index: int = 0
        local_peerinfo: Optional[PeerInfo] = await self.server.get_peer_info()
        last_timestamp_local_info: uint64 = uint64(int(time.time()))
        last_collision_timestamp = 0
        if self.initial_wait > 0:
            await asyncio.sleep(self.initial_wait)

        introducer_backoff = 1
        while not self.is_closed:
            try:
                assert self.address_manager is not None

                # We don't know any address, connect to the introducer to get some.
                size = await self.address_manager.size()
                if size == 0 or retry_introducers:
                    try:
                        await asyncio.sleep(introducer_backoff)
                    except asyncio.CancelledError:
                        return None
                    # Run dual between DNS servers and introducers. One time query DNS server,
                    # next two times query the introducer.
                    if introducer_attempts % 3 == 0 and len(self.dns_servers) > 0:
                        dns_address = self.dns_servers[dns_server_index]
                        dns_server_index = (dns_server_index + 1) % len(self.dns_servers)
                        await self._query_dns(dns_address)
                    else:
                        await self._introducer_client()
                        # there's some delay between receiving the peers from the
                        # introducer until they get incorporated to prevent this
                        # loop for running one more time. Add this delay to ensure
                        # that once we get peers, we stop contacting the introducer.
                        try:
                            await asyncio.sleep(5)
                        except asyncio.CancelledError:
                            return None
                    retry_introducers = False
                    introducer_attempts += 1
                    # keep doubling the introducer delay until we reach 5
                    # minutes
                    if introducer_backoff < 300:
                        introducer_backoff *= 2
                    continue
                else:
                    introducer_backoff = 1

                # Only connect out to one peer per network group (/16 for IPv4).
                groups = set()
                full_node_connected = self.server.get_full_node_outgoing_connections()
                connected = [c.get_peer_info() for c in full_node_connected]
                connected = [c for c in connected if c is not None]
                for conn in full_node_connected:
                    peer = conn.get_peer_info()
                    if peer is None:
                        continue
                    group = peer.get_group()
                    groups.add(group)

                # Feeler Connections
                #
                # Design goals:
                # * Increase the number of connectable addresses in the tried table.
                #
                # Method:
                # * Choose a random address from new and attempt to connect to it if we can connect
                # successfully it is added to tried.
                # * Start attempting feeler connections only after node finishes making outbound
                # connections.
                # * Only make a feeler connection once every few minutes.

                is_feeler = False
                has_collision = False
                if self._num_needed_peers() == 0:
                    if time.time() * 1000 * 1000 > next_feeler:
                        next_feeler = self._poisson_next_send(time.time() * 1000 * 1000, 240, random)
                        is_feeler = True

                await self.address_manager.resolve_tried_collisions()
                tries = 0
                now = time.time()
                got_peer = False
                addr: Optional[PeerInfo] = None
                max_tries = 50
                if len(groups) < 3:
                    max_tries = 10
                elif len(groups) <= 5:
                    max_tries = 25
                select_peer_interval = max(0.1, len(groups) * 0.25)
                while not got_peer and not self.is_closed:
                    self.log.debug(f"Address manager query count: {tries}. Query limit: {max_tries}")
                    try:
                        await asyncio.sleep(select_peer_interval)
                    except asyncio.CancelledError:
                        return None
                    tries += 1
                    if tries > max_tries:
                        addr = None
                        retry_introducers = True
                        break
                    info: Optional[ExtendedPeerInfo] = await self.address_manager.select_tried_collision()
                    if info is None or time.time() - last_collision_timestamp <= 60:
                        info = await self.address_manager.select_peer(is_feeler)
                    else:
                        has_collision = True
                        last_collision_timestamp = int(time.time())
                    if info is None:
                        if not is_feeler:
                            retry_introducers = True
                        break
                    # Require outbound connections, other than feelers,
                    # to be to distinct network groups.
                    addr = info.peer_info
                    if has_collision:
                        break
                    if addr is not None and not addr.is_valid():
                        addr = None
                        continue
                    if not is_feeler and addr.get_group() in groups:
                        addr = None
                        continue
                    if addr in connected:
                        addr = None
                        continue
                    # attempt a node once per 30 minutes.
                    if now - info.last_try < 1800:
                        continue
                    if time.time() - last_timestamp_local_info > 1800 or local_peerinfo is None:
                        local_peerinfo = await self.server.get_peer_info()
                        last_timestamp_local_info = uint64(int(time.time()))
                    if local_peerinfo is not None and addr == local_peerinfo:
                        continue
                    got_peer = True
                    self.log.debug(f"Addrman selected address: {addr}.")

                disconnect_after_handshake = is_feeler
                extra_peers_needed = self._num_needed_peers()
                if extra_peers_needed == 0:
                    disconnect_after_handshake = True
                    retry_introducers = False
                self.log.debug(f"Num peers needed: {extra_peers_needed}")
                initiate_connection = extra_peers_needed > 0 or has_collision or is_feeler
                connect_peer_interval = max(0.25, len(groups) * 0.5)
                if not initiate_connection:
                    connect_peer_interval += 15
                connect_peer_interval = min(connect_peer_interval, self.peer_connect_interval)
                if addr is not None and initiate_connection and addr.host not in self.pending_outbound_connections:
                    if len(self.pending_outbound_connections) >= MAX_CONCURRENT_OUTBOUND_CONNECTIONS:
                        self.log.debug("Max concurrent outbound connections reached. waiting")
                        await asyncio.wait(self.pending_tasks, return_when=asyncio.FIRST_COMPLETED)
                    self.pending_tasks.add(
                        asyncio.create_task(self.start_client_async(addr, disconnect_after_handshake))
                    )
                await asyncio.sleep(connect_peer_interval)

                # prune completed connect tasks
                # BUGFIX: this previously assigned to `self.pending_task` (typo),
                # so pending_tasks was never pruned and grew without bound.
                self.pending_tasks = set(filter(lambda t: not t.done(), self.pending_tasks))
            except Exception as e:
                self.log.error(f"Exception in create outbound connections: {e}")
                self.log.error(f"Traceback: {traceback.format_exc()}")

    async def _periodically_serialize(self, random: Random):
        """Persist the address manager to the peers file every 15-30 minutes."""
        while not self.is_closed:
            if self.address_manager is None:
                await asyncio.sleep(10)
                continue
            serialize_interval = random.randint(15 * 60, 30 * 60)
            await asyncio.sleep(serialize_interval)
            async with self.address_manager.lock:
                await AddressManagerStore.serialize(self.address_manager, self.peers_file_path)

    async def _periodically_cleanup(self) -> None:
        while not self.is_closed:
            # Removes entries with timestamp worse than 14 days ago
            # and with a high number of failed attempts.
            # Most likely, the peer left the network,
            # so we can save space in the peer tables.
            cleanup_interval = 1800
            max_timestamp_difference = 14 * 3600 * 24
            max_consecutive_failures = 10
            await asyncio.sleep(cleanup_interval)

            # Perform the cleanup only if we have at least 3 connections.
            full_node_connected = self.server.get_connections(NodeType.FULL_NODE)
            connected = [c.get_peer_info() for c in full_node_connected]
            connected = [c for c in connected if c is not None]
            if self.address_manager is not None and len(connected) >= 3:
                async with self.address_manager.lock:
                    self.address_manager.cleanup(max_timestamp_difference, max_consecutive_failures)

    async def _respond_peers_common(self, request, peer_src, is_full_node) -> None:
        # Check if we got the peers from a full node or from the introducer.
        peers_adjusted_timestamp = []
        is_misbehaving = False
        if len(request.peer_list) > MAX_PEERS_RECEIVED_PER_REQUEST:
            is_misbehaving = True
        if is_full_node:
            if peer_src is None:
                return None
            async with self.lock:
                if peer_src.host not in self.received_count_from_peers:
                    self.received_count_from_peers[peer_src.host] = 0
                self.received_count_from_peers[peer_src.host] += len(request.peer_list)
                if self.received_count_from_peers[peer_src.host] > MAX_TOTAL_PEERS_RECEIVED:
                    is_misbehaving = True
        if is_misbehaving:
            return None
        for peer in request.peer_list:
            if peer.timestamp < 100000000 or peer.timestamp > time.time() + 10 * 60:
                # Invalid timestamp, predefine a bad one.
                current_peer = TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(int(time.time() - 5 * 24 * 60 * 60)),
                )
            else:
                current_peer = peer
            if not is_full_node:
                current_peer = TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(0),
                )
            peers_adjusted_timestamp.append(current_peer)

        assert self.address_manager is not None

        if is_full_node:
            await self.address_manager.add_to_new_table(peers_adjusted_timestamp, peer_src, 2 * 60 * 60)
        else:
            await self.address_manager.add_to_new_table(peers_adjusted_timestamp, None, 0)


class FullNodePeers(FullNodeDiscovery):
    """Full-node flavour of peer discovery: additionally self-advertises and
    relays fresh addresses to a few deterministically chosen neighbours."""

    self_advertise_task: Optional[asyncio.Task] = None
    address_relay_task: Optional[asyncio.Task] = None

    def __init__(
        self,
        server,
        max_inbound_count,
        target_outbound_count,
        peer_store_resolver: PeerStoreResolver,
        introducer_info,
        dns_servers,
        peer_connect_interval,
        selected_network,
        default_port,
        log,
    ):
        super().__init__(
            server,
            target_outbound_count,
            peer_store_resolver,
            introducer_info,
            dns_servers,
            peer_connect_interval,
            selected_network,
            default_port,
            log,
        )
        self.relay_queue = asyncio.Queue()
        self.neighbour_known_peers: Dict = {}
        # Random salt for the per-day relay-neighbour hash (anti-fingerprinting).
        self.key = randbits(256)

    async def start(self) -> None:
        await self.migrate_address_manager_if_necessary()
        await self.initialize_address_manager()
        self.self_advertise_task = asyncio.create_task(self._periodically_self_advertise_and_clean_data())
        self.address_relay_task = asyncio.create_task(self._address_relay())
        await self.start_tasks()

    async def close(self) -> None:
        await self._close_common()
        self.cancel_task_safe(self.self_advertise_task)
        self.cancel_task_safe(self.address_relay_task)

    async def _periodically_self_advertise_and_clean_data(self):
        while not self.is_closed:
            try:
                try:
                    await asyncio.sleep(24 * 3600)
                except asyncio.CancelledError:
                    return None
                # Clean up known nodes for neighbours every 24 hours.
                async with self.lock:
                    for neighbour in list(self.neighbour_known_peers.keys()):
                        self.neighbour_known_peers[neighbour].clear()
                # Self advertise every 24 hours.
                peer = await self.server.get_peer_info()
                if peer is None:
                    continue
                timestamped_peer = [
                    TimestampedPeerInfo(
                        peer.host,
                        peer.port,
                        uint64(int(time.time())),
                    )
                ]
                msg = make_msg(
                    ProtocolMessageTypes.respond_peers,
                    full_node_protocol.RespondPeers(timestamped_peer),
                )
                await self.server.send_to_all([msg], NodeType.FULL_NODE)

                async with self.lock:
                    for host in list(self.received_count_from_peers.keys()):
                        self.received_count_from_peers[host] = 0
            except Exception as e:
                self.log.error(f"Exception in self advertise: {e}")
                self.log.error(f"Traceback: {traceback.format_exc()}")

    async def add_peers_neighbour(self, peers, neighbour_info) -> None:
        """Remember which peer hosts a given neighbour already knows about."""
        neighbour_data = (neighbour_info.host, neighbour_info.port)
        async with self.lock:
            for peer in peers:
                if neighbour_data not in self.neighbour_known_peers:
                    self.neighbour_known_peers[neighbour_data] = set()
                if peer.host not in self.neighbour_known_peers[neighbour_data]:
                    self.neighbour_known_peers[neighbour_data].add(peer.host)

    async def request_peers(self, peer_info: PeerInfo) -> Optional[Message]:
        try:
            # Prevent a fingerprint attack: do not send peers to inbound connections.
            # This asymmetric behavior for inbound and outbound connections was introduced
            # to prevent a fingerprinting attack: an attacker can send specific fake addresses
            # to users' AddrMan and later request them by sending getaddr messages.
            # Making nodes which are behind NAT and can only make outgoing connections ignore
            # the request_peers message mitigates the attack.
            if self.address_manager is None:
                return None
            peers = await self.address_manager.get_peers()
            await self.add_peers_neighbour(peers, peer_info)
            msg = make_msg(
                ProtocolMessageTypes.respond_peers,
                full_node_protocol.RespondPeers(peers),
            )
            return msg
        except Exception as e:
            self.log.error(f"Request peers exception: {e}")
            return None

    async def respond_peers(self, request, peer_src, is_full_node: bool) -> None:
        try:
            await self._respond_peers_common(request, peer_src, is_full_node)
            if is_full_node:
                await self.add_peers_neighbour(request.peer_list, peer_src)
                # A single, recently-timestamped peer is treated as an address
                # announcement and relayed onwards.
                if len(request.peer_list) == 1 and self.relay_queue is not None:
                    peer = request.peer_list[0]
                    if peer.timestamp > time.time() - 60 * 10:
                        self.relay_queue.put_nowait((peer, 2))
        except Exception as e:
            self.log.error(f"Respond peers exception: {e}. Traceback: {traceback.format_exc()}")
        return None

    async def _address_relay(self):
        while not self.is_closed:
            try:
                try:
                    relay_peer, num_peers = await self.relay_queue.get()
                except asyncio.CancelledError:
                    return None
                relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
                if not relay_peer_info.is_valid():
                    continue
                # https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
                connections = self.server.get_connections(NodeType.FULL_NODE)
                hashes = []
                cur_day = int(time.time()) // (24 * 60 * 60)
                for connection in connections:
                    peer_info = connection.get_peer_info()
                    if peer_info is None:
                        continue
                    cur_hash = int.from_bytes(
                        bytes(
                            std_hash(
                                self.key.to_bytes(32, byteorder="big")
                                + peer_info.get_key()
                                + cur_day.to_bytes(3, byteorder="big")
                            )
                        ),
                        byteorder="big",
                    )
                    hashes.append((cur_hash, connection))
                hashes.sort(key=lambda x: x[0])
                for index, (_, connection) in enumerate(hashes):
                    if index >= num_peers:
                        break
                    peer_info = connection.get_peer_info()
                    pair = (peer_info.host, peer_info.port)
                    async with self.lock:
                        if pair in self.neighbour_known_peers and relay_peer.host in self.neighbour_known_peers[pair]:
                            continue
                        if pair not in self.neighbour_known_peers:
                            self.neighbour_known_peers[pair] = set()
                        self.neighbour_known_peers[pair].add(relay_peer.host)
                    if connection.peer_node_id is None:
                        continue
                    msg = make_msg(
                        ProtocolMessageTypes.respond_peers,
                        full_node_protocol.RespondPeers([relay_peer]),
                    )
                    await connection.send_message(msg)
            except Exception as e:
                self.log.error(f"Exception in address relay: {e}")
                self.log.error(f"Traceback: {traceback.format_exc()}")


class WalletPeers(FullNodeDiscovery):
    """Wallet flavour of peer discovery: same machinery, no relay/advertise."""

    def __init__(
        self,
        server,
        target_outbound_count,
        peer_store_resolver: PeerStoreResolver,
        introducer_info,
        dns_servers,
        peer_connect_interval,
        selected_network,
        default_port,
        log,
    ) -> None:
        super().__init__(
            server,
            target_outbound_count,
            peer_store_resolver,
            introducer_info,
            dns_servers,
            peer_connect_interval,
            selected_network,
            default_port,
            log,
        )

    async def start(self) -> None:
        self.initial_wait = 1
        await self.migrate_address_manager_if_necessary()
        await self.initialize_address_manager()
        await self.start_tasks()

    async def ensure_is_closed(self) -> None:
        if self.is_closed:
            return None
        await self._close_common()

    async def respond_peers(self, request, peer_src, is_full_node) -> None:
        await self._respond_peers_common(request, peer_src, is_full_node)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/rate_limit_numbers.py
flax/server/rate_limit_numbers.py
# All of these rate limits scale with the number of transactions so the aggregate amounts are higher from __future__ import annotations import copy import dataclasses import functools from typing import Any, Dict, List, Optional from flax.protocols.protocol_message_types import ProtocolMessageTypes from flax.protocols.shared_protocol import Capability compose_rate_limits_cache: Dict[int, Dict[str, Any]] = {} @dataclasses.dataclass(frozen=True) class RLSettings: frequency: int # Max request per time period (ie 1 min) max_size: int # Max size of each request max_total_size: Optional[int] = None # Max cumulative size of all requests in that period def get_rate_limits_to_use(our_capabilities: List[Capability], peer_capabilities: List[Capability]) -> Dict[str, Any]: # This will use the newest possible rate limits that both peers support. At this time there are only two # options, v1 and v2. if Capability.RATE_LIMITS_V2 in our_capabilities and Capability.RATE_LIMITS_V2 in peer_capabilities: # Use V2 rate limits if 2 in compose_rate_limits_cache: return compose_rate_limits_cache[2] composed = compose_rate_limits(rate_limits[1], rate_limits[2]) compose_rate_limits_cache[2] = composed return composed else: # Use V1 rate limits return rate_limits[1] def compose_rate_limits(old_rate_limits: Dict[str, Any], new_rate_limits: Dict[str, Any]) -> Dict[str, Any]: # Composes two rate limits dicts, so that the newer values override the older values final_rate_limits: Dict[str, Any] = copy.deepcopy(new_rate_limits) categories: List[str] = ["rate_limits_tx", "rate_limits_other"] all_new_msgs_lists: List[List[ProtocolMessageTypes]] = [ list(new_rate_limits[category].keys()) for category in categories ] all_new_msgs: List[ProtocolMessageTypes] = functools.reduce(lambda a, b: a + b, all_new_msgs_lists) for old_cat, mapping in old_rate_limits.items(): if old_cat in categories: for old_protocol_msg, old_rate_limit_value in mapping.items(): if old_protocol_msg not in all_new_msgs: if old_cat 
not in final_rate_limits: final_rate_limits[old_cat] = {} final_rate_limits[old_cat][old_protocol_msg] = old_rate_limit_value return final_rate_limits # Each number in this dict corresponds to a specific version of rate limits (1, 2, etc). # Version 1 includes the original limits for flax software from versions 1.0 to 1.4. rate_limits = { 1: { "default_settings": RLSettings(100, 1024 * 1024, 100 * 1024 * 1024), "non_tx_freq": 1000, # There is also a freq limit for many requests "non_tx_max_total_size": 100 * 1024 * 1024, # There is also a size limit for many requests # All transaction related apis also have an aggregate limit "rate_limits_tx": { ProtocolMessageTypes.new_transaction: RLSettings(5000, 100, 5000 * 100), ProtocolMessageTypes.request_transaction: RLSettings(5000, 100, 5000 * 100), ProtocolMessageTypes.respond_transaction: RLSettings( 5000, 1 * 1024 * 1024, 20 * 1024 * 1024 ), # TODO: check this ProtocolMessageTypes.send_transaction: RLSettings(5000, 1024 * 1024), ProtocolMessageTypes.transaction_ack: RLSettings(5000, 2048), }, # All non-transaction apis also have an aggregate limit "rate_limits_other": { ProtocolMessageTypes.handshake: RLSettings(5, 10 * 1024, 5 * 10 * 1024), ProtocolMessageTypes.harvester_handshake: RLSettings(5, 1024 * 1024), ProtocolMessageTypes.new_signage_point_harvester: RLSettings(100, 1024), ProtocolMessageTypes.new_proof_of_space: RLSettings(100, 2048), ProtocolMessageTypes.request_signatures: RLSettings(100, 2048), ProtocolMessageTypes.respond_signatures: RLSettings(100, 2048), ProtocolMessageTypes.new_signage_point: RLSettings(200, 2048), ProtocolMessageTypes.declare_proof_of_space: RLSettings(100, 10 * 1024), ProtocolMessageTypes.request_signed_values: RLSettings(100, 512), ProtocolMessageTypes.farming_info: RLSettings(100, 1024), ProtocolMessageTypes.signed_values: RLSettings(100, 1024), ProtocolMessageTypes.new_peak_timelord: RLSettings(100, 20 * 1024), ProtocolMessageTypes.new_unfinished_block_timelord: RLSettings(100, 10 
* 1024), ProtocolMessageTypes.new_signage_point_vdf: RLSettings(100, 100 * 1024), ProtocolMessageTypes.new_infusion_point_vdf: RLSettings(100, 100 * 1024), ProtocolMessageTypes.new_end_of_sub_slot_vdf: RLSettings(100, 100 * 1024), ProtocolMessageTypes.request_compact_proof_of_time: RLSettings(100, 10 * 1024), ProtocolMessageTypes.respond_compact_proof_of_time: RLSettings(100, 100 * 1024), ProtocolMessageTypes.new_peak: RLSettings(200, 512), ProtocolMessageTypes.request_proof_of_weight: RLSettings(5, 100), ProtocolMessageTypes.respond_proof_of_weight: RLSettings(5, 50 * 1024 * 1024, 100 * 1024 * 1024), ProtocolMessageTypes.request_block: RLSettings(200, 100), ProtocolMessageTypes.reject_block: RLSettings(200, 100), ProtocolMessageTypes.request_blocks: RLSettings(500, 100), ProtocolMessageTypes.respond_blocks: RLSettings(100, 50 * 1024 * 1024, 5 * 50 * 1024 * 1024), ProtocolMessageTypes.reject_blocks: RLSettings(100, 100), ProtocolMessageTypes.respond_block: RLSettings(200, 2 * 1024 * 1024, 10 * 2 * 1024 * 1024), ProtocolMessageTypes.new_unfinished_block: RLSettings(200, 100), ProtocolMessageTypes.request_unfinished_block: RLSettings(200, 100), ProtocolMessageTypes.respond_unfinished_block: RLSettings(200, 2 * 1024 * 1024, 10 * 2 * 1024 * 1024), ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot: RLSettings(200, 200), ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot: RLSettings(200, 200), ProtocolMessageTypes.respond_signage_point: RLSettings(200, 50 * 1024), ProtocolMessageTypes.respond_end_of_sub_slot: RLSettings(100, 50 * 1024), ProtocolMessageTypes.request_mempool_transactions: RLSettings(5, 1024 * 1024), ProtocolMessageTypes.request_compact_vdf: RLSettings(200, 1024), ProtocolMessageTypes.respond_compact_vdf: RLSettings(200, 100 * 1024), ProtocolMessageTypes.new_compact_vdf: RLSettings(100, 1024), ProtocolMessageTypes.request_peers: RLSettings(10, 100), ProtocolMessageTypes.respond_peers: RLSettings(10, 1 * 1024 * 1024), 
ProtocolMessageTypes.request_puzzle_solution: RLSettings(1000, 100), ProtocolMessageTypes.respond_puzzle_solution: RLSettings(1000, 1024 * 1024), ProtocolMessageTypes.reject_puzzle_solution: RLSettings(1000, 100), ProtocolMessageTypes.new_peak_wallet: RLSettings(200, 300), ProtocolMessageTypes.request_block_header: RLSettings(500, 100), ProtocolMessageTypes.respond_block_header: RLSettings(500, 500 * 1024), ProtocolMessageTypes.reject_header_request: RLSettings(500, 100), ProtocolMessageTypes.request_removals: RLSettings(500, 50 * 1024, 10 * 1024 * 1024), ProtocolMessageTypes.respond_removals: RLSettings(500, 1024 * 1024, 10 * 1024 * 1024), ProtocolMessageTypes.reject_removals_request: RLSettings(500, 100), ProtocolMessageTypes.request_additions: RLSettings(500, 1024 * 1024, 10 * 1024 * 1024), ProtocolMessageTypes.respond_additions: RLSettings(500, 1024 * 1024, 10 * 1024 * 1024), ProtocolMessageTypes.reject_additions_request: RLSettings(500, 100), ProtocolMessageTypes.request_header_blocks: RLSettings(500, 100), ProtocolMessageTypes.reject_header_blocks: RLSettings(100, 100), ProtocolMessageTypes.respond_header_blocks: RLSettings(500, 2 * 1024 * 1024, 100 * 1024 * 1024), ProtocolMessageTypes.request_peers_introducer: RLSettings(100, 100), ProtocolMessageTypes.respond_peers_introducer: RLSettings(100, 1024 * 1024), ProtocolMessageTypes.farm_new_block: RLSettings(200, 200), ProtocolMessageTypes.request_plots: RLSettings(10, 10 * 1024 * 1024), ProtocolMessageTypes.respond_plots: RLSettings(10, 100 * 1024 * 1024), ProtocolMessageTypes.plot_sync_start: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.plot_sync_loaded: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.plot_sync_removed: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.plot_sync_invalid: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.plot_sync_keys_missing: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.plot_sync_duplicates: RLSettings(1000, 100 * 1024 * 
1024), ProtocolMessageTypes.plot_sync_done: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.plot_sync_response: RLSettings(3000, 100 * 1024 * 1024), ProtocolMessageTypes.coin_state_update: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.register_interest_in_puzzle_hash: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.respond_to_ph_update: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.register_interest_in_coin: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.respond_to_coin_update: RLSettings(1000, 100 * 1024 * 1024), ProtocolMessageTypes.request_ses_hashes: RLSettings(2000, 1 * 1024 * 1024), ProtocolMessageTypes.respond_ses_hashes: RLSettings(2000, 1 * 1024 * 1024), ProtocolMessageTypes.request_children: RLSettings(2000, 1024 * 1024), ProtocolMessageTypes.respond_children: RLSettings(2000, 1 * 1024 * 1024), }, }, 2: { "default_settings": RLSettings(100, 1024 * 1024, 100 * 1024 * 1024), "non_tx_freq": 1000, # There is also a freq limit for many requests "non_tx_max_total_size": 100 * 1024 * 1024, # There is also a size limit for many requests "rate_limits_tx": { ProtocolMessageTypes.request_block_header: RLSettings(500, 100), ProtocolMessageTypes.respond_block_header: RLSettings(500, 500 * 1024), ProtocolMessageTypes.reject_header_request: RLSettings(500, 100), ProtocolMessageTypes.request_removals: RLSettings(5000, 50 * 1024, 10 * 1024 * 1024), ProtocolMessageTypes.respond_removals: RLSettings(5000, 1024 * 1024, 10 * 1024 * 1024), ProtocolMessageTypes.reject_removals_request: RLSettings(500, 100), ProtocolMessageTypes.request_additions: RLSettings(50000, 100 * 1024 * 1024), ProtocolMessageTypes.respond_additions: RLSettings(50000, 100 * 1024 * 1024), ProtocolMessageTypes.reject_additions_request: RLSettings(500, 100), ProtocolMessageTypes.reject_header_blocks: RLSettings(1000, 100), ProtocolMessageTypes.respond_header_blocks: RLSettings(5000, 2 * 1024 * 1024), ProtocolMessageTypes.request_block_headers: 
RLSettings(5000, 100), ProtocolMessageTypes.reject_block_headers: RLSettings(1000, 100), ProtocolMessageTypes.respond_block_headers: RLSettings(5000, 2 * 1024 * 1024), ProtocolMessageTypes.request_ses_hashes: RLSettings(2000, 1 * 1024 * 1024), ProtocolMessageTypes.respond_ses_hashes: RLSettings(2000, 1 * 1024 * 1024), ProtocolMessageTypes.request_children: RLSettings(2000, 1024 * 1024), ProtocolMessageTypes.respond_children: RLSettings(2000, 1 * 1024 * 1024), ProtocolMessageTypes.request_puzzle_solution: RLSettings(5000, 100), ProtocolMessageTypes.respond_puzzle_solution: RLSettings(5000, 1024 * 1024), ProtocolMessageTypes.reject_puzzle_solution: RLSettings(5000, 100), }, "rate_limits_other": { # These will have a lower cap since they don't scale with high TPS (NON_TX_FREQ) ProtocolMessageTypes.request_header_blocks: RLSettings(5000, 100), }, }, }
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/address_manager_sqlite_store.py
flax/server/address_manager_sqlite_store.py
import aiosqlite from flax.server.address_manager import AddressManager, ExtendedPeerInfo, NEW_BUCKETS_PER_ADDRESS from pathlib import Path from typing import Dict, List, Optional, Tuple Node = Tuple[int, ExtendedPeerInfo] Table = Tuple[int, int] async def create_address_manager_from_db(db_path: Path) -> Optional[AddressManager]: """ Creates an AddressManager using data from the SQLite peer db """ async with aiosqlite.connect(db_path) as connection: await connection.execute("pragma journal_mode=wal") pass # await connection.execute("pragma synchronous=OFF") # Prevent DB corruption by avoiding ill-advised synchronous optimization. metadata: Dict[str, str] = await get_metadata(connection) address_manager: Optional[AddressManager] = None if not await is_empty(metadata): nodes: List[Node] = await get_nodes(connection) new_table_entries: List[Table] = await get_new_table(connection) address_manager = create_address_manager(metadata, nodes, new_table_entries) return address_manager async def get_metadata(connection: aiosqlite.Connection) -> Dict[str, str]: cursor = await connection.execute("SELECT key, value from peer_metadata") metadata = await cursor.fetchall() await cursor.close() return {key: value for key, value in metadata} async def get_nodes(connection: aiosqlite.Connection) -> List[Node]: cursor = await connection.execute("SELECT node_id, value from peer_nodes") nodes_id = await cursor.fetchall() await cursor.close() return [(node_id, ExtendedPeerInfo.from_string(info_str)) for node_id, info_str in nodes_id] async def get_new_table(connection: aiosqlite.Connection) -> List[Table]: cursor = await connection.execute("SELECT node_id, bucket from peer_new_table") entries = await cursor.fetchall() await cursor.close() return [(node_id, bucket) for node_id, bucket in entries] async def is_empty(metadata: Dict[str, str]) -> bool: if "key" not in metadata: return True if int(metadata.get("new_count", 0)) > 0: return False if int(metadata.get("tried_count", 0)) > 0: 
return False return True def create_address_manager( metadata: Dict[str, str], nodes: List[Node], new_table_entries: List[Table] ) -> AddressManager: address_manager: AddressManager = AddressManager() # ----- NOTICE ----- # The following code was taken from the original implementation of # AddressManagerStore.deserialize(). The code is duplicated/preserved # here to support migration from older versions. # ------------------ address_manager.key = int(metadata["key"]) address_manager.new_count = int(metadata["new_count"]) # address_manager.tried_count = int(metadata["tried_count"]) address_manager.tried_count = 0 new_table_nodes = [(node_id, info) for node_id, info in nodes if node_id < address_manager.new_count] for n, info in new_table_nodes: address_manager.map_addr[info.peer_info.host] = n address_manager.map_info[n] = info info.random_pos = len(address_manager.random_pos) address_manager.random_pos.append(n) address_manager.id_count = len(new_table_nodes) tried_table_nodes = [(node_id, info) for node_id, info in nodes if node_id >= address_manager.new_count] # lost_count = 0 for node_id, info in tried_table_nodes: tried_bucket = info.get_tried_bucket(address_manager.key) tried_bucket_pos = info.get_bucket_position(address_manager.key, False, tried_bucket) if address_manager.tried_matrix[tried_bucket][tried_bucket_pos] == -1: info.random_pos = len(address_manager.random_pos) info.is_tried = True id_count = address_manager.id_count address_manager.random_pos.append(id_count) address_manager.map_info[id_count] = info address_manager.map_addr[info.peer_info.host] = id_count address_manager.tried_matrix[tried_bucket][tried_bucket_pos] = id_count address_manager.id_count += 1 address_manager.tried_count += 1 # else: # lost_count += 1 # address_manager.tried_count -= lost_count for node_id, bucket in new_table_entries: if node_id >= 0 and node_id < address_manager.new_count: info = address_manager.map_info[node_id] bucket_pos = 
info.get_bucket_position(address_manager.key, True, bucket) if address_manager.new_matrix[bucket][bucket_pos] == -1 and info.ref_count < NEW_BUCKETS_PER_ADDRESS: info.ref_count += 1 address_manager.new_matrix[bucket][bucket_pos] = node_id for node_id, info in list(address_manager.map_info.items()): if not info.is_tried and info.ref_count == 0: address_manager.delete_new_entry_(node_id) address_manager.load_used_table_positions() return address_manager
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_full_node.py
flax/server/start_full_node.py
from __future__ import annotations import logging import os import pathlib import sys from multiprocessing import freeze_support from typing import Dict, List, Optional, Tuple from flax.consensus.constants import ConsensusConstants from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.full_node.full_node import FullNode from flax.full_node.full_node_api import FullNodeAPI from flax.rpc.full_node_rpc_api import FullNodeRpcApi from flax.server.outbound_message import NodeType from flax.server.start_service import RpcInfo, Service, async_run from flax.util.flax_logging import initialize_service_logging from flax.util.config import load_config, load_config_cli from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.ints import uint16 # See: https://bugs.python.org/issue29288 "".encode("idna") SERVICE_NAME = "full_node" log = logging.getLogger(__name__) def create_full_node_service( root_path: pathlib.Path, config: Dict, consensus_constants: ConsensusConstants, connect_to_daemon: bool = True, override_capabilities: List[Tuple[uint16, str]] = None, ) -> Service[FullNode]: service_config = config[SERVICE_NAME] full_node = FullNode( service_config, root_path=root_path, consensus_constants=consensus_constants, ) api = FullNodeAPI(full_node) upnp_list = [] if service_config["enable_upnp"]: upnp_list = [service_config["port"]] network_id = service_config["selected_network"] rpc_info: Optional[RpcInfo] = None if service_config["start_rpc_server"]: rpc_info = (FullNodeRpcApi, service_config["rpc_port"]) return Service( root_path=root_path, config=config, node=api.full_node, peer_api=api, node_type=NodeType.FULL_NODE, advertised_port=service_config["port"], service_name=SERVICE_NAME, upnp_ports=upnp_list, server_listen_ports=[service_config["port"]], on_connect_callback=full_node.on_connect, network_id=network_id, rpc_info=rpc_info, connect_to_daemon=connect_to_daemon, override_capabilities=override_capabilities, ) async def async_main() -> int: # 
TODO: refactor to avoid the double load config = load_config(DEFAULT_ROOT_PATH, "config.yaml") service_config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) config[SERVICE_NAME] = service_config overrides = service_config["network_overrides"]["constants"][service_config["selected_network"]] updated_constants = DEFAULT_CONSTANTS.replace_str_to_bytes(**overrides) initialize_service_logging(service_name=SERVICE_NAME, config=config) service = create_full_node_service(DEFAULT_ROOT_PATH, config, updated_constants) await service.setup_process_global_state() await service.run() return 0 def main() -> int: freeze_support() if os.getenv("FLAX_INSTRUMENT_NODE", 0) != 0: import atexit from flax.util.task_timing import start_task_instrumentation, stop_task_instrumentation start_task_instrumentation() atexit.register(stop_task_instrumentation) return async_run(async_main()) if __name__ == "__main__": sys.exit(main())
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_timelord.py
flax/server/start_timelord.py
from __future__ import annotations import logging import pathlib import sys from typing import Dict, Optional from flax.consensus.constants import ConsensusConstants from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.rpc.timelord_rpc_api import TimelordRpcApi from flax.server.outbound_message import NodeType from flax.server.start_service import RpcInfo, Service, async_run from flax.timelord.timelord import Timelord from flax.timelord.timelord_api import TimelordAPI from flax.types.peer_info import PeerInfo from flax.util.flax_logging import initialize_service_logging from flax.util.config import load_config, load_config_cli from flax.util.default_root import DEFAULT_ROOT_PATH # See: https://bugs.python.org/issue29288 "".encode("idna") SERVICE_NAME = "timelord" log = logging.getLogger(__name__) def create_timelord_service( root_path: pathlib.Path, config: Dict, constants: ConsensusConstants, connect_to_daemon: bool = True, ) -> Service[Timelord]: service_config = config[SERVICE_NAME] connect_peers = [PeerInfo(service_config["full_node_peer"]["host"], service_config["full_node_peer"]["port"])] overrides = service_config["network_overrides"]["constants"][service_config["selected_network"]] updated_constants = constants.replace_str_to_bytes(**overrides) node = Timelord(root_path, service_config, updated_constants) peer_api = TimelordAPI(node) network_id = service_config["selected_network"] rpc_info: Optional[RpcInfo] = None if service_config.get("start_rpc_server", True): rpc_info = (TimelordRpcApi, service_config.get("rpc_port", 8557)) return Service( root_path=root_path, config=config, peer_api=peer_api, node=node, node_type=NodeType.TIMELORD, advertised_port=service_config["port"], service_name=SERVICE_NAME, server_listen_ports=[service_config["port"]], connect_peers=connect_peers, network_id=network_id, rpc_info=rpc_info, connect_to_daemon=connect_to_daemon, ) async def async_main() -> int: # TODO: refactor to avoid the double load config = 
load_config(DEFAULT_ROOT_PATH, "config.yaml") service_config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) config[SERVICE_NAME] = service_config initialize_service_logging(service_name=SERVICE_NAME, config=config) service = create_timelord_service(DEFAULT_ROOT_PATH, config, DEFAULT_CONSTANTS) await service.setup_process_global_state() await service.run() return 0 def main() -> int: return async_run(async_main()) if __name__ == "__main__": sys.exit(main())
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/introducer_peers.py
flax/server/introducer_peers.py
import random import time from typing import Set, List, Optional from dataclasses import dataclass from flax.types.peer_info import PeerInfo from flax.util.ints import uint64, uint16 @dataclass(frozen=False) class VettedPeer: host: str port: uint16 # 0 means we have not attempted to vet this peer yet # a negative number means we have failed that many vetting attempts in a row # a positive number means we have successfully vetted the peer this many # times in a row vetted: int = 0 # the timestamp of the last *successful* vetting of this peer vetted_timestamp: uint64 = uint64(0) # the last time we attempted to vet this peer, or 0 if we haven't tried yet # we set this regardless of whether the vetting is successful or not last_attempt: uint64 = uint64(0) time_added: uint64 = uint64(0) def __init__(self, h: str, p: uint16): self.host = h self.port = p def __eq__(self, rhs): return self.host == rhs.host and self.port == rhs.port def __hash__(self): return hash((self.host, self.port)) class IntroducerPeers: """ Has the list of known full node peers that are already connected or may be connected to, and the time that they were last added. 
""" def __init__(self) -> None: self._peers: Set[VettedPeer] = set() def add(self, peer: Optional[PeerInfo]) -> bool: if peer is None or not peer.port: return False p = VettedPeer(peer.host, peer.port) p.time_added = uint64(int(time.time())) if p in self._peers: return True self._peers.add(p) return True def remove(self, peer: Optional[VettedPeer]) -> bool: if peer is None or not peer.port: return False try: self._peers.remove(peer) return True except ValueError: return False def get_peers(self, max_peers: int = 0, randomize: bool = False, recent_threshold=9999999) -> List[VettedPeer]: target_peers = [peer for peer in self._peers if time.time() - peer.time_added < recent_threshold] if not max_peers or max_peers > len(target_peers): max_peers = len(target_peers) if randomize: return random.sample(target_peers, max_peers) else: return target_peers[:max_peers]
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/address_manager.py
flax/server/address_manager.py
from __future__ import annotations import logging import math import time from asyncio import Lock from random import choice, randrange from secrets import randbits from typing import Dict, List, Optional, Set, Tuple from flax.types.peer_info import PeerInfo, TimestampedPeerInfo from flax.util.hash import std_hash from flax.util.ints import uint16, uint64 TRIED_BUCKETS_PER_GROUP = 8 NEW_BUCKETS_PER_SOURCE_GROUP = 64 TRIED_BUCKET_COUNT = 256 NEW_BUCKET_COUNT = 1024 BUCKET_SIZE = 64 TRIED_COLLISION_SIZE = 10 NEW_BUCKETS_PER_ADDRESS = 8 LOG_TRIED_BUCKET_COUNT = 3 LOG_NEW_BUCKET_COUNT = 10 LOG_BUCKET_SIZE = 6 HORIZON_DAYS = 30 MAX_RETRIES = 3 MIN_FAIL_DAYS = 7 MAX_FAILURES = 10 log = logging.getLogger(__name__) # This is a Python port from 'CAddrInfo' class from Bitcoin core code. class ExtendedPeerInfo: def __init__( self, addr: TimestampedPeerInfo, src_peer: Optional[PeerInfo], ): self.peer_info: PeerInfo = PeerInfo( addr.host, addr.port, ) self.timestamp: int = addr.timestamp self.src: Optional[PeerInfo] = src_peer if src_peer is None: self.src = self.peer_info self.random_pos: Optional[int] = None self.is_tried: bool = False self.ref_count: int = 0 self.last_success: int = 0 self.last_try: int = 0 self.num_attempts: int = 0 self.last_count_attempt: int = 0 def to_string(self) -> str: assert self.src is not None out = ( self.peer_info.host + " " + str(int(self.peer_info.port)) + " " + str(int(self.timestamp)) + " " + self.src.host + " " + str(int(self.src.port)) ) return out @classmethod def from_string(cls, peer_str: str): blobs = peer_str.split(" ") assert len(blobs) == 5 peer_info = TimestampedPeerInfo(blobs[0], uint16(int(blobs[1])), uint64(int(blobs[2]))) src_peer = PeerInfo(blobs[3], uint16(int(blobs[4]))) return cls(peer_info, src_peer) def get_tried_bucket(self, key: int) -> int: hash1 = int.from_bytes( bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_key())[:8]), byteorder="big", ) hash1 = hash1 % TRIED_BUCKETS_PER_GROUP hash2 = 
int.from_bytes( bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_group() + bytes([hash1]))[:8]), byteorder="big", ) return hash2 % TRIED_BUCKET_COUNT def get_new_bucket(self, key: int, src_peer: Optional[PeerInfo] = None) -> int: if src_peer is None: src_peer = self.src assert src_peer is not None hash1 = int.from_bytes( bytes(std_hash(key.to_bytes(32, byteorder="big") + self.peer_info.get_group() + src_peer.get_group())[:8]), byteorder="big", ) hash1 = hash1 % NEW_BUCKETS_PER_SOURCE_GROUP hash2 = int.from_bytes( bytes(std_hash(key.to_bytes(32, byteorder="big") + src_peer.get_group() + bytes([hash1]))[:8]), byteorder="big", ) return hash2 % NEW_BUCKET_COUNT def get_bucket_position(self, key: int, is_new: bool, nBucket: int) -> int: ch = "N" if is_new else "K" hash1 = int.from_bytes( bytes( std_hash( key.to_bytes(32, byteorder="big") + ch.encode() + nBucket.to_bytes(3, byteorder="big") + self.peer_info.get_key() )[:8] ), byteorder="big", ) return hash1 % BUCKET_SIZE def is_terrible(self, now: Optional[int] = None) -> bool: if now is None: now = int(math.floor(time.time())) # never remove things tried in the last minute if self.last_try > 0 and self.last_try >= now - 60: return False # came in a flying DeLorean if self.timestamp > now + 10 * 60: return True # not seen in recent history if self.timestamp == 0 or now - self.timestamp > HORIZON_DAYS * 24 * 60 * 60: return True # tried N times and never a success if self.last_success == 0 and self.num_attempts >= MAX_RETRIES: return True # N successive failures in the last week if now - self.last_success > MIN_FAIL_DAYS * 24 * 60 * 60 and self.num_attempts >= MAX_FAILURES: return True return False def get_selection_chance(self, now: Optional[int] = None): if now is None: now = int(math.floor(time.time())) chance = 1.0 since_last_try = max(now - self.last_try, 0) # deprioritize very recent attempts away if since_last_try < 60 * 10: chance *= 0.01 # deprioritize 66% after each failed attempt, # but at 
most 1/28th to avoid the search taking forever or overly penalizing outages. chance *= pow(0.66, min(self.num_attempts, 8)) return chance # This is a Python port from 'CAddrMan' class from Bitcoin core code. class AddressManager: id_count: int key: int random_pos: List[int] tried_matrix: List[List[int]] new_matrix: List[List[int]] tried_count: int new_count: int map_addr: Dict[str, int] map_info: Dict[int, ExtendedPeerInfo] last_good: int tried_collisions: List[int] used_new_matrix_positions: Set[Tuple[int, int]] used_tried_matrix_positions: Set[Tuple[int, int]] allow_private_subnets: bool def __init__(self) -> None: self.clear() self.lock: Lock = Lock() def clear(self) -> None: self.id_count = 0 self.key = randbits(256) self.random_pos = [] self.tried_matrix = [[-1 for x in range(BUCKET_SIZE)] for y in range(TRIED_BUCKET_COUNT)] self.new_matrix = [[-1 for x in range(BUCKET_SIZE)] for y in range(NEW_BUCKET_COUNT)] self.tried_count = 0 self.new_count = 0 self.map_addr = {} self.map_info = {} self.last_good = 1 self.tried_collisions = [] self.used_new_matrix_positions = set() self.used_tried_matrix_positions = set() self.allow_private_subnets = False def make_private_subnets_valid(self) -> None: self.allow_private_subnets = True # Use only this method for modifying new matrix. def _set_new_matrix(self, row: int, col: int, value: int) -> None: self.new_matrix[row][col] = value if value == -1: if (row, col) in self.used_new_matrix_positions: self.used_new_matrix_positions.remove((row, col)) else: if (row, col) not in self.used_new_matrix_positions: self.used_new_matrix_positions.add((row, col)) # Use only this method for modifying tried matrix. 
def _set_tried_matrix(self, row: int, col: int, value: int) -> None: self.tried_matrix[row][col] = value if value == -1: if (row, col) in self.used_tried_matrix_positions: self.used_tried_matrix_positions.remove((row, col)) else: if (row, col) not in self.used_tried_matrix_positions: self.used_tried_matrix_positions.add((row, col)) def load_used_table_positions(self) -> None: self.used_new_matrix_positions = set() self.used_tried_matrix_positions = set() for bucket in range(NEW_BUCKET_COUNT): for pos in range(BUCKET_SIZE): if self.new_matrix[bucket][pos] != -1: self.used_new_matrix_positions.add((bucket, pos)) for bucket in range(TRIED_BUCKET_COUNT): for pos in range(BUCKET_SIZE): if self.tried_matrix[bucket][pos] != -1: self.used_tried_matrix_positions.add((bucket, pos)) def create_(self, addr: TimestampedPeerInfo, addr_src: Optional[PeerInfo]) -> Tuple[ExtendedPeerInfo, int]: self.id_count += 1 node_id = self.id_count self.map_info[node_id] = ExtendedPeerInfo(addr, addr_src) self.map_addr[addr.host] = node_id self.map_info[node_id].random_pos = len(self.random_pos) self.random_pos.append(node_id) return (self.map_info[node_id], node_id) def find_(self, addr: PeerInfo) -> Tuple[Optional[ExtendedPeerInfo], Optional[int]]: if addr.host not in self.map_addr: return (None, None) node_id = self.map_addr[addr.host] if node_id not in self.map_info: return (None, node_id) return (self.map_info[node_id], node_id) def swap_random_(self, rand_pos_1: int, rand_pos_2: int) -> None: if rand_pos_1 == rand_pos_2: return None assert rand_pos_1 < len(self.random_pos) and rand_pos_2 < len(self.random_pos) node_id_1 = self.random_pos[rand_pos_1] node_id_2 = self.random_pos[rand_pos_2] self.map_info[node_id_1].random_pos = rand_pos_2 self.map_info[node_id_2].random_pos = rand_pos_1 self.random_pos[rand_pos_1] = node_id_2 self.random_pos[rand_pos_2] = node_id_1 def make_tried_(self, info: ExtendedPeerInfo, node_id: int) -> None: for bucket in range(NEW_BUCKET_COUNT): pos = 
info.get_bucket_position(self.key, True, bucket) if self.new_matrix[bucket][pos] == node_id: self._set_new_matrix(bucket, pos, -1) info.ref_count -= 1 assert info.ref_count == 0 self.new_count -= 1 cur_bucket = info.get_tried_bucket(self.key) cur_bucket_pos = info.get_bucket_position(self.key, False, cur_bucket) if self.tried_matrix[cur_bucket][cur_bucket_pos] != -1: # Evict the old node from the tried table. node_id_evict = self.tried_matrix[cur_bucket][cur_bucket_pos] assert node_id_evict in self.map_info old_info = self.map_info[node_id_evict] old_info.is_tried = False self._set_tried_matrix(cur_bucket, cur_bucket_pos, -1) self.tried_count -= 1 # Find its position into new table. new_bucket = old_info.get_new_bucket(self.key) new_bucket_pos = old_info.get_bucket_position(self.key, True, new_bucket) self.clear_new_(new_bucket, new_bucket_pos) old_info.ref_count = 1 self._set_new_matrix(new_bucket, new_bucket_pos, node_id_evict) self.new_count += 1 self._set_tried_matrix(cur_bucket, cur_bucket_pos, node_id) self.tried_count += 1 info.is_tried = True def clear_new_(self, bucket: int, pos: int) -> None: if self.new_matrix[bucket][pos] != -1: delete_id = self.new_matrix[bucket][pos] delete_info = self.map_info[delete_id] assert delete_info.ref_count > 0 delete_info.ref_count -= 1 self._set_new_matrix(bucket, pos, -1) if delete_info.ref_count == 0: self.delete_new_entry_(delete_id) def mark_good_(self, addr: PeerInfo, test_before_evict: bool, timestamp: int) -> None: self.last_good = timestamp (info, node_id) = self.find_(addr) if not addr.is_valid(self.allow_private_subnets): return None if info is None: return None if node_id is None: return None if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port): return None # update info info.last_success = timestamp info.last_try = timestamp info.num_attempts = 0 # timestamp is not updated here, to avoid leaking information about # currently-connected peers. 
# if it is already in the tried set, don't do anything else if info.is_tried: return None # find a bucket it is in now bucket_rand = randrange(NEW_BUCKET_COUNT) new_bucket = -1 for n in range(NEW_BUCKET_COUNT): cur_new_bucket = (n + bucket_rand) % NEW_BUCKET_COUNT cur_new_bucket_pos = info.get_bucket_position(self.key, True, cur_new_bucket) if self.new_matrix[cur_new_bucket][cur_new_bucket_pos] == node_id: new_bucket = cur_new_bucket break # if no bucket is found, something bad happened; if new_bucket == -1: return None # NOTE(Florin): Double check this. It's not used anywhere else. # which tried bucket to move the entry to tried_bucket = info.get_tried_bucket(self.key) tried_bucket_pos = info.get_bucket_position(self.key, False, tried_bucket) # Will moving this address into tried evict another entry? if test_before_evict and self.tried_matrix[tried_bucket][tried_bucket_pos] != -1: if len(self.tried_collisions) < TRIED_COLLISION_SIZE: if node_id not in self.tried_collisions: self.tried_collisions.append(node_id) else: self.make_tried_(info, node_id) def delete_new_entry_(self, node_id: int) -> None: info = self.map_info[node_id] if info is None or info.random_pos is None: return None self.swap_random_(info.random_pos, len(self.random_pos) - 1) self.random_pos = self.random_pos[:-1] del self.map_addr[info.peer_info.host] del self.map_info[node_id] self.new_count -= 1 def add_to_new_table_(self, addr: TimestampedPeerInfo, source: Optional[PeerInfo], penalty: int) -> bool: is_unique = False peer_info = PeerInfo( addr.host, addr.port, ) if not peer_info.is_valid(self.allow_private_subnets): return False (info, node_id) = self.find_(peer_info) if info is not None and info.peer_info.host == addr.host and info.peer_info.port == addr.port: penalty = 0 if info is not None: # periodically update timestamp currently_online = time.time() - addr.timestamp < 24 * 60 * 60 update_interval = 60 * 60 if currently_online else 24 * 60 * 60 if addr.timestamp > 0 and ( info.timestamp > 
0 or info.timestamp < addr.timestamp - update_interval - penalty ): info.timestamp = max(0, addr.timestamp - penalty) # do not update if no new information is present if addr.timestamp == 0 or (info.timestamp > 0 and addr.timestamp <= info.timestamp): return False # do not update if the entry was already in the "tried" table if info.is_tried: return False # do not update if the max reference count is reached if info.ref_count == NEW_BUCKETS_PER_ADDRESS: return False # stochastic test: previous ref_count == N: 2^N times harder to increase it factor = 1 << info.ref_count if factor > 1 and randrange(factor) != 0: return False else: (info, node_id) = self.create_(addr, source) info.timestamp = max(0, info.timestamp - penalty) self.new_count += 1 is_unique = True new_bucket = info.get_new_bucket(self.key, source) new_bucket_pos = info.get_bucket_position(self.key, True, new_bucket) if self.new_matrix[new_bucket][new_bucket_pos] != node_id: add_to_new = self.new_matrix[new_bucket][new_bucket_pos] == -1 if not add_to_new: info_existing = self.map_info[self.new_matrix[new_bucket][new_bucket_pos]] if info_existing.is_terrible() or (info_existing.ref_count > 1 and info.ref_count == 0): add_to_new = True if add_to_new: self.clear_new_(new_bucket, new_bucket_pos) info.ref_count += 1 if node_id is not None: self._set_new_matrix(new_bucket, new_bucket_pos, node_id) else: if info.ref_count == 0: if node_id is not None: self.delete_new_entry_(node_id) return is_unique def attempt_(self, addr: PeerInfo, count_failures: bool, timestamp: int) -> None: info, _ = self.find_(addr) if info is None: return None if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port): return None info.last_try = timestamp if count_failures and info.last_count_attempt < self.last_good: info.last_count_attempt = timestamp info.num_attempts += 1 def select_peer_(self, new_only: bool) -> Optional[ExtendedPeerInfo]: if len(self.random_pos) == 0: return None if new_only and self.new_count 
== 0: return None # Use a 50% chance for choosing between tried and new table entries. if not new_only and self.tried_count > 0 and (self.new_count == 0 or randrange(2) == 0): chance = 1.0 start = time.time() cached_tried_matrix_positions: List[Tuple[int, int]] = [] if len(self.used_tried_matrix_positions) < math.sqrt(TRIED_BUCKET_COUNT * BUCKET_SIZE): cached_tried_matrix_positions = list(self.used_tried_matrix_positions) while True: if len(self.used_tried_matrix_positions) < math.sqrt(TRIED_BUCKET_COUNT * BUCKET_SIZE): if len(self.used_tried_matrix_positions) == 0: log.error(f"Empty tried table, but tried_count shows {self.tried_count}.") return None # The table is sparse, randomly pick from positions list. index = randrange(len(cached_tried_matrix_positions)) tried_bucket, tried_bucket_pos = cached_tried_matrix_positions[index] else: # The table is dense, randomly trying positions is faster than loading positions list. tried_bucket = randrange(TRIED_BUCKET_COUNT) tried_bucket_pos = randrange(BUCKET_SIZE) while self.tried_matrix[tried_bucket][tried_bucket_pos] == -1: tried_bucket = (tried_bucket + randbits(LOG_TRIED_BUCKET_COUNT)) % TRIED_BUCKET_COUNT tried_bucket_pos = (tried_bucket_pos + randbits(LOG_BUCKET_SIZE)) % BUCKET_SIZE node_id = self.tried_matrix[tried_bucket][tried_bucket_pos] assert node_id != -1 info = self.map_info[node_id] if randbits(30) < (chance * info.get_selection_chance() * (1 << 30)): end = time.time() log.debug(f"address_manager.select_peer took {(end - start):.2e} seconds in tried table.") return info chance *= 1.2 else: chance = 1.0 start = time.time() cached_new_matrix_positions: List[Tuple[int, int]] = [] if len(self.used_new_matrix_positions) < math.sqrt(NEW_BUCKET_COUNT * BUCKET_SIZE): cached_new_matrix_positions = list(self.used_new_matrix_positions) while True: if len(self.used_new_matrix_positions) < math.sqrt(NEW_BUCKET_COUNT * BUCKET_SIZE): if len(self.used_new_matrix_positions) == 0: log.error(f"Empty new table, but new_count 
shows {self.new_count}.") return None index = randrange(len(cached_new_matrix_positions)) new_bucket, new_bucket_pos = cached_new_matrix_positions[index] else: new_bucket = randrange(NEW_BUCKET_COUNT) new_bucket_pos = randrange(BUCKET_SIZE) while self.new_matrix[new_bucket][new_bucket_pos] == -1: new_bucket = (new_bucket + randbits(LOG_NEW_BUCKET_COUNT)) % NEW_BUCKET_COUNT new_bucket_pos = (new_bucket_pos + randbits(LOG_BUCKET_SIZE)) % BUCKET_SIZE node_id = self.new_matrix[new_bucket][new_bucket_pos] assert node_id != -1 info = self.map_info[node_id] if randbits(30) < chance * info.get_selection_chance() * (1 << 30): end = time.time() log.debug(f"address_manager.select_peer took {(end - start):.2e} seconds in new table.") return info chance *= 1.2 def resolve_tried_collisions_(self) -> None: for node_id in self.tried_collisions[:]: resolved = False if node_id not in self.map_info: resolved = True else: info = self.map_info[node_id] peer = info.peer_info tried_bucket = info.get_tried_bucket(self.key) tried_bucket_pos = info.get_bucket_position(self.key, False, tried_bucket) if self.tried_matrix[tried_bucket][tried_bucket_pos] != -1: old_id = self.tried_matrix[tried_bucket][tried_bucket_pos] old_info = self.map_info[old_id] if time.time() - old_info.last_success < 4 * 60 * 60: resolved = True elif time.time() - old_info.last_try < 4 * 60 * 60: if time.time() - old_info.last_try > 60: self.mark_good_(peer, False, math.floor(time.time())) resolved = True elif time.time() - info.last_success > 40 * 60: self.mark_good_(peer, False, math.floor(time.time())) resolved = True else: self.mark_good_(peer, False, math.floor(time.time())) resolved = True if resolved: self.tried_collisions.remove(node_id) def select_tried_collision_(self) -> Optional[ExtendedPeerInfo]: if len(self.tried_collisions) == 0: return None new_id = choice(self.tried_collisions) if new_id not in self.map_info: self.tried_collisions.remove(new_id) return None new_info = self.map_info[new_id] tried_bucket 
= new_info.get_tried_bucket(self.key) tried_bucket_pos = new_info.get_bucket_position(self.key, False, tried_bucket) old_id = self.tried_matrix[tried_bucket][tried_bucket_pos] return self.map_info[old_id] def get_peers_(self) -> List[TimestampedPeerInfo]: addr: List[TimestampedPeerInfo] = [] num_nodes = math.ceil(23 * len(self.random_pos) / 100) if num_nodes > 1000: num_nodes = 1000 for n in range(len(self.random_pos)): if len(addr) >= num_nodes: return addr rand_pos = randrange(len(self.random_pos) - n) + n self.swap_random_(n, rand_pos) info = self.map_info[self.random_pos[n]] if not info.peer_info.is_valid(self.allow_private_subnets): continue if not info.is_terrible(): cur_peer_info = TimestampedPeerInfo( info.peer_info.host, uint16(info.peer_info.port), uint64(info.timestamp), ) addr.append(cur_peer_info) return addr def cleanup(self, max_timestamp_difference: int, max_consecutive_failures: int): now = int(math.floor(time.time())) for bucket in range(NEW_BUCKET_COUNT): for pos in range(BUCKET_SIZE): if self.new_matrix[bucket][pos] != -1: node_id = self.new_matrix[bucket][pos] cur_info = self.map_info[node_id] if ( cur_info.timestamp < now - max_timestamp_difference and cur_info.num_attempts >= max_consecutive_failures ): self.clear_new_(bucket, pos) def connect_(self, addr: PeerInfo, timestamp: int): info, _ = self.find_(addr) if info is None: return None # check whether we are talking about the exact same peer if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port): return None update_interval = 20 * 60 if timestamp - info.timestamp > update_interval: info.timestamp = timestamp async def size(self) -> int: async with self.lock: return len(self.random_pos) async def add_to_new_table( self, addresses: List[TimestampedPeerInfo], source: Optional[PeerInfo] = None, penalty: int = 0, ) -> bool: is_added = False async with self.lock: for addr in addresses: cur_peer_added = self.add_to_new_table_(addr, source, penalty) is_added = is_added or 
cur_peer_added return is_added # Mark an entry as accessible. async def mark_good( self, addr: PeerInfo, test_before_evict: bool = True, timestamp: int = -1, ): if timestamp == -1: timestamp = math.floor(time.time()) async with self.lock: self.mark_good_(addr, test_before_evict, timestamp) # Mark an entry as connection attempted to. async def attempt( self, addr: PeerInfo, count_failures: bool, timestamp: int = -1, ): if timestamp == -1: timestamp = math.floor(time.time()) async with self.lock: self.attempt_(addr, count_failures, timestamp) # See if any to-be-evicted tried table entries have been tested and if so resolve the collisions. async def resolve_tried_collisions(self): async with self.lock: self.resolve_tried_collisions_() # Randomly select an address in tried that another address is attempting to evict. async def select_tried_collision(self) -> Optional[ExtendedPeerInfo]: async with self.lock: return self.select_tried_collision_() # Choose an address to connect to. async def select_peer(self, new_only: bool = False) -> Optional[ExtendedPeerInfo]: async with self.lock: return self.select_peer_(new_only) # Return a bunch of addresses, selected at random. async def get_peers(self) -> List[TimestampedPeerInfo]: async with self.lock: return self.get_peers_() async def connect(self, addr: PeerInfo, timestamp: int = -1): if timestamp == -1: timestamp = math.floor(time.time()) async with self.lock: return self.connect_(addr, timestamp)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_farmer.py
flax/server/start_farmer.py
from __future__ import annotations import pathlib import sys from typing import Dict, Optional from flax.consensus.constants import ConsensusConstants from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.farmer.farmer import Farmer from flax.farmer.farmer_api import FarmerAPI from flax.rpc.farmer_rpc_api import FarmerRpcApi from flax.server.outbound_message import NodeType from flax.server.start_service import RpcInfo, Service, async_run from flax.types.peer_info import PeerInfo from flax.util.flax_logging import initialize_service_logging from flax.util.config import load_config, load_config_cli from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.keychain import Keychain # See: https://bugs.python.org/issue29288 "".encode("idna") SERVICE_NAME = "farmer" def create_farmer_service( root_path: pathlib.Path, config: Dict, config_pool: Dict, consensus_constants: ConsensusConstants, keychain: Optional[Keychain] = None, connect_to_daemon: bool = True, ) -> Service[Farmer]: service_config = config[SERVICE_NAME] connect_peers = [] fnp = service_config.get("full_node_peer") if fnp is not None: connect_peers.append(PeerInfo(fnp["host"], fnp["port"])) overrides = service_config["network_overrides"]["constants"][service_config["selected_network"]] updated_constants = consensus_constants.replace_str_to_bytes(**overrides) farmer = Farmer( root_path, service_config, config_pool, consensus_constants=updated_constants, local_keychain=keychain ) peer_api = FarmerAPI(farmer) network_id = service_config["selected_network"] rpc_info: Optional[RpcInfo] = None if service_config["start_rpc_server"]: rpc_info = (FarmerRpcApi, service_config["rpc_port"]) return Service( root_path=root_path, config=config, node=farmer, peer_api=peer_api, node_type=NodeType.FARMER, advertised_port=service_config["port"], service_name=SERVICE_NAME, server_listen_ports=[service_config["port"]], connect_peers=connect_peers, on_connect_callback=farmer.on_connect, 
network_id=network_id, rpc_info=rpc_info, connect_to_daemon=connect_to_daemon, ) async def async_main() -> int: # TODO: refactor to avoid the double load config = load_config(DEFAULT_ROOT_PATH, "config.yaml") service_config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) config[SERVICE_NAME] = service_config config_pool = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", "pool") config["pool"] = config_pool initialize_service_logging(service_name=SERVICE_NAME, config=config) service = create_farmer_service(DEFAULT_ROOT_PATH, config, config_pool, DEFAULT_CONSTANTS) await service.setup_process_global_state() await service.run() return 0 def main() -> int: return async_run(async_main()) if __name__ == "__main__": sys.exit(main())
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/peer_store_resolver.py
flax/server/peer_store_resolver.py
import os from pathlib import Path from typing import Dict, Optional class PeerStoreResolver: """ Determines the peers data file path using values from the config """ def __init__( self, root_path: Path, config: Dict, *, selected_network: str, peers_file_path_key: str, # config key for the peers data file relative path legacy_peer_db_path_key: str, # config key for the deprecated peer db path default_peers_file_path: str, # default value for the peers data file relative path ): self.root_path = root_path self.config = config self.selected_network = selected_network self.peers_file_path_key = peers_file_path_key self.legacy_peer_db_path_key = legacy_peer_db_path_key self.default_peers_file_path = default_peers_file_path def _resolve_and_update_config(self) -> Path: """ Resolve the peers data file path from the config, and update the config if necessary. We leave the legacy peer db path in the config to support downgrading. If peers_file_path_key is not found in the config, we'll attempt to derive the path from the the config's legacy_peer_db_path_key value. 
""" peers_file_path_str: Optional[str] = self.config.get(self.peers_file_path_key) if peers_file_path_str is None: # Check if the legacy peer db path exists and derive a new path from it peer_db_path: Optional[str] = self.config.get(self.legacy_peer_db_path_key) if peer_db_path is not None: # Use the legacy path's directory with the new peers data filename peers_file_path_str = os.fspath(Path(peer_db_path).parent / self._peers_file_name) else: # Neither value is present in the config, use the default peers_file_path_str = os.fspath(Path(self.default_peers_file_path).parent / self._peers_file_name) # Update the config self.config[self.peers_file_path_key] = peers_file_path_str return self.root_path / Path(peers_file_path_str) @property def _peers_file_name(self) -> str: """ Internal property to get the name component of the peers data file path """ if self.selected_network == "mainnet": return Path(self.default_peers_file_path).name else: # For testnets, we include the network name in the peers data filename path = Path(self.default_peers_file_path) return path.with_name(f"{path.stem}_{self.selected_network}{path.suffix}").name @property def peers_file_path(self) -> Path: """ Path to the peers data file, resolved using data from the config """ return self._resolve_and_update_config() @property def legacy_peer_db_path(self) -> Optional[Path]: """ Path to the legacy peer db file, resolved using data from the config. The legacy peer db is only used for migration to the new format. We're only concerned about migrating mainnet users, so we purposefully omit the testnet filename change. """ peer_db_path: Optional[str] = self.config.get(self.legacy_peer_db_path_key) if peer_db_path is not None: return self.root_path / Path(peer_db_path) return None
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/ws_connection.py
flax/server/ws_connection.py
from __future__ import annotations import asyncio import contextlib import logging import time import traceback from typing import Any, Callable, Dict, List, Optional, Tuple from aiohttp import WSCloseCode, WSMessage, WSMsgType from flax.cmds.init_funcs import flax_full_version_str from flax.protocols.protocol_message_types import ProtocolMessageTypes from flax.protocols.protocol_state_machine import message_response_ok from flax.protocols.protocol_timing import INTERNAL_PROTOCOL_ERROR_BAN_SECONDS from flax.protocols.shared_protocol import Capability, Handshake from flax.server.outbound_message import Message, NodeType, make_msg from flax.server.rate_limits import RateLimiter from flax.types.peer_info import PeerInfo from flax.util.api_decorators import get_metadata from flax.util.errors import Err, ProtocolError from flax.util.ints import uint8, uint16 # Each message is prepended with LENGTH_BYTES bytes specifying the length from flax.util.network import class_for_type, is_localhost # Max size 2^(8*4) which is around 4GiB LENGTH_BYTES: int = 4 class WSFlaxConnection: """ Represents a connection to another node. Local host and port are ours, while peer host and port are the host and port of the peer that we are connected to. Node_id and connection_type are set after the handshake is performed in this connection. """ def __init__( self, local_type: NodeType, ws: Any, # Websocket server_port: int, log: logging.Logger, is_outbound: bool, is_feeler: bool, # Special type of connection, that disconnects after the handshake. 
peer_host, incoming_queue, close_callback: Callable, peer_id, inbound_rate_limit_percent: int, outbound_rate_limit_percent: int, local_capabilities_for_handshake: List[Tuple[uint16, str]], close_event=None, session=None, ): # Local properties self.ws: Any = ws self.local_type = local_type self.local_port = server_port self.local_capabilities_for_handshake = local_capabilities_for_handshake self.local_capabilities: List[Capability] = [ Capability(x[0]) for x in local_capabilities_for_handshake if x[1] == "1" ] # Remote properties self.peer_host = peer_host peername = self.ws._writer.transport.get_extra_info("peername") if peername is None: raise ValueError(f"Was not able to get peername from {self.peer_host}") connection_port = peername[1] self.peer_port = connection_port self.peer_server_port: Optional[uint16] = None self.peer_node_id = peer_id self.log = log # connection properties self.is_outbound = is_outbound self.is_feeler = is_feeler # FlaxConnection metrics self.creation_time = time.time() self.bytes_read = 0 self.bytes_written = 0 self.last_message_time: float = 0 # Messaging self.incoming_queue: asyncio.Queue = incoming_queue self.outgoing_queue: asyncio.Queue = asyncio.Queue() self.inbound_task: Optional[asyncio.Task] = None self.outbound_task: Optional[asyncio.Task] = None self.active: bool = False # once handshake is successful this will be changed to True self.close_event: asyncio.Event = close_event self.session = session self.close_callback = close_callback self.pending_requests: Dict[uint16, asyncio.Event] = {} self.request_results: Dict[uint16, Message] = {} self.closed = False self.connection_type: Optional[NodeType] = None if is_outbound: self.request_nonce: uint16 = uint16(0) else: # Different nonce to reduce chances of overlap. Each peer will increment the nonce by one for each # request. 
The receiving peer (not is_outbound), will use 2^15 to 2^16 - 1 self.request_nonce = uint16(2**15) # This means that even if the other peer's boundaries for each minute are not aligned, we will not # disconnect. Also it allows a little flexibility. self.outbound_rate_limiter = RateLimiter(incoming=False, percentage_of_limit=outbound_rate_limit_percent) self.inbound_rate_limiter = RateLimiter(incoming=True, percentage_of_limit=inbound_rate_limit_percent) self.peer_capabilities: List[Capability] = [] # Used by the Flax Seeder. self.version = "" self.protocol_version = "" async def perform_handshake( self, network_id: str, protocol_version: str, server_port: int, local_type: NodeType, ) -> None: if self.is_outbound: outbound_handshake = make_msg( ProtocolMessageTypes.handshake, Handshake( 'flax-' + network_id, protocol_version, flax_full_version_str(), uint16(server_port), uint8(local_type.value), self.local_capabilities_for_handshake, ), ) assert outbound_handshake is not None await self._send_message(outbound_handshake) inbound_handshake_msg = await self._read_one_message() if inbound_handshake_msg is None: raise ProtocolError(Err.INVALID_HANDSHAKE) inbound_handshake = Handshake.from_bytes(inbound_handshake_msg.data) # Handle case of invalid ProtocolMessageType try: message_type: ProtocolMessageTypes = ProtocolMessageTypes(inbound_handshake_msg.type) except Exception: raise ProtocolError(Err.INVALID_HANDSHAKE) if message_type != ProtocolMessageTypes.handshake: raise ProtocolError(Err.INVALID_HANDSHAKE) if inbound_handshake.network_id != 'flax-' + network_id: raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID) self.version = inbound_handshake.software_version self.protocol_version = inbound_handshake.protocol_version self.peer_server_port = inbound_handshake.server_port self.connection_type = NodeType(inbound_handshake.node_type) # "1" means capability is enabled self.peer_capabilities = [Capability(x[0]) for x in inbound_handshake.capabilities if x[1] == "1"] else: 
try: message = await self._read_one_message() except Exception: raise ProtocolError(Err.INVALID_HANDSHAKE) if message is None: raise ProtocolError(Err.INVALID_HANDSHAKE) # Handle case of invalid ProtocolMessageType try: message_type = ProtocolMessageTypes(message.type) except Exception: raise ProtocolError(Err.INVALID_HANDSHAKE) if message_type != ProtocolMessageTypes.handshake: raise ProtocolError(Err.INVALID_HANDSHAKE) inbound_handshake = Handshake.from_bytes(message.data) if inbound_handshake.network_id != 'flax-' + network_id: raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID) outbound_handshake = make_msg( ProtocolMessageTypes.handshake, Handshake( 'flax-' + network_id, protocol_version, flax_full_version_str(), uint16(server_port), uint8(local_type.value), self.local_capabilities_for_handshake, ), ) await self._send_message(outbound_handshake) self.peer_server_port = inbound_handshake.server_port self.connection_type = NodeType(inbound_handshake.node_type) # "1" means capability is enabled self.peer_capabilities = [Capability(x[0]) for x in inbound_handshake.capabilities if x[1] == "1"] self.outbound_task = asyncio.create_task(self.outbound_handler()) self.inbound_task = asyncio.create_task(self.inbound_handler()) async def close(self, ban_time: int = 0, ws_close_code: WSCloseCode = WSCloseCode.OK, error: Optional[Err] = None): """ Closes the connection, and finally calls the close_callback on the server, so the connection gets removed from the global list. 
""" if self.closed: return None self.closed = True if error is None: message = b"" else: message = str(int(error.value)).encode("utf-8") try: if self.inbound_task is not None: self.inbound_task.cancel() if self.outbound_task is not None: self.outbound_task.cancel() if self.ws is not None and self.ws._closed is False: await self.ws.close(code=ws_close_code, message=message) if self.session is not None: await self.session.close() if self.close_event is not None: self.close_event.set() self.cancel_pending_requests() except Exception: error_stack = traceback.format_exc() self.log.warning(f"Exception closing socket: {error_stack}") try: self.close_callback(self, ban_time) except Exception: error_stack = traceback.format_exc() self.log.error(f"Error closing1: {error_stack}") raise try: self.close_callback(self, ban_time) except Exception: error_stack = traceback.format_exc() self.log.error(f"Error closing2: {error_stack}") async def ban_peer_bad_protocol(self, log_err_msg: str): """Ban peer for protocol violation""" ban_seconds = INTERNAL_PROTOCOL_ERROR_BAN_SECONDS self.log.error(f"Banning peer for {ban_seconds} seconds: {self.peer_host} {log_err_msg}") await self.close(ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.INVALID_PROTOCOL_MESSAGE) def cancel_pending_requests(self): for message_id, event in self.pending_requests.items(): try: event.set() except Exception as e: self.log.error(f"Failed setting event for {message_id}: {e} {traceback.format_exc()}") async def outbound_handler(self) -> None: try: while not self.closed: msg = await self.outgoing_queue.get() if msg is not None: await self._send_message(msg) except asyncio.CancelledError: pass except Exception as e: expected = False if isinstance(e, (BrokenPipeError, ConnectionResetError, TimeoutError)): expected = True elif isinstance(e, OSError): if e.errno in {113}: expected = True if expected: self.log.warning(f"{e} {self.peer_host}") else: error_stack = traceback.format_exc() self.log.error(f"Exception: {e} with 
{self.peer_host}") self.log.error(f"Exception Stack: {error_stack}") async def inbound_handler(self): try: while not self.closed: message: Message = await self._read_one_message() if message is not None: if message.id in self.pending_requests: self.request_results[message.id] = message event = self.pending_requests[message.id] event.set() else: await self.incoming_queue.put((message, self)) else: continue except asyncio.CancelledError: self.log.debug("Inbound_handler task cancelled") except Exception as e: error_stack = traceback.format_exc() self.log.error(f"Exception: {e}") self.log.error(f"Exception Stack: {error_stack}") async def send_message(self, message: Message) -> bool: """Send message sends a message with no tracking / callback.""" if self.closed: return False await self.outgoing_queue.put(message) return True def __getattr__(self, attr_name: str): # TODO KWARGS async def invoke(*args, **kwargs): timeout = 60 if "timeout" in kwargs: timeout = kwargs["timeout"] attribute = getattr(class_for_type(self.connection_type), attr_name, None) if attribute is None: raise AttributeError(f"Node type {self.connection_type} does not have method {attr_name}") msg: Message = Message(uint8(getattr(ProtocolMessageTypes, attr_name).value), None, args[0]) request_start_t = time.time() result = await self.send_request(msg, timeout) self.log.debug( f"Time for request {attr_name}: {self.get_peer_logging()} = {time.time() - request_start_t}, " f"None? 
{result is None}" ) if result is not None: sent_message_type = ProtocolMessageTypes(msg.type) recv_message_type = ProtocolMessageTypes(result.type) if not message_response_ok(sent_message_type, recv_message_type): # peer protocol violation error_message = f"WSConnection.invoke sent message {sent_message_type.name} " f"but received {recv_message_type.name}" await self.ban_peer_bad_protocol(self.error_message) raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [error_message]) recv_method = getattr(class_for_type(self.local_type), recv_message_type.name) result = get_metadata(recv_method).message_class.from_bytes(result.data) return result return invoke async def send_request(self, message_no_id: Message, timeout: int) -> Optional[Message]: """Sends a message and waits for a response.""" if self.closed: return None # We will wait for this event, it will be set either by the response, or the timeout event = asyncio.Event() # The request nonce is an integer between 0 and 2**16 - 1, which is used to match requests to responses # If is_outbound, 0 <= nonce < 2^15, else 2^15 <= nonce < 2^16 request_id = self.request_nonce if self.is_outbound: self.request_nonce = uint16(self.request_nonce + 1) if self.request_nonce != (2**15 - 1) else uint16(0) else: self.request_nonce = ( uint16(self.request_nonce + 1) if self.request_nonce != (2**16 - 1) else uint16(2**15) ) message = Message(message_no_id.type, request_id, message_no_id.data) assert message.id is not None self.pending_requests[message.id] = event await self.outgoing_queue.put(message) # Either the result is available below or not, no need to detect the timeout error with contextlib.suppress(asyncio.TimeoutError): await asyncio.wait_for(event.wait(), timeout=timeout) self.pending_requests.pop(message.id) result: Optional[Message] = None if message.id in self.request_results: result = self.request_results[message.id] assert result is not None self.log.debug(f"<- {ProtocolMessageTypes(result.type).name} from: 
{self.peer_host}:{self.peer_port}") self.request_results.pop(message.id) return result async def send_messages(self, messages: List[Message]): if self.closed: return None for message in messages: await self.outgoing_queue.put(message) async def _wait_and_retry(self, msg: Message, queue: asyncio.Queue): try: await asyncio.sleep(1) await queue.put(msg) except Exception as e: self.log.debug(f"Exception {e} while waiting to retry sending rate limited message") return None async def _send_message(self, message: Message): encoded: bytes = bytes(message) size = len(encoded) assert len(encoded) < (2 ** (LENGTH_BYTES * 8)) if not self.outbound_rate_limiter.process_msg_and_check( message, self.local_capabilities, self.peer_capabilities ): if not is_localhost(self.peer_host): self.log.warning( f"Rate limiting ourselves. message type: {ProtocolMessageTypes(message.type).name}, " f"peer: {self.peer_host}" ) # TODO: fix this special case. This function has rate limits which are too low. if ProtocolMessageTypes(message.type) != ProtocolMessageTypes.respond_peers: asyncio.create_task(self._wait_and_retry(message, self.outgoing_queue)) return None else: self.log.debug( f"Not rate limiting ourselves. 
message type: {ProtocolMessageTypes(message.type).name}, " f"peer: {self.peer_host}" ) await self.ws.send_bytes(encoded) self.log.debug(f"-> {ProtocolMessageTypes(message.type).name} to peer {self.peer_host} {self.peer_node_id}") self.bytes_written += size async def _read_one_message(self) -> Optional[Message]: try: message: WSMessage = await self.ws.receive(30) except asyncio.TimeoutError: # self.ws._closed if we didn't receive a ping / pong if self.ws._closed: asyncio.create_task(self.close()) await asyncio.sleep(3) return None return None if self.connection_type is not None: connection_type_str = NodeType(self.connection_type).name.lower() else: connection_type_str = "" if message.type == WSMsgType.CLOSING: self.log.debug( f"Closing connection to {connection_type_str} {self.peer_host}:" f"{self.peer_server_port}/" f"{self.peer_port}" ) asyncio.create_task(self.close()) await asyncio.sleep(3) elif message.type == WSMsgType.CLOSE: self.log.debug( f"Peer closed connection {connection_type_str} {self.peer_host}:" f"{self.peer_server_port}/" f"{self.peer_port}" ) asyncio.create_task(self.close()) await asyncio.sleep(3) elif message.type == WSMsgType.CLOSED: if not self.closed: asyncio.create_task(self.close()) await asyncio.sleep(3) return None elif message.type == WSMsgType.BINARY: data = message.data full_message_loaded: Message = Message.from_bytes(data) self.bytes_read += len(data) self.last_message_time = time.time() try: message_type = ProtocolMessageTypes(full_message_loaded.type).name except Exception: message_type = "Unknown" if not self.inbound_rate_limiter.process_msg_and_check( full_message_loaded, self.local_capabilities, self.peer_capabilities ): if self.local_type == NodeType.FULL_NODE and not is_localhost(self.peer_host): self.log.error( f"Peer has been rate limited and will be disconnected: {self.peer_host}, " f"message: {message_type}" ) # Only full node disconnects peers, to prevent abuse and crashing timelords, farmers, etc 
asyncio.create_task(self.close(300)) await asyncio.sleep(3) return None else: self.log.debug( f"Peer surpassed rate limit {self.peer_host}, message: {message_type}, " f"port {self.peer_port} but not disconnecting" ) return full_message_loaded return full_message_loaded elif message.type == WSMsgType.ERROR: self.log.error(f"WebSocket Error: {message}") if message.data.code == WSCloseCode.MESSAGE_TOO_BIG: asyncio.create_task(self.close(300)) else: asyncio.create_task(self.close()) await asyncio.sleep(3) else: self.log.error(f"Unexpected WebSocket message type: {message}") asyncio.create_task(self.close()) await asyncio.sleep(3) return None # Used by the Flax Seeder. def get_version(self) -> str: return self.version def get_tls_version(self) -> str: ssl_obj = self.ws._writer.transport.get_extra_info("ssl_object") if ssl_obj is not None: return ssl_obj.version() else: return "unknown" def get_peer_info(self) -> Optional[PeerInfo]: result = self.ws._writer.transport.get_extra_info("peername") if result is None: return None connection_host = result[0] port = self.peer_server_port if self.peer_server_port is not None else self.peer_port return PeerInfo(connection_host, port) def get_peer_logging(self) -> PeerInfo: info: Optional[PeerInfo] = self.get_peer_info() if info is None: # in this case, we will use self.peer_host which is friendlier for logging port = self.peer_server_port if self.peer_server_port is not None else self.peer_port return PeerInfo(self.peer_host, port) else: return info
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/__init__.py
flax/server/__init__.py
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/ssl_context.py
flax/server/ssl_context.py
from __future__ import annotations from pathlib import Path from typing import Dict def public_ssl_paths(path: Path, config: Dict): return ( path / config["ssl"]["public_crt"], path / config["ssl"]["public_key"], ) def private_ssl_paths(path: Path, config: Dict): return ( path / config["ssl"]["private_crt"], path / config["ssl"]["private_key"], ) def private_ssl_ca_paths(path: Path, config: Dict): return ( path / config["private_ssl_ca"]["crt"], path / config["private_ssl_ca"]["key"], ) def flax_ssl_ca_paths(path: Path, config: Dict): return ( path / config["flax_ssl_ca"]["crt"], path / config["flax_ssl_ca"]["key"], )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/server.py
flax/server/server.py
from __future__ import annotations import asyncio import logging import ssl import time import traceback from collections import Counter from ipaddress import IPv4Network, IPv6Address, IPv6Network, ip_address, ip_network from pathlib import Path from secrets import token_bytes from typing import Any, Callable from typing import Counter as typing_Counter from typing import Dict, List, Optional, Set, Tuple, Union from aiohttp import ( ClientResponseError, ClientSession, ClientTimeout, ServerDisconnectedError, WSCloseCode, client_exceptions, web, ) from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from flax.protocols.protocol_message_types import ProtocolMessageTypes from flax.protocols.protocol_state_machine import message_requires_reply from flax.protocols.protocol_timing import API_EXCEPTION_BAN_SECONDS, INVALID_PROTOCOL_BAN_SECONDS from flax.protocols.shared_protocol import protocol_version from flax.server.introducer_peers import IntroducerPeers from flax.server.outbound_message import Message, NodeType from flax.server.ssl_context import private_ssl_paths, public_ssl_paths from flax.server.ws_connection import WSFlaxConnection from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.peer_info import PeerInfo from flax.util.api_decorators import get_metadata from flax.util.errors import Err, ProtocolError from flax.util.ints import uint16 from flax.util.network import WebServer, is_in_network, is_localhost from flax.util.ssl_check import verify_ssl_certs_and_keys max_message_size = 50 * 1024 * 1024 # 50MB def ssl_context_for_server( ca_cert: Path, ca_key: Path, private_cert_path: Path, private_key_path: Path, *, check_permissions: bool = True, log: Optional[logging.Logger] = None, ) -> ssl.SSLContext: if check_permissions: verify_ssl_certs_and_keys([ca_cert, private_cert_path], [ca_key, private_key_path], log) ssl_context = 
ssl._create_unverified_context(purpose=ssl.Purpose.CLIENT_AUTH, cafile=str(ca_cert)) ssl_context.check_hostname = False ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2 ssl_context.set_ciphers( ( "ECDHE-ECDSA-AES256-GCM-SHA384:" "ECDHE-RSA-AES256-GCM-SHA384:" "ECDHE-ECDSA-CHACHA20-POLY1305:" "ECDHE-RSA-CHACHA20-POLY1305:" "ECDHE-ECDSA-AES128-GCM-SHA256:" "ECDHE-RSA-AES128-GCM-SHA256:" "ECDHE-ECDSA-AES256-SHA384:" "ECDHE-RSA-AES256-SHA384:" "ECDHE-ECDSA-AES128-SHA256:" "ECDHE-RSA-AES128-SHA256" ) ) ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path)) ssl_context.verify_mode = ssl.CERT_REQUIRED return ssl_context def ssl_context_for_root( ca_cert_file: str, *, check_permissions: bool = True, log: Optional[logging.Logger] = None ) -> ssl.SSLContext: if check_permissions: verify_ssl_certs_and_keys([Path(ca_cert_file)], [], log) ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_cert_file) return ssl_context def ssl_context_for_client( ca_cert: Path, ca_key: Path, private_cert_path: Path, private_key_path: Path, *, check_permissions: bool = True, log: Optional[logging.Logger] = None, ) -> ssl.SSLContext: if check_permissions: verify_ssl_certs_and_keys([ca_cert, private_cert_path], [ca_key, private_key_path], log) ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert)) ssl_context.check_hostname = False ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path)) ssl_context.verify_mode = ssl.CERT_REQUIRED return ssl_context def calculate_node_id(cert_path: Path) -> bytes32: pem_cert = x509.load_pem_x509_certificate(cert_path.read_bytes(), default_backend()) der_cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER) der_cert = x509.load_der_x509_certificate(der_cert_bytes, default_backend()) return bytes32(der_cert.fingerprint(hashes.SHA256())) class FlaxServer: def __init__( self, port: int, node: Any, 
api: Any, local_type: NodeType, ping_interval: int, network_id: str, inbound_rate_limit_percent: int, outbound_rate_limit_percent: int, capabilities: List[Tuple[uint16, str]], root_path: Path, config: Dict, private_ca_crt_key: Tuple[Path, Path], flax_ca_crt_key: Tuple[Path, Path], name: str = None, ): # Keeps track of all connections to and from this node. self.all_connections: Dict[bytes32, WSFlaxConnection] = {} self._port = port # TCP port to identify our node self._local_type: NodeType = local_type self._local_capabilities_for_handshake = capabilities self._ping_interval = ping_interval self._network_id = network_id self._inbound_rate_limit_percent = inbound_rate_limit_percent self._outbound_rate_limit_percent = outbound_rate_limit_percent self.log = logging.getLogger(name if name else __name__) self.log.info("Service capabilities: %s", self._local_capabilities_for_handshake) # Our unique random node id that we will send to other peers, regenerated on launch self.api = api self.node = node self.root_path = root_path self.config = config self.on_connect: Optional[Callable] = None self.incoming_messages: asyncio.Queue = asyncio.Queue() self.shut_down_event = asyncio.Event() if self._local_type is NodeType.INTRODUCER: self.introducer_peers = IntroducerPeers() ca_private_crt_path, ca_private_key_path = private_ca_crt_key flax_ca_crt_path, flax_ca_key_path = flax_ca_crt_key private_cert_path, private_key_path = None, None public_cert_path, public_key_path = None, None authenticated_client_types = {NodeType.HARVESTER} authenticated_server_types = {NodeType.HARVESTER, NodeType.FARMER, NodeType.WALLET, NodeType.DATA_LAYER} if self._local_type in authenticated_client_types: # Authenticated clients private_cert_path, private_key_path = private_ssl_paths(root_path, config) self.ssl_client_context = ssl_context_for_client( ca_private_crt_path, ca_private_key_path, private_cert_path, private_key_path ) else: # Public clients public_cert_path, public_key_path = 
public_ssl_paths(root_path, config) self.ssl_client_context = ssl_context_for_client( flax_ca_crt_path, flax_ca_key_path, public_cert_path, public_key_path ) if self._local_type in authenticated_server_types: # Authenticated servers private_cert_path, private_key_path = private_ssl_paths(root_path, config) self.ssl_context = ssl_context_for_server( ca_private_crt_path, ca_private_key_path, private_cert_path, private_key_path, log=self.log, ) else: # Public servers public_cert_path, public_key_path = public_ssl_paths(root_path, config) self.ssl_context = ssl_context_for_server( flax_ca_crt_path, flax_ca_key_path, public_cert_path, public_key_path, log=self.log ) # If node has public cert use that one for id, if not use private. self.node_id = calculate_node_id(private_cert_path if public_cert_path is None else public_cert_path) self.incoming_task: Optional[asyncio.Task] = None self.gc_task: Optional[asyncio.Task] = None self.webserver: Optional[WebServer] = None self.connection_close_task: Optional[asyncio.Task] = None self.received_message_callback: Optional[Callable] = None self.api_tasks: Dict[bytes32, asyncio.Task] = {} self.execute_tasks: Set[bytes32] = set() self.tasks_from_peer: Dict[bytes32, Set[bytes32]] = {} self.banned_peers: Dict[str, float] = {} self.invalid_protocol_ban_seconds = INVALID_PROTOCOL_BAN_SECONDS self.api_exception_ban_seconds = API_EXCEPTION_BAN_SECONDS self.exempt_peer_networks: List[Union[IPv4Network, IPv6Network]] = [ ip_network(net, strict=False) for net in config.get("exempt_peer_networks", []) ] def set_received_message_callback(self, callback: Callable): self.received_message_callback = callback async def garbage_collect_connections_task(self) -> None: """ Periodically checks for connections with no activity (have not sent us any data), and removes them, to allow room for other peers. 
""" is_crawler = getattr(self.node, "crawl", None) while True: await asyncio.sleep(600 if is_crawler is None else 2) to_remove: List[WSFlaxConnection] = [] for connection in self.all_connections.values(): if ( self._local_type == NodeType.FULL_NODE or self._local_type == NodeType.WALLET ) and connection.connection_type == NodeType.FULL_NODE: if is_crawler is not None: if time.time() - connection.creation_time > 5: to_remove.append(connection) else: if time.time() - connection.last_message_time > 1800: to_remove.append(connection) for connection in to_remove: self.log.debug(f"Garbage collecting connection {connection.peer_host} due to inactivity") await connection.close() # Also garbage collect banned_peers dict to_remove_ban = [] for peer_ip, ban_until_time in self.banned_peers.items(): if time.time() > ban_until_time: to_remove_ban.append(peer_ip) for peer_ip in to_remove_ban: del self.banned_peers[peer_ip] async def start_server(self, prefer_ipv6: bool, on_connect: Callable = None): if self.webserver is not None: raise RuntimeError("FlaxServer already started") if self.incoming_task is None: self.incoming_task = asyncio.create_task(self.incoming_api_task()) if self.gc_task is None: self.gc_task = asyncio.create_task(self.garbage_collect_connections_task()) if self._local_type in [NodeType.WALLET, NodeType.HARVESTER, NodeType.TIMELORD]: return None self.on_connect = on_connect self.webserver = await WebServer.create( hostname="", port=uint16(self._port), routes=[web.get("/ws", self.incoming_connection)], ssl_context=self.ssl_context, prefer_ipv6=prefer_ipv6, logger=self.log, ) self._port = int(self.webserver.listen_port) self.log.info(f"Started listening on port: {self._port}") async def incoming_connection(self, request): if getattr(self.node, "crawl", None) is not None: return if request.remote in self.banned_peers and time.time() < self.banned_peers[request.remote]: self.log.warning(f"Peer {request.remote} is banned, refusing connection") return None ws = 
web.WebSocketResponse(max_msg_size=max_message_size) await ws.prepare(request) close_event = asyncio.Event() ssl_object = request.get_extra_info("ssl_object") if ssl_object is None: reason = f"ssl_object is None for request {request}" self.log.warning(reason) raise web.HTTPInternalServerError(reason=reason) cert_bytes = ssl_object.getpeercert(True) der_cert = x509.load_der_x509_certificate(cert_bytes) peer_id = bytes32(der_cert.fingerprint(hashes.SHA256())) if peer_id == self.node_id: return ws connection: Optional[WSFlaxConnection] = None try: connection = WSFlaxConnection( self._local_type, ws, self._port, self.log, False, False, request.remote, self.incoming_messages, self.connection_closed, peer_id, self._inbound_rate_limit_percent, self._outbound_rate_limit_percent, self._local_capabilities_for_handshake, close_event, ) await connection.perform_handshake(self._network_id, protocol_version, self._port, self._local_type) # Limit inbound connections to config's specifications. if not self.accept_inbound_connections(connection.connection_type) and not is_in_network( connection.peer_host, self.exempt_peer_networks ): self.log.info( f"Not accepting inbound connection: {connection.get_peer_logging()}.Inbound limit reached." ) await connection.close() close_event.set() else: await self.connection_added(connection, self.on_connect) if self._local_type is NodeType.INTRODUCER and connection.connection_type is NodeType.FULL_NODE: self.introducer_peers.add(connection.get_peer_info()) except ProtocolError as e: if connection is not None: await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code) if e.code == Err.INVALID_HANDSHAKE: self.log.warning("Invalid handshake with peer. Maybe the peer is running old software.") close_event.set() elif e.code == Err.INCOMPATIBLE_NETWORK_ID: self.log.warning("Incompatible network ID. 
Maybe the peer is on another network") close_event.set() elif e.code == Err.SELF_CONNECTION: close_event.set() else: error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") close_event.set() except ValueError as e: if connection is not None: await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN) self.log.warning(f"{e} - closing connection") close_event.set() except Exception as e: if connection is not None: await connection.close(ws_close_code=WSCloseCode.PROTOCOL_ERROR, error=Err.UNKNOWN) error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") close_event.set() await close_event.wait() return ws async def connection_added(self, connection: WSFlaxConnection, on_connect=None): # If we already had a connection to this peer_id, close the old one. This is secure because peer_ids are based # on TLS public keys if connection.peer_node_id in self.all_connections: con = self.all_connections[connection.peer_node_id] await con.close() self.all_connections[connection.peer_node_id] = connection if connection.connection_type is not None: if on_connect is not None: await on_connect(connection) else: self.log.error(f"Invalid connection type for connection {connection}") def is_duplicate_or_self_connection(self, target_node: PeerInfo) -> bool: if is_localhost(target_node.host) and target_node.port == self._port: # Don't connect to self self.log.debug(f"Not connecting to {target_node}") return True for connection in self.all_connections.values(): if connection.host == target_node.host and connection.peer_server_port == target_node.port: self.log.debug(f"Not connecting to {target_node}, duplicate connection") return True return False async def start_client( self, target_node: PeerInfo, on_connect: Callable = None, is_feeler: bool = False, ) -> bool: """ Tries to connect to the target node, adding one connection into the pipeline, if successful. 
An on connect method can also be specified, and this will be saved into the instance variables. """ if self.is_duplicate_or_self_connection(target_node): return False if target_node.host in self.banned_peers and time.time() < self.banned_peers[target_node.host]: self.log.warning(f"Peer {target_node.host} is still banned, not connecting to it") return False session = None connection: Optional[WSFlaxConnection] = None try: # Crawler/DNS introducer usually uses a lower timeout than the default timeout_value = float(self.config.get("peer_connect_timeout", 30)) timeout = ClientTimeout(total=timeout_value) session = ClientSession(timeout=timeout) try: if type(ip_address(target_node.host)) is IPv6Address: target_node = PeerInfo(f"[{target_node.host}]", target_node.port) except ValueError: pass url = f"wss://{target_node.host}:{target_node.port}/ws" self.log.debug(f"Connecting: {url}, Peer info: {target_node}") try: ws = await session.ws_connect( url, autoclose=True, autoping=True, heartbeat=60, ssl=self.ssl_client_context, max_msg_size=max_message_size, ) except ServerDisconnectedError: self.log.debug(f"Server disconnected error connecting to {url}. Perhaps we are banned by the peer.") return False except ClientResponseError as e: self.log.warning(f"Connection failed to {url}. 
Error: {e}") return False except asyncio.TimeoutError: self.log.debug(f"Timeout error connecting to {url}") return False if ws is None: return False ssl_object = ws.get_extra_info("ssl_object") if ssl_object is None: raise ValueError(f"ssl_object is None for {ws}") cert_bytes = ssl_object.getpeercert(True) der_cert = x509.load_der_x509_certificate(cert_bytes, default_backend()) peer_id = bytes32(der_cert.fingerprint(hashes.SHA256())) if peer_id == self.node_id: raise RuntimeError(f"Trying to connect to a peer ({target_node}) with the same peer_id: {peer_id}") connection = WSFlaxConnection( self._local_type, ws, self._port, self.log, True, False, target_node.host, self.incoming_messages, self.connection_closed, peer_id, self._inbound_rate_limit_percent, self._outbound_rate_limit_percent, self._local_capabilities_for_handshake, session=session, ) await connection.perform_handshake(self._network_id, protocol_version, self._port, self._local_type) await self.connection_added(connection, on_connect) # the session has been adopted by the connection, don't close it at # the end of the function session = None connection_type_str = "" if connection.connection_type is not None: connection_type_str = connection.connection_type.name.lower() self.log.info(f"Connected with {connection_type_str} {target_node}") if is_feeler: asyncio.create_task(connection.close()) return True except client_exceptions.ClientConnectorError as e: self.log.info(f"{e}") except ProtocolError as e: if connection is not None: await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code) if e.code == Err.INVALID_HANDSHAKE: self.log.warning(f"Invalid handshake with peer {target_node}. Maybe the peer is running old software.") elif e.code == Err.INCOMPATIBLE_NETWORK_ID: self.log.warning("Incompatible network ID. 
Maybe the peer is on another network") elif e.code == Err.SELF_CONNECTION: pass else: error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") except Exception as e: if connection is not None: await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN) error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") finally: if session is not None: await session.close() return False def connection_closed(self, connection: WSFlaxConnection, ban_time: int): if is_localhost(connection.peer_host) and ban_time != 0: self.log.warning(f"Trying to ban localhost for {ban_time}, but will not ban") ban_time = 0 self.log.info(f"Connection closed: {connection.peer_host}, node id: {connection.peer_node_id}") if ban_time > 0: ban_until: float = time.time() + ban_time self.log.warning(f"Banning {connection.peer_host} for {ban_time} seconds") if connection.peer_host in self.banned_peers: if ban_until > self.banned_peers[connection.peer_host]: self.banned_peers[connection.peer_host] = ban_until else: self.banned_peers[connection.peer_host] = ban_until if connection.peer_node_id in self.all_connections: self.all_connections.pop(connection.peer_node_id) if connection.connection_type is None: # This means the handshake was never finished with this peer self.log.debug( f"Invalid connection type for connection {connection.peer_host}," f" while closing. Handshake never finished." 
) self.cancel_tasks_from_peer(connection.peer_node_id) on_disconnect = getattr(self.node, "on_disconnect", None) if on_disconnect is not None: on_disconnect(connection) def cancel_tasks_from_peer(self, peer_id: bytes32): if peer_id not in self.tasks_from_peer: return None task_ids = self.tasks_from_peer[peer_id] for task_id in task_ids: if task_id in self.execute_tasks: continue task = self.api_tasks[task_id] task.cancel() async def incoming_api_task(self) -> None: message_types: typing_Counter[str] = Counter() # Used for debugging information. while True: payload_inc, connection_inc = await self.incoming_messages.get() if payload_inc is None or connection_inc is None: continue async def api_call(full_message: Message, connection: WSFlaxConnection, task_id): nonlocal message_types start_time = time.time() message_type = "" try: if self.received_message_callback is not None: await self.received_message_callback(connection) connection.log.debug( f"<- {ProtocolMessageTypes(full_message.type).name} from peer " f"{connection.peer_node_id} {connection.peer_host}" ) message_type = ProtocolMessageTypes(full_message.type).name message_types[message_type] += 1 f = getattr(self.api, message_type, None) if len(message_types) % 100 == 0: self.log.debug(f"Message types: {[(m, n) for m, n in sorted(message_types.items()) if n != 0]}") if f is None: self.log.error(f"Non existing function: {message_type}") raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type]) metadata = get_metadata(function=f) if not metadata.api_function: self.log.error(f"Peer trying to call non api function {message_type}") raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type]) # If api is not ready ignore the request if hasattr(self.api, "api_ready"): if self.api.api_ready is False: return None timeout: Optional[int] = 600 if metadata.execute_task: # Don't timeout on methods with execute_task decorator, these need to run fully self.execute_tasks.add(task_id) timeout = None if 
metadata.peer_required: coroutine = f(full_message.data, connection) else: coroutine = f(full_message.data) async def wrapped_coroutine() -> Optional[Message]: try: result = await coroutine return result except asyncio.CancelledError: pass except Exception as e: tb = traceback.format_exc() connection.log.error(f"Exception: {e}, {connection.get_peer_logging()}. {tb}") raise return None response: Optional[Message] = await asyncio.wait_for(wrapped_coroutine(), timeout=timeout) connection.log.debug( f"Time taken to process {message_type} from {connection.peer_node_id} is " f"{time.time() - start_time} seconds" ) if response is not None: response_message = Message(response.type, full_message.id, response.data) await connection.send_message(response_message) except TimeoutError: connection.log.error(f"Timeout error for: {message_type}") except Exception as e: if self.connection_close_task is None: tb = traceback.format_exc() connection.log.error( f"Exception: {e} {type(e)}, closing connection {connection.get_peer_logging()}. 
{tb}" ) else: connection.log.debug(f"Exception: {e} while closing connection") # TODO: actually throw one of the errors from errors.py and pass this to close await connection.close(self.api_exception_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN) finally: message_types[message_type] -= 1 if task_id in self.api_tasks: self.api_tasks.pop(task_id) if task_id in self.tasks_from_peer[connection.peer_node_id]: self.tasks_from_peer[connection.peer_node_id].remove(task_id) if task_id in self.execute_tasks: self.execute_tasks.remove(task_id) task_id: bytes32 = bytes32(token_bytes(32)) api_task = asyncio.create_task(api_call(payload_inc, connection_inc, task_id)) self.api_tasks[task_id] = api_task if connection_inc.peer_node_id not in self.tasks_from_peer: self.tasks_from_peer[connection_inc.peer_node_id] = set() self.tasks_from_peer[connection_inc.peer_node_id].add(task_id) async def send_to_others( self, messages: List[Message], node_type: NodeType, origin_peer: WSFlaxConnection, ): for node_id, connection in self.all_connections.items(): if node_id == origin_peer.peer_node_id: continue if connection.connection_type is node_type: for message in messages: await connection.send_message(message) async def validate_broadcast_message_type(self, messages: List[Message], node_type: NodeType): for message in messages: if message_requires_reply(ProtocolMessageTypes(message.type)): # Internal protocol logic error - we will raise, blocking messages to all peers self.log.error(f"Attempt to broadcast message requiring protocol response: {message.type}") for _, connection in self.all_connections.items(): if connection.connection_type is node_type: await connection.close( self.invalid_protocol_ban_seconds, WSCloseCode.INTERNAL_ERROR, Err.INTERNAL_PROTOCOL_ERROR, ) raise ProtocolError(Err.INTERNAL_PROTOCOL_ERROR, [message.type]) async def send_to_all(self, messages: List[Message], node_type: NodeType): await self.validate_broadcast_message_type(messages, node_type) for _, 
connection in self.all_connections.items(): if connection.connection_type is node_type: for message in messages: await connection.send_message(message) async def send_to_all_except(self, messages: List[Message], node_type: NodeType, exclude: bytes32): await self.validate_broadcast_message_type(messages, node_type) for _, connection in self.all_connections.items(): if connection.connection_type is node_type and connection.peer_node_id != exclude: for message in messages: await connection.send_message(message) async def send_to_specific(self, messages: List[Message], node_id: bytes32): if node_id in self.all_connections: connection = self.all_connections[node_id] for message in messages: await connection.send_message(message) def get_outgoing_connections(self) -> List[WSFlaxConnection]: result = [] for _, connection in self.all_connections.items(): if connection.is_outbound: result.append(connection) return result def get_full_node_outgoing_connections(self) -> List[WSFlaxConnection]: result = [] connections = self.get_connections(NodeType.FULL_NODE) for connection in connections: if connection.is_outbound: result.append(connection) return result def get_connections(self, node_type: Optional[NodeType] = None) -> List[WSFlaxConnection]: result = [] for _, connection in self.all_connections.items(): if node_type is None or connection.connection_type == node_type: result.append(connection) return result async def close_all_connections(self) -> None: keys = [a for a, b in self.all_connections.items()] for node_id in keys: try: if node_id in self.all_connections: connection = self.all_connections[node_id] await connection.close() except Exception as e: self.log.error(f"Exception while closing connection {e}") def close_all(self) -> None: self.connection_close_task = asyncio.create_task(self.close_all_connections()) if self.webserver is not None:
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/upnp.py
flax/server/upnp.py
from __future__ import annotations import logging import threading from dataclasses import dataclass, field from queue import Queue from typing import Optional, Tuple, Union from typing_extensions import Literal log = logging.getLogger(__name__) try: import miniupnpc except ImportError: log.info( "importing miniupnpc failed." " This is not required to run flax, it allows incoming connections from other peers." ) miniupnpc = None @dataclass class UPnP: _thread: Optional[threading.Thread] = None _queue: Queue[Union[Tuple[Literal["remap", "release"], int], Tuple[Literal["shutdown"]]]] = field( default_factory=Queue, ) _upnp: Optional[miniupnpc.UPnP] = None def setup(self) -> None: if miniupnpc is None: return if self._thread is not None: raise Exception(f"already started, {type(self).__name__} instances are not reusable") self._thread = threading.Thread(target=self._run) self._thread.start() def _is_alive(self) -> bool: if self._thread is None: return False return self._thread.is_alive() def _run(self) -> None: try: self._upnp = miniupnpc.UPnP() self._upnp.discoverdelay = 30 self._upnp.discover() self._upnp.selectigd() keep_going = True while keep_going: msg = self._queue.get() if msg[0] == "remap": port = msg[1] log.info(f"Attempting to enable UPnP (open up port {port})") try: self._upnp.deleteportmapping(port, "TCP") except Exception as e: log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}") self._upnp.addportmapping(port, "TCP", self._upnp.lanaddr, port, "flax", "") log.info( f"Port {port} opened with UPnP. lanaddr {self._upnp.lanaddr} " f"external: {self._upnp.externalipaddress()}" ) elif msg[0] == "release": port = msg[1] log.info(f"UPnP, releasing port {port}") self._upnp.deleteportmapping(port, "TCP") log.info(f"UPnP, Port {port} closed") elif msg[0] == "shutdown": keep_going = False except Exception as e: log.info("UPnP failed. 
This is not required to run flax, it allows incoming connections from other peers.") log.info(e) def remap(self, port: int) -> None: if not self._is_alive(): return self._queue.put(("remap", port)) def release(self, port: int) -> None: if not self._is_alive(): return self._queue.put(("release", port)) def shutdown(self) -> None: if self._thread is None: return if self._is_alive(): self._queue.put(("shutdown",)) log.info("UPnP, shutting down thread") self._thread.join(5) # this is here just in case the UPnP object is destroyed non-gracefully, # e.g. via an exception before the main thread can call shutdown() def __del__(self) -> None: self.shutdown()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_harvester.py
flax/server/start_harvester.py
from __future__ import annotations import pathlib import sys from typing import Dict, Optional from flax.consensus.constants import ConsensusConstants from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.harvester.harvester import Harvester from flax.harvester.harvester_api import HarvesterAPI from flax.rpc.harvester_rpc_api import HarvesterRpcApi from flax.server.outbound_message import NodeType from flax.server.start_service import RpcInfo, Service, async_run from flax.types.peer_info import PeerInfo from flax.util.flax_logging import initialize_service_logging from flax.util.config import load_config, load_config_cli from flax.util.default_root import DEFAULT_ROOT_PATH # See: https://bugs.python.org/issue29288 "".encode("idna") SERVICE_NAME = "harvester" def create_harvester_service( root_path: pathlib.Path, config: Dict, consensus_constants: ConsensusConstants, farmer_peer: Optional[PeerInfo], connect_to_daemon: bool = True, ) -> Service[Harvester]: service_config = config[SERVICE_NAME] overrides = service_config["network_overrides"]["constants"][service_config["selected_network"]] updated_constants = consensus_constants.replace_str_to_bytes(**overrides) harvester = Harvester(root_path, service_config, updated_constants) peer_api = HarvesterAPI(harvester) network_id = service_config["selected_network"] rpc_info: Optional[RpcInfo] = None if service_config["start_rpc_server"]: rpc_info = (HarvesterRpcApi, service_config["rpc_port"]) return Service( root_path=root_path, config=config, node=harvester, peer_api=peer_api, node_type=NodeType.HARVESTER, advertised_port=service_config["port"], service_name=SERVICE_NAME, server_listen_ports=[service_config["port"]], connect_peers=[] if farmer_peer is None else [farmer_peer], network_id=network_id, rpc_info=rpc_info, connect_to_daemon=connect_to_daemon, ) async def async_main() -> int: # TODO: refactor to avoid the double load config = load_config(DEFAULT_ROOT_PATH, "config.yaml") service_config = 
load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) config[SERVICE_NAME] = service_config initialize_service_logging(service_name=SERVICE_NAME, config=config) farmer_peer = PeerInfo(service_config["farmer_peer"]["host"], service_config["farmer_peer"]["port"]) service = create_harvester_service(DEFAULT_ROOT_PATH, config, DEFAULT_CONSTANTS, farmer_peer) await service.setup_process_global_state() await service.run() return 0 def main() -> int: return async_run(async_main()) if __name__ == "__main__": sys.exit(main())
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_wallet.py
flax/server/start_wallet.py
import pathlib
import os
from multiprocessing import freeze_support
import sys
from typing import Dict, Optional

from flax.consensus.constants import ConsensusConstants
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.rpc.wallet_rpc_api import WalletRpcApi
from flax.server.outbound_message import NodeType
from flax.server.start_service import RpcInfo, Service, async_run
from flax.types.peer_info import PeerInfo
from flax.util.flax_logging import initialize_service_logging
from flax.util.config import load_config_cli, load_config
from flax.util.default_root import DEFAULT_ROOT_PATH
from flax.util.keychain import Keychain
from flax.wallet.wallet_node import WalletNode

# See: https://bugs.python.org/issue29288
from flax.wallet.wallet_node_api import WalletNodeAPI

"".encode("idna")

SERVICE_NAME = "wallet"


def create_wallet_service(
    root_path: pathlib.Path,
    config: Dict,
    consensus_constants: ConsensusConstants,
    keychain: Optional[Keychain] = None,
    connect_to_daemon: bool = True,
) -> Service[WalletNode]:
    """Build (but do not start) the wallet Service from the loaded config.

    Mutates ``config[SERVICE_NAME]`` in place to back-fill defaults for old
    config files (``trusted_peers``, ``short_sync_blocks_behind_threshold``).
    """
    service_config = config[SERVICE_NAME]

    overrides = service_config["network_overrides"]["constants"][service_config["selected_network"]]
    updated_constants = consensus_constants.replace_str_to_bytes(**overrides)

    # add local node to trusted peers if old config
    if "trusted_peers" not in service_config:
        full_node_config = config["full_node"]
        trusted_peer = full_node_config["ssl"]["public_crt"]
        service_config["trusted_peers"] = {}
        service_config["trusted_peers"]["local_node"] = trusted_peer

    if "short_sync_blocks_behind_threshold" not in service_config:
        service_config["short_sync_blocks_behind_threshold"] = 20

    node = WalletNode(
        service_config,
        root_path,
        constants=updated_constants,
        local_keychain=keychain,
    )
    peer_api = WalletNodeAPI(node)

    # Connect to a configured full node peer, if any.
    fnp = service_config.get("full_node_peer")
    if fnp:
        connect_peers = [PeerInfo(fnp["host"], fnp["port"])]
        node.full_node_peer = PeerInfo(fnp["host"], fnp["port"])
    else:
        connect_peers = []
        node.full_node_peer = None

    network_id = service_config["selected_network"]
    rpc_port = service_config.get("rpc_port")
    rpc_info: Optional[RpcInfo] = None
    if rpc_port is not None:
        rpc_info = (WalletRpcApi, service_config["rpc_port"])

    return Service(
        server_listen_ports=[service_config["port"]],
        root_path=root_path,
        config=config,
        node=node,
        peer_api=peer_api,
        node_type=NodeType.WALLET,
        service_name=SERVICE_NAME,
        on_connect_callback=node.on_connect,
        connect_peers=connect_peers,
        network_id=network_id,
        rpc_info=rpc_info,
        advertised_port=service_config["port"],
        connect_to_daemon=connect_to_daemon,
    )


async def async_main() -> int:
    """Load config (switching to simulator settings under `testing`), then run the wallet service."""
    # TODO: refactor to avoid the double load
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
    service_config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    config[SERVICE_NAME] = service_config

    # This is simulator
    local_test = service_config["testing"]
    if local_test is True:
        from flax.simulator.block_tools import test_constants

        constants = test_constants
        current = service_config["database_path"]
        service_config["database_path"] = f"{current}_simulation"
        service_config["selected_network"] = "testnet0"
    else:
        constants = DEFAULT_CONSTANTS
    initialize_service_logging(service_name=SERVICE_NAME, config=config)

    service = create_wallet_service(DEFAULT_ROOT_PATH, config, constants)
    await service.setup_process_global_state()
    await service.run()
    return 0


def main() -> int:
    """Synchronous entry point; optionally enables asyncio task instrumentation."""
    freeze_support()

    # BUG FIX: os.getenv() returns a string (or the given default), so the old
    # check `os.getenv("FLAX_INSTRUMENT_WALLET", 0) != 0` compared str to int
    # and enabled instrumentation for ANY set value — including "0".
    # Compare strings instead.
    if os.getenv("FLAX_INSTRUMENT_WALLET", "0") != "0":
        from flax.util.task_timing import start_task_instrumentation, stop_task_instrumentation
        import atexit

        start_task_instrumentation()
        atexit.register(stop_task_instrumentation)
    return async_run(async_main())


if __name__ == "__main__":
    sys.exit(main())
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/block_height_map.py
flax/full_node/block_height_map.py
import logging
from typing import Dict, List, Optional, Tuple
from flax.util.ints import uint32
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from pathlib import Path
import aiofiles
from dataclasses import dataclass
from flax.util.streamable import Streamable, streamable
from flax.util.files import write_file_async
from flax.util.db_wrapper import DBWrapper2

log = logging.getLogger(__name__)


@streamable
@dataclass(frozen=True)
class SesCache(Streamable):
    """On-disk format of the sub-epoch-summary cache: (height, serialized SubEpochSummary) pairs."""

    content: List[Tuple[uint32, bytes]]


class BlockHeightMap:
    """In-memory height -> header-hash map (plus sub-epoch summaries) for the
    current peak chain, persisted to two cache files and rebuilt from the DB
    when the files are missing or stale."""

    db: DBWrapper2

    # the below dictionaries are loaded from the database, from the peak
    # and back in time on startup.

    # Defines the path from genesis to the peak, no orphan blocks
    # this buffer contains all block hashes that are part of the current peak
    # ordered by height. i.e. __height_to_hash[0..32] is the genesis hash
    # __height_to_hash[32..64] is the hash for height 1 and so on
    __height_to_hash: bytearray

    # All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
    # (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
    # The value is a serialized SubEpochSummary object
    __sub_epoch_summaries: Dict[uint32, bytes]

    # count how many blocks have been added since the cache was last written to
    # disk
    __dirty: int

    # the file we're saving the height-to-hash cache to
    __height_to_hash_filename: Path

    # the file we're saving the sub epoch summary cache to
    __ses_filename: Path

    @classmethod
    async def create(cls, blockchain_dir: Path, db: DBWrapper2) -> "BlockHeightMap":
        """Construct the map: find the peak in the DB, load the cache files if
        present, and backfill any missing entries from the DB."""
        self = BlockHeightMap()
        self.db = db

        self.__dirty = 0
        self.__height_to_hash = bytearray()
        self.__sub_epoch_summaries = {}
        self.__height_to_hash_filename = blockchain_dir / "height-to-hash"
        self.__ses_filename = blockchain_dir / "sub-epoch-summaries"

        # Look up the current peak block row; the schema differs between DB v1 and v2.
        async with self.db.reader_no_transaction() as conn:
            if db.db_version == 2:
                async with conn.execute("SELECT hash FROM current_peak WHERE key = 0") as cursor:
                    peak_row = await cursor.fetchone()
                    if peak_row is None:
                        # empty database; nothing to load
                        return self

                async with conn.execute(
                    "SELECT header_hash,prev_hash,height,sub_epoch_summary FROM full_blocks WHERE header_hash=?",
                    (peak_row[0],),
                ) as cursor:
                    row = await cursor.fetchone()
                    if row is None:
                        return self
            else:
                async with await conn.execute(
                    "SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records WHERE is_peak=1"
                ) as cursor:
                    row = await cursor.fetchone()
                    if row is None:
                        return self

        try:
            async with aiofiles.open(self.__height_to_hash_filename, "rb") as f:
                self.__height_to_hash = bytearray(await f.read())
        except Exception:
            # it's OK if this file doesn't exist, we can rebuild it
            pass

        try:
            async with aiofiles.open(self.__ses_filename, "rb") as f:
                self.__sub_epoch_summaries = {k: v for (k, v) in SesCache.from_bytes(await f.read()).content}
        except Exception:
            # it's OK if this file doesn't exist, we can rebuild it
            pass

        peak: bytes32
        prev_hash: bytes32
        # DB v2 stores raw bytes; v1 stores hex strings.
        if db.db_version == 2:
            peak = row[0]
            prev_hash = row[1]
        else:
            peak = bytes32.fromhex(row[0])
            prev_hash = bytes32.fromhex(row[1])
        height = row[2]

        # allocate memory for height to hash map
        # this may also truncate it, if the file on disk had an invalid size
        new_size = (height + 1) * 32
        size = len(self.__height_to_hash)
        if size > new_size:
            del self.__height_to_hash[new_size:]
        else:
            self.__height_to_hash += bytearray([0] * (new_size - size))

        # if the peak hash is already in the height-to-hash map, we don't need
        # to load anything more from the DB
        if self.get_hash(height) != peak:
            self.__set_hash(height, peak)

            if row[3] is not None:
                self.__sub_epoch_summaries[height] = row[3]

            # prepopulate the height -> hash mapping
            await self._load_blocks_from(height, prev_hash)

        await self.maybe_flush()

        return self

    def update_height(self, height: uint32, header_hash: bytes32, ses: Optional[SubEpochSummary]) -> None:
        """Record a newly added block at `height` (the new peak)."""
        # we're only updating the last hash. If we've reorged, we already rolled
        # back, making this the new peak
        assert height * 32 <= len(self.__height_to_hash)
        self.__set_hash(height, header_hash)
        if ses is not None:
            self.__sub_epoch_summaries[height] = bytes(ses)

    async def maybe_flush(self) -> None:
        """Write both caches to disk, but only once at least 1000 updates accumulated."""
        if self.__dirty < 1000:
            return

        assert (len(self.__height_to_hash) % 32) == 0
        map_buf = self.__height_to_hash.copy()
        ses_buf = bytes(SesCache([(k, v) for (k, v) in self.__sub_epoch_summaries.items()]))

        self.__dirty = 0
        await write_file_async(self.__height_to_hash_filename, map_buf)
        await write_file_async(self.__ses_filename, ses_buf)

    # load height-to-hash map entries from the DB starting at height back in
    # time until we hit a match in the existing map, at which point we can
    # assume all previous blocks have already been populated
    async def _load_blocks_from(self, height: uint32, prev_hash: bytes32) -> None:
        while height > 0:
            # load 5000 blocks at a time
            window_end = max(0, height - 5000)

            if self.db.db_version == 2:
                query = (
                    "SELECT header_hash,prev_hash,height,sub_epoch_summary from full_blocks "
                    "INDEXED BY height WHERE height>=? AND height <?"
                )
            else:
                query = (
                    "SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records "
                    "INDEXED BY height WHERE height>=? AND height <?"
                )

            async with self.db.reader_no_transaction() as conn:
                async with conn.execute(query, (window_end, height)) as cursor:

                    # maps block-hash -> (height, prev-hash, sub-epoch-summary)
                    ordered: Dict[bytes32, Tuple[uint32, bytes32, Optional[bytes]]] = {}

                    if self.db.db_version == 2:
                        for r in await cursor.fetchall():
                            ordered[r[0]] = (r[2], r[1], r[3])
                    else:
                        for r in await cursor.fetchall():
                            ordered[bytes32.fromhex(r[0])] = (r[2], bytes32.fromhex(r[1]), r[3])

            # walk the window backwards via prev_hash links, following only the
            # peak chain (the window may contain orphans)
            while height > window_end:
                if prev_hash not in ordered:
                    raise ValueError(
                        f"block with header hash is missing from your blockchain database: {prev_hash.hex()}"
                    )
                entry = ordered[prev_hash]
                assert height == entry[0] + 1
                height = entry[0]
                if entry[2] is not None:
                    # if the cache already agrees with the DB at this height we
                    # can stop: everything below is assumed populated
                    if (
                        self.get_hash(height) == prev_hash
                        and height in self.__sub_epoch_summaries
                        and self.__sub_epoch_summaries[height] == entry[2]
                    ):
                        return
                    self.__sub_epoch_summaries[height] = entry[2]
                elif height in self.__sub_epoch_summaries:
                    # if the database file was swapped out and the existing
                    # cache doesn't represent any of it at all, a missing sub
                    # epoch summary needs to be removed from the cache too
                    del self.__sub_epoch_summaries[height]
                self.__set_hash(height, prev_hash)
                prev_hash = entry[1]

    def __set_hash(self, height: int, block_hash: bytes32) -> None:
        # write the 32-byte hash into its slot and mark the cache dirty
        idx = height * 32
        self.__height_to_hash[idx : idx + 32] = block_hash
        self.__dirty += 1

    def get_hash(self, height: uint32) -> bytes32:
        """Return the header hash of the peak-chain block at `height`."""
        idx = height * 32
        assert idx + 32 <= len(self.__height_to_hash)
        return bytes32(self.__height_to_hash[idx : idx + 32])

    def contains_height(self, height: uint32) -> bool:
        """True if `height` falls within the currently mapped range."""
        return height * 32 < len(self.__height_to_hash)

    def rollback(self, fork_height: int) -> None:
        """Drop all entries above `fork_height` (reorg support)."""
        # fork height may be -1, in which case all blocks are different and we
        # should clear all sub epoch summaries
        heights_to_delete = []
        for ses_included_height in self.__sub_epoch_summaries.keys():
            if ses_included_height > fork_height:
                heights_to_delete.append(ses_included_height)
        for height in heights_to_delete:
            del self.__sub_epoch_summaries[height]

        del self.__height_to_hash[(fork_height + 1) * 32 :]

    def get_ses(self, height: uint32) -> SubEpochSummary:
        """Deserialize and return the sub-epoch summary included at `height` (KeyError if none)."""
        return SubEpochSummary.from_bytes(self.__sub_epoch_summaries[height])

    def get_ses_heights(self) -> List[uint32]:
        """Return all heights that included a sub-epoch summary, ascending."""
        return sorted(self.__sub_epoch_summaries.keys())
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/bitcoin_fee_estimator.py
flax/full_node/bitcoin_fee_estimator.py
from __future__ import annotations

import logging
from datetime import datetime

from flax.full_node.fee_estimate_store import FeeStore
from flax.full_node.fee_estimation import FeeBlockInfo, FeeMempoolInfo
from flax.full_node.fee_estimator import SmartFeeEstimator
from flax.full_node.fee_estimator_interface import FeeEstimatorInterface
from flax.full_node.fee_tracker import FeeTracker
from flax.types.clvm_cost import CLVMCost
from flax.types.fee_rate import FeeRate
from flax.types.mempool_item import MempoolItem
from flax.types.mojos import Mojos
from flax.util.ints import uint32, uint64


class BitcoinFeeEstimator(FeeEstimatorInterface):
    """
    A Fee Estimator based on the concepts and code at:
    https://github.com/bitcoin/bitcoin/tree/5b6f0f31fa6ce85db3fb7f9823b1bbb06161ae32/src/policy
    """

    def __init__(self, fee_tracker: FeeTracker, smart_fee_estimator: SmartFeeEstimator) -> None:
        self.fee_rate_estimator: SmartFeeEstimator = smart_fee_estimator
        self.tracker: FeeTracker = fee_tracker
        # Start from an "empty mempool" snapshot; refreshed on every add/remove.
        self.last_mempool_info = FeeMempoolInfo(
            CLVMCost(uint64(0)),
            FeeRate.create(Mojos(uint64(0)), CLVMCost(uint64(1))),
            CLVMCost(uint64(0)),
            datetime.min,
            CLVMCost(uint64(0)),
        )

    def new_block(self, block_info: FeeBlockInfo) -> None:
        """Feed a newly added block into the fee tracker."""
        self.tracker.process_block(block_info.block_height, block_info.included_items)

    def add_mempool_item(self, mempool_info: FeeMempoolInfo, mempool_item: MempoolItem) -> None:
        """Remember the latest mempool snapshot when an item is added."""
        self.last_mempool_info = mempool_info

    def remove_mempool_item(self, mempool_info: FeeMempoolInfo, mempool_item: MempoolItem) -> None:
        """Remember the latest mempool snapshot and drop the item from the tracker."""
        self.last_mempool_info = mempool_info
        self.tracker.remove_tx(mempool_item)

    def estimate_fee_rate(self, *, time_offset_seconds: int) -> FeeRate:
        """
        time_offset_seconds: Target time in the future we want our tx included by
        """
        estimate = self.fee_rate_estimator.get_estimate(time_offset_seconds)
        if estimate.error is None:
            return estimate.estimated_fee_rate
        # no usable estimate: report a zero fee rate
        return FeeRate(uint64(0))

    def estimate_fee_rate_for_block(self, block: uint32) -> FeeRate:
        """Estimate the fee rate needed for inclusion within `block` blocks."""
        estimate = self.fee_rate_estimator.get_estimate_for_block(block)
        if estimate.error is None:
            return estimate.estimated_fee_rate
        return FeeRate(uint64(0))

    def mempool_size(self) -> CLVMCost:
        """Report last seen mempool size"""
        return self.last_mempool_info.current_mempool_cost

    def mempool_max_size(self) -> CLVMCost:
        """Report current mempool max size (cost)"""
        return self.last_mempool_info.max_size_in_cost

    def get_tracker(self) -> FeeTracker:
        """
        `get_tracker` is for testing the BitcoinFeeEstimator. Not part of `FeeEstimatorInterface`
        """
        return self.tracker


def create_bitcoin_fee_estimator(max_block_cost_clvm: uint64, log: logging.Logger) -> BitcoinFeeEstimator:
    """Wire up a BitcoinFeeEstimator with its store, tracker, and smart estimator."""
    # fee_store and fee_tracker are particular to the BitcoinFeeEstimator, and
    # are not necessary if a different fee estimator is used.
    store = FeeStore()
    tracker = FeeTracker(log, store)
    return BitcoinFeeEstimator(tracker, SmartFeeEstimator(tracker, max_block_cost_clvm))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/bundle_tools.py
flax/full_node/bundle_tools.py
import re
from typing import Optional, Tuple, List, Union

from flax.full_node.generator import create_compressed_generator
from flax.types.blockchain_format.program import SerializedProgram, Program
from flax.types.coin_spend import CoinSpend
from flax.types.generator_types import BlockGenerator, CompressorArg
from flax.types.spend_bundle import SpendBundle
from flax.util.byte_types import hexstr_to_bytes
from flax.util.ints import uint32

from clvm.casts import int_to_bytes


def _serialize_amount(val: int) -> bytes:
    """Serialize a non-negative coin amount (< 2**64) as a CLVM atom.

    Three cases of the serialization format: nil (0x80) for an empty atom,
    the bare byte for a single byte <= 0x7f, and a one-byte length prefix
    (0x80 | size) followed by the atom otherwise.
    """
    assert val >= 0
    assert val < 2**64
    atom: bytes = int_to_bytes(val)
    size = len(atom)
    # a uint64 encodes to at most 9 bytes (sign byte included)
    assert size <= 9
    if size == 0:
        return b"\x80"
    if size == 1 and atom[0] <= 0x7F:
        return atom
    size_blob = bytes([0x80 | size])
    return size_blob + atom


def spend_bundle_to_serialized_coin_spend_entry_list(bundle: SpendBundle) -> bytes:
    """Serialize the bundle's coin spends as a CLVM list of 4-tuples."""
    r = b""
    # ( ( parent-coin-id puzzle-reveal amount solution ) ... )
    for coin_spend in bundle.coin_spends:
        r += b"\xff"
        # A0 is the length-prefix for the parent coin ID (which is always 32
        # bytes long)
        r += b"\xff\xa0" + coin_spend.coin.parent_coin_info
        r += b"\xff" + bytes(coin_spend.puzzle_reveal)
        r += b"\xff" + _serialize_amount(coin_spend.coin.amount)
        r += b"\xff" + bytes(coin_spend.solution)
        r += b"\x80"
    r += b"\x80"
    return r


def simple_solution_generator(bundle: SpendBundle) -> BlockGenerator:
    """
    Simply quotes the solutions we know.
    """
    cse_list = spend_bundle_to_serialized_coin_spend_entry_list(bundle)
    # this is the serialized form of the lisp structure below. The "q" operator
    # has opcode 1.
    # (q . ( cse_list ))
    block_program = b"\xff\x01\xff" + cse_list + b"\x80"
    return BlockGenerator(SerializedProgram.from_bytes(block_program), [], [])


# Hex of the standard-transaction puzzle up to (but not including) the
# embedded public key; used both for searching and for offset arithmetic below.
STANDARD_TRANSACTION_PUZZLE_PREFIX = r"""ff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01"""  # noqa

# Full puzzle: the prefix, a captured 48-byte pubkey atom ("b0" + 96 hex
# digits), then the closing "ff018080".
STANDARD_TRANSACTION_PUZZLE_PATTERN = re.compile(STANDARD_TRANSACTION_PUZZLE_PREFIX + r"(b0[a-f0-9]{96})ff018080")


# match_standard_transaction_anywhere
def match_standard_transaction_at_any_index(generator_body: bytes) -> Optional[Tuple[int, int]]:
    """Return (start, end) of match, or None if pattern could not be found"""
    # We intentionally match the entire puzzle, not just the prefix that we will use,
    # in case we later want to convert the template generator into a tree of CLVM
    # Objects before operating on it
    m = STANDARD_TRANSACTION_PUZZLE_PATTERN.search(generator_body.hex())
    if m:
        # hex offsets must be even so they map cleanly onto byte offsets
        assert m.start() % 2 == 0 and m.end() % 2 == 0
        start = m.start() // 2
        # strip the captured pubkey (98 hex chars: "b0" + 96) and the trailing
        # "ff018080" so `end` covers only the fixed prefix
        end = (m.end() - 98 - len("ff018080")) // 2
        assert generator_body[start:end] == bytes.fromhex(STANDARD_TRANSACTION_PUZZLE_PREFIX)
        return start, end
    else:
        return None


def match_standard_transaction_exactly_and_return_pubkey(puzzle: SerializedProgram) -> Optional[bytes]:
    """Return the embedded pubkey bytes when `puzzle` is exactly a standard transaction, else None."""
    m = STANDARD_TRANSACTION_PUZZLE_PATTERN.fullmatch(bytes(puzzle).hex())
    return None if m is None else hexstr_to_bytes(m.group(1))


def compress_cse_puzzle(puzzle: SerializedProgram) -> Optional[bytes]:
    # a standard-transaction puzzle compresses down to just its pubkey
    return match_standard_transaction_exactly_and_return_pubkey(puzzle)


def compress_coin_spend(coin_spend: CoinSpend) -> List[List[Union[bytes, None, int, Program]]]:
    """Compress one coin spend to [[parent, amount], [pubkey-or-None, solution]]."""
    compressed_puzzle = compress_cse_puzzle(coin_spend.puzzle_reveal)
    return [
        [coin_spend.coin.parent_coin_info, coin_spend.coin.amount],
        [compressed_puzzle, Program.from_bytes(bytes(coin_spend.solution))],
    ]


def puzzle_suitable_for_compression(puzzle: SerializedProgram) -> bool:
    # only exact standard-transaction puzzles can be compressed
    return True if match_standard_transaction_exactly_and_return_pubkey(puzzle) else False


def bundle_suitable_for_compression(bundle: SpendBundle) -> bool:
    """True when every coin spend in the bundle uses a compressible puzzle."""
    return all(puzzle_suitable_for_compression(coin_spend.puzzle_reveal) for coin_spend in bundle.coin_spends)


def compressed_coin_spend_entry_list(bundle: SpendBundle) -> List[List[List[Union[bytes, None, int, Program]]]]:
    """Compress every coin spend in the bundle (see compress_coin_spend)."""
    compressed_cse_list: List[List[List[Union[bytes, None, int, Program]]]] = []
    for coin_spend in bundle.coin_spends:
        compressed_cse_list.append(compress_coin_spend(coin_spend))
    return compressed_cse_list


def compressed_spend_bundle_solution(original_generator_params: CompressorArg, bundle: SpendBundle) -> BlockGenerator:
    """Build a compressed block generator for `bundle` against a previous template block."""
    compressed_cse_list = compressed_coin_spend_entry_list(bundle)
    return create_compressed_generator(original_generator_params, compressed_cse_list)


def best_solution_generator_from_template(previous_generator: CompressorArg, bundle: SpendBundle) -> BlockGenerator:
    """
    Creates a compressed block generator, taking in a block that passes the checks below
    """
    if bundle_suitable_for_compression(bundle):
        return compressed_spend_bundle_solution(previous_generator, bundle)
    else:
        return simple_solution_generator(bundle)


def detect_potential_template_generator(block_height: uint32, program: SerializedProgram) -> Optional[CompressorArg]:
    """
    If this returns a GeneratorArg, that means that the input, `program`, has a standard
    transaction that is not compressed that we can use as a template for future blocks.
    If it returns None, this block cannot be used.
    In this implementation, we store the offsets needed by the compressor in the GeneratorArg
    This block will serve as a template for the compression of other newly farmed blocks.
    """
    m = match_standard_transaction_at_any_index(bytes(program))
    if m is None:
        return None
    start, end = m
    # NOTE(review): `start` is falsy when the match begins at byte offset 0, so
    # a template at the very start of the generator body is rejected here —
    # confirm whether that is intentional.
    if start and end and end > start >= 0:
        return CompressorArg(block_height, program, start, end)
    else:
        return None
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_history.py
flax/full_node/fee_history.py
from __future__ import annotations

from dataclasses import dataclass
from typing import List

from flax.util.ints import uint8, uint32
from flax.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class FeeStatBackup(Streamable):
    """Streamable snapshot of one set of fee statistics.

    Numeric histories are stored as strings for serialization; `type`
    presumably names the originating stat window — confirm against FeeTracker.
    """

    type: str
    tx_ct_avg: List[str]
    confirmed_average: List[List[str]]
    failed_average: List[List[str]]
    m_fee_rate_avg: List[str]


@streamable
@dataclass(frozen=True)
class FeeTrackerBackup(Streamable):
    """Streamable snapshot of the whole fee tracker: format version, tracked height range, and per-window stats."""

    fee_estimator_version: uint8
    first_recorded_height: uint32
    latest_seen_height: uint32
    stats: List[FeeStatBackup]
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_estimate_store.py
flax/full_node/fee_estimate_store.py
from __future__ import annotations import dataclasses from typing import Optional import typing_extensions from flax.full_node.fee_history import FeeTrackerBackup @typing_extensions.final @dataclasses.dataclass class FeeStore: """ This object stores Fee Stats """ _backup: Optional[FeeTrackerBackup] = None def get_stored_fee_data(self) -> Optional[FeeTrackerBackup]: return self._backup def store_fee_data(self, fee_backup: FeeTrackerBackup) -> None: self._backup = fee_backup
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_estimate.py
flax/full_node/fee_estimate.py
from __future__ import annotations

from dataclasses import dataclass
from typing import List, Optional

from flax.types.fee_rate import FeeRate
from flax.util.ints import uint64
from flax.util.streamable import Streamable, streamable


@streamable
@dataclass(frozen=True)
class FeeEstimate(Streamable):
    """
    error: If error is not None, estimated_fee_rate is invalid, and `error` is a string
    describing the error.

    It can happen that only some requested FeeEstimates have errors, but others are valid.
    For example, an implementation may not have enough data yet for estimates farther in
    the future, or, an invalid parameter may have been passed.

    time_target: Epoch time in seconds we are targeting to include our `SpendBundle` in the blockchain.
    estimated_fee_rate: expressed in mojo per 1 clvm_cost. `estimated_fee` can be zero.
    """

    error: Optional[str]
    time_target: uint64  # unix time stamp in seconds
    estimated_fee_rate: FeeRate  # Mojos per clvm cost


@streamable
@dataclass(frozen=True)
class FeeEstimateGroup(Streamable):
    """
    If error is not None, at least one item in the list `estimates` is invalid.
    Estimates are expressed in mojos / 1 clvm_cost.
    """

    error: Optional[str]
    estimates: List[FeeEstimate]
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_estimator.py
flax/full_node/fee_estimator.py
from __future__ import annotations import logging from dataclasses import dataclass, field from flax.full_node.fee_estimate import FeeEstimate, FeeEstimateGroup from flax.full_node.fee_estimation import FeeMempoolInfo from flax.full_node.fee_tracker import BucketResult, EstimateResult, FeeTracker, get_estimate_time_intervals from flax.types.fee_rate import FeeRate from flax.util.ints import uint32, uint64 # https://github.com/bitcoin/bitcoin/blob/5b6f0f31fa6ce85db3fb7f9823b1bbb06161ae32/src/policy/fees.cpp @dataclass() class SmartFeeEstimator: fee_tracker: FeeTracker max_block_cost_clvm: uint64 log: logging.Logger = field(default_factory=lambda: logging.getLogger(__name__)) def parse(self, fee_result: EstimateResult) -> float: fail_bucket: BucketResult = fee_result.fail_bucket median = fee_result.median if median != -1: return median if fail_bucket.start == 0: return -1.0 # If median is -1, tracker wasn't able to find a passing bucket. # Suggest one bucket higher than the lowest failing bucket. # get_bucket_index returns left (-1) bucket (-1). Start value is already -1 # We want +1 from the lowest bucket it failed at. 
Thus +3 max_val = len(self.fee_tracker.buckets) - 1 start_index = min(self.fee_tracker.get_bucket_index(fail_bucket.start) + 3, max_val) fee_val: float = self.fee_tracker.buckets[start_index] return fee_val def get_estimate_for_block(self, block: uint32) -> FeeEstimate: estimate_result = self.fee_tracker.estimate_fee_for_block(block) return self.estimate_result_to_fee_estimate(estimate_result) def get_estimate(self, time_offset_seconds: int) -> FeeEstimate: estimate_result = self.fee_tracker.estimate_fee(time_offset_seconds) return self.estimate_result_to_fee_estimate(estimate_result) def get_estimates(self, mempool_info: FeeMempoolInfo, ignore_mempool: bool = False) -> FeeEstimateGroup: self.log.error(self.fee_tracker.buckets) short_time_seconds, med_time_seconds, long_time_seconds = get_estimate_time_intervals() if ignore_mempool is False and (self.fee_tracker.latest_seen_height == 0): return FeeEstimateGroup(error="Not enough data", estimates=[]) tracking_length = self.fee_tracker.latest_seen_height - self.fee_tracker.first_recorded_height if tracking_length < 20: return FeeEstimateGroup(error="Not enough data", estimates=[]) if ignore_mempool is False and mempool_info.current_mempool_cost < int(mempool_info.MAX_BLOCK_COST_CLVM * 0.8): return FeeEstimateGroup( error=None, estimates=[ FeeEstimate(None, uint64(short_time_seconds), FeeRate(uint64(0))), FeeEstimate(None, uint64(med_time_seconds), FeeRate(uint64(0))), FeeEstimate(None, uint64(long_time_seconds), FeeRate(uint64(0))), ], ) short_result, med_result, long_result = self.fee_tracker.estimate_fees() short = self.estimate_result_to_fee_estimate(short_result) med = self.estimate_result_to_fee_estimate(med_result) long = self.estimate_result_to_fee_estimate(long_result) return FeeEstimateGroup(error=None, estimates=[short, med, long]) def estimate_result_to_fee_estimate(self, r: EstimateResult) -> FeeEstimate: fee: float = self.parse(r) if fee == -1 or r.median == -1: return FeeEstimate("Not enough data", 
r.requested_time, FeeRate(uint64(0))) else: # convert from mojo / 1000 clvm_cost to mojo / 1 clvm_cost return FeeEstimate(None, r.requested_time, FeeRate(uint64(fee / 1000)))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_estimator_interface.py
flax/full_node/fee_estimator_interface.py
from __future__ import annotations

from typing_extensions import Protocol

from flax.full_node.fee_estimation import FeeBlockInfo, FeeMempoolInfo
from flax.types.clvm_cost import CLVMCost
from flax.types.fee_rate import FeeRate
from flax.types.mempool_item import MempoolItem


class FeeEstimatorInterface(Protocol):
    """Structural (duck-typed) interface every fee estimator implementation must satisfy."""

    def new_block(self, block_info: FeeBlockInfo) -> None:
        """A new block has been added to the blockchain"""
        pass

    def add_mempool_item(self, mempool_item_info: FeeMempoolInfo, mempool_item: MempoolItem) -> None:
        """A MempoolItem (transaction and associated info) has been added to the mempool"""
        pass

    def remove_mempool_item(self, mempool_info: FeeMempoolInfo, mempool_item: MempoolItem) -> None:
        """A MempoolItem (transaction and associated info) has been removed from the mempool"""
        pass

    def estimate_fee_rate(self, *, time_offset_seconds: int) -> FeeRate:
        """time_offset_seconds: number of seconds into the future for which to estimate fee"""
        pass

    def mempool_size(self) -> CLVMCost:
        """Report last seen mempool size"""
        pass

    def mempool_max_size(self) -> CLVMCost:
        """Report current mempool max "size" (i.e. CLVM cost)"""
        pass
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/hint_management.py
flax/full_node/hint_management.py
from __future__ import annotations

from typing import Dict, List, Optional, Set, Tuple

from flax.consensus.blockchain import StateChangeSummary
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint64


def get_hints_and_subscription_coin_ids(
    state_change_summary: StateChangeSummary,
    coin_subscriptions: Dict[bytes32, Set[bytes32]],
    ph_subscriptions: Dict[bytes32, Set[bytes32]],
) -> Tuple[List[Tuple[bytes32, bytes]], List[bytes32]]:
    """Collect hints to persist and coin ids to look up for wallet notifications.

    Returns:
        hints_to_add: (coin id, hint) pairs that should be inserted into the hint store.
        lookup_coin_ids: ids of coins touched by this state change that some peer
            subscribed to, either by coin id or by puzzle hash.
    """
    # Precondition: all hints passed in are max 32 bytes long
    # Returns the hints that we need to add to the DB, and the coin ids that need to be looked up

    # Finds the coin IDs that we need to lookup in order to notify wallets of hinted transactions
    hint: Optional[bytes]
    hints_to_add: List[Tuple[bytes32, bytes]] = []

    # Goes through additions and removals for each block and flattens to a map and a set
    lookup_coin_ids: Set[bytes32] = set()

    def add_if_coin_subscription(coin_id: bytes32) -> None:
        # record the coin id only when some peer subscribed to it directly
        if coin_id in coin_subscriptions:
            lookup_coin_ids.add(coin_id)

    def add_if_ph_subscription(puzzle_hash: bytes32, coin_id: bytes32) -> None:
        # record the coin id only when some peer subscribed to its puzzle hash
        if puzzle_hash in ph_subscriptions:
            lookup_coin_ids.add(coin_id)

    for npc_result in state_change_summary.new_npc_results:
        if npc_result.conds is not None:
            for spend in npc_result.conds.spends:
                # Record all coin_ids that we are interested in, that had changes
                add_if_coin_subscription(bytes32(spend.coin_id))
                add_if_ph_subscription(bytes32(spend.puzzle_hash), bytes32(spend.coin_id))

                for new_ph, new_am, hint in spend.create_coin:
                    addition_coin: Coin = Coin(bytes32(spend.coin_id), bytes32(new_ph), uint64(new_am))
                    addition_coin_name = addition_coin.name()
                    add_if_coin_subscription(addition_coin_name)
                    add_if_ph_subscription(addition_coin.puzzle_hash, addition_coin_name)

                    if hint is None:
                        continue
                    if len(hint) == 32:
                        # a 32-byte hint is itself a puzzle hash peers may have subscribed to
                        add_if_ph_subscription(bytes32(hint), addition_coin_name)

                    if len(hint) > 0:
                        assert len(hint) <= 32
                        hints_to_add.append((addition_coin_name, hint))

    # Goes through all new reward coins
    for reward_coin in state_change_summary.new_rewards:
        reward_coin_name: bytes32 = reward_coin.name()
        add_if_coin_subscription(reward_coin_name)
        add_if_ph_subscription(reward_coin.puzzle_hash, reward_coin_name)

    return hints_to_add, list(lookup_coin_ids)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/generator.py
flax/full_node/generator.py
import logging from typing import List, Optional, Union, Tuple from flax.types.blockchain_format.program import Program, SerializedProgram from flax.types.generator_types import BlockGenerator, GeneratorBlockCacheInterface, CompressorArg from flax.util.ints import uint32 from flax.wallet.puzzles.load_clvm import load_clvm_maybe_recompile from flax.wallet.puzzles.rom_bootstrap_generator import get_generator GENERATOR_MOD = get_generator() DECOMPRESS_BLOCK = load_clvm_maybe_recompile("block_program_zero.clvm", package_or_requirement="flax.wallet.puzzles") DECOMPRESS_PUZZLE = load_clvm_maybe_recompile("decompress_puzzle.clvm", package_or_requirement="flax.wallet.puzzles") # DECOMPRESS_CSE = load_clvm_maybe_recompile( # "decompress_coin_spend_entry.clvm", # package_or_requirement="flax.wallet.puzzles", # ) DECOMPRESS_CSE_WITH_PREFIX = load_clvm_maybe_recompile( "decompress_coin_spend_entry_with_prefix.clvm", package_or_requirement="flax.wallet.puzzles" ) log = logging.getLogger(__name__) def create_block_generator( generator: SerializedProgram, block_heights_list: List[uint32], generator_block_cache: GeneratorBlockCacheInterface ) -> Optional[BlockGenerator]: """`create_block_generator` will returns None if it fails to look up any referenced block""" generator_list: List[SerializedProgram] = [] generator_heights: List[uint32] = [] for i in block_heights_list: previous_generator = generator_block_cache.get_generator_for_block_height(i) if previous_generator is None: log.error(f"Failed to look up generator for block {i}. Ref List: {block_heights_list}") return None generator_list.append(previous_generator) generator_heights.append(i) return BlockGenerator(generator, generator_list, generator_heights) def create_generator_args(generator_ref_list: List[SerializedProgram]) -> Program: """ `create_generator_args`: The format and contents of these arguments affect consensus. 
""" gen_ref_list = [bytes(g) for g in generator_ref_list] ret: Program = Program.to([gen_ref_list]) return ret def create_compressed_generator( original_generator: CompressorArg, compressed_cse_list: List[List[List[Union[bytes, None, int, Program]]]], ) -> BlockGenerator: """ Bind the generator block program template to a particular reference block, template bytes offsets, and SpendBundle. """ start = original_generator.start end = original_generator.end program = DECOMPRESS_BLOCK.curry( DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, Program.to(start), Program.to(end), compressed_cse_list ) return BlockGenerator(program, [original_generator.generator], [original_generator.block_height]) def setup_generator_args(self: BlockGenerator) -> Tuple[SerializedProgram, Program]: args = create_generator_args(self.generator_refs) return self.program, args def run_generator_mempool(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]: program, args = setup_generator_args(self) return GENERATOR_MOD.run_mempool_with_cost(max_cost, program, args) def run_generator_unsafe(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]: """This mode is meant for accepting possibly soft-forked transactions into the mempool""" program, args = setup_generator_args(self) return GENERATOR_MOD.run_with_cost(max_cost, program, args)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/weight_proof.py
flax/full_node/weight_proof.py
import asyncio import dataclasses import logging import math from multiprocessing.context import BaseContext import pathlib import random from concurrent.futures.process import ProcessPoolExecutor import tempfile from typing import Dict, IO, List, Optional, Tuple, Awaitable from flax.consensus.block_header_validation import validate_finished_header_block from flax.consensus.block_record import BlockRecord from flax.consensus.blockchain_interface import BlockchainInterface from flax.consensus.constants import ConsensusConstants from flax.consensus.deficit import calculate_deficit from flax.consensus.full_block_to_block_record import header_block_to_sub_block_record from flax.consensus.pot_iterations import ( calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters, is_overflow_block, ) from flax.util.chunks import chunks from flax.consensus.vdf_info_computation import get_signage_point_vdf_info from flax.types.blockchain_format.classgroup import ClassgroupElement from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary from flax.types.blockchain_format.vdf import VDFInfo, VDFProof from flax.types.end_of_slot_bundle import EndOfSubSlotBundle from flax.types.header_block import HeaderBlock from flax.types.weight_proof import ( SubEpochChallengeSegment, SubEpochData, SubSlotData, WeightProof, SubEpochSegments, RecentChainData, ) from flax.util.block_cache import BlockCache from flax.util.hash import std_hash from flax.util.ints import uint8, uint32, uint64, uint128 from flax.util.setproctitle import getproctitle, setproctitle log = logging.getLogger(__name__) def _create_shutdown_file() -> IO: return tempfile.NamedTemporaryFile(prefix="flax_full_node_weight_proof_handler_executor_shutdown_trigger") class WeightProofHandler: LAMBDA_L = 100 C = 0.5 MAX_SAMPLES = 20 def __init__( self, 
constants: ConsensusConstants, blockchain: BlockchainInterface, multiprocessing_context: Optional[BaseContext] = None, ): self.tip: Optional[bytes32] = None self.proof: Optional[WeightProof] = None self.constants = constants self.blockchain = blockchain self.lock = asyncio.Lock() self._num_processes = 4 self.multiprocessing_context = multiprocessing_context async def get_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]: tip_rec = self.blockchain.try_block_record(tip) if tip_rec is None: log.error("unknown tip") return None if tip_rec.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS: log.debug("chain to short for weight proof") return None async with self.lock: if self.proof is not None: if self.proof.recent_chain_data[-1].header_hash == tip: return self.proof wp = await self._create_proof_of_weight(tip) if wp is None: return None self.proof = wp self.tip = tip return wp def get_sub_epoch_data(self, tip_height: uint32, summary_heights: List[uint32]) -> List[SubEpochData]: sub_epoch_data: List[SubEpochData] = [] for sub_epoch_n, ses_height in enumerate(summary_heights): if ses_height > tip_height: break ses = self.blockchain.get_ses(ses_height) log.debug("handle sub epoch summary %s at height: %s ses %s", sub_epoch_n, ses_height, ses) sub_epoch_data.append(_create_sub_epoch_data(ses)) return sub_epoch_data async def _create_proof_of_weight(self, tip: bytes32) -> Optional[WeightProof]: """ Creates a weight proof object """ assert self.blockchain is not None sub_epoch_segments: List[SubEpochChallengeSegment] = [] tip_rec = self.blockchain.try_block_record(tip) if tip_rec is None: log.error("failed not tip in cache") return None log.info(f"create weight proof peak {tip} {tip_rec.height}") recent_chain = await self._get_recent_chain(tip_rec.height) if recent_chain is None: return None summary_heights = self.blockchain.get_ses_heights() zero_hash = self.blockchain.height_to_hash(uint32(0)) assert zero_hash is not None prev_ses_block = await 
self.blockchain.get_block_record_from_db(zero_hash) if prev_ses_block is None: return None sub_epoch_data = self.get_sub_epoch_data(tip_rec.height, summary_heights) # use second to last ses as seed seed = self.get_seed_for_proof(summary_heights, tip_rec.height) rng = random.Random(seed) weight_to_check = _get_weights_for_sampling(rng, tip_rec.weight, recent_chain) sample_n = 0 ses_blocks = await self.blockchain.get_block_records_at(summary_heights) if ses_blocks is None: return None for sub_epoch_n, ses_height in enumerate(summary_heights): if ses_height > tip_rec.height: break # if we have enough sub_epoch samples, dont sample if sample_n >= self.MAX_SAMPLES: log.debug("reached sampled sub epoch cap") break # sample sub epoch # next sub block ses_block = ses_blocks[sub_epoch_n] if ses_block is None or ses_block.sub_epoch_summary_included is None: log.error("error while building proof") return None if _sample_sub_epoch(prev_ses_block.weight, ses_block.weight, weight_to_check): # type: ignore sample_n += 1 segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash) if segments is None: segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n)) if segments is None: log.error( f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} " ) return None await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments) sub_epoch_segments.extend(segments) prev_ses_block = ses_block log.debug(f"sub_epochs: {len(sub_epoch_data)}") return WeightProof(sub_epoch_data, sub_epoch_segments, recent_chain) def get_seed_for_proof(self, summary_heights: List[uint32], tip_height) -> bytes32: count = 0 ses = None for sub_epoch_n, ses_height in enumerate(reversed(summary_heights)): if ses_height <= tip_height: count += 1 if count == 2: ses = self.blockchain.get_ses(ses_height) break assert ses is not None seed = ses.get_hash() return seed async def 
_get_recent_chain(self, tip_height: uint32) -> Optional[List[HeaderBlock]]: recent_chain: List[HeaderBlock] = [] ses_heights = self.blockchain.get_ses_heights() min_height = 0 count_ses = 0 for ses_height in reversed(ses_heights): if ses_height <= tip_height: count_ses += 1 if count_ses == 2: min_height = ses_height - 1 break log.debug(f"start {min_height} end {tip_height}") headers = await self.blockchain.get_header_blocks_in_range(min_height, tip_height, tx_filter=False) blocks = await self.blockchain.get_block_records_in_range(min_height, tip_height) ses_count = 0 curr_height = tip_height blocks_n = 0 while ses_count < 2: if curr_height == 0: break # add to needed reward chain recent blocks header_hash = self.blockchain.height_to_hash(curr_height) assert header_hash is not None header_block = headers[header_hash] block_rec = blocks[header_block.header_hash] if header_block is None: log.error("creating recent chain failed") return None recent_chain.insert(0, header_block) if block_rec.sub_epoch_summary_included: ses_count += 1 curr_height = uint32(curr_height - 1) blocks_n += 1 header_hash = self.blockchain.height_to_hash(curr_height) assert header_hash is not None header_block = headers[header_hash] recent_chain.insert(0, header_block) log.info( f"recent chain, " f"start: {recent_chain[0].reward_chain_block.height} " f"end: {recent_chain[-1].reward_chain_block.height} " ) return recent_chain async def create_prev_sub_epoch_segments(self) -> None: log.debug("create prev sub_epoch_segments") heights = self.blockchain.get_ses_heights() if len(heights) < 3: return None count = len(heights) - 2 ses_sub_block = self.blockchain.height_to_block_record(heights[-2]) prev_ses_sub_block = self.blockchain.height_to_block_record(heights[-3]) assert prev_ses_sub_block.sub_epoch_summary_included is not None segments = await self.__create_sub_epoch_segments(ses_sub_block, prev_ses_sub_block, uint32(count)) assert segments is not None await 
self.blockchain.persist_sub_epoch_challenge_segments(ses_sub_block.header_hash, segments) log.debug("sub_epoch_segments done") return None async def create_sub_epoch_segments(self) -> None: log.debug("check segments in db") """ Creates a weight proof object """ assert self.blockchain is not None peak_height = self.blockchain.get_peak_height() if peak_height is None: log.error("no peak yet") return None summary_heights = self.blockchain.get_ses_heights() h_hash: Optional[bytes32] = self.blockchain.height_to_hash(uint32(0)) if h_hash is None: return None prev_ses_block: Optional[BlockRecord] = await self.blockchain.get_block_record_from_db(h_hash) if prev_ses_block is None: return None ses_blocks = await self.blockchain.get_block_records_at(summary_heights) if ses_blocks is None: return None for sub_epoch_n, ses_height in enumerate(summary_heights): log.debug(f"check db for sub epoch {sub_epoch_n}") if ses_height > peak_height: break ses_block = ses_blocks[sub_epoch_n] if ses_block is None or ses_block.sub_epoch_summary_included is None: log.error("error while building proof") return None await self.__create_persist_segment(prev_ses_block, ses_block, ses_height, sub_epoch_n) prev_ses_block = ses_block await asyncio.sleep(2) log.debug("done checking segments") return None async def __create_persist_segment(self, prev_ses_block, ses_block, ses_height, sub_epoch_n): segments = await self.blockchain.get_sub_epoch_challenge_segments(ses_block.header_hash) if segments is None: segments = await self.__create_sub_epoch_segments(ses_block, prev_ses_block, uint32(sub_epoch_n)) if segments is None: log.error(f"failed while building segments for sub epoch {sub_epoch_n}, ses height {ses_height} ") return None await self.blockchain.persist_sub_epoch_challenge_segments(ses_block.header_hash, segments) async def __create_sub_epoch_segments( self, ses_block: BlockRecord, se_start: BlockRecord, sub_epoch_n: uint32 ) -> Optional[List[SubEpochChallengeSegment]]: segments: 
List[SubEpochChallengeSegment] = [] start_height = await self.get_prev_two_slots_height(se_start) blocks = await self.blockchain.get_block_records_in_range( start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS ) header_blocks = await self.blockchain.get_header_blocks_in_range( start_height, ses_block.height + self.constants.MAX_SUB_SLOT_BLOCKS, tx_filter=False ) curr: Optional[HeaderBlock] = header_blocks[se_start.header_hash] height = se_start.height assert curr is not None first = True idx = 0 while curr.height < ses_block.height: if blocks[curr.header_hash].is_challenge_block(self.constants): log.debug(f"challenge segment {idx}, starts at {curr.height} ") seg, height = await self._create_challenge_segment(curr, sub_epoch_n, header_blocks, blocks, first) if seg is None: log.error(f"failed creating segment {curr.header_hash} ") return None segments.append(seg) idx += 1 first = False else: height = height + uint32(1) # type: ignore header_hash = self.blockchain.height_to_hash(height) assert header_hash is not None curr = header_blocks[header_hash] if curr is None: return None log.debug(f"next sub epoch starts at {height}") return segments async def get_prev_two_slots_height(self, se_start: BlockRecord) -> uint32: # find prev 2 slots height slot = 0 batch_size = 50 curr_rec = se_start blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height) end = curr_rec.height while slot < 2 and curr_rec.height > 0: if curr_rec.first_in_sub_slot: slot += 1 if end - curr_rec.height == batch_size - 1: blocks = await self.blockchain.get_block_records_in_range(curr_rec.height - batch_size, curr_rec.height) end = curr_rec.height header_hash = self.blockchain.height_to_hash(uint32(curr_rec.height - 1)) assert header_hash is not None curr_rec = blocks[header_hash] return curr_rec.height async def _create_challenge_segment( self, header_block: HeaderBlock, sub_epoch_n: uint32, header_blocks: Dict[bytes32, HeaderBlock], blocks: 
Dict[bytes32, BlockRecord], first_segment_in_sub_epoch: bool, ) -> Tuple[Optional[SubEpochChallengeSegment], uint32]: assert self.blockchain is not None sub_slots: List[SubSlotData] = [] log.debug(f"create challenge segment block {header_block.header_hash} block height {header_block.height} ") # VDFs from sub slots before challenge block first_sub_slots, first_rc_end_of_slot_vdf = await self.__first_sub_slot_vdfs( header_block, header_blocks, blocks, first_segment_in_sub_epoch ) if first_sub_slots is None: log.error("failed building first sub slots") return None, uint32(0) sub_slots.extend(first_sub_slots) ssd = await _challenge_block_vdfs( self.constants, header_block, blocks[header_block.header_hash], blocks, ) sub_slots.append(ssd) # # VDFs from slot after challenge block to end of slot log.debug(f"create slot end vdf for block {header_block.header_hash} height {header_block.height} ") challenge_slot_end_sub_slots, end_height = await self.__slot_end_vdf( uint32(header_block.height + 1), header_blocks, blocks ) if challenge_slot_end_sub_slots is None: log.error("failed building slot end ") return None, uint32(0) sub_slots.extend(challenge_slot_end_sub_slots) if first_segment_in_sub_epoch and sub_epoch_n != 0: return ( SubEpochChallengeSegment(sub_epoch_n, sub_slots, first_rc_end_of_slot_vdf), end_height, ) return SubEpochChallengeSegment(sub_epoch_n, sub_slots, None), end_height # returns a challenge chain vdf from slot start to signage point async def __first_sub_slot_vdfs( self, header_block: HeaderBlock, header_blocks: Dict[bytes32, HeaderBlock], blocks: Dict[bytes32, BlockRecord], first_in_sub_epoch: bool, ) -> Tuple[Optional[List[SubSlotData]], Optional[VDFInfo]]: # combine cc vdfs of all reward blocks from the start of the sub slot to end header_block_sub_rec = blocks[header_block.header_hash] # find slot start curr_sub_rec = header_block_sub_rec first_rc_end_of_slot_vdf = None if first_in_sub_epoch and curr_sub_rec.height > 0: while not 
curr_sub_rec.sub_epoch_summary_included: curr_sub_rec = blocks[curr_sub_rec.prev_hash] first_rc_end_of_slot_vdf = self.first_rc_end_of_slot_vdf(header_block, blocks, header_blocks) else: if header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot: sub_slots_num = 2 while sub_slots_num > 0 and curr_sub_rec.height > 0: if curr_sub_rec.first_in_sub_slot: assert curr_sub_rec.finished_challenge_slot_hashes is not None sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes) curr_sub_rec = blocks[curr_sub_rec.prev_hash] else: while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0: curr_sub_rec = blocks[curr_sub_rec.prev_hash] curr = header_blocks[curr_sub_rec.header_hash] sub_slots_data: List[SubSlotData] = [] tmp_sub_slots_data: List[SubSlotData] = [] while curr.height < header_block.height: if curr is None: log.error("failed fetching block") return None, None if curr.first_in_sub_slot: # if not blue boxed if not blue_boxed_end_of_slot(curr.finished_sub_slots[0]): sub_slots_data.extend(tmp_sub_slots_data) for idx, sub_slot in enumerate(curr.finished_sub_slots): curr_icc_info = None if sub_slot.infused_challenge_chain is not None: curr_icc_info = sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info)) tmp_sub_slots_data = [] ssd = SubSlotData( None, None, None, None, None, curr.reward_chain_block.signage_point_index, None, None, None, None, curr.reward_chain_block.challenge_chain_ip_vdf, curr.reward_chain_block.infused_challenge_chain_ip_vdf, curr.total_iters, ) tmp_sub_slots_data.append(ssd) header_hash = self.blockchain.height_to_hash(uint32(curr.height + 1)) assert header_hash is not None curr = header_blocks[header_hash] if len(tmp_sub_slots_data) > 0: sub_slots_data.extend(tmp_sub_slots_data) for idx, sub_slot in enumerate(header_block.finished_sub_slots): curr_icc_info = None if sub_slot.infused_challenge_chain is not None: curr_icc_info = 
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf sub_slots_data.append(handle_finished_slots(sub_slot, curr_icc_info)) return sub_slots_data, first_rc_end_of_slot_vdf def first_rc_end_of_slot_vdf( self, header_block, blocks: Dict[bytes32, BlockRecord], header_blocks: Dict[bytes32, HeaderBlock], ) -> Optional[VDFInfo]: curr = blocks[header_block.header_hash] while curr.height > 0 and not curr.sub_epoch_summary_included: curr = blocks[curr.prev_hash] return header_blocks[curr.header_hash].finished_sub_slots[-1].reward_chain.end_of_slot_vdf async def __slot_end_vdf( self, start_height: uint32, header_blocks: Dict[bytes32, HeaderBlock], blocks: Dict[bytes32, BlockRecord] ) -> Tuple[Optional[List[SubSlotData]], uint32]: # gets all vdfs first sub slot after challenge block to last sub slot log.debug(f"slot end vdf start height {start_height}") header_hash = self.blockchain.height_to_hash(start_height) assert header_hash is not None curr = header_blocks[header_hash] curr_header_hash = curr.header_hash sub_slots_data: List[SubSlotData] = [] tmp_sub_slots_data: List[SubSlotData] = [] while not blocks[curr_header_hash].is_challenge_block(self.constants): if curr.first_in_sub_slot: sub_slots_data.extend(tmp_sub_slots_data) curr_prev_header_hash = curr.prev_header_hash # add collected vdfs for idx, sub_slot in enumerate(curr.finished_sub_slots): prev_rec = blocks[curr_prev_header_hash] eos_vdf_iters = prev_rec.sub_slot_iters if idx == 0: eos_vdf_iters = uint64(prev_rec.sub_slot_iters - prev_rec.ip_iters(self.constants)) sub_slots_data.append(handle_end_of_slot(sub_slot, eos_vdf_iters)) tmp_sub_slots_data = [] tmp_sub_slots_data.append(self.handle_block_vdfs(curr, blocks)) header_hash = self.blockchain.height_to_hash(uint32(curr.height + 1)) assert header_hash is not None curr = header_blocks[header_hash] curr_header_hash = curr.header_hash if len(tmp_sub_slots_data) > 0: sub_slots_data.extend(tmp_sub_slots_data) log.debug(f"slot end vdf end height 
{curr.height} slots {len(sub_slots_data)} ") return sub_slots_data, curr.height def handle_block_vdfs(self, curr: HeaderBlock, blocks: Dict[bytes32, BlockRecord]): cc_sp_proof = None icc_ip_proof = None cc_sp_info = None icc_ip_info = None block_record = blocks[curr.header_hash] if curr.infused_challenge_chain_ip_proof is not None: assert curr.reward_chain_block.infused_challenge_chain_ip_vdf icc_ip_proof = curr.infused_challenge_chain_ip_proof icc_ip_info = curr.reward_chain_block.infused_challenge_chain_ip_vdf if curr.challenge_chain_sp_proof is not None: assert curr.reward_chain_block.challenge_chain_sp_vdf cc_sp_vdf_info = curr.reward_chain_block.challenge_chain_sp_vdf if not curr.challenge_chain_sp_proof.normalized_to_identity: (_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info( self.constants, curr.finished_sub_slots, block_record.overflow, None if curr.height == 0 else blocks[curr.prev_header_hash], BlockCache(blocks), block_record.sp_total_iters(self.constants), block_record.sp_iters(self.constants), ) cc_sp_vdf_info = VDFInfo( curr.reward_chain_block.challenge_chain_sp_vdf.challenge, cc_vdf_iters, curr.reward_chain_block.challenge_chain_sp_vdf.output, ) cc_sp_proof = curr.challenge_chain_sp_proof cc_sp_info = cc_sp_vdf_info return SubSlotData( None, cc_sp_proof, curr.challenge_chain_ip_proof, icc_ip_proof, cc_sp_info, curr.reward_chain_block.signage_point_index, None, None, None, None, curr.reward_chain_block.challenge_chain_ip_vdf, icc_ip_info, curr.total_iters, ) def validate_weight_proof_single_proc(self, weight_proof: WeightProof) -> Tuple[bool, uint32]: assert self.blockchain is not None assert len(weight_proof.sub_epochs) > 0 if len(weight_proof.sub_epochs) == 0: return False, uint32(0) peak_height = weight_proof.recent_chain_data[-1].reward_chain_block.height log.info(f"validate weight proof peak height {peak_height}") summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof) if summaries is None: 
log.warning("weight proof failed sub epoch data validation") return False, uint32(0) summary_bytes, wp_segment_bytes, wp_recent_chain_bytes = vars_to_bytes(summaries, weight_proof) log.info("validate sub epoch challenge segments") seed = summaries[-2].get_hash() rng = random.Random(seed) if not validate_sub_epoch_sampling(rng, sub_epoch_weight_list, weight_proof): log.error("failed weight proof sub epoch sample validation") return False, uint32(0) if not _validate_sub_epoch_segments(self.constants, rng, wp_segment_bytes, summary_bytes): return False, uint32(0) log.info("validate weight proof recent blocks") success, _ = validate_recent_blocks(self.constants, wp_recent_chain_bytes, summary_bytes) if not success: return False, uint32(0) fork_point, _ = self.get_fork_point(summaries) return True, fork_point def get_fork_point_no_validations(self, weight_proof: WeightProof) -> Tuple[bool, uint32]: log.debug("get fork point skip validations") assert self.blockchain is not None assert len(weight_proof.sub_epochs) > 0 if len(weight_proof.sub_epochs) == 0: return False, uint32(0) summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof) if summaries is None: log.warning("weight proof failed to validate sub epoch summaries") return False, uint32(0) fork_height, _ = self.get_fork_point(summaries) return True, fork_height async def validate_weight_proof(self, weight_proof: WeightProof) -> Tuple[bool, uint32, List[SubEpochSummary]]: assert self.blockchain is not None if len(weight_proof.sub_epochs) == 0: return False, uint32(0), [] # timing reference: start summaries, sub_epoch_weight_list = _validate_sub_epoch_summaries(self.constants, weight_proof) await asyncio.sleep(0) # break up otherwise multi-second sync code # timing reference: 1 second if summaries is None or sub_epoch_weight_list is None: log.error("weight proof failed sub epoch data validation") return False, uint32(0), [] fork_point, ses_fork_idx = self.get_fork_point(summaries) 
# timing reference: 1 second # TODO: Consider implementing an async polling closer for the executor. with ProcessPoolExecutor( max_workers=self._num_processes, mp_context=self.multiprocessing_context, initializer=setproctitle, initargs=(f"{getproctitle()}_worker",), ) as executor: # The shutdown file manager must be inside of the executor manager so that # we request the workers close prior to waiting for them to close. with _create_shutdown_file() as shutdown_file: task: asyncio.Task = asyncio.create_task( validate_weight_proof_inner( self.constants, executor, shutdown_file.name, self._num_processes, weight_proof, summaries, sub_epoch_weight_list, False, ses_fork_idx, ) ) valid, _ = await task return valid, fork_point, summaries def get_fork_point(self, received_summaries: List[SubEpochSummary]) -> Tuple[uint32, int]: # returns the fork height and ses index # iterate through sub epoch summaries to find fork point fork_point_index = 0 ses_heights = self.blockchain.get_ses_heights() for idx, summary_height in enumerate(ses_heights): log.debug(f"check summary {idx} height {summary_height}") local_ses = self.blockchain.get_ses(summary_height) if idx == len(received_summaries) - 1: # end of wp summaries, local chain is longer or equal to wp chain break if local_ses is None or local_ses.get_hash() != received_summaries[idx].get_hash(): break fork_point_index = idx if fork_point_index <= 2: # Two summeries can have different blocks and still be identical # This gets resolved after one full sub epoch return uint32(0), 0 return ses_heights[fork_point_index - 2], fork_point_index def _get_weights_for_sampling( rng: random.Random, total_weight: uint128, recent_chain: List[HeaderBlock] ) -> Optional[List[uint128]]: weight_to_check = [] last_l_weight = recent_chain[-1].reward_chain_block.weight - recent_chain[0].reward_chain_block.weight delta = last_l_weight / total_weight prob_of_adv_succeeding = 1 - math.log(WeightProofHandler.C, delta) if prob_of_adv_succeeding <= 0: 
return None queries = -WeightProofHandler.LAMBDA_L * math.log(2, prob_of_adv_succeeding) for i in range(int(queries) + 1): u = rng.random() q = 1 - delta**u # todo check division and type conversions weight = q * float(total_weight) weight_to_check.append(uint128(int(weight))) weight_to_check.sort() return weight_to_check def _sample_sub_epoch( start_of_epoch_weight: uint128, end_of_epoch_weight: uint128, weight_to_check: List[uint128], ) -> bool: """ weight_to_check: List[uint128] is expected to be sorted """ if weight_to_check is None: return True if weight_to_check[-1] < start_of_epoch_weight: return False if weight_to_check[0] > end_of_epoch_weight: return False choose = False for weight in weight_to_check: if weight > end_of_epoch_weight: return False if start_of_epoch_weight < weight < end_of_epoch_weight: log.debug(f"start weight: {start_of_epoch_weight}") log.debug(f"weight to check {weight}") log.debug(f"end weight: {end_of_epoch_weight}") choose = True break return choose # wp creation methods def _create_sub_epoch_data( sub_epoch_summary: SubEpochSummary, ) -> SubEpochData: reward_chain_hash: bytes32 = sub_epoch_summary.reward_chain_hash # Number of subblocks overflow in previous slot previous_sub_epoch_overflows: uint8 = sub_epoch_summary.num_blocks_overflow # total in sub epoch - expected # New work difficulty and iterations per sub-slot sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty return SubEpochData(reward_chain_hash, previous_sub_epoch_overflows, sub_slot_iters, new_difficulty) async def _challenge_block_vdfs( constants: ConsensusConstants, header_block: HeaderBlock, block_rec: BlockRecord, sub_blocks: Dict[bytes32, BlockRecord], ): (_, _, _, _, cc_vdf_iters, _,) = get_signage_point_vdf_info( constants, header_block.finished_sub_slots, block_rec.overflow, None if header_block.height == 0 else sub_blocks[header_block.prev_header_hash], 
BlockCache(sub_blocks), block_rec.sp_total_iters(constants), block_rec.sp_iters(constants), ) cc_sp_info = None if header_block.reward_chain_block.challenge_chain_sp_vdf:
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/pending_tx_cache.py
flax/full_node/pending_tx_cache.py
from __future__ import annotations from typing import Dict from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.mempool_item import MempoolItem class PendingTxCache: _cache_max_total_cost: int _cache_cost: int _txs: Dict[bytes32, MempoolItem] def __init__(self, cost_limit: int): self._cache_max_total_cost = cost_limit self._cache_cost = 0 self._txs = {} def add(self, item: MempoolItem) -> None: """ Adds SpendBundles that have failed to be added to the pool in potential tx set. This is later used to retry to add them. """ if item.spend_bundle_name in self._txs: return None self._txs[item.spend_bundle_name] = item self._cache_cost += item.cost while self._cache_cost > self._cache_max_total_cost: first_in = list(self._txs.keys())[0] self._cache_cost -= self._txs[first_in].cost self._txs.pop(first_in) def drain(self) -> Dict[bytes32, MempoolItem]: ret = self._txs self._txs = {} self._cache_cost = 0 return ret def cost(self) -> int: return self._cache_cost
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/coin_store.py
flax/full_node/coin_store.py
from __future__ import annotations import dataclasses import sqlite3 from typing import List, Optional, Set, Dict, Any, Tuple, Union import typing_extensions from aiosqlite import Cursor from flax.protocols.wallet_protocol import CoinState from flax.types.blockchain_format.coin import Coin from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.coin_record import CoinRecord from flax.util.db_wrapper import DBWrapper2, SQLITE_MAX_VARIABLE_NUMBER from flax.util.ints import uint32, uint64 from flax.util.chunks import chunks import time import logging from flax.util.lru_cache import LRUCache log = logging.getLogger(__name__) @typing_extensions.final @dataclasses.dataclass class CoinStore: """ This object handles CoinRecords in DB. """ db_wrapper: DBWrapper2 coins_added_at_height_cache: LRUCache[uint32, List[CoinRecord]] @classmethod async def create(cls, db_wrapper: DBWrapper2) -> CoinStore: self = CoinStore(db_wrapper, LRUCache(100)) async with self.db_wrapper.writer_maybe_transaction() as conn: log.info("DB: Creating coin store tables and indexes.") if self.db_wrapper.db_version == 2: # the coin_name is unique in this table because the CoinStore always # only represent a single peak await conn.execute( "CREATE TABLE IF NOT EXISTS coin_record(" "coin_name blob PRIMARY KEY," " confirmed_index bigint," " spent_index bigint," # if this is zero, it means the coin has not been spent " coinbase int," " puzzle_hash blob," " coin_parent blob," " amount blob," # we use a blob of 8 bytes to store uint64 " timestamp bigint)" ) else: # the coin_name is unique in this table because the CoinStore always # only represent a single peak await conn.execute( ( "CREATE TABLE IF NOT EXISTS coin_record(" "coin_name text PRIMARY KEY," " confirmed_index bigint," " spent_index bigint," " spent int," " coinbase int," " puzzle_hash text," " coin_parent text," " amount blob," " timestamp bigint)" ) ) # Useful for reorg lookups log.info("DB: Creating index 
coin_confirmed_index") await conn.execute("CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)") log.info("DB: Creating index coin_spent_index") await conn.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)") log.info("DB: Creating index coin_puzzle_hash") await conn.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)") log.info("DB: Creating index coin_parent_index") await conn.execute("CREATE INDEX IF NOT EXISTS coin_parent_index on coin_record(coin_parent)") return self async def num_unspent(self) -> int: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute("SELECT COUNT(*) FROM coin_record WHERE spent_index=0") as cursor: row = await cursor.fetchone() if row is not None: count: int = row[0] return count return 0 def maybe_from_hex(self, field: Union[bytes, str]) -> bytes32: if self.db_wrapper.db_version == 2: assert isinstance(field, bytes) return bytes32(field) else: assert isinstance(field, str) return bytes32.fromhex(field) def maybe_to_hex(self, field: bytes) -> Any: if self.db_wrapper.db_version == 2: return field else: return field.hex() async def new_block( self, height: uint32, timestamp: uint64, included_reward_coins: Set[Coin], tx_additions: List[Coin], tx_removals: List[bytes32], ) -> List[CoinRecord]: """ Only called for blocks which are blocks (and thus have rewards and transactions) Returns a list of the CoinRecords that were added by this block """ start = time.monotonic() additions = [] for coin in tx_additions: record: CoinRecord = CoinRecord( coin, height, uint32(0), False, timestamp, ) additions.append(record) if height == 0: assert len(included_reward_coins) == 0 else: assert len(included_reward_coins) >= 2 for coin in included_reward_coins: reward_coin_r: CoinRecord = CoinRecord( coin, height, uint32(0), True, timestamp, ) additions.append(reward_coin_r) await self._add_coin_records(additions) await 
self._set_spent(tx_removals, height) end = time.monotonic() log.log( logging.WARNING if end - start > 10 else logging.DEBUG, f"Height {height}: It took {end - start:0.2f}s to apply {len(tx_additions)} additions and " + f"{len(tx_removals)} removals to the coin store. Make sure " + "blockchain database is on a fast drive", ) return additions # Checks DB and DiffStores for CoinRecord with coin_name and returns it async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " "coin_parent, amount, timestamp FROM coin_record WHERE coin_name=?", (self.maybe_to_hex(coin_name),), ) as cursor: row = await cursor.fetchone() if row is not None: coin = self.row_to_coin(row) return CoinRecord(coin, row[0], row[1], row[2], row[6]) return None async def get_coin_records(self, names: List[bytes32]) -> List[CoinRecord]: if len(names) == 0: return [] coins: List[CoinRecord] = [] async with self.db_wrapper.reader_no_transaction() as conn: cursors: List[Cursor] = [] for names_chunk in chunks(names, SQLITE_MAX_VARIABLE_NUMBER): names_db: Tuple[Any, ...] 
if self.db_wrapper.db_version == 2: names_db = tuple(names_chunk) else: names_db = tuple([n.hex() for n in names_chunk]) cursors.append( await conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f"coin_parent, amount, timestamp FROM coin_record " f'WHERE coin_name in ({",".join(["?"] * len(names_db))}) ', names_db, ) ) for cursor in cursors: for row in await cursor.fetchall(): coin = self.row_to_coin(row) record = CoinRecord(coin, row[0], row[1], row[2], row[6]) coins.append(record) return coins async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]: coins_added: Optional[List[CoinRecord]] = self.coins_added_at_height_cache.get(height) if coins_added is not None: return coins_added async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " "coin_parent, amount, timestamp FROM coin_record WHERE confirmed_index=?", (height,), ) as cursor: rows = await cursor.fetchall() coins = [] for row in rows: coin = self.row_to_coin(row) coins.append(CoinRecord(coin, row[0], row[1], row[2], row[6])) self.coins_added_at_height_cache.put(height, coins) return coins async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]: # Special case to avoid querying all unspent coins (spent_index=0) if height == 0: return [] async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " "coin_parent, amount, timestamp FROM coin_record WHERE spent_index=?", (height,), ) as cursor: coins = [] for row in await cursor.fetchall(): if row[1] != 0: coin = self.row_to_coin(row) coin_record = CoinRecord(coin, row[0], row[1], row[2], row[6]) coins.append(coin_record) return coins async def get_all_coins(self, include_spent_coins: bool) -> List[CoinRecord]: # WARNING: this should only be used for testing or in a simulation, # running it on a synced testnet or 
mainnet node will most likely result in an OOM error. coins = set() async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f"coin_parent, amount, timestamp FROM coin_record " f"{'' if include_spent_coins else 'INDEXED BY coin_spent_index WHERE spent_index=0'}" f" ORDER BY confirmed_index" ) as cursor: for row in await cursor.fetchall(): coin = self.row_to_coin(row) coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6])) return list(coins) # Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them async def get_coin_records_by_puzzle_hash( self, include_spent_coins: bool, puzzle_hash: bytes32, start_height: uint32 = uint32(0), end_height: uint32 = uint32((2**32) - 1), ) -> List[CoinRecord]: coins = set() async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f"coin_parent, amount, timestamp FROM coin_record INDEXED BY coin_puzzle_hash WHERE puzzle_hash=? " f"AND confirmed_index>=? AND confirmed_index<? " f"{'' if include_spent_coins else 'AND spent_index=0'}", (self.maybe_to_hex(puzzle_hash), start_height, end_height), ) as cursor: for row in await cursor.fetchall(): coin = self.row_to_coin(row) coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6])) return list(coins) async def get_coin_records_by_puzzle_hashes( self, include_spent_coins: bool, puzzle_hashes: List[bytes32], start_height: uint32 = uint32(0), end_height: uint32 = uint32((2**32) - 1), ) -> List[CoinRecord]: if len(puzzle_hashes) == 0: return [] coins = set() puzzle_hashes_db: Tuple[Any, ...] 
if self.db_wrapper.db_version == 2: puzzle_hashes_db = tuple(puzzle_hashes) else: puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes]) async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f"coin_parent, amount, timestamp FROM coin_record INDEXED BY coin_puzzle_hash " f'WHERE puzzle_hash in ({"?," * (len(puzzle_hashes) - 1)}?) ' f"AND confirmed_index>=? AND confirmed_index<? " f"{'' if include_spent_coins else 'AND spent_index=0'}", puzzle_hashes_db + (start_height, end_height), ) as cursor: for row in await cursor.fetchall(): coin = self.row_to_coin(row) coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6])) return list(coins) async def get_coin_records_by_names( self, include_spent_coins: bool, names: List[bytes32], start_height: uint32 = uint32(0), end_height: uint32 = uint32((2**32) - 1), ) -> List[CoinRecord]: if len(names) == 0: return [] coins = set() names_db: Tuple[Any, ...] if self.db_wrapper.db_version == 2: names_db = tuple(names) else: names_db = tuple([name.hex() for name in names]) async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f"coin_parent, amount, timestamp FROM coin_record INDEXED BY sqlite_autoindex_coin_record_1 " f'WHERE coin_name in ({"?," * (len(names) - 1)}?) ' f"AND confirmed_index>=? AND confirmed_index<? 
" f"{'' if include_spent_coins else 'AND spent_index=0'}", names_db + (start_height, end_height), ) as cursor: for row in await cursor.fetchall(): coin = self.row_to_coin(row) coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6])) return list(coins) def row_to_coin(self, row: sqlite3.Row) -> Coin: return Coin(self.maybe_from_hex(row[4]), self.maybe_from_hex(row[3]), uint64.from_bytes(row[5])) def row_to_coin_state(self, row: sqlite3.Row) -> CoinState: coin = self.row_to_coin(row) spent_h = None if row[1] != 0: spent_h = row[1] return CoinState(coin, spent_h, row[0]) async def get_coin_states_by_puzzle_hashes( self, include_spent_coins: bool, puzzle_hashes: List[bytes32], min_height: uint32 = uint32(0), ) -> List[CoinState]: if len(puzzle_hashes) == 0: return [] coins = set() async with self.db_wrapper.reader_no_transaction() as conn: for puzzles in chunks(puzzle_hashes, SQLITE_MAX_VARIABLE_NUMBER): puzzle_hashes_db: Tuple[Any, ...] if self.db_wrapper.db_version == 2: puzzle_hashes_db = tuple(puzzles) else: puzzle_hashes_db = tuple([ph.hex() for ph in puzzles]) async with conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f"coin_parent, amount, timestamp FROM coin_record INDEXED BY coin_puzzle_hash " f'WHERE puzzle_hash in ({"?," * (len(puzzles) - 1)}?) ' f"AND (confirmed_index>=? OR spent_index>=?)" f"{'' if include_spent_coins else 'AND spent_index=0'}", puzzle_hashes_db + (min_height, min_height), ) as cursor: row: sqlite3.Row async for row in cursor: coins.add(self.row_to_coin_state(row)) return list(coins) async def get_coin_records_by_parent_ids( self, include_spent_coins: bool, parent_ids: List[bytes32], start_height: uint32 = uint32(0), end_height: uint32 = uint32((2**32) - 1), ) -> List[CoinRecord]: if len(parent_ids) == 0: return [] coins = set() async with self.db_wrapper.reader_no_transaction() as conn: for ids in chunks(parent_ids, SQLITE_MAX_VARIABLE_NUMBER): parent_ids_db: Tuple[Any, ...] 
if self.db_wrapper.db_version == 2: parent_ids_db = tuple(ids) else: parent_ids_db = tuple([pid.hex() for pid in ids]) async with conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f'coin_parent, amount, timestamp FROM coin_record WHERE coin_parent in ({"?," * (len(ids) - 1)}?) ' f"AND confirmed_index>=? AND confirmed_index<? " f"{'' if include_spent_coins else 'AND spent_index=0'}", parent_ids_db + (start_height, end_height), ) as cursor: async for row in cursor: coin = self.row_to_coin(row) coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6])) return list(coins) async def get_coin_states_by_ids( self, include_spent_coins: bool, coin_ids: List[bytes32], min_height: uint32 = uint32(0), ) -> List[CoinState]: if len(coin_ids) == 0: return [] coins = set() async with self.db_wrapper.reader_no_transaction() as conn: for ids in chunks(coin_ids, SQLITE_MAX_VARIABLE_NUMBER): coin_ids_db: Tuple[Any, ...] if self.db_wrapper.db_version == 2: coin_ids_db = tuple(ids) else: coin_ids_db = tuple([pid.hex() for pid in ids]) async with conn.execute( f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " f'coin_parent, amount, timestamp FROM coin_record WHERE coin_name in ({"?," * (len(ids) - 1)}?) ' f"AND (confirmed_index>=? OR spent_index>=?)" f"{'' if include_spent_coins else 'AND spent_index=0'}", coin_ids_db + (min_height, min_height), ) as cursor: async for row in cursor: coins.add(self.row_to_coin_state(row)) return list(coins) async def rollback_to_block(self, block_index: int) -> List[CoinRecord]: """ Note that block_index can be negative, in which case everything is rolled back Returns the list of coin records that have been modified """ coin_changes: Dict[bytes32, CoinRecord] = {} # Add coins that are confirmed in the reverted blocks to the list of updated coins. 
async with self.db_wrapper.writer_maybe_transaction() as conn: async with conn.execute( "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " "coin_parent, amount, timestamp FROM coin_record WHERE confirmed_index>?", (block_index,), ) as cursor: for row in await cursor.fetchall(): coin = self.row_to_coin(row) record = CoinRecord(coin, uint32(0), row[1], row[2], uint64(0)) coin_changes[record.name] = record # Delete reverted blocks from storage await conn.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,)) # Add coins that are confirmed in the reverted blocks to the list of changed coins. async with conn.execute( "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, " "coin_parent, amount, timestamp FROM coin_record WHERE spent_index>?", (block_index,), ) as cursor: for row in await cursor.fetchall(): coin = self.row_to_coin(row) record = CoinRecord(coin, row[0], uint32(0), row[2], row[6]) if record.name not in coin_changes: coin_changes[record.name] = record if self.db_wrapper.db_version == 2: await conn.execute("UPDATE coin_record SET spent_index=0 WHERE spent_index>?", (block_index,)) else: await conn.execute( "UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?", (block_index,) ) self.coins_added_at_height_cache = LRUCache(self.coins_added_at_height_cache.capacity) return list(coin_changes.values()) # Store CoinRecord in DB async def _add_coin_records(self, records: List[CoinRecord]) -> None: if self.db_wrapper.db_version == 2: values2 = [] for record in records: values2.append( ( record.coin.name(), record.confirmed_block_index, record.spent_block_index, int(record.coinbase), record.coin.puzzle_hash, record.coin.parent_coin_info, bytes(uint64(record.coin.amount)), record.timestamp, ) ) if len(values2) > 0: async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.executemany( "INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?)", values2, ) else: values = [] for record in records: 
values.append( ( record.coin.name().hex(), record.confirmed_block_index, record.spent_block_index, int(record.spent), int(record.coinbase), record.coin.puzzle_hash.hex(), record.coin.parent_coin_info.hex(), bytes(uint64(record.coin.amount)), record.timestamp, ) ) if len(values) > 0: async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.executemany( "INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)", values, ) # Update coin_record to be spent in DB async def _set_spent(self, coin_names: List[bytes32], index: uint32) -> None: assert len(coin_names) == 0 or index > 0 if len(coin_names) == 0: return None async with self.db_wrapper.writer_maybe_transaction() as conn: rows_updated: int = 0 for coin_names_chunk in chunks(coin_names, SQLITE_MAX_VARIABLE_NUMBER): name_params = ",".join(["?"] * len(coin_names_chunk)) if self.db_wrapper.db_version == 2: ret: Cursor = await conn.execute( f"UPDATE OR FAIL coin_record INDEXED BY sqlite_autoindex_coin_record_1 " f"SET spent_index={index} " f"WHERE spent_index=0 " f"AND coin_name IN ({name_params})", coin_names_chunk, ) else: ret = await conn.execute( f"UPDATE OR FAIL coin_record INDEXED BY sqlite_autoindex_coin_record_1 " f"SET spent=1, spent_index={index} " f"WHERE spent_index=0 " f"AND coin_name IN ({name_params})", [name.hex() for name in coin_names_chunk], ) rows_updated += ret.rowcount if rows_updated != len(coin_names): raise ValueError( f"Invalid operation to set spent, total updates {rows_updated} expected {len(coin_names)}" )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/hint_store.py
flax/full_node/hint_store.py
from __future__ import annotations import dataclasses from typing import List, Tuple import typing_extensions from flax.types.blockchain_format.sized_bytes import bytes32 from flax.util.db_wrapper import DBWrapper2 import logging log = logging.getLogger(__name__) @typing_extensions.final @dataclasses.dataclass class HintStore: db_wrapper: DBWrapper2 @classmethod async def create(cls, db_wrapper: DBWrapper2) -> HintStore: self = HintStore(db_wrapper) async with self.db_wrapper.writer_maybe_transaction() as conn: log.info("DB: Creating hint store tables and indexes.") if self.db_wrapper.db_version == 2: await conn.execute("CREATE TABLE IF NOT EXISTS hints(coin_id blob, hint blob, UNIQUE (coin_id, hint))") else: await conn.execute( "CREATE TABLE IF NOT EXISTS hints(id INTEGER PRIMARY KEY AUTOINCREMENT, coin_id blob, hint blob)" ) log.info("DB: Creating index hint_index") await conn.execute("CREATE INDEX IF NOT EXISTS hint_index on hints(hint)") return self async def get_coin_ids(self, hint: bytes) -> List[bytes32]: async with self.db_wrapper.reader_no_transaction() as conn: cursor = await conn.execute("SELECT coin_id from hints WHERE hint=?", (hint,)) rows = await cursor.fetchall() await cursor.close() coin_ids = [] for row in rows: coin_ids.append(row[0]) return coin_ids async def add_hints(self, coin_hint_list: List[Tuple[bytes32, bytes]]) -> None: if len(coin_hint_list) == 0: return None async with self.db_wrapper.writer_maybe_transaction() as conn: if self.db_wrapper.db_version == 2: cursor = await conn.executemany( "INSERT OR IGNORE INTO hints VALUES(?, ?)", coin_hint_list, ) else: cursor = await conn.executemany( "INSERT INTO hints VALUES(?, ?, ?)", [(None,) + record for record in coin_hint_list], ) await cursor.close() async def count_hints(self) -> int: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute("select count(*) from hints") as cursor: row = await cursor.fetchone() assert row is not None [count] = row return int(count)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/block_store.py
flax/full_node/block_store.py
from __future__ import annotations import dataclasses import logging import sqlite3 from typing import Dict, List, Optional, Tuple, Any, Union, Sequence import typing_extensions import zstd from flax.consensus.block_record import BlockRecord from flax.types.blockchain_format.program import SerializedProgram from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.full_block import FullBlock from flax.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments from flax.util.db_wrapper import DBWrapper2, execute_fetchone from flax.util.errors import Err from flax.util.full_block_utils import block_info_from_block, generator_from_block from flax.util.ints import uint32 from flax.util.lru_cache import LRUCache from flax.util.full_block_utils import GeneratorBlockInfo log = logging.getLogger(__name__) @typing_extensions.final @dataclasses.dataclass class BlockStore: block_cache: LRUCache[bytes32, FullBlock] db_wrapper: DBWrapper2 ses_challenge_cache: LRUCache[bytes32, List[SubEpochChallengeSegment]] @classmethod async def create(cls, db_wrapper: DBWrapper2) -> BlockStore: self = cls(LRUCache(1000), db_wrapper, LRUCache(50)) async with self.db_wrapper.writer_maybe_transaction() as conn: log.info("DB: Creating block store tables and indexes.") if self.db_wrapper.db_version == 2: # TODO: most data in block is duplicated in block_record. The only # reason for this is that our parsing of a FullBlock is so slow, # it's faster to store duplicate data to parse less when we just # need the BlockRecord. Once we fix the parsing (and data structure) # of FullBlock, this can use less space await conn.execute( "CREATE TABLE IF NOT EXISTS full_blocks(" "header_hash blob PRIMARY KEY," "prev_hash blob," "height bigint," "sub_epoch_summary blob," "is_fully_compactified tinyint," "in_main_chain tinyint," "block blob," "block_record blob)" ) # This is a single-row table containing the hash of the current # peak. 
The "key" field is there to make update statements simple await conn.execute("CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)") # If any of these indices are altered, they should also be altered # in the flax/cmds/db_upgrade.py file log.info("DB: Creating index height") await conn.execute("CREATE INDEX IF NOT EXISTS height on full_blocks(height)") # Sub epoch segments for weight proofs await conn.execute( "CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(" "ses_block_hash blob PRIMARY KEY," "challenge_segments blob)" ) # If any of these indices are altered, they should also be altered # in the flax/cmds/db_upgrade.py file log.info("DB: Creating index is_fully_compactified") await conn.execute( "CREATE INDEX IF NOT EXISTS is_fully_compactified ON" " full_blocks(is_fully_compactified, in_main_chain) WHERE in_main_chain=1" ) log.info("DB: Creating index main_chain") await conn.execute( "CREATE INDEX IF NOT EXISTS main_chain ON full_blocks(height, in_main_chain) WHERE in_main_chain=1" ) else: await conn.execute( "CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint," " is_block tinyint, is_fully_compactified tinyint, block blob)" ) # Block records await conn.execute( "CREATE TABLE IF NOT EXISTS block_records(header_hash " "text PRIMARY KEY, prev_hash text, height bigint," "block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)" ) # Sub epoch segments for weight proofs await conn.execute( "CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY," "challenge_segments blob)" ) # Height index so we can look up in order of height for sync purposes log.info("DB: Creating index full_block_height") await conn.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)") log.info("DB: Creating index is_fully_compactified") await conn.execute( "CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)" ) log.info("DB: Creating index 
height") await conn.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)") log.info("DB: Creating index peak") await conn.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)") return self def maybe_from_hex(self, field: Union[bytes, str]) -> bytes32: if self.db_wrapper.db_version == 2: assert isinstance(field, bytes) return bytes32(field) else: assert isinstance(field, str) return bytes32.fromhex(field) def maybe_to_hex(self, field: bytes) -> Any: if self.db_wrapper.db_version == 2: return field else: return field.hex() def compress(self, block: FullBlock) -> bytes: ret: bytes = zstd.compress(bytes(block)) return ret def maybe_decompress(self, block_bytes: bytes) -> FullBlock: if self.db_wrapper.db_version == 2: ret: FullBlock = FullBlock.from_bytes(zstd.decompress(block_bytes)) else: ret = FullBlock.from_bytes(block_bytes) return ret def maybe_decompress_blob(self, block_bytes: bytes) -> bytes: if self.db_wrapper.db_version == 2: ret: bytes = zstd.decompress(block_bytes) return ret else: return block_bytes async def rollback(self, height: int) -> None: if self.db_wrapper.db_version == 2: async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.execute( "UPDATE OR FAIL full_blocks SET in_main_chain=0 WHERE height>? 
AND in_main_chain=1", (height,) ) async def set_in_chain(self, header_hashes: List[Tuple[bytes32]]) -> None: if self.db_wrapper.db_version == 2: async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.executemany( "UPDATE OR FAIL full_blocks SET in_main_chain=1 WHERE header_hash=?", header_hashes ) async def replace_proof(self, header_hash: bytes32, block: FullBlock) -> None: assert header_hash == block.header_hash block_bytes: bytes if self.db_wrapper.db_version == 2: block_bytes = self.compress(block) else: block_bytes = bytes(block) self.block_cache.put(header_hash, block) async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.execute( "UPDATE full_blocks SET block=?,is_fully_compactified=? WHERE header_hash=?", ( block_bytes, int(block.is_fully_compactified()), self.maybe_to_hex(header_hash), ), ) async def add_full_block(self, header_hash: bytes32, block: FullBlock, block_record: BlockRecord) -> None: self.block_cache.put(header_hash, block) if self.db_wrapper.db_version == 2: ses: Optional[bytes] = ( None if block_record.sub_epoch_summary_included is None else bytes(block_record.sub_epoch_summary_included) ) async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.execute( "INSERT OR IGNORE INTO full_blocks VALUES(?, ?, ?, ?, ?, ?, ?, ?)", ( header_hash, block.prev_header_hash, block.height, ses, int(block.is_fully_compactified()), False, # in_main_chain self.compress(block), bytes(block_record), ), ) else: async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.execute( "INSERT OR IGNORE INTO full_blocks VALUES(?, ?, ?, ?, ?)", ( header_hash.hex(), block.height, int(block.is_transaction_block()), int(block.is_fully_compactified()), bytes(block), ), ) await conn.execute( "INSERT OR IGNORE INTO block_records VALUES(?, ?, ?, ?,?, ?, ?)", ( header_hash.hex(), block.prev_header_hash.hex(), block.height, bytes(block_record), None if block_record.sub_epoch_summary_included is None else 
bytes(block_record.sub_epoch_summary_included), False, block.is_transaction_block(), ), ) async def persist_sub_epoch_challenge_segments( self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment] ) -> None: async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.execute( "INSERT OR REPLACE INTO sub_epoch_segments_v3 VALUES(?, ?)", (self.maybe_to_hex(ses_block_hash), bytes(SubEpochSegments(segments))), ) async def get_sub_epoch_challenge_segments( self, ses_block_hash: bytes32, ) -> Optional[List[SubEpochChallengeSegment]]: cached: Optional[List[SubEpochChallengeSegment]] = self.ses_challenge_cache.get(ses_block_hash) if cached is not None: return cached async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT challenge_segments from sub_epoch_segments_v3 WHERE ses_block_hash=?", (self.maybe_to_hex(ses_block_hash),), ) as cursor: row = await cursor.fetchone() if row is not None: challenge_segments: List[SubEpochChallengeSegment] = SubEpochSegments.from_bytes(row[0]).challenge_segments self.ses_challenge_cache.put(ses_block_hash, challenge_segments) return challenge_segments return None def rollback_cache_block(self, header_hash: bytes32) -> None: try: self.block_cache.remove(header_hash) except KeyError: # this is best effort. 
When rolling back, we may not have added the # block to the cache yet pass async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]: cached: Optional[FullBlock] = self.block_cache.get(header_hash) if cached is not None: log.debug(f"cache hit for block {header_hash.hex()}") return cached log.debug(f"cache miss for block {header_hash.hex()}") async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT block from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),) ) as cursor: row = await cursor.fetchone() if row is not None: block = self.maybe_decompress(row[0]) self.block_cache.put(header_hash, block) return block return None async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]: cached = self.block_cache.get(header_hash) if cached is not None: log.debug(f"cache hit for block {header_hash.hex()}") return bytes(cached) log.debug(f"cache miss for block {header_hash.hex()}") async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT block from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),) ) as cursor: row = await cursor.fetchone() if row is not None: if self.db_wrapper.db_version == 2: ret: bytes = zstd.decompress(row[0]) else: ret = row[0] return ret return None async def get_full_blocks_at(self, heights: List[uint32]) -> List[FullBlock]: if len(heights) == 0: return [] formatted_str = f'SELECT block from full_blocks WHERE height in ({"?," * (len(heights) - 1)}?)' async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute(formatted_str, heights) as cursor: ret: List[FullBlock] = [] for row in await cursor.fetchall(): ret.append(self.maybe_decompress(row[0])) return ret async def get_block_info(self, header_hash: bytes32) -> Optional[GeneratorBlockInfo]: cached = self.block_cache.get(header_hash) if cached is not None: log.debug(f"cache hit for block {header_hash.hex()}") return GeneratorBlockInfo( 
cached.foliage.prev_block_hash, cached.transactions_generator, cached.transactions_generator_ref_list ) formatted_str = "SELECT block, height from full_blocks WHERE header_hash=?" async with self.db_wrapper.reader_no_transaction() as conn: row = await execute_fetchone(conn, formatted_str, (self.maybe_to_hex(header_hash),)) if row is None: return None if self.db_wrapper.db_version == 2: block_bytes = zstd.decompress(row[0]) else: block_bytes = row[0] try: return block_info_from_block(block_bytes) except Exception as e: log.exception(f"cheap parser failed for block at height {row[1]}: {e}") # this is defensive, on the off-chance that # block_info_from_block() fails, fall back to the reliable # definition of parsing a block b = FullBlock.from_bytes(block_bytes) return GeneratorBlockInfo( b.foliage.prev_block_hash, b.transactions_generator, b.transactions_generator_ref_list ) async def get_generator(self, header_hash: bytes32) -> Optional[SerializedProgram]: cached = self.block_cache.get(header_hash) if cached is not None: log.debug(f"cache hit for block {header_hash.hex()}") return cached.transactions_generator formatted_str = "SELECT block, height from full_blocks WHERE header_hash=?" 
async with self.db_wrapper.reader_no_transaction() as conn: row = await execute_fetchone(conn, formatted_str, (self.maybe_to_hex(header_hash),)) if row is None: return None if self.db_wrapper.db_version == 2: block_bytes = zstd.decompress(row[0]) else: block_bytes = row[0] try: return generator_from_block(block_bytes) except Exception as e: log.error(f"cheap parser failed for block at height {row[1]}: {e}") # this is defensive, on the off-chance that # generator_from_block() fails, fall back to the reliable # definition of parsing a block b = FullBlock.from_bytes(block_bytes) return b.transactions_generator async def get_generators_at(self, heights: List[uint32]) -> List[SerializedProgram]: assert self.db_wrapper.db_version == 2 if len(heights) == 0: return [] generators: Dict[uint32, SerializedProgram] = {} formatted_str = ( f"SELECT block, height from full_blocks " f'WHERE in_main_chain=1 AND height in ({"?," * (len(heights) - 1)}?)' ) async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute(formatted_str, heights) as cursor: async for row in cursor: block_bytes = zstd.decompress(row[0]) try: gen = generator_from_block(block_bytes) except Exception as e: log.error(f"cheap parser failed for block at height {row[1]}: {e}") # this is defensive, on the off-chance that # generator_from_block() fails, fall back to the reliable # definition of parsing a block b = FullBlock.from_bytes(block_bytes) gen = b.transactions_generator if gen is None: raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR) generators[uint32(row[1])] = gen return [generators[h] for h in heights] async def get_block_records_by_hash(self, header_hashes: List[bytes32]) -> List[BlockRecord]: """ Returns a list of Block Records, ordered by the same order in which header_hashes are passed in. 
Throws an exception if the blocks are not present """ if len(header_hashes) == 0: return [] all_blocks: Dict[bytes32, BlockRecord] = {} if self.db_wrapper.db_version == 2: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT header_hash,block_record FROM full_blocks " f'WHERE header_hash in ({"?," * (len(header_hashes) - 1)}?)', header_hashes, ) as cursor: for row in await cursor.fetchall(): header_hash = bytes32(row[0]) all_blocks[header_hash] = BlockRecord.from_bytes(row[1]) else: formatted_str = f'SELECT block from block_records WHERE header_hash in ({"?," * (len(header_hashes) - 1)}?)' async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute(formatted_str, [hh.hex() for hh in header_hashes]) as cursor: for row in await cursor.fetchall(): block_rec: BlockRecord = BlockRecord.from_bytes(row[0]) all_blocks[block_rec.header_hash] = block_rec ret: List[BlockRecord] = [] for hh in header_hashes: if hh not in all_blocks: raise ValueError(f"Header hash {hh} not in the blockchain") ret.append(all_blocks[hh]) return ret async def get_block_bytes_by_hash(self, header_hashes: List[bytes32]) -> List[bytes]: """ Returns a list of Full Blocks block blobs, ordered by the same order in which header_hashes are passed in. 
Throws an exception if the blocks are not present """ if len(header_hashes) == 0: return [] # sqlite on python3.7 on windows has issues with large variable substitutions assert len(header_hashes) < 901 header_hashes_db: Sequence[Union[bytes32, str]] if self.db_wrapper.db_version == 2: header_hashes_db = header_hashes else: header_hashes_db = [hh.hex() for hh in header_hashes] formatted_str = ( f'SELECT header_hash, block from full_blocks WHERE header_hash in ({"?," * (len(header_hashes_db) - 1)}?)' ) all_blocks: Dict[bytes32, bytes] = {} async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute(formatted_str, header_hashes_db) as cursor: for row in await cursor.fetchall(): header_hash = self.maybe_from_hex(row[0]) all_blocks[header_hash] = self.maybe_decompress_blob(row[1]) ret: List[bytes] = [] for hh in header_hashes: block = all_blocks.get(hh) if block is not None: ret.append(block) else: raise ValueError(f"Header hash {hh} not in the blockchain") return ret async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]: """ Returns a list of Full Blocks blocks, ordered by the same order in which header_hashes are passed in. 
Throws an exception if the blocks are not present """ if len(header_hashes) == 0: return [] header_hashes_db: Sequence[Union[bytes32, str]] if self.db_wrapper.db_version == 2: header_hashes_db = header_hashes else: header_hashes_db = [hh.hex() for hh in header_hashes] formatted_str = ( f'SELECT header_hash, block from full_blocks WHERE header_hash in ({"?," * (len(header_hashes_db) - 1)}?)' ) all_blocks: Dict[bytes32, FullBlock] = {} async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute(formatted_str, header_hashes_db) as cursor: for row in await cursor.fetchall(): header_hash = self.maybe_from_hex(row[0]) full_block: FullBlock = self.maybe_decompress(row[1]) all_blocks[header_hash] = full_block self.block_cache.put(header_hash, full_block) ret: List[FullBlock] = [] for hh in header_hashes: if hh not in all_blocks: raise ValueError(f"Header hash {hh} not in the blockchain") ret.append(all_blocks[hh]) return ret async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]: if self.db_wrapper.db_version == 2: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT block_record FROM full_blocks WHERE header_hash=?", (header_hash,), ) as cursor: row = await cursor.fetchone() if row is not None: return BlockRecord.from_bytes(row[0]) else: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT block from block_records WHERE header_hash=?", (header_hash.hex(),), ) as cursor: row = await cursor.fetchone() if row is not None: return BlockRecord.from_bytes(row[0]) return None async def get_block_records_in_range( self, start: int, stop: int, ) -> Dict[bytes32, BlockRecord]: """ Returns a dictionary with all blocks in range between start and stop if present. 
""" ret: Dict[bytes32, BlockRecord] = {} if self.db_wrapper.db_version == 2: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT header_hash, block_record FROM full_blocks WHERE height >= ? AND height <= ?", (start, stop), ) as cursor: for row in await cursor.fetchall(): header_hash = bytes32(row[0]) ret[header_hash] = BlockRecord.from_bytes(row[1]) else: formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}" async with self.db_wrapper.reader_no_transaction() as conn: async with await conn.execute(formatted_str) as cursor: for row in await cursor.fetchall(): header_hash = self.maybe_from_hex(row[0]) ret[header_hash] = BlockRecord.from_bytes(row[1]) return ret async def get_block_bytes_in_range( self, start: int, stop: int, ) -> List[bytes]: """ Returns a list with all full blocks in range between start and stop if present. """ maybe_decompress_blob = self.maybe_decompress_blob assert self.db_wrapper.db_version == 2 async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT block FROM full_blocks WHERE height >= ? AND height <= ? 
and in_main_chain=1", (start, stop), ) as cursor: rows: List[sqlite3.Row] = list(await cursor.fetchall()) if len(rows) != (stop - start) + 1: raise ValueError(f"Some blocks in range {start}-{stop} were not found.") return [maybe_decompress_blob(row[0]) for row in rows] async def get_peak(self) -> Optional[Tuple[bytes32, uint32]]: if self.db_wrapper.db_version == 2: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute("SELECT hash FROM current_peak WHERE key = 0") as cursor: peak_row = await cursor.fetchone() if peak_row is None: return None async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute("SELECT height FROM full_blocks WHERE header_hash=?", (peak_row[0],)) as cursor: peak_height = await cursor.fetchone() if peak_height is None: return None return bytes32(peak_row[0]), uint32(peak_height[0]) else: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute("SELECT header_hash, height from block_records WHERE is_peak = 1") as cursor: peak_row = await cursor.fetchone() if peak_row is None: return None return bytes32(bytes.fromhex(peak_row[0])), uint32(peak_row[1]) async def get_block_records_close_to_peak( self, blocks_n: int ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]: """ Returns a dictionary with all blocks that have height >= peak height - blocks_n, as well as the peak header hash. 
""" peak = await self.get_peak() if peak is None: return {}, None ret: Dict[bytes32, BlockRecord] = {} if self.db_wrapper.db_version == 2: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT header_hash, block_record FROM full_blocks WHERE height >= ?", (peak[1] - blocks_n,), ) as cursor: for row in await cursor.fetchall(): header_hash = bytes32(row[0]) ret[header_hash] = BlockRecord.from_bytes(row[1]) else: formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak[1] - blocks_n}" async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute(formatted_str) as cursor: for row in await cursor.fetchall(): header_hash = self.maybe_from_hex(row[0]) ret[header_hash] = BlockRecord.from_bytes(row[1]) return ret, peak[0] async def set_peak(self, header_hash: bytes32) -> None: # We need to be in a sqlite transaction here. # Note: we do not commit this to the database yet, as we need to also change the coin store if self.db_wrapper.db_version == 2: # Note: we use the key field as 0 just to ensure all inserts replace the existing row async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.execute("INSERT OR REPLACE INTO current_peak VALUES(?, ?)", (0, header_hash)) else: async with self.db_wrapper.writer_maybe_transaction() as conn: await conn.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1") await conn.execute( "UPDATE block_records SET is_peak=1 WHERE header_hash=?", (self.maybe_to_hex(header_hash),), ) async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]: async with self.db_wrapper.writer_maybe_transaction() as conn: async with conn.execute( "SELECT is_fully_compactified from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),) ) as cursor: row = await cursor.fetchone() if row is None: return None return bool(row[0]) async def get_random_not_compactified(self, number: int) -> List[int]: if self.db_wrapper.db_version 
== 2: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( f"SELECT height FROM full_blocks WHERE in_main_chain=1 AND is_fully_compactified=0 " f"ORDER BY RANDOM() LIMIT {number}" ) as cursor: rows = await cursor.fetchall() else: # Since orphan blocks do not get compactified, we need to check whether all blocks with a # certain height are not compact. And if we do have compact orphan blocks, then all that # happens is that the occasional chain block stays uncompact - not ideal, but harmless. async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( f"SELECT height FROM full_blocks GROUP BY height HAVING sum(is_fully_compactified)=0 " f"ORDER BY RANDOM() LIMIT {number}" ) as cursor: rows = await cursor.fetchall() heights = [int(row[0]) for row in rows] return heights async def count_compactified_blocks(self) -> int: if self.db_wrapper.db_version == 2: # DB V2 has an index on is_fully_compactified only for blocks in the main chain async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "select count(*) from full_blocks where is_fully_compactified=1 and in_main_chain=1" ) as cursor: row = await cursor.fetchone() else: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute("select count(*) from full_blocks where is_fully_compactified=1") as cursor: row = await cursor.fetchone() assert row is not None [count] = row return int(count) async def count_uncompactified_blocks(self) -> int: if self.db_wrapper.db_version == 2: # DB V2 has an index on is_fully_compactified only for blocks in the main chain async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "select count(*) from full_blocks where is_fully_compactified=0 and in_main_chain=1" ) as cursor: row = await cursor.fetchone() else: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute("select count(*) from full_blocks where 
is_fully_compactified=0") as cursor: row = await cursor.fetchone() assert row is not None [count] = row return int(count)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_tracker.py
flax/full_node/fee_tracker.py
from __future__ import annotations import logging from dataclasses import dataclass from typing import List, Optional, Tuple from sortedcontainers import SortedDict from flax.full_node.fee_estimate_store import FeeStore from flax.full_node.fee_estimator_constants import ( FEE_ESTIMATOR_VERSION, INFINITE_FEE_RATE, INITIAL_STEP, LONG_BLOCK_PERIOD, LONG_DECAY, LONG_SCALE, MAX_FEE_RATE, MED_BLOCK_PERIOD, MED_DECAY, MED_SCALE, SECONDS_PER_BLOCK, SHORT_BLOCK_PERIOD, SHORT_DECAY, SHORT_SCALE, STEP_SIZE, SUCCESS_PCT, SUFFICIENT_FEE_TXS, ) from flax.full_node.fee_history import FeeStatBackup, FeeTrackerBackup from flax.types.mempool_item import MempoolItem from flax.util.ints import uint8, uint32, uint64 @dataclass class BucketResult: start: float end: float within_target: float total_confirmed: float in_mempool: float left_mempool: float @dataclass class EstimateResult: requested_time: uint64 pass_bucket: BucketResult fail_bucket: BucketResult median: float def get_estimate_block_intervals() -> List[int]: return [ SHORT_BLOCK_PERIOD * SHORT_SCALE - SHORT_SCALE, MED_BLOCK_PERIOD * MED_SCALE - MED_SCALE, LONG_BLOCK_PERIOD * LONG_SCALE - LONG_SCALE, ] def get_estimate_time_intervals() -> List[uint64]: return [uint64(blocks * SECONDS_PER_BLOCK) for blocks in get_estimate_block_intervals()] # Implementation of bitcoin core fee estimation algorithm # https://gist.github.com/morcos/d3637f015bc4e607e1fd10d8351e9f41 class FeeStat: # TxConfirmStats buckets: List[float] sorted_buckets: SortedDict # key is upper bound of bucket, val is index in buckets # For each bucket xL # Count the total number of txs in each bucket # Track historical moving average of this total over block tx_ct_avg: List[float] # Count the total number of txs confirmed within Y blocks in each bucket # Track the historical moving average of these totals over blocks confirmed_average: List[List[float]] # confirmed_average [y][x] # Track moving average of txs which have been evicted from the mempool # after failing 
to be confirmed within Y block failed_average: List[List[float]] # failed_average [y][x] # Sum the total fee_rate of all txs in each bucket # Track historical moving average of this total over blocks m_fee_rate_avg: List[float] decay: float # Resolution of blocks with which confirmations are tracked scale: int # Mempool counts of outstanding transactions # For each bucket x, track the number of transactions in mempool # that are unconfirmed for each possible confirmation value y unconfirmed_txs: List[List[int]] # transactions still unconfirmed after get_max_confirmed for each bucket old_unconfirmed_txs: List[int] max_confirms: int fee_store: FeeStore def __init__( self, buckets: List[float], sorted_buckets: SortedDict, max_periods: int, decay: float, scale: int, log: logging.Logger, fee_store: FeeStore, my_type: str, ): self.buckets = buckets self.sorted_buckets = sorted_buckets self.confirmed_average = [[] for _ in range(0, max_periods)] self.failed_average = [[] for _ in range(0, max_periods)] self.decay = decay self.scale = scale self.max_confirms = self.scale * len(self.confirmed_average) self.log = log self.fee_store = fee_store self.type = my_type self.max_periods = max_periods for i in range(0, max_periods): self.confirmed_average[i] = [0 for _ in range(0, len(buckets))] self.failed_average[i] = [0 for _ in range(0, len(buckets))] self.tx_ct_avg = [0 for _ in range(0, len(buckets))] self.m_fee_rate_avg = [0 for _ in range(0, len(buckets))] self.unconfirmed_txs = [[] for _ in range(0, self.max_confirms)] for i in range(0, self.max_confirms): self.unconfirmed_txs[i] = [0 for _ in range(0, len(buckets))] self.old_unconfirmed_txs = [0 for _ in range(0, len(buckets))] def get_bucket_index(self, fee_rate: float) -> int: if fee_rate in self.sorted_buckets: bucket_index = self.sorted_buckets[fee_rate] else: # Choose the bucket to the left if we do not have exactly this fee rate bucket_index = self.sorted_buckets.bisect_left(fee_rate) - 1 return int(bucket_index) def 
tx_confirmed(self, blocks_to_confirm: int, item: MempoolItem) -> None: if blocks_to_confirm < 1: raise ValueError("tx_confirmed called with < 1 block to confirm") periods_to_confirm = int((blocks_to_confirm + self.scale - 1) / self.scale) fee_rate = item.fee_per_cost * 1000 bucket_index = self.get_bucket_index(fee_rate) for i in range(periods_to_confirm, len(self.confirmed_average)): self.confirmed_average[i - 1][bucket_index] += 1 self.tx_ct_avg[bucket_index] += 1 self.m_fee_rate_avg[bucket_index] += fee_rate def update_moving_averages(self) -> None: for j in range(0, len(self.buckets)): for i in range(0, len(self.confirmed_average)): self.confirmed_average[i][j] *= self.decay self.failed_average[i][j] *= self.decay self.tx_ct_avg[j] *= self.decay self.m_fee_rate_avg[j] *= self.decay def clear_current(self, block_height: uint32) -> None: for i in range(0, len(self.buckets)): self.old_unconfirmed_txs[i] += self.unconfirmed_txs[block_height % len(self.unconfirmed_txs)][i] self.unconfirmed_txs[block_height % len(self.unconfirmed_txs)][i] = 0 def new_mempool_tx(self, block_height: uint32, fee_rate: float) -> int: bucket_index: int = self.get_bucket_index(fee_rate) block_index = block_height % len(self.unconfirmed_txs) self.unconfirmed_txs[block_index][bucket_index] += 1 return bucket_index def remove_tx(self, latest_seen_height: uint32, item: MempoolItem, bucket_index: int) -> None: if item.height_added_to_mempool is None: return block_ago = latest_seen_height - item.height_added_to_mempool if latest_seen_height == 0: block_ago = 0 if block_ago < 0: return if block_ago >= len(self.unconfirmed_txs): if self.old_unconfirmed_txs[bucket_index] > 0: self.old_unconfirmed_txs[bucket_index] -= 1 else: self.log.warning("Fee estimator error") else: block_index = item.height_added_to_mempool % len(self.unconfirmed_txs) if self.unconfirmed_txs[block_index][bucket_index] > 0: self.unconfirmed_txs[block_index][bucket_index] -= 1 else: self.log.warning("Fee estimator error") if 
block_ago >= self.scale: periods_ago = block_ago / self.scale for i in range(0, len(self.failed_average)): if i >= periods_ago: break self.failed_average[i][bucket_index] += 1 def create_backup(self) -> FeeStatBackup: str_tx_ct_abg: List[str] = [] str_confirmed_average: List[List[str]] = [] str_failed_average: List[List[str]] = [] str_m_fee_rate_avg: List[str] = [] for i in range(0, self.max_periods): str_i_list_conf = [] for j in range(0, len(self.confirmed_average[i])): str_i_list_conf.append(float.hex(float(self.confirmed_average[i][j]))) str_confirmed_average.append(str_i_list_conf) str_i_list_fail = [] for j in range(0, len(self.failed_average[i])): str_i_list_fail.append(float.hex(float(self.failed_average[i][j]))) str_failed_average.append(str_i_list_fail) for i in range(0, len(self.tx_ct_avg)): str_tx_ct_abg.append(float.hex(float(self.tx_ct_avg[i]))) for i in range(0, len(self.m_fee_rate_avg)): str_m_fee_rate_avg.append(float.hex(float(self.m_fee_rate_avg[i]))) return FeeStatBackup(self.type, str_tx_ct_abg, str_confirmed_average, str_failed_average, str_m_fee_rate_avg) def import_backup(self, backup: FeeStatBackup) -> None: for i in range(0, self.max_periods): for j in range(0, len(self.confirmed_average[i])): self.confirmed_average[i][j] = float.fromhex(backup.confirmed_average[i][j]) for j in range(0, len(self.failed_average[i])): self.failed_average[i][j] = float.fromhex(backup.failed_average[i][j]) for i in range(0, len(self.tx_ct_avg)): self.tx_ct_avg[i] = float.fromhex(backup.tx_ct_avg[i]) for i in range(0, len(self.m_fee_rate_avg)): self.m_fee_rate_avg[i] = float.fromhex(backup.m_fee_rate_avg[i]) # See TxConfirmStats::EstimateMedianVal in https://github.com/bitcoin/bitcoin/blob/master/src/policy/fees.cpp def estimate_median_val( self, conf_target: int, sufficient_tx_val: float, success_break_point: float, block_height: uint32 ) -> EstimateResult: """ conf_target is the number of blocks within which we hope to get our SpendBundle confirmed """ if 
conf_target < 0: raise ValueError(f"Bad argument to estimate_median_val: conf_target must be >= 0. Got {conf_target}") n_conf = 0.0 # Number of txs confirmed within conf_target total_num = 0.0 # Total number of txs that were extra_num = 0.0 fail_num = 0.0 period_target = int((conf_target + self.scale - 1) / self.scale) max_bucket_index = len(self.buckets) - 1 cur_near_bucket = max_bucket_index best_near_bucket = max_bucket_index cur_far_bucket = max_bucket_index best_far_bucket = max_bucket_index found_answer = False bins = len(self.unconfirmed_txs) new_bucket_range = True passing = True pass_bucket: BucketResult = BucketResult( start=0.0, end=0.0, within_target=0.0, total_confirmed=0.0, in_mempool=0.0, left_mempool=0.0, ) fail_bucket: BucketResult = BucketResult( start=0.0, end=0.0, within_target=0.0, total_confirmed=0.0, in_mempool=0.0, left_mempool=0.0, ) for bucket in range(max_bucket_index, -1, -1): if new_bucket_range: cur_near_bucket = bucket new_bucket_range = False cur_far_bucket = bucket if period_target - 1 < 0 or period_target - 1 >= len(self.confirmed_average): return EstimateResult( requested_time=uint64(conf_target * SECONDS_PER_BLOCK), pass_bucket=pass_bucket, fail_bucket=fail_bucket, median=-1.0, ) ca_len = len(self.confirmed_average[period_target - 1]) if bucket < 0 or bucket >= ca_len: raise RuntimeError(f"bucket index ({bucket}) out of range (0, {ca_len})") n_conf += self.confirmed_average[period_target - 1][bucket] total_num += self.tx_ct_avg[bucket] fail_num += self.failed_average[period_target - 1][bucket] for conf_ct in range(conf_target, self.max_confirms): extra_num += self.unconfirmed_txs[(block_height - conf_ct) % bins][bucket] extra_num += self.old_unconfirmed_txs[bucket] # If we have enough transaction data points in this range of buckets, # we can test for success # (Only count the confirmed data points, so that each confirmation count # will be looking at the same amount of data and same bucket breaks) if total_num >= 
sufficient_tx_val / (1 - self.decay): curr_pct = n_conf / (total_num + fail_num + extra_num) # Check to see if we are no longer getting confirmed at the same rate if curr_pct < success_break_point: if passing is True: fail_min_bucket = min(cur_near_bucket, cur_far_bucket) fail_max_bucket = max(cur_near_bucket, cur_far_bucket) self.log.debug(f"Fail_min_bucket: {fail_min_bucket}") fail_bucket = BucketResult( start=self.buckets[fail_min_bucket - 1] if fail_min_bucket else 0, end=self.buckets[fail_max_bucket], within_target=n_conf, total_confirmed=total_num, in_mempool=extra_num, left_mempool=fail_num, ) passing = False continue else: # Otherwise, update the cumulative stats and bucket variables # and reset the counters found_answer = True passing = True pass_bucket.within_target = n_conf n_conf = 0 pass_bucket.total_confirmed = total_num total_num = 0 pass_bucket.in_mempool = extra_num pass_bucket.left_mempool = fail_num fail_num = 0 extra_num = 0 best_near_bucket = cur_near_bucket best_far_bucket = cur_far_bucket new_bucket_range = True median = -1.0 tx_sum = 0.0 min_bucket = min(best_near_bucket, best_far_bucket) max_bucket = max(best_near_bucket, best_far_bucket) for i in range(min_bucket, max_bucket + 1): tx_sum += self.tx_ct_avg[i] if found_answer and tx_sum != 0: tx_sum = tx_sum / 2 for i in range(min_bucket, max_bucket): if self.tx_ct_avg[i] < tx_sum: tx_sum -= self.tx_ct_avg[i] else: # This is the correct bucket median = self.m_fee_rate_avg[i] / self.tx_ct_avg[i] break pass_bucket.start = self.buckets[min_bucket - 1] if min_bucket else 0 pass_bucket.end = self.buckets[max_bucket] if passing and new_bucket_range is False: fail_min_bucket = min(cur_near_bucket, cur_far_bucket) fail_max_bucket = max(cur_near_bucket, cur_far_bucket) fail_bucket = BucketResult( start=self.buckets[fail_min_bucket - 1] if fail_min_bucket else 0, end=self.buckets[fail_max_bucket], within_target=n_conf, total_confirmed=total_num, in_mempool=extra_num, left_mempool=fail_num, ) 
passed_within_target_perc = 0.0 failed_within_target_perc = 0.0 pass_bucket_total = pass_bucket.total_confirmed + pass_bucket.in_mempool + pass_bucket.left_mempool if pass_bucket_total > 0: passed_within_target_perc = 100 * pass_bucket.within_target / pass_bucket_total fail_bucket_total = fail_bucket.total_confirmed + fail_bucket.in_mempool + fail_bucket.left_mempool if fail_bucket_total > 0: failed_within_target_perc = 100 * fail_bucket.within_target / fail_bucket_total self.log.debug(f"passed_within_target_perc: {passed_within_target_perc}") self.log.debug(f"failed_within_target_perc: {failed_within_target_perc}") result = EstimateResult( requested_time=uint64(conf_target * SECONDS_PER_BLOCK - SECONDS_PER_BLOCK), pass_bucket=pass_bucket, fail_bucket=fail_bucket, median=median, ) return result class FeeTracker: sorted_buckets: SortedDict short_horizon: FeeStat med_horizon: FeeStat long_horizon: FeeStat log: logging.Logger latest_seen_height: uint32 first_recorded_height: uint32 fee_store: FeeStore buckets: List[float] def __init__(self, log: logging.Logger, fee_store: FeeStore): self.log = log self.sorted_buckets = SortedDict() self.buckets = [] self.latest_seen_height = uint32(0) self.first_recorded_height = uint32(0) self.fee_store = fee_store fee_rate = 0.0 index = 0 while fee_rate < MAX_FEE_RATE: self.buckets.append(fee_rate) self.sorted_buckets[fee_rate] = index if fee_rate == 0: fee_rate = INITIAL_STEP else: fee_rate = fee_rate * STEP_SIZE index += 1 self.buckets.append(INFINITE_FEE_RATE) self.sorted_buckets[INFINITE_FEE_RATE] = index assert len(self.sorted_buckets.keys()) == len(self.buckets) self.short_horizon = FeeStat( self.buckets, self.sorted_buckets, SHORT_BLOCK_PERIOD, SHORT_DECAY, SHORT_SCALE, self.log, self.fee_store, "short", ) self.med_horizon = FeeStat( self.buckets, self.sorted_buckets, MED_BLOCK_PERIOD, MED_DECAY, MED_SCALE, self.log, self.fee_store, "medium", ) self.long_horizon = FeeStat( self.buckets, self.sorted_buckets, LONG_BLOCK_PERIOD, 
LONG_DECAY, LONG_SCALE, self.log, self.fee_store, "long", ) fee_backup: Optional[FeeTrackerBackup] = self.fee_store.get_stored_fee_data() if fee_backup is not None: self.first_recorded_height = fee_backup.first_recorded_height self.latest_seen_height = fee_backup.latest_seen_height for stat in fee_backup.stats: if stat.type == "short": self.short_horizon.import_backup(stat) if stat.type == "medium": self.med_horizon.import_backup(stat) if stat.type == "long": self.long_horizon.import_backup(stat) def shutdown(self) -> None: short = self.short_horizon.create_backup() medium = self.med_horizon.create_backup() long = self.long_horizon.create_backup() stats = [short, medium, long] backup = FeeTrackerBackup( uint8(FEE_ESTIMATOR_VERSION), self.first_recorded_height, self.latest_seen_height, stats ) self.fee_store.store_fee_data(backup) def process_block(self, block_height: uint32, items: List[MempoolItem]) -> None: """A new block has been farmed and these transactions have been included in that block""" if block_height <= self.latest_seen_height: # Ignore reorgs return self.latest_seen_height = block_height self.short_horizon.update_moving_averages() self.med_horizon.update_moving_averages() self.long_horizon.update_moving_averages() for item in items: self.process_block_tx(block_height, item) if self.first_recorded_height == 0 and len(items) > 0: self.first_recorded_height = block_height self.log.info(f"Fee Estimator first recorded height: {self.first_recorded_height}") def process_block_tx(self, current_height: uint32, item: MempoolItem) -> None: if item.height_added_to_mempool is None: raise ValueError("process_block_tx called with item.height_added_to_mempool=None") blocks_to_confirm = current_height - item.height_added_to_mempool if blocks_to_confirm <= 0: return self.short_horizon.tx_confirmed(blocks_to_confirm, item) self.med_horizon.tx_confirmed(blocks_to_confirm, item) self.long_horizon.tx_confirmed(blocks_to_confirm, item) def get_bucket_index(self, fee_rate: 
float) -> int: if fee_rate in self.sorted_buckets: bucket_index = self.sorted_buckets[fee_rate] else: bucket_index = self.sorted_buckets.bisect_left(fee_rate) - 1 return int(bucket_index) def remove_tx(self, item: MempoolItem) -> None: bucket_index = self.get_bucket_index(item.fee_per_cost * 1000) self.short_horizon.remove_tx(self.latest_seen_height, item, bucket_index) self.med_horizon.remove_tx(self.latest_seen_height, item, bucket_index) self.long_horizon.remove_tx(self.latest_seen_height, item, bucket_index) def estimate_fee_for_block(self, target_block: uint32) -> EstimateResult: return self.med_horizon.estimate_median_val( conf_target=target_block, sufficient_tx_val=SUFFICIENT_FEE_TXS, success_break_point=SUCCESS_PCT, block_height=self.latest_seen_height, ) def estimate_fee(self, target_time: int) -> EstimateResult: confirm_target_block = int(target_time / SECONDS_PER_BLOCK) + 1 return self.estimate_fee_for_block(uint32(confirm_target_block)) def estimate_fees(self) -> Tuple[EstimateResult, EstimateResult, EstimateResult]: """returns the fee estimate for short, medium, and long time horizons""" short = self.short_horizon.estimate_median_val( conf_target=SHORT_BLOCK_PERIOD * SHORT_SCALE - SHORT_SCALE, sufficient_tx_val=SUFFICIENT_FEE_TXS, success_break_point=SUCCESS_PCT, block_height=self.latest_seen_height, ) med = self.med_horizon.estimate_median_val( conf_target=MED_BLOCK_PERIOD * MED_SCALE - MED_SCALE, sufficient_tx_val=SUFFICIENT_FEE_TXS, success_break_point=SUCCESS_PCT, block_height=self.latest_seen_height, ) long = self.long_horizon.estimate_median_val( conf_target=LONG_BLOCK_PERIOD * LONG_SCALE - LONG_SCALE, sufficient_tx_val=SUFFICIENT_FEE_TXS, success_break_point=SUCCESS_PCT, block_height=self.latest_seen_height, ) return short, med, long
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/mempool.py
flax/full_node/mempool.py
from __future__ import annotations import logging from datetime import datetime from typing import Dict, List, Optional from sortedcontainers import SortedDict from flax.full_node.bitcoin_fee_estimator import create_bitcoin_fee_estimator from flax.full_node.fee_estimation import FeeMempoolInfo from flax.full_node.fee_estimator_interface import FeeEstimatorInterface from flax.types.blockchain_format.coin import Coin from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.clvm_cost import CLVMCost from flax.types.fee_rate import FeeRate from flax.types.mempool_item import MempoolItem from flax.util.ints import uint64 class Mempool: def __init__(self, max_size_in_cost: int, minimum_fee_per_cost_to_replace: uint64, max_block_cost_clvm: uint64): self.log = logging.getLogger(__name__) self.spends: Dict[bytes32, MempoolItem] = {} self.sorted_spends: SortedDict = SortedDict() self.removals: Dict[bytes32, List[bytes32]] = {} # From removal coin id to spend bundle id self.max_size_in_cost: int = max_size_in_cost self.total_mempool_cost: int = 0 self.minimum_fee_per_cost_to_replace: uint64 = minimum_fee_per_cost_to_replace self.fee_estimator: FeeEstimatorInterface = create_bitcoin_fee_estimator(max_block_cost_clvm, self.log) def get_min_fee_rate(self, cost: int) -> float: """ Gets the minimum fpc rate that a transaction with specified cost will need in order to get included. 
""" if self.at_full_capacity(cost): current_cost = self.total_mempool_cost # Iterates through all spends in increasing fee per cost fee_per_cost: float for fee_per_cost, spends_with_fpc in self.sorted_spends.items(): for spend_name, item in spends_with_fpc.items(): current_cost -= item.cost # Removing one at a time, until our transaction of size cost fits if current_cost + cost <= self.max_size_in_cost: return fee_per_cost raise ValueError( f"Transaction with cost {cost} does not fit in mempool of max cost {self.max_size_in_cost}" ) else: return 0 def remove_from_pool(self, items: List[bytes32]) -> None: """ Removes an item from the mempool. """ for spend_bundle_id in items: item: Optional[MempoolItem] = self.spends.get(spend_bundle_id) if item is None: continue assert item.name == spend_bundle_id removals: List[Coin] = item.removals for rem in removals: rem_name: bytes32 = rem.name() self.removals[rem_name].remove(spend_bundle_id) if len(self.removals[rem_name]) == 0: del self.removals[rem_name] del self.spends[item.name] del self.sorted_spends[item.fee_per_cost][item.name] dic = self.sorted_spends[item.fee_per_cost] if len(dic.values()) == 0: del self.sorted_spends[item.fee_per_cost] self.total_mempool_cost -= item.cost assert self.total_mempool_cost >= 0 mempool_info = self.get_mempool_info() self.fee_estimator.remove_mempool_item(mempool_info, item) def add_to_pool(self, item: MempoolItem) -> None: """ Adds an item to the mempool by kicking out transactions (if it doesn't fit), in order of increasing fee per cost """ while self.at_full_capacity(item.cost): # Val is Dict[hash, MempoolItem] fee_per_cost, val = self.sorted_spends.peekitem(index=0) to_remove: MempoolItem = list(val.values())[0] self.remove_from_pool([to_remove.name]) self.spends[item.name] = item # sorted_spends is Dict[float, Dict[bytes32, MempoolItem]] if item.fee_per_cost not in self.sorted_spends: self.sorted_spends[item.fee_per_cost] = {} self.sorted_spends[item.fee_per_cost][item.name] = item 
for coin in item.removals: coin_id = coin.name() if coin_id not in self.removals: self.removals[coin_id] = [] self.removals[coin_id].append(item.name) self.total_mempool_cost += item.cost mempool_info = self.get_mempool_info() self.fee_estimator.add_mempool_item(mempool_info, item) def at_full_capacity(self, cost: int) -> bool: """ Checks whether the mempool is at full capacity and cannot accept a transaction with size cost. """ return self.total_mempool_cost + cost > self.max_size_in_cost def get_mempool_info(self) -> FeeMempoolInfo: return FeeMempoolInfo( CLVMCost(uint64(self.max_size_in_cost)), FeeRate(uint64(self.minimum_fee_per_cost_to_replace)), CLVMCost(uint64(self.total_mempool_cost)), datetime.now(), CLVMCost(uint64(self.max_size_in_cost)), )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/__init__.py
flax/full_node/__init__.py
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_estimator_example.py
flax/full_node/fee_estimator_example.py
from __future__ import annotations from typing import Any, Dict, List from flax.full_node.fee_estimate import FeeEstimate from flax.full_node.fee_estimation import FeeBlockInfo, FeeMempoolInfo from flax.full_node.fee_estimator_interface import FeeEstimatorInterface from flax.types.clvm_cost import CLVMCost from flax.types.fee_rate import FeeRate from flax.types.mempool_item import MempoolItem from flax.util.ints import uint64 MIN_MOJO_PER_COST = 5 def example_fee_rate_function(time_in_seconds: int) -> uint64: return uint64(MIN_MOJO_PER_COST * max((3600 - time_in_seconds), 1)) class FeeEstimatorExample(FeeEstimatorInterface): """ An example Fee Estimator that can be plugged in for testing, or development of new fee estimators. Note that we inherit from the FeeEstimatorInterface protocol to ensure we keep up to date with interface changes. """ def __init__(self, config: Dict[str, Any] = {}) -> None: self.config = config def new_block(self, block_info: FeeBlockInfo) -> None: pass def add_mempool_item(self, mempool_info: FeeMempoolInfo, mempool_item: MempoolItem) -> None: pass def remove_mempool_item(self, mempool_info: FeeMempoolInfo, mempool_item: MempoolItem) -> None: pass def estimate_fee_rate(self, *, time_offset_seconds: int) -> FeeRate: return FeeRate(example_fee_rate_function(time_offset_seconds)) def mempool_size(self) -> CLVMCost: """Report last seen mempool size""" return CLVMCost(uint64(0)) def mempool_max_size(self) -> CLVMCost: """Report current mempool max size (cost)""" return CLVMCost(uint64(0)) def request_fee_estimates(self, request_times: List[uint64]) -> List[FeeEstimate]: estimates = [self.estimate_fee_rate(time_offset_seconds=t) for t in request_times] fee_estimates = [FeeEstimate(None, t, e) for (t, e) in zip(request_times, estimates)] return fee_estimates
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/lock_queue.py
flax/full_node/lock_queue.py
from __future__ import annotations import asyncio import dataclasses import logging import traceback from types import TracebackType from typing import Awaitable, Callable log = logging.getLogger(__name__) @dataclasses.dataclass(frozen=True, order=True) class PrioritizedCallable: priority: int af: Callable[[], Awaitable[object]] = dataclasses.field(compare=False) class LockQueue: """ The purpose of this class is to be able to control access to a lock, and give priority to certain clients (LockClients). To use it, create a lock and clients: ``` my_lock = LockQueue(asyncio.Lock()) client_a = LockClient(0, my_lock) client_b = LockClient(1, my_lock) async with client_a: ... ``` The clients can be used like normal async locks, but the higher priority (lower number) will always go first. Must be created under an asyncio running loop, and close and await_closed should be called. """ def __init__(self, inner_lock: asyncio.Lock): self._inner_lock: asyncio.Lock = inner_lock self._task_queue: asyncio.PriorityQueue[PrioritizedCallable] = asyncio.PriorityQueue() self._run_task = asyncio.create_task(self._run()) self._release_event = asyncio.Event() async def put(self, priority: int, callback: Callable[[], Awaitable[object]]) -> None: await self._task_queue.put(PrioritizedCallable(priority=priority, af=callback)) async def acquire(self) -> None: await self._inner_lock.acquire() def release(self) -> None: self._inner_lock.release() self._release_event.set() async def _run(self) -> None: try: while True: prioritized_callback = await self._task_queue.get() self._release_event = asyncio.Event() await self.acquire() await prioritized_callback.af() await self._release_event.wait() except asyncio.CancelledError: error_stack = traceback.format_exc() log.debug(f"LockQueue._run() cancelled: {error_stack}") def close(self) -> None: self._run_task.cancel() async def await_closed(self) -> None: await self._run_task class LockClient: def __init__(self, priority: int, queue: LockQueue): 
self._priority = priority self._queue = queue async def __aenter__(self) -> None: called: asyncio.Event = asyncio.Event() # Use a parameter default to avoid a closure async def callback(called_inner: asyncio.Event = called) -> None: called_inner.set() await self._queue.put(priority=self._priority, callback=callback) await called.wait() async def __aexit__( self, typ: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None ) -> bool | None: self._queue.release() return None
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/sync_store.py
flax/full_node/sync_store.py
from __future__ import annotations import asyncio import logging from collections import OrderedDict as orderedDict from dataclasses import dataclass, field from typing import Dict, List, Optional, OrderedDict, Set, Tuple import typing_extensions from flax.types.blockchain_format.sized_bytes import bytes32 from flax.util.ints import uint32, uint128 log = logging.getLogger(__name__) @typing_extensions.final @dataclass class SyncStore: # Whether or not we are syncing sync_mode: bool = False long_sync: bool = False # Header hash : peer node id peak_to_peer: OrderedDict[bytes32, Set[bytes32]] = field(default_factory=orderedDict) # peer node id : [header_hash, height, weight] peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]] = field(default_factory=dict) # Peak hash we are syncing towards sync_target_header_hash: Optional[bytes32] = None # Peak height we are syncing towards sync_target_height: Optional[uint32] = None peers_changed: asyncio.Event = field(default_factory=asyncio.Event) # Set of nodes which we are batch syncing from batch_syncing: Set[bytes32] = field(default_factory=set) # Set of nodes which we are backtrack syncing from, and how many threads backtrack_syncing: Dict[bytes32, int] = field(default_factory=dict) def set_peak_target(self, peak_hash: bytes32, target_height: uint32) -> None: self.sync_target_header_hash = peak_hash self.sync_target_height = target_height def get_sync_target_hash(self) -> Optional[bytes32]: return self.sync_target_header_hash def get_sync_target_height(self) -> Optional[uint32]: return self.sync_target_height def set_sync_mode(self, sync_mode: bool) -> None: self.sync_mode = sync_mode def get_sync_mode(self) -> bool: return self.sync_mode def set_long_sync(self, long_sync: bool) -> None: self.long_sync = long_sync def get_long_sync(self) -> bool: return self.long_sync def seen_header_hash(self, header_hash: bytes32) -> bool: return header_hash in self.peak_to_peer def peer_has_block( self, header_hash: bytes32, 
peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool ) -> None: """ Adds a record that a certain peer has a block. """ if header_hash == self.sync_target_header_hash: self.peers_changed.set() if header_hash in self.peak_to_peer: self.peak_to_peer[header_hash].add(peer_id) else: self.peak_to_peer[header_hash] = {peer_id} if len(self.peak_to_peer) > 256: # nice power of two item = self.peak_to_peer.popitem(last=False) # Remove the oldest entry # sync target hash is used throughout the sync process and should not be deleted. if item[0] == self.sync_target_header_hash: self.peak_to_peer[item[0]] = item[1] # Put it back in if it was the sync target self.peak_to_peer.popitem(last=False) # Remove the oldest entry again if new_peak: self.peer_to_peak[peer_id] = (header_hash, height, weight) def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]: """ Returns: peer ids of peers that have at least one of the header hashes. """ node_ids: Set[bytes32] = set() for header_hash in header_hashes: if header_hash in self.peak_to_peer: for node_id in self.peak_to_peer[header_hash]: node_ids.add(node_id) return node_ids def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]: """ Returns: dictionary of peer id to peak information. """ ret = {} for peer_id, v in self.peer_to_peak.items(): if v[0] not in self.peak_to_peer: continue ret[peer_id] = v return ret def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]: """ Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified us of. 
""" if len(self.peer_to_peak) == 0: return None heaviest_peak_hash: Optional[bytes32] = None heaviest_peak_weight: uint128 = uint128(0) heaviest_peak_height: Optional[uint32] = None for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items(): if peak_hash not in self.peak_to_peer: continue if heaviest_peak_hash is None or weight > heaviest_peak_weight: heaviest_peak_hash = peak_hash heaviest_peak_weight = weight heaviest_peak_height = height assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight async def clear_sync_info(self) -> None: """ Clears the peak_to_peer info which can get quite large. """ self.peak_to_peer = orderedDict() def peer_disconnected(self, node_id: bytes32) -> None: if node_id in self.peer_to_peak: del self.peer_to_peak[node_id] for peak, peers in self.peak_to_peer.items(): if node_id in peers: self.peak_to_peer[peak].remove(node_id) assert node_id not in self.peak_to_peer[peak] self.peers_changed.set()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/mempool_manager.py
flax/full_node/mempool_manager.py
import asyncio import collections import logging from concurrent.futures import Executor from multiprocessing.context import BaseContext import time from concurrent.futures.process import ProcessPoolExecutor from flax.full_node.fee_estimation import FeeMempoolInfo, FeeBlockInfo from flax.types.clvm_cost import CLVMCost from flax.types.fee_rate import FeeRate from flax.util.inline_executor import InlineExecutor from typing import Dict, List, Optional, Set, Tuple from blspy import GTElement from chiabip158 import PyBIP158 from flax.util import cached_bls from flax.consensus.block_record import BlockRecord from flax.consensus.constants import ConsensusConstants from flax.consensus.cost_calculator import NPCResult from flax.full_node.bundle_tools import simple_solution_generator from flax.full_node.coin_store import CoinStore from flax.full_node.mempool import Mempool from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions from flax.full_node.pending_tx_cache import PendingTxCache from flax.types.blockchain_format.coin import Coin from flax.types.blockchain_format.sized_bytes import bytes32, bytes48 from flax.types.coin_record import CoinRecord from flax.types.mempool_inclusion_status import MempoolInclusionStatus from flax.types.mempool_item import MempoolItem from flax.types.spend_bundle import SpendBundle from flax.util.cached_bls import LOCAL_CACHE from flax.util.condition_tools import pkm_pairs from flax.util.errors import Err, ValidationError from flax.util.generator_tools import additions_for_npc from flax.util.ints import uint32, uint64 from flax.util.lru_cache import LRUCache from flax.util.setproctitle import getproctitle, setproctitle from flax.full_node.mempool_check_conditions import mempool_check_time_locks log = logging.getLogger(__name__) def validate_clvm_and_signature( spend_bundle_bytes: bytes, max_cost: int, cost_per_byte: int, additional_data: bytes ) -> Tuple[Optional[Err], bytes, Dict[bytes32, bytes]]: """ Validates CLVM 
and aggregate signature for a spendbundle. This is meant to be called under a ProcessPoolExecutor in order to validate the heavy parts of a transaction in a different thread. Returns an optional error, the NPCResult and a cache of the new pairings validated (if not error) """ try: bundle: SpendBundle = SpendBundle.from_bytes(spend_bundle_bytes) program = simple_solution_generator(bundle) # npc contains names of the coins removed, puzzle_hashes and their spend conditions result: NPCResult = get_name_puzzle_conditions( program, max_cost, cost_per_byte=cost_per_byte, mempool_mode=True ) if result.error is not None: return Err(result.error), b"", {} pks: List[bytes48] = [] msgs: List[bytes] = [] assert result.conds is not None pks, msgs = pkm_pairs(result.conds, additional_data) # Verify aggregated signature cache: LRUCache[bytes32, GTElement] = LRUCache(10000) if not cached_bls.aggregate_verify(pks, msgs, bundle.aggregated_signature, True, cache): return Err.BAD_AGGREGATE_SIGNATURE, b"", {} new_cache_entries: Dict[bytes32, bytes] = {} for k, v in cache.cache.items(): new_cache_entries[k] = bytes(v) except ValidationError as e: return e.code, b"", {} except Exception: return Err.UNKNOWN, b"", {} return None, bytes(result), new_cache_entries class MempoolManager: pool: Executor def __init__( self, coin_store: CoinStore, consensus_constants: ConsensusConstants, multiprocessing_context: Optional[BaseContext] = None, *, single_threaded: bool = False, ): self.constants: ConsensusConstants = consensus_constants # Keep track of seen spend_bundles self.seen_bundle_hashes: Dict[bytes32, bytes32] = {} self.coin_store = coin_store # The fee per cost must be above this amount to consider the fee "nonzero", and thus able to kick out other # transactions. This prevents spam. This is equivalent to 0.055 XFX per block, or about 0.00005 XFX for two # spends. 
self.nonzero_fee_minimum_fpc = 5 self.limit_factor = 0.5 self.mempool_max_total_cost = int(self.constants.MAX_BLOCK_COST_CLVM * self.constants.MEMPOOL_BLOCK_BUFFER) # Transactions that were unable to enter mempool, used for retry. (they were invalid) self.potential_cache = PendingTxCache(self.constants.MAX_BLOCK_COST_CLVM * 1) self.seen_cache_size = 10000 if single_threaded: self.pool = InlineExecutor() else: self.pool = ProcessPoolExecutor( max_workers=2, mp_context=multiprocessing_context, initializer=setproctitle, initargs=(f"{getproctitle()}_worker",), ) # The mempool will correspond to a certain peak self.peak: Optional[BlockRecord] = None self.mempool: Mempool = Mempool( self.mempool_max_total_cost, uint64(self.nonzero_fee_minimum_fpc), uint64(self.constants.MAX_BLOCK_COST_CLVM), ) def shut_down(self) -> None: self.pool.shutdown(wait=True) async def create_bundle_from_mempool( self, last_tb_header_hash: bytes32 ) -> Optional[Tuple[SpendBundle, List[Coin], List[Coin]]]: """ Returns aggregated spendbundle that can be used for creating new block, additions and removals in that spend_bundle """ if self.peak is None or self.peak.header_hash != last_tb_header_hash: return None cost_sum = 0 # Checks that total cost does not exceed block maximum fee_sum = 0 # Checks that total fees don't exceed 64 bits spend_bundles: List[SpendBundle] = [] removals = [] additions = [] broke_from_inner_loop = False log.info(f"Starting to make block, max cost: {self.constants.MAX_BLOCK_COST_CLVM}") for dic in reversed(self.mempool.sorted_spends.values()): if broke_from_inner_loop: break for item in dic.values(): log.info(f"Cumulative cost: {cost_sum}, fee per cost: {item.fee / item.cost}") if ( item.cost + cost_sum <= self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM and item.fee + fee_sum <= self.constants.MAX_COIN_AMOUNT ): spend_bundles.append(item.spend_bundle) cost_sum += item.cost fee_sum += item.fee removals.extend(item.removals) additions.extend(item.additions) else: 
broke_from_inner_loop = True break if len(spend_bundles) > 0: log.info( f"Cumulative cost of block (real cost should be less) {cost_sum}. Proportion " f"full: {cost_sum / self.constants.MAX_BLOCK_COST_CLVM}" ) agg = SpendBundle.aggregate(spend_bundles) return agg, additions, removals else: return None def get_filter(self) -> bytes: all_transactions: Set[bytes32] = set() byte_array_list = [] for key, _ in self.mempool.spends.items(): if key not in all_transactions: all_transactions.add(key) byte_array_list.append(bytearray(key)) tx_filter: PyBIP158 = PyBIP158(byte_array_list) return bytes(tx_filter.GetEncoded()) def is_fee_enough(self, fees: uint64, cost: uint64) -> bool: """ Determines whether any of the pools can accept a transaction with a given fees and cost. """ if cost == 0: return False fees_per_cost = fees / cost if not self.mempool.at_full_capacity(cost) or ( fees_per_cost >= self.nonzero_fee_minimum_fpc and fees_per_cost > self.mempool.get_min_fee_rate(cost) ): return True return False def add_and_maybe_pop_seen(self, spend_name: bytes32) -> None: self.seen_bundle_hashes[spend_name] = spend_name while len(self.seen_bundle_hashes) > self.seen_cache_size: first_in = list(self.seen_bundle_hashes.keys())[0] self.seen_bundle_hashes.pop(first_in) def seen(self, bundle_hash: bytes32) -> bool: """Return true if we saw this spendbundle recently""" return bundle_hash in self.seen_bundle_hashes def remove_seen(self, bundle_hash: bytes32) -> None: if bundle_hash in self.seen_bundle_hashes: self.seen_bundle_hashes.pop(bundle_hash) @staticmethod def get_min_fee_increase() -> int: # 0.00001 XFX return 10000000 def can_replace( self, conflicting_items: Dict[bytes32, MempoolItem], removals: Dict[bytes32, CoinRecord], fees: uint64, fees_per_cost: float, ) -> bool: conflicting_fees = 0 conflicting_cost = 0 for item in conflicting_items.values(): conflicting_fees += item.fee conflicting_cost += item.cost # All coins spent in all conflicting items must also be spent in the new 
item. (superset rule). This is # important because otherwise there exists an attack. A user spends coin A. An attacker replaces the # bundle with AB with a higher fee. An attacker then replaces the bundle with just B with a higher # fee than AB therefore kicking out A altogether. The better way to solve this would be to keep a cache # of booted transactions like A, and retry them after they get removed from mempool due to a conflict. for coin in item.removals: if coin.name() not in removals: log.debug(f"Rejecting conflicting tx as it does not spend conflicting coin {coin.name()}") return False # New item must have higher fee per cost conflicting_fees_per_cost = conflicting_fees / conflicting_cost if fees_per_cost <= conflicting_fees_per_cost: log.debug( f"Rejecting conflicting tx due to not increasing fees per cost " f"({fees_per_cost} <= {conflicting_fees_per_cost})" ) return False # New item must increase the total fee at least by a certain amount fee_increase = fees - conflicting_fees if fee_increase < self.get_min_fee_increase(): log.debug(f"Rejecting conflicting tx due to low fee increase ({fee_increase})") return False log.info(f"Replacing conflicting tx in mempool. New tx fee: {fees}, old tx fees: {conflicting_fees}") return True async def pre_validate_spendbundle( self, new_spend: SpendBundle, new_spend_bytes: Optional[bytes], spend_name: bytes32 ) -> NPCResult: """ Errors are included within the cached_result. 
This runs in another process so we don't block the main thread """ start_time = time.time() if new_spend_bytes is None: new_spend_bytes = bytes(new_spend) err, cached_result_bytes, new_cache_entries = await asyncio.get_running_loop().run_in_executor( self.pool, validate_clvm_and_signature, new_spend_bytes, int(self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM), self.constants.COST_PER_BYTE, self.constants.AGG_SIG_ME_ADDITIONAL_DATA, ) if err is not None: raise ValidationError(err) for cache_entry_key, cached_entry_value in new_cache_entries.items(): LOCAL_CACHE.put(cache_entry_key, GTElement.from_bytes_unchecked(cached_entry_value)) ret: NPCResult = NPCResult.from_bytes(cached_result_bytes) end_time = time.time() duration = end_time - start_time log.log( logging.DEBUG if duration < 2 else logging.WARNING, f"pre_validate_spendbundle took {end_time - start_time:0.4f} seconds for {spend_name}", ) return ret async def add_spend_bundle( self, new_spend: SpendBundle, npc_result: NPCResult, spend_name: bytes32, ) -> Tuple[Optional[uint64], MempoolInclusionStatus, Optional[Err]]: """ Validates and adds to mempool a new_spend with the given NPCResult, and spend_name, and the current mempool. The mempool should be locked during this call (blockchain lock). If there are mempool conflicts, the conflicting spends might be removed (if the new spend is a superset of the previous). Otherwise, the new spend might be added to the potential pool. 
Args: new_spend: spend bundle to validate and add npc_result: result of running the clvm transaction in a fake block spend_name: hash of the spend bundle data, passed in as an optimization Returns: Optional[uint64]: cost of the entire transaction, None iff status is FAILED MempoolInclusionStatus: SUCCESS (should add to pool), FAILED (cannot add), and PENDING (can add later) Optional[Err]: Err is set iff status is FAILED """ # Skip if already added if spend_name in self.mempool.spends: cost: Optional[uint64] = self.mempool.spends[spend_name].cost assert cost is not None return uint64(cost), MempoolInclusionStatus.SUCCESS, None err, item, remove_items = await self.validate_spend_bundle(new_spend, npc_result, spend_name) if err is None: # No error, immediately add to mempool, after removing conflicting TXs. assert item is not None self.mempool.add_to_pool(item) self.mempool.remove_from_pool(remove_items) return item.cost, MempoolInclusionStatus.SUCCESS, None elif item is not None: # There is an error, but we still returned a mempool item, this means we should add to the pending pool. self.potential_cache.add(item) return item.cost, MempoolInclusionStatus.PENDING, err else: # Cannot add to the mempool or pending pool. return None, MempoolInclusionStatus.FAILED, err async def validate_spend_bundle( self, new_spend: SpendBundle, npc_result: NPCResult, spend_name: bytes32, ) -> Tuple[Optional[Err], Optional[MempoolItem], List[bytes32]]: """ Validates new_spend with the given NPCResult, and spend_name, and the current mempool. The mempool should be locked during this call (blockchain lock). 
Args: new_spend: spend bundle to validate npc_result: result of running the clvm transaction in a fake block spend_name: hash of the spend bundle data, passed in as an optimization Returns: Optional[Err]: Err is set if we cannot add to the mempool, None if we will immediately add to mempool Optional[MempoolItem]: the item to add (to mempool or pending pool) List[bytes32]: conflicting mempool items to remove, if no Err """ start_time = time.time() if self.peak is None: return Err.MEMPOOL_NOT_INITIALIZED, None, [] assert npc_result.error is None if npc_result.error is not None: return Err(npc_result.error), None, [] cost = npc_result.cost log.debug(f"Cost: {cost}") if cost > int(self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM): # we shouldn't ever end up here, since the cost is limited when we # execute the CLVM program. return Err.BLOCK_COST_EXCEEDS_MAX, None, [] assert npc_result.conds is not None # build removal list removal_names: List[bytes32] = [bytes32(spend.coin_id) for spend in npc_result.conds.spends] if set(removal_names) != set([s.name() for s in new_spend.removals()]): # If you reach here it's probably because your program reveal doesn't match the coin's puzzle hash return Err.INVALID_SPEND_BUNDLE, None, [] additions: List[Coin] = additions_for_npc(npc_result) additions_dict: Dict[bytes32, Coin] = {} for add in additions: additions_dict[add.name()] = add addition_amount: int = 0 # Check additions for max coin amount for coin in additions: if coin.amount < 0: return Err.COIN_AMOUNT_NEGATIVE, None, [] if coin.amount > self.constants.MAX_COIN_AMOUNT: return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None, [] addition_amount = addition_amount + coin.amount # Check for duplicate outputs addition_counter = collections.Counter(_.name() for _ in additions) for k, v in addition_counter.items(): if v > 1: return Err.DUPLICATE_OUTPUT, None, [] # Check for duplicate inputs removal_counter = collections.Counter(name for name in removal_names) for k, v in 
removal_counter.items(): if v > 1: return Err.DOUBLE_SPEND, None, [] removal_record_dict: Dict[bytes32, CoinRecord] = {} removal_amount: int = 0 for name in removal_names: removal_record = await self.coin_store.get_coin_record(name) if removal_record is None and name not in additions_dict: return Err.UNKNOWN_UNSPENT, None, [] elif name in additions_dict: removal_coin = additions_dict[name] # The timestamp and block-height of this coin being spent needs # to be consistent with what we use to check time-lock # conditions (below). All spends (including ephemeral coins) are # spent simultaneously. Ephemeral coins with an # ASSERT_SECONDS_RELATIVE 0 condition are still OK to spend in # the same block. assert self.peak.timestamp is not None removal_record = CoinRecord( removal_coin, uint32(self.peak.height + 1), uint32(0), False, self.peak.timestamp, ) assert removal_record is not None removal_amount = removal_amount + removal_record.coin.amount removal_record_dict[name] = removal_record removals: List[Coin] = [record.coin for record in removal_record_dict.values()] if addition_amount > removal_amount: return Err.MINTING_COIN, None, [] fees = uint64(removal_amount - addition_amount) assert_fee_sum: uint64 = uint64(npc_result.conds.reserve_fee) if fees < assert_fee_sum: return Err.RESERVE_FEE_CONDITION_FAILED, None, [] if cost == 0: return Err.UNKNOWN, None, [] fees_per_cost: float = fees / cost # If pool is at capacity check the fee, if not then accept even without the fee if self.mempool.at_full_capacity(cost): if fees_per_cost < self.nonzero_fee_minimum_fpc: return Err.INVALID_FEE_TOO_CLOSE_TO_ZERO, None, [] if fees_per_cost <= self.mempool.get_min_fee_rate(cost): return Err.INVALID_FEE_LOW_FEE, None, [] # Check removals against UnspentDB + DiffStore + Mempool + SpendBundle # Use this information later when constructing a block fail_reason, conflicts = await self.check_removals(removal_record_dict) # If there is a mempool conflict check if this SpendBundle has a higher 
fee per cost than all others conflicting_pool_items: Dict[bytes32, MempoolItem] = {} # If we have a mempool conflict, continue, since we still want to keep around the TX in the pending pool. if fail_reason is not None and fail_reason is not Err.MEMPOOL_CONFLICT: return fail_reason, None, [] # Verify conditions, create hash_key list for aggsig check for spend in npc_result.conds.spends: coin_record: CoinRecord = removal_record_dict[bytes32(spend.coin_id)] # Check that the revealed removal puzzles actually match the puzzle hash if spend.puzzle_hash != coin_record.coin.puzzle_hash: log.warning("Mempool rejecting transaction because of wrong puzzle_hash") log.warning(f"{spend.puzzle_hash.hex()} != {coin_record.coin.puzzle_hash.hex()}") return Err.WRONG_PUZZLE_HASH, None, [] flaxlisp_height = ( self.peak.prev_transaction_block_height if not self.peak.is_transaction_block else self.peak.height ) assert self.peak.timestamp is not None tl_error: Optional[Err] = mempool_check_time_locks( removal_record_dict, npc_result.conds, uint32(flaxlisp_height), self.peak.timestamp, ) potential = MempoolItem( new_spend, uint64(fees), npc_result, cost, spend_name, additions, removals, self.peak.height ) if tl_error: if tl_error is Err.ASSERT_HEIGHT_ABSOLUTE_FAILED or tl_error is Err.ASSERT_HEIGHT_RELATIVE_FAILED: return tl_error, potential, [] # MempoolInclusionStatus.PENDING else: return tl_error, None, [] # MempoolInclusionStatus.FAILED if fail_reason is Err.MEMPOOL_CONFLICT: for conflicting in conflicts: for c_sb_id in self.mempool.removals[conflicting.name()]: sb: MempoolItem = self.mempool.spends[c_sb_id] conflicting_pool_items[sb.name] = sb log.warning(f"Conflicting pool items: {len(conflicting_pool_items)}") if not self.can_replace(conflicting_pool_items, removal_record_dict, fees, fees_per_cost): return Err.MEMPOOL_CONFLICT, potential, [] duration = time.time() - start_time log.log( logging.DEBUG if duration < 2 else logging.WARNING, f"add_spendbundle {spend_name} took 
{duration:0.2f} seconds. " f"Cost: {cost} ({round(100.0 * cost/self.constants.MAX_BLOCK_COST_CLVM, 3)}% of max block cost)", ) return None, potential, list(conflicting_pool_items.keys()) async def check_removals(self, removals: Dict[bytes32, CoinRecord]) -> Tuple[Optional[Err], List[Coin]]: """ This function checks for double spends, unknown spends and conflicting transactions in mempool. Returns Error (if any), dictionary of Unspents, list of coins with conflict errors (if any any). Note that additions are not checked for duplicates, because having duplicate additions requires also having duplicate removals. """ assert self.peak is not None conflicts: List[Coin] = [] for record in removals.values(): removal = record.coin # 1. Checks if it's been spent already if record.spent: return Err.DOUBLE_SPEND, [] # 2. Checks if there's a mempool conflict if removal.name() in self.mempool.removals: conflicts.append(removal) if len(conflicts) > 0: return Err.MEMPOOL_CONFLICT, conflicts # 5. If coins can be spent return list of unspents as we see them in local storage return None, [] def get_spendbundle(self, bundle_hash: bytes32) -> Optional[SpendBundle]: """Returns a full SpendBundle if it's inside one the mempools""" if bundle_hash in self.mempool.spends: return self.mempool.spends[bundle_hash].spend_bundle return None def get_mempool_item(self, bundle_hash: bytes32) -> Optional[MempoolItem]: """Returns a MempoolItem if it's inside one the mempools""" if bundle_hash in self.mempool.spends: return self.mempool.spends[bundle_hash] return None async def new_peak( self, new_peak: Optional[BlockRecord], last_npc_result: Optional[NPCResult] ) -> List[Tuple[SpendBundle, NPCResult, bytes32]]: """ Called when a new peak is available, we try to recreate a mempool for the new tip. 
""" if new_peak is None: return [] if new_peak.is_transaction_block is False: return [] if self.peak == new_peak: return [] assert new_peak.timestamp is not None included_items = [] use_optimization: bool = self.peak is not None and new_peak.prev_transaction_block_hash == self.peak.header_hash self.peak = new_peak if use_optimization and last_npc_result is not None: # We don't reinitialize a mempool, just kick removed items if last_npc_result.conds is not None: for spend in last_npc_result.conds.spends: if spend.coin_id in self.mempool.removals: c_ids: List[bytes32] = self.mempool.removals[bytes32(spend.coin_id)] self.mempool.remove_from_pool(c_ids) for c_id in c_ids: self.remove_seen(c_id) else: old_pool = self.mempool self.mempool = Mempool( self.mempool_max_total_cost, uint64(self.nonzero_fee_minimum_fpc), uint64(self.constants.MAX_BLOCK_COST_CLVM), ) self.seen_bundle_hashes = {} for item in old_pool.spends.values(): _, result, err = await self.add_spend_bundle(item.spend_bundle, item.npc_result, item.spend_bundle_name) # Only add to `seen` if inclusion worked, so it can be resubmitted in case of a reorg if result == MempoolInclusionStatus.SUCCESS: self.add_and_maybe_pop_seen(item.spend_bundle_name) # If the spend bundle was confirmed or conflicting (can no longer be in mempool), it won't be # successfully added to the new mempool. if result == MempoolInclusionStatus.FAILED and err == Err.DOUBLE_SPEND: # Item was in mempool, but after the new block it's a double spend. # Item is most likely included in the block. 
included_items.append(item) potential_txs = self.potential_cache.drain() txs_added = [] for item in potential_txs.values(): cost, status, error = await self.add_spend_bundle( item.spend_bundle, item.npc_result, item.spend_bundle_name ) if status == MempoolInclusionStatus.SUCCESS: txs_added.append((item.spend_bundle, item.npc_result, item.spend_bundle_name)) log.info( f"Size of mempool: {len(self.mempool.spends)} spends, cost: {self.mempool.total_mempool_cost} " f"minimum fee rate (in FPC) to get in for 5M cost tx: {self.mempool.get_min_fee_rate(5000000)}" ) self.mempool.fee_estimator.new_block(FeeBlockInfo(new_peak.height, included_items)) return txs_added async def get_items_not_in_filter(self, mempool_filter: PyBIP158, limit: int = 100) -> List[MempoolItem]: items: List[MempoolItem] = [] counter = 0 broke_from_inner_loop = False # Send 100 with the highest fee per cost for dic in self.mempool.sorted_spends.values(): if broke_from_inner_loop: break for item in dic.values(): if counter == limit: broke_from_inner_loop = True break if mempool_filter.Match(bytearray(item.spend_bundle_name)): continue items.append(item) counter += 1 return items def get_mempool_info(self) -> FeeMempoolInfo: import datetime return FeeMempoolInfo( CLVMCost(uint64(self.mempool_max_total_cost)), FeeRate(uint64(self.nonzero_fee_minimum_fpc)), CLVMCost(uint64(self.mempool.total_mempool_cost)), datetime.datetime.now(), CLVMCost(uint64(self.constants.MAX_BLOCK_COST_CLVM)), )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_estimation.py
flax/full_node/fee_estimation.py
from __future__ import annotations from dataclasses import dataclass from datetime import datetime from typing import List from flax.types.clvm_cost import CLVMCost from flax.types.fee_rate import FeeRate from flax.types.mempool_item import MempoolItem from flax.util.ints import uint32 @dataclass(frozen=True) class FeeMempoolInfo: """ Information from Mempool and MempoolItems needed to estimate fees. Updated when `MemPoolItem`s are added or removed from the Mempool. Attributes: current_mempool_cost (uint64):This is the current capacity of the mempool, measured in XFX per CLVM Cost max_size_in_cost (uint64): This is the maximum capacity of the mempool, measured in XFX per CLVM Cost minimum_fee_per_cost_to_replace (uint64): Smallest FPC that might be accepted to replace another SpendBundle time (datetime): Local time this sample was taken Note that we use the node's local time, not "Blockchain time" for the timestamp above """ max_size_in_cost: CLVMCost # Mempool max allowed CLVM cost total minimum_fee_per_cost_to_replace: FeeRate current_mempool_cost: CLVMCost # Current sum of CLVM cost of all SpendBundles in mempool (mempool "size") time: datetime # Local time this sample was taken MAX_BLOCK_COST_CLVM: CLVMCost # Max CLVMCost allowed in the Mempool @dataclass(frozen=True) class FeeMempoolItem: height_added: uint32 fee_per_cost: FeeRate @dataclass(frozen=True) class FeeBlockInfo: # See BlockRecord """ Information from Blockchain needed to estimate fees. """ block_height: uint32 included_items: List[MempoolItem]
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/mempool_check_conditions.py
flax/full_node/mempool_check_conditions.py
import logging from typing import Dict, Optional, Tuple from chia_rs import MEMPOOL_MODE, NO_NEG_DIV, get_puzzle_and_solution_for_coin as get_puzzle_and_solution_for_coin_rust from flax.types.blockchain_format.coin import Coin from flax.consensus.cost_calculator import NPCResult from flax.types.spend_bundle_conditions import SpendBundleConditions from flax.full_node.generator import setup_generator_args from flax.types.coin_record import CoinRecord from flax.types.generator_types import BlockGenerator from flax.types.blockchain_format.sized_bytes import bytes32 from flax.util.errors import Err from flax.util.ints import uint32, uint64, uint16 from flax.wallet.puzzles.rom_bootstrap_generator import get_generator from flax.types.blockchain_format.program import SerializedProgram from flax.wallet.puzzles.load_clvm import load_serialized_clvm_maybe_recompile from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.types.blockchain_format.program import Program GENERATOR_MOD = get_generator() DESERIALIZE_MOD = load_serialized_clvm_maybe_recompile( "flaxlisp_deserialisation.clvm", package_or_requirement="flax.wallet.puzzles" ) log = logging.getLogger(__name__) def unwrap(x: Optional[uint32]) -> uint32: assert x is not None return x def get_name_puzzle_conditions( generator: BlockGenerator, max_cost: int, *, cost_per_byte: int, mempool_mode: bool ) -> NPCResult: block_program, block_program_args = setup_generator_args(generator) size_cost = len(bytes(generator.program)) * cost_per_byte max_cost -= size_cost if max_cost < 0: return NPCResult(uint16(Err.INVALID_BLOCK_COST.value), None, uint64(0)) # mempool mode also has these rules apply assert (MEMPOOL_MODE & NO_NEG_DIV) != 0 if mempool_mode: flags = MEMPOOL_MODE else: # conditions must use integers in canonical encoding (i.e. 
no redundant # leading zeros) # the division operator may not be used with negative operands flags = NO_NEG_DIV try: err, result = GENERATOR_MOD.run_as_generator(max_cost, flags, block_program, block_program_args) assert (err is None) != (result is None) if err is not None: return NPCResult(uint16(err), None, uint64(0)) else: assert result is not None return NPCResult(None, result, uint64(result.cost + size_cost)) except BaseException: log.exception("get_name_puzzle_condition failed") return NPCResult(uint16(Err.GENERATOR_RUNTIME_ERROR.value), None, uint64(0)) def get_puzzle_and_solution_for_coin( generator: BlockGenerator, coin: Coin ) -> Tuple[Optional[Exception], Optional[SerializedProgram], Optional[SerializedProgram]]: try: args = bytearray(b"\xff") args += bytes(DESERIALIZE_MOD) args += b"\xff" args += bytes(Program.to([bytes(a) for a in generator.generator_refs])) args += b"\x80\x80" puzzle, solution = get_puzzle_and_solution_for_coin_rust( bytes(generator.program), bytes(args), DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM, coin.parent_coin_info, coin.amount, coin.puzzle_hash, ) return None, SerializedProgram.from_bytes(puzzle), SerializedProgram.from_bytes(solution) except Exception as e: return e, None, None def mempool_check_time_locks( removal_coin_records: Dict[bytes32, CoinRecord], bundle_conds: SpendBundleConditions, prev_transaction_block_height: uint32, timestamp: uint64, ) -> Optional[Err]: """ Check all time and height conditions against current state. 
""" if prev_transaction_block_height < bundle_conds.height_absolute: return Err.ASSERT_HEIGHT_ABSOLUTE_FAILED if timestamp < bundle_conds.seconds_absolute: return Err.ASSERT_SECONDS_ABSOLUTE_FAILED for spend in bundle_conds.spends: unspent = removal_coin_records[bytes32(spend.coin_id)] if spend.height_relative is not None: if prev_transaction_block_height < unspent.confirmed_block_index + spend.height_relative: return Err.ASSERT_HEIGHT_RELATIVE_FAILED if timestamp < unspent.timestamp + spend.seconds_relative: return Err.ASSERT_SECONDS_RELATIVE_FAILED return None
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/fee_estimator_constants.py
flax/full_node/fee_estimator_constants.py
# https://github.com/bitcoin/bitcoin/blob/5b6f0f31fa6ce85db3fb7f9823b1bbb06161ae32/src/policy/fees.h from __future__ import annotations MIN_FEE_RATE = 0 # Value of first bucket INITIAL_STEP = 100 # First bucket after zero value MAX_FEE_RATE = 40000000 # Mojo per 1000 cost unit INFINITE_FEE_RATE = 1000000000 STEP_SIZE = 1.05 # bucket increase by 1.05 # Track confirm delays up to SHORT_BLOCK_PERIOD blocks for short horizon SHORT_BLOCK_PERIOD = 12 # 3 SHORT_SCALE = 1 # Track confirm delays up to MED_BLOCK_PERIOD blocks for medium horizon MED_BLOCK_PERIOD = 24 # 15 MED_SCALE = 2 # Track confirm delays up to LONG_BLOCK_PERIOD blocks for long horizon LONG_BLOCK_PERIOD = 42 # 15 LONG_SCALE = 24 # 4 SECONDS_PER_BLOCK = 40 SHORT_DECAY = 0.962 MED_DECAY = 0.9952 LONG_DECAY = 0.99931 HALF_SUCCESS_PCT = 0.6 # Require 60 % success rate for target confirmations SUCCESS_PCT = 0.85 # Require 85 % success rate for target confirmations DOUBLE_SUCCESS_PCT = 0.95 # Require 95 % success rate for target confirmations SUFFICIENT_FEE_TXS = 0.1 # Require an avg of 0.1 tx in the combined fee rate bucket per block to have stat significance FEE_ESTIMATOR_VERSION = 1 OLDEST_ESTIMATE_HISTORY = 6 * 1008
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/full_node.py
flax/full_node/full_node.py
from __future__ import annotations import asyncio import contextlib import dataclasses import logging import multiprocessing from multiprocessing.context import BaseContext import random import time import traceback from pathlib import Path from typing import Any, Awaitable, Callable, Dict, List, Optional, Set, Tuple, Union import sqlite3 from blspy import AugSchemeMPL import flax.server.ws_connection as ws # lgtm [py/import-and-import-from] from flax.consensus.block_creation import unfinished_block_to_full_block from flax.consensus.block_record import BlockRecord from flax.consensus.blockchain import Blockchain, ReceiveBlockResult, StateChangeSummary from flax.consensus.blockchain_interface import BlockchainInterface from flax.consensus.constants import ConsensusConstants from flax.consensus.cost_calculator import NPCResult from flax.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty from flax.consensus.make_sub_epoch_summary import next_sub_epoch_summary from flax.consensus.multiprocess_validation import PreValidationResult from flax.consensus.pot_iterations import calculate_sp_iters from flax.full_node.block_store import BlockStore from flax.full_node.hint_management import get_hints_and_subscription_coin_ids from flax.full_node.lock_queue import LockQueue, LockClient from flax.full_node.bundle_tools import detect_potential_template_generator from flax.full_node.coin_store import CoinStore from flax.full_node.full_node_store import FullNodeStore, FullNodeStorePeakResult from flax.full_node.hint_store import HintStore from flax.full_node.mempool_manager import MempoolManager from flax.full_node.signage_point import SignagePoint from flax.full_node.sync_store import SyncStore from flax.full_node.weight_proof import WeightProofHandler from flax.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol from flax.protocols.full_node_protocol import ( RequestBlocks, RespondBlock, RespondBlocks, 
RespondSignagePoint, ) from flax.protocols.protocol_message_types import ProtocolMessageTypes from flax.protocols.wallet_protocol import CoinState, CoinStateUpdate from flax.server.node_discovery import FullNodePeers from flax.server.outbound_message import Message, NodeType, make_msg from flax.server.peer_store_resolver import PeerStoreResolver from flax.server.server import FlaxServer from flax.types.blockchain_format.classgroup import ClassgroupElement from flax.types.blockchain_format.pool_target import PoolTarget from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary from flax.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof from flax.types.coin_record import CoinRecord from flax.types.end_of_slot_bundle import EndOfSubSlotBundle from flax.types.full_block import FullBlock from flax.types.generator_types import BlockGenerator from flax.types.header_block import HeaderBlock from flax.types.mempool_inclusion_status import MempoolInclusionStatus from flax.types.spend_bundle import SpendBundle from flax.types.transaction_queue_entry import TransactionQueueEntry from flax.types.unfinished_block import UnfinishedBlock from flax.util import cached_bls from flax.util.bech32m import encode_puzzle_hash from flax.util.check_fork_next_block import check_fork_next_block from flax.util.condition_tools import pkm_pairs from flax.util.config import PEER_DB_PATH_KEY_DEPRECATED, process_config_start_method from flax.util.db_wrapper import DBWrapper2, create_connection from flax.util.errors import ConsensusError, Err, ValidationError from flax.util.ints import uint8, uint32, uint64, uint128 from flax.util.path import path_from_root from flax.util.safe_cancel_task import cancel_task_safe from flax.util.profiler import profile_task from flax.util.memory_profiler import mem_profile_task from flax.util.db_synchronous import db_synchronous_on from flax.util.db_version import 
lookup_db_version, set_db_version_async # This is the result of calling peak_post_processing, which is then fed into peak_post_processing_2 @dataclasses.dataclass class PeakPostProcessingResult: mempool_peak_result: List[Tuple[SpendBundle, NPCResult, bytes32]] # The result of calling MempoolManager.new_peak fns_peak_result: FullNodeStorePeakResult # The result of calling FullNodeStore.new_peak hints: List[Tuple[bytes32, bytes]] # The hints added to the DB lookup_coin_ids: List[bytes32] # The coin IDs that we need to look up to notify wallets of changes class FullNode: _segment_task: Optional[asyncio.Task[None]] initialized: bool root_path: Path config: Dict[str, Any] _server: Optional[FlaxServer] _shut_down: bool constants: ConsensusConstants pow_creation: Dict[bytes32, asyncio.Event] state_changed_callback: Optional[Callable[[str, Optional[Dict[str, Any]]], None]] full_node_peers: Optional[FullNodePeers] sync_store: Any signage_point_times: List[float] full_node_store: FullNodeStore uncompact_task: Optional[asyncio.Task[None]] compact_vdf_requests: Set[bytes32] log: logging.Logger multiprocessing_context: Optional[BaseContext] _ui_tasks: Set[asyncio.Task[None]] db_path: Path # TODO: use NewType all over to describe these various uses of the same types # Puzzle Hash : Set[Peer ID] coin_subscriptions: Dict[bytes32, Set[bytes32]] # Puzzle Hash : Set[Peer ID] ph_subscriptions: Dict[bytes32, Set[bytes32]] # Peer ID: Set[Coin ids] peer_coin_ids: Dict[bytes32, Set[bytes32]] # Peer ID: Set[puzzle_hash] peer_puzzle_hash: Dict[bytes32, Set[bytes32]] # Peer ID: subscription count peer_sub_counter: Dict[bytes32, int] _transaction_queue_task: Optional[asyncio.Task[None]] simulator_transaction_callback: Optional[Callable[[bytes32], Awaitable[None]]] _sync_task: Optional[asyncio.Task[None]] _transaction_queue: Optional[asyncio.PriorityQueue[Tuple[int, TransactionQueueEntry]]] _compact_vdf_sem: Optional[asyncio.Semaphore] _new_peak_sem: Optional[asyncio.Semaphore] 
_respond_transaction_semaphore: Optional[asyncio.Semaphore] _db_wrapper: Optional[DBWrapper2] _hint_store: Optional[HintStore] transaction_responses: List[Tuple[bytes32, MempoolInclusionStatus, Optional[Err]]] _block_store: Optional[BlockStore] _coin_store: Optional[CoinStore] _mempool_manager: Optional[MempoolManager] _init_weight_proof: Optional[asyncio.Task[None]] _blockchain: Optional[Blockchain] _timelord_lock: Optional[asyncio.Lock] weight_proof_handler: Optional[WeightProofHandler] _blockchain_lock_queue: Optional[LockQueue] _maybe_blockchain_lock_high_priority: Optional[LockClient] _maybe_blockchain_lock_low_priority: Optional[LockClient] @property def server(self) -> FlaxServer: # This is a stop gap until the class usage is refactored such the values of # integral attributes are known at creation of the instance. if self._server is None: raise RuntimeError("server not assigned") return self._server def __init__( self, config: Dict[str, Any], root_path: Path, consensus_constants: ConsensusConstants, name: str = __name__, ) -> None: self._segment_task = None self.initialized = False self.root_path = root_path self.config = config self._server = None self._shut_down = False # Set to true to close all infinite loops self.constants = consensus_constants self.pow_creation = {} self.state_changed_callback = None self.full_node_peers = None self.sync_store = None self.signage_point_times = [time.time() for _ in range(self.constants.NUM_SPS_SUB_SLOT)] self.full_node_store = FullNodeStore(self.constants) self.uncompact_task = None self.compact_vdf_requests = set() self.log = logging.getLogger(name) # TODO: Logging isn't setup yet so the log entries related to parsing the # config would end up on stdout if handled here. 
self.multiprocessing_context = None self._ui_tasks = set() db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"]) self.db_path = path_from_root(root_path, db_path_replaced) self.coin_subscriptions = {} self.ph_subscriptions = {} self.peer_coin_ids = {} self.peer_puzzle_hash = {} self.peer_sub_counter = {} self.db_path.parent.mkdir(parents=True, exist_ok=True) self._transaction_queue_task = None self.simulator_transaction_callback = None self._sync_task = None self._transaction_queue = None self._compact_vdf_sem = None self._new_peak_sem = None self._respond_transaction_semaphore = None self._db_wrapper = None self._hint_store = None self.transaction_responses = [] self._block_store = None self._coin_store = None self._mempool_manager = None self._init_weight_proof = None self._blockchain = None self._timelord_lock = None self.weight_proof_handler = None self._blockchain_lock_queue = None self._maybe_blockchain_lock_high_priority = None self._maybe_blockchain_lock_low_priority = None @property def block_store(self) -> BlockStore: assert self._block_store is not None return self._block_store @property def _blockchain_lock_high_priority(self) -> LockClient: assert self._maybe_blockchain_lock_high_priority is not None return self._maybe_blockchain_lock_high_priority @property def _blockchain_lock_low_priority(self) -> LockClient: assert self._maybe_blockchain_lock_low_priority is not None return self._maybe_blockchain_lock_low_priority @property def timelord_lock(self) -> asyncio.Lock: assert self._timelord_lock is not None return self._timelord_lock @property def mempool_manager(self) -> MempoolManager: assert self._mempool_manager is not None return self._mempool_manager @property def blockchain(self) -> Blockchain: assert self._blockchain is not None return self._blockchain @property def coin_store(self) -> CoinStore: assert self._coin_store is not None return self._coin_store @property def 
respond_transaction_semaphore(self) -> asyncio.Semaphore: assert self._respond_transaction_semaphore is not None return self._respond_transaction_semaphore @property def transaction_queue(self) -> asyncio.PriorityQueue[Tuple[int, TransactionQueueEntry]]: assert self._transaction_queue is not None return self._transaction_queue @property def db_wrapper(self) -> DBWrapper2: assert self._db_wrapper is not None return self._db_wrapper @property def hint_store(self) -> HintStore: assert self._hint_store is not None return self._hint_store @property def new_peak_sem(self) -> asyncio.Semaphore: assert self._new_peak_sem is not None return self._new_peak_sem @property def compact_vdf_sem(self) -> asyncio.Semaphore: assert self._compact_vdf_sem is not None return self._compact_vdf_sem def get_connections(self, request_node_type: Optional[NodeType]) -> List[Dict[str, Any]]: connections = self.server.get_connections(request_node_type) con_info: List[Dict[str, Any]] = [] if self.sync_store is not None: peak_store = self.sync_store.peer_to_peak else: peak_store = None for con in connections: if peak_store is not None and con.peer_node_id in peak_store: peak_hash, peak_height, peak_weight = peak_store[con.peer_node_id] else: peak_height = None peak_hash = None peak_weight = None con_dict: Dict[str, Any] = { "type": con.connection_type, "local_port": con.local_port, "peer_host": con.peer_host, "peer_port": con.peer_port, "peer_server_port": con.peer_server_port, "node_id": con.peer_node_id, "creation_time": con.creation_time, "bytes_read": con.bytes_read, "bytes_written": con.bytes_written, "last_message_time": con.last_message_time, "peak_height": peak_height, "peak_weight": peak_weight, "peak_hash": peak_hash, } con_info.append(con_dict) return con_info def _set_state_changed_callback(self, callback: Callable[..., Any]) -> None: self.state_changed_callback = callback async def _start(self) -> None: self._timelord_lock = asyncio.Lock() self._compact_vdf_sem = 
asyncio.Semaphore(4) # We don't want to run too many concurrent new_peak instances, because it would fetch the same block from # multiple peers and re-validate. self._new_peak_sem = asyncio.Semaphore(2) # These many respond_transaction tasks can be active at any point in time self._respond_transaction_semaphore = asyncio.Semaphore(200) # create the store (db) and full node instance # TODO: is this standardized and thus able to be handled by DBWrapper2? async with create_connection(self.db_path) as db_connection: db_version = await lookup_db_version(db_connection) self.log.info(f"using blockchain database {self.db_path}, which is version {db_version}") sql_log_path: Optional[Path] = None if self.config.get("log_sqlite_cmds", False): sql_log_path = path_from_root(self.root_path, "log/sql.log") self.log.info(f"logging SQL commands to {sql_log_path}") db_sync = db_synchronous_on(self.config.get("db_sync", "auto")) # Never use pragma synchronous=OFF in Flax. db_sync = "FULL" self.log.info(f"opening blockchain DB: synchronous={db_sync}") self._db_wrapper = await DBWrapper2.create( self.db_path, db_version=db_version, reader_count=4, log_path=sql_log_path, synchronous=db_sync, ) if self.db_wrapper.db_version != 2: async with self.db_wrapper.reader_no_transaction() as conn: async with conn.execute( "SELECT name FROM sqlite_master WHERE type='table' AND name='full_blocks'" ) as cur: if len(list(await cur.fetchall())) == 0: try: # this is a new DB file. 
Make it v2 async with self.db_wrapper.writer_maybe_transaction() as w_conn: await set_db_version_async(w_conn, 2) self.db_wrapper.db_version = 2 self.log.info("blockchain database is empty, configuring as v2") except sqlite3.OperationalError: # it could be a database created with "flax init", which is # empty except it has the database_version table pass self._block_store = await BlockStore.create(self.db_wrapper) self.sync_store = SyncStore() self._hint_store = await HintStore.create(self.db_wrapper) self._coin_store = await CoinStore.create(self.db_wrapper) self.log.info("Initializing blockchain from disk") start_time = time.time() reserved_cores = self.config.get("reserved_cores", 0) single_threaded = self.config.get("single_threaded", False) multiprocessing_start_method = process_config_start_method(config=self.config, log=self.log) self.multiprocessing_context = multiprocessing.get_context(method=multiprocessing_start_method) self._blockchain = await Blockchain.create( coin_store=self.coin_store, block_store=self.block_store, consensus_constants=self.constants, blockchain_dir=self.db_path.parent, reserved_cores=reserved_cores, multiprocessing_context=self.multiprocessing_context, single_threaded=single_threaded, ) self._mempool_manager = MempoolManager( coin_store=self.coin_store, consensus_constants=self.constants, multiprocessing_context=self.multiprocessing_context, single_threaded=single_threaded, ) # Blocks are validated under high priority, and transactions under low priority. This guarantees blocks will # be validated first. 
blockchain_lock_queue = LockQueue(self.blockchain.lock) self._blockchain_lock_queue = blockchain_lock_queue self._maybe_blockchain_lock_high_priority = LockClient(0, blockchain_lock_queue) self._maybe_blockchain_lock_low_priority = LockClient(1, blockchain_lock_queue) # Transactions go into this queue from the server, and get sent to respond_transaction self._transaction_queue = asyncio.PriorityQueue(10000) self._transaction_queue_task: asyncio.Task[None] = asyncio.create_task(self._handle_transactions()) self.transaction_responses = [] self._init_weight_proof = asyncio.create_task(self.initialize_weight_proof()) if self.config.get("enable_profiler", False): asyncio.create_task(profile_task(self.root_path, "node", self.log)) if self.config.get("enable_memory_profiler", False): asyncio.create_task(mem_profile_task(self.root_path, "node", self.log)) time_taken = time.time() - start_time peak: Optional[BlockRecord] = self.blockchain.get_peak() if peak is None: self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s") num_unspent = await self.coin_store.num_unspent() if num_unspent > 0: self.log.error( f"Inconsistent blockchain DB file! Could not find peak block but found {num_unspent} coins! " "This is a fatal error. 
The blockchain database may be corrupt" ) raise RuntimeError("corrupt blockchain DB") else: self.log.info( f"Blockchain initialized to peak {peak.header_hash} height" f" {peak.height}, " f"time taken: {int(time_taken)}s" ) async with self._blockchain_lock_high_priority: pending_tx = await self.mempool_manager.new_peak(peak, None) assert len(pending_tx) == 0 # no pending transactions when starting up full_peak: Optional[FullBlock] = await self.blockchain.get_full_peak() assert full_peak is not None state_change_summary = StateChangeSummary(peak, uint32(max(peak.height - 1, 0)), [], [], []) ppp_result: PeakPostProcessingResult = await self.peak_post_processing( full_peak, state_change_summary, None ) await self.peak_post_processing_2(full_peak, None, state_change_summary, ppp_result) if self.config["send_uncompact_interval"] != 0: sanitize_weight_proof_only = False if "sanitize_weight_proof_only" in self.config: sanitize_weight_proof_only = self.config["sanitize_weight_proof_only"] assert self.config["target_uncompact_proofs"] != 0 self.uncompact_task = asyncio.create_task( self.broadcast_uncompact_blocks( self.config["send_uncompact_interval"], self.config["target_uncompact_proofs"], sanitize_weight_proof_only, ) ) self.initialized = True if self.full_node_peers is not None: asyncio.create_task(self.full_node_peers.start()) async def _handle_one_transaction(self, entry: TransactionQueueEntry) -> None: peer = entry.peer try: inc_status, err = await self.respond_transaction(entry.transaction, entry.spend_name, peer, entry.test) self.transaction_responses.append((entry.spend_name, inc_status, err)) if len(self.transaction_responses) > 50: self.transaction_responses = self.transaction_responses[1:] except asyncio.CancelledError: error_stack = traceback.format_exc() self.log.debug(f"Cancelling _handle_one_transaction, closing: {error_stack}") except Exception: error_stack = traceback.format_exc() self.log.error(f"Error in _handle_one_transaction, closing: {error_stack}") 
if peer is not None: await peer.close() finally: self.respond_transaction_semaphore.release() async def _handle_transactions(self) -> None: try: while not self._shut_down: # We use a semaphore to make sure we don't send more than 200 concurrent calls of respond_transaction. # However, doing them one at a time would be slow, because they get sent to other processes. await self.respond_transaction_semaphore.acquire() item: TransactionQueueEntry = (await self.transaction_queue.get())[1] asyncio.create_task(self._handle_one_transaction(item)) except asyncio.CancelledError: raise async def initialize_weight_proof(self) -> None: self.weight_proof_handler = WeightProofHandler( constants=self.constants, blockchain=self.blockchain, multiprocessing_context=self.multiprocessing_context, ) peak = self.blockchain.get_peak() if peak is not None: await self.weight_proof_handler.create_sub_epoch_segments() def set_server(self, server: FlaxServer) -> None: self._server = server dns_servers: List[str] = [] network_name = self.config["selected_network"] try: default_port = self.config["network_overrides"]["config"][network_name]["default_full_node_port"] except Exception: self.log.info("Default port field not found in config.") default_port = None if "dns_servers" in self.config: dns_servers = self.config["dns_servers"] elif self.config["port"] == 6888: # If `dns_servers` misses from the `config`, hardcode it if we're running mainnet. 
dns_servers.append("dns-introducer.flaxnetwork.org") try: self.full_node_peers = FullNodePeers( self.server, self.config["target_peer_count"] - self.config["target_outbound_peer_count"], self.config["target_outbound_peer_count"], PeerStoreResolver( self.root_path, self.config, selected_network=network_name, peers_file_path_key="peers_file_path", legacy_peer_db_path_key=PEER_DB_PATH_KEY_DEPRECATED, default_peers_file_path="db/peers.dat", ), self.config["introducer_peer"], dns_servers, self.config["peer_connect_interval"], self.config["selected_network"], default_port, self.log, ) except Exception as e: error_stack = traceback.format_exc() self.log.error(f"Exception: {e}") self.log.error(f"Exception in peer discovery: {e}") self.log.error(f"Exception Stack: {error_stack}") def _state_changed(self, change: str, change_data: Optional[Dict[str, Any]] = None) -> None: if self.state_changed_callback is not None: self.state_changed_callback(change, change_data) async def short_sync_batch(self, peer: ws.WSFlaxConnection, start_height: uint32, target_height: uint32) -> bool: """ Tries to sync to a chain which is not too far in the future, by downloading batches of blocks. If the first block that we download is not connected to our chain, we return False and do an expensive long sync instead. Long sync is not preferred because it requires downloading and validating a weight proof. Args: peer: peer to sync from start_height: height that we should start downloading at. (Our peak is higher) target_height: target to sync to Returns: False if the fork point was not found, and we need to do a long sync. True otherwise. 
""" # Don't trigger multiple batch syncs to the same peer if ( peer.peer_node_id in self.sync_store.backtrack_syncing and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0 ): return True # Don't batch sync, we are already in progress of a backtrack sync if peer.peer_node_id in self.sync_store.batch_syncing: return True # Don't trigger a long sync self.sync_store.batch_syncing.add(peer.peer_node_id) self.log.info(f"Starting batch short sync from {start_height} to height {target_height}") if start_height > 0: first = await peer.request_block(full_node_protocol.RequestBlock(uint32(start_height), False)) if first is None or not isinstance(first, full_node_protocol.RespondBlock): self.sync_store.batch_syncing.remove(peer.peer_node_id) raise ValueError(f"Error short batch syncing, could not fetch block at height {start_height}") if not self.blockchain.contains_block(first.block.prev_header_hash): self.log.info("Batch syncing stopped, this is a deep chain") self.sync_store.batch_syncing.remove(peer.peer_node_id) # First sb not connected to our blockchain, do a long sync instead return False batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS if self._segment_task is not None and (not self._segment_task.done()): try: self._segment_task.cancel() except Exception as e: self.log.warning(f"failed to cancel segment task {e}") self._segment_task = None try: for height in range(start_height, target_height, batch_size): end_height = min(target_height, height + batch_size) request = RequestBlocks(uint32(height), uint32(end_height), True) response = await peer.request_blocks(request) if not response: raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}") async with self._blockchain_lock_high_priority: state_change_summary: Optional[StateChangeSummary] success, state_change_summary = await self.receive_block_batch(response.blocks, peer, None) if not success: raise ValueError(f"Error short batch syncing, failed to validate blocks 
{height}-{end_height}") if state_change_summary is not None: try: peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak() assert peak_fb is not None ppp_result: PeakPostProcessingResult = await self.peak_post_processing( peak_fb, state_change_summary, peer, ) await self.peak_post_processing_2(peak_fb, peer, state_change_summary, ppp_result) except Exception: # Still do post processing after cancel (or exception) peak_fb = await self.blockchain.get_full_peak() assert peak_fb is not None await self.peak_post_processing(peak_fb, state_change_summary, peer) raise finally: self.log.info(f"Added blocks {height}-{end_height}") except (asyncio.CancelledError, Exception): self.sync_store.batch_syncing.remove(peer.peer_node_id) raise self.sync_store.batch_syncing.remove(peer.peer_node_id) return True async def short_sync_backtrack( self, peer: ws.WSFlaxConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32 ) -> bool: """ Performs a backtrack sync, where blocks are downloaded one at a time from newest to oldest. If we do not find the fork point 5 deeper than our peak, we return False and do a long sync instead. Args: peer: peer to sync from peak_height: height of our peak target_height: target height target_unf_hash: partial hash of the unfinished block of the target Returns: True iff we found the fork point, and we do not need to long sync. """ try: if peer.peer_node_id not in self.sync_store.backtrack_syncing: self.sync_store.backtrack_syncing[peer.peer_node_id] = 0 self.sync_store.backtrack_syncing[peer.peer_node_id] += 1 unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash) curr_height: int = target_height found_fork_point = False responses = [] while curr_height > peak_height - 5: # If we already have the unfinished block, don't fetch the transactions. 
In the normal case, we will # already have the unfinished block, from when it was broadcast, so we just need to download the header, # but not the transactions fetch_tx: bool = unfinished_block is None or curr_height != target_height curr = await peer.request_block(full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx)) if curr is None: raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out") if curr is None or not isinstance(curr, full_node_protocol.RespondBlock): raise ValueError( f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}" ) responses.append(curr) if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0: found_fork_point = True break curr_height -= 1 if found_fork_point: for response in reversed(responses): await self.respond_block(response, peer) except (asyncio.CancelledError, Exception): self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1 raise self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1 return found_fork_point async def _refresh_ui_connections(self, sleep_before: float = 0) -> None: if sleep_before > 0: await asyncio.sleep(sleep_before) self._state_changed("peer_changed_peak") async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSFlaxConnection) -> None: """ We have received a notification of a new peak from a peer. This happens either when we have just connected, or when the peer has updated their peak. Args: request: information about the new peak peer: peer that sent the message """ try: seen_header_hash = self.sync_store.seen_header_hash(request.header_hash) # Updates heights in the UI. Sleeps 1.5s before, so other peers have time to update their peaks as well. # Limit to 3 refreshes. if not seen_header_hash and len(self._ui_tasks) < 3:
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/full_node_api.py
flax/full_node/full_node_api.py
import asyncio import dataclasses import logging import time import traceback import functools from datetime import datetime, timezone from secrets import token_bytes from typing import Dict, List, Optional, Tuple, Set from blspy import AugSchemeMPL, G2Element, G1Element from chiabip158 import PyBIP158 import flax.server.ws_connection as ws from flax.consensus.block_creation import create_unfinished_block from flax.consensus.block_record import BlockRecord from flax.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters from flax.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator from flax.full_node.fee_estimate import FeeEstimate, FeeEstimateGroup from flax.full_node.full_node import FullNode from flax.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin from flax.full_node.signage_point import SignagePoint from flax.full_node.fee_estimator_interface import FeeEstimatorInterface from flax.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol from flax.protocols.full_node_protocol import RejectBlock, RejectBlocks from flax.protocols.protocol_message_types import ProtocolMessageTypes from flax.protocols.wallet_protocol import ( PuzzleSolutionResponse, RejectBlockHeaders, RejectHeaderBlocks, RejectHeaderRequest, CoinState, RespondFeeEstimates, RespondSESInfo, ) from flax.server.server import FlaxServer from concurrent.futures import ThreadPoolExecutor from flax.types.block_protocol import BlockInfo from flax.server.outbound_message import Message, make_msg from flax.types.blockchain_format.coin import Coin, hash_coin_ids from flax.types.blockchain_format.pool_target import PoolTarget from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary from flax.types.coin_record import CoinRecord from flax.types.end_of_slot_bundle 
import EndOfSubSlotBundle from flax.types.full_block import FullBlock from flax.types.generator_types import BlockGenerator from flax.types.mempool_inclusion_status import MempoolInclusionStatus from flax.types.mempool_item import MempoolItem from flax.types.peer_info import PeerInfo from flax.types.transaction_queue_entry import TransactionQueueEntry from flax.types.unfinished_block import UnfinishedBlock from flax.util.api_decorators import api_request, peer_required, bytes_required, execute_task, reply_type from flax.util.full_block_utils import header_block_from_block from flax.util.generator_tools import get_block_header, tx_removals_and_additions from flax.util.hash import std_hash from flax.util.ints import uint8, uint32, uint64, uint128 from flax.util.merkle_set import MerkleSet from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions class FullNodeAPI: full_node: FullNode executor: ThreadPoolExecutor def __init__(self, full_node: FullNode) -> None: self.full_node = full_node self.executor = ThreadPoolExecutor(max_workers=1) @property def server(self) -> FlaxServer: assert self.full_node.server is not None return self.full_node.server @property def log(self) -> logging.Logger: return self.full_node.log @property def api_ready(self) -> bool: return self.full_node.initialized @peer_required @api_request @reply_type([ProtocolMessageTypes.respond_peers]) async def request_peers( self, _request: full_node_protocol.RequestPeers, peer: ws.WSFlaxConnection ) -> Optional[Message]: if peer.peer_server_port is None: return None peer_info = PeerInfo(peer.peer_host, peer.peer_server_port) if self.full_node.full_node_peers is not None: msg = await self.full_node.full_node_peers.request_peers(peer_info) return msg return None @peer_required @api_request async def respond_peers( self, request: full_node_protocol.RespondPeers, peer: ws.WSFlaxConnection ) -> Optional[Message]: self.log.debug(f"Received {len(request.peer_list)} peers") if 
self.full_node.full_node_peers is not None: await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True) return None @peer_required @api_request async def respond_peers_introducer( self, request: introducer_protocol.RespondPeersIntroducer, peer: ws.WSFlaxConnection ) -> Optional[Message]: self.log.debug(f"Received {len(request.peer_list)} peers from introducer") if self.full_node.full_node_peers is not None: await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), False) await peer.close() return None @execute_task @peer_required @api_request async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSFlaxConnection) -> None: """ A peer notifies us that they have added a new peak to their blockchain. If we don't have it, we can ask for it. """ # this semaphore limits the number of tasks that can call new_peak() at # the same time, since it can be expensive new_peak_sem = self.full_node.new_peak_sem waiter_count = 0 if new_peak_sem._waiters is None else len(new_peak_sem._waiters) if waiter_count > 0: self.full_node.log.debug(f"new_peak Waiters: {waiter_count}") if waiter_count > 20: return None async with new_peak_sem: await self.full_node.new_peak(request, peer) return None @peer_required @api_request async def new_transaction( self, transaction: full_node_protocol.NewTransaction, peer: ws.WSFlaxConnection ) -> Optional[Message]: """ A peer notifies us of a new transaction. Requests a full transaction if we haven't seen it previously, and if the fees are enough. 
""" # Ignore if syncing if self.full_node.sync_store.get_sync_mode(): return None if not (await self.full_node.synced()): return None # Ignore if already seen if self.full_node.mempool_manager.seen(transaction.transaction_id): return None if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost): # If there's current pending request just add this peer to the set of peers that have this tx if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request: if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx: current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] if peer.peer_node_id in current_set: return None current_set.add(peer.peer_node_id) return None else: new_set = set() new_set.add(peer.peer_node_id) self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set return None self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id new_set = set() new_set.add(peer.peer_node_id) self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set async def tx_request_and_timeout(full_node: FullNode, transaction_id: bytes32, task_id: bytes32) -> None: counter = 0 try: while True: # Limit to asking a few peers, it's possible that this tx got included on chain already # Highly unlikely that the peers that advertised a tx don't respond to a request. 
Also, if we # drop some transactions, we don't want to re-fetch too many times if counter == 5: break if transaction_id not in full_node.full_node_store.peers_with_tx: break peers_with_tx: Set[bytes32] = full_node.full_node_store.peers_with_tx[transaction_id] if len(peers_with_tx) == 0: break peer_id = peers_with_tx.pop() assert full_node.server is not None if peer_id not in full_node.server.all_connections: continue random_peer = full_node.server.all_connections[peer_id] request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id) msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx) await random_peer.send_message(msg) await asyncio.sleep(5) counter += 1 if full_node.mempool_manager.seen(transaction_id): break except asyncio.CancelledError: pass finally: # Always Cleanup if transaction_id in full_node.full_node_store.peers_with_tx: full_node.full_node_store.peers_with_tx.pop(transaction_id) if transaction_id in full_node.full_node_store.pending_tx_request: full_node.full_node_store.pending_tx_request.pop(transaction_id) if task_id in full_node.full_node_store.tx_fetch_tasks: full_node.full_node_store.tx_fetch_tasks.pop(task_id) task_id: bytes32 = bytes32(token_bytes(32)) fetch_task = asyncio.create_task( tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id) ) self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task return None return None @api_request @reply_type([ProtocolMessageTypes.respond_transaction]) async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]: """Peer has requested a full transaction from us.""" # Ignore if syncing if self.full_node.sync_store.get_sync_mode(): return None spend_bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id) if spend_bundle is None: return None transaction = full_node_protocol.RespondTransaction(spend_bundle) msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction) return 
msg @peer_required @api_request @bytes_required async def respond_transaction( self, tx: full_node_protocol.RespondTransaction, peer: ws.WSFlaxConnection, tx_bytes: bytes = b"", test: bool = False, ) -> Optional[Message]: """ Receives a full transaction from peer. If tx is added to mempool, send tx_id to others. (new_transaction) """ assert tx_bytes != b"" spend_name = std_hash(tx_bytes) if spend_name in self.full_node.full_node_store.pending_tx_request: self.full_node.full_node_store.pending_tx_request.pop(spend_name) if spend_name in self.full_node.full_node_store.peers_with_tx: self.full_node.full_node_store.peers_with_tx.pop(spend_name) if self.full_node.transaction_queue.qsize() % 100 == 0 and not self.full_node.transaction_queue.empty(): self.full_node.log.debug(f"respond_transaction Waiters: {self.full_node.transaction_queue.qsize()}") if self.full_node.transaction_queue.full(): return None # TODO: Use fee in priority calculation, to prioritize high fee TXs await self.full_node.transaction_queue.put( (1, TransactionQueueEntry(tx.transaction, tx_bytes, spend_name, peer, test)) ) return None @api_request @reply_type([ProtocolMessageTypes.respond_proof_of_weight]) async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]: if self.full_node.weight_proof_handler is None: return None if not self.full_node.blockchain.contains_block(request.tip): self.log.error(f"got weight proof request for unknown peak {request.tip}") return None if request.tip in self.full_node.pow_creation: event = self.full_node.pow_creation[request.tip] await event.wait() wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip) else: event = asyncio.Event() self.full_node.pow_creation[request.tip] = event wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip) event.set() tips = list(self.full_node.pow_creation.keys()) if len(tips) > 4: # Remove old from cache for i in range(0, 4): 
self.full_node.pow_creation.pop(tips[i]) if wp is None: self.log.error(f"failed creating weight proof for peak {request.tip}") return None # Serialization of wp is slow if ( self.full_node.full_node_store.serialized_wp_message_tip is not None and self.full_node.full_node_store.serialized_wp_message_tip == request.tip ): return self.full_node.full_node_store.serialized_wp_message message = make_msg( ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip) ) self.full_node.full_node_store.serialized_wp_message_tip = request.tip self.full_node.full_node_store.serialized_wp_message = message return message @api_request async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]: self.log.warning("Received proof of weight too late.") return None @api_request @reply_type([ProtocolMessageTypes.respond_block, ProtocolMessageTypes.reject_block]) async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]: if not self.full_node.blockchain.contains_height(request.height): reject = RejectBlock(request.height) msg = make_msg(ProtocolMessageTypes.reject_block, reject) return msg header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(request.height) if header_hash is None: return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height)) block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash) if block is not None: if not request.include_transaction_block and block.transactions_generator is not None: block = dataclasses.replace(block, transactions_generator=None) return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block)) return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height)) @api_request @reply_type([ProtocolMessageTypes.respond_blocks, ProtocolMessageTypes.reject_blocks]) async def request_blocks(self, request: 
full_node_protocol.RequestBlocks) -> Optional[Message]: if request.end_height < request.start_height or request.end_height - request.start_height > 32: reject = RejectBlocks(request.start_height, request.end_height) msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject) return msg for i in range(request.start_height, request.end_height + 1): if not self.full_node.blockchain.contains_height(uint32(i)): reject = RejectBlocks(request.start_height, request.end_height) msg = make_msg(ProtocolMessageTypes.reject_blocks, reject) return msg if not request.include_transaction_block: blocks: List[FullBlock] = [] for i in range(request.start_height, request.end_height + 1): header_hash_i: Optional[bytes32] = self.full_node.blockchain.height_to_hash(uint32(i)) if header_hash_i is None: reject = RejectBlocks(request.start_height, request.end_height) return make_msg(ProtocolMessageTypes.reject_blocks, reject) block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash_i) if block is None: reject = RejectBlocks(request.start_height, request.end_height) return make_msg(ProtocolMessageTypes.reject_blocks, reject) block = dataclasses.replace(block, transactions_generator=None) blocks.append(block) msg = make_msg( ProtocolMessageTypes.respond_blocks, full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks), ) else: blocks_bytes: List[bytes] = [] for i in range(request.start_height, request.end_height + 1): header_hash_i = self.full_node.blockchain.height_to_hash(uint32(i)) if header_hash_i is None: reject = RejectBlocks(request.start_height, request.end_height) return make_msg(ProtocolMessageTypes.reject_blocks, reject) block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(header_hash_i) if block_bytes is None: reject = RejectBlocks(request.start_height, request.end_height) msg = make_msg(ProtocolMessageTypes.reject_blocks, reject) return msg blocks_bytes.append(block_bytes) 
respond_blocks_manually_streamed: bytes = ( bytes(uint32(request.start_height)) + bytes(uint32(request.end_height)) + len(blocks_bytes).to_bytes(4, "big", signed=False) ) for block_bytes in blocks_bytes: respond_blocks_manually_streamed += block_bytes msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed) return msg @api_request async def reject_block(self, request: full_node_protocol.RejectBlock) -> None: self.log.debug(f"reject_block {request.height}") @api_request async def reject_blocks(self, request: full_node_protocol.RejectBlocks) -> None: self.log.debug(f"reject_blocks {request.start_height} {request.end_height}") @api_request async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None: self.log.warning("Received unsolicited/late blocks") return None @api_request @peer_required async def respond_block( self, respond_block: full_node_protocol.RespondBlock, peer: ws.WSFlaxConnection, ) -> Optional[Message]: """ Receive a full block from a peer full node (or ourselves). 
""" self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_logging()}") return None @api_request async def new_unfinished_block( self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock ) -> Optional[Message]: # Ignore if syncing if self.full_node.sync_store.get_sync_mode(): return None block_hash = new_unfinished_block.unfinished_reward_hash if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None: return None # This prevents us from downloading the same block from many peers if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks: return None msg = make_msg( ProtocolMessageTypes.request_unfinished_block, full_node_protocol.RequestUnfinishedBlock(block_hash), ) self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash) # However, we want to eventually download from other peers, if this peer does not respond # Todo: keep track of who it was async def eventually_clear() -> None: await asyncio.sleep(5) if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks: self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash) asyncio.create_task(eventually_clear()) return msg @api_request @reply_type([ProtocolMessageTypes.respond_unfinished_block]) async def request_unfinished_block( self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock ) -> Optional[Message]: unfinished_block: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block( request_unfinished_block.unfinished_reward_hash ) if unfinished_block is not None: msg = make_msg( ProtocolMessageTypes.respond_unfinished_block, full_node_protocol.RespondUnfinishedBlock(unfinished_block), ) return msg return None @peer_required @api_request @bytes_required async def respond_unfinished_block( self, respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock, peer: ws.WSFlaxConnection, respond_unfinished_block_bytes: bytes = b"", ) -> 
Optional[Message]: if self.full_node.sync_store.get_sync_mode(): return None await self.full_node.respond_unfinished_block( respond_unfinished_block, peer, block_bytes=respond_unfinished_block_bytes ) return None @api_request @peer_required async def new_signage_point_or_end_of_sub_slot( self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSFlaxConnection ) -> Optional[Message]: # Ignore if syncing if self.full_node.sync_store.get_sync_mode(): return None if ( self.full_node.full_node_store.get_signage_point_by_index( new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion, ) is not None ): return None if self.full_node.full_node_store.have_newer_signage_point( new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion ): return None if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None: if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None: collected_eos = [] challenge_hash_to_request = new_sp.challenge_hash last_rc = new_sp.last_rc_infusion num_non_empty_sub_slots_seen = 0 for _ in range(30): if num_non_empty_sub_slots_seen >= 3: self.log.debug("Diverged from peer. 
Don't have the same blocks") return None # If this is an end of sub slot, and we don't have the prev, request the prev instead # We want to catch up to the latest slot so we can receive signage points full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot( challenge_hash_to_request, uint8(0), last_rc ) response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10) if not isinstance(response, full_node_protocol.RespondEndOfSubSlot): self.full_node.log.debug(f"Invalid response for slot {response}") return None collected_eos.append(response) if ( self.full_node.full_node_store.get_sub_slot( response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge ) is not None or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge == self.full_node.constants.GENESIS_CHALLENGE ): for eos in reversed(collected_eos): await self.respond_end_of_sub_slot(eos, peer) return None if ( response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations != response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations ): num_non_empty_sub_slots_seen += 1 challenge_hash_to_request = ( response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge ) last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge self.full_node.log.warning("Failed to catch up in sub-slots") return None if new_sp.index_from_challenge > 0: if ( new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None ): # If this is a normal signage point,, and we don't have the end of sub slot, request the end of sub slot full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot( new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion ) return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request) # 
Otherwise (we have the prev or the end of sub slot), request it normally full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot( new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion ) return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request) @api_request @reply_type([ProtocolMessageTypes.respond_signage_point, ProtocolMessageTypes.respond_end_of_sub_slot]) async def request_signage_point_or_end_of_sub_slot( self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot ) -> Optional[Message]: if request.index_from_challenge == 0: sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot( request.challenge_hash ) if sub_slot is not None: return make_msg( ProtocolMessageTypes.respond_end_of_sub_slot, full_node_protocol.RespondEndOfSubSlot(sub_slot[0]), ) else: if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None: if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE: self.log.info(f"Don't have challenge hash {request.challenge_hash}") sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index( request.challenge_hash, request.index_from_challenge, request.last_rc_infusion, ) if sp is not None: assert ( sp.cc_vdf is not None and sp.cc_proof is not None and sp.rc_vdf is not None and sp.rc_proof is not None ) full_node_response = full_node_protocol.RespondSignagePoint( request.index_from_challenge, sp.cc_vdf, sp.cc_proof, sp.rc_vdf, sp.rc_proof, ) return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response) else: self.log.info(f"Don't have signage point {request}") return None @peer_required @api_request async def respond_signage_point( self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSFlaxConnection ) -> Optional[Message]: if self.full_node.sync_store.get_sync_mode(): return None async with self.full_node.timelord_lock: # Already 
have signage point if self.full_node.full_node_store.have_newer_signage_point( request.challenge_chain_vdf.challenge, request.index_from_challenge, request.reward_chain_vdf.challenge, ): return None existing_sp = self.full_node.full_node_store.get_signage_point( request.challenge_chain_vdf.output.get_hash() ) if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf: return None peak = self.full_node.blockchain.get_peak() if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS: next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True) sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash) assert sub_slots_for_peak is not None ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1] else: sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING next_sub_slot_iters = sub_slot_iters ip_sub_slot = None added = self.full_node.full_node_store.new_signage_point( request.index_from_challenge, self.full_node.blockchain, self.full_node.blockchain.get_peak(), next_sub_slot_iters, SignagePoint( request.challenge_chain_vdf, request.challenge_chain_proof, request.reward_chain_vdf, request.reward_chain_proof, ), ) if added: await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot) else: self.log.debug( f"Signage point {request.index_from_challenge} not added, CC challenge: " f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}" ) return None @peer_required @api_request async def respond_end_of_sub_slot( self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSFlaxConnection ) -> Optional[Message]: if self.full_node.sync_store.get_sync_mode(): return None msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer) return msg @peer_required @api_request async def request_mempool_transactions( self, request: full_node_protocol.RequestMempoolTransactions, peer: 
ws.WSFlaxConnection, ) -> Optional[Message]: received_filter = PyBIP158(bytearray(request.filter)) items: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(received_filter) for item in items: transaction = full_node_protocol.RespondTransaction(item.spend_bundle) msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction) await peer.send_message(msg) return None # FARMER PROTOCOL @api_request @peer_required async def declare_proof_of_space( self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSFlaxConnection ) -> Optional[Message]: """ Creates a block body and header, with the proof of space, coinbase, and fee targets provided by the farmer, and sends the hash of the header data back to the farmer. """
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/full_node_store.py
flax/full_node/full_node_store.py
from __future__ import annotations import asyncio import dataclasses import logging import time from typing import Dict, List, Optional, Set, Tuple from flax.consensus.block_record import BlockRecord from flax.consensus.blockchain_interface import BlockchainInterface from flax.consensus.constants import ConsensusConstants from flax.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch from flax.consensus.make_sub_epoch_summary import next_sub_epoch_summary from flax.consensus.multiprocess_validation import PreValidationResult from flax.consensus.pot_iterations import calculate_sp_interval_iters from flax.full_node.signage_point import SignagePoint from flax.protocols import timelord_protocol from flax.server.outbound_message import Message from flax.types.blockchain_format.classgroup import ClassgroupElement from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary from flax.types.blockchain_format.vdf import VDFInfo from flax.types.end_of_slot_bundle import EndOfSubSlotBundle from flax.types.full_block import FullBlock from flax.types.generator_types import CompressorArg from flax.types.unfinished_block import UnfinishedBlock from flax.util.ints import uint8, uint32, uint64, uint128 from flax.util.lru_cache import LRUCache from flax.util.streamable import Streamable, streamable log = logging.getLogger(__name__) @streamable @dataclasses.dataclass(frozen=True) class FullNodeStorePeakResult(Streamable): added_eos: Optional[EndOfSubSlotBundle] new_signage_points: List[Tuple[uint8, SignagePoint]] new_infusion_points: List[timelord_protocol.NewInfusionPointVDF] class FullNodeStore: constants: ConsensusConstants # Blocks which we have created, but don't have plot signatures yet, so not yet "unfinished blocks" candidate_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]] candidate_backup_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]] # Header hashes of unfinished blocks 
that we have seen recently seen_unfinished_blocks: Set[bytes32] # Unfinished blocks, keyed from reward hash unfinished_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]] # Finished slots and sps from the peak's slot onwards # We store all 32 SPs for each slot, starting as 32 Nones and filling them as we go # Also stores the total iters at the end of slot # For the first sub-slot, EndOfSlotBundle is None finished_sub_slots: List[Tuple[Optional[EndOfSubSlotBundle], List[Optional[SignagePoint]], uint128]] # These caches maintain objects which depend on infused blocks in the reward chain, that we # might receive before the blocks themselves. The dict keys are the reward chain challenge hashes. # End of slots which depend on infusions that we don't have future_eos_cache: Dict[bytes32, List[EndOfSubSlotBundle]] # Signage points which depend on infusions that we don't have future_sp_cache: Dict[bytes32, List[Tuple[uint8, SignagePoint]]] # Infusion point VDFs which depend on infusions that we don't have future_ip_cache: Dict[bytes32, List[timelord_protocol.NewInfusionPointVDF]] # This stores the time that each key was added to the future cache, so we can clear old keys future_cache_key_times: Dict[bytes32, int] # These recent caches are for pooling support recent_signage_points: LRUCache[bytes32, Tuple[SignagePoint, float]] recent_eos: LRUCache[bytes32, Tuple[EndOfSubSlotBundle, float]] # Partial hashes of unfinished blocks we are requesting requesting_unfinished_blocks: Set[bytes32] previous_generator: Optional[CompressorArg] pending_tx_request: Dict[bytes32, bytes32] # tx_id: peer_id peers_with_tx: Dict[bytes32, Set[bytes32]] # tx_id: Set[peer_ids} tx_fetch_tasks: Dict[bytes32, asyncio.Task[None]] # Task id: task serialized_wp_message: Optional[Message] serialized_wp_message_tip: Optional[bytes32] def __init__(self, constants: ConsensusConstants): self.candidate_blocks = {} self.candidate_backup_blocks = {} self.seen_unfinished_blocks = set() 
self.unfinished_blocks = {} self.finished_sub_slots = [] self.future_eos_cache = {} self.future_sp_cache = {} self.future_ip_cache = {} self.recent_signage_points = LRUCache(500) self.recent_eos = LRUCache(50) self.requesting_unfinished_blocks = set() self.previous_generator = None self.future_cache_key_times = {} self.constants = constants self.clear_slots() self.initialize_genesis_sub_slot() self.pending_tx_request = {} self.peers_with_tx = {} self.tx_fetch_tasks = {} self.serialized_wp_message = None self.serialized_wp_message_tip = None def add_candidate_block( self, quality_string: bytes32, height: uint32, unfinished_block: UnfinishedBlock, backup: bool = False ) -> None: if backup: self.candidate_backup_blocks[quality_string] = (height, unfinished_block) else: self.candidate_blocks[quality_string] = (height, unfinished_block) def get_candidate_block( self, quality_string: bytes32, backup: bool = False ) -> Optional[Tuple[uint32, UnfinishedBlock]]: if backup: return self.candidate_backup_blocks.get(quality_string, None) else: return self.candidate_blocks.get(quality_string, None) def clear_candidate_blocks_below(self, height: uint32) -> None: del_keys = [] for key, value in self.candidate_blocks.items(): if value[0] < height: del_keys.append(key) for key in del_keys: try: del self.candidate_blocks[key] except KeyError: pass del_keys = [] for key, value in self.candidate_backup_blocks.items(): if value[0] < height: del_keys.append(key) for key in del_keys: try: del self.candidate_backup_blocks[key] except KeyError: pass def seen_unfinished_block(self, object_hash: bytes32) -> bool: if object_hash in self.seen_unfinished_blocks: return True self.seen_unfinished_blocks.add(object_hash) return False def clear_seen_unfinished_blocks(self) -> None: self.seen_unfinished_blocks.clear() def add_unfinished_block( self, height: uint32, unfinished_block: UnfinishedBlock, result: PreValidationResult ) -> None: self.unfinished_blocks[unfinished_block.partial_hash] = 
(height, unfinished_block, result) def get_unfinished_block(self, unfinished_reward_hash: bytes32) -> Optional[UnfinishedBlock]: result = self.unfinished_blocks.get(unfinished_reward_hash, None) if result is None: return None return result[1] def get_unfinished_block_result(self, unfinished_reward_hash: bytes32) -> Optional[PreValidationResult]: result = self.unfinished_blocks.get(unfinished_reward_hash, None) if result is None: return None return result[2] def get_unfinished_blocks(self) -> Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]: return self.unfinished_blocks def clear_unfinished_blocks_below(self, height: uint32) -> None: del_keys: List[bytes32] = [] for partial_reward_hash, (unf_height, unfinished_block, _) in self.unfinished_blocks.items(): if unf_height < height: del_keys.append(partial_reward_hash) for del_key in del_keys: del self.unfinished_blocks[del_key] def remove_unfinished_block(self, partial_reward_hash: bytes32) -> None: if partial_reward_hash in self.unfinished_blocks: del self.unfinished_blocks[partial_reward_hash] def add_to_future_ip(self, infusion_point: timelord_protocol.NewInfusionPointVDF) -> None: ch: bytes32 = infusion_point.reward_chain_ip_vdf.challenge if ch not in self.future_ip_cache: self.future_ip_cache[ch] = [] self.future_ip_cache[ch].append(infusion_point) def in_future_sp_cache(self, signage_point: SignagePoint, index: uint8) -> bool: if signage_point.rc_vdf is None: return False if signage_point.rc_vdf.challenge not in self.future_sp_cache: return False for cache_index, cache_sp in self.future_sp_cache[signage_point.rc_vdf.challenge]: if cache_index == index and cache_sp.rc_vdf == signage_point.rc_vdf: return True return False def add_to_future_sp(self, signage_point: SignagePoint, index: uint8) -> None: # We are missing a block here if ( signage_point.cc_vdf is None or signage_point.rc_vdf is None or signage_point.cc_proof is None or signage_point.rc_proof is None ): return None if 
signage_point.rc_vdf.challenge not in self.future_sp_cache: self.future_sp_cache[signage_point.rc_vdf.challenge] = [] if self.in_future_sp_cache(signage_point, index): return None self.future_cache_key_times[signage_point.rc_vdf.challenge] = int(time.time()) self.future_sp_cache[signage_point.rc_vdf.challenge].append((index, signage_point)) log.info(f"Don't have rc hash {signage_point.rc_vdf.challenge}. caching signage point {index}.") def get_future_ip(self, rc_challenge_hash: bytes32) -> List[timelord_protocol.NewInfusionPointVDF]: return self.future_ip_cache.get(rc_challenge_hash, []) def clear_old_cache_entries(self) -> None: current_time: int = int(time.time()) remove_keys: List[bytes32] = [] for rc_hash, time_added in self.future_cache_key_times.items(): if current_time - time_added > 3600: remove_keys.append(rc_hash) for k in remove_keys: self.future_cache_key_times.pop(k, None) self.future_ip_cache.pop(k, []) self.future_eos_cache.pop(k, []) self.future_sp_cache.pop(k, []) def clear_slots(self) -> None: self.finished_sub_slots.clear() def get_sub_slot(self, challenge_hash: bytes32) -> Optional[Tuple[EndOfSubSlotBundle, int, uint128]]: assert len(self.finished_sub_slots) >= 1 for index, (sub_slot, _, total_iters) in enumerate(self.finished_sub_slots): if sub_slot is not None and sub_slot.challenge_chain.get_hash() == challenge_hash: return sub_slot, index, total_iters return None def initialize_genesis_sub_slot(self) -> None: self.clear_slots() self.finished_sub_slots = [(None, [None] * self.constants.NUM_SPS_SUB_SLOT, uint128(0))] def new_finished_sub_slot( self, eos: EndOfSubSlotBundle, blocks: BlockchainInterface, peak: Optional[BlockRecord], peak_full_block: Optional[FullBlock], ) -> Optional[List[timelord_protocol.NewInfusionPointVDF]]: """ Returns false if not added. Returns a list if added. 
The list contains all infusion points that depended on this sub slot """ assert len(self.finished_sub_slots) >= 1 assert (peak is None) == (peak_full_block is None) last_slot, _, last_slot_iters = self.finished_sub_slots[-1] cc_challenge: bytes32 = ( last_slot.challenge_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE ) rc_challenge: bytes32 = ( last_slot.reward_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE ) icc_challenge: Optional[bytes32] = None icc_iters: Optional[uint64] = None # Skip if already present for slot, _, _ in self.finished_sub_slots: if slot == eos: return [] if eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge != cc_challenge: # This slot does not append to our next slot # This prevent other peers from appending fake VDFs to our cache return None if peak is None: sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING else: sub_slot_iters = peak.sub_slot_iters total_iters = uint128(last_slot_iters + sub_slot_iters) if peak is not None and peak.total_iters > last_slot_iters: # Peak is in this slot # Note: Adding an end of subslot does not lock the blockchain, for performance reasons. Only the # timelord_lock is used. Therefore, it's possible that we add a new peak at the same time as seeing # the finished subslot, and the peak is not fully added yet, so it looks like we still need the subslot. # In that case, we will exit here and let the new_peak code add the subslot. 
if total_iters < peak.total_iters: return None rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge cc_start_element = peak.challenge_vdf_output iters = uint64(total_iters - peak.total_iters) if peak.reward_infusion_new_challenge != rc_challenge: # We don't have this challenge hash yet if rc_challenge not in self.future_eos_cache: self.future_eos_cache[rc_challenge] = [] self.future_eos_cache[rc_challenge].append(eos) self.future_cache_key_times[rc_challenge] = int(time.time()) log.info(f"Don't have challenge hash {rc_challenge}, caching EOS") return None if peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK: icc_start_element = None elif peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1: icc_start_element = ClassgroupElement.get_default_element() else: icc_start_element = peak.infused_challenge_vdf_output if peak.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK: curr = peak while not curr.first_in_sub_slot and not curr.is_challenge_block(self.constants): curr = blocks.block_record(curr.prev_hash) if curr.is_challenge_block(self.constants): icc_challenge = curr.challenge_block_info_hash icc_iters = uint64(total_iters - curr.total_iters) else: assert curr.finished_infused_challenge_slot_hashes is not None icc_challenge = curr.finished_infused_challenge_slot_hashes[-1] icc_iters = sub_slot_iters assert icc_challenge is not None if can_finish_sub_and_full_epoch( self.constants, blocks, peak.height, peak.prev_hash, peak.deficit, peak.sub_epoch_summary_included is not None, )[0]: assert peak_full_block is not None ses: Optional[SubEpochSummary] = next_sub_epoch_summary( self.constants, blocks, peak.required_iters, peak_full_block, True ) if ses is not None: if eos.challenge_chain.subepoch_summary_hash != ses.get_hash(): log.warning(f"SES not correct {ses.get_hash(), eos.challenge_chain}") return None else: if eos.challenge_chain.subepoch_summary_hash is not None: log.warning("SES not correct, should be None") return None else: # 
This is on an empty slot cc_start_element = ClassgroupElement.get_default_element() icc_start_element = ClassgroupElement.get_default_element() iters = sub_slot_iters icc_iters = sub_slot_iters # The icc should only be present if the previous slot had an icc too, and not deficit 0 (just finished slot) icc_challenge = ( last_slot.infused_challenge_chain.get_hash() if last_slot is not None and last_slot.infused_challenge_chain is not None and last_slot.reward_chain.deficit != self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK else None ) # Validate cc VDF partial_cc_vdf_info = VDFInfo( cc_challenge, iters, eos.challenge_chain.challenge_chain_end_of_slot_vdf.output, ) # The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak if eos.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace( partial_cc_vdf_info, number_of_iterations=sub_slot_iters, ): return None if ( not eos.proofs.challenge_chain_slot_proof.normalized_to_identity and not eos.proofs.challenge_chain_slot_proof.is_valid( self.constants, cc_start_element, partial_cc_vdf_info, ) ): return None if ( eos.proofs.challenge_chain_slot_proof.normalized_to_identity and not eos.proofs.challenge_chain_slot_proof.is_valid( self.constants, ClassgroupElement.get_default_element(), eos.challenge_chain.challenge_chain_end_of_slot_vdf, ) ): return None # Validate reward chain VDF if not eos.proofs.reward_chain_slot_proof.is_valid( self.constants, ClassgroupElement.get_default_element(), eos.reward_chain.end_of_slot_vdf, VDFInfo(rc_challenge, iters, eos.reward_chain.end_of_slot_vdf.output), ): return None if icc_challenge is not None: assert icc_start_element is not None assert icc_iters is not None assert eos.infused_challenge_chain is not None assert eos.infused_challenge_chain is not None assert eos.proofs.infused_challenge_chain_slot_proof is not None partial_icc_vdf_info = VDFInfo( icc_challenge, iters, 
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output, ) # The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak if eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace( partial_icc_vdf_info, number_of_iterations=icc_iters, ): return None if ( not eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity and not eos.proofs.infused_challenge_chain_slot_proof.is_valid( self.constants, icc_start_element, partial_icc_vdf_info ) ): return None if ( eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity and not eos.proofs.infused_challenge_chain_slot_proof.is_valid( self.constants, ClassgroupElement.get_default_element(), eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf, ) ): return None else: # This is the first sub slot and it's empty, therefore there is no ICC if eos.infused_challenge_chain is not None or eos.proofs.infused_challenge_chain_slot_proof is not None: return None self.finished_sub_slots.append((eos, [None] * self.constants.NUM_SPS_SUB_SLOT, total_iters)) new_cc_hash = eos.challenge_chain.get_hash() self.recent_eos.put(new_cc_hash, (eos, time.time())) new_ips: List[timelord_protocol.NewInfusionPointVDF] = [] for ip in self.future_ip_cache.get(eos.reward_chain.get_hash(), []): new_ips.append(ip) return new_ips def new_signage_point( self, index: uint8, blocks: BlockchainInterface, peak: Optional[BlockRecord], next_sub_slot_iters: uint64, signage_point: SignagePoint, skip_vdf_validation: bool = False, ) -> bool: """ Returns true if sp successfully added """ assert len(self.finished_sub_slots) >= 1 if peak is None or peak.height < 2: sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING else: sub_slot_iters = peak.sub_slot_iters # If we don't have this slot, return False if index == 0 or index >= self.constants.NUM_SPS_SUB_SLOT: return False assert ( signage_point.cc_vdf is not None and signage_point.cc_proof is not 
None and signage_point.rc_vdf is not None and signage_point.rc_proof is not None ) for sub_slot, sp_arr, start_ss_total_iters in self.finished_sub_slots: if sub_slot is None: assert start_ss_total_iters == 0 ss_challenge_hash = self.constants.GENESIS_CHALLENGE ss_reward_hash = self.constants.GENESIS_CHALLENGE else: ss_challenge_hash = sub_slot.challenge_chain.get_hash() ss_reward_hash = sub_slot.reward_chain.get_hash() if ss_challenge_hash == signage_point.cc_vdf.challenge: # If we do have this slot, find the Prev block from SP and validate SP if peak is not None and start_ss_total_iters > peak.total_iters: # We are in a future sub slot from the peak, so maybe there is a new SSI checkpoint_size: uint64 = uint64(next_sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT) delta_iters: uint64 = uint64(checkpoint_size * index) future_sub_slot: bool = True else: # We are not in a future sub slot from the peak, so there is no new SSI checkpoint_size = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT) delta_iters = uint64(checkpoint_size * index) future_sub_slot = False sp_total_iters = start_ss_total_iters + delta_iters curr = peak if peak is None or future_sub_slot: check_from_start_of_ss = True else: check_from_start_of_ss = False while ( curr is not None and curr.total_iters > start_ss_total_iters and curr.total_iters > sp_total_iters ): if curr.first_in_sub_slot: # Did not find a block where it's iters are before our sp_total_iters, in this ss check_from_start_of_ss = True break curr = blocks.block_record(curr.prev_hash) if check_from_start_of_ss: # Check VDFs from start of sub slot cc_vdf_info_expected = VDFInfo( ss_challenge_hash, delta_iters, signage_point.cc_vdf.output, ) rc_vdf_info_expected = VDFInfo( ss_reward_hash, delta_iters, signage_point.rc_vdf.output, ) else: # Check VDFs from curr assert curr is not None cc_vdf_info_expected = VDFInfo( ss_challenge_hash, uint64(sp_total_iters - curr.total_iters), signage_point.cc_vdf.output, ) rc_vdf_info_expected = 
VDFInfo( curr.reward_infusion_new_challenge, uint64(sp_total_iters - curr.total_iters), signage_point.rc_vdf.output, ) if not signage_point.cc_vdf == dataclasses.replace( cc_vdf_info_expected, number_of_iterations=delta_iters ): self.add_to_future_sp(signage_point, index) return False if check_from_start_of_ss: start_ele = ClassgroupElement.get_default_element() else: assert curr is not None start_ele = curr.challenge_vdf_output if not skip_vdf_validation: if not signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid( self.constants, start_ele, cc_vdf_info_expected, ): self.add_to_future_sp(signage_point, index) return False if signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid( self.constants, ClassgroupElement.get_default_element(), signage_point.cc_vdf, ): self.add_to_future_sp(signage_point, index) return False if rc_vdf_info_expected.challenge != signage_point.rc_vdf.challenge: # This signage point is probably outdated self.add_to_future_sp(signage_point, index) return False if not skip_vdf_validation: if not signage_point.rc_proof.is_valid( self.constants, ClassgroupElement.get_default_element(), signage_point.rc_vdf, rc_vdf_info_expected, ): self.add_to_future_sp(signage_point, index) return False sp_arr[index] = signage_point self.recent_signage_points.put(signage_point.cc_vdf.output.get_hash(), (signage_point, time.time())) return True self.add_to_future_sp(signage_point, index) return False def get_signage_point(self, cc_signage_point: bytes32) -> Optional[SignagePoint]: assert len(self.finished_sub_slots) >= 1 if cc_signage_point == self.constants.GENESIS_CHALLENGE: return SignagePoint(None, None, None, None) for sub_slot, sps, _ in self.finished_sub_slots: if sub_slot is not None and sub_slot.challenge_chain.get_hash() == cc_signage_point: return SignagePoint(None, None, None, None) for sp in sps: if sp is not None: assert sp.cc_vdf is not None if sp.cc_vdf.output.get_hash() == 
cc_signage_point: return sp return None def get_signage_point_by_index( self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32 ) -> Optional[SignagePoint]: assert len(self.finished_sub_slots) >= 1 for sub_slot, sps, _ in self.finished_sub_slots: if sub_slot is not None: cc_hash = sub_slot.challenge_chain.get_hash() else: cc_hash = self.constants.GENESIS_CHALLENGE if cc_hash == challenge_hash: if index == 0: return SignagePoint(None, None, None, None) sp: Optional[SignagePoint] = sps[index] if sp is not None: assert sp.rc_vdf is not None if sp.rc_vdf.challenge == last_rc_infusion: return sp return None return None def have_newer_signage_point(self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32) -> bool: """ Returns true if we have a signage point at this index which is based on a newer infusion. """ assert len(self.finished_sub_slots) >= 1 for sub_slot, sps, _ in self.finished_sub_slots: if sub_slot is not None: cc_hash = sub_slot.challenge_chain.get_hash() else: cc_hash = self.constants.GENESIS_CHALLENGE if cc_hash == challenge_hash: found_rc_hash = False for i in range(0, index): sp: Optional[SignagePoint] = sps[i] if sp is not None and sp.rc_vdf is not None and sp.rc_vdf.challenge == last_rc_infusion: found_rc_hash = True sp = sps[index] if ( found_rc_hash and sp is not None and sp.rc_vdf is not None and sp.rc_vdf.challenge != last_rc_infusion ): return True return False def new_peak( self, peak: BlockRecord, peak_full_block: FullBlock, sp_sub_slot: Optional[EndOfSubSlotBundle], # None if not overflow, or in first/second slot ip_sub_slot: Optional[EndOfSubSlotBundle], # None if in first slot fork_block: Optional[BlockRecord], blocks: BlockchainInterface, ) -> FullNodeStorePeakResult: """ If the peak is an overflow block, must provide two sub-slots: one for the current sub-slot and one for the prev sub-slot (since we still might get more blocks with an sp in the previous sub-slot) Results in either one or two sub-slots in 
finished_sub_slots. """ assert len(self.finished_sub_slots) >= 1 if ip_sub_slot is None: # We are still in the first sub-slot, no new sub slots ey self.initialize_genesis_sub_slot() else: # This is not the first sub-slot in the chain sp_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT ip_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT if fork_block is not None and fork_block.sub_slot_iters != peak.sub_slot_iters: # If there was a reorg and a difficulty adjustment, just clear all the slots self.clear_slots() else: interval_iters = calculate_sp_interval_iters(self.constants, peak.sub_slot_iters) # If it's not a reorg, or there is a reorg on the same difficulty, we can keep signage points # that we had before, in the cache for index, (sub_slot, sps, total_iters) in enumerate(self.finished_sub_slots): if sub_slot is None: continue if fork_block is None: # If this is not a reorg, we still want to remove signage points after the new peak fork_block = peak replaced_sps: List[Optional[SignagePoint]] = [] # index 0 is the end of sub slot for i, sp in enumerate(sps): if (total_iters + i * interval_iters) < fork_block.total_iters: # Sps before the fork point as still valid replaced_sps.append(sp) else: if sp is not None: log.debug( f"Reverting {i} {(total_iters + i * interval_iters)} {fork_block.total_iters}" ) # Sps after the fork point should be removed replaced_sps.append(None) assert len(sps) == len(replaced_sps) if sub_slot == sp_sub_slot: sp_sub_slot_sps = replaced_sps if sub_slot == ip_sub_slot: ip_sub_slot_sps = replaced_sps self.clear_slots()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/full_node/signage_point.py
flax/full_node/signage_point.py
from __future__ import annotations from dataclasses import dataclass from typing import Optional from flax.types.blockchain_format.vdf import VDFInfo, VDFProof from flax.util.streamable import Streamable, streamable @streamable @dataclass(frozen=True) class SignagePoint(Streamable): cc_vdf: Optional[VDFInfo] cc_proof: Optional[VDFProof] rc_vdf: Optional[VDFInfo] rc_proof: Optional[VDFProof]
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/daemon/client.py
flax/daemon/client.py
from __future__ import annotations import asyncio import json import ssl from contextlib import asynccontextmanager from pathlib import Path from typing import Any, AsyncIterator, Dict, Optional import aiohttp from flax.util.json_util import dict_to_json_str from flax.util.ws_message import WsRpcMessage, create_payload_dict class DaemonProxy: def __init__( self, uri: str, ssl_context: Optional[ssl.SSLContext], max_message_size: int = 50 * 1000 * 1000, ): self._uri = uri self._request_dict: Dict[str, asyncio.Event] = {} self.response_dict: Dict[str, WsRpcMessage] = {} self.ssl_context = ssl_context self.client_session: Optional[aiohttp.ClientSession] = None self.websocket: Optional[aiohttp.ClientWebSocketResponse] = None self.max_message_size = max_message_size def format_request(self, command: str, data: Dict[str, Any]) -> WsRpcMessage: request = create_payload_dict(command, data, "client", "daemon") return request async def start(self) -> None: try: self.client_session = aiohttp.ClientSession() self.websocket = await self.client_session.ws_connect( self._uri, autoclose=True, autoping=True, heartbeat=60, ssl_context=self.ssl_context, max_msg_size=self.max_message_size, ) except Exception: await self.close() raise async def listener_task() -> None: try: await self.listener() finally: await self.close() asyncio.create_task(listener_task()) await asyncio.sleep(1) async def listener(self) -> None: if self.websocket is None: raise TypeError("Websocket is None in listener!") while True: message = await self.websocket.receive() if message.type == aiohttp.WSMsgType.TEXT: decoded: WsRpcMessage = json.loads(message.data) request_id = decoded["request_id"] if request_id in self._request_dict: self.response_dict[request_id] = decoded self._request_dict[request_id].set() else: return None async def _get(self, request: WsRpcMessage) -> WsRpcMessage: request_id = request["request_id"] self._request_dict[request_id] = asyncio.Event() string = dict_to_json_str(request) if 
self.websocket is None or self.websocket.closed: raise Exception("Websocket is not connected") asyncio.create_task(self.websocket.send_str(string)) try: await asyncio.wait_for(self._request_dict[request_id].wait(), timeout=30) self._request_dict.pop(request_id) response: WsRpcMessage = self.response_dict[request_id] self.response_dict.pop(request_id) return response except asyncio.TimeoutError: self._request_dict.pop(request_id) raise Exception(f"No response from daemon for request_id: {request_id}") async def get_version(self) -> WsRpcMessage: data: Dict[str, Any] = {} request = self.format_request("get_version", data) response = await self._get(request) return response async def start_service(self, service_name: str) -> WsRpcMessage: data = {"service": service_name} request = self.format_request("start_service", data) response = await self._get(request) return response async def stop_service(self, service_name: str, delay_before_kill: int = 15) -> WsRpcMessage: data = {"service": service_name} request = self.format_request("stop_service", data) response = await self._get(request) return response async def is_running(self, service_name: str) -> bool: data = {"service": service_name} request = self.format_request("is_running", data) response = await self._get(request) if "is_running" in response["data"]: return bool(response["data"]["is_running"]) return False async def is_keyring_locked(self) -> bool: data: Dict[str, Any] = {} request = self.format_request("is_keyring_locked", data) response = await self._get(request) if "is_keyring_locked" in response["data"]: return bool(response["data"]["is_keyring_locked"]) return False async def unlock_keyring(self, passphrase: str) -> WsRpcMessage: data = {"key": passphrase} request = self.format_request("unlock_keyring", data) response = await self._get(request) return response async def notify_keyring_migration_completed(self, passphrase: Optional[str]) -> WsRpcMessage: data: Dict[str, Any] = {"key": passphrase} request: 
WsRpcMessage = self.format_request("notify_keyring_migration_completed", data) response: WsRpcMessage = await self._get(request) return response async def ping(self) -> WsRpcMessage: request = self.format_request("ping", {}) response = await self._get(request) return response async def close(self) -> None: if self.websocket is not None: await self.websocket.close() if self.client_session is not None: await self.client_session.close() async def exit(self) -> WsRpcMessage: request = self.format_request("exit", {}) return await self._get(request) async def connect_to_daemon( self_hostname: str, daemon_port: int, max_message_size: int, ssl_context: ssl.SSLContext ) -> DaemonProxy: """ Connect to the local daemon. """ client = DaemonProxy(f"wss://{self_hostname}:{daemon_port}", ssl_context, max_message_size) await client.start() return client async def connect_to_daemon_and_validate( root_path: Path, config: Dict[str, Any], quiet: bool = False ) -> Optional[DaemonProxy]: """ Connect to the local daemon and do a ping to ensure that something is really there and running. 
""" from flax.server.server import ssl_context_for_client try: daemon_max_message_size = config.get("daemon_max_message_size", 50 * 1000 * 1000) crt_path = root_path / config["daemon_ssl"]["private_crt"] key_path = root_path / config["daemon_ssl"]["private_key"] ca_crt_path = root_path / config["private_ssl_ca"]["crt"] ca_key_path = root_path / config["private_ssl_ca"]["key"] ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path) connection = await connect_to_daemon( config["self_hostname"], config["daemon_port"], daemon_max_message_size, ssl_context ) r = await connection.ping() if "value" in r["data"] and r["data"]["value"] == "pong": return connection except Exception: if not quiet: print("Daemon not started yet") return None return None @asynccontextmanager async def acquire_connection_to_daemon( root_path: Path, config: Dict[str, Any], quiet: bool = False ) -> AsyncIterator[Optional[DaemonProxy]]: """ Asynchronous context manager which attempts to create a connection to the daemon. The connection object (DaemonProxy) is yielded to the caller. After the caller's block exits scope, execution resumes in this function, wherein the connection is closed. """ daemon: Optional[DaemonProxy] = None try: daemon = await connect_to_daemon_and_validate(root_path, config, quiet=quiet) yield daemon # <---- except Exception as e: print(f"Exception occurred while communicating with the daemon: {e}") if daemon is not None: await daemon.close()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/daemon/keychain_proxy.py
flax/daemon/keychain_proxy.py
import asyncio import logging import ssl import traceback from aiohttp import ClientSession, ClientConnectorError from blspy import AugSchemeMPL, PrivateKey from flax.cmds.init_funcs import check_keys from flax.daemon.client import DaemonProxy from flax.daemon.keychain_server import ( KEYCHAIN_ERR_KEYERROR, KEYCHAIN_ERR_LOCKED, KEYCHAIN_ERR_MALFORMED_REQUEST, KEYCHAIN_ERR_NO_KEYS, KEYCHAIN_ERR_KEY_NOT_FOUND, ) from flax.server.server import ssl_context_for_client from flax.util.config import load_config from flax.util.errors import ( KeychainIsLocked, KeychainIsEmpty, KeychainKeyNotFound, KeychainMalformedRequest, KeychainMalformedResponse, KeychainProxyConnectionTimeout, ) from flax.util.keychain import ( Keychain, bytes_to_mnemonic, mnemonic_to_seed, ) from flax.util.ws_message import WsRpcMessage from pathlib import Path from typing import Any, Dict, List, Optional, Tuple class KeychainProxy(DaemonProxy): """ KeychainProxy can act on behalf of a local or remote keychain. In the case of wrapping a local keychain, the proxy object simply forwards-along the calls to the underlying local keychain. In the remote case, calls are made to the daemon over the RPC interface, allowing the daemon to act as the keychain authority. 
""" def __init__( self, log: logging.Logger, uri: str = "", ssl_context: Optional[ssl.SSLContext] = None, local_keychain: Optional[Keychain] = None, user: Optional[str] = None, service: Optional[str] = None, ): super().__init__(uri, ssl_context) self.log = log if local_keychain: self.keychain = local_keychain else: self.keychain = None # type: ignore self.keychain_user = user self.keychain_service = service # these are used to track and close the keychain connection self.keychain_connection_task: Optional[asyncio.Task[None]] = None self.shut_down: bool = False self.connection_established: asyncio.Event = asyncio.Event() def use_local_keychain(self) -> bool: """ Indicates whether the proxy forwards calls to a local keychain """ return self.keychain is not None def format_request(self, command: str, data: Dict[str, Any]) -> WsRpcMessage: """ Overrides DaemonProxy.format_request() to add keychain-specific RPC params """ if data is None: data = {} if self.keychain_user or self.keychain_service: data["kc_user"] = self.keychain_user data["kc_service"] = self.keychain_service return super().format_request(command, data) async def _get(self, request: WsRpcMessage) -> WsRpcMessage: """ Overrides DaemonProxy._get() to handle the connection state """ try: if not self.shut_down: # if we are shut down, and we send a request we should throw original error. await asyncio.wait_for(self.connection_established.wait(), timeout=30) # in case of heavy swap usage. else: self.log.error("Attempting to send request to a keychain-proxy that has shut down.") self.log.debug(f"Sending request to keychain command: {request['command']} from {request['origin']}.") return await super()._get(request) except asyncio.TimeoutError: raise KeychainProxyConnectionTimeout() async def start(self) -> None: self.keychain_connection_task = asyncio.create_task(self.connect_to_keychain()) await self.connection_established.wait() # wait until connection is established. 
async def connect_to_keychain(self) -> None: while not self.shut_down: try: self.client_session = ClientSession() self.websocket = await self.client_session.ws_connect( self._uri, autoclose=True, autoping=True, heartbeat=60, ssl_context=self.ssl_context, max_msg_size=self.max_message_size, ) await self.listener() except ClientConnectorError: self.log.warning(f"Can not connect to keychain at {self._uri}.") except Exception as e: tb = traceback.format_exc() self.log.warning(f"Exception: {tb} {type(e)}") self.log.info(f"Reconnecting to keychain at {self._uri}.") self.connection_established.clear() if self.websocket is not None: await self.websocket.close() if self.client_session is not None: await self.client_session.close() self.websocket = None self.client_session = None await asyncio.sleep(2) async def listener(self) -> None: self.connection_established.set() # mark connection as active. await super().listener() self.log.info("Close signal received from keychain, we probably timed out.") async def close(self) -> None: self.shut_down = True await super().close() if self.keychain_connection_task is not None: await self.keychain_connection_task async def get_response_for_request(self, request_name: str, data: Dict[str, Any]) -> Tuple[WsRpcMessage, bool]: request = self.format_request(request_name, data) response = await self._get(request) success = response["data"].get("success", False) return response, success def handle_error(self, response: WsRpcMessage) -> None: """ Common error handling for RPC responses """ error = response["data"].get("error", None) if error: error_details = response["data"].get("error_details", {}) if error == KEYCHAIN_ERR_LOCKED: raise KeychainIsLocked() elif error == KEYCHAIN_ERR_NO_KEYS: raise KeychainIsEmpty() elif error == KEYCHAIN_ERR_KEY_NOT_FOUND: raise KeychainKeyNotFound() elif error == KEYCHAIN_ERR_MALFORMED_REQUEST: message = error_details.get("message", "") raise KeychainMalformedRequest(message) else: # Try to construct a more 
informative error message including the call that failed if "command" in response["data"]: err = f"{response['data'].get('command')} failed with error: {error}" raise Exception(f"{err}") raise Exception(f"{error}") async def add_private_key(self, mnemonic: str) -> PrivateKey: """ Forwards to Keychain.add_private_key() """ key: PrivateKey if self.use_local_keychain(): key = self.keychain.add_private_key(mnemonic) else: response, success = await self.get_response_for_request("add_private_key", {"mnemonic": mnemonic}) if success: seed = mnemonic_to_seed(mnemonic) key = AugSchemeMPL.key_gen(seed) else: error = response["data"].get("error", None) if error == KEYCHAIN_ERR_KEYERROR: error_details = response["data"].get("error_details", {}) word = error_details.get("word", "") raise KeyError(word) else: self.handle_error(response) return key async def check_keys(self, root_path: Path) -> None: """ Forwards to init_funcs.check_keys() """ if self.use_local_keychain(): check_keys(root_path, self.keychain) else: response, success = await self.get_response_for_request("check_keys", {"root_path": str(root_path)}) if not success: self.handle_error(response) async def delete_all_keys(self) -> None: """ Forwards to Keychain.delete_all_keys() """ if self.use_local_keychain(): self.keychain.delete_all_keys() else: response, success = await self.get_response_for_request("delete_all_keys", {}) if not success: self.handle_error(response) async def delete_key_by_fingerprint(self, fingerprint: int) -> None: """ Forwards to Keychain.delete_key_by_fingerprint() """ if self.use_local_keychain(): self.keychain.delete_key_by_fingerprint(fingerprint) else: response, success = await self.get_response_for_request( "delete_key_by_fingerprint", {"fingerprint": fingerprint} ) if not success: self.handle_error(response) async def get_all_private_keys(self) -> List[Tuple[PrivateKey, bytes]]: """ Forwards to Keychain.get_all_private_keys() """ keys: List[Tuple[PrivateKey, bytes]] = [] if 
self.use_local_keychain(): keys = self.keychain.get_all_private_keys() else: response, success = await self.get_response_for_request("get_all_private_keys", {}) if success: private_keys = response["data"].get("private_keys", None) if private_keys is None: err = f"Missing private_keys in {response.get('command')} response" self.log.error(f"{err}") raise KeychainMalformedResponse(f"{err}") else: for key_dict in private_keys: pk = key_dict.get("pk", None) ent_str = key_dict.get("entropy", None) if pk is None or ent_str is None: err = f"Missing pk and/or ent in {response.get('command')} response" self.log.error(f"{err}") continue # We'll skip the incomplete key entry ent = bytes.fromhex(ent_str) mnemonic = bytes_to_mnemonic(ent) seed = mnemonic_to_seed(mnemonic) key = AugSchemeMPL.key_gen(seed) if bytes(key.get_g1()).hex() == pk: keys.append((key, ent)) else: err = "G1Elements don't match" self.log.error(f"{err}") else: self.handle_error(response) return keys async def get_first_private_key(self) -> Optional[PrivateKey]: """ Forwards to Keychain.get_first_private_key() """ key: Optional[PrivateKey] = None if self.use_local_keychain(): sk_ent = self.keychain.get_first_private_key() if sk_ent: key = sk_ent[0] else: response, success = await self.get_response_for_request("get_first_private_key", {}) if success: private_key = response["data"].get("private_key", None) if private_key is None: err = f"Missing private_key in {response.get('command')} response" self.log.error(f"{err}") raise KeychainMalformedResponse(f"{err}") else: pk = private_key.get("pk", None) ent_str = private_key.get("entropy", None) if pk is None or ent_str is None: err = f"Missing pk and/or ent in {response.get('command')} response" self.log.error(f"{err}") raise KeychainMalformedResponse(f"{err}") ent = bytes.fromhex(ent_str) mnemonic = bytes_to_mnemonic(ent) seed = mnemonic_to_seed(mnemonic) sk = AugSchemeMPL.key_gen(seed) if bytes(sk.get_g1()).hex() == pk: key = sk else: err = "G1Elements don't 
match" self.log.error(f"{err}") else: self.handle_error(response) return key async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]: """ Locates and returns a private key matching the provided fingerprint """ key: Optional[PrivateKey] = None if self.use_local_keychain(): private_keys = self.keychain.get_all_private_keys() if len(private_keys) == 0: raise KeychainIsEmpty() else: if fingerprint is not None: for sk, _ in private_keys: if sk.get_g1().get_fingerprint() == fingerprint: key = sk break if key is None: raise KeychainKeyNotFound(fingerprint) else: key = private_keys[0][0] else: response, success = await self.get_response_for_request( "get_key_for_fingerprint", {"fingerprint": fingerprint} ) if success: pk = response["data"].get("pk", None) ent = response["data"].get("entropy", None) if pk is None or ent is None: err = f"Missing pk and/or ent in {response.get('command')} response" self.log.error(f"{err}") raise KeychainMalformedResponse(f"{err}") else: mnemonic = bytes_to_mnemonic(bytes.fromhex(ent)) seed = mnemonic_to_seed(mnemonic) private_key = AugSchemeMPL.key_gen(seed) if bytes(private_key.get_g1()).hex() == pk: key = private_key else: err = "G1Elements don't match" self.log.error(f"{err}") else: self.handle_error(response) return key def wrap_local_keychain(keychain: Keychain, log: logging.Logger) -> KeychainProxy: """ Wrap an existing local Keychain instance in a KeychainProxy to utilize the same interface as a remote Keychain """ return KeychainProxy(local_keychain=keychain, log=log) async def connect_to_keychain( self_hostname: str, daemon_port: int, ssl_context: Optional[ssl.SSLContext], log: logging.Logger, user: Optional[str] = None, service: Optional[str] = None, ) -> KeychainProxy: """ Connect to the local daemon. 
""" client = KeychainProxy( uri=f"wss://{self_hostname}:{daemon_port}", ssl_context=ssl_context, log=log, user=user, service=service ) # Connect to the service if the proxy isn't using a local keychain if not client.use_local_keychain(): await client.start() return client async def connect_to_keychain_and_validate( root_path: Path, log: logging.Logger, user: Optional[str] = None, service: Optional[str] = None, ) -> Optional[KeychainProxy]: """ Connect to the local daemon and do a ping to ensure that something is really there and running. """ try: net_config = load_config(root_path, "config.yaml") crt_path = root_path / net_config["daemon_ssl"]["private_crt"] key_path = root_path / net_config["daemon_ssl"]["private_key"] ca_crt_path = root_path / net_config["private_ssl_ca"]["crt"] ca_key_path = root_path / net_config["private_ssl_ca"]["key"] ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path, log=log) connection = await connect_to_keychain( net_config["self_hostname"], net_config["daemon_port"], ssl_context, log, user, service ) # If proxying to a local keychain, don't attempt to ping if connection.use_local_keychain(): return connection r = await connection.ping() # this is purposely using the base classes _get method if "value" in r["data"] and r["data"]["value"] == "pong": return connection except Exception as e: print(f"Keychain(daemon) not started yet: {e}") return None
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/daemon/keychain_server.py
flax/daemon/keychain_server.py
import logging from blspy import PrivateKey from flax.cmds.init_funcs import check_keys from flax.util.errors import KeychainException, KeychainFingerprintNotFound from flax.util.ints import uint32 from flax.util.keychain import KeyData, Keychain from flax.util.streamable import streamable, Streamable from pathlib import Path from dataclasses import dataclass from typing import Any, Dict, List, Optional, Type # Commands that are handled by the KeychainServer keychain_commands = [ "add_private_key", "check_keys", "delete_all_keys", "delete_key_by_fingerprint", "get_all_private_keys", "get_first_private_key", "get_key_for_fingerprint", "get_key", "get_keys", "set_label", "delete_label", ] log = logging.getLogger(__name__) KEYCHAIN_ERR_KEYERROR = "key error" KEYCHAIN_ERR_LOCKED = "keyring is locked" KEYCHAIN_ERR_NO_KEYS = "no keys present" KEYCHAIN_ERR_KEY_NOT_FOUND = "key not found" KEYCHAIN_ERR_MALFORMED_REQUEST = "malformed request" @streamable @dataclass(frozen=True) class EmptyResponse(Streamable): pass @streamable @dataclass(frozen=True) class GetKeyResponse(Streamable): key: KeyData @streamable @dataclass(frozen=True) class GetKeyRequest(Streamable): fingerprint: uint32 include_secrets: bool = False def run(self, keychain: Keychain) -> GetKeyResponse: return GetKeyResponse(key=keychain.get_key(self.fingerprint, self.include_secrets)) @streamable @dataclass(frozen=True) class GetKeysResponse(Streamable): keys: List[KeyData] @streamable @dataclass(frozen=True) class GetKeysRequest(Streamable): include_secrets: bool = False def run(self, keychain: Keychain) -> GetKeysResponse: return GetKeysResponse(keys=keychain.get_keys(self.include_secrets)) @streamable @dataclass(frozen=True) class SetLabelRequest(Streamable): fingerprint: uint32 label: str def run(self, keychain: Keychain) -> EmptyResponse: keychain.set_label(int(self.fingerprint), self.label) return EmptyResponse() @streamable @dataclass(frozen=True) class DeleteLabelRequest(Streamable): fingerprint: uint32 
def run(self, keychain: Keychain) -> EmptyResponse: keychain.delete_label(self.fingerprint) return EmptyResponse() class KeychainServer: """ Implements a remote keychain service for clients to perform key operations on """ def __init__(self): self._default_keychain = Keychain() self._alt_keychains = {} def get_keychain_for_request(self, request: Dict[str, Any]): """ Keychain instances can have user and service strings associated with them. The keychain backends ultimately point to the same data stores, but the user and service strings are used to partition those data stores. We attempt to maintain a mapping of user/service pairs to their corresponding Keychain. """ keychain = None user = request.get("kc_user", self._default_keychain.user) service = request.get("kc_service", self._default_keychain.service) if user == self._default_keychain.user and service == self._default_keychain.service: keychain = self._default_keychain else: key = (user or "unnamed") + (service or "") if key in self._alt_keychains: keychain = self._alt_keychains[key] else: keychain = Keychain(user=user, service=service) self._alt_keychains[key] = keychain return keychain async def handle_command(self, command: str, data: Dict[str, Any]) -> Dict[str, Any]: try: if command == "add_private_key": return await self.add_private_key(data) elif command == "check_keys": return await self.check_keys(data) elif command == "delete_all_keys": return await self.delete_all_keys(data) elif command == "delete_key_by_fingerprint": return await self.delete_key_by_fingerprint(data) elif command == "get_all_private_keys": return await self.get_all_private_keys(data) elif command == "get_first_private_key": return await self.get_first_private_key(data) elif command == "get_key_for_fingerprint": return await self.get_key_for_fingerprint(data) elif command == "get_key": return await self.run_request(data, GetKeyRequest) elif command == "get_keys": return await self.run_request(data, GetKeysRequest) elif command == 
"set_label": return await self.run_request(data, SetLabelRequest) elif command == "delete_label": return await self.run_request(data, DeleteLabelRequest) return {} except Exception as e: log.exception(e) return {"success": False, "error": str(e), "command": command} async def add_private_key(self, request: Dict[str, Any]) -> Dict[str, Any]: if self.get_keychain_for_request(request).is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} mnemonic = request.get("mnemonic", None) label = request.get("label", None) if mnemonic is None: return { "success": False, "error": KEYCHAIN_ERR_MALFORMED_REQUEST, "error_details": {"message": "missing mnemonic"}, } try: self.get_keychain_for_request(request).add_private_key(mnemonic, label) except KeyError as e: return { "success": False, "error": KEYCHAIN_ERR_KEYERROR, "error_details": {"message": f"The word '{e.args[0]}' is incorrect.'", "word": e.args[0]}, } except ValueError as e: log.exception(e) return { "success": False, "error": str(e), } return {"success": True} async def check_keys(self, request: Dict[str, Any]) -> Dict[str, Any]: if self.get_keychain_for_request(request).is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} root_path = request.get("root_path", None) if root_path is None: return { "success": False, "error": KEYCHAIN_ERR_MALFORMED_REQUEST, "error_details": {"message": "missing root_path"}, } check_keys(Path(root_path)) return {"success": True} async def delete_all_keys(self, request: Dict[str, Any]) -> Dict[str, Any]: if self.get_keychain_for_request(request).is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} self.get_keychain_for_request(request).delete_all_keys() return {"success": True} async def delete_key_by_fingerprint(self, request: Dict[str, Any]) -> Dict[str, Any]: if self.get_keychain_for_request(request).is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} fingerprint = request.get("fingerprint", None) 
if fingerprint is None: return { "success": False, "error": KEYCHAIN_ERR_MALFORMED_REQUEST, "error_details": {"message": "missing fingerprint"}, } self.get_keychain_for_request(request).delete_key_by_fingerprint(fingerprint) return {"success": True} async def run_request(self, request_dict: Dict[str, Any], request_type: Type[Any]) -> Dict[str, Any]: keychain = self.get_keychain_for_request(request_dict) if keychain.is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} try: request = request_type.from_json_dict(request_dict) except Exception as e: return { "success": False, "error": KEYCHAIN_ERR_MALFORMED_REQUEST, "error_details": {"message": str(e)}, } try: return {"success": True, **request.run(keychain).to_json_dict()} except KeychainFingerprintNotFound as e: return { "success": False, "error": KEYCHAIN_ERR_KEY_NOT_FOUND, "error_details": {"fingerprint": e.fingerprint}, } except KeychainException as e: return { "success": False, "error": KEYCHAIN_ERR_MALFORMED_REQUEST, "error_details": {"message": str(e)}, } async def get_all_private_keys(self, request: Dict[str, Any]) -> Dict[str, Any]: all_keys: List[Dict[str, Any]] = [] if self.get_keychain_for_request(request).is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} private_keys = self.get_keychain_for_request(request).get_all_private_keys() for sk, entropy in private_keys: all_keys.append({"pk": bytes(sk.get_g1()).hex(), "entropy": entropy.hex()}) return {"success": True, "private_keys": all_keys} async def get_first_private_key(self, request: Dict[str, Any]) -> Dict[str, Any]: key: Dict[str, Any] = {} if self.get_keychain_for_request(request).is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} sk_ent = self.get_keychain_for_request(request).get_first_private_key() if sk_ent is None: return {"success": False, "error": KEYCHAIN_ERR_NO_KEYS} pk_str = bytes(sk_ent[0].get_g1()).hex() ent_str = sk_ent[1].hex() key = {"pk": pk_str, "entropy": 
ent_str} return {"success": True, "private_key": key} async def get_key_for_fingerprint(self, request: Dict[str, Any]) -> Dict[str, Any]: if self.get_keychain_for_request(request).is_keyring_locked(): return {"success": False, "error": KEYCHAIN_ERR_LOCKED} private_keys = self.get_keychain_for_request(request).get_all_private_keys() if len(private_keys) == 0: return {"success": False, "error": KEYCHAIN_ERR_NO_KEYS} fingerprint = request.get("fingerprint", None) private_key: Optional[PrivateKey] = None entropy: Optional[bytes] = None if fingerprint is not None: for sk, entropy in private_keys: if sk.get_g1().get_fingerprint() == fingerprint: private_key = sk break else: private_key, entropy = private_keys[0] if private_key is not None and entropy is not None: return {"success": True, "pk": bytes(private_key.get_g1()).hex(), "entropy": entropy.hex()} else: return {"success": False, "error": KEYCHAIN_ERR_KEY_NOT_FOUND}
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/daemon/windows_signal.py
flax/daemon/windows_signal.py
""" Code taken from Stack Overflow Eryk Sun. https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine """ from __future__ import annotations import os import signal import sys from types import FrameType from typing import Any, Callable, Optional, Union # https://github.com/python/typeshed/blob/fbddd2c4e2b746f1880399ed0cb31a44d6ede6ff/stdlib/signal.pyi _HANDLER = Union[Callable[[int, Optional[FrameType]], Any], int, signal.Handlers, None] if sys.platform != "win32" and sys.platform != "cygwin": kill = os.kill else: # adapt the conflated API on Windows. import threading sigmap = { signal.SIGINT: signal.CTRL_C_EVENT, # pylint: disable=E1101 signal.SIGBREAK: signal.CTRL_BREAK_EVENT, # pylint: disable=E1101 } def kill(pid: int, signum: signal.Signals) -> None: if signum in sigmap and pid == os.getpid(): # we don't know if the current process is a # process group leader, so just broadcast # to all processes attached to this console. pid = 0 thread = threading.current_thread() handler = signal.getsignal(signum) # work around the synchronization problem when calling # kill from the main thread. if signum in sigmap and thread.name == "MainThread" and callable(handler) and pid == 0: event = threading.Event() callable_handler = handler def handler_set_event(signum: int, frame: Optional[FrameType]) -> Any: event.set() return callable_handler(signum, frame) signal.signal(signum, handler_set_event) try: os.kill(pid, sigmap[signum]) # busy wait because we can't block in the main # thread, else the signal handler can't execute. while not event.is_set(): pass finally: signal.signal(signum, handler) else: os.kill(pid, sigmap.get(signum, signum))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/daemon/__init__.py
flax/daemon/__init__.py
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/daemon/server.py
flax/daemon/server.py
import asyncio import functools import json import logging import os import signal import ssl import subprocess import sys import time import traceback import uuid from concurrent.futures import ThreadPoolExecutor from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, TextIO, Tuple from flax import __version__ from flax.cmds.init_funcs import check_keys, flax_init, flax_full_version_str from flax.cmds.passphrase_funcs import default_passphrase, using_default_passphrase from flax.daemon.keychain_server import KeychainServer, keychain_commands from flax.daemon.windows_signal import kill from flax.plotters.plotters import get_available_plotters from flax.plotting.util import add_plot_directory from flax.server.server import ssl_context_for_root, ssl_context_for_server from flax.ssl.create_ssl import get_mozilla_ca_crt from flax.util.beta_metrics import BetaMetricsLogger from flax.util.flax_logging import initialize_service_logging from flax.util.config import load_config from flax.util.errors import KeychainRequiresMigration, KeychainCurrentPassphraseIsInvalid from flax.util.json_util import dict_to_json_str from flax.util.keychain import ( Keychain, passphrase_requirements, supports_os_passphrase_storage, ) from flax.util.lock import Lockfile, LockfileError from flax.util.network import WebServer from flax.util.service_groups import validate_service from flax.util.setproctitle import setproctitle from flax.util.ws_message import WsRpcMessage, create_payload, format_response io_pool_exc = ThreadPoolExecutor() try: from aiohttp import ClientSession, WSMsgType, web from aiohttp.web_ws import WebSocketResponse except ModuleNotFoundError: print("Error: Make sure to run . 
./activate from the project folder before starting Flax.") quit() log = logging.getLogger(__name__) service_plotter = "flax_plotter" async def fetch(url: str): async with ClientSession() as session: try: mozilla_root = get_mozilla_ca_crt() ssl_context = ssl_context_for_root(mozilla_root, log=log) response = await session.get(url, ssl=ssl_context) if not response.ok: log.warning("Response not OK.") return None return await response.text() except Exception as e: log.error(f"Exception while fetching {url}, exception: {e}") return None class PlotState(str, Enum): SUBMITTED = "SUBMITTED" RUNNING = "RUNNING" REMOVING = "REMOVING" FINISHED = "FINISHED" class PlotEvent(str, Enum): LOG_CHANGED = "log_changed" STATE_CHANGED = "state_changed" # determine if application is a script file or frozen exe if getattr(sys, "frozen", False): name_map = { "flax": "flax", "flax_data_layer": "start_data_layer", "flax_data_layer_http": "start_data_layer_http", "flax_wallet": "start_wallet", "flax_full_node": "start_full_node", "flax_harvester": "start_harvester", "flax_farmer": "start_farmer", "flax_introducer": "start_introducer", "flax_timelord": "start_timelord", "flax_timelord_launcher": "timelord_launcher", "flax_full_node_simulator": "start_simulator", "flax_seeder": "start_seeder", "flax_crawler": "start_crawler", } def executable_for_service(service_name: str) -> str: application_path = os.path.dirname(sys.executable) if sys.platform == "win32" or sys.platform == "cygwin": executable = name_map[service_name] path = f"{application_path}/{executable}.exe" return path else: path = f"{application_path}/{name_map[service_name]}" return path else: application_path = os.path.dirname(__file__) def executable_for_service(service_name: str) -> str: return service_name async def ping() -> Dict[str, Any]: response = {"success": True, "value": "pong"} return response class WebSocketServer: def __init__( self, root_path: Path, ca_crt_path: Path, ca_key_path: Path, crt_path: Path, key_path: 
Path, shutdown_event: asyncio.Event, run_check_keys_on_unlock: bool = False, ): self.root_path = root_path self.log = log self.services: Dict = dict() self.plots_queue: List[Dict] = [] self.connections: Dict[str, List[WebSocketResponse]] = dict() # service_name : [WebSocket] self.remote_address_map: Dict[WebSocketResponse, str] = dict() # socket: service_name self.ping_job: Optional[asyncio.Task] = None self.net_config = load_config(root_path, "config.yaml") self.self_hostname = self.net_config["self_hostname"] self.daemon_port = self.net_config["daemon_port"] self.daemon_max_message_size = self.net_config.get("daemon_max_message_size", 50 * 1000 * 1000) self.webserver: Optional[WebServer] = None self.ssl_context = ssl_context_for_server(ca_crt_path, ca_key_path, crt_path, key_path, log=self.log) self.keychain_server = KeychainServer() self.run_check_keys_on_unlock = run_check_keys_on_unlock self.shutdown_event = shutdown_event async def start(self): self.log.info("Starting Daemon Server") # Note: the minimum_version has been already set to TLSv1_2 # in ssl_context_for_server() # Daemon is internal connections, so override to TLSv1_3 only if ssl.HAS_TLSv1_3: try: self.ssl_context.minimum_version = ssl.TLSVersion.TLSv1_3 except ValueError: # in case the attempt above confused the config, set it again (likely not needed but doesn't hurt) self.ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2 if self.ssl_context.minimum_version is not ssl.TLSVersion.TLSv1_3: self.log.warning( ( "Deprecation Warning: Your version of SSL (%s) does not support TLS1.3. " "A future version of Flax will require TLS1.3." 
), ssl.OPENSSL_VERSION, ) self.webserver = await WebServer.create( hostname=self.self_hostname, port=self.daemon_port, keepalive_timeout=300, shutdown_timeout=3, routes=[web.get("/", self.incoming_connection)], ssl_context=self.ssl_context, logger=self.log, ) async def setup_process_global_state(self) -> None: try: asyncio.get_running_loop().add_signal_handler( signal.SIGINT, functools.partial(self._accept_signal, signal_number=signal.SIGINT), ) asyncio.get_running_loop().add_signal_handler( signal.SIGTERM, functools.partial(self._accept_signal, signal_number=signal.SIGTERM), ) except NotImplementedError: self.log.info("Not implemented") def _accept_signal(self, signal_number: int, stack_frame=None): asyncio.create_task(self.stop()) def cancel_task_safe(self, task: Optional[asyncio.Task]): if task is not None: try: task.cancel() except Exception as e: self.log.error(f"Error while canceling task.{e} {task}") async def stop(self) -> Dict[str, Any]: self.cancel_task_safe(self.ping_job) service_names = list(self.services.keys()) stop_service_jobs = [kill_service(self.root_path, self.services, s_n) for s_n in service_names] if stop_service_jobs: await asyncio.wait(stop_service_jobs) self.services.clear() asyncio.create_task(self.exit()) log.info(f"Daemon Server stopping, Services stopped: {service_names}") return {"success": True, "services_stopped": service_names} async def incoming_connection(self, request): ws: WebSocketResponse = web.WebSocketResponse(max_msg_size=self.daemon_max_message_size, heartbeat=30) await ws.prepare(request) while True: msg = await ws.receive() self.log.debug("Received message: %s", msg) if msg.type == WSMsgType.TEXT: try: decoded = json.loads(msg.data) if "data" not in decoded: decoded["data"] = {} response, sockets_to_use = await self.handle_message(ws, decoded) except Exception as e: tb = traceback.format_exc() self.log.error(f"Error while handling message: {tb}") error = {"success": False, "error": f"{e}"} response = 
format_response(decoded, error) sockets_to_use = [] if len(sockets_to_use) > 0: for socket in sockets_to_use: try: await socket.send_str(response) except Exception as e: tb = traceback.format_exc() self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}") self.remove_connection(socket) await socket.close() else: service_name = "Unknown" if ws in self.remote_address_map: service_name = self.remote_address_map[ws] if msg.type == WSMsgType.CLOSE: self.log.info(f"ConnectionClosed. Closing websocket with {service_name}") elif msg.type == WSMsgType.ERROR: self.log.info(f"Websocket exception. Closing websocket with {service_name}. {ws.exception()}") self.remove_connection(ws) await ws.close() break def remove_connection(self, websocket: WebSocketResponse): service_name = None if websocket in self.remote_address_map: service_name = self.remote_address_map[websocket] self.remote_address_map.pop(websocket) if service_name in self.connections: after_removal = [] for connection in self.connections[service_name]: if connection == websocket: continue else: after_removal.append(connection) self.connections[service_name] = after_removal async def ping_task(self) -> None: restart = True await asyncio.sleep(30) for remote_address, service_name in self.remote_address_map.items(): if service_name in self.connections: sockets = self.connections[service_name] for socket in sockets: try: self.log.debug(f"About to ping: {service_name}") await socket.ping() except asyncio.CancelledError: self.log.warning("Ping task received Cancel") restart = False break except Exception: self.log.exception("Ping error") self.log.error("Ping failed, connection closed.") self.remove_connection(socket) await socket.close() if restart is True: self.ping_job = asyncio.create_task(self.ping_task()) async def handle_message( self, websocket: WebSocketResponse, message: WsRpcMessage ) -> Tuple[Optional[str], List[Any]]: """ This function gets called when new message is received via websocket. 
""" command = message["command"] destination = message["destination"] if destination != "daemon": if destination in self.connections: sockets = self.connections[destination] return dict_to_json_str(message), sockets return None, [] data = message["data"] commands_with_data = [ "start_service", "start_plotting", "stop_plotting", "stop_service", "is_running", "register_service", ] if len(data) == 0 and command in commands_with_data: response = {"success": False, "error": f'{command} requires "data"'} # Keychain commands should be handled by KeychainServer elif command in keychain_commands: response = await self.keychain_server.handle_command(command, data) elif command == "ping": response = await ping() elif command == "start_service": response = await self.start_service(data) elif command == "start_plotting": response = await self.start_plotting(data) elif command == "stop_plotting": response = await self.stop_plotting(data) elif command == "stop_service": response = await self.stop_service(data) elif command == "running_services": response = await self.running_services(data) elif command == "is_running": response = await self.is_running(data) elif command == "is_keyring_locked": response = await self.is_keyring_locked() elif command == "keyring_status": response = await self.keyring_status() elif command == "unlock_keyring": response = await self.unlock_keyring(data) elif command == "validate_keyring_passphrase": response = await self.validate_keyring_passphrase(data) elif command == "migrate_keyring": response = await self.migrate_keyring(data) elif command == "set_keyring_passphrase": response = await self.set_keyring_passphrase(data) elif command == "remove_keyring_passphrase": response = await self.remove_keyring_passphrase(data) elif command == "notify_keyring_migration_completed": response = await self.notify_keyring_migration_completed(data) elif command == "exit": response = await self.stop() elif command == "register_service": response = await 
self.register_service(websocket, data) elif command == "get_status": response = self.get_status() elif command == "get_version": response = self.get_version() elif command == "get_plotters": response = await self.get_plotters() else: self.log.error(f"UK>> {message}") response = {"success": False, "error": f"unknown_command {command}"} full_response = format_response(message, response) return full_response, [websocket] async def is_keyring_locked(self) -> Dict[str, Any]: locked: bool = Keychain.is_keyring_locked() response: Dict[str, Any] = {"success": True, "is_keyring_locked": locked} return response async def keyring_status(self) -> Dict[str, Any]: can_save_passphrase: bool = supports_os_passphrase_storage() user_passphrase_is_set: bool = Keychain.has_master_passphrase() and not using_default_passphrase() locked: bool = Keychain.is_keyring_locked() needs_migration: bool = Keychain.needs_migration() can_remove_legacy_keys: bool = False # Disabling GUI support for removing legacy keys post-migration can_set_passphrase_hint: bool = True passphrase_hint: str = Keychain.get_master_passphrase_hint() or "" requirements: Dict[str, Any] = passphrase_requirements() response: Dict[str, Any] = { "success": True, "is_keyring_locked": locked, "can_save_passphrase": can_save_passphrase, "user_passphrase_is_set": user_passphrase_is_set, "needs_migration": needs_migration, "can_remove_legacy_keys": can_remove_legacy_keys, "can_set_passphrase_hint": can_set_passphrase_hint, "passphrase_hint": passphrase_hint, "passphrase_requirements": requirements, } # Help diagnose GUI launch issues self.log.debug(f"Keyring status: {response}") return response async def unlock_keyring(self, request: Dict[str, Any]) -> Dict[str, Any]: success: bool = False error: Optional[str] = None key: Optional[str] = request.get("key", None) if type(key) is not str: return {"success": False, "error": "missing key"} try: if Keychain.master_passphrase_is_valid(key, force_reload=True): 
Keychain.set_cached_master_passphrase(key) success = True # Inform the GUI of keyring status changes self.keyring_status_changed(await self.keyring_status(), "wallet_ui") else: error = "bad passphrase" except Exception as e: tb = traceback.format_exc() self.log.error(f"Keyring passphrase validation failed: {e} {tb}") error = "validation exception" if success and self.run_check_keys_on_unlock: try: self.log.info("Running check_keys now that the keyring is unlocked") check_keys(self.root_path) self.run_check_keys_on_unlock = False except Exception as e: tb = traceback.format_exc() self.log.error(f"check_keys failed after unlocking keyring: {e} {tb}") response: Dict[str, Any] = {"success": success, "error": error} return response async def validate_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]: success: bool = False error: Optional[str] = None key: Optional[str] = request.get("key", None) if type(key) is not str: return {"success": False, "error": "missing key"} try: success = Keychain.master_passphrase_is_valid(key, force_reload=True) except Exception as e: tb = traceback.format_exc() self.log.error(f"Keyring passphrase validation failed: {e} {tb}") error = "validation exception" response: Dict[str, Any] = {"success": success, "error": error} return response async def migrate_keyring(self, request: Dict[str, Any]) -> Dict[str, Any]: if Keychain.needs_migration() is False: # If the keyring has already been migrated, we'll raise an error to the client. # The reason for raising an error is because the migration request has side- # effects beyond copying keys from the legacy keyring to the new keyring. The # request may have set a passphrase and indicated that keys should be cleaned # from the legacy keyring. If we were to return early and indicate success, # the client and user's expectations may not match reality (were my keys # deleted from the legacy keyring? was my passphrase set?). 
return {"success": False, "error": "migration not needed"} success: bool = False error: Optional[str] = None passphrase: Optional[str] = request.get("passphrase", None) passphrase_hint: Optional[str] = request.get("passphrase_hint", None) save_passphrase: bool = request.get("save_passphrase", False) cleanup_legacy_keyring: bool = request.get("cleanup_legacy_keyring", False) if passphrase is not None and type(passphrase) is not str: return {"success": False, "error": 'expected string value for "passphrase"'} if passphrase_hint is not None and type(passphrase_hint) is not str: return {"success": False, "error": 'expected string value for "passphrase_hint"'} if not Keychain.passphrase_meets_requirements(passphrase): return {"success": False, "error": "passphrase doesn't satisfy requirements"} if type(cleanup_legacy_keyring) is not bool: return {"success": False, "error": 'expected bool value for "cleanup_legacy_keyring"'} try: Keychain.migrate_legacy_keyring( passphrase=passphrase, passphrase_hint=passphrase_hint, save_passphrase=save_passphrase, cleanup_legacy_keyring=cleanup_legacy_keyring, ) success = True # Inform the GUI of keyring status changes self.keyring_status_changed(await self.keyring_status(), "wallet_ui") except Exception as e: tb = traceback.format_exc() self.log.error(f"Legacy keyring migration failed: {e} {tb}") error = f"keyring migration failed: {e}" response: Dict[str, Any] = {"success": success, "error": error} return response async def set_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]: success: bool = False error: Optional[str] = None current_passphrase: Optional[str] = None new_passphrase: Optional[str] = None passphrase_hint: Optional[str] = request.get("passphrase_hint", None) save_passphrase: bool = request.get("save_passphrase", False) if using_default_passphrase(): current_passphrase = default_passphrase() if Keychain.has_master_passphrase() and not current_passphrase: current_passphrase = 
request.get("current_passphrase", None) if type(current_passphrase) is not str: return {"success": False, "error": "missing current_passphrase"} new_passphrase = request.get("new_passphrase", None) if type(new_passphrase) is not str: return {"success": False, "error": "missing new_passphrase"} if not Keychain.passphrase_meets_requirements(new_passphrase): return {"success": False, "error": "passphrase doesn't satisfy requirements"} try: assert new_passphrase is not None # mypy, I love you Keychain.set_master_passphrase( current_passphrase, new_passphrase, passphrase_hint=passphrase_hint, save_passphrase=save_passphrase, ) except KeychainRequiresMigration: error = "keyring requires migration" except KeychainCurrentPassphraseIsInvalid: error = "current passphrase is invalid" except Exception as e: tb = traceback.format_exc() self.log.error(f"Failed to set keyring passphrase: {e} {tb}") else: success = True # Inform the GUI of keyring status changes self.keyring_status_changed(await self.keyring_status(), "wallet_ui") response: Dict[str, Any] = {"success": success, "error": error} return response async def remove_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]: success: bool = False error: Optional[str] = None current_passphrase: Optional[str] = None if not Keychain.has_master_passphrase(): return {"success": False, "error": "passphrase not set"} current_passphrase = request.get("current_passphrase", None) if type(current_passphrase) is not str: return {"success": False, "error": "missing current_passphrase"} try: Keychain.remove_master_passphrase(current_passphrase) except KeychainCurrentPassphraseIsInvalid: error = "current passphrase is invalid" except Exception as e: tb = traceback.format_exc() self.log.error(f"Failed to remove keyring passphrase: {e} {tb}") else: success = True # Inform the GUI of keyring status changes self.keyring_status_changed(await self.keyring_status(), "wallet_ui") response: Dict[str, Any] = {"success": success, 
"error": error} return response async def notify_keyring_migration_completed(self, request: Dict[str, Any]) -> Dict[str, Any]: success: bool = False error: Optional[str] = None key: Optional[str] = request.get("key", None) if type(key) is not str: return {"success": False, "error": "missing key"} Keychain.handle_migration_completed() try: if Keychain.master_passphrase_is_valid(key, force_reload=True): Keychain.set_cached_master_passphrase(key) success = True # Inform the GUI of keyring status changes self.keyring_status_changed(await self.keyring_status(), "wallet_ui") else: error = "bad passphrase" except Exception as e: tb = traceback.format_exc() self.log.error(f"Keyring passphrase validation failed: {e} {tb}") error = "validation exception" response: Dict[str, Any] = {"success": success, "error": error} return response def get_status(self) -> Dict[str, Any]: response = {"success": True, "genesis_initialized": True} return response def get_version(self) -> Dict[str, Any]: response = {"success": True, "version": __version__} return response async def get_plotters(self) -> Dict[str, Any]: plotters: Dict[str, Any] = get_available_plotters(self.root_path) response: Dict[str, Any] = {"success": True, "plotters": plotters} return response async def _keyring_status_changed(self, keyring_status: Dict[str, Any], destination: str): """ Attempt to communicate with the GUI to inform it of any keyring status changes (e.g. 
keyring becomes unlocked or migration completes) """ websockets = self.connections.get("wallet_ui", None) if websockets is None: return None if keyring_status is None: return None response = create_payload("keyring_status_changed", keyring_status, "daemon", destination) for websocket in websockets.copy(): try: await websocket.send_str(response) except Exception as e: tb = traceback.format_exc() self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}") websockets.remove(websocket) await websocket.close() def keyring_status_changed(self, keyring_status: Dict[str, Any], destination: str): asyncio.create_task(self._keyring_status_changed(keyring_status, destination)) def plot_queue_to_payload(self, plot_queue_item, send_full_log: bool) -> Dict[str, Any]: error = plot_queue_item.get("error") has_error = error is not None item = { "id": plot_queue_item["id"], "queue": plot_queue_item["queue"], "size": plot_queue_item["size"], "parallel": plot_queue_item["parallel"], "delay": plot_queue_item["delay"], "state": plot_queue_item["state"], "error": str(error) if has_error else None, "deleted": plot_queue_item["deleted"], "log_new": plot_queue_item.get("log_new"), } if send_full_log: item["log"] = plot_queue_item.get("log") return item def prepare_plot_state_message(self, state: PlotEvent, id): message = { "state": state, "queue": self.extract_plot_queue(id), } return message def extract_plot_queue(self, id=None) -> List[Dict]: send_full_log = id is None data = [] for item in self.plots_queue: if id is None or item["id"] == id: data.append(self.plot_queue_to_payload(item, send_full_log)) return data async def _state_changed(self, service: str, message: Dict[str, Any]): """If id is None, send the whole state queue""" if service not in self.connections: return None websockets = self.connections[service] if message is None: return None response = create_payload("state_changed", message, service, "wallet_ui") for websocket in websockets.copy(): try: await 
websocket.send_str(response) except Exception as e: tb = traceback.format_exc() self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}") websockets.remove(websocket) await websocket.close() def state_changed(self, service: str, message: Dict[str, Any]): asyncio.create_task(self._state_changed(service, message)) async def _watch_file_changes(self, config, fp: TextIO, loop: asyncio.AbstractEventLoop): id: str = config["id"] plotter: str = config["plotter"] final_words: List[str] = [] if plotter == "chiapos": final_words = ["Renamed final file"] elif plotter == "bladebit": final_words = ["Finished plotting in"] elif plotter == "bladebit2": final_words = ["Finished plotting in"] elif plotter == "madmax": temp_dir = config["temp_dir"] final_dir = config["final_dir"] if temp_dir == final_dir: final_words = ["Total plot creation time was"] else: # "Renamed final plot" if moving to a final dir on the same volume # "Copy to <path> finished, took..." if copying to another volume final_words = ["Renamed final plot", "finished, took"] while True: new_data = await loop.run_in_executor(io_pool_exc, fp.readline) if config["state"] is not PlotState.RUNNING: return None if new_data not in (None, ""): config["log"] = new_data if config["log"] is None else config["log"] + new_data config["log_new"] = new_data self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.LOG_CHANGED, id)) if new_data: for word in final_words: if word in new_data: return None else: time.sleep(0.5) async def _track_plotting_progress(self, config, loop: asyncio.AbstractEventLoop): file_path = config["out_file"] with open(file_path, "r") as fp: await self._watch_file_changes(config, fp, loop) def _common_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]: n = 1 if ignoreCount else request["n"] # Plot count d = request["d"] # Final directory r = request["r"] # Threads f = request.get("f") # Farmer pubkey p = request.get("p") # Pool pubkey c = 
request.get("c") # Pool contract address command_args: List[str] = [] command_args.append(f"-n{n}") command_args.append(f"-d{d}") command_args.append(f"-r{r}") if f is not None: command_args.append(f"-f{f}") if p is not None: command_args.append(f"-p{p}") if c is not None: command_args.append(f"-c{c}") return command_args def _chiapos_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]: k = request["k"] # Plot size t = request["t"] # Temp directory t2 = request["t2"] # Temp2 directory b = request["b"] # Buffer size u = request["u"] # Buckets a = request.get("a") # Fingerprint e = request["e"] # Disable bitfield x = request["x"] # Exclude final directory override_k = request["overrideK"] # Force plot sizes < k32 command_args: List[str] = [] command_args.append(f"-k{k}") command_args.append(f"-t{t}") command_args.append(f"-2{t2}") command_args.append(f"-b{b}") command_args.append(f"-u{u}") if a is not None: command_args.append(f"-a{a}") if e is True: command_args.append("-e") if x is True: command_args.append("-x") if override_k is True: command_args.append("--override-k") return command_args def _bladebit_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]: w = request.get("w", False) # Warm start
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/start_funcs.py
flax/cmds/start_funcs.py
import asyncio from concurrent.futures import ThreadPoolExecutor import os import subprocess import sys from pathlib import Path from typing import Any, Dict, Optional from flax.cmds.keys_funcs import migrate_keys from flax.cmds.passphrase_funcs import get_current_passphrase from flax.daemon.client import DaemonProxy, connect_to_daemon_and_validate from flax.util.errors import KeychainMaxUnlockAttempts from flax.util.keychain import Keychain from flax.util.service_groups import services_for_groups def launch_start_daemon(root_path: Path) -> subprocess.Popen: os.environ["FLAX_ROOT"] = str(root_path) # TODO: use startupinfo=subprocess.DETACHED_PROCESS on windows process = subprocess.Popen([sys.argv[0], "run_daemon", "--wait-for-unlock"], stdout=subprocess.PIPE) return process async def create_start_daemon_connection(root_path: Path, config: Dict[str, Any]) -> Optional[DaemonProxy]: connection = await connect_to_daemon_and_validate(root_path, config) if connection is None: print("Starting daemon") # launch a daemon process = launch_start_daemon(root_path) # give the daemon a chance to start up if process.stdout: process.stdout.readline() await asyncio.sleep(1) # it prints "daemon: listening" connection = await connect_to_daemon_and_validate(root_path, config) if connection: passphrase = None if await connection.is_keyring_locked(): passphrase = Keychain.get_cached_master_passphrase() if not Keychain.master_passphrase_is_valid(passphrase): with ThreadPoolExecutor(max_workers=1, thread_name_prefix="get_current_passphrase") as executor: passphrase = await asyncio.get_running_loop().run_in_executor(executor, get_current_passphrase) if passphrase: print("Unlocking daemon keyring") await connection.unlock_keyring(passphrase) return connection return None async def async_start( root_path: Path, config: Dict[str, Any], group: str, restart: bool, force_keyring_migration: bool ) -> None: try: daemon = await create_start_daemon_connection(root_path, config) except 
KeychainMaxUnlockAttempts: print("Failed to unlock keyring") return None if daemon is None: print("Failed to create the flax daemon") return None if force_keyring_migration: if not await migrate_keys(root_path, True): await daemon.close() sys.exit(1) for service in services_for_groups(group): if await daemon.is_running(service_name=service): print(f"{service}: ", end="", flush=True) if restart: if await daemon.stop_service(service_name=service): print("stopped") else: print("stop failed") else: print("Already running, use `-r` to restart") continue print(f"{service}: ", end="", flush=True) msg = await daemon.start_service(service_name=service) success = msg and msg["data"]["success"] if success is True: print("started") else: error = "no response" if msg: error = msg["data"]["error"] print(f"{service} failed to start. Error: {error}") await daemon.close()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/show.py
flax/cmds/show.py
from typing import Optional import click from flax.cmds.show_funcs import show_async @click.command("show", short_help="Show node information", no_args_is_help=True) @click.option( "-p", "--rpc-port", help=( "Set the port where the Full Node is hosting the RPC interface. " "See the rpc_port under full_node in config.yaml" ), type=int, default=None, ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fee", help="Show the fee information", is_flag=True, type=bool, default=False) @click.option("-s", "--state", help="Show the current state of the blockchain", is_flag=True, type=bool, default=False) @click.option( "-c", "--connections", help="List nodes connected to this Full Node", is_flag=True, type=bool, default=False ) @click.option("-a", "--add-connection", help="Connect to another Full Node by ip:port", type=str, default="") @click.option( "-r", "--remove-connection", help="Remove a Node by the first 8 characters of NodeID", type=str, default="" ) @click.option( "-bh", "--block-header-hash-by-height", help="Look up a block header hash by block height", type=str, default="" ) @click.option("-b", "--block-by-header-hash", help="Look up a block by block header hash", type=str, default="") @click.pass_context def show_cmd( ctx: click.Context, rpc_port: Optional[int], wallet_rpc_port: Optional[int], fee: bool, state: bool, connections: bool, add_connection: str, remove_connection: str, block_header_hash_by_height: str, block_by_header_hash: str, ) -> None: import asyncio if connections: print("'flax show -c' has been renamed to 'flax peer -c' ") if add_connection != "": print("'flax show -a' has been renamed to 'flax peer -a' ") if remove_connection != "": print("'flax show -r' has been renamed to 'flax peer -r' ") if wallet_rpc_port is not None: print("'flax show -wp' is not used, please remove it from your 
command.") asyncio.run( show_async( rpc_port, ctx.obj["root_path"], fee, state, block_header_hash_by_height, block_by_header_hash, ) )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/netspace_funcs.py
flax/cmds/netspace_funcs.py
from __future__ import annotations

from typing import Optional

from flax.cmds.cmds_util import get_any_service_client
from flax.rpc.full_node_rpc_client import FullNodeRpcClient
from flax.util.byte_types import hexstr_to_bytes
from flax.util.misc import format_bytes


async def netstorge_async(rpc_port: Optional[int], delta_block_height: str, start: str) -> None:
    """
    Calculates the estimated space on the network given two block header hashes.
    """
    node: Optional[FullNodeRpcClient]
    async with get_any_service_client("full_node", rpc_port) as connection_bundle:
        node, _, _ = connection_bundle
        # Nothing to do without a connected client or a height delta.
        if node is None or not delta_block_height:
            return None

        # Determine the "newer" end of the measurement window: either the
        # current peak, or the block identified by the `start` header hash.
        if start == "":
            blockchain_state = await node.get_blockchain_state()
            if blockchain_state["peak"] is None:
                print("No blocks in blockchain")
                return None
            newer_height = blockchain_state["peak"].height
        else:
            start_record = await node.get_block_record(hexstr_to_bytes(start))
            if start_record is None:
                print("Block header hash", start, "not found.")
                return None
            print("newer_height", start_record.height)
            newer_height = start_record.height

        newer_block_header = await node.get_block_record_by_height(newer_height)
        # Clamp at genesis so a large delta can never yield a negative height.
        older_height = max(0, newer_height - int(delta_block_height))
        older_block_header = await node.get_block_record_by_height(older_height)
        assert newer_block_header is not None and older_block_header is not None

        network_space_bytes_estimate = await node.get_network_space(
            newer_block_header.header_hash, older_block_header.header_hash
        )
        assert network_space_bytes_estimate is not None

        print(
            "Older Block\n"
            f"Block Height: {older_block_header.height}\n"
            f"Weight: {older_block_header.weight}\n"
            f"VDF Iterations: {older_block_header.total_iters}\n"
            f"Header Hash: 0x{older_block_header.header_hash}\n"
        )
        print(
            "Newer Block\n"
            f"Block Height: {newer_block_header.height}\n"
            f"Weight: {newer_block_header.weight}\n"
            f"VDF Iterations: {newer_block_header.total_iters}\n"
            f"Header Hash: 0x{newer_block_header.header_hash}\n"
        )
        print(format_bytes(network_space_bytes_estimate))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/beta_funcs.py
flax/cmds/beta_funcs.py
from __future__ import annotations

import os
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional

from flax.util.beta_metrics import metrics_log_interval_max, metrics_log_interval_min
from flax.util.flax_logging import get_beta_logging_config
from flax.util.errors import InvalidPathError
from flax.util.misc import format_bytes, prompt_yes_no, validate_directory_writable


def default_beta_root_path() -> Path:
    """Return the beta-test root directory (FLAX_BETA_ROOT env var, or ~/flax-beta-test)."""
    return Path(os.path.expanduser(os.getenv("FLAX_BETA_ROOT", "~/flax-beta-test"))).resolve()


def warn_if_beta_enabled(config: Dict[str, Any]) -> None:
    """Print a warning when the loaded config has beta test mode switched on."""
    if config.get("beta", {}).get("enabled", False):
        print("\nWARNING: beta test mode is enabled. Run `flax beta disable` if this is unintentional.\n")


def prompt_beta_warning() -> bool:
    """Ask the user to confirm enabling beta mode; return True if they accept."""
    logging_config = get_beta_logging_config()
    # The `/ 5` is just a rough estimation for `gzip` being used by the log rotation in beta mode. It was like
    # 7-10x compressed in example tests with 2MB files.
    min_space = format_bytes(int(logging_config["log_maxfilesrotation"] * logging_config["log_maxbytesrotation"] / 5))
    return prompt_yes_no(
        f"\nWARNING: Enabling the beta test mode increases disk writes and may lead to {min_space} of "
        "extra logfiles getting stored on your disk. This should only be done if you are part of the beta test "
        "program.\n\nDo you really want to enable the beta test mode?"
    )


def prompt_for_beta_path(default_path: Path) -> Path:
    """
    Interactively prompt (up to 3 attempts) for a writable directory to hold beta logs.

    Offers to create the directory if it does not exist; exits the process with
    "Aborted!" if no writable directory was chosen within three attempts.
    """
    path: Optional[Path] = None
    for _ in range(3):
        user_input = input(
            "\nEnter a directory where the beta test logs can be stored or press enter to use the default "
            f"[{str(default_path)}]:"
        )
        test_path = Path(user_input) if user_input else default_path
        if not test_path.is_dir() and prompt_yes_no(
            f"\nDirectory {str(test_path)!r} doesn't exist.\n\nDo you want to create it?"
        ):
            test_path.mkdir(parents=True)

        # If the user declined creation, validation below fails and we re-prompt.
        try:
            validate_directory_writable(test_path)
        except InvalidPathError as e:
            print(str(e))
            continue

        path = test_path
        break

    if path is None:
        sys.exit("Aborted!")
    else:
        return path


def prompt_for_metrics_log_interval(default_interval: int) -> int:
    """
    Interactively prompt (up to 3 attempts) for the metrics logging interval in seconds.

    Exits the process with "Aborted!" if no valid interval was entered within
    three attempts.
    """
    interval: Optional[int] = None
    for _ in range(3):
        user_input = input(
            "\nEnter a number of seconds as interval in which analytics getting logged, press enter to use the default "
            f"[{str(default_interval)}]:"
        )
        # Bug fix: `int(user_input)` used to run outside the try, so non-numeric
        # input raised an uncaught ValueError and crashed instead of re-prompting.
        try:
            test_interval = int(user_input) if user_input else default_interval
            validate_metrics_log_interval(test_interval)
        except ValueError as e:
            print("\nERROR: " + str(e))
            continue

        interval = test_interval
        break

    if interval is None:
        sys.exit("Aborted!")
    else:
        return interval


def update_beta_config(enabled: bool, path: Path, metrics_log_interval: int, config: Dict[str, Any]) -> None:
    """Write the beta settings into `config` in place, creating the "beta" section if needed."""
    if "beta" not in config:
        config["beta"] = {}

    config["beta"].update(
        {
            "enabled": enabled,
            "path": str(path),
            "metrics_log_interval": metrics_log_interval,
        }
    )


def validate_beta_path(beta_root_path: Path) -> None:
    """Exit the process with the error message if the beta root path is not writable."""
    try:
        validate_directory_writable(beta_root_path)
    except InvalidPathError as e:
        sys.exit(str(e))


def validate_metrics_log_interval(interval: int) -> None:
    """Raise ValueError if `interval` is outside the allowed metrics-logging range."""
    if interval < metrics_log_interval_min or interval > metrics_log_interval_max:
        raise ValueError(f"Must be in the range of {metrics_log_interval_min}s to {metrics_log_interval_max}s.")


def prepare_plotting_log(path: Path) -> None:
    """Prepare one plotting log file for submission (currently just prints its name)."""
    # TODO: Do stuff we want to do with the logs before submission. Maybe even just fully parse them and
    # create some final result files and zip them instead of just the logs.
    print(f" - {path.name}")


def prepare_flax_blockchain_log(path: Path) -> None:
    """Prepare one flax-blockchain log file for submission (currently just prints its name)."""
    # TODO: Do stuff we want to do with the logs before submission. Maybe even just fully parse them and
    # create some final result files and zip them instead of just the logs.
    print(f" - {path.name}")


def prepare_logs(prepare_path: Path, prepare_callback: Callable[[Path], None]) -> List[Path]:
    """
    Run `prepare_callback` on each non-hidden entry of `prepare_path` and return all entries.

    NOTE(review): the returned list still includes hidden (dot-prefixed) entries even
    though the callback skips them — callers appear to rely on the full listing; confirm.
    """
    result = [path for path in prepare_path.iterdir()] if prepare_path.exists() else []
    if len(result):
        print(f"\nPreparing {prepare_path.name!r} logs:")
        for log in result:
            if log.name.startswith("."):
                continue
            prepare_callback(log)

    return result
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/db_backup_func.py
flax/cmds/db_backup_func.py
from __future__ import annotations

from pathlib import Path
from typing import Any, Dict, Optional

from flax.util.config import load_config
from flax.util.path import path_from_root


def db_backup_func(
    root_path: Path,
    backup_db_file: Optional[Path] = None,
    *,
    no_indexes: bool,
) -> None:
    """
    Back up the full node's blockchain database.

    :param root_path: Flax root directory used to resolve config and DB paths.
    :param backup_db_file: Destination file; defaults to the source path with a
        "vacuumed_blockchain_" prefix when not given.
    :param no_indexes: When True, copy tables only (no indexes) instead of VACUUM INTO.
    """
    config: Dict[str, Any] = load_config(root_path, "config.yaml")["full_node"]
    selected_network: str = config["selected_network"]
    db_pattern: str = config["database_path"]
    # The configured path contains a "CHALLENGE" placeholder for the network name.
    db_path_replaced: str = db_pattern.replace("CHALLENGE", selected_network)
    source_db = path_from_root(root_path, db_path_replaced)
    if backup_db_file is None:
        db_path_replaced_backup = db_path_replaced.replace("blockchain_", "vacuumed_blockchain_")
        backup_db_file = path_from_root(root_path, db_path_replaced_backup)

    backup_db(source_db, backup_db_file, no_indexes=no_indexes)

    print(f"\n\nDatabase backup finished : {backup_db_file}\n")


def backup_db(source_db: Path, backup_db: Path, *, no_indexes: bool) -> None:
    """
    Copy `source_db` to `backup_db` using SQLite.

    With indexes (default) this uses `VACUUM INTO` (SQLite >= 3.27.0); with
    `no_indexes` it attaches the destination and copies table contents only.

    :raises RuntimeError: if SQLite is too old, the destination directory is
        missing, or the copy fails partway.
    """
    import sqlite3
    from contextlib import closing

    # VACUUM INTO is only available starting with SQLite version 3.27.0
    if not no_indexes and sqlite3.sqlite_version_info < (3, 27, 0):
        raise RuntimeError(
            f"SQLite {sqlite3.sqlite_version} not supported. Version needed is 3.27.0"
            "\n\tuse '--no_indexes' option to create a backup without indexes instead."
            "\n\tIn case of a restore, the missing indexes will be recreated during full node startup."
        )

    if not backup_db.parent.exists():
        print(f"backup destination path doesn't exist. {backup_db.parent}")
        raise RuntimeError(f"can't find {backup_db}")

    print(f"reading from blockchain database: {source_db}")
    print(f"writing to backup file: {backup_db}")

    with closing(sqlite3.connect(source_db)) as in_db:
        try:
            if no_indexes:
                in_db.execute("ATTACH DATABASE ? AS backup", (str(backup_db),))
                # Speed over durability is fine here: a failed backup is discarded.
                in_db.execute("pragma backup.journal_mode=OFF")
                in_db.execute("pragma backup.synchronous=OFF")
                # Use writable_schema=1 to allow create table using internal sqlite names like sqlite_stat1
                in_db.execute("pragma backup.writable_schema=1")
                cursor = in_db.cursor()
                # Recreate each table's schema in the attached backup DB.
                for row in cursor.execute(
                    "select replace(sql,'CREATE TABLE ', 'CREATE TABLE backup.') from sqlite_master "
                    "where upper(type)='TABLE'"
                ):
                    in_db.execute(row[0])
                # Bulk-copy all rows in a single transaction.
                in_db.execute("BEGIN TRANSACTION")
                for row in cursor.execute(
                    "select 'INSERT INTO backup.'||name||' SELECT * FROM main.'||name from sqlite_master "
                    "where upper(type)='TABLE'"
                ):
                    in_db.execute(row[0])
                in_db.execute("COMMIT")
                in_db.execute("DETACH DATABASE backup")
            else:
                in_db.execute("VACUUM INTO ?", [str(backup_db)])
        except sqlite3.OperationalError as e:
            # Chain the original error so the SQLite failure is preserved in the traceback.
            raise RuntimeError(
                f"backup failed with error: '{e}'"
                f"\n\tYour backup file {backup_db} is probably left over in an inconsistent state."
            ) from e
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/wallet_funcs.py
flax/cmds/wallet_funcs.py
import asyncio import os import pathlib import sys import time from datetime import datetime from decimal import Decimal from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Union from flax.cmds.cmds_util import transaction_status_msg, transaction_submitted_msg from flax.cmds.peer_funcs import print_connections from flax.cmds.units import units from flax.rpc.wallet_rpc_client import WalletRpcClient from flax.server.start_wallet import SERVICE_NAME from flax.types.blockchain_format.sized_bytes import bytes32 from flax.util.bech32m import bech32_decode, decode_puzzle_hash, encode_puzzle_hash from flax.util.config import load_config, selected_network_address_prefix from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.ints import uint16, uint32, uint64 from flax.wallet.nft_wallet.nft_info import NFTInfo from flax.wallet.outer_puzzles import AssetType from flax.wallet.puzzle_drivers import PuzzleInfo from flax.wallet.trade_record import TradeRecord from flax.wallet.trading.offer import Offer from flax.wallet.trading.trade_status import TradeStatus from flax.wallet.transaction_record import TransactionRecord from flax.wallet.util.address_type import AddressType, ensure_valid_address from flax.wallet.util.transaction_type import TransactionType from flax.wallet.util.wallet_types import WalletType CATNameResolver = Callable[[bytes32], Awaitable[Optional[Tuple[Optional[uint32], str]]]] transaction_type_descriptions = { TransactionType.INCOMING_TX: "received", TransactionType.OUTGOING_TX: "sent", TransactionType.COINBASE_REWARD: "rewarded", TransactionType.FEE_REWARD: "rewarded", TransactionType.INCOMING_TRADE: "received in trade", TransactionType.OUTGOING_TRADE: "sent in trade", } def transaction_description_from_type(tx: TransactionRecord) -> str: return transaction_type_descriptions.get(TransactionType(tx.type), "(unknown reason)") def print_transaction(tx: TransactionRecord, verbose: bool, name, address_prefix: str, mojo_per_unit: int) 
-> None: if verbose: print(tx) else: flax_amount = Decimal(int(tx.amount)) / mojo_per_unit to_address = encode_puzzle_hash(tx.to_puzzle_hash, address_prefix) print(f"Transaction {tx.name}") print(f"Status: {'Confirmed' if tx.confirmed else ('In mempool' if tx.is_in_mempool() else 'Pending')}") description = transaction_description_from_type(tx) print(f"Amount {description}: {flax_amount} {name}") print(f"To address: {to_address}") print("Created at:", datetime.fromtimestamp(tx.created_at_time).strftime("%Y-%m-%d %H:%M:%S")) print("") def get_mojo_per_unit(wallet_type: WalletType) -> int: mojo_per_unit: int if wallet_type in {WalletType.STANDARD_WALLET, WalletType.POOLING_WALLET, WalletType.DATA_LAYER}: mojo_per_unit = units["flax"] elif wallet_type == WalletType.CAT: mojo_per_unit = units["cat"] else: raise LookupError(f"Operation is not supported for Wallet type {wallet_type.name}") return mojo_per_unit async def get_wallet_type(wallet_id: int, wallet_client: WalletRpcClient) -> WalletType: summaries_response = await wallet_client.get_wallets() for summary in summaries_response: summary_id: int = summary["id"] summary_type: int = summary["type"] if wallet_id == summary_id: return WalletType(summary_type) raise LookupError(f"Wallet ID not found: {wallet_id}") async def get_name_for_wallet_id( config: Dict[str, Any], wallet_type: WalletType, wallet_id: int, wallet_client: WalletRpcClient, ): if wallet_type in {WalletType.STANDARD_WALLET, WalletType.POOLING_WALLET, WalletType.DATA_LAYER}: name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"].upper() elif wallet_type == WalletType.CAT: name = await wallet_client.get_cat_name(wallet_id=str(wallet_id)) else: raise LookupError(f"Operation is not supported for Wallet type {wallet_type.name}") return name async def get_transaction(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: transaction_id = bytes32.from_hexstr(args["tx_id"]) config = 
load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) address_prefix = selected_network_address_prefix(config) tx: TransactionRecord = await wallet_client.get_transaction("this is unused", transaction_id=transaction_id) try: wallet_type = await get_wallet_type(wallet_id=tx.wallet_id, wallet_client=wallet_client) mojo_per_unit = get_mojo_per_unit(wallet_type=wallet_type) name = await get_name_for_wallet_id( config=config, wallet_type=wallet_type, wallet_id=tx.wallet_id, wallet_client=wallet_client, ) except LookupError as e: print(e.args[0]) return print_transaction( tx, verbose=(args["verbose"] > 0), name=name, address_prefix=address_prefix, mojo_per_unit=mojo_per_unit, ) async def get_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] paginate = args["paginate"] if paginate is None: paginate = sys.stdout.isatty() offset = args["offset"] limit = args["limit"] sort_key = args["sort_key"] reverse = args["reverse"] txs: List[TransactionRecord] = await wallet_client.get_transactions( wallet_id, start=offset, end=(offset + limit), sort_key=sort_key, reverse=reverse ) config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME) address_prefix = selected_network_address_prefix(config) if len(txs) == 0: print("There are no transactions to this address") try: wallet_type = await get_wallet_type(wallet_id=wallet_id, wallet_client=wallet_client) mojo_per_unit = get_mojo_per_unit(wallet_type=wallet_type) name = await get_name_for_wallet_id( config=config, wallet_type=wallet_type, wallet_id=wallet_id, wallet_client=wallet_client, ) except LookupError as e: print(e.args[0]) return num_per_screen = 5 if paginate else len(txs) for i in range(0, len(txs), num_per_screen): for j in range(0, num_per_screen): if i + j >= len(txs): break print_transaction( txs[i + j], verbose=(args["verbose"] > 0), name=name, address_prefix=address_prefix, mojo_per_unit=mojo_per_unit, ) if i + num_per_screen >= len(txs): return None 
print("Press q to quit, or c to continue") while True: entered_key = sys.stdin.read(1) if entered_key == "q": return None elif entered_key == "c": break def check_unusual_transaction(amount: Decimal, fee: Decimal): return fee >= amount async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id: int = args["id"] amount = Decimal(args["amount"]) fee = Decimal(args["fee"]) address = args["address"] override = args["override"] min_coin_amount = Decimal(args["min_coin_amount"]) memo = args["memo"] if memo is None: memos = None else: memos = [memo] if not override and check_unusual_transaction(amount, fee): print( f"A transaction of amount {amount} and fee {fee} is unusual.\n" f"Pass in --override if you are sure you mean to do this." ) return if amount == 0: print("You can not send an empty transaction") return try: typ = await get_wallet_type(wallet_id=wallet_id, wallet_client=wallet_client) except LookupError: print(f"Wallet id: {wallet_id} not found.") return final_fee = uint64(int(fee * units["flax"])) final_amount: uint64 final_min_coin_amount: uint64 if typ == WalletType.STANDARD_WALLET: final_amount = uint64(int(amount * units["flax"])) final_min_coin_amount = uint64(int(min_coin_amount * units["flax"])) print("Submitting transaction...") res = await wallet_client.send_transaction( str(wallet_id), final_amount, address, final_fee, memos, final_min_coin_amount ) elif typ == WalletType.CAT: final_amount = uint64(int(amount * units["cat"])) final_min_coin_amount = uint64(int(min_coin_amount * units["cat"])) print("Submitting transaction...") res = await wallet_client.cat_spend( str(wallet_id), final_amount, address, final_fee, memos, final_min_coin_amount ) else: print("Only standard wallet and CAT wallets are supported") return tx_id = res.name start = time.time() while time.time() - start < 10: await asyncio.sleep(0.1) tx = await wallet_client.get_transaction(str(wallet_id), tx_id) if len(tx.sent_to) > 0: 
print(transaction_submitted_msg(tx)) print(transaction_status_msg(fingerprint, tx_id)) return None print("Transaction not yet submitted to nodes") print(f"To get status, use command: flax wallet get_transaction -f {fingerprint} -tx 0x{tx_id}") async def get_address(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] new_address: bool = args.get("new_address", False) res = await wallet_client.get_next_address(wallet_id, new_address) print(res) async def delete_unconfirmed_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args["id"] await wallet_client.delete_unconfirmed_transactions(wallet_id) print(f"Successfully deleted all unconfirmed transactions for wallet id {wallet_id} on key {fingerprint}") async def get_derivation_index(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: res = await wallet_client.get_current_derivation_index() print(f"Last derivation index: {res}") async def update_derivation_index(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: index = args["index"] print("Updating derivation index... 
This may take a while.") res = await wallet_client.extend_derivation_index(index) print(f"Updated derivation index: {res}") print("Your balances may take a while to update.") async def add_token(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: asset_id = args["asset_id"] token_name = args["token_name"] try: asset_id_bytes: bytes32 = bytes32.from_hexstr(asset_id) existing_info: Optional[Tuple[Optional[uint32], str]] = await wallet_client.cat_asset_id_to_name(asset_id_bytes) if existing_info is None or existing_info[0] is None: response = await wallet_client.create_wallet_for_existing_cat(asset_id_bytes) wallet_id = response["wallet_id"] await wallet_client.set_cat_name(wallet_id, token_name) print(f"Successfully added {token_name} with wallet id {wallet_id} on key {fingerprint}") else: wallet_id, old_name = existing_info await wallet_client.set_cat_name(wallet_id, token_name) print(f"Successfully renamed {old_name} with wallet_id {wallet_id} on key {fingerprint} to {token_name}") except ValueError as e: if "fromhex()" in str(e): print(f"{asset_id} is not a valid Asset ID") else: raise async def make_offer(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: offers: List[str] = args["offers"] requests: List[str] = args["requests"] filepath: str = args["filepath"] fee: int = int(Decimal(args["fee"]) * units["flax"]) config = load_config(DEFAULT_ROOT_PATH, "config.yaml") if [] in [offers, requests]: print("Not creating offer: Must be offering and requesting at least one asset") else: offer_dict: Dict[Union[uint32, str], int] = {} driver_dict: Dict[str, Any] = {} printable_dict: Dict[str, Tuple[str, int, int]] = {} # Dict[asset_name, Tuple[amount, unit, multiplier]] royalty_asset_dict: Dict[Any, Tuple[Any, uint16]] = {} fungible_asset_dict: Dict[Any, uint64] = {} for item in [*offers, *requests]: name, amount = tuple(item.split(":")[0:2]) try: b32_id = bytes32.from_hexstr(name) id: Union[uint32, str] = b32_id.hex() result = await 
wallet_client.cat_asset_id_to_name(b32_id) if result is not None: name = result[1] else: name = "Unknown CAT" unit = units["cat"] if item in offers: fungible_asset_dict[name] = uint64(abs(int(Decimal(amount) * unit))) except ValueError: try: hrp, _ = bech32_decode(name) if hrp == "nft": coin_id = decode_puzzle_hash(name) unit = 1 info = NFTInfo.from_json_dict((await wallet_client.get_nft_info(coin_id.hex()))["nft_info"]) id = info.launcher_id.hex() assert isinstance(id, str) if item in requests: driver_dict[id] = { "type": "singleton", "launcher_id": "0x" + id, "launcher_ph": "0x" + info.launcher_puzhash.hex(), "also": { "type": "metadata", "metadata": info.chain_info, "updater_hash": "0x" + info.updater_puzhash.hex(), }, } if info.supports_did: assert info.royalty_puzzle_hash is not None assert info.royalty_percentage is not None driver_dict[id]["also"]["also"] = { "type": "ownership", "owner": "()", "transfer_program": { "type": "royalty transfer program", "launcher_id": "0x" + info.launcher_id.hex(), "royalty_address": "0x" + info.royalty_puzzle_hash.hex(), "royalty_percentage": str(info.royalty_percentage), }, } royalty_asset_dict[name] = ( encode_puzzle_hash(info.royalty_puzzle_hash, AddressType.XFX.hrp(config)), info.royalty_percentage, ) else: id = decode_puzzle_hash(name).hex() assert hrp is not None unit = units[hrp] except ValueError: id = uint32(int(name)) if id == 1: name = "XFX" unit = units["flax"] else: name = await wallet_client.get_cat_name(str(id)) unit = units["cat"] if item in offers: fungible_asset_dict[name] = uint64(abs(int(Decimal(amount) * unit))) multiplier: int = -1 if item in offers else 1 printable_dict[name] = (amount, unit, multiplier) if id in offer_dict: print("Not creating offer: Cannot offer and request the same asset in a trade") break else: offer_dict[id] = int(Decimal(amount) * unit) * multiplier else: print("Creating Offer") print("--------------") print() print("OFFERING:") for name, data in printable_dict.items(): amount, 
unit, multiplier = data if multiplier < 0: print(f" - {amount} {name} ({int(Decimal(amount) * unit)} mojos)") print("REQUESTING:") for name, data in printable_dict.items(): amount, unit, multiplier = data if multiplier > 0: print(f" - {amount} {name} ({int(Decimal(amount) * unit)} mojos)") if royalty_asset_dict != {}: royalty_summary: Dict[Any, List[Dict[str, Any]]] = await wallet_client.nft_calculate_royalties( royalty_asset_dict, fungible_asset_dict ) total_amounts_requested: Dict[Any, int] = {} print() print("Royalties Summary:") for nft_id, summaries in royalty_summary.items(): print(f" - For {nft_id}:") for summary in summaries: divisor = units["flax"] if summary["asset"] == "XFX" else units["cat"] converted_amount = Decimal(summary["amount"]) / divisor total_amounts_requested.setdefault(summary["asset"], fungible_asset_dict[summary["asset"]]) total_amounts_requested[summary["asset"]] += summary["amount"] print( f" - {converted_amount} {summary['asset']} ({summary['amount']} mojos) to {summary['address']}" # noqa ) print() print("Total Amounts Offered:") for asset, requested_amount in total_amounts_requested.items(): divisor = units["flax"] if asset == "XFX" else units["cat"] converted_amount = Decimal(requested_amount) / divisor print(f" - {converted_amount} {asset} ({requested_amount} mojos)") print() nft_confirmation = input( "Offers for NFTs will have royalties automatically added. " + "Are you sure you would like to continue? 
(y/n): " ) if nft_confirmation not in ["y", "yes"]: print("Not creating offer...") return confirmation = input("Confirm (y/n): ") if confirmation not in ["y", "yes"]: print("Not creating offer...") else: offer, trade_record = await wallet_client.create_offer_for_ids( offer_dict, driver_dict=driver_dict, fee=fee ) if offer is not None: with open(pathlib.Path(filepath), "w") as file: file.write(offer.to_bech32()) print(f"Created offer with ID {trade_record.trade_id}") print(f"Use flax wallet get_offers --id {trade_record.trade_id} -f {fingerprint} to view status") else: print("Error creating offer") def timestamp_to_time(timestamp): return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S") async def print_offer_summary(cat_name_resolver: CATNameResolver, sum_dict: Dict[str, int], has_fee: bool = False): for asset_id, amount in sum_dict.items(): description: str = "" unit: int = units["flax"] wid: str = "1" if asset_id == "xfx" else "" mojo_amount: int = int(Decimal(amount)) name: str = "XFX" if asset_id != "xfx": name = asset_id if asset_id == "unknown": name = "Unknown" unit = units["mojo"] if has_fee: description = " [Typically represents change returned from the included fee]" else: unit = units["cat"] result = await cat_name_resolver(bytes32.from_hexstr(asset_id)) if result is not None: wid = str(result[0]) name = result[1] output: str = f" - {name}" mojo_str: str = f"{mojo_amount} {'mojo' if mojo_amount == 1 else 'mojos'}" if len(wid) > 0: output += f" (Wallet ID: {wid})" if unit == units["mojo"]: output += f": {mojo_str}" else: output += f": {mojo_amount / unit} ({mojo_str})" if len(description) > 0: output += f" {description}" print(output) async def print_trade_record(record, wallet_client: WalletRpcClient, summaries: bool = False) -> None: print() print(f"Record with id: {record.trade_id}") print("---------------") print(f"Created at: {timestamp_to_time(record.created_at_time)}") print(f"Confirmed at: {record.confirmed_at_index if 
record.confirmed_at_index > 0 else 'Not confirmed'}") print(f"Accepted at: {timestamp_to_time(record.accepted_at_time) if record.accepted_at_time else 'N/A'}") print(f"Status: {TradeStatus(record.status).name}") if summaries: print("Summary:") offer = Offer.from_bytes(record.offer) offered, requested, _ = offer.summary() outbound_balances: Dict[str, int] = offer.get_pending_amounts() fees: Decimal = Decimal(offer.bundle.fees()) cat_name_resolver = wallet_client.cat_asset_id_to_name print(" OFFERED:") await print_offer_summary(cat_name_resolver, offered) print(" REQUESTED:") await print_offer_summary(cat_name_resolver, requested) print("Pending Outbound Balances:") await print_offer_summary(cat_name_resolver, outbound_balances, has_fee=(fees > 0)) print(f"Included Fees: {fees / units['flax']}") print("---------------") async def get_offers(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: id: Optional[str] = args.get("id", None) filepath: Optional[str] = args.get("filepath", None) exclude_my_offers: bool = args.get("exclude_my_offers", False) exclude_taken_offers: bool = args.get("exclude_taken_offers", False) include_completed: bool = args.get("include_completed", False) summaries: bool = args.get("summaries", False) reverse: bool = args.get("reverse", False) file_contents: bool = (filepath is not None) or summaries records: List[TradeRecord] = [] if id is None: batch_size: int = 10 start: int = 0 end: int = start + batch_size # Traverse offers page by page while True: new_records: List[TradeRecord] = await wallet_client.get_all_offers( start, end, reverse=reverse, file_contents=file_contents, exclude_my_offers=exclude_my_offers, exclude_taken_offers=exclude_taken_offers, include_completed=include_completed, ) records.extend(new_records) # If fewer records were returned than requested, we're done if len(new_records) < batch_size: break start = end end += batch_size else: records = [await wallet_client.get_offer(bytes32.from_hexstr(id), 
file_contents)] if filepath is not None: with open(pathlib.Path(filepath), "w") as file: file.write(Offer.from_bytes(records[0].offer).to_bech32()) file.close() for record in records: await print_trade_record(record, wallet_client, summaries=summaries) async def take_offer(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: if os.path.exists(args["file"]): filepath = pathlib.Path(args["file"]) with open(filepath, "r") as file: offer_hex: str = file.read() file.close() else: offer_hex = args["file"] examine_only: bool = args["examine_only"] fee: int = int(Decimal(args["fee"]) * units["flax"]) config = load_config(DEFAULT_ROOT_PATH, "config.yaml") try: offer = Offer.from_bech32(offer_hex) except ValueError: print("Please enter a valid offer file or hex blob") return ### # This is temporary code, delete it when we no longer care about incorrectly parsing CAT1s # There's also temp code in test_wallet_rpc.py and wallet_rpc_api.py from flax.types.spend_bundle import SpendBundle from flax.util.bech32m import bech32_decode, convertbits from flax.wallet.util.puzzle_compression import decompress_object_with_puzzles hrpgot, data = bech32_decode(offer_hex, max_length=len(offer_hex)) if data is None: raise ValueError("Invalid Offer") decoded = convertbits(list(data), 5, 8, False) decoded_bytes = bytes(decoded) try: decompressed_bytes = decompress_object_with_puzzles(decoded_bytes) except TypeError: decompressed_bytes = decoded_bytes bundle = SpendBundle.from_bytes(decompressed_bytes) for spend in bundle.coin_spends: mod, _ = spend.puzzle_reveal.to_program().uncurry() if mod.get_tree_hash() == bytes32.from_hexstr( "72dec062874cd4d3aab892a0906688a1ae412b0109982e1797a170add88bdcdc" ): raise ValueError("CAT1s are no longer supported") ### offered, requested, _ = offer.summary() cat_name_resolver = wallet_client.cat_asset_id_to_name print("Summary:") print(" OFFERED:") await print_offer_summary(cat_name_resolver, offered) print(" REQUESTED:") await 
print_offer_summary(cat_name_resolver, requested) print() royalty_asset_dict: Dict[Any, Tuple[Any, uint16]] = {} for royalty_asset_id in nft_coin_ids_supporting_royalties_from_offer(offer): if royalty_asset_id.hex() in offered: percentage, address = await get_nft_royalty_percentage_and_address(royalty_asset_id, wallet_client) royalty_asset_dict[encode_puzzle_hash(royalty_asset_id, AddressType.NFT.hrp(config))] = ( encode_puzzle_hash(address, AddressType.XFX.hrp(config)), percentage, ) if royalty_asset_dict != {}: fungible_asset_dict: Dict[Any, uint64] = {} for fungible_asset_id in fungible_assets_from_offer(offer): fungible_asset_id_str = fungible_asset_id.hex() if fungible_asset_id is not None else "xfx" if fungible_asset_id_str in requested: nft_royalty_currency: str = "Unknown CAT" if fungible_asset_id is None: nft_royalty_currency = "XFX" else: result = await wallet_client.cat_asset_id_to_name(fungible_asset_id) if result is not None: nft_royalty_currency = result[1] fungible_asset_dict[nft_royalty_currency] = uint64(requested[fungible_asset_id_str]) if fungible_asset_dict != {}: royalty_summary: Dict[Any, List[Dict[str, Any]]] = await wallet_client.nft_calculate_royalties( royalty_asset_dict, fungible_asset_dict ) total_amounts_requested: Dict[Any, int] = {} print("Royalties Summary:") for nft_id, summaries in royalty_summary.items(): print(f" - For {nft_id}:") for summary in summaries: divisor = units["flax"] if summary["asset"] == "XFX" else units["cat"] converted_amount = Decimal(summary["amount"]) / divisor total_amounts_requested.setdefault(summary["asset"], fungible_asset_dict[summary["asset"]]) total_amounts_requested[summary["asset"]] += summary["amount"] print( f" - {converted_amount} {summary['asset']} ({summary['amount']} mojos) to {summary['address']}" # noqa ) print() print("Total Amounts Requested:") for asset, amount in total_amounts_requested.items(): divisor = units["flax"] if asset == "XFX" else units["cat"] converted_amount = Decimal(amount) 
/ divisor print(f" - {converted_amount} {asset} ({amount} mojos)") print(f"Included Fees: {Decimal(offer.bundle.fees()) / units['flax']}") if not examine_only: print() confirmation = input("Would you like to take this offer? (y/n): ") if confirmation in ["y", "yes"]: trade_record = await wallet_client.take_offer(offer, fee=fee) print(f"Accepted offer with ID {trade_record.trade_id}") print(f"Use flax wallet get_offers --id {trade_record.trade_id} -f {fingerprint} to view its status") async def cancel_offer(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: id = bytes32.from_hexstr(args["id"]) secure: bool = not args["insecure"] fee: int = int(Decimal(args["fee"]) * units["flax"]) trade_record = await wallet_client.get_offer(id, file_contents=True) await print_trade_record(trade_record, wallet_client, summaries=True) confirmation = input(f"Are you sure you wish to cancel offer with ID: {trade_record.trade_id}? (y/n): ") if confirmation in ["y", "yes"]: await wallet_client.cancel_offer(id, secure=secure, fee=fee) print(f"Cancelled offer with ID {trade_record.trade_id}") if secure: print(f"Use flax wallet get_offers --id {trade_record.trade_id} -f {fingerprint} to view cancel status") def wallet_coin_unit(typ: WalletType, address_prefix: str) -> Tuple[str, int]: if typ == WalletType.CAT: return "", units["cat"] if typ in [WalletType.STANDARD_WALLET, WalletType.POOLING_WALLET, WalletType.MULTI_SIG]: return address_prefix, units["flax"] return "", units["mojo"] def print_balance(amount: int, scale: int, address_prefix: str) -> str: ret = f"{amount / scale} {address_prefix} " if scale > 1: ret += f"({amount} mojo)" return ret async def print_balances(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_type: Optional[WalletType] = None if "type" in args: wallet_type = WalletType(args["type"]) summaries_response = await wallet_client.get_wallets(wallet_type) config = load_config(DEFAULT_ROOT_PATH, "config.yaml") address_prefix 
= selected_network_address_prefix(config) is_synced: bool = await wallet_client.get_synced() is_syncing: bool = await wallet_client.get_sync_status() print(f"Wallet height: {await wallet_client.get_height_info()}") if is_syncing: print("Sync status: Syncing...") elif is_synced: print("Sync status: Synced") else: print("Sync status: Not synced") if not is_syncing and is_synced: if len(summaries_response) == 0: type_hint = " " if wallet_type is None else f" from type {wallet_type.name} " print(f"\nNo wallets{type_hint}available for fingerprint: {fingerprint}") else: print(f"Balances, fingerprint: {fingerprint}") for summary in summaries_response: indent: str = " " # asset_id currently contains both the asset ID and TAIL program bytes concatenated together. # A future RPC update may split them apart, but for now we'll show the first 32 bytes (64 chars) asset_id = summary["data"][:64] wallet_id = summary["id"] balances = await wallet_client.get_wallet_balance(wallet_id) typ = WalletType(int(summary["type"])) address_prefix, scale = wallet_coin_unit(typ, address_prefix) total_balance: str = print_balance(balances["confirmed_wallet_balance"], scale, address_prefix) unconfirmed_wallet_balance: str = print_balance( balances["unconfirmed_wallet_balance"], scale, address_prefix ) spendable_balance: str = print_balance(balances["spendable_balance"], scale, address_prefix) my_did: Optional[str] = None print() print(f"{summary['name']}:") print(f"{indent}{'-Total Balance:'.ljust(23)} {total_balance}")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/plots.py
flax/cmds/plots.py
from __future__ import annotations import asyncio import logging import sys from pathlib import Path import click from flax.plotting.util import add_plot_directory, validate_plot_size DEFAULT_STRIPE_SIZE = 65536 log = logging.getLogger(__name__) def show_plots(root_path: Path): from flax.plotting.util import get_plot_directories print("Directories where plots are being searched for:") print("Note that subdirectories must be added manually") print( "Add with 'flax plots add -d [dir]' and remove with" + " 'flax plots remove -d [dir]'" + " Scan and check plots with 'flax plots check'" ) print() for str_path in get_plot_directories(root_path): print(f"{str_path}") @click.group("plots", short_help="Manage your plots") @click.pass_context def plots_cmd(ctx: click.Context): """Create, add, remove and check your plots""" from flax.util.flax_logging import initialize_logging root_path: Path = ctx.obj["root_path"] if not root_path.is_dir(): raise RuntimeError("Please initialize (or migrate) your config directory with 'flax init'") initialize_logging("", {"log_level": "INFO", "log_stdout": True}, root_path) @plots_cmd.command("create", short_help="Create plots") @click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True) @click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True) @click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True) @click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=3389, show_default=True) @click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True) @click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True) @click.option( "-a", "--alt_fingerprint", type=int, default=None, help="Enter the alternative fingerprint of the key you want to use", ) @click.option( "-c", "--pool_contract_address", type=str, 
default=None, help="Address of where the pool reward will be sent to. Only used if alt_fingerprint and pool public key are None", ) @click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None) @click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None) @click.option( "-t", "--tmp_dir", help="Temporary directory for plotting files", type=click.Path(), default=Path("."), show_default=True, ) @click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None) @click.option( "-d", "--final_dir", help="Final directory for plots (relative or absolute)", type=click.Path(), default=Path("."), show_default=True, ) @click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None) @click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None) @click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True) @click.option( "-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True ) @click.option( "-D", "--connect_to_daemon", help="Connects to the daemon for keychain operations", default=False, is_flag=True, hidden=True, # -D is only set when launched by the daemon ) @click.pass_context def create_cmd( ctx: click.Context, size: int, override_k: bool, num: int, buffer: int, num_threads: int, buckets: int, alt_fingerprint: int, pool_contract_address: str, farmer_public_key: str, pool_public_key: str, tmp_dir: str, tmp2_dir: str, final_dir: str, plotid: str, memo: str, nobitfield: bool, exclude_final_dir: bool, connect_to_daemon: bool, ): from flax.plotting.create_plots import create_plots, resolve_plot_keys class Params(object): def __init__(self): self.size = size self.num = num self.buffer = buffer self.num_threads = num_threads self.buckets = buckets self.stripe_size = 
DEFAULT_STRIPE_SIZE self.tmp_dir = Path(tmp_dir) self.tmp2_dir = Path(tmp2_dir) if tmp2_dir else None self.final_dir = Path(final_dir) self.plotid = plotid self.memo = memo self.nobitfield = nobitfield root_path: Path = ctx.obj["root_path"] try: validate_plot_size(root_path, size, override_k) except ValueError as e: print(e) sys.exit(1) plot_keys = asyncio.run( resolve_plot_keys( farmer_public_key, alt_fingerprint, pool_public_key, pool_contract_address, root_path, log, connect_to_daemon, ) ) asyncio.run(create_plots(Params(), plot_keys)) if not exclude_final_dir: try: add_plot_directory(root_path, final_dir) except ValueError as e: print(e) @plots_cmd.command("check", short_help="Checks plots") @click.option("-n", "--num", help="Number of plots or challenges", type=int, default=None) @click.option( "-g", "--grep_string", help="Shows only plots that contain the string in the filename or directory name", type=str, default=None, ) @click.option("-l", "--list_duplicates", help="List plots with duplicate IDs", default=False, is_flag=True) @click.option("--debug-show-memo", help="Shows memo to recreate the same exact plot", default=False, is_flag=True) @click.option("--challenge-start", help="Begins at a different [start] for -n [challenges]", type=int, default=None) @click.pass_context def check_cmd( ctx: click.Context, num: int, grep_string: str, list_duplicates: bool, debug_show_memo: bool, challenge_start: int ): from flax.plotting.check_plots import check_plots check_plots(ctx.obj["root_path"], num, challenge_start, grep_string, list_duplicates, debug_show_memo) @plots_cmd.command("add", short_help="Adds a directory of plots") @click.option( "-d", "--final_dir", help="Final directory for plots (relative or absolute)", type=click.Path(), default=".", show_default=True, ) @click.pass_context def add_cmd(ctx: click.Context, final_dir: str): from flax.plotting.util import add_plot_directory try: add_plot_directory(ctx.obj["root_path"], final_dir) print(f"Successfully 
added: {final_dir}") except ValueError as e: print(e) @plots_cmd.command("remove", short_help="Removes a directory of plots from config.yaml") @click.option( "-d", "--final_dir", help="Final directory for plots (relative or absolute)", type=click.Path(), default=".", show_default=True, ) @click.pass_context def remove_cmd(ctx: click.Context, final_dir: str): from flax.plotting.util import remove_plot_directory remove_plot_directory(ctx.obj["root_path"], final_dir) @plots_cmd.command("show", short_help="Shows the directory of current plots") @click.pass_context def show_cmd(ctx: click.Context): show_plots(ctx.obj["root_path"])
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/peer.py
flax/cmds/peer.py
from __future__ import annotations from typing import Optional import click from flax.cmds.cmds_util import NODE_TYPES from flax.cmds.peer_funcs import peer_async @click.command("peer", short_help="Show, or modify peering connections", no_args_is_help=True) @click.option( "-p", "--rpc-port", help=( "Set the port where the farmer, wallet, full node or harvester " "is hosting the RPC interface. See the rpc_port in config.yaml" ), type=int, default=None, ) @click.option( "-c", "--connections", help="List nodes connected to this Full Node", is_flag=True, type=bool, default=False ) @click.option("-a", "--add-connection", help="Connect to another Full Node by ip:port", type=str, default="") @click.option( "-r", "--remove-connection", help="Remove a Node by the first 8 characters of NodeID", type=str, default="" ) @click.argument("node_type", type=click.Choice(list(NODE_TYPES.keys())), nargs=1, required=True) @click.pass_context def peer_cmd( ctx: click.Context, rpc_port: Optional[int], connections: bool, add_connection: str, remove_connection: str, node_type: str, ) -> None: import asyncio asyncio.run( peer_async( node_type, rpc_port, ctx.obj["root_path"], connections, add_connection, remove_connection, ) )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/db_validate_func.py
flax/cmds/db_validate_func.py
from __future__ import annotations from pathlib import Path from typing import Any, Dict, Optional from flax.consensus.block_record import BlockRecord from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.full_block import FullBlock from flax.util.config import load_config from flax.util.path import path_from_root def db_validate_func( root_path: Path, in_db_path: Optional[Path] = None, *, validate_blocks: bool, ) -> None: if in_db_path is None: config: Dict[str, Any] = load_config(root_path, "config.yaml")["full_node"] selected_network: str = config["selected_network"] db_pattern: str = config["database_path"] db_path_replaced: str = db_pattern.replace("CHALLENGE", selected_network) in_db_path = path_from_root(root_path, db_path_replaced) validate_v2(in_db_path, validate_blocks=validate_blocks) print(f"\n\nDATABASE IS VALID: {in_db_path}\n") def validate_v2(in_path: Path, *, validate_blocks: bool) -> None: import sqlite3 from contextlib import closing import zstd if not in_path.exists(): print(f"input file doesn't exist. 
{in_path}") raise RuntimeError(f"can't find {in_path}") print(f"opening file for reading: {in_path}") with closing(sqlite3.connect(in_path)) as in_db: # read the database version try: with closing(in_db.execute("SELECT * FROM database_version")) as cursor: row = cursor.fetchone() if row is None or row == []: raise RuntimeError("Database is missing version field") if row[0] != 2: raise RuntimeError(f"Database has the wrong version ({row[0]} expected 2)") except sqlite3.OperationalError: raise RuntimeError("Database is missing version table") try: with closing(in_db.execute("SELECT hash FROM current_peak WHERE key = 0")) as cursor: row = cursor.fetchone() if row is None or row == []: raise RuntimeError("Database is missing current_peak field") peak = bytes32(row[0]) except sqlite3.OperationalError: raise RuntimeError("Database is missing current_peak table") print(f"peak hash: {peak}") with closing(in_db.execute("SELECT height FROM full_blocks WHERE header_hash = ?", (peak,))) as cursor: peak_row = cursor.fetchone() if peak_row is None or peak_row == []: raise RuntimeError("Database is missing the peak block") peak_height = peak_row[0] print(f"peak height: {peak_height}") print("traversing the full chain") current_height = peak_height # we're looking for a block with this hash expect_hash = peak # once we find it, we know what the next block to look for is, which # this is set to next_hash = None num_orphans = 0 height_to_hash = bytearray(peak_height * 32) with closing( in_db.execute( f"SELECT header_hash, prev_hash, height, in_main_chain" f"{', block, block_record' if validate_blocks else ''} " "FROM full_blocks ORDER BY height DESC" ) ) as cursor: for row in cursor: hh = row[0] prev = row[1] height = row[2] in_main_chain = row[3] # if there are blocks being added to the database, just ignore # the ones added since we picked the peak if height > peak_height: continue if validate_blocks: block = FullBlock.from_bytes(zstd.decompress(row[4])) block_record = 
BlockRecord.from_bytes(row[5]) actual_header_hash = block.header_hash actual_prev_hash = block.prev_header_hash if actual_header_hash != hh: raise RuntimeError( f"Block {hh.hex()} has a blob with mismatching " f"hash: {actual_header_hash.hex()}" ) if block_record.header_hash != hh: raise RuntimeError( f"Block {hh.hex()} has a block record with mismatching " f"hash: {block_record.header_hash.hex()}" ) if block_record.total_iters != block.total_iters: raise RuntimeError( f"Block {hh.hex()} has a block record with mismatching total " f"iters: {block_record.total_iters} expected {block.total_iters}" ) if block_record.prev_hash != actual_prev_hash: raise RuntimeError( f"Block {hh.hex()} has a block record with mismatching " f"prev_hash: {block_record.prev_hash} expected {actual_prev_hash.hex()}" ) if block.height != height: raise RuntimeError( f"Block {hh.hex()} has a mismatching " f"height: {block.height} expected {height}" ) if height != current_height: # we're moving to the next level. Make sure we found the block # we were looking for at the previous level if next_hash is None: raise RuntimeError( f"Database is missing the block with hash {expect_hash} at height {current_height}" ) expect_hash = next_hash next_hash = None current_height = height if hh == expect_hash: if next_hash is not None: raise RuntimeError(f"Database has multiple blocks with hash {hh.hex()}, " f"at height {height}") if not in_main_chain: raise RuntimeError( f"block {hh.hex()} (height: {height}) is part of the main chain, " f"but in_main_chain is not set" ) if validate_blocks: if actual_prev_hash != prev: raise RuntimeError( f"Block {hh.hex()} has a blob with mismatching " f"prev-hash: {actual_prev_hash}, expected {prev}" ) next_hash = prev height_to_hash[height * 32 : height * 32 + 32] = hh print(f"\r{height} orphaned blocks: {num_orphans} ", end="") else: if in_main_chain: raise RuntimeError( f"block {hh.hex()} (height: {height}) is orphaned, " "but in_main_chain is set" ) num_orphans += 1 
print("") if current_height != 0: raise RuntimeError(f"Database is missing blocks below height {current_height}") # make sure the prev_hash pointer of block height 0 is the genesis # challenge if next_hash != DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA: raise RuntimeError( f"Blockchain has invalid genesis challenge {next_hash}, expected " f"{DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA.hex()}" ) if num_orphans > 0: print(f"{num_orphans} orphaned blocks")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/wallet.py
flax/cmds/wallet.py
import sys from typing import Any, Dict, List, Optional, Tuple import click from flax.cmds.plotnft import validate_fee from flax.wallet.transaction_sorting import SortKey from flax.wallet.util.address_type import AddressType from flax.wallet.util.wallet_types import WalletType from flax.cmds.cmds_util import execute_with_wallet @click.group("wallet", short_help="Manage your wallet") @click.pass_context def wallet_cmd(ctx: click.Context) -> None: pass @wallet_cmd.command("get_transaction", short_help="Get a transaction") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True) @click.option("-tx", "--tx_id", help="transaction id to search for", type=str, required=True) @click.option("--verbose", "-v", count=True, type=int) def get_transaction_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, tx_id: str, verbose: int) -> None: extra_params = {"id": id, "tx_id": tx_id, "verbose": verbose} import asyncio from .wallet_funcs import get_transaction asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transaction)) @wallet_cmd.command("get_transactions", short_help="Get all transactions") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True) @click.option( "-o", "--offset", help="Skip transactions from the beginning of the list", type=int, default=0, show_default=True, required=True, ) @click.option( "-l", "--limit", help="Max number of transactions to return", type=int, default=(2**32 - 1), show_default=True, required=False, ) @click.option("--verbose", "-v", count=True, type=int) @click.option( "--paginate/--no-paginate", default=None, help="Prompt for each page of data. Defaults to true for interactive consoles, otherwise false.", ) @click.option( "--sort-by-height", "sort_key", flag_value=SortKey.CONFIRMED_AT_HEIGHT, type=SortKey, help="Sort transactions by height", ) @click.option( "--sort-by-relevance", "sort_key", flag_value=SortKey.RELEVANCE, type=SortKey, default=True, help="Sort transactions by {confirmed, height, time}", ) @click.option( "--reverse", is_flag=True, default=False, help="Reverse the transaction ordering", ) def get_transactions_cmd( wallet_rpc_port: Optional[int], fingerprint: int, id: int, offset: int, limit: int, verbose: bool, paginate: Optional[bool], sort_key: SortKey, reverse: bool, ) -> None: extra_params = { "id": id, "verbose": verbose, "offset": offset, "paginate": paginate, "limit": limit, "sort_key": sort_key, "reverse": reverse, } import asyncio from .wallet_funcs import get_transactions asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transactions)) # The flush/close avoids output like below when piping through `head -n 1` # which will close stdout. 
# # Exception ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> # BrokenPipeError: [Errno 32] Broken pipe sys.stdout.flush() sys.stdout.close() @wallet_cmd.command("send", short_help="Send flax to another wallet") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True) @click.option("-a", "--amount", help="How much flax to send, in XFX", type=str, required=True) @click.option("-e", "--memo", help="Additional memo for the transaction", type=str, default=None) @click.option( "-m", "--fee", help="Set the fees for the transaction, in XFX", type=str, default="0", show_default=True, required=True, ) @click.option("-t", "--address", help="Address to send the XFX", type=str, required=True) @click.option( "-o", "--override", help="Submits transaction without checking for unusual values", is_flag=True, default=False ) @click.option( "-ma", "--min_coin_amount", help="Ignore coins worth less then this much XFX or CAT units", type=str, required=False, default="0", ) def send_cmd( wallet_rpc_port: Optional[int], fingerprint: int, id: int, amount: str, memo: Optional[str], fee: str, address: str, override: bool, min_coin_amount: str, ) -> None: extra_params = { "id": id, "amount": amount, "memo": memo, "fee": fee, "address": address, "override": override, "min_coin_amount": min_coin_amount, } import asyncio from .wallet_funcs import send asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, send)) @wallet_cmd.command("show", short_help="Show wallet information") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option( "-w", "--wallet_type", help="Choose a specific wallet type to return", type=click.Choice([x.name.lower() for x in WalletType]), default=None, ) def show_cmd(wallet_rpc_port: Optional[int], fingerprint: int, wallet_type: Optional[str]) -> None: import asyncio from .wallet_funcs import print_balances args: Dict[str, Any] = {} if wallet_type is not None: args["type"] = WalletType[wallet_type.upper()] asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, args, print_balances)) @wallet_cmd.command("get_address", short_help="Get a wallet receive address") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option( "-n/-l", "--new-address/--latest-address", help=( "Create a new wallet receive address, or show the most recently created wallet receive address" " [default: show most recent address]" ), is_flag=True, default=False, ) def get_address_cmd(wallet_rpc_port: Optional[int], id, fingerprint: int, new_address: bool) -> None: extra_params = {"id": id, "new_address": new_address} import asyncio from .wallet_funcs import get_address asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_address)) @wallet_cmd.command( "delete_unconfirmed_transactions", short_help="Deletes all unconfirmed transactions for this wallet ID" ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) def delete_unconfirmed_transactions_cmd(wallet_rpc_port: Optional[int], id, fingerprint: int) -> None: extra_params = {"id": id} import asyncio from .wallet_funcs import delete_unconfirmed_transactions asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, delete_unconfirmed_transactions)) @wallet_cmd.command("get_derivation_index", short_help="Get the last puzzle hash derivation path index") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) def get_derivation_index_cmd(wallet_rpc_port: Optional[int], fingerprint: int) -> None: extra_params: Dict[str, Any] = {} import asyncio from .wallet_funcs import get_derivation_index asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_derivation_index)) @wallet_cmd.command("sign_message", short_help="Sign a message by a derivation address") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-a", "--address", help="The address you want to use for signing", type=str, required=True) @click.option("-m", "--hex_message", help="The hex message you want sign", type=str, required=True) def address_sign_message(wallet_rpc_port: Optional[int], fingerprint: int, address: str, hex_message: str) -> None: extra_params: Dict[str, Any] = {"address": address, "message": hex_message, "type": AddressType.XFX} import asyncio from .wallet_funcs import sign_message asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, sign_message)) @wallet_cmd.command( "update_derivation_index", short_help="Generate additional derived puzzle hashes starting at the provided index" ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option( "-i", "--index", help="Index to set. Must be greater than the current derivation index", type=int, required=True ) def update_derivation_index_cmd(wallet_rpc_port: Optional[int], fingerprint: int, index: int) -> None: extra_params = {"index": index} import asyncio from .wallet_funcs import update_derivation_index asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, update_derivation_index)) @wallet_cmd.command("add_token", short_help="Add/Rename a CAT to the wallet by its asset ID") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option( "-id", "--asset-id", help="The Asset ID of the coin you wish to add/rename (the treehash of the TAIL program)", required=True, ) @click.option( "-n", "--token-name", help="The name you wish to designate to the token", ) @click.option( "-f", "--fingerprint", type=int, default=None, help="The wallet fingerprint you wish to add the token to", ) def add_token_cmd(wallet_rpc_port: Optional[int], asset_id: str, token_name: str, fingerprint: int) -> None: extra_params = {"asset_id": asset_id, "token_name": token_name} import asyncio from .wallet_funcs import add_token asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, add_token)) @wallet_cmd.command("make_offer", short_help="Create an offer of XFX/CATs for XFX/CATs") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option( "-o", "--offer", help="A wallet id to offer and the amount to offer (formatted like wallet_id:amount)", required=True, multiple=True, ) @click.option( "-r", "--request", help="A wallet id of an asset to receive and the amount you wish to receive (formatted like wallet_id:amount)", required=True, multiple=True, ) @click.option("-p", "--filepath", help="The path to write the generated offer file to", required=True) @click.option("-m", "--fee", help="A fee to add to the offer when it gets taken", default="0") def make_offer_cmd( wallet_rpc_port: Optional[int], fingerprint: int, offer: Tuple[str], request: Tuple[str], filepath: str, fee: str ) -> None: extra_params = {"offers": offer, "requests": request, "filepath": filepath, "fee": fee} import asyncio from .wallet_funcs import make_offer asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, 
extra_params, make_offer)) @wallet_cmd.command( "get_offers", short_help="Get the status of existing offers. Displays only active/pending offers by default." ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-id", "--id", help="The ID of the offer that you wish to examine") @click.option("-p", "--filepath", help="The path to rewrite the offer file to (must be used in conjunction with --id)") @click.option("-em", "--exclude-my-offers", help="Exclude your own offers from the output", is_flag=True) @click.option("-et", "--exclude-taken-offers", help="Exclude offers that you've accepted from the output", is_flag=True) @click.option( "-ic", "--include-completed", help="Include offers that have been confirmed/cancelled or failed", is_flag=True ) @click.option("-s", "--summaries", help="Show the assets being offered and requested for each offer", is_flag=True) @click.option("-r", "--reverse", help="Reverse the order of the output", is_flag=True) def get_offers_cmd( wallet_rpc_port: Optional[int], fingerprint: int, id: Optional[str], filepath: Optional[str], exclude_my_offers: bool, exclude_taken_offers: bool, include_completed: bool, summaries: bool, reverse: bool, ) -> None: extra_params = { "id": id, "filepath": filepath, "exclude_my_offers": exclude_my_offers, "exclude_taken_offers": exclude_taken_offers, "include_completed": include_completed, "summaries": summaries, "reverse": reverse, } import asyncio from .wallet_funcs import get_offers asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_offers)) @wallet_cmd.command("take_offer", short_help="Examine or take an offer") @click.argument("path_or_hex", type=str, nargs=1, required=True) @click.option( "-wp", "--wallet-rpc-port", help="Set the 
port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-e", "--examine-only", help="Print the summary of the offer file but do not take it", is_flag=True) @click.option("-m", "--fee", help="The fee to use when pushing the completed offer", default="0") def take_offer_cmd( path_or_hex: str, wallet_rpc_port: Optional[int], fingerprint: int, examine_only: bool, fee: str ) -> None: extra_params = {"file": path_or_hex, "examine_only": examine_only, "fee": fee} import asyncio from .wallet_funcs import take_offer asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, take_offer)) @wallet_cmd.command("cancel_offer", short_help="Cancel an existing offer") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-id", "--id", help="The offer ID that you wish to cancel") @click.option("--insecure", help="Don't make an on-chain transaction, simply mark the offer as cancelled", is_flag=True) @click.option("-m", "--fee", help="The fee to use when cancelling the offer securely", default="0") def cancel_offer_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: str, insecure: bool, fee: str) -> None: extra_params = {"id": id, "insecure": insecure, "fee": fee} import asyncio from .wallet_funcs import cancel_offer asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, cancel_offer)) @wallet_cmd.group("did", short_help="DID related actions") def did_cmd(): pass @did_cmd.command("create", short_help="Create DID wallet") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting 
the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-n", "--name", help="Set the DID wallet name", type=str) @click.option( "-a", "--amount", help="Set the DID amount in mojos. Value must be an odd number.", type=int, default=1, show_default=True, ) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX.", type=str, default="0", show_default=True, callback=validate_fee, ) def did_create_wallet_cmd( wallet_rpc_port: Optional[int], fingerprint: int, name: Optional[str], amount: Optional[int], fee: Optional[int] ) -> None: import asyncio from .wallet_funcs import create_did_wallet extra_params = {"amount": amount, "fee": fee, "name": name} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, create_did_wallet)) @did_cmd.command("sign_message", short_help="Sign a message by a DID") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--did_id", help="DID ID you want to use for signing", type=str, required=True) @click.option("-m", "--hex_message", help="The hex message you want to sign", type=str, required=True) def did_sign_message(wallet_rpc_port: Optional[int], fingerprint: int, did_id: str, hex_message: str) -> None: extra_params: Dict[str, Any] = {"did_id": did_id, "message": hex_message, "type": AddressType.DID} import asyncio from .wallet_funcs import sign_message asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, sign_message)) @did_cmd.command("set_name", short_help="Set DID wallet name") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the wallet to use", type=int, required=True) @click.option("-n", "--name", help="Set the DID wallet name", type=str, required=True) def did_wallet_name_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, name: str) -> None: import asyncio from .wallet_funcs import did_set_wallet_name extra_params = {"wallet_id": id, "name": name} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, did_set_wallet_name)) @did_cmd.command("get_did", short_help="Get DID from wallet") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the wallet to use", type=int, required=True) def did_get_did_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int) -> None: import asyncio from .wallet_funcs import get_did extra_params = {"did_wallet_id": id} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_did)) @wallet_cmd.group("nft", short_help="NFT related actions") def nft_cmd(): pass @nft_cmd.command("create", short_help="Create an NFT wallet") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-di", "--did-id", help="DID Id to use", type=str) @click.option("-n", "--name", help="Set the NFT wallet name", type=str) def nft_wallet_create_cmd( wallet_rpc_port: Optional[int], fingerprint: int, did_id: Optional[str], name: Optional[str] ) -> None: import asyncio from .wallet_funcs import create_nft_wallet extra_params: Dict[str, Any] = {"did_id": did_id, "name": name} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, create_nft_wallet)) @nft_cmd.command("sign_message", short_help="Sign a message by a NFT") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--nft_id", help="NFT ID you want to use for signing", type=str, required=True) @click.option("-m", "--hex_message", help="The hex message you want to sign", type=str, required=True) def nft_sign_message(wallet_rpc_port: Optional[int], fingerprint: int, nft_id: str, hex_message: str) -> None: extra_params: Dict[str, Any] = {"nft_id": nft_id, "message": hex_message, "type": AddressType.NFT} import asyncio from .wallet_funcs import sign_message asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, sign_message)) @nft_cmd.command("mint", short_help="Mint an NFT") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the NFT wallet to use", type=int, required=True) @click.option("-ra", "--royalty-address", help="Royalty address", type=str) @click.option("-ta", "--target-address", help="Target address", type=str) @click.option("--no-did-ownership", help="Disable DID ownership support", is_flag=True, default=False) @click.option("-nh", "--hash", help="NFT content hash", type=str, required=True) @click.option("-u", "--uris", help="Comma separated list of URIs", type=str, required=True) @click.option("-mh", "--metadata-hash", help="NFT metadata hash", type=str, default="") @click.option("-mu", "--metadata-uris", help="Comma separated list of metadata URIs", type=str) @click.option("-lh", "--license-hash", help="NFT license hash", type=str, default="") @click.option("-lu", "--license-uris", help="Comma separated list of license URIs", type=str) @click.option("-et", "--edition-total", 
help="NFT edition total", type=int, show_default=True, default=1) @click.option("-en", "--edition-number", help="NFT edition number", show_default=True, default=1, type=int) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX.", type=str, default="0", show_default=True, callback=validate_fee, ) @click.option( "-rp", "--royalty-percentage-fraction", help="NFT royalty percentage fraction in basis points. Example: 175 would represent 1.75%", type=int, default=0, show_default=True, ) def nft_mint_cmd( wallet_rpc_port: Optional[int], fingerprint: int, id: int, royalty_address: Optional[str], target_address: Optional[str], no_did_ownership: bool, hash: str, uris: str, metadata_hash: Optional[str], metadata_uris: Optional[str], license_hash: Optional[str], license_uris: Optional[str], edition_total: Optional[int], edition_number: Optional[int], fee: str, royalty_percentage_fraction: int, ) -> None: import asyncio from .wallet_funcs import mint_nft if metadata_uris is None: metadata_uris_list = [] else: metadata_uris_list = [mu.strip() for mu in metadata_uris.split(",")] if license_uris is None: license_uris_list = [] else: license_uris_list = [lu.strip() for lu in license_uris.split(",")] extra_params = { "wallet_id": id, "royalty_address": royalty_address, "target_address": target_address, "no_did_ownership": no_did_ownership, "hash": hash, "uris": [u.strip() for u in uris.split(",")], "metadata_hash": metadata_hash, "metadata_uris": metadata_uris_list, "license_hash": license_hash, "license_uris": license_uris_list, "edition_total": edition_total, "edition_number": edition_number, "fee": fee, "royalty_percentage": royalty_percentage_fraction, } asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, mint_nft)) @nft_cmd.command("add_uri", short_help="Add an URI to an NFT") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the NFT wallet to use", type=int, required=True) @click.option("-ni", "--nft-coin-id", help="Id of the NFT coin to add the URI to", type=str, required=True) @click.option("-u", "--uri", help="URI to add to the NFT", type=str) @click.option("-mu", "--metadata-uri", help="Metadata URI to add to the NFT", type=str) @click.option("-lu", "--license-uri", help="License URI to add to the NFT", type=str) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX.", type=str, default="0", show_default=True, callback=validate_fee, ) def nft_add_uri_cmd( wallet_rpc_port: Optional[int], fingerprint: int, id: int, nft_coin_id: str, uri: str, metadata_uri: str, license_uri: str, fee: str, ) -> None: import asyncio from .wallet_funcs import add_uri_to_nft extra_params = { "wallet_id": id, "nft_coin_id": nft_coin_id, "uri": uri, "metadata_uri": metadata_uri, "license_uri": license_uri, "fee": fee, } asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, add_uri_to_nft)) @nft_cmd.command("transfer", short_help="Transfer an NFT") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the NFT wallet to use", type=int, required=True) @click.option("-ni", "--nft-coin-id", help="Id of the NFT coin to transfer", type=str, required=True) @click.option("-ta", "--target-address", help="Target recipient wallet address", type=str, required=True) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX.", type=str, default="0", show_default=True, callback=validate_fee, ) def nft_transfer_cmd( wallet_rpc_port: Optional[int], fingerprint: int, id: int, nft_coin_id: str, target_address: str, fee: str, ) -> None: import asyncio from .wallet_funcs import transfer_nft extra_params = { "wallet_id": id, "nft_coin_id": nft_coin_id, "target_address": target_address, "fee": fee, } asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, transfer_nft)) @nft_cmd.command("list", short_help="List the current NFTs") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the NFT wallet to use", type=int, required=True) def nft_list_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int) -> None: import asyncio from .wallet_funcs import list_nfts extra_params = {"wallet_id": id} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, list_nfts)) @nft_cmd.command("set_did", short_help="Set a DID on an NFT") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-i", "--id", help="Id of the NFT wallet to use", type=int, required=True) @click.option("-di", "--did-id", help="DID Id to set on the NFT", type=str, required=True) @click.option("-ni", "--nft-coin-id", help="Id of the NFT coin to set the DID on", type=str, required=True) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX.", type=str, default="0", show_default=True, callback=validate_fee, ) def nft_set_did_cmd( wallet_rpc_port: Optional[int], fingerprint: int, id: int, did_id: str, nft_coin_id: str, fee: str, ) -> None: import asyncio from .wallet_funcs import set_nft_did extra_params = { "wallet_id": id, "did_id": did_id, "nft_coin_id": nft_coin_id, "fee": fee, } asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, set_nft_did)) @nft_cmd.command("get_info", short_help="Get NFT information") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-ni", "--nft-coin-id", help="Id of the NFT coin to get information on", type=str, required=True) def nft_get_info_cmd( wallet_rpc_port: Optional[int], fingerprint: int, nft_coin_id: str, ) -> None: import asyncio from .wallet_funcs import get_nft_info extra_params = { "nft_coin_id": nft_coin_id, } asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_nft_info)) @wallet_cmd.group("notifications", short_help="Send/Manage notifications") def notification_cmd(): pass @notification_cmd.command("send", short_help="Send a notification to the owner of an address") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int,
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
true
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/cmds_util.py
flax/cmds/cmds_util.py
from __future__ import annotations from contextlib import asynccontextmanager from pathlib import Path from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Optional, Tuple, Type from aiohttp import ClientConnectorError from flax.rpc.farmer_rpc_client import FarmerRpcClient from flax.rpc.full_node_rpc_client import FullNodeRpcClient from flax.rpc.harvester_rpc_client import HarvesterRpcClient from flax.rpc.rpc_client import RpcClient from flax.rpc.wallet_rpc_client import WalletRpcClient from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.mempool_submission_status import MempoolSubmissionStatus from flax.util.config import load_config from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.ints import uint16 from flax.wallet.transaction_record import TransactionRecord NODE_TYPES: Dict[str, Type[RpcClient]] = { "farmer": FarmerRpcClient, "wallet": WalletRpcClient, "full_node": FullNodeRpcClient, "harvester": HarvesterRpcClient, } def transaction_submitted_msg(tx: TransactionRecord) -> str: sent_to = [MempoolSubmissionStatus(s[0], s[1], s[2]).to_json_dict_convenience() for s in tx.sent_to] return f"Transaction submitted to nodes: {sent_to}" def transaction_status_msg(fingerprint: int, tx_id: bytes32) -> str: return f"Run 'flax wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status" async def validate_client_connection( rpc_client: RpcClient, node_type: str, rpc_port: int, fingerprint: Optional[int], login_to_wallet: bool ) -> Optional[int]: try: await rpc_client.healthz() if type(rpc_client) == WalletRpcClient and login_to_wallet: fingerprint = await get_wallet(rpc_client, fingerprint) if fingerprint is None: rpc_client.close() except ClientConnectorError: print(f"Connection error. 
Check if {node_type.replace('_', ' ')} rpc is running at {rpc_port}") print(f"This is normal if {node_type.replace('_', ' ')} is still starting up") rpc_client.close() await rpc_client.await_closed() # if close is not already called this does nothing return fingerprint @asynccontextmanager async def get_any_service_client( node_type: str, rpc_port: Optional[int] = None, root_path: Path = DEFAULT_ROOT_PATH, fingerprint: Optional[int] = None, login_to_wallet: bool = True, ) -> AsyncIterator[Tuple[Optional[Any], Dict[str, Any], Optional[int]]]: """ Yields a tuple with a RpcClient for the applicable node type a dictionary of the node's configuration, and a fingerprint if applicable. However, if connecting to the node fails then we will return None for the RpcClient. """ if node_type not in NODE_TYPES.keys(): # Click already checks this, so this should never happen raise ValueError(f"Invalid node type: {node_type}") # load variables from config file config = load_config(root_path, "config.yaml") self_hostname = config["self_hostname"] if rpc_port is None: rpc_port = config[node_type]["rpc_port"] # select node client type based on string node_client = await NODE_TYPES[node_type].create(self_hostname, uint16(rpc_port), root_path, config) try: # check if we can connect to node, and if we can then validate # fingerprint access, otherwise return fingerprint and shutdown client fingerprint = await validate_client_connection(node_client, node_type, rpc_port, fingerprint, login_to_wallet) if node_client.session.closed: yield None, config, fingerprint else: yield node_client, config, fingerprint except Exception as e: # this is only here to make the errors more user-friendly. print(f"Exception from '{node_type}' {e}") finally: node_client.close() # this can run even if already closed, will just do nothing. 
await node_client.await_closed() async def get_wallet(wallet_client: WalletRpcClient, fingerprint: Optional[int]) -> Optional[int]: if fingerprint is not None: fingerprints = [fingerprint] else: fingerprints = await wallet_client.get_public_keys() if len(fingerprints) == 0: print("No keys loaded. Run 'flax keys generate' or import a key") return None if len(fingerprints) == 1: fingerprint = fingerprints[0] if fingerprint is not None: log_in_response = await wallet_client.log_in(fingerprint) else: logged_in_fingerprint: Optional[int] = await wallet_client.get_logged_in_fingerprint() spacing: str = " " if logged_in_fingerprint is not None else "" current_sync_status: str = "" if logged_in_fingerprint is not None: if await wallet_client.get_synced(): current_sync_status = "Synced" elif await wallet_client.get_sync_status(): current_sync_status = "Syncing" else: current_sync_status = "Not Synced" print("Wallet keys:") for i, fp in enumerate(fingerprints): row: str = f"{i + 1}) " row += "* " if fp == logged_in_fingerprint else spacing row += f"{fp}" if fp == logged_in_fingerprint and len(current_sync_status) > 0: row += f" ({current_sync_status})" print(row) val = None prompt: str = ( f"Choose a wallet key [1-{len(fingerprints)}] ('q' to quit, or Enter to use {logged_in_fingerprint}): " ) while val is None: val = input(prompt) if val == "q": return None elif val == "" and logged_in_fingerprint is not None: fingerprint = logged_in_fingerprint break elif not val.isdigit(): val = None else: index = int(val) - 1 if index < 0 or index >= len(fingerprints): print("Invalid value") val = None continue else: fingerprint = fingerprints[index] assert fingerprint is not None log_in_response = await wallet_client.log_in(fingerprint) if log_in_response["success"] is False: print(f"Login failed: {log_in_response}") return None return fingerprint async def execute_with_wallet( wallet_rpc_port: Optional[int], fingerprint: int, extra_params: Dict[str, Any], function: Callable[[Dict[str, 
Any], WalletRpcClient, int], Awaitable[None]], ) -> None: wallet_client: Optional[WalletRpcClient] async with get_any_service_client("wallet", wallet_rpc_port, fingerprint=fingerprint) as node_config_fp: wallet_client, _, new_fp = node_config_fp if wallet_client is not None: assert new_fp is not None # wallet only sanity check await function(extra_params, wallet_client, new_fp)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/db.py
flax/cmds/db.py
from pathlib import Path import click from flax.cmds.db_upgrade_func import db_upgrade_func from flax.cmds.db_validate_func import db_validate_func from flax.cmds.db_backup_func import db_backup_func @click.group("db", short_help="Manage the blockchain database") def db_cmd() -> None: pass @db_cmd.command("upgrade", short_help="upgrade a v1 database to v2") @click.option("--input", default=None, type=click.Path(), help="specify input database file") @click.option("--output", default=None, type=click.Path(), help="specify output database file") @click.option( "--no-update-config", default=False, is_flag=True, help="don't update config file to point to new database. When specifying a " "custom output file, the config will not be updated regardless", ) @click.option( "--force", default=False, is_flag=True, help="force conversion despite warnings", ) @click.pass_context def db_upgrade_cmd(ctx: click.Context, no_update_config: bool, force: bool, **kwargs) -> None: try: in_db_path = kwargs.get("input") out_db_path = kwargs.get("output") db_upgrade_func( Path(ctx.obj["root_path"]), None if in_db_path is None else Path(in_db_path), None if out_db_path is None else Path(out_db_path), no_update_config=no_update_config, force=force, ) except RuntimeError as e: print(f"FAILED: {e}") @db_cmd.command("validate", short_help="validate the (v2) blockchain database. 
Does not verify proofs") @click.option("--db", default=None, type=click.Path(), help="Specifies which database file to validate") @click.option( "--validate-blocks", default=False, is_flag=True, help="validate consistency of properties of the encoded blocks and block records", ) @click.pass_context def db_validate_cmd(ctx: click.Context, validate_blocks: bool, **kwargs) -> None: try: in_db_path = kwargs.get("db") db_validate_func( Path(ctx.obj["root_path"]), None if in_db_path is None else Path(in_db_path), validate_blocks=validate_blocks, ) except RuntimeError as e: print(f"FAILED: {e}") @db_cmd.command("backup", short_help="backup the blockchain database using VACUUM INTO command") @click.option("--backup_file", default=None, type=click.Path(), help="Specifies the backup file") @click.option("--no_indexes", default=False, is_flag=True, help="Create backup without indexes") @click.pass_context def db_backup_cmd(ctx: click.Context, no_indexes: bool, **kwargs) -> None: try: db_backup_file = kwargs.get("backup_file") db_backup_func( Path(ctx.obj["root_path"]), None if db_backup_file is None else Path(db_backup_file), no_indexes=no_indexes, ) except RuntimeError as e: print(f"FAILED: {e}")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/rpc.py
flax/cmds/rpc.py
from __future__ import annotations import asyncio import json import sys from typing import Any, Dict, List, Optional, TextIO import click from aiohttp import ClientResponseError from flax.util.config import load_config from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.ints import uint16 services: List[str] = ["crawler", "farmer", "full_node", "harvester", "timelord", "wallet", "data_layer"] async def call_endpoint(service: str, endpoint: str, request: Dict[str, Any], config: Dict[str, Any]) -> Dict[str, Any]: from flax.rpc.rpc_client import RpcClient port: uint16 if service == "crawler": # crawler config is inside the seeder config port = uint16(config["seeder"][service]["rpc_port"]) else: port = uint16(config[service]["rpc_port"]) try: client = await RpcClient.create(config["self_hostname"], port, DEFAULT_ROOT_PATH, config) except Exception as e: raise Exception(f"Failed to create RPC client {service}: {e}") result: Dict[str, Any] try: result = await client.fetch(endpoint, request) except ClientResponseError as e: if e.code == 404: raise Exception(f"Invalid endpoint for {service}: {endpoint}") raise except Exception as e: raise Exception(f"Request failed: {e}") finally: client.close() await client.await_closed() return result def print_result(json_dict: Dict[str, Any]) -> None: print(json.dumps(json_dict, indent=4, sort_keys=True)) def get_routes(service: str, config: Dict[str, Any]) -> Dict[str, Any]: return asyncio.run(call_endpoint(service, "get_routes", {}, config)) @click.group("rpc", short_help="RPC Client") def rpc_cmd() -> None: pass @rpc_cmd.command("endpoints", help="Print all endpoints of a service") @click.argument("service", type=click.Choice(services)) def endpoints_cmd(service: str) -> None: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") try: routes = get_routes(service, config) for route in routes["routes"]: print(route[1:]) except Exception as e: print(e) @rpc_cmd.command("status", help="Print the status of all available 
RPC services") def status_cmd() -> None: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") def print_row(c0: str, c1: str) -> None: c0 = "{0:<12}".format(f"{c0}") c1 = "{0:<9}".format(f"{c1}") print(f"{c0} | {c1}") print_row("SERVICE", "STATUS") print_row("------------", "---------") for service in services: status = "ACTIVE" try: if not get_routes(service, config)["success"]: raise Exception() except Exception: status = "INACTIVE" print_row(service, status) def create_commands() -> None: for service in services: @rpc_cmd.command( service, short_help=f"RPC client for the {service} RPC API", help=( f"Call ENDPOINT (RPC endpoint as as string) of the {service} " "RPC API with REQUEST (must be a JSON string) as request data." ), ) @click.argument("endpoint", type=str) @click.argument("request", type=str, required=False) @click.option( "-j", "--json-file", help="Optionally instead of REQUEST you can provide a json file containing the request data", type=click.File("r"), default=None, ) def rpc_client_cmd( endpoint: str, request: Optional[str], json_file: Optional[TextIO], service: str = service ) -> None: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") if request is not None and json_file is not None: sys.exit( "Can only use one request source: REQUEST argument OR -j/--json-file option. See the help with -h" ) request_json: Dict[str, Any] = {} if json_file is not None: try: request_json = json.load(json_file) except Exception as e: sys.exit(f"Invalid JSON file: {e}") if request is not None: try: request_json = json.loads(request) except Exception as e: sys.exit(f"Invalid REQUEST JSON: {e}") try: if endpoint[0] == "/": endpoint = endpoint[1:] print_result(asyncio.run(call_endpoint(service, endpoint, request_json, config))) except Exception as e: sys.exit(e) create_commands()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/units.py
flax/cmds/units.py
from __future__ import annotations from typing import Dict # The rest of the codebase uses mojos everywhere. # Only use these units for user facing interfaces. units: Dict[str, int] = { "flax": 10**12, # 1 flax (XFX) is 1,000,000,000,000 mojo (1 trillion) "mojo": 1, "cat": 10**3, # 1 CAT is 1000 CAT mojos }
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/show_funcs.py
flax/cmds/show_funcs.py
from __future__ import annotations from pathlib import Path from typing import Any, Dict, List, Optional, Union from flax.rpc.full_node_rpc_client import FullNodeRpcClient async def print_blockchain_state(node_client: FullNodeRpcClient, config: Dict[str, Any]) -> bool: import time from flax.consensus.block_record import BlockRecord from flax.util.ints import uint64 from flax.util.misc import format_bytes blockchain_state = await node_client.get_blockchain_state() if blockchain_state is None: print("There is no blockchain found yet. Try again shortly") return True peak: Optional[BlockRecord] = blockchain_state["peak"] node_id = blockchain_state["node_id"] difficulty = blockchain_state["difficulty"] sub_slot_iters = blockchain_state["sub_slot_iters"] synced = blockchain_state["sync"]["synced"] sync_mode = blockchain_state["sync"]["sync_mode"] num_blocks: int = 10 network_name = config["selected_network"] genesis_challenge = config["farmer"]["network_overrides"]["constants"][network_name]["GENESIS_CHALLENGE"] full_node_port = config["full_node"]["port"] full_node_rpc_port = config["full_node"]["rpc_port"] print(f"Network: {network_name} Port: {full_node_port} RPC Port: {full_node_rpc_port}") print(f"Node ID: {node_id}") print(f"Genesis Challenge: {genesis_challenge}") if synced: print("Current Blockchain Status: Full Node Synced") print("\nPeak: Hash:", peak.header_hash if peak is not None else "") elif peak is not None and sync_mode: sync_max_block = blockchain_state["sync"]["sync_tip_height"] sync_current_block = blockchain_state["sync"]["sync_progress_height"] print( f"Current Blockchain Status: Syncing {sync_current_block}/{sync_max_block} " f"({sync_max_block - sync_current_block} behind)." ) print("Peak: Hash:", peak.header_hash if peak is not None else "") elif peak is not None: print(f"Current Blockchain Status: Not Synced. 
Peak height: {peak.height}") else: print("\nSearching for an initial chain\n") print("You may be able to expedite with 'flax show -a host:port' using a known node.\n") if peak is not None: if peak.is_transaction_block: peak_time = peak.timestamp else: peak_hash = peak.header_hash curr = await node_client.get_block_record(peak_hash) while curr is not None and not curr.is_transaction_block: curr = await node_client.get_block_record(curr.prev_hash) if curr is not None: peak_time = curr.timestamp else: peak_time = uint64(0) peak_time_struct = time.struct_time(time.localtime(peak_time)) print( " Time:", f"{time.strftime('%a %b %d %Y %T %Z', peak_time_struct)}", f" Height: {peak.height:>10}\n", ) print("Estimated network space: ", end="") print(format_bytes(blockchain_state["space"])) print(f"Current difficulty: {difficulty}") print(f"Current VDF sub_slot_iters: {sub_slot_iters}") print("\n Height: | Hash:") added_blocks: List[BlockRecord] = [] curr = await node_client.get_block_record(peak.header_hash) while curr is not None and len(added_blocks) < num_blocks and curr.height > 0: added_blocks.append(curr) curr = await node_client.get_block_record(curr.prev_hash) for b in added_blocks: print(f"{b.height:>9} | {b.header_hash}") else: print("Blockchain has no blocks yet") return False async def print_block_from_hash( node_client: FullNodeRpcClient, config: Dict[str, Any], block_by_header_hash: str ) -> None: import time from flax.consensus.block_record import BlockRecord from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.full_block import FullBlock from flax.util.bech32m import encode_puzzle_hash from flax.util.byte_types import hexstr_to_bytes block: Optional[BlockRecord] = await node_client.get_block_record(hexstr_to_bytes(block_by_header_hash)) full_block: Optional[FullBlock] = await node_client.get_block(hexstr_to_bytes(block_by_header_hash)) # Would like to have a verbose flag for this if block is not None: assert full_block is not None 
prev_b = await node_client.get_block_record(block.prev_hash) if prev_b is not None: difficulty = block.weight - prev_b.weight else: difficulty = block.weight if block.is_transaction_block: assert full_block.transactions_info is not None block_time = time.struct_time( time.localtime( full_block.foliage_transaction_block.timestamp if full_block.foliage_transaction_block else None ) ) block_time_string = time.strftime("%a %b %d %Y %T %Z", block_time) cost = str(full_block.transactions_info.cost) tx_filter_hash: Union[str, bytes32] = "Not a transaction block" if full_block.foliage_transaction_block: tx_filter_hash = full_block.foliage_transaction_block.filter_hash fees: Any = block.fees else: block_time_string = "Not a transaction block" cost = "Not a transaction block" tx_filter_hash = "Not a transaction block" fees = "Not a transaction block" address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"] farmer_address = encode_puzzle_hash(block.farmer_puzzle_hash, address_prefix) pool_address = encode_puzzle_hash(block.pool_puzzle_hash, address_prefix) pool_pk = ( full_block.reward_chain_block.proof_of_space.pool_public_key if full_block.reward_chain_block.proof_of_space.pool_public_key is not None else "Pay to pool puzzle hash" ) print( f"Block Height {block.height}\n" f"Header Hash 0x{block.header_hash.hex()}\n" f"Timestamp {block_time_string}\n" f"Weight {block.weight}\n" f"Previous Block 0x{block.prev_hash.hex()}\n" f"Difficulty {difficulty}\n" f"Sub-slot iters {block.sub_slot_iters}\n" f"Cost {cost}\n" f"Total VDF Iterations {block.total_iters}\n" f"Is a Transaction Block?{block.is_transaction_block}\n" f"Deficit {block.deficit}\n" f"PoSpace 'k' Size {full_block.reward_chain_block.proof_of_space.size}\n" f"Plot Public Key 0x{full_block.reward_chain_block.proof_of_space.plot_public_key}\n" f"Pool Public Key {pool_pk}\n" f"Tx Filter Hash {tx_filter_hash}\n" f"Farmer Address {farmer_address}\n" f"Pool Address {pool_address}\n" 
f"Fees Amount {fees}\n" ) else: print("Block with header hash", block_by_header_hash, "not found") async def print_fee_info(node_client: FullNodeRpcClient) -> None: target_times = [60, 120, 300] target_times_names = ["1 minute", "2 minutes", "5 minutes"] res = await node_client.get_fee_estimate(target_times=target_times, cost=1) print(f" Mempool max size: {res['mempool_max_size']:>12} CLVM cost") print(f" Mempool size: {res['mempool_size']:>12} CLVM cost") print(f" Current Fee Rate: {res['current_fee_rate']:>12} mojo per CLVM cost") print("\nFee Rate Estimates:") max_name_len = max(len(name) for name in target_times_names) for (n, e) in zip(target_times_names, res["estimates"]): print(f" {n:>{max_name_len}}: {e} mojo per CLVM cost") print("") async def show_async( rpc_port: Optional[int], root_path: Path, print_fee_info_flag: bool, print_state: bool, block_header_hash_by_height: str, block_by_header_hash: str, ) -> None: from flax.cmds.cmds_util import get_any_service_client node_client: Optional[FullNodeRpcClient] async with get_any_service_client("full_node", rpc_port, root_path) as node_config_fp: node_client, config, _ = node_config_fp if node_client is not None: # Check State if print_state: if await print_blockchain_state(node_client, config) is True: return None # if no blockchain is found if print_fee_info_flag: await print_fee_info(node_client) # Get Block Information if block_header_hash_by_height != "": block_header = await node_client.get_block_record_by_height(block_header_hash_by_height) if block_header is not None: print(f"Header hash of block {block_header_hash_by_height}: " f"{block_header.header_hash.hex()}") else: print("Block height", block_header_hash_by_height, "not found") if block_by_header_hash != "": await print_block_from_hash(node_client, config, block_by_header_hash)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/flax.py
flax/cmds/flax.py
from io import TextIOWrapper import click from flax import __version__ from flax.cmds.beta import beta_cmd from flax.cmds.configure import configure_cmd from flax.cmds.farm import farm_cmd from flax.cmds.data import data_cmd from flax.cmds.init import init_cmd from flax.cmds.keys import keys_cmd from flax.cmds.netspace import netspace_cmd from flax.cmds.passphrase import passphrase_cmd from flax.cmds.peer import peer_cmd from flax.cmds.plots import plots_cmd from flax.cmds.rpc import rpc_cmd from flax.cmds.show import show_cmd from flax.cmds.start import start_cmd from flax.cmds.stop import stop_cmd from flax.cmds.wallet import wallet_cmd from flax.cmds.plotters import plotters_cmd from flax.cmds.db import db_cmd from flax.util.default_root import DEFAULT_KEYS_ROOT_PATH, DEFAULT_ROOT_PATH from flax.util.errors import KeychainCurrentPassphraseIsInvalid from flax.util.keychain import ( Keychain, set_keys_root_path, ) from flax.util.ssl_check import check_ssl from typing import Optional CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) @click.group( help=f"\n Manage flax blockchain infrastructure ({__version__})\n", epilog="Try 'flax start node', 'flax netspace -d 192', or 'flax show -s'", context_settings=CONTEXT_SETTINGS, ) @click.option("--root-path", default=DEFAULT_ROOT_PATH, help="Config file root", type=click.Path(), show_default=True) @click.option( "--keys-root-path", default=DEFAULT_KEYS_ROOT_PATH, help="Keyring file root", type=click.Path(), show_default=True ) @click.option("--passphrase-file", type=click.File("r"), help="File or descriptor to read the keyring passphrase from") @click.option( "--force-legacy-keyring-migration/--no-force-legacy-keyring-migration", default=True, help="Force legacy keyring migration. 
Legacy keyring support will be removed in an upcoming version!", ) @click.pass_context def cli( ctx: click.Context, root_path: str, keys_root_path: Optional[str] = None, passphrase_file: Optional[TextIOWrapper] = None, force_legacy_keyring_migration: bool = True, ) -> None: from pathlib import Path ctx.ensure_object(dict) ctx.obj["root_path"] = Path(root_path) ctx.obj["force_legacy_keyring_migration"] = force_legacy_keyring_migration # keys_root_path and passphrase_file will be None if the passphrase options have been # scrubbed from the CLI options if keys_root_path is not None: set_keys_root_path(Path(keys_root_path)) if passphrase_file is not None: from flax.cmds.passphrase_funcs import cache_passphrase, read_passphrase_from_file from sys import exit try: passphrase = read_passphrase_from_file(passphrase_file) if Keychain.master_passphrase_is_valid(passphrase): cache_passphrase(passphrase) else: raise KeychainCurrentPassphraseIsInvalid() except KeychainCurrentPassphraseIsInvalid: if Path(passphrase_file.name).is_file(): print(f'Invalid passphrase found in "{passphrase_file.name}"') else: print("Invalid passphrase") exit(1) except Exception as e: print(f"Failed to read passphrase: {e}") check_ssl(Path(root_path)) @cli.command("version", short_help="Show flax version") def version_cmd() -> None: print(__version__) @cli.command("run_daemon", short_help="Runs flax daemon") @click.option( "--wait-for-unlock", help="If the keyring is passphrase-protected, the daemon will wait for an unlock command before accessing keys", default=False, is_flag=True, hidden=True, # --wait-for-unlock is only set when launched by flax start <service> ) @click.pass_context def run_daemon_cmd(ctx: click.Context, wait_for_unlock: bool) -> None: import asyncio from flax.daemon.server import async_run_daemon from flax.util.keychain import Keychain wait_for_unlock = wait_for_unlock and Keychain.is_keyring_locked() asyncio.run(async_run_daemon(ctx.obj["root_path"], 
wait_for_unlock=wait_for_unlock)) cli.add_command(keys_cmd) cli.add_command(plots_cmd) cli.add_command(wallet_cmd) cli.add_command(configure_cmd) cli.add_command(init_cmd) cli.add_command(rpc_cmd) cli.add_command(show_cmd) cli.add_command(start_cmd) cli.add_command(stop_cmd) cli.add_command(netspace_cmd) cli.add_command(farm_cmd) cli.add_command(plotters_cmd) cli.add_command(db_cmd) cli.add_command(peer_cmd) cli.add_command(data_cmd) cli.add_command(passphrase_cmd) cli.add_command(beta_cmd) def main() -> None: cli() # pylint: disable=no-value-for-parameter if __name__ == "__main__": main()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/netspace.py
flax/cmds/netspace.py
from typing import Optional import click @click.command("netspace", short_help="Estimate total farmed space on the network") @click.option( "-p", "--rpc-port", help=( "Set the port where the Full Node is hosting the RPC interface. " "See the rpc_port under full_node in config.yaml. " "[default: 8555]" ), type=int, default=None, ) @click.option( "-d", "--delta-block-height", help=( "Compare a block X blocks older to estimate total network space. " "Defaults to 4608 blocks (~1 day) and Peak block as the starting block. " "Use --start BLOCK_HEIGHT to specify starting block. " "Use 192 blocks to estimate over the last hour." ), type=str, default="4608", ) @click.option( "-s", "--start", help="Newest block used to calculate estimated total network space. Defaults to Peak block.", type=str, default="", ) def netspace_cmd(rpc_port: Optional[int], delta_block_height: str, start: str) -> None: """ Calculates the estimated space on the network given two block header hashes. """ import asyncio from .netspace_funcs import netstorge_async asyncio.run(netstorge_async(rpc_port, delta_block_height, start))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/farm.py
flax/cmds/farm.py
from typing import Optional import click @click.group("farm", short_help="Manage your farm") def farm_cmd() -> None: pass @farm_cmd.command("summary", short_help="Summary of farming information") @click.option( "-p", "--rpc-port", help=( "Set the port where the Full Node is hosting the RPC interface. " "See the rpc_port under full_node in config.yaml" ), type=int, default=None, show_default=True, ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, show_default=True, ) @click.option( "-hp", "--harvester-rpc-port", help=( "Set the port where the Harvester is hosting the RPC interface" "See the rpc_port under harvester in config.yaml" ), type=int, default=None, show_default=True, ) @click.option( "-fp", "--farmer-rpc-port", help=( "Set the port where the Farmer is hosting the RPC interface. " "See the rpc_port under farmer in config.yaml" ), type=int, default=None, show_default=True, ) def summary_cmd( rpc_port: Optional[int], wallet_rpc_port: Optional[int], harvester_rpc_port: Optional[int], farmer_rpc_port: Optional[int], ) -> None: from .farm_funcs import summary import asyncio asyncio.run(summary(rpc_port, wallet_rpc_port, harvester_rpc_port, farmer_rpc_port)) @farm_cmd.command("challenges", short_help="Show the latest challenges") @click.option( "-fp", "--farmer-rpc-port", help="Set the port where the Farmer is hosting the RPC interface. See the rpc_port under farmer in config.yaml", type=int, default=None, show_default=True, ) @click.option( "-l", "--limit", help="Limit the number of challenges shown. Use 0 to disable the limit", type=click.IntRange(0), default=20, show_default=True, ) def challenges_cmd(farmer_rpc_port: Optional[int], limit: int) -> None: from .farm_funcs import challenges import asyncio asyncio.run(challenges(farmer_rpc_port, limit))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/stop.py
flax/cmds/stop.py
from __future__ import annotations import asyncio import sys from pathlib import Path from typing import Any, Dict import click from flax.util.config import load_config from flax.util.service_groups import all_groups, services_for_groups async def async_stop(root_path: Path, config: Dict[str, Any], group: str, stop_daemon: bool) -> int: from flax.daemon.client import connect_to_daemon_and_validate daemon = await connect_to_daemon_and_validate(root_path, config) if daemon is None: print("Couldn't connect to flax daemon") return 1 if stop_daemon: r = await daemon.exit() await daemon.close() if r.get("data", {}).get("success", False): if r["data"].get("services_stopped") is not None: [print(f"{service}: Stopped") for service in r["data"]["services_stopped"]] await asyncio.sleep(1) # just cosmetic print("Daemon stopped") else: print(f"Stop daemon failed {r}") return 0 return_val = 0 for service in services_for_groups(group): print(f"{service}: ", end="", flush=True) if not await daemon.is_running(service_name=service): print("Not running") elif await daemon.stop_service(service_name=service): print("Stopped") else: print("Stop failed") return_val = 1 await daemon.close() return return_val @click.command("stop", short_help="Stop services") @click.option("-d", "--daemon", is_flag=True, type=bool, help="Stop daemon") @click.argument("group", type=click.Choice(list(all_groups())), nargs=-1, required=True) @click.pass_context def stop_cmd(ctx: click.Context, daemon: bool, group: str) -> None: from flax.cmds.beta_funcs import warn_if_beta_enabled root_path = ctx.obj["root_path"] config = load_config(root_path, "config.yaml") warn_if_beta_enabled(config) sys.exit(asyncio.run(async_stop(root_path, config, group, daemon)))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/passphrase_funcs.py
flax/cmds/passphrase_funcs.py
import click import colorama import os import sys import time from flax.daemon.client import acquire_connection_to_daemon from flax.util.config import load_config from flax.util.errors import KeychainMaxUnlockAttempts from flax.util.keychain import Keychain, supports_os_passphrase_storage from flax.util.keyring_wrapper import KeyringWrapper, DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE from flax.util.misc import prompt_yes_no from flax.util.ws_message import WsRpcMessage from getpass import getpass from io import TextIOWrapper from pathlib import Path from typing import Any, Dict, Optional, Tuple DEFAULT_PASSPHRASE_PROMPT = ( colorama.Fore.YELLOW + colorama.Style.BRIGHT + "(Unlock Keyring)" + colorama.Style.RESET_ALL + " Passphrase: " ) # noqa: E501 FAILED_ATTEMPT_DELAY = 0.5 MAX_KEYS = 100 MAX_RETRIES = 3 SAVE_MASTER_PASSPHRASE_WARNING = ( colorama.Fore.YELLOW + colorama.Style.BRIGHT + "\n!!! SECURITY WARNING !!!\n" + colorama.Style.RESET_ALL + "Other processes may be able to access your saved passphrase, possibly exposing your private keys.\n" + "You should not save your passphrase unless you fully trust your environment.\n" ) def obtain_current_passphrase(prompt: str = DEFAULT_PASSPHRASE_PROMPT, use_passphrase_cache: bool = False) -> str: """ Obtains the master passphrase for the keyring, optionally using the cached value (if previously set). If the passphrase isn't already cached, the user is prompted interactively to enter their passphrase a max of MAX_RETRIES times before failing. """ if use_passphrase_cache: passphrase, validated = KeyringWrapper.get_shared_instance().get_cached_master_passphrase() if passphrase: # If the cached passphrase was previously validated, we assume it's... 
valid if validated: return passphrase # Cached passphrase needs to be validated if KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase): KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase, validated=True) return passphrase else: # Cached passphrase is bad, clear the cache KeyringWrapper.get_shared_instance().set_cached_master_passphrase(None) # Prompt interactively with up to MAX_RETRIES attempts for i in range(MAX_RETRIES): colorama.init() passphrase = prompt_for_passphrase(prompt) if KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase): # If using the passphrase cache, and the user inputted a passphrase, update the cache if use_passphrase_cache: KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase, validated=True) return passphrase time.sleep(FAILED_ATTEMPT_DELAY) print("Incorrect passphrase\n") raise KeychainMaxUnlockAttempts() def verify_passphrase_meets_requirements( new_passphrase: str, confirmation_passphrase: str ) -> Tuple[bool, Optional[str]]: match = new_passphrase == confirmation_passphrase min_length = Keychain.minimum_passphrase_length() meets_len_requirement = len(new_passphrase) >= min_length if match and meets_len_requirement: return True, None elif not match: return False, "Passphrases do not match" elif not meets_len_requirement: return False, f"Minimum passphrase length is {min_length}" else: raise Exception("Unexpected passphrase verification case") def prompt_for_passphrase(prompt: str) -> str: if sys.platform == "win32" or sys.platform == "cygwin": print(prompt, end="") prompt = "" return getpass(prompt) def prompt_to_save_passphrase() -> bool: save: bool = False try: if supports_os_passphrase_storage(): location: Optional[str] = None warning: Optional[str] = None if sys.platform == "darwin": location = "macOS Keychain" warning = SAVE_MASTER_PASSPHRASE_WARNING elif sys.platform == "win32" or sys.platform == "cygwin": location = "Windows Credential 
Manager" warning = SAVE_MASTER_PASSPHRASE_WARNING if location is None: raise ValueError("OS-specific credential store not specified") print( "\n" "Your passphrase can be stored in your system's secure credential store. " "Other Flax processes will be able to access your keys without prompting for your passphrase." ) if warning is not None: colorama.init() print(warning) save = prompt_yes_no(f"Would you like to save your passphrase to the {location}?") except Exception as e: print(f"Caught exception: {e}") return False return save def prompt_for_new_passphrase() -> Tuple[str, bool]: min_length: int = Keychain.minimum_passphrase_length() if min_length > 0: n = min_length print(f"\nPassphrases must be {n} or more characters in length") # lgtm [py/clear-text-logging-sensitive-data] while True: passphrase: str = getpass("New Passphrase: ") confirmation: str = getpass("Confirm Passphrase: ") save_passphrase: bool = False valid_passphrase, error_msg = verify_passphrase_meets_requirements(passphrase, confirmation) if valid_passphrase: if supports_os_passphrase_storage(): save_passphrase = prompt_to_save_passphrase() return passphrase, save_passphrase elif error_msg: print(f"{error_msg}\n") # lgtm [py/clear-text-logging-sensitive-data] def read_passphrase_from_file(passphrase_file: TextIOWrapper) -> str: passphrase = passphrase_file.read().rstrip(os.environ.get("FLAX_PASSPHRASE_STRIP_TRAILING_CHARS", "\r\n")) passphrase_file.close() return passphrase def initialize_passphrase() -> None: if Keychain.has_master_passphrase(): print("Keyring is already protected by a passphrase") print("\nUse 'flax passphrase set' or 'flax passphrase remove' to update or remove your passphrase") sys.exit(1) # We'll rely on Keyring initialization to leverage the cached passphrase for # bootstrapping the keyring encryption process print("Setting keyring passphrase") passphrase: Optional[str] = None # save_passphrase indicates whether the passphrase should be saved in the # macOS Keychain or 
Windows Credential Manager save_passphrase: bool = False if Keychain.has_cached_passphrase(): passphrase = Keychain.get_cached_master_passphrase() if not passphrase or passphrase == default_passphrase(): passphrase, save_passphrase = prompt_for_new_passphrase() Keychain.set_master_passphrase(current_passphrase=None, new_passphrase=passphrase, save_passphrase=save_passphrase) def set_or_update_passphrase(passphrase: Optional[str], current_passphrase: Optional[str], hint: Optional[str]) -> bool: # Prompt for the current passphrase, if necessary if Keychain.has_master_passphrase(): # Try the default passphrase first if using_default_passphrase(): current_passphrase = default_passphrase() if not current_passphrase: try: current_passphrase = obtain_current_passphrase("Current Passphrase: ") except Exception as e: print(f"Unable to confirm current passphrase: {e}") sys.exit(1) success: bool = False new_passphrase: Optional[str] = passphrase save_passphrase: bool = False try: # Prompt for the new passphrase, if necessary if new_passphrase is None: new_passphrase, save_passphrase = prompt_for_new_passphrase() if new_passphrase == current_passphrase: raise ValueError("passphrase is unchanged") Keychain.set_master_passphrase( current_passphrase=current_passphrase, new_passphrase=new_passphrase, passphrase_hint=hint, save_passphrase=save_passphrase, ) success = True except Exception as e: print(f"Unable to set or update passphrase: {e}") success = False return success def remove_passphrase(current_passphrase: Optional[str]) -> bool: """ Removes the user's keyring passphrase. The keyring will be re-encrypted to the default passphrase. 
""" success = False if not Keychain.has_master_passphrase() or using_default_passphrase(): print("Passphrase is not currently set") success = False else: # Try the default passphrase first if using_default_passphrase(): current_passphrase = default_passphrase() # Prompt for the current passphrase, if necessary if not current_passphrase: try: current_passphrase = obtain_current_passphrase("Current Passphrase: ") except Exception as e: print(f"Unable to confirm current passphrase: {e}") success = False if current_passphrase: try: Keychain.remove_master_passphrase(current_passphrase) success = True except Exception as e: print(f"Unable to remove passphrase: {e}") success = False return success def cache_passphrase(passphrase: str) -> None: Keychain.set_cached_master_passphrase(passphrase) def get_current_passphrase() -> Optional[str]: if not Keychain.has_master_passphrase(): return None current_passphrase = None if using_default_passphrase(): current_passphrase = default_passphrase() else: try: current_passphrase = obtain_current_passphrase() except Exception as e: print(f"Unable to confirm current passphrase: {e}") raise return current_passphrase def default_passphrase() -> str: return DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE def using_default_passphrase() -> bool: if not Keychain.has_master_passphrase(): return False return Keychain.master_passphrase_is_valid(default_passphrase()) def display_passphrase_hint() -> None: passphrase_hint = Keychain.get_master_passphrase_hint() if passphrase_hint is not None: print(f"Passphrase hint: {passphrase_hint}") # lgtm [py/clear-text-logging-sensitive-data] else: print("Passphrase hint is not set") def update_passphrase_hint(hint: Optional[str] = None) -> bool: updated: bool = False if Keychain.has_master_passphrase() is False or using_default_passphrase(): print("Updating the passphrase hint requires that a passphrase has been set") else: current_passphrase: Optional[str] = get_current_passphrase() if current_passphrase is 
None: print("Keyring is not passphrase-protected") else: # Set or remove the passphrase hint Keychain.set_master_passphrase_hint(current_passphrase, hint) updated = True return updated def set_passphrase_hint(hint: str) -> None: if update_passphrase_hint(hint): print("Passphrase hint set") else: print("Passphrase hint was not updated") def remove_passphrase_hint() -> None: if update_passphrase_hint(None): print("Passphrase hint removed") else: print("Passphrase hint was not removed") async def async_update_daemon_passphrase_cache_if_running(root_path: Path, config: Dict[str, Any]) -> None: """ Attempt to connect to the daemon and update the cached passphrase """ new_passphrase = Keychain.get_cached_master_passphrase() assert new_passphrase is not None try: async with acquire_connection_to_daemon(root_path, config, quiet=True) as daemon: if daemon is not None: response = await daemon.unlock_keyring(new_passphrase) if response is None: raise Exception("daemon didn't respond") success: bool = response.get("data", {}).get("success", False) if success is False: error = response.get("data", {}).get("error", "unknown error") raise Exception(error) except Exception as e: print(f"Failed to notify daemon of updated keyring passphrase: {e}") async def async_update_daemon_migration_completed_if_running() -> None: """ Attempt to connect to the daemon to notify that keyring migration has completed. This allows the daemon to refresh its keyring so that it can stop using the legacy keyring. """ ctx: click.Context = click.get_current_context() root_path: Path = ctx.obj["root_path"] if root_path is None: print("Missing root_path in context. Unable to notify daemon") return None async with acquire_connection_to_daemon(root_path, load_config(root_path, "config.yaml"), quiet=True) as daemon: if daemon is not None: passphrase: str = Keychain.get_cached_master_passphrase() print("Updating daemon... 
", end="") response: WsRpcMessage = await daemon.notify_keyring_migration_completed(passphrase) success: bool = response.get("data", {}).get("success", False) print("succeeded" if success is True else "failed")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/keys.py
flax/cmds/keys.py
import asyncio import click import sys from typing import Optional, Tuple @click.group("keys", short_help="Manage your keys") @click.pass_context def keys_cmd(ctx: click.Context): """Create, delete, view and use your key pairs""" from pathlib import Path from .keys_funcs import migrate_keys root_path: Path = ctx.obj["root_path"] if not root_path.is_dir(): raise RuntimeError("Please initialize (or migrate) your config directory with flax init") if ctx.obj["force_legacy_keyring_migration"] and not asyncio.run(migrate_keys(root_path, True)): sys.exit(1) @keys_cmd.command("generate", short_help="Generates and adds a key to keychain") @click.option( "--label", "-l", default=None, help="Enter the label for the key", type=str, required=False, ) @click.pass_context def generate_cmd(ctx: click.Context, label: Optional[str]): from .init_funcs import check_keys from .keys_funcs import generate_and_add generate_and_add(label) check_keys(ctx.obj["root_path"]) @keys_cmd.command("show", short_help="Displays all the keys in keychain") @click.option( "--show-mnemonic-seed", help="Show the mnemonic seed of the keys", default=False, show_default=True, is_flag=True ) @click.option( "--non-observer-derivation", "-d", help=( "Show the first wallet address using non-observer derivation. Older Flax versions use " "non-observer derivation when generating wallet addresses." 
), default=False, show_default=True, is_flag=True, ) def show_cmd(show_mnemonic_seed, non_observer_derivation): from .keys_funcs import show_all_keys show_all_keys(show_mnemonic_seed, non_observer_derivation) @keys_cmd.command("add", short_help="Add a private key by mnemonic") @click.option( "--filename", "-f", default=None, help="The filename containing the secret key mnemonic to add", type=str, required=False, ) @click.option( "--label", "-l", default=None, help="Enter the label for the key", type=str, required=False, ) @click.pass_context def add_cmd(ctx: click.Context, filename: str, label: Optional[str]): from .init_funcs import check_keys from .keys_funcs import query_and_add_private_key_seed mnemonic = None if filename: from pathlib import Path mnemonic = Path(filename).read_text().rstrip() query_and_add_private_key_seed(mnemonic, label) check_keys(ctx.obj["root_path"]) @keys_cmd.group("label", short_help="Manage your key labels") def label_cmd(): pass @label_cmd.command("show", short_help="Show the labels of all available keys") def show_label_cmd(): from .keys_funcs import show_all_key_labels show_all_key_labels() @label_cmd.command("set", short_help="Set the label of a key") @click.option( "--fingerprint", "-f", help="Enter the fingerprint of the key you want to use", type=int, required=True, ) @click.option( "--label", "-l", help="Enter the new label for the key", type=str, required=True, ) def set_label_cmd(fingerprint: int, label: str): from .keys_funcs import set_key_label set_key_label(fingerprint, label) @label_cmd.command("delete", short_help="Delete the label of a key") @click.option( "--fingerprint", "-f", help="Enter the fingerprint of the key you want to use", type=int, required=True, ) def delete_label_cmd(fingerprint: int): from .keys_funcs import delete_key_label delete_key_label(fingerprint) @keys_cmd.command("delete", short_help="Delete a key by its pk fingerprint in hex form") @click.option( "--fingerprint", "-f", default=None, 
help="Enter the fingerprint of the key you want to use", type=int, required=True, ) @click.pass_context def delete_cmd(ctx: click.Context, fingerprint: int): from .init_funcs import check_keys from .keys_funcs import delete delete(fingerprint) check_keys(ctx.obj["root_path"]) @keys_cmd.command("delete_all", short_help="Delete all private keys in keychain") def delete_all_cmd(): from flax.util.keychain import Keychain Keychain().delete_all_keys() @keys_cmd.command("generate_and_print", short_help="Generates but does NOT add to keychain") def generate_and_print_cmd(): from .keys_funcs import generate_and_print generate_and_print() @keys_cmd.command("sign", short_help="Sign a message with a private key") @click.option("--message", "-d", default=None, help="Enter the message to sign in UTF-8", type=str, required=True) @click.option( "--fingerprint", "-f", default=None, help="Enter the fingerprint of the key you want to use", type=int, required=False, ) @click.option( "--mnemonic-seed-filename", "filename", # Rename the target argument default=None, help="The filename containing the mnemonic seed of the master key used for signing.", type=str, required=False, ) @click.option("--hd_path", "-t", help="Enter the HD path in the form 'm/12381/8444/n/n'", type=str, required=True) @click.option( "--as-bytes", "-b", help="Sign the message as sequence of bytes rather than UTF-8 string", default=False, show_default=True, is_flag=True, ) def sign_cmd(message: str, fingerprint: Optional[int], filename: Optional[str], hd_path: str, as_bytes: bool): from .keys_funcs import resolve_derivation_master_key, sign private_key = resolve_derivation_master_key(filename if filename is not None else fingerprint) sign(message, private_key, hd_path, as_bytes) @keys_cmd.command("verify", short_help="Verify a signature with a pk") @click.option("--message", "-d", default=None, help="Enter the message to sign in UTF-8", type=str, required=True) @click.option("--public_key", "-p", default=None, 
help="Enter the pk in hex", type=str, required=True) @click.option("--signature", "-s", default=None, help="Enter the signature in hex", type=str, required=True) def verify_cmd(message: str, public_key: str, signature: str): from .keys_funcs import verify verify(message, public_key, signature) @keys_cmd.command("migrate", short_help="Attempt to migrate keys to the Flax keyring") @click.pass_context def migrate_cmd(ctx: click.Context): from .keys_funcs import migrate_keys asyncio.run(migrate_keys(ctx.obj["root_path"])) @keys_cmd.group("derive", short_help="Derive child keys or wallet addresses") @click.option( "--fingerprint", "-f", default=None, help="Enter the fingerprint of the key you want to use.", type=int, required=False, ) @click.option( "--mnemonic-seed-filename", "filename", # Rename the target argument default=None, help="The filename containing the mnemonic seed of the master key to derive from.", type=str, required=False, ) @click.pass_context def derive_cmd(ctx: click.Context, fingerprint: Optional[int], filename: Optional[str]): ctx.obj["fingerprint"] = fingerprint ctx.obj["filename"] = filename @derive_cmd.command("search", short_help="Search the keyring for one or more matching derived keys or wallet addresses") @click.argument("search-terms", type=str, nargs=-1) @click.option( "--limit", "-l", default=100, show_default=True, help="Limit the number of derivations to search against", type=int ) @click.option( "--non-observer-derivation", "-d", help="Search will be performed against keys derived using non-observer derivation.", default=False, show_default=True, is_flag=True, ) @click.option( "--show-progress", "-P", help="Show search progress", default=False, show_default=True, is_flag=True, ) @click.option( "--search-type", "-t", help="Limit the search to include just the specified types", default=["address", "public_key"], show_default=True, multiple=True, type=click.Choice(["public_key", "private_key", "address", "all"], case_sensitive=True), ) 
@click.option( "--derive-from-hd-path", "-p", help="Search for items derived from a specific HD path. Indices ending in an 'n' indicate that " "non-observer derivation should be used at that index. Example HD path: m/12381n/8444n/2/", type=str, ) @click.option("--prefix", "-x", help="Address prefix (xfx for mainnet, txfx for testnet)", default=None, type=str) @click.pass_context def search_cmd( ctx: click.Context, search_terms: Tuple[str, ...], limit: int, non_observer_derivation: bool, show_progress: bool, search_type: Tuple[str, ...], derive_from_hd_path: Optional[str], prefix: Optional[str], ): import sys from .keys_funcs import search_derive, resolve_derivation_master_key from blspy import PrivateKey private_key: Optional[PrivateKey] = None fingerprint: Optional[int] = ctx.obj.get("fingerprint", None) filename: Optional[str] = ctx.obj.get("filename", None) # Specifying the master key is optional for the search command. If not specified, we'll search all keys. if fingerprint is not None or filename is not None: private_key = resolve_derivation_master_key(filename if filename is not None else fingerprint) found: bool = search_derive( ctx.obj["root_path"], private_key, search_terms, limit, non_observer_derivation, show_progress, ("all",) if "all" in search_type else search_type, derive_from_hd_path, prefix, ) sys.exit(0 if found else 1) @derive_cmd.command("wallet-address", short_help="Derive wallet receive addresses") @click.option( "--index", "-i", help="Index of the first wallet address to derive. 
Index 0 is the first wallet address.", default=0 ) @click.option("--count", "-n", help="Number of wallet addresses to derive, starting at index.", default=1) @click.option("--prefix", "-x", help="Address prefix (xfx for mainnet, txfx for testnet)", default=None, type=str) @click.option( "--non-observer-derivation", "-d", help="Derive wallet addresses using non-observer derivation.", default=False, show_default=True, is_flag=True, ) @click.option( "--show-hd-path", help="Show the HD path of the derived wallet addresses. If non-observer-derivation is specified, " "path indices will have an 'n' suffix.", default=False, show_default=True, is_flag=True, ) @click.pass_context def wallet_address_cmd( ctx: click.Context, index: int, count: int, prefix: Optional[str], non_observer_derivation: bool, show_hd_path: bool ): from .keys_funcs import derive_wallet_address, resolve_derivation_master_key fingerprint: Optional[int] = ctx.obj.get("fingerprint", None) filename: Optional[str] = ctx.obj.get("filename", None) private_key = resolve_derivation_master_key(filename if filename is not None else fingerprint) derive_wallet_address( ctx.obj["root_path"], private_key, index, count, prefix, non_observer_derivation, show_hd_path ) @derive_cmd.command("child-key", short_help="Derive child keys") @click.option( "--type", "-t", "key_type", # Rename the target argument help="Type of child key to derive", required=False, type=click.Choice(["farmer", "pool", "wallet", "local", "backup", "singleton", "pool_auth"]), ) @click.option( "--derive-from-hd-path", "-p", help="Derive child keys rooted from a specific HD path. Indices ending in an 'n' indicate that " "non-observer derivation should be used at that index. Example HD path: m/12381n/8444n/2/", type=str, ) @click.option( "--index", "-i", help="Index of the first child key to derive. 
Index 0 is the first child key.", default=0 ) @click.option("--count", "-n", help="Number of child keys to derive, starting at index.", default=1) @click.option( "--non-observer-derivation", "-d", help="Derive keys using non-observer derivation.", default=False, show_default=True, is_flag=True, ) @click.option( "--show-private-keys", "-s", help="Display derived private keys", default=False, show_default=True, is_flag=True, ) @click.option( "--show-hd-path", help="Show the HD path of the derived wallet addresses", default=False, show_default=True, is_flag=True, ) @click.pass_context def child_key_cmd( ctx: click.Context, key_type: Optional[str], derive_from_hd_path: Optional[str], index: int, count: int, non_observer_derivation: bool, show_private_keys: bool, show_hd_path: bool, ): from .keys_funcs import derive_child_key, resolve_derivation_master_key if key_type is None and derive_from_hd_path is None: ctx.fail("--type or --derive-from-hd-path is required") fingerprint: Optional[int] = ctx.obj.get("fingerprint", None) filename: Optional[str] = ctx.obj.get("filename", None) private_key = resolve_derivation_master_key(filename if filename is not None else fingerprint) derive_child_key( private_key, key_type, derive_from_hd_path.lower() if derive_from_hd_path is not None else None, index, count, non_observer_derivation, show_private_keys, show_hd_path, )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/start.py
flax/cmds/start.py
import click from flax.util.config import load_config from flax.util.service_groups import all_groups @click.command("start", short_help="Start service groups") @click.option("-r", "--restart", is_flag=True, type=bool, help="Restart running services") @click.argument("group", type=click.Choice(list(all_groups())), nargs=-1, required=True) @click.pass_context def start_cmd(ctx: click.Context, restart: bool, group: str) -> None: import asyncio from flax.cmds.beta_funcs import warn_if_beta_enabled from .start_funcs import async_start root_path = ctx.obj["root_path"] config = load_config(root_path, "config.yaml") warn_if_beta_enabled(config) asyncio.run(async_start(root_path, config, group, restart, ctx.obj["force_legacy_keyring_migration"]))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/configure.py
flax/cmds/configure.py
from __future__ import annotations from pathlib import Path from typing import Optional import click from flax.util.config import load_defaults_for_missing_services, lock_and_load_config, save_config, str2bool def configure( root_path: Path, set_farmer_peer: str, set_node_introducer: str, set_fullnode_port: str, set_harvester_port: str, set_log_level: str, enable_upnp: str, set_outbound_peer_count: str, set_peer_count: str, testnet: str, peer_connect_timeout: str, crawler_db_path: str, crawler_minimum_version_count: Optional[int], seeder_domain_name: str, seeder_nameserver: str, ): config_yaml = "config.yaml" with lock_and_load_config(root_path, config_yaml, fill_missing_services=True) as config: config.update(load_defaults_for_missing_services(config=config, config_name=config_yaml)) change_made = False if set_node_introducer: try: if set_node_introducer.index(":"): host, port = ( ":".join(set_node_introducer.split(":")[:-1]), set_node_introducer.split(":")[-1], ) config["full_node"]["introducer_peer"]["host"] = host config["full_node"]["introducer_peer"]["port"] = int(port) config["introducer"]["port"] = int(port) print("Node introducer updated") change_made = True except ValueError: print("Node introducer address must be in format [IP:Port]") if set_farmer_peer: try: if set_farmer_peer.index(":"): host, port = ( ":".join(set_farmer_peer.split(":")[:-1]), set_farmer_peer.split(":")[-1], ) config["full_node"]["farmer_peer"]["host"] = host config["full_node"]["farmer_peer"]["port"] = int(port) config["harvester"]["farmer_peer"]["host"] = host config["harvester"]["farmer_peer"]["port"] = int(port) print("Farmer peer updated, make sure your harvester has the proper cert installed") change_made = True except ValueError: print("Farmer address must be in format [IP:Port]") if set_fullnode_port: config["full_node"]["port"] = int(set_fullnode_port) config["full_node"]["introducer_peer"]["port"] = int(set_fullnode_port) config["farmer"]["full_node_peer"]["port"] = 
int(set_fullnode_port) config["timelord"]["full_node_peer"]["port"] = int(set_fullnode_port) config["wallet"]["full_node_peer"]["port"] = int(set_fullnode_port) config["wallet"]["introducer_peer"]["port"] = int(set_fullnode_port) config["introducer"]["port"] = int(set_fullnode_port) print("Default full node port updated") change_made = True if set_harvester_port: config["harvester"]["port"] = int(set_harvester_port) config["farmer"]["harvester_peer"]["port"] = int(set_harvester_port) print("Default harvester port updated") change_made = True if set_log_level: levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"] if set_log_level in levels: config["logging"]["log_level"] = set_log_level print(f"Logging level updated. Check {root_path}/log/debug.log") change_made = True else: print(f"Logging level not updated. Use one of: {levels}") if enable_upnp: config["full_node"]["enable_upnp"] = str2bool(enable_upnp) if str2bool(enable_upnp): print("uPnP enabled") else: print("uPnP disabled") change_made = True if set_outbound_peer_count: config["full_node"]["target_outbound_peer_count"] = int(set_outbound_peer_count) print("Target outbound peer count updated") change_made = True if set_peer_count: config["full_node"]["target_peer_count"] = int(set_peer_count) print("Target peer count updated") change_made = True if testnet: if testnet == "true" or testnet == "t": print("Setting Testnet") testnet_port = "56888" testnet_introducer = "introducer-testnet10.flaxnetwork.org" testnet_dns_introducer = "dns-introducer-testnet10.flaxnetwork.org" bootstrap_peers = ["testnet10-node.flaxnetwork.org"] testnet = "testnet10" config["full_node"]["port"] = int(testnet_port) if config["full_node"]["introducer_peer"] is None: config["full_node"]["introducer_peer"] = {} assert config["full_node"]["introducer_peer"] is not None # mypy if config["wallet"]["introducer_peer"] is None: config["wallet"]["introducer_peer"] = {} assert config["wallet"]["introducer_peer"] is not None # mypy 
config["full_node"]["introducer_peer"]["port"] = int(testnet_port) config["farmer"]["full_node_peer"]["port"] = int(testnet_port) config["timelord"]["full_node_peer"]["port"] = int(testnet_port) config["wallet"]["full_node_peer"]["port"] = int(testnet_port) config["wallet"]["introducer_peer"]["port"] = int(testnet_port) config["introducer"]["port"] = int(testnet_port) config["full_node"]["introducer_peer"]["host"] = testnet_introducer config["full_node"]["dns_servers"] = [testnet_dns_introducer] config["wallet"]["introducer_peer"]["host"] = testnet_introducer config["wallet"]["dns_servers"] = [testnet_dns_introducer] config["selected_network"] = testnet config["harvester"]["selected_network"] = testnet config["pool"]["selected_network"] = testnet config["farmer"]["selected_network"] = testnet config["timelord"]["selected_network"] = testnet config["full_node"]["selected_network"] = testnet config["ui"]["selected_network"] = testnet config["introducer"]["selected_network"] = testnet config["wallet"]["selected_network"] = testnet config["data_layer"]["selected_network"] = testnet if "seeder" in config: config["seeder"]["port"] = int(testnet_port) config["seeder"]["other_peers_port"] = int(testnet_port) config["seeder"]["selected_network"] = testnet config["seeder"]["bootstrap_peers"] = bootstrap_peers print("Default full node port, introducer and network setting updated") change_made = True elif testnet == "false" or testnet == "f": print("Setting Mainnet") mainnet_port = "6888" mainnet_introducer = "introducer.flaxnetwork.org" mainnet_dns_introducer = "dns-introducer.flaxnetwork.org" bootstrap_peers = ["node.flaxnetwork.org"] net = "mainnet" config["full_node"]["port"] = int(mainnet_port) config["full_node"]["introducer_peer"]["port"] = int(mainnet_port) config["farmer"]["full_node_peer"]["port"] = int(mainnet_port) config["timelord"]["full_node_peer"]["port"] = int(mainnet_port) config["wallet"]["full_node_peer"]["port"] = int(mainnet_port) 
config["wallet"]["introducer_peer"]["port"] = int(mainnet_port) config["introducer"]["port"] = int(mainnet_port) config["full_node"]["introducer_peer"]["host"] = mainnet_introducer config["full_node"]["dns_servers"] = [mainnet_dns_introducer] config["wallet"]["introducer_peer"]["host"] = mainnet_introducer config["wallet"]["dns_servers"] = [mainnet_dns_introducer] config["selected_network"] = net config["harvester"]["selected_network"] = net config["pool"]["selected_network"] = net config["farmer"]["selected_network"] = net config["timelord"]["selected_network"] = net config["full_node"]["selected_network"] = net config["ui"]["selected_network"] = net config["introducer"]["selected_network"] = net config["wallet"]["selected_network"] = net config["data_layer"]["selected_network"] = net if "seeder" in config: config["seeder"]["port"] = int(mainnet_port) config["seeder"]["other_peers_port"] = int(mainnet_port) config["seeder"]["selected_network"] = net config["seeder"]["bootstrap_peers"] = bootstrap_peers print("Default full node port, introducer and network setting updated") change_made = True else: print("Please choose True or False") if peer_connect_timeout: config["full_node"]["peer_connect_timeout"] = int(peer_connect_timeout) change_made = True if crawler_db_path is not None and "seeder" in config: config["seeder"]["crawler_db_path"] = crawler_db_path change_made = True if crawler_minimum_version_count is not None and "seeder" in config: config["seeder"]["minimum_version_count"] = crawler_minimum_version_count change_made = True if seeder_domain_name is not None and "seeder" in config: config["seeder"]["domain_name"] = seeder_domain_name change_made = True if seeder_nameserver is not None and "seeder" in config: config["seeder"]["nameserver"] = seeder_nameserver change_made = True if change_made: print("Restart any running flax services for changes to take effect") save_config(root_path, "config.yaml", config) @click.command("configure", short_help="Modify 
configuration", no_args_is_help=True) @click.option( "--testnet", "-t", help="configures for connection to testnet", type=click.Choice(["true", "t", "false", "f"]), ) @click.option("--set-node-introducer", help="Set the introducer for node - IP:Port", type=str) @click.option("--set-farmer-peer", help="Set the farmer peer for harvester - IP:Port", type=str) @click.option( "--set-fullnode-port", help="Set the port to use for the fullnode, useful for testing", type=str, ) @click.option( "--set-harvester-port", help="Set the port to use for the harvester, useful for testing", type=str, ) @click.option( "--set-log-level", "--log-level", "-log-level", help="Set the instance log level", type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]), ) @click.option( "--enable-upnp", "--upnp", "-upnp", help="Enable or disable uPnP", type=click.Choice(["true", "t", "false", "f"]), ) @click.option( "--set_outbound-peer-count", help="Update the target outbound peer count (default 8)", type=str, ) @click.option("--set-peer-count", help="Update the target peer count (default 80)", type=str) @click.option("--set-peer-connect-timeout", help="Update the peer connect timeout (default 30)", type=str) @click.option( "--crawler-db-path", help="configures the path to the crawler database", type=str, ) @click.option( "--crawler-minimum-version-count", help="configures how many of a particular version must be seen to be reported in logs", type=int, ) @click.option( "--seeder-domain-name", help="configures the seeder domain_name setting. Ex: `seeder.example.com.`", type=str, ) @click.option( "--seeder-nameserver", help="configures the seeder nameserver setting. 
Ex: `example.com.`", type=str, ) @click.pass_context def configure_cmd( ctx, set_farmer_peer, set_node_introducer, set_fullnode_port, set_harvester_port, set_log_level, enable_upnp, set_outbound_peer_count, set_peer_count, testnet, set_peer_connect_timeout, crawler_db_path, crawler_minimum_version_count, seeder_domain_name, seeder_nameserver, ): configure( ctx.obj["root_path"], set_farmer_peer, set_node_introducer, set_fullnode_port, set_harvester_port, set_log_level, enable_upnp, set_outbound_peer_count, set_peer_count, testnet, set_peer_connect_timeout, crawler_db_path, crawler_minimum_version_count, seeder_domain_name, seeder_nameserver, )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/beta.py
flax/cmds/beta.py
from __future__ import annotations import zipfile from datetime import datetime from pathlib import Path from typing import List, Optional import click from flax.cmds.beta_funcs import ( default_beta_root_path, prepare_flax_blockchain_log, prepare_logs, prepare_plotting_log, prompt_beta_warning, prompt_for_beta_path, prompt_for_metrics_log_interval, update_beta_config, validate_beta_path, validate_metrics_log_interval, ) from flax.util.beta_metrics import metrics_log_interval_default from flax.util.config import lock_and_load_config, save_config def print_restart_warning() -> None: print("\nRestart the daemon and any running flax services for changes to take effect.") @click.group("beta", hidden=True) def beta_cmd() -> None: pass @beta_cmd.command("configure", help="Configure the beta test mode parameters") @click.option("-p", "--path", help="The beta mode root path", type=str, required=False) @click.option("-i", "--interval", help="System metrics will be logged based on this interval", type=int, required=False) @click.pass_context def configure(ctx: click.Context, path: Optional[str], interval: Optional[int]) -> None: root_path = ctx.obj["root_path"] with lock_and_load_config(root_path, "config.yaml") as config: if "beta" not in config: raise click.ClickException("beta test mode is not enabled, enable it first with `flax beta enable`") # Adjust the path if path is None: beta_root_path = prompt_for_beta_path(Path(config["beta"].get("path", default_beta_root_path()))) else: beta_root_path = Path(path) validate_beta_path(beta_root_path) # Adjust the metrics log interval if interval is None: metrics_log_interval = prompt_for_metrics_log_interval( int(config["beta"].get("metrics_log_interval", metrics_log_interval_default)) ) else: metrics_log_interval = interval try: validate_metrics_log_interval(metrics_log_interval) except ValueError as e: raise click.ClickException(str(e)) update_beta_config(True, beta_root_path, metrics_log_interval, config) save_config(root_path, 
"config.yaml", config) print("\nbeta config updated") print_restart_warning() @beta_cmd.command("enable", help="Enable beta test mode") @click.option( "-f", "--force", help="Force accept the beta program warning", is_flag=True, default=False, ) @click.option("-p", "--path", help="The beta mode root path", type=str, required=False) @click.pass_context def enable_cmd(ctx: click.Context, force: bool, path: Optional[str]) -> None: root_path = ctx.obj["root_path"] with lock_and_load_config(root_path, "config.yaml") as config: if config.get("beta", {}).get("enabled", False): raise click.ClickException("beta test mode is already enabled") if not force and not prompt_beta_warning(): ctx.abort() # Use the existing beta path if there is one and no path was provided as parameter current_path = config.get("beta", {}).get("path") current_path = None if current_path is None else Path(current_path) if path is None and current_path is None: beta_root_path = prompt_for_beta_path(current_path or default_beta_root_path()) else: beta_root_path = Path(path or current_path) validate_beta_path(beta_root_path) update_beta_config(True, beta_root_path, metrics_log_interval_default, config) save_config(root_path, "config.yaml", config) print(f"\nbeta test mode enabled with path {str(beta_root_path)!r}") print_restart_warning() @beta_cmd.command("disable", help="Disable beta test mode") @click.pass_context def disable_cmd(ctx: click.Context) -> None: root_path = ctx.obj["root_path"] with lock_and_load_config(root_path, "config.yaml") as config: if not config.get("beta", {}).get("enabled", False): raise click.ClickException("beta test mode is not enabled") config["beta"]["enabled"] = False save_config(root_path, "config.yaml", config) print("\nbeta test mode disabled") print_restart_warning() @beta_cmd.command("prepare_submission", help="Prepare the collected log data for submission") @click.pass_context def prepare_submission_cmd(ctx: click.Context) -> None: with 
lock_and_load_config(ctx.obj["root_path"], "config.yaml") as config: beta_root_path = config.get("beta", {}).get("path", None) if beta_root_path is None: raise click.ClickException("beta test mode not enabled. Run `flax beta enable` first.") beta_root_path = Path(beta_root_path) validate_beta_path(beta_root_path) available_results = sorted([path for path in beta_root_path.iterdir() if path.is_dir()]) if len(available_results) == 0: raise click.ClickException(f"No beta logs found in {str(beta_root_path)!r}.") print("Available versions:") for i in range(len(available_results)): print(f" [{i + 1}] {available_results[i].name}") user_input = input("Select the version you want to prepare for submission: ") try: if int(user_input) <= 0: raise IndexError() prepare_result = available_results[int(user_input) - 1] except IndexError: raise click.ClickException(f"Invalid choice: {user_input}") plotting_path = Path(prepare_result / "plotting") flax_blockchain_path = Path(prepare_result / "flax-blockchain") flax_logs = prepare_logs(plotting_path, prepare_flax_blockchain_log) plotting_logs = prepare_logs(flax_blockchain_path, prepare_plotting_log) submission_file_path = ( prepare_result / f"submission_{prepare_result.name}__{datetime.now().strftime('%m_%d_%Y__%H_%M_%S')}.zip" ) def add_files(paths: List[Path]) -> int: added = 0 for path in paths: if path.name.startswith("."): continue zip_file.write(path, path.relative_to(prepare_result)) added += 1 return added with zipfile.ZipFile(submission_file_path, "w", zipfile.ZIP_DEFLATED) as zip_file: files_added = add_files(flax_logs) + add_files(plotting_logs) if files_added == 0: submission_file_path.unlink() message = f"No logs files found in {str(plotting_path)!r} and {str(flax_blockchain_path)!r}." raise click.ClickException(message) print(f"\nDone. 
You can find the prepared submission data in {submission_file_path}.") @beta_cmd.command("status", help="Show the current beta configuration") @click.pass_context def status(ctx: click.Context) -> None: with lock_and_load_config(ctx.obj["root_path"], "config.yaml") as config: beta_config = config.get("beta") if beta_config is None: raise click.ClickException("beta test mode is not enabled, enable it first with `flax beta enable`") print(f"enabled: {beta_config['enabled']}") print(f"path: {beta_config['path']}") print(f"metrics log interval: {beta_config['metrics_log_interval']}s")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/data_funcs.py
flax/cmds/data_funcs.py
from __future__ import annotations from contextlib import asynccontextmanager from decimal import Decimal from pathlib import Path from typing import AsyncIterator, Dict, List, Optional, Tuple, cast import aiohttp from flax.cmds.units import units from flax.rpc.data_layer_rpc_client import DataLayerRpcClient from flax.types.blockchain_format.sized_bytes import bytes32 from flax.util.byte_types import hexstr_to_bytes from flax.util.config import load_config from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.ints import uint16, uint64 # TODO: there seems to be a large amount of repetition in these to dedupe @asynccontextmanager async def get_client(rpc_port: Optional[int]) -> AsyncIterator[Tuple[DataLayerRpcClient, int]]: config = load_config(DEFAULT_ROOT_PATH, "config.yaml", fill_missing_services=True) self_hostname = config["self_hostname"] if rpc_port is None: rpc_port = cast(int, config["data_layer"]["rpc_port"]) client = await DataLayerRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config) try: yield client, rpc_port finally: client.close() await client.await_closed() async def create_data_store_cmd(rpc_port: Optional[int], fee: Optional[str]) -> None: final_fee = None if fee is not None: final_fee = uint64(int(Decimal(fee) * units["flax"])) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.create_data_store(fee=final_fee) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. 
Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") return async def get_value_cmd(rpc_port: Optional[int], store_id: str, key: str, root_hash: Optional[str]) -> None: store_id_bytes = bytes32.from_hexstr(store_id) key_bytes = hexstr_to_bytes(key) root_hash_bytes = None if root_hash is None else bytes32.from_hexstr(root_hash) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_value(store_id=store_id_bytes, key=key_bytes, root_hash=root_hash_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") return async def update_data_store_cmd( rpc_port: Optional[int], store_id: str, changelist: List[Dict[str, str]], fee: Optional[str], ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) final_fee = None if fee is not None: final_fee = uint64(int(Decimal(fee) * units["flax"])) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.update_data_store(store_id=store_id_bytes, changelist=changelist, fee=final_fee) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") return async def get_keys_cmd( rpc_port: Optional[int], store_id: str, root_hash: Optional[str], ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) root_hash_bytes = None if root_hash is None else bytes32.from_hexstr(root_hash) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_keys(store_id=store_id_bytes, root_hash=root_hash_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. 
Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") return async def get_keys_values_cmd( rpc_port: Optional[int], store_id: str, root_hash: Optional[str], ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) root_hash_bytes = None if root_hash is None else bytes32.from_hexstr(root_hash) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_keys_values(store_id=store_id_bytes, root_hash=root_hash_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") return async def get_root_cmd( rpc_port: Optional[int], store_id: str, ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_root(store_id=store_id_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") return async def subscribe_cmd( rpc_port: Optional[int], store_id: str, urls: List[str], ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.subscribe(store_id=store_id_bytes, urls=urls) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def unsubscribe_cmd( rpc_port: Optional[int], store_id: str, ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.unsubscribe(store_id=store_id_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. 
Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def remove_subscriptions_cmd( rpc_port: Optional[int], store_id: str, urls: List[str], ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.remove_subscriptions(store_id=store_id_bytes, urls=urls) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def get_kv_diff_cmd( rpc_port: Optional[int], store_id: str, hash_1: str, hash_2: str, ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) hash_1_bytes = bytes32.from_hexstr(hash_1) hash_2_bytes = bytes32.from_hexstr(hash_2) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_kv_diff(store_id=store_id_bytes, hash_1=hash_1_bytes, hash_2=hash_2_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def get_root_history_cmd( rpc_port: Optional[int], store_id: str, ) -> None: store_id_bytes = bytes32.from_hexstr(store_id) try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_root_history(store_id=store_id_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def add_missing_files_cmd( rpc_port: Optional[int], ids: Optional[List[str]], overwrite: bool, foldername: Optional[Path] ) -> None: try: async with get_client(rpc_port) as (client, rpc_port): res = await client.add_missing_files( store_ids=(None if ids is None else [bytes32.from_hexstr(id) for id in ids]), overwrite=overwrite, foldername=foldername, ) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. 
Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def add_mirror_cmd( rpc_port: Optional[int], store_id: str, urls: List[str], amount: int, fee: Optional[str] ) -> None: try: store_id_bytes = bytes32.from_hexstr(store_id) final_fee = None if fee is not None: final_fee = uint64(int(Decimal(fee) * units["flax"])) async with get_client(rpc_port) as (client, rpc_port): res = await client.add_mirror( store_id=store_id_bytes, urls=urls, amount=amount, fee=final_fee, ) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def delete_mirror_cmd(rpc_port: Optional[int], coin_id: str, fee: Optional[str]) -> None: try: coin_id_bytes = bytes32.from_hexstr(coin_id) final_fee = None if fee is not None: final_fee = uint64(int(Decimal(fee) * units["flax"])) async with get_client(rpc_port) as (client, rpc_port): res = await client.delete_mirror( coin_id=coin_id_bytes, fee=final_fee, ) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def get_mirrors_cmd(rpc_port: Optional[int], store_id: str) -> None: try: store_id_bytes = bytes32.from_hexstr(store_id) async with get_client(rpc_port) as (client, rpc_port): res = await client.get_mirrors(store_id=store_id_bytes) print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def get_subscriptions_cmd(rpc_port: Optional[int]) -> None: try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_subscriptions() print(res) except aiohttp.ClientConnectorError: print(f"Connection error. 
Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}") async def get_owned_stores_cmd(rpc_port: Optional[int]) -> None: try: async with get_client(rpc_port) as (client, rpc_port): res = await client.get_owned_stores() print(res) except aiohttp.ClientConnectorError: print(f"Connection error. Check if data is running at {rpc_port}") except Exception as e: print(f"Exception from 'data': {e}")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/passphrase.py
flax/cmds/passphrase.py
import asyncio import click import sys from io import TextIOWrapper from typing import Optional from flax.util.config import load_config @click.group("passphrase", short_help="Manage your keyring passphrase") @click.pass_context def passphrase_cmd(ctx: click.Context): from .keys_funcs import migrate_keys if ctx.obj["force_legacy_keyring_migration"] and not asyncio.run(migrate_keys(ctx.obj["root_path"], True)): sys.exit(1) @passphrase_cmd.command( "set", help="""Sets or updates the keyring passphrase. If --passphrase-file and/or --current-passphrase-file options are provided, the passphrases will be read from the specified files. Otherwise, a prompt will be provided to enter the passphrase.""", short_help="Set or update the keyring passphrase", ) @click.option("--passphrase-file", type=click.File("r"), help="File or descriptor to read the passphrase from") @click.option( "--current-passphrase-file", type=click.File("r"), help="File or descriptor to read the current passphrase from" ) @click.option("--hint", type=str, help="Passphrase hint") @click.pass_context def set_cmd( ctx: click.Context, passphrase_file: Optional[TextIOWrapper], current_passphrase_file: Optional[TextIOWrapper], hint: Optional[str], ) -> None: from .passphrase_funcs import ( async_update_daemon_passphrase_cache_if_running, read_passphrase_from_file, set_or_update_passphrase, verify_passphrase_meets_requirements, ) success: bool = False current_passphrase: Optional[str] = None if current_passphrase_file is not None: current_passphrase = read_passphrase_from_file(current_passphrase_file) if passphrase_file is not None: try: # Read the passphrase from a file and verify it new_passphrase: str = read_passphrase_from_file(passphrase_file) valid_passphrase, error_msg = verify_passphrase_meets_requirements( new_passphrase, new_passphrase ) # new_passphrase provided for both args since we don't have a separate confirmation passphrase if not valid_passphrase: raise ValueError(f"{error_msg}") except 
ValueError as e: print(f"Unable to set passphrase: {e}") except Exception as e: print(f"Failed to read passphrase: {e}") else: # Interactively prompt for the current passphrase (if set) success = set_or_update_passphrase( passphrase=new_passphrase, current_passphrase=current_passphrase, hint=hint ) else: success = set_or_update_passphrase(passphrase=None, current_passphrase=current_passphrase, hint=hint) if success: # Attempt to update the daemon's passphrase cache root_path = ctx.obj["root_path"] config = load_config(root_path, "config.yaml") sys.exit(asyncio.run(async_update_daemon_passphrase_cache_if_running(root_path, config))) @passphrase_cmd.command( "remove", help="""Remove the keyring passphrase. If the --current-passphrase-file option is provided, the passphrase will be read from the specified file. Otherwise, a prompt will be provided to enter the passphrase.""", short_help="Remove the keyring passphrase", ) @click.option( "--current-passphrase-file", type=click.File("r"), help="File or descriptor to read the current passphrase from" ) @click.pass_context def remove_cmd(ctx: click.Context, current_passphrase_file: Optional[TextIOWrapper]) -> None: from .passphrase_funcs import ( async_update_daemon_passphrase_cache_if_running, read_passphrase_from_file, remove_passphrase, ) current_passphrase: Optional[str] = None if current_passphrase_file is not None: current_passphrase = read_passphrase_from_file(current_passphrase_file) if remove_passphrase(current_passphrase): # Attempt to update the daemon's passphrase cache root_path = ctx.obj["root_path"] config = load_config(root_path, "config.yaml") sys.exit(asyncio.run(async_update_daemon_passphrase_cache_if_running(root_path, config))) @passphrase_cmd.group("hint", short_help="Manage the optional keyring passphrase hint") def hint_cmd() -> None: pass @hint_cmd.command("display", short_help="Display the keyring passphrase hint") def display_hint(): from .passphrase_funcs import display_passphrase_hint 
display_passphrase_hint() @hint_cmd.command("set", short_help="Set or update the keyring passphrase hint") @click.argument("hint", nargs=1) def set_hint(hint): from .passphrase_funcs import set_passphrase_hint set_passphrase_hint(hint) @hint_cmd.command("remove", short_help="Remove the keyring passphrase hint") def remove_hint(): from .passphrase_funcs import remove_passphrase_hint remove_passphrase_hint()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/plotters.py
flax/cmds/plotters.py
import click from flax.plotters.plotters import call_plotters @click.command( "plotters", short_help="Advanced plotting options", context_settings={"ignore_unknown_options": True}, add_help_option=False, ) @click.pass_context @click.argument("args", nargs=-1) def plotters_cmd(ctx: click.Context, args): call_plotters(ctx.obj["root_path"], args)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/__init__.py
flax/cmds/__init__.py
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/peer_funcs.py
flax/cmds/peer_funcs.py
from __future__ import annotations from pathlib import Path from typing import Any, Dict, Optional from flax.cmds.cmds_util import get_any_service_client from flax.rpc.rpc_client import RpcClient async def add_node_connection(rpc_client: RpcClient, add_connection: str) -> None: if ":" not in add_connection: print("Enter a valid IP and port in the following format: 10.5.4.3:8000") else: ip, port = ( ":".join(add_connection.split(":")[:-1]), add_connection.split(":")[-1], ) print(f"Connecting to {ip}, {port}") try: await rpc_client.open_connection(ip, int(port)) except Exception: print(f"Failed to connect to {ip}:{port}") async def remove_node_connection(rpc_client: RpcClient, remove_connection: str) -> None: from flax.server.outbound_message import NodeType result_txt = "" if len(remove_connection) != 8: result_txt = "Invalid NodeID. Do not include '.'" else: connections = await rpc_client.get_connections() for con in connections: if remove_connection == con["node_id"].hex()[:8]: print("Attempting to disconnect", "NodeID", remove_connection) try: await rpc_client.close_connection(con["node_id"]) except Exception: result_txt = f"Failed to disconnect NodeID {remove_connection}" else: result_txt = ( f"NodeID {remove_connection}... {NodeType(con['type']).name} {con['peer_host']} disconnected" ) elif result_txt == "": result_txt = f"NodeID {remove_connection}... 
not found" print(result_txt) async def print_connections(rpc_client: RpcClient, trusted_peers: Dict[str, Any]) -> None: import time from flax.server.outbound_message import NodeType from flax.util.network import is_trusted_inner connections = await rpc_client.get_connections() print("Connections:") print("Type IP Ports NodeID Last Connect" + " MiB Up|Dwn") for con in connections: last_connect_tuple = time.struct_time(time.localtime(con["last_message_time"])) last_connect = time.strftime("%b %d %T", last_connect_tuple) mb_down = con["bytes_read"] / (1024 * 1024) mb_up = con["bytes_written"] / (1024 * 1024) host = con["peer_host"] # Strip IPv6 brackets host = host.strip("[]") trusted: bool = is_trusted_inner(host, con["node_id"], trusted_peers, False) # Nodetype length is 9 because INTRODUCER will be deprecated if NodeType(con["type"]) is NodeType.FULL_NODE: peak_height = con.get("peak_height", None) connection_peak_hash = con.get("peak_hash", None) if connection_peak_hash is None: connection_peak_hash = "No Info" else: if connection_peak_hash.startswith(("0x", "0X")): connection_peak_hash = connection_peak_hash[2:] connection_peak_hash = f"{connection_peak_hash[:8]}..." con_str = ( f"{NodeType(con['type']).name:9} {host:39} " f"{con['peer_port']:5}/{con['peer_server_port']:<5}" f" {con['node_id'].hex()[:8]}... " f"{last_connect} " f"{mb_up:7.1f}|{mb_down:<7.1f}" f"\n " ) if peak_height is not None: con_str += f"-Height: {peak_height:8.0f} -Hash: {connection_peak_hash}" else: con_str += f"-Height: No Info -Hash: {connection_peak_hash}" # Only show when Trusted is True if trusted: con_str += f" -Trusted: {trusted}" else: con_str = ( f"{NodeType(con['type']).name:9} {host:39} " f"{con['peer_port']:5}/{con['peer_server_port']:<5}" f" {con['node_id'].hex()[:8]}... 
" f"{last_connect} " f"{mb_up:7.1f}|{mb_down:<7.1f}" ) print(con_str) async def peer_async( node_type: str, rpc_port: Optional[int], root_path: Path, show_connections: bool, add_connection: str, remove_connection: str, ) -> None: rpc_client: Optional[RpcClient] async with get_any_service_client(node_type, rpc_port, root_path) as node_config_fp: rpc_client, config, _ = node_config_fp if rpc_client is not None: # Check or edit node connections if show_connections: trusted_peers: Dict[str, Any] = config["full_node"].get("trusted_peers", {}) await print_connections(rpc_client, trusted_peers) # if called together with state, leave a blank line if add_connection: await add_node_connection(rpc_client, add_connection) if remove_connection: await remove_node_connection(rpc_client, remove_connection)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/init_funcs.py
flax/cmds/init_funcs.py
import os import shutil import sqlite3 from pathlib import Path from typing import Any, Dict, List, Optional, Tuple import yaml from flax import __version__ from flax.consensus.coinbase import create_puzzlehash_for_pk from flax.ssl.create_ssl import ( ensure_ssl_dirs, generate_ca_signed_cert, get_flax_ca_crt_key, make_ca_cert, write_ssl_cert_and_key, ) from flax.util.bech32m import encode_puzzle_hash from flax.util.config import ( create_default_flax_config, initial_config_file, load_config, lock_and_load_config, save_config, unflatten_properties, ) from flax.util.db_version import set_db_version from flax.util.keychain import Keychain from flax.util.path import path_from_root from flax.util.ssl_check import ( DEFAULT_PERMISSIONS_CERT_FILE, DEFAULT_PERMISSIONS_KEY_FILE, RESTRICT_MASK_CERT_FILE, RESTRICT_MASK_KEY_FILE, check_and_fix_permissions_for_ssl_file, fix_ssl, ) from flax.wallet.derive_keys import ( master_sk_to_pool_sk, master_sk_to_wallet_sk_intermediate, master_sk_to_wallet_sk_unhardened_intermediate, _derive_path, _derive_path_unhardened, ) from flax.cmds.configure import configure _all_private_node_names: List[str] = [ "full_node", "wallet", "farmer", "harvester", "timelord", "crawler", "data_layer", "daemon", ] _all_public_node_names: List[str] = ["full_node", "wallet", "farmer", "introducer", "timelord", "data_layer"] def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]): for k in do_not_migrate_keys: if k in updated and do_not_migrate_keys[k] == "": updated.pop(k) for k, v in default.items(): ignore = False if k in do_not_migrate_keys: do_not_data = do_not_migrate_keys[k] if isinstance(do_not_data, dict): ignore = False else: ignore = True if isinstance(v, dict) and k in updated and ignore is False: # If there is an intermediate key with empty string value, do not migrate all descendants if do_not_migrate_keys.get(k, None) == "": do_not_migrate_keys[k] = v dict_add_new_default(updated[k], default[k], 
do_not_migrate_keys.get(k, {})) elif k not in updated or ignore is True: updated[k] = v def check_keys(new_root: Path, keychain: Optional[Keychain] = None) -> None: if keychain is None: keychain = Keychain() all_sks = keychain.get_all_private_keys() if len(all_sks) == 0: print("No keys are present in the keychain. Generate them with 'flax keys generate'") return None with lock_and_load_config(new_root, "config.yaml") as config: pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks] all_targets = [] stop_searching_for_farmer = "xfx_target_address" not in config["farmer"] stop_searching_for_pool = "xfx_target_address" not in config["pool"] number_of_ph_to_search = 50 selected = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] intermediates = {} for sk, _ in all_sks: intermediates[bytes(sk)] = { "observer": master_sk_to_wallet_sk_unhardened_intermediate(sk), "non-observer": master_sk_to_wallet_sk_intermediate(sk), } for i in range(number_of_ph_to_search): if stop_searching_for_farmer and stop_searching_for_pool and i > 0: break for sk, _ in all_sks: intermediate_n = intermediates[bytes(sk)]["non-observer"] intermediate_o = intermediates[bytes(sk)]["observer"] all_targets.append( encode_puzzle_hash( create_puzzlehash_for_pk(_derive_path_unhardened(intermediate_o, [i]).get_g1()), prefix ) ) all_targets.append( encode_puzzle_hash(create_puzzlehash_for_pk(_derive_path(intermediate_n, [i]).get_g1()), prefix) ) if all_targets[-1] == config["farmer"].get("xfx_target_address") or all_targets[-2] == config[ "farmer" ].get("xfx_target_address"): stop_searching_for_farmer = True if all_targets[-1] == config["pool"].get("xfx_target_address") or all_targets[-2] == config["pool"].get( "xfx_target_address" ): stop_searching_for_pool = True # Set the destinations, if necessary updated_target: bool = False if "xfx_target_address" not in config["farmer"]: print( f"Setting the xfx destination for the farmer 
reward (1/8 plus fees, solo and pooling)" f" to {all_targets[0]}" ) config["farmer"]["xfx_target_address"] = all_targets[0] updated_target = True elif config["farmer"]["xfx_target_address"] not in all_targets: print( f"WARNING: using a farmer address which we might not have the private" f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding " f"{config['farmer']['xfx_target_address']} with {all_targets[0]}" ) if "pool" not in config: config["pool"] = {} if "xfx_target_address" not in config["pool"]: print(f"Setting the xfx destination address for pool reward (7/8 for solo only) to {all_targets[0]}") config["pool"]["xfx_target_address"] = all_targets[0] updated_target = True elif config["pool"]["xfx_target_address"] not in all_targets: print( f"WARNING: using a pool address which we might not have the private" f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding " f"{config['pool']['xfx_target_address']} with {all_targets[0]}" ) if updated_target: print( f"To change the XFX destination addresses, edit the `xfx_target_address` entries in" f" {(new_root / 'config' / 'config.yaml').absolute()}." 
) # Set the pool pks in the farmer pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys) if "pool_public_keys" in config["farmer"]: for pk_hex in config["farmer"]["pool_public_keys"]: # Add original ones in config pool_pubkeys_hex.add(pk_hex) config["farmer"]["pool_public_keys"] = pool_pubkeys_hex save_config(new_root, "config.yaml", config) def copy_files_rec(old_path: Path, new_path: Path): if old_path.is_file(): print(f"{new_path}") new_path.parent.mkdir(parents=True, exist_ok=True) shutil.copy(old_path, new_path) elif old_path.is_dir(): for old_path_child in old_path.iterdir(): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) def migrate_from( old_root: Path, new_root: Path, manifest: List[str], do_not_migrate_settings: List[str], ): """ Copy all the files in "manifest" to the new config directory. """ if old_root == new_root: print("same as new path, exiting") return 1 if not old_root.is_dir(): print(f"{old_root} not found - this is ok if you did not install this version") return 0 print(f"\n{old_root} found") print(f"Copying files from {old_root} to {new_root}\n") for f in manifest: old_path = old_root / f new_path = new_root / f copy_files_rec(old_path, new_path) # update config yaml with new keys with lock_and_load_config(new_root, "config.yaml") as config: config_str: str = initial_config_file("config.yaml") default_config: Dict = yaml.safe_load(config_str) flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings}) dict_add_new_default(config, default_config, flattened_keys) save_config(new_root, "config.yaml", config) create_all_ssl(new_root) return 1 def create_all_ssl( root_path: Path, *, private_ca_crt_and_key: Optional[Tuple[bytes, bytes]] = None, node_certs_and_keys: Optional[Dict[str, Dict]] = None, private_node_names: List[str] = _all_private_node_names, public_node_names: List[str] = _all_public_node_names, overwrite: bool = True, ): # remove old key and crt 
config_dir = root_path / "config" old_key_path = config_dir / "trusted.key" old_crt_path = config_dir / "trusted.crt" if old_key_path.exists(): print(f"Old key not needed anymore, deleting {old_key_path}") os.remove(old_key_path) if old_crt_path.exists(): print(f"Old crt not needed anymore, deleting {old_crt_path}") os.remove(old_crt_path) ssl_dir = config_dir / "ssl" ca_dir = ssl_dir / "ca" ensure_ssl_dirs([ssl_dir, ca_dir]) private_ca_key_path = ca_dir / "private_ca.key" private_ca_crt_path = ca_dir / "private_ca.crt" flax_ca_crt, flax_ca_key = get_flax_ca_crt_key() flax_ca_crt_path = ca_dir / "flax_ca.crt" flax_ca_key_path = ca_dir / "flax_ca.key" write_ssl_cert_and_key(flax_ca_crt_path, flax_ca_crt, flax_ca_key_path, flax_ca_key, overwrite=overwrite) # If Private CA crt/key are passed-in, write them out if private_ca_crt_and_key is not None: private_ca_crt, private_ca_key = private_ca_crt_and_key write_ssl_cert_and_key(private_ca_crt_path, private_ca_crt, private_ca_key_path, private_ca_key) if not private_ca_key_path.exists() or not private_ca_crt_path.exists(): # Create private CA print(f"Can't find private CA, creating a new one in {root_path} to generate TLS certificates") make_ca_cert(private_ca_crt_path, private_ca_key_path) # Create private certs for each node ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes( ssl_dir, ca_crt, ca_key, prefix="private", nodes=private_node_names, node_certs_and_keys=node_certs_and_keys, overwrite=overwrite, ) else: # This is entered when user copied over private CA print(f"Found private CA in {root_path}, using it to generate TLS certificates") ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes( ssl_dir, ca_crt, ca_key, prefix="private", nodes=private_node_names, node_certs_and_keys=node_certs_and_keys, overwrite=overwrite, ) flax_ca_crt, flax_ca_key = get_flax_ca_crt_key() generate_ssl_for_nodes( ssl_dir, 
flax_ca_crt, flax_ca_key, prefix="public", nodes=public_node_names, overwrite=False, node_certs_and_keys=node_certs_and_keys, ) def generate_ssl_for_nodes( ssl_dir: Path, ca_crt: bytes, ca_key: bytes, *, prefix: str, nodes: List[str], overwrite: bool = True, node_certs_and_keys: Optional[Dict[str, Dict]] = None, ): for node_name in nodes: node_dir = ssl_dir / node_name ensure_ssl_dirs([node_dir]) key_path = node_dir / f"{prefix}_{node_name}.key" crt_path = node_dir / f"{prefix}_{node_name}.crt" if node_certs_and_keys is not None: certs_and_keys = node_certs_and_keys.get(node_name, {}).get(prefix, {}) crt = certs_and_keys.get("crt", None) key = certs_and_keys.get("key", None) if crt is not None and key is not None: write_ssl_cert_and_key(crt_path, crt, key_path, key) continue if key_path.exists() and crt_path.exists() and overwrite is False: continue generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path) def copy_cert_files(cert_path: Path, new_path: Path): for old_path_child in cert_path.glob("*.crt"): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) check_and_fix_permissions_for_ssl_file(new_path_child, RESTRICT_MASK_CERT_FILE, DEFAULT_PERMISSIONS_CERT_FILE) for old_path_child in cert_path.glob("*.key"): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) check_and_fix_permissions_for_ssl_file(new_path_child, RESTRICT_MASK_KEY_FILE, DEFAULT_PERMISSIONS_KEY_FILE) def init( create_certs: Optional[Path], root_path: Path, fix_ssl_permissions: bool = False, testnet: bool = False, v1_db: bool = False, ): if create_certs is not None: if root_path.exists(): if os.path.isdir(create_certs): ca_dir: Path = root_path / "config/ssl/ca" if ca_dir.exists(): print(f"Deleting your OLD CA in {ca_dir}") shutil.rmtree(ca_dir) print(f"Copying your CA from {create_certs} to {ca_dir}") copy_cert_files(create_certs, ca_dir) create_all_ssl(root_path) else: print(f"** Directory {create_certs} does 
not exist **") else: print(f"** {root_path} does not exist. Executing core init **") # sanity check here to prevent infinite recursion if ( flax_init( root_path, fix_ssl_permissions=fix_ssl_permissions, testnet=testnet, v1_db=v1_db, ) == 0 and root_path.exists() ): return init(create_certs, root_path, fix_ssl_permissions) print(f"** {root_path} was not created. Exiting **") return -1 else: return flax_init(root_path, fix_ssl_permissions=fix_ssl_permissions, testnet=testnet, v1_db=v1_db) def flax_version_number() -> Tuple[str, str, str, str]: scm_full_version = __version__ left_full_version = scm_full_version.split("+") version = left_full_version[0].split(".") scm_major_version = version[0] scm_minor_version = version[1] if len(version) > 2: smc_patch_version = version[2] patch_release_number = smc_patch_version else: smc_patch_version = "" major_release_number = scm_major_version minor_release_number = scm_minor_version dev_release_number = "" # If this is a beta dev release - get which beta it is if "0b" in scm_minor_version: original_minor_ver_list = scm_minor_version.split("0b") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta minor_release_number = scm_major_version patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." + smc_patch_version elif "0rc" in version[1]: original_minor_ver_list = scm_minor_version.split("0rc") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1 patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." 
+ smc_patch_version else: major_release_number = scm_major_version minor_release_number = scm_minor_version patch_release_number = smc_patch_version dev_release_number = "" install_release_number = major_release_number + "." + minor_release_number if len(patch_release_number) > 0: install_release_number += "." + patch_release_number if len(dev_release_number) > 0: install_release_number += dev_release_number return major_release_number, minor_release_number, patch_release_number, dev_release_number def flax_full_version_str() -> str: major, minor, patch, dev = flax_version_number() return f"{major}.{minor}.{patch}{dev}" def flax_init( root_path: Path, *, should_check_keys: bool = True, fix_ssl_permissions: bool = False, testnet: bool = False, v1_db: bool = False, ): """ Standard first run initialization or migration steps. Handles config creation, generation of SSL certs, and setting target addresses (via check_keys). should_check_keys can be set to False to avoid blocking when accessing a passphrase protected Keychain. When launching the daemon from the GUI, we want the GUI to handle unlocking the keychain. """ flax_root = os.environ.get("FLAX_ROOT", None) if flax_root is not None: print(f"FLAX_ROOT is set to {flax_root}") print(f"Flax directory {root_path}") if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists(): # This is reached if FLAX_ROOT is set, or if user has run flax init twice # before a new update. 
if testnet: configure( root_path, set_farmer_peer="", set_node_introducer="", set_fullnode_port="", set_harvester_port="", set_log_level="", enable_upnp="", set_outbound_peer_count="", set_peer_count="", testnet="true", peer_connect_timeout="", crawler_db_path="", crawler_minimum_version_count=None, seeder_domain_name="", seeder_nameserver="", ) if fix_ssl_permissions: fix_ssl(root_path) if should_check_keys: check_keys(root_path) print(f"{root_path} already exists, no migration action taken") return -1 create_default_flax_config(root_path) if testnet: configure( root_path, set_farmer_peer="", set_node_introducer="", set_fullnode_port="", set_harvester_port="", set_log_level="", enable_upnp="", set_outbound_peer_count="", set_peer_count="", testnet="true", peer_connect_timeout="", crawler_db_path="", crawler_minimum_version_count=None, seeder_domain_name="", seeder_nameserver="", ) create_all_ssl(root_path) if fix_ssl_permissions: fix_ssl(root_path) if should_check_keys: check_keys(root_path) config: Dict db_path_replaced: str if v1_db: with lock_and_load_config(root_path, "config.yaml") as config: db_pattern = config["full_node"]["database_path"] new_db_path = db_pattern.replace("_v2_", "_v1_") config["full_node"]["database_path"] = new_db_path db_path_replaced = new_db_path.replace("CHALLENGE", config["selected_network"]) db_path = path_from_root(root_path, db_path_replaced) db_path.parent.mkdir(parents=True, exist_ok=True) with sqlite3.connect(db_path) as connection: set_db_version(connection, 1) save_config(root_path, "config.yaml", config) else: config = load_config(root_path, "config.yaml")["full_node"] db_path_replaced = config["database_path"].replace("CHALLENGE", config["selected_network"]) db_path = path_from_root(root_path, db_path_replaced) db_path.parent.mkdir(parents=True, exist_ok=True) try: # create new v2 db file with sqlite3.connect(db_path) as connection: set_db_version(connection, 2) except sqlite3.OperationalError: # db already exists, so we're 
good pass print("") print("To see your keys, run 'flax keys show --show-mnemonic-seed'") return 0
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/farm_funcs.py
flax/cmds/farm_funcs.py
from typing import Any, Dict, List, Optional from flax.cmds.cmds_util import get_any_service_client from flax.cmds.units import units from flax.consensus.block_record import BlockRecord from flax.rpc.farmer_rpc_client import FarmerRpcClient from flax.rpc.full_node_rpc_client import FullNodeRpcClient from flax.rpc.wallet_rpc_client import WalletRpcClient from flax.util.misc import format_bytes from flax.util.misc import format_minutes from flax.util.network import is_localhost SECONDS_PER_BLOCK = (24 * 3600) / 4608 async def get_harvesters_summary(farmer_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]: farmer_client: Optional[FarmerRpcClient] async with get_any_service_client("farmer", farmer_rpc_port) as node_config_fp: farmer_client, _, _ = node_config_fp if farmer_client is not None: return await farmer_client.get_harvesters_summary() return None async def get_blockchain_state(rpc_port: Optional[int]) -> Optional[Dict[str, Any]]: client: Optional[FullNodeRpcClient] async with get_any_service_client("full_node", rpc_port) as node_config_fp: client, _, _ = node_config_fp if client is not None: return await client.get_blockchain_state() return None async def get_average_block_time(rpc_port: Optional[int]) -> float: client: Optional[FullNodeRpcClient] async with get_any_service_client("full_node", rpc_port) as node_config_fp: client, _, _ = node_config_fp if client is not None: blocks_to_compare = 500 blockchain_state = await client.get_blockchain_state() curr: Optional[BlockRecord] = blockchain_state["peak"] if curr is None or curr.height < (blocks_to_compare + 100): return SECONDS_PER_BLOCK while curr is not None and curr.height > 0 and not curr.is_transaction_block: curr = await client.get_block_record(curr.prev_hash) if curr is None or curr.timestamp is None or curr.height is None: # stupid mypy return SECONDS_PER_BLOCK past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare) while past_curr is not None and past_curr.height > 0 
and not past_curr.is_transaction_block: past_curr = await client.get_block_record(past_curr.prev_hash) if past_curr is None or past_curr.timestamp is None or past_curr.height is None: # stupid mypy return SECONDS_PER_BLOCK return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height) return SECONDS_PER_BLOCK async def get_wallets_stats(wallet_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]: wallet_client: Optional[WalletRpcClient] async with get_any_service_client("wallet", wallet_rpc_port, login_to_wallet=False) as node_config_fp: wallet_client, _, _ = node_config_fp if wallet_client is not None: return await wallet_client.get_farmed_amount() return None async def get_challenges(farmer_rpc_port: Optional[int]) -> Optional[List[Dict[str, Any]]]: farmer_client: Optional[FarmerRpcClient] async with get_any_service_client("farmer", farmer_rpc_port) as node_config_fp: farmer_client, _, _ = node_config_fp if farmer_client is not None: return await farmer_client.get_signage_points() return None async def challenges(farmer_rpc_port: Optional[int], limit: int) -> None: signage_points = await get_challenges(farmer_rpc_port) if signage_points is None: return None signage_points.reverse() if limit != 0: signage_points = signage_points[:limit] for signage_point in signage_points: print( ( f"Hash: {signage_point['signage_point']['challenge_hash']} " f"Index: {signage_point['signage_point']['signage_point_index']}" ) ) async def summary( rpc_port: Optional[int], wallet_rpc_port: Optional[int], harvester_rpc_port: Optional[int], farmer_rpc_port: Optional[int], ) -> None: harvesters_summary = await get_harvesters_summary(farmer_rpc_port) blockchain_state = await get_blockchain_state(rpc_port) farmer_running = False if harvesters_summary is None else True # harvesters uses farmer rpc too wallet_not_ready: bool = False amounts = None try: amounts = await get_wallets_stats(wallet_rpc_port) except Exception: wallet_not_ready = True wallet_not_running: bool = 
True if amounts is None else False print("Farming status: ", end="") if blockchain_state is None: print("Not available") elif blockchain_state["sync"]["sync_mode"]: print("Syncing") elif not blockchain_state["sync"]["synced"]: print("Not synced or not connected to peers") elif not farmer_running: print("Not running") else: print("Farming") if amounts is not None: print(f"Total flax farmed: {amounts['farmed_amount'] / units['flax']}") print(f"User transaction fees: {amounts['fee_amount'] / units['flax']}") print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['flax']}") print(f"Last height farmed: {amounts['last_height_farmed']}") class PlotStats: total_plot_size = 0 total_plots = 0 if harvesters_summary is not None: harvesters_local: Dict[str, Dict[str, Any]] = {} harvesters_remote: Dict[str, Dict[str, Any]] = {} for harvester in harvesters_summary["harvesters"]: ip = harvester["connection"]["host"] if is_localhost(ip): harvesters_local[harvester["connection"]["node_id"]] = harvester else: if ip not in harvesters_remote: harvesters_remote[ip] = {} harvesters_remote[ip][harvester["connection"]["node_id"]] = harvester def process_harvesters(harvester_peers_in: dict): for harvester_peer_id, harvester_dict in harvester_peers_in.items(): syncing = harvester_dict["syncing"] if syncing is not None and syncing["initial"]: print(f" Loading plots: {syncing['plot_files_processed']} / {syncing['plot_files_total']}") else: total_plot_size_harvester = harvester_dict["total_plot_size"] plot_count_harvester = harvester_dict["plots"] PlotStats.total_plot_size += total_plot_size_harvester PlotStats.total_plots += plot_count_harvester print(f" {plot_count_harvester} plots of size: {format_bytes(total_plot_size_harvester)}") if len(harvesters_local) > 0: print(f"Local Harvester{'s' if len(harvesters_local) > 1 else ''}") process_harvesters(harvesters_local) for harvester_ip, harvester_peers in harvesters_remote.items(): print(f"Remote 
Harvester{'s' if len(harvester_peers) > 1 else ''} for IP: {harvester_ip}") process_harvesters(harvester_peers) print(f"Plot count for all harvesters: {PlotStats.total_plots}") print("Total size of plots: ", end="") print(format_bytes(PlotStats.total_plot_size)) else: print("Plot count: Unknown") print("Total size of plots: Unknown") if blockchain_state is not None: print("Estimated network space: ", end="") print(format_bytes(blockchain_state["space"])) else: print("Estimated network space: Unknown") minutes = -1 if blockchain_state is not None and harvesters_summary is not None: proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1 minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1 if harvesters_summary is not None and PlotStats.total_plots == 0: print("Expected time to win: Never (no plots)") else: print("Expected time to win: " + format_minutes(minutes)) if amounts is None: if wallet_not_running: print("For details on farmed rewards and fees you should run 'flax start wallet' and 'flax wallet show'") elif wallet_not_ready: print("For details on farmed rewards and fees you should run 'flax wallet show'") else: print("Note: log into your key using 'flax wallet show' to see rewards for each key")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/plotnft_funcs.py
flax/cmds/plotnft_funcs.py
from decimal import Decimal from dataclasses import replace import aiohttp import asyncio import functools import json import time from pprint import pprint from typing import Any, List, Dict, Optional, Callable from flax.cmds.units import units from flax.cmds.wallet_funcs import print_balance, wallet_coin_unit from flax.pools.pool_config import load_pool_config, PoolWalletConfig, update_pool_config from flax.pools.pool_wallet_info import PoolWalletInfo, PoolSingletonState from flax.protocols.pool_protocol import POOL_PROTOCOL_VERSION from flax.rpc.farmer_rpc_client import FarmerRpcClient from flax.rpc.wallet_rpc_client import WalletRpcClient from flax.types.blockchain_format.sized_bytes import bytes32 from flax.server.server import ssl_context_for_root from flax.ssl.create_ssl import get_mozilla_ca_crt from flax.util.bech32m import encode_puzzle_hash, decode_puzzle_hash from flax.util.byte_types import hexstr_to_bytes from flax.util.config import load_config from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.ints import uint32, uint64 from flax.cmds.cmds_util import transaction_submitted_msg, transaction_status_msg, get_any_service_client from flax.wallet.transaction_record import TransactionRecord from flax.wallet.util.wallet_types import WalletType async def create_pool_args(pool_url: str) -> Dict: try: async with aiohttp.ClientSession() as session: async with session.get(f"{pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt())) as response: if response.ok: json_dict = json.loads(await response.text()) else: raise ValueError(f"Response from {pool_url} not OK: {response.status}") except Exception as e: raise ValueError(f"Error connecting to pool {pool_url}: {e}") if json_dict["relative_lock_height"] > 1000: raise ValueError("Relative lock height too high for this pool, cannot join") if json_dict["protocol_version"] != POOL_PROTOCOL_VERSION: raise ValueError(f"Incorrect version: {json_dict['protocol_version']}, should be 
{POOL_PROTOCOL_VERSION}") header_msg = f"\n---- Pool parameters fetched from {pool_url} ----" print(header_msg) pprint(json_dict) print("-" * len(header_msg)) return json_dict async def create(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: state = args["state"] prompt = not args.get("yes", False) fee = Decimal(args.get("fee", 0)) fee_mojos = uint64(int(fee * units["flax"])) target_puzzle_hash: Optional[bytes32] # Could use initial_pool_state_from_dict to simplify if state == "SELF_POOLING": pool_url: Optional[str] = None relative_lock_height = uint32(0) target_puzzle_hash = None # wallet will fill this in elif state == "FARMING_TO_POOL": config = load_config(DEFAULT_ROOT_PATH, "config.yaml") enforce_https = config["full_node"]["selected_network"] == "mainnet" pool_url = str(args["pool_url"]) if enforce_https and not pool_url.startswith("https://"): print(f"Pool URLs must be HTTPS on mainnet {pool_url}. Aborting.") return json_dict = await create_pool_args(pool_url) relative_lock_height = json_dict["relative_lock_height"] target_puzzle_hash = bytes32.from_hexstr(json_dict["target_puzzle_hash"]) else: raise ValueError("Plot NFT must be created in SELF_POOLING or FARMING_TO_POOL state.") pool_msg = f" and join pool: {pool_url}" if pool_url else "" print(f"Will create a plot NFT{pool_msg}.") if prompt: user_input: str = input("Confirm [n]/y: ") else: user_input = "yes" if user_input.lower() == "y" or user_input.lower() == "yes": try: tx_record: TransactionRecord = await wallet_client.create_new_pool_wallet( target_puzzle_hash, pool_url, relative_lock_height, "localhost:5000", "new", state, fee_mojos, ) start = time.time() while time.time() - start < 10: await asyncio.sleep(0.1) tx = await wallet_client.get_transaction(str(1), tx_record.name) if len(tx.sent_to) > 0: print(transaction_submitted_msg(tx)) print(transaction_status_msg(fingerprint, tx_record.name)) return None except Exception as e: print(f"Error creating plot NFT: {e}\n Please start 
both farmer and wallet with: flax start -r farmer") return print("Aborting.") async def pprint_pool_wallet_state( wallet_client: WalletRpcClient, wallet_id: int, pool_wallet_info: PoolWalletInfo, address_prefix: str, pool_state_dict: Optional[Dict[str, Any]], ): if pool_wallet_info.current.state == PoolSingletonState.LEAVING_POOL and pool_wallet_info.target is None: expected_leave_height = pool_wallet_info.singleton_block_height + pool_wallet_info.current.relative_lock_height print(f"Current state: INVALID_STATE. Please leave/join again after block height {expected_leave_height}") else: print(f"Current state: {PoolSingletonState(pool_wallet_info.current.state).name}") print(f"Current state from block height: {pool_wallet_info.singleton_block_height}") print(f"Launcher ID: {pool_wallet_info.launcher_id}") print( "Target address (not for plotting): " f"{encode_puzzle_hash(pool_wallet_info.current.target_puzzle_hash, address_prefix)}" ) print(f"Number of plots: {0 if pool_state_dict is None else pool_state_dict['plot_count']}") print(f"Owner public key: {pool_wallet_info.current.owner_pubkey}") print( f"Pool contract address (use ONLY for plotting - do not send money to this address): " f"{encode_puzzle_hash(pool_wallet_info.p2_singleton_puzzle_hash, address_prefix)}" ) if pool_wallet_info.target is not None: print(f"Target state: {PoolSingletonState(pool_wallet_info.target.state).name}") print(f"Target pool URL: {pool_wallet_info.target.pool_url}") if pool_wallet_info.current.state == PoolSingletonState.SELF_POOLING.value: balances: Dict = await wallet_client.get_wallet_balance(str(wallet_id)) balance = balances["confirmed_wallet_balance"] typ = WalletType(int(WalletType.POOLING_WALLET)) address_prefix, scale = wallet_coin_unit(typ, address_prefix) print(f"Claimable balance: {print_balance(balance, scale, address_prefix)}") if pool_wallet_info.current.state == PoolSingletonState.FARMING_TO_POOL: print(f"Current pool URL: {pool_wallet_info.current.pool_url}") if 
pool_state_dict is not None: print(f"Current difficulty: {pool_state_dict['current_difficulty']}") print(f"Points balance: {pool_state_dict['current_points']}") points_found_24h = [points for timestamp, points in pool_state_dict["points_found_24h"]] points_acknowledged_24h = [points for timestamp, points in pool_state_dict["points_acknowledged_24h"]] summed_points_found_24h = sum(points_found_24h) summed_points_acknowledged_24h = sum(points_acknowledged_24h) if summed_points_found_24h == 0: success_pct = 0.0 else: success_pct = summed_points_acknowledged_24h / summed_points_found_24h print(f"Points found (24h): {summed_points_found_24h}") print(f"Percent Successful Points (24h): {success_pct:.2%}") payout_instructions: str = pool_state_dict["pool_config"]["payout_instructions"] try: payout_address = encode_puzzle_hash(bytes32.fromhex(payout_instructions), address_prefix) print(f"Payout instructions (pool will pay to this address): {payout_address}") except Exception: print(f"Payout instructions (pool will pay you with this): {payout_instructions}") print(f"Relative lock height: {pool_wallet_info.current.relative_lock_height} blocks") if pool_wallet_info.current.state == PoolSingletonState.LEAVING_POOL: expected_leave_height = pool_wallet_info.singleton_block_height + pool_wallet_info.current.relative_lock_height if pool_wallet_info.target is not None: print(f"Expected to leave after block height: {expected_leave_height}") async def show(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: farmer_client: Optional[FarmerRpcClient] async with get_any_service_client("farmer") as node_config_fp: farmer_client, config, _ = node_config_fp if farmer_client is not None: address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"] summaries_response = await wallet_client.get_wallets() wallet_id_passed_in = args.get("id", None) pool_state_list = (await farmer_client.get_pool_state())["pool_state"] pool_state_dict: 
Dict[bytes32, Dict] = { bytes32.from_hexstr(pool_state_item["pool_config"]["launcher_id"]): pool_state_item for pool_state_item in pool_state_list } if wallet_id_passed_in is not None: for summary in summaries_response: typ = WalletType(int(summary["type"])) if summary["id"] == wallet_id_passed_in and typ != WalletType.POOLING_WALLET: print( f"Wallet with id: {wallet_id_passed_in} is not a pooling wallet." " Please provide a different id." ) return pool_wallet_info, _ = await wallet_client.pw_status(wallet_id_passed_in) await pprint_pool_wallet_state( wallet_client, wallet_id_passed_in, pool_wallet_info, address_prefix, pool_state_dict.get(pool_wallet_info.launcher_id), ) else: print(f"Wallet height: {await wallet_client.get_height_info()}") print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}") for summary in summaries_response: wallet_id = summary["id"] typ = WalletType(int(summary["type"])) if typ == WalletType.POOLING_WALLET: print(f"Wallet id {wallet_id}: ") pool_wallet_info, _ = await wallet_client.pw_status(wallet_id) await pprint_pool_wallet_state( wallet_client, wallet_id, pool_wallet_info, address_prefix, pool_state_dict.get(pool_wallet_info.launcher_id), ) print("") async def get_login_link(launcher_id_str: str) -> None: launcher_id: bytes32 = bytes32.from_hexstr(launcher_id_str) farmer_client: Optional[FarmerRpcClient] async with get_any_service_client("farmer") as node_config_fp: farmer_client, _, _ = node_config_fp if farmer_client is not None: login_link: Optional[str] = await farmer_client.get_pool_login_link(launcher_id) if login_link is None: print("Was not able to get login link.") else: print(login_link) async def submit_tx_with_confirmation( message: str, prompt: bool, func: Callable, wallet_client: WalletRpcClient, fingerprint: int, wallet_id: int ): print(message) if prompt: user_input: str = input("Confirm [n]/y: ") else: user_input = "yes" if user_input.lower() == "y" or user_input.lower() == "yes": try: 
result: Dict = await func() tx_record: TransactionRecord = result["transaction"] start = time.time() while time.time() - start < 10: await asyncio.sleep(0.1) tx = await wallet_client.get_transaction(str(1), tx_record.name) if len(tx.sent_to) > 0: print(transaction_submitted_msg(tx)) print(transaction_status_msg(fingerprint, tx_record.name)) return None except Exception as e: print(f"Error performing operation on Plot NFT -f {fingerprint} wallet id: {wallet_id}: {e}") return print("Aborting.") async def join_pool(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: config = load_config(DEFAULT_ROOT_PATH, "config.yaml") enforce_https = config["full_node"]["selected_network"] == "mainnet" pool_url: str = args["pool_url"] fee = Decimal(args.get("fee", 0)) fee_mojos = uint64(int(fee * units["flax"])) if enforce_https and not pool_url.startswith("https://"): print(f"Pool URLs must be HTTPS on mainnet {pool_url}. Aborting.") return wallet_id = args.get("id", None) prompt = not args.get("yes", False) try: async with aiohttp.ClientSession() as session: async with session.get(f"{pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt())) as response: if response.ok: json_dict = json.loads(await response.text()) else: print(f"Response not OK: {response.status}") return except Exception as e: print(f"Error connecting to pool {pool_url}: {e}") return if json_dict["relative_lock_height"] > 1000: print("Relative lock height too high for this pool, cannot join") return if json_dict["protocol_version"] != POOL_PROTOCOL_VERSION: print(f"Incorrect version: {json_dict['protocol_version']}, should be {POOL_PROTOCOL_VERSION}") return pprint(json_dict) msg = f"\nWill join pool: {pool_url} with Plot NFT {fingerprint}." 
func = functools.partial( wallet_client.pw_join_pool, wallet_id, hexstr_to_bytes(json_dict["target_puzzle_hash"]), pool_url, json_dict["relative_lock_height"], fee_mojos, ) await submit_tx_with_confirmation(msg, prompt, func, wallet_client, fingerprint, wallet_id) async def self_pool(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args.get("id", None) prompt = not args.get("yes", False) fee = Decimal(args.get("fee", 0)) fee_mojos = uint64(int(fee * units["flax"])) msg = f"Will start self-farming with Plot NFT on wallet id {wallet_id} fingerprint {fingerprint}." func = functools.partial(wallet_client.pw_self_pool, wallet_id, fee_mojos) await submit_tx_with_confirmation(msg, prompt, func, wallet_client, fingerprint, wallet_id) async def inspect_cmd(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args.get("id", None) pool_wallet_info, unconfirmed_transactions = await wallet_client.pw_status(wallet_id) print( { "pool_wallet_info": pool_wallet_info, "unconfirmed_transactions": [ {"sent_to": tx.sent_to, "transaction_id": tx.name.hex()} for tx in unconfirmed_transactions ], } ) async def claim_cmd(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None: wallet_id = args.get("id", None) fee = Decimal(args.get("fee", 0)) fee_mojos = uint64(int(fee * units["flax"])) msg = f"\nWill claim rewards for wallet ID: {wallet_id}." 
func = functools.partial( wallet_client.pw_absorb_rewards, wallet_id, fee_mojos, ) await submit_tx_with_confirmation(msg, False, func, wallet_client, fingerprint, wallet_id) async def change_payout_instructions(launcher_id: str, address: str) -> None: new_pool_configs: List[PoolWalletConfig] = [] id_found = False try: puzzle_hash = decode_puzzle_hash(address) except ValueError: print(f"Invalid Address: {address}") return old_configs: List[PoolWalletConfig] = load_pool_config(DEFAULT_ROOT_PATH) for pool_config in old_configs: if pool_config.launcher_id == hexstr_to_bytes(launcher_id): id_found = True pool_config = replace(pool_config, payout_instructions=puzzle_hash.hex()) new_pool_configs.append(pool_config) if id_found: print(f"Launcher Id: {launcher_id} Found, Updating Config.") await update_pool_config(DEFAULT_ROOT_PATH, new_pool_configs) print(f"Payout Instructions for launcher id: {launcher_id} successfully updated to: {address}.") print(f"You will need to change the payout instructions on every device you use to: {address}.") else: print(f"Launcher Id: {launcher_id} Not found.")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/plotnft.py
flax/cmds/plotnft.py
from decimal import Decimal from typing import Optional import click from flax.cmds.cmds_util import execute_with_wallet MAX_CMDLINE_FEE = Decimal(0.5) def validate_fee(ctx, param, value): try: fee = Decimal(value) except ValueError: raise click.BadParameter("Fee must be decimal dotted value in XFX (e.g. 0.00005)") if fee < 0 or fee > MAX_CMDLINE_FEE: raise click.BadParameter(f"Fee must be in the range 0 to {MAX_CMDLINE_FEE}") return value @click.group("plotnft", short_help="Manage your plot NFTs") def plotnft_cmd() -> None: pass @plotnft_cmd.command("show", short_help="Show plotnft information") @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) @click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=False) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) def show_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int) -> None: import asyncio from .plotnft_funcs import show asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, {"id": id}, show)) @plotnft_cmd.command( "get_login_link", short_help="Create a login link for a pool. To get the launcher id, use plotnft show." 
) @click.option("-l", "--launcher_id", help="Launcher ID of the plotnft", type=str, required=True) def get_login_link_cmd(launcher_id: str) -> None: import asyncio from .plotnft_funcs import get_login_link asyncio.run(get_login_link(launcher_id)) @plotnft_cmd.command("create", short_help="Create a plot NFT") @click.option("-y", "--yes", help="No prompts", is_flag=True) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-u", "--pool_url", help="HTTPS host:port of the pool to join", type=str, required=False) @click.option("-s", "--state", help="Initial state of Plot NFT: local or pool", type=str, required=True) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX. Fee is used TWICE: once to create the singleton, once for init.", type=str, default="0", show_default=True, required=True, callback=validate_fee, ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) def create_cmd( wallet_rpc_port: Optional[int], fingerprint: int, pool_url: str, state: str, fee: int, yes: bool, ) -> None: import asyncio from .plotnft_funcs import create if pool_url is not None and state.lower() == "local": print(f" pool_url argument [{pool_url}] is not allowed when creating in 'local' state") return if pool_url in [None, ""] and state.lower() == "pool": print(" pool_url argument (-u) is required for pool starting state") return valid_initial_states = {"pool": "FARMING_TO_POOL", "local": "SELF_POOLING"} extra_params = { "pool_url": pool_url, "state": valid_initial_states[state], "fee": fee, "yes": yes, } asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, create)) @plotnft_cmd.command("join", short_help="Join a plot NFT to a Pool") @click.option("-y", "--yes", help="No prompts", is_flag=True) @click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option("-u", "--pool_url", help="HTTPS host:port of the pool to join", type=str, required=True) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX. Fee is used TWICE: once to leave pool, once to join.", type=str, default="0", show_default=True, required=True, callback=validate_fee, ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) def join_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, fee: int, pool_url: str, yes: bool) -> None: import asyncio from .plotnft_funcs import join_pool extra_params = {"pool_url": pool_url, "id": id, "fee": fee, "yes": yes} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, join_pool)) @plotnft_cmd.command("leave", short_help="Leave a pool and return to self-farming") @click.option("-y", "--yes", help="No prompts", is_flag=True) @click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX. Fee is charged TWICE.", type=str, default="0", show_default=True, required=True, callback=validate_fee, ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) def self_pool_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, fee: int, yes: bool) -> None: import asyncio from .plotnft_funcs import self_pool extra_params = {"id": id, "fee": fee, "yes": yes} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, self_pool)) @plotnft_cmd.command("inspect", short_help="Get Detailed plotnft information as JSON") @click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. 
See the rpc_port under wallet in config.yaml", type=int, default=None, ) def inspect(wallet_rpc_port: Optional[int], fingerprint: int, id: int) -> None: import asyncio from .plotnft_funcs import inspect_cmd extra_params = {"id": id} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, inspect_cmd)) @plotnft_cmd.command("claim", short_help="Claim rewards from a plot NFT") @click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True) @click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int) @click.option( "-m", "--fee", help="Set the fees per transaction, in XFX.", type=str, default="0", show_default=True, required=True, callback=validate_fee, ) @click.option( "-wp", "--wallet-rpc-port", help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml", type=int, default=None, ) def claim(wallet_rpc_port: Optional[int], fingerprint: int, id: int, fee: int) -> None: import asyncio from .plotnft_funcs import claim_cmd extra_params = {"id": id, "fee": fee} asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, claim_cmd)) @plotnft_cmd.command( "change_payout_instructions", short_help="Change the payout instructions for a pool. To get the launcher id, use plotnft show.", ) @click.option("-l", "--launcher_id", help="Launcher ID of the plotnft", type=str, required=True) @click.option("-a", "--address", help="New address for payout instructions", type=str, required=True) def change_payout_instructions_cmd(launcher_id: str, address: str) -> None: import asyncio from .plotnft_funcs import change_payout_instructions asyncio.run(change_payout_instructions(launcher_id, address))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/keys_funcs.py
flax/cmds/keys_funcs.py
import logging import os import sys from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union from flax.consensus.coinbase import create_puzzlehash_for_pk from flax.cmds.passphrase_funcs import obtain_current_passphrase from flax.daemon.client import connect_to_daemon_and_validate from flax.daemon.keychain_proxy import KeychainProxy, connect_to_keychain_and_validate, wrap_local_keychain from flax.util.bech32m import encode_puzzle_hash from flax.util.errors import KeychainNotSet from flax.util.config import load_config from flax.util.default_root import DEFAULT_ROOT_PATH from flax.util.errors import KeychainException from flax.util.file_keyring import MAX_LABEL_LENGTH from flax.util.ints import uint32 from flax.util.keychain import Keychain, bytes_to_mnemonic, generate_mnemonic, mnemonic_to_seed from flax.util.keyring_wrapper import KeyringWrapper from flax.wallet.derive_keys import ( master_sk_to_farmer_sk, master_sk_to_pool_sk, master_sk_to_wallet_sk, master_sk_to_wallet_sk_unhardened, ) def unlock_keyring() -> None: """ Used to unlock the keyring interactively, if necessary """ try: if KeyringWrapper.get_shared_instance().has_master_passphrase(): obtain_current_passphrase(use_passphrase_cache=True) except Exception as e: print(f"Unable to unlock the keyring: {e}") sys.exit(1) def generate_and_print(): """ Generates a seed for a private key, and prints the mnemonic to the terminal. """ mnemonic = generate_mnemonic() print("Generating private key. Mnemonic (24 secret words):") print(mnemonic) print("Note that this key has not been added to the keychain. Run flax keys add") return mnemonic def generate_and_add(label: Optional[str]): """ Generates a seed for a private key, prints the mnemonic to the terminal, and adds the key to the keyring. 
""" unlock_keyring() print("Generating private key") query_and_add_private_key_seed(mnemonic=generate_mnemonic(), label=label) def query_and_add_private_key_seed(mnemonic: Optional[str], label: Optional[str] = None): unlock_keyring() if mnemonic is None: mnemonic = input("Enter the mnemonic you want to use: ") if label is None: label = input("Enter the label you want to assign to this key (Press Enter to skip): ") if len(label) == 0: label = None add_private_key_seed(mnemonic, label) def add_private_key_seed(mnemonic: str, label: Optional[str]): """ Add a private key seed to the keyring, with the given mnemonic and an optional label. """ unlock_keyring() try: sk = Keychain().add_private_key(mnemonic, label) fingerprint = sk.get_g1().get_fingerprint() print(f"Added private key with public key fingerprint {fingerprint}") except (ValueError, KeychainException) as e: print(e) def show_all_key_labels() -> None: unlock_keyring() fingerprint_width = 11 def print_line(fingerprint: str, label: str) -> None: fingerprint_text = ("{0:<" + str(fingerprint_width) + "}").format(fingerprint) label_text = ("{0:<" + str(MAX_LABEL_LENGTH) + "}").format(label) print("| " + fingerprint_text + " | " + label_text + " |") keys = Keychain().get_keys() if len(keys) == 0: sys.exit("No keys are present in the keychain. 
Generate them with 'flax keys generate'") print_line("fingerprint", "label") print_line("-" * fingerprint_width, "-" * MAX_LABEL_LENGTH) for key_data in keys: print_line(str(key_data.fingerprint), key_data.label or "No label assigned") def set_key_label(fingerprint: int, label: str) -> None: unlock_keyring() try: Keychain().set_label(fingerprint, label) print(f"label {label!r} assigned to {fingerprint!r}") except Exception as e: sys.exit(f"Error: {e}") def delete_key_label(fingerprint: int) -> None: unlock_keyring() try: Keychain().delete_label(fingerprint) print(f"label removed for {fingerprint!r}") except Exception as e: sys.exit(f"Error: {e}") def show_all_keys(show_mnemonic: bool, non_observer_derivation: bool): """ Prints all keys and mnemonics (if available). """ unlock_keyring() root_path = DEFAULT_ROOT_PATH config = load_config(root_path, "config.yaml") all_keys = Keychain().get_keys(True) selected = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] if len(all_keys) == 0: print("There are no saved private keys") return None msg = "Showing all public keys derived from your master seed and private key:" if show_mnemonic: msg = "Showing all public and private keys" print(msg) for key_data in all_keys: sk = key_data.private_key print("") if key_data.label is not None: print("Label:", key_data.label) print("Fingerprint:", key_data.fingerprint) print("Master public key (m):", key_data.public_key) print( "Farmer public key (m/12381/8444/0/0):", master_sk_to_farmer_sk(sk).get_g1(), ) print("Pool public key (m/12381/8444/1/0):", master_sk_to_pool_sk(sk).get_g1()) first_wallet_sk: PrivateKey = ( master_sk_to_wallet_sk(sk, uint32(0)) if non_observer_derivation else master_sk_to_wallet_sk_unhardened(sk, uint32(0)) ) wallet_address: str = encode_puzzle_hash(create_puzzlehash_for_pk(first_wallet_sk.get_g1()), prefix) print(f"First wallet address{' (non-observer)' if non_observer_derivation else ''}: {wallet_address}") 
if show_mnemonic: print("Master private key (m):", bytes(sk).hex()) print( "First wallet secret key (m/12381/8444/2/0):", master_sk_to_wallet_sk(sk, uint32(0)), ) mnemonic = bytes_to_mnemonic(key_data.entropy) print(" Mnemonic seed (24 secret words):") print(mnemonic) def delete(fingerprint: int): """ Delete a key by its public key fingerprint (which is an integer). """ unlock_keyring() print(f"Deleting private_key with fingerprint {fingerprint}") Keychain().delete_key_by_fingerprint(fingerprint) def derive_sk_from_hd_path(master_sk: PrivateKey, hd_path_root: str) -> Tuple[PrivateKey, str]: """ Derive a private key from the provided HD path. Takes a master key and HD path as input, and returns the derived key and the HD path that was used to derive it. """ from flax.wallet.derive_keys import _derive_path, _derive_path_unhardened class DerivationType(Enum): NONOBSERVER = 0 OBSERVER = 1 path: List[str] = hd_path_root.split("/") if len(path) == 0 or path[0] != "m": raise ValueError("Invalid HD path. Must start with 'm'") path = path[1:] # Skip "m" if len(path) > 0 and path[-1] == "": # remove trailing slash path = path[:-1] index_and_derivation_types: List[Tuple[int, DerivationType]] = [] # Validate path for current_index_str in path: if len(current_index_str) == 0: raise ValueError("Invalid HD path. 
Empty index") non_observer: bool = current_index_str[-1] == "n" current_index: int = int(current_index_str[:-1]) if non_observer else int(current_index_str) index_and_derivation_types.append( (current_index, DerivationType.NONOBSERVER if non_observer else DerivationType.OBSERVER) ) current_sk: PrivateKey = master_sk # Derive keys along the path for (current_index, derivation_type) in index_and_derivation_types: if derivation_type == DerivationType.NONOBSERVER: current_sk = _derive_path(current_sk, [current_index]) elif derivation_type == DerivationType.OBSERVER: current_sk = _derive_path_unhardened(current_sk, [current_index]) else: raise ValueError(f"Unhandled derivation type: {derivation_type}") return (current_sk, "m/" + "/".join(path) + "/") def sign(message: str, private_key: PrivateKey, hd_path: str, as_bytes: bool): sk: PrivateKey = derive_sk_from_hd_path(private_key, hd_path)[0] data = bytes.fromhex(message) if as_bytes else bytes(message, "utf-8") print("Public key:", sk.get_g1()) print("Signature:", AugSchemeMPL.sign(sk, data)) def verify(message: str, public_key: str, signature: str): messageBytes = bytes(message, "utf-8") public_key = G1Element.from_bytes(bytes.fromhex(public_key)) signature = G2Element.from_bytes(bytes.fromhex(signature)) print(AugSchemeMPL.verify(public_key, messageBytes, signature)) async def migrate_keys(root_path: Path, forced: bool = False) -> bool: from flax.util.keyring_wrapper import KeyringWrapper from flax.util.misc import prompt_yes_no deprecation_message = ( "\nLegacy keyring support is deprecated and will be removed in an upcoming version. " "You need to migrate your keyring to continue using Flax.\n" ) # Check if the keyring needs a full migration (i.e. 
if it's using the old keyring) if Keychain.needs_migration(): print(deprecation_message) return await KeyringWrapper.get_shared_instance().migrate_legacy_keyring_interactive() else: already_checked_marker = KeyringWrapper.get_shared_instance().keys_root_path / ".checked_legacy_migration" if forced and already_checked_marker.exists(): return True log = logging.getLogger("migrate_keys") config = load_config(root_path, "config.yaml") # Connect to the daemon here first to see if ts running since `connect_to_keychain_and_validate` just tries to # connect forever if it's not up. keychain_proxy: Optional[KeychainProxy] = None daemon = await connect_to_daemon_and_validate(root_path, config, quiet=True) if daemon is not None: await daemon.close() keychain_proxy = await connect_to_keychain_and_validate(root_path, log) if keychain_proxy is None: keychain_proxy = wrap_local_keychain(Keychain(), log=log) try: legacy_keyring = Keychain(force_legacy=True) all_sks = await keychain_proxy.get_all_private_keys() all_legacy_sks = legacy_keyring.get_all_private_keys() set_legacy_sks = {str(x[0]) for x in all_legacy_sks} set_sks = {str(x[0]) for x in all_sks} missing_legacy_keys = set_legacy_sks - set_sks keys_to_migrate = [x for x in all_legacy_sks if str(x[0]) in missing_legacy_keys] except KeychainNotSet: keys_to_migrate = [] if len(keys_to_migrate) > 0: print(deprecation_message) print(f"Found {len(keys_to_migrate)} key(s) that need migration:") for key, _ in keys_to_migrate: print(f"Fingerprint: {key.get_g1().get_fingerprint()}") print() if not prompt_yes_no("Migrate these keys?"): await keychain_proxy.close() print("Migration aborted, can't run any flax commands.") return False for sk, seed_bytes in keys_to_migrate: mnemonic = bytes_to_mnemonic(seed_bytes) await keychain_proxy.add_private_key(mnemonic) fingerprint = sk.get_g1().get_fingerprint() print(f"Added private key with public key fingerprint {fingerprint}") print(f"Migrated {len(keys_to_migrate)} key(s)") print("Verifying 
migration results...", end="") all_sks = await keychain_proxy.get_all_private_keys() await keychain_proxy.close() set_sks = {str(x[0]) for x in all_sks} keys_present = set_sks.issuperset(set(map(lambda x: str(x[0]), keys_to_migrate))) if keys_present: print(" Verified") print() response = prompt_yes_no("Remove key(s) from old keyring (recommended)?") if response: legacy_keyring.delete_keys(keys_to_migrate) print(f"Removed {len(keys_to_migrate)} key(s) from old keyring") print("Migration complete") else: print(" Failed") return False return True elif not forced: print("No keys need migration") if already_checked_marker.parent.exists(): already_checked_marker.touch() await keychain_proxy.close() return True def _clear_line_part(n: int): # Move backward, overwrite with spaces, then move backward again sys.stdout.write("\b" * n) sys.stdout.write(" " * n) sys.stdout.write("\b" * n) def _search_derived( current_sk: PrivateKey, search_terms: Tuple[str, ...], path: str, path_indices: Optional[List[int]], limit: int, non_observer_derivation: bool, show_progress: bool, search_public_key: bool, search_private_key: bool, search_address: bool, prefix: str, ) -> List[str]: # Return a subset of search_terms that were found """ Performs a shallow search of keys derived from the current sk for items matching the provided search terms. 
""" from flax.wallet.derive_keys import _derive_path, _derive_path_unhardened class DerivedSearchResultType(Enum): PUBLIC_KEY = "public key" PRIVATE_KEY = "private key" WALLET_ADDRESS = "wallet address" remaining_search_terms: Dict[str, None] = dict.fromkeys(search_terms) current_path: str = path current_path_indices: List[int] = path_indices if path_indices is not None else [] found_search_terms: List[str] = [] for index in range(limit): found_items: List[Tuple[str, str, DerivedSearchResultType]] = [] printed_match: bool = False current_index_str = str(index) + ("n" if non_observer_derivation else "") current_path += f"{current_index_str}" current_path_indices.append(index) if show_progress: # Output just the current index e.g. "25" or "25n" sys.stdout.write(f"{current_index_str}") sys.stdout.flush() # Derive the private key if non_observer_derivation: child_sk = _derive_path(current_sk, current_path_indices) else: child_sk = _derive_path_unhardened(current_sk, current_path_indices) child_pk: Optional[G1Element] = None # Public key is needed for searching against wallet addresses or public keys if search_public_key or search_address: child_pk = child_sk.get_g1() address: Optional[str] = None if search_address: # Generate a wallet address using the standard p2_delegated_puzzle_or_hidden_puzzle puzzle # TODO: consider generating addresses using other puzzles address = encode_puzzle_hash(create_puzzlehash_for_pk(child_pk), prefix) for term in remaining_search_terms: found_item: Any = None found_item_type: Optional[DerivedSearchResultType] = None if search_private_key and term in str(child_sk): found_item = private_key_string_repr(child_sk) found_item_type = DerivedSearchResultType.PRIVATE_KEY elif search_public_key and child_pk is not None and term in str(child_pk): found_item = child_pk found_item_type = DerivedSearchResultType.PUBLIC_KEY elif search_address and address is not None and term in address: found_item = address found_item_type = 
DerivedSearchResultType.WALLET_ADDRESS if found_item is not None and found_item_type is not None: found_items.append((term, found_item, found_item_type)) if len(found_items) > 0 and show_progress: print() for (term, found_item, found_item_type) in found_items: # Update remaining_search_terms and found_search_terms del remaining_search_terms[term] found_search_terms.append(term) print( f"Found {found_item_type.value}: {found_item} (HD path: {current_path})" ) # lgtm [py/clear-text-logging-sensitive-data] printed_match = True if len(remaining_search_terms) == 0: break # Remove the last index from the path current_path = current_path[: -len(str(current_index_str))] current_path_indices = current_path_indices[:-1] if show_progress: if printed_match: # Write the path (without current_index_str) since we printed out a match # e.g. m/12381/8444/2/ sys.stdout.write(f"{current_path}") # lgtm [py/clear-text-logging-sensitive-data] # Remove the last index from the output else: _clear_line_part(len(current_index_str)) return found_search_terms def search_derive( root_path: Path, private_key: Optional[PrivateKey], search_terms: Tuple[str, ...], limit: int, non_observer_derivation: bool, show_progress: bool, search_types: Tuple[str, ...], derive_from_hd_path: Optional[str], prefix: Optional[str], ) -> bool: """ Searches for items derived from the provided private key, or if not specified, search each private key in the keyring. 
""" from time import perf_counter start_time = perf_counter() private_keys: List[PrivateKey] remaining_search_terms: Dict[str, None] = dict.fromkeys(search_terms) # poor man's ordered set search_address = "address" in search_types search_public_key = "public_key" in search_types search_private_key = "private_key" in search_types if prefix is None: config: Dict = load_config(root_path, "config.yaml") selected: str = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] if "all" in search_types: search_address = True search_public_key = True search_private_key = True if private_key is None: private_keys = [sk for sk, _ in Keychain().get_all_private_keys()] else: private_keys = [private_key] for sk in private_keys: current_path: str = "" found_terms: List[str] = [] if show_progress: print(f"Searching keys derived from: {sk.get_g1().get_fingerprint()}") # Derive from the provided HD path if derive_from_hd_path is not None: derivation_root_sk, hd_path_root = derive_sk_from_hd_path(sk, derive_from_hd_path) if show_progress: sys.stdout.write(hd_path_root) # Shallow search under hd_path_root found_terms = _search_derived( derivation_root_sk, tuple(remaining_search_terms.keys()), hd_path_root, None, limit, non_observer_derivation, show_progress, search_public_key, search_private_key, search_address, prefix, ) # Update remaining_search_terms for term in found_terms: del remaining_search_terms[term] if len(remaining_search_terms) == 0: # Found everything we were looking for break current_path = hd_path_root # Otherwise derive from well-known derivation paths else: current_path_indices: List[int] = [12381, 8444] path_root: str = "m/" for i in [12381, 8444]: path_root += f"{i}{'n' if non_observer_derivation else ''}/" if show_progress: # Print the path root (without last index) # e.g. 
m/12381/8444/ sys.stdout.write(path_root) # 7 account levels for derived keys (0-6): # 0 = farmer, 1 = pool, 2 = wallet, 3 = local, 4 = backup key, 5 = singleton, 6 = pooling authentication for account in range(7): account_str = str(account) + ("n" if non_observer_derivation else "") current_path = path_root + f"{account_str}/" current_path_indices.append(account) if show_progress: # Print the current path index # e.g. 2/ (example full output: m/12381/8444/2/) sys.stdout.write(f"{account_str}/") # lgtm [py/clear-text-logging-sensitive-data] found_terms = _search_derived( sk, tuple(remaining_search_terms.keys()), current_path, list(current_path_indices), # copy limit, non_observer_derivation, show_progress, search_public_key, search_private_key, search_address, prefix, ) # Update remaining_search_terms for found_term in found_terms: del remaining_search_terms[found_term] if len(remaining_search_terms) == 0: # Found everything we were looking for break if show_progress: # +1 to remove the trailing slash _clear_line_part(1 + len(str(account_str))) current_path_indices = current_path_indices[:-1] if len(remaining_search_terms) == 0: # Found everything we were looking for break if show_progress: # +1 to remove the trailing slash _clear_line_part(1 + len(current_path)) sys.stdout.flush() end_time = perf_counter() if len(remaining_search_terms) > 0: for term in remaining_search_terms: print(f"Could not find '{term}'") if show_progress: print() print(f"Search completed in {end_time - start_time} seconds") return len(remaining_search_terms) == 0 def derive_wallet_address( root_path: Path, private_key: PrivateKey, index: int, count: int, prefix: Optional[str], non_observer_derivation: bool, show_hd_path: bool, ): """ Generate wallet addresses using keys derived from the provided private key. 
""" if prefix is None: config: Dict = load_config(root_path, "config.yaml") selected: str = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] path_indices: List[int] = [12381, 8444, 2] wallet_hd_path_root: str = "m/" for i in path_indices: wallet_hd_path_root += f"{i}{'n' if non_observer_derivation else ''}/" for i in range(index, index + count): if non_observer_derivation: sk = master_sk_to_wallet_sk(private_key, uint32(i)) else: sk = master_sk_to_wallet_sk_unhardened(private_key, uint32(i)) # Generate a wallet address using the standard p2_delegated_puzzle_or_hidden_puzzle puzzle # TODO: consider generating addresses using other puzzles address = encode_puzzle_hash(create_puzzlehash_for_pk(sk.get_g1()), prefix) if show_hd_path: print( f"Wallet address {i} " f"({wallet_hd_path_root + str(i) + ('n' if non_observer_derivation else '')}): {address}" ) else: print(f"Wallet address {i}: {address}") def private_key_string_repr(private_key: PrivateKey): """Print a PrivateKey in a human-readable formats""" s: str = str(private_key) return s[len("<PrivateKey ") : s.rfind(">")] if s.startswith("<PrivateKey ") else s def derive_child_key( master_sk: PrivateKey, key_type: Optional[str], derive_from_hd_path: Optional[str], index: int, count: int, non_observer_derivation: bool, show_private_keys: bool, show_hd_path: bool, ): """ Derive child keys from the provided master key. 
""" from flax.wallet.derive_keys import _derive_path, _derive_path_unhardened derivation_root_sk: Optional[PrivateKey] = None hd_path_root: Optional[str] = None current_sk: Optional[PrivateKey] = None # Key type was specified if key_type is not None: path_indices: List[int] = [12381, 8444] path_indices.append( { "farmer": 0, "pool": 1, "wallet": 2, "local": 3, "backup": 4, "singleton": 5, "pool_auth": 6, }[key_type] ) if non_observer_derivation: current_sk = _derive_path(master_sk, path_indices) else: current_sk = _derive_path_unhardened(master_sk, path_indices) derivation_root_sk = current_sk hd_path_root = "m/" for i in path_indices: hd_path_root += f"{i}{'n' if non_observer_derivation else ''}/" # Arbitrary HD path was specified elif derive_from_hd_path is not None: derivation_root_sk, hd_path_root = derive_sk_from_hd_path(master_sk, derive_from_hd_path) # Derive child keys from derivation_root_sk if derivation_root_sk is not None and hd_path_root is not None: for i in range(index, index + count): if non_observer_derivation: sk = _derive_path(derivation_root_sk, [i]) else: sk = _derive_path_unhardened(derivation_root_sk, [i]) hd_path: str = ( " (" + hd_path_root + str(i) + ("n" if non_observer_derivation else "") + ")" if show_hd_path else "" ) key_type_str: Optional[str] if key_type is not None: key_type_str = key_type.capitalize() else: key_type_str = "Non-Observer" if non_observer_derivation else "Observer" print(f"{key_type_str} public key {i}{hd_path}: {sk.get_g1()}") if show_private_keys: print(f"{key_type_str} private key {i}{hd_path}: {private_key_string_repr(sk)}") def private_key_for_fingerprint(fingerprint: int) -> Optional[PrivateKey]: unlock_keyring() private_keys = Keychain().get_all_private_keys() for sk, _ in private_keys: if sk.get_g1().get_fingerprint() == fingerprint: return sk return None def get_private_key_with_fingerprint_or_prompt(fingerprint: Optional[int]): """ Get a private key with the specified fingerprint. 
If fingerprint is not specified, prompt the user to select a key. """ # Return the private key matching the specified fingerprint if fingerprint is not None: return private_key_for_fingerprint(fingerprint) fingerprints: List[int] = [pk.get_fingerprint() for pk in Keychain().get_all_public_keys()] while True: print("Choose key:") for i, fp in enumerate(fingerprints): print(f"{i+1}) {fp}") val = None while val is None: val = input("Enter a number to pick or q to quit: ") if val == "q": return None if not val.isdigit(): val = None else: index = int(val) - 1 if index >= len(fingerprints): print("Invalid value") val = None continue else: return private_key_for_fingerprint(fingerprints[index]) def private_key_from_mnemonic_seed_file(filename: Path) -> PrivateKey: """ Create a private key from a mnemonic seed file. """ mnemonic = filename.read_text().rstrip() seed = mnemonic_to_seed(mnemonic) return AugSchemeMPL.key_gen(seed) def resolve_derivation_master_key(fingerprint_or_filename: Optional[Union[int, str, Path]]) -> PrivateKey: """ Given a key fingerprint of file containing a mnemonic seed, return the private key. """ if fingerprint_or_filename is not None and ( isinstance(fingerprint_or_filename, str) or isinstance(fingerprint_or_filename, Path) ): return private_key_from_mnemonic_seed_file(Path(os.fspath(fingerprint_or_filename))) else: return get_private_key_with_fingerprint_or_prompt(fingerprint_or_filename)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/db_upgrade_func.py
flax/cmds/db_upgrade_func.py
from typing import Dict, Optional import platform from pathlib import Path import shutil import sys from time import time import textwrap import os from flax.util.config import load_config, lock_and_load_config, save_config from flax.util.path import path_from_root from flax.util.ints import uint32 from flax.types.blockchain_format.sized_bytes import bytes32 # if either the input database or output database file is specified, the # configuration file will not be updated to use the new database. Only when using # the currently configured db file, and writing to the default output file will # the configuration file also be updated def db_upgrade_func( root_path: Path, in_db_path: Optional[Path] = None, out_db_path: Optional[Path] = None, *, no_update_config: bool = False, force: bool = False, ) -> None: update_config: bool = in_db_path is None and out_db_path is None and not no_update_config config: Dict selected_network: str db_pattern: str if in_db_path is None or out_db_path is None: config = load_config(root_path, "config.yaml")["full_node"] selected_network = config["selected_network"] db_pattern = config["database_path"] db_path_replaced: str if in_db_path is None: db_path_replaced = db_pattern.replace("CHALLENGE", selected_network) in_db_path = path_from_root(root_path, db_path_replaced) if out_db_path is None: db_path_replaced = db_pattern.replace("CHALLENGE", selected_network).replace("_v1_", "_v2_") out_db_path = path_from_root(root_path, db_path_replaced) out_db_path.parent.mkdir(parents=True, exist_ok=True) total, used, free = shutil.disk_usage(out_db_path.parent) in_db_size = in_db_path.stat().st_size if free < in_db_size: no_free: bool = free < in_db_size * 0.6 strength: str if no_free: strength = "probably not enough" else: strength = "very little" print(f"there is {strength} free space on the volume where the output database will be written:") print(f" {out_db_path}") print( f"free space: {free / 1024 / 1024 / 1024:0.2f} GiB expected about " 
f"{in_db_size / 1024 / 1024 / 1024:0.2f} GiB" ) if no_free and not force: print("to override this check and convert anyway, pass --force") return try: convert_v1_to_v2(in_db_path, out_db_path) if update_config: print("updating config.yaml") with lock_and_load_config(root_path, "config.yaml") as config: new_db_path = db_pattern.replace("_v1_", "_v2_") config["full_node"]["database_path"] = new_db_path print(f"database_path: {new_db_path}") save_config(root_path, "config.yaml", config) except RuntimeError as e: print(f"conversion failed with error: {e}.") except Exception as e: print( textwrap.dedent( f"""\ conversion failed with error: {e}. The target v2 database is left in place (possibly in an incomplete state) {out_db_path} If the failure was caused by a full disk, ensure the volumes of your temporary- and target directory have sufficient free space.""" ) ) if platform.system() == "Windows": temp_dir = None # this is where GetTempPath() looks # https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-gettemppatha if "TMP" in os.environ: temp_dir = os.environ["TMP"] elif "TEMP" in os.environ: temp_dir = os.environ["TEMP"] elif "USERPROFILE" in os.environ: temp_dir = os.environ["USERPROFILE"] if temp_dir is not None: print(f"your temporary directory may be {temp_dir}") temp_env = "TMP" else: temp_env = "SQLITE_TMPDIR" print(f'you can specify the "{temp_env}" environment variable to control the temporary directory to be used') print(f"\n\nLEAVING PREVIOUS DB FILE UNTOUCHED {in_db_path}\n") BLOCK_COMMIT_RATE = 10000 SES_COMMIT_RATE = 2000 HINT_COMMIT_RATE = 2000 COIN_COMMIT_RATE = 30000 def convert_v1_to_v2(in_path: Path, out_path: Path) -> None: import sqlite3 import zstd from contextlib import closing if not in_path.exists(): raise RuntimeError(f"input file doesn't exist. {in_path}") if in_path == out_path: raise RuntimeError(f"output file is the same as the input {in_path}") if out_path.exists(): raise RuntimeError(f"output file already exists. 
{out_path}") print(f"opening file for reading: {in_path}") with closing(sqlite3.connect(in_path)) as in_db: try: with closing(in_db.execute("SELECT * from database_version")) as cursor: row = cursor.fetchone() if row is not None and row[0] != 1: raise RuntimeError(f"blockchain database already version {row[0]}. Won't convert") except sqlite3.OperationalError: pass print(f"opening file for writing: {out_path}") with closing(sqlite3.connect(out_path)) as out_db: out_db.execute("pragma journal_mode=OFF") out_db.execute("pragma synchronous=OFF") out_db.execute("pragma cache_size=131072") out_db.execute("pragma locking_mode=exclusive") print("initializing v2 version") out_db.execute("CREATE TABLE database_version(version int)") out_db.execute("INSERT INTO database_version VALUES(?)", (2,)) print("initializing v2 block store") out_db.execute( "CREATE TABLE full_blocks(" "header_hash blob PRIMARY KEY," "prev_hash blob," "height bigint," "sub_epoch_summary blob," "is_fully_compactified tinyint," "in_main_chain tinyint," "block blob," "block_record blob)" ) out_db.execute( "CREATE TABLE sub_epoch_segments_v3(" "ses_block_hash blob PRIMARY KEY," "challenge_segments blob)" ) out_db.execute("CREATE TABLE current_peak(key int PRIMARY KEY, hash blob)") with closing(in_db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")) as cursor: peak_row = cursor.fetchone() if peak_row is None: raise RuntimeError("v1 database does not have a peak block, there is no blockchain to convert") peak_hash = bytes32(bytes.fromhex(peak_row[0])) peak_height = uint32(peak_row[1]) print(f"peak: {peak_hash.hex()} height: {peak_height}") out_db.execute("INSERT INTO current_peak VALUES(?, ?)", (0, peak_hash)) out_db.commit() print("[1/5] converting full_blocks") height = peak_height + 1 hh = peak_hash commit_in = BLOCK_COMMIT_RATE rate = 1.0 start_time = time() block_start_time = start_time block_values = [] with closing( in_db.execute( "SELECT header_hash, prev_hash, block, 
sub_epoch_summary FROM block_records ORDER BY height DESC" ) ) as cursor: with closing( in_db.execute( "SELECT header_hash, height, is_fully_compactified, block FROM full_blocks ORDER BY height DESC" ) ) as cursor_2: out_db.execute("begin transaction") for row in cursor: header_hash = bytes.fromhex(row[0]) if header_hash != hh: continue # progress cursor_2 until we find the header hash while True: row_2 = cursor_2.fetchone() if row_2 is None: raise RuntimeError(f"block {hh.hex()} not found") if bytes.fromhex(row_2[0]) == hh: break assert row_2[1] == height - 1 height = row_2[1] is_fully_compactified = row_2[2] block_bytes = row_2[3] prev_hash = bytes32.fromhex(row[1]) block_record = row[2] ses = row[3] block_values.append( ( hh, prev_hash, height, ses, is_fully_compactified, 1, # in_main_chain zstd.compress(block_bytes), block_record, ) ) hh = prev_hash if (height % 1000) == 0: print( f"\r{height: 10d} {(peak_height-height)*100/peak_height:.2f}% " f"{rate:0.1f} blocks/s ETA: {height//rate} s ", end="", ) sys.stdout.flush() commit_in -= 1 if commit_in == 0: commit_in = BLOCK_COMMIT_RATE out_db.executemany( "INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?, ?, ?, ?)", block_values ) out_db.commit() out_db.execute("begin transaction") block_values = [] end_time = time() rate = BLOCK_COMMIT_RATE / (end_time - start_time) start_time = end_time out_db.executemany("INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?, ?, ?, ?)", block_values) out_db.commit() end_time = time() print(f"\r {end_time - block_start_time:.2f} seconds ") print("[2/5] converting sub_epoch_segments_v3") commit_in = SES_COMMIT_RATE ses_values = [] ses_start_time = time() with closing( in_db.execute("SELECT ses_block_hash, challenge_segments FROM sub_epoch_segments_v3") ) as cursor: count = 0 out_db.execute("begin transaction") for row in cursor: block_hash = bytes32.fromhex(row[0]) ses = row[1] ses_values.append((block_hash, ses)) count += 1 if (count % 100) == 0: print(f"\r{count:10d} ", 
end="") sys.stdout.flush() commit_in -= 1 if commit_in == 0: commit_in = SES_COMMIT_RATE out_db.executemany("INSERT INTO sub_epoch_segments_v3 VALUES (?, ?)", ses_values) out_db.commit() out_db.execute("begin transaction") ses_values = [] out_db.executemany("INSERT INTO sub_epoch_segments_v3 VALUES (?, ?)", ses_values) out_db.commit() end_time = time() print(f"\r {end_time - ses_start_time:.2f} seconds ") print("[3/5] converting hint_store") commit_in = HINT_COMMIT_RATE hint_start_time = time() hint_values = [] out_db.execute("CREATE TABLE hints(coin_id blob, hint blob, UNIQUE (coin_id, hint))") out_db.commit() try: with closing(in_db.execute("SELECT coin_id, hint FROM hints")) as cursor: count = 0 out_db.execute("begin transaction") for row in cursor: hint_values.append((row[0], row[1])) commit_in -= 1 if commit_in == 0: commit_in = HINT_COMMIT_RATE out_db.executemany("INSERT OR IGNORE INTO hints VALUES(?, ?)", hint_values) out_db.commit() out_db.execute("begin transaction") hint_values = [] except sqlite3.OperationalError: print(" no hints table, skipping") out_db.executemany("INSERT OR IGNORE INTO hints VALUES (?, ?)", hint_values) out_db.commit() end_time = time() print(f"\r {end_time - hint_start_time:.2f} seconds ") print("[4/5] converting coin_store") out_db.execute( "CREATE TABLE coin_record(" "coin_name blob PRIMARY KEY," " confirmed_index bigint," " spent_index bigint," # if this is zero, it means the coin has not been spent " coinbase int," " puzzle_hash blob," " coin_parent blob," " amount blob," # we use a blob of 8 bytes to store uint64 " timestamp bigint)" ) out_db.commit() commit_in = COIN_COMMIT_RATE rate = 1.0 start_time = time() coin_values = [] coin_start_time = start_time with closing( in_db.execute( "SELECT coin_name, confirmed_index, spent_index, coinbase, " "puzzle_hash, coin_parent, amount, timestamp " "FROM coin_record WHERE confirmed_index <= ?", (peak_height,), ) ) as cursor: count = 0 out_db.execute("begin transaction") for row in 
cursor: spent_index = row[2] # in order to convert a consistent snapshot of the # blockchain state, any coin that was spent *after* our # cutoff must be converted into an unspent coin if spent_index > peak_height: spent_index = 0 coin_values.append( ( bytes.fromhex(row[0]), row[1], spent_index, row[3], bytes.fromhex(row[4]), bytes.fromhex(row[5]), row[6], row[7], ) ) count += 1 if (count % 2000) == 0: print(f"\r{count//1000:10d}k coins {rate:0.1f} coins/s ", end="") sys.stdout.flush() commit_in -= 1 if commit_in == 0: commit_in = COIN_COMMIT_RATE out_db.executemany("INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?)", coin_values) out_db.commit() out_db.execute("begin transaction") coin_values = [] end_time = time() rate = COIN_COMMIT_RATE / (end_time - start_time) start_time = end_time out_db.executemany("INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?)", coin_values) out_db.commit() end_time = time() print(f"\r {end_time - coin_start_time:.2f} seconds ") print("[5/5] build indices") index_start_time = time() print(" block store") out_db.execute("CREATE INDEX height on full_blocks(height)") out_db.execute( "CREATE INDEX is_fully_compactified ON" " full_blocks(is_fully_compactified, in_main_chain) WHERE in_main_chain=1" ) out_db.execute("CREATE INDEX main_chain ON full_blocks(height, in_main_chain) WHERE in_main_chain=1") out_db.commit() print(" coin store") out_db.execute("CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)") out_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)") out_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)") out_db.execute("CREATE INDEX IF NOT EXISTS coin_parent_index on coin_record(coin_parent)") out_db.commit() print(" hint store") out_db.execute("CREATE TABLE IF NOT EXISTS hints(coin_id blob, hint blob, UNIQUE (coin_id, hint))") out_db.commit() end_time = time() print(f"\r {end_time - index_start_time:.2f} seconds ")
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/init.py
flax/cmds/init.py
import click @click.command("init", short_help="Create or migrate the configuration") @click.option( "--create-certs", "-c", default=None, help="Create new SSL certificates based on CA in [directory]", type=click.Path(), ) @click.option( "--fix-ssl-permissions", is_flag=True, help="Attempt to fix SSL certificate/key file permissions", ) @click.option("--testnet", is_flag=True, help="Configure this flax install to connect to the testnet") @click.option("--set-passphrase", "-s", is_flag=True, help="Protect your keyring with a passphrase") @click.option( "--v1-db", is_flag=True, help="Initialize the blockchain database in v1 format (compatible with older versions of the full node)", ) @click.pass_context def init_cmd(ctx: click.Context, create_certs: str, fix_ssl_permissions: bool, testnet: bool, v1_db: bool, **kwargs): """ Create a new configuration or migrate from previous versions to current \b Follow these steps to create new certificates for a remote harvester: - Make a copy of your Farming Machine CA directory: ~/.flax/[version]/config/ssl/ca - Shut down all flax daemon processes with `flax stop all -d` - Run `flax init -c [directory]` on your remote harvester, where [directory] is the the copy of your Farming Machine CA directory - Get more details on remote harvester on Flax wiki: https://github.com/Flax-Network/flax-blockchain/wiki/Farming-on-many-machines """ from pathlib import Path from .init_funcs import init from flax.cmds.passphrase_funcs import initialize_passphrase set_passphrase = kwargs.get("set_passphrase") if set_passphrase: initialize_passphrase() init( Path(create_certs) if create_certs is not None else None, ctx.obj["root_path"], fix_ssl_permissions, testnet, v1_db, ) if __name__ == "__main__": from .init_funcs import flax_init from flax.util.default_root import DEFAULT_ROOT_PATH flax_init(DEFAULT_ROOT_PATH)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/cmds/data.py
flax/cmds/data.py
from __future__ import annotations import json import logging from pathlib import Path from typing import Any, Callable, Coroutine, Dict, List, Optional, TypeVar, Union import click _T = TypeVar("_T") FC = TypeVar("FC", bound=Union[Callable[..., Any], click.Command]) logger = logging.getLogger(__name__) # TODO: this is more general and should be part of refactoring the overall CLI code duplication def run(coro: Coroutine[Any, Any, Optional[Dict[str, Any]]]) -> None: import asyncio response = asyncio.run(coro) success = response is not None and response.get("success", False) logger.info(f"data layer cli call response:{success}") # todo make sure all cli methods follow this pattern, uncomment # if not success: # raise click.ClickException(message=f"query unsuccessful, response: {response}") @click.group("data", short_help="Manage your data") def data_cmd() -> None: pass # TODO: maybe use more helpful `type=`s to get click to handle error reporting of # malformed inputs. def create_changelist_option() -> Callable[[FC], FC]: return click.option( "-d", "--changelist", "changelist_string", help="str representing the changelist", type=str, required=True, ) def create_key_option() -> Callable[[FC], FC]: return click.option( "-h", "--key", "key_string", help="str representing the key", type=str, required=True, ) def create_data_store_id_option() -> Callable[[FC], FC]: return click.option( "-store", "--id", help="The hexadecimal store id.", type=str, required=True, ) def create_data_store_name_option() -> Callable[[FC], FC]: return click.option( "-n", "--table_name", "table_name", help="The name of the table.", type=str, required=True, ) def create_rpc_port_option() -> Callable[[FC], FC]: return click.option( "-dp", "--data-rpc-port", help="Set the port where the data layer is hosting the RPC interface. 
See rpc_port under wallet in config.yaml", type=int, default=None, show_default=True, ) def create_fee_option() -> Callable[[FC], FC]: return click.option( "-m", "--fee", help="Set the fees for the transaction, in XFX", type=str, default=None, show_default=True, required=False, ) @data_cmd.command("create_data_store", short_help="Create a new data store") @create_rpc_port_option() @create_fee_option() def create_data_store( data_rpc_port: int, fee: Optional[str], ) -> None: from flax.cmds.data_funcs import create_data_store_cmd run(create_data_store_cmd(data_rpc_port, fee)) @data_cmd.command("get_value", short_help="Get the value for a given key and store") @create_data_store_id_option() @create_key_option() @click.option("-r", "--root_hash", help="The hexadecimal root hash", type=str, required=False) @create_rpc_port_option() def get_value( id: str, key_string: str, root_hash: Optional[str], data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import get_value_cmd run(get_value_cmd(data_rpc_port, id, key_string, root_hash)) @data_cmd.command("update_data_store", short_help="Update a store by providing the changelist operations") @create_data_store_id_option() @create_changelist_option() @create_rpc_port_option() @create_fee_option() def update_data_store( id: str, changelist_string: str, data_rpc_port: int, fee: str, ) -> None: from flax.cmds.data_funcs import update_data_store_cmd run(update_data_store_cmd(rpc_port=data_rpc_port, store_id=id, changelist=json.loads(changelist_string), fee=fee)) @data_cmd.command("get_keys", short_help="Get all keys for a given store") @create_data_store_id_option() @click.option("-r", "--root_hash", help="The hexadecimal root hash", type=str, required=False) @create_rpc_port_option() def get_keys( id: str, root_hash: Optional[str], data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import get_keys_cmd run(get_keys_cmd(data_rpc_port, id, root_hash)) @data_cmd.command("get_keys_values", short_help="Get all keys and values 
for a given store") @create_data_store_id_option() @click.option("-r", "--root_hash", help="The hexadecimal root hash", type=str, required=False) @create_rpc_port_option() def get_keys_values( id: str, root_hash: Optional[str], data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import get_keys_values_cmd run(get_keys_values_cmd(data_rpc_port, id, root_hash)) @data_cmd.command("get_root", short_help="Get the published root hash value for a given store") @create_data_store_id_option() @create_rpc_port_option() def get_root( id: str, data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import get_root_cmd run(get_root_cmd(rpc_port=data_rpc_port, store_id=id)) @data_cmd.command("subscribe", short_help="Subscribe to a store") @create_data_store_id_option() @click.option( "-u", "--url", "urls", help="Manually provide a list of servers urls for downloading the data", type=str, multiple=True, ) @create_rpc_port_option() def subscribe( id: str, urls: List[str], data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import subscribe_cmd run(subscribe_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls)) @data_cmd.command("remove_subscription", short_help="Remove server urls that are added via subscribing to urls") @create_data_store_id_option() @click.option("-u", "--url", "urls", help="Server urls to remove", type=str, multiple=True) @create_rpc_port_option() def remove_subscription( id: str, urls: List[str], data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import remove_subscriptions_cmd run(remove_subscriptions_cmd(rpc_port=data_rpc_port, store_id=id, urls=urls)) @data_cmd.command("unsubscribe", short_help="Completely untrack a store") @create_data_store_id_option() @create_rpc_port_option() def unsubscribe( id: str, data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import unsubscribe_cmd run(unsubscribe_cmd(rpc_port=data_rpc_port, store_id=id)) @data_cmd.command( "get_kv_diff", short_help="Get the inserted and deleted keys and values between an 
initial and a final hash" ) @create_data_store_id_option() @click.option("-hash_1", "--hash_1", help="Initial hash", type=str) @click.option("-hash_2", "--hash_2", help="Final hash", type=str) @create_rpc_port_option() def get_kv_diff( id: str, hash_1: str, hash_2: str, data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import get_kv_diff_cmd run(get_kv_diff_cmd(rpc_port=data_rpc_port, store_id=id, hash_1=hash_1, hash_2=hash_2)) @data_cmd.command("get_root_history", short_help="Get all changes of a singleton") @create_data_store_id_option() @create_rpc_port_option() def get_root_history( id: str, data_rpc_port: int, ) -> None: from flax.cmds.data_funcs import get_root_history_cmd run(get_root_history_cmd(rpc_port=data_rpc_port, store_id=id)) @data_cmd.command("add_missing_files", short_help="Manually reconstruct server files from the data layer database") @click.option( "-i", "--ids", help="List of stores to reconstruct. If not specified, all stores will be reconstructed", type=str, required=False, ) @click.option( "-o/-n", "--overwrite/--no-overwrite", help="Specify if already existing files need to be overwritten by this command", ) @click.option( "-f", "--foldername", type=str, help="If specified, use a non-default folder to write the files", required=False ) @create_rpc_port_option() def add_missing_files(ids: Optional[str], overwrite: bool, foldername: Optional[str], data_rpc_port: int) -> None: from flax.cmds.data_funcs import add_missing_files_cmd run( add_missing_files_cmd( rpc_port=data_rpc_port, ids=None if ids is None else json.loads(ids), overwrite=overwrite, foldername=None if foldername is None else Path(foldername), ) ) @data_cmd.command("add_mirror", short_help="Publish mirror urls on chain") @click.option("-i", "--id", help="Store id", type=str, required=True) @click.option( "-a", "--amount", help="Amount to spend for this mirror, in mojos", type=int, default=0, show_default=True ) @click.option( "-u", "--url", "urls", help="URL to publish on 
the new coin, multiple accepted and will be published to a single coin.", type=str, multiple=True, ) @create_fee_option() @create_rpc_port_option() def add_mirror(id: str, amount: int, urls: List[str], fee: Optional[str], data_rpc_port: int) -> None: from flax.cmds.data_funcs import add_mirror_cmd run( add_mirror_cmd( rpc_port=data_rpc_port, store_id=id, urls=urls, amount=amount, fee=fee, ) ) @data_cmd.command("delete_mirror", short_help="Delete an owned mirror by its coin id") @click.option("-c", "--coin_id", help="Coin id", type=str, required=True) @create_fee_option() @create_rpc_port_option() def delete_mirror(coin_id: str, fee: Optional[str], data_rpc_port: int) -> None: from flax.cmds.data_funcs import delete_mirror_cmd run( delete_mirror_cmd( rpc_port=data_rpc_port, coin_id=coin_id, fee=fee, ) ) @data_cmd.command("get_mirrors", short_help="Get a list of all mirrors for a given store") @click.option("-i", "--id", help="Store id", type=str, required=True) @create_rpc_port_option() def get_mirrors(id: str, data_rpc_port: int) -> None: from flax.cmds.data_funcs import get_mirrors_cmd run( get_mirrors_cmd( rpc_port=data_rpc_port, store_id=id, ) ) @data_cmd.command("get_subscriptions", short_help="Get subscribed stores, including the owned stores") @create_rpc_port_option() def get_subscriptions(data_rpc_port: int) -> None: from flax.cmds.data_funcs import get_subscriptions_cmd run( get_subscriptions_cmd( rpc_port=data_rpc_port, ) ) @data_cmd.command("get_owned_stores", short_help="Get owned stores") @create_rpc_port_option() def get_owned_stores(data_rpc_port: int) -> None: from flax.cmds.data_funcs import get_owned_stores_cmd run( get_owned_stores_cmd( rpc_port=data_rpc_port, ) )
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/introducer/introducer_api.py
flax/introducer/introducer_api.py
from __future__ import annotations from typing import Callable, Optional from flax.introducer.introducer import Introducer from flax.protocols.introducer_protocol import RequestPeersIntroducer, RespondPeersIntroducer from flax.protocols.protocol_message_types import ProtocolMessageTypes from flax.server.outbound_message import Message, make_msg from flax.server.ws_connection import WSFlaxConnection from flax.types.peer_info import TimestampedPeerInfo from flax.util.api_decorators import api_request, peer_required from flax.util.ints import uint64 class IntroducerAPI: introducer: Introducer def __init__(self, introducer) -> None: self.introducer = introducer def _set_state_changed_callback(self, callback: Callable): pass @peer_required @api_request async def request_peers_introducer( self, request: RequestPeersIntroducer, peer: WSFlaxConnection, ) -> Optional[Message]: max_peers = self.introducer.max_peers_to_send if self.introducer.server is None or self.introducer.server.introducer_peers is None: return None rawpeers = self.introducer.server.introducer_peers.get_peers( max_peers * 5, True, self.introducer.recent_peer_threshold ) peers = [] for r_peer in rawpeers: if r_peer.vetted <= 0: continue if r_peer.host == peer.peer_host and r_peer.port == peer.peer_server_port: continue peer_without_timestamp = TimestampedPeerInfo( r_peer.host, r_peer.port, uint64(0), ) peers.append(peer_without_timestamp) if len(peers) >= max_peers: break self.introducer.log.info(f"Sending vetted {peers}") msg = make_msg(ProtocolMessageTypes.respond_peers_introducer, RespondPeersIntroducer(peers)) return msg
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/introducer/introducer.py
flax/introducer/introducer.py
import asyncio import logging import time from typing import Any, Callable, Dict, List, Optional from flax.rpc.rpc_server import default_get_connections from flax.server.outbound_message import NodeType from flax.server.server import FlaxServer from flax.server.introducer_peers import VettedPeer from flax.server.ws_connection import WSFlaxConnection from flax.util.ints import uint64 class Introducer: @property def server(self) -> FlaxServer: # This is a stop gap until the class usage is refactored such the values of # integral attributes are known at creation of the instance. if self._server is None: raise RuntimeError("server not assigned") return self._server def __init__(self, max_peers_to_send: int, recent_peer_threshold: int): self.max_peers_to_send = max_peers_to_send self.recent_peer_threshold = recent_peer_threshold self._shut_down = False self._server: Optional[FlaxServer] = None self.log = logging.getLogger(__name__) async def _start(self): self._vetting_task = asyncio.create_task(self._vetting_loop()) def _close(self): self._shut_down = True self._vetting_task.cancel() async def _await_closed(self): pass # await self._vetting_task async def on_connect(self, peer: WSFlaxConnection) -> None: pass def _set_state_changed_callback(self, callback: Callable): # TODO: fill this out? 
pass def get_connections(self, request_node_type: Optional[NodeType]) -> List[Dict[str, Any]]: return default_get_connections(server=self.server, request_node_type=request_node_type) def set_server(self, server: FlaxServer): self._server = server async def _vetting_loop(self): while True: if self._shut_down: return None try: for i in range(60): if self._shut_down: return None await asyncio.sleep(1) self.log.info("Vetting random peers.") if self._server.introducer_peers is None: continue raw_peers = self.server.introducer_peers.get_peers(100, True, 3 * self.recent_peer_threshold) if len(raw_peers) == 0: continue peer: VettedPeer for peer in raw_peers: if self._shut_down: return None now = time.time() # if it was too long ago we checked this peer, check it # again if peer.vetted > 0 and now > peer.vetted_timestamp + 3600: peer.vetted = 0 if peer.vetted > 0: continue # don't re-vet peers too frequently if now < peer.last_attempt + 500: continue try: peer.last_attempt = uint64(time.time()) self.log.info(f"Vetting peer {peer.host} {peer.port}") r, w = await asyncio.wait_for( asyncio.open_connection(peer.host, int(peer.port)), timeout=3, ) w.close() except Exception as e: self.log.warning(f"Could not vet {peer}, removing. {type(e)}{str(e)}") peer.vetted = min(peer.vetted - 1, -1) # if we have failed 6 times in a row, remove the peer if peer.vetted < -6: self.server.introducer_peers.remove(peer) continue self.log.info(f"Have vetted {peer} successfully!") peer.vetted_timestamp = uint64(time.time()) peer.vetted = max(peer.vetted + 1, 1) except Exception as e: self.log.error(e)
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/introducer/__init__.py
flax/introducer/__init__.py
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/jsonify.py
benchmarks/jsonify.py
from __future__ import annotations import random from time import perf_counter from tests.util.test_full_block_utils import get_full_blocks random.seed(123456789) def main() -> None: total_time = 0.0 counter = 0 for block in get_full_blocks(): start = perf_counter() block.to_json_dict() end = perf_counter() total_time += end - start counter += 1 print(f"total time: {total_time:0.2f}s ({counter} iterations)") if __name__ == "__main__": main()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/coin_store.py
benchmarks/coin_store.py
from __future__ import annotations import asyncio import os import random import sys from pathlib import Path from time import monotonic from typing import List, Tuple from utils import rand_hash, rewards, setup_db from flax.full_node.coin_store import CoinStore from flax.types.blockchain_format.coin import Coin from flax.types.blockchain_format.sized_bytes import bytes32 from flax.util.db_wrapper import DBWrapper2 from flax.util.ints import uint32, uint64 NUM_ITERS = 200 # we need seeded random, to have reproducible benchmark runs random.seed(123456789) def make_coin() -> Coin: return Coin(rand_hash(), rand_hash(), uint64(1)) def make_coins(num: int) -> Tuple[List[Coin], List[bytes32]]: additions: List[Coin] = [] hashes: List[bytes32] = [] for i in range(num): c = make_coin() additions.append(c) hashes.append(c.name()) return additions, hashes async def run_new_block_benchmark(version: int): verbose: bool = "--verbose" in sys.argv db_wrapper: DBWrapper2 = await setup_db("coin-store-benchmark.db", version) # keep track of benchmark total time all_test_time = 0.0 try: coin_store = await CoinStore.create(db_wrapper) all_unspent: List[bytes32] = [] all_coins: List[bytes32] = [] block_height = 1 timestamp = 1631794488 print("Building database ", end="") for height in range(block_height, block_height + NUM_ITERS): # add some new coins additions, hashes = make_coins(2000) # farm rewards farmer_coin, pool_coin = rewards(uint32(height)) all_coins += hashes all_unspent += hashes all_unspent += [pool_coin.name(), farmer_coin.name()] # remove some coins we've added previously random.shuffle(all_unspent) removals = all_unspent[:100] all_unspent = all_unspent[100:] await coin_store.new_block( uint32(height), uint64(timestamp), {pool_coin, farmer_coin}, additions, removals, ) # 19 seconds per block timestamp += 19 if verbose: print(".", end="") sys.stdout.flush() block_height += NUM_ITERS total_time = 0.0 total_add = 0.0 total_remove = 0.0 print("") if verbose: print("Profiling 
mostly additions ", end="") for height in range(block_height, block_height + NUM_ITERS): # add some new coins additions, hashes = make_coins(2000) total_add += 2000 farmer_coin, pool_coin = rewards(uint32(height)) all_coins += hashes all_unspent += hashes all_unspent += [pool_coin.name(), farmer_coin.name()] total_add += 2 # remove some coins we've added previously random.shuffle(all_unspent) removals = all_unspent[:100] all_unspent = all_unspent[100:] total_remove += 100 start = monotonic() await coin_store.new_block( uint32(height), uint64(timestamp), {pool_coin, farmer_coin}, additions, removals, ) stop = monotonic() # 19 seconds per block timestamp += 19 total_time += stop - start if verbose: print(".", end="") sys.stdout.flush() block_height += NUM_ITERS if verbose: print("") print(f"{total_time:0.4f}s, MOSTLY ADDITIONS additions: {total_add} removals: {total_remove}") all_test_time += total_time if verbose: print("Profiling mostly removals ", end="") total_add = 0 total_remove = 0 total_time = 0 for height in range(block_height, block_height + NUM_ITERS): additions = [] # add one new coins c = make_coin() additions.append(c) total_add += 1 farmer_coin, pool_coin = rewards(uint32(height)) all_coins += [c.name()] all_unspent += [c.name()] all_unspent += [pool_coin.name(), farmer_coin.name()] total_add += 2 # remove some coins we've added previously random.shuffle(all_unspent) removals = all_unspent[:700] all_unspent = all_unspent[700:] total_remove += 700 start = monotonic() await coin_store.new_block( uint32(height), uint64(timestamp), {pool_coin, farmer_coin}, additions, removals, ) stop = monotonic() # 19 seconds per block timestamp += 19 total_time += stop - start if verbose: print(".", end="") sys.stdout.flush() block_height += NUM_ITERS if verbose: print("") print(f"{total_time:0.4f}s, MOSTLY REMOVALS additions: {total_add} removals: {total_remove}") all_test_time += total_time if verbose: print("Profiling full block transactions", end="") total_add = 0 
total_remove = 0 total_time = 0 for height in range(block_height, block_height + NUM_ITERS): # add some new coins additions, hashes = make_coins(2000) total_add += 2000 farmer_coin, pool_coin = rewards(uint32(height)) all_coins += hashes all_unspent += hashes all_unspent += [pool_coin.name(), farmer_coin.name()] total_add += 2 # remove some coins we've added previously random.shuffle(all_unspent) removals = all_unspent[:2000] all_unspent = all_unspent[2000:] total_remove += 2000 start = monotonic() await coin_store.new_block( uint32(height), uint64(timestamp), {pool_coin, farmer_coin}, additions, removals, ) stop = monotonic() # 19 seconds per block timestamp += 19 total_time += stop - start if verbose: print(".", end="") sys.stdout.flush() block_height += NUM_ITERS if verbose: print("") print(f"{total_time:0.4f}s, FULLBLOCKS additions: {total_add} removals: {total_remove}") all_test_time += total_time if verbose: print("profiling get_coin_records_by_names, include_spent ", end="") total_time = 0 found_coins = 0 for i in range(NUM_ITERS): lookup = random.sample(all_coins, 200) start = monotonic() records = await coin_store.get_coin_records_by_names(True, lookup) total_time += monotonic() - start assert len(records) == 200 found_coins += len(records) if verbose: print(".", end="") sys.stdout.flush() if verbose: print("") print( f"{total_time:0.4f}s, GET RECORDS BY NAMES with spent {NUM_ITERS} " f"lookups found {found_coins} coins in total" ) all_test_time += total_time if verbose: print("profiling get_coin_records_by_names, without spent coins ", end="") total_time = 0 found_coins = 0 for i in range(NUM_ITERS): lookup = random.sample(all_coins, 200) start = monotonic() records = await coin_store.get_coin_records_by_names(False, lookup) total_time += monotonic() - start assert len(records) <= 200 found_coins += len(records) if verbose: print(".", end="") sys.stdout.flush() if verbose: print("") print( f"{total_time:0.4f}s, GET RECORDS BY NAMES without spent 
{NUM_ITERS} " f"lookups found {found_coins} coins in total" ) all_test_time += total_time if verbose: print("profiling get_coin_removed_at_height ", end="") total_time = 0 found_coins = 0 for i in range(1, block_height): start = monotonic() records = await coin_store.get_coins_removed_at_height(uint32(i)) total_time += monotonic() - start found_coins += len(records) if verbose: print(".", end="") sys.stdout.flush() if verbose: print("") print( f"{total_time:0.4f}s, GET COINS REMOVED AT HEIGHT {block_height-1} blocks, " f"found {found_coins} coins in total" ) all_test_time += total_time print(f"all tests completed in {all_test_time:0.4f}s") finally: await db_wrapper.close() db_size = os.path.getsize(Path("coin-store-benchmark.db")) print(f"database size: {db_size/1000000:.3f} MB") if __name__ == "__main__": print("version 1") asyncio.run(run_new_block_benchmark(1)) print("version 2") asyncio.run(run_new_block_benchmark(2))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/utils.py
benchmarks/utils.py
from __future__ import annotations import os import random import subprocess import sys from datetime import datetime from pathlib import Path from typing import Tuple, Union import aiosqlite import click from blspy import AugSchemeMPL, G1Element, G2Element from flax.consensus.coinbase import create_farmer_coin, create_pool_coin from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.types.blockchain_format.classgroup import ClassgroupElement from flax.types.blockchain_format.coin import Coin from flax.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo from flax.types.blockchain_format.pool_target import PoolTarget from flax.types.blockchain_format.program import SerializedProgram from flax.types.blockchain_format.proof_of_space import ProofOfSpace from flax.types.blockchain_format.reward_chain_block import RewardChainBlock from flax.types.blockchain_format.sized_bytes import bytes32, bytes100 from flax.types.blockchain_format.vdf import VDFInfo, VDFProof from flax.types.full_block import FullBlock from flax.util.db_wrapper import DBWrapper2 from flax.util.ints import uint8, uint32, uint64, uint128 # farmer puzzle hash ph = bytes32(b"a" * 32) with open(Path(os.path.realpath(__file__)).parent / "clvm_generator.bin", "rb") as f: clvm_generator = f.read() # Workaround to allow `Enum` with click.Choice: https://github.com/pallets/click/issues/605#issuecomment-901099036 class EnumType(click.Choice): def __init__(self, enum, case_sensitive=False): self.__enum = enum super().__init__(choices=[item.value for item in enum], case_sensitive=case_sensitive) def convert(self, value, param, ctx): converted_str = super().convert(value, param, ctx) return self.__enum(converted_str) def rewards(height: uint32) -> Tuple[Coin, Coin]: farmer_coin = create_farmer_coin(height, ph, uint64(250000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE) pool_coin = create_pool_coin(height, ph, uint64(1750000000), 
DEFAULT_CONSTANTS.GENESIS_CHALLENGE) return farmer_coin, pool_coin def rand_bytes(num) -> bytes: ret = bytearray(num) for i in range(num): ret[i] = random.getrandbits(8) return bytes(ret) def rand_hash() -> bytes32: return bytes32(rand_bytes(32)) def rand_g1() -> G1Element: sk = AugSchemeMPL.key_gen(rand_bytes(96)) return sk.get_g1() def rand_g2() -> G2Element: sk = AugSchemeMPL.key_gen(rand_bytes(96)) return AugSchemeMPL.sign(sk, b"foobar") def rand_class_group_element() -> ClassgroupElement: return ClassgroupElement(bytes100(rand_bytes(100))) def rand_vdf() -> VDFInfo: return VDFInfo(rand_hash(), uint64(random.randint(100000, 1000000000)), rand_class_group_element()) def rand_vdf_proof() -> VDFProof: return VDFProof( uint8(1), # witness_type rand_hash(), # witness bool(random.randint(0, 1)), # normalized_to_identity ) def rand_full_block() -> FullBlock: proof_of_space = ProofOfSpace( rand_hash(), rand_g1(), None, rand_g1(), uint8(0), rand_bytes(8 * 32), ) reward_chain_block = RewardChainBlock( uint128(1), uint32(2), uint128(3), uint8(4), rand_hash(), proof_of_space, None, rand_g2(), rand_vdf(), None, rand_g2(), rand_vdf(), rand_vdf(), True, ) pool_target = PoolTarget( rand_hash(), uint32(0), ) foliage_block_data = FoliageBlockData( rand_hash(), pool_target, rand_g2(), rand_hash(), rand_hash(), ) foliage = Foliage( rand_hash(), rand_hash(), foliage_block_data, rand_g2(), rand_hash(), rand_g2(), ) foliage_transaction_block = FoliageTransactionBlock( rand_hash(), uint64(0), rand_hash(), rand_hash(), rand_hash(), rand_hash(), ) farmer_coin, pool_coin = rewards(uint32(0)) transactions_info = TransactionsInfo( rand_hash(), rand_hash(), rand_g2(), uint64(0), uint64(1), [farmer_coin, pool_coin], ) full_block = FullBlock( [], reward_chain_block, rand_vdf_proof(), rand_vdf_proof(), rand_vdf_proof(), rand_vdf_proof(), rand_vdf_proof(), foliage, foliage_transaction_block, transactions_info, SerializedProgram.from_bytes(clvm_generator), [], ) return full_block async def 
setup_db(name: Union[str, os.PathLike], db_version: int) -> DBWrapper2: db_filename = Path(name) try: os.unlink(db_filename) except FileNotFoundError: pass connection = await aiosqlite.connect(db_filename) def sql_trace_callback(req: str): sql_log_path = "sql.log" timestamp = datetime.now().strftime("%H:%M:%S.%f") log = open(sql_log_path, "a") log.write(timestamp + " " + req + "\n") log.close() if "--sql-logging" in sys.argv: await connection.set_trace_callback(sql_trace_callback) await connection.execute("pragma journal_mode=wal") await connection.execute("pragma synchronous=full") ret = DBWrapper2(connection, db_version) await ret.add_connection(await aiosqlite.connect(db_filename)) return ret def get_commit_hash() -> str: try: os.chdir(Path(os.path.realpath(__file__)).parent) commit_hash = ( subprocess.run(["git", "rev-parse", "--short", "HEAD"], check=True, stdout=subprocess.PIPE) .stdout.decode("utf-8") .strip() ) except Exception: sys.exit("Failed to get the commit hash") try: if len(subprocess.run(["git", "status", "-s"], check=True, stdout=subprocess.PIPE).stdout) > 0: raise Exception() except Exception: commit_hash += "-dirty" return commit_hash
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/block_store.py
benchmarks/block_store.py
from __future__ import annotations import asyncio import os import random import sys from pathlib import Path from time import monotonic from utils import ( rand_bytes, rand_class_group_element, rand_g1, rand_g2, rand_hash, rand_vdf, rand_vdf_proof, rewards, setup_db, ) from benchmarks.utils import clvm_generator from flax.consensus.block_record import BlockRecord from flax.full_node.block_store import BlockStore from flax.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo from flax.types.blockchain_format.pool_target import PoolTarget from flax.types.blockchain_format.program import SerializedProgram from flax.types.blockchain_format.proof_of_space import ProofOfSpace from flax.types.blockchain_format.reward_chain_block import RewardChainBlock from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary from flax.types.full_block import FullBlock from flax.util.db_wrapper import DBWrapper2 from flax.util.ints import uint8, uint32, uint64, uint128 NUM_ITERS = 20000 # we need seeded random, to have reproducible benchmark runs random.seed(123456789) async def run_add_block_benchmark(version: int): verbose: bool = "--verbose" in sys.argv db_wrapper: DBWrapper2 = await setup_db("block-store-benchmark.db", version) # keep track of benchmark total time all_test_time = 0.0 prev_block = bytes32([0] * 32) prev_ses_hash = bytes32([0] * 32) header_hashes = [] try: block_store = await BlockStore.create(db_wrapper) block_height = 1 timestamp = uint64(1631794488) weight = uint128(10) iters = uint128(123456) sp_index = uint8(0) deficit = uint8(0) sub_slot_iters = uint64(10) required_iters = uint64(100) transaction_block_counter = 0 prev_transaction_block = bytes32([0] * 32) prev_transaction_height = uint32(0) total_time = 0.0 ses_counter = 0 if verbose: print("profiling add_full_block", end="") for height in range(block_height, block_height + 
NUM_ITERS): is_transaction = transaction_block_counter == 0 fees = uint64(random.randint(0, 150000)) farmer_coin, pool_coin = rewards(uint32(height)) reward_claims_incorporated = [farmer_coin, pool_coin] # TODO: increase fidelity by setting these as well finished_challenge_slot_hashes = None finished_infused_challenge_slot_hashes = None finished_reward_slot_hashes = None sub_epoch_summary_included = None if ses_counter == 0: sub_epoch_summary_included = SubEpochSummary( prev_ses_hash, rand_hash(), uint8(random.randint(0, 255)), # num_blocks_overflow: uint8 None, # new_difficulty: Optional[uint64] None, # new_sub_slot_iters: Optional[uint64] ) has_pool_pk = random.randint(0, 1) proof_of_space = ProofOfSpace( rand_hash(), # challenge rand_g1() if has_pool_pk else None, rand_hash() if not has_pool_pk else None, rand_g1(), # plot_public_key uint8(32), rand_bytes(8 * 32), ) reward_chain_block = RewardChainBlock( weight, uint32(height), iters, sp_index, rand_hash(), # pos_ss_cc_challenge_hash proof_of_space, None if sp_index == 0 else rand_vdf(), rand_g2(), # challenge_chain_sp_signature rand_vdf(), # challenge_chain_ip_vdf rand_vdf() if sp_index != 0 else None, # reward_chain_sp_vdf rand_g2(), # reward_chain_sp_signature rand_vdf(), # reward_chain_ip_vdf rand_vdf() if deficit < 16 else None, is_transaction, ) pool_target = PoolTarget( rand_hash(), # puzzle_hash uint32(0), # max_height ) foliage_block_data = FoliageBlockData( rand_hash(), # unfinished_reward_block_hash pool_target, rand_g2() if has_pool_pk else None, # pool_signature rand_hash(), # farmer_reward_puzzle_hash bytes32([0] * 32), # extension_data ) foliage = Foliage( prev_block, rand_hash(), # reward_block_hash foliage_block_data, rand_g2(), # foliage_block_data_signature rand_hash() if is_transaction else None, # foliage_transaction_block_hash rand_g2() if is_transaction else None, # foliage_transaction_block_signature ) foliage_transaction_block = ( None if not is_transaction else FoliageTransactionBlock( 
prev_transaction_block, timestamp, rand_hash(), # filter_hash rand_hash(), # additions_root rand_hash(), # removals_root rand_hash(), # transactions_info_hash ) ) transactions_info = ( None if not is_transaction else TransactionsInfo( rand_hash(), # generator_root rand_hash(), # generator_refs_root rand_g2(), # aggregated_signature fees, uint64(random.randint(0, 12000000000)), # cost reward_claims_incorporated, ) ) full_block = FullBlock( [], # finished_sub_slots reward_chain_block, rand_vdf_proof() if sp_index > 0 else None, # challenge_chain_sp_proof rand_vdf_proof(), # challenge_chain_ip_proof rand_vdf_proof() if sp_index > 0 else None, # reward_chain_sp_proof rand_vdf_proof(), # reward_chain_ip_proof rand_vdf_proof() if deficit < 4 else None, # infused_challenge_chain_ip_proof foliage, foliage_transaction_block, transactions_info, None if is_transaction else SerializedProgram.from_bytes(clvm_generator), # transactions_generator [], # transactions_generator_ref_list ) header_hash = full_block.header_hash record = BlockRecord( header_hash, prev_block, uint32(height), weight, iters, sp_index, rand_class_group_element(), None if deficit > 3 else rand_class_group_element(), rand_hash(), # reward_infusion_new_challenge rand_hash(), # challenge_block_info_hash sub_slot_iters, rand_hash(), # pool_puzzle_hash rand_hash(), # farmer_puzzle_hash required_iters, deficit, deficit == 16, prev_transaction_height, timestamp if is_transaction else None, prev_transaction_block if prev_transaction_block != bytes32([0] * 32) else None, None if fees == 0 else fees, reward_claims_incorporated, finished_challenge_slot_hashes, finished_infused_challenge_slot_hashes, finished_reward_slot_hashes, sub_epoch_summary_included, ) start = monotonic() await block_store.add_full_block(header_hash, full_block, record) await block_store.set_in_chain([(header_hash,)]) header_hashes.append(header_hash) await block_store.set_peak(header_hash) stop = monotonic() total_time += stop - start # 19 
seconds per block timestamp = uint64(timestamp + 19) weight = uint128(weight + 10) iters = uint128(iters + 123456) sp_index = uint8((sp_index + 1) % 64) deficit = uint8((deficit + 3) % 17) ses_counter = (ses_counter + 1) % 384 prev_block = header_hash # every 33 blocks is a transaction block transaction_block_counter = (transaction_block_counter + 1) % 33 if is_transaction: prev_transaction_block = header_hash prev_transaction_height = uint32(height) if ses_counter == 0: prev_ses_hash = header_hash if verbose: print(".", end="") sys.stdout.flush() block_height += NUM_ITERS if verbose: print("") print(f"{total_time:0.4f}s, add_full_block") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_full_block") random.shuffle(header_hashes) start = monotonic() for h in header_hashes: block = await block_store.get_full_block(h) assert block is not None assert block.header_hash == h stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_full_block") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_full_block_bytes") start = monotonic() for h in header_hashes: block_bs = await block_store.get_full_block_bytes(h) assert block_bs is not None assert len(block_bs) > 0 stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_full_block_bytes") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_full_blocks_at") start = monotonic() for hi in range(1, block_height): blocks = await block_store.get_full_blocks_at([uint32(hi)]) assert len(blocks) == 1 assert blocks[0].height == hi stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_full_blocks_at") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_block_records_by_hash") start = monotonic() for h in header_hashes: block_recs = await block_store.get_block_records_by_hash([h]) assert len(block_recs) == 1 assert block_recs[0].header_hash == h stop = monotonic() 
total_time += stop - start print(f"{total_time:0.4f}s, get_block_records_by_hash") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_blocks_by_hash") start = monotonic() for h in header_hashes: blocks = await block_store.get_blocks_by_hash([h]) assert len(blocks) == 1 assert blocks[0].header_hash == h stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_blocks_by_hash") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_block_record") start = monotonic() for h in header_hashes: block_rec = await block_store.get_block_record(h) assert block_rec is not None assert block_rec.header_hash == h stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_block_record") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_block_records_in_range") start = monotonic() for i in range(100): hi = random.randint(1, block_height - 100) blocks_dict = await block_store.get_block_records_in_range(hi, hi + 99) assert len(blocks_dict) == 100 stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_block_records_in_range") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_block_records_close_to_peak") start = monotonic() block_dict, peak_h = await block_store.get_block_records_close_to_peak(99) assert len(block_dict) == 100 stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_block_records_close_to_peak") all_test_time += total_time total_time = 0.0 if verbose: print("profiling is_fully_compactified") start = monotonic() for h in header_hashes: compactified = await block_store.is_fully_compactified(h) assert compactified is False stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_block_record") all_test_time += total_time total_time = 0.0 if verbose: print("profiling get_random_not_compactified") start = monotonic() for i in range(1, 5000): blocks_int_list = await 
block_store.get_random_not_compactified(100) assert len(blocks_int_list) == 100 stop = monotonic() total_time += stop - start print(f"{total_time:0.4f}s, get_random_not_compactified") all_test_time += total_time print(f"all tests completed in {all_test_time:0.4f}s") db_size = os.path.getsize(Path("block-store-benchmark.db")) print(f"database size: {db_size/1000000:.3f} MB") finally: await db_wrapper.close() if __name__ == "__main__": print("version 1") asyncio.run(run_add_block_benchmark(1)) print("version 2") asyncio.run(run_add_block_benchmark(2))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/streamable.py
benchmarks/streamable.py
from __future__ import annotations import json import sys from dataclasses import dataclass from enum import Enum from statistics import stdev from time import process_time as clock from typing import Any, Callable, Dict, List, Optional, TextIO, Tuple, Type, Union import click from utils import EnumType, get_commit_hash, rand_bytes, rand_full_block, rand_hash from flax.types.blockchain_format.sized_bytes import bytes32 from flax.types.full_block import FullBlock from flax.util.ints import uint8, uint64 from flax.util.streamable import Streamable, streamable _version = 1 @streamable @dataclass(frozen=True) class BenchmarkInner(Streamable): a: str @streamable @dataclass(frozen=True) class BenchmarkMiddle(Streamable): a: uint64 b: List[bytes32] c: Tuple[str, bool, uint8, List[bytes]] d: Tuple[BenchmarkInner, BenchmarkInner] e: BenchmarkInner @streamable @dataclass(frozen=True) class BenchmarkClass(Streamable): a: Optional[BenchmarkMiddle] b: Optional[BenchmarkMiddle] c: BenchmarkMiddle d: List[BenchmarkMiddle] e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle] def get_random_inner() -> BenchmarkInner: return BenchmarkInner(rand_bytes(20).hex()) def get_random_middle() -> BenchmarkMiddle: a: uint64 = uint64(10) b: List[bytes32] = [rand_hash() for _ in range(a)] c: Tuple[str, bool, uint8, List[bytes]] = ("benchmark", False, uint8(1), [rand_bytes(a) for _ in range(a)]) d: Tuple[BenchmarkInner, BenchmarkInner] = (get_random_inner(), get_random_inner()) e: BenchmarkInner = get_random_inner() return BenchmarkMiddle(a, b, c, d, e) def get_random_benchmark_object() -> BenchmarkClass: a: Optional[BenchmarkMiddle] = None b: Optional[BenchmarkMiddle] = get_random_middle() c: BenchmarkMiddle = get_random_middle() d: List[BenchmarkMiddle] = [get_random_middle() for _ in range(5)] e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle] = ( get_random_middle(), get_random_middle(), get_random_middle(), ) return BenchmarkClass(a, b, c, d, e) def print_row( *, mode: str, 
us_per_iteration: Union[str, float], stdev_us_per_iteration: Union[str, float], avg_iterations: Union[str, int], stdev_iterations: Union[str, float], end: str = "\n", ) -> None: mode = "{0:<10}".format(f"{mode}") us_per_iteration = "{0:<12}".format(f"{us_per_iteration}") stdev_us_per_iteration = "{0:>20}".format(f"{stdev_us_per_iteration}") avg_iterations = "{0:>18}".format(f"{avg_iterations}") stdev_iterations = "{0:>22}".format(f"{stdev_iterations}") print(f"{mode} | {us_per_iteration} | {stdev_us_per_iteration} | {avg_iterations} | {stdev_iterations}", end=end) @dataclass class BenchmarkResults: us_per_iteration: float stdev_us_per_iteration: float avg_iterations: int stdev_iterations: float def print_results(mode: str, bench_result: BenchmarkResults, final: bool) -> None: print_row( mode=mode, us_per_iteration=bench_result.us_per_iteration, stdev_us_per_iteration=bench_result.stdev_us_per_iteration, avg_iterations=bench_result.avg_iterations, stdev_iterations=bench_result.stdev_iterations, end="\n" if final else "\r", ) # The strings in this Enum are by purpose. See benchmark.utils.EnumType. class Data(str, Enum): all = "all" benchmark = "benchmark" full_block = "full_block" # The strings in this Enum are by purpose. See benchmark.utils.EnumType. 
class Mode(str, Enum): all = "all" creation = "creation" to_bytes = "to_bytes" from_bytes = "from_bytes" to_json = "to_json" from_json = "from_json" def to_bytes(obj: Any) -> bytes: return bytes(obj) @dataclass class ModeParameter: conversion_cb: Callable[[Any], Any] preparation_cb: Optional[Callable[[Any], Any]] = None @dataclass class BenchmarkParameter: data_class: Type[Any] object_creation_cb: Callable[[], Any] mode_parameter: Dict[Mode, Optional[ModeParameter]] benchmark_parameter: Dict[Data, BenchmarkParameter] = { Data.benchmark: BenchmarkParameter( BenchmarkClass, get_random_benchmark_object, { Mode.creation: None, Mode.to_bytes: ModeParameter(to_bytes), Mode.from_bytes: ModeParameter(BenchmarkClass.from_bytes, to_bytes), Mode.to_json: ModeParameter(BenchmarkClass.to_json_dict), Mode.from_json: ModeParameter(BenchmarkClass.from_json_dict, BenchmarkClass.to_json_dict), }, ), Data.full_block: BenchmarkParameter( FullBlock, rand_full_block, { Mode.creation: None, Mode.to_bytes: ModeParameter(to_bytes), Mode.from_bytes: ModeParameter(FullBlock.from_bytes, to_bytes), Mode.to_json: ModeParameter(FullBlock.to_json_dict), Mode.from_json: ModeParameter(FullBlock.from_json_dict, FullBlock.to_json_dict), }, ), } def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]: us_iteration_results: List[int] = [] start = clock() while int((clock() - start) * 1000) < ms_to_run: start_iteration = clock() cb() stop_iteration = clock() us_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000)) return us_iteration_results def calc_stdev_percent(iterations: List[int], avg: float) -> float: deviation = 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100 return int((deviation / avg * 100) * 100) / 100 def pop_data(key: str, *, old: Dict[str, Any], new: Dict[str, Any]) -> Tuple[Any, Any]: if key not in old: sys.exit(f"{key} missing in old") if key not in new: sys.exit(f"{key} missing in new") return old.pop(key), new.pop(key) def 
print_compare_row(c0: str, c1: Union[str, float], c2: Union[str, float], c3: Union[str, float]) -> None: c0 = "{0:<12}".format(f"{c0}") c1 = "{0:<16}".format(f"{c1}") c2 = "{0:<16}".format(f"{c2}") c3 = "{0:<12}".format(f"{c3}") print(f"{c0} | {c1} | {c2} | {c3}") def compare_results( old: Dict[str, Dict[str, Dict[str, Union[float, int]]]], new: Dict[str, Dict[str, Dict[str, Union[float, int]]]] ) -> None: old_version, new_version = pop_data("version", old=old, new=new) if old_version != new_version: sys.exit(f"version missmatch: old: {old_version} vs new: {new_version}") old_commit_hash, new_commit_hash = pop_data("commit_hash", old=old, new=new) for data, modes in new.items(): if data not in old: continue print(f"\ncompare: {data}, old: {old_commit_hash}, new: {new_commit_hash}") print_compare_row("mode", "µs/iteration old", "µs/iteration new", "diff %") for mode, results in modes.items(): if mode not in old[data]: continue old_us, new_us = pop_data("us_per_iteration", old=old[data][mode], new=results) print_compare_row(mode, old_us, new_us, int((new_us - old_us) / old_us * 10000) / 100) @click.command() @click.option("-d", "--data", default=Data.all, type=EnumType(Data)) @click.option("-m", "--mode", default=Mode.all, type=EnumType(Mode)) @click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results") @click.option("-t", "--ms", default=50, help="Milliseconds per run") @click.option("--live/--no-live", default=False, help="Print live results (slower)") @click.option("-o", "--output", type=click.File("w"), help="Write the results to a file") @click.option("-c", "--compare", type=click.File("r"), help="Compare to the results from a file") def run(data: Data, mode: Mode, runs: int, ms: int, live: bool, output: TextIO, compare: TextIO) -> None: results: Dict[Data, Dict[Mode, List[List[int]]]] = {} bench_results: Dict[str, Any] = {"version": _version, "commit_hash": get_commit_hash()} for current_data, parameter in 
benchmark_parameter.items(): if data == Data.all or current_data == data: results[current_data] = {} bench_results[current_data] = {} print( f"\nbenchmarks: {mode.name}, data: {parameter.data_class.__name__} runs: {runs}, ms/run: {ms}, " f"commit_hash: {bench_results['commit_hash']}" ) print_row( mode="mode", us_per_iteration="µs/iteration", stdev_us_per_iteration="stdev µs/iteration %", avg_iterations="avg iterations/run", stdev_iterations="stdev iterations/run %", ) for current_mode, current_mode_parameter in parameter.mode_parameter.items(): results[current_data][current_mode] = [] if mode == Mode.all or current_mode == mode: us_iteration_results: List[int] all_results: List[List[int]] = results[current_data][current_mode] obj = parameter.object_creation_cb() def get_bench_results() -> BenchmarkResults: all_runtimes: List[int] = [x for inner in all_results for x in inner] total_iterations: int = len(all_runtimes) total_elapsed_us: int = sum(all_runtimes) avg_iterations: float = total_iterations / len(all_results) stdev_iterations: float = calc_stdev_percent([len(x) for x in all_results], avg_iterations) us_per_iteration: float = total_elapsed_us / total_iterations stdev_us_per_iteration: float = calc_stdev_percent( all_runtimes, total_elapsed_us / total_iterations ) return BenchmarkResults( int(us_per_iteration * 100) / 100, stdev_us_per_iteration, int(avg_iterations), stdev_iterations, ) current_run: int = 0 while current_run < runs: current_run += 1 if current_mode == Mode.creation: cls = type(obj) us_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), ms) else: assert current_mode_parameter is not None conversion_cb = current_mode_parameter.conversion_cb assert conversion_cb is not None prepared_obj = parameter.object_creation_cb() if current_mode_parameter.preparation_cb is not None: prepared_obj = current_mode_parameter.preparation_cb(obj) us_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms) 
all_results.append(us_iteration_results) if live: print_results(current_mode.name, get_bench_results(), False) assert current_run == runs bench_result = get_bench_results() bench_results[current_data][current_mode] = bench_result.__dict__ print_results(current_mode.name, bench_result, True) json_output = json.dumps(bench_results) if output: output.write(json_output) if compare: compare_results(json.load(compare), json.loads(json_output)) if __name__ == "__main__": run() # pylint: disable = no-value-for-parameter
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/block_ref.py
benchmarks/block_ref.py
from __future__ import annotations import asyncio import os import random from dataclasses import dataclass from pathlib import Path from time import monotonic from typing import List, Optional import aiosqlite import click from flax.consensus.blockchain import Blockchain from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.full_node.block_store import BlockStore from flax.full_node.coin_store import CoinStore from flax.types.blockchain_format.program import SerializedProgram from flax.types.blockchain_format.sized_bytes import bytes32 from flax.util.db_version import lookup_db_version from flax.util.db_wrapper import DBWrapper2 from flax.util.ints import uint32 # the first transaction block. Each byte in transaction_height_delta is the # number of blocks to skip forward to get to the next transaction block transaction_block_heights = [] last = 225698 file_path = os.path.realpath(__file__) for delta in open(Path(file_path).parent / "transaction_height_delta", "rb").read(): new = last + delta transaction_block_heights.append(new) last = new @dataclass(frozen=True) class BlockInfo: prev_header_hash: bytes32 transactions_generator: Optional[SerializedProgram] transactions_generator_ref_list: List[uint32] def random_refs() -> List[uint32]: ret = random.sample(transaction_block_heights, DEFAULT_CONSTANTS.MAX_GENERATOR_REF_LIST_SIZE) random.shuffle(ret) return [uint32(i) for i in ret] REPETITIONS = 100 async def main(db_path: Path): random.seed(0x213FB154) async with aiosqlite.connect(db_path) as connection: await connection.execute("pragma journal_mode=wal") await connection.execute("pragma synchronous=FULL") await connection.execute("pragma query_only=ON") db_version: int = await lookup_db_version(connection) db_wrapper = DBWrapper2(connection, db_version=db_version) await db_wrapper.add_connection(await aiosqlite.connect(db_path)) block_store = await BlockStore.create(db_wrapper) coin_store = await CoinStore.create(db_wrapper) start_time = 
monotonic() # make configurable reserved_cores = 4 blockchain = await Blockchain.create(coin_store, block_store, DEFAULT_CONSTANTS, db_path.parent, reserved_cores) peak = blockchain.get_peak() assert peak is not None timing = 0.0 for i in range(REPETITIONS): block = BlockInfo( peak.header_hash, SerializedProgram.from_bytes(bytes.fromhex("80")), random_refs(), ) start_time = monotonic() gen = await blockchain.get_block_generator(block) one_call = monotonic() - start_time timing += one_call assert gen is not None print(f"get_block_generator(): {timing/REPETITIONS:0.3f}s") blockchain.shut_down() @click.command() @click.argument("db-path", type=click.Path()) def entry_point(db_path: Path): asyncio.run(main(Path(db_path))) if __name__ == "__main__": # pylint: disable = no-value-for-parameter entry_point()
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/mempool.py
benchmarks/mempool.py
from __future__ import annotations import asyncio import cProfile from contextlib import contextmanager from subprocess import check_call from time import monotonic from typing import Iterator, List from utils import setup_db from flax.consensus.block_record import BlockRecord from flax.consensus.coinbase import create_farmer_coin, create_pool_coin from flax.consensus.default_constants import DEFAULT_CONSTANTS from flax.full_node.coin_store import CoinStore from flax.full_node.mempool_manager import MempoolManager from flax.simulator.wallet_tools import WalletTool from flax.types.blockchain_format.classgroup import ClassgroupElement from flax.types.blockchain_format.coin import Coin from flax.types.blockchain_format.sized_bytes import bytes32, bytes100 from flax.types.mempool_inclusion_status import MempoolInclusionStatus from flax.types.spend_bundle import SpendBundle from flax.util.db_wrapper import DBWrapper2 from flax.util.ints import uint8, uint32, uint64, uint128 NUM_ITERS = 100 NUM_PEERS = 5 @contextmanager def enable_profiler(profile: bool, name: str) -> Iterator[None]: if not profile: yield return with cProfile.Profile() as pr: yield pr.create_stats() output_file = f"mempool-{name}" pr.dump_stats(output_file + ".profile") check_call(["gprof2dot", "-f", "pstats", "-o", output_file + ".dot", output_file + ".profile"]) with open(output_file + ".png", "w+") as f: check_call(["dot", "-T", "png", output_file + ".dot"], stdout=f) print("output written to: %s.png" % output_file) def fake_block_record(block_height: uint32, timestamp: uint64) -> BlockRecord: return BlockRecord( bytes32(b"a" * 32), # header_hash bytes32(b"b" * 32), # prev_hash block_height, # height uint128(0), # weight uint128(0), # total_iters uint8(0), # signage_point_index ClassgroupElement(bytes100(b"1" * 100)), # challenge_vdf_output None, # infused_challenge_vdf_output bytes32(b"f" * 32), # reward_infusion_new_challenge bytes32(b"c" * 32), # challenge_block_info_hash uint64(0), # 
sub_slot_iters bytes32(b"d" * 32), # pool_puzzle_hash bytes32(b"e" * 32), # farmer_puzzle_hash uint64(0), # required_iters uint8(0), # deficit False, # overflow uint32(block_height - 1), # prev_transaction_block_height timestamp, # timestamp None, # prev_transaction_block_hash uint64(0), # fees None, # reward_claims_incorporated None, # finished_challenge_slot_hashes None, # finished_infused_challenge_slot_hashes None, # finished_reward_slot_hashes None, # sub_epoch_summary_included ) async def run_mempool_benchmark(single_threaded: bool) -> None: suffix = "st" if single_threaded else "mt" db_wrapper: DBWrapper2 = await setup_db(f"mempool-benchmark-coins-{suffix}.db", 2) try: coin_store = await CoinStore.create(db_wrapper) mempool = MempoolManager(coin_store, DEFAULT_CONSTANTS, single_threaded=single_threaded) wt = WalletTool(DEFAULT_CONSTANTS) spend_bundles: List[List[SpendBundle]] = [] timestamp = uint64(1631794488) height = uint32(1) print("Building SpendBundles") for peer in range(NUM_PEERS): print(f" peer {peer}") print(" reward coins") unspent: List[Coin] = [] for idx in range(NUM_ITERS): height = uint32(height + 1) # farm rewards farmer_coin = create_farmer_coin( height, wt.get_new_puzzlehash(), uint64(250000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE ) pool_coin = create_pool_coin( height, wt.get_new_puzzlehash(), uint64(1750000000), DEFAULT_CONSTANTS.GENESIS_CHALLENGE ) unspent.extend([farmer_coin, pool_coin]) await coin_store.new_block( height, timestamp, set([pool_coin, farmer_coin]), [], [], ) bundles: List[SpendBundle] = [] print(" spend bundles") for coin in unspent: tx: SpendBundle = wt.generate_signed_transaction( uint64(coin.amount // 2), wt.get_new_puzzlehash(), coin ) bundles.append(tx) spend_bundles.append(bundles) # 19 seconds per block timestamp = uint64(timestamp + 19) if single_threaded: print("Single-threaded") else: print("Multi-threaded") print("Profiling add_spendbundle()") # the mempool only looks at: # timestamp # height # 
is_transaction_block # header_hash print("initialize MempoolManager") rec = fake_block_record(height, timestamp) await mempool.new_peak(rec, None) async def add_spend_bundles(spend_bundles: List[SpendBundle]) -> None: for tx in spend_bundles: npc = await mempool.pre_validate_spendbundle(tx, None, tx.name()) assert npc is not None _, status, error = await mempool.add_spend_bundle(tx, npc, tx.name()) assert status == MempoolInclusionStatus.SUCCESS assert error is None total_bundles = 0 tasks = [] with enable_profiler(True, f"add-{suffix}"): start = monotonic() for peer in range(NUM_PEERS): total_bundles += len(spend_bundles[peer]) tasks.append(asyncio.create_task(add_spend_bundles(spend_bundles[peer]))) await asyncio.gather(*tasks) stop = monotonic() print(f"add_spendbundle time: {stop - start:0.4f}s") print(f"{(stop - start) / total_bundles * 1000:0.2f}ms per add_spendbundle() call") with enable_profiler(True, f"create-{suffix}"): start = monotonic() for i in range(2000): await mempool.create_bundle_from_mempool(bytes32(b"a" * 32)) stop = monotonic() print(f"create_bundle_from_mempool time: {stop - start:0.4f}s") # TODO: add benchmark for new_peak() finally: await db_wrapper.close() if __name__ == "__main__": import logging logger = logging.getLogger() logger.addHandler(logging.StreamHandler()) logger.setLevel(logging.WARNING) asyncio.run(run_mempool_benchmark(True)) asyncio.run(run_mempool_benchmark(False))
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false
Flax-Network/flax-blockchain
https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/benchmarks/__init__.py
benchmarks/__init__.py
python
Apache-2.0
bb8715f3155bb8011a04cc8c05b3fa8133e4c64b
2026-01-05T07:13:52.951017Z
false