idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
36,500
def stop_server ( working_dir , clean = False , kill = False ) : timeout = 1.0 dead = False for i in xrange ( 0 , 5 ) : pid_file = get_pidfile_path ( working_dir ) if not os . path . exists ( pid_file ) : dead = True break pid = read_pid_file ( pid_file ) if pid is not None : try : os . kill ( pid , signal . SIGTERM ) except OSError , oe : if oe . errno == errno . ESRCH : log . info ( "Process %s is not running" % pid ) try : os . unlink ( pid_file ) except : pass return except Exception , e : log . exception ( e ) os . abort ( ) else : log . info ( "Corrupt PID file. Please make sure all instances of this program have stopped and remove {}" . format ( pid_file ) ) os . abort ( ) blockstack_opts = get_blockstack_opts ( ) srv = BlockstackRPCClient ( 'localhost' , blockstack_opts [ 'rpc_port' ] , timeout = 5 , protocol = 'http' ) try : res = blockstack_ping ( proxy = srv ) except socket . error as se : if se . errno == errno . ECONNREFUSED : try : os . kill ( pid , 0 ) log . info ( "Server %s is not dead yet..." % pid ) except OSError , oe : log . info ( "Server %s is dead to us" % pid ) dead = True break else : continue log . info ( "Server %s is still running; trying again in %s seconds" % ( pid , timeout ) ) time . sleep ( timeout ) timeout *= 2 if not dead and kill : log . info ( "Killing server %s" % pid ) clean = True try : os . kill ( pid , signal . SIGKILL ) except Exception , e : pass if clean : try : os . unlink ( pid_file ) except : pass log . debug ( "Blockstack server stopped" )
Stop the blockstackd server .
36,501
def genesis_block_load ( module_path = None ) : if os . environ . get ( 'BLOCKSTACK_GENESIS_BLOCK_PATH' ) is not None : log . warning ( 'Using envar-given genesis block' ) module_path = os . environ [ 'BLOCKSTACK_GENESIS_BLOCK_PATH' ] genesis_block = None genesis_block_stages = None if module_path : log . debug ( 'Load genesis block from {}' . format ( module_path ) ) genesis_block_path = module_path try : genesis_block_mod = imp . load_source ( 'genesis_block' , genesis_block_path ) genesis_block = genesis_block_mod . GENESIS_BLOCK genesis_block_stages = genesis_block_mod . GENESIS_BLOCK_STAGES if BLOCKSTACK_TEST : print '' print 'genesis block' print json . dumps ( genesis_block , indent = 4 , sort_keys = True ) print '' except Exception as e : log . exception ( e ) log . fatal ( 'Failed to load genesis block' ) os . abort ( ) else : log . debug ( 'Load built-in genesis block' ) genesis_block = get_genesis_block ( ) genesis_block_stages = get_genesis_block_stages ( ) try : for stage in genesis_block_stages : jsonschema . validate ( GENESIS_BLOCK_SCHEMA , stage ) jsonschema . validate ( GENESIS_BLOCK_SCHEMA , genesis_block ) set_genesis_block ( genesis_block ) set_genesis_block_stages ( genesis_block_stages ) log . debug ( 'Genesis block has {} stages' . format ( len ( genesis_block_stages ) ) ) for i , stage in enumerate ( genesis_block_stages ) : log . debug ( 'Stage {} has {} row(s)' . format ( i + 1 , len ( stage [ 'rows' ] ) ) ) except Exception as e : log . fatal ( "Invalid genesis block" ) os . abort ( ) return True
Make sure the genesis block is good to go . Load and instantiate it .
36,502
def server_setup(working_dir, port=None, api_port=None, indexer_enabled=None, indexer_url=None, api_enabled=None, recover=False):
    """
    Set up the server: start all subsystems, write the PID file, install
    signal handlers, and set up the DB.

    Keyword arguments override the corresponding config-file settings.
    Returns a server-state dict (working_dir, atlas, subdomains, rpc, api,
    pid_file, port, api_port, ...).
    Exits the process if the API is enabled but no indexer URL is known.
    """
    if not is_genesis_block_instantiated():
        genesis_block_load()

    blockstack_opts = get_blockstack_opts()
    blockstack_api_opts = get_blockstack_api_opts()
    pid_file = get_pidfile_path(working_dir)

    # CLI arguments take precedence over the config file
    indexer_enabled = indexer_enabled if indexer_enabled is not None else blockstack_opts['enabled']
    api_enabled = api_enabled if api_enabled is not None else blockstack_api_opts['enabled']
    indexer_url = indexer_url if indexer_url is not None else blockstack_api_opts.get('indexer_url', None)

    if api_enabled and not indexer_url:
        print("FATAL: no 'indexer_url' in the config file, and no --indexer_url given in the arguments")
        sys.exit(1)

    if port is None:
        port = blockstack_opts['rpc_port']

    if api_port is None:
        api_port = blockstack_api_opts['api_port']

    signal.signal(signal.SIGINT, blockstack_signal_handler)
    signal.signal(signal.SIGQUIT, blockstack_signal_handler)
    signal.signal(signal.SIGTERM, blockstack_signal_handler)

    put_pidfile(pid_file, os.getpid())
    set_indexing(working_dir, False)

    if blockstack_opts['enabled'] != indexer_enabled:
        log.debug("Override blockstack.enabled to {}".format(indexer_enabled))
        blockstack_opts['enabled'] = indexer_enabled
        set_blockstack_opts(blockstack_opts)

    if blockstack_api_opts['enabled'] != api_enabled:
        # BUG FIX: previously logged indexer_enabled here instead of api_enabled
        log.debug("Override blockstack-api.enabled to {}".format(api_enabled))
        blockstack_api_opts['enabled'] = api_enabled
        set_blockstack_api_opts(blockstack_api_opts)

    # BUG FIX: use .get() -- 'indexer_url' may be absent from the config file
    # (the code above already reads it with .get())
    if blockstack_api_opts.get('indexer_url') != indexer_url:
        log.debug("Override blockstack-api.indexer_url to {}".format(indexer_url))
        blockstack_api_opts['indexer_url'] = indexer_url
        set_blockstack_api_opts(blockstack_api_opts)

    rpc_srv = None
    api_srv = None
    atlas_state = None
    subdomain_state = None

    if blockstack_opts['enabled']:
        # we're an indexer node: bring up atlas, subdomains, and the RPC server
        db = get_or_instantiate_db_state(working_dir)
        atlas_state = atlas_init(blockstack_opts, db, port=port, recover=recover)
        db.close()

        subdomain_state = subdomains_init(blockstack_opts, working_dir, atlas_state)
        if atlas_state:
            atlas_node_start(atlas_state)

        rpc_srv = rpc_start(working_dir, port, subdomain_index=subdomain_state, thread=False)

    if blockstack_api_opts['enabled']:
        api_srv = api_start(working_dir, blockstack_api_opts['api_host'], api_port, thread=False)

    if rpc_srv:
        rpc_srv.start()

    if api_srv:
        api_srv.start()

    gc_start()
    set_running(True)
    set_indexing(working_dir, False)

    log.debug("Server setup: API = {}, Indexer = {}, Indexer URL = {}".format(
        blockstack_api_opts['enabled'], blockstack_opts['enabled'], blockstack_api_opts['indexer_url']))

    ret = {
        'working_dir': working_dir,
        'atlas': atlas_state,
        'subdomains': subdomain_state,
        'subdomains_initialized': False,
        'rpc': rpc_srv,
        'api': api_srv,
        'pid_file': pid_file,
        'port': port,
        'api_port': api_port,
    }
    return ret
Set up the server . Start all subsystems write pid file set up signal handlers set up DB . Returns a server instance .
36,503
def server_shutdown(server_state):
    """
    Shut down all server subsystems and remove the PID file.
    Always returns True.
    """
    set_running(False)

    rpc_stop(server_state)
    api_stop(server_state)
    server_atlas_shutdown(server_state)
    gc_stop()

    # best-effort removal of the PID file
    try:
        if os.path.exists(server_state['pid_file']):
            os.unlink(server_state['pid_file'])
    except:
        pass

    return True
Shut down server subsystems . Remove PID file .
36,504
def run_server ( working_dir , foreground = False , expected_snapshots = GENESIS_SNAPSHOT , port = None , api_port = None , use_api = None , use_indexer = None , indexer_url = None , recover = False ) : global rpc_server global api_server indexer_log_path = get_logfile_path ( working_dir ) logfile = None if not foreground : if os . path . exists ( indexer_log_path ) : logfile = open ( indexer_log_path , 'a' ) else : logfile = open ( indexer_log_path , 'a+' ) child_pid = daemonize ( logfile ) if child_pid < 0 : log . error ( "Failed to daemonize: {}" . format ( child_pid ) ) return - 1 if child_pid > 0 : log . debug ( "Running in the background as PID {}" . format ( child_pid ) ) sys . exit ( 0 ) server_state = server_setup ( working_dir , port = port , api_port = api_port , indexer_enabled = use_indexer , indexer_url = indexer_url , api_enabled = use_api , recover = recover ) atexit . register ( server_shutdown , server_state ) rpc_server = server_state [ 'rpc' ] blockstack_opts = get_blockstack_opts ( ) blockstack_api_opts = get_blockstack_api_opts ( ) if blockstack_opts [ 'enabled' ] : log . debug ( "Begin Indexing" ) while is_running ( ) : try : running = index_blockchain ( server_state , expected_snapshots = expected_snapshots ) except Exception , e : log . exception ( e ) log . error ( "FATAL: caught exception while indexing" ) os . abort ( ) deadline = time . time ( ) + REINDEX_FREQUENCY while time . time ( ) < deadline and is_running ( ) : try : time . sleep ( 1.0 ) except : break log . debug ( "End Indexing" ) elif blockstack_api_opts [ 'enabled' ] : log . debug ( "Begin serving REST requests" ) while is_running ( ) : try : time . sleep ( 1.0 ) except : break log . debug ( "End serving REST requests" ) server_shutdown ( server_state ) if logfile is not None : logfile . flush ( ) logfile . close ( ) return 0
Run blockstackd . Optionally daemonize . Return 0 on success Return negative on error
36,505
def setup ( working_dir , interactive = False ) : log . debug ( "Working dir: {}" . format ( working_dir ) ) if not os . path . exists ( working_dir ) : os . makedirs ( working_dir , 0700 ) node_config = load_configuration ( working_dir ) if node_config is None : sys . exit ( 1 ) log . debug ( "config\n{}" . format ( json . dumps ( node_config , indent = 4 , sort_keys = True ) ) ) return node_config
Do one - time initialization . Call this to set up global state .
36,506
def reconfigure ( working_dir ) : configure ( working_dir , force = True , interactive = True ) print "Blockstack successfully reconfigured." sys . exit ( 0 )
Reconfigure blockstackd .
36,507
def verify_database(trusted_consensus_hash, consensus_block_height, untrusted_working_dir, trusted_working_dir, start_block=None, expected_snapshots=None):
    """
    Verify that a database is consistent with a known-good consensus hash.

    Returns True if valid.
    Returns False if not.
    """
    # BUG FIX: don't use a shared mutable default argument ({})
    if expected_snapshots is None:
        expected_snapshots = {}

    db = BlockstackDB.get_readwrite_instance(trusted_working_dir)
    consensus_impl = virtualchain_hooks
    return virtualchain.state_engine_verify(trusted_consensus_hash, consensus_block_height, consensus_impl,
                                            untrusted_working_dir, db, start_block=start_block,
                                            expected_snapshots=expected_snapshots)
Verify that a database is consistent with a known - good consensus hash . Return True if valid . Return False if not
36,508
def check_and_set_envars ( argv ) : special_flags = { '--debug' : { 'arg' : False , 'envar' : 'BLOCKSTACK_DEBUG' , 'exec' : True , } , '--verbose' : { 'arg' : False , 'envar' : 'BLOCKSTACK_DEBUG' , 'exec' : True , } , '--testnet-id' : { 'arg' : True , 'envar' : 'BLOCKSTACK_TESTNET_ID' , 'exec' : True , } , '--testnet-start-block' : { 'arg' : True , 'envar' : 'BLOCKSTACK_TESTNET_START_BLOCK' , 'exec' : True , } , '--working_dir' : { 'arg' : True , 'argname' : 'working_dir' , 'exec' : False , } , '--working-dir' : { 'arg' : True , 'argname' : 'working_dir' , 'exec' : False , } , } cli_envs = { } cli_args = { } new_argv = [ ] stripped_argv = [ ] do_exec = False i = 0 while i < len ( argv ) : arg = argv [ i ] value = None for special_flag in special_flags . keys ( ) : if not arg . startswith ( special_flag ) : continue if special_flags [ special_flag ] [ 'arg' ] : if '=' in arg : argparts = arg . split ( "=" ) value_parts = argparts [ 1 : ] arg = argparts [ 0 ] value = '=' . join ( value_parts ) elif i + 1 < len ( argv ) : value = argv [ i + 1 ] i += 1 else : print >> sys . stderr , "%s requires an argument" % special_flag return False else : value = "1" break i += 1 if value is not None : if 'envar' in special_flags [ special_flag ] : cli_envs [ special_flags [ special_flag ] [ 'envar' ] ] = value if 'argname' in special_flags [ special_flag ] : cli_args [ special_flags [ special_flag ] [ 'argname' ] ] = value new_argv . append ( arg ) new_argv . append ( value ) if special_flags [ special_flag ] [ 'exec' ] : do_exec = True else : new_argv . append ( arg ) stripped_argv . append ( arg ) if do_exec : for cli_env , cli_env_value in cli_envs . items ( ) : os . environ [ cli_env ] = cli_env_value if os . environ . get ( "BLOCKSTACK_DEBUG" ) is not None : print "Re-exec as {}" . format ( " " . join ( new_argv ) ) os . execv ( new_argv [ 0 ] , new_argv ) log . debug ( "Stripped argv: {}" . format ( ' ' . join ( stripped_argv ) ) ) return cli_args , stripped_argv
Go through argv and find any special command - line flags that set environment variables that affect multiple modules .
36,509
def load_expected_snapshots(snapshots_path):
    """
    Load expected consensus hashes from a .snapshots file, trying the legacy
    JSON format first and then a chainstate DB.

    Returns {block_height (int): consensus_hash (str)} on success.
    Returns None on error.
    """
    snapshots_path = os.path.expanduser(snapshots_path)
    expected_snapshots = {}

    # attempt 1: legacy JSON snapshots file
    try:
        with open(snapshots_path, "r") as f:
            snapshots_json = f.read()

        snapshots_data = json.loads(snapshots_json)
        assert 'snapshots' in snapshots_data.keys(), "Not a valid snapshots file"

        for (block_id_str, consensus_hash) in snapshots_data['snapshots'].items():
            expected_snapshots[int(block_id_str)] = str(consensus_hash)

        log.debug("Loaded expected snapshots from legacy JSON {}; {} entries".format(snapshots_path, len(expected_snapshots)))
        return expected_snapshots

    except ValueError as ve:
        # not JSON; fall through and try it as a chainstate DB
        log.debug("Snapshots file {} is not JSON".format(snapshots_path))

    except Exception as e:
        if os.environ.get('BLOCKSTACK_DEBUG') == '1':
            log.exception(e)

        log.debug("Failed to read expected snapshots from '{}'".format(snapshots_path))
        return None

    # attempt 2: chainstate DB
    try:
        db_con = virtualchain.StateEngine.db_connect(snapshots_path)
        expected_snapshots = virtualchain.StateEngine.get_consensus_hashes(None, None, db_con=db_con, completeness_check=False)

        log.debug("Loaded expected snapshots from chainstate DB {}, {} entries".format(snapshots_path, len(expected_snapshots)))
        return expected_snapshots

    except:
        log.debug("{} does not appear to be a chainstate DB".format(snapshots_path))
        return None
Load expected consensus hashes from a . snapshots file . Return the snapshots as a dict on success Return None on error
36,510
def do_genesis_block_audit(genesis_block_path=None, key_id=None):
    """
    Load and audit the genesis block, optionally using an alternative
    signing key (exported via gpg2).

    Returns True if the signatures check out.
    Returns False otherwise.
    """
    signing_keys = GENESIS_BLOCK_SIGNING_KEYS

    if genesis_block_path is not None:
        genesis_block_load(genesis_block_path)

    if key_id is not None:
        # audit against the given GPG key instead of the built-in bundle
        gpg2_path = find_gpg2()
        assert gpg2_path, 'You need to install gpg2'

        p = subprocess.Popen([gpg2_path, '-a', '--export', key_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            log.error('Failed to load key {}\n{}'.format(key_id, err))
            return False

        signing_keys = {key_id: out.strip()}

    res = genesis_block_audit(get_genesis_block_stages(), key_bundle=signing_keys)
    if not res:
        log.error('Genesis block is NOT signed by {}'.format(', '.join(signing_keys.keys())))
        return False

    return True
Loads and audits the genesis block optionally using an alternative key
36,511
def setup_recovery(working_dir):
    """
    Set up the recovery metadata so we can fully recover secondary state
    (like subdomains) on restart.

    Asserts if bitcoind cannot be reached.  Always returns True.
    """
    db = get_db_state(working_dir)

    bitcoind_session = get_bitcoind(new=True)
    assert bitcoind_session is not None

    _, current_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)
    assert current_block, 'Failed to connect to bitcoind'

    # recover from our last processed block up to the chain tip, minus the
    # confirmation window
    set_recovery_range(working_dir, db.lastblock, current_block - NUM_CONFIRMATIONS)
    return True
Set up the recovery metadata so we can fully recover secondary state like subdomains .
36,512
def check_recovery(working_dir):
    """
    Do we need to recover on start-up?

    Returns True if our chain state is still inside a recorded recovery
    window.  Returns False otherwise, clearing any stale recovery window.
    """
    recovery_start_block, recovery_end_block = get_recovery_range(working_dir)
    if recovery_start_block is None or recovery_end_block is None:
        # no recovery window recorded
        return False

    local_current_block = virtualchain_hooks.get_last_block(working_dir)
    if local_current_block <= recovery_end_block:
        return True

    log.debug('Chain state is at block {}, and is outside the recovery window {}-{}'.format(local_current_block, recovery_start_block, recovery_end_block))
    clear_recovery_range(working_dir)
    return False
Do we need to recover on start - up?
36,513
def success_response(self, method_resp, **kw):
    """
    Make a standard success response, which carries some ancillary data
    (indexing status and last-processed block) alongside the given
    method response and keyword extras.
    """
    resp = {
        'status': True,
        'indexing': config.is_indexing(self.working_dir),
        'lastblock': virtualchain_hooks.get_last_block(self.working_dir),
    }
    resp.update(kw)
    resp.update(method_resp)

    if self.is_stale():
        # we have not reindexed in a while; warn the caller
        resp['stale'] = True
        resp['warning'] = 'Daemon has not reindexed since {}'.format(self.last_indexing_time)

    return resp
Make a standard success response which contains some ancillary data.
36,514
def load_name_info(self, db, name_record):
    """
    Augment a db-loaded name record with expiry information and (when
    available locally) its zonefile.

    Returns the updated name_record.
    Returns None if the name's namespace is unknown.
    """
    name = str(name_record['name'])
    name_record = self.sanitize_rec(name_record)

    namespace_id = get_namespace_from_name(name)
    namespace_record = db.get_namespace(namespace_id, include_history=False)
    if namespace_record is None:
        namespace_record = db.get_namespace_reveal(namespace_id, include_history=False)

    if namespace_record is None:
        return None

    # work out when this name expires
    if namespace_record['lifetime'] != NAMESPACE_LIFE_INFINITE:
        deadlines = BlockstackDB.get_name_deadlines(name_record, namespace_record, db.lastblock)
        if deadlines is not None:
            name_record['expire_block'] = deadlines['expire_block']
            name_record['renewal_deadline'] = deadlines['renewal_deadline']
        else:
            name_record['expire_block'] = -1
            name_record['renewal_deadline'] = -1
    else:
        # infinite-lifetime namespace: never expires
        name_record['expire_block'] = -1
        name_record['renewal_deadline'] = -1

    if name_record['expire_block'] > 0 and name_record['expire_block'] <= db.lastblock:
        name_record['expired'] = True
    else:
        name_record['expired'] = False

    # attach the zonefile if we have it locally
    if 'value_hash' in name_record and name_record['value_hash'] is not None:
        conf = get_blockstack_opts()
        if is_atlas_enabled(conf):
            zfdata = self.get_zonefile_data(name_record['value_hash'], conf['zonefiles'])
            if zfdata is not None:
                zfdata = base64.b64encode(zfdata)
                name_record['zonefile'] = zfdata

    return name_record
Get some extra name information given a db - loaded name record . Return the updated name_record
36,515
def get_name_DID_info(self, name):
    """
    Get a name's DID info.

    Returns the DID info on success.
    Returns {'error': ...} if the name is not found.
    """
    db = get_db_state(self.working_dir)
    did_info = db.get_name_DID_info(name)
    # BUG FIX: release the DB handle -- every sibling method closes it,
    # but this one leaked it
    db.close()

    if did_info is None:
        return {'error': 'No such name', 'http_status': 404}

    return did_info
Get a name's DID info. Returns None if not found.
36,516
def rpc_get_name_DID(self, name, **con_info):
    """
    Given a name or subdomain, return its DID.
    """
    did_info = None
    if check_name(name):
        did_info = self.get_name_DID_info(name)
    elif check_subdomain(name):
        did_info = self.get_subdomain_DID_info(name)
    else:
        return {'error': 'Invalid name or subdomain', 'http_status': 400}

    if did_info is None:
        return {'error': 'No DID for this name', 'http_status': 404}

    did = make_DID(did_info['name_type'], did_info['address'], did_info['index'])
    return self.success_response({'did': did})
Given a name or subdomain return its DID .
36,517
def rpc_get_DID_record(self, did, **con_info):
    """
    Given a DID, return the name or subdomain record it corresponds to.
    """
    if not isinstance(did, (str, unicode)):
        return {'error': 'Invalid DID: not a string', 'http_status': 400}

    try:
        did_info = parse_DID(did)
    except:
        return {'error': 'Invalid DID', 'http_status': 400}

    res = None
    if did_info['name_type'] == 'name':
        res = self.get_name_DID_record(did)
    elif did_info['name_type'] == 'subdomain':
        res = self.get_subdomain_DID_record(did)
    else:
        # BUG FIX: previously fell through with res == None, and the
        # `'error' in res` check below raised a TypeError
        return {'error': 'Invalid DID', 'http_status': 400}

    if 'error' in res:
        return {'error': res['error'], 'http_status': res.get('http_status', 404)}

    return self.success_response({'record': res['record']})
Given a DID return the name or subdomain it corresponds to
36,518
def rpc_get_blockstack_ops_at(self, block_id, offset, count, **con_info):
    """
    Get the name operations that occurred in the given block, paginated
    (count capped at 10).  Does not include account operations.
    """
    if not check_block(block_id):
        return {'error': 'Invalid block height', 'http_status': 400}

    if not check_offset(offset):
        return {'error': 'Invalid offset', 'http_status': 400}

    if not check_count(count, 10):
        return {'error': 'Invalid count', 'http_status': 400}

    db = get_db_state(self.working_dir)
    nameops = db.get_all_blockstack_ops_at(block_id, offset=offset, count=count)
    db.close()

    log.debug("{} name operations at block {}, offset {}, count {}".format(len(nameops), block_id, offset, count))

    ret = []
    for nameop in nameops:
        assert 'opcode' in nameop, 'BUG: missing opcode in {}'.format(json.dumps(nameop, sort_keys=True))
        ret.append(self.sanitize_rec(nameop))

    return self.success_response({'nameops': ret})
Get the name operations that occurred in the given block. Does not include account operations.
36,519
def rpc_get_blockstack_ops_hash_at(self, block_id, **con_info):
    """
    Get the hash over the sequence of names and namespaces altered at the
    given block.  Used by SNV clients.
    """
    if not check_block(block_id):
        return {'error': 'Invalid block height', 'http_status': 400}

    db = get_db_state(self.working_dir)
    ops_hash = db.get_block_ops_hash(block_id)
    db.close()

    return self.success_response({'ops_hash': ops_hash})
Get the hash over the sequence of names and namespaces altered at the given block . Used by SNV clients .
36,520
def get_bitcoind_info(self):
    """
    Get bitcoind info.  Try the cache first; on a miss, fetch from
    bitcoind and cache the result.
    """
    cached_bitcoind_info = self.get_cached_bitcoind_info()
    if cached_bitcoind_info:
        return cached_bitcoind_info

    bitcoind_opts = default_bitcoind_opts(virtualchain.get_config_filename(virtualchain_hooks, self.working_dir), prefix=True)
    bitcoind = get_bitcoind(new_bitcoind_opts=bitcoind_opts, new=True)

    if bitcoind is None:
        return {'error': 'Internal server error: failed to connect to bitcoind'}

    try:
        info = bitcoind.getinfo()
        assert 'error' not in info
        assert 'blocks' in info

        self.set_cached_bitcoind_info(info)
        return info
    except Exception as e:
        # let the caller deal with bitcoind failures
        raise
Get bitcoind info . Try the cache and on cache miss fetch from bitcoind and cache .
36,521
def get_consensus_info(self):
    """
    Get the current block height and consensus hash.  Try the cache first;
    on a miss, fetch from the DB and cache the result.
    """
    cached_consensus_info = self.get_cached_consensus_info()
    if cached_consensus_info:
        return cached_consensus_info

    db = get_db_state(self.working_dir)
    ch = db.get_current_consensus()
    block = db.get_current_block()
    db.close()

    cinfo = {'consensus_hash': ch, 'block_height': block}
    self.set_cached_consensus_info(cinfo)
    return cinfo
Get block height and consensus hash . Try the cache and on cache miss fetch from the db
36,522
def rpc_get_account_tokens(self, address, **con_info):
    """
    Get the types of tokens that an account owns.
    Returns the list on success.
    """
    if not check_account_address(address):
        return {'error': 'Invalid address', 'http_status': 400}

    # canonicalize c32 (Stacks) addresses to base58
    if is_c32_address(address):
        address = c32ToB58(address)

    db = get_db_state(self.working_dir)
    token_list = db.get_account_tokens(address)
    db.close()

    return self.success_response({'token_types': token_list})
Get the types of tokens that an account owns Returns the list on success
36,523
def rpc_get_account_balance(self, address, token_type, **con_info):
    """
    Get the balance of an address for a particular token type.

    Returns the balance on success.
    Returns 0 if the balance is 0, or if there is no such account.
    """
    if not check_account_address(address):
        return {'error': 'Invalid address', 'http_status': 400}

    if not check_token_type(token_type):
        return {'error': 'Invalid token type', 'http_status': 400}

    # canonicalize c32 (Stacks) addresses to base58
    if is_c32_address(address):
        address = c32ToB58(address)

    db = get_db_state(self.working_dir)
    account = db.get_account(address, token_type)
    if account is None:
        # BUG FIX: close the DB handle before the early return (it leaked)
        db.close()
        return self.success_response({'balance': 0})

    balance = db.get_account_balance(account)
    if balance is None:
        balance = 0

    db.close()
    return self.success_response({'balance': balance})
Get the balance of an address for a particular token type Returns the value on success Returns 0 if the balance is 0 or if there is no address
36,524
def export_account_state(self, account_state):
    """
    Make an account state presentable to external consumers: pick out the
    public fields, rendering the credit/debit values as strings.
    """
    numeric_as_string = ('credit_value', 'debit_value')
    passthrough = ('address', 'type', 'lock_transfer_block_id', 'block_id', 'vtxindex', 'txid')

    ret = {}
    for field in passthrough:
        ret[field] = account_state[field]

    for field in numeric_as_string:
        ret[field] = '{}'.format(account_state[field])

    return ret
Make an account state presentable to external consumers
36,525
def rpc_get_account_record(self, address, token_type, **con_info):
    """
    Get the current state of an account.
    """
    if not check_account_address(address):
        return {'error': 'Invalid address', 'http_status': 400}

    if not check_token_type(token_type):
        return {'error': 'Invalid token type', 'http_status': 400}

    # canonicalize c32 (Stacks) addresses to base58
    if is_c32_address(address):
        address = c32ToB58(address)

    db = get_db_state(self.working_dir)
    account = db.get_account(address, token_type)
    db.close()

    if account is None:
        return {'error': 'No such account', 'http_status': 404}

    state = self.export_account_state(account)
    return self.success_response({'account': state})
Get the current state of an account
36,526
def rpc_get_account_at(self, address, block_height, **con_info):
    """
    Get the account's states at a particular block height.
    Returns the sequence of history states on success.
    """
    if not check_account_address(address):
        return {'error': 'Invalid address', 'http_status': 400}

    if not check_block(block_height):
        return {'error': 'Invalid start block', 'http_status': 400}

    # canonicalize c32 (Stacks) addresses to base58
    if is_c32_address(address):
        address = c32ToB58(address)

    db = get_db_state(self.working_dir)
    account_states = db.get_account_at(address, block_height)
    db.close()

    ret = [self.export_account_state(hist) for hist in account_states]
    return self.success_response({'history': ret})
Get the account s statuses at a particular block height . Returns the sequence of history states on success
36,527
def rpc_get_consensus_hashes(self, block_id_list, **con_info):
    """
    Return the consensus hashes at multiple block heights (at most 32).
    Returns a dict mapping each block ID to its consensus hash.
    """
    # idiom fix: isinstance instead of type() comparison (also accepts
    # list subclasses)
    if not isinstance(block_id_list, list):
        return {'error': 'Invalid block heights', 'http_status': 400}

    if len(block_id_list) > 32:
        return {'error': 'Too many block heights', 'http_status': 400}

    for bid in block_id_list:
        if not check_block(bid):
            return {'error': 'Invalid block height', 'http_status': 400}

    db = get_db_state(self.working_dir)
    ret = {}
    for block_id in block_id_list:
        ret[block_id] = db.get_consensus_at(block_id)

    db.close()
    return self.success_response({'consensus_hashes': ret})
Return the consensus hashes at multiple block numbers Return a dict mapping each block ID to its consensus hash .
36,528
def get_zonefile_data(self, zonefile_hash, zonefile_dir):
    """
    Get a zonefile by hash from local storage.

    Returns the serialized zonefile on success.
    Returns None if it is missing or fails the hash check (in which case
    the corrupt local copy is removed).
    """
    atlas_zonefile_data = get_atlas_zonefile_data(zonefile_hash, zonefile_dir, check=False)
    if atlas_zonefile_data is None:
        return None

    # confirm that the data actually hashes to the requested hash
    zfh = get_zonefile_data_hash(atlas_zonefile_data)
    if zfh != zonefile_hash:
        log.debug("Invalid local zonefile %s" % zonefile_hash)
        remove_atlas_zonefile_data(zonefile_hash, zonefile_dir)
        return None

    log.debug("Zonefile %s is local" % zonefile_hash)
    return atlas_zonefile_data
Get a zonefile by hash Return the serialized zonefile on success Return None on error
36,529
def rpc_get_zonefiles_by_block(self, from_block, to_block, offset, count, **con_info):
    """
    Get information about zonefiles announced in blocks
    [from_block, to_block], paginated (count capped at 100).
    Requires atlas to be enabled.
    """
    conf = get_blockstack_opts()
    if not is_atlas_enabled(conf):
        return {'error': 'Not an atlas node', 'http_status': 400}

    if not check_block(from_block):
        return {'error': 'Invalid from_block height', 'http_status': 400}

    if not check_block(to_block):
        return {'error': 'Invalid to_block height', 'http_status': 400}

    if not check_offset(offset):
        return {'error': 'invalid offset', 'http_status': 400}

    if not check_count(count, 100):
        return {'error': 'invalid count', 'http_status': 400}

    zonefile_info = atlasdb_get_zonefiles_by_block(from_block, to_block, offset, count, path=conf['atlasdb_path'])
    if 'error' in zonefile_info:
        return zonefile_info

    return self.success_response({'zonefile_info': zonefile_info})
Get information about zonefiles announced in blocks [
36,530
def peer_exchange(self, peer_host, peer_port):
    """
    Exchange peers: enqueue the given peer as a candidate for our peer
    table, and return a (bounded, randomized) list of our healthy peers.
    """
    peer_list = atlas_get_live_neighbors("%s:%s" % (peer_host, peer_port))

    # cap the reply at the max neighbor count, sampling at random
    if len(peer_list) > atlas_max_neighbors():
        random.shuffle(peer_list)
        peer_list = peer_list[:atlas_max_neighbors()]

    log.info("Enqueue remote peer {}:{}".format(peer_host, peer_port))
    atlas_peer_enqueue("%s:%s" % (peer_host, peer_port))

    log.debug("Live peers reply to %s:%s: %s" % (peer_host, peer_port, peer_list))
    return peer_list
Exchange peers . Add the given peer to the list of new peers to consider . Return the list of healthy peers
36,531
def rpc_atlas_peer_exchange(self, remote_peer, **con_info):
    """
    Accept a remotely-given atlas peer, and return our list of healthy peers.

    The remotely-given peer is only believed if the caller is localhost;
    otherwise the caller's socket-level address is used.  This prevents a
    malicious node from filling our peer table with junk.
    """
    conf = get_blockstack_opts()
    if not conf.get('atlas', False):
        return {'error': 'Not an atlas node', 'http_status': 404}

    client_host = con_info['client_host']
    client_port = con_info['client_port']

    LOCALHOST = ['127.0.0.1', '::1', 'localhost']

    if client_host not in LOCALHOST:
        # remote caller: trust the socket, not the argument
        peer_host = client_host
        peer_port = client_port
    else:
        try:
            peer_host, peer_port = url_to_host_port(remote_peer)
            assert peer_host
            assert peer_port
        except:
            return {'error': 'Invalid remote peer address', 'http_status': 400}

    peers = self.peer_exchange(peer_host, peer_port)
    return self.success_response({'peers': peers})
Accept a remotely - given atlas peer and return our list of healthy peers . The remotely - given atlas peer will only be considered if the caller is localhost ; otherwise the caller s socket - given information will be used . This is to prevent a malicious node from filling up this node s peer table with junk .
36,532
def stop_server(self):
    """
    Stop serving.  Also stops the serving thread.
    """
    if self.rpc_server is None:
        return

    # shut the socket down first to unblock any in-flight I/O
    try:
        self.rpc_server.socket.shutdown(socket.SHUT_RDWR)
    except:
        log.warning("Failed to shut down server socket")

    self.rpc_server.shutdown()
Stop serving . Also stops the thread .
36,533
def get_last_block(working_dir):
    """
    Get the last block processed.

    Returns the block height on success.
    Returns None on error.
    """
    # this module itself is the virtualchain implementation
    impl = sys.modules[__name__]
    return BlockstackDB.get_lastblock(impl, working_dir)
Get the last block processed Return the integer on success Return None on error
36,534
def get_or_instantiate_db_state(working_dir):
    """
    Get a read-only handle to the DB, instantiating the chainstate first
    if it does not yet exist.
    """
    # borrowing (and immediately releasing) a read/write instance makes
    # sure the chainstate exists before we open it read-only
    new_db = BlockstackDB.borrow_readwrite_instance(working_dir, -1)
    BlockstackDB.release_readwrite_instance(new_db, -1)

    return get_db_state(working_dir)
Get a read-only handle to the DB. Instantiate it first if it doesn't exist.
36,535
def check_quirks(block_id, block_op, db_state):
    """
    Check that all serialization-compatibility quirks have been preserved
    on a processed operation.  Used primarily for testing; asserts on
    violation.
    """
    opcode_name = op_get_opcode_name(block_op['op'])

    if opcode_name in OPCODE_NAME_NAMEOPS and opcode_name not in OPCODE_NAME_STATE_PREORDER:
        assert 'last_creation_op' in block_op, 'QUIRK BUG: missing last_creation_op in {}'.format(opcode_name)

        if block_op['last_creation_op'] == NAME_IMPORT:
            # quirk: names created via NAME_IMPORT carry a float op_fee
            assert isinstance(block_op['op_fee'], float), 'QUIRK BUG: op_fee is not a float when it should be'

    return
Check that all serialization compatibility quirks have been preserved . Used primarily for testing .
36,536
def sync_blockchain(working_dir, bt_opts, last_block, server_state, expected_snapshots=None, **virtualchain_args):
    """
    Synchronize state with the blockchain.

    Returns True on success.
    Returns False if we're supposed to stop indexing.
    Aborts on error.
    """
    # BUG FIX: don't use a shared mutable default argument ({})
    if expected_snapshots is None:
        expected_snapshots = {}

    subdomain_index = server_state['subdomains']
    atlas_state = server_state['atlas']

    log.info("Synchronizing database {} up to block {}".format(working_dir, last_block))

    new_db = BlockstackDB.borrow_readwrite_instance(working_dir, last_block, expected_snapshots=expected_snapshots)

    # point the DB at the subdomain and atlas subsystems so it can feed them
    new_db.subdomain_index = subdomain_index
    new_db.atlas_state = atlas_state

    rc = virtualchain.sync_virtualchain(bt_opts, last_block, new_db, expected_snapshots=expected_snapshots, **virtualchain_args)

    BlockstackDB.release_readwrite_instance(new_db, last_block)

    return rc
Synchronize state with the blockchain. Return True on success. Return False if we're supposed to stop indexing. Abort on error.
36,537
def url_protocol(url, port=None):
    """
    Get the protocol to use for a URL.

    Returns 'http' or 'https'.
    Returns None if the URL is not an http(s) URL.
    """
    if not url.startswith(('http://', 'https://')):
        return None

    urlinfo = urllib2.urlparse.urlparse(url)
    assert urlinfo.scheme in ['http', 'https'], 'Invalid URL scheme in {}'.format(url)
    return urlinfo.scheme
Get the protocol to use for a URL . return http or https or None
36,538
def make_DID(name_type, address, index):
    """
    Standard way of making a DID.

    name_type is 'name' or 'subdomain'; address is the owner address and
    index is the per-address name ordinal.  Subdomain addresses are
    re-encoded with a dedicated version byte so they are distinguishable
    from on-chain name addresses.
    """
    if name_type not in ['name', 'subdomain']:
        raise ValueError("Require 'name' or 'subdomain' for name_type")

    if name_type == 'name':
        address = virtualchain.address_reencode(address)
    else:
        version_byte = keylib.b58check.b58check_version_byte(address)
        if version_byte == bitcoin_blockchain.version_byte:
            version_byte = SUBDOMAIN_ADDRESS_VERSION_BYTE
        else:
            version_byte = SUBDOMAIN_ADDRESS_MULTISIG_VERSION_BYTE
        address = virtualchain.address_reencode(address, version_byte=version_byte)

    return 'did:stack:v0:{}-{}'.format(address, index)
Standard way of making a DID . name_type is name or subdomain
36,539
def process_request_thread(self, request, client_address):
    """
    Same as in BaseServer, but running in its own thread; exception
    handling is done here.  When the thread unregisters itself, poke
    the garbage-collector thread so idle resources get reclaimed.
    """
    from ..blockstackd import get_gc_thread
    try:
        self.finish_request(request, client_address)
    except Exception:
        self.handle_error(request, client_address)
    finally:
        self.shutdown_request(request)

    unregistered = False
    my_ident = threading.current_thread().ident
    with self._thread_guard:
        if my_ident in self._threads:
            del self._threads[my_ident]
            unregistered = True
            if BLOCKSTACK_TEST:
                log.debug('{} active threads (removed {})'.format(len(self._threads), my_ident))

    if unregistered:
        gc_thread = get_gc_thread()
        if gc_thread:
            gc_thread.gc_event()
Same as in BaseServer but as a thread . In addition exception handling is done here .
36,540
def get_request(self):
    """
    Accept a request, up to the allowed number of worker threads.
    Defer to self.overloaded() and drop the connection when there are
    already too many pending requests.  Returns (request, client_addr),
    or (None, None) when the request was rejected as overload.
    """
    request, client_addr = super(BoundedThreadingMixIn, self).get_request()
    too_busy = False
    with self._thread_guard:
        if self._threads is not None and len(self._threads) + 1 > MAX_RPC_THREADS:
            too_busy = True

    if too_busy:
        reply = self.overloaded(client_addr)
        request.sendall(reply)
        sys.stderr.write('{} - - [{}] "Overloaded"\n'.format(client_addr[0], time_str(time.time())))
        self.shutdown_request(request)
        return None, None

    return request, client_addr
Accept a request up to the given number of allowed threads . Defer to self . overloaded if there are already too many pending requests .
36,541
def get_epoch_config(block_height):
    """
    Get the epoch constants for the given block height.
    Aborts the process if the height maps to no known epoch.
    """
    global EPOCHS
    epoch_number = get_epoch_number(block_height)
    if not (0 <= epoch_number < len(EPOCHS)):
        log.error("FATAL: invalid epoch %s" % epoch_number)
        os.abort()
    return EPOCHS[epoch_number]
Get the epoch constants for the given block height
36,542
def get_epoch_namespace_lifetime_multiplier(block_height, namespace_id):
    """
    Get the namespace lifetime multiplier for the epoch at block_height.

    Falls back to the wildcard ('*') namespace entry when the given
    namespace has no per-namespace override.
    """
    epoch_config = get_epoch_config(block_height)
    namespaces = epoch_config['namespaces']
    # dict.has_key() is deprecated (and removed in Python 3); use `in`
    if namespace_id in namespaces:
        return namespaces[namespace_id]['NAMESPACE_LIFETIME_MULTIPLIER']
    return namespaces['*']['NAMESPACE_LIFETIME_MULTIPLIER']
What's the namespace lifetime multiplier for this epoch?
36,543
def get_epoch_namespace_lifetime_grace_period(block_height, namespace_id):
    """
    Get the namespace lifetime grace period for the epoch at block_height.

    Falls back to the wildcard ('*') namespace entry when the given
    namespace has no per-namespace override.
    """
    epoch_config = get_epoch_config(block_height)
    namespaces = epoch_config['namespaces']
    # dict.has_key() is deprecated (and removed in Python 3); use `in`
    if namespace_id in namespaces:
        return namespaces[namespace_id]['NAMESPACE_LIFETIME_GRACE_PERIOD']
    return namespaces['*']['NAMESPACE_LIFETIME_GRACE_PERIOD']
what s the namespace lifetime grace period for this epoch?
36,544
def get_epoch_namespace_prices(block_height, units):
    """
    Get the namespace price table for the epoch at block_height,
    denominated in the given units ('BTC' or the STACKS token type).
    """
    assert units in ['BTC', TOKEN_TYPE_STACKS], 'Invalid unit {}'.format(units)
    epoch_config = get_epoch_config(block_height)
    price_key = 'namespace_prices' if units == 'BTC' else 'namespace_prices_stacks'
    return epoch_config[price_key]
get the list of namespace prices by block height
36,545
def op_get_opcode_name(op_string):
    """
    Get the name of an opcode, given the op byte sequence of the operation.
    Raises Exception on an unknown op byte.
    """
    global OPCODE_NAMES
    # quirk: a NAME_REGISTRATION byte followed by ':' denotes a renewal
    if op_string == '{}:'.format(NAME_REGISTRATION):
        return 'NAME_RENEWAL'

    op_byte = op_string[0]
    if op_byte not in OPCODE_NAMES:
        raise Exception('No such operation "{}"'.format(op_byte))

    return OPCODE_NAMES[op_byte]
Get the name of an opcode given the op byte sequence of the operation .
36,546
def get_announce_filename(working_dir):
    """
    Get the path to the file that stores all of the announcements,
    derived from the virtual chain's name under working_dir.
    """
    chain_name = get_default_virtualchain_impl().get_virtual_chain_name()
    return os.path.join(working_dir, chain_name) + '.announce'
Get the path to the file that stores all of the announcements .
36,547
def is_indexing(working_dir):
    """
    Is the blockstack daemon synchronizing with the blockchain?
    True iff the indexing lockfile exists.
    """
    # the existence check IS the boolean answer; no need for if/else
    return os.path.exists(get_indexing_lockfile(working_dir))
Is the blockstack daemon synchronizing with the blockchain?
36,548
def set_indexing(working_dir, flag):
    """
    Set a flag in the filesystem as to whether or not we're indexing:
    create the lockfile when flag is True, remove it when False.

    Returns True on success, False on filesystem error.
    """
    indexing_path = get_indexing_lockfile(working_dir)
    if flag:
        try:
            # touch the lockfile; close promptly via context manager
            with open(indexing_path, "w+"):
                pass
            return True
        except (OSError, IOError):
            # narrow from a bare except: only filesystem errors mean failure
            return False
    else:
        try:
            os.unlink(indexing_path)
            return True
        except (OSError, IOError):
            return False
Set a flag in the filesystem as to whether or not we re indexing .
36,549
def set_recovery_range(working_dir, start_block, end_block):
    """
    Set the recovery block range, used when restoring and reprocessing
    transactions from a backup.  The two block heights are written one
    per line and fsync'd so the hint survives a crash.
    """
    hint_path = os.path.join(working_dir, '.recovery')
    with open(hint_path, 'w') as hint_file:
        hint_file.write('{}\n{}\n'.format(start_block, end_block))
        hint_file.flush()
        os.fsync(hint_file.fileno())
Set the recovery block range, used when we're restoring and reprocessing transactions from a backup.
36,550
def clear_recovery_range(working_dir):
    """
    Clear out our recovery hint file, if present.  A no-op when the
    hint does not exist.
    """
    hint_path = os.path.join(working_dir, '.recovery')
    if os.path.exists(hint_path):
        os.unlink(hint_path)
Clear out our recovery hint
36,551
def is_atlas_enabled(blockstack_opts):
    """
    Can we do atlas operations?  Requires the 'atlas' flag plus the
    'zonefiles' and 'atlasdb_path' paths to be configured.
    """
    if not blockstack_opts['atlas']:
        log.debug("Atlas is disabled")
        return False

    # both storage paths must be configured
    for required_path in ('zonefiles', 'atlasdb_path'):
        if required_path not in blockstack_opts:
            log.debug("Atlas is disabled: no '{}' path set".format(required_path))
            return False

    return True
Can we do atlas operations?
36,552
def is_subdomains_enabled(blockstack_opts):
    """
    Can we do subdomain operations?  Requires atlas to be enabled and a
    'subdomaindb_path' to be configured.
    """
    # subdomains ride on top of atlas zone-file replication
    if not is_atlas_enabled(blockstack_opts):
        log.debug("Subdomains are disabled")
        return False

    if 'subdomaindb_path' not in blockstack_opts:
        log.debug("Subdomains are disabled: no 'subdomaindb_path' path set")
        return False

    return True
Can we do subdomain operations?
36,553
def store_announcement(working_dir, announcement_hash, announcement_text, force=False):
    """
    Store a new announcement locally, atomically:
    merge the hash into the announcement list (skipping hashes already
    seen unless force=True), write the list via a tmp-file rename, and
    save the announcement text under working_dir/announcements/.
    """
    if not force and announcement_hash in ANNOUNCEMENTS:
        # already known; nothing to do
        return

    announce_path = get_announce_filename(working_dir)
    tmp_path = announce_path + ".tmp"
    merged_text = ""
    cleanup_paths = []

    if os.path.exists(tmp_path):
        # a previous attempt left temporary files behind; merge them in.
        # NOTE(review): this branch reads announce_path (not tmp_path)
        # despite logging tmp_path -- confirm this is intentional.
        log.debug("Merge announcement list %s" % tmp_path)
        with open(announce_path, "r") as f:
            merged_text += f.read()

        suffix = 1
        failed_path = tmp_path + (".%s" % suffix)
        while os.path.exists(failed_path):
            log.debug("Merge announcement list %s" % failed_path)
            with open(failed_path, "r") as f:
                merged_text += f.read()
            cleanup_paths.append(failed_path)
            suffix += 1
            failed_path = tmp_path + (".%s" % suffix)

        # write the new list to the first unused numbered tmp path
        tmp_path = failed_path

    if os.path.exists(announce_path):
        with open(announce_path, "r") as f:
            merged_text += f.read()

    merged_text += ("\n%s\n" % announcement_hash)

    if not force:
        # deduplicate against announcements we have already processed
        announcement_list = merged_text.split("\n")
        unseen = filter(lambda a: a not in ANNOUNCEMENTS, announcement_list)
        merged_text = "\n".join(unseen).strip() + "\n"

    log.debug("Store announcement hash to %s" % announce_path)
    with open(tmp_path, "w") as f:
        f.write(merged_text)
        f.flush()

    # NOTE(review): on win32 this unlinks the tmp file just written, so the
    # rename below would fail -- presumably the destination was meant; verify.
    if sys.platform == 'win32' and os.path.exists(tmp_path):
        try:
            os.unlink(tmp_path)
        except:
            pass

    try:
        os.rename(tmp_path, announce_path)
    except:
        log.error("Failed to save announcement %s to %s" % (announcement_hash, announce_path))
        raise

    for stale_path in cleanup_paths:
        try:
            os.unlink(stale_path)
        except:
            pass

    text_dir = os.path.join(working_dir, "announcements")
    if not os.path.exists(text_dir):
        try:
            os.makedirs(text_dir)
        except:
            log.error("Failed to make directory %s" % text_dir)
            raise

    text_path = os.path.join(text_dir, "%s.txt" % announcement_hash)
    try:
        with open(text_path, "w") as f:
            f.write(announcement_text)
    except:
        log.error("Failed to save announcement text to %s" % text_path)
        raise

    log.debug("Stored announcement to %s" % (text_path))
Store a new announcement locally atomically .
36,554
def default_blockstack_api_opts(working_dir, config_file=None):
    """
    Get our default blockstack RESTful API opts, from a config file
    ([blockstack-api] section) or from sane defaults.
    """
    from .util import url_to_host_port, url_protocol

    if config_file is None:
        config_file = virtualchain.get_config_filename(get_default_virtualchain_impl(), working_dir)

    parser = SafeConfigParser()
    parser.read(config_file)

    indexer_url = None
    api_port = DEFAULT_API_PORT
    api_host = DEFAULT_API_HOST
    run_api = True

    if parser.has_section('blockstack-api'):
        if parser.has_option('blockstack-api', 'enabled'):
            run_api = parser.get('blockstack-api', 'enabled').lower() in ['true', '1', 'on']

        if parser.has_option('blockstack-api', 'api_port'):
            api_port = int(parser.get('blockstack-api', 'api_port'))

        if parser.has_option('blockstack-api', 'api_host'):
            api_host = parser.get('blockstack-api', 'api_host')

        if parser.has_option('blockstack-api', 'indexer_url'):
            # validate the URL's host/port and scheme before accepting it
            indexer_host, indexer_port = url_to_host_port(parser.get('blockstack-api', 'indexer_url'))
            indexer_protocol = url_protocol(parser.get('blockstack-api', 'indexer_url'))
            if indexer_protocol is None:
                indexer_protocol = 'http'
            indexer_url = parser.get('blockstack-api', 'indexer_url')

    if indexer_url is None:
        indexer_url = 'http://localhost:{}'.format(RPC_SERVER_PORT)

    blockstack_api_opts = {
        'indexer_url': indexer_url,
        'api_host': api_host,
        'api_port': api_port,
        'enabled': run_api,
    }

    # drop unset options
    blockstack_api_opts = dict((k, v) for (k, v) in blockstack_api_opts.items() if v is not None)
    return blockstack_api_opts
Get our default blockstack RESTful API opts from a config file or from sane defaults .
36,555
def interactive_prompt(message, parameters, default_opts):
    """
    Prompt the user for a series of parameters.
    Return a dict mapping each parameter name to the user-given value,
    falling back to default_opts (or None) on empty input.
    Exits the process on keyboard interrupt.
    """
    divider = '-' * max(len(line) for line in message.split('\n'))
    print(divider)
    print(message)
    print(divider)

    ret = {}
    for param in parameters:
        if param in default_opts:
            prompt_str = '{} (default: "{}"): '.format(param, default_opts[param])
        else:
            prompt_str = '{}: '.format(param)

        try:
            value = raw_input(prompt_str)
        except KeyboardInterrupt:
            log.debug('Exiting on keyboard interrupt')
            sys.exit(0)

        if len(value) > 0:
            ret[param] = value
        elif param in default_opts:
            ret[param] = default_opts[param]
        else:
            ret[param] = None

    return ret
Prompt the user for a series of parameters Return a dict mapping the parameter name to the user - given value .
36,556
def find_missing(message, all_params, given_opts, default_opts, header=None, prompt_missing=True):
    """
    Find, and interactively prompt the user for, missing parameters,
    given the list of all valid parameters and a dict of known options.

    Returns (updated given_opts, list of missing params, number of
    values filled in).
    """
    missing_params = list(set(all_params) - set(given_opts))
    num_prompted = 0

    if not missing_params:
        # nothing to fill in
        return given_opts, missing_params, num_prompted

    if not prompt_missing:
        # take the defaults silently
        filled = set(default_opts) - set(given_opts)
        num_prompted = len(filled)
        given_opts.update(default_opts)
    else:
        if header is not None:
            print('-' * len(header))
            print(header)
        answers = interactive_prompt(message, missing_params, default_opts)
        num_prompted = len(answers)
        given_opts.update(answers)

    return given_opts, missing_params, num_prompted
Find and interactively prompt the user for missing parameters given the list of all valid parameters and a dict of known options .
36,557
def opt_strip(prefix, opts):
    """
    Given a dict of opts, remove `prefix` from each key that starts
    with it; keys without the prefix are kept unchanged.
    """
    return dict(
        (name[len(prefix):] if name.startswith(prefix) else name, value)
        for name, value in opts.items()
    )
Given a dict of opts that start with prefix remove the prefix from each of them .
36,558
def opt_restore(prefix, opts):
    """
    Given a dict of opts, add the given prefix to each key.
    """
    restored = {}
    for name, value in opts.items():
        restored[prefix + name] = value
    return restored
Given a dict of opts add the given prefix to each key
36,559
def default_bitcoind_opts(config_file=None, prefix=False):
    """
    Get our default bitcoind options, from a config file or from sane
    defaults.  Unset options are dropped; the 'bitcoind_' key prefix is
    stripped unless prefix=True.
    """
    opts = virtualchain.get_bitcoind_config(config_file=config_file)
    opts = dict((k, v) for k, v in opts.items() if v is not None)
    if prefix:
        return opts
    return opt_strip('bitcoind_', opts)
Get our default bitcoind options such as from a config file or from sane defaults
36,560
def default_working_dir():
    """
    Get the default configuration directory for blockstackd:
    ~/.<virtual-chain-name>
    """
    import nameset.virtualchain_hooks as virtualchain_hooks
    chain_name = virtualchain_hooks.get_virtual_chain_name()
    return os.path.expanduser('~/.{}'.format(chain_name))
Get the default configuration directory for blockstackd
36,561
def write_config_file(opts, config_file):
    """
    Write our config file with the given options dict.  Each key is a
    section name, each value a dict of options for that section.
    Existing sections named in opts are replaced wholesale; the file is
    written with mode 0600.  Returns True.
    """
    parser = SafeConfigParser()
    if os.path.exists(config_file):
        parser.read(config_file)

    for sec_name, sec_opts in opts.items():
        # replace the section wholesale
        if parser.has_section(sec_name):
            parser.remove_section(sec_name)
        parser.add_section(sec_name)
        for opt_name, opt_value in sec_opts.items():
            if opt_value is None:
                opt_value = ''
            parser.set(sec_name, opt_name, '{}'.format(opt_value))

    with open(config_file, 'w') as fout:
        # restrict permissions before writing any secrets
        os.fchmod(fout.fileno(), 0o600)
        parser.write(fout)

    return True
Write our config file with the given options dict . Each key is a section name and each value is the list of options .
36,562
def load_configuration(working_dir):
    """
    Load the system configuration and set global variables.
    Return the configuration of the node on success.
    Return None if the config file's server version is obsolete.
    """
    import nameset.virtualchain_hooks as virtualchain_hooks

    opts = configure(working_dir)
    blockstack_opts = opts.get('blockstack', None)
    blockstack_api_opts = opts.get('blockstack-api', None)
    bitcoin_opts = opts['bitcoind']

    config_server_version = blockstack_opts.get('server_version', None)
    if config_server_version is None or versions_need_upgrade(config_server_version, VERSION):
        print >> sys.stderr, "Obsolete or unrecognizable config file ({}): '{}' != '{}'".format(
            virtualchain.get_config_filename(virtualchain_hooks, working_dir), config_server_version, VERSION)
        print >> sys.stderr, 'Please see the release notes for version {} for instructions to upgrade (in the release-notes/ folder).'.format(VERSION)
        return None

    # publish to module-level state
    set_bitcoin_opts(bitcoin_opts)
    set_blockstack_opts(blockstack_opts)
    set_blockstack_api_opts(blockstack_api_opts)

    return {
        'bitcoind': bitcoin_opts,
        'blockstack': blockstack_opts,
        'blockstack-api': blockstack_api_opts,
    }
Load the system configuration and set global variables Return the configuration of the node on success . Return None on failure
36,563
def check(state_engine, nameop, block_id, checked_ops):
    """
    Verify the validity of a NAMESPACE_READY operation.  It is only
    valid if it was sent by the same sender as the corresponding
    NAMESPACE_REVEAL and the namespace is still being imported.

    Returns True if accepted (and fills in sender_pubkey/address on the
    nameop); False otherwise.
    """
    namespace_id = nameop['namespace_id']
    sender = nameop['sender']

    if not state_engine.is_namespace_revealed(namespace_id):
        log.warning("Namespace '%s' is not revealed" % namespace_id)
        return False

    reveal = state_engine.get_namespace_reveal(namespace_id)
    if reveal['recipient'] != sender:
        log.warning("Namespace '%s' is not owned by '%s' (but by %s)" % (namespace_id, sender, reveal['recipient']))
        return False

    if state_engine.is_namespace_ready(namespace_id):
        log.warning("Namespace '%s' is already registered" % namespace_id)
        return False

    # carry over identity info from the reveal
    nameop['sender_pubkey'] = reveal['sender_pubkey']
    nameop['address'] = reveal['address']
    return True
Verify the validity of a NAMESPACE_READY operation . It is only valid if it has been imported by the same sender as the corresponding NAMESPACE_REVEAL and the namespace is still in the process of being imported .
36,564
def int_to_charset(val, charset):
    """
    Turn a non-negative integer into a string over the given charset
    (base-len(charset) encoding, most significant digit first).
    Raises ValueError on negative input.
    """
    if val < 0:
        raise ValueError('"val" must be a non-negative integer.')
    if val == 0:
        return charset[0]

    digits = []
    while val > 0:
        val, remainder = divmod(val, len(charset))
        digits.append(charset[remainder])
    digits.reverse()
    return "".join(digits)
Turn a non - negative integer into a string .
36,565
def charset_to_int(s, charset):
    """
    Turn a string over the given charset into a non-negative integer
    (inverse of int_to_charset).
    """
    base = len(charset)
    total = 0
    for ch in s:
        total = total * base + charset.index(ch)
    return total
Turn a string into a non - negative integer .
36,566
def change_charset(s, original_charset, target_charset):
    """
    Convert a string from one charset (base encoding) to another, by
    round-tripping through an integer.  Raises ValueError if s is not
    a string.
    """
    if not isinstance(s, str):
        raise ValueError('"s" must be a string.')
    return int_to_charset(charset_to_int(s, original_charset), target_charset)
Convert a string from one charset to another .
36,567
def autofill(*autofill_fields):
    """
    Decorator factory: automatically fill in extra useful fields that
    aren't stored in the db on the record returned by the wrapped
    reader.  Currently only 'opcode' is supported.
    """
    def decorator(reader):
        def wrapped_reader(*args, **kw):
            rec = reader(*args, **kw)
            if rec is None:
                return rec
            for field in autofill_fields:
                # NOTE(review): a record that already carries 'opcode'
                # falls into the raise branch -- confirm that is intended.
                if field == "opcode" and 'opcode' not in rec.keys():
                    assert 'op' in rec.keys(), "BUG: record is missing 'op'"
                    rec['opcode'] = op_get_opcode_name(rec['op'])
                else:
                    raise Exception("Unknown autofill field '%s'" % field)
            return rec
        return wrapped_reader
    return decorator
Decorator to automatically fill in extra useful fields that aren t stored in the db .
36,568
def get_readonly_instance(cls, working_dir, expected_snapshots={}):
    """
    Get a read-only handle to the blockstack-specific name db.
    Multiple read-only handles may exist.  Returns None if the
    virtualchain state engine fails to initialize.
    """
    import virtualchain_hooks
    db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
    # NOTE(review): the expected_snapshots argument is ignored here and an
    # empty dict is passed instead -- confirm whether that is deliberate.
    handle = BlockstackDB(db_path, DISPOSITION_RO, working_dir, get_genesis_block(), expected_snapshots={})
    if not handle.db_setup():
        log.error("Failed to set up virtualchain state engine")
        return None
    return handle
Get a read - only handle to the blockstack - specific name db . Multiple read - only handles may exist .
36,569
def make_opfields(cls):
    """
    Calculate the virtualchain-required opfields dict: map each op byte
    to its list of serialized fields.
    """
    return dict(
        (NAME_OPCODES[opname], fields)
        for opname, fields in SERIALIZE_FIELDS.items()
    )
Calculate the virtualchain-required opfields dict.
36,570
def get_state_paths(cls, impl, working_dir):
    """
    Get the paths to the relevant db files to back up: the base
    virtualchain state plus the atlas and subdomain databases.
    """
    extra_dbs = [
        os.path.join(working_dir, filename)
        for filename in ('atlas.db', 'subdomains.db', 'subdomains.db.queue')
    ]
    return super(BlockstackDB, cls).get_state_paths(impl, working_dir) + extra_dbs
Get the paths to the relevant db files to back up
36,571
def close(self):
    """
    Close the db and release memory.  Commits any pending transaction
    first; safe to call when the db is already closed.
    """
    if self.db is None:
        return
    self.db.commit()
    self.db.close()
    self.db = None
    return
Close the db and release memory
36,572
def get_import_keychain_path(cls, keychain_dir, namespace_id):
    """
    Get the path to the cached import keychain for a namespace:
    <keychain_dir>/<namespace_id>.keychain
    """
    return os.path.join(keychain_dir, "{}.keychain".format(namespace_id))
Get the path to the import keychain
36,573
def build_import_keychain(cls, keychain_dir, namespace_id, pubkey_hex):
    """
    Generate all possible NAME_IMPORT addresses from the
    NAMESPACE_REVEAL public key, using the cached keychain file when
    present.  Returns the list of addresses (reveal address last).
    """
    pubkey_addr = virtualchain.BitcoinPublicKey(str(pubkey_hex)).address()
    cached_keychain = cls.get_import_keychain_path(keychain_dir, namespace_id)

    if os.path.exists(cached_keychain):
        # fast path: reuse the cached keychain
        try:
            with open(cached_keychain, "r") as f:
                lines = f.readlines()
            cached_addrs = [l.strip() for l in lines]
            log.debug("Loaded cached import keychain for '%s' (%s)" % (pubkey_hex, pubkey_addr))
            return cached_addrs
        except Exception as e:
            # fall through and rebuild from scratch
            log.exception(e)

    pubkey_hex = str(pubkey_hex)
    public_keychain = keychain.PublicKeychain.from_public_key(pubkey_hex)
    child_addrs = []

    for i in xrange(0, NAME_IMPORT_KEYRING_SIZE):
        child_address = public_keychain.child(i).address()
        if virtualchain.version_byte == 111:
            # presumably testnet: re-encode with the active version byte
            old_child_address = child_address
            child_address = virtualchain.hex_hash160_to_address(virtualchain.address_to_hex_hash160(child_address))
            log.debug("Re-encode '%s' to '%s'" % (old_child_address, child_address))

        child_addrs.append(child_address)
        if i % 20 == 0 and i != 0:
            log.debug("%s children..." % i)

    child_addrs.append(pubkey_addr)
    log.debug("Done building import keychain for '%s' (%s)" % (pubkey_hex, pubkey_addr))

    # best-effort cache write; failure is non-fatal
    try:
        with open(cached_keychain, "w+") as f:
            for addr in child_addrs:
                f.write("%s\n" % addr)
            f.flush()
        log.debug("Cached keychain to '%s'" % cached_keychain)
    except Exception as e:
        log.exception(e)
        log.error("Unable to cache keychain for '%s' (%s)" % (pubkey_hex, pubkey_addr))

    return child_addrs
Generate all possible NAME_IMPORT addresses from the NAMESPACE_REVEAL public key
36,574
def load_import_keychain(cls, working_dir, namespace_id):
    """
    Get an import keychain from disk.
    Return the list of addresses, or None if it doesn't exist.
    Aborts the process on a read error.
    """
    cached_keychain = os.path.join(working_dir, "%s.keychain" % namespace_id)
    if not os.path.exists(cached_keychain):
        log.debug("No import keychain at '%s'" % cached_keychain)
        return None

    log.debug("Load import keychain '%s'" % cached_keychain)
    try:
        with open(cached_keychain, "r") as f:
            lines = f.readlines()
        addrs = [l.strip() for l in lines]
        log.debug("Loaded cached import keychain for '%s'" % namespace_id)
        return addrs
    except Exception as e:
        log.exception(e)
        log.error("FATAL: uncaught exception loading the import keychain")
        os.abort()
Get an import keychain from disk . Return None if it doesn t exist .
36,575
def commit_finished(self, block_id):
    """
    Called when the block is finished.  Commits all data and clears the
    per-block collision and vesting bookkeeping.
    """
    next_block = block_id + 1
    self.db.commit()
    # vesting for the next block must already have been computed
    assert next_block in self.vesting, 'BUG: failed to vest at {}'.format(block_id)
    self.clear_collisions(block_id)
    self.clear_vesting(next_block)
Called when the block is finished . Commits all data .
36,576
def log_commit(self, block_id, vtxindex, op, opcode, op_data):
    """
    Log a committed operation at (block_id, vtxindex), with its fields
    sorted and its history elided for readability.
    """
    summary = self.sanitize_op(op_data)
    if 'history' in summary:
        del summary['history']

    fields = ", ".join(["%s='%s'" % (k, summary[k]) for k in sorted(summary.keys())])
    log.debug("COMMIT %s (%s) at (%s, %s) data: %s", opcode, op, block_id, vtxindex, fields)
    return
Log a committed operation
36,577
def log_reject(self, block_id, vtxindex, op, op_data):
    """
    Log a rejected operation at (block_id, vtxindex), with its fields
    sorted and its history elided for readability.
    """
    summary = self.sanitize_op(op_data)
    if 'history' in summary:
        del summary['history']

    fields = ", ".join(["%s='%s'" % (k, summary[k]) for k in sorted(summary.keys())])
    log.debug("REJECT %s at (%s, %s) data: %s", op_get_opcode_name(op), block_id, vtxindex, fields)
    return
Log a rejected operation
36,578
def sanitize_op(self, op_data):
    """
    Remove unnecessary fields from an operation prior to committing it:
    any state-invariant tags added by our invariant decorators, plus
    derived fields like 'opcode'.  Missing mutate fields are added back
    as None so the committed record has a stable shape.
    """
    op_data = super(BlockstackDB, self).sanitize_op(op_data)

    # strip invariant tags
    for tag in get_state_invariant_tags():
        if tag in op_data.keys():
            del op_data[tag]

    # ensure every mutate field for this opcode family is present
    opcode_family = op_get_opcode_name(op_data['op'])
    for mutate_field in op_get_mutate_fields(opcode_family):
        if mutate_field not in op_data:
            log.debug("Adding NULL mutate field '%s.%s'" % (opcode_family, mutate_field))
            op_data[mutate_field] = None

    # 'opcode' is derived, not stored
    for extra_field in ['opcode']:
        if extra_field in op_data:
            del op_data[extra_field]

    return op_data
Remove unnecessary fields from an operation prior to committing it. This includes any invariant tags we've added with our invariant decorators.
36,579
def put_collisions(self, block_id, collisions):
    """
    Put collision state for a particular block.  A deep copy is stored
    so later mutation of the caller's dict cannot affect it.  Any
    operations checked at this block_id that collide with the given
    collision state will be rejected.
    """
    self.collisions[block_id] = copy.deepcopy(collisions)
Put collision state for a particular block . Any operations checked at this block_id that collide with the given collision state will be rejected .
36,580
def get_namespace(self, namespace_id, include_history=True):
    """
    Given a namespace ID, get the ready namespace op for it.
    """
    return namedb_get_namespace_ready(self.db.cursor(), namespace_id, include_history=include_history)
Given a namespace ID get the ready namespace op for it .
36,581
def get_DID_name(self, did):
    """
    Given a DID, get the name's current record.
    Return None if not found, or if the name was revoked.
    Raise ValueError if the DID is invalid.
    """
    did = str(did)
    try:
        did_info = parse_DID(did)
        assert did_info['name_type'] == 'name'
    except Exception as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)
        raise ValueError("Invalid DID: {}".format(did))

    # the DID's index selects the Nth name ever owned by this address
    cur = self.db.cursor()
    historic_name_info = namedb_get_historic_names_by_address(cur, did_info['address'],
                                                              offset=did_info['index'], count=1)
    if historic_name_info is None:
        return None

    name = historic_name_info[0]['name']
    block_height = historic_name_info[0]['block_id']
    vtxindex = historic_name_info[0]['vtxindex']
    log.debug("DID {} refers to {}-{}-{}".format(did, name, block_height, vtxindex))

    name_rec = self.get_name(name, include_history=True, include_expired=True)
    if name_rec is None:
        return None

    # walk the name's history forward from the DID's creation point,
    # stopping at the next preorder (i.e. a re-registration) and
    # bailing out entirely if the name was revoked along the way.
    latest_state = None
    found = False
    for height in sorted(name_rec['history'].keys()):
        if found:
            break
        if height < block_height:
            continue

        for state in name_rec['history'][height]:
            if height == block_height and state['vtxindex'] < vtxindex:
                # before the DID's creation tx in the same block
                continue
            if state['op'] == NAME_PREORDER:
                # name was re-created; stop at the last pre-preorder state
                found = True
                break
            if state['revoked']:
                log.debug("DID {} refers to {}-{}-{}, which is revoked at {}-{}".format(
                    did, name, block_height, vtxindex, height, state['vtxindex']))
                return None
            latest_state = state

    return latest_state
Given a DID get the name Return None if not found or if the name was revoked Raise if the DID is invalid
36,582
def get_account_tokens(self, address):
    """
    Get the list of token types that this address owns.
    """
    return namedb_get_account_tokens(self.db.cursor(), address)
Get the list of tokens that this address owns
36,583
def get_account(self, address, token_type):
    """
    Get the state of an account for a given token type.
    """
    return namedb_get_account(self.db.cursor(), address, token_type)
Get the state of an account for a given token type
36,584
def get_account_balance(self, account):
    """
    What's the balance of an account?  Asserts it is an integer type.
    """
    balance = namedb_get_account_balance(account)
    assert isinstance(balance, (int, long)), 'BUG: account balance of {} is {} (type {})'.format(
        account['address'], balance, type(balance))
    return balance
What s the balance of an account? Aborts if its negative
36,585
def get_account_history(self, address, offset=None, count=None):
    """
    Get the history of account transactions, with optional pagination.
    Returns the account state transitions keyed by block.
    """
    return namedb_get_account_history(self.db.cursor(), address, offset=offset, count=count)
Get the history of account transactions over a block range Returns a dict keyed by blocks which map to lists of account state transitions
36,586
def get_name_at(self, name, block_number, include_expired=False):
    """
    Generate and return the sequence of states a name record was in at
    a particular block number.
    """
    return namedb_get_name_at(self.db.cursor(), name, block_number, include_expired=include_expired)
Generate and return the sequence of of states a name record was in at a particular block number .
36,587
def get_namespace_at(self, namespace_id, block_number):
    """
    Generate and return the sequence of states a namespace record was
    in at a particular block number (expired namespaces included).
    """
    return namedb_get_namespace_at(self.db.cursor(), namespace_id, block_number, include_expired=True)
Generate and return the sequence of states a namespace record was in at a particular block number .
36,588
def get_account_at(self, address, block_number):
    """
    Get the sequence of states an account was in at a given block.
    Returns a list of states.
    """
    return namedb_get_account_at(self.db.cursor(), address, block_number)
Get the sequence of states an account was in at a given block . Returns a list of states
36,589
def get_name_history(self, name, offset=None, count=None, reverse=False):
    """
    Get the historic states for a name, grouped by block height, with
    optional pagination and ordering.
    """
    return namedb_get_history(self.db.cursor(), name, offset=offset, count=count, reverse=reverse)
Get the historic states for a name grouped by block height .
36,590
def is_name_zonefile_hash(self, name, zonefile_hash):
    """
    Was this zone file hash ever sent by this name?
    """
    return namedb_is_name_zonefile_hash(self.db.cursor(), name, zonefile_hash)
Was a zone file sent by a name?
36,591
def get_all_blockstack_ops_at(self, block_number, offset=None, count=None, include_history=None, restore_history=None):
    """
    Get all name, namespace, and account records affected at a
    particular block, in the state they were in at that block.
    Paginate if offset/count are given.  The include_history and
    restore_history arguments are deprecated and ignored.
    """
    if include_history is not None:
        log.warn("DEPRECATED use of include_history")
    if restore_history is not None:
        log.warn("DEPRECATED use of restore_history")

    log.debug("Get all accepted operations at %s in %s" % (block_number, self.db_filename))
    records = namedb_get_all_blockstack_ops_at(self.db, block_number, offset=offset, count=count)

    # fill in the derived opcode name for each record
    for record in records:
        assert 'op' in record
        record['opcode'] = op_get_opcode_name(record['op'])

    return records
Get all name namespace and account records affected at a particular block in the state they were at the given block number . Paginate if offset count are given .
36,592
def get_name_from_name_hash128(self, name):
    """
    Get the name from its hash128, as of the last processed block.
    """
    return namedb_get_name_from_name_hash128(self.db.cursor(), name, self.lastblock)
Get the name from a name hash
36,593
def get_num_historic_names_by_address(self, address):
    """
    Get the number of names historically owned by an address.
    """
    return namedb_get_num_historic_names_by_address(self.db.cursor(), address)
Get the number of names historically owned by an address
36,594
def get_names_owned_by_sender(self, sender_pubkey, lastblock=None):
    """
    Get the set of names owned by a particular script-pubkey, as of
    the given block (defaults to the last processed block).
    """
    if lastblock is None:
        lastblock = self.lastblock
    return namedb_get_names_by_sender(self.db.cursor(), sender_pubkey, lastblock)
Get the set of names owned by a particular script - pubkey .
36,595
def get_num_names(self, include_expired=False):
    """
    Get the number of names that exist, optionally counting expired
    names too.
    """
    return namedb_get_num_names(self.db.cursor(), self.lastblock, include_expired=include_expired)
Get the number of names that exist .
36,596
def get_all_names(self, offset=None, count=None, include_expired=False):
    """
    Get the set of all registered names, with optional pagination.
    Negative offset/count values are treated as "no limit".
    Returns the list of names.
    """
    offset = None if (offset is not None and offset < 0) else offset
    count = None if (count is not None and count < 0) else count
    cur = self.db.cursor()
    return namedb_get_all_names(cur, self.lastblock, offset=offset, count=count, include_expired=include_expired)
Get the set of all registered names with optional pagination Returns the list of names .
36,597
def get_num_names_in_namespace(self, namespace_id):
    """
    Get the number of names in a namespace.
    """
    return namedb_get_num_names_in_namespace(self.db.cursor(), namespace_id, self.lastblock)
Get the number of names in a namespace
36,598
def get_names_in_namespace(self, namespace_id, offset=None, count=None):
    """
    Get the set of all registered names in a particular namespace, with
    optional pagination.  Negative offset/count values are treated as
    "no limit".  Returns the list of names.
    """
    offset = None if (offset is not None and offset < 0) else offset
    count = None if (count is not None and count < 0) else count
    cur = self.db.cursor()
    return namedb_get_names_in_namespace(cur, namespace_id, self.lastblock, offset=offset, count=count)
Get the set of all registered names in a particular namespace . Returns the list of names .
36,599
def get_all_namespace_ids(self):
    """
    Get the set of all existing, READY namespace IDs.
    """
    return namedb_get_all_namespace_ids(self.db.cursor())
Get the set of all existing READY namespace IDs .