idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
224,400 | def atlas_peer_dequeue_all ( peer_queue = None ) : peers = [ ] with AtlasPeerQueueLocked ( peer_queue ) as pq : while len ( pq ) > 0 : peers . append ( pq . pop ( 0 ) ) return peers | Get all queued peers | 61 | 5 |
224,401 | def atlas_zonefile_push_enqueue ( zonefile_hash , name , txid , zonefile_data , zonefile_queue = None , con = None , path = None ) : res = False bits = atlasdb_get_zonefile_bits ( zonefile_hash , path = path , con = con ) if len ( bits ) == 0 : # invalid hash return with AtlasZonefileQueueLocked ( zonefile_queue ) as zfq : if len ( zfq ) < MAX_QUEUED_ZONEFILES : zfdata = { 'zonefile_hash' : zonefile_hash , 'zonefile' : zonefile_data , 'name' : name , 'txid' : txid } zfq . append ( zfdata ) res = True return res | Enqueue the given zonefile into our push queue from which it will be replicated to storage and sent out to other peers who don t have it . | 178 | 30 |
224,402 | def atlas_zonefile_push_dequeue ( zonefile_queue = None ) : ret = None with AtlasZonefileQueueLocked ( zonefile_queue ) as zfq : if len ( zfq ) > 0 : ret = zfq . pop ( 0 ) return ret | Dequeue a zonefile s information to replicate Return None if there are none queued | 63 | 17 |
224,403 | def atlas_zonefile_push ( my_hostport , peer_hostport , zonefile_data , timeout = None , peer_table = None ) : if timeout is None : timeout = atlas_push_zonefiles_timeout ( ) zonefile_hash = get_zonefile_data_hash ( zonefile_data ) zonefile_data_b64 = base64 . b64encode ( zonefile_data ) host , port = url_to_host_port ( peer_hostport ) RPC = get_rpc_client_class ( ) rpc = RPC ( host , port , timeout = timeout , src = my_hostport ) status = False assert not atlas_peer_table_is_locked_by_me ( ) try : push_info = blockstack_put_zonefiles ( peer_hostport , [ zonefile_data_b64 ] , timeout = timeout , my_hostport = my_hostport , proxy = rpc ) if 'error' not in push_info : if push_info [ 'saved' ] == 1 : # woo! saved = True except ( socket . timeout , socket . gaierror , socket . herror , socket . error ) , se : atlas_log_socket_error ( "put_zonefiles(%s)" % peer_hostport , peer_hostport , se ) except AssertionError , ae : log . exception ( ae ) log . error ( "Invalid server response from %s" % peer_hostport ) except Exception , e : log . exception ( e ) log . error ( "Failed to push zonefile %s to %s" % ( zonefile_hash , peer_hostport ) ) with AtlasPeerTableLocked ( peer_table ) as ptbl : atlas_peer_update_health ( peer_hostport , status , peer_table = ptbl ) return status | Push the given zonefile to the given peer Return True on success Return False on failure | 411 | 17 |
224,404 | def atlas_node_init ( my_hostname , my_portnum , atlasdb_path , zonefile_dir , working_dir ) : atlas_state = { } atlas_state [ 'peer_crawler' ] = AtlasPeerCrawler ( my_hostname , my_portnum , atlasdb_path , working_dir ) atlas_state [ 'health_checker' ] = AtlasHealthChecker ( my_hostname , my_portnum , atlasdb_path ) atlas_state [ 'zonefile_crawler' ] = AtlasZonefileCrawler ( my_hostname , my_portnum , atlasdb_path , zonefile_dir ) # atlas_state['zonefile_pusher'] = AtlasZonefilePusher(my_hostname, my_portnum, atlasdb_path, zonefile_dir) return atlas_state | Start up the atlas node . Return a bundle of atlas state | 200 | 14 |
224,405 | def atlas_node_start ( atlas_state ) : for component in atlas_state . keys ( ) : log . debug ( "Starting Atlas component '%s'" % component ) atlas_state [ component ] . start ( ) | Start up atlas threads | 52 | 5 |
224,406 | def atlas_node_add_callback ( atlas_state , callback_name , callback ) : if callback_name == 'store_zonefile' : atlas_state [ 'zonefile_crawler' ] . set_store_zonefile_callback ( callback ) else : raise ValueError ( "Unrecognized callback {}" . format ( callback_name ) ) | Add a callback to the initialized atlas state | 80 | 9 |
224,407 | def atlas_node_stop ( atlas_state ) : for component in atlas_state . keys ( ) : log . debug ( "Stopping Atlas component '%s'" % component ) atlas_state [ component ] . ask_join ( ) atlas_state [ component ] . join ( ) return True | Stop the atlas node threads | 68 | 6 |
224,408 | def canonical_peer ( self , peer ) : their_host , their_port = url_to_host_port ( peer ) if their_host in [ '127.0.0.1' , '::1' ] : their_host = 'localhost' return "%s:%s" % ( their_host , their_port ) | Get the canonical peer name | 74 | 5 |
224,409 | def remove_unhealthy_peers ( self , count , con = None , path = None , peer_table = None , min_request_count = 10 , min_health = MIN_PEER_HEALTH ) : if path is None : path = self . atlasdb_path removed = [ ] rank_peer_list = atlas_rank_peers_by_health ( peer_table = peer_table , with_rank = True ) for rank , peer in rank_peer_list : reqcount = atlas_peer_get_request_count ( peer , peer_table = peer_table ) if reqcount >= min_request_count and rank < min_health and not atlas_peer_is_whitelisted ( peer , peer_table = peer_table ) and not atlas_peer_is_blacklisted ( peer , peer_table = peer_table ) : removed . append ( peer ) random . shuffle ( removed ) if len ( removed ) > count : removed = removed [ : count ] for peer in removed : log . debug ( "Remove unhealthy peer %s" % ( peer ) ) atlasdb_remove_peer ( peer , con = con , path = path , peer_table = peer_table ) return removed | Remove up to | 271 | 3 |
224,410 | def get_current_peers ( self , peer_table = None ) : # get current peers current_peers = None with AtlasPeerTableLocked ( peer_table ) as ptbl : current_peers = ptbl . keys ( ) [ : ] return current_peers | Get the current set of peers | 62 | 6 |
224,411 | def canonical_new_peer_list ( self , peers_to_add ) : new_peers = list ( set ( self . new_peers + peers_to_add ) ) random . shuffle ( new_peers ) # canonicalize tmp = [ ] for peer in new_peers : tmp . append ( self . canonical_peer ( peer ) ) new_peers = tmp # don't talk to myself if self . my_hostport in new_peers : new_peers . remove ( self . my_hostport ) return new_peers | Make a list of canonical new peers using the self . new_peers and the given peers to add | 122 | 21 |
224,412 | def step ( self , con = None , path = None , peer_table = None , local_inv = None ) : if path is None : path = self . atlasdb_path peer_hostports = [ ] stale_peers = [ ] num_peers = None peer_hostports = None with AtlasPeerTableLocked ( peer_table ) as ptbl : num_peers = len ( ptbl . keys ( ) ) peer_hostports = ptbl . keys ( ) [ : ] # who are we going to ping? # someone we haven't pinged in a while, chosen at random for peer in peer_hostports : if not atlas_peer_has_fresh_zonefile_inventory ( peer , peer_table = ptbl ) : # haven't talked to this peer in a while stale_peers . append ( peer ) log . debug ( "Peer %s has a stale zonefile inventory" % peer ) if len ( stale_peers ) > 0 : log . debug ( "Refresh zonefile inventories for %s peers" % len ( stale_peers ) ) for peer_hostport in stale_peers : # refresh everyone log . debug ( "%s: Refresh zonefile inventory for %s" % ( self . hostport , peer_hostport ) ) res = atlas_peer_refresh_zonefile_inventory ( self . hostport , peer_hostport , 0 , con = con , path = path , peer_table = peer_table , local_inv = local_inv ) if res is None : log . warning ( "Failed to refresh zonefile inventory for %s" % peer_hostport ) return | Find peers with stale zonefile inventory data and refresh them . | 361 | 12 |
224,413 | def run ( self , peer_table = None ) : self . running = True while self . running : local_inv = atlas_get_zonefile_inventory ( ) t1 = time_now ( ) self . step ( peer_table = peer_table , local_inv = local_inv , path = self . atlasdb_path ) t2 = time_now ( ) # don't go too fast if t2 - t1 < PEER_HEALTH_NEIGHBOR_WORK_INTERVAL : deadline = time_now ( ) + PEER_HEALTH_NEIGHBOR_WORK_INTERVAL - ( t2 - t1 ) while time_now ( ) < deadline and self . running : time_sleep ( self . hostport , self . __class__ . __name__ , 1.0 ) if not self . running : break | Loop forever pinging someone every pass . | 186 | 8 |
224,414 | def set_zonefile_present ( self , zfhash , block_height , con = None , path = None ) : was_present = atlasdb_set_zonefile_present ( zfhash , True , con = con , path = path ) # tell anyone who cares that we got this zone file, if it was new if not was_present and self . store_zonefile_cb : log . debug ( '{} was new, so passing it along to zonefile storage watchers...' . format ( zfhash ) ) self . store_zonefile_cb ( zfhash , block_height ) else : log . debug ( '{} was seen before, so not passing it along to zonefile storage watchers' . format ( zfhash ) ) | Set a zonefile as present and if it was previously absent inform the storage listener | 167 | 16 |
224,415 | def find_zonefile_origins ( self , missing_zfinfo , peer_hostports ) : zonefile_origins = { } # map peer hostport to list of zonefile hashes # which peers can serve each zonefile? for zfhash in missing_zfinfo . keys ( ) : for peer_hostport in peer_hostports : if not zonefile_origins . has_key ( peer_hostport ) : zonefile_origins [ peer_hostport ] = [ ] if peer_hostport in missing_zfinfo [ zfhash ] [ 'peers' ] : zonefile_origins [ peer_hostport ] . append ( zfhash ) return zonefile_origins | Find out which peers can serve which zonefiles | 156 | 9 |
224,416 | def step ( self , peer_table = None , zonefile_queue = None , path = None ) : if path is None : path = self . atlasdb_path if BLOCKSTACK_TEST : log . debug ( "%s: %s step" % ( self . hostport , self . __class__ . __name__ ) ) if self . push_timeout is None : self . push_timeout = atlas_push_zonefiles_timeout ( ) zfinfo = atlas_zonefile_push_dequeue ( zonefile_queue = zonefile_queue ) if zfinfo is None : return 0 zfhash = zfinfo [ 'zonefile_hash' ] zfdata_txt = zfinfo [ 'zonefile' ] name = zfinfo [ 'name' ] txid = zfinfo [ 'txid' ] zfbits = atlasdb_get_zonefile_bits ( zfhash , path = path ) if len ( zfbits ) == 0 : # not recognized return 0 # it's a valid zonefile. store it. rc = add_atlas_zonefile_data ( str ( zfdata_txt ) , self . zonefile_dir ) if not rc : log . error ( "Failed to replicate zonefile %s to external storage" % zfhash ) peers = None # see if we can send this somewhere with AtlasPeerTableLocked ( peer_table ) as ptbl : peers = atlas_zonefile_find_push_peers ( zfhash , peer_table = ptbl , zonefile_bits = zfbits ) if len ( peers ) == 0 : # everyone has it log . debug ( "%s: All peers have zonefile %s" % ( self . hostport , zfhash ) ) return 0 # push it off ret = 0 for peer in peers : log . debug ( "%s: Push to %s" % ( self . hostport , peer ) ) atlas_zonefile_push ( self . hostport , peer , zfdata_txt , timeout = self . push_timeout ) ret += 1 return ret | Run one step of this algorithm . Push the zonefile to all the peers that need it . Return the number of peers we sent to | 461 | 27 |
224,417 | def queuedb_create ( path ) : global QUEUE_SQL , ERROR_SQL lines = [ l + ";" for l in QUEUE_SQL . split ( ";" ) ] con = sqlite3 . connect ( path , isolation_level = None ) db_query_execute ( con , 'pragma mmap_size=536870912' , ( ) ) for line in lines : db_query_execute ( con , line , ( ) ) con . commit ( ) con . row_factory = queuedb_row_factory return con | Create a sqlite3 db at the given path . Create all the tables and indexes we need . Raises if the table already exists | 124 | 27 |
224,418 | def queuedb_row_factory ( cursor , row ) : d = { } for idx , col in enumerate ( cursor . description ) : d [ col [ 0 ] ] = row [ idx ] return d | Dict row factory | 48 | 4 |
224,419 | def queuedb_findall ( path , queue_id , name = None , offset = None , limit = None ) : sql = "SELECT * FROM queue WHERE queue_id = ? ORDER BY rowid ASC" args = ( queue_id , ) if name : sql += ' AND name = ?' args += ( name , ) if limit : sql += ' LIMIT ?' args += ( limit , ) if offset : sql += ' OFFSET ?' args += ( offset , ) sql += ';' db = queuedb_open ( path ) if db is None : raise Exception ( "Failed to open %s" % path ) cur = db . cursor ( ) rows = queuedb_query_execute ( cur , sql , args ) count = 0 ret = [ ] for row in rows : dat = { } dat . update ( row ) ret . append ( dat ) db . close ( ) return ret | Get all queued entries for a queue and a name . If name is None then find all queue entries | 196 | 21 |
224,420 | def queuedb_append ( path , queue_id , name , data ) : sql = "INSERT INTO queue VALUES (?,?,?);" args = ( name , queue_id , data ) db = queuedb_open ( path ) if db is None : raise Exception ( "Failed to open %s" % path ) cur = db . cursor ( ) res = queuedb_query_execute ( cur , sql , args ) db . commit ( ) db . close ( ) return True | Append an element to the back of the queue . Return True on success Raise on error | 107 | 18 |
224,421 | def queuedb_remove ( path , entry , cur = None ) : sql = "DELETE FROM queue WHERE queue_id = ? AND name = ?;" args = ( entry [ 'queue_id' ] , entry [ 'name' ] ) cursor = None if cur : cursor = cur else : db = queuedb_open ( path ) if db is None : raise Exception ( "Failed to open %s" % path ) cursor = db . cursor ( ) res = queuedb_query_execute ( cursor , sql , args ) if cur is None : db . commit ( ) db . close ( ) return True | Remove an element from a queue . Return True on success Raise on error | 134 | 14 |
224,422 | def queuedb_removeall ( path , entries ) : db = queuedb_open ( path ) if db is None : raise Exception ( "Failed to open %s" % path ) cursor = db . cursor ( ) queuedb_query_execute ( cursor , 'BEGIN' , ( ) ) for entry in entries : queuedb_remove ( path , entry , cur = cursor ) queuedb_query_execute ( cursor , 'END' , ( ) ) db . commit ( ) db . close ( ) return True | Remove all entries from a queue | 115 | 6 |
224,423 | def check_payment_in_stacks ( state_engine , nameop , state_op_type , fee_block_id ) : name = nameop [ 'name' ] namespace_id = get_namespace_from_name ( name ) name_without_namespace = get_name_from_fq_name ( name ) namespace = state_engine . get_namespace ( namespace_id ) stacks_payment_info = get_stacks_payment ( state_engine , nameop , state_op_type ) if stacks_payment_info [ 'status' ] : # got a stacks payment! check price and make sure we paid the right amount tokens_paid = stacks_payment_info [ 'tokens_paid' ] token_units = stacks_payment_info [ 'token_units' ] log . debug ( 'Transaction pays {} units of {} for {}, even though its namespace was priced in BTC' . format ( tokens_paid , token_units , name ) ) stacks_price = price_name_stacks ( name_without_namespace , namespace , fee_block_id ) # price in Stacks, but following the BTC-given price curve res = check_token_payment ( name , stacks_price , stacks_payment_info ) if res [ 'status' ] : # success return { 'status' : True , 'tokens_paid' : tokens_paid , 'token_units' : token_units } return { 'status' : False } | Verify that if tokens were paid for a name priced in BTC that enough were paid . Does not check account balances or namespace types ; it only inspects the transaction data . | 322 | 35 |
224,424 | def check_payment ( state_engine , state_op_type , nameop , fee_block_id , token_address , burn_address , name_fee , block_id ) : assert state_op_type in [ 'NAME_REGISTRATION' , 'NAME_RENEWAL' ] , 'Invalid op type {}' . format ( state_op_type ) assert name_fee is not None assert isinstance ( name_fee , ( int , long ) ) name = nameop [ 'name' ] namespace_id = get_namespace_from_name ( name ) namespace = state_engine . get_namespace ( namespace_id ) res = None log . debug ( '{} is a version-0x{} namespace' . format ( namespace [ 'namespace_id' ] , namespace [ 'version' ] ) ) # check name fee, depending on which version. if namespace [ 'version' ] == NAMESPACE_VERSION_PAY_TO_BURN : res = check_payment_v1 ( state_engine , state_op_type , nameop , fee_block_id , token_address , burn_address , name_fee , block_id ) elif namespace [ 'version' ] == NAMESPACE_VERSION_PAY_TO_CREATOR : res = check_payment_v2 ( state_engine , state_op_type , nameop , fee_block_id , token_address , burn_address , name_fee , block_id ) elif namespace [ 'version' ] == NAMESPACE_VERSION_PAY_WITH_STACKS : res = check_payment_v3 ( state_engine , state_op_type , nameop , fee_block_id , token_address , burn_address , name_fee , block_id ) else : # unrecognized namespace rules log . warning ( "Namespace {} has version bits 0x{:x}, which has unknown registration rules" . format ( namespace [ 'namespace_id' ] , namespace [ 'version' ] ) ) return { 'status' : False } if not res [ 'status' ] : return res tokens_paid = res [ 'tokens_paid' ] token_units = res [ 'token_units' ] return { 'status' : True , 'tokens_paid' : tokens_paid , 'token_units' : token_units } | Verify that the right payment was made in the right cryptocurrency units . Does not check any accounts or modify the nameop in any way ; it only checks that the name was paid for by the transaction . | 525 | 41 |
224,425 | def check ( state_engine , nameop , block_id , checked_ops ) : namespace_id_hash = nameop [ 'preorder_hash' ] consensus_hash = nameop [ 'consensus_hash' ] token_fee = nameop [ 'token_fee' ] # cannot be preordered already if not state_engine . is_new_namespace_preorder ( namespace_id_hash ) : log . warning ( "Namespace preorder '%s' already in use" % namespace_id_hash ) return False # has to have a reasonable consensus hash if not state_engine . is_consensus_hash_valid ( block_id , consensus_hash ) : valid_consensus_hashes = state_engine . get_valid_consensus_hashes ( block_id ) log . warning ( "Invalid consensus hash '%s': expected any of %s" % ( consensus_hash , "," . join ( valid_consensus_hashes ) ) ) return False # has to have paid a fee if not 'op_fee' in nameop : log . warning ( "Missing namespace preorder fee" ) return False # paid to the right burn address if nameop [ 'burn_address' ] != BLOCKSTACK_BURN_ADDRESS : log . warning ( "Invalid burn address: expected {}, got {}" . format ( BLOCKSTACK_BURN_ADDRESS , nameop [ 'burn_address' ] ) ) return False # token burn fee must be present, if we're in the right epoch for it epoch_features = get_epoch_features ( block_id ) if EPOCH_FEATURE_STACKS_BUY_NAMESPACES in epoch_features : # must pay in STACKs if 'token_fee' not in nameop : log . warning ( "Missing token fee" ) return False token_fee = nameop [ 'token_fee' ] token_address = nameop [ 'address' ] token_type = TOKEN_TYPE_STACKS # was a token fee paid? if token_fee is None : log . warning ( "No tokens paid by this NAMESPACE_PREORDER" ) return False # does this account have enough balance? account_info = state_engine . get_account ( token_address , token_type ) if account_info is None : log . warning ( "No account for {} ({})" . format ( token_address , token_type ) ) return False account_balance = state_engine . get_account_balance ( account_info ) assert isinstance ( account_balance , ( int , long ) ) , 'BUG: account_balance of {} is {} (type {})' . 
format ( token_address , account_balance , type ( account_balance ) ) assert isinstance ( token_fee , ( int , long ) ) , 'BUG: token_fee is {} (type {})' . format ( token_fee , type ( token_fee ) ) if account_balance < token_fee : # can't afford log . warning ( "Account {} has balance {} {}, but needs to pay {} {}" . format ( token_address , account_balance , token_type , token_fee , token_type ) ) return False # debit this account when we commit state_preorder_put_account_payment_info ( nameop , token_address , token_type , token_fee ) # NOTE: must be a string, to avoid overflow nameop [ 'token_fee' ] = '{}' . format ( token_fee ) nameop [ 'token_units' ] = TOKEN_TYPE_STACKS else : # must pay in BTC # not paying in tokens, but say so! state_preorder_put_account_payment_info ( nameop , None , None , None ) nameop [ 'token_fee' ] = '0' nameop [ 'token_units' ] = 'BTC' return True | Given a NAMESPACE_PREORDER nameop see if we can preorder it . It must be unqiue . | 856 | 27 |
224,426 | def snapshot_peek_number ( fd , off ) : # read number of 8 bytes fd . seek ( off - 8 , os . SEEK_SET ) value_hex = fd . read ( 8 ) if len ( value_hex ) != 8 : return None try : value = int ( value_hex , 16 ) except ValueError : return None return value | Read the last 8 bytes of fd and interpret it as an int . | 79 | 15 |
224,427 | def get_file_hash ( fd , hashfunc , fd_len = None ) : h = hashfunc ( ) fd . seek ( 0 , os . SEEK_SET ) count = 0 while True : buf = fd . read ( 65536 ) if len ( buf ) == 0 : break if fd_len is not None : if count + len ( buf ) > fd_len : buf = buf [ : fd_len - count ] h . update ( buf ) count += len ( buf ) hashed = h . hexdigest ( ) return hashed | Get the hex - encoded hash of the fd s data | 125 | 12 |
224,428 | def fast_sync_sign_snapshot ( snapshot_path , private_key , first = False ) : if not os . path . exists ( snapshot_path ) : log . error ( "No such file or directory: {}" . format ( snapshot_path ) ) return False file_size = 0 payload_size = 0 write_offset = 0 try : sb = os . stat ( snapshot_path ) file_size = sb . st_size assert file_size > 8 except Exception as e : log . exception ( e ) return False num_sigs = 0 snapshot_hash = None with open ( snapshot_path , 'r+' ) as f : if not first : info = fast_sync_inspect ( f ) if 'error' in info : log . error ( "Failed to inspect {}: {}" . format ( snapshot_path , info [ 'error' ] ) ) return False num_sigs = len ( info [ 'signatures' ] ) write_offset = info [ 'sig_append_offset' ] payload_size = info [ 'payload_size' ] else : # no one has signed yet. write_offset = file_size num_sigs = 0 payload_size = file_size # hash the file and sign the (bin-encoded) hash privkey_hex = keylib . ECPrivateKey ( private_key ) . to_hex ( ) hash_hex = get_file_hash ( f , hashlib . sha256 , fd_len = payload_size ) sigb64 = sign_digest ( hash_hex , privkey_hex , hashfunc = hashlib . sha256 ) if BLOCKSTACK_TEST : log . debug ( "Signed {} with {} to make {}" . format ( hash_hex , keylib . ECPrivateKey ( private_key ) . public_key ( ) . to_hex ( ) , sigb64 ) ) # append f . seek ( write_offset , os . SEEK_SET ) f . write ( sigb64 ) f . write ( '{:08x}' . format ( len ( sigb64 ) ) ) # append number of signatures num_sigs += 1 f . write ( '{:08x}' . format ( num_sigs ) ) f . flush ( ) os . fsync ( f . fileno ( ) ) return True | Append a signature to the end of a snapshot path with the given private key . | 518 | 17 |
224,429 | def fast_sync_snapshot_compress ( snapshot_dir , export_path ) : snapshot_dir = os . path . abspath ( snapshot_dir ) export_path = os . path . abspath ( export_path ) if os . path . exists ( export_path ) : return { 'error' : 'Snapshot path exists: {}' . format ( export_path ) } old_dir = os . getcwd ( ) count_ref = [ 0 ] def print_progress ( tarinfo ) : count_ref [ 0 ] += 1 if count_ref [ 0 ] % 100 == 0 : log . debug ( "{} files compressed..." . format ( count_ref [ 0 ] ) ) return tarinfo try : os . chdir ( snapshot_dir ) with tarfile . TarFile . bz2open ( export_path , "w" ) as f : f . add ( "." , filter = print_progress ) except : os . chdir ( old_dir ) raise finally : os . chdir ( old_dir ) return { 'status' : True } | Given the path to a directory compress it and export it to the given path . | 232 | 16 |
224,430 | def fast_sync_snapshot_decompress ( snapshot_path , output_dir ) : if not tarfile . is_tarfile ( snapshot_path ) : return { 'error' : 'Not a tarfile-compatible archive: {}' . format ( snapshot_path ) } if not os . path . exists ( output_dir ) : os . makedirs ( output_dir ) with tarfile . TarFile . bz2open ( snapshot_path , 'r' ) as f : tarfile . TarFile . extractall ( f , path = output_dir ) return { 'status' : True } | Given the path to a snapshot file decompress it and write its contents to the given output directory | 132 | 19 |
224,431 | def fast_sync_fetch ( working_dir , import_url ) : try : fd , tmppath = tempfile . mkstemp ( prefix = '.blockstack-fast-sync-' , dir = working_dir ) except Exception , e : log . exception ( e ) return None log . debug ( "Fetch {} to {}..." . format ( import_url , tmppath ) ) try : path , headers = urllib . urlretrieve ( import_url , tmppath ) except Exception , e : os . close ( fd ) log . exception ( e ) return None os . close ( fd ) return tmppath | Get the data for an import snapshot . Store it to a temporary path Return the path on success Return None on error | 143 | 23 |
224,432 | def state_check_collisions ( state_engine , nameop , history_id_key , block_id , checked_ops , collision_checker ) : # verify no collisions against already-accepted names collision_check = getattr ( state_engine , collision_checker , None ) try : assert collision_check is not None , "Collision-checker '%s' not defined" % collision_checker assert hasattr ( collision_check , "__call__" ) , "Collision-checker '%s' is not callable" % collision_checker assert history_id_key in nameop . keys ( ) , "History ID key '%s' not in name operation" % ( history_id_key ) assert 'op' in nameop . keys ( ) , "BUG: no op in nameop" except Exception , e : log . exception ( e ) log . error ( "FATAL: incorrect state_create() decorator" ) sys . exit ( 1 ) rc = collision_check ( nameop [ history_id_key ] , block_id , checked_ops ) return rc | See that there are no state - creating or state - preordering collisions at this block for this history ID . Return True if collided ; False if not | 243 | 30 |
224,433 | def state_create_is_valid ( nameop ) : assert '__state_create__' in nameop , "Not tagged with @state_create" assert nameop [ '__state_create__' ] , "BUG: tagged False by @state_create" assert '__preorder__' in nameop , "No preorder" assert '__table__' in nameop , "No table given" assert '__history_id_key__' in nameop , "No history ID key given" assert nameop [ '__history_id_key__' ] in nameop , "No history ID given" assert '__always_set__' in nameop , "No always-set fields given" return True | Is a nameop a valid state - preorder operation? | 156 | 12 |
224,434 | def state_transition_is_valid ( nameop ) : assert '__state_transition__' in nameop , "Not tagged with @state_transition" assert nameop [ '__state_transition__' ] , "BUG: @state_transition tagged False" assert '__history_id_key__' in nameop , "Missing __history_id_key__" history_id_key = nameop [ '__history_id_key__' ] assert history_id_key in [ "name" , "namespace_id" ] , "Invalid history ID key '%s'" % history_id_key assert '__table__' in nameop , "Missing __table__" assert '__always_set__' in nameop , "No always-set fields given" assert '__account_payment_info__' in nameop , 'No account payment information present' return True | Is this a valid state transition? | 199 | 7 |
224,435 | def _read_atlas_zonefile ( zonefile_path , zonefile_hash ) : with open ( zonefile_path , "rb" ) as f : data = f . read ( ) # sanity check if zonefile_hash is not None : if not verify_zonefile ( data , zonefile_hash ) : log . debug ( "Corrupt zonefile '%s'" % zonefile_hash ) return None return data | Read and verify an atlas zone file | 93 | 8 |
224,436 | def get_atlas_zonefile_data ( zonefile_hash , zonefile_dir , check = True ) : zonefile_path = atlas_zonefile_path ( zonefile_dir , zonefile_hash ) zonefile_path_legacy = atlas_zonefile_path_legacy ( zonefile_dir , zonefile_hash ) for zfp in [ zonefile_path , zonefile_path_legacy ] : if not os . path . exists ( zfp ) : continue if check : res = _read_atlas_zonefile ( zfp , zonefile_hash ) else : res = _read_atlas_zonefile ( zfp , None ) if res : return res return None | Get a serialized cached zonefile from local disk Return None if not found | 157 | 15 |
224,437 | def store_atlas_zonefile_data ( zonefile_data , zonefile_dir , fsync = True ) : if not os . path . exists ( zonefile_dir ) : os . makedirs ( zonefile_dir , 0700 ) zonefile_hash = get_zonefile_data_hash ( zonefile_data ) # only store to the latest supported directory zonefile_path = atlas_zonefile_path ( zonefile_dir , zonefile_hash ) zonefile_dir_path = os . path . dirname ( zonefile_path ) if os . path . exists ( zonefile_path ) : # already exists return True if not os . path . exists ( zonefile_dir_path ) : os . makedirs ( zonefile_dir_path ) try : with open ( zonefile_path , "wb" ) as f : f . write ( zonefile_data ) f . flush ( ) if fsync : os . fsync ( f . fileno ( ) ) except Exception , e : log . exception ( e ) return False return True | Store a validated zonefile . zonefile_data should be a dict . The caller should first authenticate the zonefile . Return True on success Return False on error | 233 | 33 |
224,438 | def remove_atlas_zonefile_data ( zonefile_hash , zonefile_dir ) : if not os . path . exists ( zonefile_dir ) : return True zonefile_path = atlas_zonefile_path ( zonefile_dir , zonefile_hash ) zonefile_path_legacy = atlas_zonefile_path_legacy ( zonefile_dir , zonefile_hash ) for zfp in [ zonefile_path , zonefile_path_legacy ] : if not os . path . exists ( zonefile_path ) : continue try : os . unlink ( zonefile_path ) except : log . error ( "Failed to unlink zonefile %s (%s)" % ( zonefile_hash , zonefile_path ) ) return True | Remove a cached zonefile . Idempotent ; returns True if deleted or it didn t exist . Returns False on error | 170 | 25 |
224,439 | def add_atlas_zonefile_data ( zonefile_text , zonefile_dir , fsync = True ) : rc = store_atlas_zonefile_data ( zonefile_text , zonefile_dir , fsync = fsync ) if not rc : zonefile_hash = get_zonefile_data_hash ( zonefile_text ) log . error ( "Failed to save zonefile {}" . format ( zonefile_hash ) ) rc = False return rc | Add a zone file to the atlas zonefiles Return True on success Return False on error | 105 | 18 |
224,440 | def transfer_sanity_check ( name , consensus_hash ) : if name is not None and ( not is_b40 ( name ) or "+" in name or name . count ( "." ) > 1 ) : raise Exception ( "Name '%s' has non-base-38 characters" % name ) # without the scheme, name must be 37 bytes if name is not None and ( len ( name ) > LENGTHS [ 'blockchain_id_name' ] ) : raise Exception ( "Name '%s' is too long; expected %s bytes" % ( name , LENGTHS [ 'blockchain_id_name' ] ) ) return True | Verify that data for a transfer is valid . | 143 | 10 |
224,441 | def find_transfer_consensus_hash ( name_rec , block_id , vtxindex , nameop_consensus_hash ) : # work backwards from the last block for historic_block_number in reversed ( sorted ( name_rec [ 'history' ] . keys ( ) ) ) : for historic_state in reversed ( name_rec [ 'history' ] [ historic_block_number ] ) : if historic_state [ 'block_number' ] > block_id or ( historic_state [ 'block_number' ] == block_id and historic_state [ 'vtxindex' ] > vtxindex ) : # from the future continue if historic_state [ 'op' ] in [ NAME_REGISTRATION , NAME_IMPORT ] : # out of history without finding a NAME_UPDATE return nameop_consensus_hash if historic_state [ 'op' ] == NAME_UPDATE : # reuse this consensus hash assert historic_state [ 'consensus_hash' ] is not None , 'BUG: NAME_UPDATE did not set "consensus_hash": {}' . format ( historic_state ) return historic_state [ 'consensus_hash' ] return nameop_consensus_hash | Given a name record find the last consensus hash set by a non - NAME_TRANSFER operation . | 262 | 21 |
224,442 | def canonicalize ( parsed_op ) : assert 'op' in parsed_op assert len ( parsed_op [ 'op' ] ) == 2 if parsed_op [ 'op' ] [ 1 ] == TRANSFER_KEEP_DATA : parsed_op [ 'keep_data' ] = True elif parsed_op [ 'op' ] [ 1 ] == TRANSFER_REMOVE_DATA : parsed_op [ 'keep_data' ] = False else : raise ValueError ( "Invalid op '{}'" . format ( parsed_op [ 'op' ] ) ) return parsed_op | Get the canonical form of this operation putting it into a form where it can be serialized to form a consensus hash . This method is meant to preserve compatibility across blockstackd releases . | 129 | 37 |
224,443 | def get_bitcoind ( new_bitcoind_opts = None , reset = False , new = False ) : global bitcoind if reset : bitcoind = None elif not new and bitcoind is not None : return bitcoind if new or bitcoind is None : if new_bitcoind_opts is not None : set_bitcoin_opts ( new_bitcoind_opts ) bitcoin_opts = get_bitcoin_opts ( ) new_bitcoind = None try : try : new_bitcoind = virtualchain . connect_bitcoind ( bitcoin_opts ) except KeyError , ke : log . exception ( ke ) log . error ( "Invalid configuration: %s" % bitcoin_opts ) return None if new : return new_bitcoind else : # save for subsequent reuse bitcoind = new_bitcoind return bitcoind except Exception , e : log . exception ( e ) return None | Get or instantiate our bitcoind client . Optionally re - set the bitcoind options . | 210 | 21 |
224,444 | def get_pidfile_path ( working_dir ) : pid_filename = virtualchain_hooks . get_virtual_chain_name ( ) + ".pid" return os . path . join ( working_dir , pid_filename ) | Get the PID file path . | 51 | 6 |
224,445 | def put_pidfile ( pidfile_path , pid ) : with open ( pidfile_path , "w" ) as f : f . write ( "%s" % pid ) os . fsync ( f . fileno ( ) ) return | Put a PID into a pidfile | 52 | 7 |
224,446 | def get_logfile_path ( working_dir ) : logfile_filename = virtualchain_hooks . get_virtual_chain_name ( ) + ".log" return os . path . join ( working_dir , logfile_filename ) | Get the logfile path for our service endpoint . | 53 | 10 |
224,447 | def get_index_range ( working_dir ) : bitcoind_session = get_bitcoind ( new = True ) assert bitcoind_session is not None first_block = None last_block = None wait = 1.0 while last_block is None and is_running ( ) : first_block , last_block = virtualchain . get_index_range ( 'bitcoin' , bitcoind_session , virtualchain_hooks , working_dir ) if first_block is None or last_block is None : # try to reconnnect log . error ( "Reconnect to bitcoind in {} seconds" . format ( wait ) ) time . sleep ( wait ) wait = min ( wait * 2.0 + random . random ( ) * wait , 60 ) bitcoind_session = get_bitcoind ( new = True ) continue else : return first_block , last_block - NUM_CONFIRMATIONS return None , None | Get the bitcoin block index range . Mask connection failures with timeouts . Always try to reconnect . | 207 | 19 |
224,448 | def rpc_start ( working_dir , port , subdomain_index = None , thread = True ) : rpc_srv = BlockstackdRPCServer ( working_dir , port , subdomain_index = subdomain_index ) log . debug ( "Starting RPC on port {}" . format ( port ) ) if thread : rpc_srv . start ( ) return rpc_srv | Start the global RPC server thread Returns the RPC server thread | 88 | 11 |
224,449 | def rpc_chain_sync ( server_state , new_block_height , finish_time ) : rpc_srv = server_state [ 'rpc' ] if rpc_srv is not None : rpc_srv . cache_flush ( ) rpc_srv . set_last_index_time ( finish_time ) | Flush the global RPC server cache and tell the rpc server that we ve reached the given block height at the given time . | 77 | 26 |
224,450 | def rpc_stop ( server_state ) : rpc_srv = server_state [ 'rpc' ] if rpc_srv is not None : log . info ( "Shutting down RPC" ) rpc_srv . stop_server ( ) rpc_srv . join ( ) log . info ( "RPC joined" ) else : log . info ( "RPC already joined" ) server_state [ 'rpc' ] = None | Stop the global RPC server thread | 101 | 6 |
224,451 | def gc_stop ( ) : global gc_thread if gc_thread : log . info ( "Shutting down GC thread" ) gc_thread . signal_stop ( ) gc_thread . join ( ) log . info ( "GC thread joined" ) gc_thread = None else : log . info ( "GC thread already joined" ) | Stop a the optimistic GC thread | 78 | 6 |
224,452 | def api_start ( working_dir , host , port , thread = True ) : api_srv = BlockstackdAPIServer ( working_dir , host , port ) log . info ( "Starting API server on port {}" . format ( port ) ) if thread : api_srv . start ( ) return api_srv | Start the global API server Returns the API server thread | 72 | 10 |
224,453 | def api_stop ( server_state ) : api_srv = server_state [ 'api' ] if api_srv is not None : log . info ( "Shutting down API" ) api_srv . stop_server ( ) api_srv . join ( ) log . info ( "API server joined" ) else : log . info ( "API already joined" ) server_state [ 'api' ] = None | Stop the global API server thread | 93 | 6 |
224,454 | def atlas_init ( blockstack_opts , db , recover = False , port = None ) : if port is None : port = blockstack_opts [ 'rpc_port' ] # start atlas node atlas_state = None if is_atlas_enabled ( blockstack_opts ) : atlas_seed_peers = filter ( lambda x : len ( x ) > 0 , blockstack_opts [ 'atlas_seeds' ] . split ( "," ) ) atlas_blacklist = filter ( lambda x : len ( x ) > 0 , blockstack_opts [ 'atlas_blacklist' ] . split ( "," ) ) zonefile_dir = blockstack_opts [ 'zonefiles' ] my_hostname = blockstack_opts [ 'atlas_hostname' ] my_port = blockstack_opts [ 'atlas_port' ] initial_peer_table = atlasdb_init ( blockstack_opts [ 'atlasdb_path' ] , zonefile_dir , db , atlas_seed_peers , atlas_blacklist , validate = True , recover = recover ) atlas_peer_table_init ( initial_peer_table ) atlas_state = atlas_node_init ( my_hostname , my_port , blockstack_opts [ 'atlasdb_path' ] , zonefile_dir , db . working_dir ) return atlas_state | Start up atlas functionality | 324 | 5 |
224,455 | def read_pid_file ( pidfile_path ) : try : fin = open ( pidfile_path , "r" ) except Exception , e : return None else : pid_data = fin . read ( ) . strip ( ) fin . close ( ) try : pid = int ( pid_data ) return pid except : return None | Read the PID from the PID file | 71 | 7 |
224,456 | def check_server_running ( pid ) : if pid == os . getpid ( ) : # special case--we're in Docker or some other kind of container # (or we got really unlucky and got the same PID twice). # this PID does not correspond to another running server, either way. return False try : os . kill ( pid , 0 ) return True except OSError as oe : if oe . errno == errno . ESRCH : return False else : raise | Determine if the given process is running | 104 | 9 |
224,457 | def stop_server ( working_dir , clean = False , kill = False ) : timeout = 1.0 dead = False for i in xrange ( 0 , 5 ) : # try to kill the main supervisor pid_file = get_pidfile_path ( working_dir ) if not os . path . exists ( pid_file ) : dead = True break pid = read_pid_file ( pid_file ) if pid is not None : try : os . kill ( pid , signal . SIGTERM ) except OSError , oe : if oe . errno == errno . ESRCH : # already dead log . info ( "Process %s is not running" % pid ) try : os . unlink ( pid_file ) except : pass return except Exception , e : log . exception ( e ) os . abort ( ) else : log . info ( "Corrupt PID file. Please make sure all instances of this program have stopped and remove {}" . format ( pid_file ) ) os . abort ( ) # is it actually dead? blockstack_opts = get_blockstack_opts ( ) srv = BlockstackRPCClient ( 'localhost' , blockstack_opts [ 'rpc_port' ] , timeout = 5 , protocol = 'http' ) try : res = blockstack_ping ( proxy = srv ) except socket . error as se : # dead? if se . errno == errno . ECONNREFUSED : # couldn't connect, so infer dead try : os . kill ( pid , 0 ) log . info ( "Server %s is not dead yet..." % pid ) except OSError , oe : log . info ( "Server %s is dead to us" % pid ) dead = True break else : continue log . info ( "Server %s is still running; trying again in %s seconds" % ( pid , timeout ) ) time . sleep ( timeout ) timeout *= 2 if not dead and kill : # be sure to clean up the pidfile log . info ( "Killing server %s" % pid ) clean = True try : os . kill ( pid , signal . SIGKILL ) except Exception , e : pass if clean : # blow away the pid file try : os . unlink ( pid_file ) except : pass log . debug ( "Blockstack server stopped" ) | Stop the blockstackd server . | 505 | 7 |
224,458 | def genesis_block_load ( module_path = None ) : if os . environ . get ( 'BLOCKSTACK_GENESIS_BLOCK_PATH' ) is not None : log . warning ( 'Using envar-given genesis block' ) module_path = os . environ [ 'BLOCKSTACK_GENESIS_BLOCK_PATH' ] genesis_block = None genesis_block_stages = None if module_path : log . debug ( 'Load genesis block from {}' . format ( module_path ) ) genesis_block_path = module_path try : genesis_block_mod = imp . load_source ( 'genesis_block' , genesis_block_path ) genesis_block = genesis_block_mod . GENESIS_BLOCK genesis_block_stages = genesis_block_mod . GENESIS_BLOCK_STAGES if BLOCKSTACK_TEST : print '' print 'genesis block' print json . dumps ( genesis_block , indent = 4 , sort_keys = True ) print '' except Exception as e : log . exception ( e ) log . fatal ( 'Failed to load genesis block' ) os . abort ( ) else : log . debug ( 'Load built-in genesis block' ) genesis_block = get_genesis_block ( ) genesis_block_stages = get_genesis_block_stages ( ) try : for stage in genesis_block_stages : jsonschema . validate ( GENESIS_BLOCK_SCHEMA , stage ) jsonschema . validate ( GENESIS_BLOCK_SCHEMA , genesis_block ) set_genesis_block ( genesis_block ) set_genesis_block_stages ( genesis_block_stages ) log . debug ( 'Genesis block has {} stages' . format ( len ( genesis_block_stages ) ) ) for i , stage in enumerate ( genesis_block_stages ) : log . debug ( 'Stage {} has {} row(s)' . format ( i + 1 , len ( stage [ 'rows' ] ) ) ) except Exception as e : log . fatal ( "Invalid genesis block" ) os . abort ( ) return True | Make sure the genesis block is good to go . Load and instantiate it . | 481 | 16 |
224,459 | def server_shutdown ( server_state ) : set_running ( False ) # stop API servers rpc_stop ( server_state ) api_stop ( server_state ) # stop atlas node server_atlas_shutdown ( server_state ) # stopping GC gc_stop ( ) # clear PID file try : if os . path . exists ( server_state [ 'pid_file' ] ) : os . unlink ( server_state [ 'pid_file' ] ) except : pass return True | Shut down server subsystems . Remove PID file . | 110 | 10 |
224,460 | def run_server ( working_dir , foreground = False , expected_snapshots = GENESIS_SNAPSHOT , port = None , api_port = None , use_api = None , use_indexer = None , indexer_url = None , recover = False ) : global rpc_server global api_server indexer_log_path = get_logfile_path ( working_dir ) logfile = None if not foreground : if os . path . exists ( indexer_log_path ) : logfile = open ( indexer_log_path , 'a' ) else : logfile = open ( indexer_log_path , 'a+' ) child_pid = daemonize ( logfile ) if child_pid < 0 : log . error ( "Failed to daemonize: {}" . format ( child_pid ) ) return - 1 if child_pid > 0 : # we're the parent log . debug ( "Running in the background as PID {}" . format ( child_pid ) ) sys . exit ( 0 ) server_state = server_setup ( working_dir , port = port , api_port = api_port , indexer_enabled = use_indexer , indexer_url = indexer_url , api_enabled = use_api , recover = recover ) atexit . register ( server_shutdown , server_state ) rpc_server = server_state [ 'rpc' ] blockstack_opts = get_blockstack_opts ( ) blockstack_api_opts = get_blockstack_api_opts ( ) if blockstack_opts [ 'enabled' ] : log . debug ( "Begin Indexing" ) while is_running ( ) : try : running = index_blockchain ( server_state , expected_snapshots = expected_snapshots ) except Exception , e : log . exception ( e ) log . error ( "FATAL: caught exception while indexing" ) os . abort ( ) # wait for the next block deadline = time . time ( ) + REINDEX_FREQUENCY while time . time ( ) < deadline and is_running ( ) : try : time . sleep ( 1.0 ) except : # interrupt break log . debug ( "End Indexing" ) elif blockstack_api_opts [ 'enabled' ] : log . debug ( "Begin serving REST requests" ) while is_running ( ) : try : time . sleep ( 1.0 ) except : # interrupt break log . debug ( "End serving REST requests" ) server_shutdown ( server_state ) # close logfile if logfile is not None : logfile . flush ( ) logfile . close ( ) return 0 | Run blockstackd . Optionally daemonize . 
Return 0 on success Return negative on error | 584 | 18 |
224,461 | def setup ( working_dir , interactive = False ) : # set up our implementation log . debug ( "Working dir: {}" . format ( working_dir ) ) if not os . path . exists ( working_dir ) : os . makedirs ( working_dir , 0700 ) node_config = load_configuration ( working_dir ) if node_config is None : sys . exit ( 1 ) log . debug ( "config\n{}" . format ( json . dumps ( node_config , indent = 4 , sort_keys = True ) ) ) return node_config | Do one - time initialization . Call this to set up global state . | 124 | 14 |
224,462 | def reconfigure ( working_dir ) : configure ( working_dir , force = True , interactive = True ) print "Blockstack successfully reconfigured." sys . exit ( 0 ) | Reconfigure blockstackd . | 37 | 7 |
224,463 | def verify_database ( trusted_consensus_hash , consensus_block_height , untrusted_working_dir , trusted_working_dir , start_block = None , expected_snapshots = { } ) : db = BlockstackDB . get_readwrite_instance ( trusted_working_dir ) consensus_impl = virtualchain_hooks return virtualchain . state_engine_verify ( trusted_consensus_hash , consensus_block_height , consensus_impl , untrusted_working_dir , db , start_block = start_block , expected_snapshots = expected_snapshots ) | Verify that a database is consistent with a known - good consensus hash . Return True if valid . Return False if not | 131 | 24 |
224,464 | def check_and_set_envars ( argv ) : special_flags = { '--debug' : { 'arg' : False , 'envar' : 'BLOCKSTACK_DEBUG' , 'exec' : True , } , '--verbose' : { 'arg' : False , 'envar' : 'BLOCKSTACK_DEBUG' , 'exec' : True , } , '--testnet-id' : { 'arg' : True , 'envar' : 'BLOCKSTACK_TESTNET_ID' , 'exec' : True , } , '--testnet-start-block' : { 'arg' : True , 'envar' : 'BLOCKSTACK_TESTNET_START_BLOCK' , 'exec' : True , } , '--working_dir' : { 'arg' : True , 'argname' : 'working_dir' , 'exec' : False , } , '--working-dir' : { 'arg' : True , 'argname' : 'working_dir' , 'exec' : False , } , } cli_envs = { } cli_args = { } new_argv = [ ] stripped_argv = [ ] do_exec = False i = 0 while i < len ( argv ) : arg = argv [ i ] value = None for special_flag in special_flags . keys ( ) : if not arg . startswith ( special_flag ) : continue if special_flags [ special_flag ] [ 'arg' ] : if '=' in arg : argparts = arg . split ( "=" ) value_parts = argparts [ 1 : ] arg = argparts [ 0 ] value = '=' . join ( value_parts ) elif i + 1 < len ( argv ) : value = argv [ i + 1 ] i += 1 else : print >> sys . stderr , "%s requires an argument" % special_flag return False else : # just set value = "1" break i += 1 if value is not None : if 'envar' in special_flags [ special_flag ] : # recognized cli_envs [ special_flags [ special_flag ] [ 'envar' ] ] = value if 'argname' in special_flags [ special_flag ] : # recognized as special argument cli_args [ special_flags [ special_flag ] [ 'argname' ] ] = value new_argv . append ( arg ) new_argv . append ( value ) if special_flags [ special_flag ] [ 'exec' ] : do_exec = True else : # not recognized new_argv . append ( arg ) stripped_argv . append ( arg ) if do_exec : # re-exec for cli_env , cli_env_value in cli_envs . items ( ) : os . environ [ cli_env ] = cli_env_value if os . environ . get ( "BLOCKSTACK_DEBUG" ) is not None : print "Re-exec as {}" . format ( " " . join ( new_argv ) ) os . execv ( new_argv [ 0 ] , new_argv ) log . 
debug ( "Stripped argv: {}" . format ( ' ' . join ( stripped_argv ) ) ) return cli_args , stripped_argv | Go through argv and find any special command - line flags that set environment variables that affect multiple modules . | 742 | 21 |
224,465 | def load_expected_snapshots ( snapshots_path ) : # use snapshots? snapshots_path = os . path . expanduser ( snapshots_path ) expected_snapshots = { } # legacy chainstate? try : with open ( snapshots_path , "r" ) as f : snapshots_json = f . read ( ) snapshots_data = json . loads ( snapshots_json ) assert 'snapshots' in snapshots_data . keys ( ) , "Not a valid snapshots file" # extract snapshots: map int to consensus hash for ( block_id_str , consensus_hash ) in snapshots_data [ 'snapshots' ] . items ( ) : expected_snapshots [ int ( block_id_str ) ] = str ( consensus_hash ) log . debug ( "Loaded expected snapshots from legacy JSON {}; {} entries" . format ( snapshots_path , len ( expected_snapshots ) ) ) return expected_snapshots except ValueError as ve : log . debug ( "Snapshots file {} is not JSON" . format ( snapshots_path ) ) except Exception as e : if os . environ . get ( 'BLOCKSTACK_DEBUG' ) == '1' : log . exception ( e ) log . debug ( "Failed to read expected snapshots from '{}'" . format ( snapshots_path ) ) return None try : # sqlite3 db? db_con = virtualchain . StateEngine . db_connect ( snapshots_path ) expected_snapshots = virtualchain . StateEngine . get_consensus_hashes ( None , None , db_con = db_con , completeness_check = False ) log . debug ( "Loaded expected snapshots from chainstate DB {}, {} entries" . format ( snapshots_path , len ( expected_snapshots ) ) ) return expected_snapshots except : log . debug ( "{} does not appear to be a chainstate DB" . format ( snapshots_path ) ) return None | Load expected consensus hashes from a . snapshots file . Return the snapshots as a dict on success Return None on error | 414 | 22 |
224,466 | def do_genesis_block_audit ( genesis_block_path = None , key_id = None ) : signing_keys = GENESIS_BLOCK_SIGNING_KEYS if genesis_block_path is not None : # alternative genesis block genesis_block_load ( genesis_block_path ) if key_id is not None : # alternative signing key gpg2_path = find_gpg2 ( ) assert gpg2_path , 'You need to install gpg2' p = subprocess . Popen ( [ gpg2_path , '-a' , '--export' , key_id ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) out , err = p . communicate ( ) if p . returncode != 0 : log . error ( 'Failed to load key {}\n{}' . format ( key_id , err ) ) return False signing_keys = { key_id : out . strip ( ) } res = genesis_block_audit ( get_genesis_block_stages ( ) , key_bundle = signing_keys ) if not res : log . error ( 'Genesis block is NOT signed by {}' . format ( ', ' . join ( signing_keys . keys ( ) ) ) ) return False return True | Loads and audits the genesis block optionally using an alternative key | 291 | 12 |
224,467 | def setup_recovery ( working_dir ) : db = get_db_state ( working_dir ) bitcoind_session = get_bitcoind ( new = True ) assert bitcoind_session is not None _ , current_block = virtualchain . get_index_range ( 'bitcoin' , bitcoind_session , virtualchain_hooks , working_dir ) assert current_block , 'Failed to connect to bitcoind' set_recovery_range ( working_dir , db . lastblock , current_block - NUM_CONFIRMATIONS ) return True | Set up the recovery metadata so we can fully recover secondary state like subdomains . | 129 | 17 |
224,468 | def check_recovery ( working_dir ) : recovery_start_block , recovery_end_block = get_recovery_range ( working_dir ) if recovery_start_block is not None and recovery_end_block is not None : local_current_block = virtualchain_hooks . get_last_block ( working_dir ) if local_current_block <= recovery_end_block : return True # otherwise, we're outside the recovery range and we can clear it log . debug ( 'Chain state is at block {}, and is outside the recovery window {}-{}' . format ( local_current_block , recovery_start_block , recovery_end_block ) ) clear_recovery_range ( working_dir ) return False else : # not recovering return False | Do we need to recover on start - up? | 172 | 10 |
224,469 | def success_response ( self , method_resp , * * kw ) : resp = { 'status' : True , 'indexing' : config . is_indexing ( self . working_dir ) , 'lastblock' : virtualchain_hooks . get_last_block ( self . working_dir ) , } resp . update ( kw ) resp . update ( method_resp ) if self . is_stale ( ) : # our state is stale resp [ 'stale' ] = True resp [ 'warning' ] = 'Daemon has not reindexed since {}' . format ( self . last_indexing_time ) return resp | Make a standard success response which contains some ancilliary data . | 141 | 13 |
224,470 | def load_name_info ( self , db , name_record ) : name = str ( name_record [ 'name' ] ) name_record = self . sanitize_rec ( name_record ) namespace_id = get_namespace_from_name ( name ) namespace_record = db . get_namespace ( namespace_id , include_history = False ) if namespace_record is None : namespace_record = db . get_namespace_reveal ( namespace_id , include_history = False ) if namespace_record is None : # name can't exist (this can be arrived at if we're resolving a DID) return None # when does this name expire (if it expires)? if namespace_record [ 'lifetime' ] != NAMESPACE_LIFE_INFINITE : deadlines = BlockstackDB . get_name_deadlines ( name_record , namespace_record , db . lastblock ) if deadlines is not None : name_record [ 'expire_block' ] = deadlines [ 'expire_block' ] name_record [ 'renewal_deadline' ] = deadlines [ 'renewal_deadline' ] else : # only possible if namespace is not yet ready name_record [ 'expire_block' ] = - 1 name_record [ 'renewal_deadline' ] = - 1 else : name_record [ 'expire_block' ] = - 1 name_record [ 'renewal_deadline' ] = - 1 if name_record [ 'expire_block' ] > 0 and name_record [ 'expire_block' ] <= db . lastblock : name_record [ 'expired' ] = True else : name_record [ 'expired' ] = False # try to get the zonefile as well if 'value_hash' in name_record and name_record [ 'value_hash' ] is not None : conf = get_blockstack_opts ( ) if is_atlas_enabled ( conf ) : zfdata = self . get_zonefile_data ( name_record [ 'value_hash' ] , conf [ 'zonefiles' ] ) if zfdata is not None : zfdata = base64 . b64encode ( zfdata ) name_record [ 'zonefile' ] = zfdata return name_record | Get some extra name information given a db - loaded name record . Return the updated name_record | 509 | 19 |
224,471 | def get_name_DID_info ( self , name ) : db = get_db_state ( self . working_dir ) did_info = db . get_name_DID_info ( name ) if did_info is None : return { 'error' : 'No such name' , 'http_status' : 404 } return did_info | Get a name s DID info Returns None if not found | 77 | 11 |
224,472 | def rpc_get_name_DID ( self , name , * * con_info ) : did_info = None if check_name ( name ) : did_info = self . get_name_DID_info ( name ) elif check_subdomain ( name ) : did_info = self . get_subdomain_DID_info ( name ) else : return { 'error' : 'Invalid name or subdomain' , 'http_status' : 400 } if did_info is None : return { 'error' : 'No DID for this name' , 'http_status' : 404 } did = make_DID ( did_info [ 'name_type' ] , did_info [ 'address' ] , did_info [ 'index' ] ) return self . success_response ( { 'did' : did } ) | Given a name or subdomain return its DID . | 184 | 10 |
224,473 | def rpc_get_DID_record ( self , did , * * con_info ) : if not isinstance ( did , ( str , unicode ) ) : return { 'error' : 'Invalid DID: not a string' , 'http_status' : 400 } try : did_info = parse_DID ( did ) except : return { 'error' : 'Invalid DID' , 'http_status' : 400 } res = None if did_info [ 'name_type' ] == 'name' : res = self . get_name_DID_record ( did ) elif did_info [ 'name_type' ] == 'subdomain' : res = self . get_subdomain_DID_record ( did ) if 'error' in res : return { 'error' : res [ 'error' ] , 'http_status' : res . get ( 'http_status' , 404 ) } return self . success_response ( { 'record' : res [ 'record' ] } ) | Given a DID return the name or subdomain it corresponds to | 221 | 12 |
224,474 | def rpc_get_blockstack_ops_at ( self , block_id , offset , count , * * con_info ) : if not check_block ( block_id ) : return { 'error' : 'Invalid block height' , 'http_status' : 400 } if not check_offset ( offset ) : return { 'error' : 'Invalid offset' , 'http_status' : 400 } if not check_count ( count , 10 ) : return { 'error' : 'Invalid count' , 'http_status' : 400 } db = get_db_state ( self . working_dir ) nameops = db . get_all_blockstack_ops_at ( block_id , offset = offset , count = count ) db . close ( ) log . debug ( "{} name operations at block {}, offset {}, count {}" . format ( len ( nameops ) , block_id , offset , count ) ) ret = [ ] for nameop in nameops : assert 'opcode' in nameop , 'BUG: missing opcode in {}' . format ( json . dumps ( nameop , sort_keys = True ) ) canonical_op = self . sanitize_rec ( nameop ) ret . append ( canonical_op ) return self . success_response ( { 'nameops' : ret } ) | Get the name operations that occured in the given block . Does not include account operations . | 287 | 18 |
224,475 | def rpc_get_blockstack_ops_hash_at ( self , block_id , * * con_info ) : if not check_block ( block_id ) : return { 'error' : 'Invalid block height' , 'http_status' : 400 } db = get_db_state ( self . working_dir ) ops_hash = db . get_block_ops_hash ( block_id ) db . close ( ) return self . success_response ( { 'ops_hash' : ops_hash } ) | Get the hash over the sequence of names and namespaces altered at the given block . Used by SNV clients . | 115 | 23 |
224,476 | def get_bitcoind_info ( self ) : cached_bitcoind_info = self . get_cached_bitcoind_info ( ) if cached_bitcoind_info : return cached_bitcoind_info bitcoind_opts = default_bitcoind_opts ( virtualchain . get_config_filename ( virtualchain_hooks , self . working_dir ) , prefix = True ) bitcoind = get_bitcoind ( new_bitcoind_opts = bitcoind_opts , new = True ) if bitcoind is None : return { 'error' : 'Internal server error: failed to connect to bitcoind' } try : info = bitcoind . getinfo ( ) assert 'error' not in info assert 'blocks' in info self . set_cached_bitcoind_info ( info ) return info except Exception as e : raise | Get bitcoind info . Try the cache and on cache miss fetch from bitcoind and cache . | 198 | 21 |
224,477 | def get_consensus_info ( self ) : cached_consensus_info = self . get_cached_consensus_info ( ) if cached_consensus_info : return cached_consensus_info db = get_db_state ( self . working_dir ) ch = db . get_current_consensus ( ) block = db . get_current_block ( ) db . close ( ) cinfo = { 'consensus_hash' : ch , 'block_height' : block } self . set_cached_consensus_info ( cinfo ) return cinfo | Get block height and consensus hash . Try the cache and on cache miss fetch from the db | 127 | 18 |
224,478 | def rpc_get_account_tokens ( self , address , * * con_info ) : if not check_account_address ( address ) : return { 'error' : 'Invalid address' , 'http_status' : 400 } # must be b58 if is_c32_address ( address ) : address = c32ToB58 ( address ) db = get_db_state ( self . working_dir ) token_list = db . get_account_tokens ( address ) db . close ( ) return self . success_response ( { 'token_types' : token_list } ) | Get the types of tokens that an account owns Returns the list on success | 133 | 14 |
224,479 | def rpc_get_account_balance ( self , address , token_type , * * con_info ) : if not check_account_address ( address ) : return { 'error' : 'Invalid address' , 'http_status' : 400 } if not check_token_type ( token_type ) : return { 'error' : 'Invalid token type' , 'http_status' : 400 } # must be b58 if is_c32_address ( address ) : address = c32ToB58 ( address ) db = get_db_state ( self . working_dir ) account = db . get_account ( address , token_type ) if account is None : return self . success_response ( { 'balance' : 0 } ) balance = db . get_account_balance ( account ) if balance is None : balance = 0 db . close ( ) return self . success_response ( { 'balance' : balance } ) | Get the balance of an address for a particular token type Returns the value on success Returns 0 if the balance is 0 or if there is no address | 202 | 29 |
224,480 | def export_account_state ( self , account_state ) : return { 'address' : account_state [ 'address' ] , 'type' : account_state [ 'type' ] , 'credit_value' : '{}' . format ( account_state [ 'credit_value' ] ) , 'debit_value' : '{}' . format ( account_state [ 'debit_value' ] ) , 'lock_transfer_block_id' : account_state [ 'lock_transfer_block_id' ] , 'block_id' : account_state [ 'block_id' ] , 'vtxindex' : account_state [ 'vtxindex' ] , 'txid' : account_state [ 'txid' ] , } | Make an account state presentable to external consumers | 169 | 9 |
224,481 | def rpc_get_account_record ( self , address , token_type , * * con_info ) : if not check_account_address ( address ) : return { 'error' : 'Invalid address' , 'http_status' : 400 } if not check_token_type ( token_type ) : return { 'error' : 'Invalid token type' , 'http_status' : 400 } # must be b58 if is_c32_address ( address ) : address = c32ToB58 ( address ) db = get_db_state ( self . working_dir ) account = db . get_account ( address , token_type ) db . close ( ) if account is None : return { 'error' : 'No such account' , 'http_status' : 404 } state = self . export_account_state ( account ) return self . success_response ( { 'account' : state } ) | Get the current state of an account | 199 | 7 |
224,482 | def rpc_get_account_at ( self , address , block_height , * * con_info ) : if not check_account_address ( address ) : return { 'error' : 'Invalid address' , 'http_status' : 400 } if not check_block ( block_height ) : return { 'error' : 'Invalid start block' , 'http_status' : 400 } # must be b58 if is_c32_address ( address ) : address = c32ToB58 ( address ) db = get_db_state ( self . working_dir ) account_states = db . get_account_at ( address , block_height ) db . close ( ) # return credit_value and debit_value as strings, so the unwitting JS developer doesn't get confused # as to why large balances get mysteriously converted to doubles. ret = [ self . export_account_state ( hist ) for hist in account_states ] return self . success_response ( { 'history' : ret } ) | Get the account s statuses at a particular block height . Returns the sequence of history states on success | 217 | 20 |
224,483 | def rpc_get_consensus_hashes ( self , block_id_list , * * con_info ) : if type ( block_id_list ) != list : return { 'error' : 'Invalid block heights' , 'http_status' : 400 } if len ( block_id_list ) > 32 : return { 'error' : 'Too many block heights' , 'http_status' : 400 } for bid in block_id_list : if not check_block ( bid ) : return { 'error' : 'Invalid block height' , 'http_status' : 400 } db = get_db_state ( self . working_dir ) ret = { } for block_id in block_id_list : ret [ block_id ] = db . get_consensus_at ( block_id ) db . close ( ) return self . success_response ( { 'consensus_hashes' : ret } ) | Return the consensus hashes at multiple block numbers Return a dict mapping each block ID to its consensus hash . | 203 | 20 |
224,484 | def get_zonefile_data ( self , zonefile_hash , zonefile_dir ) : # check cache atlas_zonefile_data = get_atlas_zonefile_data ( zonefile_hash , zonefile_dir , check = False ) if atlas_zonefile_data is not None : # check hash zfh = get_zonefile_data_hash ( atlas_zonefile_data ) if zfh != zonefile_hash : log . debug ( "Invalid local zonefile %s" % zonefile_hash ) remove_atlas_zonefile_data ( zonefile_hash , zonefile_dir ) else : log . debug ( "Zonefile %s is local" % zonefile_hash ) return atlas_zonefile_data return None | Get a zonefile by hash Return the serialized zonefile on success Return None on error | 170 | 18 |
224,485 | def rpc_get_zonefiles_by_block ( self , from_block , to_block , offset , count , * * con_info ) : conf = get_blockstack_opts ( ) if not is_atlas_enabled ( conf ) : return { 'error' : 'Not an atlas node' , 'http_status' : 400 } if not check_block ( from_block ) : return { 'error' : 'Invalid from_block height' , 'http_status' : 400 } if not check_block ( to_block ) : return { 'error' : 'Invalid to_block height' , 'http_status' : 400 } if not check_offset ( offset ) : return { 'error' : 'invalid offset' , 'http_status' : 400 } if not check_count ( count , 100 ) : return { 'error' : 'invalid count' , 'http_status' : 400 } zonefile_info = atlasdb_get_zonefiles_by_block ( from_block , to_block , offset , count , path = conf [ 'atlasdb_path' ] ) if 'error' in zonefile_info : return zonefile_info return self . success_response ( { 'zonefile_info' : zonefile_info } ) | Get information about zonefiles announced in blocks [ | 284 | 9 |
224,486 | def peer_exchange ( self , peer_host , peer_port ) : # get peers peer_list = atlas_get_live_neighbors ( "%s:%s" % ( peer_host , peer_port ) ) if len ( peer_list ) > atlas_max_neighbors ( ) : random . shuffle ( peer_list ) peer_list = peer_list [ : atlas_max_neighbors ( ) ] log . info ( "Enqueue remote peer {}:{}" . format ( peer_host , peer_port ) ) atlas_peer_enqueue ( "%s:%s" % ( peer_host , peer_port ) ) log . debug ( "Live peers reply to %s:%s: %s" % ( peer_host , peer_port , peer_list ) ) return peer_list | Exchange peers . Add the given peer to the list of new peers to consider . Return the list of healthy peers | 185 | 23 |
224,487 | def rpc_atlas_peer_exchange ( self , remote_peer , * * con_info ) : conf = get_blockstack_opts ( ) if not conf . get ( 'atlas' , False ) : return { 'error' : 'Not an atlas node' , 'http_status' : 404 } # take the socket-given information if this is not localhost client_host = con_info [ 'client_host' ] client_port = con_info [ 'client_port' ] peer_host = None peer_port = None LOCALHOST = [ '127.0.0.1' , '::1' , 'localhost' ] if client_host not in LOCALHOST : # we don't allow a non-localhost peer to insert an arbitrary host peer_host = client_host peer_port = client_port else : try : peer_host , peer_port = url_to_host_port ( remote_peer ) assert peer_host assert peer_port except : # invalid return { 'error' : 'Invalid remote peer address' , 'http_status' : 400 } peers = self . peer_exchange ( peer_host , peer_port ) return self . success_response ( { 'peers' : peers } ) | Accept a remotely - given atlas peer and return our list of healthy peers . The remotely - given atlas peer will only be considered if the caller is localhost ; otherwise the caller s socket - given information will be used . This is to prevent a malicious node from filling up this node s peer table with junk . | 278 | 64 |
def stop_server(self):
    """
    Stop serving.  Also stops the RPC server thread.
    No-op if the server was never started (self.rpc_server is None).
    """
    if self.rpc_server is not None:
        try:
            # force-close the listening socket so pending accepts abort
            self.rpc_server.socket.shutdown(socket.SHUT_RDWR)
        except Exception:
            # narrowed from a bare `except:`; the socket may already be
            # closed, which is fine — we still stop the serve loop below
            log.warning("Failed to shut down server socket")

        self.rpc_server.shutdown()
def get_last_block(working_dir):
    """
    Look up the last block processed by the database in working_dir.

    Returns the block height (int) on success.
    Returns None on error.
    """
    # use this module as the virtualchain implementation, so this works
    # even when virtualchain hasn't been explicitly configured
    chain_impl = sys.modules[__name__]
    return BlockstackDB.get_lastblock(chain_impl, working_dir)
def get_or_instantiate_db_state(working_dir):
    """
    Get a read-only handle to the DB, instantiating it first if it
    doesn't exist yet.
    """
    # borrowing (and immediately releasing) a read-write handle creates
    # the database on disk if it is not there yet
    db = BlockstackDB.borrow_readwrite_instance(working_dir, -1)
    BlockstackDB.release_readwrite_instance(db, -1)

    return get_db_state(working_dir)
def check_quirks(block_id, block_op, db_state):
    """
    Check that all serialization-compatibility quirks have been preserved
    for this operation.  Used primarily for testing; raises AssertionError
    on a quirk violation.
    """
    opcode = op_get_opcode_name(block_op['op'])
    if opcode in OPCODE_NAME_NAMEOPS and opcode not in OPCODE_NAME_STATE_PREORDER:
        assert 'last_creation_op' in block_op, 'QUIRK BUG: missing last_creation_op in {}'.format(opcode)

        if block_op['last_creation_op'] == NAME_IMPORT:
            # the op_fee will be a float if the name record was created with a NAME_IMPORT
            assert isinstance(block_op['op_fee'], float), 'QUIRK BUG: op_fee is not a float when it should be'

    return
def sync_blockchain(working_dir, bt_opts, last_block, server_state, expected_snapshots=None, **virtualchain_args):
    """
    Synchronize the name database with the blockchain, up to last_block.

    Returns True on success.
    Returns False if we're supposed to stop indexing.
    Aborts on error.
    """
    # fixed mutable default argument ({} shared across calls); keep the
    # same effective behavior with a None sentinel
    if expected_snapshots is None:
        expected_snapshots = {}

    subdomain_index = server_state['subdomains']
    atlas_state = server_state['atlas']

    # make this usable even if we haven't explicitly configured virtualchain
    impl = sys.modules[__name__]

    log.info("Synchronizing database {} up to block {}".format(working_dir, last_block))

    # NOTE: this is the only place where a read-write handle should be created,
    # since this is the only place where the db should be modified.
    new_db = BlockstackDB.borrow_readwrite_instance(working_dir, last_block, expected_snapshots=expected_snapshots)

    # propagate runtime state to virtualchain callbacks
    new_db.subdomain_index = subdomain_index
    new_db.atlas_state = atlas_state

    rc = virtualchain.sync_virtualchain(bt_opts, last_block, new_db, expected_snapshots=expected_snapshots, **virtualchain_args)

    BlockstackDB.release_readwrite_instance(new_db, last_block)

    return rc
def url_protocol(url, port=None):
    """
    Get the protocol to use for a URL.

    Returns 'http' or 'https' for URLs with those schemes.
    Returns None for anything else.

    The port argument is unused; it is kept for interface compatibility.
    """
    # the prefix check fully determines the scheme, so the original
    # urlparse round-trip (and its always-true assert) was dead weight
    if url.startswith('https://'):
        return 'https'

    if url.startswith('http://'):
        return 'http'

    return None
def make_DID(name_type, address, index):
    """
    Standard way of making a DID.  name_type is 'name' or 'subdomain'.

    Returns the DID string 'did:stack:v0:{address}-{index}'.
    Raises ValueError on an unsupported name_type.
    """
    if name_type not in ['name', 'subdomain']:
        raise ValueError("Require 'name' or 'subdomain' for name_type")

    if name_type == 'name':
        address = virtualchain.address_reencode(address)
    else:
        # subdomain: re-encode the address under a subdomain-specific
        # version byte, preserving singlesig vs. multisig
        vb = keylib.b58check.b58check_version_byte(address)
        vb = SUBDOMAIN_ADDRESS_VERSION_BYTE if vb == bitcoin_blockchain.version_byte else SUBDOMAIN_ADDRESS_MULTISIG_VERSION_BYTE
        address = virtualchain.address_reencode(address, version_byte=vb)

    return 'did:stack:v0:{}-{}'.format(address, index)
def process_request_thread(self, request, client_address):
    """
    Same as in BaseServer, but run as a thread.  Exceptions from the
    handler are caught and reported here, and on exit the thread removes
    itself from the server's thread table and pokes the GC thread.
    """
    from ..blockstackd import get_gc_thread

    try:
        self.finish_request(request, client_address)
    except Exception:
        self.handle_error(request, client_address)
    finally:
        self.shutdown_request(request)

    my_ident = threading.current_thread().ident
    removed = False
    with self._thread_guard:
        if my_ident in self._threads:
            del self._threads[my_ident]
            removed = True

            if BLOCKSTACK_TEST:
                log.debug('{} active threads (removed {})'.format(len(self._threads), my_ident))

    if removed:
        gc_thread = get_gc_thread()
        if gc_thread:
            # count this towards our preemptive garbage collection
            gc_thread.gc_event()
def get_request(self):
    """
    Accept a request, but refuse it when MAX_RPC_THREADS handler threads
    are already running: in that case, reply with self.overloaded()'s
    response, close the connection, and return (None, None).
    """
    # Note that this class must be mixed with another class that implements get_request()
    request, client_addr = super(BoundedThreadingMixIn, self).get_request()

    too_busy = False
    with self._thread_guard:
        if self._threads is not None and len(self._threads) + 1 > MAX_RPC_THREADS:
            too_busy = True

    if too_busy:
        reply = self.overloaded(client_addr)
        request.sendall(reply)
        sys.stderr.write('{} - - [{}] "Overloaded"\n'.format(client_addr[0], time_str(time.time())))
        self.shutdown_request(request)
        return None, None

    return request, client_addr
def get_epoch_config(block_height):
    """
    Get the epoch constants for the given block height.
    Aborts the process if the height maps to no known epoch.
    """
    global EPOCHS
    epoch_number = get_epoch_number(block_height)

    if not (0 <= epoch_number < len(EPOCHS)):
        log.error("FATAL: invalid epoch %s" % epoch_number)
        os.abort()

    return EPOCHS[epoch_number]
def get_epoch_namespace_lifetime_multiplier(block_height, namespace_id):
    """
    Get the namespace lifetime multiplier in effect for this namespace in
    the epoch containing block_height.  Falls back to the epoch-wide
    default entry ('*') when the namespace has no specific override.
    """
    namespaces = get_epoch_config(block_height)['namespaces']

    # dict.has_key() was removed in Python 3; `in` works in both 2 and 3
    if namespace_id in namespaces:
        return namespaces[namespace_id]['NAMESPACE_LIFETIME_MULTIPLIER']
    else:
        return namespaces['*']['NAMESPACE_LIFETIME_MULTIPLIER']
def get_epoch_namespace_lifetime_grace_period(block_height, namespace_id):
    """
    Get the namespace lifetime grace period in effect for this namespace in
    the epoch containing block_height.  Falls back to the epoch-wide
    default entry ('*') when the namespace has no specific override.
    """
    namespaces = get_epoch_config(block_height)['namespaces']

    # dict.has_key() was removed in Python 3; `in` works in both 2 and 3
    if namespace_id in namespaces:
        return namespaces[namespace_id]['NAMESPACE_LIFETIME_GRACE_PERIOD']
    else:
        return namespaces['*']['NAMESPACE_LIFETIME_GRACE_PERIOD']
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.