idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
36,400
def order_search_results(query, search_results):
    """
    Order search results so that:
      a) results whose first word starts with the query's first word come first,
      b) then results whose second word (e.g. last name) starts with it,
      c) then everything else.

    query: the raw search string
    search_results: list of result strings (e.g. "first last" names)
    Returns the reordered list.
    """
    words = query.split(' ')
    # with fewer than two words, match against the raw query string
    first_word = query if len(words) < 2 else words[0]

    first_name_matches = []
    remainder = []
    for result in search_results:
        parts = result.split(' ')
        try:
            matched = parts[0].startswith(first_word)
        except IndexError:
            matched = False
        (first_name_matches if matched else remainder).append(result)

    rest = []
    for result in remainder:
        parts = result.split(' ')
        try:
            # single-word results have no second word; they fall to the back
            matched = parts[1].startswith(first_word)
        except IndexError:
            matched = False
        (first_name_matches if matched else rest).append(result)

    return first_name_matches + rest
Order of results should be: a) query in first name, b) query in last name.
36,401
def get_data_hash(data_txt):
    """
    Generate a hash over data for immutable storage.
    Return the hex string.
    """
    digest = hashlib.sha256()
    digest.update(data_txt)
    return digest.hexdigest()
Generate a hash over data for immutable storage . Return the hex string .
36,402
def verify_zonefile(zonefile_str, value_hash):
    """
    Verify that a zonefile hashes to the given value hash.
    Return True if so; False (with a debug log) otherwise.
    """
    computed_hash = get_zonefile_data_hash(zonefile_str)
    if computed_hash == value_hash:
        return True

    log.debug("Zonefile hash mismatch: expected %s, got %s" % (value_hash, computed_hash))
    return False
Verify that a zonefile hashes to the given value hash
36,403
def atlas_peer_table_lock():
    """
    Lock the global health info table.
    Return the table.
    """
    global PEER_TABLE_LOCK, PEER_TABLE, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK

    if PEER_TABLE_LOCK_HOLDER is not None:
        # this is not a re-entrant lock: re-acquiring from the same thread would hang
        assert PEER_TABLE_LOCK_HOLDER != threading.current_thread(), "DEADLOCK"

    PEER_TABLE_LOCK.acquire()
    PEER_TABLE_LOCK_HOLDER = threading.current_thread()
    # remember where we took the lock, for diagnostics on bad unlocks
    PEER_TABLE_LOCK_TRACEBACK = traceback.format_stack()
    return PEER_TABLE
Lock the global health info table . Return the table .
36,404
def atlas_peer_table_unlock():
    """
    Unlock the global health info table.
    Aborts the whole process if a thread other than the holder unlocks.
    """
    global PEER_TABLE_LOCK, PEER_TABLE_LOCK_HOLDER, PEER_TABLE_LOCK_TRACEBACK
    try:
        assert PEER_TABLE_LOCK_HOLDER == threading.current_thread()
    except:
        # wrong thread (or assertions disabled state is corrupt): dump both stacks and die
        log.error("Locked by %s, unlocked by %s" % (PEER_TABLE_LOCK_HOLDER, threading.current_thread()))
        log.error("Holder locked from:\n%s" % "".join(PEER_TABLE_LOCK_TRACEBACK))
        log.error("Errant thread unlocked from:\n%s" % "".join(traceback.format_stack()))
        os.abort()

    PEER_TABLE_LOCK_HOLDER = None
    PEER_TABLE_LOCK_TRACEBACK = None
    PEER_TABLE_LOCK.release()
    return
Unlock the global health info table .
36,405
def atlasdb_format_query(query, values):
    """
    Turn a query into a string for printing.
    Useful for debugging.
    """
    rendered = []
    # pair each fragment between '?' placeholders with its value
    # (pad values with one trailing "" for the fragment after the last '?')
    for frag, val in zip(query.split("?"), values + ("",)):
        if type(val) in [str, unicode]:
            rendered.append("%s %s" % (frag, "'%s'" % val))
        else:
            rendered.append("%s %s" % (frag, val))
    return "".join(rendered)
Turn a query into a string for printing . Useful for debugging .
36,406
def atlasdb_open(path):
    """
    Open the atlas db at the given path.
    Return a connection on success; None if the db doesn't exist.
    """
    if not os.path.exists(path):
        log.debug("Atlas DB doesn't exist at %s" % path)
        return None

    # autocommit mode; row factory yields dict-like rows
    con = sqlite3.connect(path, isolation_level=None)
    con.row_factory = atlasdb_row_factory
    return con
Open the atlas db . Return a connection . Return None if it doesn t exist
36,407
def atlasdb_add_zonefile_info(name, zonefile_hash, txid, present, tried_storage, block_height, con=None, path=None):
    """
    Add a zonefile to the database.  Mark it as present or absent.
    Keep our in-RAM inventory vector up-to-date.
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK

    with AtlasDBOpen(con=con, path=path) as dbcon:
        with ZONEFILE_INV_LOCK:
            # normalize flags to sqlite-friendly integers
            present = 1 if present else 0
            tried_storage = 1 if tried_storage else 0

            # try to update an existing row for this txid first
            sql = "UPDATE zonefiles SET name = ?, zonefile_hash = ?, txid = ?, present = ?, tried_storage = ?, block_height = ? WHERE txid = ?;"
            args = (name, zonefile_hash, txid, present, tried_storage, block_height, txid)
            cur = dbcon.cursor()
            update_res = atlasdb_query_execute(cur, sql, args)
            dbcon.commit()

            if update_res.rowcount == 0:
                # no row for this txid yet; insert it
                sql = "INSERT OR IGNORE INTO zonefiles (name, zonefile_hash, txid, present, tried_storage, block_height) VALUES (?,?,?,?,?,?);"
                args = (name, zonefile_hash, txid, present, tried_storage, block_height)
                cur = dbcon.cursor()
                atlasdb_query_execute(cur, sql, args)
                dbcon.commit()

            # flip the matching bits in the cached inventory vector
            zfbits = atlasdb_get_zonefile_bits(zonefile_hash, con=dbcon, path=path)
            inv_vec = "" if ZONEFILE_INV is None else ZONEFILE_INV[:]
            ZONEFILE_INV = atlas_inventory_flip_zonefile_bits(inv_vec, zfbits, present)

            log.debug('Set {} ({}) to {}'.format(zonefile_hash, ','.join(str(i) for i in zfbits), present))
            NUM_ZONEFILES = atlasdb_zonefile_inv_length(con=dbcon, path=path)

    return True
Add a zonefile to the database . Mark it as present or absent . Keep our in - RAM inventory vector up - to - date
36,408
def atlasdb_get_lastblock(con=None, path=None):
    """
    Get the highest block height in the atlas db.
    """
    row = None
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "SELECT MAX(block_height) FROM zonefiles;"
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, ())

        # single aggregate row expected
        row = {}
        for r in res:
            row.update(r)
            break

    return row['MAX(block_height)']
Get the highest block height in the atlas db
36,409
def atlasdb_get_zonefiles_missing_count_by_name(name, max_index=None, indexes_exclude=None, con=None, path=None):
    """
    Get the number of missing zone files for a particular name,
    optionally up to a maximum zonefile index, and optionally
    omitting particular zone file indexes from the count.

    Returns an integer (None if the query yields no rows).
    """
    # avoid a shared mutable default argument; None behaves exactly like []
    if indexes_exclude is None:
        indexes_exclude = []

    with AtlasDBOpen(con=con, path=path) as dbcon:
        # excluded indexes are coerced through int(), so inlining them is SQL-safe
        sql = 'SELECT COUNT(*) FROM zonefiles WHERE name = ? AND present = 0 {} {};'.format(
            'AND inv_index <= ?' if max_index is not None else '',
            'AND inv_index NOT IN ({})'.format(','.join([str(int(i)) for i in indexes_exclude])) if len(indexes_exclude) > 0 else ''
        )
        args = (name,)
        if max_index is not None:
            args += (max_index,)

        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, args)
        for row in res:
            return row['COUNT(*)']
Get the number of missing zone files for a particular name optionally up to a maximum zonefile index and optionally omitting particular zone files in the count . Returns an integer
36,410
def atlasdb_get_zonefiles_by_hash(zonefile_hash, block_height=None, con=None, path=None):
    """
    Find all instances of this zone file in the atlasdb.
    Optionally filter on block height.
    Return a list of row dicts, or None if there are no matches.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = 'SELECT * FROM zonefiles WHERE zonefile_hash = ?'
        args = (zonefile_hash,)

        if block_height:
            sql += ' AND block_height = ?'
            args += (block_height,)

        sql += ' ORDER BY inv_index;'

        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, args)

        matches = []
        for zfinfo in res:
            rowdata = {}
            rowdata.update(zfinfo)
            matches.append(rowdata)

    return matches if len(matches) > 0 else None
Find all instances of this zone file in the atlasdb . Optionally filter on block height
36,411
def atlasdb_set_zonefile_tried_storage(zonefile_hash, tried_storage, con=None, path=None):
    """
    Make a note of whether we tried to get the zonefile from storage.
    Always returns True.
    """
    flag = 1 if tried_storage else 0
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "UPDATE zonefiles SET tried_storage = ? WHERE zonefile_hash = ?;"
        cur = dbcon.cursor()
        atlasdb_query_execute(cur, sql, (flag, zonefile_hash))
        dbcon.commit()
    return True
Make a note that we tried to get the zonefile from storage
36,412
def atlasdb_reset_zonefile_tried_storage(con=None, path=None):
    """
    For zonefiles that we don't have, clear the tried_storage flag
    so we re-attempt to fetch them from storage.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "UPDATE zonefiles SET tried_storage = ? WHERE present = ?;"
        cur = dbcon.cursor()
        atlasdb_query_execute(cur, sql, (0, 0))
        dbcon.commit()
    return True
For zonefiles that we don't have, re-attempt to fetch them from storage.
36,413
def atlasdb_cache_zonefile_info(con=None, path=None):
    """
    Load up and cache our zonefile inventory from the database.
    Return the inventory vector.
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK

    with ZONEFILE_INV_LOCK:
        inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
        inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path)
        ZONEFILE_INV = inv
        NUM_ZONEFILES = inv_len

    return inv
Load up and cache our zonefile inventory from the database
36,414
def atlasdb_queue_zonefiles(con, db, start_block, zonefile_dir, recover=False, validate=True, end_block=None):
    """
    Queue all zonefile hashes in the BlockstackDB to the zonefile queue.
    Return the list of queued zonefile records.
    """
    total = 0
    if end_block is None:
        end_block = db.lastblock + 1

    ret = []
    for block_height in range(start_block, end_block, 1):
        zonefile_info = db.get_atlas_zonefile_info_at(block_height)
        for name_txid_zfhash in zonefile_info:
            name = str(name_txid_zfhash['name'])
            zfhash = str(name_txid_zfhash['value_hash'])
            txid = str(name_txid_zfhash['txid'])
            tried_storage = 0

            present = is_zonefile_cached(zfhash, zonefile_dir, validate=validate)
            zfinfo = atlasdb_get_zonefile(zfhash, con=con)
            if zfinfo is not None:
                tried_storage = zfinfo['tried_storage']

            if recover and present:
                # force reprocessing of cached zonefiles during recovery
                log.debug('Recover: assume that {} is absent so we will reprocess it'.format(zfhash))
                present = False

            log.debug("Add %s %s %s at %s (present: %s, tried_storage: %s)" % (name, zfhash, txid, block_height, present, tried_storage))
            atlasdb_add_zonefile_info(name, zfhash, txid, present, tried_storage, block_height, con=con)
            total += 1

            ret.append({
                'name': name,
                'zonefile_hash': zfhash,
                'txid': txid,
                'block_height': block_height,
                'present': present,
                'tried_storage': tried_storage,
            })

    log.debug("Queued %s zonefiles from %s-%s" % (total, start_block, db.lastblock))
    return ret
Queue all zonefile hashes in the BlockstackDB to the zonefile queue
36,415
def atlasdb_sync_zonefiles(db, start_block, zonefile_dir, atlas_state, validate=True, end_block=None, path=None, con=None):
    """
    Synchronize the atlas DB with the name db.
    Return the list of queued zonefile records.
    """
    ret = None
    with AtlasDBOpen(con=con, path=path) as dbcon:
        ret = atlasdb_queue_zonefiles(dbcon, db, start_block, zonefile_dir, validate=validate, end_block=end_block)
        atlasdb_cache_zonefile_info(con=dbcon)

    if atlas_state:
        # hand any zonefiles re-discovered on disk to the crawler for storage
        crawler_thread = atlas_state['zonefile_crawler']
        for zfinfo in filter(lambda zfi: zfi['present'], ret):
            log.debug('Store re-discovered zonefile {} at {}'.format(zfinfo['zonefile_hash'], zfinfo['block_height']))
            crawler_thread.store_zonefile_cb(zfinfo['zonefile_hash'], zfinfo['block_height'])

    return ret
Synchronize atlas DB with name db
36,416
def atlasdb_add_peer(peer_hostport, discovery_time=None, peer_table=None, con=None, path=None, ping_on_evict=True):
    """
    Add a peer to the peer table and peer db.

    If the peer's slot conflicts with another peer, ping the old peer first,
    and only insert the new peer if the old peer is dead.

    Return True if added (or already present); False if an old peer
    in the same slot is still alive.
    """
    assert len(peer_hostport) > 0

    # random nonce makes the slot assignment unpredictable
    sk = random.randint(0, 2 ** 32)
    peer_host, peer_port = url_to_host_port(peer_hostport)
    assert len(peer_host) > 0

    peer_slot = int(hashlib.sha256("%s%s" % (sk, peer_host)).hexdigest(), 16) % PEER_MAX_DB

    with AtlasDBOpen(con=con, path=path) as dbcon:
        if discovery_time is None:
            discovery_time = int(time.time())

        do_evict_and_ping = False
        with AtlasPeerTableLocked(peer_table) as ptbl:
            if peer_hostport in ptbl.keys():
                log.debug("%s already in the peer table" % peer_hostport)
                return True

            if ping_on_evict:
                do_evict_and_ping = True

        if do_evict_and_ping:
            # who currently occupies this slot?
            sql = "SELECT peer_hostport FROM peers WHERE peer_slot = ?;"
            args = (peer_slot,)
            cur = dbcon.cursor()
            res = atlasdb_query_execute(cur, sql, args)

            old_hostports = []
            for row in res:
                # BUG FIX: previously read res['peer_hostport'] (the cursor,
                # not the row), which raised on every slot conflict
                old_hostport = row['peer_hostport']
                old_hostports.append(old_hostport)

            for old_hostport in old_hostports:
                res = atlas_peer_getinfo(old_hostport)
                if res:
                    log.debug("Peer %s is still alive; will not replace" % (old_hostport))
                    return False

        with AtlasPeerTableLocked(peer_table) as ptbl:
            log.debug("Add peer '%s' discovered at %s (slot %s)" % (peer_hostport, discovery_time, peer_slot))
            sql = "INSERT OR REPLACE INTO peers (peer_hostport, peer_slot, discovery_time) VALUES (?,?,?);"
            args = (peer_hostport, peer_slot, discovery_time)
            cur = dbcon.cursor()
            res = atlasdb_query_execute(cur, sql, args)
            dbcon.commit()

            atlas_init_peer_info(ptbl, peer_hostport, blacklisted=False, whitelisted=False)

    return True
Add a peer to the peer table . If the peer conflicts with another peer ping it first and only insert the new peer if the old peer is dead .
36,417
def atlasdb_num_peers(con=None, path=None):
    """
    How many peers are there in the db?
    Returns MAX(peer_index), i.e. None when the table is empty.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "SELECT MAX(peer_index) FROM peers;"
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, ())

        rows = []
        for row in res:
            data = {}
            data.update(row)
            rows.append(data)

        # aggregate query always yields exactly one row
        assert len(rows) == 1
        return rows[0]['MAX(peer_index)']
How many peers are there in the db?
36,418
def atlas_get_peer(peer_hostport, peer_table=None):
    """
    Get the given peer's info from the peer table.
    Return None if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        return ptbl.get(peer_hostport, None)
Get the given peer s info
36,419
def atlasdb_get_random_peer(con=None, path=None):
    """
    Select a peer from the db at random.
    Return its hostport, or None if the table is empty.
    """
    ret = {'peer_hostport': None}
    with AtlasDBOpen(con=con, path=path) as dbcon:
        num_peers = atlasdb_num_peers(con=con, path=path)
        if num_peers is not None and num_peers != 0:
            # peer_index is 1-based
            r = random.randint(1, num_peers)
            sql = "SELECT * FROM peers WHERE peer_index = ?;"
            cur = dbcon.cursor()
            res = atlasdb_query_execute(cur, sql, (r,))
            for row in res:
                ret.update(row)
                break

    return ret['peer_hostport']
Select a peer from the db at random Return None if the table is empty
36,420
def atlasdb_get_old_peers(now, con=None, path=None):
    """
    Get peers discovered before now - max peer age.
    Return the list of matching row dicts.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        if now is None:
            now = time.time()

        expire = now - atlas_peer_max_age()
        sql = "SELECT * FROM peers WHERE discovery_time < ?"
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, (expire,))

        rows = []
        for row in res:
            data = {}
            data.update(row)
            rows.append(data)

        return rows
Get peers older than now - PEER_LIFETIME
36,421
def atlasdb_renew_peer(peer_hostport, now, con=None, path=None):
    """
    Renew a peer's discovery time to `now` (defaults to the current time).
    Always returns True.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        if now is None:
            now = time.time()

        sql = "UPDATE peers SET discovery_time = ? WHERE peer_hostport = ?;"
        cur = dbcon.cursor()
        atlasdb_query_execute(cur, sql, (now, peer_hostport))
        dbcon.commit()

    return True
Renew a peer s discovery time
36,422
def atlasdb_load_peer_table(con=None, path=None):
    """
    Create an in-RAM peer table from the peer DB.
    Return the new peer table.
    """
    peer_table = {}
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "SELECT * FROM peers;"
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, ())

        count = 0
        for row in res:
            # progress log every 100 peers
            if count > 0 and count % 100 == 0:
                log.debug("Loaded %s peers..." % count)

            atlas_init_peer_info(peer_table, row['peer_hostport'])
            count += 1

    return peer_table
Create a peer table from the peer DB
36,423
def atlasdb_zonefile_inv_list(bit_offset, bit_length, con=None, path=None):
    """
    Get an inventory listing; offset and length are in bits.
    Return the list of zonefile row dicts.
    """
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "SELECT * FROM zonefiles LIMIT ? OFFSET ?;"
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, (bit_length, bit_offset))

        listing = []
        for row in res:
            data = {}
            data.update(row)
            listing.append(data)

    return listing
Get an inventory listing . offset and length are in bits .
36,424
def atlas_init_peer_info(peer_table, peer_hostport, blacklisted=False, whitelisted=False):
    """
    Initialize the peer-info table entry for the given peer.
    Mutates peer_table in place.
    """
    peer_table[peer_hostport] = {
        "time": [],            # (timestamp, got-response) contact history
        "zonefile_inv": "",    # last-known zonefile inventory vector
        "blacklisted": blacklisted,
        "whitelisted": whitelisted,
    }
Initialize peer info table entry
36,425
def atlas_log_socket_error(method_invocation, peer_hostport, se):
    """
    Log a socket exception tastefully, based on its type and errno.
    """
    if isinstance(se, socket.timeout):
        log.debug("%s %s: timed out (socket.timeout)" % (method_invocation, peer_hostport))
    elif isinstance(se, socket.gaierror):
        log.debug("%s %s: failed to query address or info (socket.gaierror)" % (method_invocation, peer_hostport))
    elif isinstance(se, socket.herror):
        log.debug("%s %s: failed to query host info (socket.herror)" % (method_invocation, peer_hostport))
    elif isinstance(se, socket.error):
        if se.errno == errno.ECONNREFUSED:
            log.debug("%s %s: is unreachable (socket.error ECONNREFUSED)" % (method_invocation, peer_hostport))
        elif se.errno == errno.ETIMEDOUT:
            log.debug("%s %s: timed out (socket.error ETIMEDOUT)" % (method_invocation, peer_hostport))
        else:
            # unexpected errno: keep the full traceback
            log.debug("%s %s: socket error" % (method_invocation, peer_hostport))
            log.exception(se)
    else:
        log.debug("%s %s: general exception" % (method_invocation, peer_hostport))
        log.exception(se)
Log a socket exception tastefully
36,426
def atlas_peer_ping ( peer_hostport , timeout = None , peer_table = None ) : if timeout is None : timeout = atlas_ping_timeout ( ) assert not atlas_peer_table_is_locked_by_me ( ) host , port = url_to_host_port ( peer_hostport ) RPC = get_rpc_client_class ( ) rpc = RPC ( host , port , timeout = timeout ) log . debug ( "Ping %s" % peer_hostport ) ret = False try : res = blockstack_ping ( proxy = rpc ) if 'error' not in res : ret = True except ( socket . timeout , socket . gaierror , socket . herror , socket . error ) , se : atlas_log_socket_error ( "ping(%s)" % peer_hostport , peer_hostport , se ) pass except Exception , e : log . exception ( e ) pass with AtlasPeerTableLocked ( peer_table ) as ptbl : atlas_peer_update_health ( peer_hostport , ret , peer_table = ptbl ) return ret
Ping a host Return True if alive Return False if not
36,427
def atlas_inventory_count_missing(inv1, inv2):
    """
    Find out how many bits are set in inv2 that are not set in inv1.

    inv1, inv2: inventory bit vectors as byte strings; inv1 may be shorter,
    in which case its missing tail is treated as all-zero.
    Returns the count as an integer.
    """
    count = 0
    for i in range(len(inv2)):
        bits2 = ord(inv2[i])
        # bytes beyond the end of inv1 count as having no bits set
        bits1 = ord(inv1[i]) if i < len(inv1) else 0
        # bits present in inv2 but absent from inv1
        count += bin(bits2 & ~bits1 & 0xff).count('1')

    return count
Find out how many bits are set in inv2 that are not set in inv1 .
36,428
def atlas_revalidate_peers(con=None, path=None, now=None, peer_table=None):
    """
    Revalidate peers that are older than the maximum peer age:
    ping them, and if they don't respond and are unhealthy
    (and neither whitelisted nor blacklisted), remove them;
    otherwise renew their discovery time.
    """
    global MIN_PEER_HEALTH

    if now is None:
        now = time_now()

    old_peer_infos = atlasdb_get_old_peers(now, con=con, path=path)
    for old_peer_info in old_peer_infos:
        res = atlas_peer_getinfo(old_peer_info['peer_hostport'])
        if not res:
            log.debug("Failed to revalidate %s" % (old_peer_info['peer_hostport']))

        # white/blacklisted peers are never aged out
        if atlas_peer_is_whitelisted(old_peer_info['peer_hostport'], peer_table=peer_table):
            continue
        if atlas_peer_is_blacklisted(old_peer_info['peer_hostport'], peer_table=peer_table):
            continue

        if atlas_peer_get_health(old_peer_info['peer_hostport'], peer_table=peer_table) < MIN_PEER_HEALTH:
            atlasdb_remove_peer(old_peer_info['peer_hostport'], con=con, path=path, peer_table=peer_table)
        else:
            atlasdb_renew_peer(old_peer_info['peer_hostport'], now, con=con, path=path)

    return True
Revalidate peers that are older than the maximum peer age . Ping them and if they don t respond remove them .
36,429
def atlas_peer_get_request_count(peer_hostport, peer_table=None):
    """
    Count how many recorded contacts with this peer got a response.
    Return 0 if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return 0

        return sum(1 for t, r in ptbl[peer_hostport]['time'] if r)
How many times have we contacted this peer?
36,430
def atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=None):
    """
    What's the zonefile inventory vector for this peer?
    Return None if the peer is not in the table.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return None

        return ptbl[peer_hostport]['zonefile_inv']
What s the zonefile inventory vector for this peer? Return None if not defined
36,431
def atlas_peer_set_zonefile_inventory(peer_hostport, peer_inv, peer_table=None):
    """
    Set this peer's zonefile inventory vector.
    Return the inventory set, or None if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return None

        ptbl[peer_hostport]['zonefile_inv'] = peer_inv
        return peer_inv
Set this peer s zonefile inventory
36,432
def atlas_peer_is_whitelisted(peer_hostport, peer_table=None):
    """
    Is a peer whitelisted?
    Return None if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return None

        return ptbl[peer_hostport].get("whitelisted", False)
Is a peer whitelisted
36,433
def atlas_peer_update_health(peer_hostport, received_response, peer_table=None):
    """
    Record that we contacted this peer now, and whether it responded.
    Contact records older than the peer lifetime interval are pruned.
    Return True on success; False if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        now = time_now()
        # drop expired contact records, then append this one
        fresh = [(t, r) for (t, r) in ptbl[peer_hostport]['time'] if t + atlas_peer_lifetime_interval() >= now]
        fresh.append((now, received_response))
        ptbl[peer_hostport]['time'] = fresh

    return True
Mark the given peer as alive at this time . Update times at which we contacted it and update its health score .
36,434
def atlas_peer_download_zonefile_inventory(my_hostport, peer_hostport, maxlen, bit_offset=0, timeout=None, peer_table={}):
    """
    Get the zonefile inventory from the remote peer in fixed-size chunks,
    starting from the given bit offset.  Return whatever we fetched.
    """
    if timeout is None:
        timeout = atlas_inv_timeout()

    interval = 524288  # chunk size per request
    peer_inv = ""

    log.debug("Download zonefile inventory %s-%s from %s" % (bit_offset, maxlen, peer_hostport))

    if bit_offset > maxlen:
        # nothing to fetch
        return peer_inv

    for offset in xrange(bit_offset, maxlen, interval):
        next_inv = atlas_peer_get_zonefile_inventory_range(my_hostport, peer_hostport, offset, interval, timeout=timeout, peer_table=peer_table)
        if next_inv is None:
            log.debug("Failed to sync inventory for %s from %s to %s" % (peer_hostport, offset, offset + interval))
            break

        peer_inv += next_inv
        if len(next_inv) < interval:
            # remote peer has no more inventory
            break

    return peer_inv
Get the zonefile inventory from the remote peer Start from the given bit_offset
36,435
def atlas_peer_sync_zonefile_inventory(my_hostport, peer_hostport, maxlen, timeout=None, peer_table=None):
    """
    Synchronize our knowledge of a peer's zonefiles, up to a given bit length.
    NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
    Return the peer's inventory, or None if it is no longer a peer.
    """
    if timeout is None:
        timeout = atlas_inv_timeout()

    peer_inv = ""
    bit_offset = None

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return None

        peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

        # resume from the last (possibly partial) byte of the known inventory
        bit_offset = (len(peer_inv) - 1) * 8
        if bit_offset < 0:
            bit_offset = 0
        else:
            peer_inv = peer_inv[:-1]

    peer_inv = atlas_peer_download_zonefile_inventory(my_hostport, peer_hostport, maxlen, bit_offset=bit_offset, timeout=timeout, peer_table=peer_table)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            log.debug("%s no longer a peer" % peer_hostport)
            return None

        inv_str = atlas_inventory_to_string(peer_inv)
        if len(inv_str) > 40:
            inv_str = inv_str[:40] + "..."

        log.debug("Set zonefile inventory %s: %s" % (peer_hostport, inv_str))
        atlas_peer_set_zonefile_inventory(peer_hostport, peer_inv, peer_table=ptbl)

    return peer_inv
Synchronize our knowledge of a peer's zonefiles up to a given byte length. NOT THREAD SAFE; CALL FROM ONLY ONE THREAD.
36,436
def atlas_peer_refresh_zonefile_inventory(my_hostport, peer_hostport, byte_offset, timeout=None, peer_table=None, con=None, path=None, local_inv=None):
    """
    Refresh a peer's recent zonefile inventory entries: truncate everything
    from byte_offset onward and re-synchronize it.
    Return True on success; False on failure or if the peer disappears.
    """
    if timeout is None:
        timeout = atlas_inv_timeout()

    if local_inv is None:
        # our own inventory length bounds how much we sync
        inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
        local_inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path)

    maxlen = len(local_inv)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        cur_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)
        atlas_peer_set_zonefile_inventory(peer_hostport, cur_inv[:byte_offset], peer_table=ptbl)

    inv = atlas_peer_sync_zonefile_inventory(my_hostport, peer_hostport, maxlen, timeout=timeout, peer_table=peer_table)

    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        # record the refresh time even if the sync failed
        ptbl[peer_hostport]['zonefile_inventory_last_refresh'] = time_now()

    if inv is not None:
        inv_str = atlas_inventory_to_string(inv)
        if len(inv_str) > 40:
            inv_str = inv_str[:40] + "..."
        log.debug("%s: inventory of %s is now '%s'" % (my_hostport, peer_hostport, inv_str))

    return inv is not None
Refresh a peer s zonefile recent inventory vector entries by removing every bit after byte_offset and re - synchronizing them .
36,437
def atlas_peer_has_fresh_zonefile_inventory(peer_hostport, peer_table=None):
    """
    Does the given atlas node have a fresh zonefile inventory?
    Return False for unknown or never-refreshed peers.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        now = time_now()
        # NOTE: fetched for parity with the original behavior; value unused here
        peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

        fresh = False
        if 'zonefile_inventory_last_refresh' in ptbl[peer_hostport] and ptbl[peer_hostport]['zonefile_inventory_last_refresh'] + atlas_peer_ping_interval() > now:
            fresh = True

        return fresh
Does the given atlas node have a fresh zonefile inventory?
36,438
def atlas_find_missing_zonefile_availability(peer_table=None, con=None, path=None, missing_zonefile_info=None):
    """
    Find the set of missing zonefiles, as well as their popularity
    amongst our neighbors.  Return a dict mapping zonefile hash to
    its names, indexes, block heights, popularity, and peers.
    """
    bit_offset = 0
    bit_count = 10000
    missing = []
    ret = {}

    if missing_zonefile_info is None:
        # page through the db for every missing zonefile record
        while True:
            zfinfo = atlasdb_zonefile_find_missing(bit_offset, bit_count, con=con, path=path)
            if len(zfinfo) == 0:
                break

            missing += zfinfo
            bit_offset += len(zfinfo)

        if len(missing) > 0:
            log.debug("Missing %s zonefiles" % len(missing))
    else:
        missing = missing_zonefile_info

    if len(missing) == 0:
        return ret

    with AtlasPeerTableLocked(peer_table) as ptbl:
        for zfinfo in missing:
            popularity = 0
            # inv_index is 1-based; locate this zonefile's bit in the inventory
            byte_index = (zfinfo['inv_index'] - 1) // 8
            bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
            peers = []

            if zfinfo['zonefile_hash'] not in ret:
                ret[zfinfo['zonefile_hash']] = {
                    'names': [],
                    'txid': zfinfo['txid'],
                    'indexes': [],
                    'block_heights': [],
                    'popularity': 0,
                    'peers': [],
                    'tried_storage': False,
                }

            for peer_hostport in ptbl.keys():
                peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)
                if len(peer_inv) <= byte_index:
                    # peer's inventory doesn't extend this far
                    continue
                if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
                    # peer doesn't have this zonefile
                    continue

                if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
                    popularity += 1
                    peers.append(peer_hostport)

            ret[zfinfo['zonefile_hash']]['names'].append(zfinfo['name'])
            ret[zfinfo['zonefile_hash']]['indexes'].append(zfinfo['inv_index'] - 1)
            ret[zfinfo['zonefile_hash']]['block_heights'].append(zfinfo['block_height'])
            ret[zfinfo['zonefile_hash']]['popularity'] += popularity
            ret[zfinfo['zonefile_hash']]['peers'] += peers
            ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']

    return ret
Find the set of missing zonefiles as well as their popularity amongst our neighbors .
36,439
def atlas_peer_has_zonefile(peer_hostport, zonefile_hash, zonefile_bits=None, con=None, path=None, peer_table=None):
    """
    Does the given peer have the given zonefile, according to its
    inventory vector?  Return None if the zonefile is unknown to us;
    False if the peer is unknown.
    """
    if zonefile_bits is None:
        bits = atlasdb_get_zonefile_bits(zonefile_hash, con=con, path=path)
        if len(bits) == 0:
            return None
    else:
        bits = zonefile_bits

    zonefile_inv = None
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False

        zonefile_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl)

    return atlas_inventory_test_zonefile_bits(zonefile_inv, bits)
Does the given peer have the given zonefile defined? Check its inventory vector
36,440
def atlas_peer_get_neighbors ( my_hostport , peer_hostport , timeout = None , peer_table = None , con = None , path = None ) : if timeout is None : timeout = atlas_neighbors_timeout ( ) peer_list = None host , port = url_to_host_port ( peer_hostport ) RPC = get_rpc_client_class ( ) rpc = RPC ( host , port , timeout = timeout , src = my_hostport ) max_neighbors = atlas_max_neighbors ( ) assert not atlas_peer_table_is_locked_by_me ( ) try : peer_list = blockstack_atlas_peer_exchange ( peer_hostport , my_hostport , timeout = timeout , proxy = rpc ) if json_is_exception ( peer_list ) : peer_list = blockstack_get_atlas_peers ( peer_hostport , timeout = timeout , proxy = rpc ) except ( socket . timeout , socket . gaierror , socket . herror , socket . error ) , se : atlas_log_socket_error ( "atlas_peer_exchange(%s)" % peer_hostport , peer_hostport , se ) log . error ( "Socket error in response from '%s'" % peer_hostport ) except Exception , e : if os . environ . get ( "BLOCKSTACK_DEBUG" ) == "1" : log . exception ( e ) log . error ( "Failed to talk to '%s'" % peer_hostport ) if peer_list is None : log . error ( "Failed to query remote peer %s" % peer_hostport ) atlas_peer_update_health ( peer_hostport , False , peer_table = peer_table ) return None if 'error' in peer_list : log . debug ( "Remote peer error: %s" % peer_list [ 'error' ] ) log . error ( "Remote peer error on %s" % peer_hostport ) atlas_peer_update_health ( peer_hostport , False , peer_table = peer_table ) return None ret = peer_list [ 'peers' ] atlas_peer_update_health ( peer_hostport , True , peer_table = peer_table ) return ret
Ask the peer server at the given URL for its neighbors .
36,441
def atlas_get_zonefiles ( my_hostport , peer_hostport , zonefile_hashes , timeout = None , peer_table = None ) : if timeout is None : timeout = atlas_zonefiles_timeout ( ) zf_payload = None zonefile_datas = { } host , port = url_to_host_port ( peer_hostport ) RPC = get_rpc_client_class ( ) rpc = RPC ( host , port , timeout = timeout , src = my_hostport ) assert not atlas_peer_table_is_locked_by_me ( ) zf_batches = [ ] for i in xrange ( 0 , len ( zonefile_hashes ) , 100 ) : zf_batches . append ( zonefile_hashes [ i : i + 100 ] ) for zf_batch in zf_batches : zf_payload = None try : zf_payload = blockstack_get_zonefiles ( peer_hostport , zf_batch , timeout = timeout , my_hostport = my_hostport , proxy = rpc ) except ( socket . timeout , socket . gaierror , socket . herror , socket . error ) , se : atlas_log_socket_error ( "get_zonefiles(%s)" % peer_hostport , peer_hostport , se ) except Exception , e : if os . environ . get ( "BLOCKSTACK_DEBUG" ) is not None : log . exception ( e ) log . error ( "Invalid zonefile data from %s" % peer_hostport ) if zf_payload is None : log . error ( "Failed to fetch zonefile data from %s" % peer_hostport ) atlas_peer_update_health ( peer_hostport , False , peer_table = peer_table ) zonefile_datas = None break if 'error' in zf_payload . keys ( ) : log . error ( "Failed to fetch zonefile data from %s: %s" % ( peer_hostport , zf_payload [ 'error' ] ) ) atlas_peer_update_health ( peer_hostport , False , peer_table = peer_table ) zonefile_datas = None break zonefile_datas . update ( zf_payload [ 'zonefiles' ] ) atlas_peer_update_health ( peer_hostport , True , peer_table = peer_table ) return zonefile_datas
Given a list of zonefile hashes, go and get them from the given host.
36,442
def atlas_rank_peers_by_data_availability( peer_list=None, peer_table=None, local_inv=None, con=None, path=None ):
    """
    Rank peers by how many zonefiles they have that we are missing.
    Peers offering the most missing zonefiles come first; peers with an
    empty (unknown) inventory are skipped entirely.

    Return the ranked list of peer hostports.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        candidates = peer_list if peer_list is not None else list(ptbl.keys())

        if local_inv is None:
            # load our own zonefile inventory for comparison
            inv_len = atlasdb_zonefile_inv_length(con=con, path=path)
            local_inv = atlas_make_zonefile_inventory(0, inv_len, con=con, path=path)

        scored = []
        for hostport in candidates:
            remote_inv = atlas_peer_get_zonefile_inventory(hostport, peer_table=ptbl)
            if len(remote_inv) == 0:
                # no inventory known for this peer; skip it
                continue

            score = atlas_inventory_count_missing(local_inv, remote_inv)
            scored.append( (score, hostport) )

        # highest availability first (ascending stable sort, then reverse,
        # preserving the long-standing tie-break order)
        scored.sort()
        scored.reverse()

        return [hostport for _, hostport in scored]
Get a ranking of peers to contact for a zonefile . Peers are ranked by the number of zonefiles they have which we don t have .
36,443
def atlas_peer_dequeue_all( peer_queue=None ):
    """
    Drain the queued-peers list and return every entry, in FIFO order.
    """
    drained = []
    with AtlasPeerQueueLocked(peer_queue) as pq:
        while len(pq) > 0:
            drained.append( pq.pop(0) )

    return drained
Get all queued peers
36,444
def atlas_zonefile_push_enqueue( zonefile_hash, name, txid, zonefile_data, zonefile_queue=None, con=None, path=None ):
    """
    Enqueue the given zonefile for replication to storage and other peers.
    Only zonefiles we actually index (i.e. with known inventory bit
    positions) are enqueued, and only while the queue has room.

    Return True if enqueued; False otherwise.
    (Bug fix: previously fell through a bare `return`, yielding None instead
    of False when the zonefile had no index bits.)
    """
    res = False
    bits = atlasdb_get_zonefile_bits(zonefile_hash, path=path, con=con)
    if len(bits) == 0:
        # we do not track this zonefile; nothing to push
        return False

    with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
        if len(zfq) < MAX_QUEUED_ZONEFILES:
            zfdata = {
                'zonefile_hash': zonefile_hash,
                'zonefile': zonefile_data,
                'name': name,
                'txid': txid,
            }
            zfq.append(zfdata)
            res = True

    return res
Enqueue the given zonefile into our push queue from which it will be replicated to storage and sent out to other peers who don t have it .
36,445
def atlas_zonefile_push_dequeue( zonefile_queue=None ):
    """
    Pop the next zonefile-push entry off the queue.
    Return the entry dict, or None if the queue is empty.
    """
    entry = None
    with AtlasZonefileQueueLocked(zonefile_queue) as zfq:
        if len(zfq) > 0:
            entry = zfq.pop(0)

    return entry
Dequeue a zonefile s information to replicate Return None if there are none queued
36,446
def atlas_zonefile_push( my_hostport, peer_hostport, zonefile_data, timeout=None, peer_table=None ):
    """
    Push the given zonefile (raw text) to the given peer.
    Updates the peer's health record with the outcome.

    Return True on success, False on failure.
    (Bug fix: the success branch previously assigned an unused local
    `saved = True` instead of `status = True`, so this function always
    reported failure and always marked the peer unhealthy.)
    """
    if timeout is None:
        timeout = atlas_push_zonefiles_timeout()

    zonefile_hash = get_zonefile_data_hash(zonefile_data)
    zonefile_data_b64 = base64.b64encode(zonefile_data)

    host, port = url_to_host_port(peer_hostport)
    RPC = get_rpc_client_class()
    rpc = RPC(host, port, timeout=timeout, src=my_hostport)

    status = False

    # we must not hold the peer-table lock across a network call
    assert not atlas_peer_table_is_locked_by_me()

    try:
        push_info = blockstack_put_zonefiles(peer_hostport, [zonefile_data_b64], timeout=timeout, my_hostport=my_hostport, proxy=rpc)
        if 'error' not in push_info:
            if push_info['saved'] == 1:
                status = True

    except (socket.timeout, socket.gaierror, socket.herror, socket.error) as se:
        atlas_log_socket_error("put_zonefiles(%s)" % peer_hostport, peer_hostport, se)

    except AssertionError as ae:
        log.exception(ae)
        log.error("Invalid server response from %s" % peer_hostport)

    except Exception as e:
        log.exception(e)
        log.error("Failed to push zonefile %s to %s" % (zonefile_hash, peer_hostport))

    with AtlasPeerTableLocked(peer_table) as ptbl:
        atlas_peer_update_health(peer_hostport, status, peer_table=ptbl)

    return status
Push the given zonefile to the given peer Return True on success Return False on failure
36,447
def atlas_node_init( my_hostname, my_portnum, atlasdb_path, zonefile_dir, working_dir ):
    """
    Construct (but do not start) the atlas node's component threads.
    Return them bundled in a dict keyed by component name.
    """
    return {
        'peer_crawler': AtlasPeerCrawler(my_hostname, my_portnum, atlasdb_path, working_dir),
        'health_checker': AtlasHealthChecker(my_hostname, my_portnum, atlasdb_path),
        'zonefile_crawler': AtlasZonefileCrawler(my_hostname, my_portnum, atlasdb_path, zonefile_dir),
    }
Start up the atlas node . Return a bundle of atlas state
36,448
def atlas_node_start( atlas_state ):
    """
    Start every atlas component thread in the given state bundle.
    """
    for component_name, component in atlas_state.items():
        log.debug("Starting Atlas component '%s'" % component_name)
        component.start()
Start up atlas threads
36,449
def atlas_node_add_callback( atlas_state, callback_name, callback ):
    """
    Register a callback on the initialized atlas state.
    Only 'store_zonefile' is recognized; raises ValueError otherwise.
    """
    if callback_name != 'store_zonefile':
        raise ValueError("Unrecognized callback {}".format(callback_name))

    atlas_state['zonefile_crawler'].set_store_zonefile_callback(callback)
Add a callback to the initialized atlas state
36,450
def atlas_node_stop( atlas_state ):
    """
    Ask each atlas component thread to stop, and wait for it to exit.
    Return True.
    """
    for component_name in atlas_state.keys():
        log.debug("Stopping Atlas component '%s'" % component_name)
        worker = atlas_state[component_name]
        worker.ask_join()
        worker.join()

    return True
Stop the atlas node threads
36,451
def canonical_peer( self, peer ):
    """
    Canonicalize a peer "host:port" string: loopback addresses
    ('127.0.0.1', '::1') become 'localhost'.
    """
    peer_host, peer_port = url_to_host_port( peer )
    if peer_host in ['127.0.0.1', '::1']:
        peer_host = 'localhost'

    return "%s:%s" % (peer_host, peer_port)
Get the canonical peer name
36,452
def remove_unhealthy_peers( self, count, con=None, path=None, peer_table=None, min_request_count=10, min_health=MIN_PEER_HEALTH ):
    """
    Remove up to `count` unhealthy peers from the peer db.
    A peer is eligible for removal if it has served at least
    min_request_count requests, its health rank is below min_health,
    and it is neither whitelisted nor blacklisted.
    The set of removed peers is randomized so we don't always evict
    the same ones.

    Return the list of removed peers.
    """
    if path is None:
        path = self.atlasdb_path

    removed = []
    rank_peer_list = atlas_rank_peers_by_health( peer_table=peer_table, with_rank=True )
    for rank, peer in rank_peer_list:
        reqcount = atlas_peer_get_request_count( peer, peer_table=peer_table )
        if reqcount >= min_request_count and rank < min_health and not atlas_peer_is_whitelisted( peer, peer_table=peer_table ) and not atlas_peer_is_blacklisted( peer, peer_table=peer_table ):
            removed.append( peer )

    # randomize, then cap at `count`
    random.shuffle( removed )
    if len(removed) > count:
        removed = removed[:count]

    for peer in removed:
        log.debug("Remove unhealthy peer %s" % (peer))
        atlasdb_remove_peer( peer, con=con, path=path, peer_table=peer_table )

    return removed
Remove up to
36,453
def get_current_peers( self, peer_table=None ):
    """
    Take a snapshot of the current set of peer hostports.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        snapshot = list(ptbl.keys())

    return snapshot
Get the current set of peers
36,454
def canonical_new_peer_list( self, peers_to_add ):
    """
    Merge peers_to_add with self.new_peers, deduplicate, shuffle,
    canonicalize every entry, and drop our own hostport.
    Return the resulting list.
    """
    merged = list(set(self.new_peers + peers_to_add))
    random.shuffle(merged)

    canonical = [self.canonical_peer(p) for p in merged]

    # don't talk to ourselves
    if self.my_hostport in canonical:
        canonical.remove(self.my_hostport)

    return canonical
Make a list of canonical new peers using the self . new_peers and the given peers to add
36,455
def step( self, con=None, path=None, peer_table=None, local_inv=None ):
    """
    Run one pass of the health checker: find every peer whose zonefile
    inventory is stale and refresh it over the network.
    The peer table lock is held only while scanning for stale peers,
    not across the network calls.

    Return None.
    """
    if path is None:
        path = self.atlasdb_path

    peer_hostports = []
    stale_peers = []

    num_peers = None
    peer_hostports = None

    with AtlasPeerTableLocked(peer_table) as ptbl:
        num_peers = len(ptbl.keys())
        peer_hostports = ptbl.keys()[:]

        # find peers with stale zonefile inventory data
        for peer in peer_hostports:
            if not atlas_peer_has_fresh_zonefile_inventory( peer, peer_table=ptbl ):
                stale_peers.append(peer)
                log.debug("Peer %s has a stale zonefile inventory" % peer)

    if len(stale_peers) > 0:
        log.debug("Refresh zonefile inventories for %s peers" % len(stale_peers))

    for peer_hostport in stale_peers:
        # refresh outside the lock (network call)
        log.debug("%s: Refresh zonefile inventory for %s" % (self.hostport, peer_hostport))
        res = atlas_peer_refresh_zonefile_inventory( self.hostport, peer_hostport, 0, con=con, path=path, peer_table=peer_table, local_inv=local_inv )
        if res is None:
            log.warning("Failed to refresh zonefile inventory for %s" % peer_hostport)

    return
Find peers with stale zonefile inventory data and refresh them .
36,456
def run( self, peer_table=None ):
    """
    Main loop of the health-check thread: run one step() pass, then sleep
    (in 1-second interruptible increments) so passes are at least
    PEER_HEALTH_NEIGHBOR_WORK_INTERVAL seconds apart.  Exits when
    self.running becomes False.
    """
    self.running = True
    while self.running:
        local_inv = atlas_get_zonefile_inventory()

        t1 = time_now()
        self.step( peer_table=peer_table, local_inv=local_inv, path=self.atlasdb_path )
        t2 = time_now()

        # pace ourselves; sleep in small slices so a stop request is noticed quickly
        if t2 - t1 < PEER_HEALTH_NEIGHBOR_WORK_INTERVAL:
            deadline = time_now() + PEER_HEALTH_NEIGHBOR_WORK_INTERVAL - (t2 - t1)
            while time_now() < deadline and self.running:
                time_sleep( self.hostport, self.__class__.__name__, 1.0 )

            if not self.running:
                break
Loop forever pinging someone every pass .
36,457
def set_zonefile_present( self, zfhash, block_height, con=None, path=None ):
    """
    Mark a zonefile as present in the atlas db.  If it was newly marked
    present (not seen before) and a storage callback is registered,
    notify the callback.
    """
    was_present = atlasdb_set_zonefile_present(zfhash, True, con=con, path=path)

    if was_present or not self.store_zonefile_cb:
        log.debug('{} was seen before, so not passing it along to zonefile storage watchers'.format(zfhash))
        return

    log.debug('{} was new, so passing it along to zonefile storage watchers...'.format(zfhash))
    self.store_zonefile_cb(zfhash, block_height)
Set a zonefile as present and if it was previously absent inform the storage listener
36,458
def find_zonefile_origins( self, missing_zfinfo, peer_hostports ):
    """
    Determine which peers can serve which missing zonefiles.

    missing_zfinfo: dict mapping zonefile hash -> info dict with a 'peers' list
    peer_hostports: list of candidate peer hostports

    Return a dict mapping each peer hostport to the list of zonefile hashes
    it can serve (every peer gets a key, possibly with an empty list).
    (Idiom fix: replaced the Python-2-only dict.has_key() with setdefault().)
    """
    zonefile_origins = {}

    for zfhash in missing_zfinfo.keys():
        for peer_hostport in peer_hostports:
            zonefile_origins.setdefault(peer_hostport, [])
            if peer_hostport in missing_zfinfo[zfhash]['peers']:
                zonefile_origins[peer_hostport].append(zfhash)

    return zonefile_origins
Find out which peers can serve which zonefiles
36,459
def step( self, peer_table=None, zonefile_queue=None, path=None ):
    """
    Run one step of the zonefile-push algorithm: dequeue one zonefile,
    replicate it to local storage, and push it to every peer that does
    not yet have it.

    Return the number of peers we pushed to (0 if nothing to do).
    """
    if path is None:
        path = self.atlasdb_path

    if BLOCKSTACK_TEST:
        log.debug("%s: %s step" % (self.hostport, self.__class__.__name__))

    if self.push_timeout is None:
        self.push_timeout = atlas_push_zonefiles_timeout()

    zfinfo = atlas_zonefile_push_dequeue( zonefile_queue=zonefile_queue )
    if zfinfo is None:
        # nothing queued
        return 0

    zfhash = zfinfo['zonefile_hash']
    zfdata_txt = zfinfo['zonefile']
    name = zfinfo['name']
    txid = zfinfo['txid']

    zfbits = atlasdb_get_zonefile_bits( zfhash, path=path )
    if len(zfbits) == 0:
        # we don't track this zonefile; nothing to do
        return 0

    # replicate to local storage first (best-effort)
    rc = add_atlas_zonefile_data( str(zfdata_txt), self.zonefile_dir )
    if not rc:
        log.error("Failed to replicate zonefile %s to external storage" % zfhash)

    # find peers that still need this zonefile (lock held only for the lookup)
    peers = None
    with AtlasPeerTableLocked(peer_table) as ptbl:
        peers = atlas_zonefile_find_push_peers( zfhash, peer_table=ptbl, zonefile_bits=zfbits )

    if len(peers) == 0:
        log.debug("%s: All peers have zonefile %s" % (self.hostport, zfhash))
        return 0

    ret = 0
    for peer in peers:
        log.debug("%s: Push to %s" % (self.hostport, peer))
        atlas_zonefile_push( self.hostport, peer, zfdata_txt, timeout=self.push_timeout )
        ret += 1

    return ret
Run one step of this algorithm . Push the zonefile to all the peers that need it . Return the number of peers we sent to
36,460
def queuedb_create( path ):
    """
    Create the queue sqlite3 db at the given path, with all tables and
    indexes from QUEUE_SQL.  Raises if a table already exists.
    Return the open connection (with the dict row factory installed).
    """
    global QUEUE_SQL, ERROR_SQL

    statements = ["{};".format(stmt) for stmt in QUEUE_SQL.split(";")]

    con = sqlite3.connect( path, isolation_level=None )
    db_query_execute(con, 'pragma mmap_size=536870912', ())

    for stmt in statements:
        db_query_execute(con, stmt, ())

    con.commit()
    con.row_factory = queuedb_row_factory
    return con
Create a sqlite3 db at the given path . Create all the tables and indexes we need . Raises if the table already exists
36,461
def queuedb_row_factory( cursor, row ):
    """
    sqlite3 row factory: map a result row tuple to a dict keyed by column name.
    """
    return dict( (col[0], row[idx]) for idx, col in enumerate(cursor.description) )
Dict row factory
36,462
def queuedb_findall( path, queue_id, name=None, offset=None, limit=None ):
    """
    Get all queued entries for a queue, optionally filtered by name and
    paginated with limit/offset.  Rows come back in rowid (FIFO) order.

    Return a list of row dicts.
    Raise if the db cannot be opened.

    (Bug fix: the statement was previously assembled by appending
    "AND name = ?" *after* the ORDER BY clause, which is invalid SQL, and
    OFFSET was emitted without a LIMIT, which SQLite also rejects.  Clauses
    are now assembled in the grammatically required order.)
    """
    sql = "SELECT * FROM queue WHERE queue_id = ?"
    args = (queue_id,)

    if name:
        sql += " AND name = ?"
        args += (name,)

    sql += " ORDER BY rowid ASC"

    if limit:
        sql += " LIMIT ?"
        args += (limit,)

    if offset:
        if not limit:
            # SQLite requires a LIMIT clause before OFFSET; -1 means "no limit"
            sql += " LIMIT -1"
        sql += " OFFSET ?"
        args += (offset,)

    sql += ";"

    db = queuedb_open(path)
    if db is None:
        raise Exception("Failed to open %s" % path)

    cur = db.cursor()
    rows = queuedb_query_execute(cur, sql, args)

    ret = []
    for row in rows:
        dat = {}
        dat.update(row)
        ret.append(dat)

    db.close()
    return ret
Get all queued entries for a queue and a name . If name is None then find all queue entries
36,463
def queuedb_append( path, queue_id, name, data ):
    """
    Append an element to the back of the given queue.
    Return True on success; raise if the db cannot be opened.
    """
    db = queuedb_open(path)
    if db is None:
        raise Exception("Failed to open %s" % path)

    cur = db.cursor()
    queuedb_query_execute(cur, "INSERT INTO queue VALUES (?,?,?);", (name, queue_id, data))

    db.commit()
    db.close()
    return True
Append an element to the back of the queue . Return True on success Raise on error
36,464
def queuedb_remove( path, entry, cur=None ):
    """
    Remove an element from a queue.  If a cursor is supplied, the delete
    runs on it and the caller is responsible for committing; otherwise the
    db at path is opened, committed, and closed here.

    Return True on success; raise if the db cannot be opened.
    """
    sql = "DELETE FROM queue WHERE queue_id = ? AND name = ?;"
    args = (entry['queue_id'], entry['name'])

    db = None
    if cur is None:
        db = queuedb_open(path)
        if db is None:
            raise Exception("Failed to open %s" % path)
        cursor = db.cursor()
    else:
        cursor = cur

    queuedb_query_execute(cursor, sql, args)

    if db is not None:
        db.commit()
        db.close()

    return True
Remove an element from a queue . Return True on success Raise on error
36,465
def queuedb_removeall( path, entries ):
    """
    Remove all the given entries from the queue at path, wrapped in a
    single explicit transaction.
    Return True on success; raise if the db cannot be opened.
    """
    db = queuedb_open(path)
    if db is None:
        raise Exception("Failed to open %s" % path)

    cursor = db.cursor()
    queuedb_query_execute(cursor, 'BEGIN', ())

    for queued_entry in entries:
        queuedb_remove(path, queued_entry, cur=cursor)

    queuedb_query_execute(cursor, 'END', ())

    db.commit()
    db.close()
    return True
Remove all entries from a queue
36,466
def check_payment_in_stacks( state_engine, nameop, state_op_type, fee_block_id ):
    """
    Verify that, if STACKS tokens were paid for a name whose namespace is
    priced in BTC, enough tokens were paid (at the STACKS price for the
    name at fee_block_id).  Inspects only the transaction data; does not
    check account balances or namespace types.

    Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if a
    sufficient token payment was made.
    Return {'status': False} otherwise (including when no token payment
    was present at all).
    """
    name = nameop['name']
    namespace_id = get_namespace_from_name(name)
    name_without_namespace = get_name_from_fq_name(name)
    namespace = state_engine.get_namespace(namespace_id)

    stacks_payment_info = get_stacks_payment(state_engine, nameop, state_op_type)
    if stacks_payment_info['status']:
        # this transaction paid tokens, even though the namespace is BTC-priced
        tokens_paid = stacks_payment_info['tokens_paid']
        token_units = stacks_payment_info['token_units']
        log.debug('Transaction pays {} units of {} for {}, even though its namespace was priced in BTC'.format(tokens_paid, token_units, name))

        # did it pay at least the STACKS price of this name?
        stacks_price = price_name_stacks(name_without_namespace, namespace, fee_block_id)
        res = check_token_payment(name, stacks_price, stacks_payment_info)
        if res['status']:
            return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}

    return {'status': False}
Verify that if tokens were paid for a name priced in BTC that enough were paid . Does not check account balances or namespace types ; it only inspects the transaction data .
36,467
def check_payment( state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id ):
    """
    Verify that the right payment was made in the right cryptocurrency
    units for a NAME_REGISTRATION or NAME_RENEWAL, dispatching on the
    namespace's version bits.  Only checks that the transaction paid for
    the name; does not touch accounts or modify nameop.

    Return {'status': True, 'tokens_paid': ..., 'token_units': ...} on success.
    Return {'status': False, ...} on failure (including unknown namespace versions).
    """
    assert state_op_type in ['NAME_REGISTRATION', 'NAME_RENEWAL'], 'Invalid op type {}'.format(state_op_type)
    assert name_fee is not None
    assert isinstance(name_fee, (int, long))

    name = nameop['name']
    namespace_id = get_namespace_from_name(name)
    namespace = state_engine.get_namespace(namespace_id)

    res = None
    log.debug('{} is a version-0x{} namespace'.format(namespace['namespace_id'], namespace['version']))

    # dispatch on the namespace payment rules
    if namespace['version'] == NAMESPACE_VERSION_PAY_TO_BURN:
        res = check_payment_v1(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)

    elif namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR:
        res = check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)

    elif namespace['version'] == NAMESPACE_VERSION_PAY_WITH_STACKS:
        res = check_payment_v3(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)

    else:
        # unknown rules: reject
        log.warning("Namespace {} has version bits 0x{:x}, which has unknown registration rules".format(namespace['namespace_id'], namespace['version']))
        return {'status': False}

    if not res['status']:
        return res

    tokens_paid = res['tokens_paid']
    token_units = res['token_units']
    return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}
Verify that the right payment was made in the right cryptocurrency units . Does not check any accounts or modify the nameop in any way ; it only checks that the name was paid for by the transaction .
36,468
def check( state_engine, nameop, block_id, checked_ops ):
    """
    Given a NAMESPACE_PREORDER nameop, see if we can accept it.
    The preorder hash must be new, the consensus hash must be valid for
    block_id, a fee must be present, and the burn address must be the
    canonical burn address.  In epochs with EPOCH_FEATURE_STACKS_BUY_NAMESPACES,
    the preordering account must also have enough STACKS to cover token_fee.

    Side effect: annotates nameop with 'token_fee'/'token_units' and the
    account payment info used later in the commit path.

    Return True if accepted, False if rejected.
    """
    namespace_id_hash = nameop['preorder_hash']
    consensus_hash = nameop['consensus_hash']
    token_fee = nameop['token_fee']

    # cannot collide with an existing preorder
    if not state_engine.is_new_namespace_preorder(namespace_id_hash):
        log.warning("Namespace preorder '%s' already in use" % namespace_id_hash)
        return False

    # consensus hash must be fresh
    if not state_engine.is_consensus_hash_valid(block_id, consensus_hash):
        valid_consensus_hashes = state_engine.get_valid_consensus_hashes(block_id)
        log.warning("Invalid consensus hash '%s': expected any of %s" % (consensus_hash, ",".join(valid_consensus_hashes)))
        return False

    # must have paid a fee
    if not 'op_fee' in nameop:
        log.warning("Missing namespace preorder fee")
        return False

    # must burn to the canonical burn address
    if nameop['burn_address'] != BLOCKSTACK_BURN_ADDRESS:
        log.warning("Invalid burn address: expected {}, got {}".format(BLOCKSTACK_BURN_ADDRESS, nameop['burn_address']))
        return False

    epoch_features = get_epoch_features(block_id)
    if EPOCH_FEATURE_STACKS_BUY_NAMESPACES in epoch_features:
        # namespaces must be bought with STACKS in this epoch
        if 'token_fee' not in nameop:
            log.warning("Missing token fee")
            return False

        token_fee = nameop['token_fee']
        token_address = nameop['address']
        token_type = TOKEN_TYPE_STACKS

        if token_fee is None:
            log.warning("No tokens paid by this NAMESPACE_PREORDER")
            return False

        # preordering address must have an account with sufficient balance
        account_info = state_engine.get_account(token_address, token_type)
        if account_info is None:
            log.warning("No account for {} ({})".format(token_address, token_type))
            return False

        account_balance = state_engine.get_account_balance(account_info)

        assert isinstance(account_balance, (int, long)), 'BUG: account_balance of {} is {} (type {})'.format(token_address, account_balance, type(account_balance))
        assert isinstance(token_fee, (int, long)), 'BUG: token_fee is {} (type {})'.format(token_fee, type(token_fee))

        if account_balance < token_fee:
            log.warning("Account {} has balance {} {}, but needs to pay {} {}".format(token_address, account_balance, token_type, token_fee, token_type))
            return False

        # record the pending account debit
        state_preorder_put_account_payment_info(nameop, token_address, token_type, token_fee)

        # NOTE: token_fee is serialized to a string to avoid JSON overflow on big ints
        nameop['token_fee'] = '{}'.format(token_fee)
        nameop['token_units'] = TOKEN_TYPE_STACKS

    else:
        # pre-STACKS epoch: no token payment
        state_preorder_put_account_payment_info(nameop, None, None, None)
        nameop['token_fee'] = '0'
        nameop['token_units'] = 'BTC'

    return True
Given a NAMESPACE_PREORDER nameop see if we can preorder it . It must be unqiue .
36,469
def snapshot_peek_number( fd, off ):
    """
    Read the 8 characters immediately before offset `off` in fd and decode
    them as a hexadecimal integer.
    Return the integer, or None if fewer than 8 bytes were read or they
    are not valid hex.
    """
    fd.seek(off - 8, os.SEEK_SET)
    digits = fd.read(8)
    if len(digits) != 8:
        # truncated file
        return None

    try:
        return int(digits, 16)
    except ValueError:
        # not a hex number
        return None
Read the last 8 bytes of fd and interpret it as an int .
36,470
def get_file_hash( fd, hashfunc, fd_len=None ):
    """
    Hash the contents of fd (from the start) with hashfunc, reading in
    64KB chunks.  If fd_len is given, hash at most the first fd_len bytes.
    Return the hex digest.
    """
    digest = hashfunc()
    fd.seek(0, os.SEEK_SET)

    consumed = 0
    while True:
        chunk = fd.read(65536)
        if not chunk:
            break

        if fd_len is not None and consumed + len(chunk) > fd_len:
            # trim to the requested payload length
            chunk = chunk[:fd_len - consumed]

        digest.update(chunk)
        consumed += len(chunk)

    return digest.hexdigest()
Get the hex - encoded hash of the fd s data
36,471
def fast_sync_sign_snapshot( snapshot_path, private_key, first=False ):
    """
    Append a signature to the end of a snapshot file with the given
    private key.  If first is True, this is the first signature on a bare
    payload; otherwise the existing signature trailer is inspected to find
    where to append.  The trailer layout written here is:
    <sig base64><8-hex-char sig length><8-hex-char signature count>.

    Return True on success, False on error.
    """
    if not os.path.exists(snapshot_path):
        log.error("No such file or directory: {}".format(snapshot_path))
        return False

    file_size = 0
    payload_size = 0
    write_offset = 0
    try:
        sb = os.stat(snapshot_path)
        file_size = sb.st_size
        assert file_size > 8
    except Exception as e:
        log.exception(e)
        return False

    num_sigs = 0
    snapshot_hash = None
    with open(snapshot_path, 'r+') as f:
        if not first:
            # find out where the existing signatures end
            info = fast_sync_inspect(f)
            if 'error' in info:
                log.error("Failed to inspect {}: {}".format(snapshot_path, info['error']))
                return False

            num_sigs = len(info['signatures'])
            write_offset = info['sig_append_offset']
            payload_size = info['payload_size']

        else:
            # no trailer yet: the whole file is the payload
            write_offset = file_size
            num_sigs = 0
            payload_size = file_size

        # sign the sha256 of the payload (not the trailer)
        privkey_hex = keylib.ECPrivateKey(private_key).to_hex()
        hash_hex = get_file_hash( f, hashlib.sha256, fd_len=payload_size )
        sigb64 = sign_digest( hash_hex, privkey_hex, hashfunc=hashlib.sha256 )

        if BLOCKSTACK_TEST:
            log.debug("Signed {} with {} to make {}".format(hash_hex, keylib.ECPrivateKey(private_key).public_key().to_hex(), sigb64))

        # append signature, its length, and the new signature count
        f.seek(write_offset, os.SEEK_SET)
        f.write(sigb64)
        f.write('{:08x}'.format(len(sigb64)))

        num_sigs += 1
        f.write('{:08x}'.format(num_sigs))

        f.flush()
        os.fsync(f.fileno())

    return True
Append a signature to the end of a snapshot path with the given private key .
36,472
def fast_sync_snapshot_compress( snapshot_dir, export_path ):
    """
    Compress the contents of snapshot_dir into a bz2 tarball at export_path.
    Refuses to overwrite an existing export_path.  Temporarily chdirs into
    snapshot_dir so archive members are relative paths; the original cwd is
    always restored.

    Return {'status': True} on success.
    Return {'error': ...} if export_path already exists.
    """
    snapshot_dir = os.path.abspath(snapshot_dir)
    export_path = os.path.abspath(export_path)
    if os.path.exists(export_path):
        return {'error': 'Snapshot path exists: {}'.format(export_path)}

    old_dir = os.getcwd()

    # mutable cell so the nested callback can count files
    count_ref = [0]

    def print_progress(tarinfo):
        # tarfile "filter" callback: log progress every 100 files
        count_ref[0] += 1
        if count_ref[0] % 100 == 0:
            log.debug("{} files compressed...".format(count_ref[0]))
        return tarinfo

    try:
        os.chdir(snapshot_dir)
        with tarfile.TarFile.bz2open(export_path, "w") as f:
            f.add(".", filter=print_progress)
    except:
        # restore cwd before propagating (finally also restores; the double
        # chdir is harmless)
        os.chdir(old_dir)
        raise
    finally:
        os.chdir(old_dir)

    return {'status': True}
Given the path to a directory compress it and export it to the given path .
36,473
def fast_sync_snapshot_decompress( snapshot_path, output_dir ):
    """
    Decompress a fast-sync snapshot (a bz2-compressed tarball) into
    output_dir, creating output_dir if it does not exist.

    Return {'status': True} on success.
    Return {'error': ...} if snapshot_path is not a tar-compatible archive.

    (Idiom fix: the original invoked tarfile.TarFile.extractall(f, ...) as
    an unbound method; the bound call f.extractall(...) is equivalent and
    portable.)
    """
    if not tarfile.is_tarfile(snapshot_path):
        return {'error': 'Not a tarfile-compatible archive: {}'.format(snapshot_path)}

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    with tarfile.TarFile.bz2open(snapshot_path, 'r') as f:
        # NOTE(review): extractall() trusts member paths, so a malicious
        # archive could escape output_dir (path traversal).  Snapshots are
        # presumably fetched from trusted mirrors -- confirm before exposing
        # this to untrusted input.
        f.extractall(path=output_dir)

    return {'status': True}
Given the path to a snapshot file decompress it and write its contents to the given output directory
36,474
def fast_sync_fetch( working_dir, import_url ):
    """
    Download an import snapshot from import_url into a fresh temporary
    file under working_dir.

    Return the temporary file path on success.
    Return None on error (temp-file creation or download failure).
    """
    try:
        fd, tmppath = tempfile.mkstemp(prefix='.blockstack-fast-sync-', dir=working_dir)
    except Exception, e:
        log.exception(e)
        return None

    log.debug("Fetch {} to {}...".format(import_url, tmppath))
    try:
        # urlretrieve writes directly into tmppath
        path, headers = urllib.urlretrieve(import_url, tmppath)
    except Exception, e:
        os.close(fd)
        log.exception(e)
        return None

    os.close(fd)
    return tmppath
Get the data for an import snapshot . Store it to a temporary path Return the path on success Return None on error
36,475
def state_check_collisions( state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker ):
    """
    See that there are no state-creating or state-preordering collisions
    at this block for this history ID, by invoking the named
    collision-checker method on the state engine.

    A malformed decorator setup (missing checker, non-callable checker, or
    missing keys in nameop) is a programming error and aborts the process.

    Return True if collided; False if not.
    """
    # find the collision checker on the state engine by name
    collision_check = getattr(state_engine, collision_checker, None)
    try:
        assert collision_check is not None, "Collision-checker '%s' not defined" % collision_checker
        assert hasattr(collision_check, "__call__"), "Collision-checker '%s' is not callable" % collision_checker
        assert history_id_key in nameop.keys(), "History ID key '%s' not in name operation" % (history_id_key)
        assert 'op' in nameop.keys(), "BUG: no op in nameop"
    except Exception, e:
        # decorator misuse: fatal
        log.exception(e)
        log.error("FATAL: incorrect state_create() decorator")
        sys.exit(1)

    rc = collision_check(nameop[history_id_key], block_id, checked_ops)
    return rc
See that there are no state - creating or state - preordering collisions at this block for this history ID . Return True if collided ; False if not
36,476
def state_create_is_valid( nameop ):
    """
    Check that a nameop carries all the fields a @state_create-decorated
    operation must have.  Raises AssertionError (with a diagnostic
    message) on the first missing field; returns True otherwise.
    """
    assert '__state_create__' in nameop, "Not tagged with @state_create"
    assert nameop['__state_create__'], "BUG: tagged False by @state_create"

    required_fields = [
        ('__preorder__', "No preorder"),
        ('__table__', "No table given"),
        ('__history_id_key__', "No history ID key given"),
    ]
    for field, message in required_fields:
        assert field in nameop, message

    assert nameop['__history_id_key__'] in nameop, "No history ID given"
    assert '__always_set__' in nameop, "No always-set fields given"

    return True
Is a nameop a valid state - preorder operation?
36,477
def state_transition_is_valid( nameop ):
    """
    Check that a nameop carries all the fields a @state_transition-decorated
    operation must have, and that its history ID key is one of the two
    recognized keys.  Raises AssertionError on the first failure; returns
    True otherwise.
    """
    assert '__state_transition__' in nameop, "Not tagged with @state_transition"
    assert nameop['__state_transition__'], "BUG: @state_transition tagged False"
    assert '__history_id_key__' in nameop, "Missing __history_id_key__"

    history_id_key = nameop['__history_id_key__']
    assert history_id_key in ["name", "namespace_id"], "Invalid history ID key '%s'" % history_id_key

    for field, message in [
        ('__table__', "Missing __table__"),
        ('__always_set__', "No always-set fields given"),
        ('__account_payment_info__', 'No account payment information present'),
    ]:
        assert field in nameop, message

    return True
Is this a valid state transition?
36,478
def _read_atlas_zonefile ( zonefile_path , zonefile_hash ) : with open ( zonefile_path , "rb" ) as f : data = f . read ( ) if zonefile_hash is not None : if not verify_zonefile ( data , zonefile_hash ) : log . debug ( "Corrupt zonefile '%s'" % zonefile_hash ) return None return data
Read and verify an atlas zone file
36,479
def get_atlas_zonefile_data( zonefile_hash, zonefile_dir, check=True ):
    """
    Load a serialized cached zonefile from local disk, trying the current
    path layout first and the legacy layout second.  If check is True,
    verify the data against zonefile_hash.
    Return the zonefile data, or None if not found (or corrupt).
    """
    candidates = [
        atlas_zonefile_path(zonefile_dir, zonefile_hash),
        atlas_zonefile_path_legacy(zonefile_dir, zonefile_hash),
    ]

    for candidate_path in candidates:
        if not os.path.exists(candidate_path):
            continue

        expected_hash = zonefile_hash if check else None
        data = _read_atlas_zonefile(candidate_path, expected_hash)
        if data:
            return data

    return None
Get a serialized cached zonefile from local disk Return None if not found
36,480
def store_atlas_zonefile_data( zonefile_data, zonefile_dir, fsync=True ):
    """
    Store an already-authenticated serialized zonefile to the atlas
    zonefile directory, creating intermediate directories as needed.
    Idempotent: returns True immediately if the file already exists.

    Return True on success, False on write error.
    """
    if not os.path.exists(zonefile_dir):
        os.makedirs(zonefile_dir, 0700)

    zonefile_hash = get_zonefile_data_hash( zonefile_data )
    zonefile_path = atlas_zonefile_path( zonefile_dir, zonefile_hash )
    zonefile_dir_path = os.path.dirname(zonefile_path)

    if os.path.exists(zonefile_path):
        # already stored
        return True

    if not os.path.exists(zonefile_dir_path):
        os.makedirs(zonefile_dir_path)

    try:
        with open(zonefile_path, "wb") as f:
            f.write(zonefile_data)
            f.flush()
            if fsync:
                # force to disk before reporting success
                os.fsync(f.fileno())

    except Exception, e:
        log.exception(e)
        return False

    return True
Store a validated zonefile . zonefile_data should be a dict . The caller should first authenticate the zonefile . Return True on success Return False on error
36,481
def remove_atlas_zonefile_data( zonefile_hash, zonefile_dir ):
    """
    Remove a cached zonefile from both the current and legacy path layouts.
    Idempotent: returns True if deleted or if it didn't exist; unlink
    failures are logged and skipped.

    (Bug fix: the loop previously tested and unlinked `zonefile_path` on
    every iteration instead of the loop variable, so the legacy-layout copy
    was never removed.)
    """
    if not os.path.exists(zonefile_dir):
        return True

    zonefile_path = atlas_zonefile_path(zonefile_dir, zonefile_hash)
    zonefile_path_legacy = atlas_zonefile_path_legacy(zonefile_dir, zonefile_hash)

    for zfp in [zonefile_path, zonefile_path_legacy]:
        if not os.path.exists(zfp):
            continue

        try:
            os.unlink(zfp)
        except:
            log.error("Failed to unlink zonefile %s (%s)" % (zonefile_hash, zfp))

    return True
Remove a cached zonefile . Idempotent ; returns True if deleted or it didn t exist . Returns False on error
36,482
def add_atlas_zonefile_data( zonefile_text, zonefile_dir, fsync=True ):
    """
    Add a zone file to the atlas zonefile store.
    Return True on success, False on error.
    """
    stored = store_atlas_zonefile_data(zonefile_text, zonefile_dir, fsync=fsync)
    if not stored:
        zonefile_hash = get_zonefile_data_hash(zonefile_text)
        log.error("Failed to save zonefile {}".format(zonefile_hash))
        return False

    return True
Add a zone file to the atlas zonefiles Return True on success Return False on error
36,483
def transfer_sanity_check( name, consensus_hash ):
    """
    Verify that data for a transfer is valid: the name must be base-40
    encoded with no '+' and at most one '.', and must not exceed the
    maximum blockchain ID name length.

    Return True if valid.
    Raise Exception if not.

    (Bug fix: the character-set error message claimed "base-38", but the
    check is is_b40() -- base-40.)
    """
    if name is not None and (not is_b40(name) or "+" in name or name.count(".") > 1):
        raise Exception("Name '%s' has non-base-40 characters" % name)

    # name can't be too long
    if name is not None and (len(name) > LENGTHS['blockchain_id_name']):
        raise Exception("Name '%s' is too long; expected %s bytes" % (name, LENGTHS['blockchain_id_name']))

    return True
Verify that data for a transfer is valid .
36,484
def find_transfer_consensus_hash( name_rec, block_id, vtxindex, nameop_consensus_hash ):
    """
    Given a name record, find the last consensus hash set by a
    non-NAME_TRANSFER operation, walking the name's history backwards
    from (block_id, vtxindex).

    Return the historic NAME_UPDATE's consensus hash if one is found first;
    otherwise return nameop_consensus_hash (also when a NAME_REGISTRATION
    or NAME_IMPORT is found first, or when no prior state exists).
    """
    # walk history newest-to-oldest
    for historic_block_number in reversed(sorted(name_rec['history'].keys())):
        for historic_state in reversed(name_rec['history'][historic_block_number]):
            if historic_state['block_number'] > block_id or (historic_state['block_number'] == block_id and historic_state['vtxindex'] > vtxindex):
                # from the future relative to this operation; skip
                continue

            if historic_state['op'] in [NAME_REGISTRATION, NAME_IMPORT]:
                # no prior update; use the nameop's own consensus hash
                return nameop_consensus_hash

            if historic_state['op'] == NAME_UPDATE:
                # this update set the last non-transfer consensus hash
                assert historic_state['consensus_hash'] is not None, 'BUG: NAME_UPDATE did not set "consensus_hash": {}'.format(historic_state)
                return historic_state['consensus_hash']

    return nameop_consensus_hash
Given a name record find the last consensus hash set by a non - NAME_TRANSFER operation .
36,485
def canonicalize( parsed_op ):
    """
    Put a parsed NAME_TRANSFER operation into its canonical,
    consensus-serializable form by decoding the keep-data flag from the
    second byte of the opcode.
    Raises ValueError on an unrecognized opcode byte.
    """
    assert 'op' in parsed_op
    assert len(parsed_op['op']) == 2

    flag = parsed_op['op'][1]
    if flag == TRANSFER_KEEP_DATA:
        parsed_op['keep_data'] = True
    elif flag == TRANSFER_REMOVE_DATA:
        parsed_op['keep_data'] = False
    else:
        raise ValueError("Invalid op '{}'".format(parsed_op['op']))

    return parsed_op
Get the canonical form of this operation putting it into a form where it can be serialized to form a consensus hash . This method is meant to preserve compatibility across blockstackd releases .
36,486
def get_bitcoind( new_bitcoind_opts=None, reset=False, new=False ):
    """
    Get or instantiate the global bitcoind client.
    reset=True discards the cached client; new=True always connects a
    fresh client and returns it without caching; new_bitcoind_opts, if
    given, replaces the stored bitcoind options first.

    Return the client on success, None on connection/configuration error.
    """
    global bitcoind

    if reset:
        bitcoind = None
    elif not new and bitcoind is not None:
        # cached client is fine
        return bitcoind

    if new or bitcoind is None:
        if new_bitcoind_opts is not None:
            set_bitcoin_opts(new_bitcoind_opts)

        bitcoin_opts = get_bitcoin_opts()
        new_bitcoind = None
        try:
            try:
                new_bitcoind = virtualchain.connect_bitcoind(bitcoin_opts)
            except KeyError, ke:
                # options dict is missing required fields
                log.exception(ke)
                log.error("Invalid configuration: %s" % bitcoin_opts)
                return None

            if new:
                # don't cache
                return new_bitcoind
            else:
                bitcoind = new_bitcoind
                return bitcoind

        except Exception, e:
            log.exception(e)
            return None
Get or instantiate our bitcoind client . Optionally re - set the bitcoind options .
36,487
def get_pidfile_path( working_dir ):
    """
    Get the path to this service's PID file under working_dir.
    """
    filename = "{}.pid".format(virtualchain_hooks.get_virtual_chain_name())
    return os.path.join(working_dir, filename)
Get the PID file path .
36,488
def put_pidfile( pidfile_path, pid ):
    """
    Write the given PID into the pidfile at pidfile_path and fsync it.
    """
    with open(pidfile_path, "w") as pidfile:
        pidfile.write(str(pid))
        os.fsync(pidfile.fileno())

    return
Put a PID into a pidfile
36,489
def get_logfile_path( working_dir ):
    """
    Get the path to this service endpoint's log file under working_dir.
    """
    filename = "{}.log".format(virtualchain_hooks.get_virtual_chain_name())
    return os.path.join(working_dir, filename)
Get the logfile path for our service endpoint .
36,490
def get_index_range( working_dir ):
    """
    Get the range of bitcoin block heights to index, masking bitcoind
    connection failures with exponential backoff (with jitter, capped at
    60s) and reconnection.  Loops until a range is obtained or the daemon
    is told to stop.

    Return (first_block, last_block - NUM_CONFIRMATIONS) on success.
    Return (None, None) if the daemon stopped before a range was obtained.
    """
    bitcoind_session = get_bitcoind( new=True )
    assert bitcoind_session is not None

    first_block = None
    last_block = None
    wait = 1.0
    while last_block is None and is_running():

        first_block, last_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)

        if first_block is None or last_block is None:
            # probe failed; back off and reconnect
            log.error("Reconnect to bitcoind in {} seconds".format(wait))
            time.sleep(wait)
            wait = min(wait * 2.0 + random.random() * wait, 60)

            bitcoind_session = get_bitcoind( new=True )
            continue

        else:
            # stay behind the chain tip by NUM_CONFIRMATIONS blocks
            return first_block, last_block - NUM_CONFIRMATIONS

    return None, None
Get the bitcoin block index range . Mask connection failures with timeouts . Always try to reconnect .
36,491
def rpc_start(working_dir, port, subdomain_index=None, thread=True):
    """
    Create the global RPC server, optionally launching its thread.

    @thread: if True, start the server thread before returning.

    Returns the RPC server instance.
    """
    server = BlockstackdRPCServer(working_dir, port, subdomain_index=subdomain_index)
    log.debug("Starting RPC on port {}".format(port))

    if thread:
        server.start()

    return server
Start the global RPC server thread Returns the RPC server thread
36,492
def rpc_chain_sync(server_state, new_block_height, finish_time):
    """
    Notify the RPC server that indexing finished at finish_time, and drop
    any cached responses that are now stale.  No-op if there is no RPC server.
    """
    srv = server_state['rpc']
    if srv is None:
        return

    srv.cache_flush()
    srv.set_last_index_time(finish_time)
Flush the global RPC server cache and tell the RPC server that we've reached the given block height at the given time.
36,493
def rpc_stop(server_state):
    """
    Stop and join the global RPC server thread, if one is running,
    and clear it from server_state.
    """
    srv = server_state['rpc']
    if srv is None:
        log.info("RPC already joined")
    else:
        log.info("Shutting down RPC")
        srv.stop_server()
        srv.join()
        log.info("RPC joined")

    server_state['rpc'] = None
Stop the global RPC server thread
36,494
def gc_stop():
    """
    Stop and join the optimistic GC thread, if one is running.
    """
    global gc_thread

    if not gc_thread:
        log.info("GC thread already joined")
        return

    log.info("Shutting down GC thread")
    gc_thread.signal_stop()
    gc_thread.join()
    log.info("GC thread joined")
    gc_thread = None
Stop the optimistic GC thread.
36,495
def api_start(working_dir, host, port, thread=True):
    """
    Create the global API server, optionally launching its thread.

    @thread: if True, start the server thread before returning.

    Returns the API server instance.
    """
    server = BlockstackdAPIServer(working_dir, host, port)
    log.info("Starting API server on port {}".format(port))

    if thread:
        server.start()

    return server
Start the global API server Returns the API server thread
36,496
def api_stop(server_state):
    """
    Stop and join the global API server thread, if one is running,
    and clear it from server_state.
    """
    srv = server_state['api']
    if srv is None:
        log.info("API already joined")
    else:
        log.info("Shutting down API")
        srv.stop_server()
        srv.join()
        log.info("API server joined")

    server_state['api'] = None
Stop the global API server thread
36,497
def atlas_init(blockstack_opts, db, recover=False, port=None):
    """
    Start up Atlas functionality, if it is enabled in the config.

    Initializes the Atlas DB, seeds the peer table, and creates the
    Atlas node state.

    Returns the Atlas node state on success.
    Returns None if Atlas is disabled.
    """
    if port is None:
        port = blockstack_opts['rpc_port']

    if not is_atlas_enabled(blockstack_opts):
        return None

    # comma-separated peer lists; drop empty entries
    seed_peers = [p for p in blockstack_opts['atlas_seeds'].split(",") if len(p) > 0]
    blacklist = [p for p in blockstack_opts['atlas_blacklist'].split(",") if len(p) > 0]

    zonefile_dir = blockstack_opts['zonefiles']
    atlasdb_path = blockstack_opts['atlasdb_path']
    hostname = blockstack_opts['atlas_hostname']
    atlas_port = blockstack_opts['atlas_port']

    peer_table = atlasdb_init(atlasdb_path, zonefile_dir, db, seed_peers, blacklist, validate=True, recover=recover)
    atlas_peer_table_init(peer_table)

    return atlas_node_init(hostname, atlas_port, atlasdb_path, zonefile_dir, db.working_dir)
Start up atlas functionality
36,498
def read_pid_file(pidfile_path):
    """
    Read the PID from the PID file at pidfile_path.

    Returns the PID as an int on success.
    Returns None if the file cannot be read or does not contain an integer.
    """
    try:
        # 'with' guarantees the handle is closed even if read() raises
        # (the original leaked the handle in that case)
        with open(pidfile_path, "r") as f:
            pid_data = f.read().strip()
    except (IOError, OSError):
        # missing or unreadable pidfile
        return None

    try:
        return int(pid_data)
    except ValueError:
        # garbage in the pidfile
        return None
Read the PID from the PID file
36,499
def check_server_running(pid):
    """
    Determine whether the process with the given PID is running.

    Our own PID is treated as "not running" (a stale pidfile from a
    previous incarnation, not a live server).

    Returns True if the process exists; False otherwise.
    Re-raises any OSError other than ESRCH (e.g. EPERM).
    """
    if pid == os.getpid():
        return False

    try:
        # signal 0 probes for process existence without delivering a signal
        os.kill(pid, 0)
    except OSError as oe:
        if oe.errno == errno.ESRCH:
            # no such process
            return False
        raise
    else:
        return True
Determine if the given process is running