idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
36,700
def namedb_insert_prepare(cur, record, table_name):
    """
    Prepare to insert a record into the given table, verifying first
    that every field in @record matches an actual column of @table_name.

    Return (query, values): an INSERT statement with "?" placeholders
    and the tuple of values to bind, in sorted-column order.
    Raise an Exception if the record's fields do not match the table.
    """
    namedb_assert_fields_match(cur, record, table_name)

    # deterministic column order so the statement and values line up
    columns = sorted(record.keys())

    # map booleans to 0/1 explicitly for SQLite; use identity checks so
    # numeric 0/1 (and 0.0/1.0) values pass through unchanged instead of
    # being coerced by the == comparison
    values = []
    for c in columns:
        if record[c] is False:
            values.append(0)
        elif record[c] is True:
            values.append(1)
        else:
            values.append(record[c])

    values = tuple(values)

    field_placeholders = ",".join(["?"] * len(columns))
    query = "INSERT INTO %s (%s) VALUES (%s);" % (table_name, ",".join(columns), field_placeholders)
    log.debug(namedb_format_query(query, values))

    return (query, values)
Prepare to insert a record, but first make sure that every column named in the record corresponds to an actual column and has a value.
36,701
def namedb_update_must_equal(rec, change_fields):
    """
    Generate the list of fields in @rec that must stay the same across
    an update -- i.e. every field of @rec not listed in @change_fields.
    If @change_fields is empty, no fields are constrained.
    """
    if len(change_fields) == 0:
        return []

    return [field for field in rec.keys() if field not in change_fields]
Generate the set of fields that must stay the same across an update .
36,702
def namedb_delete_prepare(cur, primary_key, primary_key_value, table_name):
    """
    Prepare to delete a record, verifying first that @primary_key is an
    actual column of @table_name.
    Return (query, values) for a DELETE FROM ... WHERE statement.
    Raise an Exception if the field does not match a column.
    """
    namedb_assert_fields_match(cur, {primary_key: primary_key_value}, table_name, columns_match_record=False)

    statement = "DELETE FROM %s WHERE %s = ?;" % (table_name, primary_key)
    return (statement, (primary_key_value,))
Prepare to delete a record but make sure the fields in record correspond to actual columns . Return a DELETE FROM ... WHERE statement on success . Raise an Exception if not .
36,703
def namedb_query_execute(cur, query, values, abort=True):
    """
    Execute a parameterized query on the given cursor.
    Retries with timeouts if the database is locked (handled by
    db_query_execute).  If @abort is True, failure aborts the process.
    """
    return db_query_execute(cur, query, values, abort=abort)
Execute a query . If it fails abort . Retry with timeouts on lock
36,704
def namedb_preorder_insert(cur, preorder_rec):
    """
    Add a name or namespace preorder record.
    The record must carry a 'preorder_hash'.
    Aborts the process if the insert cannot be prepared.
    Return True on success.
    """
    preorder_row = copy.deepcopy(preorder_rec)
    assert 'preorder_hash' in preorder_row, "BUG: missing preorder_hash"

    try:
        preorder_query, preorder_values = namedb_insert_prepare(cur, preorder_row, "preorders")
    # 'except E as e' instead of the Python-2-only 'except E, e' form
    except Exception as e:
        log.exception(e)
        log.error("FATAL: Failed to insert name preorder '%s'" % preorder_row['preorder_hash'])
        os.abort()

    namedb_query_execute(cur, preorder_query, preorder_values)
    return True
Add a name or namespace preorder record if it doesn t exist already .
36,705
def namedb_preorder_remove(cur, preorder_hash):
    """
    Remove a preorder record by its hash.
    Aborts the process if the delete cannot be prepared.
    Return True on success.
    """
    try:
        query, values = namedb_delete_prepare(cur, 'preorder_hash', preorder_hash, 'preorders')
    # 'except E as e' instead of the Python-2-only 'except E, e' form
    except Exception as e:
        log.exception(e)
        log.error("FATAL: Failed to delete preorder with hash '%s'" % preorder_hash)
        os.abort()

    log.debug(namedb_format_query(query, values))
    namedb_query_execute(cur, query, values)
    return True
Remove a preorder hash .
36,706
def namedb_name_insert(cur, input_name_rec):
    """
    Add the given name record to the name_records table.
    Validates the record's fields first via namedb_name_fields_check.
    Aborts the process if the insert cannot be prepared.
    Return True on success.
    """
    name_rec = copy.deepcopy(input_name_rec)
    namedb_name_fields_check(name_rec)

    try:
        query, values = namedb_insert_prepare(cur, name_rec, "name_records")
    # 'except E as e' instead of the Python-2-only 'except E, e' form
    except Exception as e:
        log.exception(e)
        log.error("FATAL: Failed to insert name '%s'" % name_rec['name'])
        os.abort()

    namedb_query_execute(cur, query, values)
    return True
Add the given name record to the database if it doesn t exist already .
36,707
def namedb_name_update(cur, opcode, input_opdata, only_if={}, constraints_ignored=[]):
    """
    Update an existing name record in the database, changing only the
    mutate fields defined for @opcode.  All other fields (plus 'name'
    and 'block_number') must stay equal, except any listed in
    @constraints_ignored.  @only_if adds extra WHERE constraints.

    Aborts the process if the update cannot be prepared or does not
    affect exactly one row.  Returns True on success.
    """
    opdata = copy.deepcopy(input_opdata)
    namedb_name_fields_check(opdata)

    mutate_fields = op_get_mutate_fields(opcode)

    # only creation ops may set the name itself
    if opcode not in OPCODE_CREATION_OPS:
        assert 'name' not in mutate_fields, "BUG: 'name' listed as a mutate field for '%s'" % (opcode)

    # everything not mutated must remain unchanged
    must_equal = namedb_update_must_equal(opdata, mutate_fields)
    must_equal += ['name', 'block_number']

    for ignored in constraints_ignored:
        if ignored in must_equal:
            must_equal.remove(ignored)

    try:
        query, values = namedb_update_prepare(cur, ['name', 'block_number'], opdata, "name_records", must_equal=must_equal, only_if=only_if)
    except Exception, e:
        log.exception(e)
        log.error("FATAL: failed to update name '%s'" % opdata['name'])
        os.abort()

    namedb_query_execute(cur, query, values)

    # exactly one row must have been touched; anything else is a bug
    try:
        assert cur.rowcount == 1, "Updated %s row(s)" % cur.rowcount
    except Exception, e:
        log.exception(e)
        log.error("FATAL: failed to update name '%s'" % opdata['name'])
        # reconstruct the query with values interpolated, for debugging
        log.error("Query: %s", "".join(["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val) for (frag, val) in zip(query.split("?"), values + ("",))]))
        os.abort()

    return True
Update an existing name in the database . If non - empty only update the given fields .
36,708
def namedb_state_mutation_sanity_check(opcode, op_data):
    """
    Verify that every mutate field defined for @opcode is present in
    @op_data.
    Return True if so; raise AssertionError naming the missing fields
    otherwise.
    """
    mutate_fields = op_get_mutate_fields(opcode)
    missing = [field for field in mutate_fields if field not in op_data.keys()]

    assert len(missing) == 0, ("BUG: operation '%s' is missing the following fields: %s" % (opcode, ",".join(missing)))
    return True
Make sure all mutate fields for this operation are present . Return True if so Raise exception if not
36,709
def namedb_get_last_name_import(cur, name, block_id, vtxindex):
    """
    Find the most recent name-import history entry for @name strictly
    prior to (@block_id, @vtxindex).
    Return the deserialized history data, or None if there is none.
    """
    query = 'SELECT history_data FROM history WHERE history_id = ? AND (block_id < ? OR (block_id = ? AND vtxindex < ?)) ' + \
            'ORDER BY block_id DESC,vtxindex DESC LIMIT 1;'
    args = (name, block_id, block_id, vtxindex)

    rows = namedb_query_execute(cur, query, args)
    for row in rows:
        # at most one row due to LIMIT 1
        return json.loads(row['history_data'])

    return None
Find the last name import for this name
36,710
def namedb_account_transaction_save(cur, address, token_type, new_credit_value, new_debit_value, block_id, vtxindex, txid, existing_account):
    """
    Insert the new state of an account at (block_id, vtxindex),
    carrying forward the lock/whitelist/metadata fields from
    @existing_account (may be None for a brand-new account).
    Aborts the process on failure.  Returns True on success.
    """
    if existing_account is None:
        existing_account = {}

    new_row = {
        'address': address,
        'type': token_type,
        'credit_value': '{}'.format(new_credit_value),
        'debit_value': '{}'.format(new_debit_value),
        'lock_transfer_block_id': existing_account.get('lock_transfer_block_id', 0),
        'receive_whitelisted': existing_account.get('receive_whitelisted', True),
        'metadata': existing_account.get('metadata', None),
        'block_id': block_id,
        'txid': txid,
        'vtxindex': vtxindex,
    }

    try:
        query, values = namedb_insert_prepare(cur, new_row, 'accounts')
    except Exception as e:
        log.exception(e)
        log.fatal('FATAL: failed to append account history record for {} at ({},{})'.format(address, block_id, vtxindex))
        os.abort()

    namedb_query_execute(cur, query, values)
    return True
Insert the new state of an account at a particular point in time .
36,711
def namedb_account_debit(cur, account_addr, token_type, amount, block_id, vtxindex, txid):
    """
    Debit an account by @amount at (block_id, vtxindex), inserting a
    new history entry into the accounts table.
    Aborts the process if the account does not exist, would be
    overdrawn, or the new state cannot be saved.
    Returns True on success.
    """
    account = namedb_get_account(cur, account_addr, token_type)
    if account is None:
        traceback.print_stack()
        log.fatal('Account {} does not exist'.format(account_addr))
        os.abort()

    total_credits = account['credit_value']
    total_debits = account['debit_value'] + amount

    # an account may never spend more than it has received
    if total_debits > total_credits:
        traceback.print_stack()
        log.fatal('Account {} for "{}" tokens overdrew (debits = {}, credits = {})'.format(account_addr, token_type, total_debits, total_credits))
        os.abort()

    log.debug("Account balance of units of '{}' for {} is now {}".format(token_type, account_addr, total_credits - total_debits))

    saved = namedb_account_transaction_save(cur, account_addr, token_type, total_credits, total_debits, block_id, vtxindex, txid, account)
    if not saved:
        traceback.print_stack()
        log.fatal('Failed to save new account state for {}'.format(account_addr))
        os.abort()

    return True
Debit an account at a particular point in time by the given amount . Insert a new history entry for the account into the accounts table .
36,712
def namedb_accounts_vest(cur, block_height):
    """
    Vest tokens to all recipients scheduled for @block_height.
    Walks the account_vesting table and credits each account due at
    this block, using a synthetic txid.
    Aborts the process on failure.  Returns True on success.
    """
    vesting_rows = namedb_query_execute(cur, 'SELECT * FROM account_vesting WHERE block_id = ?', (block_height,))

    # materialize the rows first: the credits below reuse the cursor
    to_vest = []
    for vesting_row in vesting_rows:
        rec = {}
        rec.update(vesting_row)
        to_vest.append(rec)

    for rec in to_vest:
        addr = rec['address']
        token_type = rec['type']
        token_amount = rec['vesting_value']
        log.debug("Vest {} with {} {}".format(addr, token_amount, token_type))

        fake_txid = namedb_vesting_txid(addr, token_type, token_amount, block_height)
        if not namedb_account_credit(cur, addr, token_type, token_amount, block_height, 0, fake_txid):
            traceback.print_stack()
            log.fatal('Failed to vest {} {} to {}'.format(token_amount, token_type, addr))
            os.abort()

    return True
Vest tokens at this block to all recipients . Goes through the vesting table and credits each account that should vest on this block .
36,713
def namedb_is_history_snapshot(history_snapshot):
    """
    Verify that the given dict is a valid history snapshot: it must
    carry an 'op' that maps to a recognized opcode, and every consensus
    field defined for that opcode.
    Return True if so; raise AssertionError if not.
    """
    assert 'op' in history_snapshot.keys(), "BUG: no op given"

    opcode = op_get_opcode_name(history_snapshot['op'])
    assert opcode is not None, "BUG: unrecognized op '%s'" % history_snapshot['op']

    missing = [field for field in op_get_consensus_fields(opcode) if field not in history_snapshot.keys()]
    assert len(missing) == 0, ("BUG: operation '%s' is missing the following fields: %s" % (opcode, ",".join(missing)))
    return True
Given a dict, verify that it is a history snapshot: it must have all consensus fields. Return True if so; raise an exception if it doesn't.
36,714
def namedb_get_account_tokens(cur, address):
    """
    Get the distinct token types held by the account at @address.
    Returns a (possibly empty) list of token type strings.
    """
    rows = namedb_query_execute(cur, 'SELECT DISTINCT type FROM accounts WHERE address = ?;', (address,))
    return [row['type'] for row in rows]
Get an account s tokens Returns the list of tokens on success Returns None if not found
36,715
def namedb_get_account(cur, address, token_type):
    """
    Get the latest state of the account for (address, token_type).
    Returns the account row as a dict on success; None if not found.
    """
    sql = 'SELECT * FROM accounts WHERE address = ? AND type = ? ORDER BY block_id DESC, vtxindex DESC LIMIT 1;'
    latest = namedb_query_execute(cur, sql, (address, token_type)).fetchone()
    if latest is None:
        return None

    account = {}
    account.update(latest)
    return account
Get an account given the address . Returns the account row on success Returns None if not found
36,716
def namedb_get_account_diff(current, prior):
    """
    Calculate the expenditure difference (current - prior) between two
    states of the same account.  Both states must be for the same
    address and token type; raises ValueError otherwise.
    """
    same_address = current['address'] == prior['address']
    same_token = current['type'] == prior['type']
    if not (same_address and same_token):
        raise ValueError("Accounts for two different addresses and/or token types")

    return namedb_get_account_balance(current) - namedb_get_account_balance(prior)
Figure out what the expenditure difference is between two accounts . They must be for the same token type and address . Calculates current - prior
36,717
def namedb_get_account_history(cur, address, offset=None, count=None):
    """
    Get the history of an account's token states, newest first,
    optionally paginated with @offset and @count.
    Returns a list of account-state dicts.
    """
    sql = 'SELECT * FROM accounts WHERE address = ? ORDER BY block_id DESC, vtxindex DESC'
    args = (address,)

    # note: OFFSET is only valid in SQLite alongside LIMIT
    if count is not None:
        sql += ' LIMIT ?'
        args += (count,)

    if offset is not None:
        sql += ' OFFSET ?'
        args += (offset,)

    rows = namedb_query_execute(cur, sql + ';', args)

    history = []
    for row in rows:
        state = {}
        state.update(row)
        history.append(state)

    return history
Get the history of an account s tokens
36,718
def namedb_get_all_account_addresses(cur):
    """
    TESTING ONLY: get every distinct account address.
    Returns a list of addresses.
    """
    assert BLOCKSTACK_TEST, 'BUG: this method is only available in test mode'
    rows = namedb_query_execute(cur, 'SELECT DISTINCT address FROM accounts;', ())
    return [row['address'] for row in rows]
TESTING ONLY get all account addresses
36,719
def namedb_get_name_at(cur, name, block_number, include_expired=False):
    """
    Get the sequence of states @name was in at @block_number (there can
    be more than one if the name changed during the block).
    Returns the list of states, or None if the name had no states then
    (or had expired, unless @include_expired is set).
    """
    if not include_expired:
        # bail early if the name did not exist unexpired at this block
        name_rec = namedb_get_name(cur, name, block_number, include_expired=False, include_history=False, only_registered=False)
        if name_rec is None:
            return None

    states = namedb_get_record_states_at(cur, name, block_number)
    return states if len(states) > 0 else None
Get the sequence of states that a name record was in at a particular block height . There can be more than one if the name changed during the block .
36,720
def namedb_get_namespace_at(cur, namespace_id, block_number, include_expired=False):
    """
    Get the sequence of states @namespace_id was in at @block_number
    (more than one if it changed during the block).
    Only considers unexpired namespaces unless @include_expired is set.
    Returns the list of states, or None.
    """
    if not include_expired:
        # bail early if the namespace did not exist unexpired at this block
        namespace_rec = namedb_get_namespace(cur, namespace_id, block_number, include_expired=False, include_history=False, only_revealed=False)
        if namespace_rec is None:
            return None

    states = namedb_get_record_states_at(cur, namespace_id, block_number)
    return states if len(states) > 0 else None
Get the sequence of states that a namespace record was in at a particular block height . There can be more than one if the namespace changed during the block . Returns only unexpired namespaces by default . Can return expired namespaces with include_expired = True
36,721
def namedb_get_account_balance(account):
    """
    Get the balance of an account: its credits minus its debits.
    Returns the balance on success.
    Aborts the process if the balance is somehow negative.
    """
    balance = account['credit_value'] - account['debit_value']
    if balance >= 0:
        return balance

    # a negative balance means the DB invariants were violated
    log.fatal("Balance of {} is {} (credits = {}, debits = {})".format(account['address'], balance, account['credit_value'], account['debit_value']))
    traceback.print_stack()
    os.abort()
Get the balance of an account for a particular type of token . This is its credits minus its debits . Returns the current balance on success . Aborts on error or if the balance is somehow negative .
36,722
def namedb_get_preorder(cur, preorder_hash, current_block_number, include_expired=False, expiry_time=None):
    """
    Get a preorder record by hash.
    If @include_expired is False, @expiry_time must be given and only
    preorders that have not yet expired are returned.
    Return the preorder dict, or None if not found.
    """
    if include_expired:
        select_query = "SELECT * FROM preorders WHERE preorder_hash = ?;"
        args = (preorder_hash,)
    else:
        assert expiry_time is not None, "expiry_time is required with include_expired"
        select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND block_number < ?;"
        args = (preorder_hash, expiry_time + current_block_number)

    # BUG FIX: the query was previously executed with (preorder_hash,)
    # regardless of the branch taken, so the two-placeholder unexpired
    # query never received its expiry bound.
    preorder_rows = namedb_query_execute(cur, select_query, args)
    preorder_row = preorder_rows.fetchone()
    if preorder_row is None:
        return None

    preorder_rec = {}
    preorder_rec.update(preorder_row)
    return preorder_rec
Get a preorder record by hash . If include_expired is set then so must expiry_time Return None if not found .
36,723
def namedb_get_num_historic_names_by_address(cur, address):
    """
    Get the number of names ever created by @address, throughout
    history.
    """
    select_query = "SELECT COUNT(*) FROM name_records JOIN history ON name_records.name = history.history_id " + \
                   "WHERE history.creator_address = ?;"
    return namedb_select_count_rows(cur, select_query, (address,))
Get the number of names owned by an address throughout history
36,724
def namedb_get_num_names(cur, current_block, include_expired=False):
    """
    Get the number of names that exist as of @current_block.
    Expired names are excluded unless @include_expired is set.
    """
    where_clause = ""
    where_args = ()

    if not include_expired:
        unexpired_query, where_args = namedb_select_where_unexpired_names(current_block)
        where_clause = 'WHERE {}'.format(unexpired_query)

    query = "SELECT COUNT(name_records.name) FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + where_clause + ";"
    return namedb_select_count_rows(cur, query, where_args, count_column='COUNT(name_records.name)')
Get the number of names that exist at the current block
36,725
def namedb_get_all_names(cur, current_block, offset=None, count=None, include_expired=False):
    """
    Get a list of all names in the database, ordered by name and
    optionally paginated with @offset and @count.
    Expired names are excluded unless @include_expired is set;
    revoked names are included.
    """
    where_clause = ""
    args = ()

    if not include_expired:
        unexpired_query, unexpired_args = namedb_select_where_unexpired_names(current_block)
        where_clause = 'WHERE {}'.format(unexpired_query)
        args = unexpired_args

    query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + where_clause + " ORDER BY name "

    page_query, page_args = namedb_offset_count_predicate(offset=offset, count=count)
    query += page_query + ";"
    args += page_args

    rows = namedb_query_execute(cur, query, tuple(args))
    return [row['name'] for row in rows]
Get a list of all names in the database optionally paginated with offset and count . Exclude expired names . Include revoked names .
36,726
def namedb_get_num_names_in_namespace(cur, namespace_id, current_block):
    """
    Get the number of unexpired names in @namespace_id as of
    @current_block.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(current_block)
    query = "SELECT COUNT(name_records.name) FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id WHERE name_records.namespace_id = ? AND " + unexpired_query + " ORDER BY name;"
    return namedb_select_count_rows(cur, query, (namespace_id,) + unexpired_args, count_column='COUNT(name_records.name)')
Get the number of names in a given namespace
36,727
def namedb_get_names_in_namespace(cur, namespace_id, current_block, offset=None, count=None):
    """
    Get a list of all unexpired names in @namespace_id, ordered by
    name and optionally paginated with @offset and @count.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(current_block)
    query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id WHERE name_records.namespace_id = ? AND " + unexpired_query + " ORDER BY name "
    args = (namespace_id,) + unexpired_args

    page_query, page_args = namedb_offset_count_predicate(offset=offset, count=count)
    query += page_query + ";"
    args += page_args

    rows = namedb_query_execute(cur, query, tuple(args))
    return [row['name'] for row in rows]
Get a list of all names in a namespace optionally paginated with offset and count . Exclude expired names
36,728
def namedb_get_all_namespace_ids(cur):
    """
    Get the IDs of all READY namespaces.
    Returns a list of namespace IDs.
    """
    rows = namedb_query_execute(cur, "SELECT namespace_id FROM namespaces WHERE op = ?;", (NAMESPACE_READY,))
    return [row['namespace_id'] for row in rows]
Get a list of all READY namespace IDs .
36,729
def namedb_get_all_preordered_namespace_hashes(cur, current_block):
    """
    Get all namespace preorder hashes that have not yet expired as of
    @current_block.  Used for testing.
    """
    query = "SELECT preorder_hash FROM preorders WHERE op = ? AND block_number >= ? AND block_number < ?;"
    args = (NAMESPACE_PREORDER, current_block, current_block + NAMESPACE_PREORDER_EXPIRE)
    rows = namedb_query_execute(cur, query, args)
    return [row['preorder_hash'] for row in rows]
Get a list of all preordered namespace hashes that haven t expired yet . Used for testing
36,730
def namedb_get_all_revealed_namespace_ids(cur, current_block):
    """
    Get the IDs of all revealed, not-yet-expired namespaces as of
    @current_block.

    BUG FIX: the first parameter was named 'self' while the body
    referenced an undefined 'cur' (a guaranteed NameError).  It is now
    the database cursor, matching the sibling namedb_get_* functions;
    positional callers are unaffected.
    """
    query = "SELECT namespace_id FROM namespaces WHERE op = ? AND reveal_block < ?;"
    args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE)
    rows = namedb_query_execute(cur, query, args)
    return [row['namespace_id'] for row in rows]
Get all non - expired revealed namespaces .
36,731
def namedb_get_all_importing_namespace_hashes(cur, current_block):
    """
    Get the hashes of all non-expired preordered and revealed
    namespaces as of @current_block.

    BUG FIX: the first parameter was named 'self' while the body
    referenced an undefined 'cur' (a guaranteed NameError).  It is now
    the database cursor, matching the sibling namedb_get_* functions;
    positional callers are unaffected.
    """
    query = "SELECT preorder_hash FROM namespaces WHERE (op = ? AND reveal_block < ?) OR (op = ? AND block_number < ?);"
    args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE, NAMESPACE_PREORDER, current_block + NAMESPACE_PREORDER_EXPIRE)
    rows = namedb_query_execute(cur, query, args)
    return [row['preorder_hash'] for row in rows]
Get the list of all non - expired preordered and revealed namespace hashes .
36,732
def namedb_get_names_by_sender(cur, sender, current_block):
    """
    Find all non-expired, non-revoked names owned by @sender (a public
    key script) as of @current_block.
    Returns a (possibly empty) list of names.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(current_block)
    query = "SELECT name_records.name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
            "WHERE name_records.sender = ? AND name_records.revoked = 0 AND " + unexpired_query + ";"
    rows = namedb_query_execute(cur, query, (sender,) + unexpired_args)
    return [row['name'] for row in rows]
Given a sender pubkey script find all the non - expired non - revoked names owned by it . Return None if the sender owns no names .
36,733
def namedb_get_namespace_preorder(db, namespace_preorder_hash, current_block):
    """
    Get a namespace preorder by its hash, as of @current_block.
    Returns the preorder dict, or None if it does not exist, has
    expired, or has already been revealed or readied.
    """
    cur = db.cursor()
    select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
    args = (namespace_preorder_hash, NAMESPACE_PREORDER, current_block + NAMESPACE_PREORDER_EXPIRE)
    preorder_row = namedb_query_execute(cur, select_query, args).fetchone()
    if preorder_row is None:
        return None

    preorder_rec = {}
    preorder_rec.update(preorder_row)

    # reject the preorder if it was already revealed or readied
    cur = db.cursor()
    select_query = "SELECT preorder_hash FROM namespaces WHERE preorder_hash = ? AND ((op = ?) OR (op = ? AND reveal_block < ?));"
    args = (namespace_preorder_hash, NAMESPACE_READY, NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE)
    if namedb_query_execute(cur, select_query, args).fetchone() is not None:
        return None

    return preorder_rec
Get a namespace preorder given its hash .
36,734
def namedb_get_namespace_ready(cur, namespace_id, include_history=True):
    """
    Get a READY namespace, optionally with its history.
    Returns the namespace dict only if the namespace exists and is
    ready; None otherwise.
    """
    rows = namedb_query_execute(cur, "SELECT * FROM namespaces WHERE namespace_id = ? AND op = ?;", (namespace_id, NAMESPACE_READY))
    row = rows.fetchone()
    if row is None:
        return None

    namespace = {}
    namespace.update(row)

    if include_history:
        namespace['history'] = namedb_get_history(cur, namespace_id)

    return op_decanonicalize('NAMESPACE_READY', namespace)
Get a ready namespace and optionally its history . Only return a namespace if it is ready .
36,735
def namedb_get_name_from_name_hash128(cur, name_hash128, block_number):
    """
    Look up a name by the hexlified 128-bit hash of the name.
    Only matches unexpired, non-revoked names as of @block_number.
    Returns the name, or None if not found.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(block_number)
    select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
                   "WHERE name_hash128 = ? AND revoked = 0 AND " + unexpired_query + ";"
    match = namedb_query_execute(cur, select_query, (name_hash128,) + unexpired_args).fetchone()
    return match['name'] if match is not None else None
Given the hexlified 128 - bit hash of a name get the name .
36,736
def namedb_get_names_with_value_hash(cur, value_hash, block_number):
    """
    Get the current, non-revoked names that carry @value_hash as of
    @block_number.
    Return the list of names, or None if there are none.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(block_number)
    select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
                   "WHERE value_hash = ? AND revoked = 0 AND " + unexpired_query + ";"
    rows = namedb_query_execute(cur, select_query, (value_hash,) + unexpired_args)
    names = [row['name'] for row in rows]
    return names if len(names) > 0 else None
Get the names with the given value hash . Only includes current non - revoked names . Return None if there are no names .
36,737
def namedb_get_value_hash_txids(cur, value_hash):
    """
    Get the txids of the transactions that sent @value_hash, ordered
    by block and vtxindex.
    """
    rows = namedb_query_execute(cur, 'SELECT txid FROM history WHERE value_hash = ? ORDER BY block_id,vtxindex;', (value_hash,))
    return [str(row['txid']) for row in rows]
Get the list of txs that sent this value hash ordered by block and vtxindex
36,738
def namedb_get_num_block_vtxs(cur, block_number):
    """
    Count how many virtual transactions were processed in the given
    block.

    BUG FIX: the query previously filtered on history_id (which holds
    name/namespace identifiers elsewhere in this file) with a block
    number, so it could never match.  It now filters on block_id.
    """
    select_query = "SELECT vtxindex FROM history WHERE block_id = ?;"
    args = (block_number,)

    rows = namedb_query_execute(cur, select_query, args)
    count = 0
    for _ in rows:
        count += 1

    return count
How many virtual transactions were processed for this block?
36,739
def namedb_is_name_zonefile_hash(cur, name, zonefile_hash):
    """
    Determine whether a zone file hash was ever sent by @name.
    Return True if so, False if not.
    """
    select_query = 'SELECT COUNT(value_hash) FROM history WHERE history_id = ? AND value_hash = ?'
    select_args = (name, zonefile_hash)
    rows = namedb_query_execute(cur, select_query, select_args)

    # default to 0 (was None) so the final comparison is well-defined
    # even if the cursor unexpectedly yields no rows
    count = 0
    for r in rows:
        count = r['COUNT(value_hash)']
        break

    return count > 0
Determine if a zone file hash was sent by a name . Return True if so false if not
36,740
def process_announcement(sender_namerec, op, working_dir):
    """
    Record a valid announcement from the blockstack developers.

    The announced message hash must appear as a value_hash somewhere in
    the sender's name history; otherwise the announcement is ignored.
    The message body is fetched from this node's zone file storage (if
    configured), logged, and stored via store_announcement().
    Returns None in all cases.
    """
    node_config = get_blockstack_opts()

    announce_hash = op['message_hash']
    announcer_id = op['announcer_id']

    # collect every value_hash the sender's name has ever carried
    name_history = sender_namerec['history']
    allowed_value_hashes = []
    for block_height in name_history.keys():
        for historic_namerec in name_history[block_height]:
            if historic_namerec.get('value_hash'):
                allowed_value_hashes.append(historic_namerec['value_hash'])

    if announce_hash not in allowed_value_hashes:
        log.warning("Announce hash {} not found in name history for {}".format(announce_hash, announcer_id))
        return

    zonefiles_dir = node_config.get('zonefiles', None)
    if not zonefiles_dir:
        log.warning("This node does not store zone files, so no announcement can be found")
        return

    # the announcement body is stored as a zone file blob
    announce_text = get_atlas_zonefile_data(announce_hash, zonefiles_dir)
    if announce_text is None:
        log.warning("No zone file {} found".format(announce_hash))
        return

    log.critical("ANNOUNCEMENT (from %s): %s\n------BEGIN MESSAGE------\n%s\n------END MESSAGE------\n" % (announcer_id, announce_hash, announce_text))

    store_announcement(working_dir, announce_hash, announce_text)
If the announcement is valid then immediately record it .
36,741
def check(state_engine, nameop, block_id, checked_ops):
    """
    Verify and log an announcement from the blockstack developers.

    The announcement's sender script must match the current sender of
    one of the whitelisted announce IDs known to @state_engine.
    Side effect: sets nameop['announcer_id'] and processes the
    announcement on success.

    Return True if the announcement came from the whitelist.
    Return False otherwise.
    """
    sender = nameop['sender']
    sending_blockchain_id = None
    found = False
    blockchain_namerec = None

    # find the whitelisted blockchain ID whose name is currently owned
    # by this sender script
    for blockchain_id in state_engine.get_announce_ids():
        blockchain_namerec = state_engine.get_name(blockchain_id)
        if blockchain_namerec is None:
            # name doesn't exist (yet?)
            continue

        if str(sender) == str(blockchain_namerec['sender']):
            found = True
            sending_blockchain_id = blockchain_id
            break

    if not found:
        log.warning("Announcement not sent from our whitelist of blockchain IDs")
        return False

    nameop['announcer_id'] = sending_blockchain_id
    process_announcement(blockchain_namerec, nameop, state_engine.working_dir)
    return True
Log an announcement from the blockstack developers but first verify that it is correct . Return True if the announcement came from the announce IDs whitelist Return False otherwise
36,742
def get_bitcoind_client():
    """
    Create an RPC client for the configured bitcoind node, using the
    credentials and endpoint from the bitcoin options.
    """
    opts = get_bitcoin_opts()
    return create_bitcoind_service_proxy(
        opts['bitcoind_user'],
        opts['bitcoind_passwd'],
        server=opts['bitcoind_server'],
        port=opts['bitcoind_port'],
    )
Connect to the bitcoind node
36,743
def txid_to_block_data(txid, bitcoind_proxy, proxy=None):
    """
    Given a txid, fetch and SPV-verify the block that contains it.

    Fetches the raw transaction and its block from @bitcoind_proxy
    (retrying network errors with backoff), then checks that the block
    header is present in the local SPV headers, that the block's
    transaction IDs match the header's Merkle root, and that the block
    hash matches the header.

    Return (block_hash, block_data, tx_data) on success.
    Return (None, None, None) on any failure.
    """
    proxy = get_default_proxy() if proxy is None else proxy

    timeout = 1.0
    while True:
        try:
            untrusted_tx_data = bitcoind_proxy.getrawtransaction(txid, 1)
            untrusted_block_hash = untrusted_tx_data['blockhash']
            untrusted_block_data = bitcoind_proxy.getblock(untrusted_block_hash)
            break
        except (OSError, IOError) as ie:
            log.exception(ie)
            log.error('Network error; retrying...')
            # exponential backoff with jitter
            # NOTE(review): random.randint is called with a float bound
            # here (timeout starts at 1.0) -- works on Python 2 but is
            # a TypeError on Python 3; confirm intended runtime
            timeout = timeout * 2 + random.randint(0, timeout)
            continue
        except Exception as e:
            log.exception(e)
            return None, None, None

    bitcoind_opts = get_bitcoin_opts()
    spv_headers_path = bitcoind_opts['bitcoind_spv_path']

    # find this block header in our trusted SPV header chain
    untrusted_block_header_hex = virtualchain.block_header_to_hex(untrusted_block_data, untrusted_block_data['previousblockhash'])
    block_id = SPVClient.block_header_index(spv_headers_path, ('{}00'.format(untrusted_block_header_hex)).decode('hex'))
    if block_id < 0:
        log.error('Block header "{}" is not in the SPV headers ({})'.format(untrusted_block_header_hex, spv_headers_path))
        return None, None, None

    # verify block txids against the Merkle root of the trusted header
    verified_block_header = virtualchain.block_verify(untrusted_block_data)
    if not verified_block_header:
        msg = ('Block transaction IDs are not consistent '
               'with the Merkle root of the trusted header')
        log.error(msg)
        return None, None, None

    # verify the block hash itself against the header
    verified_block_hash = virtualchain.block_header_verify(untrusted_block_data, untrusted_block_data['previousblockhash'], untrusted_block_hash)
    if not verified_block_hash:
        log.error('Block hash is not consistent with block header')
        return None, None, None

    # all checks passed: the data can now be trusted
    block_hash = untrusted_block_hash
    block_data = untrusted_block_data
    tx_data = untrusted_tx_data

    return block_hash, block_data, tx_data
Given a txid get its block s data .
36,744
def get_consensus_hash_from_tx(tx):
    """
    Extract the consensus hash from an SPV-verified transaction.
    Only works if the tx encodes a NAME_PREORDER, NAMESPACE_PREORDER,
    or NAME_TRANSFER operation.
    Return the hex-encoded consensus hash on success; None otherwise.
    """
    opcode, payload = parse_tx_op_return(tx)
    if opcode is None or payload is None:
        return None

    if opcode in [NAME_PREORDER, NAMESPACE_PREORDER, NAME_TRANSFER]:
        # the consensus hash is the last 16 bytes of the payload
        consensus_hash = payload[-16:].encode('hex')
        return consensus_hash

    # BUG FIX: the log message misspelled "NAMESPACE_PREORDER"
    # as "NAMESPACE_PROERDER"
    msg = ('Blockchain ID transaction is not a '
           'NAME_PREORDER, NAMESPACE_PREORDER or NAME_TRANSFER')
    log.error(msg)
    return None
Given an SPV-verified transaction, extract its consensus hash. Only works if the tx encodes a NAME_PREORDER, NAMESPACE_PREORDER, or NAME_TRANSFER.
36,745
def json_is_exception(resp):
    """
    Determine whether a response object carries an exception traceback:
    it must be an error response with both 'traceback' and 'error'
    keys present.
    """
    if not json_is_error(resp):
        return False

    return 'traceback' in resp.keys() and 'error' in resp.keys()
Is the given response object an exception traceback?
36,746
def put_zonefiles(hostport, zonefile_data_list, timeout=30, my_hostport=None, proxy=None):
    """
    Push one or more zone files to the given server.
    Each zone file in @zonefile_data_list must be base64-encoded.

    Return the validated server response ({'saved': [0|1, ...]}, one
    flag per submitted zone file) on success.
    Return a dict with 'error' and 'http_status' on failure.
    """
    assert hostport or proxy, 'need either hostport or proxy'

    # the server must report one saved-flag per zone file submitted
    saved_schema = {
        'type': 'object',
        'properties': {
            'saved': {
                'type': 'array',
                'items': {
                    'type': 'integer',
                    'minimum': 0,
                    'maximum': 1,
                },
                'minItems': len(zonefile_data_list),
                'maxItems': len(zonefile_data_list)
            },
        },
        'required': [
            'saved'
        ]
    }

    schema = json_response_schema(saved_schema)

    if proxy is None:
        proxy = connect_hostport(hostport)

    push_info = None
    try:
        push_info = proxy.put_zonefiles(zonefile_data_list)
        push_info = json_validate(schema, push_info)
        if json_is_error(push_info):
            return push_info

    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp

    except ValidationError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        resp = {'error': 'Server response did not match expected schema.  You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node.  Try again with `--debug`.', 'http_status': 500}
        return resp

    return push_info
Push one or more zonefiles to the given server . Each zone file in the list must be base64 - encoded
36,747
def get_zonefiles_by_block(from_block, to_block, hostport=None, proxy=None):
    """
    Get zone file information for zone files announced in the block range
    [from_block, to_block], paging through the server's results.

    Returns {'last_block': ..., 'zonefile_info': [...]} on success.
    Returns an error dict (from the server or from schema validation) on failure.
    """
    assert hostport or proxy, 'need either hostport or proxy'
    if proxy is None:
        proxy = connect_hostport(hostport)

    zonefile_info_schema = {
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'zonefile_hash': {'type': 'string', 'pattern': OP_ZONEFILE_HASH_PATTERN},
                'txid': {'type': 'string', 'pattern': OP_TXID_PATTERN},
                'block_height': {'type': 'integer'}
            },
            'required': ['zonefile_hash', 'txid', 'block_height']
        }
    }

    response_schema = {
        'type': 'object',
        'properties': {
            'lastblock': {'type': 'integer'},
            'zonefile_info': zonefile_info_schema
        },
        'required': ['lastblock', 'zonefile_info']
    }

    offset = 0
    output_zonefiles = []
    last_server_block = 0

    # seed resp so the loop condition is well-defined on the first pass
    resp = {'zonefile_info': []}

    # page through results 100 at a time until the server returns an empty page
    while offset == 0 or len(resp['zonefile_info']) > 0:
        resp = proxy.get_zonefiles_by_block(from_block, to_block, offset, 100)
        if 'error' in resp:
            return resp

        resp = json_validate(response_schema, resp)
        if json_is_error(resp):
            return resp

        output_zonefiles += resp['zonefile_info']
        offset += 100

        # track the highest chain tip the server reported across pages
        last_server_block = max(resp['lastblock'], last_server_block)

    return {'last_block': last_server_block, 'zonefile_info': output_zonefiles}
Get zonefile information for zonefiles announced in the block range [from_block, to_block].
36,748
def get_account_tokens(address, hostport=None, proxy=None):
    """
    Get the types of tokens that an address owns.

    Returns a sorted list of token-type strings on success.
    Returns a {'error': ..., 'http_status': ...} dict on failure.
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    tokens_schema = {
        'type': 'object',
        'properties': {
            'token_types': {
                'type': 'array',
                'pattern': '^(.+){1,19}',
            },
        },
        'required': [
            'token_types',
        ]
    }

    schema = json_response_schema(tokens_schema)

    try:
        resp = proxy.get_account_tokens(address)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp

    except ValidationError as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        resp = {'error': 'Server response did not match expected schema.  You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp

    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp

    except AssertionError as ae:
        if BLOCKSTACK_DEBUG:
            log.exception(ae)

        # NOTE(review): resp here is the pre-validation server reply (if any);
        # json_traceback is given its 'error' field, which may be absent.
        resp = json_traceback(resp.get('error'))
        return resp

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node.  Try again with `--debug`.', 'http_status': 500}
        return resp

    # sort in place, then return only the list of token types
    resp['token_types'].sort()
    return resp['token_types']
Get the types of tokens that an address owns Returns a list of token types
36,749
def get_account_balance(address, token_type, hostport=None, proxy=None):
    """
    Get the balance of an account for a particular token type.

    Returns an int (the balance) on success.
    Returns a {'error': ..., 'http_status': ...} dict on failure.
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    balance_schema = {
        'type': 'object',
        'properties': {
            'balance': {
                'type': 'integer',
            },
        },
        'required': [
            'balance',
        ],
    }

    schema = json_response_schema(balance_schema)

    try:
        resp = proxy.get_account_balance(address, token_type)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp

    except ValidationError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        resp = {'error': 'Server response did not match expected schema.  You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp

    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node.  Try again with `--debug`.', 'http_status': 500}
        return resp

    # on success, unwrap to the bare integer
    return resp['balance']
Get the balance of an account for a particular token Returns an int
36,750
def get_name_DID(name, proxy=None, hostport=None):
    """
    Get the DID for a name or subdomain.

    Returns the DID string on success.
    Returns an error dict on failure (including when the server replies a DID
    that parse_DID cannot parse).
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    did_schema = {
        'type': 'object',
        'properties': {
            'did': {'type': 'string'}
        },
        'required': ['did'],
    }

    schema = json_response_schema(did_schema)

    resp = {}
    try:
        resp = proxy.get_name_DID(name)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp

        # sanity check: the DID the server gave us must be parseable
        assert parse_DID(resp['did'])

    except ValidationError as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        resp = {'error': 'Server response did not match expected schema.  You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp

    except AssertionError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        resp = {'error': 'Server replied an unparseable DID'}
        return resp

    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node.  Try again with `--debug`.', 'http_status': 500}
        return resp

    return resp['did']
Get the DID for a name or subdomain Return the DID string on success Return None if not found
36,751
def get_JWT(url, address=None):
    """
    Given a URL, fetch and decode the JWT it points to.
    If address is given, also check that the JWT's issuer public key hashes to
    that address and that the token's signature verifies against that key.

    Supports file:// URLs (read from disk) and anything requests.get() accepts.

    Returns the decoded JWT dict on success.
    Returns None on any fetch, decode, structure, or verification failure.
    """
    jwt_txt = None
    jwt = None

    log.debug("Try {}".format(url))
    urlinfo = urllib2.urlparse.urlparse(url)

    if urlinfo.scheme == 'file':
        # local file: read its contents directly
        try:
            with open(urlinfo.path, 'r') as f:
                jwt_txt = f.read()
        except Exception as e:
            if BLOCKSTACK_TEST:
                log.exception(e)

            log.warning("Failed to read {}".format(url))
            return None

    else:
        # remote URL: fetch over HTTP(S)
        try:
            resp = requests.get(url)
            assert resp.status_code == 200, 'Bad status code on {}: {}'.format(url, resp.status_code)
            jwt_txt = resp.text
        except Exception as e:
            if BLOCKSTACK_TEST:
                log.exception(e)

            log.warning("Unable to resolve {}".format(url))
            return None

    try:
        # the payload may be a JSON list of {'token': ...} wrappers;
        # if so, unwrap the first token.  Otherwise use the text as-is.
        try:
            jwt_txt = json.loads(jwt_txt)[0]['token']
        except:
            pass

        jwt = jsontokens.decode_token(jwt_txt)
    except Exception as e:
        if BLOCKSTACK_TEST:
            log.exception(e)

        log.warning("Unable to decode token at {}".format(url))
        return None

    try:
        # structural sanity checks on the decoded token
        assert isinstance(jwt, dict)
        assert 'payload' in jwt, jwt
        assert isinstance(jwt['payload'], dict)
        assert 'issuer' in jwt['payload'], jwt
        assert isinstance(jwt['payload']['issuer'], dict)
        assert 'publicKey' in jwt['payload']['issuer'], jwt

        # the issuer public key must at least be loadable
        assert virtualchain.ecdsalib.ecdsa_public_key(str(jwt['payload']['issuer']['publicKey']))
    except AssertionError as ae:
        if BLOCKSTACK_TEST or BLOCKSTACK_DEBUG:
            log.exception(ae)

        log.warning("JWT at {} is malformed".format(url))
        return None

    # authentication (address match + signature check) happens only when an
    # address was supplied; otherwise the token is returned unverified.
    if address is not None:
        public_key = str(jwt['payload']['issuer']['publicKey'])

        # accept either the compressed or uncompressed encoding of the key
        addrs = [virtualchain.address_reencode(virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.decompress(public_key)).address()),
                 virtualchain.address_reencode(virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.compress(public_key)).address())]

        if virtualchain.address_reencode(address) not in addrs:
            log.warning("Found JWT at {}, but its public key has addresses {} and {} (expected {})".format(url, addrs[0], addrs[1], address))
            return None

        verifier = jsontokens.TokenVerifier()
        if not verifier.verify(jwt_txt, public_key):
            log.warning("Found JWT at {}, but it was not signed by {} ({})".format(url, public_key, address))
            return None

    return jwt
Given a URL fetch and decode the JWT it points to . If address is given then authenticate the JWT with the address .
36,752
def decode_name_zonefile(name, zonefile_txt):
    """
    Decode a zone file for a name.  The text must be either a well-formed DNS
    zone file or a legacy JSON (Onename) profile.

    Return the parsed dict on success; return None on error.
    """
    parsed = None
    try:
        # primary path: a standard DNS zone file
        parsed = dict(blockstack_zones.parse_zone_file(zonefile_txt))

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        # not a DNS zone file; fall back to the legacy JSON profile format
        log.debug('WARN: failed to parse user zonefile; trying to import as legacy')
        try:
            parsed = json.loads(zonefile_txt)
        except Exception:
            log.error('Failed to parse non-standard zonefile')
            return None

        if not isinstance(parsed, dict):
            log.debug('Not a legacy user zonefile')
            return None

    except Exception as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        log.error('Failed to parse zonefile')
        return None

    return parsed
Decode a zone file for a name . Must be either a well - formed DNS zone file or a legacy Onename profile . Return None on error
36,753
def _send_headers(self, status_code=200, content_type='application/json', more_headers=None):
    """
    Send the response status line and standard headers.

    Always sends content-type and a permissive CORS origin header; any
    extra (header, value) pairs in more_headers are sent as well.

    FIX: more_headers previously defaulted to a shared mutable dict ({}),
    which is the classic mutable-default-argument hazard.  It now defaults
    to None; passing {} or nothing behaves exactly as before.
    """
    self.send_response(status_code)
    self.send_header('content-type', content_type)
    self.send_header('Access-Control-Allow-Origin', '*')

    if more_headers:
        for (hdr, val) in more_headers.items():
            self.send_header(hdr, val)

    self.end_headers()
Generate and reply headers
36,754
def _reply_json(self, json_payload, status_code=200):
    """
    Serialize a JSON-able payload and write it as the response body,
    preceded by the standard headers.
    """
    self._send_headers(status_code=status_code)
    self.wfile.write(json.dumps(json_payload))
Return a JSON - serializable data structure
36,755
def _read_json(self, schema=None, maxlen=JSONRPC_MAX_SIZE):
    """
    Read a JSON payload from the requester.

    Requires the request content-type to be application/json and the body to
    be at most maxlen bytes.  If a JSON schema is given, the payload is
    validated against it.

    Returns the parsed payload on success.
    Returns {"error": "maxLength"} if the payload violates a maxLength rule.
    Returns None on read/parse error or wrong content-type.

    NOTE(review): for ValidationErrors other than maxLength, control falls
    through to the final return and the *invalid* parsed request is returned
    as if it were valid — confirm whether this is intentional.
    """
    request_type = self.headers.get('content-type', None)
    client_address_str = "{}:{}".format(self.client_address[0], self.client_address[1])

    if request_type != 'application/json':
        log.error("Invalid request of type {} from {}".format(request_type, client_address_str))
        return None

    request_str = self._read_payload(maxlen=maxlen)
    if request_str is None:
        log.error("Failed to read request")
        return None

    request = None
    try:
        request = json.loads(request_str)
        if schema is not None:
            jsonschema.validate(request, schema)

    except ValidationError as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        # only log a short prefix of the body
        log.error("Validation error on request {}...".format(request_str[:15]))
        if ve.validator == "maxLength":
            return {"error": "maxLength"}

    except (TypeError, ValueError) as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        return None

    return request
Read a JSON payload from the requester Return the parsed payload on success Return None on error
36,756
def parse_qs(self, qs):
    """
    Parse a query string, enforcing exactly one instance of each variable.

    Return a dict mapping each variable to its single value on success.
    Return None if any variable appears more than once.
    """
    parsed = urllib2.urlparse.parse_qs(qs)
    flattened = {}
    for var, vals in parsed.items():
        if len(vals) > 1:
            # duplicate variable: reject the whole query string
            return None

        flattened[var] = vals[0]

    return flattened
Parse query string but enforce one instance of each variable . Return a dict with the variables on success Return None on parse error
36,757
def get_path_and_qs(self):
    """
    Parse the request URL into its path and query values; fragments are
    discarded.

    Returns {'path': ..., 'qs_values': ..., 'parts': ...} on success, where
    'parts' is the normalized path split on '/'.
    Returns {'error': ...} if the query string fails to parse (duplicate vars).
    """
    path_parts = self.path.split("?", 1)

    if len(path_parts) > 1:
        # strip any fragment from the query string
        qs = path_parts[1].split("#", 1)[0]
    else:
        qs = ""

    # strip any fragment from the path, unquote, and normalize
    path = path_parts[0].split("#", 1)[0]
    path = posixpath.normpath(urllib.unquote(path))

    qs_values = self.parse_qs(qs)
    if qs_values is None:
        return {'error': 'Failed to parse query string'}

    parts = path.strip('/').split('/')

    return {'path': path, 'qs_values': qs_values, 'parts': parts}
Parse and obtain the path and query values . We don t care about fragments .
36,758
def OPTIONS_preflight(self, path_info):
    """
    Answer a CORS preflight check with permissive headers.
    """
    self.send_response(200)

    cors_headers = [
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'GET, PUT, POST, DELETE'),
        ('Access-Control-Allow-Headers', 'content-type, authorization, range'),
        ('Access-Control-Expose-Headers', 'content-length, content-range'),
        ('Access-Control-Max-Age', 21600),
    ]
    for hdr, val in cors_headers:
        self.send_header(hdr, val)

    self.end_headers()
    return
Give back CORS preflight check headers
36,759
def GET_names_owned_by_address(self, path_info, blockchain, address):
    """
    Get all names and subdomains owned by an address.

    Replies {'names': [...]} on success.
    Replies 400 on an invalid address, 404 on an unsupported blockchain,
    and 502 (or the server-supplied status) on failure to list names.

    The subdomain lookup is best-effort: if it fails, an empty list is used
    and only the on-chain name lookup can fail the request.
    """
    if not check_address(address):
        return self._reply_json({'error': 'Invalid address'}, status_code=400)

    if blockchain != 'bitcoin':
        return self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)

    blockstackd_url = get_blockstackd_url()
    address = str(address)

    subdomain_names = blockstackd_client.get_subdomains_owned_by_address(address, hostport=blockstackd_url)
    if json_is_error(subdomain_names):
        log.error("Failed to fetch subdomains owned by address")
        log.error(subdomain_names)
        subdomain_names = []

    # canonicalize the address before querying on-chain names
    new_addr = virtualchain.address_reencode(address)
    if new_addr != address:
        # FIX: format arguments were reversed (message read "Re-encode <new> to <old>")
        log.debug("Re-encode {} to {}".format(address, new_addr))
        address = new_addr

    res = blockstackd_client.get_names_owned_by_address(address, hostport=blockstackd_url)
    if json_is_error(res):
        log.error("Failed to get names owned by address")
        self._reply_json({'error': 'Failed to list names by address'}, status_code=res.get('http_status', 502))
        return

    self._reply_json({'names': res + subdomain_names})
    return
Get all names owned by an address Returns the list on success Return 404 on unsupported blockchain Return 502 on failure to get names for any non - specified reason
36,760
def GET_account_record(self, path_info, account_addr, token_type):
    """
    Get the state of a particular token account.

    Replies the account record on success; 400 on a bad address or token
    type; otherwise the server-supplied status (default 500) on failure.
    """
    if not check_account_address(account_addr):
        return self._reply_json({'error': 'Invalid address'}, status_code=400)

    if not check_token_type(token_type):
        return self._reply_json({'error': 'Invalid token type'}, status_code=400)

    account = blockstackd_client.get_account_record(account_addr, token_type, hostport=get_blockstackd_url())
    if json_is_error(account):
        log.error("Failed to get account state for {} {}: {}".format(account_addr, token_type, account['error']))
        return self._reply_json(
            {'error': 'Failed to get account record for {} {}: {}'.format(token_type, account_addr, account['error'])},
            status_code=account.get('http_status', 500))

    return self._reply_json(account)
Get the state of a particular token account Returns the account
36,761
def GET_names(self, path_info):
    """
    Get all names in existence, 100 per page.  With all=true in the query
    string, expired names are included.

    Replies the list on success; 400 on a missing/invalid page; otherwise
    the server-supplied status (default 502) on failure.
    """
    qs = path_info['qs_values']

    page_str = qs.get('page', None)
    if page_str is None:
        log.error("Page required")
        return self._reply_json({'error': 'page= argument required'}, status_code=400)

    try:
        page_num = int(page_str)
        if page_num < 0:
            raise ValueError("Page is negative")
    except ValueError:
        log.error("Invalid page")
        return self._reply_json({'error': 'Invalid page= value'}, status_code=400)

    show_expired = qs.get('all', '').lower() in ['1', 'true']

    res = blockstackd_client.get_all_names(page_num * 100, 100, include_expired=show_expired, hostport=get_blockstackd_url())
    if json_is_error(res):
        log.error("Failed to list all names (offset={}, count={}): {}".format(page_num * 100, 100, res['error']))
        return self._reply_json({'error': 'Failed to list all names'}, status_code=res.get('http_status', 502))

    return self._reply_json(res)
Get all names in existence If all = true is set then include expired names . Returns the list on success Returns 400 on invalid arguments Returns 502 on failure to get names
36,762
def GET_name_history(self, path_info, name):
    """
    Get one page of the history of a name or subdomain.  The optional
    'page' query value defaults to "0".

    Replies the history page on success; 400 on an invalid name or page;
    otherwise the server-supplied status (default 502) on failure.
    """
    if not (check_name(name) or check_subdomain(name)):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)

    page_arg = path_info['qs_values'].get('page', None)
    if page_arg is None:
        page_arg = "0"

    try:
        # cap the digit count before converting, and bound the value to 32 bits
        assert len(page_arg) < 10
        page_arg = int(page_arg)
        assert 0 <= page_arg <= 2 ** 32 - 1
    except:
        log.error("Invalid page")
        self._reply_json({'error': 'Invalid page'}, status_code=400)
        return

    res = blockstackd_client.get_name_history_page(name, page_arg, hostport=get_blockstackd_url())
    if json_is_error(res):
        log.error('Failed to get name history for {}: {}'.format(name, res['error']))
        return self._reply_json({'error': res['error']}, status_code=res.get('http_status', 502))

    return self._reply_json(res['history'])
Get the history of a name or subdomain . Requires page in the query string return the history on success return 400 on invalid start_block or end_block return 502 on failure to query blockstack server
36,763
def GET_name_zonefile_by_hash(self, path_info, name, zonefile_hash):
    """
    Get a historic zone file for a name by its hash.  With raw=1 (or raw=true)
    in the query string, the raw zone file bytes are returned; otherwise the
    zone file is parsed and returned as {'zonefile': ...}.

    Replies 400 on a bad name or hash; 404 if the hash was never set for the
    name or the server does not have the zone file; 204 (with an error body)
    if the zone file cannot be parsed; otherwise server-supplied status on
    upstream failure.
    """
    if not check_name(name) and not check_subdomain(name):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)

    if not check_string(zonefile_hash, pattern=OP_ZONEFILE_HASH_PATTERN):
        return self._reply_json({'error': 'Invalid zone file hash'}, status_code=400)

    raw = path_info['qs_values'].get('raw', '')
    raw = (raw.lower() in ['1', 'true'])

    blockstack_hostport = get_blockstackd_url()

    # confirm this hash was actually set for this name at some point
    was_set = blockstackd_client.is_name_zonefile_hash(name, zonefile_hash, hostport=blockstack_hostport)
    if json_is_error(was_set):
        return self._reply_json({'error': was_set['error']}, status_code=was_set.get('http_status', 502))

    if not was_set['result']:
        self._reply_json({'error': 'No such zonefile'}, status_code=404)
        return

    resp = blockstackd_client.get_zonefiles(blockstack_hostport, [str(zonefile_hash)])
    if json_is_error(resp):
        self._reply_json({'error': resp['error']}, status_code=resp.get('http_status', 502))
        return

    if str(zonefile_hash) not in resp['zonefiles']:
        return self._reply_json({'error': 'Blockstack does not have this zonefile.  Try again later.'}, status_code=404)

    if raw:
        # raw mode: send the zone file bytes verbatim
        self._send_headers(status_code=200, content_type='application/octet-stream')
        self.wfile.write(resp['zonefiles'][str(zonefile_hash)])

    else:
        # NOTE(review): this membership re-check is unreachable — the same
        # condition already returned 404 above.
        if str(zonefile_hash) not in resp['zonefiles']:
            log.debug('Failed to find zonefile hash {}, possess {}'.format(str(zonefile_hash), resp['zonefiles'].keys()))
            return self._reply_json({'error': 'No such zonefile'}, status_code=404)

        zonefile_txt = resp['zonefiles'][str(zonefile_hash)]
        res = decode_name_zonefile(name, zonefile_txt)
        if res is None:
            log.error("Failed to parse zone file for {}".format(name))
            # NOTE(review): a 204 with a body is unusual (204 means No Content)
            self._reply_json({'error': 'Non-standard zone file for {}'.format(name)}, status_code=204)
            return

        self._reply_json({'zonefile': zonefile_txt})

    return
Get a historic zonefile for a name With raw = 1 on the query string return the raw zone file
36,764
def GET_namespaces(self, path_info):
    """
    List every namespace the Blockstack server knows about.

    Replies the namespace list on success; replies the server-supplied
    status (default 502) if the server cannot be reached.
    """
    qs = path_info['qs_values']

    namespaces = blockstackd_client.get_all_namespaces(
        offset=qs.get('offset', None),
        count=qs.get('count', None),
        hostport=get_blockstackd_url())

    if json_is_error(namespaces):
        return self._reply_json({'error': namespaces['error']}, status_code=namespaces.get('http_status', 502))

    return self._reply_json(namespaces)
Get the list of all namespaces Reply all existing namespaces Reply 502 if we can t reach the server for whatever reason
36,765
def GET_namespace_info(self, path_info, namespace_id):
    """
    Look up a namespace's info.

    Replies the namespace record on success; 400 on an invalid namespace
    ID; otherwise the server-supplied status (default 502) on failure.
    """
    if not check_namespace(namespace_id):
        return self._reply_json({'error': 'Invalid namespace'}, status_code=400)

    rec = blockstackd_client.get_namespace_record(namespace_id, hostport=get_blockstackd_url())
    if json_is_error(rec):
        return self._reply_json({'error': rec['error']}, status_code=rec.get('http_status', 502))

    return self._reply_json(rec)
Look up a namespace's info. Reply information about a namespace. Reply 404 if the namespace doesn't exist. Reply 502 for any error in talking to the blockstack server.
36,766
def GET_namespace_num_names(self, path_info, namespace_id):
    """
    Get the number of names in a namespace.

    Replies {'names_count': N} on success; 400 on an invalid namespace ID;
    404 if the count cannot be loaded.
    """
    if not check_namespace(namespace_id):
        return self._reply_json({'error': 'Invalid namespace'}, status_code=400)

    count = blockstackd_client.get_num_names_in_namespace(namespace_id, hostport=get_blockstackd_url())
    if json_is_error(count):
        log.error("Failed to load namespace count for {}: {}".format(namespace_id, count['error']))
        return self._reply_json({'error': 'Failed to load namespace count: {}'.format(count['error'])}, status_code=404)

    return self._reply_json({'names_count': count})
Get the number of names in a namespace Reply the number on success Reply 404 if the namespace does not exist Reply 502 on failure to talk to the blockstack server
36,767
def GET_namespace_names(self, path_info, namespace_id):
    """
    Get one page (100 entries) of the names in a namespace.  Requires a
    'page' value in the query string.

    Replies the name list on success; 400 on a bad namespace or page;
    otherwise the server-supplied status (default 502) on failure.
    """
    if not check_namespace(namespace_id):
        return self._reply_json({'error': 'Invalid namespace'}, status_code=400)

    page_str = path_info['qs_values'].get('page', None)
    if page_str is None:
        log.error("Page required")
        return self._reply_json({'error': 'page= argument required'}, status_code=400)

    try:
        page_num = int(page_str)
        if page_num < 0:
            raise ValueError()
    except ValueError:
        log.error("Invalid page")
        return self._reply_json({'error': 'Invalid page= value'}, status_code=400)

    names = blockstackd_client.get_names_in_namespace(
        namespace_id, offset=page_num * 100, count=100, hostport=get_blockstackd_url())

    if json_is_error(names):
        return self._reply_json({'error': names['error']}, status_code=names.get('http_status', 502))

    return self._reply_json(names)
Get the list of names in a namespace Reply the list of names in a namespace Reply 404 if the namespace doesn t exist Reply 502 for any error in talking to the blockstack server
36,768
def GET_blockchain_ops(self, path_info, blockchain_name, blockheight):
    """
    Get the Blockstack name operations at a given block height.

    Replies the list of name operations on success; 400 on an invalid
    block; 404 for unsupported blockchains; otherwise the server-supplied
    status (default 502) on failure.
    """
    try:
        blockheight = int(blockheight)
        assert check_block(blockheight)
    except:
        return self._reply_json({'error': 'Invalid block'}, status_code=400)

    if blockchain_name != 'bitcoin':
        return self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)

    ops = blockstackd_client.get_blockstack_transactions_at(int(blockheight), hostport=get_blockstackd_url())
    if json_is_error(ops):
        return self._reply_json({'error': ops['error']}, status_code=ops.get('http_status', 502))

    return self._reply_json(ops)
Get the name s historic name operations Reply the list of nameops at the given block height Reply 404 for blockchains other than those supported Reply 502 for any error we have in talking to the blockstack server
36,769
def GET_blockchain_name_record(self, path_info, blockchain_name, name):
    """
    Get the name's raw blockchain record (no history).

    Replies the record on success; 400 on an invalid name; 404 for
    unsupported blockchains; otherwise server-supplied status (default 502).
    """
    if not (check_name(name) or check_subdomain(name)):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)

    if blockchain_name != 'bitcoin':
        return self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)

    rec = blockstackd_client.get_name_record(name, include_history=False, hostport=get_blockstackd_url())
    if json_is_error(rec):
        return self._reply_json({'error': rec['error']}, status_code=rec.get('http_status', 502))

    return self._reply_json(rec)
Get the name s blockchain record in full Reply the raw blockchain record on success Reply 404 if the name is not found Reply 502 if we have an error talking to the server
36,770
def _get_balance(self, get_address, min_confs):
    """
    (Test mode only.)  Get the confirmed balance for an address by summing
    the values of its UTXOs with at least min_confs confirmations.

    Returns {'balance': satoshis} on success, {'error': ...} on failure.
    """
    opts = get_bitcoin_opts()
    bitcoind = create_bitcoind_service_proxy(
        opts['bitcoind_user'], opts['bitcoind_passwd'],
        server=opts['bitcoind_server'], port=opts['bitcoind_port'])

    address = virtualchain.address_reencode(get_address)

    try:
        unspents = get_unspents(address, bitcoind)
    except Exception as e:
        log.exception(e)
        return {'error': 'Failed to get unspents for {}'.format(get_address)}

    confirmed = sum(utxo['value'] for utxo in unspents if utxo['confirmations'] >= min_confs)
    return {'balance': confirmed}
Works only in test mode! Get the confirmed balance for an address
36,771
def bind(self):
    """
    Bind and activate the server socket.

    Sets SO_REUSEADDR so the port can be rebound quickly after a restart,
    and marks handler threads as daemons so they don't block shutdown.

    FIX: corrected the typo'd log message ("SO_REUSADDR" -> "SO_REUSEADDR").
    """
    log.debug("Set SO_REUSEADDR")
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.daemon_threads = True
    self.server_bind()
    self.server_activate()
Bind to our port
36,772
def overloaded(self, client_addr):
    """
    Build the canned HTTP 429 response used to deflect a client when too
    many requests are in flight.
    """
    if BLOCKSTACK_TEST:
        log.warn('Too many requests; deflecting {}'.format(client_addr))

    return ('HTTP/1.0 429 Too Many Requests\r\n'
            'Server: BaseHTTP/0.3 Python/2.7.14+\r\n'
            'Content-type: text/plain\r\n'
            'Content-length: 17\r\n'
            '\r\n'
            'Too many requests')
Deflect if we have too many inbound requests
36,773
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """
    Make sure that a string is a byte string.

    Text is encoded with the given encoding; composed error handlers (e.g.
    'surrogate_or_strict') are mapped to a concrete codec error handler
    depending on surrogateescape availability.  Non-string objects are
    handled per the nonstring policy: 'simplerepr' (str/repr then encode),
    'passthru' (return as-is), 'empty' (empty bytes), or 'strict' (raise).
    """
    # already bytes: nothing to do
    if isinstance(obj, binary_type):
        return obj

    # remember the caller's choice before mapping composed handlers
    original_errors = errors
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, text_type):
        try:
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            # 'surrogate_then_replace' (and the None default): round-trip
            # through utf-8 with replacement, then encode to the target
            if original_errors in (None, 'surrogate_then_replace'):
                return_string = obj.encode('utf-8', 'surrogateescape')
                return_string = return_string.decode('utf-8', 'replace')
                return return_string.encode(encoding, 'replace')
            raise

    # non-string input: apply the nonstring policy
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # can't even repr it; give back empty bytes
                return to_bytes('')
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return to_bytes('')
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)

    # recurse on the stringified value
    return to_bytes(value, encoding, errors)
Make sure that a string is a byte string
36,774
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """
    Make sure that a string is a text (unicode) string.

    Bytes are decoded with the given encoding; composed error handlers are
    mapped to a concrete codec error handler depending on surrogateescape
    availability.  Non-string objects are handled per the nonstring policy:
    'simplerepr' (str/repr then decode), 'passthru' (return as-is),
    'empty' (empty text), or 'strict' (raise).
    """
    # already text: nothing to do
    if isinstance(obj, text_type):
        return obj

    # map composed handlers to a concrete codec error handler
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, binary_type):
        return obj.decode(encoding, errors)

    # non-string input: apply the nonstring policy
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # can't even repr it; give back empty text
                return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return u''
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)

    # recurse on the stringified value
    return to_text(value, encoding, errors)
Make sure that a string is a text string
36,775
def push_images(base_path, image_namespace, engine_obj, config, **kwargs):
    """
    Push images to a Docker registry.

    Resolves the target registry URL/namespace (from a named registry in
    config['registries'] or from the raw push_to value), prompts for a
    password if needed, ensures the auth config file exists, then runs the
    conductor 'push' command.

    Returns a dict of the attributes used to push: url, namespace,
    repository_prefix, and pull_from_url.

    Raises AnsibleContainerRegistryAttributeException if a named registry
    has no 'url'; AnsibleContainerException on a bad --config-path.
    """
    config_path = kwargs.get('config_path', engine_obj.auth_config_path)
    username = kwargs.get('username')
    password = kwargs.get('password')
    push_to = kwargs.get('push_to')

    # defaults; may be overridden below by push_to resolution
    url = engine_obj.default_registry_url
    registry_name = engine_obj.default_registry_name
    namespace = image_namespace
    save_conductor = config.save_conductor
    repository_prefix = None
    pull_from_url = None

    if push_to:
        if config.get('registries', dict()).get(push_to):
            # push_to names a registry defined in the config file
            url = config['registries'][push_to].get('url')
            namespace = config['registries'][push_to].get('namespace', namespace)
            repository_prefix = config['registries'][push_to].get('repository_prefix')
            pull_from_url = config['registries'][push_to].get('pull_from_url')
            if not url:
                raise AnsibleContainerRegistryAttributeException(
                    u"Registry {} missing required attribute 'url'".format(push_to))
        else:
            # push_to is a raw registry URL and/or namespace
            url, namespace = resolve_push_to(push_to, engine_obj.default_registry_url, namespace)

    if username and not password:
        # prompt (repeatedly, until non-empty) for the registry password
        if url != engine_obj.default_registry_url:
            registry_name = url
        while not password:
            password = getpass.getpass(u"Enter password for {0} at {1}: ".format(username, registry_name))

    if config_path:
        # make sure the auth config file exists (create parents as needed)
        config_path = os.path.normpath(os.path.expanduser(config_path))
        if os.path.exists(config_path) and os.path.isdir(config_path):
            raise AnsibleContainerException(
                u"Expecting --config-path to be a path to a file, not a directory")
        elif not os.path.exists(config_path):
            if not os.path.exists(os.path.dirname(config_path)):
                try:
                    os.makedirs(os.path.dirname(config_path), 0o750)
                except OSError:
                    raise AnsibleContainerException(
                        u"Failed to create the requested the path {}".format(os.path.dirname(config_path)))
            # touch the file so later writes succeed
            open(config_path, 'w').close()

    # remove the conductor container, if it exists, before pushing
    remove_existing_container(engine_obj, 'conductor', remove_volumes=True)

    push_params = {}
    push_params.update(kwargs)
    push_params['config_path'] = config_path
    push_params['password'] = password
    push_params['url'] = url
    push_params['namespace'] = namespace
    push_params['repository_prefix'] = repository_prefix
    push_params['pull_from_url'] = pull_from_url

    engine_obj.await_conductor_command('push', dict(config), base_path, push_params, save_container=save_conductor)

    return {'url': url,
            'namespace': namespace,
            'repository_prefix': repository_prefix,
            'pull_from_url': pull_from_url}
Pushes images to a Docker registry . Returns dict containing attributes used to push images .
36,776
def remove_existing_container(engine_obj, service_name, remove_volumes=False):
    """
    Stop (if running) and delete the container backing an existing service.
    Handy for removing a stale conductor container.
    """
    container_id = engine_obj.get_container_id_for_service(service_name)

    if engine_obj.service_is_running(service_name):
        engine_obj.stop_container(container_id, forcefully=True)

    if container_id:
        engine_obj.delete_container(container_id, remove_volumes=remove_volumes)
Remove a container for an existing service . Handy for removing an existing conductor .
36,777
def resolve_push_to(push_to, default_url, default_namespace):
    """
    Interpret a push-to value as a (registry_url, namespace) pair.

    A bare word (no '.' or ':', not 'localhost', no '/') is treated as a
    namespace on the default registry. Anything else is treated as a
    registry host, optionally followed by '/<namespace>'.
    """
    protocol = 'http://' if push_to.startswith('http://') else 'https://'
    stripped = REMOVE_HTTP.sub('', push_to)
    pieces = stripped.split('/', 1)
    host = pieces[0]
    if len(pieces) == 2:
        # Explicit "<host>/<namespace>" form.
        return protocol + host, pieces[1]
    if host == 'localhost' or '.' in host or ':' in host:
        # Looks like a registry host with no namespace given.
        return protocol + host, default_namespace
    # A plain word: it is a namespace on the default registry.
    return default_url, stripped
Given a push-to value, return the registry URL and namespace.
36,778
def conductorcmd_push(engine_name, project_name, services, **kwargs):
    """
    Conductor-side handler for the ``push`` command.

    Logs into the target registry, then pushes the most recently built
    image for every role-based service (or per-container sub-service) in
    *services*.

    :param engine_name: name of the container engine to load
    :param project_name: project the images belong to
    :param services: mapping of service name -> service definition
    :param kwargs: push parameters (username, password, email, url,
        namespace, tag, config_path, repository_prefix, ...)
    """
    username = kwargs.pop('username')
    password = kwargs.pop('password')
    email = kwargs.pop('email')
    url = kwargs.pop('url')
    namespace = kwargs.pop('namespace')
    tag = kwargs.pop('tag')
    config_path = kwargs.pop('config_path')
    repository_prefix = kwargs.pop('repository_prefix')
    engine = load_engine(['PUSH', 'LOGIN'], engine_name, project_name, services)
    logger.info(u'Engine integration loaded. Preparing push.',
                engine=engine.display_name)
    # Resolve effective credentials (may come from the stored config file).
    username, password = engine.login(username, password, email, url, config_path)
    for name, service in iteritems(services):
        if service.get('containers'):
            # Multi-container service: push each role-built container image.
            for c in service['containers']:
                if 'roles' in c:
                    cname = '%s-%s' % (name, c['container_name'])
                    image_id = engine.get_latest_image_id_for_service(cname)
                    engine.push(image_id, cname, url=url, tag=tag,
                                namespace=namespace, username=username,
                                password=password,
                                repository_prefix=repository_prefix)
        elif 'roles' in service:
            image_id = engine.get_latest_image_id_for_service(name)
            engine.push(image_id, name, url=url, tag=tag, namespace=namespace,
                        username=username, password=password,
                        repository_prefix=repository_prefix)
Push images to a registry
36,779
def get_route_templates(self):
    """
    Generate OpenShift Route templates from the services in container.yml.

    Each published port on a service whose state is 'present' yields one
    Route template named '<service>-<port>'. Per-route overrides declared
    under the engine config key are copied onto the template spec when
    their 'port' matches.
    """
    def _get_published_ports(service_config):
        # Normalize 'ports' entries ("80", "8080:80", "53/udp", ...) into
        # dicts of {'port': host_port, 'protocol': 'tcp'|'udp'}.
        result = []
        for port in service_config.get('ports', []):
            protocol = 'TCP'
            if isinstance(port, string_types) and '/' in port:
                port, protocol = port.split('/')
            if isinstance(port, string_types) and ':' in port:
                host, container = port.split(':')
            else:
                host = port
            result.append({'port': host, 'protocol': protocol.lower()})
        return result

    templates = []
    for name, service_config in self._services.items():
        state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present')
        force = service_config.get(self.CONFIG_KEY, {}).get('force', False)
        published_ports = _get_published_ports(service_config)
        if state != 'present':
            continue
        for port in published_ports:
            route_name = "%s-%s" % (name, port['port'])
            labels = dict(
                app=self._namespace_name,
                service=name
            )
            template = CommentedMap()
            template['apiVersion'] = self.DEFAULT_API_VERSION
            template['kind'] = 'Route'
            template['force'] = force
            template['metadata'] = CommentedMap([
                ('name', route_name),
                ('namespace', self._namespace_name),
                ('labels', labels.copy())
            ])
            # Route targets the service and its named target port.
            template['spec'] = CommentedMap([
                ('to', CommentedMap([
                    ('kind', 'Service'),
                    ('name', name)
                ])),
                ('port', CommentedMap([
                    ('targetPort', 'port-{}-{}'.format(port['port'], port['protocol']))
                ]))
            ])
            # Copy user-supplied route attributes matching this port onto
            # the spec ('force' and 'port' are control keys, not spec keys).
            if service_config.get(self.CONFIG_KEY, {}).get('routes'):
                for route in service_config[self.CONFIG_KEY]['routes']:
                    if str(route.get('port')) == str(port['port']):
                        for key, value in route.items():
                            if key not in ('force', 'port'):
                                self.copy_attribute(template['spec'], key, value)
            templates.append(template)
    return templates
Generate Openshift route templates or playbook tasks . Each port on a service definition found in container . yml represents an externally exposed port .
36,780
def preparse_iter(self):
    """
    Split a Dockerfile into logical (directive, payload) units.

    Yields dicts with 'directive' and 'payload' keys, plus a 'comments'
    list of any comment lines preceding the directive. An
    ``# escape=<char>`` parser directive on the very first line updates
    ``self.escape_char``. A line ending with the escape char carries its
    directive over to the next line.
    """
    to_yield = {}
    last_directive = None
    lines_processed = 0
    for line in self.lines_iter():
        # Count lines so the escape parser directive is only honored on
        # line 1. (Fix: this counter was previously never incremented,
        # which made the escape= branch unreachable.)
        lines_processed += 1
        if not line:
            continue
        if line.startswith(u'#'):
            comment = line.lstrip('#').strip()
            if lines_processed == 1:
                if comment.startswith(u'escape='):
                    self.escape_char = comment.split(u'=', 1)[1]
                    continue
            to_yield.setdefault('comments', []).append(comment)
        else:
            if last_directive:
                # Continuation of an escaped line keeps the directive.
                directive, payload = last_directive, line
            else:
                directive, payload = line.split(u' ', 1)
            if line.endswith(self.escape_char):
                payload = payload.rstrip(self.escape_char)
                last_directive = directive
            else:
                last_directive = None
                to_yield['directive'] = directive
                to_yield['payload'] = payload.strip()
                yield to_yield
                to_yield = {}
Comments can be anywhere . So break apart the Dockerfile into significant lines and any comments that precede them . And if a line is a carryover from the previous via an escaped - newline bring the directive with it .
36,781
def run_container(self, image_id, service_name, **kwargs):
    """
    Start a detached container for *service_name* from *image_id*.

    Run parameters come from the service definition and are overridden by
    *kwargs*; container output is streamed to the plain logger. Returns
    the new container's id.
    """
    params = self.run_kwargs_for_service(service_name)
    # NOTE(review): 'relax' is presumably consumed by a custom mapping type
    # returned by run_kwargs_for_service; a plain dict would store it as a
    # key -- confirm.
    params.update(kwargs, relax=True)
    logger.debug('Running container in docker', image=image_id, params=params)
    container = self.client.containers.run(
        image=image_id,
        detach=True,
        **params
    )
    log_stream = container.logs(stdout=True, stderr=True, stream=True)
    mux = logmux.LogMultiplexer()
    mux.add_iterator(log_stream, plainLogger)
    return container.id
Run a particular container . The kwargs argument contains individual parameter overrides from the service definition .
36,782
def push(self, image_id, service_name, tag=None, namespace=None, url=None,
         username=None, password=None, repository_prefix=None, **kwargs):
    """
    Tag *image_id* for the target registry and push it.

    The repository is '<namespace>/<prefix>-<service_name>', where the
    prefix defaults to the project name, can be overridden via
    *repository_prefix*, or dropped entirely by passing ''. For a
    non-default registry the host is prepended to the repository. Push
    progress from the Docker API stream is relayed to the plain logger.

    :raises exceptions.AnsibleContainerException: when the registry
        reports an error during the push.
    """
    auth_config = {
        'username': username,
        'password': password
    }
    build_stamp = self.get_build_stamp_for_image(image_id)
    # Default the tag to the image's build stamp.
    tag = tag or build_stamp
    if repository_prefix:
        image_name = "{}-{}".format(repository_prefix, service_name)
    elif repository_prefix is None:
        # No explicit prefix: fall back to the project name.
        image_name = "{}-{}".format(self.project_name, service_name)
    elif repository_prefix == '':
        # Empty-string prefix: use the bare service name.
        image_name = service_name
    repository = "{}/{}".format(namespace, image_name)
    if url != self.default_registry_url:
        # Non-default registry: repository must carry the registry host.
        url = REMOVE_HTTP.sub('', url)
        repository = "%s/%s" % (url.rstrip('/'), repository)
    logger.info('Tagging %s' % repository)
    self.client.api.tag(image_id, repository, tag=tag)
    logger.info('Pushing %s:%s...' % (repository, tag))
    stream = self.client.api.push(repository, tag=tag, stream=True,
                                  auth_config=auth_config)
    last_status = None
    for data in stream:
        data = data.splitlines()
        for line in data:
            line = json.loads(line)
            if type(line) is dict and 'error' in line:
                plainLogger.error(line['error'])
                raise exceptions.AnsibleContainerException(
                    "Failed to push image. {}".format(line['error']))
            elif type(line) is dict and 'status' in line:
                # Only log status transitions, not every progress event.
                if line['status'] != last_status:
                    plainLogger.info(line['status'])
                last_status = line['status']
            else:
                plainLogger.debug(line)
Push an image to a remote registry .
36,783
def login(self, username, password, email, url, config_path):
    """
    Authenticate against a registry.

    If *username* and *password* are given, authenticate via the Docker
    client and persist the credentials to *config_path*; otherwise look up
    stored credentials for *url* in the config file.

    :return: the (username, password) pair actually in effect
    :raises exceptions.AnsibleContainerConductorException: when login
        fails or no credentials can be found.
    """
    if username and password:
        try:
            self.client.login(username=username, password=password, email=email,
                              registry=url, reauth=True)
        except docker_errors.APIError as exc:
            raise exceptions.AnsibleContainerConductorException(
                u"Error logging into registry: {}".format(exc))
        # (A redundant `except Exception: raise` no-op handler was removed
        # here; other exceptions propagate unchanged either way.)
        self._update_config_file(username, password, email, url, config_path)
    username, password = self._get_registry_auth(url, config_path)
    if not username:
        raise exceptions.AnsibleContainerConductorException(
            u'Please provide login credentials for registry {}.'.format(url))
    return username, password
If username and password are provided authenticate with the registry . Otherwise check the config file for existing authentication data .
36,784
def _update_config_file ( username , password , email , url , config_path ) : try : config = json . load ( open ( config_path , "r" ) ) except ValueError : config = dict ( ) if not config . get ( 'auths' ) : config [ 'auths' ] = dict ( ) if not config [ 'auths' ] . get ( url ) : config [ 'auths' ] [ url ] = dict ( ) encoded_credentials = dict ( auth = base64 . b64encode ( username + b':' + password ) , email = email ) config [ 'auths' ] [ url ] = encoded_credentials try : json . dump ( config , open ( config_path , "w" ) , indent = 5 , sort_keys = True ) except Exception as exc : raise exceptions . AnsibleContainerConductorException ( u"Failed to write registry config to {0} - {1}" . format ( config_path , exc ) )
Update the config file with the authorization .
36,785
def _get_registry_auth ( registry_url , config_path ) : username = None password = None try : docker_config = json . load ( open ( config_path ) ) except ValueError : return username , password if docker_config . get ( 'auths' ) : docker_config = docker_config [ 'auths' ] auth_key = docker_config . get ( registry_url , { } ) . get ( 'auth' , None ) if auth_key : username , password = base64 . b64decode ( auth_key ) . split ( ':' , 1 ) return username , password
Retrieve from the config file the current authentication for a given URL, and return the username and password.
36,786
def resolve_role_to_path(role):
    """
    Resolve a role definition (name or dict) from a service's role list to
    its on-disk path.
    """
    loader = DataLoader()
    try:
        var_manager = VariableManager(loader=loader)
    except TypeError:
        # Older Ansible: VariableManager() takes no loader argument.
        var_manager = VariableManager()
    include = RoleInclude.load(data=role, play=None,
                               variable_manager=var_manager, loader=loader)
    return include._role_path
Given a role definition from a service s list of roles returns the file path to the role
36,787
def get_role_fingerprint(role, service_name, config_vars):
    """
    Compute a stable sha256 hexdigest fingerprinting a role definition.

    The digest covers the role definition itself, every file under the
    role directory (recursing into dependencies declared in
    meta/main.yml), and any local files/directories referenced as 'src'
    by copy-style tasks of a playbook generated for the role.

    NOTE(review): the hash_obj.update('::') separator calls pass text,
    which only works on Python 2 (Python 3 hashers require bytes) --
    confirm before porting.
    """
    def hash_file(hash_obj, file_path):
        # Stream file contents into the hash in 64 KiB chunks.
        blocksize = 64 * 1024
        with open(file_path, 'rb') as ifs:
            while True:
                data = ifs.read(blocksize)
                if not data:
                    break
                hash_obj.update(data)
            hash_obj.update('::')

    def hash_dir(hash_obj, dir_path):
        # Hash every file path and its contents under dir_path.
        for root, dirs, files in os.walk(dir_path, topdown=True):
            for file_path in files:
                abs_file_path = os.path.join(root, file_path)
                hash_obj.update(abs_file_path.encode('utf-8'))
                hash_obj.update('::')
                hash_file(hash_obj, abs_file_path)

    def hash_role(hash_obj, role_path):
        # Hash the role directory, then recurse into dependencies, then
        # hash local sources referenced by copy-style tasks.
        hash_dir(hash_obj, role_path)
        for dependency in get_dependencies_for_role(role_path):
            if dependency:
                dependency_path = resolve_role_to_path(dependency)
                hash_role(hash_obj, dependency_path)
        loader = DataLoader()
        var_man = VariableManager(loader=loader)
        play = Play.load(
            generate_playbook_for_role(service_name, config_vars, role)[0],
            variable_manager=var_man, loader=loader)
        play_context = PlayContext(play=play)
        inv_man = InventoryManager(loader, sources=['%s,' % service_name])
        host = Host(service_name)
        iterator = PlayIterator(inv_man, play, play_context, var_man, config_vars)
        while True:
            _, task = iterator.get_next_task_for_host(host)
            if task is None:
                break
            if task.action in FILE_COPY_MODULES:
                src = task.args.get('src')
                if src is not None:
                    # Only hash existing absolute or parent-relative paths.
                    if not os.path.exists(src) or not src.startswith(('/', '..')):
                        continue
                    src = os.path.realpath(src)
                    if os.path.isfile(src):
                        hash_file(hash_obj, src)
                    else:
                        hash_dir(hash_obj, src)

    def get_dependencies_for_role(role_path):
        # Yield role names declared as dependencies in meta/main.yml.
        meta_main_path = os.path.join(role_path, 'meta', 'main.yml')
        if os.path.exists(meta_main_path):
            meta_main = yaml.safe_load(open(meta_main_path))
            if meta_main:
                for dependency in meta_main.get('dependencies', []):
                    yield dependency.get('role', None)

    hash_obj = hashlib.sha256()
    # Seed with the role definition itself (dict roles are JSON-encoded).
    hash_obj.update((json.dumps(role) if not isinstance(role, string_types) else role) + '::')
    hash_role(hash_obj, resolve_role_to_path(role))
    return hash_obj.hexdigest()
Given a role definition from a service's list of roles, return a hexdigest based on the role definition, the role contents, and the hexdigest of each dependency.
36,788
def on_predicate(wait_gen,
                 predicate=operator.not_,
                 max_tries=None,
                 max_time=None,
                 jitter=full_jitter,
                 on_success=None,
                 on_backoff=None,
                 on_giveup=None,
                 logger='backoff',
                 **wait_gen_kwargs):
    """
    Return a decorator that retries the target while *predicate* holds.

    :param wait_gen: generator yielding successive wait times in seconds
    :param predicate: retry while predicate(return_value) is truthy
        (default retries on falsey results)
    :param max_tries: maximum number of attempts, or None for no limit
    :param max_time: maximum total elapsed seconds, or None
    :param jitter: callable applied to each wait value
    :param on_success: handler(s) invoked on success events
    :param on_backoff: handler(s) invoked on backoff events
    :param on_giveup: handler(s) invoked on giveup events
    :param logger: logger instance, or a logger name to look up, used by
        the default backoff/giveup log handlers
    :param wait_gen_kwargs: extra keyword args forwarded to wait_gen
    """
    def decorate(target):
        logger_ = logger
        if isinstance(logger_, basestring):
            # A string names a logger to look up.
            logger_ = logging.getLogger(logger_)
        on_success_ = _config_handlers(on_success)
        on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
        on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
        retry = None
        if sys.version_info >= (3, 5):
            # Pick the coroutine-aware implementation for async targets.
            import asyncio
            if asyncio.iscoroutinefunction(target):
                import backoff._async
                retry = backoff._async.retry_predicate
            elif _is_event_loop() and _is_current_task():
                # A sync target sleeping inside a running event loop would
                # block the loop, so reject this combination outright.
                raise TypeError(
                    "backoff.on_predicate applied to a regular function "
                    "inside coroutine, this will lead to event loop "
                    "hiccups. Use backoff.on_predicate on coroutines in "
                    "asynchronous code.")
        if retry is None:
            retry = _sync.retry_predicate
        return retry(target, wait_gen, predicate,
                     max_tries, max_time, jitter,
                     on_success_, on_backoff_, on_giveup_,
                     wait_gen_kwargs)
    return decorate
Returns decorator for backoff and retry triggered by predicate .
36,789
def expo(base=2, factor=1, max_value=None):
    """
    Generator of exponentially growing wait values.

    Yields ``factor * base**n`` for n = 0, 1, 2, ...; once a value would
    reach *max_value* (when given), yields *max_value* forever.
    """
    exponent = 0
    while True:
        value = factor * base ** exponent
        if max_value is not None and value >= max_value:
            yield max_value
        else:
            yield value
            exponent += 1
Generator for exponential decay .
36,790
def fibo(max_value=None):
    """
    Generator of Fibonacci wait values (1, 1, 2, 3, 5, ...).

    Once the next value would reach *max_value* (when given), yields
    *max_value* forever.
    """
    current, following = 1, 1
    while True:
        if max_value is not None and current >= max_value:
            yield max_value
        else:
            yield current
            current, following = following, current + following
Generator for fibonaccial decay .
36,791
def constant(interval=1):
    """
    Generator of wait values from *interval*.

    A scalar repeats forever; an iterable is yielded element by element
    (and then exhausts).
    """
    try:
        source = iter(interval)
    except TypeError:
        # Not iterable: repeat the scalar indefinitely.
        source = itertools.repeat(interval)
    for value in source:
        yield value
Generator for constant intervals .
36,792
def detect_incorrect_erc20_interface(contract):
    """
    Return the functions declared directly on *contract* whose signatures
    deviate from the ERC20 interface.
    """
    flagged = []
    for func in contract.functions:
        if func.contract != contract:
            continue
        if IncorrectERC20InterfaceDetection.incorrect_erc20_interface(func.signature):
            flagged.append(func)
    return flagged
Detect incorrect ERC20 interface
36,793
def _detect(self):
    """
    Run the incorrect-ERC20-interface check over every contract; return
    one JSON result per offending contract.
    """
    findings = []
    for contract in self.contracts:
        bad_funcs = IncorrectERC20InterfaceDetection.detect_incorrect_erc20_interface(contract)
        if not bad_funcs:
            continue
        lines = ["{} ({}) has incorrect ERC20 function interface(s):\n".format(
            contract.name, contract.source_mapping_str)]
        lines.extend("\t-{} ({})\n".format(f.name, f.source_mapping_str)
                     for f in bad_funcs)
        result = self.generate_json_result(''.join(lines))
        self.add_functions_to_json(bad_funcs, result)
        findings.append(result)
    return findings
Detect incorrect erc20 interface
36,794
def detect_shadowing_definitions(self, contract):
    """
    Find local variables in *contract*'s functions/modifiers that shadow
    a function, modifier, event or state variable defined on the contract
    or on any contract it inherits from.

    :return: list of (contract_name, function_name, local_variable,
        overshadowed) tuples, where overshadowed is a list of
        (kind, scope_contract_name, definition) entries.
    """
    result = []
    for function in contract.functions + contract.modifiers:
        # Only consider definitions declared directly on this contract.
        if function.contract != contract:
            continue
        for variable in function.variables:
            overshadowed = []
            # Check this contract plus every inherited contract's scope.
            for scope_contract in [contract] + contract.inheritance:
                for scope_function in scope_contract.functions:
                    if variable.name == scope_function.name and scope_function.contract == scope_contract:
                        overshadowed.append((self.OVERSHADOWED_FUNCTION, scope_contract.name, scope_function))
                for scope_modifier in scope_contract.modifiers:
                    if variable.name == scope_modifier.name and scope_modifier.contract == scope_contract:
                        overshadowed.append((self.OVERSHADOWED_MODIFIER, scope_contract.name, scope_modifier))
                for scope_event in scope_contract.events:
                    if variable.name == scope_event.name and scope_event.contract == scope_contract:
                        overshadowed.append((self.OVERSHADOWED_EVENT, scope_contract.name, scope_event))
                for scope_state_variable in scope_contract.variables:
                    if variable.name == scope_state_variable.name and scope_state_variable.contract == scope_contract:
                        overshadowed.append((self.OVERSHADOWED_STATE_VARIABLE, scope_contract.name, scope_state_variable))
            if overshadowed:
                result.append((contract.name, function.name, variable, overshadowed))
    return result
Detects if functions access modifiers events state variables and local variables are named after reserved keywords . Any such definitions are returned in a list .
36,795
def _detect(self):
    """
    Report local variables that shadow functions, modifiers, events or
    state variables; one JSON result per shadowing local variable.
    """
    results = []
    for contract in self.contracts:
        shadows = self.detect_shadowing_definitions(contract)
        if shadows:
            for shadow in shadows:
                # Unpack (contract_name, function_name, local, overshadowed).
                local_parent_name = shadow[1]
                local_variable = shadow[2]
                overshadowed = shadow[3]
                info = '{}.{}.{} (local variable @ {}) shadows:\n'.format(contract.name,
                                                                          local_parent_name,
                                                                          local_variable.name,
                                                                          local_variable.source_mapping_str)
                for overshadowed_entry in overshadowed:
                    info += "\t- {}.{} ({} @ {})\n".format(overshadowed_entry[1],
                                                           overshadowed_entry[2],
                                                           overshadowed_entry[0],
                                                           overshadowed_entry[2].source_mapping_str)
                json = self.generate_json_result(info)
                self.add_variable_to_json(local_variable, json)
                for overshadowed_entry in overshadowed:
                    # Functions/modifiers/events vs state variables use
                    # different JSON serializers.
                    if overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION,
                                                 self.OVERSHADOWED_MODIFIER,
                                                 self.OVERSHADOWED_EVENT]:
                        self.add_function_to_json(overshadowed_entry[2], json)
                    elif overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE:
                        self.add_variable_to_json(overshadowed_entry[2], json)
                results.append(json)
    return results
Detect shadowing local variables
36,796
def _detect(self):
    """
    Detect state variables that are never written after initialization and
    whose initializer is a constant expression -- candidates for the
    'constant' keyword. All findings are merged into one JSON result.
    """
    results = []
    all_info = ''
    # Gather every state variable across all contracts.
    all_variables = [c.state_variables for c in self.slither.contracts]
    all_variables = set([item for sublist in all_variables for item in sublist])
    all_non_constant_elementary_variables = set([v for v in all_variables
                                                 if self._valid_candidate(v)])
    # Gather every state variable written by any reachable function.
    all_functions = [c.all_functions_called for c in self.slither.contracts]
    all_functions = list(set([item for sublist in all_functions for item in sublist]))
    all_variables_written = [f.state_variables_written for f in all_functions]
    all_variables_written = set([item for sublist in all_variables_written for item in sublist])
    # Candidates: never written and initialized with a constant expression.
    constable_variables = [v for v in all_non_constant_elementary_variables
                           if (not v in all_variables_written) and self._constant_initial_expression(v)]
    # Sort for deterministic output.
    constable_variables = sorted(constable_variables, key=lambda x: x.canonical_name)
    for v in constable_variables:
        info = "{}.{} should be constant ({})\n".format(v.contract.name,
                                                        v.name,
                                                        v.source_mapping_str)
        all_info += info
    if all_info != '':
        json = self.generate_json_result(all_info)
        self.add_variables_to_json(constable_variables, json)
        results.append(json)
    return results
Detect state variables that could be const
36,797
def detect_suicidal_func(func):
    """
    Check whether *func* lets an arbitrary caller destroy the contract.

    A function is flagged when it is a non-constructor public function
    that reaches suicide/selfdestruct without access protection.
    """
    if func.is_constructor:
        return False
    if func.visibility != 'public':
        return False
    called_names = set(call.name for call in func.internal_calls)
    if not called_names & {'suicide(address)', 'selfdestruct(address)'}:
        return False
    return not func.is_protected()
Detect if the function is suicidal
36,798
def _detect(self):
    """
    Build one JSON finding per suicidal function across all contracts.
    """
    findings = []
    for contract in self.contracts:
        for func in self.detect_suicidal(contract):
            message = "{}.{} ({}) allows anyone to destruct the contract\n".format(
                func.contract.name, func.name, func.source_mapping_str)
            finding = self.generate_json_result(message)
            self.add_function_to_json(func, finding)
            findings.append(finding)
    return findings
Detect the suicidal functions
36,799
def _detect(self):
    """
    Detect external calls whose return value is never used; one JSON
    result per function containing at least one such call.
    """
    results = []
    for c in self.slither.contracts:
        for f in c.functions + c.modifiers:
            # Skip definitions inherited from other contracts.
            if f.contract != c:
                continue
            unused_return = self.detect_unused_return_values(f)
            if unused_return:
                info = "{}.{} ({}) does not use the value returned by external calls:\n"
                info = info.format(f.contract.name, f.name, f.source_mapping_str)
                for node in unused_return:
                    info += "\t-{} ({})\n".format(node.expression, node.source_mapping_str)
                json = self.generate_json_result(info)
                self.add_function_to_json(f, json)
                self.add_nodes_to_json(unused_return, json)
                results.append(json)
    return results
Detect high level calls which return a value that are never used