idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
36,600 | def get_all_revealed_namespace_ids ( self ) : cur = self . db . cursor ( ) namespace_ids = namedb_get_all_revealed_namespace_ids ( cur , self . lastblock ) return namespace_ids | Get all revealed namespace IDs that have not expired . |
36,601 | def get_all_preordered_namespace_hashes ( self ) : cur = self . db . cursor ( ) namespace_hashes = namedb_get_all_preordered_namespace_hashes ( cur , self . lastblock ) return namespace_hashes | Get all oustanding namespace preorder hashes that have not expired . Used for testing |
36,602 | def get_all_importing_namespace_hashes ( self ) : cur = self . db . cursor ( ) namespace_hashes = namedb_get_all_importing_namespace_hashes ( cur , self . lastblock ) return namespace_hashes | Get the set of all preordered and revealed namespace hashes that have not expired . |
36,603 | def get_name_preorder ( self , name , sender_script_pubkey , register_addr , include_failed = False ) : name_rec = self . get_name ( name ) if name_rec is not None and not include_failed : return None namespace_id = get_namespace_from_name ( name ) namespace = self . get_namespace ( namespace_id ) if namespace is None : return None preorder_hash = hash_name ( name , sender_script_pubkey , register_addr = register_addr ) preorder = namedb_get_name_preorder ( self . db , preorder_hash , self . lastblock ) if preorder is None : return None namespace_lifetime_multiplier = get_epoch_namespace_lifetime_multiplier ( self . lastblock , namespace_id ) if preorder [ 'block_number' ] + ( namespace [ 'lifetime' ] * namespace_lifetime_multiplier ) <= self . lastblock : log . debug ( "Preorder is too old (accepted at {}, namespace lifetime is {}, current block is {})" . format ( preorder [ 'block_number' ] , namespace [ 'lifetime' ] * namespace_lifetime_multiplier , self . lastblock ) ) return None return preorder | Get the current preorder for a name given the name the sender s script pubkey and the registration address used to calculate the preorder hash . |
36,604 | def get_names_with_value_hash ( self , value_hash ) : cur = self . db . cursor ( ) names = namedb_get_names_with_value_hash ( cur , value_hash , self . lastblock ) return names | Get the list of names with the given value hash at the current block height . This excludes revoked names and expired names . |
36,605 | def get_atlas_zonefile_info_at ( self , block_id ) : nameops = self . get_all_blockstack_ops_at ( block_id ) ret = [ ] for nameop in nameops : if nameop . has_key ( 'op' ) and op_get_opcode_name ( nameop [ 'op' ] ) in [ 'NAME_UPDATE' , 'NAME_IMPORT' , 'NAME_REGISTRATION' , 'NAME_RENEWAL' ] : assert nameop . has_key ( 'value_hash' ) assert nameop . has_key ( 'name' ) assert nameop . has_key ( 'txid' ) if nameop [ 'value_hash' ] is not None : ret . append ( { 'name' : nameop [ 'name' ] , 'value_hash' : nameop [ 'value_hash' ] , 'txid' : nameop [ 'txid' ] } ) return ret | Get the blockchain - ordered sequence of names value hashes and txids . added at the given block height . The order will be in tx - order . |
36,606 | def get_namespace_reveal ( self , namespace_id , include_history = True ) : cur = self . db . cursor ( ) namespace_reveal = namedb_get_namespace_reveal ( cur , namespace_id , self . lastblock , include_history = include_history ) return namespace_reveal | Given the name of a namespace get it if it is currently being revealed . |
36,607 | def is_name_registered ( self , name ) : name_rec = self . get_name ( name ) if name_rec is None : return False if name_rec [ 'revoked' ] : return False return True | Given the fully - qualified name is it registered not revoked and not expired at the current block? |
36,608 | def is_namespace_ready ( self , namespace_id ) : namespace = self . get_namespace ( namespace_id ) if namespace is not None : return True else : return False | Given a namespace ID determine if the namespace is ready at the current block . |
36,609 | def is_namespace_preordered ( self , namespace_id_hash ) : namespace_preorder = self . get_namespace_preorder ( namespace_id_hash ) if namespace_preorder is None : return False else : return True | Given a namespace preorder hash determine if it is preordered at the current block . |
36,610 | def is_namespace_revealed ( self , namespace_id ) : namespace_reveal = self . get_namespace_reveal ( namespace_id ) if namespace_reveal is not None : return True else : return False | Given the name of a namespace has it been revealed but not made ready at the current block? |
36,611 | def is_name_owner ( self , name , sender_script_pubkey ) : if not self . is_name_registered ( name ) : return False owner = self . get_name_owner ( name ) if owner != sender_script_pubkey : return False else : return True | Given the fully - qualified name and a sender s script pubkey determine if the sender owns the name . |
36,612 | def is_new_preorder ( self , preorder_hash , lastblock = None ) : if lastblock is None : lastblock = self . lastblock preorder = namedb_get_name_preorder ( self . db , preorder_hash , lastblock ) if preorder is not None : return False else : return True | Given a preorder hash of a name determine whether or not it is unseen before . |
36,613 | def is_new_namespace_preorder ( self , namespace_id_hash , lastblock = None ) : if lastblock is None : lastblock = self . lastblock preorder = namedb_get_namespace_preorder ( self . db , namespace_id_hash , lastblock ) if preorder is not None : return False else : return True | Given a namespace preorder hash determine whether or not is is unseen before . |
36,614 | def is_name_revoked ( self , name ) : name = self . get_name ( name ) if name is None : return False if name [ 'revoked' ] : return True else : return False | Determine if a name is revoked at this block . |
36,615 | def get_value_hash_txids ( self , value_hash ) : cur = self . db . cursor ( ) return namedb_get_value_hash_txids ( cur , value_hash ) | Get the list of txids by value hash |
36,616 | def nameop_set_collided ( cls , nameop , history_id_key , history_id ) : nameop [ '__collided__' ] = True nameop [ '__collided_history_id_key__' ] = history_id_key nameop [ '__collided_history_id__' ] = history_id | Mark a nameop as collided |
36,617 | def nameop_put_collision ( cls , collisions , nameop ) : history_id_key = nameop . get ( '__collided_history_id_key__' , None ) history_id = nameop . get ( '__collided_history_id__' , None ) try : assert cls . nameop_is_collided ( nameop ) , "Nameop not collided" assert history_id_key is not None , "Nameop missing collision info" assert history_id is not None , "Nameop missing collision info" except Exception , e : log . exception ( e ) log . error ( "FATAL: BUG: bad collision info" ) os . abort ( ) if not collisions . has_key ( history_id_key ) : collisions [ history_id_key ] = [ history_id ] else : collisions [ history_id_key ] . append ( history_id ) | Record a nameop as collided with another nameop in this block . |
36,618 | def extract_consensus_op ( self , opcode , op_data , processed_op_data , current_block_number ) : ret = { } consensus_fields = op_get_consensus_fields ( opcode ) quirk_fields = op_get_quirk_fields ( opcode ) for field in consensus_fields + quirk_fields : try : assert field in processed_op_data , 'Missing consensus field "{}"' . format ( field ) except Exception as e : log . exception ( e ) log . error ( "FATAL: BUG: missing consensus field {}" . format ( field ) ) log . error ( "op_data:\n{}" . format ( json . dumps ( op_data , indent = 4 , sort_keys = True ) ) ) log . error ( "processed_op_data:\n{}" . format ( json . dumps ( op_data , indent = 4 , sort_keys = True ) ) ) os . abort ( ) ret [ field ] = processed_op_data [ field ] return ret | Using the operation data extracted from parsing the virtualchain operation ( |
36,619 | def commit_operation ( self , input_op_data , accepted_nameop , current_block_number ) : if self . disposition != DISPOSITION_RW : log . error ( "FATAL: borrowing violation: not a read-write connection" ) traceback . print_stack ( ) os . abort ( ) cur = self . db . cursor ( ) canonical_op = None op_type_str = None opcode = accepted_nameop . get ( 'opcode' , None ) try : assert opcode is not None , "Undefined op '%s'" % accepted_nameop [ 'op' ] except Exception , e : log . exception ( e ) log . error ( "FATAL: unrecognized op '%s'" % accepted_nameop [ 'op' ] ) os . abort ( ) if opcode in OPCODE_PREORDER_OPS : canonical_op = self . commit_state_preorder ( accepted_nameop , current_block_number ) op_type_str = "state_preorder" elif opcode in OPCODE_CREATION_OPS : canonical_op = self . commit_state_create ( accepted_nameop , current_block_number ) op_type_str = "state_create" elif opcode in OPCODE_TRANSITION_OPS : canonical_op = self . commit_state_transition ( accepted_nameop , current_block_number ) op_type_str = "state_transition" elif opcode in OPCODE_TOKEN_OPS : canonical_op = self . commit_token_operation ( accepted_nameop , current_block_number ) op_type_str = "token_operation" else : raise Exception ( "Unknown operation {}" . format ( opcode ) ) if canonical_op is None : log . error ( "FATAL: no canonical op generated (for {})" . format ( op_type_str ) ) os . abort ( ) log . debug ( "Extract consensus fields for {} in {}, as part of a {}" . format ( opcode , current_block_number , op_type_str ) ) consensus_op = self . extract_consensus_op ( opcode , input_op_data , canonical_op , current_block_number ) return consensus_op | Commit an operation thereby carrying out a state transition . |
36,620 | def commit_token_operation ( self , token_op , current_block_number ) : if self . disposition != DISPOSITION_RW : log . error ( "FATAL: borrowing violation: not a read-write connection" ) traceback . print_stack ( ) os . abort ( ) cur = self . db . cursor ( ) opcode = token_op . get ( 'opcode' , None ) clean_token_op = self . sanitize_op ( token_op ) try : assert token_operation_is_valid ( token_op ) , 'Invalid token operation' assert opcode is not None , 'No opcode given' assert 'txid' in token_op , 'No txid' assert 'vtxindex' in token_op , 'No vtxindex' except Exception as e : log . exception ( e ) log . error ( 'FATAL: failed to commit token operation' ) self . db . rollback ( ) os . abort ( ) table = token_operation_get_table ( token_op ) account_payment_info = token_operation_get_account_payment_info ( token_op ) account_credit_info = token_operation_get_account_credit_info ( token_op ) try : for key in account_payment_info : assert account_payment_info [ key ] is not None , 'BUG: payment info key {} is None' . format ( key ) for key in account_credit_info : assert account_credit_info [ key ] is not None , 'BUG: credit info key {} is not None' . format ( key ) except Exception as e : log . exception ( e ) log . error ( "FATAL: invalid token debit or credit info" ) os . abort ( ) self . log_accept ( current_block_number , token_op [ 'vtxindex' ] , token_op [ 'op' ] , token_op ) self . commit_account_debit ( token_op , account_payment_info , current_block_number , token_op [ 'vtxindex' ] , token_op [ 'txid' ] ) self . commit_account_credit ( token_op , account_credit_info , current_block_number , token_op [ 'vtxindex' ] , token_op [ 'txid' ] ) namedb_history_save ( cur , opcode , token_op [ 'address' ] , None , None , current_block_number , token_op [ 'vtxindex' ] , token_op [ 'txid' ] , clean_token_op ) return clean_token_op | Commit a token operation that debits one account and credits another |
36,621 | def commit_account_vesting ( self , block_height ) : log . debug ( "Commit all database state before vesting" ) self . db . commit ( ) if block_height in self . vesting : traceback . print_stack ( ) log . fatal ( "Tried to vest tokens twice at {}" . format ( block_height ) ) os . abort ( ) cur = self . db . cursor ( ) namedb_query_execute ( cur , 'BEGIN' , ( ) ) res = namedb_accounts_vest ( cur , block_height ) namedb_query_execute ( cur , 'END' , ( ) ) self . vesting [ block_height ] = True return True | vest any tokens at this block height |
36,622 | def is_name_valid ( fqn ) : if not isinstance ( fqn , ( str , unicode ) ) : return False if fqn . count ( "." ) != 1 : return False name , namespace_id = fqn . split ( "." ) if len ( name ) == 0 or len ( namespace_id ) == 0 : return False if not is_b40 ( name ) or "+" in name or "." in name : return False if not is_namespace_valid ( namespace_id ) : return False if len ( fqn ) > LENGTHS [ 'blockchain_id_name' ] : return False return True | Is a fully - qualified name acceptable? Return True if so Return False if not |
36,623 | def is_namespace_valid ( namespace_id ) : if not is_b40 ( namespace_id ) or "+" in namespace_id or namespace_id . count ( "." ) > 0 : return False if len ( namespace_id ) == 0 or len ( namespace_id ) > LENGTHS [ 'blockchain_id_namespace_id' ] : return False return True | Is a namespace ID valid? |
36,624 | def price_namespace ( namespace_id , block_height , units ) : price_table = get_epoch_namespace_prices ( block_height , units ) if price_table is None : return None if len ( namespace_id ) >= len ( price_table ) or len ( namespace_id ) == 0 : return None return price_table [ len ( namespace_id ) ] | Calculate the cost of a namespace . Returns the price on success Returns None if the namespace is invalid or if the units are invalid |
36,625 | def find_by_opcode ( checked_ops , opcode ) : if type ( opcode ) != list : opcode = [ opcode ] ret = [ ] for opdata in checked_ops : if op_get_opcode_name ( opdata [ 'op' ] ) in opcode : ret . append ( opdata ) return ret | Given all previously - accepted operations in this block find the ones that are of a particular opcode . |
36,626 | def get_public_key_hex_from_tx ( inputs , address ) : ret = None for inp in inputs : input_scriptsig = inp [ 'script' ] input_script_code = virtualchain . btc_script_deserialize ( input_scriptsig ) if len ( input_script_code ) == 2 : pubkey_candidate = input_script_code [ 1 ] pubkey = None try : pubkey = virtualchain . BitcoinPublicKey ( pubkey_candidate ) except Exception as e : traceback . print_exc ( ) log . warn ( "Invalid public key {}" . format ( pubkey_candidate ) ) continue if address != pubkey . address ( ) : continue return pubkey_candidate return None | Given a list of inputs and the address of one of the inputs find the public key . |
36,627 | def check_name ( name ) : if type ( name ) not in [ str , unicode ] : return False if not is_name_valid ( name ) : return False return True | Verify the name is well - formed |
36,628 | def check_namespace ( namespace_id ) : if type ( namespace_id ) not in [ str , unicode ] : return False if not is_namespace_valid ( namespace_id ) : return False return True | Verify that a namespace ID is well - formed |
36,629 | def check_token_type ( token_type ) : return check_string ( token_type , min_length = 1 , max_length = LENGTHS [ 'namespace_id' ] , pattern = '^{}$|{}' . format ( TOKEN_TYPE_STACKS , OP_NAMESPACE_PATTERN ) ) | Verify that a token type is well - formed |
36,630 | def check_subdomain ( fqn ) : if type ( fqn ) not in [ str , unicode ] : return False if not is_subdomain ( fqn ) : return False return True | Verify that the given fqn is a subdomain |
36,631 | def check_block ( block_id ) : if type ( block_id ) not in [ int , long ] : return False if BLOCKSTACK_TEST : if block_id <= 0 : return False else : if block_id < FIRST_BLOCK_MAINNET : return False if block_id > 1e7 : return False return True | Verify that a block ID is valid |
36,632 | def check_offset ( offset , max_value = None ) : if type ( offset ) not in [ int , long ] : return False if offset < 0 : return False if max_value and offset > max_value : return False return True | Verify that an offset is valid |
36,633 | def check_string ( value , min_length = None , max_length = None , pattern = None ) : if type ( value ) not in [ str , unicode ] : return False if min_length and len ( value ) < min_length : return False if max_length and len ( value ) > max_length : return False if pattern and not re . match ( pattern , value ) : return False return True | verify that a string has a particular size and conforms to a particular alphabet |
36,634 | def check_address ( address ) : if not check_string ( address , min_length = 26 , max_length = 35 , pattern = OP_ADDRESS_PATTERN ) : return False try : keylib . b58check_decode ( address ) return True except : return False | verify that a string is a base58check address |
36,635 | def check_account_address ( address ) : if address == 'treasury' or address == 'unallocated' : return True if address . startswith ( 'not_distributed_' ) and len ( address ) > len ( 'not_distributed_' ) : return True if re . match ( OP_C32CHECK_PATTERN , address ) : try : c32addressDecode ( address ) return True except : pass return check_address ( address ) | verify that a string is a valid account address . Can be a b58 - check address a c32 - check address as well as the string treasury or unallocated or a string starting with not_distributed_ |
36,636 | def check_tx_output_types ( outputs , block_height ) : supported_output_types = get_epoch_btc_script_types ( block_height ) for out in outputs : out_type = virtualchain . btc_script_classify ( out [ 'script' ] ) if out_type not in supported_output_types : log . warning ( 'Unsupported output type {} ({})' . format ( out_type , out [ 'script' ] ) ) return False return True | Verify that the list of transaction outputs are acceptable |
36,637 | def address_as_b58 ( addr ) : if is_c32_address ( addr ) : return c32ToB58 ( addr ) else : if check_address ( addr ) : return addr else : raise ValueError ( 'Address {} is not b58 or c32' . format ( addr ) ) | Given a b58check or c32check address return the b58check encoding |
36,638 | def verify ( address , plaintext , scriptSigb64 ) : assert isinstance ( address , str ) assert isinstance ( scriptSigb64 , str ) scriptSig = base64 . b64decode ( scriptSigb64 ) hash_hex = hashlib . sha256 ( plaintext ) . hexdigest ( ) vb = keylib . b58check . b58check_version_byte ( address ) if vb == bitcoin_blockchain . version_byte : return verify_singlesig ( address , hash_hex , scriptSig ) elif vb == bitcoin_blockchain . multisig_version_byte : return verify_multisig ( address , hash_hex , scriptSig ) else : log . warning ( "Unrecognized address version byte {}" . format ( vb ) ) raise NotImplementedError ( "Addresses must be single-sig (version-byte = 0) or multi-sig (version-byte = 5)" ) | Verify that a given plaintext is signed by the given scriptSig given the address |
36,639 | def verify_singlesig ( address , hash_hex , scriptSig ) : try : sighex , pubkey_hex = virtualchain . btc_script_deserialize ( scriptSig ) except : log . warn ( "Wrong signature structure for {}" . format ( address ) ) return False if virtualchain . address_reencode ( keylib . public_key_to_address ( pubkey_hex ) ) != virtualchain . address_reencode ( address ) : log . warn ( ( "Address {} does not match signature script {}" . format ( address , scriptSig . encode ( 'hex' ) ) ) ) return False sig64 = base64 . b64encode ( binascii . unhexlify ( sighex ) ) return virtualchain . ecdsalib . verify_digest ( hash_hex , pubkey_hex , sig64 ) | Verify that a p2pkh address is signed by the given pay - to - pubkey - hash scriptsig |
36,640 | def verify_multisig ( address , hash_hex , scriptSig ) : script_parts = virtualchain . btc_script_deserialize ( scriptSig ) if len ( script_parts ) < 2 : log . warn ( "Verfiying multisig failed, couldn't grab script parts" ) return False redeem_script = script_parts [ - 1 ] script_sigs = script_parts [ 1 : - 1 ] if virtualchain . address_reencode ( virtualchain . btc_make_p2sh_address ( redeem_script ) ) != virtualchain . address_reencode ( address ) : log . warn ( ( "Address {} does not match redeem script {}" . format ( address , redeem_script ) ) ) return False m , pubk_hexes = virtualchain . parse_multisig_redeemscript ( redeem_script ) if len ( script_sigs ) != m : log . warn ( "Failed to validate multi-sig, not correct number of signatures: have {}, require {}" . format ( len ( script_sigs ) , m ) ) return False cur_pubk = 0 for cur_sig in script_sigs : sig64 = base64 . b64encode ( binascii . unhexlify ( cur_sig ) ) sig_passed = False while not sig_passed : if cur_pubk >= len ( pubk_hexes ) : log . warn ( "Failed to validate multi-signature, ran out of public keys to check" ) return False sig_passed = virtualchain . ecdsalib . verify_digest ( hash_hex , pubk_hexes [ cur_pubk ] , sig64 ) cur_pubk += 1 return True | verify that a p2sh address is signed by the given scriptsig |
36,641 | def is_subdomain_missing_zonefiles_record ( rec ) : if rec [ 'name' ] != SUBDOMAIN_TXT_RR_MISSING : return False txt_entry = rec [ 'txt' ] if isinstance ( txt_entry , list ) : return False missing = txt_entry . split ( ',' ) try : for m in missing : m = int ( m ) except ValueError : return False return True | Does a given parsed zone file TXT record encode a missing - zonefile vector? Return True if so Return False if not |
36,642 | def is_subdomain_record ( rec ) : txt_entry = rec [ 'txt' ] if not isinstance ( txt_entry , list ) : return False has_parts_entry = False has_pk_entry = False has_seqn_entry = False for entry in txt_entry : if entry . startswith ( SUBDOMAIN_ZF_PARTS + "=" ) : has_parts_entry = True if entry . startswith ( SUBDOMAIN_PUBKEY + "=" ) : has_pk_entry = True if entry . startswith ( SUBDOMAIN_N + "=" ) : has_seqn_entry = True return ( has_parts_entry and has_pk_entry and has_seqn_entry ) | Does a given parsed zone file TXT record ( |
36,643 | def get_subdomain_info ( fqn , db_path = None , atlasdb_path = None , zonefiles_dir = None , check_pending = False , include_did = False ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : log . warn ( "Subdomain support is disabled" ) return None if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] if atlasdb_path is None : atlasdb_path = opts [ 'atlasdb_path' ] db = SubdomainDB ( db_path , zonefiles_dir ) try : subrec = db . get_subdomain_entry ( fqn ) except SubdomainNotFound : log . warn ( "No such subdomain: {}" . format ( fqn ) ) return None if check_pending : subrec . pending = db . subdomain_check_pending ( subrec , atlasdb_path ) if include_did : subrec . did_info = db . get_subdomain_DID_info ( fqn ) return subrec | Static method for getting the state of a subdomain given its fully - qualified name . Return the subdomain record on success . Return None if not found . |
36,644 | def get_subdomain_resolver ( name , db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : log . warn ( "Subdomain support is disabled" ) return None if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) resolver_url = db . get_domain_resolver ( name ) return resolver_url | Static method for determining the last - known resolver for a domain name . Returns the resolver URL on success Returns None on error |
36,645 | def get_subdomains_count ( db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : log . warn ( "Subdomain support is disabled" ) return None if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) return db . get_subdomains_count ( ) | Static method for getting count of all subdomains Return number of subdomains on success |
36,646 | def get_subdomain_DID_info ( fqn , db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : log . warn ( "Subdomain support is disabled" ) return None if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) try : subrec = db . get_subdomain_entry ( fqn ) except SubdomainNotFound : log . warn ( "No such subdomain: {}" . format ( fqn ) ) return None try : return db . get_subdomain_DID_info ( fqn ) except SubdomainNotFound : return None | Get a subdomain s DID info . Return None if not found |
36,647 | def get_DID_subdomain ( did , db_path = None , zonefiles_dir = None , atlasdb_path = None , check_pending = False ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : log . warn ( "Subdomain support is disabled" ) return None if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] if atlasdb_path is None : atlasdb_path = opts [ 'atlasdb_path' ] db = SubdomainDB ( db_path , zonefiles_dir ) try : subrec = db . get_DID_subdomain ( did ) except Exception as e : if BLOCKSTACK_DEBUG : log . exception ( e ) log . warn ( "Failed to load subdomain for {}" . format ( did ) ) return None if check_pending : subrec . pending = db . subdomain_check_pending ( subrec , atlasdb_path ) return subrec | Static method for resolving a DID to a subdomain Return the subdomain record on success Return None on error |
36,648 | def is_subdomain_zonefile_hash ( fqn , zonefile_hash , db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : return [ ] if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) zonefile_hashes = db . is_subdomain_zonefile_hash ( fqn , zonefile_hash ) return zonefile_hashes | Static method for getting all historic zone file hashes for a subdomain |
36,649 | def get_subdomain_history ( fqn , offset = None , count = None , reverse = False , db_path = None , zonefiles_dir = None , json = False ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : return [ ] if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) recs = db . get_subdomain_history ( fqn , offset = offset , count = count ) if json : recs = [ rec . to_json ( ) for rec in recs ] ret = { } for rec in recs : if rec [ 'block_number' ] not in ret : ret [ rec [ 'block_number' ] ] = [ ] ret [ rec [ 'block_number' ] ] . append ( rec ) if reverse : for block_height in ret : ret [ block_height ] . sort ( lambda r1 , r2 : - 1 if r1 [ 'parent_zonefile_index' ] > r2 [ 'parent_zonefile_index' ] or ( r1 [ 'parent_zonefile_index' ] == r2 [ 'parent_zonefile_index' ] and r1 [ 'zonefile_offset' ] > r2 [ 'zonefile_offset' ] ) else 1 if r1 [ 'parent_zonefile_index' ] < r2 [ 'parent_zonefile_index' ] or ( r1 [ 'parent_zonefile_index' ] == r2 [ 'parent_zonefile_index' ] and r1 [ 'zonefile_offset' ] < r2 [ 'zonefile_offset' ] ) else 0 ) return ret else : return recs | Static method for getting all historic operations on a subdomain |
36,650 | def get_all_subdomains ( offset = None , count = None , min_sequence = None , db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : return [ ] if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) return db . get_all_subdomains ( offset = offset , count = count , min_sequence = None ) | Static method for getting the list of all subdomains |
36,651 | def get_subdomain_ops_at_txid ( txid , db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : return [ ] if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) return db . get_subdomain_ops_at_txid ( txid ) | Static method for getting the list of subdomain operations accepted at a given txid . Includes unaccepted subdomain operations |
36,652 | def get_subdomains_owned_by_address ( address , db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : return [ ] if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) return db . get_subdomains_owned_by_address ( address ) | Static method for getting the list of subdomains for a given address |
36,653 | def get_subdomain_last_sequence ( db_path = None , zonefiles_dir = None ) : opts = get_blockstack_opts ( ) if not is_subdomains_enabled ( opts ) : return [ ] if db_path is None : db_path = opts [ 'subdomaindb_path' ] if zonefiles_dir is None : zonefiles_dir = opts [ 'zonefiles' ] db = SubdomainDB ( db_path , zonefiles_dir ) return db . get_last_sequence ( ) | Static method for getting the last sequence number in the database |
36,654 | def sign ( privkey_bundle , plaintext ) : if virtualchain . is_singlesig ( privkey_bundle ) : return sign_singlesig ( privkey_bundle , plaintext ) elif virtualchain . is_multisig ( privkey_bundle ) : return sign_multisig ( privkey_bundle , plaintext ) else : raise ValueError ( "private key bundle is neither a singlesig nor multisig bundle" ) | Sign a subdomain plaintext with a private key bundle Returns the base64 - encoded scriptsig |
36,655 | def subdomains_init ( blockstack_opts , working_dir , atlas_state ) : if not is_subdomains_enabled ( blockstack_opts ) : return None subdomain_state = SubdomainIndex ( blockstack_opts [ 'subdomaindb_path' ] , blockstack_opts = blockstack_opts ) atlas_node_add_callback ( atlas_state , 'store_zonefile' , subdomain_state . enqueue_zonefile ) return subdomain_state | Set up subdomain state Returns a SubdomainIndex object that has been successfully connected to Atlas |
36,656 | def verify_signature ( self , addr ) : return verify ( virtualchain . address_reencode ( addr ) , self . get_plaintext_to_sign ( ) , self . sig ) | Given an address verify whether or not it was signed by it |
36,657 | def serialize_to_txt ( self ) : txtrec = { 'name' : self . fqn if self . independent else self . subdomain , 'txt' : self . pack_subdomain ( ) [ 1 : ] } return blockstack_zones . record_processors . process_txt ( [ txtrec ] , '{txt}' ) . strip ( ) | Serialize this subdomain record to a TXT record . The trailing newline will be omitted |
36,658 | def parse_subdomain_missing_zonefiles_record ( cls , rec ) : txt_entry = rec [ 'txt' ] if isinstance ( txt_entry , list ) : raise ParseError ( "TXT entry too long for a missing zone file list" ) try : return [ int ( i ) for i in txt_entry . split ( ',' ) ] if txt_entry is not None and len ( txt_entry ) > 0 else [ ] except ValueError : raise ParseError ( 'Invalid integers' ) | Parse a missing - zonefiles vector given by the domain . Returns the list of zone file indexes on success Raises ParseError on unparseable records |
36,659 | def get_public_key ( self ) : res = self . get_public_key_info ( ) if 'error' in res : raise ValueError ( res [ 'error' ] ) if res [ 'type' ] != 'singlesig' : raise ValueError ( res [ 'error' ] ) return res [ 'public_keys' ] [ 0 ] | Parse the scriptSig and extract the public key . Raises ValueError if this is a multisig - controlled subdomain . |
36,660 | def close ( self ) : with self . subdomain_db_lock : self . subdomain_db . close ( ) self . subdomain_db = None self . subdomain_db_path = None | Close the index |
36,661 | def make_new_subdomain_history ( self , cursor , subdomain_rec ) : hist = self . subdomain_db . get_subdomain_history ( subdomain_rec . get_fqn ( ) , include_unaccepted = True , end_sequence = subdomain_rec . n + 1 , end_zonefile_index = subdomain_rec . parent_zonefile_index + 1 , cur = cursor ) assert len ( hist ) > 0 , 'BUG: not yet stored: {}' . format ( subdomain_rec ) for i in range ( 0 , len ( hist ) ) : hist [ i ] . accepted = False hist . sort ( lambda h1 , h2 : - 1 if h1 . n < h2 . n or ( h1 . n == h2 . n and h1 . parent_zonefile_index < h2 . parent_zonefile_index ) else 0 if h1 . n == h2 . n and h1 . parent_zonefile_index == h2 . parent_zonefile_index else 1 ) if not self . check_initial_subdomain ( hist [ 0 ] ) : log . debug ( "Reject initial {}" . format ( hist [ 0 ] ) ) return hist else : log . debug ( "Accept initial {}" . format ( hist [ 0 ] ) ) pass hist [ 0 ] . accepted = True last_accepted = 0 for i in xrange ( 1 , len ( hist ) ) : if self . check_subdomain_transition ( hist [ last_accepted ] , hist [ i ] ) : log . debug ( "Accept historic update {}" . format ( hist [ i ] ) ) hist [ i ] . accepted = True last_accepted = i else : log . debug ( "Reject historic update {}" . format ( hist [ i ] ) ) hist [ i ] . accepted = False return hist | Recalculate the history for this subdomain from genesis up until this record . Returns the list of subdomain records we need to save . |
36,662 | def make_new_subdomain_future ( self , cursor , subdomain_rec ) : assert subdomain_rec . accepted , 'BUG: given subdomain record must already be accepted' fut = self . subdomain_db . get_subdomain_history ( subdomain_rec . get_fqn ( ) , include_unaccepted = True , start_sequence = subdomain_rec . n , start_zonefile_index = subdomain_rec . parent_zonefile_index , cur = cursor ) for i in range ( 0 , len ( fut ) ) : if fut [ i ] . n == subdomain_rec . n and fut [ i ] . parent_zonefile_index == subdomain_rec . parent_zonefile_index : fut . pop ( i ) break if len ( fut ) == 0 : log . debug ( "At tip: {}" . format ( subdomain_rec ) ) return [ ] for i in range ( 0 , len ( fut ) ) : fut [ i ] . accepted = False fut = [ subdomain_rec ] + fut fut . sort ( lambda h1 , h2 : - 1 if h1 . n < h2 . n or ( h1 . n == h2 . n and h1 . parent_zonefile_index < h2 . parent_zonefile_index ) else 0 if h1 . n == h2 . n and h1 . parent_zonefile_index == h2 . parent_zonefile_index else 1 ) assert fut [ 0 ] . accepted , 'BUG: initial subdomain record is not accepted: {}' . format ( fut [ 0 ] ) last_accepted = 0 for i in range ( 1 , len ( fut ) ) : if self . check_subdomain_transition ( fut [ last_accepted ] , fut [ i ] ) : log . debug ( "Accept future update {}" . format ( fut [ i ] ) ) fut [ i ] . accepted = True last_accepted = i else : log . debug ( "Reject future update {}" . format ( fut [ i ] ) ) fut [ i ] . accepted = False return fut | Recalculate the future for this subdomain from the current record until the latest known record . Returns the list of subdomain records we need to save . |
36,663 | def subdomain_try_insert ( self , cursor , subdomain_rec , history_neighbors ) : blockchain_order = history_neighbors [ 'prev' ] + history_neighbors [ 'cur' ] + history_neighbors [ 'fut' ] last_accepted = - 1 for i in range ( 0 , len ( blockchain_order ) ) : if blockchain_order [ i ] . accepted : last_accepted = i break if blockchain_order [ i ] . n > subdomain_rec . n or ( blockchain_order [ i ] . n == subdomain_rec . n and blockchain_order [ i ] . parent_zonefile_index > subdomain_rec . parent_zonefile_index ) : log . debug ( "No immediate ancestors are accepted on {}" . format ( subdomain_rec ) ) return False if last_accepted < 0 : log . debug ( "No immediate ancestors or successors are accepted on {}" . format ( subdomain_rec ) ) return False chain_tip_status = blockchain_order [ - 1 ] . accepted dirty = [ ] for i in range ( last_accepted + 1 , len ( blockchain_order ) ) : cur_accepted = blockchain_order [ i ] . accepted new_accepted = self . check_subdomain_transition ( blockchain_order [ last_accepted ] , blockchain_order [ i ] ) if new_accepted != cur_accepted : blockchain_order [ i ] . accepted = new_accepted log . debug ( "Changed from {} to {}: {}" . format ( cur_accepted , new_accepted , blockchain_order [ i ] ) ) dirty . append ( blockchain_order [ i ] ) if new_accepted : last_accepted = i if chain_tip_status != blockchain_order [ - 1 ] . accepted and len ( history_neighbors [ 'fut' ] ) > 0 : log . debug ( "Immediate history chain tip altered from {} to {}: {}" . format ( chain_tip_status , blockchain_order [ - 1 ] . accepted , blockchain_order [ - 1 ] ) ) return False for subrec in dirty : log . debug ( "Update to accepted={}: {}" . format ( subrec . accepted , subrec ) ) self . subdomain_db . update_subdomain_entry ( subrec , cur = cursor ) return True | Try to insert a subdomain record into its history neighbors . This is an optimization that handles the usual case . |
36,664 | def enqueue_zonefile ( self , zonefile_hash , block_height ) : with self . serialized_enqueue_zonefile : log . debug ( "Append {} from {}" . format ( zonefile_hash , block_height ) ) queuedb_append ( self . subdomain_queue_path , "zonefiles" , zonefile_hash , json . dumps ( { 'zonefile_hash' : zonefile_hash , 'block_height' : block_height } ) ) | Called when we discover a zone file . Queues up a request to reprocess this name s zone files subdomains . zonefile_hash is the hash of the zonefile . block_height is the minimium block height at which this zone file occurs . |
36,665 | def index_blockchain ( self , block_start , block_end ) : log . debug ( "Processing subdomain updates for zonefiles in blocks {}-{}" . format ( block_start , block_end ) ) res = self . find_zonefile_subdomains ( block_start , block_end ) zonefile_subdomain_info = res [ 'zonefile_info' ] self . process_subdomains ( zonefile_subdomain_info ) | Go through the sequence of zone files discovered in a block range and reindex the names subdomains . |
36,666 | def index_discovered_zonefiles ( self , lastblock ) : all_queued_zfinfos = [ ] subdomain_zonefile_infos = { } name_blocks = { } offset = 0 while True : queued_zfinfos = queuedb_findall ( self . subdomain_queue_path , "zonefiles" , limit = 100 , offset = offset ) if len ( queued_zfinfos ) == 0 : break offset += 100 all_queued_zfinfos += queued_zfinfos if len ( all_queued_zfinfos ) >= 1000 : break log . debug ( "Discovered {} zonefiles" . format ( len ( all_queued_zfinfos ) ) ) for queued_zfinfo in all_queued_zfinfos : zfinfo = json . loads ( queued_zfinfo [ 'data' ] ) zonefile_hash = zfinfo [ 'zonefile_hash' ] block_height = zfinfo [ 'block_height' ] zfinfos = atlasdb_get_zonefiles_by_hash ( zonefile_hash , block_height = block_height , path = self . atlasdb_path ) if zfinfos is None : log . warn ( "Absent zonefile {}" . format ( zonefile_hash ) ) continue for zfi in zfinfos : if zfi [ 'name' ] not in name_blocks : name_blocks [ zfi [ 'name' ] ] = block_height else : name_blocks [ zfi [ 'name' ] ] = min ( block_height , name_blocks [ zfi [ 'name' ] ] ) for name in name_blocks : if name_blocks [ name ] >= lastblock : continue log . debug ( "Finding subdomain updates for {} at block {}" . format ( name , name_blocks [ name ] ) ) res = self . find_zonefile_subdomains ( name_blocks [ name ] , lastblock , name = name ) zonefile_subdomain_info = res [ 'zonefile_info' ] subdomain_index = res [ 'subdomains' ] for fqn in subdomain_index : if fqn not in subdomain_zonefile_infos : subdomain_zonefile_infos [ fqn ] = [ ] for i in subdomain_index [ fqn ] : subdomain_zonefile_infos [ fqn ] . append ( zonefile_subdomain_info [ i ] ) processed = [ ] for fqn in subdomain_zonefile_infos : subseq = filter ( lambda szi : szi [ 'zonefile_hash' ] not in processed , subdomain_zonefile_infos [ fqn ] ) if len ( subseq ) == 0 : continue log . debug ( "Processing {} zone file entries found for {} and others" . format ( len ( subseq ) , fqn ) ) subseq . sort ( cmp = lambda z1 , z2 : - 1 if z1 [ 'block_height' ] < z2 [ 'block_height' ] else 0 if z1 [ 'block_height' ] == z2 [ 'block_height' ] else 1 ) self . process_subdomains ( subseq ) processed += [ szi [ 'zonefile_hash' ] for szi in subseq ] queuedb_removeall ( self . subdomain_queue_path , all_queued_zfinfos ) return True | Go through the list of zone files we discovered via Atlas grouped by name and ordered by block height . Find all subsequent zone files for this name and process all subdomain operations contained within them . |
36,667 | def subdomain_row_factory ( cls , cursor , row ) : d = { } for idx , col in enumerate ( cursor . description ) : d [ col [ 0 ] ] = row [ idx ] return d | Dict row factory for subdomains |
36,668 | def _extract_subdomain ( self , rowdata ) : name = str ( rowdata [ 'fully_qualified_subdomain' ] ) domain = str ( rowdata [ 'domain' ] ) n = str ( rowdata [ 'sequence' ] ) encoded_pubkey = str ( rowdata [ 'owner' ] ) zonefile_hash = str ( rowdata [ 'zonefile_hash' ] ) sig = rowdata [ 'signature' ] block_height = int ( rowdata [ 'block_height' ] ) parent_zonefile_hash = str ( rowdata [ 'parent_zonefile_hash' ] ) parent_zonefile_index = int ( rowdata [ 'parent_zonefile_index' ] ) zonefile_offset = int ( rowdata [ 'zonefile_offset' ] ) txid = str ( rowdata [ 'txid' ] ) missing = [ int ( i ) for i in rowdata [ 'missing' ] . split ( ',' ) ] if rowdata [ 'missing' ] is not None and len ( rowdata [ 'missing' ] ) > 0 else [ ] accepted = int ( rowdata [ 'accepted' ] ) resolver = str ( rowdata [ 'resolver' ] ) if rowdata [ 'resolver' ] is not None else None if accepted == 0 : accepted = False else : accepted = True if sig == '' or sig is None : sig = None else : sig = str ( sig ) name = str ( name ) is_subdomain , _ , _ = is_address_subdomain ( name ) if not is_subdomain : raise Exception ( "Subdomain DB lookup returned bad subdomain result {}" . format ( name ) ) zonefile_str = get_atlas_zonefile_data ( zonefile_hash , self . zonefiles_dir ) if zonefile_str is None : log . error ( "No zone file for {}" . format ( name ) ) raise SubdomainNotFound ( '{}: missing zone file {}' . format ( name , zonefile_hash ) ) return Subdomain ( str ( name ) , str ( domain ) , str ( encoded_pubkey ) , int ( n ) , str ( zonefile_str ) , sig , block_height , parent_zonefile_hash , parent_zonefile_index , zonefile_offset , txid , domain_zonefiles_missing = missing , accepted = accepted , resolver = resolver ) | Extract a single subdomain from a DB cursor Raise SubdomainNotFound if there are no valid rows |
36,669 | def get_subdomains_count ( self , accepted = True , cur = None ) : if accepted : accepted_filter = 'WHERE accepted=1' else : accepted_filter = '' get_cmd = "SELECT COUNT(DISTINCT fully_qualified_subdomain) as count FROM {} {};" . format ( self . subdomain_table , accepted_filter ) cursor = cur if cursor is None : cursor = self . conn . cursor ( ) db_query_execute ( cursor , get_cmd , ( ) ) try : rowdata = cursor . fetchone ( ) return rowdata [ 'count' ] except Exception as e : if BLOCKSTACK_DEBUG : log . exception ( e ) return 0 | Fetch subdomain names |
36,670 | def get_all_subdomains ( self , offset = None , count = None , min_sequence = None , cur = None ) : get_cmd = 'SELECT DISTINCT fully_qualified_subdomain FROM {}' . format ( self . subdomain_table ) args = ( ) if min_sequence is not None : get_cmd += ' WHERE sequence >= ?' args += ( min_sequence , ) if count is not None : get_cmd += ' LIMIT ?' args += ( count , ) if offset is not None : get_cmd += ' OFFSET ?' args += ( offset , ) get_cmd += ';' cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur rows = db_query_execute ( cursor , get_cmd , args ) subdomains = [ ] for row in rows : subdomains . append ( row [ 'fully_qualified_subdomain' ] ) return subdomains | Get and all subdomain names optionally over a range |
36,671 | def get_subdomain_ops_at_txid ( self , txid , cur = None ) : get_cmd = 'SELECT * FROM {} WHERE txid = ? ORDER BY zonefile_offset' . format ( self . subdomain_table ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur db_query_execute ( cursor , get_cmd , ( txid , ) ) try : return [ x for x in cursor . fetchall ( ) ] except Exception as e : if BLOCKSTACK_DEBUG : log . exception ( e ) return [ ] | Given a txid get all subdomain operations at that txid . Include unaccepted operations . Order by zone file index |
36,672 | def get_subdomains_owned_by_address ( self , owner , cur = None ) : get_cmd = "SELECT fully_qualified_subdomain, MAX(sequence) FROM {} WHERE owner = ? AND accepted=1 GROUP BY fully_qualified_subdomain" . format ( self . subdomain_table ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur db_query_execute ( cursor , get_cmd , ( owner , ) ) try : return [ x [ 'fully_qualified_subdomain' ] for x in cursor . fetchall ( ) ] except Exception as e : if BLOCKSTACK_DEBUG : log . exception ( e ) return [ ] | Get the list of subdomain names that are owned by a given address . |
36,673 | def get_domain_resolver ( self , domain_name , cur = None ) : get_cmd = "SELECT resolver FROM {} WHERE domain=? AND resolver != '' AND accepted=1 ORDER BY sequence DESC, parent_zonefile_index DESC LIMIT 1;" . format ( self . subdomain_table ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur db_query_execute ( cursor , get_cmd , ( domain_name , ) ) rowdata = cursor . fetchone ( ) if not rowdata : return None return rowdata [ 'resolver' ] | Get the last - knwon resolver entry for a domain name Returns None if not found . |
36,674 | def get_subdomain_DID_info ( self , fqn , cur = None ) : subrec = self . get_subdomain_entry_at_sequence ( fqn , 0 , cur = cur ) cmd = 'SELECT zonefile_offset FROM {} WHERE fully_qualified_subdomain = ? AND owner = ? AND sequence=0 AND parent_zonefile_index <= ? AND accepted=1 ORDER BY parent_zonefile_index, zonefile_offset LIMIT 1;' . format ( self . subdomain_table ) args = ( fqn , subrec . address , subrec . parent_zonefile_index ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur rows = db_query_execute ( cursor , cmd , args ) zonefile_offset = None for r in rows : zonefile_offset = r [ 'zonefile_offset' ] break if zonefile_offset is None : raise SubdomainNotFound ( 'No rows for {}' . format ( fqn ) ) cmd = 'SELECT COUNT(*) FROM {} WHERE owner = ? AND sequence=0 AND (parent_zonefile_index < ? OR parent_zonefile_index = ? AND zonefile_offset < ?) AND accepted=1 ORDER BY parent_zonefile_index, zonefile_offset LIMIT 1;' . format ( self . subdomain_table ) args = ( subrec . address , subrec . parent_zonefile_index , subrec . parent_zonefile_index , zonefile_offset ) rows = db_query_execute ( cursor , cmd , args ) count = None for r in rows : count = r [ 'COUNT(*)' ] break if count is None : raise SubdomainNotFound ( 'No rows for {}' . format ( fqn ) ) return { 'name_type' : 'subdomain' , 'address' : subrec . address , 'index' : count } | Get the DID information for a subdomain . Raise SubdomainNotFound if there is no such subdomain |
36,675 | def get_DID_subdomain ( self , did , cur = None ) : did = str ( did ) try : did_info = parse_DID ( did ) assert did_info [ 'name_type' ] == 'subdomain' , 'Not a subdomain DID' except : raise ValueError ( "Invalid DID: {}" . format ( did ) ) original_address = did_info [ 'address' ] name_index = did_info [ 'index' ] cmd = 'SELECT fully_qualified_subdomain FROM {} WHERE owner = ? AND sequence = ? ORDER BY parent_zonefile_index, zonefile_offset LIMIT 1 OFFSET ?;' . format ( self . subdomain_table ) args = ( original_address , 0 , name_index ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur subdomain_name = None rows = db_query_execute ( cursor , cmd , args ) for r in rows : subdomain_name = r [ 'fully_qualified_subdomain' ] break if not subdomain_name : raise SubdomainNotFound ( 'Does not correspond to a subdomain: {}' . format ( did ) ) subrec = self . get_subdomain_entry ( subdomain_name , cur = cur ) subrec . did_info = did_info return subrec | Get a subdomain given its DID Raise ValueError if the DID is invalid Raise SubdomainNotFound if the DID does not correspond to a subdomain |
36,676 | def is_subdomain_zonefile_hash ( self , fqn , zonefile_hash , cur = None ) : sql = 'SELECT COUNT(zonefile_hash) FROM {} WHERE fully_qualified_subdomain = ? and zonefile_hash = ?;' . format ( self . subdomain_table ) args = ( fqn , zonefile_hash ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur rows = db_query_execute ( cursor , sql , args ) count = None for row in rows : count = row [ 'COUNT(zonefile_hash)' ] break return ( count > 0 ) | Does this zone file hash belong to this subdomain? |
36,677 | def update_subdomain_entry ( self , subdomain_obj , cur = None ) : assert isinstance ( subdomain_obj , Subdomain ) zonefile_hash = get_zonefile_data_hash ( subdomain_obj . zonefile_str ) rc = store_atlas_zonefile_data ( subdomain_obj . zonefile_str , self . zonefiles_dir , fsync = False ) if not rc : raise Exception ( "Failed to store zone file {} from {}" . format ( zonefile_hash , subdomain_obj . get_fqn ( ) ) ) write_cmd = 'INSERT OR REPLACE INTO {} VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)' . format ( self . subdomain_table ) args = ( subdomain_obj . get_fqn ( ) , subdomain_obj . domain , subdomain_obj . n , subdomain_obj . address , zonefile_hash , subdomain_obj . sig , subdomain_obj . block_height , subdomain_obj . parent_zonefile_hash , subdomain_obj . parent_zonefile_index , subdomain_obj . zonefile_offset , subdomain_obj . txid , ',' . join ( str ( i ) for i in subdomain_obj . domain_zonefiles_missing ) , 1 if subdomain_obj . accepted else 0 , subdomain_obj . resolver ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur db_query_execute ( cursor , write_cmd , args ) num_rows_written = cursor . rowcount if cur is None : self . conn . commit ( ) if num_rows_written != 1 : raise ValueError ( "No row written: fqn={} seq={}" . format ( subdomain_obj . get_fqn ( ) , subdomain_obj . n ) ) return True | Update the subdomain history table for this subdomain entry . Creates it if it doesn t exist . |
36,678 | def get_last_block ( self , cur = None ) : sql = 'SELECT MAX(block_height) FROM {};' . format ( self . subdomain_table ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur rows = db_query_execute ( cursor , sql , ( ) ) height = 0 try : rowdata = rows . fetchone ( ) height = rowdata [ 'MAX(block_height)' ] except : height = 0 return height | Get the highest block last processed |
36,679 | def get_last_sequence ( self , cur = None ) : sql = 'SELECT sequence FROM {} ORDER BY sequence DESC LIMIT 1;' . format ( self . subdomain_table ) cursor = None if cur is None : cursor = self . conn . cursor ( ) else : cursor = cur db_query_execute ( cursor , sql , ( ) ) last_seq = None try : last_seq = cursor . fetchone ( ) [ 0 ] except : last_seq = 0 return int ( last_seq ) | Get the highest sequence number in this db |
36,680 | def _drop_tables ( self ) : drop_cmd = "DROP TABLE IF EXISTS {};" for table in [ self . subdomain_table , self . blocked_table ] : cursor = self . conn . cursor ( ) db_query_execute ( cursor , drop_cmd . format ( table ) , ( ) ) | Clear the subdomain db s tables |
36,681 | def hash_name ( name , script_pubkey , register_addr = None ) : bin_name = b40_to_bin ( name ) name_and_pubkey = bin_name + unhexlify ( script_pubkey ) if register_addr is not None : name_and_pubkey += str ( register_addr ) return hex_hash160 ( name_and_pubkey ) | Generate the hash over a name and hex - string script pubkey |
36,682 | def fetch_profile_data_from_file ( ) : with open ( SEARCH_PROFILE_DATA_FILE , 'r' ) as fin : profiles = json . load ( fin ) counter = 0 log . debug ( "-" * 5 ) log . debug ( "Fetching profile data from file" ) for entry in profiles : new_entry = { } new_entry [ 'key' ] = entry [ 'fqu' ] new_entry [ 'value' ] = entry [ 'profile' ] try : clean_profile_entries ( entry [ 'profile' ] ) profile_data . save ( new_entry ) except Exception as e : log . exception ( e ) log . error ( "Exception on entry {}" . format ( new_entry ) ) counter += 1 if counter % 1000 == 0 : log . debug ( "Processed entries: %s" % counter ) profile_data . ensure_index ( 'key' ) return | takes profile data from file and saves in the profile_data DB |
36,683 | def create_search_index ( ) : counter = 0 people_names = [ ] twitter_handles = [ ] usernames = [ ] log . debug ( "-" * 5 ) log . debug ( "Creating search index" ) for user in namespace . find ( ) : search_profile = { } counter += 1 if ( counter % 1000 == 0 ) : log . debug ( "Processed entries: %s" % counter ) if validUsername ( user [ 'username' ] ) : pass else : continue profile = get_json ( user [ 'profile' ] ) hasBazaarId = False if 'account' in profile : for accounts in profile [ 'account' ] : if accounts [ 'service' ] == 'openbazaar' : hasBazaarId = True search_profile [ 'openbazaar' ] = accounts [ 'identifier' ] if ( hasBazaarId == False ) : search_profile [ 'openbazaar' ] = None if 'name' in profile : try : name = profile [ 'name' ] except : continue try : name = name [ 'formatted' ] . lower ( ) except : name = name . lower ( ) people_names . append ( name ) search_profile [ 'name' ] = name else : search_profile [ 'name' ] = None if 'twitter' in profile : twitter_handle = profile [ 'twitter' ] try : twitter_handle = twitter_handle [ 'username' ] . lower ( ) except : try : twitter_handle = profile [ 'twitter' ] . lower ( ) except : continue twitter_handles . append ( twitter_handle ) search_profile [ 'twitter_handle' ] = twitter_handle else : search_profile [ 'twitter_handle' ] = None search_profile [ 'fullyQualifiedName' ] = user [ 'fqu' ] search_profile [ 'username' ] = user [ 'username' ] usernames . append ( user [ 'fqu' ] ) search_profile [ 'profile' ] = profile search_profiles . save ( search_profile ) people_names = list ( set ( people_names ) ) people_names = { 'name' : people_names } twitter_handles = list ( set ( twitter_handles ) ) twitter_handles = { 'twitter_handle' : twitter_handles } usernames = list ( set ( usernames ) ) usernames = { 'username' : usernames } people_cache . save ( people_names ) twitter_cache . save ( twitter_handles ) username_cache . save ( usernames ) optimize_db ( ) log . debug ( 'Created name/twitter/username search index' ) | takes people names from blockchain and writes deduped names in a cache |
36,684 | def op_extract ( op_name , data , senders , inputs , outputs , block_id , vtxindex , txid ) : global EXTRACT_METHODS if op_name not in EXTRACT_METHODS . keys ( ) : raise Exception ( "No such operation '%s'" % op_name ) method = EXTRACT_METHODS [ op_name ] op_data = method ( data , senders , inputs , outputs , block_id , vtxindex , txid ) return op_data | Extract an operation from transaction data . Return the extracted fields as a dict . |
36,685 | def op_canonicalize ( op_name , parsed_op ) : global CANONICALIZE_METHODS if op_name not in CANONICALIZE_METHODS : return parsed_op else : return CANONICALIZE_METHODS [ op_name ] ( parsed_op ) | Get the canonical representation of a parsed operation s data . Meant for backwards - compatibility |
36,686 | def op_decanonicalize ( op_name , canonical_op ) : global DECANONICALIZE_METHODS if op_name not in DECANONICALIZE_METHODS : return canonical_op else : return DECANONICALIZE_METHODS [ op_name ] ( canonical_op ) | Get the current representation of a parsed operation s data given the canonical representation Meant for backwards - compatibility |
36,687 | def op_check ( state_engine , nameop , block_id , checked_ops ) : global CHECK_METHODS , MUTATE_FIELDS nameop_clone = copy . deepcopy ( nameop ) opcode = None if 'opcode' not in nameop_clone . keys ( ) : op = nameop_clone . get ( 'op' , None ) try : assert op is not None , "BUG: no op defined" opcode = op_get_opcode_name ( op ) assert opcode is not None , "BUG: op '%s' undefined" % op except Exception , e : log . exception ( e ) log . error ( "FATAL: BUG: no 'op' defined" ) sys . exit ( 1 ) else : opcode = nameop_clone [ 'opcode' ] check_method = CHECK_METHODS . get ( opcode , None ) try : assert check_method is not None , "BUG: no check-method for '%s'" % opcode except Exception , e : log . exception ( e ) log . error ( "FATAL: BUG: no check-method for '%s'" % opcode ) sys . exit ( 1 ) rc = check_method ( state_engine , nameop_clone , block_id , checked_ops ) if not rc : return False nameop . clear ( ) nameop . update ( nameop_clone ) op_canonicalize ( nameop [ 'opcode' ] , nameop ) unstored_canonical_fields = UNSTORED_CANONICAL_FIELDS . get ( nameop [ 'opcode' ] ) assert unstored_canonical_fields is not None , "BUG: no UNSTORED_CANONICAL_FIELDS entry for {}" . format ( nameop [ 'opcode' ] ) for f in unstored_canonical_fields : if f in nameop : del nameop [ f ] return rc | Given the state engine the current block the list of pending operations processed so far and the current operation determine whether or not it should be accepted . |
36,688 | def op_get_mutate_fields ( op_name ) : global MUTATE_FIELDS if op_name not in MUTATE_FIELDS . keys ( ) : raise Exception ( "No such operation '%s'" % op_name ) fields = MUTATE_FIELDS [ op_name ] [ : ] return fields | Get the names of the fields that will change when this operation gets applied to a record . |
36,689 | def op_get_consensus_fields ( op_name ) : global SERIALIZE_FIELDS if op_name not in SERIALIZE_FIELDS . keys ( ) : raise Exception ( "No such operation '%s'" % op_name ) fields = SERIALIZE_FIELDS [ op_name ] [ : ] return fields | Get the set of consensus - generating fields for an operation . |
36,690 | def check ( state_engine , nameop , block_id , checked_ops ) : name_consensus_hash = nameop [ 'name_consensus_hash' ] sender = nameop [ 'sender' ] sender_names = state_engine . get_names_owned_by_sender ( sender ) if len ( sender_names ) > MAX_NAMES_PER_SENDER : log . warning ( "Sender '%s' has exceeded quota: only transfers or revokes are allowed" % ( sender ) ) return False name , consensus_hash = state_engine . get_name_from_name_consensus_hash ( name_consensus_hash , sender , block_id ) if name is None or consensus_hash is None : log . warning ( "Unable to resolve name consensus hash '%s' to a name owned by '%s'" % ( name_consensus_hash , sender ) ) return False namespace_id = get_namespace_from_name ( name ) name_rec = state_engine . get_name ( name ) if name_rec is None : log . warning ( "Name '%s' does not exist" % name ) return False if not state_engine . is_namespace_ready ( namespace_id ) : log . warning ( "Namespace '%s' is not ready" % ( namespace_id ) ) return False if state_engine . is_name_revoked ( name ) : log . warning ( "Name '%s' is revoked" % name ) return False if state_engine . is_name_expired ( name , state_engine . lastblock ) : log . warning ( "Name '%s' is expired" % name ) return False if state_engine . is_name_in_grace_period ( name , block_id ) : log . warning ( "Name '{}' is in the renewal grace period. It can only be renewed at this time." . format ( name ) ) return False if not state_engine . is_name_registered ( name ) : log . warning ( "Name '%s' is not registered" % name ) return False if not state_engine . is_name_owner ( name , sender ) : log . warning ( "Name '%s' is not owned by '%s'" % ( name , sender ) ) return False nameop [ 'name' ] = name nameop [ 'consensus_hash' ] = consensus_hash nameop [ 'sender_pubkey' ] = name_rec [ 'sender_pubkey' ] del nameop [ 'name_consensus_hash' ] return True | Verify the validity of an update to a name s associated data . Use the nameop s 128 - bit name hash to find the name itself . |
36,691 | def genesis_block_audit ( genesis_block_stages , key_bundle = GENESIS_BLOCK_SIGNING_KEYS ) : gpg2_path = find_gpg2 ( ) if gpg2_path is None : raise Exception ( 'You must install gpg2 to audit the genesis block, and it must be in your PATH' ) log . debug ( 'Loading {} signing key(s)...' . format ( len ( key_bundle ) ) ) res = load_signing_keys ( gpg2_path , [ key_bundle [ kid ] for kid in key_bundle ] ) if not res : raise Exception ( 'Failed to install signing keys' ) log . debug ( 'Verifying {} signing key(s)...' . format ( len ( key_bundle ) ) ) res = check_gpg2_keys ( gpg2_path , key_bundle . keys ( ) ) if not res : raise Exception ( 'Failed to verify installation of signing keys' ) d = tempfile . mkdtemp ( prefix = '.genesis-block-audit-' ) for stage_id , stage in enumerate ( genesis_block_stages ) : log . debug ( 'Verify stage {}' . format ( stage_id ) ) try : jsonschema . validate ( GENESIS_BLOCK_SCHEMA , stage ) except jsonschema . ValidationError : shutil . rmtree ( d ) log . error ( 'Invalid genesis block -- does not match schema' ) raise ValueError ( 'Invalid genesis block' ) for history_id , history_row in enumerate ( stage [ 'history' ] ) : with open ( os . path . join ( d , 'sig' ) , 'w' ) as f : f . write ( history_row [ 'signature' ] ) with open ( os . path . join ( d , 'hash' ) , 'w' ) as f : f . write ( history_row [ 'hash' ] ) p = subprocess . Popen ( [ gpg2_path , '--verify' , os . path . join ( d , 'sig' ) , os . path . join ( d , 'hash' ) ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) out , err = p . communicate ( ) if p . returncode != 0 : log . error ( 'Failed to verify stage {} history {}' . format ( stage_id , history_id ) ) shutil . rmtree ( d ) return False gb_rows_str = json . dumps ( stage [ 'rows' ] , sort_keys = True , separators = ( ',' , ':' ) ) + '\n' gb_rows_hash = hashlib . sha256 ( gb_rows_str ) . hexdigest ( ) if gb_rows_hash != stage [ 'history' ] [ - 1 ] [ 'hash' ] : log . error ( 'Genesis block stage {} hash mismatch: {} != {}' . format ( stage_id , gb_rows_hash , stage [ 'history' ] [ - 1 ] [ 'hash' ] ) ) shutil . rmtree ( d ) return False shutil . rmtree ( d ) log . info ( 'Genesis block is legitimate' ) return True | Verify the authenticity of the stages of the genesis block optionally with a given set of keys . Return True if valid Return False if not |
36,692 | def is_profile_in_legacy_format ( profile ) : if isinstance ( profile , dict ) : pass elif isinstance ( profile , ( str , unicode ) ) : try : profile = json . loads ( profile ) except ValueError : return False else : return False if "@type" in profile : return False if "@context" in profile : return False is_in_legacy_format = False if "avatar" in profile : is_in_legacy_format = True elif "cover" in profile : is_in_legacy_format = True elif "bio" in profile : is_in_legacy_format = True elif "twitter" in profile : is_in_legacy_format = True elif "facebook" in profile : is_in_legacy_format = True return is_in_legacy_format | Is a given profile JSON object in legacy format? |
36,693 | def format_profile ( profile , fqa , zone_file , address , public_key ) : if isinstance ( zone_file , ( str , unicode ) ) : try : zone_file = blockstack_zones . parse_zone_file ( zone_file ) except : pass data = { 'profile' : profile , 'zone_file' : zone_file , 'public_key' : public_key , 'owner_address' : address } if not fqa . endswith ( '.id' ) : data [ 'verifications' ] = [ "No verifications for non-id namespaces." ] return data profile_in_legacy_format = is_profile_in_legacy_format ( profile ) if not profile_in_legacy_format : data [ 'verifications' ] = fetch_proofs ( data [ 'profile' ] , fqa , address , profile_ver = 3 , zonefile = zone_file ) else : if type ( profile ) is not dict : data [ 'profile' ] = json . loads ( profile ) data [ 'verifications' ] = fetch_proofs ( data [ 'profile' ] , fqa , address ) return data | Process profile data and 1 ) Insert verifications 2 ) Check if profile data is valid JSON |
36,694 | def get_users ( username ) : reply = { } log . debug ( 'Begin /v[x]/users/' + username ) if username is None : reply [ 'error' ] = "No username given" return jsonify ( reply ) , 404 if ',' in username : reply [ 'error' ] = 'Multiple username queries are no longer supported.' return jsonify ( reply ) , 401 if "." not in username : fqa = "{}.{}" . format ( username , 'id' ) else : fqa = username profile = get_profile ( fqa ) reply [ username ] = profile if 'error' in profile : status_code = 200 if 'status_code' in profile : status_code = profile [ 'status_code' ] del profile [ 'status_code' ] return jsonify ( reply ) , status_code else : return jsonify ( reply ) , 200 | Fetch data from username in . id namespace |
36,695 | def is_earlier_than ( nameop1 , block_id , vtxindex ) : return nameop1 [ 'block_number' ] < block_id or ( nameop1 [ 'block_number' ] == block_id and nameop1 [ 'vtxindex' ] < vtxindex ) | Does nameop1 come before bock_id and vtxindex? |
36,696 | def namespacereveal_sanity_check ( namespace_id , version , lifetime , coeff , base , bucket_exponents , nonalpha_discount , no_vowel_discount ) : if not is_b40 ( namespace_id ) or "+" in namespace_id or namespace_id . count ( "." ) > 0 : raise Exception ( "Namespace ID '%s' has non-base-38 characters" % namespace_id ) if len ( namespace_id ) > LENGTHS [ 'blockchain_id_namespace_id' ] : raise Exception ( "Invalid namespace ID length for '%s' (expected length between 1 and %s)" % ( namespace_id , LENGTHS [ 'blockchain_id_namespace_id' ] ) ) if version not in [ NAMESPACE_VERSION_PAY_TO_BURN , NAMESPACE_VERSION_PAY_TO_CREATOR , NAMESPACE_VERSION_PAY_WITH_STACKS ] : raise Exception ( "Invalid namespace version bits {:x}" . format ( version ) ) if lifetime < 0 or lifetime > ( 2 ** 32 - 1 ) : lifetime = NAMESPACE_LIFE_INFINITE if coeff < 0 or coeff > 255 : raise Exception ( "Invalid cost multiplier %s: must be in range [0, 256)" % coeff ) if base < 0 or base > 255 : raise Exception ( "Invalid base price %s: must be in range [0, 256)" % base ) if type ( bucket_exponents ) != list : raise Exception ( "Bucket exponents must be a list" ) if len ( bucket_exponents ) != 16 : raise Exception ( "Exactly 16 buckets required" ) for i in xrange ( 0 , len ( bucket_exponents ) ) : if bucket_exponents [ i ] < 0 or bucket_exponents [ i ] > 15 : raise Exception ( "Invalid bucket exponent %s (must be in range [0, 16)" % bucket_exponents [ i ] ) if nonalpha_discount <= 0 or nonalpha_discount > 15 : raise Exception ( "Invalid non-alpha discount %s: must be in range [0, 16)" % nonalpha_discount ) if no_vowel_discount <= 0 or no_vowel_discount > 15 : raise Exception ( "Invalid no-vowel discount %s: must be in range [0, 16)" % no_vowel_discount ) return True | Verify the validity of a namespace reveal . Return True if valid Raise an Exception if not valid . |
36,697 | def check ( state_engine , nameop , block_id , checked_ops ) : from . register import get_num_names_owned preorder_name_hash = nameop [ 'preorder_hash' ] consensus_hash = nameop [ 'consensus_hash' ] sender = nameop [ 'sender' ] token_fee = nameop [ 'token_fee' ] token_type = nameop [ 'token_units' ] token_address = nameop [ 'address' ] if not state_engine . is_new_preorder ( preorder_name_hash ) : log . warning ( "Name hash '%s' is already preordered" % preorder_name_hash ) return False if not state_engine . is_consensus_hash_valid ( block_id , consensus_hash ) : log . warning ( "Invalid consensus hash '%s'" % consensus_hash ) return False num_names = get_num_names_owned ( state_engine , checked_ops , sender ) if num_names >= MAX_NAMES_PER_SENDER : log . warning ( "Sender '%s' exceeded name quota of %s" % ( sender , MAX_NAMES_PER_SENDER ) ) return False if not 'op_fee' in nameop : log . warning ( "Missing preorder fee" ) return False epoch_features = get_epoch_features ( block_id ) if EPOCH_FEATURE_NAMEOPS_COST_TOKENS in epoch_features and token_type is not None and token_fee is not None : account_info = state_engine . get_account ( token_address , token_type ) if account_info is None : log . warning ( "No account for {} ({})" . format ( token_address , token_type ) ) return False account_balance = state_engine . get_account_balance ( account_info ) assert isinstance ( account_balance , ( int , long ) ) , 'BUG: account_balance of {} is {} (type {})' . format ( token_address , account_balance , type ( account_balance ) ) assert isinstance ( token_fee , ( int , long ) ) , 'BUG: token_fee is {} (type {})' . format ( token_fee , type ( token_fee ) ) if account_balance < token_fee : log . warning ( "Account {} has balance {} {}, but needs to pay {} {}" . format ( token_address , account_balance , token_type , token_fee , token_type ) ) return False if nameop [ 'burn_address' ] != BLOCKSTACK_BURN_ADDRESS : log . warning ( 'Preorder burned to {}, but expected {}' . format ( nameop [ 'burn_address' ] , BLOCKSTACK_BURN_ADDRESS ) ) return False if nameop [ 'token_units' ] != TOKEN_TYPE_STACKS : log . warning ( 'Preorder burned unrecognized token unit "{}"' . format ( nameop [ 'token_units' ] ) ) return False state_preorder_put_account_payment_info ( nameop , token_address , token_type , token_fee ) nameop [ 'token_fee' ] = '{}' . format ( token_fee ) else : state_preorder_put_account_payment_info ( nameop , None , None , None ) nameop [ 'token_fee' ] = '0' nameop [ 'token_units' ] = 'BTC' return True | Verify that a preorder of a name at a particular block number is well - formed |
36,698 | def namedb_create ( path , genesis_block ) : global BLOCKSTACK_DB_SCRIPT if os . path . exists ( path ) : raise Exception ( "Database '%s' already exists" % path ) lines = [ l + ";" for l in BLOCKSTACK_DB_SCRIPT . split ( ";" ) ] con = sqlite3 . connect ( path , isolation_level = None , timeout = 2 ** 30 ) for line in lines : db_query_execute ( con , line , ( ) ) con . row_factory = namedb_row_factory namedb_create_token_genesis ( con , genesis_block [ 'rows' ] , genesis_block [ 'history' ] ) return con | Create a sqlite3 db at the given path . Create all the tables and indexes we need . |
36,699 | def namedb_open ( path ) : con = sqlite3 . connect ( path , isolation_level = None , timeout = 2 ** 30 ) db_query_execute ( con , 'pragma mmap_size=536870912' , ( ) ) con . row_factory = namedb_row_factory version = namedb_get_version ( con ) if not semver_equal ( version , VERSION ) : raise Exception ( 'Database has version {}, but this node is version {}. Please update your node database (such as with fast_sync).' . format ( version , VERSION ) ) return con | Open a connection to our database |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.