idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
224,700
def put_zonefiles(hostport, zonefile_data_list, timeout=30, my_hostport=None, proxy=None):
    """
    Push one or more zone files to the given server.
    Each zone file in the list must be base64-encoded.

    Return the server's response ({'saved': [0|1, ...]}) on success.
    Return {'error': ..., 'http_status': ...} on failure.
    """
    assert hostport or proxy, 'need either hostport or proxy'

    # the server must reply exactly one 0/1 "saved" flag per submitted zone file
    saved_schema = {
        'type': 'object',
        'properties': {
            'saved': {
                'type': 'array',
                'items': {
                    'type': 'integer',
                    'minimum': 0,
                    'maximum': 1,
                },
                'minItems': len(zonefile_data_list),
                'maxItems': len(zonefile_data_list),
            },
        },
        'required': ['saved'],
    }

    schema = json_response_schema(saved_schema)

    if proxy is None:
        proxy = connect_hostport(hostport)

    push_info = None
    try:
        push_info = proxy.put_zonefiles(zonefile_data_list)
        push_info = json_validate(schema, push_info)
        if json_is_error(push_info):
            return push_info

    except socket.timeout:
        log.error("Connection timed out")
        return {'error': 'Connection to remote host timed out.', 'http_status': 503}

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        return {'error': 'Connection to remote host failed.', 'http_status': 502}

    except ValidationError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        return {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        return {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}

    return push_info
Push one or more zone files to the given server . Each zone file in the list must be base64 - encoded
473
23
224,701
def get_zonefiles_by_block(from_block, to_block, hostport=None, proxy=None):
    """
    Get zone file information for zone files announced in [from_block, to_block].
    Pages through the server's results 100 records at a time.

    Return {'last_block': ..., 'zonefile_info': [...]} on success.
    Return {'error': ...} on failure.
    """
    assert hostport or proxy, 'need either hostport or proxy'
    if proxy is None:
        proxy = connect_hostport(hostport)

    zonefile_info_schema = {
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {
                'name': {'type': 'string'},
                'zonefile_hash': {'type': 'string', 'pattern': OP_ZONEFILE_HASH_PATTERN},
                'txid': {'type': 'string', 'pattern': OP_TXID_PATTERN},
                'block_height': {'type': 'integer'},
            },
            'required': ['zonefile_hash', 'txid', 'block_height'],
        },
    }

    response_schema = {
        'type': 'object',
        'properties': {
            'lastblock': {'type': 'integer'},
            'zonefile_info': zonefile_info_schema,
        },
        'required': ['lastblock', 'zonefile_info'],
    }

    offset = 0
    output_zonefiles = []
    last_server_block = 0
    resp = {'zonefile_info': []}

    # keep fetching pages until the server returns an empty batch
    while offset == 0 or len(resp['zonefile_info']) > 0:
        resp = proxy.get_zonefiles_by_block(from_block, to_block, offset, 100)
        if 'error' in resp:
            return resp

        resp = json_validate(response_schema, resp)
        if json_is_error(resp):
            return resp

        output_zonefiles += resp['zonefile_info']
        offset += 100
        last_server_block = max(resp['lastblock'], last_server_block)

    return {'last_block': last_server_block, 'zonefile_info': output_zonefiles}
Get zonefile information for zonefiles announced in [
453
10
224,702
def get_account_tokens(address, hostport=None, proxy=None):
    """
    Get the types of tokens that an address owns.

    Return a sorted list of token types on success.
    Return {'error': ..., 'http_status': ...} on failure.
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    tokens_schema = {
        'type': 'object',
        'properties': {
            # NOTE(review): 'pattern' is a no-op on arrays in JSON Schema
            # (it only applies to strings); kept as-is to match the server's
            # advertised schema.
            'token_types': {
                'type': 'array',
                'pattern': '^(.+){1,19}',
            },
        },
        'required': [
            'token_types',
        ],
    }

    schema = json_response_schema(tokens_schema)

    # BUG FIX: initialize resp before the try block. Previously, if the RPC
    # call itself raised AssertionError, the handler below dereferenced an
    # unbound `resp` (UnboundLocalError) instead of producing a traceback.
    resp = {}
    try:
        resp = proxy.get_account_tokens(address)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp

    except ValidationError as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp

    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp

    except AssertionError as ae:
        if BLOCKSTACK_DEBUG:
            log.exception(ae)

        resp = json_traceback(resp.get('error'))
        return resp

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
        return resp

    resp['token_types'].sort()
    return resp['token_types']
Get the types of tokens that an address owns Returns a list of token types
466
15
224,703
def get_account_balance(address, token_type, hostport=None, proxy=None):
    """
    Get the balance of an account for a particular token.

    Return an int on success.
    Return {'error': ..., 'http_status': ...} on failure.
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    balance_schema = {
        'type': 'object',
        'properties': {
            'balance': {
                'type': 'integer',
            },
        },
        'required': [
            'balance',
        ],
    }

    schema = json_response_schema(balance_schema)

    try:
        resp = proxy.get_account_balance(address, token_type)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp

    except ValidationError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        return {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}

    except socket.timeout:
        log.error("Connection timed out")
        return {'error': 'Connection to remote host timed out.', 'http_status': 503}

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        return {'error': 'Connection to remote host failed.', 'http_status': 502}

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        return {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}

    return resp['balance']
Get the balance of an account for a particular token Returns an int
395
13
224,704
def get_name_DID(name, proxy=None, hostport=None):
    """
    Get the DID for a name or subdomain.

    Return the DID string on success.
    Return {'error': ...} on failure.
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)

    did_schema = {
        'type': 'object',
        'properties': {
            'did': {'type': 'string'}
        },
        'required': ['did'],
    }

    schema = json_response_schema(did_schema)

    resp = {}
    try:
        resp = proxy.get_name_DID(name)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp

        # DID must be well-formed
        assert parse_DID(resp['did'])

    except ValidationError as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        return {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}

    except AssertionError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        return {'error': 'Server replied an unparseable DID'}

    except socket.timeout:
        log.error("Connection timed out")
        return {'error': 'Connection to remote host timed out.', 'http_status': 503}

    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        return {'error': 'Connection to remote host failed.', 'http_status': 502}

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        return {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}

    return resp['did']
Get the DID for a name or subdomain Return the DID string on success Return None if not found
451
20
224,705
def get_JWT(url, address=None):
    """
    Given a URL, fetch and decode the JWT it points to.
    If address is given, then authenticate the JWT with the address
    (check the issuer's public key against it, and verify the signature).

    Return the decoded JWT (a dict) on success.
    Return None on any failure.
    """
    jwt_txt = None
    jwt = None

    log.debug("Try {}".format(url))

    # special case: handle file://
    urlinfo = urllib2.urlparse.urlparse(url)
    if urlinfo.scheme == 'file':
        # points to a path on disk
        try:
            with open(urlinfo.path, 'r') as f:
                jwt_txt = f.read()

        except Exception as e:
            if BLOCKSTACK_TEST:
                log.exception(e)

            log.warning("Failed to read {}".format(url))
            return None

    else:
        # http(s) URL or similar
        try:
            resp = requests.get(url)
            assert resp.status_code == 200, 'Bad status code on {}: {}'.format(url, resp.status_code)
            jwt_txt = resp.text

        except Exception as e:
            if BLOCKSTACK_TEST:
                log.exception(e)

            log.warning("Unable to resolve {}".format(url))
            return None

    try:
        # one of two things are possible:
        # * this is a JWT string
        # * this is a serialized JSON string whose first item is a dict that has 'token' as key,
        #   and that key is a JWT string.
        try:
            jwt_txt = json.loads(jwt_txt)[0]['token']
        except:
            pass

        jwt = jsontokens.decode_token(jwt_txt)

    except Exception as e:
        if BLOCKSTACK_TEST:
            log.exception(e)

        log.warning("Unable to decode token at {}".format(url))
        return None

    try:
        # must be well-formed
        assert isinstance(jwt, dict)
        assert 'payload' in jwt, jwt
        assert isinstance(jwt['payload'], dict)
        assert 'issuer' in jwt['payload'], jwt
        assert isinstance(jwt['payload']['issuer'], dict)
        assert 'publicKey' in jwt['payload']['issuer'], jwt
        assert virtualchain.ecdsalib.ecdsa_public_key(str(jwt['payload']['issuer']['publicKey']))

    except AssertionError as ae:
        if BLOCKSTACK_TEST or BLOCKSTACK_DEBUG:
            log.exception(ae)

        log.warning("JWT at {} is malformed".format(url))
        return None

    if address is not None:
        public_key = str(jwt['payload']['issuer']['publicKey'])

        # the expected address may correspond to either the compressed or the
        # uncompressed form of the issuer's public key
        addrs = [
            virtualchain.address_reencode(virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.decompress(public_key)).address()),
            virtualchain.address_reencode(virtualchain.ecdsalib.ecdsa_public_key(keylib.key_formatting.compress(public_key)).address()),
        ]

        if virtualchain.address_reencode(address) not in addrs:
            # got a JWT, but it doesn't match the address
            log.warning("Found JWT at {}, but its public key has addresses {} and {} (expected {})".format(url, addrs[0], addrs[1], address))
            return None

        verifier = jsontokens.TokenVerifier()
        if not verifier.verify(jwt_txt, public_key):
            # got a JWT, and the address matches, but the signature does not
            log.warning("Found JWT at {}, but it was not signed by {} ({})".format(url, public_key, address))
            return None

    return jwt
Given a URL fetch and decode the JWT it points to . If address is given then authenticate the JWT with the address .
863
27
224,706
def decode_name_zonefile(name, zonefile_txt):
    """
    Decode a zone file for a name.
    Must be either a well-formed DNS zone file, or a legacy Onename
    profile (a JSON object).

    Return the parsed zone file (a dict) on success.
    Return None on error.
    """
    user_zonefile = None
    try:
        # by default, it's a zonefile-formatted text file
        user_zonefile_defaultdict = blockstack_zones.parse_zone_file(zonefile_txt)

        # force dict
        user_zonefile = dict(user_zonefile_defaultdict)

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        # might be legacy profile
        log.debug('WARN: failed to parse user zonefile; trying to import as legacy')
        try:
            user_zonefile = json.loads(zonefile_txt)
            if not isinstance(user_zonefile, dict):
                log.debug('Not a legacy user zonefile')
                return None

        except Exception as e:
            log.error('Failed to parse non-standard zonefile')
            return None

    except Exception as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        log.error('Failed to parse zonefile')
        return None

    if user_zonefile is None:
        return None

    return user_zonefile
Decode a zone file for a name . Must be either a well - formed DNS zone file or a legacy Onename profile . Return None on error
242
30
224,707
def _send_headers(self, status_code=200, content_type='application/json', more_headers=None):
    """
    Generate and send reply headers, including a permissive CORS header.

    @status_code: HTTP status code to send
    @content_type: value for the content-type header
    @more_headers: optional dict of extra {header: value} pairs to send

    BUG FIX: the default for more_headers was a mutable dict ({}), which is
    shared across all calls; use None as the sentinel instead.
    """
    self.send_response(status_code)
    self.send_header('content-type', content_type)
    self.send_header('Access-Control-Allow-Origin', '*')    # CORS

    if more_headers:
        for (hdr, val) in more_headers.items():
            self.send_header(hdr, val)

    self.end_headers()
Generate and reply headers
115
5
224,708
def _reply_json(self, json_payload, status_code=200):
    """
    Serialize a JSON-serializable data structure and write it back to the
    client, preceded by the standard reply headers.
    """
    self._send_headers(status_code=status_code)
    serialized = json.dumps(json_payload)
    self.wfile.write(serialized)
Return a JSON - serializable data structure
59
8
224,709
def _read_json(self, schema=None, maxlen=JSONRPC_MAX_SIZE):
    """
    Read a JSON payload from the requester.

    Return the parsed payload on success.
    Return {'error': 'maxLength'} if the payload violated a maxLength constraint.
    Return None on any other error (wrong content type, unreadable body,
    malformed JSON, or schema-validation failure).
    """
    # JSON post?
    request_type = self.headers.get('content-type', None)
    client_address_str = "{}:{}".format(self.client_address[0], self.client_address[1])

    if request_type != 'application/json':
        log.error("Invalid request of type {} from {}".format(request_type, client_address_str))
        return None

    request_str = self._read_payload(maxlen=maxlen)
    if request_str is None:
        log.error("Failed to read request")
        return None

    # parse the payload
    request = None
    try:
        request = json.loads(request_str)
        if schema is not None:
            jsonschema.validate(request, schema)

    except ValidationError as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        log.error("Validation error on request {}...".format(request_str[:15]))
        if ve.validator == "maxLength":
            return {"error": "maxLength"}

        # BUG FIX: previously, any schema-validation failure other than
        # maxLength fell through and returned the *invalid* request as if it
        # had validated.  Reject it instead.
        return None

    except (TypeError, ValueError) as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)

        return None

    return request
Read a JSON payload from the requester Return the parsed payload on success Return None on error
288
18
224,710
def parse_qs(self, qs):
    """
    Parse a query string, but enforce exactly one instance of each variable.

    Return a dict of {variable: value} on success.
    Return None if any variable appears more than once.
    """
    parsed = urllib2.urlparse.parse_qs(qs)

    single_valued = {}
    for var_name, value_list in parsed.items():
        if len(value_list) > 1:
            # duplicate variable; reject the whole query string
            return None

        single_valued[var_name] = value_list[0]

    return single_valued
Parse query string but enforce one instance of each variable . Return a dict with the variables on success Return None on parse error
91
25
224,711
def get_path_and_qs(self):
    """
    Parse and obtain the request path and query values.
    Fragments are discarded.

    Return {'path': ..., 'qs_values': ..., 'parts': ...} on success.
    Return {'error': ...} if the query string fails to parse.
    """
    path_parts = self.path.split("?", 1)

    # query string, if present, with any fragment stripped
    qs = path_parts[1].split("#", 1)[0] if len(path_parts) > 1 else ""

    # path with any fragment stripped, unquoted and normalized
    path = path_parts[0].split("#", 1)[0]
    path = posixpath.normpath(urllib.unquote(path))

    qs_values = self.parse_qs(qs)
    if qs_values is None:
        return {'error': 'Failed to parse query string'}

    parts = path.strip('/').split('/')

    return {'path': path, 'qs_values': qs_values, 'parts': parts}
Parse and obtain the path and query values . We don't care about fragments .
178
17
224,712
def OPTIONS_preflight(self, path_info):
    """
    Reply with the CORS preflight-check headers and an empty 200 response.
    """
    self.send_response(200)

    cors_headers = [
        ('Access-Control-Allow-Origin', '*'),    # CORS
        ('Access-Control-Allow-Methods', 'GET, PUT, POST, DELETE'),
        ('Access-Control-Allow-Headers', 'content-type, authorization, range'),
        ('Access-Control-Expose-Headers', 'content-length, content-range'),
        ('Access-Control-Max-Age', 21600),
    ]
    for hdr, val in cors_headers:
        self.send_header(hdr, val)

    self.end_headers()
    return
Give back CORS preflight check headers
156
8
224,713
def GET_names_owned_by_address(self, path_info, blockchain, address):
    """
    Get all names owned by an address.

    Reply the list of names (plus subdomains) on success.
    Reply 400 on an invalid address.
    Reply 404 on an unsupported blockchain.
    Reply 502 on failure to get names for any non-specified reason.
    """
    if not check_address(address):
        return self._reply_json({'error': 'Invalid address'}, status_code=400)

    if blockchain != 'bitcoin':
        return self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)

    blockstackd_url = get_blockstackd_url()
    address = str(address)

    subdomain_names = blockstackd_client.get_subdomains_owned_by_address(address, hostport=blockstackd_url)
    if json_is_error(subdomain_names):
        # best-effort: fall back to an empty subdomain list on failure
        log.error("Failed to fetch subdomains owned by address")
        log.error(subdomain_names)
        subdomain_names = []

    # make sure we have the right encoding
    new_addr = virtualchain.address_reencode(address)
    if new_addr != address:
        # BUG FIX: the log arguments were swapped; "Re-encode X to Y" should
        # read old-address -> new-address.
        log.debug("Re-encode {} to {}".format(address, new_addr))
        address = new_addr

    res = blockstackd_client.get_names_owned_by_address(address, hostport=blockstackd_url)
    if json_is_error(res):
        log.error("Failed to get names owned by address")
        self._reply_json({'error': 'Failed to list names by address'}, status_code=res.get('http_status', 502))
        return

    self._reply_json({'names': res + subdomain_names})
    return
Get all names owned by an address Returns the list on success Return 404 on unsupported blockchain Return 502 on failure to get names for any non - specified reason
363
30
224,714
def GET_account_record(self, path_info, account_addr, token_type):
    """
    Get the state of a particular token account.

    Reply the account record on success.
    Reply 400 on an invalid address or token type.
    Reply the upstream error's status (500 default) on failure.
    """
    if not check_account_address(account_addr):
        return self._reply_json({'error': 'Invalid address'}, status_code=400)

    if not check_token_type(token_type):
        return self._reply_json({'error': 'Invalid token type'}, status_code=400)

    blockstackd_url = get_blockstackd_url()
    res = blockstackd_client.get_account_record(account_addr, token_type, hostport=blockstackd_url)
    if json_is_error(res):
        log.error("Failed to get account state for {} {}: {}".format(account_addr, token_type, res['error']))
        return self._reply_json(
            {'error': 'Failed to get account record for {} {}: {}'.format(token_type, account_addr, res['error'])},
            status_code=res.get('http_status', 500))

    self._reply_json(res)
    return
Get the state of a particular token account Returns the account
264
11
224,715
def GET_names(self, path_info):
    """
    Get all names in existence, 100 at a time.
    Requires page= in the query string.
    If all=true is set, then include expired names.

    Reply the list on success.
    Reply 400 on invalid arguments.
    Reply 502 on failure to get names.
    """
    qs_values = path_info['qs_values']

    page = qs_values.get('page', None)
    if page is None:
        log.error("Page required")
        return self._reply_json({'error': 'page= argument required'}, status_code=400)

    try:
        page = int(page)
        if page < 0:
            raise ValueError("Page is negative")
    except ValueError:
        log.error("Invalid page")
        return self._reply_json({'error': 'Invalid page= value'}, status_code=400)

    include_expired = qs_values.get('all', '').lower() in ['1', 'true']

    offset = page * 100
    count = 100

    blockstackd_url = get_blockstackd_url()
    res = blockstackd_client.get_all_names(offset, count, include_expired=include_expired, hostport=blockstackd_url)
    if json_is_error(res):
        log.error("Failed to list all names (offset={}, count={}): {}".format(offset, count, res['error']))
        return self._reply_json({'error': 'Failed to list all names'}, status_code=res.get('http_status', 502))

    return self._reply_json(res)
Get all names in existence If all = true is set then include expired names . Returns the list on success Returns 400 on invalid arguments Returns 502 on failure to get names
343
33
224,716
def GET_name_history(self, path_info, name):
    """
    Get the history of a name or subdomain.
    Takes page= in the query string (defaults to "0" for compatibility).

    Reply the history on success.
    Reply 400 on an invalid name or page.
    Reply 502 on failure to query the blockstack server.
    """
    if not check_name(name) and not check_subdomain(name):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)

    qs_values = path_info['qs_values']
    page = qs_values.get('page', None)
    if page is None:
        page = "0"    # compatibility

    try:
        assert len(page) < 10
        page = int(page)
        assert page >= 0
        assert page <= 2 ** 32 - 1
    except (AssertionError, ValueError, TypeError):
        # BUG FIX: narrowed from a bare `except`, which also swallowed
        # unrelated exceptions such as KeyboardInterrupt/SystemExit.
        log.error("Invalid page")
        self._reply_json({'error': 'Invalid page'}, status_code=400)
        return

    blockstackd_url = get_blockstackd_url()
    res = blockstackd_client.get_name_history_page(name, page, hostport=blockstackd_url)
    if json_is_error(res):
        log.error('Failed to get name history for {}: {}'.format(name, res['error']))
        return self._reply_json({'error': res['error']}, status_code=res.get('http_status', 502))

    return self._reply_json(res['history'])
Get the history of a name or subdomain . Requires page in the query string return the history on success return 400 on invalid start_block or end_block return 502 on failure to query blockstack server
303
41
224,717
def GET_name_zonefile_by_hash(self, path_info, name, zonefile_hash):
    """
    Get a historic zone file for a name, by its hash.
    With raw=1 in the query string, return the raw zone file bytes;
    otherwise reply {'zonefile': ...} after checking that it parses.

    Reply 400 on an invalid name or hash.
    Reply 404 if the zone file is not known or not available.
    Reply 204 if the zone file is non-standard.
    Reply 502 on upstream failure.
    """
    if not check_name(name) and not check_subdomain(name):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)

    if not check_string(zonefile_hash, pattern=OP_ZONEFILE_HASH_PATTERN):
        return self._reply_json({'error': 'Invalid zone file hash'}, status_code=400)

    raw = path_info['qs_values'].get('raw', '')
    raw = (raw.lower() in ['1', 'true'])

    blockstack_hostport = get_blockstackd_url()

    # confirm this hash was ever set for this name
    was_set = blockstackd_client.is_name_zonefile_hash(name, zonefile_hash, hostport=blockstack_hostport)
    if json_is_error(was_set):
        return self._reply_json({'error': was_set['error']}, status_code=was_set.get('http_status', 502))

    if not was_set['result']:
        self._reply_json({'error': 'No such zonefile'}, status_code=404)
        return

    resp = blockstackd_client.get_zonefiles(blockstack_hostport, [str(zonefile_hash)])
    if json_is_error(resp):
        self._reply_json({'error': resp['error']}, status_code=resp.get('http_status', 502))
        return

    if str(zonefile_hash) not in resp['zonefiles']:
        return self._reply_json({'error': 'Blockstack does not have this zonefile. Try again later.'}, status_code=404)

    if raw:
        self._send_headers(status_code=200, content_type='application/octet-stream')
        self.wfile.write(resp['zonefiles'][str(zonefile_hash)])

    else:
        # NOTE: the membership check above already guarantees the hash is
        # present in resp['zonefiles']; a second, unreachable duplicate check
        # that used to live here was removed (dead code).
        zonefile_txt = resp['zonefiles'][str(zonefile_hash)]

        # make sure it's valid
        res = decode_name_zonefile(name, zonefile_txt)
        if res is None:
            log.error("Failed to parse zone file for {}".format(name))
            self._reply_json({'error': 'Non-standard zone file for {}'.format(name)}, status_code=204)
            return

        self._reply_json({'zonefile': zonefile_txt})

    return
Get a historic zonefile for a name With raw = 1 on the query string return the raw zone file
702
21
224,718
def GET_namespaces(self, path_info):
    """
    Get the list of all namespaces.
    Honors offset= and count= in the query string.

    Reply all existing namespaces.
    Reply 502 if we can't reach the server for whatever reason.
    """
    qs_values = path_info['qs_values']
    offset = qs_values.get('offset', None)
    count = qs_values.get('count', None)

    blockstackd_url = get_blockstackd_url()
    namespaces = blockstackd_client.get_all_namespaces(offset=offset, count=count, hostport=blockstackd_url)
    if json_is_error(namespaces):
        # error
        return self._reply_json({'error': namespaces['error']}, status_code=namespaces.get('http_status', 502))

    self._reply_json(namespaces)
    return
Get the list of all namespaces Reply all existing namespaces Reply 502 if we can't reach the server for whatever reason
177
24
224,719
def GET_namespace_info(self, path_info, namespace_id):
    """
    Look up a namespace's info.

    Reply information about the namespace on success.
    Reply 400 on an invalid namespace ID.
    Reply the upstream error's status (502 default) on failure.
    """
    if not check_namespace(namespace_id):
        return self._reply_json({'error': 'Invalid namespace'}, status_code=400)

    blockstackd_url = get_blockstackd_url()
    namespace_rec = blockstackd_client.get_namespace_record(namespace_id, hostport=blockstackd_url)
    if json_is_error(namespace_rec):
        # error
        return self._reply_json({'error': namespace_rec['error']}, status_code=namespace_rec.get('http_status', 502))

    self._reply_json(namespace_rec)
    return
Look up a namespace's info Reply information about a namespace Reply 404 if the namespace doesn't exist Reply 502 for any error in talking to the blockstack server
176
32
224,720
def GET_namespace_num_names(self, path_info, namespace_id):
    """
    Get the number of names in a namespace.

    Reply {'names_count': N} on success.
    Reply 400 on an invalid namespace ID.
    Reply 404 if the namespace does not exist, or the upstream error's
    status (502 default) on failure to talk to the blockstack server.
    """
    if not check_namespace(namespace_id):
        return self._reply_json({'error': 'Invalid namespace'}, status_code=400)

    blockstackd_url = get_blockstackd_url()
    name_count = blockstackd_client.get_num_names_in_namespace(namespace_id, hostport=blockstackd_url)
    if json_is_error(name_count):
        log.error("Failed to load namespace count for {}: {}".format(namespace_id, name_count['error']))
        # BUG FIX: propagate the upstream http_status (502 default) instead of
        # always replying 404 — a server failure is not "namespace not found".
        # This matches the documented contract and the sibling namespace
        # endpoints.
        return self._reply_json(
            {'error': 'Failed to load namespace count: {}'.format(name_count['error'])},
            status_code=name_count.get('http_status', 502))

    self._reply_json({'names_count': name_count})
Get the number of names in a namespace Reply the number on success Reply 404 if the namespace does not exist Reply 502 on failure to talk to the blockstack server
214
32
224,721
def GET_namespace_names(self, path_info, namespace_id):
    """
    Get the list of names in a namespace, 100 at a time.
    Requires page= in the query string.

    Reply the list of names in the namespace on success.
    Reply 400 on an invalid namespace or page.
    Reply the upstream error's status (502 default) on failure.
    """
    if not check_namespace(namespace_id):
        return self._reply_json({'error': 'Invalid namespace'}, status_code=400)

    qs_values = path_info['qs_values']

    page = qs_values.get('page', None)
    if page is None:
        log.error("Page required")
        return self._reply_json({'error': 'page= argument required'}, status_code=400)

    try:
        page = int(page)
        if page < 0:
            raise ValueError()
    except ValueError:
        log.error("Invalid page")
        return self._reply_json({'error': 'Invalid page= value'}, status_code=400)

    offset = page * 100
    count = 100

    blockstackd_url = get_blockstackd_url()
    namespace_names = blockstackd_client.get_names_in_namespace(namespace_id, offset=offset, count=count, hostport=blockstackd_url)
    if json_is_error(namespace_names):
        # error
        return self._reply_json({'error': namespace_names['error']}, status_code=namespace_names.get('http_status', 502))

    self._reply_json(namespace_names)
    return
Get the list of names in a namespace Reply the list of names in a namespace Reply 404 if the namespace doesn't exist Reply 502 for any error in talking to the blockstack server
323
36
224,722
def GET_blockchain_ops(self, path_info, blockchain_name, blockheight):
    """
    Get the blockstack name operations that occurred at a given block height.

    Reply the list of nameops at the given block height.
    Reply 400 on an invalid block height.
    Reply 404 for blockchains other than bitcoin.
    Reply the upstream error's status (502 default) on failure to talk to
    the blockstack server.
    """
    try:
        blockheight = int(blockheight)
        assert check_block(blockheight)
    except (ValueError, TypeError, AssertionError):
        # BUG FIX: narrowed from a bare `except`, which also swallowed
        # unrelated exceptions such as KeyboardInterrupt/SystemExit.
        return self._reply_json({'error': 'Invalid block'}, status_code=400)

    if blockchain_name != 'bitcoin':
        # not supported
        return self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)

    blockstackd_url = get_blockstackd_url()
    # blockheight is already an int here; no need to convert again
    nameops = blockstackd_client.get_blockstack_transactions_at(blockheight, hostport=blockstackd_url)
    if json_is_error(nameops):
        # error
        return self._reply_json({'error': nameops['error']}, status_code=nameops.get('http_status', 502))

    self._reply_json(nameops)
    return
Get the name's historic name operations Reply the list of nameops at the given block height Reply 404 for blockchains other than those supported Reply 502 for any error we have in talking to the blockstack server
225
41
224,723
def GET_blockchain_name_record(self, path_info, blockchain_name, name):
    """
    Get the name's blockchain record in full (without history).

    Reply the raw blockchain record on success.
    Reply 400 on an invalid name or subdomain.
    Reply 404 on an unsupported blockchain.
    Reply the upstream error's status (502 default) on failure.
    """
    if not check_name(name) and not check_subdomain(name):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)

    if blockchain_name != 'bitcoin':
        # not supported
        self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)
        return

    blockstackd_url = get_blockstackd_url()
    name_rec = blockstackd_client.get_name_record(name, include_history=False, hostport=blockstackd_url)
    if json_is_error(name_rec):
        # error
        return self._reply_json({'error': name_rec['error']}, status_code=name_rec.get('http_status', 502))

    return self._reply_json(name_rec)
Get the name s blockchain record in full Reply the raw blockchain record on success Reply 404 if the name is not found Reply 502 if we have an error talking to the server
230
34
224,724
def _get_balance(self, get_address, min_confs):
    """
    Works only in test mode!
    Get the confirmed balance (in satoshis) for an address, counting only
    UTXOs with at least min_confs confirmations.

    Return {'balance': ...} on success.
    Return {'error': ...} on failure.
    """
    bitcoind_opts = get_bitcoin_opts()
    bitcoind = create_bitcoind_service_proxy(
        bitcoind_opts['bitcoind_user'],
        bitcoind_opts['bitcoind_passwd'],
        server=bitcoind_opts['bitcoind_server'],
        port=bitcoind_opts['bitcoind_port'])

    address = virtualchain.address_reencode(get_address)

    try:
        unspents = get_unspents(address, bitcoind)
    except Exception as e:
        log.exception(e)
        return {'error': 'Failed to get unspents for {}'.format(get_address)}

    # sum the value of every UTXO with enough confirmations
    satoshis_confirmed = sum(
        utxo['value'] for utxo in unspents if utxo['confirmations'] >= min_confs)

    return {'balance': satoshis_confirmed}
Works only in test mode! Get the confirmed balance for an address
295
13
224,725
def bind(self):
    """
    Bind to our port, allowing quick re-binding to a recently-used address
    via SO_REUSEADDR.
    """
    # BUG FIX: log message had a typo ("SO_REUSADDR")
    log.debug("Set SO_REUSEADDR")
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    # we want daemon threads, so we join on abrupt shutdown (applies if multithreaded)
    self.daemon_threads = True

    self.server_bind()
    self.server_activate()
Bind to our port
90
4
224,726
def overloaded(self, client_addr):
    """
    Deflect a request when we have too many inbound requests.
    Return the raw HTTP 429 response text to write back to the client.
    """
    # adjacent string literals concatenate to the exact same response bytes
    overloaded_txt = (
        'HTTP/1.0 429 Too Many Requests\r\n'
        'Server: BaseHTTP/0.3 Python/2.7.14+\r\n'
        'Content-type: text/plain\r\n'
        'Content-length: 17\r\n'
        '\r\n'
        'Too many requests'
    )

    if BLOCKSTACK_TEST:
        log.warn('Too many requests; deflecting {}'.format(client_addr))

    return overloaded_txt
Deflect if we have too many inbound requests
111
10
224,727
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """
    Make sure that a string is a byte string.

    Byte strings are returned unchanged.  Text strings are encoded with the
    given encoding and error handler (composed handlers are mapped to the
    best available concrete handler first).  Non-string objects are handled
    per the `nonstring` strategy: 'simplerepr' (default), 'passthru',
    'empty', or 'strict'.
    """
    if isinstance(obj, binary_type):
        return obj

    # We're given a text string
    # If it has surrogates, we know because it will decode
    original_errors = errors
    if errors in _COMPOSED_ERROR_HANDLERS:
        # map a composed handler onto whatever this interpreter supports
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, 'surrogate_then_replace'):
                # Slow but works: round-trip through utf-8, replacing
                # whatever cannot be represented
                return_string = obj.encode('utf-8', 'surrogateescape')
                return_string = return_string.decode('utf-8', 'replace')
                return return_string.encode(encoding, 'replace')
            raise

    # Note: We do these last even though we have to call to_bytes again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes('')
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        # python2.4 doesn't have b''
        return to_bytes('')
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)

    return to_bytes(value, encoding, errors)
Make sure that a string is a byte string
413
9
224,728
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string.

    :param obj: value to convert; text strings pass through unchanged.
    :param encoding: encoding used to decode byte-string input.
    :param errors: codec error handler; composed handlers (see
        _COMPOSED_ERROR_HANDLERS) are mapped to a real codec handler depending
        on surrogateescape availability.
    :param nonstring: strategy for non-string input: 'simplerepr' (str/repr),
        'passthru', 'empty', or 'strict' (raise TypeError).
    :returns: a text (unicode) string.
    :raises TypeError: for nonstring='strict' input or an unknown nonstring value.
    """
    if isinstance(obj, text_type):
        # Already text: nothing to do.
        return obj
    if errors in _COMPOSED_ERROR_HANDLERS:
        # Translate our composed handler names into real codec handlers.
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'
    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)
    # Note: We do these last even though we have to call to_text again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return u''
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)
    # Recurse so the stringified value goes through the decoding path.
    return to_text(value, encoding, errors)
Make sure that a string is a text string
309
9
224,729
def push_images(base_path, image_namespace, engine_obj, config, **kwargs):
    """Push images to a Docker registry via the conductor.

    Resolves the target registry (from a named registry in `config` or from
    the raw `push_to` value), collects credentials, ensures the registry
    config file exists, removes any stale conductor container, and then runs
    the conductor 'push' command.

    :param base_path: project base path handed to the conductor command.
    :param image_namespace: default namespace for images.
    :param engine_obj: container engine; supplies registry defaults and runs
        the conductor command.
    :param config: project config; `config['registries']` may define the
        target registry.
    :returns: dict with the resolved 'url', 'namespace', 'repository_prefix'
        and 'pull_from_url'.
    :raises AnsibleContainerRegistryAttributeException: if a named registry
        lacks a 'url'.
    :raises AnsibleContainerException: if config_path is a directory or its
        parent directory cannot be created.
    """
    config_path = kwargs.get('config_path', engine_obj.auth_config_path)
    username = kwargs.get('username')
    password = kwargs.get('password')
    push_to = kwargs.get('push_to')
    url = engine_obj.default_registry_url
    registry_name = engine_obj.default_registry_name
    namespace = image_namespace
    save_conductor = config.save_conductor
    repository_prefix = None
    pull_from_url = None
    if push_to:
        if config.get('registries', dict()).get(push_to):
            # push_to names a registry defined in the project config.
            url = config['registries'][push_to].get('url')
            namespace = config['registries'][push_to].get('namespace', namespace)
            repository_prefix = config['registries'][push_to].get('repository_prefix')
            pull_from_url = config['registries'][push_to].get('pull_from_url')
            if not url:
                raise AnsibleContainerRegistryAttributeException(
                    u"Registry {} missing required attribute 'url'".format(push_to))
        else:
            # push_to is a raw URL/namespace spec.
            url, namespace = resolve_push_to(push_to, engine_obj.default_registry_url, namespace)
    if username and not password:
        # If a username was supplied without a password, prompt for it
        if url != engine_obj.default_registry_url:
            registry_name = url
        while not password:
            password = getpass.getpass(u"Enter password for {0} at {1}: ".format(username, registry_name))
    if config_path:
        # Make sure the config_path exists
        #  - gives us a chance to create the file with correct permissions, if it does not exists
        #  - makes sure we mount a path to the conductor for a specific file
        config_path = os.path.normpath(os.path.expanduser(config_path))
        if os.path.exists(config_path) and os.path.isdir(config_path):
            raise AnsibleContainerException(
                u"Expecting --config-path to be a path to a file, not a directory")
        elif not os.path.exists(config_path):
            # Make sure the directory path exists
            if not os.path.exists(os.path.dirname(config_path)):
                try:
                    os.makedirs(os.path.dirname(config_path), 0o750)
                except OSError:
                    # NOTE(review): message reads "the requested the path" —
                    # runtime string kept byte-identical here.
                    raise AnsibleContainerException(
                        u"Failed to create the requested the path {}".format(os.path.dirname(config_path)))
            # Touch the file
            open(config_path, 'w').close()
    # If you ran build with --save-build-container, then you're broken without first removing
    # the old build container.
    remove_existing_container(engine_obj, 'conductor', remove_volumes=True)
    push_params = {}
    push_params.update(kwargs)
    push_params['config_path'] = config_path
    push_params['password'] = password
    push_params['url'] = url
    push_params['namespace'] = namespace
    push_params['repository_prefix'] = repository_prefix
    push_params['pull_from_url'] = pull_from_url
    # Push
    engine_obj.await_conductor_command('push', dict(config), base_path, push_params,
                                       save_container=save_conductor)
    return {'url': url,
            'namespace': namespace,
            'repository_prefix': repository_prefix,
            'pull_from_url': pull_from_url}
Pushes images to a Docker registry . Returns dict containing attributes used to push images .
878
17
224,730
def remove_existing_container(engine_obj, service_name, remove_volumes=False):
    """Remove the container backing an existing service (e.g. a stale
    conductor): stop it first if it is running, then delete it if it exists.

    :param engine_obj: container engine providing lookup/stop/delete.
    :param service_name: name of the service whose container is removed.
    :param remove_volumes: forwarded to the engine's delete call.
    """
    container_id = engine_obj.get_container_id_for_service(service_name)
    if engine_obj.service_is_running(service_name):
        engine_obj.stop_container(container_id, forcefully=True)
    if not container_id:
        return
    engine_obj.delete_container(container_id, remove_volumes=remove_volumes)
Remove a container for an existing service . Handy for removing an existing conductor .
112
16
224,731
def resolve_push_to(push_to, default_url, default_namespace):
    """Given a push-to value, return (registry_url, namespace).

    A bare word (no '.', ':' and not 'localhost') is treated as a namespace
    on the default registry; otherwise the first path segment is the host and
    any remainder is the namespace.
    """
    scheme = 'http://' if push_to.startswith('http://') else 'https://'
    stripped = REMOVE_HTTP.sub('', push_to)
    push_to = stripped
    head, sep, tail = stripped.partition('/')
    looks_like_host = bool({'.', ':'} & set(head)) or head == 'localhost'
    if sep:
        # host/namespace form
        return scheme + head, tail
    if looks_like_host:
        # bare host, keep the default namespace
        return scheme + head, default_namespace
    # bare namespace on the default registry
    return default_url, push_to
Given a push - to value return the registry and namespace .
189
12
224,732
def conductorcmd_push(engine_name, project_name, services, **kwargs):
    """Push images to a registry.

    Loads the engine, verifies registry credentials via engine.login, then
    pushes the latest image for every service (or per-container entry) that
    was built from Ansible roles.

    :param engine_name: name of the container engine to load.
    :param project_name: project whose images are pushed.
    :param services: mapping of service name -> service definition.
    :param kwargs: must contain username, password, email, url, namespace,
        tag, config_path and repository_prefix (all popped here).
    """
    username = kwargs.pop('username')
    password = kwargs.pop('password')
    email = kwargs.pop('email')
    url = kwargs.pop('url')
    namespace = kwargs.pop('namespace')
    tag = kwargs.pop('tag')
    config_path = kwargs.pop('config_path')
    repository_prefix = kwargs.pop('repository_prefix')
    engine = load_engine(['PUSH', 'LOGIN'], engine_name, project_name, services)
    logger.info(u'Engine integration loaded. Preparing push.', engine=engine.display_name)
    # Verify that we can authenticate with the registry
    username, password = engine.login(username, password, email, url, config_path)
    # Push each image that has been built using Ansible roles
    for name, service in iteritems(services):
        if service.get('containers'):
            # Multi-container service: push each role-built container image.
            for c in service['containers']:
                if 'roles' in c:
                    cname = '%s-%s' % (name, c['container_name'])
                    image_id = engine.get_latest_image_id_for_service(cname)
                    engine.push(image_id, cname, url=url, tag=tag, namespace=namespace,
                                username=username, password=password,
                                repository_prefix=repository_prefix)
        elif 'roles' in service:
            # if the service has roles, it's an image we should push
            image_id = engine.get_latest_image_id_for_service(name)
            engine.push(image_id, name, url=url, tag=tag, namespace=namespace,
                        username=username, password=password,
                        repository_prefix=repository_prefix)
Push images to a registry
429
5
224,733
def get_route_templates(self):
    """Generate OpenShift Route templates.

    Each published port on a service definition found in container.yml
    represents an externally exposed port and yields one Route template.
    Extra per-route settings from the service's `routes` list are copied onto
    the matching template's spec.

    :returns: list of CommentedMap route templates.
    """
    def _get_published_ports(service_config):
        # Normalize the service's `ports` entries into
        # [{'port': host_port, 'protocol': 'tcp'|...}, ...].
        result = []
        for port in service_config.get('ports', []):
            protocol = 'TCP'
            if isinstance(port, string_types) and '/' in port:
                # "port/protocol" form
                port, protocol = port.split('/')
            if isinstance(port, string_types) and ':' in port:
                # "host:container" form — only the host port is exposed
                host, container = port.split(':')
            else:
                host = port
            result.append({'port': host, 'protocol': protocol.lower()})
        return result

    templates = []
    for name, service_config in self._services.items():
        state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present')
        force = service_config.get(self.CONFIG_KEY, {}).get('force', False)
        published_ports = _get_published_ports(service_config)
        if state != 'present':
            # Only services marked present get routes.
            continue
        for port in published_ports:
            route_name = "%s-%s" % (name, port['port'])
            labels = dict(app=self._namespace_name, service=name)
            template = CommentedMap()
            template['apiVersion'] = self.DEFAULT_API_VERSION
            template['kind'] = 'Route'
            template['force'] = force
            template['metadata'] = CommentedMap([
                ('name', route_name),
                ('namespace', self._namespace_name),
                ('labels', labels.copy())
            ])
            template['spec'] = CommentedMap([
                ('to', CommentedMap([
                    ('kind', 'Service'),
                    ('name', name)
                ])),
                ('port', CommentedMap([
                    ('targetPort', 'port-{}-{}'.format(port['port'], port['protocol']))
                ]))
            ])
            if service_config.get(self.CONFIG_KEY, {}).get('routes'):
                # Merge user-specified route attributes for this port
                # (everything except 'force' and 'port' is copied through).
                for route in service_config[self.CONFIG_KEY]['routes']:
                    if str(route.get('port')) == str(port['port']):
                        for key, value in route.items():
                            if key not in ('force', 'port'):
                                self.copy_attribute(template['spec'], key, value)
            templates.append(template)
    return templates
Generate Openshift route templates or playbook tasks . Each port on a service definition found in container . yml represents an externally exposed port .
582
29
224,734
def preparse_iter(self):
    """Break a Dockerfile apart into significant lines plus any comments
    that precede them.

    Comments can be anywhere, so they are accumulated and attached to the
    next yielded instruction under 'comments'. If a line is a carryover from
    the previous line via an escaped newline, it is yielded with the carried
    directive. An `escape=` parser directive on the very first significant
    line updates self.escape_char.

    Yields dicts with 'directive', 'payload' and optionally 'comments'.
    """
    to_yield = {}
    last_directive = None
    lines_processed = 0
    for line in self.lines_iter():
        if not line:
            continue
        # BUG FIX: the counter was never incremented, which made the
        # `escape=` parser-directive branch below unreachable.
        lines_processed += 1
        if line.startswith(u'#'):
            comment = line.lstrip('#').strip()
            # Directives have to precede any instructions
            if lines_processed == 1:
                if comment.startswith(u'escape='):
                    self.escape_char = comment.split(u'=', 1)[1]
                    continue
            to_yield.setdefault('comments', []).append(comment)
        else:
            # last_directive being set means the previous line ended with a
            # newline escape
            if last_directive:
                directive, payload = last_directive, line
            else:
                directive, payload = line.split(u' ', 1)
            if line.endswith(self.escape_char):
                payload = payload.rstrip(self.escape_char)
                last_directive = directive
            else:
                last_directive = None
            to_yield['directive'] = directive
            to_yield['payload'] = payload.strip()
            yield to_yield
            to_yield = {}
Comments can be anywhere . So break apart the Dockerfile into significant lines and any comments that precede them . And if a line is a carryover from the previous via an escaped - newline bring the directive with it .
275
45
224,735
def run_container(self, image_id, service_name, **kwargs):
    """Run a particular container in Docker.

    :param image_id: image to run.
    :param service_name: service whose configured run kwargs are the base.
    :param kwargs: individual parameter overrides merged over the service
        definition (with relax=True).
    :returns: the started container's id.
    """
    run_kwargs = self.run_kwargs_for_service(service_name)
    run_kwargs.update(kwargs, relax=True)
    logger.debug('Running container in docker', image=image_id, params=run_kwargs)
    container_obj = self.client.containers.run(
        image=image_id,
        detach=True,
        **run_kwargs
    )
    # Stream the container's stdout/stderr through the plain logger.
    log_iter = container_obj.logs(stdout=True, stderr=True, stream=True)
    mux = logmux.LogMultiplexer()
    mux.add_iterator(log_iter, plainLogger)
    return container_obj.id
Run a particular container . The kwargs argument contains individual parameter overrides from the service definition .
169
20
224,736
def push(self, image_id, service_name, tag=None, namespace=None, url=None,
         username=None, password=None, repository_prefix=None, **kwargs):
    """Push an image to a remote registry.

    Tags `image_id` as <url/>namespace/<prefix->service_name:tag and streams
    the push, logging status lines and raising on any reported error.

    :param tag: defaults to the image's build stamp.
    :param repository_prefix: truthy -> "<prefix>-<service>"; None ->
        "<project>-<service>"; '' -> bare service name.
        NOTE(review): a falsy non-None, non-'' value (e.g. False) matches no
        branch and would leave image_name unbound — confirm callers never
        pass one.
    :raises AnsibleContainerException: if the registry reports an error.
    """
    auth_config = {'username': username, 'password': password}
    build_stamp = self.get_build_stamp_for_image(image_id)
    tag = tag or build_stamp
    if repository_prefix:
        image_name = "{}-{}".format(repository_prefix, service_name)
    elif repository_prefix is None:
        image_name = "{}-{}".format(self.project_name, service_name)
    elif repository_prefix == '':
        image_name = service_name
    repository = "{}/{}".format(namespace, image_name)
    if url != self.default_registry_url:
        # Non-default registry: prefix the repository with the bare host.
        url = REMOVE_HTTP.sub('', url)
        repository = "%s/%s" % (url.rstrip('/'), repository)
    logger.info('Tagging %s' % repository)
    self.client.api.tag(image_id, repository, tag=tag)
    logger.info('Pushing %s:%s...' % (repository, tag))
    stream = self.client.api.push(repository, tag=tag, stream=True, auth_config=auth_config)
    last_status = None
    for data in stream:
        data = data.splitlines()
        for line in data:
            line = json.loads(line)
            if type(line) is dict and 'error' in line:
                plainLogger.error(line['error'])
                raise exceptions.AnsibleContainerException(
                    "Failed to push image. {}".format(line['error'])
                )
            elif type(line) is dict and 'status' in line:
                # Only log a status when it changes, to avoid spam.
                if line['status'] != last_status:
                    plainLogger.info(line['status'])
                last_status = line['status']
            else:
                plainLogger.debug(line)
Push an image to a remote registry .
452
8
224,737
def login(self, username, password, email, url, config_path):
    """Authenticate with a registry, or fall back to stored credentials.

    If username and password are provided, authenticate with the registry and
    persist the credentials to the config file; otherwise read existing
    credentials for `url` from the config file.

    :returns: (username, password) as read back from the config file.
    :raises AnsibleContainerConductorException: on login failure or when no
        credentials can be found.
    """
    if username and password:
        try:
            self.client.login(username=username, password=password, email=email,
                              registry=url, reauth=True)
        except docker_errors.APIError as exc:
            raise exceptions.AnsibleContainerConductorException(
                u"Error logging into registry: {}".format(exc)
            )
        except Exception:
            raise
        # Persist the now-verified credentials.
        self._update_config_file(username, password, email, url, config_path)
    # Always re-read from the config file so both paths return the same shape.
    username, password = self._get_registry_auth(url, config_path)
    if not username:
        raise exceptions.AnsibleContainerConductorException(
            u'Please provide login credentials for registry {}.'.format(url))
    return username, password
If username and password are provided authenticate with the registry . Otherwise check the config file for existing authentication data .
166
22
224,738
def _update_config_file ( username , password , email , url , config_path ) : try : # read the existing config config = json . load ( open ( config_path , "r" ) ) except ValueError : config = dict ( ) if not config . get ( 'auths' ) : config [ 'auths' ] = dict ( ) if not config [ 'auths' ] . get ( url ) : config [ 'auths' ] [ url ] = dict ( ) encoded_credentials = dict ( auth = base64 . b64encode ( username + b':' + password ) , email = email ) config [ 'auths' ] [ url ] = encoded_credentials try : json . dump ( config , open ( config_path , "w" ) , indent = 5 , sort_keys = True ) except Exception as exc : raise exceptions . AnsibleContainerConductorException ( u"Failed to write registry config to {0} - {1}" . format ( config_path , exc ) )
Update the config file with the authorization .
223
8
224,739
def _get_registry_auth ( registry_url , config_path ) : username = None password = None try : docker_config = json . load ( open ( config_path ) ) except ValueError : # The configuration file is empty return username , password if docker_config . get ( 'auths' ) : docker_config = docker_config [ 'auths' ] auth_key = docker_config . get ( registry_url , { } ) . get ( 'auth' , None ) if auth_key : username , password = base64 . b64decode ( auth_key ) . split ( ':' , 1 ) return username , password
Retrieve from the config file the current authentication for a given URL and return the username and password
140
18
224,740
def resolve_role_to_path(role):
    """Given a role definition from a service's list of roles, return the
    filesystem path of that role."""
    data_loader = DataLoader()
    try:
        var_manager = VariableManager(loader=data_loader)
    except TypeError:
        # Ansible releases prior to ansible/ansible@8f97aef1a365 took no kwargs.
        var_manager = VariableManager()
    role_obj = RoleInclude.load(data=role,
                                play=None,
                                variable_manager=var_manager,
                                loader=data_loader)
    return role_obj._role_path
Given a role definition from a service's list of roles, returns the file path to the role
102
18
224,741
def get_role_fingerprint(role, service_name, config_vars):
    """Given a role definition from a service's list of roles, return a
    sha256 hexdigest covering the role invocation, the role's file contents,
    each dependency's contents, and any external files referenced by the
    role's file-copy tasks.
    """
    def hash_file(hash_obj, file_path):
        # Feed the file into the hash in 64 KiB chunks, then a '::' separator.
        blocksize = 64 * 1024
        with open(file_path, 'rb') as ifs:
            while True:
                data = ifs.read(blocksize)
                if not data:
                    break
                hash_obj.update(data)
        hash_obj.update('::')

    def hash_dir(hash_obj, dir_path):
        # Hash every file path and its contents under dir_path.
        for root, dirs, files in os.walk(dir_path, topdown=True):
            for file_path in files:
                abs_file_path = os.path.join(root, file_path)
                hash_obj.update(abs_file_path.encode('utf-8'))
                hash_obj.update('::')
                hash_file(hash_obj, abs_file_path)

    def hash_role(hash_obj, role_path):
        # Role content is easy to hash - the hash of the role content with the
        # hash of any role dependencies it has
        hash_dir(hash_obj, role_path)
        for dependency in get_dependencies_for_role(role_path):
            if dependency:
                dependency_path = resolve_role_to_path(dependency)
                hash_role(hash_obj, dependency_path)
        # However tasks within that role might reference files outside of the
        # role, like source code
        loader = DataLoader()
        var_man = VariableManager(loader=loader)
        play = Play.load(generate_playbook_for_role(service_name, config_vars, role)[0],
                         variable_manager=var_man, loader=loader)
        play_context = PlayContext(play=play)
        inv_man = InventoryManager(loader, sources=['%s,' % service_name])
        host = Host(service_name)
        iterator = PlayIterator(inv_man, play, play_context, var_man, config_vars)
        while True:
            _, task = iterator.get_next_task_for_host(host)
            if task is None:
                break
            if task.action in FILE_COPY_MODULES:
                src = task.args.get('src')
                if src is not None:
                    # NOTE(review): this skips sources that exist OR that do
                    # not start with '/' or '..' — confirm the intended
                    # filter; as written, role-relative sources are skipped.
                    if not os.path.exists(src) or not src.startswith(('/', '..')):
                        continue
                    src = os.path.realpath(src)
                    if os.path.isfile(src):
                        hash_file(hash_obj, src)
                    else:
                        hash_dir(hash_obj, src)

    def get_dependencies_for_role(role_path):
        # Yield the role names listed under dependencies in meta/main.yml.
        meta_main_path = os.path.join(role_path, 'meta', 'main.yml')
        if os.path.exists(meta_main_path):
            meta_main = yaml.safe_load(open(meta_main_path))
            if meta_main:
                for dependency in meta_main.get('dependencies', []):
                    yield dependency.get('role', None)

    hash_obj = hashlib.sha256()
    # Account for variables passed to the role by including the invocation string
    hash_obj.update((json.dumps(role) if not isinstance(role, string_types) else role) + '::')
    # Add each of the role's files and directories
    hash_role(hash_obj, resolve_role_to_path(role))
    return hash_obj.hexdigest()
Given a role definition from a service's list of roles, returns a hexdigest based on the role definition, the role contents, and the hexdigest of each dependency
774
32
224,742
def on_predicate(wait_gen, predicate=operator.not_, max_tries=None, max_time=None,
                 jitter=full_jitter, on_success=None, on_backoff=None,
                 on_giveup=None, logger='backoff', **wait_gen_kwargs):
    """Returns a decorator for backoff and retry triggered by `predicate`.

    :param wait_gen: generator yielding successive wait times.
    :param predicate: retried while predicate(result) is truthy
        (default operator.not_ retries on falsy results).
    :param max_tries: / :param max_time: retry limits.
    :param jitter: jitter function applied to each wait value.
    :param on_success: / :param on_backoff: / :param on_giveup: event handlers.
    :param logger: logger instance or logger name (default 'backoff').
    :param wait_gen_kwargs: extra kwargs forwarded to wait_gen.
    """
    def decorate(target):
        # change names because python 2.x doesn't have nonlocal
        logger_ = logger
        if isinstance(logger_, basestring):
            logger_ = logging.getLogger(logger_)
        on_success_ = _config_handlers(on_success)
        on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
        on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
        retry = None
        if sys.version_info >= (3, 5):  # pragma: python=3.5
            # On 3.5+ choose the async retry implementation for coroutines.
            import asyncio
            if asyncio.iscoroutinefunction(target):
                import backoff._async
                retry = backoff._async.retry_predicate
            elif _is_event_loop() and _is_current_task():
                # Verify that sync version is not being run from coroutine
                # (that would lead to event loop hiccups).
                raise TypeError(
                    "backoff.on_predicate applied to a regular function "
                    "inside coroutine, this will lead to event loop "
                    "hiccups. Use backoff.on_predicate on coroutines in "
                    "asynchronous code.")
        if retry is None:
            retry = _sync.retry_predicate
        return retry(target, wait_gen, predicate, max_tries, max_time, jitter,
                     on_success_, on_backoff_, on_giveup_, wait_gen_kwargs)

    # Return a function which decorates a target with a retry loop.
    return decorate
Returns decorator for backoff and retry triggered by predicate .
435
13
224,743
def expo(base=2, factor=1, max_value=None):
    """Generator for exponential decay: yields factor * base**n for
    n = 0, 1, 2, ...; once the next value would reach max_value, yields
    max_value forever."""
    exponent = 0
    while True:
        delay = factor * base ** exponent
        if max_value is not None and delay >= max_value:
            yield max_value
        else:
            yield delay
            exponent += 1
Generator for exponential decay .
56
6
224,744
def fibo(max_value=None):
    """Generator for Fibonacci decay: yields 1, 1, 2, 3, 5, ...; once the
    next value would reach max_value, yields max_value forever."""
    current, following = 1, 1
    while True:
        if max_value is not None and current >= max_value:
            yield max_value
        else:
            yield current
            current, following = following, current + following
Generator for Fibonacci decay .
50
9
224,745
def constant(interval=1):
    """Generator for constant intervals: yields the items of `interval` if it
    is iterable, otherwise yields `interval` itself forever."""
    try:
        values = iter(interval)
    except TypeError:
        # Not iterable: repeat the scalar indefinitely.
        values = itertools.repeat(interval)
    for value in values:
        yield value
Generator for constant intervals .
40
6
224,746
def detect_incorrect_erc20_interface(contract):
    """Detect incorrect ERC20 interfaces: return the functions declared
    directly on `contract` whose signature fails the ERC20 interface check."""
    flagged = []
    for function in contract.functions:
        if function.contract != contract:
            continue
        if IncorrectERC20InterfaceDetection.incorrect_erc20_interface(function.signature):
            flagged.append(function)
    return flagged
Detect incorrect ERC20 interface
53
6
224,747
def _detect(self):
    """Detect incorrect ERC20 interfaces across all contracts and report one
    JSON result per offending contract."""
    results = []
    for contract in self.contracts:
        offending = IncorrectERC20InterfaceDetection.detect_incorrect_erc20_interface(contract)
        if not offending:
            continue
        info = "{} ({}) has incorrect ERC20 function interface(s):\n".format(
            contract.name, contract.source_mapping_str)
        for function in offending:
            info += "\t-{} ({})\n".format(function.name, function.source_mapping_str)
        json = self.generate_json_result(info)
        self.add_functions_to_json(offending, json)
        results.append(json)
    return results
Detect incorrect erc20 interface
150
6
224,748
def detect_shadowing_definitions(self, contract):
    """Detect local variables in `contract`'s own functions and modifiers
    that shadow a function, modifier, event, or state variable declared in
    the contract or any of its ancestors.

    :returns: list of (contract_name, function_name, local_variable,
        [(kind, declaring_contract_name, declaration), ...]) tuples.
    """
    findings = []
    # Only look at functions/modifiers declared directly in this contract.
    for function in contract.functions + contract.modifiers:
        if function.contract != contract:
            continue
        # Check what each of this function's local variables might shadow.
        for variable in function.variables:
            shadowed = []
            for scope_contract in [contract] + contract.inheritance:
                # Same order as the per-kind loops of the original:
                # functions, modifiers, events, state variables.
                groups = (
                    (self.OVERSHADOWED_FUNCTION, scope_contract.functions),
                    (self.OVERSHADOWED_MODIFIER, scope_contract.modifiers),
                    (self.OVERSHADOWED_EVENT, scope_contract.events),
                    (self.OVERSHADOWED_STATE_VARIABLE, scope_contract.variables),
                )
                for kind, members in groups:
                    for member in members:
                        if variable.name == member.name and member.contract == scope_contract:
                            shadowed.append((kind, scope_contract.name, member))
            if shadowed:
                findings.append((contract.name, function.name, variable, shadowed))
    return findings
Detects if functions access modifiers events state variables and local variables are named after reserved keywords . Any such definitions are returned in a list .
423
27
224,749
def _detect(self):
    """Detect shadowing local variables and report one JSON result per
    shadowing local, listing everything it overshadows."""
    results = []
    for contract in self.contracts:
        shadows = self.detect_shadowing_definitions(contract)
        if shadows:
            for shadow in shadows:
                # shadow = (contract_name, parent_name, local_variable, overshadowed)
                local_parent_name = shadow[1]
                local_variable = shadow[2]
                overshadowed = shadow[3]
                info = '{}.{}.{} (local variable @ {}) shadows:\n'.format(
                    contract.name,
                    local_parent_name,
                    local_variable.name,
                    local_variable.source_mapping_str)
                for overshadowed_entry in overshadowed:
                    # entry = (kind, declaring_contract_name, declaration)
                    info += "\t- {}.{} ({} @ {})\n".format(
                        overshadowed_entry[1],
                        overshadowed_entry[2],
                        overshadowed_entry[0],
                        overshadowed_entry[2].source_mapping_str)
                # Generate relevant JSON data for this shadowing definition.
                json = self.generate_json_result(info)
                self.add_variable_to_json(local_variable, json)
                for overshadowed_entry in overshadowed:
                    # Functions/modifiers/events go into the JSON as functions;
                    # state variables as variables.
                    if overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION,
                                                 self.OVERSHADOWED_MODIFIER,
                                                 self.OVERSHADOWED_EVENT]:
                        self.add_function_to_json(overshadowed_entry[2], json)
                    elif overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE:
                        self.add_variable_to_json(overshadowed_entry[2], json)
                results.append(json)
    return results
Detect shadowing local variables
341
5
224,750
def _detect(self):
    """Detect state variables that could be declared constant: valid
    elementary candidates with a constant initial expression that are never
    written by any function. Reports a single aggregated JSON result."""
    results = []
    all_info = ''
    # Collect every state variable of every contract (flattened, deduped).
    all_variables = [c.state_variables for c in self.slither.contracts]
    all_variables = set([item for sublist in all_variables for item in sublist])
    all_non_constant_elementary_variables = set([v for v in all_variables
                                                 if self._valid_candidate(v)])
    # Collect every function reachable from every contract (flattened, deduped).
    all_functions = [c.all_functions_called for c in self.slither.contracts]
    all_functions = list(set([item for sublist in all_functions for item in sublist]))
    # Every state variable written anywhere.
    all_variables_written = [f.state_variables_written for f in all_functions]
    all_variables_written = set([item for sublist in all_variables_written for item in sublist])
    # Candidates that are never written and have a constant initializer.
    constable_variables = [v for v in all_non_constant_elementary_variables
                           if (not v in all_variables_written) and self._constant_initial_expression(v)]
    # Order for deterministic results
    constable_variables = sorted(constable_variables, key=lambda x: x.canonical_name)
    for v in constable_variables:
        info = "{}.{} should be constant ({})\n".format(v.contract.name,
                                                        v.name,
                                                        v.source_mapping_str)
        all_info += info
    if all_info != '':
        json = self.generate_json_result(all_info)
        self.add_variables_to_json(constable_variables, json)
        results.append(json)
    return results
Detect state variables that could be const
381
7
224,751
def detect_suicidal_func(func):
    """Detect whether `func` is suicidal: a public, unprotected,
    non-constructor function that reaches suicide/selfdestruct."""
    if func.is_constructor:
        return False
    if func.visibility != 'public':
        return False
    called_names = [call.name for call in func.internal_calls]
    destructs = ('suicide(address)' in called_names
                 or 'selfdestruct(address)' in called_names)
    if not destructs:
        return False
    # Protected functions gate the destruct behind access control.
    return not func.is_protected()
Detect if the function is suicidal
85
6
224,752
def _detect(self):
    """Detect suicidal functions and report one JSON result per function
    that allows anyone to destruct the contract."""
    results = []
    for contract in self.contracts:
        for func in self.detect_suicidal(contract):
            info = "{}.{} ({}) allows anyone to destruct the contract\n".format(
                func.contract.name, func.name, func.source_mapping_str)
            json = self.generate_json_result(info)
            self.add_function_to_json(func, json)
            results.append(json)
    return results
Detect the suicidal functions
113
4
224,753
def _detect(self):
    """Detect high-level (external) calls whose return value is never used;
    one JSON result per offending function, listing each ignored call site."""
    results = []
    for contract in self.slither.contracts:
        for function in contract.functions + contract.modifiers:
            # Only report functions declared directly in this contract.
            if function.contract != contract:
                continue
            ignored_nodes = self.detect_unused_return_values(function)
            if not ignored_nodes:
                continue
            info = "{}.{} ({}) does not use the value returned by external calls:\n".format(
                function.contract.name, function.name, function.source_mapping_str)
            for node in ignored_nodes:
                info += "\t-{} ({})\n".format(node.expression, node.source_mapping_str)
            json = self.generate_json_result(info)
            self.add_function_to_json(function, json)
            self.add_nodes_to_json(ignored_nodes, json)
            results.append(json)
    return results
Detect high level calls which return a value that are never used
194
12
224,754
def _summary(self, contract):
    """Build the Graphviz summary for one contract using an HTML-like label:
    inheritance arrows followed by a node whose table lists public/private
    functions, modifiers, public/private variables, and any indirect
    shadowing notes."""
    ret = ''
    # Add arrows (number them if there is more than one path so we know order of declaration for inheritance).
    if len(contract.immediate_inheritance) == 1:
        ret += '%s -> %s;\n' % (contract.name, contract.immediate_inheritance[0])
    else:
        for i in range(0, len(contract.immediate_inheritance)):
            ret += '%s -> %s [ label="%s" ];\n' % (contract.name,
                                                   contract.immediate_inheritance[i],
                                                   i + 1)
    # Functions
    visibilities = ['public', 'external']
    public_functions = [self._get_pattern_func(f, contract) for f in contract.functions
                        if not f.is_constructor and f.contract == contract
                        and f.visibility in visibilities]
    public_functions = ''.join(public_functions)
    private_functions = [self._get_pattern_func(f, contract) for f in contract.functions
                         if not f.is_constructor and f.contract == contract
                         and f.visibility not in visibilities]
    private_functions = ''.join(private_functions)
    # Modifiers
    modifiers = [self._get_pattern_func(m, contract) for m in contract.modifiers
                 if m.contract == contract]
    modifiers = ''.join(modifiers)
    # Public variables
    public_variables = [self._get_pattern_var(v, contract) for v in contract.variables
                        if v.contract == contract and v.visibility in visibilities]
    public_variables = ''.join(public_variables)
    private_variables = [self._get_pattern_var(v, contract) for v in contract.variables
                         if v.contract == contract and v.visibility not in visibilities]
    private_variables = ''.join(private_variables)
    # Obtain any indirect shadowing information for this node.
    indirect_shadowing_information = self._get_indirect_shadowing_information(contract)
    # Build the node label
    ret += '%s[shape="box"' % contract.name
    ret += 'label=< <TABLE border="0">'
    ret += '<TR><TD align="center"><B>%s</B></TD></TR>' % contract.name
    if public_functions:
        ret += '<TR><TD align="left"><I>Public Functions:</I></TD></TR>'
        ret += '%s' % public_functions
    if private_functions:
        ret += '<TR><TD align="left"><I>Private Functions:</I></TD></TR>'
        ret += '%s' % private_functions
    if modifiers:
        ret += '<TR><TD align="left"><I>Modifiers:</I></TD></TR>'
        ret += '%s' % modifiers
    if public_variables:
        ret += '<TR><TD align="left"><I>Public Variables:</I></TD></TR>'
        ret += '%s' % public_variables
    if private_variables:
        ret += '<TR><TD align="left"><I>Private Variables:</I></TD></TR>'
        ret += '%s' % private_variables
    if indirect_shadowing_information:
        # Shadowing notes render as a bordered grey footer row.
        ret += '<TR><TD><BR/></TD></TR><TR><TD align="left" border="1"><font color="#777777" point-size="10">%s</font></TD></TR>' % indirect_shadowing_information.replace('\n', '<BR/>')
    ret += '</TABLE> >];\n'
    return ret
Build summary using HTML
809
4
224,755
def detect_builtin_shadowing_definitions(self, contract):
    """Detect declarations in `contract` (functions, modifiers, state
    variables, events, plus the locals of functions and modifiers) whose
    names collide with built-in symbols.

    :returns: list of (shadowing_kind, declaration, None) tuples; local
        variable findings come from detect_builtin_shadowing_locals.
    """
    findings = []
    # Same traversal order as the original per-kind loops; functions and
    # modifiers are additionally scanned for shadowing local variables.
    groups = (
        (self.SHADOWING_FUNCTION, contract.functions, True),
        (self.SHADOWING_MODIFIER, contract.modifiers, True),
        (self.SHADOWING_STATE_VARIABLE, contract.variables, False),
        (self.SHADOWING_EVENT, contract.events, False),
    )
    for kind, members, scan_locals in groups:
        for member in members:
            # Only consider declarations made directly in this contract.
            if member.contract != contract:
                continue
            if self.is_builtin_symbol(member.name):
                findings.append((kind, member, None))
            if scan_locals:
                findings += self.detect_builtin_shadowing_locals(member)
    return findings
Detects if functions, access modifiers, events, state variables, or local variables are named after built - in symbols . Any such definitions are returned in a list .
286
29
224,756
def _detect ( self ) : results = [ ] for contract in self . contracts : shadows = self . detect_builtin_shadowing_definitions ( contract ) if shadows : for shadow in shadows : # Obtain components shadow_type = shadow [ 0 ] shadow_object = shadow [ 1 ] local_variable_parent = shadow [ 2 ] # Build the path for our info string local_variable_path = contract . name + "." if local_variable_parent is not None : local_variable_path += local_variable_parent . name + "." local_variable_path += shadow_object . name info = '{} ({} @ {}) shadows built-in symbol \"{}"\n' . format ( local_variable_path , shadow_type , shadow_object . source_mapping_str , shadow_object . name ) # Generate relevant JSON data for this shadowing definition. json = self . generate_json_result ( info ) if shadow_type in [ self . SHADOWING_FUNCTION , self . SHADOWING_MODIFIER , self . SHADOWING_EVENT ] : self . add_function_to_json ( shadow_object , json ) elif shadow_type in [ self . SHADOWING_STATE_VARIABLE , self . SHADOWING_LOCAL_VARIABLE ] : self . add_variable_to_json ( shadow_object , json ) results . append ( json ) return results
Detect shadowing of built - in symbols
322
8
224,757
def detect_c3_function_shadowing ( contract ) : # Loop through all contracts, and all underlying functions. results = { } for i in range ( 0 , len ( contract . immediate_inheritance ) - 1 ) : inherited_contract1 = contract . immediate_inheritance [ i ] for function1 in inherited_contract1 . functions_and_modifiers : # If this function has already be handled or is unimplemented, we skip it if function1 . full_name in results or function1 . is_constructor or not function1 . is_implemented : continue # Define our list of function instances which overshadow each other. functions_matching = [ ( inherited_contract1 , function1 ) ] already_processed = set ( [ function1 ] ) # Loop again through other contracts and functions to compare to. for x in range ( i + 1 , len ( contract . immediate_inheritance ) ) : inherited_contract2 = contract . immediate_inheritance [ x ] # Loop for each function in this contract for function2 in inherited_contract2 . functions_and_modifiers : # Skip this function if it is the last function that was shadowed. if function2 in already_processed or function2 . is_constructor or not function2 . is_implemented : continue # If this function does have the same full name, it is shadowing through C3 linearization. if function1 . full_name == function2 . full_name : functions_matching . append ( ( inherited_contract2 , function2 ) ) already_processed . add ( function2 ) # If we have more than one definition matching the same signature, we add it to the results. if len ( functions_matching ) > 1 : results [ function1 . full_name ] = functions_matching return list ( results . values ( ) )
Detects and obtains functions which are indirectly shadowed via multiple inheritance by C3 linearization properties despite not directly inheriting from each other .
400
29
224,758
def _detect ( self ) : results = [ ] for c in self . slither . contracts_derived : ret = self . detect_uninitialized ( c ) for variable , functions in ret : info = "{}.{} ({}) is never initialized. It is used in:\n" info = info . format ( variable . contract . name , variable . name , variable . source_mapping_str ) for f in functions : info += "\t- {} ({})\n" . format ( f . name , f . source_mapping_str ) source = [ variable . source_mapping ] source += [ f . source_mapping for f in functions ] json = self . generate_json_result ( info ) self . add_variable_to_json ( variable , json ) self . add_functions_to_json ( functions , json ) results . append ( json ) return results
Detect uninitialized state variables
191
5
224,759
def detect_functions_called ( contract ) : result = [ ] # Obtain all functions reachable by this contract. for func in contract . all_functions_called : # Loop through all nodes in the function, add all calls to a list. for node in func . nodes : for ir in node . irs : if isinstance ( ir , ( InternalCall , SolidityCall ) ) : result . append ( ir . function ) return result
Returns a list of InternallCall SolidityCall calls made in a function
95
15
224,760
def _contains_internal_dynamic_call ( contract ) : for func in contract . all_functions_called : for node in func . nodes : for ir in node . irs : if isinstance ( ir , ( InternalDynamicCall ) ) : return True return False
Checks if a contract contains a dynamic call either in a direct definition or through inheritance .
59
18
224,761
def get_base_most_function ( function ) : # Loop through the list of inherited contracts and this contract, to find the first function instance which # matches this function's signature. Note here that `inheritance` is in order from most basic to most extended. for contract in function . contract . inheritance + [ function . contract ] : # Loop through the functions not inherited (explicitly defined in this contract). for f in contract . functions_not_inherited : # If it matches names, this is the base most function. if f . full_name == function . full_name : return f # Somehow we couldn't resolve it, which shouldn't happen, as the provided function should be found if we could # not find some any more basic. raise Exception ( "Could not resolve the base-most function for the provided function." )
Obtains the base function definition for the provided function . This could be used to obtain the original definition of a function if the provided function is an override .
177
31
224,762
def get_all_function_definitions ( base_most_function ) : # We assume the provided function is the base-most function, so we check all derived contracts # for a redefinition return [ base_most_function ] + [ function for derived_contract in base_most_function . contract . derived_contracts for function in derived_contract . functions if function . full_name == base_most_function . full_name ]
Obtains all function definitions given a base - most function . This includes the provided function plus any overrides of that function .
94
25
224,763
def detect_complex_func ( func ) : result = [ ] code_complexity = compute_cyclomatic_complexity ( func ) if code_complexity > ComplexFunction . MAX_CYCLOMATIC_COMPLEXITY : result . append ( { "func" : func , "cause" : ComplexFunction . CAUSE_CYCLOMATIC } ) """Detect the number of external calls in the func shouldn't be greater than 5 """ count = 0 for node in func . nodes : for ir in node . irs : if isinstance ( ir , ( HighLevelCall , LowLevelCall , LibraryCall ) ) : count += 1 if count > ComplexFunction . MAX_EXTERNAL_CALLS : result . append ( { "func" : func , "cause" : ComplexFunction . CAUSE_EXTERNAL_CALL } ) """Checks the number of the state variables written shouldn't be greater than 10 """ if len ( func . state_variables_written ) > ComplexFunction . MAX_STATE_VARIABLES : result . append ( { "func" : func , "cause" : ComplexFunction . CAUSE_STATE_VARS } ) return result
Detect the cyclomatic complexity of the contract functions shouldn t be greater than 7
255
15
224,764
def _detect ( self ) : results = [ ] for c in self . slither . contracts_derived : unusedVars = self . detect_unused ( c ) if unusedVars : info = '' for var in unusedVars : info += "{}.{} ({}) is never used in {}\n" . format ( var . contract . name , var . name , var . source_mapping_str , c . name ) json = self . generate_json_result ( info ) self . add_variables_to_json ( unusedVars , json ) results . append ( json ) return results
Detect unused state variables
131
4
224,765
def _detect ( self ) : results = [ ] self . results = [ ] self . visited_all_paths = { } for contract in self . slither . contracts : for function in contract . functions : if function . is_implemented and function . contract == contract : if function . contains_assembly : continue # dont consider storage variable, as they are detected by another detector uninitialized_local_variables = [ v for v in function . local_variables if not v . is_storage and v . uninitialized ] function . entry_point . context [ self . key ] = uninitialized_local_variables self . _detect_uninitialized ( function , function . entry_point , [ ] ) all_results = list ( set ( self . results ) ) for ( function , uninitialized_local_variable ) in all_results : var_name = uninitialized_local_variable . name info = "{} in {}.{} ({}) is a local variable never initialiazed\n" info = info . format ( var_name , function . contract . name , function . name , uninitialized_local_variable . source_mapping_str ) json = self . generate_json_result ( info ) self . add_variable_to_json ( uninitialized_local_variable , json ) self . add_function_to_json ( function , json ) results . append ( json ) return results
Detect uninitialized local variables
304
5
224,766
def _detect ( self ) : results = [ ] for c in self . contracts : unindexed_params = self . detect_erc20_unindexed_event_params ( c ) if unindexed_params : info = "{} ({}) does not mark important ERC20 parameters as 'indexed':\n" info = info . format ( c . name , c . source_mapping_str ) for ( event , parameter ) in unindexed_params : info += "\t-{} ({}) does not index parameter '{}'\n" . format ( event . name , event . source_mapping_str , parameter . name ) # Add the events to the JSON (note: we do not add the params/vars as they have no source mapping). json = self . generate_json_result ( info ) self . add_functions_to_json ( [ event for event , _ in unindexed_params ] , json ) results . append ( json ) return results
Detect un - indexed ERC20 event parameters in all contracts .
216
13
224,767
def print_functions ( self , d ) : for c in self . contracts : for f in c . functions : f . cfg_to_dot ( os . path . join ( d , '{}.{}.dot' . format ( c . name , f . name ) ) )
Export all the functions to dot files
62
7
224,768
def output ( self , filename ) : info = 'Inheritance\n' if not self . contracts : return info += blue ( 'Child_Contract -> ' ) + green ( 'Immediate_Base_Contracts' ) info += green ( ' [Not_Immediate_Base_Contracts]' ) for child in self . contracts : info += blue ( f'\n+ {child.name}' ) if child . inheritance : immediate = child . immediate_inheritance not_immediate = [ i for i in child . inheritance if i not in immediate ] info += ' -> ' + green ( ", " . join ( map ( str , immediate ) ) ) if not_immediate : info += ", [" + green ( ", " . join ( map ( str , not_immediate ) ) ) + "]" info += green ( '\n\nBase_Contract -> ' ) + blue ( 'Immediate_Child_Contracts' ) info += blue ( ' [Not_Immediate_Child_Contracts]' ) for base in self . contracts : info += green ( f'\n+ {base.name}' ) children = list ( self . _get_child_contracts ( base ) ) if children : immediate = [ child for child in children if base in child . immediate_inheritance ] not_immediate = [ child for child in children if not child in immediate ] info += ' -> ' + blue ( ", " . join ( map ( str , immediate ) ) ) if not_immediate : info += ', [' + blue ( ", " . join ( map ( str , not_immediate ) ) ) + ']' self . info ( info )
Output the inheritance relation
357
4
224,769
def _detect ( self ) : results = [ ] self . results = [ ] self . visited_all_paths = { } for contract in self . slither . contracts : for function in contract . functions : if function . is_implemented : uninitialized_storage_variables = [ v for v in function . local_variables if v . is_storage and v . uninitialized ] function . entry_point . context [ self . key ] = uninitialized_storage_variables self . _detect_uninitialized ( function , function . entry_point , [ ] ) for ( function , uninitialized_storage_variable ) in self . results : var_name = uninitialized_storage_variable . name info = "{} in {}.{} ({}) is a storage variable never initialiazed\n" info = info . format ( var_name , function . contract . name , function . name , uninitialized_storage_variable . source_mapping_str ) json = self . generate_json_result ( info ) self . add_variable_to_json ( uninitialized_storage_variable , json ) self . add_function_to_json ( function , json ) results . append ( json ) return results
Detect uninitialized storage variables
263
5
224,770
def _can_callback ( self , irs ) : for ir in irs : if isinstance ( ir , LowLevelCall ) : return True if isinstance ( ir , HighLevelCall ) and not isinstance ( ir , LibraryCall ) : # If solidity >0.5, STATICCALL is used if self . slither . solc_version and self . slither . solc_version . startswith ( '0.5.' ) : if isinstance ( ir . function , Function ) and ( ir . function . view or ir . function . pure ) : continue if isinstance ( ir . function , Variable ) : continue # If there is a call to itself # We can check that the function called is # reentrancy-safe if ir . destination == SolidityVariable ( 'this' ) : if isinstance ( ir . function , Variable ) : continue if not ir . function . all_high_level_calls ( ) : if not ir . function . all_low_level_calls ( ) : continue return True return False
Detect if the node contains a call that can be used to re - entrance
226
15
224,771
def _can_send_eth ( irs ) : for ir in irs : if isinstance ( ir , ( HighLevelCall , LowLevelCall , Transfer , Send ) ) : if ir . call_value : return True return False
Detect if the node can send eth
50
7
224,772
def remove_father ( self , father ) : self . _fathers = [ x for x in self . _fathers if x . node_id != father . node_id ]
Remove the father node . Do nothing if the node is not a father
39
14
224,773
def remove_son ( self , son ) : self . _sons = [ x for x in self . _sons if x . node_id != son . node_id ]
Remove the son node . Do nothing if the node is not a son
39
14
224,774
def detect_deprecated_references_in_node ( self , node ) : # Define our results list results = [ ] # If this node has an expression, we check the underlying expression. if node . expression : results += self . detect_deprecation_in_expression ( node . expression ) # Check if there is usage of any deprecated solidity variables or functions for dep_node in self . DEPRECATED_NODE_TYPES : if node . type == dep_node [ 0 ] : results . append ( dep_node ) return results
Detects if a node makes use of any deprecated standards .
120
12
224,775
def detect_deprecated_references_in_contract ( self , contract ) : results = [ ] for state_variable in contract . variables : if state_variable . contract != contract : continue if state_variable . expression : deprecated_results = self . detect_deprecation_in_expression ( state_variable . expression ) if deprecated_results : results . append ( ( state_variable , deprecated_results ) ) # Loop through all functions + modifiers in this contract. for function in contract . functions + contract . modifiers : # We should only look for functions declared directly in this contract (not in a base contract). if function . contract != contract : continue # Loop through each node in this function. for node in function . nodes : # Detect deprecated references in the node. deprecated_results = self . detect_deprecated_references_in_node ( node ) # Detect additional deprecated low-level-calls. for ir in node . irs : if isinstance ( ir , LowLevelCall ) : for dep_llc in self . DEPRECATED_LOW_LEVEL_CALLS : if ir . function_name == dep_llc [ 0 ] : deprecated_results . append ( dep_llc ) # If we have any results from this iteration, add them to our results list. if deprecated_results : results . append ( ( node , deprecated_results ) ) return results
Detects the usage of any deprecated built - in symbols .
297
12
224,776
def process ( filename , args , detector_classes , printer_classes ) : ast = '--ast-compact-json' if args . legacy_ast : ast = '--ast-json' args . filter_paths = parse_filter_paths ( args ) slither = Slither ( filename , ast_format = ast , * * vars ( args ) ) return _process ( slither , detector_classes , printer_classes )
The core high - level code for running Slither static analysis .
95
13
224,777
def _detect ( self ) : results = [ ] for c in self . contracts : for f in c . functions : if f . contract != c : continue if f . view or f . pure : if f . contains_assembly : attr = 'view' if f . view else 'pure' info = '{}.{} ({}) is declared {} but contains assembly code\n' info = info . format ( f . contract . name , f . name , f . source_mapping_str , attr ) json = self . generate_json_result ( info ) self . add_function_to_json ( f , json ) json [ 'elements' ] . append ( { 'type' : 'info' , 'contains_assembly' : True } ) results . append ( json ) variables_written = f . all_state_variables_written ( ) if variables_written : attr = 'view' if f . view else 'pure' info = '{}.{} ({}) is declared {} but changes state variables:\n' info = info . format ( f . contract . name , f . name , f . source_mapping_str , attr ) for variable_written in variables_written : info += '\t- {}.{}\n' . format ( variable_written . contract . name , variable_written . name ) json = self . generate_json_result ( info ) self . add_function_to_json ( f , json ) self . add_variables_to_json ( variables_written , json ) json [ 'elements' ] . append ( { 'type' : 'info' , 'contains_assembly' : False } ) results . append ( json ) return results
Detect the constant function changing the state
372
7
224,778
def constructor ( self ) : cst = self . constructor_not_inherited if cst : return cst for inherited_contract in self . inheritance : cst = inherited_contract . constructor_not_inherited if cst : return cst return None
Return the contract s immediate constructor . If there is no immediate constructor returns the first constructor executed following the c3 linearization Return None if there is no constructor .
57
32
224,779
def get_functions_reading_from_variable ( self , variable ) : return [ f for f in self . functions if f . is_reading ( variable ) ]
Return the functions reading the variable
36
6
224,780
def get_functions_writing_to_variable ( self , variable ) : return [ f for f in self . functions if f . is_writing ( variable ) ]
Return the functions writting the variable
36
7
224,781
def get_source_var_declaration ( self , var ) : return next ( ( x . source_mapping for x in self . variables if x . name == var ) )
Return the source mapping where the variable is declared
39
9
224,782
def get_source_event_declaration ( self , event ) : return next ( ( x . source_mapping for x in self . events if x . name == event ) )
Return the source mapping where the event is declared
39
9
224,783
def get_summary ( self ) : func_summaries = [ f . get_summary ( ) for f in self . functions ] modif_summaries = [ f . get_summary ( ) for f in self . modifiers ] return ( self . name , [ str ( x ) for x in self . inheritance ] , [ str ( x ) for x in self . variables ] , func_summaries , modif_summaries )
Return the function summary
96
4
224,784
def is_erc20 ( self ) : full_names = [ f . full_name for f in self . functions ] return 'transfer(address,uint256)' in full_names and 'transferFrom(address,address,uint256)' in full_names and 'approve(address,uint256)' in full_names
Check if the contract is an erc20 token
69
10
224,785
def integrate_value_gas ( result ) : was_changed = True calls = [ ] while was_changed : # We loop until we do not find any call to value or gas was_changed = False # Find all the assignments assigments = { } for i in result : if isinstance ( i , OperationWithLValue ) : assigments [ i . lvalue . name ] = i if isinstance ( i , TmpCall ) : if isinstance ( i . called , Variable ) and i . called . name in assigments : ins_ori = assigments [ i . called . name ] i . set_ori ( ins_ori ) to_remove = [ ] variable_to_replace = { } # Replace call to value, gas to an argument of the real call for idx in range ( len ( result ) ) : ins = result [ idx ] # value can be shadowed, so we check that the prev ins # is an Argument if is_value ( ins ) and isinstance ( result [ idx - 1 ] , Argument ) : was_changed = True result [ idx - 1 ] . set_type ( ArgumentType . VALUE ) result [ idx - 1 ] . call_id = ins . ori . variable_left . name calls . append ( ins . ori . variable_left ) to_remove . append ( ins ) variable_to_replace [ ins . lvalue . name ] = ins . ori . variable_left elif is_gas ( ins ) and isinstance ( result [ idx - 1 ] , Argument ) : was_changed = True result [ idx - 1 ] . set_type ( ArgumentType . GAS ) result [ idx - 1 ] . call_id = ins . ori . variable_left . name calls . append ( ins . ori . variable_left ) to_remove . append ( ins ) variable_to_replace [ ins . lvalue . name ] = ins . ori . variable_left # Remove the call to value/gas instruction result = [ i for i in result if not i in to_remove ] # update the real call for ins in result : if isinstance ( ins , TmpCall ) : # use of while if there redirections while ins . called . name in variable_to_replace : was_changed = True ins . call_id = variable_to_replace [ ins . called . name ] . name calls . append ( ins . called ) ins . called = variable_to_replace [ ins . called . name ] if isinstance ( ins , Argument ) : while ins . call_id in variable_to_replace : was_changed = True ins . 
call_id = variable_to_replace [ ins . call_id ] . name calls = list ( set ( [ str ( c ) for c in calls ] ) ) idx = 0 calls_d = { } for call in calls : calls_d [ str ( call ) ] = idx idx = idx + 1 return result
Integrate value and gas temporary arguments to call instruction
637
10
224,786
def propagate_type_and_convert_call ( result , node ) : calls_value = { } calls_gas = { } call_data = [ ] idx = 0 # use of while len() as result can be modified during the iteration while idx < len ( result ) : ins = result [ idx ] if isinstance ( ins , TmpCall ) : new_ins = extract_tmp_call ( ins , node . function . contract ) if new_ins : new_ins . set_node ( ins . node ) ins = new_ins result [ idx ] = ins if isinstance ( ins , Argument ) : if ins . get_type ( ) in [ ArgumentType . GAS ] : assert not ins . call_id in calls_gas calls_gas [ ins . call_id ] = ins . argument elif ins . get_type ( ) in [ ArgumentType . VALUE ] : assert not ins . call_id in calls_value calls_value [ ins . call_id ] = ins . argument else : assert ins . get_type ( ) == ArgumentType . CALL call_data . append ( ins . argument ) if isinstance ( ins , ( HighLevelCall , NewContract , InternalDynamicCall ) ) : if ins . call_id in calls_value : ins . call_value = calls_value [ ins . call_id ] if ins . call_id in calls_gas : ins . call_gas = calls_gas [ ins . call_id ] if isinstance ( ins , ( Call , NewContract , NewStructure ) ) : ins . arguments = call_data call_data = [ ] if is_temporary ( ins ) : del result [ idx ] continue new_ins = propagate_types ( ins , node ) if new_ins : if isinstance ( new_ins , ( list , ) ) : if len ( new_ins ) == 2 : new_ins [ 0 ] . set_node ( ins . node ) new_ins [ 1 ] . set_node ( ins . node ) del result [ idx ] result . insert ( idx , new_ins [ 0 ] ) result . insert ( idx + 1 , new_ins [ 1 ] ) idx = idx + 1 else : assert len ( new_ins ) == 3 new_ins [ 0 ] . set_node ( ins . node ) new_ins [ 1 ] . set_node ( ins . node ) new_ins [ 2 ] . set_node ( ins . node ) del result [ idx ] result . insert ( idx , new_ins [ 0 ] ) result . insert ( idx + 1 , new_ins [ 1 ] ) result . insert ( idx + 2 , new_ins [ 2 ] ) idx = idx + 2 else : new_ins . set_node ( ins . node ) result [ idx ] = new_ins idx = idx + 1 return result
Propagate the types variables and convert tmp call to real call operation
628
13
224,787
def convert_to_push ( ir , node ) : lvalue = ir . lvalue if isinstance ( ir . arguments [ 0 ] , list ) : ret = [ ] val = TemporaryVariable ( node ) operation = InitArray ( ir . arguments [ 0 ] , val ) ret . append ( operation ) ir = Push ( ir . destination , val ) length = Literal ( len ( operation . init_values ) ) t = operation . init_values [ 0 ] . type ir . lvalue . set_type ( ArrayType ( t , length ) ) ret . append ( ir ) if lvalue : length = Length ( ir . array , lvalue ) length . lvalue . points_to = ir . lvalue ret . append ( length ) return ret ir = Push ( ir . destination , ir . arguments [ 0 ] ) if lvalue : ret = [ ] ret . append ( ir ) length = Length ( ir . array , lvalue ) length . lvalue . points_to = ir . lvalue ret . append ( length ) return ret return ir
Convert a call to a PUSH operaiton
222
11
224,788
def get_type ( t ) : if isinstance ( t , UserDefinedType ) : if isinstance ( t . type , Contract ) : return 'address' return str ( t )
Convert a type to a str If the instance is a Contract return address instead
40
16
224,789
def find_references_origin ( irs ) : for ir in irs : if isinstance ( ir , ( Index , Member ) ) : ir . lvalue . points_to = ir . variable_left
Make lvalue of each Index Member operation points to the left variable
45
13
224,790
def apply_ir_heuristics ( irs , node ) : irs = integrate_value_gas ( irs ) irs = propagate_type_and_convert_call ( irs , node ) irs = remove_unused ( irs ) find_references_origin ( irs ) return irs
Apply a set of heuristic to improve slithIR
70
11
224,791
def return_type ( self ) : returns = self . returns if returns : return [ r . type for r in returns ] return None
Return the list of return type If no return return None
28
11
224,792
def all_solidity_variables_read ( self ) : if self . _all_solidity_variables_read is None : self . _all_solidity_variables_read = self . _explore_functions ( lambda x : x . solidity_variables_read ) return self . _all_solidity_variables_read
recursive version of solidity_read
78
8
224,793
def all_state_variables_written ( self ) : if self . _all_state_variables_written is None : self . _all_state_variables_written = self . _explore_functions ( lambda x : x . state_variables_written ) return self . _all_state_variables_written
recursive version of variables_written
73
7
224,794
def all_internal_calls ( self ) : if self . _all_internals_calls is None : self . _all_internals_calls = self . _explore_functions ( lambda x : x . internal_calls ) return self . _all_internals_calls
recursive version of internal_calls
66
8
224,795
def all_low_level_calls ( self ) : if self . _all_low_level_calls is None : self . _all_low_level_calls = self . _explore_functions ( lambda x : x . low_level_calls ) return self . _all_low_level_calls
recursive version of low_level calls
73
8
224,796
def all_high_level_calls ( self ) : if self . _all_high_level_calls is None : self . _all_high_level_calls = self . _explore_functions ( lambda x : x . high_level_calls ) return self . _all_high_level_calls
recursive version of high_level calls
73
8
224,797
def all_library_calls ( self ) : if self . _all_library_calls is None : self . _all_library_calls = self . _explore_functions ( lambda x : x . library_calls ) return self . _all_library_calls
recursive version of library calls
63
6
224,798
def all_conditional_state_variables_read ( self , include_loop = True ) : if include_loop : if self . _all_conditional_state_variables_read_with_loop is None : self . _all_conditional_state_variables_read_with_loop = self . _explore_functions ( lambda x : self . _explore_func_cond_read ( x , include_loop ) ) return self . _all_conditional_state_variables_read_with_loop else : if self . _all_conditional_state_variables_read is None : self . _all_conditional_state_variables_read = self . _explore_functions ( lambda x : self . _explore_func_cond_read ( x , include_loop ) ) return self . _all_conditional_state_variables_read
Return the state variable used in a condition
199
8
224,799
def all_conditional_solidity_variables_read ( self , include_loop = True ) : if include_loop : if self . _all_conditional_solidity_variables_read_with_loop is None : self . _all_conditional_solidity_variables_read_with_loop = self . _explore_functions ( lambda x : self . _explore_func_conditional ( x , self . _solidity_variable_in_binary , include_loop ) ) return self . _all_conditional_solidity_variables_read_with_loop else : if self . _all_conditional_solidity_variables_read is None : self . _all_conditional_solidity_variables_read = self . _explore_functions ( lambda x : self . _explore_func_conditional ( x , self . _solidity_variable_in_binary , include_loop ) ) return self . _all_conditional_solidity_variables_read
Return the Soldiity variables directly used in a condtion
228
12