idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
234,300
def WriteUInt256 ( self , value ) : if type ( value ) is UInt256 : value . Serialize ( self ) else : raise Exception ( "Cannot write value that is not UInt256" )
Write a UInt256 type to the stream .
46
10
234,301
def Write2000256List ( self , arr ) : for item in arr : ba = bytearray ( binascii . unhexlify ( item ) ) ba . reverse ( ) self . WriteBytes ( ba )
Write an array of 64 byte items to the stream .
48
11
234,302
def get_user_id ( self , user ) : user_field = getattr ( settings , 'SAML_IDP_DJANGO_USERNAME_FIELD' , None ) or getattr ( user , 'USERNAME_FIELD' , 'username' ) return str ( getattr ( user , user_field ) )
Get identifier for a user . Take the one defined in settings . SAML_IDP_DJANGO_USERNAME_FIELD first if not set use the USERNAME_FIELD property which is set on the user Model . This defaults to the user . username field .
70
56
234,303
def create_identity ( self , user , sp_mapping , * * extra_config ) : return { out_attr : getattr ( user , user_attr ) for user_attr , out_attr in sp_mapping . items ( ) if hasattr ( user , user_attr ) }
Generate an identity dictionary of the user based on the given mapping of desired user attributes by the SP
65
20
234,304
def sso_entry ( request ) : if request . method == 'POST' : passed_data = request . POST binding = BINDING_HTTP_POST else : passed_data = request . GET binding = BINDING_HTTP_REDIRECT request . session [ 'Binding' ] = binding try : request . session [ 'SAMLRequest' ] = passed_data [ 'SAMLRequest' ] except ( KeyError , MultiValueDictKeyError ) as e : return HttpResponseBadRequest ( e ) request . session [ 'RelayState' ] = passed_data . get ( 'RelayState' , '' ) # TODO check how the redirect saml way works. Taken from example idp in pysaml2. if "SigAlg" in passed_data and "Signature" in passed_data : request . session [ 'SigAlg' ] = passed_data [ 'SigAlg' ] request . session [ 'Signature' ] = passed_data [ 'Signature' ] return HttpResponseRedirect ( reverse ( 'djangosaml2idp:saml_login_process' ) )
Entrypoint view for SSO . Gathers the parameters from the HTTP request stores them in the session and redirects the requester to the login_process view .
250
34
234,305
def metadata ( request ) : conf = IdPConfig ( ) conf . load ( copy . deepcopy ( settings . SAML_IDP_CONFIG ) ) metadata = entity_descriptor ( conf ) return HttpResponse ( content = text_type ( metadata ) . encode ( 'utf-8' ) , content_type = "text/xml; charset=utf8" )
Returns an XML with the SAML 2 . 0 metadata for this Idp . The metadata is constructed on - the - fly based on the config dict in the django settings .
83
36
234,306
def dispatch ( self , request , * args , * * kwargs ) : conf = IdPConfig ( ) try : conf . load ( copy . deepcopy ( settings . SAML_IDP_CONFIG ) ) self . IDP = Server ( config = conf ) except Exception as e : return self . handle_error ( request , exception = e ) return super ( IdPHandlerViewMixin , self ) . dispatch ( request , * args , * * kwargs )
Construct IDP server with config from settings dict
103
9
234,307
def get_processor ( self , entity_id , sp_config ) : processor_string = sp_config . get ( 'processor' , None ) if processor_string : try : return import_string ( processor_string ) ( entity_id ) except Exception as e : logger . error ( "Failed to instantiate processor: {} - {}" . format ( processor_string , e ) , exc_info = True ) raise return BaseProcessor ( entity_id )
Instantiate user - specified processor or default to an all - access base processor . Raises an exception if the configured processor class can not be found or initialized .
100
32
234,308
def get ( self , include_backups = False ) : leases = [ ] with open ( self . filename ) if not self . gzip else gzip . open ( self . filename ) as lease_file : lease_data = lease_file . read ( ) if self . gzip : lease_data = lease_data . decode ( 'utf-8' ) for match in self . regex_leaseblock . finditer ( lease_data ) : block = match . groupdict ( ) properties , options , sets = _extract_properties ( block [ 'config' ] ) if 'hardware' not in properties and not include_backups : # E.g. rows like {'binding': 'state abandoned', ...} continue lease = Lease ( block [ 'ip' ] , properties = properties , options = options , sets = sets ) leases . append ( lease ) for match in self . regex_leaseblock6 . finditer ( lease_data ) : block = match . groupdict ( ) properties , options , sets = _extract_properties ( block [ 'config' ] ) host_identifier = block [ 'id' ] block_type = block [ 'type' ] last_client_communication = parse_time ( properties [ 'cltt' ] ) for address_block in self . regex_iaaddr . finditer ( block [ 'config' ] ) : block = address_block . groupdict ( ) properties , options , sets = _extract_properties ( block [ 'config' ] ) lease = Lease6 ( block [ 'ip' ] , properties , last_client_communication , host_identifier , block_type , options = options , sets = sets ) leases . append ( lease ) return leases
Parse the lease file and return a list of Lease instances .
369
14
234,309
def get_current ( self ) : all_leases = self . get ( ) leases = { } for lease in all_leases : if lease . valid and lease . active : if type ( lease ) is Lease : leases [ lease . ethernet ] = lease elif type ( lease ) is Lease6 : leases [ '%s-%s' % ( lease . type , lease . host_identifier_string ) ] = lease return leases
Parse the lease file and return a dict of active and valid Lease instances . The key for this dict is the ethernet address of the lease .
95
31
234,310
def create_layout ( lexer = None , reserve_space_for_menu = 8 , get_prompt_tokens = None , get_bottom_toolbar_tokens = None , extra_input_processors = None , multiline = False , wrap_lines = True ) : # Create processors list. input_processors = [ ConditionalProcessor ( # Highlight the reverse-i-search buffer HighlightSearchProcessor ( preview_search = True ) , HasFocus ( SEARCH_BUFFER ) ) , ] if extra_input_processors : input_processors . extend ( extra_input_processors ) lexer = PygmentsLexer ( lexer , sync_from_start = True ) multiline = to_cli_filter ( multiline ) sidebar_token = [ ( Token . Toolbar . Status . Key , "[ctrl+d]" ) , ( Token . Toolbar . Status , " Exit" ) ] sidebar_width = token_list_width ( sidebar_token ) get_sidebar_tokens = lambda _ : sidebar_token def get_height ( cli ) : # If there is an autocompletion menu to be shown, make sure that our # layout has at least a minimal height in order to display it. if reserve_space_for_menu and not cli . is_done : buff = cli . current_buffer # Reserve the space, either when there are completions, or when # `complete_while_typing` is true and we expect completions very # soon. if buff . complete_while_typing ( ) or buff . complete_state is not None : return LayoutDimension ( min = reserve_space_for_menu ) return LayoutDimension ( ) # Create and return Container instance. return HSplit ( [ VSplit ( [ HSplit ( [ # The main input, with completion menus floating on top of it. FloatContainer ( HSplit ( [ Window ( BufferControl ( input_processors = input_processors , lexer = lexer , # enable preview search for reverse-i-search preview_search = True ) , get_height = get_height , wrap_lines = wrap_lines , left_margins = [ # In multiline mode, use the window margin to display # the prompt and continuation tokens. ConditionalMargin ( PromptMargin ( get_prompt_tokens ) , filter = multiline ) ] , ) , ] ) , [ # Completion menu Float ( xcursor = True , ycursor = True , content = CompletionsMenu ( max_height = 16 , scroll_offset = 1 , extra_filter = HasFocus ( DEFAULT_BUFFER ) ) ) , ] ) , # reverse-i-search toolbar (ctrl+r) ConditionalContainer ( SearchToolbar ( ) , multiline ) , ] ) ] ) , ] + [ VSplit ( [ # Left-Aligned Session Toolbar ConditionalContainer ( Window ( TokenListControl ( get_bottom_toolbar_tokens ) , height = LayoutDimension . exact ( 1 ) ) , filter = ~ IsDone ( ) & RendererHeightIsKnown ( ) ) , # Right-Aligned Container ConditionalContainer ( Window ( TokenListControl ( get_sidebar_tokens ) , height = LayoutDimension . exact ( 1 ) , width = LayoutDimension . exact ( sidebar_width ) ) , filter = ~ IsDone ( ) & RendererHeightIsKnown ( ) ) ] ) ] )
Creates a custom Layout for the Crash input REPL
761
10
234,311
def _parse_statements ( lines ) : lines = ( l . strip ( ) for l in lines if l ) lines = ( l for l in lines if l and not l . startswith ( '--' ) ) parts = [ ] for line in lines : parts . append ( line . rstrip ( ';' ) ) if line . endswith ( ';' ) : yield '\n' . join ( parts ) parts [ : ] = [ ] if parts : yield '\n' . join ( parts )
Return a generator of statements
113
5
234,312
def _show_tables ( self , * args ) : v = self . connection . lowest_server_version schema_name = "table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name" table_filter = " AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else "" self . _exec ( "SELECT format('%s.%s', {schema}, table_name) AS name " "FROM information_schema.tables " "WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')" "{table_filter}" . format ( schema = schema_name , table_filter = table_filter ) )
print the existing tables within the doc schema
163
8
234,313
def execute ( self ) : if not self . cmd . is_conn_available ( ) : return if self . cmd . connection . lowest_server_version >= SYSINFO_MIN_VERSION : success , rows = self . _sys_info ( ) self . cmd . exit_code = self . cmd . exit_code or int ( not success ) if success : for result in rows : self . cmd . pprint ( result . rows , result . cols ) self . cmd . logger . info ( "For debugging purposes you can send above listed information to support@crate.io" ) else : tmpl = 'Crate {version} does not support the cluster "sysinfo" command' self . cmd . logger . warn ( tmpl . format ( version = self . cmd . connection . lowest_server_version ) )
print system and cluster info
178
5
234,314
def bwc_bool_transform_from ( cls , x ) : if x . lower ( ) == 'true' : return True elif x . lower ( ) == 'false' : return False return bool ( int ( x ) )
Read boolean values from old config files correctly and interpret True and False as correct booleans .
52
19
234,315
def _transform_field ( field ) : if isinstance ( field , bool ) : return TRUE if field else FALSE elif isinstance ( field , ( list , dict ) ) : return json . dumps ( field , sort_keys = True , ensure_ascii = False ) else : return field
transform field for displaying
63
4
234,316
def script ( ) : parser = argparse . ArgumentParser ( description = "Print all textual tags of one or more audio files." ) parser . add_argument ( "-b" , "--batch" , help = "disable user interaction" , action = "store_true" ) parser . add_argument ( "file" , nargs = "+" , help = "file(s) to print tags of" ) args = parser . parse_args ( ) for filename in args . file : if isinstance ( filename , bytes ) : filename = filename . decode ( sys . getfilesystemencoding ( ) ) line = "TAGS OF '{0}'" . format ( os . path . basename ( filename ) ) print ( "*" * len ( line ) ) print ( line ) print ( "*" * len ( line ) ) audioFile = taglib . File ( filename ) tags = audioFile . tags if len ( tags ) > 0 : maxKeyLen = max ( len ( key ) for key in tags . keys ( ) ) for key , values in tags . items ( ) : for value in values : print ( ( '{0:' + str ( maxKeyLen ) + '} = {1}' ) . format ( key , value ) ) if len ( audioFile . unsupported ) > 0 : print ( 'Unsupported tag elements: ' + "; " . join ( audioFile . unsupported ) ) if sys . version_info [ 0 ] == 2 : inputFunction = raw_input else : inputFunction = input if not args . batch and inputFunction ( "remove unsupported properties? [yN] " ) . lower ( ) in [ "y" , "yes" ] : audioFile . removeUnsupportedProperties ( audioFile . unsupported ) audioFile . save ( )
Run the command - line script .
381
7
234,317
def sw_reset ( self ) : self . write8 ( _STATUS_BASE , _STATUS_SWRST , 0xFF ) time . sleep ( .500 ) chip_id = self . read8 ( _STATUS_BASE , _STATUS_HW_ID ) if chip_id != _HW_ID_CODE : raise RuntimeError ( "Seesaw hardware ID returned (0x{:x}) is not " "correct! Expected 0x{:x}. Please check your wiring." . format ( chip_id , _HW_ID_CODE ) ) pid = self . get_version ( ) >> 16 if pid == _CRICKIT_PID : from adafruit_seesaw . crickit import Crickit_Pinmap self . pin_mapping = Crickit_Pinmap elif pid == _ROBOHATMM1_PID : from adafruit_seesaw . robohat import MM1_Pinmap self . pin_mapping = MM1_Pinmap else : from adafruit_seesaw . samd09 import SAMD09_Pinmap self . pin_mapping = SAMD09_Pinmap
Trigger a software reset of the SeeSaw chip
265
10
234,318
def _get_default_cache_file_path ( self ) : default_list_path = os . path . join ( self . _get_default_cache_dir ( ) , self . _CACHE_FILE_NAME ) if not os . access ( default_list_path , os . F_OK ) : raise CacheFileError ( "Default cache file does not exist " "'{}'!" . format ( default_list_path ) ) return default_list_path
Returns default cache file path
104
5
234,319
def _get_writable_cache_dir ( self ) : dir_path_data = self . _get_default_cache_dir ( ) if os . access ( dir_path_data , os . W_OK ) : self . _default_cache_file = True return dir_path_data dir_path_user = user_cache_dir ( self . _URLEXTRACT_NAME ) if not os . path . exists ( dir_path_user ) : os . makedirs ( dir_path_user , exist_ok = True ) if os . access ( dir_path_user , os . W_OK ) : return dir_path_user dir_path_temp = tempfile . gettempdir ( ) if os . access ( dir_path_temp , os . W_OK ) : return dir_path_temp raise CacheFileError ( "Cache directories are not writable." )
Get writable cache directory with fallback to user s cache directory and global temp directory
198
17
234,320
def _get_cache_file_path ( self , cache_dir = None ) : if cache_dir is None : # Tries to get writable cache dir with fallback to users data dir # and temp directory cache_dir = self . _get_writable_cache_dir ( ) else : if not os . access ( cache_dir , os . W_OK ) : raise CacheFileError ( "None of cache directories is writable." ) # get directory for cached file return os . path . join ( cache_dir , self . _CACHE_FILE_NAME )
Get path for cache file
125
5
234,321
def _load_cached_tlds ( self ) : # check if cached file is readable if not os . access ( self . _tld_list_path , os . R_OK ) : self . _logger . error ( "Cached file is not readable for current " "user. ({})" . format ( self . _tld_list_path ) ) raise CacheFileError ( "Cached file is not readable for current user." ) set_of_tlds = set ( ) with open ( self . _tld_list_path , 'r' ) as f_cache_tld : for line in f_cache_tld : tld = line . strip ( ) . lower ( ) # skip empty lines if not tld : continue # skip comments if tld [ 0 ] == '#' : continue set_of_tlds . add ( "." + tld ) set_of_tlds . add ( "." + idna . decode ( tld ) ) return set_of_tlds
Loads TLDs from cached file to set .
228
11
234,322
def _get_last_cachefile_modification ( self ) : try : mtime = os . path . getmtime ( self . _tld_list_path ) except OSError : return None return datetime . fromtimestamp ( mtime )
Get last modification of cache file with TLDs .
57
11
234,323
def _get_after_tld_chars ( self ) : after_tld_chars = set ( string . whitespace ) after_tld_chars |= { '/' , '\"' , '\'' , '<' , '>' , '?' , ':' , '.' , ',' } # get left enclosure characters _ , right_enclosure = zip ( * self . _enclosure ) # add right enclosure characters to be valid after TLD # for correct parsing of URL e.g. (example.com) after_tld_chars |= set ( right_enclosure ) return after_tld_chars
Initialize after tld characters
142
6
234,324
def update_when_older ( self , days ) : last_cache = self . _get_last_cachefile_modification ( ) if last_cache is None : return self . update ( ) time_to_update = last_cache + timedelta ( days = days ) if datetime . now ( ) >= time_to_update : return self . update ( ) return True
Update TLD list cache file if the list is older than number of days given in parameter days or if does not exist .
82
25
234,325
def set_stop_chars ( self , stop_chars ) : warnings . warn ( "Method set_stop_chars is deprecated, " "use `set_stop_chars_left` or " "`set_stop_chars_right` instead" , DeprecationWarning ) self . _stop_chars = set ( stop_chars ) self . _stop_chars_left = self . _stop_chars self . _stop_chars_right = self . _stop_chars
Set stop characters used when determining end of URL .
114
10
234,326
def set_stop_chars_left ( self , stop_chars ) : if not isinstance ( stop_chars , set ) : raise TypeError ( "stop_chars should be type set " "but {} was given" . format ( type ( stop_chars ) ) ) self . _stop_chars_left = stop_chars self . _stop_chars = self . _stop_chars_left | self . _stop_chars_right
Set stop characters for text on left from TLD . Stop characters are used when determining end of URL .
104
21
234,327
def add_enclosure ( self , left_char , right_char ) : assert len ( left_char ) == 1 , "Parameter left_char must be character not string" assert len ( right_char ) == 1 , "Parameter right_char must be character not string" self . _enclosure . add ( ( left_char , right_char ) ) self . _after_tld_chars = self . _get_after_tld_chars ( )
Add new enclosure pair of characters . That and should be removed when their presence is detected at beginning and end of found URL
102
24
234,328
def remove_enclosure ( self , left_char , right_char ) : assert len ( left_char ) == 1 , "Parameter left_char must be character not string" assert len ( right_char ) == 1 , "Parameter right_char must be character not string" rm_enclosure = ( left_char , right_char ) if rm_enclosure in self . _enclosure : self . _enclosure . remove ( rm_enclosure ) self . _after_tld_chars = self . _get_after_tld_chars ( )
Remove enclosure pair from set of enclosures .
123
9
234,329
def _complete_url ( self , text , tld_pos , tld ) : left_ok = True right_ok = True max_len = len ( text ) - 1 end_pos = tld_pos start_pos = tld_pos while left_ok or right_ok : if left_ok : if start_pos <= 0 : left_ok = False else : if text [ start_pos - 1 ] not in self . _stop_chars_left : start_pos -= 1 else : left_ok = False if right_ok : if end_pos >= max_len : right_ok = False else : if text [ end_pos + 1 ] not in self . _stop_chars_right : end_pos += 1 else : right_ok = False complete_url = text [ start_pos : end_pos + 1 ] . lstrip ( '/' ) # remove last character from url # when it is allowed character right after TLD (e.g. dot, comma) temp_tlds = { tld + c for c in self . _after_tld_chars } # get only dot+tld+one_char and compare if complete_url [ len ( complete_url ) - len ( tld ) - 1 : ] in temp_tlds : complete_url = complete_url [ : - 1 ] complete_url = self . _split_markdown ( complete_url , tld_pos - start_pos ) complete_url = self . _remove_enclosure_from_url ( complete_url , tld_pos - start_pos , tld ) if not self . _is_domain_valid ( complete_url , tld ) : return "" return complete_url
Expand string in both sides to match whole URL .
378
11
234,330
def _validate_tld_match ( self , text , matched_tld , tld_pos ) : if tld_pos > len ( text ) : return False right_tld_pos = tld_pos + len ( matched_tld ) if len ( text ) > right_tld_pos : if text [ right_tld_pos ] in self . _after_tld_chars : if tld_pos > 0 and text [ tld_pos - 1 ] not in self . _stop_chars_left : return True else : if tld_pos > 0 and text [ tld_pos - 1 ] not in self . _stop_chars_left : return True return False
Validate TLD match - tells if at found position is really TLD .
158
16
234,331
def _split_markdown ( text_url , tld_pos ) : # Markdown url can looks like: # [http://example.com/](http://example.com/status/210) left_bracket_pos = text_url . find ( '[' ) # subtract 3 because URL is never shorter than 3 characters if left_bracket_pos > tld_pos - 3 : return text_url right_bracket_pos = text_url . find ( ')' ) if right_bracket_pos < tld_pos : return text_url middle_pos = text_url . rfind ( "](" ) if middle_pos > tld_pos : return text_url [ left_bracket_pos + 1 : middle_pos ] return text_url
Split markdown URL . There is an issue wen Markdown URL is found . Parsing of the URL does not stop on right place so wrongly found URL has to be split .
170
37
234,332
def gen_urls ( self , text ) : tld_pos = 0 matched_tlds = self . _tlds_re . findall ( text ) for tld in matched_tlds : tmp_text = text [ tld_pos : ] offset = tld_pos tld_pos = tmp_text . find ( tld ) validated = self . _validate_tld_match ( text , tld , offset + tld_pos ) if tld_pos != - 1 and validated : tmp_url = self . _complete_url ( text , offset + tld_pos , tld ) if tmp_url : yield tmp_url # do not search for TLD in already extracted URL tld_pos_url = tmp_url . find ( tld ) # move cursor right after found TLD tld_pos += len ( tld ) + offset # move cursor after end of found URL tld_pos += len ( tmp_url [ tld_pos_url + len ( tld ) : ] ) continue # move cursor right after found TLD tld_pos += len ( tld ) + offset
Creates generator over found URLs in given text .
249
10
234,333
def find_urls ( self , text , only_unique = False ) : urls = self . gen_urls ( text ) urls = OrderedDict . fromkeys ( urls ) if only_unique else urls return list ( urls )
Find all URLs in given text .
56
7
234,334
def possibly_award ( self , * * state ) : assert "user" in state if self . async : from . tasks import AsyncBadgeAward state = self . freeze ( * * state ) AsyncBadgeAward . delay ( self , state ) return self . actually_possibly_award ( * * state )
Will see if the user should be awarded a badge . If this badge is asynchronous it just queues up the badge awarding .
71
24
234,335
def actually_possibly_award ( self , * * state ) : user = state [ "user" ] force_timestamp = state . pop ( "force_timestamp" , None ) awarded = self . award ( * * state ) if awarded is None : return if awarded . level is None : assert len ( self . levels ) == 1 awarded . level = 1 # awarded levels are 1 indexed, for conveineince awarded = awarded . level - 1 assert awarded < len ( self . levels ) if ( not self . multiple and BadgeAward . objects . filter ( user = user , slug = self . slug , level = awarded ) ) : return extra_kwargs = { } if force_timestamp is not None : extra_kwargs [ "awarded_at" ] = force_timestamp badge = BadgeAward . objects . create ( user = user , slug = self . slug , level = awarded , * * extra_kwargs ) self . send_badge_messages ( badge ) badge_awarded . send ( sender = self , badge_award = badge )
Does the actual work of possibly awarding a badge .
231
10
234,336
def send_badge_messages ( self , badge_award ) : user_message = getattr ( badge_award . badge , "user_message" , None ) if callable ( user_message ) : message = user_message ( badge_award ) else : message = user_message if message is not None : badge_award . user . message_set . create ( message = message )
If the Badge class defines a message send it to the user who was just awarded the badge .
89
19
234,337
def visit_Print ( self , node ) : self . prints_used [ ( node . lineno , node . col_offset ) ] = VIOLATIONS [ "found" ] [ PRINT_FUNCTION_NAME ]
Only exists in python 2 .
48
6
234,338
def create_lockfile ( self ) : process = subprocess . Popen ( self . pin_command , stdout = subprocess . PIPE , stderr = subprocess . PIPE , ) stdout , stderr = process . communicate ( ) if process . returncode == 0 : self . fix_lockfile ( ) else : logger . critical ( "ERROR executing %s" , ' ' . join ( self . pin_command ) ) logger . critical ( "Exit code: %s" , process . returncode ) logger . critical ( stdout . decode ( 'utf-8' ) ) logger . critical ( stderr . decode ( 'utf-8' ) ) raise RuntimeError ( "Failed to pip-compile {0}" . format ( self . infile ) )
Write recursive dependencies list to outfile with hard - pinned versions . Then fix it .
172
17
234,339
def infile ( self ) : return os . path . join ( OPTIONS [ 'base_dir' ] , '{0}.{1}' . format ( self . name , OPTIONS [ 'in_ext' ] ) )
Path of the input file
50
5
234,340
def outfile ( self ) : return os . path . join ( OPTIONS [ 'base_dir' ] , '{0}.{1}' . format ( self . name , OPTIONS [ 'out_ext' ] ) )
Path of the output file
50
5
234,341
def pin_command ( self ) : parts = [ 'pip-compile' , '--no-header' , '--verbose' , '--rebuild' , '--no-index' , '--output-file' , self . outfile , self . infile , ] if OPTIONS [ 'upgrade' ] : parts . insert ( 3 , '--upgrade' ) if self . add_hashes : parts . insert ( 1 , '--generate-hashes' ) return parts
Compose pip - compile shell command
110
7
234,342
def fix_lockfile ( self ) : with open ( self . outfile , 'rt' ) as fp : lines = [ self . fix_pin ( line ) for line in self . concatenated ( fp ) ] with open ( self . outfile , 'wt' ) as fp : fp . writelines ( [ line + '\n' for line in lines if line is not None ] )
Run each line of outfile through fix_pin
89
10
234,343
def fix_pin ( self , line ) : dep = Dependency ( line ) if dep . valid : if dep . package in self . ignore : ignored_version = self . ignore [ dep . package ] if ignored_version is not None : # ignored_version can be None to disable conflict detection if dep . version and dep . version != ignored_version : logger . error ( "Package %s was resolved to different " "versions in different environments: %s and %s" , dep . package , dep . version , ignored_version , ) raise RuntimeError ( "Please add constraints for the package " "version listed above" ) return None self . packages [ dep . package ] = dep . version if self . forbid_post or dep . is_compatible : # Always drop post for internal packages dep . drop_post ( ) return dep . serialize ( ) return line . strip ( )
Fix dependency by removing post - releases from versions and loosing constraints on internal packages . Drop packages from ignore set
186
22
234,344
def add_references ( self , other_names ) : if not other_names : # Skip on empty list return with open ( self . outfile , 'rt' ) as fp : header , body = self . split_header ( fp ) with open ( self . outfile , 'wt' ) as fp : fp . writelines ( header ) fp . writelines ( '-r {0}.{1}\n' . format ( other_name , OPTIONS [ 'out_ext' ] ) for other_name in sorted ( other_names ) ) fp . writelines ( body )
Add references to other_names in outfile
132
9
234,345
def replace_header ( self , header_text ) : with open ( self . outfile , 'rt' ) as fp : _ , body = self . split_header ( fp ) with open ( self . outfile , 'wt' ) as fp : fp . write ( header_text ) fp . writelines ( body )
Replace pip - compile header with custom text
74
9
234,346
def order_by_refs ( envs ) : topology = { env [ 'name' ] : set ( env [ 'refs' ] ) for env in envs } by_name = { env [ 'name' ] : env for env in envs } return [ by_name [ name ] for name in toposort_flatten ( topology ) ]
Return topologicaly sorted list of environments . I . e . all referenced environments are placed before their references .
80
22
234,347
def is_compatible ( self ) : for pattern in OPTIONS [ 'compatible_patterns' ] : if fnmatch ( self . package . lower ( ) , pattern ) : return True return False
Check if package name is matched by compatible_patterns
41
11
234,348
def drop_post ( self ) : post_index = self . version . find ( '.post' ) if post_index >= 0 : self . version = self . version [ : post_index ]
Remove . postXXXX postfix from version
42
8
234,349
def verify_environments ( ) : env_confs = discover ( os . path . join ( OPTIONS [ 'base_dir' ] , '*.' + OPTIONS [ 'in_ext' ] , ) ) success = True for conf in env_confs : env = Environment ( name = conf [ 'name' ] ) current_comment = generate_hash_comment ( env . infile ) existing_comment = parse_hash_comment ( env . outfile ) if current_comment == existing_comment : logger . info ( "OK - %s was generated from %s." , env . outfile , env . infile ) else : logger . error ( "ERROR! %s was not regenerated after changes in %s." , env . outfile , env . infile ) logger . error ( "Expecting: %s" , current_comment . strip ( ) ) logger . error ( "Found: %s" , existing_comment . strip ( ) ) success = False return success
For each environment verify hash comments and report failures . If any failure occured exit with code 1 .
212
20
234,350
def generate_hash_comment ( file_path ) : with open ( file_path , 'rb' ) as fp : hexdigest = hashlib . sha1 ( fp . read ( ) . strip ( ) ) . hexdigest ( ) return "# SHA1:{0}\n" . format ( hexdigest )
Read file with given file_path and return string of format
71
12
234,351
def parse_value ( key , value ) : default = OPTIONS . get ( key ) if isinstance ( default , collections . Iterable ) : if not isinstance ( default , six . string_types ) : return [ item . strip ( ) for item in value . split ( ',' ) ] return value
Parse value as comma - delimited list if default value for it is list
65
16
234,352
def python_version_matchers ( ) : version = sys . version_info patterns = [ "{0}" , "{0}{1}" , "{0}.{1}" , ] matchers = [ pattern . format ( * version ) for pattern in patterns ] + [ None ] return set ( matchers )
Return set of string representations of current python version
64
9
234,353
def verify ( ctx ) : oks = run_configurations ( skipper ( verify_environments ) , read_sections , ) ctx . exit ( 0 if False not in oks else 1 )
Upgrade locked dependency versions
44
4
234,354
def skipper ( func ) : @ functools . wraps ( func ) def wrapped ( ) : """Dummy docstring to make pylint happy.""" key = ( OPTIONS [ 'base_dir' ] , OPTIONS [ 'in_ext' ] , OPTIONS [ 'out_ext' ] ) if key not in seen : seen [ key ] = func ( ) return seen [ key ] seen = { } return wrapped
Decorator that memorizes base_dir in_ext and out_ext from OPTIONS and skips execution for duplicates .
92
27
234,355
def run_configurations ( callback , sections_reader ) : base = dict ( OPTIONS ) sections = sections_reader ( ) if sections is None : logger . info ( "Configuration not found in .ini files. " "Running with default settings" ) recompile ( ) elif sections == [ ] : logger . info ( "Configuration does not match current runtime. " "Exiting" ) results = [ ] for section , options in sections : OPTIONS . clear ( ) OPTIONS . update ( base ) OPTIONS . update ( options ) logger . debug ( "Running configuration from section \"%s\". OPTIONS: %r" , section , OPTIONS ) results . append ( callback ( ) ) return results
Parse configurations and execute callback for matching .
149
9
234,356
def recompile ( ) : pinned_packages = { } env_confs = discover ( os . path . join ( OPTIONS [ 'base_dir' ] , '*.' + OPTIONS [ 'in_ext' ] , ) , ) if OPTIONS [ 'header_file' ] : with open ( OPTIONS [ 'header_file' ] ) as fp : base_header_text = fp . read ( ) else : base_header_text = DEFAULT_HEADER hashed_by_reference = set ( ) for name in OPTIONS [ 'add_hashes' ] : hashed_by_reference . update ( reference_cluster ( env_confs , name ) ) included_and_refs = set ( OPTIONS [ 'include_names' ] ) for name in set ( included_and_refs ) : included_and_refs . update ( recursive_refs ( env_confs , name ) ) for conf in env_confs : if included_and_refs : if conf [ 'name' ] not in included_and_refs : # Skip envs that are not included or referenced by included: continue rrefs = recursive_refs ( env_confs , conf [ 'name' ] ) add_hashes = conf [ 'name' ] in hashed_by_reference env = Environment ( name = conf [ 'name' ] , ignore = merged_packages ( pinned_packages , rrefs ) , forbid_post = conf [ 'name' ] in OPTIONS [ 'forbid_post' ] , add_hashes = add_hashes , ) logger . info ( "Locking %s to %s. References: %r" , env . infile , env . outfile , sorted ( rrefs ) ) env . create_lockfile ( ) header_text = generate_hash_comment ( env . infile ) + base_header_text env . replace_header ( header_text ) env . add_references ( conf [ 'refs' ] ) pinned_packages [ conf [ 'name' ] ] = env . packages
Compile requirements files for all environments .
456
8
234,357
def merged_packages ( env_packages , names ) : combined_packages = sorted ( itertools . chain . from_iterable ( env_packages [ name ] . items ( ) for name in names ) ) result = { } errors = set ( ) for name , version in combined_packages : if name in result : if result [ name ] != version : errors . add ( ( name , version , result [ name ] ) ) else : result [ name ] = version if errors : for error in sorted ( errors ) : logger . error ( "Package %s was resolved to different " "versions in different environments: %s and %s" , error [ 0 ] , error [ 1 ] , error [ 2 ] , ) raise RuntimeError ( "Please add constraints for the package version listed above" ) return result
Return union set of environment packages with given names
170
9
234,358
def recursive_refs ( envs , name ) : refs_by_name = { env [ 'name' ] : set ( env [ 'refs' ] ) for env in envs } refs = refs_by_name [ name ] if refs : indirect_refs = set ( itertools . chain . from_iterable ( [ recursive_refs ( envs , ref ) for ref in refs ] ) ) else : indirect_refs = set ( ) return set . union ( refs , indirect_refs )
Return set of recursive refs for given env name
119
10
234,359
def reference_cluster ( envs , name ) : edges = [ set ( [ env [ 'name' ] , ref ] ) for env in envs for ref in env [ 'refs' ] ] prev , cluster = set ( ) , set ( [ name ] ) while prev != cluster : # While cluster grows prev = set ( cluster ) to_visit = [ ] for edge in edges : if cluster & edge : # Add adjacent nodes: cluster |= edge else : # Leave only edges that are out # of cluster for the next round: to_visit . append ( edge ) edges = to_visit return cluster
Return set of all env names referencing or referenced by given name .
133
13
234,360
def get_requests_session ( ) : session = requests . sessions . Session ( ) session . mount ( 'http://' , HTTPAdapter ( pool_connections = 25 , pool_maxsize = 25 , pool_block = True ) ) session . mount ( 'https://' , HTTPAdapter ( pool_connections = 25 , pool_maxsize = 25 , pool_block = True ) ) return session
Set connection pool maxsize and block value to avoid connection pool full warnings .
87
15
234,361
def request_tokens ( self , amount , account ) : address = account . address try : tx_hash = self . send_transaction ( 'requestTokens' , ( amount , ) , transact = { 'from' : address , 'passphrase' : account . password } ) logging . debug ( f'{address} requests {amount} tokens, returning receipt' ) try : receipt = Web3Provider . get_web3 ( ) . eth . waitForTransactionReceipt ( tx_hash , timeout = 20 ) logging . debug ( f'requestTokens receipt: {receipt}' ) except Timeout : receipt = None if not receipt : return False if receipt . status == 0 : logging . warning ( f'request tokens failed: Tx-receipt={receipt}' ) logging . warning ( f'request tokens failed: account {address}' ) return False # check for emitted events: rfe = EventFilter ( 'RequestFrequencyExceeded' , self . events . RequestFrequencyExceeded , argument_filters = { 'requester' : Web3Provider . get_web3 ( ) . toBytes ( hexstr = address ) } , from_block = 'latest' , to_block = 'latest' , ) logs = rfe . get_all_entries ( max_tries = 5 ) if logs : logging . warning ( f'request tokens failed RequestFrequencyExceeded' ) logging . info ( f'RequestFrequencyExceeded event logs: {logs}' ) return False rle = EventFilter ( 'RequestLimitExceeded' , self . events . RequestLimitExceeded , argument_filters = { 'requester' : Web3Provider . get_web3 ( ) . toBytes ( hexstr = address ) } , from_block = 'latest' , to_block = 'latest' , ) logs = rle . get_all_entries ( max_tries = 5 ) if logs : logging . warning ( f'request tokens failed RequestLimitExceeded' ) logging . info ( f'RequestLimitExceeded event logs: {logs}' ) return False return True except ValueError as err : raise OceanInvalidTransaction ( f'Requesting {amount} tokens' f' to {address} failed with error: {err}' )
Request an amount of tokens for a particular address . This transaction has gas cost
503
15
234,362
def get_network_name ( network_id ) : if os . environ . get ( 'KEEPER_NETWORK_NAME' ) : logging . debug ( 'keeper network name overridden by an environment variable: {}' . format ( os . environ . get ( 'KEEPER_NETWORK_NAME' ) ) ) return os . environ . get ( 'KEEPER_NETWORK_NAME' ) return Keeper . _network_name_map . get ( network_id , Keeper . DEFAULT_NETWORK_NAME )
Return the keeper network name based on the current ethereum network id . Return development for every network id that is not mapped .
117
25
234,363
def unlock_account ( account ) : return Web3Provider . get_web3 ( ) . personal . unlockAccount ( account . address , account . password )
Unlock the account .
33
5
234,364
def get_condition_name_by_address ( self , address ) : if self . lock_reward_condition . address == address : return 'lockReward' elif self . access_secret_store_condition . address == address : return 'accessSecretStore' elif self . escrow_reward_condition . address == address : return 'escrowReward' else : logging . error ( f'The current address {address} is not a condition address' )
Return the condition name for a given address .
100
9
234,365
def consume_service ( service_agreement_id , service_endpoint , account , files , destination_folder , index = None ) : signature = Keeper . get_instance ( ) . sign_hash ( service_agreement_id , account ) if index is not None : assert isinstance ( index , int ) , logger . error ( 'index has to be an integer.' ) assert index >= 0 , logger . error ( 'index has to be 0 or a positive integer.' ) assert index < len ( files ) , logger . error ( 'index can not be bigger than the number of files' ) consume_url = Brizo . _create_consume_url ( service_endpoint , service_agreement_id , account , None , signature , index ) logger . info ( f'invoke consume endpoint with this url: {consume_url}' ) response = Brizo . _http_client . get ( consume_url , stream = True ) file_name = Brizo . _get_file_name ( response ) Brizo . write_file ( response , destination_folder , file_name ) else : for i , _file in enumerate ( files ) : consume_url = Brizo . _create_consume_url ( service_endpoint , service_agreement_id , account , _file , signature , i ) logger . info ( f'invoke consume endpoint with this url: {consume_url}' ) response = Brizo . _http_client . get ( consume_url , stream = True ) file_name = Brizo . _get_file_name ( response ) Brizo . write_file ( response , destination_folder , file_name )
Call the brizo endpoint to get access to the different files that form the asset .
360
17
234,366
def _prepare_consume_payload ( did , service_agreement_id , service_definition_id , signature , consumer_address ) : return json . dumps ( { 'did' : did , 'serviceAgreementId' : service_agreement_id , ServiceAgreement . SERVICE_DEFINITION_ID : service_definition_id , 'signature' : signature , 'consumerAddress' : consumer_address } )
Prepare a payload to send to Brizo .
94
10
234,367
def get_brizo_url ( config ) : brizo_url = 'http://localhost:8030' if config . has_option ( 'resources' , 'brizo.url' ) : brizo_url = config . get ( 'resources' , 'brizo.url' ) or brizo_url brizo_path = '/api/v1/brizo' return f'{brizo_url}{brizo_path}'
Return the Brizo component url .
103
7
234,368
def validate ( metadata ) : # validate required sections and their sub items for section_key in Metadata . REQUIRED_SECTIONS : if section_key not in metadata or not metadata [ section_key ] or not isinstance ( metadata [ section_key ] , dict ) : return False section = Metadata . MAIN_SECTIONS [ section_key ] section_metadata = metadata [ section_key ] for subkey in section . REQUIRED_VALUES_KEYS : if subkey not in section_metadata or section_metadata [ subkey ] is None : return False return True
Validator of the metadata composition
127
6
234,369
def get_example ( ) : example = dict ( ) for section_key , section in Metadata . MAIN_SECTIONS . items ( ) : example [ section_key ] = section . EXAMPLE . copy ( ) return example
Retrieve an example of the metadata
51
7
234,370
def encrypt_document ( self , document_id , content , threshold = 0 ) : return self . _secret_store_client ( self . _account ) . publish_document ( remove_0x_prefix ( document_id ) , content , threshold )
encrypt string data using the DID as an secret store id if secret store is enabled then return the result from secret store encryption
54
25
234,371
def download ( service_agreement_id , service_definition_id , ddo , consumer_account , destination , brizo , secret_store , index = None ) : did = ddo . did encrypted_files = ddo . metadata [ 'base' ] [ 'encryptedFiles' ] encrypted_files = ( encrypted_files if isinstance ( encrypted_files , str ) else encrypted_files [ 0 ] ) sa = ServiceAgreement . from_ddo ( service_definition_id , ddo ) consume_url = sa . consume_endpoint if not consume_url : logger . error ( 'Consume asset failed, service definition is missing the "serviceEndpoint".' ) raise AssertionError ( 'Consume asset failed, service definition is missing the "serviceEndpoint".' ) if ddo . get_service ( 'Authorization' ) : secret_store_service = ddo . get_service ( service_type = ServiceTypes . AUTHORIZATION ) secret_store_url = secret_store_service . endpoints . service secret_store . set_secret_store_url ( secret_store_url ) # decrypt the contentUrls decrypted_content_urls = json . loads ( secret_store . decrypt_document ( did_to_id ( did ) , encrypted_files ) ) if isinstance ( decrypted_content_urls , str ) : decrypted_content_urls = [ decrypted_content_urls ] logger . debug ( f'got decrypted contentUrls: {decrypted_content_urls}' ) if not os . path . isabs ( destination ) : destination = os . path . abspath ( destination ) if not os . path . exists ( destination ) : os . mkdir ( destination ) asset_folder = os . path . join ( destination , f'datafile.{did_to_id(did)}.{sa.service_definition_id}' ) if not os . path . exists ( asset_folder ) : os . mkdir ( asset_folder ) if index is not None : assert isinstance ( index , int ) , logger . error ( 'index has to be an integer.' ) assert index >= 0 , logger . error ( 'index has to be 0 or a positive integer.' ) assert index < len ( decrypted_content_urls ) , logger . error ( 'index can not be bigger than the number of files' ) brizo . consume_service ( service_agreement_id , consume_url , consumer_account , decrypted_content_urls , asset_folder , index ) return asset_folder
Download asset data files or result files from a compute job .
559
12
234,372
def set_key_value ( self , value , store_type = PUBLIC_KEY_STORE_TYPE_BASE64 ) : if isinstance ( value , dict ) : if PUBLIC_KEY_STORE_TYPE_HEX in value : self . set_key_value ( value [ PUBLIC_KEY_STORE_TYPE_HEX ] , PUBLIC_KEY_STORE_TYPE_HEX ) elif PUBLIC_KEY_STORE_TYPE_BASE64 in value : self . set_key_value ( value [ PUBLIC_KEY_STORE_TYPE_BASE64 ] , PUBLIC_KEY_STORE_TYPE_BASE64 ) elif PUBLIC_KEY_STORE_TYPE_BASE85 in value : self . set_key_value ( value [ PUBLIC_KEY_STORE_TYPE_BASE85 ] , PUBLIC_KEY_STORE_TYPE_BASE85 ) elif PUBLIC_KEY_STORE_TYPE_JWK in value : self . set_key_value ( value [ PUBLIC_KEY_STORE_TYPE_JWK ] , PUBLIC_KEY_STORE_TYPE_JWK ) elif PUBLIC_KEY_STORE_TYPE_PEM in value : self . set_key_value ( value [ PUBLIC_KEY_STORE_TYPE_PEM ] , PUBLIC_KEY_STORE_TYPE_PEM ) else : self . _value = value self . _store_type = store_type
Set the key value based on it s storage type .
319
11
234,373
def set_encode_key_value ( self , value , store_type ) : self . _store_type = store_type if store_type == PUBLIC_KEY_STORE_TYPE_HEX : self . _value = value . hex ( ) elif store_type == PUBLIC_KEY_STORE_TYPE_BASE64 : self . _value = b64encode ( value ) . decode ( ) elif store_type == PUBLIC_KEY_STORE_TYPE_BASE85 : self . _value = b85encode ( value ) . decode ( ) elif store_type == PUBLIC_KEY_STORE_TYPE_JWK : # TODO: need to decide on which jwk library to import? raise NotImplementedError else : self . _value = value return value
Save the key value base on it s storage type .
176
11
234,374
def get_decode_value ( self ) : if self . _store_type == PUBLIC_KEY_STORE_TYPE_HEX : value = bytes . fromhex ( self . _value ) elif self . _store_type == PUBLIC_KEY_STORE_TYPE_BASE64 : value = b64decode ( self . _value ) elif self . _store_type == PUBLIC_KEY_STORE_TYPE_BASE85 : value = b85decode ( self . _value ) elif self . _store_type == PUBLIC_KEY_STORE_TYPE_JWK : # TODO: need to decide on which jwk library to import? raise NotImplementedError else : value = self . _value return value
Return the key value based on it s storage type .
164
11
234,375
def as_text ( self , is_pretty = False ) : values = { 'id' : self . _id , 'type' : self . _type } if self . _owner : values [ 'owner' ] = self . _owner if is_pretty : return json . dumps ( values , indent = 4 , separators = ( ',' , ': ' ) ) return json . dumps ( values )
Return the key as JSON text .
87
7
234,376
def as_dictionary ( self ) : values = { 'id' : self . _id , 'type' : self . _type } if self . _owner : values [ 'owner' ] = self . _owner return values
Return the key as a python dictionary .
49
8
234,377
def get_agreement ( self , agreement_id ) : agreement = self . contract_concise . getAgreement ( agreement_id ) if agreement and len ( agreement ) == 6 : agreement = AgreementValues ( * agreement ) did = add_0x_prefix ( agreement . did . hex ( ) ) cond_ids = [ add_0x_prefix ( _id . hex ( ) ) for _id in agreement . condition_ids ] return AgreementValues ( did , agreement . owner , agreement . template_id , cond_ids , agreement . updated_by , agreement . block_number_updated ) return None
Retrieve the agreement for a agreement_id .
130
10
234,378
def _load ( contract_name ) : contract_definition = ContractHandler . get_contract_dict_by_name ( contract_name ) address = Web3Provider . get_web3 ( ) . toChecksumAddress ( contract_definition [ 'address' ] ) abi = contract_definition [ 'abi' ] contract = Web3Provider . get_web3 ( ) . eth . contract ( address = address , abi = abi ) ContractHandler . _contracts [ contract_name ] = ( contract , ConciseContract ( contract ) ) return ContractHandler . _contracts [ contract_name ]
Retrieve the contract instance for contract_name that represent the smart contract in the keeper network .
130
19
234,379
def get_contract_dict_by_name ( contract_name ) : network_name = Keeper . get_network_name ( Keeper . get_network_id ( ) ) . lower ( ) artifacts_path = ConfigProvider . get_config ( ) . keeper_path # file_name = '{}.{}.json'.format(contract_name, network_name) # path = os.path.join(keeper.artifacts_path, file_name) path = ContractHandler . _get_contract_file_path ( artifacts_path , contract_name , network_name ) if not ( path and os . path . exists ( path ) ) : path = ContractHandler . _get_contract_file_path ( artifacts_path , contract_name , network_name . lower ( ) ) if not ( path and os . path . exists ( path ) ) : path = ContractHandler . _get_contract_file_path ( artifacts_path , contract_name , Keeper . DEFAULT_NETWORK_NAME ) if not ( path and os . path . exists ( path ) ) : raise FileNotFoundError ( f'Keeper contract {contract_name} file ' f'not found in {artifacts_path} ' f'using network name {network_name}' ) with open ( path ) as f : contract_dict = json . loads ( f . read ( ) ) return contract_dict
Retrieve the Contract instance for a given contract name .
299
11
234,380
def buy_asset ( ) : ConfigProvider . set_config ( ExampleConfig . get_config ( ) ) config = ConfigProvider . get_config ( ) # make ocean instance ocn = Ocean ( ) acc = get_publisher_account ( config ) if not acc : acc = ( [ acc for acc in ocn . accounts . list ( ) if acc . password ] or ocn . accounts . list ( ) ) [ 0 ] # Register ddo ddo = ocn . assets . create ( Metadata . get_example ( ) , acc , providers = [ acc . address ] , use_secret_store = False ) logging . info ( f'registered ddo: {ddo.did}' ) # ocn here will be used only to publish the asset. Handling the asset by the publisher # will be performed by the Brizo server running locally keeper = Keeper . get_instance ( ) if 'TEST_LOCAL_NILE' in os . environ and os . environ [ 'TEST_LOCAL_NILE' ] == '1' : provider = keeper . did_registry . to_checksum_address ( '0x413c9ba0a05b8a600899b41b0c62dd661e689354' ) keeper . did_registry . add_provider ( ddo . asset_id , provider , acc ) logging . debug ( f'is did provider: ' f'{keeper.did_registry.is_did_provider(ddo.asset_id, provider)}' ) cons_ocn = Ocean ( ) consumer_account = get_account_from_config ( config , 'parity.address1' , 'parity.password1' ) # sign agreement using the registered asset did above service = ddo . get_service ( service_type = ServiceTypes . ASSET_ACCESS ) # This will send the order request to Brizo which in turn will execute the agreement on-chain cons_ocn . accounts . request_tokens ( consumer_account , 100 ) sa = ServiceAgreement . from_service_dict ( service . as_dictionary ( ) ) agreement_id = cons_ocn . assets . order ( ddo . did , sa . service_definition_id , consumer_account ) logging . info ( 'placed order: %s, %s' , ddo . did , agreement_id ) i = 0 while ocn . agreements . is_access_granted ( agreement_id , ddo . did , consumer_account . address ) is not True and i < 30 : time . sleep ( 1 ) i += 1 assert ocn . agreements . is_access_granted ( agreement_id , ddo . did , consumer_account . address ) ocn . assets . consume ( agreement_id , ddo . did , sa . service_definition_id , consumer_account , config . downloads_path ) logging . info ( 'Success buying asset.' )
Requires all ocean services running .
640
6
234,381
def token_approve ( self , spender_address , price , from_account ) : if not Web3Provider . get_web3 ( ) . isChecksumAddress ( spender_address ) : spender_address = Web3Provider . get_web3 ( ) . toChecksumAddress ( spender_address ) tx_hash = self . send_transaction ( 'approve' , ( spender_address , price ) , transact = { 'from' : from_account . address , 'passphrase' : from_account . password } ) return self . get_tx_receipt ( tx_hash ) . status == 1
Approve the passed address to spend the specified amount of tokens .
141
14
234,382
def transfer ( self , receiver_address , amount , from_account ) : tx_hash = self . send_transaction ( 'transfer' , ( receiver_address , amount ) , transact = { 'from' : from_account . address , 'passphrase' : from_account . password } ) return self . get_tx_receipt ( tx_hash ) . status == 1
Transfer tokens from one account to the receiver address .
83
10
234,383
def fulfill_access_secret_store_condition ( event , agreement_id , did , service_agreement , consumer_address , publisher_account ) : logger . debug ( f"release reward after event {event}." ) name_to_parameter = { param . name : param for param in service_agreement . condition_by_name [ 'accessSecretStore' ] . parameters } document_id = add_0x_prefix ( name_to_parameter [ '_documentId' ] . value ) asset_id = add_0x_prefix ( did_to_id ( did ) ) assert document_id == asset_id , f'document_id {document_id} <=> asset_id {asset_id} mismatch.' try : tx_hash = Keeper . get_instance ( ) . access_secret_store_condition . fulfill ( agreement_id , document_id , consumer_address , publisher_account ) process_tx_receipt ( tx_hash , Keeper . get_instance ( ) . access_secret_store_condition . FULFILLED_EVENT , 'AccessSecretStoreCondition.Fulfilled' ) except Exception as e : # logger.error(f'Error when calling grantAccess condition function: {e}') raise e
Fulfill the access condition .
277
7
234,384
def retire ( self , did ) : try : ddo = self . resolve ( did ) metadata_service = ddo . find_service_by_type ( ServiceTypes . METADATA ) self . _get_aquarius ( metadata_service . endpoints . service ) . retire_asset_ddo ( did ) return True except AquariusGenericError as err : logger . error ( err ) return False
Retire this did of Aquarius
87
7
234,385
def search ( self , text , sort = None , offset = 100 , page = 1 , aquarius_url = None ) : assert page >= 1 , f'Invalid page value {page}. Required page >= 1.' logger . info ( f'Searching asset containing: {text}' ) return [ DDO ( dictionary = ddo_dict ) for ddo_dict in self . _get_aquarius ( aquarius_url ) . text_search ( text , sort , offset , page ) [ 'results' ] ]
Search an asset in oceanDB using aquarius .
111
10
234,386
def query ( self , query , sort = None , offset = 100 , page = 1 , aquarius_url = None ) : logger . info ( f'Searching asset query: {query}' ) aquarius = self . _get_aquarius ( aquarius_url ) return [ DDO ( dictionary = ddo_dict ) for ddo_dict in aquarius . query_search ( query , sort , offset , page ) [ 'results' ] ]
Search an asset in oceanDB using search query .
98
10
234,387
def order ( self , did , service_definition_id , consumer_account , auto_consume = False ) : assert consumer_account . address in self . _keeper . accounts , f'Unrecognized consumer ' f'address `consumer_account`' agreement_id , signature = self . _agreements . prepare ( did , service_definition_id , consumer_account ) logger . debug ( f'about to request create agreement: {agreement_id}' ) self . _agreements . send ( did , agreement_id , service_definition_id , signature , consumer_account , auto_consume = auto_consume ) return agreement_id
Sign service agreement .
141
4
234,388
def consume ( self , service_agreement_id , did , service_definition_id , consumer_account , destination , index = None ) : ddo = self . resolve ( did ) if index is not None : assert isinstance ( index , int ) , logger . error ( 'index has to be an integer.' ) assert index >= 0 , logger . error ( 'index has to be 0 or a positive integer.' ) return self . _asset_consumer . download ( service_agreement_id , service_definition_id , ddo , consumer_account , destination , BrizoProvider . get_brizo ( ) , self . _get_secret_store ( consumer_account ) , index )
Consume the asset data .
150
6
234,389
def propose ( self , template_address , account ) : try : proposed = self . _keeper . template_manager . propose_template ( template_address , account ) return proposed except ValueError as err : template_values = self . _keeper . template_manager . get_template ( template_address ) if not template_values : logger . warning ( f'Propose template failed: {err}' ) return False if template_values . state != 1 : logger . warning ( f'Propose template failed, current state is set to {template_values.state}' ) return False return True
Propose a new template .
126
6
234,390
def approve ( self , template_address , account ) : try : approved = self . _keeper . template_manager . approve_template ( template_address , account ) return approved except ValueError as err : template_values = self . _keeper . template_manager . get_template ( template_address ) if not template_values : logger . warning ( f'Approve template failed: {err}' ) return False if template_values . state == 1 : logger . warning ( f'Approve template failed, this template is ' f'currently in "proposed" state.' ) return False if template_values . state == 3 : logger . warning ( f'Approve template failed, this template appears to be ' f'revoked.' ) return False if template_values . state == 2 : return True return False
Approve a template already proposed . The account needs to be owner of the templateManager contract to be able of approve the template .
175
27
234,391
def revoke ( self , template_address , account ) : try : revoked = self . _keeper . template_manager . revoke_template ( template_address , account ) return revoked except ValueError as err : template_values = self . _keeper . template_manager . get_template ( template_address ) if not template_values : logger . warning ( f'Cannot revoke template since it does not exist: {err}' ) return False logger . warning ( f'Only template admin or owner can revoke a template: {err}' ) return False
Revoke a template already approved . The account needs to be owner of the templateManager contract to be able of revoke the template .
116
26
234,392
def get_price ( self ) : for cond in self . conditions : for p in cond . parameters : if p . name == '_amount' : return p . value
Return the price from the conditions parameters .
36
8
234,393
def get_service_agreement_hash ( self , agreement_id , asset_id , consumer_address , publisher_address , keeper ) : agreement_hash = ServiceAgreement . generate_service_agreement_hash ( self . template_id , self . generate_agreement_condition_ids ( agreement_id , asset_id , consumer_address , publisher_address , keeper ) , self . conditions_timelocks , self . conditions_timeouts , agreement_id ) return agreement_hash
Return the hash of the service agreement values to be signed by a consumer .
107
15
234,394
def keeper_path ( self ) : keeper_path_string = self . get ( self . _section_name , NAME_KEEPER_PATH ) path = Path ( keeper_path_string ) . expanduser ( ) . resolve ( ) # TODO: Handle the default case and make default empty string # assert path.exists(), "Can't find the keeper path: {} ({})"..format(keeper_path_string, # path) if os . path . exists ( path ) : pass elif os . getenv ( 'VIRTUAL_ENV' ) : path = os . path . join ( os . getenv ( 'VIRTUAL_ENV' ) , 'artifacts' ) else : path = os . path . join ( site . PREFIXES [ 0 ] , 'artifacts' ) return path
Path where the keeper - contracts artifacts are allocated .
176
10
234,395
def add_public_key ( self , did , public_key ) : logger . debug ( f'Adding public key {public_key} to the did {did}' ) self . _public_keys . append ( PublicKeyBase ( did , * * { "owner" : public_key , "type" : "EthereumECDSAKey" } ) )
Add a public key object to the list of public keys .
78
12
234,396
def add_authentication ( self , public_key , authentication_type = None ) : authentication = { } if public_key : authentication = { 'type' : authentication_type , 'publicKey' : public_key } logger . debug ( f'Adding authentication {authentication}' ) self . _authentications . append ( authentication )
Add a authentication public key id and type to the list of authentications .
72
15
234,397
def add_service ( self , service_type , service_endpoint = None , values = None ) : if isinstance ( service_type , Service ) : service = service_type else : service = Service ( service_endpoint , service_type , values , did = self . _did ) logger . debug ( f'Adding service with service type {service_type} with did {self._did}' ) self . _services . append ( service )
Add a service to the list of services on the DDO .
97
13
234,398
def as_text ( self , is_proof = True , is_pretty = False ) : data = self . as_dictionary ( is_proof ) if is_pretty : return json . dumps ( data , indent = 2 , separators = ( ',' , ': ' ) ) return json . dumps ( data )
Return the DDO as a JSON text .
68
9
234,399
def as_dictionary ( self , is_proof = True ) : if self . _created is None : self . _created = DDO . _get_timestamp ( ) data = { '@context' : DID_DDO_CONTEXT_URL , 'id' : self . _did , 'created' : self . _created , } if self . _public_keys : values = [ ] for public_key in self . _public_keys : values . append ( public_key . as_dictionary ( ) ) data [ 'publicKey' ] = values if self . _authentications : values = [ ] for authentication in self . _authentications : values . append ( authentication ) data [ 'authentication' ] = values if self . _services : values = [ ] for service in self . _services : values . append ( service . as_dictionary ( ) ) data [ 'service' ] = values if self . _proof and is_proof : data [ 'proof' ] = self . _proof return data
Return the DDO as a JSON dict .
220
9