idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
1,100
def extend_identity ( identity , groups ) : provides = set ( [ UserNeed ( current_user . email ) ] + [ RoleNeed ( '{0}@cern.ch' . format ( name ) ) for name in groups ] ) identity . provides |= provides session [ OAUTHCLIENT_CERN_SESSION_KEY ] = provides
Extend identity with roles based on CERN groups .
76
11
1,101
def get_dict_from_response ( response ) : result = { } if getattr ( response , '_resp' ) and response . _resp . code > 400 : return result for i in response . data : # strip the schema from the key k = i [ 'Type' ] . replace ( REMOTE_APP_RESOURCE_SCHEMA , '' ) result . setdefault ( k , list ( ) ) result [ k ] . append ( i [ 'Value' ] ) return result
Prepare new mapping with Value s groupped by Type .
105
12
1,102
def get_resource ( remote ) : cached_resource = session . pop ( 'cern_resource' , None ) if cached_resource : return cached_resource response = remote . get ( REMOTE_APP_RESOURCE_API_URL ) dict_response = get_dict_from_response ( response ) session [ 'cern_resource' ] = dict_response return dict_response
Query CERN Resources to get user info and groups .
81
11
1,103
def on_identity_changed ( sender , identity ) : if isinstance ( identity , AnonymousIdentity ) : return client_id = current_app . config [ 'CERN_APP_CREDENTIALS' ] [ 'consumer_key' ] account = RemoteAccount . get ( user_id = current_user . get_id ( ) , client_id = client_id , ) groups = [ ] if account : remote = find_remote_by_client_id ( client_id ) resource = get_resource ( remote ) refresh = current_app . config . get ( 'OAUTHCLIENT_CERN_REFRESH_TIMEDELTA' , OAUTHCLIENT_CERN_REFRESH_TIMEDELTA ) groups . extend ( account_groups_and_extra_data ( account , resource , refresh_timedelta = refresh ) ) extend_identity ( identity , groups )
Store groups in session whenever identity changes .
201
8
1,104
def get ( cls , user_id , client_id ) : return cls . query . filter_by ( user_id = user_id , client_id = client_id , ) . first ( )
Get RemoteAccount object for user .
46
7
1,105
def create ( cls , user_id , client_id , extra_data ) : with db . session . begin_nested ( ) : account = cls ( user_id = user_id , client_id = client_id , extra_data = extra_data or dict ( ) ) db . session . add ( account ) return account
Create new remote account for user .
74
7
1,106
def update_token ( self , token , secret ) : if self . access_token != token or self . secret != secret : with db . session . begin_nested ( ) : self . access_token = token self . secret = secret db . session . add ( self )
Update token with new values .
59
6
1,107
def get ( cls , user_id , client_id , token_type = '' , access_token = None ) : args = [ RemoteAccount . id == RemoteToken . id_remote_account , RemoteAccount . user_id == user_id , RemoteAccount . client_id == client_id , RemoteToken . token_type == token_type , ] if access_token : args . append ( RemoteToken . access_token == access_token ) return cls . query . options ( db . joinedload ( 'remote_account' ) ) . filter ( * args ) . first ( )
Get RemoteToken for user .
128
6
1,108
def get_by_token ( cls , client_id , access_token , token_type = '' ) : return cls . query . options ( db . joinedload ( 'remote_account' ) ) . filter ( RemoteAccount . id == RemoteToken . id_remote_account , RemoteAccount . client_id == client_id , RemoteToken . token_type == token_type , RemoteToken . access_token == access_token , ) . first ( )
Get RemoteAccount object for token .
100
7
1,109
def bulk_export ( self , config_ids = None , device_ids = None , package_ids = None , result_ids = None , exclude_captures = False ) : if config_ids is None : config_ids = [ ] if device_ids is None : device_ids = [ ] if package_ids is None : package_ids = [ ] if result_ids is None : result_ids = [ ] json = { 'configs' : map ( int , config_ids ) , 'devices' : map ( int , device_ids ) , 'packages' : map ( int , package_ids ) , 'results' : map ( int , result_ids ) , 'options' : { 'exclude_captures' : exclude_captures } } resp = self . service . post ( self . base , json = json , stream = True ) b = io . BytesIO ( ) stream . stream_response_to_file ( resp , path = b ) resp . close ( ) b . seek ( 0 ) return ( b , self . service . filename ( resp ) )
Bulk export a set of configs devices packages and results .
235
13
1,110
def _init_report ( self ) : self . sections = [ ] self . section_names = [ ] # if the directory already exists, print a warning try : if os . path . isdir ( self . directory ) is False : if self . verbose : print ( "Created directory {}" . format ( self . directory ) ) os . mkdir ( self . directory ) # list of directories created in the constructor for this in self . _to_create : try : os . mkdir ( self . directory + os . sep + this ) except : pass # already created ? except Exception : pass finally : # Once the main directory is created, copy files required temp_path = easydev . get_package_location ( "reports" ) temp_path += os . sep + "reports" + os . sep + "resources" # Copy the CSS from reports/resources/css filenames = glob . glob ( os . sep . join ( [ temp_path , "css" , "*.css" ] ) ) # If there are CSS in the directory with JINJA templates, use them # as well filenames += glob . glob ( os . sep . join ( [ self . searchpath , '*.css' ] ) ) # In addition, the user may also provide his own CSS as a list filenames += self . extra_css_list for filename in filenames : target = os . sep . join ( [ self . directory , 'css' ] ) if os . path . isfile ( target ) is False : shutil . copy ( filename , target ) # We copy all javascript from reports resources for filename in [ 'sorttable.js' , 'highlight.pack.js' , "jquery-1.12.3.min.js" ] : target = os . sep . join ( [ self . directory , 'js' , filename ] ) if os . path . isfile ( target ) is False : filename = os . sep . join ( [ temp_path , "javascript" , filename ] ) shutil . copy ( filename , target ) for filename in self . extra_js_list : basename = os . path . basename ( filename ) target = os . sep . join ( [ self . directory , 'js' , basename ] ) if os . path . isfile ( target ) is False : shutil . copy ( filename , target )
create the report directory and return the directory name
506
9
1,111
def get_time_now ( self ) : import datetime import getpass username = getpass . getuser ( ) # this is not working on some systems: os.environ["USERNAME"] timenow = str ( datetime . datetime . now ( ) ) timenow = timenow . split ( '.' ) [ 0 ] msg = '<div class="date">Created on ' + timenow msg += " by " + username + '</div>' return msg
Returns a time stamp
105
4
1,112
def _track_class_related_field ( cls , field ) : # field = field on current model # related_field = field on related model ( field , related_field ) = field . split ( '__' , 1 ) field_obj = cls . _meta . get_field ( field ) related_cls = field_obj . remote_field . model related_name = field_obj . remote_field . get_accessor_name ( ) if not hasattr ( related_cls , '_tracked_related_fields' ) : setattr ( related_cls , '_tracked_related_fields' , { } ) if related_field not in related_cls . _tracked_related_fields . keys ( ) : related_cls . _tracked_related_fields [ related_field ] = [ ] # There can be several field from different or same model # related to a single model. # Thus _tracked_related_fields will be of the form: # { # 'field name on related model': [ # ('field name on current model', 'field name to current model'), # ('field name on another model', 'field name to another model'), # ... # ], # ... # } related_cls . _tracked_related_fields [ related_field ] . append ( ( field , related_name ) ) _add_signals_to_cls ( related_cls ) # Detect m2m fields changes if isinstance ( related_cls . _meta . get_field ( related_field ) , ManyToManyField ) : m2m_changed . connect ( tracking_m2m , sender = getattr ( related_cls , related_field ) . through , dispatch_uid = repr ( related_cls ) , )
Track a field on a related model
389
7
1,113
def _track_class_field ( cls , field ) : if '__' in field : _track_class_related_field ( cls , field ) return # Will raise FieldDoesNotExist if there is an error cls . _meta . get_field ( field ) # Detect m2m fields changes if isinstance ( cls . _meta . get_field ( field ) , ManyToManyField ) : m2m_changed . connect ( tracking_m2m , sender = getattr ( cls , field ) . through , dispatch_uid = repr ( cls ) , )
Track a field on the current model
129
7
1,114
def _track_class ( cls , fields ) : # Small tests to ensure everything is all right assert not getattr ( cls , '_is_tracked' , False ) for field in fields : _track_class_field ( cls , field ) _add_signals_to_cls ( cls ) # Mark the class as tracked cls . _is_tracked = True # Do not directly track related fields (tracked on related model) # or m2m fields (tracked by another signal) cls . _tracked_fields = [ field for field in fields if '__' not in field ]
Track fields on the specified model
136
6
1,115
def _add_get_tracking_url ( cls ) : def get_tracking_url ( self ) : """ return url to tracking view in admin panel """ url = reverse ( 'admin:tracking_fields_trackingevent_changelist' ) object_id = '{0}%3A{1}' . format ( ContentType . objects . get_for_model ( self ) . pk , self . pk ) return '{0}?object={1}' . format ( url , object_id ) if not hasattr ( cls , 'get_tracking_url' ) : setattr ( cls , 'get_tracking_url' , get_tracking_url )
Add a method to get the tracking url of an object .
150
12
1,116
def track ( * fields ) : def inner ( cls ) : _track_class ( cls , fields ) _add_get_tracking_url ( cls ) return cls return inner
Decorator used to track changes on Model s fields .
41
12
1,117
def indent ( value , n = 2 , character = ' ' ) : prefix = n * character return '\n' . join ( prefix + line for line in value . splitlines ( ) )
Indent a value by n character s
41
8
1,118
def check_instance ( function ) : def wrapper ( self , * args , * * kwargs ) : func_trans = { "commit" : manager . Manager , "compare_config" : manager . Manager , "commit_check" : manager . Manager , "device_info" : manager . Manager , "diff_config" : manager . Manager , "health_check" : manager . Manager , "interface_errors" : manager . Manager , "op_cmd" : paramiko . client . SSHClient , "shell_cmd" : paramiko . client . SSHClient , "scp_pull" : paramiko . client . SSHClient , "scp_push" : paramiko . client . SSHClient } # when doing an operational command, logging in as root # brings you to shell, so we need to enter the device as a shell # connection, and move to cli to perform the command # this is a one-off because the isinstance() check will be bypassed if self . username == "root" and function . __name__ == "op_cmd" : if not self . _session : self . conn_type = "paramiko" self . connect ( ) if not self . _shell : self . conn_type = "root" self . connect ( ) self . shell_to_cli ( ) # check if we're in the cli # Have to call shell command separately, since we are using _shell # for comparison, not _session. elif function . __name__ == 'shell_cmd' : if not self . _shell : self . conn_type = "shell" self . connect ( ) self . cli_to_shell ( ) # check if we're in shell. if isinstance ( self . _session , func_trans [ function . __name__ ] ) : # If they're doing SCP, we have to check for both _session and # _scp if function . __name__ in [ 'scp_pull' , 'scp_push' ] : if not isinstance ( self . _scp , SCPClient ) : self . conn_type = "scp" self . connect ( ) else : self . disconnect ( ) if function . __name__ == "op_cmd" : self . conn_type = "paramiko" elif function . __name__ in [ "scp_pull" , "scp_push" ] : self . conn_type = "scp" else : self . conn_type = "ncclient" self . connect ( ) return function ( self , * args , * * kwargs ) return wrapper
Wrapper that tests the type of _session .
561
10
1,119
def commit ( self , commands = "" , confirmed = None , comment = None , at_time = None , synchronize = False , req_format = 'text' ) : # ncclient doesn't support a truly blank commit, so if nothing is # passed, use 'annotate system' to make a blank commit if not commands : commands = 'annotate system ""' clean_cmds = [ ] for cmd in clean_lines ( commands ) : clean_cmds . append ( cmd ) # try to lock the candidate config so we can make changes. self . lock ( ) self . _session . load_configuration ( action = 'set' , config = commands ) results = "" # confirmed and commit at are mutually exclusive. commit confirm # takes precedence. if confirmed : results = self . _session . commit ( confirmed = True , timeout = str ( confirmed ) , comment = comment , synchronize = synchronize ) else : results = self . _session . commit ( comment = comment , at_time = at_time , synchronize = synchronize ) self . unlock ( ) if results : if req_format == 'xml' : return results # commit() DOES NOT return a parse-able xml tree, so we # convert it to an ElementTree xml tree. results = ET . fromstring ( results . tostring ) out = '' for i in results . iter ( ) : # the success message is just a tag, so we need to get it # specifically. if i . tag == 'commit-check-success' : out += 'configuration check succeeds\n' elif i . tag == 'commit-success' : out += 'commit complete\n' elif i . tag == 'ok' : out += 'commit complete\n' # this is for normal output with a tag and inner text, it will # strip the inner text and add it to the output. elif i . text is not None : if i . text . strip ( ) + '\n' != '\n' : out += i . text . strip ( ) + '\n' # this is for elements that don't have inner text, # it will add the tag to the output. elif i . text is None : if i . tag + '\n' != '\n' : out += i . tag + '\n' return out return False
Perform a commit operation .
499
6
1,120
def commit_check ( self , commands = "" , req_format = "text" ) : if not commands : raise InvalidCommandError ( 'No commands specified' ) clean_cmds = [ ] for cmd in clean_lines ( commands ) : clean_cmds . append ( cmd ) self . lock ( ) self . _session . load_configuration ( action = 'set' , config = clean_cmds ) # conn.validate() DOES NOT return a parse-able xml tree, so we # convert it to an ElementTree xml tree. results = ET . fromstring ( self . _session . validate ( source = 'candidate' ) . tostring ) # release the candidate configuration self . unlock ( ) if req_format == "xml" : return ET . tostring ( results ) out = "" # we have to parse the elementTree object, and get the text # from the xml. for i in results . iter ( ) : # the success message is just a tag, so we need to get it # specifically. if i . tag == 'commit-check-success' : out += 'configuration check succeeds\n' # this is for normal output with a tag and inner text, it will # strip the inner text and add it to the output. elif i . text is not None : if i . text . strip ( ) + '\n' != '\n' : out += i . text . strip ( ) + '\n' # this is for elements that don't have inner text, it will add the # tag to the output. elif i . text is None : if i . tag + '\n' != '\n' : out += i . tag + '\n' return out
Execute a commit check operation .
367
7
1,121
def compare_config ( self , commands = "" , req_format = "text" ) : if not commands : raise InvalidCommandError ( 'No commands specified' ) clean_cmds = [ cmd for cmd in clean_lines ( commands ) ] self . lock ( ) self . _session . load_configuration ( action = 'set' , config = clean_cmds ) out = self . _session . compare_configuration ( ) self . unlock ( ) if req_format . lower ( ) == "xml" : return out return out . xpath ( 'configuration-information/configuration-output' ) [ 0 ] . text
Execute a show | compare against the specified commands .
136
11
1,122
def connect ( self ) : if self . conn_type == 'paramiko' : self . _session = paramiko . SSHClient ( ) # These two lines set the paramiko logging to Critical to # remove extra messages from being sent to the user output. logger = logging . Logger . manager . getLogger ( 'paramiko.transport' ) logger . setLevel ( logging . CRITICAL ) self . _session . set_missing_host_key_policy ( paramiko . AutoAddPolicy ( ) ) self . _session . connect ( hostname = self . host , username = self . username , password = self . password , port = self . port , timeout = self . connect_timeout ) if self . conn_type == 'scp' : self . _scp_session = paramiko . SSHClient ( ) logger = logging . Logger . manager . getLogger ( 'paramiko.transport' ) logger . setLevel ( logging . CRITICAL ) self . _scp_session . set_missing_host_key_policy ( paramiko . AutoAddPolicy ( ) ) self . _scp_session . connect ( hostname = self . host , username = self . username , password = self . password , port = self . port , timeout = self . connect_timeout ) self . _scp = SCPClient ( self . _scp_session . get_transport ( ) ) elif self . conn_type == "ncclient" : self . _session = manager . connect ( host = self . host , port = self . port , username = self . username , password = self . password , timeout = self . connect_timeout , device_params = { 'name' : 'junos' } , hostkey_verify = False ) elif self . conn_type == 'shell' : if not self . _session : self . conn_type = 'paramiko' self . connect ( ) self . conn_type = 'shell' if not self . _shell : self . _shell = self . _session . invoke_shell ( ) time . sleep ( 2 ) if self . username != 'root' and not self . _in_cli : self . _in_cli = True if not self . cli_to_shell ( ) : self . _shell . recv ( 9999 ) elif self . conn_type == 'root' : # open the shell if necessary, and move into CLI if not self . _shell : self . _shell = self . _session . invoke_shell ( ) time . sleep ( 2 ) if not self . shell_to_cli ( ) : self . _shell . recv ( 9999 ) self . _update_timeout ( self . session_timeout )
Establish a connection to the device .
589
8
1,123
def _copy_status ( self , filename , size , sent ) : output = "Transferred %.0f%% of the file %s" % ( ( float ( sent ) / float ( size ) * 100 ) , path . normpath ( filename ) ) output += ( ' ' * ( 120 - len ( output ) ) ) if filename != self . _filename : if self . _filename is not None : print ( '' ) self . _filename = filename print ( output , end = '\r' )
Echo status of an SCP operation .
108
8
1,124
def diff_config ( self , second_host , mode = 'stanza' ) : second_conn = manager . connect ( host = second_host , port = self . port , username = self . username , password = self . password , timeout = self . connect_timeout , device_params = { 'name' : 'junos' } , hostkey_verify = False ) command = 'show configuration' if mode == 'set' : command += ' | display set' # get the raw xml config config1 = self . _session . command ( command , format = 'text' ) # for each /configuration-output snippet, turn it to text and join them config1 = '' . join ( [ snippet . text . lstrip ( '\n' ) for snippet in config1 . xpath ( '//configuration-output' ) ] ) config2 = second_conn . command ( command , format = 'text' ) config2 = '' . join ( [ snippet . text . lstrip ( '\n' ) for snippet in config2 . xpath ( '//configuration-output' ) ] ) return difflib . unified_diff ( config1 . splitlines ( ) , config2 . splitlines ( ) , self . host , second_host )
Generate configuration differences with a second device .
270
9
1,125
def _error_parse ( self , interface , face ) : try : error_list = interface . xpath ( face + '-error-list' ) [ 0 ] . getchildren ( ) except IndexError : # no error list on this interface pass else : for x in range ( len ( error_list ) ) : if error_list [ x ] . tag == "carrier-transitions" : if int ( error_list [ x ] . text . strip ( ) ) > 50 : yield " has greater than 50 flaps." elif int ( error_list [ x ] . text . strip ( ) ) > 0 : yield " has %s of %s." % ( error_list [ x ] . text . strip ( ) , error_list [ x ] . tag . strip ( ) )
Parse the extensive xml output of an interface and yield errors .
171
13
1,126
def health_check ( self ) : output = 'Chassis Alarms:\n\t' # Grab chassis alarms, system alarms, show chassis routing-engine, # 'show system processes extensive', and also xpath to the # relevant nodes on each. chassis_alarms = self . _session . command ( "show chassis alarms" ) chassis_alarms = chassis_alarms . xpath ( '//alarm-detail' ) system_alarms = self . _session . command ( "show system alarms" ) system_alarms = system_alarms . xpath ( '//alarm-detail' ) chass = self . _session . command ( command = "show chassis routing-engine" , format = 'text' ) . xpath ( '//output' ) [ 0 ] . text proc = self . _session . command ( "show system processes extensive" ) proc = proc . xpath ( 'output' ) [ 0 ] . text . split ( '\n' ) if chassis_alarms == [ ] : # Chassis Alarms output += 'No chassis alarms active.\n' else : for i in chassis_alarms : output += ( i . xpath ( 'alarm-class' ) [ 0 ] . text . strip ( ) + ' Alarm \t' '\t' + i . xpath ( 'alarm-time' ) [ 0 ] . text . strip ( ) + '\n\t' + i . xpath ( 'alarm-description' ) [ 0 ] . text . strip ( ) + '\n' ) output += '\nSystem Alarms: \n\t' if system_alarms == [ ] : # System Alarms output += 'No system alarms active.\n' else : for i in system_alarms : output += ( i . xpath ( 'alarm-class' ) [ 0 ] . text . strip ( ) + ' Alarm ' '\t\t' + i . xpath ( 'alarm-time' ) [ 0 ] . text . strip ( ) + '\n\t' + i . xpath ( 'alarm-description' ) [ 0 ] . text . strip ( ) + '\n' ) # add the output of the show chassis routing-engine to the command. output += '\n' + chass # Grabs the top 5 processes and the header line. output += ( '\n\nTop 5 busiest processes (high mgd values likely from ' 'script execution):\n' ) for line_number in range ( 8 , 14 ) : output += proc [ line_number ] + '\n' return output
Pull health and alarm information from the device .
569
9
1,127
def interface_errors ( self ) : output = [ ] # used to store the list of interfaces with errors. # get a string of each physical and logical interface element dev_response = self . _session . command ( 'sh interfaces extensive' ) ints = dev_response . xpath ( '//physical-interface' ) ints += dev_response . xpath ( '//logical-interface' ) for i in ints : # Grab the interface name for user output. int_name = i . xpath ( 'name' ) [ 0 ] . text . strip ( ) # Only check certain interface types. if ( ( 'ge' or 'fe' or 'ae' or 'xe' or 'so' or 'et' or 'vlan' or 'lo0' or 'irb' ) in int_name ) : try : status = ( i . xpath ( 'admin-status' ) [ 0 ] . text . strip ( ) + '/' + i . xpath ( 'oper-status' ) [ 0 ] . text . strip ( ) ) except IndexError : pass else : for error in self . _error_parse ( i , "input" ) : output . append ( "%s (%s)%s" % ( int_name , status , error ) ) for error in self . _error_parse ( i , "output" ) : output . append ( "%s (%s)%s" % ( int_name , status , error ) ) if output == [ ] : output . append ( 'No interface errors were detected on this device.' ) return '\n' . join ( output ) + '\n'
Parse show interfaces extensive and return interfaces with errors .
351
11
1,128
def lock ( self ) : if isinstance ( self . _session , manager . Manager ) : self . _session . lock ( )
Lock the candidate config . Requires ncclient . manager . Manager .
28
15
1,129
def op_cmd ( self , command , req_format = 'text' , xpath_expr = "" ) : if not command : raise InvalidCommandError ( "Parameter 'command' cannot be empty" ) if req_format . lower ( ) == 'xml' or xpath_expr : command = command . strip ( ) + ' | display xml' command = command . strip ( ) + ' | no-more\n' out = '' # when logging in as root, we use _shell to get the response. if self . username == 'root' : self . _shell . send ( command ) time . sleep ( 3 ) while self . _shell . recv_ready ( ) : out += self . _shell . recv ( 999999 ) time . sleep ( .75 ) # take off the command being sent and the prompt at the end. out = '\n' . join ( out . split ( '\n' ) [ 1 : - 2 ] ) # not logging in as root, and can grab the output as normal. else : stdin , stdout , stderr = self . _session . exec_command ( command = command , timeout = float ( self . session_timeout ) ) stdin . close ( ) # read normal output while not stdout . channel . exit_status_ready ( ) : out += stdout . read ( ) stdout . close ( ) # read errors while not stderr . channel . exit_status_ready ( ) : out += stderr . read ( ) stderr . close ( ) return out if not xpath_expr else xpath ( out , xpath_expr )
Execute an operational mode command .
350
7
1,130
def unlock ( self ) : if isinstance ( self . _session , manager . Manager ) : self . _session . unlock ( )
Unlock the candidate config .
28
6
1,131
def intercept ( obj , methodname , wrapper ) : original = getattr ( obj , methodname ) def replacement ( * args , * * kwargs ) : wrapfn = wrapper ( * args , * * kwargs ) wrapfn . send ( None ) result = original ( * args , * * kwargs ) try : wrapfn . send ( result ) except StopIteration : return result else : raise AssertionError ( 'Generator did not stop' ) def unwrap ( ) : """ Restores the method to it's original (unwrapped) state. """ setattr ( obj , methodname , original ) replacement . unwrap = unwrap setattr ( obj , methodname , replacement )
Wraps an existing method on an object with the provided generator which will be sent the value when it yields control .
150
23
1,132
def next ( self ) : # need new iterable? if self . r == self . repeats : self . i = ( self . i + 1 ) % self . lenght self . r = 0 self . r += 1 if self . stopping and self . i == 0 and self . r == 1 : self . stopped = True if self . i == 0 and self . stopped : raise StopIteration else : iterator = self . iterators [ self . i ] return iterator . next ( )
Returns the next element or raises StopIteration if stopped .
104
12
1,133
def next ( self ) : return self . iterator . next ( task = self . task , timeout = self . timeout , block = self . block )
Returns a result if availble within timeout else raises a TimeoutError exception . See documentation for NuMap . next .
31
24
1,134
def write_template ( fn , lang = "python" ) : with open ( fn , "wb" ) as fh : if lang == "python" : fh . write ( PY_TEMPLATE ) elif lang == "bash" : fh . write ( SH_TEMPLATE )
Write language - specific script template to file .
67
9
1,135
def script ( inbox , cfg ) : script_name = cfg [ "id" ] script_id = str ( abs ( hash ( ( cfg [ "id" ] , ) + tuple ( inbox [ 0 ] . values ( ) ) ) ) ) [ 0 : 8 ] # LOG.log(mp.DEFAULT, "@papy;script %s:%s started" % (script_name, script_id)) # LOG.log(mp.SUBDEFAULT, "@papy;%s:%s received: %s" % (script_name, script_id, inbox)) args = { } args [ "params" ] = dict ( cfg [ "params" ] ) args [ "in" ] = { } for in_port in cfg [ "in" ] : for inin_ports in inbox : in_path = inin_ports . get ( in_port , None ) if ( in_path is not None ) : # first matching input-output (including type) port is linked remaining ignored args [ "in" ] [ in_port ] = in_path break # check that all input ports are connected if len ( args [ "in" ] ) < len ( cfg [ "in" ] ) : raise Exception ( "not all in_ports connected, got: %s" % ( args [ "in" ] , ) ) # create output file for out_ports args [ "out" ] = { } out = { } for i , ( out_port , out_ext ) in enumerate ( cfg [ "out" ] ) : if cfg [ "in" ] == tuple ( out_port_ for out_port_ , _ in cfg [ "out" ] ) : pfx = args [ "in" ] [ cfg [ "in" ] [ i ] ] . split ( "/" ) [ - 1 ] . split ( "." ) [ 0 ] + "_" base = cfg [ "id" ] else : pfx = args [ "in" ] [ cfg [ "in" ] [ 0 ] ] . split ( "/" ) [ - 1 ] . split ( "." ) [ 0 ] + "_" base = cfg [ "id" ] + "-" + out_port if out_ext : out_path = cfg [ "dir" ] + "/" + pfx + base + "." + out_ext else : out_path = cfg [ "dir" ] + "/" + pfx + base args [ "out" ] [ out_port ] = out_path out [ out_port ] = out_path # evaluate and check for errors ret = _eval_script ( cfg [ "evaluator" ] , cfg [ "preamble" ] , cfg [ "dir" ] , cfg [ "executable" ] , cfg [ "script" ] , args ) if ret [ 0 ] != 0 : # LOG.error("@papy;%s:%s %s:%s:%s" % (script_name, script_id, ret[0], # ret[1].replace("\n", "<br>"), # ret[2].replace("\n", "<br>"))) raise Exception ( ret [ 0 ] , cfg [ "script" ] , ret [ 1 ] , ret [ 2 ] ) #LOG.log(mp.SUBDEFAULT, "@papy;%s:%s produced:%s" % (script_name, script_id, out)) #LOG.log(mp.DEFAULT, "@papy;script %s:%s finished" % (script_name, script_id)) return out
Execute arbitrary scripts .
795
5
1,136
def edit ( self , resource ) : schema = JobSchema ( exclude = ( 'id' , 'status' , 'options' , 'package_name' , 'config_name' , 'device_name' , 'result_id' , 'user_id' , 'created' , 'updated' , 'automatic' , 'run_at' ) ) json = self . service . encode ( schema , resource ) schema = JobSchema ( ) resp = self . service . edit ( self . base , resource . name , json ) return self . service . decode ( schema , resp )
Edit a job .
126
4
1,137
def launch ( self , resource ) : schema = JobSchema ( exclude = ( 'id' , 'status' , 'package_name' , 'config_name' , 'device_name' , 'result_id' , 'user_id' , 'created' , 'updated' , 'automatic' ) ) json = self . service . encode ( schema , resource ) schema = JobSchema ( ) resp = self . service . create ( self . base , json ) return self . service . decode ( schema , resp )
Launch a new job .
112
5
1,138
def bulk_launch ( self , jobs = None , filter = None , all = False ) : # pylint: disable=redefined-builtin json = None if jobs is not None : schema = JobSchema ( exclude = ( 'id' , 'status' , 'package_name' , 'config_name' , 'device_name' , 'result_id' , 'user_id' , 'created' , 'updated' , 'automatic' ) ) jobs_json = self . service . encode ( schema , jobs , many = True ) json = { self . RESOURCE : jobs_json } schema = JobSchema ( ) resp = self . service . post ( self . base , params = { 'bulk' : 'launch' , 'filter' : filter , 'all' : all } , json = json ) return self . service . decode ( schema , resp , many = True )
Bulk launch a set of jobs .
194
8
1,139
def get ( self , id , seq , line ) : # pylint: disable=invalid-name,redefined-builtin schema = HighlightSchema ( ) resp = self . service . get_id ( self . _base ( id , seq ) , line ) return self . service . decode ( schema , resp )
Get a highlight .
70
4
1,140
def create_or_edit ( self , id , seq , resource ) : # pylint: disable=invalid-name,redefined-builtin schema = HighlightSchema ( exclude = ( 'id' , 'seq' ) ) json = self . service . encode ( schema , resource ) schema = HighlightSchema ( ) resp = self . service . edit ( self . _base ( id , seq ) , resource . line , json ) return self . service . decode ( schema , resp )
Create or edit a highlight .
107
6
1,141
def create ( self , id , seq , resource ) : # pylint: disable=invalid-name,redefined-builtin return self . create_or_edit ( id , seq , resource )
Create a highlight .
44
4
1,142
def edit ( self , id , seq , resource ) : # pylint: disable=invalid-name,redefined-builtin return self . create_or_edit ( id , seq , resource )
Edit a highlight .
44
4
1,143
def delete ( self , id , seq , line ) : # pylint: disable=invalid-name,redefined-builtin return self . service . delete_id ( self . _base ( id , seq ) , line )
Delete a highlight .
50
4
1,144
def post_ext_init ( state ) : app = state . app app . config . setdefault ( 'OAUTHCLIENT_SITENAME' , app . config . get ( 'THEME_SITENAME' , 'Invenio' ) ) app . config . setdefault ( 'OAUTHCLIENT_BASE_TEMPLATE' , app . config . get ( 'BASE_TEMPLATE' , 'invenio_oauthclient/base.html' ) ) app . config . setdefault ( 'OAUTHCLIENT_COVER_TEMPLATE' , app . config . get ( 'COVER_TEMPLATE' , 'invenio_oauthclient/base_cover.html' ) ) app . config . setdefault ( 'OAUTHCLIENT_SETTINGS_TEMPLATE' , app . config . get ( 'SETTINGS_TEMPLATE' , 'invenio_oauthclient/settings/base.html' ) )
Setup blueprint .
223
3
1,145
def login ( remote_app ) : oauth = current_app . extensions [ 'oauthlib.client' ] if remote_app not in oauth . remote_apps : return abort ( 404 ) # Get redirect target in safe manner. next_param = get_safe_redirect_target ( arg = 'next' ) # Redirect URI - must be registered in the remote service. callback_url = url_for ( '.authorized' , remote_app = remote_app , _external = True , ) # Create a JSON Web Token that expires after OAUTHCLIENT_STATE_EXPIRES # seconds. state_token = serializer . dumps ( { 'app' : remote_app , 'next' : next_param , 'sid' : _create_identifier ( ) , } ) return oauth . remote_apps [ remote_app ] . authorize ( callback = callback_url , state = state_token , )
Send user to remote application for authentication .
200
8
1,146
def authorized ( remote_app = None ) : if remote_app not in current_oauthclient . handlers : return abort ( 404 ) state_token = request . args . get ( 'state' ) # Verify state parameter try : assert state_token # Checks authenticity and integrity of state and decodes the value. state = serializer . loads ( state_token ) # Verify that state is for this session, app and that next parameter # have not been modified. assert state [ 'sid' ] == _create_identifier ( ) assert state [ 'app' ] == remote_app # Store next URL set_session_next_url ( remote_app , state [ 'next' ] ) except ( AssertionError , BadData ) : if current_app . config . get ( 'OAUTHCLIENT_STATE_ENABLED' , True ) or ( not ( current_app . debug or current_app . testing ) ) : abort ( 403 ) try : handler = current_oauthclient . handlers [ remote_app ] ( ) except OAuthException as e : if e . type == 'invalid_response' : abort ( 500 ) else : raise return handler
Authorized handler callback .
249
5
1,147
def signup ( remote_app ) : if remote_app not in current_oauthclient . signup_handlers : return abort ( 404 ) res = current_oauthclient . signup_handlers [ remote_app ] [ 'view' ] ( ) return abort ( 404 ) if res is None else res
Extra signup step .
68
5
1,148
def disconnect ( remote_app ) : if remote_app not in current_oauthclient . disconnect_handlers : return abort ( 404 ) ret = current_oauthclient . disconnect_handlers [ remote_app ] ( ) db . session . commit ( ) return ret
Disconnect user from remote application .
58
7
1,149
def address_checksum ( address ) : address_bytes = address h = blake2b ( digest_size = 5 ) h . update ( address_bytes ) checksum = bytearray ( h . digest ( ) ) checksum . reverse ( ) return checksum
Returns the checksum in bytes for an address in bytes
58
11
1,150
def keypair_from_seed ( seed , index = 0 ) : h = blake2b ( digest_size = 32 ) h . update ( seed + struct . pack ( ">L" , index ) ) priv_key = h . digest ( ) pub_key = private_to_public_key ( priv_key ) return { 'private' : priv_key , 'public' : pub_key }
Generates a deterministic keypair from seed based on index
89
12
1,151
def verify_signature ( message , signature , public_key ) : try : ed25519_blake2 . checkvalid ( signature , message , public_key ) except ed25519_blake2 . SignatureMismatch : return False return True
Verifies signature is correct for a message signed with public_key
54
13
1,152
def sign_message ( message , private_key , public_key = None ) : if public_key is None : public_key = private_to_public_key ( private_key ) return ed25519_blake2 . signature_unsafe ( message , private_key , public_key )
Signs a message using private_key and public_key
65
12
1,153
def check_for_lounge_upgrade ( self , email , password ) : schema = ReleaseSchema ( ) resp = self . service . post ( self . base + 'lounge/check/' , json = { 'email' : email , 'password' : password } ) return self . service . decode ( schema , resp )
Check the CDRouter Support Lounge for eligible upgrades using your Support Lounge email & password .
72
18
1,154
def lounge_upgrade ( self , email , password , release_id ) : schema = UpgradeSchema ( ) resp = self . service . post ( self . base + 'lounge/upgrade/' , json = { 'email' : email , 'password' : password , 'release' : { 'id' : int ( release_id ) } } ) return self . service . decode ( schema , resp )
Download & install an upgrade from the CDRouter Support Lounge using your Support Lounge email & password . Please note that any running tests will be stopped .
89
30
1,155
def lounge_update_license ( self ) : schema = UpgradeSchema ( ) resp = self . service . post ( self . base + 'license/' ) return self . service . decode ( schema , resp )
Download & install a license for your CDRouter system from the CDRouter Support Lounge .
45
19
1,156
def manual_update_license ( self , fd , filename = 'cdrouter.lic' ) : schema = UpgradeSchema ( ) resp = self . service . post ( self . base + 'license/' , files = { 'file' : ( filename , fd ) } ) return self . service . decode ( schema , resp )
Update the license on your CDRouter system manually by uploading a . lic license from the CDRouter Support Lounge .
73
24
1,157
def space ( self ) : schema = SpaceSchema ( ) resp = self . service . get ( self . base + 'space/' ) return self . service . decode ( schema , resp )
Get system disk space usage .
41
6
1,158
def interfaces ( self , addresses = False ) : schema = InterfaceSchema ( ) resp = self . service . get ( self . base + 'interfaces/' , params = { 'addresses' : addresses } ) return self . service . decode ( schema , resp , many = True )
Get system interfaces .
61
4
1,159
def _set_original_fields ( instance ) : original_fields = { } def _set_original_field ( instance , field ) : if instance . pk is None : original_fields [ field ] = None else : if isinstance ( instance . _meta . get_field ( field ) , ForeignKey ) : # Only get the PK, we don't want to get the object # (which would make an additional request) original_fields [ field ] = getattr ( instance , '{0}_id' . format ( field ) ) else : original_fields [ field ] = getattr ( instance , field ) for field in getattr ( instance , '_tracked_fields' , [ ] ) : _set_original_field ( instance , field ) for field in getattr ( instance , '_tracked_related_fields' , { } ) . keys ( ) : _set_original_field ( instance , field ) instance . _original_fields = original_fields # Include pk to detect the creation of an object instance . _original_fields [ 'pk' ] = instance . pk
Save fields value only for non - m2m fields .
238
12
1,160
def _has_changed ( instance ) : for field , value in instance . _original_fields . items ( ) : if field != 'pk' and not isinstance ( instance . _meta . get_field ( field ) , ManyToManyField ) : try : if field in getattr ( instance , '_tracked_fields' , [ ] ) : if isinstance ( instance . _meta . get_field ( field ) , ForeignKey ) : if getattr ( instance , '{0}_id' . format ( field ) ) != value : return True else : if getattr ( instance , field ) != value : return True except TypeError : # Can't compare old and new value, should be different. return True return False
Check if some tracked fields have changed
157
7
1,161
def _has_changed_related ( instance ) : tracked_related_fields = getattr ( instance , '_tracked_related_fields' , { } ) . keys ( ) for field , value in instance . _original_fields . items ( ) : if field != 'pk' and not isinstance ( instance . _meta . get_field ( field ) , ManyToManyField ) : if field in tracked_related_fields : if isinstance ( instance . _meta . get_field ( field ) , ForeignKey ) : if getattr ( instance , '{0}_id' . format ( field ) ) != value : return True else : if getattr ( instance , field ) != value : return True return False
Check if some related tracked fields have changed
155
8
1,162
def _create_event ( instance , action ) : user = None user_repr = repr ( user ) if CUSER : user = CuserMiddleware . get_user ( ) user_repr = repr ( user ) if user is not None and user . is_anonymous : user = None return TrackingEvent . objects . create ( action = action , object = instance , object_repr = repr ( instance ) , user = user , user_repr = user_repr , )
Create a new event getting the use if django - cuser is available .
105
16
1,163
def _create_tracked_field ( event , instance , field , fieldname = None ) : fieldname = fieldname or field if isinstance ( instance . _meta . get_field ( field ) , ForeignKey ) : # We only have the pk, we need to get the actual object model = instance . _meta . get_field ( field ) . remote_field . model pk = instance . _original_fields [ field ] try : old_value = model . objects . get ( pk = pk ) except model . DoesNotExist : old_value = None else : old_value = instance . _original_fields [ field ] return TrackedFieldModification . objects . create ( event = event , field = fieldname , old_value = _serialize_field ( old_value ) , new_value = _serialize_field ( getattr ( instance , field ) ) )
Create a TrackedFieldModification for the instance .
194
11
1,164
def _create_create_tracking_event ( instance ) : event = _create_event ( instance , CREATE ) for field in instance . _tracked_fields : if not isinstance ( instance . _meta . get_field ( field ) , ManyToManyField ) : _create_tracked_field ( event , instance , field )
Create a TrackingEvent and TrackedFieldModification for a CREATE event .
73
16
1,165
def _create_update_tracking_event ( instance ) : event = _create_event ( instance , UPDATE ) for field in instance . _tracked_fields : if not isinstance ( instance . _meta . get_field ( field ) , ManyToManyField ) : try : if isinstance ( instance . _meta . get_field ( field ) , ForeignKey ) : # Compare pk value = getattr ( instance , '{0}_id' . format ( field ) ) else : value = getattr ( instance , field ) if instance . _original_fields [ field ] != value : _create_tracked_field ( event , instance , field ) except TypeError : # Can't compare old and new value, should be different. _create_tracked_field ( event , instance , field )
Create a TrackingEvent and TrackedFieldModification for an UPDATE event .
173
15
1,166
def _create_update_tracking_related_event ( instance ) : events = { } # Create a dict mapping related model field to modified fields for field , related_fields in instance . _tracked_related_fields . items ( ) : if not isinstance ( instance . _meta . get_field ( field ) , ManyToManyField ) : if isinstance ( instance . _meta . get_field ( field ) , ForeignKey ) : # Compare pk value = getattr ( instance , '{0}_id' . format ( field ) ) else : value = getattr ( instance , field ) if instance . _original_fields [ field ] != value : for related_field in related_fields : events . setdefault ( related_field , [ ] ) . append ( field ) # Create the events from the events dict for related_field , fields in events . items ( ) : try : related_instances = getattr ( instance , related_field [ 1 ] ) except ObjectDoesNotExist : continue # FIXME: isinstance(related_instances, RelatedManager ?) if hasattr ( related_instances , 'all' ) : related_instances = related_instances . all ( ) else : related_instances = [ related_instances ] for related_instance in related_instances : event = _create_event ( related_instance , UPDATE ) for field in fields : fieldname = '{0}__{1}' . format ( related_field [ 0 ] , field ) _create_tracked_field ( event , instance , field , fieldname = fieldname )
Create a TrackingEvent and TrackedFieldModification for an UPDATE event for each related model .
343
19
1,167
def _get_m2m_field ( model , sender ) : for field in getattr ( model , '_tracked_fields' , [ ] ) : if isinstance ( model . _meta . get_field ( field ) , ManyToManyField ) : if getattr ( model , field ) . through == sender : return field for field in getattr ( model , '_tracked_related_fields' , { } ) . keys ( ) : if isinstance ( model . _meta . get_field ( field ) , ManyToManyField ) : if getattr ( model , field ) . through == sender : return field
Get the field name from a model and a sender from m2m_changed signal .
135
18
1,168
def tracking_save ( sender , instance , raw , using , update_fields , * * kwargs ) : if _has_changed ( instance ) : if instance . _original_fields [ 'pk' ] is None : # Create _create_create_tracking_event ( instance ) else : # Update _create_update_tracking_event ( instance ) if _has_changed_related ( instance ) : # Because an object need to be saved before being related, # it can only be an update _create_update_tracking_related_event ( instance ) if _has_changed ( instance ) or _has_changed_related ( instance ) : _set_original_fields ( instance )
Post save detect creation or changes and log them . We need post_save to have the object for a create .
148
23
1,169
def from_entry_dict ( cls , entry_dict ) : # Debug helper # https://circleci.com/gh/andresriancho/w3af-api-docker/30 try : _type = entry_dict [ 'type' ] _id = entry_dict [ 'id' ] _time = entry_dict [ 'time' ] message = entry_dict [ 'message' ] severity = entry_dict [ 'severity' ] except KeyError : msg = ( 'Missing expected log entry attribute. Log entry' ' object is:\n\n%s' ) raise APIException ( msg % json . dumps ( entry_dict , indent = 4 ) ) return cls ( _type , message , _time , severity , _id )
This is a constructor for the LogEntry class .
163
10
1,170
def list ( self , id , seq ) : # pylint: disable=invalid-name,redefined-builtin schema = CaptureSchema ( exclude = ( 'id' , 'seq' ) ) resp = self . service . list ( self . _base ( id , seq ) ) return self . service . decode ( schema , resp , many = True )
Get a list of captures .
78
6
1,171
def get ( self , id , seq , intf ) : # pylint: disable=invalid-name,redefined-builtin schema = CaptureSchema ( ) resp = self . service . get_id ( self . _base ( id , seq ) , intf ) return self . service . decode ( schema , resp )
Get a capture .
71
4
1,172
def download ( self , id , seq , intf , inline = False ) : # pylint: disable=invalid-name,redefined-builtin resp = self . service . get_id ( self . _base ( id , seq ) , intf , params = { 'format' : 'cap' , 'inline' : inline } , stream = True ) b = io . BytesIO ( ) stream . stream_response_to_file ( resp , path = b ) resp . close ( ) b . seek ( 0 ) return ( b , self . service . filename ( resp ) )
Download a capture as a PCAP file .
128
9
1,173
def summary ( self , id , seq , intf , filter = None , inline = False ) : # pylint: disable=invalid-name,redefined-builtin schema = SummarySchema ( ) resp = self . service . get ( self . _base ( id , seq ) + str ( intf ) + '/summary/' , params = { 'filter' : filter , 'inline' : inline } ) return self . service . decode ( schema , resp )
Get a capture s summary .
101
6
1,174
def decode ( self , id , seq , intf , filter = None , frame = None , inline = False ) : # pylint: disable=invalid-name,redefined-builtin schema = DecodeSchema ( ) resp = self . service . get ( self . _base ( id , seq ) + str ( intf ) + '/decode/' , params = { 'filter' : filter , 'frame' : frame , 'inline' : inline } ) return self . service . decode ( schema , resp )
Get a capture s decode .
113
6
1,175
def send_to_cloudshark ( self , id , seq , intf , inline = False ) : # pylint: disable=invalid-name,redefined-builtin schema = CloudSharkSchema ( ) resp = self . service . post ( self . _base ( id , seq ) + str ( intf ) + '/cloudshark/' , params = { 'inline' : inline } ) return self . service . decode ( schema , resp )
Send a capture to a CloudShark Appliance . Both cloudshark_appliance_url and cloudshark_appliance_token must be properly configured via system preferences .
101
39
1,176
def get_dict_from_response ( response ) : if getattr ( response , '_resp' ) and response . _resp . code > 400 : raise OAuthResponseError ( 'Application mis-configuration in Globus' , None , response ) return response . data
Check for errors in the response and return the resulting JSON .
58
12
1,177
def get_user_info ( remote ) : response = remote . get ( GLOBUS_USER_INFO_URL ) user_info = get_dict_from_response ( response ) response . data [ 'username' ] = response . data [ 'preferred_username' ] if '@' in response . data [ 'username' ] : user_info [ 'username' ] , _ = response . data [ 'username' ] . split ( '@' ) return user_info
Get user information from Globus .
104
7
1,178
def get_user_id ( remote , email ) : try : url = '{}?usernames={}' . format ( GLOBUS_USER_ID_URL , email ) user_id = get_dict_from_response ( remote . get ( url ) ) return user_id [ 'identities' ] [ 0 ] [ 'id' ] except KeyError : # If we got here the response was successful but the data was invalid. # It's likely the URL is wrong but possible the API has changed. raise OAuthResponseError ( 'Failed to fetch user id, likely server ' 'mis-configuration' , None , remote )
Get the Globus identity for a users given email .
140
11
1,179
def get_function_signature ( func ) : if func is None : return 'Function is None' try : func_name = func . __name__ except AttributeError : func_name = 'None' if not inspect . isfunction ( func ) : raise TypeError ( 'The argument must be a function object: %s type is %s' % ( func_name , type ( func ) ) ) return func_name + str ( inspect . signature ( func ) )
Return the signature string of the specified function .
101
9
1,180
def acquire_reader ( self ) : with self . mutex : while self . rwlock < 0 or self . rwlock == self . max_reader_concurrency or self . writers_waiting : self . readers_ok . wait ( ) self . rwlock += 1
Acquire a read lock several threads can hold this type of lock .
61
14
1,181
def acquire_writer ( self ) : with self . mutex : while self . rwlock != 0 : self . _writer_wait ( ) self . rwlock = - 1
Acquire a write lock only one thread can hold this lock and only when no read locks are also held .
39
22
1,182
def list ( self , filter = None , type = None , sort = None , limit = None , page = None ) : # pylint: disable=redefined-builtin schema = PackageSchema ( exclude = ( 'testlist' , 'extra_cli_args' , 'agent_id' , 'options' , 'note' ) ) resp = self . service . list ( self . base , filter , type , sort , limit , page ) ps , l = self . service . decode ( schema , resp , many = True , links = True ) return Page ( ps , l )
Get a list of packages .
126
6
1,183
def get ( self , id ) : # pylint: disable=invalid-name,redefined-builtin schema = PackageSchema ( ) resp = self . service . get_id ( self . base , id ) return self . service . decode ( schema , resp )
Get a package .
59
4
1,184
def create ( self , resource ) : schema = PackageSchema ( exclude = ( 'id' , 'created' , 'updated' , 'test_count' , 'agent_id' , 'result_id' ) ) json = self . service . encode ( schema , resource ) schema = PackageSchema ( ) resp = self . service . create ( self . base , json ) return self . service . decode ( schema , resp )
Create a new package .
92
5
1,185
def analyze ( self , id ) : # pylint: disable=invalid-name,redefined-builtin schema = AnalysisSchema ( ) resp = self . service . post ( self . base + str ( id ) + '/' , params = { 'process' : 'analyze' } ) return self . service . decode ( schema , resp )
Get a list of tests that will be skipped for a package .
76
13
1,186
def bulk_copy ( self , ids ) : schema = PackageSchema ( ) return self . service . bulk_copy ( self . base , self . RESOURCE , ids , schema )
Bulk copy a set of packages .
41
8
1,187
def bulk_edit ( self , _fields , ids = None , filter = None , type = None , all = False ) : # pylint: disable=redefined-builtin schema = PackageSchema ( exclude = ( 'id' , 'created' , 'updated' , 'test_count' , 'agent_id' , 'result_id' ) ) _fields = self . service . encode ( schema , _fields , skip_none = True ) return self . service . bulk_edit ( self . base , self . RESOURCE , _fields , ids = ids , filter = filter , type = type , all = all )
Bulk edit a set of packages .
139
8
1,188
def clean_lines ( commands ) : if isinstance ( commands , basestring ) : # if the command argument is a filename, we need to open it. if path . isfile ( commands ) : commands = open ( commands , 'rb' ) # if the command string is a comma separated list, break it up. elif len ( commands . split ( ',' ) ) > 1 : commands = commands . split ( ',' ) else : # if a single command, need to just be returned. try : if commands . strip ( ) [ 0 ] != "#" : yield commands . strip ( ) + '\n' return except IndexError : pass elif isinstance ( commands , list ) : pass else : raise TypeError ( 'clean_lines() accepts a \'str\' or \'list\'' ) for cmd in commands : # exclude commented lines, and skip blank lines (index error) try : if cmd . strip ( ) [ 0 ] != "#" : yield cmd . strip ( ) + '\n' except IndexError : pass
Generate strings that are not comments or lines with only whitespace .
220
14
1,189
def xpath ( source_xml , xpath_expr , req_format = 'string' ) : tree = source_xml if not isinstance ( source_xml , ET . Element ) : tree = objectify . fromstring ( source_xml ) # clean up the namespace in the tags, as namespaces appear to confuse # xpath method for elem in tree . getiterator ( ) : # beware of factory functions such as Comment if isinstance ( elem . tag , basestring ) : i = elem . tag . find ( '}' ) if i >= 0 : elem . tag = elem . tag [ i + 1 : ] # remove unused namespaces objectify . deannotate ( tree , cleanup_namespaces = True ) filtered_list = tree . xpath ( xpath_expr ) # Return string from the list of Elements or pure xml if req_format == 'xml' : return filtered_list matches = '' . join ( etree . tostring ( element , pretty_print = True ) for element in filtered_list ) return matches if matches else ""
Filter xml based on an xpath expression .
230
9
1,190
def set ( self , key , value , lease = None , return_previous = None , timeout = None ) : assembler = commons . PutRequestAssembler ( self . _url , key , value , lease , return_previous ) obj = yield self . _post ( assembler . url , assembler . data , timeout ) revision = Revision . _parse ( obj ) returnValue ( revision )
Set the value for the key in the key - value store .
86
13
1,191
def watch ( self , keys , on_watch , filters = None , start_revision = None , return_previous = None ) : d = self . _start_watching ( keys , on_watch , filters , start_revision , return_previous ) # # ODD: Trying to use a parameter instead of *args errors out as soon as the # parameter is accessed. # def on_err ( * args ) : if args [ 0 ] . type not in [ CancelledError , ResponseFailed ] : self . log . warn ( 'etcd watch terminated with "{error}"' , error = args [ 0 ] . type ) return args [ 0 ] d . addErrback ( on_err ) return d
Watch one or more keys or key sets and invoke a callback .
155
13
1,192
def lease ( self , time_to_live , lease_id = None , timeout = None ) : assembler = commons . LeaseRequestAssembler ( self . _url , time_to_live , lease_id ) obj = yield self . _post ( assembler . url , assembler . data , timeout ) lease = Lease . _parse ( self , obj ) returnValue ( lease )
Creates a lease which expires if the server does not receive a keep alive within a given time to live period .
86
23
1,193
def stage_import_from_file ( self , fd , filename = 'upload.gz' ) : schema = ImportSchema ( ) resp = self . service . post ( self . base , files = { 'file' : ( filename , fd ) } ) return self . service . decode ( schema , resp )
Stage an import from a file upload .
68
8
1,194
def stage_import_from_filesystem ( self , filepath ) : schema = ImportSchema ( ) resp = self . service . post ( self . base , params = { 'path' : filepath } ) return self . service . decode ( schema , resp )
Stage an import from a filesystem path .
57
8
1,195
def stage_import_from_url ( self , url , token = None , username = None , password = None , insecure = False ) : schema = ImportSchema ( ) resp = self . service . post ( self . base , params = { 'url' : url , 'token' : token , 'username' : username , 'password' : password , 'insecure' : insecure } ) return self . service . decode ( schema , resp )
Stage an import from a URL to another CDRouter system .
95
13
1,196
def get_commit_request ( self , id ) : # pylint: disable=invalid-name,redefined-builtin schema = RequestSchema ( ) resp = self . service . get ( self . base + str ( id ) + '/request/' ) return self . service . decode ( schema , resp )
Get a commit request for a staged import .
69
9
1,197
def commit ( self , id , impreq ) : # pylint: disable=invalid-name,redefined-builtin schema = RequestSchema ( ) json = self . service . encode ( schema , impreq ) schema = RequestSchema ( ) resp = self . service . post ( self . base + str ( id ) + '/' , json = json ) return self . service . decode ( schema , resp )
Commit a staged import .
90
6
1,198
def model_to_dict ( instance , * * options ) : options = _defaults ( options ) attrs = { } if options [ 'prehook' ] : if isinstance ( options [ 'prehook' ] , collections . Callable ) : instance = options [ 'prehook' ] ( instance ) if instance is None : return attrs # Items in the `fields` list are the output aliases, not the raw # accessors (field, method, property names) for alias in options [ 'fields' ] : # Get the accessor for the object accessor = options [ 'aliases' ] . get ( alias , alias ) # Create the key that will be used in the output dict key = options [ 'prefix' ] + alias # Optionally camelcase the key if options [ 'camelcase' ] : key = convert_to_camel ( key ) # Get the field value. Use the mapped value to the actually property or # method name. `value` may be a number of things, so the various types # are checked below. value = get_field_value ( instance , accessor , allow_missing = options [ 'allow_missing' ] ) # Related objects, perform some checks on their options if isinstance ( value , ( models . Model , QuerySet ) ) : _options = _defaults ( options [ 'related' ] . get ( accessor , { } ) ) # If the `prefix` follows the below template, generate the # `prefix` for the related object if '%(accessor)s' in _options [ 'prefix' ] : _options [ 'prefix' ] = _options [ 'prefix' ] % { 'accessor' : alias } if isinstance ( value , models . Model ) : if len ( _options [ 'fields' ] ) == 1 and _options [ 'flat' ] and not _options [ 'merge' ] : value = list ( serialize ( value , * * _options ) . values ( ) ) [ 0 ] else : # Recurse, get the dict representation _attrs = serialize ( value , * * _options ) # Check if this object should be merged into the parent, # otherwise nest it under the accessor name if _options [ 'merge' ] : attrs . update ( _attrs ) continue value = _attrs else : value = serialize ( value , * * _options ) attrs [ key ] = value # Apply post-hook to serialized attributes if options [ 'posthook' ] : attrs = options [ 'posthook' ] ( instance , attrs ) return attrs
Takes a model instance and converts it into a dict .
555
12
1,199
def set_save_directory ( base , source ) : root = os . path . join ( base , source ) if not os . path . isdir ( root ) : os . makedirs ( root ) world . screenshot_root = root
Sets the root save directory for saving screenshots . Screenshots will be saved in subdirectories under this directory by browser window size .
51
27