idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
14,000
def to_practice_counts(request):
    """Get number of items available to practice."""
    data = None
    if request.method == "POST":
        data = json.loads(request.body.decode("utf-8"))["filters"]
    if "filters" in request.GET:
        data = load_query_json(request.GET, "filters")
    if data is None or len(data) == 0:
        # Nothing requested: reply with just the endpoint documentation.
        return render_json(request, {}, template='models_json.html',
                           help_text=to_practice_counts.__doc__)
    language = get_language(request)
    timer('to_practice_counts')
    group_names, group_filters = list(zip(*sorted(data.items())))
    leaves_per_group = Item.objects.filter_all_reachable_leaves_many(group_filters, language)
    response = {
        group_name: {
            'filter': data[group_name],
            'number_of_items': len(leaves),
        }
        for group_name, leaves in zip(group_names, leaves_per_group)
    }
    LOGGER.debug("to_practice_counts - getting items in groups took %s seconds",
                 (timer('to_practice_counts')))
    return render_json(request, response, template='models_json.html',
                       help_text=to_practice_counts.__doc__)
Get number of items available to practice .
318
8
14,001
def answer(request):
    """Save the answer.

    GET renders the answer form page; POST saves the submitted answers
    under the practice context derived from the request's filter.
    Any other method yields a 400 response.
    """
    if request.method == 'GET':
        return render(request, 'models_answer.html', {}, help_text=answer.__doc__)
    elif request.method == 'POST':
        practice_filter = get_filter(request)
        practice_context = PracticeContext.objects.from_content(practice_filter)
        saved_answers = _save_answers(request, practice_context, True)
        return render_json(request, saved_answers, status=200, template='models_answer.html')
    else:
        # BUG FIX: the original mixed %-style ("%s") with str.format(),
        # so the method name was never interpolated into the message.
        return HttpResponseBadRequest("method {} is not allowed".format(request.method))
Save the answer .
151
4
14,002
def user_stats(request):
    # Get per-group user statistics (item counts, practiced/answer/correct
    # counts and, with ?mastered=..., mastery counts) for the item groups
    # selected by the "filters" parameter in the POST body or GET query.
    timer('user_stats')
    response = {}
    data = None
    if request.method == "POST":
        data = json.loads(request.body.decode("utf-8"))["filters"]
    if "filters" in request.GET:
        data = load_query_json(request.GET, "filters")
    if data is None:
        # No filters given: answer with the endpoint documentation only.
        return render_json(request, {}, template='models_user_stats.html', help_text=user_stats.__doc__)
    environment = get_environment()
    if is_time_overridden(request):
        environment.shift_time(get_time(request))
    user_id = get_user_id(request)
    language = get_language(request)
    filter_names, filter_filters = list(zip(*sorted(data.items())))
    reachable_leaves = Item.objects.filter_all_reachable_leaves_many(filter_filters, language)
    # Flatten to the union of all leaf items so counts are fetched once.
    all_leaves = sorted(list(set(flatten(reachable_leaves))))
    answers = environment.number_of_answers_more_items(all_leaves, user_id)
    correct_answers = environment.number_of_correct_answers_more_items(all_leaves, user_id)
    if request.GET.get("mastered"):
        timer('user_stats_mastered')
        # NOTE(review): project spelling of "threshold" kept as-is.
        mastery_threshold = get_mastery_trashold()
        predictions = Item.objects.predict_for_overview(environment, user_id, all_leaves)
        mastered = dict(list(zip(all_leaves, [p >= mastery_threshold for p in predictions])))
        LOGGER.debug("user_stats - getting predictions for items took %s seconds", (timer('user_stats_mastered')))
    for identifier, items in zip(filter_names, reachable_leaves):
        if len(items) == 0:
            response[identifier] = {
                "filter": data[identifier],
                "number_of_items": 0,
            }
        else:
            response[identifier] = {
                "filter": data[identifier],
                "number_of_items": len(items),
                "number_of_practiced_items": sum(answers[i] > 0 for i in items),
                "number_of_answers": sum(answers[i] for i in items),
                "number_of_correct_answers": sum(correct_answers[i] for i in items),
            }
        if request.GET.get("mastered"):
            response[identifier]["number_of_mastered_items"] = sum(mastered[i] for i in items)
    return render_json(request, response, template='models_user_stats.html', help_text=user_stats.__doc__)
Get user statistics for selected groups of items
663
8
14,003
def add(self, start, end, cut_point=None, skip_rate=None, extend_loop=None):
    """Add a new loop definition.

    Raises ValueError when start is after end.
    """
    # BUG FIX: the original asserted *after* appending, so an invalid loop
    # was still stored — and the assert disappears under ``python -O``.
    # Validate first and raise a real exception.
    if start > end:
        raise ValueError("loop start (%r) must not be after end (%r)" % (start, end))
    self.data.append(self.parse_loop_line(['LOOP', start, end, cut_point, skip_rate, extend_loop]))
Add a new loop definition .
72
6
14,004
def log(self, text, key="?????", force=False):
    # Print a tagged line to stdout (Python 2 print statement) and flush
    # immediately so the output appears even when stdout is block-buffered.
    # The line is only emitted when forced or when verbose mode is on.
    if force or self.verbose:
        print ">>>> {} {}".format(key, text)
        sys.stdout.flush()
Print a string to the logs .
47
7
14,005
def input_yes_no(msg=''):
    # Prompt the user (Python 2 raw_input) until a y/n/yes/no answer is
    # given; return True for yes and False for no.
    print '\n' + msg
    while (True):
        i = raw_input('Input yes or no: ')
        i = i.lower()
        if i == 'y' or i == 'yes':
            return True
        elif i == 'n' or i == 'no':
            return False
        else:
            print 'ERROR: Bad input. Must enter y/n/yes/no'
Simple helper function
95
3
14,006
def resolve_relation_type_config(value):
    """Resolve the relation type (by name or numeric ID) to a config object.

    Raises ValueError when the value is not configured or its type is not
    supported for resolving.
    """
    relation_types = current_app.config['PIDRELATIONS_RELATION_TYPES']

    def _lookup(predicate, error_message):
        # Return the first configured relation type matching the predicate.
        for rt in relation_types:
            if predicate(rt):
                return rt
        raise ValueError(error_message)

    if isinstance(value, six.string_types):
        obj = _lookup(lambda rt: rt.name == value,
                      "Relation name '{0}' is not configured.".format(value))
    elif isinstance(value, int):
        obj = _lookup(lambda rt: rt.id == value,
                      "Relation ID {0} is not configured.".format(value))
    else:
        raise ValueError("Type of value '{0}' is not supported for resolving.".format(value))
    api_class = obj_or_import_string(obj.api)
    schema_class = obj_or_import_string(obj.schema)
    return obj.__class__(obj.id, obj.name, obj.label, api_class, schema_class)
Resolve the relation type to config object .
245
9
14,007
def match_RCSB_pdb_chains(pdb_id1, pdb_id2, cut_off=60.0, allow_multiple_matches=False, multiple_match_error_margin=3.0, use_seqres_sequences_if_possible=True, strict=True):
    # Convenience wrapper around match_pdb_chains: retrieve both PDB files
    # from the RCSB by ID, parse them, and delegate the chain matching.
    # Python 2 code (``except X, e`` syntax).
    try:
        # 'stage' tracks which PDB is being loaded so a failure can be
        # attributed to the right file in the error message.
        stage = pdb_id1
        pdb_1 = PDB(retrieve_pdb(pdb_id1), strict=strict)
        stage = pdb_id2
        pdb_2 = PDB(retrieve_pdb(pdb_id2), strict=strict)
    except (PDBParsingException, NonCanonicalResidueException, PDBValidationException), e:
        raise PDBParsingException("An error occurred while loading %s: '%s'" % (stage, str(e)))
    return match_pdb_chains(pdb_1, pdb_id1, pdb_2, pdb_id2, cut_off=cut_off, allow_multiple_matches=allow_multiple_matches, multiple_match_error_margin=multiple_match_error_margin, use_seqres_sequences_if_possible=use_seqres_sequences_if_possible)
A convenience function for match_pdb_chains . The required arguments are two PDB IDs from the RCSB .
288
25
14,008
def create_resource(output_model, rtype, unique, links, existing_ids=None, id_helper=None):
    """General-purpose routine to create a new resource in the output model.

    Returns (created, resource_id); ``created`` is False when the ID was
    already present in ``existing_ids``.
    """
    # Pick the ID generator according to the kind of helper provided.
    if id_helper is None:
        id_generator = default_idgen(None)
    elif isinstance(id_helper, str):
        id_generator = idgen(id_helper)
    elif isinstance(id_helper, GeneratorType):
        id_generator = id_helper
    else:
        # FIXME: G11N
        raise ValueError('id_helper must be string (URL), callable or None')
    ctx = context(None, None, output_model, base=None, idgen=id_generator,
                  existing_ids=existing_ids, extras=None)
    rid = I(materialize_entity(ctx, rtype, unique=unique))
    if existing_ids is not None:
        if rid in existing_ids:
            # Already materialized: report without re-adding type or links.
            return (False, rid)
        existing_ids.add(rid)
    output_model.add(rid, VTYPE_REL, rtype)
    for rel, target in links:
        output_model.add(rid, rel, target)
    return (True, rid)
General - purpose routine to create a new resource in the output model based on data provided
248
17
14,009
def _read_apps(self):
    """Read the application configurations found in self.confdir.

    Returns a dict mapping app name -> AppLogParser; raises
    LogRaptorConfigError when no application could be configured.
    """
    apps = {}
    for cfgfile in glob.iglob(os.path.join(self.confdir, '*.conf')):
        name = os.path.basename(cfgfile)[0:-5]  # strip the '.conf' suffix
        try:
            app = AppLogParser(name, cfgfile, self.args, self.logdir,
                               self.fields, self.name_cache, self.report)
        except (LogRaptorOptionError, LogRaptorConfigError, LogFormatError) as err:
            # A broken app config is logged and skipped, not fatal.
            logger.error('cannot add app %r: %s', name, err)
        else:
            apps[name] = app
    if not apps:
        raise LogRaptorConfigError('no configured application in %r!' % self.confdir)
    return apps
Read the configuration of applications returning a dictionary
177
8
14,010
def patterns(self):
    """A tuple of re.compile() objects built from the pattern arguments.

    Returns an empty tuple when an empty pattern is present (match all).
    Raises LogRaptorArgumentError when no pattern is available or a
    pattern has invalid regex syntax.
    """
    # No explicit pattern argument ==> consider the first source argument as pattern.
    if not self.args.patterns and not self.args.pattern_files:
        try:
            self.args.patterns.append(self.args.files.pop(0))
        except IndexError:
            raise LogRaptorArgumentError('PATTERN', 'no search pattern')
    # Get the patterns from arguments and files.
    patterns = set()
    if self.args.pattern_files:
        patterns.update([p.rstrip('\n') for p in fileinput.input(self.args.pattern_files)])
    patterns.update(self.args.patterns)
    logger.debug("search patterns to be processed: %r", patterns)
    # If one pattern is empty then skip the other patterns.
    if '' in patterns:
        logger.info("an empty pattern provided: match all strings!")
        return tuple()
    try:
        # BUG FIX: the conditional expression used to bind as
        # "IGNORECASE if case else (0 | re.UNICODE)", silently dropping
        # re.UNICODE whenever case-insensitive matching was requested.
        flags = (re.IGNORECASE if self.args.case else 0) | re.UNICODE
        return tuple([
            re.compile(r'(\b%s\b)' % pat if self.args.word else '(%s)' % pat, flags=flags)
            for pat in patterns if pat
        ])
    except re.error as err:
        raise LogRaptorArgumentError('wrong regex syntax for pattern: %r' % err)
A tuple with re . RegexObject objects created from regex pattern arguments .
300
15
14,011
def files(self):
    """A list of input sources; each item can be a file path, a glob path or a URL."""
    # Without explicit files but with recursion enabled, default to the
    # current directory.
    if self.args.files or not self.recursive:
        return self.args.files
    return ['.']
A list of input sources . Each item can be a file path a glob path or URL .
46
19
14,012
def apps(self):
    """Dictionary with the loaded applications selected by --apps.

    Defaults to every configured application; raises
    LogRaptorArgumentError when an unknown app name is requested.
    """
    logger.debug("initialize applications ...")
    apps = self.args.apps or self._config_apps.keys()
    unknown = set(apps) - set(self._config_apps.keys())
    if unknown:
        raise LogRaptorArgumentError("--apps", "not found apps %r" % list(unknown))
    # NOTE: the original also carried an alternative filter on a local
    # ``enabled`` flag that was always None, making that branch
    # unreachable; the reachable behavior is just this selection.
    return {k: v for k, v in self._config_apps.items() if k in apps}
Dictionary with loaded applications .
152
6
14,013
def apptags(self):
    """Build the map from log tag to the list of applications using it.

    Applications are visited in (priority, name) order so higher-priority
    apps come first in each tag's list.
    """
    logger.debug("populate tags map ...")
    app_names = self._apps.keys()
    unknown = set(app_names)
    unknown.difference_update(self._config_apps.keys())
    if unknown:
        raise ValueError("unknown apps: %r" % list(unknown))
    selected = [v for v in self._config_apps.values() if v.name in app_names]
    tagmap = {}
    for app in sorted(selected, key=lambda x: (x.priority, x.name)):
        for tag in app.tags:
            if not tag:
                raise LogRaptorConfigError('found an empty tag for app %r' % app.name)
            tagmap.setdefault(tag, []).append(app)
    return tagmap
Map from log app - name to an application .
185
10
14,014
def create_dispatcher(self):
    """Return the dispatcher suited to the configured channels and options."""
    before_context = max(self.args.before_context, self.args.context)
    after_context = max(self.args.after_context, self.args.context)
    line_output_disabled = (self.args.files_with_match is not None
                            or self.args.count
                            or self.args.only_matching
                            or self.args.quiet)
    if line_output_disabled:
        # Sending of log lines is disabled by the arguments.
        return UnbufferedDispatcher(self._channels)
    if before_context == 0 and after_context == 0:
        # No context lines requested: no line buffering is needed.
        return UnbufferedDispatcher(self._channels)
    if self.args.thread:
        return ThreadedDispatcher(self._channels, before_context, after_context)
    return LineBufferDispatcher(self._channels, before_context, after_context)
Return a dispatcher for configured channels .
194
7
14,015
def get_config(self):
    # Return a formatted text with the main configuration parameters:
    # config file, directories, applications, fields, channels and reports.
    # Create a dummy report object if necessary
    channels = [sect.rsplit('_')[0] for sect in self.config.sections(suffix='_channel')]
    channels.sort()
    disabled_apps = [app for app in self._config_apps.keys() if app not in self._apps]
    # NOTE: the conditional expressions below apply to the whole
    # %-formatted string, so an empty/alternative string is emitted when
    # there is nothing to list.
    return u''.join([
        u"\n--- %s configuration ---" % __package__,
        u"\nConfiguration file: %s" % self.config.cfgfile,
        u"\nConfiguration directory: %s" % self.confdir,
        u"\nConfigured applications: %s" % ', '.join(self._config_apps.keys()),
        u"\nDisabled applications: %s" % ', '.join(disabled_apps) if disabled_apps else '',
        u"\nFilter fields: %s" % ', '.join(self.config.options('fields')),
        u"\nOutput channels: %s" % ', '.join(channels) if channels else u'No channels defined',
        u"\nReports: %s\n" % ', '.join([section[:-7] for section in self.config.sections(suffix='_report')]),
        ''
    ])
Return a formatted text with main configuration parameters .
297
9
14,016
def get_run_summary(self, run_stats):
    # Produce a text summary from run statistics: file/line/match totals
    # plus a per-application breakdown when any app matched something.
    # Work on a copy so the caller's stats dict is not mutated.
    run_stats = run_stats.copy()
    run_stats['files'] = len(run_stats['files'])
    summary = [
        u'\n--- %s run summary ---' % __package__,
        u'Number of processed files: %(files)d',
        u'Total lines read: %(lines)d',
        u'Total log events matched: %(matches)d',
    ]
    if any([app.matches or app.unparsed for app in self.apps.values()]):
        if self.matcher == 'unruled':
            summary.append("Applications found (application rules not used):")
            for app in filter(lambda x: x.matches, self.apps.values()):
                summary.append(u' %s(matches=%d)' % (app.name, app.matches))
        else:
            summary.append("Applications found:")
            for app in filter(lambda x: x.matches or x.unparsed, self.apps.values()):
                summary.append(u' %s(matches=%d, unparsed=%s)' % (app.name, app.matches, app.unparsed))
    summary.append('\n')
    # The %(...)d placeholders above are filled here from run_stats.
    return '\n'.join(summary) % run_stats
Produce a text summary from run statistics .
307
9
14,017
def add_template_dirs(app):
    """Add this package's bundled template directory to the Flask app's loader."""
    package_dir = os.path.dirname(os.path.abspath(__file__))
    template_dir = os.path.join(package_dir, 'templates')
    # The app's own loader stays first in the choice order, so its
    # templates are consulted before the bundled ones.
    app.jinja_loader = jinja2.ChoiceLoader([
        app.jinja_loader,
        jinja2.FileSystemLoader(template_dir),
    ])
Add flask_mongo_profiler's template directories.
86
12
14,018
def setup():
    """Create the Wallace config file in the user's home directory if missing."""
    config_name = ".wallaceconfig"
    config_path = os.path.join(os.path.expanduser("~"), config_name)
    if os.path.isfile(config_path):
        log("Wallace config file already exists.", chevrons=False)
        return
    log("Creating Wallace config file at ~/.wallaceconfig...", chevrons=False)
    wallace_module_path = os.path.dirname(os.path.realpath(__file__))
    src = os.path.join(wallace_module_path, "config", config_name)
    shutil.copyfile(src, config_path)
Walk the user through the Wallace setup.
166
8
14,019
def summary(app):
    """Print a summary of a deployed app's status table and yield."""
    response = requests.get('https://{}.herokuapp.com/summary'.format(app))
    rows = response.json()['summary']
    click.echo("\nstatus \t| count")
    click.echo("----------------")
    for row in rows:
        click.echo("{}\t| {}".format(row[0], row[1]))
    # Yield = fraction of status-101 entries among all statuses >= 100.
    num_101s = sum([row[1] for row in rows if row[0] == 101])
    num_10xs = sum([row[1] for row in rows if row[0] >= 100])
    if num_10xs > 0:
        click.echo("\nYield: {:.2%}".format(1.0 * num_101s / num_10xs))
Print a summary of a deployed app's status.
183
10
14,020
def scale_up_dynos(id):
    """Scale up the Heroku dynos for the app with the given id."""
    # Load psiTurk configuration.
    config = PsiturkConfig()
    config.load_config()
    dyno_type = config.get('Server Parameters', 'dyno_type')
    web_count = config.get('Server Parameters', 'num_dynos_web')
    worker_count = config.get('Server Parameters', 'num_dynos_worker')
    log("Scaling up the dynos...")
    # NOTE(review): shell=True with interpolated values — ``id`` comes
    # from our own CLI/config, but keep it out of untrusted hands.
    for process, count in (("web", web_count), ("worker", worker_count)):
        subprocess.call(
            "heroku ps:scale " + process + "=" + str(count) + ":" + str(dyno_type) + " --app " + id,
            shell=True)
    if config.getboolean('Server Parameters', 'clock_on'):
        subprocess.call("heroku ps:scale clock=1:" + dyno_type + " --app " + id, shell=True)
Scale up the Heroku dynos .
274
8
14,021
def deploy(verbose, app):
    """Deploy the app to MTurk via Heroku (non-sandbox mode)."""
    # Load psiTurk configuration.
    config = PsiturkConfig()
    config.load_config()
    # Switch the experiment into deploy mode and log to stdout.
    config.set("Experiment Configuration", "mode", "deploy")
    config.set("Server Parameters", "logfile", "-")
    # Ensure that psiTurk is not in sandbox mode.
    config.set("Shell Parameters", "launch_in_sandbox_mode", "false")
    # The remaining steps are shared with the sandbox deployment.
    deploy_sandbox_shared_setup(verbose=verbose, app=app)
Deploy app using Heroku to MTurk .
133
10
14,022
def qualify(qualification, value, worker):
    # Assign (or update) an MTurk qualification with the given value for a
    # worker, then print the distribution of values currently held.
    # create connection to AWS
    from boto.mturk.connection import MTurkConnection
    config = PsiturkConfig()
    config.load_config()
    aws_access_key_id = config.get('AWS Access', 'aws_access_key_id')
    aws_secret_access_key = config.get('AWS Access', 'aws_secret_access_key')
    conn = MTurkConnection(aws_access_key_id, aws_secret_access_key)

    def get_workers_with_qualification(qualification):
        """Get workers with the given qualification."""
        # Page through the API 100 records at a time until a page is empty.
        results = []
        continue_flag = True
        page = 1
        while (continue_flag):
            new_results = conn.get_qualifications_for_qualification_type(qualification, page_size=100, page_number=page)
            if (len(new_results) == 0):
                continue_flag = False
            else:
                results.extend(new_results)
                page = page + 1
        return results

    results = get_workers_with_qualification(qualification)
    workers = [x.SubjectId for x in results]
    # assign the qualification
    click.echo("Assigning qualification {} with value {} to worker {}".format(qualification, value, worker))
    # Update the score if the worker already holds it, otherwise assign.
    if worker in workers:
        result = conn.update_qualification_score(qualification, worker, value)
    else:
        result = conn.assign_qualification(qualification, worker, value)
    if result:
        click.echo(result)
    # print out the current set of workers with the qualification
    results = get_workers_with_qualification(qualification)
    click.echo("{} workers with qualification {}:".format(len(results), qualification))
    values = [r.IntegerValue for r in results]
    unique_values = list(set([r.IntegerValue for r in results]))
    for v in unique_values:
        click.echo("{} with value {}".format(len([val for val in values if val == v]), v))
Assign a qualification to a worker .
454
8
14,023
def dump_database(id):
    """Capture a Heroku Postgres backup for the app and download it.

    Returns the local path of the downloaded dump file.
    """
    log("Generating a backup of the database on Heroku...")
    dump_filename = "data.dump"
    data_directory = "data"
    dump_dir = os.path.join(data_directory, id)
    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)
    subprocess.call("heroku pg:backups capture --app " + id, shell=True)
    backup_url = subprocess.check_output("heroku pg:backups public-url --app " + id, shell=True)
    # The CLI wraps the URL in quotes and a trailing newline; extract the
    # bare https URL.
    backup_url = backup_url.replace('"', '').rstrip()
    backup_url = re.search("https:.*", backup_url).group(0)
    print(backup_url)
    log("Downloading the backup...")
    dump_path = os.path.join(dump_dir, dump_filename)
    with open(dump_path, 'wb') as file:
        subprocess.call(['curl', '-o', dump_path, backup_url], stdout=file)
    return dump_path
Backup the Postgres database locally .
256
8
14,024
def backup(app):
    """Dump the app's database and upload the dump to an S3 bucket."""
    dump_path = dump_database(app)
    config = PsiturkConfig()
    config.load_config()
    conn = boto.connect_s3(
        config.get('AWS Access', 'aws_access_key_id'),
        config.get('AWS Access', 'aws_secret_access_key'),
    )
    bucket = conn.create_bucket(app, location=boto.s3.connection.Location.DEFAULT)
    key = boto.s3.key.Key(bucket)
    key.key = 'database.dump'
    key.set_contents_from_filename(dump_path)
    # Publish a non-expiring, unauthenticated URL for the dump.
    url = key.generate_url(expires_in=0, query_auth=False)
    log("The database backup URL is...")
    print(url)
Dump the database .
183
5
14,025
def create(example):
    """Create a copy of the given example in the current working directory."""
    try:
        this_dir = os.path.dirname(os.path.realpath(__file__))
        example_dir = os.path.join(this_dir, os.pardir, "examples", example)
        shutil.copytree(example_dir, os.path.join(os.getcwd(), example))
        log("Example created.", delay=0)
    except TypeError:
        # NOTE(review): presumably raised when ``example`` is None — confirm.
        click.echo("Example '{}' does not exist.".format(example))
    except OSError:
        click.echo("Example '{}' already exists here.".format(example))
Create a copy of the given example .
145
8
14,026
def get_datetime_interval(timestamp, diff, offset=0):
    """Return the (start, end) datetime pair around a Unix timestamp.

    The start is ``diff`` seconds before the timestamp; the end is the
    timestamp shifted forward by the optional ``offset``.
    """
    interval_end = datetime.datetime.fromtimestamp(timestamp + offset)
    interval_start = datetime.datetime.fromtimestamp(timestamp - diff)
    return interval_start, interval_end
Returns datetime interval from timestamp backward in the past computed using the milliseconds difference passed as argument . The final datetime is corrected with an optional offset .
68
30
14,027
def strftimegen(start_dt, end_dt):
    """Return a generator function for datetime format strings.

    The returned generator expands a string's date specifiers into a
    day-by-day sequence from start_dt through end_dt (inclusive).
    """
    if start_dt > end_dt:
        raise ValueError("the start datetime is after the end datetime: (%r,%r)" % (start_dt, end_dt))

    def iterftime(string):
        # Keep only the (format, regex) pairs that occur in the string.
        applicable = [fmt for fmt in DATE_FORMATS if fmt[1].search(string) is not None]
        if not applicable:
            # No date specifier: the path is a constant.
            yield string
        else:
            day = start_dt
            expanded = string
            while end_dt >= day:
                for fmt in applicable:
                    expanded = fmt[1].sub(day.strftime(fmt[0]), expanded)
                yield expanded
                day = day + datetime.timedelta(days=1)

    return iterftime
Return a generator function for datetime format strings . The generator produce a day - by - day sequence starting from the first datetime to the second datetime argument .
180
33
14,028
def setup_jobs(outpath, options, input_files):
    # Set up fragment-generation jobs by creating the necessary input files.
    # - outpath is where the output is to be stored.
    # - options is the optparse options object.
    # - input_files is a list of paths to input (PDB or FASTA) files.
    # Returns (job_inputs, has_reverse_mapping, errors). Python 2 code
    # (dict.iteritems).
    job_inputs = None
    reverse_mapping = None
    fasta_file_contents = {}

    # Generate FASTA files for PDB inputs
    # fasta_file_contents is a mapping from a file path to a pair (FASTA contents, file type). We remember the file type
    # since we offset residue IDs depending on file type i.e. for FASTA files, we treat each sequence separately and do
    # not renumber the fragments in postprocessing. For PDB files, however, we need to respect the order and length of
    # sequences so that we renumber the fragments appropriately in postprocessing - we assume that if a PDB file is passed in
    # then all chains (protein, RNA, or DNA) will be used in a Rosetta run.
    for input_file in input_files:
        assert (not (fasta_file_contents.get(input_file)))
        if any(fnmatch(input_file, x) for x in pdb_file_wildcards):
            pdb = PDB.from_filepath(input_file, strict=True)
            pdb.pdb_id = os.path.basename(input_file).split('.')[0]
            if pdb.pdb_id.startswith('pdb') and len(pdb.pdb_id) >= 7:
                # Hack to rename FASTA identifiers for pdb*.ent files which are present in mirrors of the PDB
                pdb.pdb_id = pdb.pdb_id.replace('pdb', '')
            fasta_file_contents[input_file] = (pdb.create_fasta(prefer_seqres_order=False), 'PDB')
        else:
            fasta_file_contents[input_file] = (read_file(input_file), 'FASTA')

    # Extract sequences from the input FASTA files.
    found_sequences, reverse_mapping, errors = get_sequences(options, fasta_file_contents)
    if found_sequences:
        reformat(found_sequences)
    if errors:
        return None, False, errors

    # Discard sequences that are the wrong chain.
    desired_sequences = {}
    for key, sequence in found_sequences.iteritems():
        pdb_id, chain, file_name = key
        if options.chain is None or chain == options.chain:
            desired_sequences[key] = sequence

    # Create the input FASTA and script files.
    job_inputs, errors = create_inputs(options, outpath, desired_sequences)

    # Create the reverse mapping file
    if reverse_mapping:
        segment_mapping_file = os.path.join(outpath, "segment_map.json")
        colorprinter.message("Creating a reverse mapping file %s." % segment_mapping_file)
        write_file(segment_mapping_file, json.dumps(reverse_mapping))

    # Create the post-processing script file
    post_processing_script = read_file(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'post_processing.py'))
    write_file(os.path.join(outpath, 'post_processing.py'), post_processing_script, 'w')

    # Create the secondary structure filter file
    if options.secondary_structure_file:
        write_file(os.path.join(outpath, 'ss_filter.json'), json.dumps({'secondary_structure_filter': SecondaryStructureDefinition.from_filepath(options.secondary_structure_file).data}), 'w')

    return job_inputs, reverse_mapping != None, errors
This function sets up the jobs by creating the necessary input files as expected . - outpath is where the output is to be stored . - options is the optparse options object . - input_files is a list of paths to input files .
850
49
14,029
def reformat(found_sequences):
    # Truncate each FASTA header's first '|'-separated field to five
    # characters: the leading '>' plus a 4-character ID. Mutates the
    # sequence lists in place. Python 2 code (dict.iteritems).
    for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()):
        header = sequence[0]
        assert (header[0] == '>')
        tokens = header.split('|')
        tokens[0] = tokens[0][:5]
        assert (len(tokens[0]) == 5)
        sequence[0] = "|".join(tokens)
Truncate the FASTA headers so that the first field is a 4 - character ID .
104
20
14,030
def search_configuration_files(findstr, replacestr=None):
    """Search the configuration files for a phrase.

    ``replacestr`` is accepted for a future find-and-replace mode but is
    currently unused: at present the function only finds phrases.
    Returns (alloutput, allerrors), both keyed by the searched path.
    """
    with open(configurationFilesLocation, "r") as listing:
        lines = listing.readlines()
    allerrors = {}
    alloutput = {}
    for line in lines:
        line = line.strip()
        if not line:
            continue
        if line.endswith("generate_fragments.py"):
            # Do not parse the Python script but check that it exists.
            if not (os.path.exists(line)):
                allerrors[line] = "File/directory %s does not exist." % line
            continue
        cmd = ["grep", "-n", "-i", findstr, line]
        output, errors = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        if errors:
            allerrors[line] = errors.strip()
        if output:
            alloutput[line] = output.strip().split("\n")
    return alloutput, allerrors
This function could be used to find and replace paths in the configuration files . At present it only finds phrases .
248
22
14,031
def get_local_ip_address(target):
    """Return the local IP address used to reach the given target.

    "Connects" a UDP socket to the target (no packets are actually sent)
    and reads the socket's own address. Returns '' on failure.
    """
    ip_adr = ''
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect((target, 8000))  # arbitrary port; UDP connect is local only
            ip_adr = s.getsockname()[0]
        finally:
            # FIX: the original leaked the socket when connect() failed.
            s.close()
    except socket.error:
        # Narrowed from a bare ``except``: only socket/resolution errors
        # are expected here; keep best-effort semantics and return ''.
        pass
    return ip_adr
Get the local ip address to access one specific target .
77
11
14,032
def connect(self):
    """Connect the socket to (self.host, self.port) and return it."""
    try:
        self.sock.connect((self.host, self.port))
    except socket.error as ex:
        # Expected network failure: log concisely and propagate.
        logging.error('Exception while connecting socket on %s:%s - Error %s' % (self.host, self.port, ex))
        raise
    except Exception as ex:
        # Unexpected failure: log with traceback and propagate.
        logging.exception('Exception while connecting socket on %s:%s - Error %s' % (self.host, self.port, ex))
        raise
    return self.sock
Connect socket to server
108
4
14,033
def send_by_packet(self, data):
    # Send exactly PACKET_SIZE bytes of ``data`` on the socket, looping
    # over partial sends; returns the number of bytes sent.
    # NOTE(review): this assumes len(data) >= PACKET_SIZE — a shorter
    # buffer would make send() return 0 and raise below. Confirm against
    # the callers.
    total_sent = 0
    while total_sent < PACKET_SIZE:
        sent = self.sock.send(data[total_sent:])
        if sent == 0:
            raise RuntimeError("socket connection broken")
        total_sent += sent
    return total_sent
Send data by packet on socket
68
6
14,034
def receive(self, siz):
    """Receive exactly ``siz`` bytes from the socket.

    Raises when the peer closes early or delivers more than expected.
    """
    result = bytearray()
    while True:
        chunk = self.sock.recv(siz - len(result))
        result += chunk
        if len(result) == siz:
            return result
        if len(result) > siz:
            raise Exception('Received more bytes than expected')
        if not chunk:
            # Peer closed the connection before ``siz`` bytes arrived.
            raise Exception('Error receiving data. %d bytes received' % len(result))
Receive a known length of bytes from a socket
101
10
14,035
def spawn_container(addr, env_cls=Environment, mgr_cls=EnvManager, set_seed=True, *args, **kwargs):
    # Spawn a new environment served at ``addr`` and block running its
    # event loop until the environment is stopped.
    # Try setting the process name to easily recognize the spawned
    # environments with 'ps -x' or 'top'
    try:
        import setproctitle as spt
        title = 'creamas: {}({})'.format(env_cls.__class__.__name__, _get_base_url(addr))
        spt.setproctitle(title)
    except:
        # setproctitle is optional; silently skip when unavailable.
        pass
    if set_seed:
        # Re-seed RNGs so spawned processes do not share random streams.
        _set_random_seeds()
    # kwargs['codec'] = aiomas.MsgPack
    task = start(addr, env_cls, mgr_cls, *args, **kwargs)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(task)
Spawn a new environment in a given address as a coroutine .
210
13
14,036
def _set_random_seeds ( ) : try : import numpy as np np . random . seed ( ) except : pass try : import scipy as sp sp . random . seed ( ) except : pass import random random . seed ( )
Set new random seeds for the process .
53
8
14,037
async def report(self, msg, timeout=5):
    """Forward ``msg`` to the host manager and return its reply.

    Raises ConnectionError when the host manager cannot be reached.
    """
    try:
        host_manager = await self.env.connect(self.host_manager, timeout=timeout)
    except:
        raise ConnectionError("Could not reach host manager ({})."
                              .format(self.host_manager))
    return await host_manager.handle(msg)
Report message to the host manager .
75
7
14,038
def get_agents(self, addr=True, agent_cls=None, as_coro=False):
    """Get agents from the managed environment.

    NOTE(review): ``as_coro`` is accepted for interface compatibility but
    is not forwarded to the underlying environment — confirm intended.
    """
    return self.env.get_agents(addr=addr, agent_cls=agent_cls)
Get agents from the managed environment .
49
7
14,039
async def get_artifacts(self):
    """Fetch all artifacts from the host environment via its manager."""
    host_manager = await self.env.connect(self._host_manager, timeout=TIMEOUT)
    return await host_manager.get_artifacts()
Get all artifacts from the host environment .
47
8
14,040
async def spawn(self, agent_cls, *args, addr=None, **kwargs):
    """Spawn an agent into the managed environment and return its address."""
    _, agent_addr = await self.menv.spawn(agent_cls, *args, addr=addr, **kwargs)
    return agent_addr
Spawn an agent to the environment .
58
7
14,041
async def get_agents(self, addr=True, agent_cls=None):
    """Get (addresses of) all agents in all the slave environments.

    ``addr`` and ``agent_cls`` are forwarded to the underlying
    multi-environment query.
    """
    # BUG FIX: the original ignored both parameters and hard-coded
    # ``addr=True, agent_cls=None`` in the delegated call.
    return await self.menv.get_agents(addr=addr, agent_cls=agent_cls, as_coro=True)
Get addresses of all agents in all the slave environments .
50
11
14,042
async def get_connections(self, data=True):
    """Return connections for all the agents in the slave environments."""
    return await self.menv.get_connections(data=data, as_coro=True)
Return connections for all the agents in the slave environments .
38
11
14,043
def get_agents(self, addr=True, agent_cls=None, as_coro=False):
    """Gather agents from every slave environment (optionally as a coroutine)."""
    async def fetch_from_slave(mgr_addr, addr=True, agent_cls=None):
        # Ask each slave's manager for its agents.
        manager = await self.env.connect(mgr_addr, timeout=TIMEOUT)
        return await manager.get_agents(addr=addr, agent_cls=agent_cls)

    tasks = create_tasks(fetch_from_slave, self.addrs, addr, agent_cls)
    return run_or_coro(tasks, as_coro)
Get agents from the slave environments .
132
7
14,044
async def is_ready(self):
    """Check whether this environment and every slave is fully initialized."""
    async def slave_ready(addr, timeout):
        # Any connection failure or negative answer counts as not ready.
        try:
            manager = await self.env.connect(addr, timeout=timeout)
            if not await manager.is_ready():
                return False
        except:
            return False
        return True

    if not self.env.is_ready():
        return False
    if not self.check_ready():
        return False
    rets = await create_tasks(slave_ready, self.addrs, 0.5)
    return all(rets)
Check if the multi - environment has been fully initialized .
124
11
14,045
async def spawn_slaves(self, slave_addrs, slave_env_cls, slave_mgr_cls, slave_kwargs=None):
    """Spawn the slave environments and remember their manager addresses."""
    pool, run_info = spawn_containers(slave_addrs, env_cls=slave_env_cls,
                                      env_params=slave_kwargs, mgr_cls=slave_mgr_cls)
    self._pool = pool
    self._r = run_info
    # Each slave's manager lives at agent index 0 of its container.
    self._manager_addrs = ["{}{}".format(_get_base_url(a), 0) for a in slave_addrs]
Spawn slave environments .
134
4
14,046
async def _get_smallest_env(self):
    """Return the manager address of the slave env with the fewest agents."""
    async def agent_count(mgr_addr):
        manager = await self.env.connect(mgr_addr, timeout=TIMEOUT)
        agents = await manager.get_agents(addr=True)
        return mgr_addr, len(agents)

    sizes = await create_tasks(agent_count, self.addrs, flatten=False)
    # min() with a key is equivalent to sorted(...)[0] (both keep the
    # first of equal-sized candidates).
    return min(sizes, key=lambda item: item[1])[0]
Get address of the slave environment manager with the smallest number of agents .
115
14
14,047
async def spawn ( self , agent_cls , * args , addr = None , * * kwargs ) : if addr is None : addr = await self . _get_smallest_env ( ) r_manager = await self . env . connect ( addr ) return await r_manager . spawn ( agent_cls , * args , * * kwargs )
Spawn a new agent in a slave environment .
80
9
14,048
def get_connections ( self , data = True , as_coro = False ) : async def slave_task ( addr , data ) : r_manager = await self . env . connect ( addr ) return await r_manager . get_connections ( data ) tasks = create_tasks ( slave_task , self . addrs , data ) return run_or_coro ( tasks , as_coro )
Return connections from all the agents in the slave environments .
90
11
14,049
def get_artifacts ( self , agent_name = None ) : if agent_name is not None : return [ a for a in self . artifacts if agent_name == a . creator ] return self . artifacts
Get all artifacts or all artifacts published by a specific agent .
44
12
14,050
async def stop_slaves ( self , timeout = 1 ) : for addr in self . addrs : try : r_manager = await self . env . connect ( addr , timeout = timeout ) await r_manager . stop ( ) except : self . _log ( logging . WARNING , "Could not stop {}" . format ( addr ) )
Stop all the slaves by sending a stop - message to their managers .
73
14
14,051
def destroy ( self , folder = None , as_coro = False ) : async def _destroy ( folder ) : ret = self . save_info ( folder ) await self . stop_slaves ( ) # Terminate and join the process pool when we are destroyed. # Do not wait for unfinished processed with pool.close(), # the slaves should be anyway already stopped. if self . _pool is not None : self . _pool . terminate ( ) self . _pool . join ( ) await self . _env . shutdown ( as_coro = True ) return ret return run_or_coro ( _destroy ( folder ) , as_coro )
Destroy the multiprocessing environment and its slave environments .
139
12
14,052
def dump_index ( self , obj ) : if isinstance ( obj , PIDNodeOrdered ) and self . _is_child ( obj ) : return obj . index ( self . context [ 'pid' ] ) else : return None
Dump the index of the child in the relation .
50
11
14,053
def dump_is_last ( self , obj ) : if self . _is_child ( obj ) and isinstance ( obj , PIDNodeOrdered ) : if obj . children . count ( ) > 0 : return obj . children . ordered ( 'asc' ) . all ( ) [ - 1 ] == self . context [ 'pid' ] elif obj . draft_child : return obj . draft_child == self . context [ 'pid' ] else : return True else : return None
Dump the boolean stating if the child in the relation is last .
104
14
14,054
def dump_type ( self , obj ) : if not isinstance ( obj . relation_type , RelationType ) : return resolve_relation_type_config ( obj . relation_type ) . name else : return obj . relation_type . name
Dump the text name of the relation .
53
9
14,055
def dump_children ( self , obj ) : data , errors = PIDSchema ( many = True ) . dump ( obj . children . ordered ( 'asc' ) . all ( ) ) return data
Dump the siblings of a PID .
43
8
14,056
def identify_window ( pid , text ) : proc = None path = None uas = Session . query ( UserApp ) . filter ( UserApp . window_text == text ) nontext = Session . query ( UserApp ) . filter ( UserApp . window_text == None ) if uas . count ( ) : proc = psutil . Process ( pid ) try : path = proc . exe ( ) except psutil . AccessDenied : path = proc . name ( ) logger . debug ( "Trying to identify app, path=%s" , path ) app = uas . filter ( UserApp . path == path ) . first ( ) if app : return app , proc if nontext . count ( ) : if proc == None : proc = psutil . Process ( pid ) path = proc . exe ( ) app = nontext . filter ( UserApp . path == path ) . first ( ) if app : return app , proc return None , None
Identify the app associated with a window .
205
9
14,057
def _assertCALL ( self , url , * , allow_empty = False , check_headers = True , check_status = True , expect_errors = False , name = None , method = 'get' , data = None ) : self . view = resolve ( url ) . func . cls m = getattr ( self . client , method . lower ( ) ) self . filename = self . get_response_filename ( method , name or url ) response = m ( url , data = data ) assert response . accepted_renderer payload = response . data if not allow_empty and not payload : raise ValueError ( f"View {self.view} returned and empty json. Check your test" ) if response . status_code > 299 and not expect_errors : raise ValueError ( f"View {self.view} unexpected response. {response.status_code} - {response.content}" ) if not allow_empty and response . status_code == 404 : raise ValueError ( f"View {self.view} returned 404 status code. Check your test" ) if not os . path . exists ( self . filename ) or os . environ . get ( 'API_CHECKER_RESET' , False ) : _write ( self . filename , serialize_response ( response ) ) stored = load_response ( self . filename ) if ( check_status ) and response . status_code != stored . status_code : raise StatusCodeError ( self . view , response . status_code , stored . status_code ) if check_headers : self . _assert_headers ( response , stored ) self . compare ( payload , stored . data , self . filename , view = self . view )
check url for response changes
363
5
14,058
async def reboot ( ) : async with aiohttp . ClientSession ( ) as session : ghlocalapi = DeviceSettings ( LOOP , session , IPADDRESS ) result = await ghlocalapi . reboot ( ) print ( "Reboot info:" , result )
Reboot a Google Home unit .
57
7
14,059
def filter ( self , record ) : request = get_request ( ) if request : user = getattr ( request , 'user' , None ) if user and not user . is_anonymous ( ) : record . username = user . username else : record . username = '-' meta = getattr ( request , 'META' , { } ) record . remote_addr = meta . get ( 'REMOTE_ADDR' , '-' ) record . http_user_agent = meta . get ( 'HTTP_USER_AGENT' , '-' ) if not hasattr ( record , 'request' ) : record . request = request else : record . username = '-' record . remote_addr = '-' record . http_user_agent = '-' return True
Adds user and remote_addr to the record .
164
10
14,060
def status ( self ) : try : r = self . _response except AttributeError : return None else : return r . status_code , r . reason
Status da resposta recebida do Postmon .
33
11
14,061
def package_theme ( app_name , build_dir , excludes = None , includes = None , path_prefix = None , template_dirs = None ) : #pylint:disable=too-many-locals,too-many-arguments templates_dest = os . path . join ( build_dir , 'templates' ) # override STATIC_URL to prefix APP_NAME. orig_static_url = django_settings . STATIC_URL if ( app_name != settings . APP_NAME and not django_settings . STATIC_URL . startswith ( '/' + app_name ) ) : django_settings . STATIC_URL = '/' + app_name + orig_static_url if not os . path . exists ( templates_dest ) : os . makedirs ( templates_dest ) if template_dirs is None : template_dirs = get_template_search_path ( app_name ) for template_dir in template_dirs : # The first of template_dirs usually contains the most specialized # templates (ie. the ones we truely want to install). if ( templates_dest and not os . path . samefile ( template_dir , templates_dest ) ) : install_templates ( template_dir , templates_dest , excludes = excludes , includes = includes , path_prefix = path_prefix )
Package resources and templates for a multi - tier environment into a zip file .
297
15
14,062
def json_to_response ( self , action = None , json_status = None , success_url = None , json_data = None , * * response_kwargs ) : data = { "status" : self . get_status ( json_status ) , "action" : self . get_action ( action ) , "extra_data" : self . get_json_data ( json_data or { } ) } if self . action == AjaxResponseAction . REDIRECT : data [ "action_url" ] = success_url or self . get_success_url ( ) return JsonResponse ( data , * * response_kwargs )
Valid response with next action to be followed by the JS
141
11
14,063
def get_action ( self , action = None ) : if action : self . action = action if self . action not in AjaxResponseAction . choices : raise ValueError ( "Invalid action selected: '{}'" . format ( self . action ) ) return self . action
Returns action to take after call
57
6
14,064
def get_status ( self , json_status = None ) : if json_status : self . json_status = json_status if self . json_status not in AjaxResponseStatus . choices : raise ValueError ( "Invalid status selected: '{}'" . format ( self . json_status ) ) return self . json_status
Returns status of for json
71
5
14,065
def form_invalid ( self , form , prefix = None ) : response = super ( FormAjaxMixin , self ) . form_invalid ( form ) if self . request . is_ajax ( ) : data = { "errors_list" : self . add_prefix ( form . errors , prefix ) , } return self . json_to_response ( status = 400 , json_data = data , json_status = AjaxResponseStatus . ERROR ) return response
If form invalid return error list in JSON response
102
9
14,066
def form_valid ( self , form ) : response = super ( FormAjaxMixin , self ) . form_valid ( form ) if self . request . is_ajax ( ) : return self . json_to_response ( ) return response
If form valid return response with action
54
7
14,067
def add_prefix ( self , errors , prefix ) : if not prefix : prefix = self . get_prefix ( ) if prefix : return { "%s-%s" % ( prefix , k ) : v for k , v in errors . items ( ) } return errors
Add form prefix to errors
57
5
14,068
def render_to_response ( self , context , * * response_kwargs ) : if self . request . is_ajax ( ) : data = { "content" : render_to_string ( self . get_template_names ( ) , context , request = self . request ) } return JsonResponse ( data ) if settings . DEBUG : return super ( PartialAjaxMixin , self ) . render_to_response ( context , * * response_kwargs ) raise Http404 ( )
Returns the rendered template in JSON format
109
7
14,069
def random_init_map ( interface , state , label , inp ) : import random out = interface . output ( 0 ) centers = { } for row in inp : row = row . strip ( ) . split ( state [ "delimiter" ] ) if len ( row ) > 1 : x = [ ( 0 if row [ i ] in state [ "missing_vals" ] else float ( row [ i ] ) ) for i in state [ "X_indices" ] ] cluster = random . randint ( 0 , state [ 'k' ] - 1 ) vertex = state [ 'create' ] ( x , 1.0 ) centers [ cluster ] = vertex if cluster not in centers else state [ "update" ] ( centers [ cluster ] , vertex ) for cluster , values in centers . iteritems ( ) : out . add ( cluster , values )
Assign datapoint e randomly to one of the k clusters .
184
14
14,070
def estimate_map ( interface , state , label , inp ) : out = interface . output ( 0 ) centers = { } for row in inp : row = row . strip ( ) . split ( state [ "delimiter" ] ) if len ( row ) > 1 : x = [ ( 0 if row [ i ] in state [ "missing_vals" ] else float ( row [ i ] ) ) for i in state [ "X_indices" ] ] cluster = min ( ( state [ 'dist' ] ( c , x ) , i ) for i , c in state [ 'centers' ] ) [ 1 ] vertex = state [ 'create' ] ( x , 1.0 ) centers [ cluster ] = vertex if cluster not in centers else state [ "update" ] ( centers [ cluster ] , vertex ) for cluster , values in centers . iteritems ( ) : out . add ( cluster , values )
Find the cluster i that is closest to the datapoint e .
197
14
14,071
def estimate_reduce ( interface , state , label , inp ) : centers = { } for i , c in inp : centers [ i ] = c if i not in centers else state [ 'update' ] ( centers [ i ] , c ) out = interface . output ( 0 ) for i , c in centers . items ( ) : out . add ( i , state [ 'finalize' ] ( c ) )
Estimate the cluster centers for each cluster .
90
9
14,072
def predict_map ( interface , state , label , inp ) : out = interface . output ( 0 ) for row in inp : if len ( row ) > 1 : row = row . strip ( ) . split ( state [ "delimiter" ] ) x_id = "" if state [ "id_index" ] == - 1 else row [ state [ "id_index" ] ] x = [ ( 0 if row [ i ] in state [ "missing_vals" ] else float ( row [ i ] ) ) for i in state [ "X_indices" ] ] out . add ( x_id , min ( [ ( i , state [ "dist" ] ( c , x ) ) for i , c in state [ "centers" ] ] , key = lambda t : t [ 1 ] ) )
Determine the closest cluster for the datapoint e .
177
13
14,073
def fit ( dataset , n_clusters = 5 , max_iterations = 10 , random_state = None , save_results = True , show = False ) : from disco . job import Job from disco . worker . pipeline . worker import Worker , Stage from disco . core import result_iterator try : n_clusters = int ( n_clusters ) max_iterations = int ( max_iterations ) if n_clusters < 2 : raise Exception ( "Parameter n_clusters should be greater than 1." ) if max_iterations < 1 : raise Exception ( "Parameter max_iterations should be greater than 0." ) except ValueError : raise Exception ( "Parameters should be numerical." ) job = Job ( worker = Worker ( save_results = save_results ) ) job . pipeline = [ ( "split" , Stage ( "kmeans_init_map" , input_chain = dataset . params [ "input_chain" ] , init = map_init , process = random_init_map ) ) , ( 'group_label' , Stage ( "kmeans_init_reduce" , process = estimate_reduce , init = simple_init , combine = True ) ) ] job . params = dict ( dataset . params . items ( ) + mean_point_center . items ( ) ) job . params [ 'seed' ] = random_state job . params [ 'k' ] = n_clusters job . run ( input = dataset . params [ "data_tag" ] , name = "kmeans_init" ) init = job . wait ( show = show ) centers = [ ( i , c ) for i , c in result_iterator ( init ) ] for j in range ( max_iterations ) : job = Job ( worker = Worker ( save_results = save_results ) ) job . params = dict ( dataset . params . items ( ) + mean_point_center . items ( ) ) job . params [ 'k' ] = n_clusters job . params [ 'centers' ] = centers job . pipeline = [ ( 'split' , Stage ( "kmeans_map_iter_%s" % ( j + 1 , ) , input_chain = dataset . params [ "input_chain" ] , process = estimate_map , init = simple_init ) ) , ( 'group_label' , Stage ( "kmeans_reduce_iter_%s" % ( j + 1 , ) , process = estimate_reduce , init = simple_init , combine = True ) ) ] job . run ( input = dataset . params [ "data_tag" ] , name = 'kmeans_iter_%d' % ( j + 1 , ) ) fitmodel_url = job . 
wait ( show = show ) centers = [ ( i , c ) for i , c in result_iterator ( fitmodel_url ) ] return { "kmeans_fitmodel" : fitmodel_url }
Optimize k - clustering for iterations iterations with cluster center definitions as given in center .
640
18
14,074
def predict ( dataset , fitmodel_url , save_results = True , show = False ) : from disco . job import Job from disco . worker . pipeline . worker import Worker , Stage from disco . core import result_iterator if "kmeans_fitmodel" not in fitmodel_url : raise Exception ( "Incorrect fit model." ) job = Job ( worker = Worker ( save_results = save_results ) ) job . params = dict ( dataset . params . items ( ) + mean_point_center . items ( ) ) job . params [ "centers" ] = [ ( i , c ) for i , c in result_iterator ( fitmodel_url [ "kmeans_fitmodel" ] ) ] job . pipeline = [ ( "split" , Stage ( "kmeans_predict" , input_chain = dataset . params [ "input_chain" ] , init = simple_init , process = predict_map ) ) ] job . run ( input = dataset . params [ "data_tag" ] , name = "kmeans_predict" ) return job . wait ( show = show )
Predict the closest clusters for the datapoints in input .
242
14
14,075
def load_stdlib ( ) : if _stdlib : return _stdlib prefixes = tuple ( { os . path . abspath ( p ) for p in ( sys . prefix , getattr ( sys , 'real_prefix' , sys . prefix ) , getattr ( sys , 'base_prefix' , sys . prefix ) , ) } ) for sp in sys . path : if not sp : continue _import_paths . append ( os . path . abspath ( sp ) ) stdpaths = tuple ( { p for p in _import_paths if p . startswith ( prefixes ) and 'site-packages' not in p } ) _stdlib . update ( sys . builtin_module_names ) for stdpath in stdpaths : if not os . path . isdir ( stdpath ) : continue for item in os . listdir ( stdpath ) : if item . startswith ( '.' ) or item == 'site-packages' : continue p = os . path . join ( stdpath , item ) if not os . path . isdir ( p ) and not item . endswith ( ( '.py' , '.so' ) ) : continue _stdlib . add ( item . split ( '.' , 1 ) [ 0 ] ) return _stdlib
Scans sys . path for standard library modules .
278
10
14,076
def import_path_from_file ( filename , as_list = False ) : module_path = [ ] basename = os . path . splitext ( os . path . basename ( filename ) ) [ 0 ] if basename != '__init__' : module_path . append ( basename ) dirname = os . path . dirname ( filename ) while os . path . isfile ( os . path . join ( dirname , '__init__.py' ) ) : dirname , tail = os . path . split ( dirname ) module_path . insert ( 0 , tail ) if as_list : return module_path , dirname return '.' . join ( module_path ) , dirname
Returns a tuple of the import path and root module directory for the supplied file .
155
16
14,077
def file_containing_import ( import_path , import_root ) : if not _import_paths : load_stdlib ( ) if os . path . isfile ( import_root ) : import_root = os . path . dirname ( import_root ) search_paths = [ import_root ] + _import_paths module_parts = import_path . split ( '.' ) for i in range ( len ( module_parts ) , 0 , - 1 ) : module_path = os . path . join ( * module_parts [ : i ] ) for sp in search_paths : p = os . path . join ( sp , module_path ) if os . path . isdir ( p ) : return os . path . join ( p , '__init__.py' ) elif os . path . isfile ( p + '.py' ) : return p + '.py' return None
Finds the file that might contain the import_path .
198
12
14,078
def resolve_import ( import_path , from_module ) : if not import_path or not import_path . startswith ( '.' ) : return import_path from_module = from_module . split ( '.' ) dots = 0 for c in import_path : if c == '.' : dots += 1 else : break if dots : from_module = from_module [ : - dots ] import_path = import_path [ dots : ] if import_path : from_module . append ( import_path ) return '.' . join ( from_module )
Resolves relative imports from a module .
123
8
14,079
def find_package ( name , installed , package = False ) : if package : name = name . lower ( ) tests = ( lambda x : x . user and name == x . name . lower ( ) , lambda x : x . local and name == x . name . lower ( ) , lambda x : name == x . name . lower ( ) , ) else : tests = ( lambda x : x . user and name in x . import_names , lambda x : x . local and name in x . import_names , lambda x : name in x . import_names , ) for t in tests : try : found = list ( filter ( t , installed ) ) if found and not found [ 0 ] . is_scan : return found [ 0 ] except StopIteration : pass return None
Finds a package in the installed list .
166
9
14,080
def is_script ( filename ) : if not os . path . isfile ( filename ) : return False try : with open ( filename , 'rb' ) as fp : return fp . read ( 2 ) == b'#!' except IOError : pass return False
Checks if a file has a hashbang .
57
10
14,081
def is_python_script ( filename ) : if filename . lower ( ) . endswith ( '.py' ) : return True if not os . path . isfile ( filename ) : return False try : with open ( filename , 'rb' ) as fp : if fp . read ( 2 ) != b'#!' : return False return re . match ( r'.*python' , str_ ( fp . readline ( ) ) ) except IOError : pass return False
Checks a file to see if it s a python script of some sort .
103
16
14,082
def search ( self , key_pattern : str , user_pattern : str ) -> List [ Entry ] : # normalize key key_pattern = _normalized_key ( key_pattern ) # search results = [ ] for entry in self . entries : if key_pattern in entry . key and user_pattern in entry . user : results . append ( entry ) # sort results according to key (stability of sorted() ensures that the order of accounts for any given key remains untouched) return sorted ( results , key = lambda e : e . key )
Search database for given key and user pattern .
116
9
14,083
def load ( path : str ) -> "Store" : # load source (decrypting if necessary) if _gpg . is_encrypted ( path ) : src_bytes = _gpg . decrypt ( path ) else : src_bytes = open ( path , "rb" ) . read ( ) src = src_bytes . decode ( "utf-8" ) # parse database source ext = _gpg . unencrypted_ext ( path ) assert ext not in [ ".yml" , ".yaml" , ] , "YAML support was removed in version 0.12.0" entries = _parse_entries ( src ) return Store ( path , entries )
Load password store from file .
144
6
14,084
def only_passed_and_wait ( result ) : verdict = result . get ( "verdict" , "" ) . strip ( ) . lower ( ) if verdict in Verdicts . PASS + Verdicts . WAIT : return result return None
Returns PASS and WAIT results only skips everything else .
54
12
14,085
def insert_source_info ( result ) : comment = result . get ( "comment" ) # don't change comment if it already exists if comment : return source = result . get ( "source" ) job_name = result . get ( "job_name" ) run = result . get ( "run" ) source_list = [ source , job_name , run ] if not all ( source_list ) : return source_note = "/" . join ( source_list ) source_note = "Source: {}" . format ( source_note ) result [ "comment" ] = source_note
Adds info about source of test result if available .
129
10
14,086
def setup_parametrization ( result , parametrize ) : if parametrize : # remove parameters from title title = result . get ( "title" ) if title : result [ "title" ] = TEST_PARAM_RE . sub ( "" , title ) else : # don't parametrize if not specifically configured if "params" in result : del result [ "params" ]
Modifies result s data according to the parametrization settings .
87
14
14,087
def include_class_in_title ( result ) : classname = result . get ( "classname" , "" ) if classname : filepath = result . get ( "file" , "" ) title = result . get ( "title" ) if title and "/" in filepath and "." in classname : fname = filepath . split ( "/" ) [ - 1 ] . replace ( ".py" , "" ) last_classname = classname . split ( "." ) [ - 1 ] # last part of classname is not file name if fname != last_classname and last_classname not in title : result [ "title" ] = "{}.{}" . format ( last_classname , title ) # we don't need to pass classnames? del result [ "classname" ]
Makes sure that test class is included in title .
175
11
14,088
def parse_rst_description ( testcase ) : description = testcase . get ( "description" ) if not description : return try : with open ( os . devnull , "w" ) as devnull : testcase [ "description" ] = publish_parts ( description , writer_name = "html" , settings_overrides = { "report_level" : 2 , "halt_level" : 2 , "warning_stream" : devnull } , ) [ "html_body" ] # pylint: disable=broad-except except Exception as exp : testcase_id = testcase . get ( "nodeid" ) or testcase . get ( "id" ) or testcase . get ( "title" ) logger . error ( "%s: description: %s" , str ( exp ) , testcase_id )
Creates an HTML version of the RST formatted description .
183
12
14,089
def preformat_plain_description ( testcase ) : description = testcase . get ( "description" ) if not description : return # naive approach to removing indent from pytest docstrings nodeid = testcase . get ( "nodeid" ) or "" indent = None if "::Test" in nodeid : indent = 8 * " " elif "::test_" in nodeid : indent = 4 * " " if indent : orig_lines = description . split ( "\n" ) new_lines = [ ] for line in orig_lines : if line . startswith ( indent ) : line = line . replace ( indent , "" , 1 ) new_lines . append ( line ) description = "\n" . join ( new_lines ) testcase [ "description" ] = "<pre>\n{}\n</pre>" . format ( description )
Creates a preformatted HTML version of the description .
183
12
14,090
def add_unique_runid ( testcase , run_id = None ) : testcase [ "description" ] = '{}<br id="{}"/>' . format ( testcase . get ( "description" ) or "" , run_id or id ( add_unique_runid ) )
Adds run id to the test description .
66
8
14,091
def add_automation_link ( testcase ) : automation_link = ( '<a href="{}">Test Source</a>' . format ( testcase [ "automation_script" ] ) if testcase . get ( "automation_script" ) else "" ) testcase [ "description" ] = "{}<br/>{}" . format ( testcase . get ( "description" ) or "" , automation_link )
Appends link to automation script to the test description .
98
11
14,092
def image ( self ) : r = requests . get ( self . image_url , stream = True ) r . raise_for_status ( ) return r . raw . read ( )
Return an image of the structure of the compound
39
9
14,093
def add_numbers ( ) : a = request . params . get ( 'a' , 0 , type = int ) b = request . params . get ( 'b' , 0 , type = int ) return json . dumps ( { 'result' : a + b } )
Add two numbers server side ridiculous but well ...
59
9
14,094
def entry_archive_year_url ( ) : entry = Entry . objects . filter ( published = True ) . latest ( ) arg_list = [ entry . published_on . strftime ( "%Y" ) ] return reverse ( 'blargg:entry_archive_year' , args = arg_list )
Renders the entry_archive_year URL for the latest Entry .
67
14
14,095
def _extract_parameters_from_properties ( properties ) : new_properties = { } parameters = [ ] for key , value in six . iteritems ( properties ) : if key . startswith ( _PARAMETER_PREFIX ) : parameters . append ( ( key . replace ( _PARAMETER_PREFIX , "" ) , value ) ) else : new_properties [ key ] = value return new_properties , sorted ( parameters )
Extracts parameters from properties .
100
7
14,096
def import_junit ( junit_file , * * kwargs ) : xml_root = _get_xml_root ( junit_file ) results = [ ] for test_data in xml_root : if test_data . tag != "testcase" : continue verdict , comment , properties = _parse_testcase_record ( test_data ) properties , parameters = _extract_parameters_from_properties ( properties ) title = test_data . get ( "name" ) classname = test_data . get ( "classname" ) time = test_data . get ( "time" , 0 ) filepath = test_data . get ( "file" ) data = [ ( "title" , title ) , ( "classname" , classname ) , ( "verdict" , verdict ) , ( "comment" , comment ) , ( "time" , time ) , ( "file" , filepath ) , ] for key in sorted ( properties ) : data . append ( ( key , properties [ key ] ) ) if parameters : data . append ( ( "params" , OrderedDict ( parameters ) ) ) results . append ( OrderedDict ( data ) ) return xunit_exporter . ImportedData ( results = results , testrun = None )
Reads the content of the junit - results file produced by pytest and returns imported data .
278
20
14,097
def libs ( ) : for name , description , version , url in gen_frameworks ( ) : print name print '' . join ( '-' for _ in xrange ( len ( name ) ) ) print description . strip ( '/*\n ' ) print version . strip ( '/*\n ' ) print url . strip ( '/*\n ' ) print
Show zeta libs
77
5
14,098
def watch ( args ) : assert op . isdir ( args . source ) , "Watch mode allowed only for directories." print 'Zeta-library v. %s watch mode' % VERSION print '================================' print 'Ctrl+C for exit\n' observer = Observer ( ) handler = ZetaTrick ( args = args ) observer . schedule ( handler , args . source , recursive = True ) observer . start ( ) try : while True : time . sleep ( 1 ) except KeyboardInterrupt : observer . stop ( ) print "\nWatch mode stoped." observer . join ( )
Watch directory for changes and auto pack sources
124
8
14,099
def pack ( args ) : assert op . exists ( args . source ) , "Does not exists: %s" % args . source zeta_pack ( args )
Parse file or dir import css js code and save with prefix
35
14