idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
15,700
def move_file_to_file ( old_path , new_path ) : try : os . rename ( old_path , new_path ) except : old_file = os . path . basename ( old_path ) target_directory , target_file = os . path . dirname ( os . path . abspath ( new_path ) ) , os . path . basename ( new_path ) Document . move_file_to_directory ( old_path , target_directory ) # move old file to new directory, change name to new name os . rename ( os . path . join ( target_directory , old_file ) , os . path . join ( target_directory , target_file ) )
Moves file from old location to new one
153
9
15,701
def write_data ( self , data ) : with open ( self . path , "w" ) as writer : writer . write ( data )
Writes given data to given path file
30
8
15,702
def get_path_name ( self ) : path = fix_raw_path ( os . path . dirname ( os . path . abspath ( self . path ) ) ) name = os . path . basename ( self . path ) return path , name
Gets path and name of song
55
7
15,703
def get_path_name ( self ) : complete_path = os . path . dirname ( os . path . abspath ( self . path ) ) name = self . path . replace ( complete_path + PATH_SEPARATOR , "" ) if name . endswith ( "/" ) : name = name [ : - 1 ] return complete_path , name
Gets path and name of file
78
7
15,704
def save ( self , page , language , data , change , extra_data = None ) : # if this placeholder is untranslated, we save everything # in the default language if self . untranslated : language = settings . PAGE_DEFAULT_LANGUAGE # the page is being changed if change : # we need create a new content if revision is enabled if ( settings . PAGE_CONTENT_REVISION and self . name not in settings . PAGE_CONTENT_REVISION_EXCLUDE_LIST ) : Content . objects . create_content_if_changed ( page , language , self . name , data ) else : Content . objects . set_or_create_content ( page , language , self . name , data ) # the page is being added else : Content . objects . set_or_create_content ( page , language , self . name , data )
Actually save the placeholder data into the Content object .
190
10
15,705
def render ( self , context ) : content = mark_safe ( self . get_content_from_context ( context ) ) if not content : return '' if self . parsed : try : t = template . Template ( content , name = self . name ) content = mark_safe ( t . render ( context ) ) except TemplateSyntaxError as error : if global_settings . DEBUG : content = PLACEHOLDER_ERROR % { 'name' : self . name , 'error' : error , } else : content = '' if self . as_varname is None : return content context [ self . as_varname ] = content return ''
Output the content of the PlaceholdeNode in the template .
140
13
15,706
def from_string ( cls , action_str ) : args = { } try : mod_obj = ast . parse ( action_str ) except ( SyntaxError , ValueError ) as e : raise e else : call_obj = mod_obj . body [ 0 ] . value if isinstance ( call_obj , ast . Attribute ) : # Seems like we have a simple function name # (for example `module.function`) module = call_obj . value . id func = call_obj . attr elif isinstance ( call_obj , ast . Call ) : # Seems like we have a function call, maybe with # a few parameters. # Note that we only support `module.function()` format. # You can't use `function()`. try : module = call_obj . func . value . id func = call_obj . func . attr except AttributeError : raise UnsupportedActionError ( action_str ) else : # If we have arguments, they MUST be named: for kwarg in call_obj . keywords : # We only support Strings and Numerics: if isinstance ( kwarg . value , ast . Num ) : args . update ( { kwarg . arg : kwarg . value . n } ) elif isinstance ( kwarg . value , ast . Str ) : args . update ( { kwarg . arg : kwarg . value . s } ) else : raise UnsupportedActionArgumentError ( action_str , kwarg ) else : raise UnsupportedActionError ( action_str ) return cls ( module , func , args )
Creates a new Action instance from the given string .
348
11
15,707
def sanitize ( self , content ) : import html5lib from html5lib import sanitizer p = html5lib . HTMLParser ( tokenizer = sanitizer . HTMLSanitizer ) dom_tree = p . parseFragment ( content ) return dom_tree . text
Sanitize a string in order to avoid possible XSS using html5lib .
61
17
15,708
def consume ( self , cwd = None ) : first_pass = Grammar . overall . parseString ( self . string ) lowered = { key . lower ( ) : val for key , val in first_pass . iteritems ( ) } self . commands = [ '\n' . join ( self . _get ( 'commands' , lowered ) ) ] self . job_options = self . _get ( 'job_options' , lowered ) self . global_options = self . _get ( 'options' , lowered ) self . files = self . _get ( 'files' , lowered ) self . paths = self . _get ( 'paths' , lowered ) self . files = self . _parse ( self . files , Grammar . file , True ) self . paths = self . _parse ( self . paths , Grammar . path , True ) self . job_options = self . _parse ( self . job_options , Grammar . line ) try : command_lines = self . _parse ( self . commands , Grammar . command_lines ) [ 0 ] except IndexError : raise ValueError ( 'Did you write any commands?' ) self . commands = [ ] for command_line in command_lines : comments , command = command_line self . commands . append ( [ comments . asList ( ) , self . _parse ( [ '' . join ( command ) ] , Grammar . command ) ] ) self . job_options = [ opt . asList ( ) for opt in self . job_options ] self . paths = ctf . get_paths ( self . paths ) self . files = ctf . get_files ( self . files ) self . paths . reverse ( ) self . files . reverse ( ) self . commands . reverse ( ) return ctf . get_command_templates ( self . commands , self . files [ : ] , self . paths [ : ] , self . job_options )
Converts the lexer tokens into valid statements . This process also checks command syntax .
413
17
15,709
def _get ( self , key , parser_result ) : try : list_data = parser_result [ key ] . asList ( ) if any ( isinstance ( obj , str ) for obj in list_data ) : txt_lines = [ '' . join ( list_data ) ] else : txt_lines = [ '' . join ( f ) for f in list_data ] except KeyError : txt_lines = [ ] return txt_lines
Given a type and a dict of parser results return the items as a list .
100
16
15,710
def _parse ( self , lines , grammar , ignore_comments = False ) : results = [ ] for c in lines : if c != '' and not ( ignore_comments and c [ 0 ] == '#' ) : try : results . append ( grammar . parseString ( c ) ) except pyparsing . ParseException as e : raise ValueError ( 'Invalid syntax. Verify line {} is ' 'correct.\n{}\n\n{}' . format ( e . lineno , c , e ) ) return results
Given a type and a list parse it using the more detailed parse grammar .
114
15
15,711
def objectprep ( self ) : # Move the files to subfolders and create objects self . runmetadata = createobject . ObjectCreation ( self ) if self . extension == 'fastq' : # To streamline the CLARK process, decompress and combine .gz and paired end files as required logging . info ( 'Decompressing and combining .fastq files for CLARK analysis' ) fileprep . Fileprep ( self ) else : logging . info ( 'Using .fasta files for CLARK analysis' ) for sample in self . runmetadata . samples : sample . general . combined = sample . general . fastqfiles [ 0 ]
Create objects to store data and metadata for each sample . Also perform necessary file manipulations
135
17
15,712
def settargets ( self ) : # Define the set targets call. Include the path to the script, the database path and files, as well # as the taxonomic rank to use logging . info ( 'Setting up database' ) self . targetcall = 'cd {} && ./set_targets.sh {} {} --{}' . format ( self . clarkpath , self . databasepath , self . database , self . rank ) # subprocess . call ( self . targetcall , shell = True , stdout = self . devnull , stderr = self . devnull )
Set the targets to be used in the analyses . Involves the path of the database files the database files to use and the level of classification for the analysis
129
31
15,713
def classifymetagenome ( self ) : logging . info ( 'Classifying metagenomes' ) # Define the system call self . classifycall = 'cd {} && ./classify_metagenome.sh -O {} -R {} -n {} --light' . format ( self . clarkpath , self . filelist , self . reportlist , self . cpus ) # Variable to store classification state classify = True for sample in self . runmetadata . samples : try : # Define the name of the .csv classification file sample . general . classification = sample . general . combined . split ( '.' ) [ 0 ] + '.csv' # If the file exists, then set classify to False if os . path . isfile ( sample . general . classification ) : classify = False except KeyError : pass # Run the system call if the samples have not been classified if classify : # Run the call subprocess . call ( self . classifycall , shell = True , stdout = self . devnull , stderr = self . devnull )
Run the classify metagenome of the CLARK package on the samples
221
14
15,714
def lists ( self ) : # Prepare the lists to be used to classify the metagenomes with open ( self . filelist , 'w' ) as filelist : with open ( self . reportlist , 'w' ) as reportlist : for sample in self . runmetadata . samples : if self . extension == 'fastq' : try : status = sample . run . Description if status == 'metagenome' : filelist . write ( sample . general . combined + '\n' ) reportlist . write ( sample . general . combined . split ( '.' ) [ 0 ] + '\n' ) except AttributeError : pass else : if sample . general . combined != 'NA' : filelist . write ( sample . general . combined + '\n' ) reportlist . write ( sample . general . combined . split ( '.' ) [ 0 ] + '\n' )
Prepare the list of files to be processed
190
9
15,715
def estimateabundance ( self ) : logging . info ( 'Estimating abundance of taxonomic groups' ) # Create and start threads for i in range ( self . cpus ) : # Send the threads to the appropriate destination function threads = Thread ( target = self . estimate , args = ( ) ) # Set the daemon to true - something to do with thread management threads . setDaemon ( True ) # Start the threading threads . start ( ) with progressbar ( self . runmetadata . samples ) as bar : for sample in bar : try : if sample . general . combined != 'NA' : # Set the name of the abundance report sample . general . abundance = sample . general . combined . split ( '.' ) [ 0 ] + '_abundance.csv' # if not hasattr(sample, 'commands'): if not sample . commands . datastore : sample . commands = GenObject ( ) # Define system calls sample . commands . target = self . targetcall sample . commands . classify = self . classifycall sample . commands . abundancecall = 'cd {} && ./estimate_abundance.sh -D {} -F {} > {}' . format ( self . clarkpath , self . databasepath , sample . general . classification , sample . general . abundance ) self . abundancequeue . put ( sample ) except KeyError : pass self . abundancequeue . join ( )
Estimate the abundance of taxonomic groups
299
8
15,716
def get_command_templates ( command_tokens , file_tokens = [ ] , path_tokens = [ ] , job_options = [ ] ) : files = get_files ( file_tokens ) paths = get_paths ( path_tokens ) job_options = get_options ( job_options ) templates = _get_command_templates ( command_tokens , files , paths , job_options ) for command_template in templates : command_template . _dependencies = _get_prelim_dependencies ( command_template , templates ) return templates
Given a list of tokens from the grammar return a list of commands .
134
14
15,717
def get_files ( file_tokens , cwd = None ) : if not file_tokens : return [ ] token = file_tokens . pop ( ) try : filename = token . filename except AttributeError : filename = '' if cwd : input = Input ( token . alias , filename , cwd = cwd ) else : input = Input ( token . alias , filename ) return [ input ] + get_files ( file_tokens )
Given a list of parser file tokens return a list of input objects for them .
101
16
15,718
def get_paths ( path_tokens ) : if len ( path_tokens ) == 0 : return [ ] token = path_tokens . pop ( ) path = PathToken ( token . alias , token . path ) return [ path ] + get_paths ( path_tokens )
Given a list of parser path tokens return a list of path objects for them .
68
16
15,719
def _get_command_templates ( command_tokens , files = [ ] , paths = [ ] , job_options = [ ] , count = 1 ) : if not command_tokens : return [ ] comment_tokens , command_token = command_tokens . pop ( ) parts = [ ] parts += job_options + _get_comments ( comment_tokens ) for part in command_token [ 0 ] : # Check for file try : parts . append ( _get_file_by_alias ( part , files ) ) continue except ( AttributeError , ValueError ) : pass # Check for path/string for cut in part . split ( ) : try : parts . append ( _get_path_by_name ( cut , paths ) ) continue except ValueError : pass parts . append ( cut ) command_template = CommandTemplate ( alias = str ( count ) , parts = parts ) [ setattr ( p , 'alias' , command_template . alias ) for p in command_template . output_parts ] return [ command_template ] + _get_command_templates ( command_tokens , files , paths , job_options , count + 1 )
Reversivly create command templates .
259
8
15,720
def _get_prelim_dependencies ( command_template , all_templates ) : deps = [ ] for input in command_template . input_parts : if '.' not in input . alias : continue for template in all_templates : for output in template . output_parts : if input . fuzzy_match ( output ) : deps . append ( template ) break return list ( set ( deps ) )
Given a command_template determine which other templates it depends on . This should not be used as the be - all end - all of dependencies and before calling each command ensure that it s requirements are met .
91
41
15,721
def _is_output ( part ) : if part [ 0 ] . lower ( ) == 'o' : return True elif part [ 0 ] [ : 2 ] . lower ( ) == 'o:' : return True elif part [ 0 ] [ : 2 ] . lower ( ) == 'o.' : return True else : return False
Returns whether the given part represents an output variable .
71
10
15,722
def search_browser ( self , text ) : self . impl . get ( self . base_url ) search_div = self . impl . find_element_by_id ( "search" ) search_term = search_div . find_element_by_id ( "term" ) search_term . send_keys ( text ) search_div . find_element_by_id ( "submit" ) . click ( ) e = self . impl . find_element_by_css_selector ( "table.list tr td a" ) return e . get_attribute ( "href" )
do a slow search via the website and return the first match
129
12
15,723
def search_fast ( self , text ) : resp = self . impl . get ( "{base_url}/{text}/json" . format ( base_url = self . base_url , text = text ) ) return resp . json ( ) [ "info" ] [ "package_url" ]
do a sloppy quick search via the json index
66
9
15,724
def main ( search , query ) : url = search . search ( query ) print ( url ) search . open_page ( url )
main function that does the search
28
6
15,725
def cli_main ( ) : SearchContext . commit ( ) args = parser . parse_args ( ) # open up a browser firefox_remote = Remote ( "http://127.0.0.1:4444/wd/hub" , DesiredCapabilities . FIREFOX ) with contextlib . closing ( firefox_remote ) : context = SearchContext . from_instances ( [ FastSearch ( ) , Browser ( firefox_remote ) ] ) search = Search ( parent = context ) if args . fast : with context . use ( FastSearch , Browser ) : main ( search , args . query ) else : with context . use ( Browser ) : main ( search , args . query )
cli entrypoitns sets up everything needed
149
9
15,726
def camel_to_underscore ( name ) : name = re . sub ( r'(?<!\b)(?<!_)([A-Z][a-z])' , r'_\1' , name ) name = re . sub ( r'(?<!\b)(?<!_)([a-z])([A-Z])' , r'\1_\2' , name ) name = name . lower ( ) return name
Convert camel case name to underscore name .
104
9
15,727
def main_func ( args = None ) : # we have to initialize a gui even if we dont need one right now. # as soon as you call maya.standalone.initialize(), a QApplication # with type Tty is created. This is the type for conosle apps. # Because i have not found a way to replace that, we just init the gui. guimain . init_gui ( ) main . init ( ) launcher = Launcher ( ) parsed , unknown = launcher . parse_args ( args ) parsed . func ( parsed , unknown )
Main funcion when executing this module as script
119
9
15,728
def setup_launch_parser ( self , parser ) : parser . set_defaults ( func = self . launch ) parser . add_argument ( "addon" , help = "The jukebox addon to launch. The addon should be a standalone plugin." )
Setup the given parser for the launch command
55
8
15,729
def parse_args ( self , args = None ) : if args is None : args = sys . argv [ 1 : ] return self . parser . parse_known_args ( args )
Parse the given arguments
40
5
15,730
def list_objects ( self , prefix = None , delimiter = None ) : return self . _client . list_objects ( instance = self . _instance , bucket_name = self . name , prefix = prefix , delimiter = delimiter )
List the objects for this bucket .
52
7
15,731
def upload_object ( self , object_name , file_obj ) : return self . _client . upload_object ( self . _instance , self . name , object_name , file_obj )
Upload an object to this bucket .
43
7
15,732
def delete_object ( self , object_name ) : self . _client . remove_object ( self . _instance , self . name , object_name )
Remove an object from this bucket .
34
7
15,733
def objects ( self ) : return [ ObjectInfo ( o , self . _instance , self . _bucket , self . _client ) for o in self . _proto . object ]
The objects in this listing .
40
6
15,734
def delete ( self ) : self . _client . remove_object ( self . _instance , self . _bucket , self . name )
Remove this object .
30
4
15,735
def download ( self ) : return self . _client . download_object ( self . _instance , self . _bucket , self . name )
Download this object .
31
4
15,736
def upload ( self , file_obj ) : return self . _client . upload_object ( self . _instance , self . _bucket , self . name , file_obj )
Replace the content of this object .
39
8
15,737
def moving_average ( arr : np . ndarray , n : int = 3 ) -> np . ndarray : ret = np . cumsum ( arr , dtype = float ) ret [ n : ] = ret [ n : ] - ret [ : - n ] return ret [ n - 1 : ] / n
Calculate the moving overage over an array .
69
11
15,738
def recursive_getattr ( obj : Any , attr : str , * args ) -> Any : def _getattr ( obj , attr ) : return getattr ( obj , attr , * args ) return functools . reduce ( _getattr , [ obj ] + attr . split ( '.' ) )
Recursive getattar .
68
6
15,739
def recursive_setattr ( obj : Any , attr : str , val : Any ) -> Any : pre , _ , post = attr . rpartition ( '.' ) return setattr ( recursive_getattr ( obj , pre ) if pre else obj , post , val )
Recusrive setattr .
60
6
15,740
def recursive_getitem ( d : Mapping [ str , Any ] , keys : Union [ str , Sequence [ str ] ] ) -> Any : # If only a string, then just just return the item if isinstance ( keys , str ) : return d [ keys ] else : return functools . reduce ( operator . getitem , keys , d )
Recursively retrieve an item from a nested dict .
75
11
15,741
def get_array_for_fit ( observables : dict , track_pt_bin : int , jet_pt_bin : int ) -> histogram . Histogram1D : for name , observable in observables . items ( ) : if observable . track_pt_bin == track_pt_bin and observable . jet_pt_bin == jet_pt_bin : return histogram . Histogram1D . from_existing_hist ( observable . hist ) raise ValueError ( "Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}" )
Get a Histogram1D associated with the selected jet and track pt bins .
131
16
15,742
def epcrparsethreads ( self ) : from Bio import SeqIO # Create the threads for the BLAST analysis for sample in self . metadata : if sample . general . bestassemblyfile != 'NA' : threads = Thread ( target = self . epcrparse , args = ( ) ) threads . setDaemon ( True ) threads . start ( ) for sample in self . metadata : if sample . general . bestassemblyfile != 'NA' : if sample [ self . analysistype ] . primers != 'NA' : # Initialise a dictionary to store the SeqIO records of each assembly record = dict ( ) # Initialise dictionaries to store results in the object sample [ self . analysistype ] . blastresults = dict ( ) sample [ self . analysistype ] . rawblastresults = dict ( ) # Load the records from the assembly into the dictionary for rec in SeqIO . parse ( sample . general . bestassemblyfile , 'fasta' ) : record [ rec . id ] = str ( rec . seq ) # Iterate through the ePCR results for line in sample [ self . analysistype ] . epcrresults : # The data of interest is in the lines that do not start with a # # TLH 2016-SEQ-0359_4_length_321195_cov_28.6354_ID_3773 + 227879 228086 0 0 208/1000-1000 if not line . startswith ( '#' ) : # Add the variables to the queue self . epcrparsequeue . put ( ( sample , record , line ) ) self . epcrparsequeue . join ( )
Parse the ePCR results and run BLAST on the parsed results
357
15
15,743
def epcrparse ( self ) : from Bio . Blast . Applications import NcbiblastnCommandline while True : sample , record , line = self . epcrparsequeue . get ( ) # Split the data on tabs gene , chromosome , strand , start , end , m_match , gaps , act_len_exp_len = line . split ( '\t' ) # Extract the gene sequence from the contigs # The record dictionary has the contig name, and the sequence. Splice out the data using the start and # end coordinates specified by ePCR genesequence = record [ chromosome ] [ int ( start ) - 1 : int ( end ) ] # Set up BLASTn using blastn-short, as the probe sequences tend to be very short blastn = NcbiblastnCommandline ( db = sample [ self . analysistype ] . probes . split ( '.' ) [ 0 ] , num_threads = 12 , task = 'blastn-short' , num_alignments = 1 , outfmt = "'6 qseqid sseqid positive mismatch gaps " "evalue bitscore slen length'" ) # Run the BLASTn, with the gene sequence as stdin out , err = blastn ( stdin = genesequence ) # Split the output string on tabs results = out . rstrip ( ) . split ( '\t' ) # Populate the raw blast results sample [ self . analysistype ] . rawblastresults [ gene ] = results # Create named variables from the list positives = float ( results [ 2 ] ) mismatches = float ( results [ 3 ] ) gaps = float ( results [ 4 ] ) subjectlength = float ( results [ 7 ] ) # Calculate the percent identity percentidentity = float ( '{:0.2f}' . format ( ( positives - gaps ) / subjectlength * 100 ) ) # Create a dictionary with the desired values to store in the metadata object resultdict = { 'matches' : positives , 'mismatches' : mismatches , 'gaps' : gaps , 'subject_length' : subjectlength , 'percent_identity' : percentidentity , 'match_length' : results [ 8 ] . split ( '\n' ) [ 0 ] } # Populate the metadata object with the dictionary sample [ self . analysistype ] . blastresults [ gene ] = resultdict self . epcrparsequeue . task_done ( )
Run BLAST and record results to the object
527
9
15,744
def report ( self ) : # Initialise a variable to store the results data = '' for sample in self . metadata : if sample [ self . analysistype ] . primers != 'NA' : # Set the name of the strain-specific report sample [ self . analysistype ] . report = os . path . join ( sample [ self . analysistype ] . reportdir , '{}_{}.csv' . format ( sample . name , self . analysistype ) ) # Populate the strain-specific string with header, and strain name strainspecific = 'Strain,{},\n{},' . format ( ',' . join ( sorted ( sample [ self . analysistype ] . targets ) ) , sample . name ) # Iterate through all the genes in the organism-specific analysis for gene in sorted ( sample [ self . analysistype ] . targets ) : try : # Extract the percent identity percentidentity = sample [ self . analysistype ] . blastresults [ gene ] [ 'percent_identity' ] # If the % identity is greater than the cutoff of 50%, the gene is considered to be present if percentidentity > 50 : strainspecific += '{},' . format ( percentidentity ) else : strainspecific += '-,' # If there are no BLAST results, then the gene is absent except KeyError : strainspecific += '-,' strainspecific += '\n' # Open and write the data to the strain-specific report with open ( sample [ self . analysistype ] . report , 'w' ) as specificreport : specificreport . write ( strainspecific ) # Add all the data from each strain to the cumulative data string data += strainspecific # Open and write the cumulative data to the cumulative report with open ( os . path . join ( self . reportdir , '{}.csv' . format ( self . analysistype ) ) , 'w' ) as report : report . write ( data )
Create reports of the findings
420
5
15,745
def setup_environment ( ) : osinter = ostool . get_interface ( ) pypath = osinter . get_maya_envpath ( ) for p in sys . path : pypath = os . pathsep . join ( ( pypath , p ) ) os . environ [ 'PYTHONPATH' ] = pypath
Set up neccessary environment variables
78
7
15,746
def execute_mayapy ( args , wait = True ) : osinter = ostool . get_interface ( ) mayapy = osinter . get_maya_python ( ) allargs = [ mayapy ] allargs . extend ( args ) print "Executing mayapy with: %s" % allargs mayapyprocess = subprocess . Popen ( allargs ) if wait : rc = mayapyprocess . wait ( ) print "Process mayapy finished!" return rc else : return mayapyprocess
Execute mayapython with the given arguments capture and return the output
106
14
15,747
def setDoc ( self , doc ) : self . ui . overAtten . setNum ( doc [ 'overloaded_attenuation' ] ) # also set composite stim type # self.ui.traceType.setText(doc['testtype']) self . ui . componentDetails . clearDoc ( ) self . ui . componentDetails . setDoc ( doc [ 'components' ] )
Presents the documentation
86
4
15,748
def increase_by_changes ( self , changes_amount , ratio ) : increases = round ( changes_amount * ratio ) return self . increase ( int ( increases ) )
Increase version by amount of changes
36
6
15,749
def wrap_maya_ui ( mayaname ) : ptr = apiUI . MQtUtil . findControl ( mayaname ) if ptr is None : ptr = apiUI . MQtUtil . findLayout ( mayaname ) if ptr is None : ptr = apiUI . MQtUtil . findMenuItem ( mayaname ) if ptr is not None : return wrap ( long ( ptr ) )
Given the name of a Maya UI element of any type return the corresponding QWidget or QAction . If the object does not exist returns None
92
28
15,750
def query_args ( self , name ) : sql = 'select type, id from code_items ' 'where kind = 22 and name = ?' logging . debug ( '%s %s' , sql , ( name , ) ) self . cursor . execute ( sql , ( name , ) ) func = self . cursor . fetchone ( ) if func : sql = 'select param_number, type, name ' 'from code_items where parent_id = ?' logging . debug ( '%s %s' , sql , ( func [ 1 ] , ) ) self . cursor . execute ( sql , ( func [ 1 ] , ) ) args = self . cursor . fetchall ( ) ret_type = clean_ret_type ( func [ 0 ] ) args = [ ( arg_number , sanitize_type ( arg_type ) , arg_name ) for arg_number , arg_type , arg_name in args ] return ret_type , name , args return None
Query the return type and argument list of the specified function in the specified database .
210
16
15,751
def query_info ( self , name , like , kind ) : kind = self . _make_kind_id ( kind ) # Database from VS2015 does not have assoc_text. # # sql = 'select name, kind, file_id, type, assoc_text ' \ # 'from code_items ' \ # 'where name {} ?'.format('like' if like else '=') sql = 'select name, kind, file_id, type ' 'from code_items ' 'where name {} ?' . format ( 'like' if like else '=' ) args = ( name , ) if like : sql += ' escape ?' args = ( name , '\\' ) if kind : sql += ' and kind = ?' args = ( name , kind ) if like and kind : args = ( name , '\\' , kind ) logging . debug ( '%s %s' , sql , args ) self . cursor . execute ( sql , args ) return self . cursor . fetchall ( ) , self
Query the information of the name in the database .
219
10
15,752
def query_names ( self , name , like , kind ) : kind = self . _make_kind_id ( kind ) sql = 'select id, name from files ' 'where leaf_name {} ?' . format ( 'like' if like else '=' ) args = ( name , ) if like : sql += ' escape ?' args = ( name , '\\' ) logging . debug ( '%s %s' , sql , args ) self . cursor . execute ( sql , args ) ids = self . cursor . fetchall ( ) files = [ ] for file_id , header in ids : sql = 'select name from code_items ' 'where file_id = ?' args = ( file_id , ) if kind : sql += 'and kind = ?' args = ( file_id , kind ) logging . debug ( '%s %s' , sql , args ) self . cursor . execute ( sql , args ) files . append ( ( header , self . cursor . fetchall ( ) ) ) return files
Query function declarations in the files .
221
7
15,753
def query_struct ( self , name ) : sql = 'select id, file_id, name from code_items ' 'where name = ?' self . cursor . execute ( sql , ( name , ) ) for i in self . cursor . fetchall ( ) : sql = 'select id, type, name from code_items ' 'where parent_id = ?' self . cursor . execute ( sql , ( i [ 0 ] , ) ) members = self . cursor . fetchall ( ) if members : print ( self . file_id_to_name ( i [ 1 ] ) , i [ 2 ] ) print ( members )
Query struct .
135
3
15,754
def file_id_to_name ( self , file_id ) : sql = 'select name from files where id = ?' logging . debug ( '%s %s' , sql , ( file_id , ) ) self . cursor . execute ( sql , ( file_id , ) ) name = self . cursor . fetchone ( ) if name : return name [ 0 ] return ''
Convert a file id to the file name .
83
10
15,755
def _make_kind_id ( self , name_or_id ) : if not name_or_id : return None if name_or_id . isdigit ( ) : return name_or_id return self . kind_name_to_id ( name_or_id )
Make kind_id from kind_name or kind_id .
62
13
15,756
def query_kinds ( self , kind ) : logging . debug ( _ ( 'querying %s' ) , kind ) if kind is None : return self . _kind_id_to_name . items ( ) if kind . isdigit ( ) : kind_name = self . kind_id_to_name ( int ( kind ) ) if kind_name : kind = ( kind , kind_name ) else : logging . warning ( _ ( 'id not found: %s' ) , kind ) kind = None else : kind_id = self . kind_name_to_id ( kind ) if kind_id : kind = ( kind_id , kind ) else : logging . warning ( _ ( 'name not found: %s' ) , kind ) kind = None return [ kind ]
Query kinds .
170
3
15,757
def _init_kind_converter ( self ) : from . . utils import invert_dict kinds = self . session . query ( Kind ) . all ( ) self . _kind_id_to_name = { kind . id : kind . name for kind in kinds } self . _kind_name_to_id = invert_dict ( self . _kind_id_to_name )
Make a dictionary mapping kind ids to the names .
88
11
15,758
def make_export ( self , exports ) : sql = 'drop table if exists export' logging . debug ( sql ) self . cursor . execute ( sql ) sql = 'create table if not exists export ' '(func text unique, module text)' logging . debug ( sql ) self . cursor . execute ( sql ) for module in exports : logging . debug ( _ ( 'insering exports from %s' ) , module ) sql = 'insert into export values (?, ?)' for func in exports [ module ] : if func : try : self . cursor . execute ( sql , ( func , module ) ) except sqlite3 . IntegrityError : pass self . con . commit ( )
Populate library exported function data .
141
7
15,759
def query_func_module ( self , func ) : exp = self . session . query ( Export ) . filter_by ( func = func ) . first ( ) if exp : return exp logging . debug ( _ ( 'Function not found: %s' ) , func ) alt = func + 'A' exp = self . session . query ( Export ) . filter_by ( func = alt ) . first ( ) if exp : logging . warning ( _ ( 'Using ANSI version: %s' ) , alt ) return exp logging . warning ( _ ( 'Not handled: %s or %s' ) , func , alt ) return None
Query the module name of the specified function .
136
9
15,760
def query_module_funcs ( self , module ) : funcs = self . session . query ( Export ) . filter_by ( module = module ) . all ( ) return funcs
Query the functions in the specified module .
40
8
15,761
def _build_named_object_ids ( parameters ) : if isinstance ( parameters , str ) : return [ _build_named_object_id ( parameters ) ] return [ _build_named_object_id ( parameter ) for parameter in parameters ]
Builds a list of NamedObjectId .
54
9
15,762
def _build_command_ids ( issued_commands ) : if isinstance ( issued_commands , IssuedCommand ) : entry = issued_commands . _proto . commandQueueEntry return [ entry . cmdId ] else : return [ issued_command . _proto . commandQueueEntry . cmdId for issued_command in issued_commands ]
Builds a list of CommandId .
77
8
15,763
def _cache_key ( cmd_id ) : return '{}__{}__{}__{}' . format ( cmd_id . generationTime , cmd_id . origin , cmd_id . sequenceNumber , cmd_id . commandName )
commandId is a tuple . Make a unique key for it .
55
13
15,764
def get_command_history ( self , issued_command ) : #pylint: disable=protected-access entry = issued_command . _proto . commandQueueEntry key = self . _cache_key ( entry . cmdId ) if key in self . _cache : return self . _cache [ key ] return None
Gets locally cached CommandHistory for the specified command .
69
11
15,765
def add ( self , parameters , abort_on_invalid = True , send_from_cache = True ) : # Verify that we already know our assigned subscription_id assert self . subscription_id != - 1 if not parameters : return options = web_pb2 . ParameterSubscriptionRequest ( ) options . subscriptionId = self . subscription_id options . abortOnInvalid = abort_on_invalid options . sendFromCache = send_from_cache options . id . extend ( _build_named_object_ids ( parameters ) ) self . _manager . send ( 'subscribe' , options )
Add one or more parameters to this subscription .
129
9
15,766
def remove ( self , parameters ) : # Verify that we already know our assigned subscription_id assert self . subscription_id != - 1 if not parameters : return options = web_pb2 . ParameterSubscriptionRequest ( ) options . subscriptionId = self . subscription_id options . id . extend ( _build_named_object_ids ( parameters ) ) self . _manager . send ( 'unsubscribe' , options )
Remove one or more parameters from this subscription .
90
9
15,767
def set_parameter_value ( self , parameter , value ) : parameter = adapt_name_for_rest ( parameter ) url = '/processors/{}/{}/parameters{}' . format ( self . _instance , self . _processor , parameter ) req = _build_value_proto ( value ) self . _client . put_proto ( url , data = req . SerializeToString ( ) )
Sets the value of the specified parameter .
94
9
15,768
def set_parameter_values ( self , values ) : req = rest_pb2 . BulkSetParameterValueRequest ( ) for key in values : item = req . request . add ( ) item . id . MergeFrom ( _build_named_object_id ( key ) ) item . value . MergeFrom ( _build_value_proto ( values [ key ] ) ) url = '/processors/{}/{}/parameters/mset' . format ( self . _instance , self . _processor ) self . _client . post_proto ( url , data = req . SerializeToString ( ) )
Sets the value of multiple parameters .
135
8
15,769
def issue_command ( self , command , args = None , dry_run = False , comment = None ) : req = rest_pb2 . IssueCommandRequest ( ) req . sequenceNumber = SequenceGenerator . next ( ) req . origin = socket . gethostname ( ) req . dryRun = dry_run if comment : req . comment = comment if args : for key in args : assignment = req . assignment . add ( ) assignment . name = key assignment . value = str ( args [ key ] ) command = adapt_name_for_rest ( command ) url = '/processors/{}/{}/commands{}' . format ( self . _instance , self . _processor , command ) response = self . _client . post_proto ( url , data = req . SerializeToString ( ) ) proto = rest_pb2 . IssueCommandResponse ( ) proto . ParseFromString ( response . content ) return IssuedCommand ( proto , self )
Issue the given command
209
4
15,770
def list_alarms ( self , start = None , stop = None ) : # TODO implement continuation token on server params = { 'order' : 'asc' } if start is not None : params [ 'start' ] = to_isostring ( start ) if stop is not None : params [ 'stop' ] = to_isostring ( stop ) # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods url = '/processors/{}/{}/alarms' . format ( self . _instance , self . _processor ) response = self . _client . get_proto ( path = url , params = params ) message = rest_pb2 . ListAlarmsResponse ( ) message . ParseFromString ( response . content ) alarms = getattr ( message , 'alarm' ) return iter ( [ Alarm ( alarm ) for alarm in alarms ] )
Lists the active alarms .
201
6
15,771
def set_default_calibrator ( self , parameter , type , data ) : # pylint: disable=W0622 req = mdb_pb2 . ChangeParameterRequest ( ) req . action = mdb_pb2 . ChangeParameterRequest . SET_DEFAULT_CALIBRATOR if type : _add_calib ( req . defaultCalibrator , type , data ) url = '/mdb/{}/{}/parameters/{}' . format ( self . _instance , self . _processor , parameter ) response = self . _client . post_proto ( url , data = req . SerializeToString ( ) )
Apply a calibrator while processing raw values of the specified parameter . If there is already a default calibrator associated to this parameter that calibrator gets replaced .
143
31
15,772
def reset_calibrators ( self , parameter ) : req = mdb_pb2 . ChangeParameterRequest ( ) req . action = mdb_pb2 . ChangeParameterRequest . RESET_CALIBRATORS calib_info = req . defaultCalibrator url = '/mdb/{}/{}/parameters/{}' . format ( self . _instance , self . _processor , parameter ) response = self . _client . post_proto ( url , data = req . SerializeToString ( ) )
Reset all calibrators for the specified parameter to their original MDB value .
116
16
15,773
def set_default_alarm_ranges ( self , parameter , watch = None , warning = None , distress = None , critical = None , severe = None , min_violations = 1 ) : req = mdb_pb2 . ChangeParameterRequest ( ) req . action = mdb_pb2 . ChangeParameterRequest . SET_DEFAULT_ALARMS if ( watch or warning or distress or critical or severe ) : _add_alarms ( req . defaultAlarm , watch , warning , distress , critical , severe , min_violations ) url = '/mdb/{}/{}/parameters/{}' . format ( self . _instance , self . _processor , parameter ) response = self . _client . post_proto ( url , data = req . SerializeToString ( ) )
Generate out - of - limit alarms for a parameter using the specified alarm ranges .
176
17
15,774
def reset_alarm_ranges ( self , parameter ) : req = mdb_pb2 . ChangeParameterRequest ( ) req . action = mdb_pb2 . ChangeParameterRequest . RESET_ALARMS url = '/mdb/{}/{}/parameters/{}' . format ( self . _instance , self . _processor , parameter ) response = self . _client . post_proto ( url , data = req . SerializeToString ( ) )
Reset all alarm limits for the specified parameter to their original MDB value .
105
16
15,775
def acknowledge_alarm ( self , alarm , comment = None ) : url = '/processors/{}/{}/parameters{}/alarms/{}' . format ( self . _instance , self . _processor , alarm . name , alarm . sequence_number ) req = rest_pb2 . EditAlarmRequest ( ) req . state = 'acknowledged' if comment is not None : req . comment = comment self . _client . put_proto ( url , data = req . SerializeToString ( ) )
Acknowledges a specific alarm associated with a parameter .
117
10
15,776
def create_command_history_subscription ( self , issued_command = None , on_data = None , timeout = 60 ) : options = web_pb2 . CommandHistorySubscriptionRequest ( ) options . ignorePastCommands = True if issued_command : options . commandId . extend ( _build_command_ids ( issued_command ) ) manager = WebSocketSubscriptionManager ( self . _client , resource = 'cmdhistory' , options = options ) # Represent subscription as a future subscription = CommandHistorySubscription ( manager ) wrapped_callback = functools . partial ( _wrap_callback_parse_cmdhist_data , subscription , on_data ) manager . open ( wrapped_callback , instance = self . _instance , processor = self . _processor ) # Wait until a reply or exception is received subscription . reply ( timeout = timeout ) return subscription
Create a new command history subscription .
183
7
15,777
def create_parameter_subscription ( self , parameters , on_data = None , abort_on_invalid = True , update_on_expiration = False , send_from_cache = True , timeout = 60 ) : options = web_pb2 . ParameterSubscriptionRequest ( ) options . subscriptionId = - 1 # This means 'create a new subscription' options . abortOnInvalid = abort_on_invalid options . updateOnExpiration = update_on_expiration options . sendFromCache = send_from_cache options . id . extend ( _build_named_object_ids ( parameters ) ) manager = WebSocketSubscriptionManager ( self . _client , resource = 'parameter' , options = options ) # Represent subscription as a future subscription = ParameterSubscription ( manager ) wrapped_callback = functools . partial ( _wrap_callback_parse_parameter_data , subscription , on_data ) manager . open ( wrapped_callback , instance = self . _instance , processor = self . _processor ) # Wait until a reply or exception is received subscription . reply ( timeout = timeout ) return subscription
Create a new parameter subscription .
242
6
15,778
def create_alarm_subscription ( self , on_data = None , timeout = 60 ) : manager = WebSocketSubscriptionManager ( self . _client , resource = 'alarms' ) # Represent subscription as a future subscription = AlarmSubscription ( manager ) wrapped_callback = functools . partial ( _wrap_callback_parse_alarm_data , subscription , on_data ) manager . open ( wrapped_callback , instance = self . _instance , processor = self . _processor ) # Wait until a reply or exception is received subscription . reply ( timeout = timeout ) return subscription
Create a new alarm subscription .
126
6
15,779
def get_by ( self , name ) : return next ( ( item for item in self if item . name == name ) , None )
get element by name
29
4
15,780
def fastqc ( self ) : while True : # while daemon threadlock = threading . Lock ( ) # Unpack the variables from the queue ( sample , systemcall , outputdir , fastqcreads ) = self . qcqueue . get ( ) # Check to see if the output HTML file already exists try : _ = glob ( os . path . join ( outputdir , '*.html' ) ) [ 0 ] except IndexError : # Make the output directory make_path ( outputdir ) # Run the system calls outstr = str ( ) errstr = str ( ) out , err = run_subprocess ( systemcall ) outstr += out errstr += err out , err = run_subprocess ( fastqcreads ) outstr += out errstr += err # Acquire thread lock, and write the logs to file threadlock . acquire ( ) write_to_logfile ( systemcall , systemcall , self . logfile , sample . general . logout , sample . general . logerr , None , None ) write_to_logfile ( fastqcreads , fastqcreads , self . logfile , sample . general . logout , sample . general . logerr , None , None ) write_to_logfile ( outstr , errstr , self . logfile , sample . general . logout , sample . general . logerr , None , None ) threadlock . release ( ) # Rename the outputs try : shutil . move ( os . path . join ( outputdir , 'stdin_fastqc.html' ) , os . path . join ( outputdir , '{}_fastqc.html' . format ( sample . name ) ) ) shutil . move ( os . path . join ( outputdir , 'stdin_fastqc.zip' ) , os . path . join ( outputdir , '{}_fastqc.zip' . format ( sample . name ) ) ) except IOError : pass # Signal to qcqueue that job is done self . qcqueue . task_done ( )
Run fastqc system calls
442
6
15,781
def trimquality ( self ) : logging . info ( "Trimming fastq files" ) # Iterate through strains with fastq files with progressbar ( self . metadata ) as bar : for sample in bar : # As the metadata can be populated with 'NA' (string) if there are no fastq files, only process if # :fastqfiles is a list if type ( sample . general . fastqfiles ) is list : # Check to see if the fastq files exist fastqfiles = sorted ( sample . general . fastqfiles ) # Define the output directory outputdir = sample . general . outputdirectory # Define the name of the trimmed fastq files cleanforward = os . path . join ( outputdir , '{}_R1_trimmed.fastq.gz' . format ( sample . name ) ) cleanreverse = os . path . join ( outputdir , '{}_R2_trimmed.fastq.gz' . format ( sample . name ) ) # Incorporate read length into the minlength parameter - set it to 50 unless one or more of the # reads has a lower calculated length than 50 try : lesser_length = min ( int ( sample . run . forwardlength ) , int ( sample . run . reverselength ) ) except ValueError : lesser_length = int ( sample . run . forwardlength ) min_len = 50 if lesser_length >= 50 else lesser_length # Initialise a variable to store the number of bases to automatically trim from the beginning of # each read, as these bases tend to have lower quality scores. If trimming the reads will cause trim_left = 0 # If, for some reason, only the reverse reads are present, use the appropriate output file name try : if 'R2' in fastqfiles [ 0 ] : if not os . path . isfile ( cleanreverse ) : out , err , bbdukcall = bbtools . bbduk_trim ( forward_in = fastqfiles [ 0 ] , reverse_in = None , forward_out = cleanreverse , trimq = 10 , minlength = min_len , forcetrimleft = trim_left , returncmd = True ) else : bbdukcall = str ( ) out = str ( ) err = str ( ) else : if not os . path . isfile ( cleanforward ) : out , err , bbdukcall = bbtools . bbduk_trim ( forward_in = fastqfiles [ 0 ] , forward_out = cleanforward , trimq = 10 , minlength = min_len , forcetrimleft = trim_left , returncmd = True ) else : bbdukcall = str ( ) out = str ( ) err = str ( ) except ( IndexError , CalledProcessError ) : bbdukcall = str ( ) out = str ( ) err = str ( ) # Write the command, stdout, and stderr to the logfile write_to_logfile ( bbdukcall , bbdukcall , self . logfile , sample . general . logout , sample . general . logerr , None , None ) write_to_logfile ( out , err , self . logfile , sample . general . logout , sample . general . logerr , None , None ) # Add the trimmed fastq files to a list trimmedfastqfiles = sorted ( glob ( os . path . join ( sample . general . outputdirectory , '*trimmed.fastq.gz' ) ) ) # Populate the metadata if the files exist sample . general . trimmedfastqfiles = trimmedfastqfiles if trimmedfastqfiles else list ( ) # Add all the trimmed files to the metadata logging . info ( 'Fastq files trimmed' )
Uses bbduk from the bbmap tool suite to quality and adapter trim
810
18
15,782
def estimate_genome_size ( self ) : logging . info ( 'Estimating genome size using kmercountexact' ) for sample in self . metadata : # Initialise the name of the output file sample [ self . analysistype ] . peaksfile = os . path . join ( sample [ self . analysistype ] . outputdir , 'peaks.txt' ) # Run the kmer counting command out , err , cmd = bbtools . kmercountexact ( forward_in = sorted ( sample . general . fastqfiles ) [ 0 ] , peaks = sample [ self . analysistype ] . peaksfile , returncmd = True , threads = self . cpus ) # Set the command in the object sample [ self . analysistype ] . kmercountexactcmd = cmd # Extract the genome size from the peaks file sample [ self . analysistype ] . genomesize = bbtools . genome_size ( sample [ self . analysistype ] . peaksfile ) write_to_logfile ( out , err , self . logfile , sample . general . logout , sample . general . logerr , None , None )
Use kmercountexact from the bbmap suite of tools to estimate the size of the genome
247
21
15,783
def error_correction ( self ) : logging . info ( 'Error correcting reads' ) for sample in self . metadata : sample . general . trimmedcorrectedfastqfiles = [ fastq . split ( '.fastq.gz' ) [ 0 ] + '_trimmed_corrected.fastq.gz' for fastq in sorted ( sample . general . fastqfiles ) ] try : if not os . path . isfile ( sample . general . trimmedcorrectedfastqfiles [ 0 ] ) : try : out , err , cmd = bbtools . tadpole ( forward_in = sorted ( sample . general . trimmedfastqfiles ) [ 0 ] , forward_out = sample . general . trimmedcorrectedfastqfiles [ 0 ] , returncmd = True , mode = 'correct' , threads = self . cpus ) # Set the command in the object sample [ self . analysistype ] . errorcorrectcmd = cmd write_to_logfile ( out = out , err = err , logfile = self . logfile , samplelog = sample . general . logout , sampleerr = sample . general . logerr , analysislog = None , analysiserr = None ) except IndexError : sample . general . trimmedcorrectedfastqfiles = list ( ) except CalledProcessError : sample . general . trimmedcorrectedfastqfiles = sample . general . trimmedfastqfiles except AttributeError : sample . general . trimmedcorrectedfastqfiles = list ( ) except IndexError : sample . general . trimmedcorrectedfastqfiles = list ( )
Use tadpole from the bbmap suite of tools to perform error correction of the reads
333
18
15,784
def normalise_reads ( self ) : logging . info ( 'Normalising reads to a kmer depth of 100' ) for sample in self . metadata : # Set the name of the normalised read files sample . general . normalisedreads = [ fastq . split ( '.fastq.gz' ) [ 0 ] + '_normalised.fastq.gz' for fastq in sorted ( sample . general . fastqfiles ) ] try : # Run the normalisation command out , err , cmd = bbtools . bbnorm ( forward_in = sorted ( sample . general . trimmedcorrectedfastqfiles ) [ 0 ] , forward_out = sample . general . normalisedreads [ 0 ] , returncmd = True , threads = self . cpus ) sample [ self . analysistype ] . normalisecmd = cmd write_to_logfile ( out , err , self . logfile , sample . general . logout , sample . general . logerr , None , None ) except CalledProcessError : sample . general . normalisedreads = sample . general . trimmedfastqfiles except IndexError : sample . general . normalisedreads = list ( )
Use bbnorm from the bbmap suite of tools to perform read normalisation
247
17
15,785
def merge_pairs ( self ) : logging . info ( 'Merging paired reads' ) for sample in self . metadata : # Can only merge paired-end if len ( sample . general . fastqfiles ) == 2 : # Set the name of the merged, and unmerged files sample . general . mergedreads = os . path . join ( sample . general . outputdirectory , '{}_paired.fastq.gz' . format ( sample . name ) ) sample . general . unmergedforward = os . path . join ( sample . general . outputdirectory , '{}_unpaired_R1.fastq.gz' . format ( sample . name ) ) sample . general . unmergedreverse = os . path . join ( sample . general . outputdirectory , '{}_unpaired_R2.fastq.gz' . format ( sample . name ) ) try : # Run the merging command - forward_in=sample.general.normalisedreads[0], out , err , cmd = bbtools . bbmerge ( forward_in = sorted ( sample . general . trimmedcorrectedfastqfiles ) [ 0 ] , merged_reads = sample . general . mergedreads , returncmd = True , outu1 = sample . general . unmergedforward , outu2 = sample . general . unmergedreverse , threads = self . cpus ) sample [ self . analysistype ] . bbmergecmd = cmd write_to_logfile ( out , err , self . logfile , sample . general . logout , sample . general . logerr , None , None ) except CalledProcessError : delattr ( sample . general , 'mergedreads' ) delattr ( sample . general , 'unmergedforward' ) delattr ( sample . general , 'unmergedreverse' ) except IndexError : delattr ( sample . general , 'mergedreads' ) delattr ( sample . general , 'unmergedforward' ) delattr ( sample . general , 'unmergedreverse' ) else : sample . general . mergedreads = sorted ( sample . general . trimmedcorrectedfastqfiles ) [ 0 ]
Use bbmerge from the bbmap suite of tools to merge paired - end reads
466
19
15,786
def main ( self ) : self . fasta_records ( ) self . fasta_stats ( ) self . find_largest_contig ( ) self . find_genome_length ( ) self . find_num_contigs ( ) self . find_n50 ( ) self . perform_pilon ( ) self . clear_attributes ( )
Run all the methods required for pipeline outputs
77
8
15,787
def fasta_records ( self ) : for sample in self . metadata : # Create the analysis-type specific attribute setattr ( sample , self . analysistype , GenObject ( ) ) # Create a dictionary of records for each file try : record_dict = SeqIO . to_dict ( SeqIO . parse ( sample . general . bestassemblyfile , "fasta" ) ) except FileNotFoundError : record_dict = dict ( ) # Set the records dictionary as the attribute for the object sample [ self . analysistype ] . record_dict = record_dict
Use SeqIO to create dictionaries of all records for each FASTA file
124
17
15,788
def fasta_stats ( self ) : for sample in self . metadata : # Initialise variables to store appropriate values parsed from contig records contig_lengths = list ( ) fasta_sequence = str ( ) for contig , record in sample [ self . analysistype ] . record_dict . items ( ) : # Append the length of the contig to the list contig_lengths . append ( len ( record . seq ) ) # Add the contig sequence to the string fasta_sequence += record . seq # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value sample [ self . analysistype ] . contig_lengths = sorted ( contig_lengths , reverse = True ) try : # Calculate the GC% of the total genome sequence using GC - format to have two decimal places sample [ self . analysistype ] . gc = float ( '{:0.2f}' . format ( GC ( fasta_sequence ) ) ) except TypeError : sample [ self . analysistype ] . gc = 'NA'
Parse the lengths of all contigs for each sample as well as the total GC%
238
18
15,789
def find_largest_contig ( self ) : # for file_name, contig_lengths in contig_lengths_dict.items(): for sample in self . metadata : # As the list is sorted in descending order, the largest contig is the first entry in the list sample [ self . analysistype ] . longest_contig = sample [ self . analysistype ] . contig_lengths
Determine the largest contig for each strain
89
10
15,790
def find_genome_length ( self ) : for sample in self . metadata : # Use the sum() method to add all the contig lengths in the list sample [ self . analysistype ] . genome_length = sum ( sample [ self . analysistype ] . contig_lengths )
Determine the total length of all the contigs for each strain
64
14
15,791
def find_num_contigs ( self ) : for sample in self . metadata : # Use the len() method to count the number of entries in the list sample [ self . analysistype ] . num_contigs = len ( sample [ self . analysistype ] . contig_lengths )
Count the total number of contigs for each strain
64
10
15,792
def find_n50 ( self ) : for sample in self . metadata : # Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop sample [ self . analysistype ] . n50 = '-' # Initialise a variable to store a running total of contig lengths currentlength = 0 for contig_length in sample [ self . analysistype ] . contig_lengths : # Increment the current length with the length of the current contig currentlength += contig_length # If the current length is now greater than the total genome / 2, the current contig length is the N50 if currentlength >= sample [ self . analysistype ] . genome_length * 0.5 : # Populate the dictionary, and break the loop sample [ self . analysistype ] . n50 = contig_length break
Calculate the N50 for each strain . N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig
185
42
15,793
def perform_pilon ( self ) : for sample in self . metadata : try : if sample [ self . analysistype ] . num_contigs > 500 or sample . confindr . contam_status == 'Contaminated' : sample . general . polish = False else : sample . general . polish = True except AttributeError : sample . general . polish = True
Determine if pilon polishing should be attempted . Do not perform polishing if confindr determines that the sample is contaminated or if there are > 500 contigs
79
35
15,794
def clear_attributes ( self ) : for sample in self . metadata : try : delattr ( sample [ self . analysistype ] , 'record_dict' ) delattr ( sample [ self . analysistype ] , 'contig_lengths' ) delattr ( sample [ self . analysistype ] , 'longest_contig' ) except AttributeError : pass
Remove the record_dict attribute from the object as SeqRecords are not JSON - serializable . Also remove the contig_lengths and longest_contig attributes as they are large lists that make the . json file ugly
82
47
15,795
def run_qaml ( self ) : logging . info ( 'Running GenomeQAML quality assessment' ) qaml_call = 'classify.py -t {tf} -r {rf}' . format ( tf = self . qaml_path , rf = self . qaml_report ) make_path ( self . reportpath ) # Only attempt to assess assemblies if the report doesn't already exist if not os . path . isfile ( self . qaml_report ) : # Run the system calls out , err = run_subprocess ( qaml_call ) # Acquire thread lock, and write the logs to file self . threadlock . acquire ( ) write_to_logfile ( qaml_call , qaml_call , self . logfile ) write_to_logfile ( out , err , self . logfile ) self . threadlock . release ( )
Create and run the GenomeQAML system call
191
11
15,796
def parse_qaml ( self ) : logging . info ( 'Parsing GenomeQAML outputs' ) # A dictionary to store the parsed excel file in a more readable format nesteddictionary = dict ( ) # Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict()) dictionary = pandas . read_csv ( self . qaml_report ) . to_dict ( ) # Iterate through the dictionary - each header from the CSV file for header in dictionary : # Sample is the primary key, and value is the value of the cell for that primary key + header combination for sample , value in dictionary [ header ] . items ( ) : # Update the dictionary with the new data try : nesteddictionary [ sample ] . update ( { header : value } ) # Create the nested dictionary if it hasn't been created yet except KeyError : nesteddictionary [ sample ] = dict ( ) nesteddictionary [ sample ] . update ( { header : value } ) # Get the results into the metadata object for sample in self . metadata : # Initialise the plasmid extractor genobject setattr ( sample , self . analysistype , GenObject ( ) ) # Initialise the list of all plasmids sample [ self . analysistype ] . prediction = str ( ) # Iterate through the dictionary of results for line in nesteddictionary : # Extract the sample name from the dictionary name = nesteddictionary [ line ] [ 'Sample' ] # Ensure that the names match if name == sample . name : # Append the plasmid name extracted from the dictionary to the list of plasmids sample [ self . analysistype ] . prediction = nesteddictionary [ line ] [ 'Predicted_Class' ]
Parse the GenomeQAML report and populate metadata objects
373
13
15,797
def init ( self , ) : self . gw = None pm = MayaPluginManager . get ( ) genesis = pm . get_plugin ( "Genesis" ) self . GenesisWin = self . subclass_genesis ( genesis . GenesisWin )
Initialize the plugin . Do nothing .
52
8
15,798
def save_lastfile ( self , tfi ) : tf = models . TaskFile . objects . get ( task = tfi . task , version = tfi . version , releasetype = tfi . releasetype , descriptor = tfi . descriptor , typ = tfi . typ ) c = self . get_config ( ) c [ 'lastfile' ] = tf . pk c . write ( )
Save the taskfile in the config
90
7
15,799
def stash_calibration ( self , attenuations , freqs , frange , calname ) : self . calibration_vector = attenuations self . calibration_freqs = freqs self . calibration_frange = frange self . calname = calname
Save it for later
55
4