idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
16,300
def c_var_decls(self):
    """Get the needed variable definitions for this module."""
    opts = self.opts
    if opts.no_structs:
        # One HMODULE plus one function-pointer variable per function.
        decls = ['HMODULE {} = NULL;\n'.format(self.name)]
        for func_name, dummy_args in self.funcs:
            decls.append('{} *{} = NULL;\n'.format(self._c_type_name(func_name), func_name))
        return decls
    if opts.windll:
        return ''
    struct_name = self._c_struct_names()[1]
    return ['{} _{} = {{ 0 }};\n'.format(struct_name, self.name)]
Get the needed variable definitions .
142
6
16,301
def c_module_relocs(self):
    """Build the relocation snippets (x86, x64) for the module variable."""
    opts = self.opts
    if opts.no_structs or opts.windll:
        return '', ''
    struct_name = self._c_struct_names()[1]
    uses_pointer = self._c_uses_pointer()
    x86 = reloc_var(self.name, struct_name, opts.reloc_delta, uses_pointer)
    if uses_pointer:
        x64 = '{0} *{1} = &_{1};\n'.format(struct_name, self.name)
    else:
        x64 = ''
    return x86, x64
Build relocation for the module variable .
142
7
16,302
def c_loadlib(self):
    """Get the LoadLibrary/base-lookup snippet of the module."""
    name = self._c_base_var()
    kernel32 = 'windll->kernel32.'
    if self.name == 'kernel32':
        # kernel32 itself is located via PEB walking, not LoadLibraryA.
        target = 'kernel32' if self.opts.no_structs else kernel32 + self.opts.base
        loadlib = '{} = get_kernel32_base();\n'.format(target)
    else:
        getter_prefix = '' if self.opts.no_structs else kernel32
        loadlib = '{} = {}LoadLibraryA({}{});\n'.format(
            name, getter_prefix, self.opts.prefix, self.name)
    return loadlib + self._c_null_check(name)
Get the loadlib of the module .
151
8
16,303
def c_getprocs(self):
    """Get the GetProcAddress-style lookup snippets of the module."""
    hashed = callable(self.opts.hash_func)
    getprocs = []
    for func_name, dummy_args in self.funcs:
        if func_name == 'GetProcAddress':
            if hashed:
                # Not needed at all when lookups go through the hash getter.
                continue
            getter = 'get_proc_by_string'
        elif self.opts.no_structs:
            getter = 'GetProcAddress'
        else:
            getter = 'windll->kernel32.GetProcAddress'
        if hashed:
            getter = 'get_proc_by_hash'
        if self.opts.no_structs:
            var = func_name
        else:
            var = 'windll->{}.{}'.format(self.name, func_name)
        getproc = '{} = ({} *){}({}, {}{});\n'.format(
            var, self._c_type_name(func_name), getter,
            self._c_base_var(), self.opts.prefix, func_name)
        getprocs.append(getproc + self._c_null_check(var))
    return getprocs
Get the getprocs of the module .
250
9
16,304
def c_member_funcs(self, for_struct=False):
    """Get the function-pointer declarations of the module."""
    decls = []
    for func_name, dummy_args in self.funcs:
        decls.append('{} *{};'.format(self._c_type_name(func_name), func_name))
    if for_struct:
        return decls
    # Outside of a struct, the module declaration comes first.
    return [self._c_mod_decl()] + decls
Get the decls of the module .
81
8
16,305
def _c_base_var ( self ) : if self . opts . no_structs : return self . name return 'windll->{}.{}' . format ( self . name , self . opts . base )
Return the name of the module base variable .
50
9
16,306
def get_precursor_mz(exact_mass, precursor_type):
    """Calculate the precursor m/z from the exact mass and precursor type.

    Returns False (after printing the missing key) for unknown adduct types.
    """
    # Adduct mass deltas taken from what was present in the massbank .msp
    # file for entries missing the exact mass.
    adduct_deltas = {
        '[M-H]-': -1.007276,
        '[M+H]+': 1.007276,
        '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949),
    }
    try:
        return exact_mass + adduct_deltas[precursor_type]
    except KeyError as exc:
        print(exc)
        return False
Calculate precursor mz based on exact mass and precursor type
127
13
16,307
def line_count(fn):
    """Get the line count of the file *fn*.

    Returns 0 for an empty file; the original implementation raised
    UnboundLocalError because the loop variable was never bound.
    """
    count = 0
    with open(fn) as handle:
        # enumerate(..., 1) makes count equal the number of lines seen so far.
        for count, _line in enumerate(handle, 1):
            pass
    return count
Get line count of file
32
5
16,308
def amplitude(self, caldb, calv, atten=0):
    """Calculate the voltage amplitude for this stimulus using the internal
    intensity value and the given reference intensity & voltage."""
    # dB above the calibration reference, converted to a linear voltage scale.
    db_above_ref = float(self._intensity + atten - caldb)
    return (10 ** (db_above_ref / 20)) * calv
Calculates the voltage amplitude for this stimulus using internal intensity value and the given reference intensity & voltage
45
20
16,309
def verify(self, **kwargs):
    """Check this component for invalidating conditions.

    Returns 0 when valid, otherwise an error-message string.
    """
    if 'duration' in kwargs and kwargs['duration'] < self._duration:
        return "Window size must equal or exceed stimulus length"
    if self._risefall > self._duration:
        return "Rise and fall times exceed component duration"
    return 0
Checks this component for invalidating conditions
72
8
16,310
def stateDict(self):
    """Save internal values so they can be loaded later."""
    return {
        'duration': self._duration,
        'intensity': self._intensity,
        'risefall': self._risefall,
        'stim_type': self.name,
    }
Saves internal values to be loaded later
52
8
16,311
def loadState(self, state):
    """Load previously saved values into this component."""
    # KeyError propagates for missing keys, as in the original.
    for key in ('duration', 'intensity', 'risefall'):
        setattr(self, '_' + key, state[key])
Loads previously saved values to this component .
44
9
16,312
def initiate(self, callback=None):
    """Initiate an OAuth handshake with MediaWiki."""
    # Fall back to the instance-level callback when none is given.
    chosen_callback = callback or self.callback
    return initiate(self.mw_uri, self.consumer_token,
                    callback=chosen_callback,
                    user_agent=self.user_agent)
Initiate an OAuth handshake with MediaWiki .
44
11
16,313
def _load_variable(func, program_id, index):
    """Load the metadata (size, type, name) for a uniform or attribute."""
    max_name_len = 64  # max name length TODO: read from card
    bufsize = GLsizei(max_name_len)
    length = pointer(GLsizei(0))
    size = pointer(GLint(0))
    var_type = pointer(GLenum(0))
    uname = create_string_buffer(max_name_len)
    func(program_id, index, bufsize, length, size, var_type, uname)
    return size[0], var_type[0], uname.value.decode('utf8')
Loads the meta data for a uniform or attribute
122
10
16,314
def addWidget(self, widget, name):
    """Add a component editor widget."""
    combo = self.exploreStimTypeCmbbx
    combo.addItem(name)
    stack = self.componentStack
    stack.addWidget(widget)
    # Re-emit the widget's change notifications as our own.
    widget.valueChanged.connect(self.valueChanged.emit)
Add a component editor widget
51
5
16,315
def saveTemplate(self):
    """Get a json-able structure of the current inputs so they can be loaded later."""
    template = {}
    for editor in self.widgets():
        component = editor.component()
        # Push the editor's current values into the component first.
        editor.saveToObject()
        template[component.name] = component.stateDict()
    template['delay'] = self.delaySpnbx.value()
    return template
Get a json structure of the current inputs to be able to load later
77
14
16,316
def expand_short_options(self, argv):
    """Convert grouped short options like -abc to -a -b -c."""
    expanded = []
    for token in argv:
        expanded.extend(self.parse_multi_short_option(token))
    return expanded
Convert grouped short options like - abc to - a - b - c .
54
17
16,317
def find_arg(self, name):
    """Find an arg by normalized arg name or parameter name."""
    return self.args.get(self.normalize_name(name))
Find arg by normalized arg name or parameter name .
30
10
16,318
def find_parameter(self, name):
    """Find a parameter by name or normalized arg name; None if absent."""
    arg = self.args.get(self.normalize_name(name))
    if arg is None:
        return None
    return arg.parameter
Find parameter by name or normalized arg name .
42
9
16,319
def args(self):
    """Create args from function parameters.

    Builds an OrderedDict of Arg objects keyed by normalized parameter
    name, then verifies that no command-line option maps to more than
    one parameter.

    NOTE(review): the nesting below is reconstructed from a flattened
    source; confirm against the original that option generation is only
    performed for parameters with defaults.
    """
    params = self.parameters
    args = OrderedDict()
    # This will be overridden if the command explicitly defines an
    # arg named help.
    args['help'] = HelpArg(command=self)
    # Bind frequently used methods to locals for brevity.
    normalize_name = self.normalize_name
    get_arg_config = self.get_arg_config
    get_short_option = self.get_short_option_for_arg
    get_long_option = self.get_long_option_for_arg
    get_inverse_option = self.get_inverse_option_for_arg
    names = {normalize_name(name) for name in params}
    used_short_options = set()
    # First pass: reserve every explicitly configured short option.
    for param in params.values():
        annotation = get_arg_config(param)
        short_option = annotation.short_option
        if short_option:
            used_short_options.add(short_option)
    # Second pass: build an Arg per eligible parameter.
    for name, param in params.items():
        name = normalize_name(name)
        skip = (name.startswith('_')
                or param.kind is param.VAR_KEYWORD
                or param.kind is param.KEYWORD_ONLY)
        if skip:
            continue
        annotation = get_arg_config(param)
        container = annotation.container
        type = annotation.type
        choices = annotation.choices
        help = annotation.help
        inverse_help = annotation.inverse_help
        short_option = annotation.short_option
        long_option = annotation.long_option
        inverse_option = annotation.inverse_option
        action = annotation.action
        nargs = annotation.nargs
        default = param.default
        if default is not param.empty:
            # Optional parameter: derive any options not set explicitly.
            if not short_option:
                short_option = get_short_option(name, names, used_short_options)
                used_short_options.add(short_option)
            if not long_option:
                long_option = get_long_option(name)
            if not inverse_option:
                # NOTE: The DISABLE marker evaluates as True
                inverse_option = get_inverse_option(long_option)
        args[name] = Arg(
            command=self,
            parameter=param,
            name=name,
            container=container,
            type=type,
            default=default,
            choices=choices,
            help=help,
            inverse_help=inverse_help,
            short_option=short_option,
            long_option=long_option,
            inverse_option=inverse_option,
            action=action,
            nargs=nargs,
        )
    # Detect options claimed by more than one parameter.
    option_map = OrderedDict()
    for arg in args.values():
        for option in arg.options:
            option_map.setdefault(option, [])
            option_map[option].append(arg)
    for option, option_args in option_map.items():
        if len(option_args) > 1:
            names = ', '.join(a.parameter.name for a in option_args)
            message = (
                'Option {option} of command {self.name} maps to multiple parameters: {names}')
            message = message.format_map(locals())
            raise CommandError(message)
    return args
Create args from function parameters .
658
6
16,320
def option_map(self):
    """Map command-line options to args."""
    mapping = OrderedDict()
    for arg in self.args.values():
        for option in arg.options:
            mapping[option] = arg
    return mapping
Map command - line options to args .
48
8
16,321
def objectprep(self):
    """Create .fastq files from an in-progress Illumina MiSeq run, or
    create the metadata object and move files appropriately."""
    if not self.bcltofastq:
        self.samples = createObject.ObjectCreation(self)
        return
    if self.customsamplesheet:
        assert os.path.isfile(self.customsamplesheet), \
            'Cannot find custom sample sheet as specified {}'.format(self.customsamplesheet)
    # Create the FASTQ files
    self.samples = fastqCreator.CreateFastq(self)
    # Pull the required attributes out of the created object.
    samples_dict = vars(self.samples)
    # NOTE(review): the forward/reverse assignments below look crossed
    # ('forwardlength' -> self.forward, 'forward' -> self.forwardlength);
    # preserved exactly as in the original.
    self.index = samples_dict['index']
    self.index_length = samples_dict['indexlength']
    self.forward = samples_dict['forwardlength']
    self.reverse = samples_dict['reverselength']
    self.forwardlength = samples_dict['forward']
    self.reverselength = samples_dict['reverse']
    self.header = samples_dict['header']
Creates fastq files from an in - progress Illumina MiSeq run or create an object and moves files appropriately
229
24
16,322
def fileprep(self):
    """Decompress and concatenate .fastq files."""
    # Spin up one worker thread per CPU; each runs self.prep.
    for _ in range(self.cpus):
        worker = Thread(target=self.prep, args=())
        # Daemon threads die with the main process.
        worker.setDaemon(True)
        worker.start()
    for sample in self.metadata:
        # Name of the decompressed, combined .fastq file.
        sample.general.combined = os.path.join(
            sample.general.outputdirectory,
            '{sample_name}_combined.fastq'.format(sample_name=sample.name))
        self.queue.put(sample)
    self.queue.join()
Decompress and concatenate . fastq files
158
11
16,323
def chunked_join(iterable, int1, int2, str1, str2, func):
    """Chunk *iterable* twice (sizes int1, then int2) and join the mapped
    pieces with str2 inside groups and str1 between groups."""
    outer_chunks = list(chunked(iterable, int1))
    logging.debug(outer_chunks)
    groups = [list(chunked(chunk, int2)) for chunk in outer_chunks]
    logging.debug(groups)
    joined_groups = []
    for group in groups:
        joined_groups.append(str2.join(func(''.join(pair)) for pair in group))
    return str1.join(joined_groups)
Chunk and join .
103
5
16,324
def bytes_to_c_string(data):
    """Convert the hexadecimal string into a C-style string literal."""
    body = chunked_join(data, 20, 2, '"\n "', '', r'\x' + X)
    logging.debug(_('Returning rows: %s'), body)
    return '"' + body + '";'
Convert the hexadecimal string into a C - style string .
71
15
16,325
def bytes_to_c_array(data):
    """Make a NUL-terminated C char array initializer from the given string."""
    joined = ', '.join("'{}'".format(encode_escape(ch)) for ch in decode_escape(data))
    # Trailing 0 terminates the array.
    return joined + ', 0'
Make a C array using the given string .
51
9
16,326
def uni_from(cls, source, *args, **kwargs):
    """Unified from-constructor: dispatch to the constructor registered
    for *source* in cls.cons_dict."""
    logging.debug(_('source: %s, args: %s, kwargs: %s'), source, args, kwargs)
    constructor = getattr(cls, cls.cons_dict[source])
    return constructor(*args, **kwargs)
Unified from .
82
4
16,327
def uni_to(self, target, *args, **kwargs):
    """Unified to-converter: dispatch to the method registered for
    *target* in self.func_dict."""
    logging.debug(_('target: %s, args: %s, kwargs: %s'), target, args, kwargs)
    converter = getattr(self, self.func_dict[target])
    return converter(*args, **kwargs)
Unified to .
79
4
16,328
def from_section(cls, stream, section_name='.pic'):
    """Construct a Converter from the specified section of the given
    binary stream."""
    binary = Executable(stream)
    data = binary.get_section_data(section_name)
    return cls(data, binary.system)
Construct a Converter object from the specified section of the specified binary stream .
54
15
16,329
def to_esc(self):
    """Convert the stream to an escape string (\\xNN pairs)."""
    pairs = chunked(self.stream, 2)
    return ''.join(r'\x' + ''.join(pair) for pair in pairs)
Convert to escape string .
41
6
16,330
def get_percentage_relative_to(val, other):
    """Return how many percent *val* is above (positive) or below
    (negative) *other*."""
    ratio = float(val) / float(other) - 1
    return ratio * 100.0
Finds percentage between 2 numbers
40
6
16,331
def setup_ui(self, ):
    """Create all ui elements and layouts."""
    layout = QtGui.QVBoxLayout(self)
    checkbox = QtGui.QCheckBox("Import references")
    layout.addWidget(checkbox)
    self.main_vbox = layout
    self.import_all_references_cb = checkbox
Create all ui elements and layouts
72
7
16,332
def get_cleanups(self, ):
    """Get the cleanup actions for a release depending on the selected options."""
    open_unit = ActionUnit(name="Open",
                           description="Open the maya scene.",
                           actionfunc=open_scene)
    units = [open_unit]
    # Optional: import all references, gated by the option widget.
    if self._option_widget.import_references():
        units.append(ActionUnit(name="Import references",
                                description="Import all references in the scene.",
                                actionfunc=import_all_references,
                                depsuccess=[open_unit]))
    update_scenenode_unit = ActionUnit(
        name="Update Scene Node",
        description="Change the id from the jbscene node from work to releasefile.",
        actionfunc=update_scenenode,
        depsuccess=[open_unit])
    units.append(update_scenenode_unit)
    # Save only after the scene node was updated successfully.
    units.append(ActionUnit(name="Save",
                            description="Save the scene.",
                            actionfunc=save_scene,
                            depsuccess=[update_scenenode_unit]))
    return ActionCollection(units)
Get the cleanup actions for a release depending on the selected options
258
14
16,333
def epcr_primer_file(self, formattedprimers):
    """Create the ePCR-compatible primer file from the dictionaries of
    primer combinations."""
    logging.info('Creating re-PCR-compatible primer file')
    with open(formattedprimers, 'w') as formatted:
        for basename in sorted(self.forward_dict):
            # Number every forward/reverse primer combination.
            for fwd_idx, fwd_primer in enumerate(self.forward_dict[basename]):
                for rev_idx, rev_primer in enumerate(self.reverse_dict[basename]):
                    # e.g. vtx1a_0_0
                    primer_name = '{bn}_{fi}_{ri}'.format(bn=basename, fi=fwd_idx, ri=rev_idx)
                    # e.g. vtx1a_0_0<TAB>CCTTT...<TAB>GGAAA...
                    formatted.write('{pn}\t{fp}\t{rp}\n'.format(
                        pn=primer_name, fp=fwd_primer, rp=rev_primer))
Create the ePCR - compatible primer file from the dictionaries of primer combinations
312
16
16,334
def epcr_threads(self, formattedprimers, ampliconsize=10000):
    """Run ePCR in a multi-threaded fashion.

    :param formattedprimers: path to the re-PCR-compatible primer file
    :param ampliconsize: upper bound for the amplicon size search range
    """
    # Create the threads for the ePCR analysis
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            threads = Thread(target=self.epcr, args=())
            threads.setDaemon(True)
            threads.start()
    logging.info('Running ePCR analyses')
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            setattr(sample, self.analysistype, GenObject())
            # Get the primers ready
            sample[self.analysistype].primers = formattedprimers
            # Make the output path
            sample[self.analysistype].reportdir = os.path.join(
                sample.general.outputdirectory, self.analysistype)
            make_path(sample[self.analysistype].reportdir)
            outfile = os.path.join(sample[self.analysistype].reportdir, sample.name)
            # Set the hashing and mapping commands
            sample.commands.famap = '{famap} -b {outfile}.famap {fasta}'.format(
                famap=os.path.join(self.homepath, 'ePCR', 'famap'),
                outfile=outfile,
                fasta=sample.general.bestassemblyfile)
            sample.commands.fahash = '{fahash} -b {outfile}.hash {outfile}.famap'.format(
                fahash=os.path.join(self.homepath, 'ePCR', 'fahash'),
                outfile=outfile)
            # re-PCR uses the subtyping primers list to search the contigs file using the following parameters
            # -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup)
            # -m 10000 (Set variability for STS size for lookup), this very large, as I don't necessarily know
            # the size of the amplicon
            # -n 1 (Set max allowed mismatches per primer pair for lookup)
            # -g 0 (Set max allowed indels per primer pair for lookup),
            # -G (Print alignments in comments)
            # -o {output file}
            sample.commands.epcr = \
                '{rePCR} -S {outfile}.hash -r + -d 1-{ampsize} -n {mismatches} -g 0 -G -q ' \
                '-o {outfile}.txt {primers}'.format(
                    rePCR=os.path.join(self.homepath, 'ePCR', 're-PCR'),
                    outfile=outfile,
                    ampsize=ampliconsize,
                    mismatches=self.mismatches,
                    primers=sample[self.analysistype].primers)
            sample[self.analysistype].resultsfile = '{of}.txt'.format(of=outfile)
            # Add the sample object and the output file to the queue
            self.epcrqueue.put((sample, outfile))
    # Join the threads
    self.epcrqueue.join()
Run ePCR in a multi - threaded fashion
694
10
16,335
def epcr_parse(self):
    """Parse the ePCR outputs into a per-sample toxin profile string."""
    logging.info('Parsing ePCR outputs')
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            # Create a set to store all the unique results
            toxin_set = set()
            if os.path.isfile(sample[self.analysistype].resultsfile):
                with open(sample[self.analysistype].resultsfile) as epcrresults:
                    for result in epcrresults:
                        # Only the lines without a # contain results
                        if "#" not in result:
                            # Split on \t
                            data = result.split('\t')
                            # The subtyping primer pair is the first entry on lines with results
                            vttype = data[0].split('_')[0]
                            # Add the verotoxin subtype to the set of detected subtypes
                            toxin_set.add(vttype)
                # Create a string of the entries in the sorted list of toxins joined with ";"
                # ('ND' = none detected)
                sample[self.analysistype].toxinprofile = \
                    ";".join(sorted(list(toxin_set))) if toxin_set else 'ND'
            else:
                # No results file: record 'NA' on a fresh attribute object.
                setattr(sample, self.analysistype, GenObject())
                sample[self.analysistype].toxinprofile = 'NA'
Parse the ePCR outputs
283
7
16,336
def epcr_report(self):
    """Create a report of the ePCR-calculated toxin profiles."""
    logging.info('Creating {at} report'.format(at=self.analysistype))
    report_name = os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype))
    with open(report_name, 'w') as report:
        rows = ['Strain,ToxinProfile\n']
        for sample in self.metadata:
            rows.append('{sn},{tp}\n'.format(
                sn=sample.name, tp=sample[self.analysistype].toxinprofile))
        # Single write of the assembled report body.
        report.write(''.join(rows))
Create a report of the ePCR - calculated toxin profiles
139
12
16,337
def parse_epcr(self):
    """Parse the ePCR output file and populate a dictionary of results.

    For alleles, keep the best result per gene (fewest mismatches).
    """
    # Use the metadata object from the vtyper_object
    for sample in self.vtyper_object.metadata:
        # Initialise the dictionary
        sample[self.analysistype].result_dict = dict()
        # Read in the output file
        with open(sample[self.analysistype].resultsfile) as epcrresults:
            for result in epcrresults:
                # Only the lines without a # contain results
                if "#" not in result:
                    # Split on \t
                    # vtx2a_0_0 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924 - 228 576 2 0 349/100-350
                    # primer_set: vtx2a_0_0, contig: 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924,
                    # strand: -, start: 228, stop: 576, mismatches: 2, indels: 0,
                    # amplicon_combo: 349/100-350
                    primer_set, contig, strand, start, stop, total_mismatches, indels, amplicon_combo = \
                        result.rstrip().split('\t')
                    # Set the mismatches to be an int
                    total_mismatches = int(total_mismatches)
                    # Set the position of the amplicon on the contig. Ensure that the lower value is first
                    genome_pos = '{min}-{max}'.format(min=min([int(start), int(stop)]),
                                                      max=max([int(start), int(stop)]))
                    # Extract the gene name from the modified name used when creating the primer file:
                    # LMhlyA_0_0 becomes LMhlyA
                    gene_re = re.search(r'([\w-]+)_(\d{1,3})_(\d{1,3})', primer_set)
                    gene = gene_re.groups()[0]
                    # Split the amplicon length from amplicon_combo: 349/100-350 -> 349
                    amplicon_length = amplicon_combo.split('/')[0]
                    # Populate the dictionary if the 'total_mismatches' key doesn't exist, or if the
                    # current number of mismatches is better than the previous 'best' number of mismatches
                    try:
                        if total_mismatches < sample[self.analysistype].result_dict[gene]['total_mismatches']:
                            self.populate_results_dict(sample=sample,
                                                       gene=gene,
                                                       total_mismatches=total_mismatches,
                                                       genome_pos=genome_pos,
                                                       amplicon_length=amplicon_length,
                                                       contig=contig,
                                                       primer_set=primer_set)
                    except KeyError:
                        # First result for this gene.
                        self.populate_results_dict(sample=sample,
                                                   gene=gene,
                                                   total_mismatches=total_mismatches,
                                                   genome_pos=genome_pos,
                                                   amplicon_length=amplicon_length,
                                                   contig=contig,
                                                   primer_set=primer_set)
Parse the ePCR output file . Populate dictionary of results . For alleles find the best result based on the number of mismatches before populating dictionary
694
35
16,338
def create_epr_report(self):
    """Parse the results dictionaries and create a final report."""
    with open(os.path.join(self.reportpath, 'ePCR_report.csv'), 'w') as report:
        rows = ['Sample,Gene,GenomeLocation,AmpliconSize,Contig,TotalMismatches,PrimerSet\n']
        for sample in self.vtyper_object.metadata:
            result_dicts = sample[self.analysistype].result_dict
            if not result_dicts:
                # No strain-specific results: sample name only.
                rows.append('{sn}\n'.format(sn=sample.name))
                continue
            for gene, result_dict in result_dicts.items():
                rows.append('{sn},{gene},{genomelocation},{ampliconsize},{contig},{nm},{ps}\n'.format(
                    sn=sample.name,
                    gene=gene,
                    genomelocation=result_dict['genome_pos'],
                    ampliconsize=result_dict['amplicon_length'],
                    contig=result_dict['contig'],
                    nm=result_dict['total_mismatches'],
                    ps=result_dict['primer_set']))
                if self.export_amplicons:
                    # Optionally dump the amplicon sequences as well.
                    self.ampliconfile(sample=sample,
                                      contig=result_dict['contig'],
                                      amplicon_range=result_dict['genome_pos'].split('-'),
                                      primer_set=result_dict['primer_set'])
        report.write(''.join(rows))
Parse the results dictionaries and create a final report
393
11
16,339
def samplesheet(self):
    """Create a custom sample sheet based on the original one for the run,
    including only samples that did not pass the quality threshold on the
    previous iteration."""
    if self.demultiplex:
        make_path(self.samplesheetpath)
        self.customsamplesheet = os.path.join(self.samplesheetpath, 'SampleSheet.csv')
        # Column order of the [Data] section.
        header = ['Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID',
                  'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description']
        with open(self.customsamplesheet, 'w') as samplesheet:
            # Assemble the whole sheet as one string, then write it once.
            lines = str()
            lines += '[Header]\n'
            lines += 'IEMFileVersion,{}\n'.format(self.header.IEMFileVersion)
            lines += 'Investigator Name,{}\n'.format(self.header.InvestigatorName)
            lines += 'Experiment Name,{}\n'.format(self.header.ExperimentName)
            lines += 'Date,{}\n'.format(self.header.Date)
            lines += 'Workflow,{}\n'.format(self.header.Workflow)
            lines += 'Application,{}\n'.format(self.header.Application)
            lines += 'Assay,{}\n'.format(self.header.Assay)
            lines += 'Description,{}\n'.format(self.header.Description)
            lines += 'Chemistry,{}\n'.format(self.header.Chemistry)
            lines += '\n'
            lines += '[Reads]\n'
            lines += str(self.forward) + '\n'
            lines += str(self.reverse) + '\n'
            lines += '\n'
            lines += '[Settings]\n'
            lines += 'ReverseComplement,{}\n'.format(self.header.ReverseComplement)
            lines += 'Adapter,{}\n'.format(self.header.Adapter)
            lines += '\n'
            lines += '[Data]\n'
            lines += ','.join(header)
            lines += '\n'
            # Correlate all the samples added to the list of incomplete samples with their metadata
            for incomplete in self.incomplete:
                for sample in self.rundata:
                    if incomplete == sample['SampleID']:
                        # Use each entry in the header list as a key for the rundata dictionary
                        for data in header:
                            # Modify the key to be consistent with how the dictionary was populated
                            result = sample[data.replace('_', '')]
                            # Description is the final entry in the list, and shouldn't have a , following the value
                            if data != 'Description':
                                lines += '{},'.format(result.replace('NA', ''))
                            # This entry should have a newline instead of a ,
                            else:
                                lines += '{}\n'.format(result.replace('NA', ''))
            # Write the string to the sample sheet
            samplesheet.write(lines)
Create a custom sample sheet based on the original sample sheet for the run but only including the samples that did not pass the quality threshold on the previous iteration
642
30
16,340
def update(connection=None, silent=False, hgnc_file_path=None, hcop_file_path=None, low_memory=False):
    """Update the database with the current version of HGNC."""
    db = DbManager(connection)
    db.db_import(silent=silent,
                 hgnc_file_path=hgnc_file_path,
                 hcop_file_path=hcop_file_path,
                 low_memory=low_memory)
    db.session.close()
Update the database with current version of HGNC
100
9
16,341
def set_connection(connection=defaults.sqlalchemy_connection_string_default):
    """Set the connection string for sqlalchemy and write it to the config file."""
    config_path = defaults.config_file_path
    config = RawConfigParser()
    if os.path.exists(config_path):
        # Update the existing configuration file in place.
        config.read(config_path)
        config.set('database', 'sqlalchemy_connection_string', connection)
        with open(config_path, 'w') as configfile:
            config.write(configfile)
    else:
        # First run: create the file with the single database section.
        with open(config_path, 'w') as config_file:
            config['database'] = {'sqlalchemy_connection_string': connection}
            config.write(config_file)
        log.info('create configuration file {}'.format(config_path))
Set the connection string for sqlalchemy and write it to the config file .
165
16
16,342
def set_mysql_connection(host='localhost', user='pyhgnc_user', passwd='pyhgnc_passwd', db='pyhgnc', charset='utf8'):
    """Set a MySQL (pymysql) connection string, persist it, and return it."""
    template = 'mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'
    connection_string = template.format(host=host, user=user, passwd=passwd, db=db, charset=charset)
    set_connection(connection_string)
    return connection_string
Method to set a MySQL connection
133
6
16,343
def relocate(self, destination):
    """Configure the virtual environment for another path."""
    for activate in self.bin.activates:
        activate.vpath = destination
    python_path = os.path.join(destination, 'bin', 'python')
    for binfile in self.bin.files:
        shebang = binfile.shebang
        # Rewrite only python/pypy shebangs; leave shell scripts etc. alone.
        if shebang and ('python' in shebang or 'pypy' in shebang):
            binfile.shebang = '#!{0}'.format(python_path)
Configure the virtual environment for another path .
97
9
16,344
def move(self, destination):
    """Reconfigure and move the virtual environment to another path."""
    # Fix activate scripts and shebangs first, then physically move the tree.
    self.relocate(destination)
    shutil.move(self.path, destination)
    self._path = destination
Reconfigure and move the virtual environment to another path .
31
12
16,345
def aggregate(self):
    """Aggregate all reports of the same type into a master report."""
    for report in self.reportset:
        printtime('Processing {}'.format(report.split('.')[0]), self.start)
        # Initialise the header for each report - MLST is different, as the header is different for each
        # MLST scheme. This provides a generic header instead
        header = '' if report != 'mlst.csv' else 'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\n'
        # Initialise a string to hold the data for each report
        data = ''
        # Open the aggregated report
        with open(os.path.join(self.reportpath, report), 'w') as aggregate:
            for sample in self.runmetadata.samples:
                # Try to open the report for this run
                try:
                    with open(os.path.join(sample.general.reportpath, report), 'r') as runreport:
                        # Only get the header from the first file
                        if not header:
                            header = runreport.readline()
                        else:
                            for row in runreport:
                                # The final entry in a report does not have a newline character.
                                # Add \n as required
                                if not row.endswith('\n'):
                                    row += '\n'
                                # For certain reports, the header row is printed above each strain -
                                # ignore multiple instances of the header
                                if row.split(',')[0] != header.split(',')[0]:
                                    # Add the row to the string of data
                                    data += row
                except IOError:
                    # A run without this report type is simply skipped.
                    pass
            # Write the strings to the aggregate report file
            aggregate.write(header)
            aggregate.write(data)
Aggregate all reports of the same type into a master report
362
12
16,346
def _parse_blocks(instream):
    """Parse alignment blocks from the given file handle, yielding one
    dict per block."""
    ilines = sugar.unblank(instream)
    for line in ilines:
        if line.startswith('['):
            # Start of block
            level, one, name, seqcount, params = _parse_block_header(line)
            qlen, qchars = _parse_block_postheader(next(ilines))
            # Pass control to the sequence parser
            sequences = list(_parse_sequences(ilines, qlen))
            # Validation
            if not len(sequences) == seqcount:
                logging.warn("Expected %d sequences in block %s, found %d",
                             seqcount, name, len(sequences))
            yield {
                'level': level,
                'one': one,
                'name': name,
                # 'seqcount': seqcount,
                'params': params,
                'query_length': qlen,
                'query_chars': qchars,
                'sequences': sequences,
            }
Parse an alignment block from the given file handle .
212
11
16,347
def _parse_sequences(ilines, expect_qlen):
    """Parse the sequences in the current block, yielding one dict per
    sequence record.

    :param ilines: iterator of unblanked lines, positioned at the first
        sequence pre-header
    :param expect_qlen: query length declared by the block header, used
        for validation warnings
    """
    while True:
        first = next(ilines)
        if first.startswith('_') and first.endswith('].'):
            # End of sequences & end of block
            break
        # ENH: handle wrapped lines?
        try:
            index, this_len, query_len = _parse_seq_preheader(first)
        except ValueError:
            logging.warn('Unparseable line (SKIPPING):\n%s', first)
            continue
        (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description) = \
            _parse_seq_header(next(ilines))
        try:
            headseq, molseq, tailseq = _parse_seq_body(next(ilines))
        except ValueError:
            logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
            continue
        # Validation
        if expect_qlen != query_len:
            logging.warn("Query length in %s given as %d; expected %d",
                         rec_id, query_len, expect_qlen)
        # Normalize missing flank lengths to 0.
        if not headseq and not headlen:
            headlen = 0
        if not tailseq and not taillen:
            taillen = 0
        if headseq:
            if headlen is None:
                headlen = len(headseq)
            elif headlen != len(headseq):
                logging.warn("Conflicting head flank lengths in %s: %d, %d",
                             rec_id, headlen, len(headseq))
        if tailseq:
            if taillen is None:
                taillen = len(tailseq)
            elif taillen != len(tailseq):
                logging.warn("Conflicting tail flank lengths in %s: %d, %d",
                             rec_id, taillen, len(tailseq))
        yield {
            'index': index,
            'id': rec_id,
            'description': description,
            'dbxrefs': dbxrefs,
            'phylum': phylum,
            'taxchar': taxchar,
            'head_len': headlen,
            'tail_len': taillen,
            'head_seq': headseq,
            'tail_seq': tailseq,
            'length': this_len,
            'seq': molseq,
        }
Parse the sequences in the current block .
514
9
16,348
def realign_seqs(block, gap_char='.', align_indels=False):
    """Add gaps to a block so all residues in a column are equivalent.

    Lowercase characters are inserts; any column where only some rows
    have inserts gets gap characters in the remaining rows.
    """
    # ENH: align inserts using an external tool (if align_indels)
    rows = [list(entry['seq']) for entry in block['sequences']]
    # NB: If speed is an issue here, consider Numpy or Cython
    # main problem: list.insert is O(n) -- would OrderedDict help?
    total_rows = len(rows)
    col = 0
    while col < len(rows[0]):
        aligned_here = [row for row in rows if not row[col].islower()]
        if len(aligned_here) != total_rows:
            # Some row has a lowercase insert at this column: pad the others.
            for row in aligned_here:
                row.insert(col, gap_char)
        col += 1
    return [''.join(row) for row in rows]
Add gaps to a block so all residues in a column are equivalent .
211
14
16,349
def collapse_to_consensus ( seqrecords , strict = False , do_iron = True ) : level = 0 name = seqrecords [ 0 ] . id # If this is a CMA alignment, extract additional info: if hasattr ( seqrecords , '_records' ) : if hasattr ( seqrecords , 'level' ) : level = seqrecords . level if hasattr ( seqrecords , 'name' ) : name = seqrecords . name seqrecords = seqrecords . _records consensus = seqrecords . pop ( 0 ) cons_length = len ( consensus ) for i , s in enumerate ( seqrecords ) : if len ( s ) != cons_length : raise ValueError ( "Sequence #%d has length %d, consensus is %d" % ( i + 2 , len ( s ) , cons_length ) ) if '.' in str ( consensus . seq ) : # Strict -- error if there's a '-' if '-' in str ( consensus . seq ) : if strict : raise ValueError ( "Consensus contains '-' gap characters" ) logging . warn ( "Consensus sequence contains both '.' and '-' gap " "characters -- is it really the consensus?" ) aligned_cols = [ ( c not in '.-' ) for c in str ( consensus . seq ) ] else : aligned_cols = [ c != '.' for c in str ( consensus . seq ) ] else : # A little more ambiguous... aligned_cols = [ c != '-' for c in str ( consensus . seq ) ] consensus . seq = replace_asterisks ( consensus . seq , 'consensus' ) # Start a block with the consensus sequence block = consensus2block ( consensus , level = level , name = name ) qlen = block [ 'query_length' ] # Collapse & add remaining sequences to the block for index , rec in zip ( xrange ( 2 , len ( seqrecords ) + 2 ) , seqrecords ) : # Collapse rec.seq down to aligned size new_mol_seq = [ ] is_beginning = True for aligned_col , char in zip ( aligned_cols , replace_asterisks ( rec . seq , index ) ) : if aligned_col : is_beginning = False if char in '-.' : # deletion new_mol_seq . append ( '-' ) else : # aligned character new_mol_seq . append ( char . upper ( ) ) else : # it's an insert or nothing # (also, skip any left-side inserts) if char not in '-.' and not is_beginning : new_mol_seq . append ( char . lower ( ) ) rec . seq = '' . 
join ( new_mol_seq ) if do_iron : rec . seq = iron ( rec . seq ) block [ 'sequences' ] . append ( seqrecord2sequence ( rec , qlen , index ) ) return block
Opposite of realign_seqs .
637
9
16,350
def iron ( sequence ) : r_indel = re . compile ( r'(-[a-y]|[a-y]-)' ) orig_sequence = sequence while r_indel . search ( sequence ) : in_insert = False in_gap = False seen_gaps = 0 inserts = [ ] outchars = [ ] for char in sequence : if in_insert : if char . islower ( ) : # Extend the insert inserts . append ( char ) elif char . isupper ( ) : # Indel is over; 'iron' out & emit inserts, then gaps in_insert = False outchars . extend ( inserts ) inserts = [ ] outchars . append ( '-' * seen_gaps ) seen_gaps = 0 outchars . append ( char ) else : # Convert a preceding indel char to a 'match' (uppercase) # If the indel and gap are both multiple chars, this will # capitalize the insert left-to-right, then leave any gap # remainer as-is. assert char == '-' if not inserts : in_insert = False in_gap = True seen_gaps += 1 else : outchars . append ( inserts . pop ( 0 ) . upper ( ) ) # NB: Only leave the insert region if we've finished # converting all the insert chars if not inserts : in_insert = False in_gap = True elif in_gap : if char . islower ( ) : in_insert = True in_gap = False # If some inserts previously seen, emit them now # If no inserts have been seen yet, we'll iron this indel if inserts : outchars . extend ( inserts ) outchars . append ( '-' * seen_gaps ) seen_gaps = 0 inserts = [ char ] elif char . isupper ( ) : in_gap = False # End of the gap -- emit if inserts : outchars . extend ( inserts ) inserts = [ ] outchars . append ( '-' * seen_gaps ) seen_gaps = 0 outchars . append ( char ) else : # Extend the gap assert char == '-' seen_gaps += 1 else : assert not inserts and not seen_gaps , ( "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s" % ( inserts , seen_gaps , sequence , in_insert , in_gap ) ) # Coming from Match state if char . isupper ( ) : # Extend the match outchars . append ( char ) elif char . islower ( ) : inserts . 
append ( char ) in_insert = True else : assert char == '-' seen_gaps += 1 in_gap = True # Emit any trailing indel if inserts : outchars . extend ( inserts ) if seen_gaps : outchars . append ( '-' * seen_gaps ) sequence = '' . join ( outchars ) # logging.info(sequence) assert ( sequence . replace ( '-' , '' ) . upper ( ) == orig_sequence . replace ( '-' , '' ) . upper ( ) ) , '\nOrig: ' + orig_sequence + '\nIron: ' + sequence return sequence
Iron out indel regions in the aligned sequence .
704
10
16,351
def get_github_content ( repo , path , auth = None ) : request = requests . get ( file_url . format ( repo = repo , path = path ) , auth = auth ) if not request . ok : print ( "There is a problem with the request" ) print ( file_url . format ( repo = repo , path = path ) ) print ( request . json ( ) ) exit ( 1 ) if not request . json ( ) [ 'encoding' ] == 'base64' : raise RuntimeError ( "Unknown Encoding encountered when fetching {} from repo {}: {}" . format ( path , repo , request . json ( ) [ 'encoding' ] ) ) return request . json ( ) [ 'content' ] . decode ( 'base64' ) . decode ( 'utf8' )
Retrieve text files from a github repo
173
8
16,352
def collect_reponames ( ) : reponames = [ ] #try to figure out the repo from git repo in current directory try : with open ( os . devnull ) as devnull : remote_data = subprocess . check_output ( [ "git" , "remote" , "-v" , "show" ] , stderr = devnull ) branches = { } for line in remote_data . decode ( 'utf-8' ) . split ( "\n" ) : if line . strip ( ) == "" : continue remote_match = re_mote . match ( line ) if not remote_match is None : branches [ remote_match . group ( 1 ) ] = remote_match . group ( 5 ) if len ( branches ) > 0 : if "origin" in branches : reponames . append ( branches [ "origin" ] ) else : reponames . append ( branches . values ( ) [ 0 ] ) except OSError : pass except subprocess . CalledProcessError : pass #scan html files for further repos to consider for fname in glob . iglob ( "*.html" ) : fid = open ( fname , "r" , "utf8" ) #check the second line for the repo marker fid . readline ( ) line = fid . readline ( ) match = re . match ( repo_marker_re , line ) if not match is None : reponames . append ( match . group ( 1 ) ) reponames = list ( set ( reponames ) ) return reponames
Try to figure out a list of repos to consider by default from the contents of the working directory .
333
21
16,353
def collect_github_config ( ) : github_config = { } for field in [ "user" , "token" ] : try : github_config [ field ] = subprocess . check_output ( [ "git" , "config" , "github.{}" . format ( field ) ] ) . decode ( 'utf-8' ) . strip ( ) except ( OSError , subprocess . CalledProcessError ) : pass return github_config
Try load Github configuration such as usernames from the local or global git config
98
16
16,354
def setCurveModel ( self , model ) : self . stimModel = model self . ui . curveWidget . setModel ( model )
Sets the stimulus model for the calibration curve test
30
10
16,355
def addOption ( self , stim ) : # set the editor widgets for noise and sweep self . ui . calTypeCmbbx . insertItem ( 0 , stim . name ) editor = stim . showEditor ( ) # should probably make this less coupled durInput = editor . durationInputWidget ( ) self . durationWidgets . append ( durInput ) durInput . setEnabled ( False ) self . ui . caleditorStack . insertWidget ( 0 , editor ) self . ui . calTypeCmbbx . setCurrentIndex ( 0 )
Adds a stimulus to the list of stims to use for testing calibration
118
14
16,356
def saveToObject ( self ) : for i in range ( self . ui . caleditorStack . count ( ) ) : try : self . ui . caleditorStack . widget ( i ) . saveToObject ( ) except AttributeError : logger = logging . getLogger ( 'main' ) logger . debug ( 'index {} does not have method saveToObject' . format ( i ) )
Saves the current UI setting to the model
86
9
16,357
def isToneCal ( self ) : return self . ui . calTypeCmbbx . currentIndex ( ) == self . ui . calTypeCmbbx . count ( ) - 1
Whether the currently selected calibration stimulus type is the calibration curve
44
11
16,358
def reset_generation ( self , trigger ) : self . tone_lock . acquire ( ) npts = self . stim . size try : self . aotask = AOTaskFinite ( self . aochan , self . fs , npts , trigsrc = trigger ) self . aotask . write ( self . stim ) if self . attenuator is not None : self . attenuator . SetAtten ( self . atten ) else : # print "ERROR: attenuation not set!" pass # raise self . ngenerated += 1 if self . stim_changed : new_gen = self . stim else : new_gen = None self . stim_changed = False except : print u'ERROR! TERMINATE!' self . tone_lock . release ( ) raise self . tone_lock . release ( ) return new_gen
Re - arms the analog output according to current settings
180
10
16,359
def set_stim ( self , signal , fs , attenuation = 0 ) : self . tone_lock . acquire ( ) self . stim = signal self . fs = fs self . atten = attenuation self . stim_changed = True self . tone_lock . release ( )
Sets any vector as the next stimulus to be output . Does not call write to hardware
58
18
16,360
def connect_attenuator ( self , connect = True ) : if connect : try : pa5 = win32com . client . Dispatch ( "PA5.x" ) success = pa5 . ConnectPA5 ( 'GB' , 1 ) if success == 1 : print 'Connection to PA5 attenuator established' pass else : print 'Connection to PA5 attenuator failed' errmsg = pa5 . GetError ( ) print u"Error: " , errmsg raise Exception ( u"Attenuator connection failed" ) except : print "Error connecting to attenuator" pa5 = None self . attenuator = pa5 else : # if there is an attenuator, make sure it is set to 0 before disconnecting if self . attenuator : self . attenuator . setAtten ( 0 ) self . attenuator = None return self . attenuator
Establish a connection to the TDT PA5 attenuator
190
13
16,361
def start_timer ( self , reprate ) : print 'starting digital output at rate {} Hz' . format ( reprate ) self . trigger_task = DigitalOutTask ( self . trigger_src , reprate ) self . trigger_task . start ( )
Start the digital output task that serves as the acquistion trigger
55
13
16,362
def start ( self ) : # this shouldn't actually be possible still... if self . aitask is not None : self . stop ( ) raise Exception ( "FIX ME : NESTED START OPERATIONS ALLOWED" ) self . daq_lock . acquire ( ) self . ngenerated = 0 self . nacquired = 0 return self . reset ( )
Writes output buffer and settings to device
77
8
16,363
def stop ( self ) : try : self . aitask . stop ( ) self . aotask . stop ( ) pass except : print u"No task running" self . aitask = None self . aotask = None
Halts the acquisition this must be called before resetting acquisition
50
12
16,364
def start_continuous ( self , aichans , update_hz = 10 ) : self . daq_lock . acquire ( ) self . ngenerated = 0 # number of stimuli presented during chart run npts = int ( self . aifs / update_hz ) #update display at 10Hz rate nchans = len ( aichans ) self . aitask = AITask ( aichans , self . aifs , npts * 5 * nchans ) self . aitask . register_callback ( self . _read_continuous , npts ) self . aitask . start ( )
Begins a continuous analog generation calling a provided function at a rate of 10Hz
137
16
16,365
def run ( self ) : self . aotask . StartTask ( ) self . aotask . wait ( ) # don't return until generation finished self . aotask . stop ( ) self . aotask = None
Executes the stimulus generation and returns when completed
48
9
16,366
def stop_all ( self ) : if self . aotask is not None : self . aotask . stop ( ) self . aitask . stop ( ) self . daq_lock . release ( ) self . aitask = None self . aotask = None
Halts both the analog output and input tasks
60
9
16,367
def get ( self , url , params = None , raw = False , stream = False , * * request_kwargs ) : full_url = self . build_url ( url ) params = params or { } # Add token (if it's not already there) if self . _token : params . setdefault ( 'token' , self . _token ) response = requests . get ( full_url , params = params , stream = stream , * * request_kwargs ) self . check_for_errors ( response ) # Raise exception if something failed if stream : return response if raw or not response . content : return response . content return json . loads ( response . text )
GET request to AmigoCloud endpoint .
143
8
16,368
def post ( self , url , data = None , files = None , headers = None , raw = False , send_as_json = True , content_type = None , * * request_kwargs ) : return self . _secure_request ( url , 'post' , data = data , files = files , headers = headers , raw = raw , send_as_json = send_as_json , content_type = content_type , * * request_kwargs )
POST request to AmigoCloud endpoint .
102
8
16,369
def upload_gallery_photo ( self , gallery_id , source_amigo_id , file_obj , chunk_size = CHUNK_SIZE , force_chunked = False , metadata = None ) : simple_upload_url = 'related_tables/%s/upload' % gallery_id chunked_upload_url = 'related_tables/%s/chunked_upload' % gallery_id data = { 'source_amigo_id' : source_amigo_id } if isinstance ( file_obj , basestring ) : data [ 'filename' ] = os . path . basename ( file_obj ) else : data [ 'filename' ] = os . path . basename ( file_obj . name ) if metadata : data . update ( metadata ) return self . upload_file ( simple_upload_url , chunked_upload_url , file_obj , chunk_size = chunk_size , force_chunked = force_chunked , extra_data = data )
Upload a photo to a dataset s gallery .
225
9
16,370
def listen_user_events ( self ) : if not self . _user_id : raise AmigoCloudError ( self . error_msg [ 'logged_in_websockets' ] ) response = self . get ( '/me/start_websocket_session' ) websocket_session = response [ 'websocket_session' ] auth_data = { 'userid' : self . _user_id , 'websocket_session' : websocket_session } self . amigosocket . emit ( 'authenticate' , auth_data )
Authenticate to start listening to user events .
124
9
16,371
def listen_dataset_events ( self , owner_id , project_id , dataset_id ) : if not self . _user_id : raise AmigoCloudError ( self . error_msg [ 'logged_in_websockets' ] ) url = '/users/%s/projects/%s/datasets/%s/start_websocket_session' response = self . get ( url % ( owner_id , project_id , dataset_id ) ) websocket_session = response [ 'websocket_session' ] auth_data = { 'userid' : self . _user_id , 'datasetid' : dataset_id , 'websocket_session' : websocket_session } self . amigosocket . emit ( 'authenticate' , auth_data )
Authenticate to start using dataset events .
181
8
16,372
def build_markdown_table ( headers , rows , row_keys = None ) : row_maxes = _find_row_maxes ( headers , rows ) row_keys = row_keys or [ key for key , value in headers . items ( ) ] table = [ _build_row ( headers , row_maxes , row_keys ) , _build_separator ( row_maxes , row_keys ) ] for row in rows : table . append ( _build_row ( row , row_maxes , row_keys ) ) return '\n' . join ( table ) + '\n'
Build a lined up markdown table .
134
8
16,373
def write_to_path ( self , path , suffix = '' , format = 'png' , overwrite = False ) : if os . path . exists ( path ) and overwrite is False : raise ValueError ( "Error: use ovewrite=True to overwrite images" ) if not os . path . exists ( path ) : os . makedirs ( path ) for i , r in self . iterrows ( ) : spath = os . path . join ( path , r [ 'project_name' ] , r [ 'sample_name' ] ) if not os . path . exists ( spath ) : os . makedirs ( spath ) if suffix == '' : fname = os . path . join ( spath , r [ 'frame_name' ] + '.' + format ) else : fname = os . path . join ( spath , r [ 'frame_name' ] + '_' + suffix + '.' + format ) imageio . imwrite ( fname , r [ 'image' ] , format = format )
Output the data the dataframe s image column to a directory structured by project - > sample and named by frame
223
22
16,374
def build_segmentation_image ( self , schema , background = ( 0 , 0 , 0 , 0 ) ) : cummulative = self . copy ( ) def _set_blank ( img , blank ) : img [ : ] [ : ] = blank return img cummulative [ 'merged' ] = cummulative . apply ( lambda x : _set_blank ( np . zeros ( list ( x [ 'shape' ] ) + [ 4 ] ) , background ) , 1 ) for layer in schema : if self . verbose : sys . stderr . write ( "Calculating layer " + str ( layer ) + "\n" ) images = self . get_outline_images ( subset_logic = layer [ 'subset_logic' ] , edge_color = layer [ 'edge_color' ] , watershed_steps = layer [ 'watershed_steps' ] , fill_color = layer [ 'fill_color' ] ) cummulative = cummulative . rename ( columns = { 'merged' : 'old' } ) cummulative = cummulative . merge ( images , on = list ( self . columns ) ) cummulative [ 'new' ] = cummulative . apply ( lambda x : _merge_images ( x [ 'merged' ] , x [ 'old' ] ) , 1 ) cummulative = cummulative . drop ( columns = [ 'old' , 'merged' ] ) . rename ( columns = { 'new' : 'merged' } ) cummulative = cummulative . rename ( columns = { 'merged' : 'image' } ) return SegmentationImageOutput ( cummulative )
Put together an image . Defined by a list of layers with RGBA colors
367
16
16,375
def valid ( number ) : checksum = 0 number_len = len ( number ) offset = ord ( '0' ) i = number_len - 1 while i >= 0 : n = ord ( number [ i ] ) - offset checksum += n i -= 2 i = number_len - 2 while i >= 0 : n = ord ( number [ i ] ) - offset n *= 2 if n > 9 : n -= 9 checksum += n i -= 2 return checksum % 10 == 0
Returns true if the number string is luhn valid and false otherwise . The number string passed to the function must contain only numeric characters otherwise behavior is undefined .
105
32
16,376
def create_publication_assistant ( self , * * args ) : # Check args logdebug ( LOGGER , 'Creating publication assistant..' ) mandatory_args = [ 'drs_id' , 'version_number' , 'is_replica' ] esgfpid . utils . check_presence_of_mandatory_args ( args , mandatory_args ) # Check if service path is given if self . __thredds_service_path is None : msg = 'No thredds_service_path given (but it is mandatory for publication)' logwarn ( LOGGER , msg ) raise esgfpid . exceptions . ArgumentError ( msg ) # Check if data node is given if self . __data_node is None : msg = 'No data_node given (but it is mandatory for publication)' logwarn ( LOGGER , msg ) raise esgfpid . exceptions . ArgumentError ( msg ) # Check if solr has access: if self . __coupler . is_solr_switched_off ( ) : pass # solr access not mandatory anymore # Create publication assistant assistant = esgfpid . assistant . publish . DatasetPublicationAssistant ( drs_id = args [ 'drs_id' ] , version_number = args [ 'version_number' ] , thredds_service_path = self . __thredds_service_path , data_node = self . __data_node , prefix = self . prefix , coupler = self . __coupler , is_replica = args [ 'is_replica' ] , consumer_solr_url = self . __consumer_solr_url # may be None ) logdebug ( LOGGER , 'Creating publication assistant.. done' ) return assistant
Create an assistant for a dataset that allows to make PID requests for the dataset and all of its files .
383
21
16,377
def unpublish_one_version ( self , * * args ) : # Check args optional_args = [ 'handle' , 'drs_id' , 'version_number' ] esgfpid . utils . add_missing_optional_args_with_value_none ( args , optional_args ) # Check if data node is given if self . __data_node is None : msg = 'No data_node given (but it is mandatory for unpublication)' logwarn ( LOGGER , msg ) raise esgfpid . exceptions . ArgumentError ( msg ) # Unpublish assistant = esgfpid . assistant . unpublish . AssistantOneVersion ( drs_id = args [ 'drs_id' ] , data_node = self . __data_node , prefix = self . prefix , coupler = self . __coupler , message_timestamp = esgfpid . utils . get_now_utc_as_formatted_string ( ) ) assistant . unpublish_one_dataset_version ( handle = args [ 'handle' ] , version_number = args [ 'version_number' ] )
Sends a PID update request for the unpublication of one version of a dataset currently published at the given data node .
253
25
16,378
def unpublish_all_versions ( self , * * args ) : # Check args mandatory_args = [ 'drs_id' ] esgfpid . utils . check_presence_of_mandatory_args ( args , mandatory_args ) # Check if data node is given if self . __data_node is None : msg = 'No data_node given (but it is mandatory for publication)' logwarn ( LOGGER , msg ) raise esgfpid . exceptions . ArgumentError ( msg ) # Check if solr has access: if self . __coupler . is_solr_switched_off ( ) : msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.' logdebug ( LOGGER , msg ) #raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library') # Unpublish assistant = esgfpid . assistant . unpublish . AssistantAllVersions ( drs_id = args [ 'drs_id' ] , data_node = self . __data_node , prefix = self . prefix , coupler = self . __coupler , message_timestamp = esgfpid . utils . get_now_utc_as_formatted_string ( ) , consumer_solr_url = self . __consumer_solr_url # may be None ) assistant . unpublish_all_dataset_versions ( )
Sends a PID update request for the unpublication of all versions of a dataset currently published at the given data node .
348
25
16,379
def add_errata_ids ( self , * * args ) : # Check args: mandatory_args = [ 'drs_id' , 'version_number' , 'errata_ids' ] esgfpid . utils . check_presence_of_mandatory_args ( args , mandatory_args ) esgfpid . utils . check_noneness_of_mandatory_args ( args , mandatory_args ) # Perform metadata update assistant = esgfpid . assistant . errata . ErrataAssistant ( coupler = self . __coupler , prefix = self . prefix ) assistant . add_errata_ids ( drs_id = args [ 'drs_id' ] , version_number = args [ 'version_number' ] , errata_ids = args [ 'errata_ids' ] )
Add errata ids to a dataset handle record .
184
11
16,380
def make_handle_from_drsid_and_versionnumber ( self , * * args ) : args [ 'prefix' ] = self . prefix return esgfpid . utils . make_handle_from_drsid_and_versionnumber ( * * args )
Create a handle string for a specific dataset based on its dataset id and version number and the prefix passed to the library at initializing .
61
27
16,381
def mousePressEvent ( self , event ) : super ( AbstractDragView , self ) . mousePressEvent ( event ) self . dragStartPosition = event . pos ( )
saves the drag position so we know when a drag should be initiated
36
14
16,382
def dragLeaveEvent ( self , event ) : super ( AbstractDragView , self ) . dragLeaveEvent ( event ) self . dragline = None self . viewport ( ) . update ( ) event . accept ( )
Clears drop cursor line
46
5
16,383
def childEvent ( self , event ) : super ( AbstractDragView , self ) . childEvent ( event ) if event . type ( ) == QtCore . QEvent . ChildRemoved : # hack to catch drop offs if self . originalPos is not None : selected = self . limbo_component self . model ( ) . insertItem ( self . originalPos , selected ) self . originalPos = None self . dragStartPosition = None self . viewport ( ) . update ( )
Catches items dropped off edge of view reinserts at original position
101
14
16,384
def mouseReleaseEvent ( self , event ) : super ( AbstractDragView , self ) . mouseReleaseEvent ( event ) self . dragStartPosition = None
Resets the drag start position
32
6
16,385
async def setup ( self ) : try : db = await self . db collections = await db . list_collection_names ( ) created = False if self . table_name not in collections : # create table logger . info ( "Creating MongoDB collection [{}]" . format ( self . table_name ) ) await db . create_collection ( self . table_name ) await db [ self . table_name ] . create_index ( [ ( "target_id" , DESCENDING ) , ( "post_id" , DESCENDING ) ] ) created = True # create control collection if not already created. if self . control_table_name and self . control_table_name not in collections : # create table logger . info ( "Creating MongoDB control data collection [{}]" . format ( self . control_table_name ) ) await db . create_collection ( self . control_table_name ) created = True return created except Exception as exc : logger . error ( "[DB] Error when setting up MongoDB collections: {}" . format ( exc ) ) return False
Setting up MongoDB collections if they not exist .
234
10
16,386
def setDoc ( self , docs ) : # sort stim by start time docs = sorted ( docs , key = lambda k : k [ 'start_s' ] ) for doc in docs : stim_type = doc [ 'stim_type' ] if not stim_type in self . displayTable : continue if not stim_type in self . displayTable [ stim_type ] : continue display_attributes = self . displayTable . get ( stim_type , self . defaultAttributes ) self . lyt . addWidget ( ComponentDetailFrame ( doc , display_attributes ) )
Sets the documentation to display
123
6
16,387
def setComponents ( self , components ) : layout = self . layout ( ) for comp in components : attrWidget = ComponentAttributerChecker ( comp ) layout . addWidget ( attrWidget )
Clears and sets the components contained in this widget
44
10
16,388
def setCheckedDetails ( self , checked ) : layout = self . layout ( ) for i in range ( layout . count ( ) ) : w = layout . itemAt ( i ) . widget ( ) if w . stimType in checked : w . setChecked ( checked [ w . stimType ] )
Sets which components are checked
65
6
16,389
def getCheckedDetails ( self ) : attrs = { } layout = self . layout ( ) for i in range ( layout . count ( ) ) : w = layout . itemAt ( i ) . widget ( ) attrs [ w . stimType ] = w . getChecked ( ) return attrs
Gets the currently checked components and checked attributes
65
9
16,390
def getChecked ( self ) : attrs = [ ] layout = self . layout ( ) for i in range ( layout . count ( ) ) : w = layout . itemAt ( i ) . widget ( ) if w . isChecked ( ) : attrs . append ( str ( w . text ( ) ) ) return attrs
Gets the checked attributes
71
5
16,391
def headerData ( self , section , orientation , role ) : if role == QtCore . Qt . DisplayRole : if orientation == QtCore . Qt . Horizontal : return self . headers [ section ]
Get the Header for the columns in the table
42
9
16,392
def cursor ( self , pos ) : row = self . indexAt ( pos ) . row ( ) if row == - 1 : row = self . model ( ) . rowCount ( ) row_height = self . rowHeight ( 0 ) y = row_height * row x = self . width ( ) return QtCore . QLine ( 0 , y , x , y )
Returns a line at the nearest row split between tests .
79
11
16,393
def mousePressEvent ( self , event ) : index = self . indexAt ( event . pos ( ) ) if index . isValid ( ) : if index . column ( ) == 0 : self . edit ( index , QtGui . QAbstractItemView . DoubleClicked , event ) else : super ( ProtocolView , self ) . mousePressEvent ( event )
Launches edit of cell if first column clicked otherwise passes to super class
77
14
16,394
def run ( ) : args = parse_args ( ) codetools . setup_logging ( args . debug ) global g g = pygithub . login_github ( token_path = args . token_path , token = args . token ) if not args . hide : args . hide = [ ] org = g . get_organization ( args . organization ) try : repos = list ( org . get_repos ( ) ) except github . RateLimitExceededException : raise except github . GithubException as e : msg = 'error getting repos' raise pygithub . CaughtOrganizationError ( org , e , msg ) from None for r in repos : try : teamnames = [ t . name for t in r . get_teams ( ) if t . name not in args . hide ] except github . RateLimitExceededException : raise except github . GithubException as e : msg = 'error getting teams' raise pygithub . CaughtRepositoryError ( r , e , msg ) from None maxt = args . maxt if ( args . maxt is not None and args . maxt >= 0 ) else len ( teamnames ) if args . debug : print ( "MAXT=" , maxt ) if args . mint <= len ( teamnames ) <= maxt : print ( r . name . ljust ( 40 ) + args . delimiter . join ( teamnames ) )
List repos and teams
300
5
16,395
def create_bar_chart ( self , x_labels , y_values , y_label ) : self . setup ( 0.25 ) ax1 = self . get_ax ( ) ax1 . set_xticks ( list ( range ( len ( x_labels ) ) ) ) ax1 . set_xticklabels ( [ x_labels [ i ] for i in range ( len ( x_labels ) ) ] , rotation = 90 ) plt . ylabel ( y_label ) x_pos = range ( len ( x_labels ) ) plt . bar ( x_pos , y_values , align = "center" ) return ax1
Creates bar char
146
4
16,396
def create_multiple_bar_chart ( self , x_labels , mul_y_values , mul_y_labels , normalize = False ) : self . setup ( 0.25 ) ax1 = self . get_ax ( ) ax1 . set_xticks ( list ( range ( len ( x_labels ) ) ) ) ax1 . set_xticklabels ( [ x_labels [ i ] for i in range ( len ( x_labels ) ) ] , rotation = 90 ) y_counts = len ( mul_y_values ) colors = cm . rainbow ( np . linspace ( 0 , 1 , y_counts ) ) # different colors max_bar_width = 0.6 bar_width = max_bar_width / y_counts # width of each bar x_shifts = np . linspace ( 0 , max_bar_width , y_counts ) - max_bar_width * 0.5 # center in 0 ax_series = [ ] for i in range ( y_counts ) : x_pos = range ( len ( x_labels ) ) # x points x_pos = np . array ( x_pos ) + x_shifts [ i ] # shift for each y series if normalize : # normalize array y_values = normalize_array ( mul_y_values [ i ] ) else : y_values = mul_y_values [ i ] ax_series . append ( ax1 . bar ( x_pos , y_values , width = bar_width , align = "center" , color = colors [ i ] ) ) ax1 . legend ( ax_series , mul_y_labels ) return ax1
Creates bar chart with multiple lines
374
7
16,397
def file_parts ( self ) : file_parts = [ ] for part in self . parts : try : for sub_part in part : if isinstance ( sub_part , FileToken ) : file_parts . append ( sub_part ) except TypeError : if isinstance ( part , FileToken ) : file_parts . append ( part ) return file_parts
Returns a list of the file tokens in the list of parts .
78
13
16,398
def update_dependent_files ( self , prev_commands = [ ] ) : for command in prev_commands : for my_input in self . input_parts : for their_output in command . output_parts : if their_output == my_input : my_input . filename = their_output . eval ( )
Update the command s dependencies based on the evaluated input and output of previous commands .
70
16
16,399
def eval ( self ) : eval = [ ] for part in self . parts : try : result = part . eval ( ) except AttributeError : result = part if result [ - 1 ] != '\n' : result += ' ' eval . append ( result ) return '' . join ( eval ) . strip ( )
Evaluate the given job and return a complete shell script to be run by the job manager .
67
20