idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
2,300
def add_text_to_image ( fname , txt , opFilename ) : ft = ImageFont . load ( "T://user//dev//src//python//_AS_LIB//timR24.pil" ) print ( "Adding text " , txt , " to " , fname , " pixels wide to file " , opFilename ) im = Image . open ( fname ) draw = ImageDraw . Draw ( im ) draw . text ( ( 0 , 0 ) , txt , fill = ( 0 , ...
convert an image by adding text
2,301
def add_crosshair_to_image ( fname , opFilename ) : im = Image . open ( fname ) draw = ImageDraw . Draw ( im ) draw . line ( ( 0 , 0 ) + im . size , fill = ( 255 , 255 , 255 ) ) draw . line ( ( 0 , im . size [ 1 ] , im . size [ 0 ] , 0 ) , fill = ( 255 , 255 , 255 ) ) del draw im . save ( opFilename )
convert an image by adding a cross hair
2,302
def filter_contour ( imageFile , opFile ) : im = Image . open ( imageFile ) im1 = im . filter ( ImageFilter . CONTOUR ) im1 . save ( opFile )
convert an image by applying a contour
2,303
def get_img_hash ( image , hash_size = 8 ) : image = image . resize ( ( hash_size + 1 , hash_size ) , Image . ANTIALIAS , ) pixels = list ( image . getdata ( ) ) difference = [ ] for row in range ( hash_size ) : for col in range ( hash_size ) : pixel_left = image . getpixel ( ( col , row ) ) pixel_right = image . getpi...
Grayscale and shrink the image in one step
2,304
def load_image ( fname ) : with open ( fname , "rb" ) as f : i = Image . open ( fname ) return i
read an image from file - PIL doesnt close nicely
2,305
def dump_img ( fname ) : img = Image . open ( fname ) width , _ = img . size txt = '' pixels = list ( img . getdata ( ) ) for col in range ( width ) : txt += str ( pixels [ col : col + width ] ) return txt
output the image as text
2,306
def NormInt ( df , sampleA , sampleB ) : c1 = df [ sampleA ] c2 = df [ sampleB ] return np . log10 ( np . sqrt ( c1 * c2 ) )
Normalizes intensities of a gene in two samples
2,307
def is_prime ( number ) : if number < 2 : return False if number % 2 == 0 : return number == 2 limit = int ( math . sqrt ( number ) ) for divisor in range ( 3 , limit + 1 , 2 ) : if number % divisor == 0 : return False return True
Testing given number to be a prime .
2,308
def qmed_all_methods ( self ) : result = { } for method in self . methods : try : result [ method ] = getattr ( self , '_qmed_from_' + method ) ( ) except : result [ method ] = None return result
Returns a dict of QMED methods using all available methods .
2,309
def _qmed_from_amax_records ( self ) : valid_flows = valid_flows_array ( self . catchment ) n = len ( valid_flows ) if n < 2 : raise InsufficientDataError ( "Insufficient annual maximum flow records available for catchment {}." . format ( self . catchment . id ) ) return np . median ( valid_flows )
Return QMED estimate based on annual maximum flow records .
2,310
def _pot_month_counts ( self , pot_dataset ) : periods = pot_dataset . continuous_periods ( ) result = [ set ( ) for x in range ( 12 ) ] for period in periods : year = period . start_date . year month = period . start_date . month while True : result [ month - 1 ] . add ( year ) if year == period . end_date . year and ...
Return a list of 12 sets . Each sets contains the years included in the POT record period .
2,311
def _qmed_from_area ( self ) : try : return 1.172 * self . catchment . descriptors . dtm_area ** self . _area_exponent ( ) except ( TypeError , KeyError ) : raise InsufficientDataError ( "Catchment `descriptors` attribute must be set first." )
Return QMED estimate based on catchment area .
2,312
def _qmed_from_descriptors_1999 ( self , as_rural = False ) : try : qmed_rural = 1.172 * self . catchment . descriptors . dtm_area ** self . _area_exponent ( ) * ( self . catchment . descriptors . saar / 1000.0 ) ** 1.560 * self . catchment . descriptors . farl ** 2.642 * ( self . catchment . descriptors . sprhost / 10...
Return QMED estimation based on FEH catchment descriptors 1999 methodology .
2,313
def _qmed_from_descriptors_2008 ( self , as_rural = False , donor_catchments = None ) : try : lnqmed_rural = 2.1170 + 0.8510 * log ( self . catchment . descriptors . dtm_area ) - 1.8734 * 1000 / self . catchment . descriptors . saar + 3.4451 * log ( self . catchment . descriptors . farl ) - 3.0800 * self . catchment . ...
Return QMED estimation based on FEH catchment descriptors 2008 methodology .
2,314
def _pruaf ( self ) : return 1 + 0.47 * self . catchment . descriptors . urbext ( self . year ) * self . catchment . descriptors . bfihost / ( 1 - self . catchment . descriptors . bfihost )
Return percentage runoff urban adjustment factor .
2,315
def _dist_corr ( dist , phi1 , phi2 , phi3 ) : return phi1 * exp ( - phi2 * dist ) + ( 1 - phi1 ) * exp ( - phi3 * dist )
Generic distance - decaying correlation function
2,316
def _vec_b ( self , donor_catchments ) : p = len ( donor_catchments ) b = 0.1175 * np . ones ( p ) for i in range ( p ) : b [ i ] *= self . _model_error_corr ( self . catchment , donor_catchments [ i ] ) return b
Return vector b of model error covariances to estimate weights
2,317
def _beta ( catchment ) : lnbeta = - 1.1221 - 0.0816 * log ( catchment . descriptors . dtm_area ) - 0.4580 * log ( catchment . descriptors . saar / 1000 ) + 0.1065 * log ( catchment . descriptors . bfihost ) return exp ( lnbeta )
Return beta the GLO scale parameter divided by loc parameter estimated using simple regression model
2,318
def _matrix_sigma_eta ( self , donor_catchments ) : p = len ( donor_catchments ) sigma = 0.1175 * np . ones ( ( p , p ) ) for i in range ( p ) : for j in range ( p ) : if i != j : sigma [ i , j ] *= self . _model_error_corr ( donor_catchments [ i ] , donor_catchments [ j ] ) return sigma
Return model error coveriance matrix Sigma eta
2,319
def _matrix_sigma_eps ( self , donor_catchments ) : p = len ( donor_catchments ) sigma = np . empty ( ( p , p ) ) for i in range ( p ) : beta_i = self . _beta ( donor_catchments [ i ] ) n_i = donor_catchments [ i ] . amax_records_end ( ) - donor_catchments [ i ] . amax_records_start ( ) + 1 for j in range ( p ) : beta_...
Return sampling error coveriance matrix Sigma eta
2,320
def _vec_alpha ( self , donor_catchments ) : return np . dot ( linalg . inv ( self . _matrix_omega ( donor_catchments ) ) , self . _vec_b ( donor_catchments ) )
Return vector alpha which is the weights for donor model errors
2,321
def find_donor_catchments ( self , limit = 6 , dist_limit = 500 ) : if self . gauged_catchments : return self . gauged_catchments . nearest_qmed_catchments ( self . catchment , limit , dist_limit ) else : return [ ]
Return a suitable donor catchment to improve a QMED estimate based on catchment descriptors alone .
2,322
def _var_and_skew ( self , catchments , as_rural = False ) : if not hasattr ( catchments , '__getitem__' ) : l_cv , l_skew = self . _l_cv_and_skew ( self . catchment ) self . results_log [ 'donors' ] = [ ] else : n = len ( catchments ) l_cvs = np . empty ( n ) l_skews = np . empty ( n ) l_cv_weights = np . empty ( n ) ...
Calculate L - CV and L - SKEW from a single catchment or a pooled group of catchments .
2,323
def _l_cv_and_skew ( self , catchment ) : z = self . _dimensionless_flows ( catchment ) l1 , l2 , t3 = lm . lmom_ratios ( z , nmom = 3 ) return l2 / l1 , t3
Calculate L - CV and L - SKEW for a gauged catchment . Uses lmoments3 library .
2,324
def _l_cv_weight ( self , donor_catchment ) : try : dist = donor_catchment . similarity_dist except AttributeError : dist = self . _similarity_distance ( self . catchment , donor_catchment ) b = 0.0047 * sqrt ( dist ) + 0.0023 / 2 c = 0.02609 / ( donor_catchment . record_length - 1 ) return 1 / ( b + c )
Return L - CV weighting for a donor catchment .
2,325
def _l_cv_weight_factor ( self ) : b = 0.0047 * sqrt ( 0 ) + 0.0023 / 2 c = 0.02609 / ( self . catchment . record_length - 1 ) return c / ( b + c )
Return multiplier for L - CV weightings in case of enhanced single site analysis .
2,326
def _l_skew_weight ( self , donor_catchment ) : try : dist = donor_catchment . similarity_dist except AttributeError : dist = self . _similarity_distance ( self . catchment , donor_catchment ) b = 0.0219 * ( 1 - exp ( - dist / 0.2360 ) ) c = 0.2743 / ( donor_catchment . record_length - 2 ) return 1 / ( b + c )
Return L - SKEW weighting for donor catchment .
2,327
def _growth_curve_single_site ( self , distr = 'glo' ) : if self . catchment . amax_records : self . donor_catchments = [ ] return GrowthCurve ( distr , * self . _var_and_skew ( self . catchment ) ) else : raise InsufficientDataError ( "Catchment's `amax_records` must be set for a single site analysis." )
Return flood growth curve function based on amax_records from the subject catchment only .
2,328
def _growth_curve_pooling_group ( self , distr = 'glo' , as_rural = False ) : if not self . donor_catchments : self . find_donor_catchments ( ) gc = GrowthCurve ( distr , * self . _var_and_skew ( self . donor_catchments ) ) self . results_log [ 'distr_name' ] = distr . upper ( ) self . results_log [ 'distr_params' ] = ...
Return flood growth curve function based on amax_records from a pooling group .
2,329
def process ( self , document ) : content = json . dumps ( document ) versions = { } versions . update ( { 'Spline' : Version ( VERSION ) } ) versions . update ( self . get_version ( "Bash" , self . BASH_VERSION ) ) if content . find ( '"docker(container)":' ) >= 0 or content . find ( '"docker(image)":' ) >= 0 : versio...
Logging versions of required tools .
2,330
def get_version ( tool_name , tool_command ) : result = { } for line in Bash ( ShellConfig ( script = tool_command , internal = True ) ) . process ( ) : if line . find ( "command not found" ) >= 0 : VersionsCheck . LOGGER . error ( "Required tool '%s' not found (stopping pipeline)!" , tool_name ) sys . exit ( 1 ) else ...
Get name and version of a tool defined by given command .
2,331
def process ( self , versions ) : for tool_name in sorted ( versions . keys ( ) ) : version = versions [ tool_name ] self . _log ( "Using tool '%s', %s" % ( tool_name , version ) )
Logging version sorted ascending by tool name .
2,332
def register_event ( self , * names ) : for name in names : if name in self . __events : continue self . __events [ name ] = Event ( name )
Registers new events after instance creation
2,333
def emit ( self , name , * args , ** kwargs ) : e = self . __property_events . get ( name ) if e is None : e = self . __events [ name ] return e ( * args , ** kwargs )
Dispatches an event to any subscribed listeners
2,334
def get_dispatcher_event ( self , name ) : e = self . __property_events . get ( name ) if e is None : e = self . __events [ name ] return e
Retrieves an Event object by name
2,335
def emission_lock ( self , name ) : e = self . __property_events . get ( name ) if e is None : e = self . __events [ name ] return e . emission_lock
Holds emission of events and dispatches the last event on release
2,336
def TEST ( fname ) : m = MapObject ( fname , os . path . join ( os . getcwd ( ) , 'img_prog_results' ) ) m . add_layer ( ImagePathFollow ( 'border' ) ) m . add_layer ( ImagePathFollow ( 'river' ) ) m . add_layer ( ImagePathFollow ( 'road' ) ) m . add_layer ( ImageArea ( 'sea' , col = 'Blue' , density = 'light' ) ) m . ...
Test function to step through all functions in order to try and identify all features on a map This test function should be placed in a main section later
2,337
def describe_contents ( self ) : print ( '======================================================================' ) print ( self ) print ( 'Table = ' , str ( len ( self . header ) ) + ' cols x ' + str ( len ( self . arr ) ) + ' rows' ) print ( 'HEADER = ' , self . get_header ( ) ) print ( 'arr = ' , self . arr [ 0 ...
describes various contents of data table
2,338
def get_distinct_values_from_cols ( self , l_col_list ) : uniq_vals = [ ] for l_col_name in l_col_list : uniq_vals . append ( set ( self . get_col_data_by_name ( l_col_name ) ) ) if len ( l_col_list ) == 0 : return [ ] elif len ( l_col_list ) == 1 : return sorted ( [ v for v in uniq_vals ] ) elif len ( l_col_list ) == ...
returns the list of distinct combinations in a dataset based on the columns in the list . Note that this is currently implemented as MAX permutations of the combo so it is not guarenteed to have values in each case .
2,339
def select_where ( self , where_col_list , where_value_list , col_name = '' ) : res = [ ] col_ids = [ ] for col_id , col in enumerate ( self . header ) : if col in where_col_list : col_ids . append ( [ col_id , col ] ) for row_num , row in enumerate ( self . arr ) : keep_this_row = True for ndx , where_col in enumerate...
selects rows from the array where col_list == val_list
2,340
def update_where ( self , col , value , where_col_list , where_value_list ) : if type ( col ) is str : col_ndx = self . get_col_by_name ( col ) else : col_ndx = col new_arr = self . select_where ( where_col_list , where_value_list ) for r in new_arr : self . arr [ r [ 0 ] ] [ col_ndx ] = value
updates the array to set cell = value where col_list == val_list
2,341
def percentile ( self , lst_data , percent , key = lambda x : x ) : new_list = sorted ( lst_data ) k = ( len ( new_list ) - 1 ) * percent f = math . floor ( k ) c = math . ceil ( k ) if f == c : return key ( new_list [ int ( k ) ] ) d0 = float ( key ( new_list [ int ( f ) ] ) ) * ( c - k ) d1 = float ( key ( new_list [...
calculates the num percentile of the items in the list
2,342
def save ( self , filename , content ) : with open ( filename , "w" ) as f : if hasattr ( content , '__iter__' ) : f . write ( '\n' . join ( [ row for row in content ] ) ) else : print ( 'WRINGI CONTWETESWREWR' ) f . write ( str ( content ) )
default is to save a file from list of lines
2,343
def save_csv ( self , filename , write_header_separately = True ) : txt = '' with open ( filename , "w" ) as f : if write_header_separately : f . write ( ',' . join ( [ c for c in self . header ] ) + '\n' ) for row in self . arr : txt = ',' . join ( [ self . force_to_string ( col ) for col in row ] ) f . write ( txt + ...
save the default array as a CSV file
2,344
def drop ( self , fname ) : if self . dataset_type == 'file' : import os try : os . remove ( fname ) except Exception as ex : print ( 'cant drop file "' + fname + '" : ' + str ( ex ) )
drop the table view or delete the file
2,345
def get_col_data_by_name ( self , col_name , WHERE_Clause = '' ) : col_key = self . get_col_by_name ( col_name ) if col_key is None : print ( 'get_col_data_by_name: col_name = ' , col_name , ' NOT FOUND' ) return [ ] res = [ ] for row in self . arr : res . append ( row [ col_key ] ) return res
returns the values of col_name according to where
2,346
def format_rst ( self ) : res = '' num_cols = len ( self . header ) col_width = 25 for _ in range ( num_cols ) : res += '' . join ( [ '=' for _ in range ( col_width - 1 ) ] ) + ' ' res += '\n' for c in self . header : res += c . ljust ( col_width ) res += '\n' for _ in range ( num_cols ) : res += '' . join ( [ '=' for ...
return table in RST format
2,347
def getHomoloGene ( taxfile = "build_inputs/taxid_taxname" , genefile = "homologene.data" , proteinsfile = "build_inputs/all_proteins.data" , proteinsclusterfile = "build_inputs/proteins_for_clustering.data" , baseURL = "http://ftp.ncbi.nih.gov/pub/HomoloGene/current/" ) : def getDf ( inputfile ) : if os . path . isfil...
Returns NBCI s Homolog Gene tables .
2,348
def getFasta ( opened_file , sequence_name ) : lines = opened_file . readlines ( ) seq = str ( "" ) for i in range ( 0 , len ( lines ) ) : line = lines [ i ] if line [ 0 ] == ">" : fChr = line . split ( " " ) [ 0 ] . split ( "\n" ) [ 0 ] fChr = fChr [ 1 : ] if fChr == sequence_name : s = i code = [ 'N' , 'A' , 'C' , 'T...
Retrieves a sequence from an opened multifasta file
2,349
def writeFasta ( sequence , sequence_name , output_file ) : i = 0 f = open ( output_file , 'w' ) f . write ( ">" + str ( sequence_name ) + "\n" ) while i <= len ( sequence ) : f . write ( sequence [ i : i + 60 ] + "\n" ) i = i + 60 f . close ( )
Writes a fasta sequence into a file .
2,350
def rewriteFasta ( sequence , sequence_name , fasta_in , fasta_out ) : f = open ( fasta_in , 'r+' ) f2 = open ( fasta_out , 'w' ) lines = f . readlines ( ) i = 0 while i < len ( lines ) : line = lines [ i ] if line [ 0 ] == ">" : f2 . write ( line ) fChr = line . split ( " " ) [ 0 ] fChr = fChr [ 1 : ] if fChr == seque...
Rewrites a specific sequence in a multifasta file while keeping the sequence header .
2,351
def _get_tool_str ( self , tool ) : res = tool [ 'file' ] try : res += '.' + tool [ 'function' ] except Exception as ex : print ( 'Warning - no function defined for tool ' + str ( tool ) ) res += '\n' return res
get a string representation of the tool
2,352
def get_tool_by_name ( self , nme ) : for t in self . lstTools : if 'name' in t : if t [ 'name' ] == nme : return t if 'file' in t : if t [ 'file' ] == nme : return t return None
get the tool object by name or file
2,353
def save ( self , fname = '' ) : if fname != '' : with open ( fname , 'w' ) as f : for t in self . lstTools : self . verify ( t ) f . write ( self . tool_as_string ( t ) )
Save the list of tools to AIKIF core and optionally to local file fname
2,354
def verify ( self , tool ) : if os . path . isfile ( tool [ 'file' ] ) : print ( 'Toolbox: program exists = TOK :: ' + tool [ 'file' ] ) return True else : print ( 'Toolbox: program exists = FAIL :: ' + tool [ 'file' ] ) return False
check that the tool exists
2,355
def run ( self , tool , args , new_import_path = '' ) : if new_import_path != '' : sys . path . append ( new_import_path ) print ( 'main called ' + tool [ 'file' ] + '->' + tool [ 'function' ] + ' with ' , args , ' = ' , tool [ 'return' ] ) mod = __import__ ( os . path . basename ( tool [ 'file' ] ) . split ( '.' ) [ 0...
import the tool and call the function passing the args .
2,356
def main ( ** kwargs ) : options = ApplicationOptions ( ** kwargs ) Event . configure ( is_logging_enabled = options . event_logging ) application = Application ( options ) application . run ( options . definition )
The Pipeline tool .
2,357
def setup_logging ( self ) : is_custom_logging = len ( self . options . logging_config ) > 0 is_custom_logging = is_custom_logging and os . path . isfile ( self . options . logging_config ) is_custom_logging = is_custom_logging and not self . options . dry_run if is_custom_logging : Logger . configure_by_file ( self . ...
Setup of application logging .
2,358
def validate_document ( self , definition ) : initial_document = { } try : initial_document = Loader . load ( definition ) except RuntimeError as exception : self . logger . error ( str ( exception ) ) sys . exit ( 1 ) document = Validator ( ) . validate ( initial_document ) if document is None : self . logger . info (...
Validate given pipeline document .
2,359
def run_matrix ( self , matrix_definition , document ) : matrix = Matrix ( matrix_definition , 'matrix(parallel)' in document ) process_data = MatrixProcessData ( ) process_data . options = self . options process_data . pipeline = document [ 'pipeline' ] process_data . model = { } if 'model' not in document else docume...
Running pipeline via a matrix .
2,360
def shutdown ( self , collector , success ) : self . event . delegate ( success ) if collector is not None : collector . queue . put ( None ) collector . join ( ) if not success : sys . exit ( 1 )
Shutdown of the application .
2,361
def provide_temporary_scripts_path ( self ) : if len ( self . options . temporary_scripts_path ) > 0 : if os . path . isfile ( self . options . temporary_scripts_path ) : self . logger . error ( "Error: configured script path seems to be a file!" ) sys . exit ( 1 ) if not os . path . isdir ( self . options . temporary_...
When configured trying to ensure that path does exist .
2,362
def create_and_run_collector ( document , options ) : collector = None if not options . report == 'off' : collector = Collector ( ) collector . store . configure ( document ) Event . configure ( collector_queue = collector . queue ) collector . start ( ) return collector
Create and run collector process for report data .
2,363
def docker_environment ( env ) : return ' ' . join ( [ "-e \"%s=%s\"" % ( key , value . replace ( "$" , "\\$" ) . replace ( "\"" , "\\\"" ) . replace ( "`" , "\\`" ) ) for key , value in env . items ( ) ] )
Transform dictionary of environment variables into Docker - e parameters .
2,364
def _retrieve_download_url ( ) : try : with urlopen ( config [ 'nrfa' ] [ 'oh_json_url' ] , timeout = 10 ) as f : remote_config = json . loads ( f . read ( ) . decode ( 'utf-8' ) ) if remote_config [ 'nrfa_url' ] . startswith ( '.' ) : remote_config [ 'nrfa_url' ] = 'file:' + pathname2url ( os . path . abspath ( remote...
Retrieves download location for FEH data zip file from hosted json configuration file .
2,365
def update_available ( after_days = 1 ) : never_downloaded = not bool ( config . get ( 'nrfa' , 'downloaded_on' , fallback = None ) or None ) if never_downloaded : config . set_datetime ( 'nrfa' , 'update_checked_on' , datetime . utcnow ( ) ) config . save ( ) return True last_checked_on = config . get_datetime ( 'nrfa...
Check whether updated NRFA data is available .
2,366
def download_data ( ) : with urlopen ( _retrieve_download_url ( ) ) as f : with open ( os . path . join ( CACHE_FOLDER , CACHE_ZIP ) , "wb" ) as local_file : local_file . write ( f . read ( ) )
Downloads complete station dataset including catchment descriptors and amax records . And saves it into a cache folder .
2,367
def _update_nrfa_metadata ( remote_config ) : config [ 'nrfa' ] [ 'oh_json_url' ] = remote_config [ 'nrfa_oh_json_url' ] config [ 'nrfa' ] [ 'version' ] = remote_config [ 'nrfa_version' ] config [ 'nrfa' ] [ 'url' ] = remote_config [ 'nrfa_url' ] config . set_datetime ( 'nrfa' , 'published_on' , datetime . utcfromtimes...
Save NRFA metadata to local config file using retrieved config data
2,368
def nrfa_metadata ( ) : result = { 'url' : config . get ( 'nrfa' , 'url' , fallback = None ) or None , 'version' : config . get ( 'nrfa' , 'version' , fallback = None ) or None , 'published_on' : config . get_datetime ( 'nrfa' , 'published_on' , fallback = None ) or None , 'downloaded_on' : config . get_datetime ( 'nrf...
Return metadata on the NRFA data .
2,369
def unzip_data ( ) : with ZipFile ( os . path . join ( CACHE_FOLDER , CACHE_ZIP ) , 'r' ) as zf : zf . extractall ( path = CACHE_FOLDER )
Extract all files from downloaded FEH data zip file .
2,370
def get_xml_stats ( fname ) : f = mod_file . TextFile ( fname ) res = { } res [ 'shortname' ] = f . name res [ 'folder' ] = f . path res [ 'filesize' ] = str ( f . size ) + ' bytes' res [ 'num_lines' ] = str ( f . lines ) + ' lines' res [ 'date_modified' ] = f . GetDateAsString ( f . date_modified ) return res
return a dictionary of statistics about an XML file including size in bytes num lines number of elements count by elements
2,371
def make_random_xml_file ( fname , num_elements = 200 , depth = 3 ) : with open ( fname , 'w' ) as f : f . write ( '<?xml version="1.0" ?>\n<random>\n' ) for dep_num , _ in enumerate ( range ( 1 , depth ) ) : f . write ( ' <depth>\n <content>\n' ) for num , _ in enumerate ( range ( 1 , num_elements ) ) : f . write ( '...
makes a random xml file mainly for testing the xml_split
2,372
def organismsKEGG ( ) : organisms = urlopen ( "http://rest.kegg.jp/list/organism" ) . read ( ) organisms = organisms . split ( "\n" ) organisms = [ s . split ( "\t" ) for s in organisms ] organisms = pd . DataFrame ( organisms ) return organisms
Lists all organisms present in the KEGG database .
2,373
def databasesKEGG ( organism , ens_ids ) : all_genes = urlopen ( "http://rest.kegg.jp/list/" + organism ) . read ( ) all_genes = all_genes . split ( "\n" ) dbs = [ ] while len ( dbs ) == 0 : for g in all_genes : if len ( dbs ) == 0 : kid = g . split ( "\t" ) [ 0 ] gene = urlopen ( "http://rest.kegg.jp/get/" + kid ) . r...
Finds KEGG database identifiers for a respective organism given example ensembl ids .
2,374
def ensembl_to_kegg ( organism , kegg_db ) : print ( "KEGG API: http://rest.genome.jp/link/" + kegg_db + "/" + organism ) sys . stdout . flush ( ) kegg_ens = urlopen ( "http://rest.genome.jp/link/" + kegg_db + "/" + organism ) . read ( ) kegg_ens = kegg_ens . split ( "\n" ) final = [ ] for i in kegg_ens : final . appen...
Looks up KEGG mappings of KEGG ids to ensembl ids
2,375
def ecs_idsKEGG ( organism ) : kegg_ec = urlopen ( "http://rest.kegg.jp/link/" + organism + "/enzyme" ) . read ( ) kegg_ec = kegg_ec . split ( "\n" ) final = [ ] for k in kegg_ec : final . append ( k . split ( "\t" ) ) df = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] df . columns = [ 'ec' , 'KEGGid...
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
2,376
def idsKEGG ( organism ) : ORG = urlopen ( "http://rest.kegg.jp/list/" + organism ) . read ( ) ORG = ORG . split ( "\n" ) final = [ ] for k in ORG : final . append ( k . split ( "\t" ) ) df = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] df . columns = [ 'KEGGid' , 'description' ] field = pd . DataFr...
Uses KEGG to retrieve all ids for a given KEGG organism
2,377
def biomaRtTOkegg ( df ) : df = df . dropna ( ) ECcols = df . columns . tolist ( ) df . reset_index ( inplace = True , drop = True ) field = pd . DataFrame ( df [ 'kegg_enzyme' ] . str . split ( '+' , 1 ) . tolist ( ) ) [ 1 ] field = pd . DataFrame ( field ) df = pd . concat ( [ df [ [ 'ensembl_gene_id' ] ] , field ] ,...
Transforms a pandas dataframe with the columns ensembl_gene_id kegg_enzyme to dataframe ready for use in ...
2,378
def expKEGG ( organism , names_KEGGids ) : kegg_paths = urlopen ( "http://rest.kegg.jp/list/pathway/" + organism ) . read ( ) kegg_paths = kegg_paths . split ( "\n" ) final = [ ] for k in kegg_paths : final . append ( k . split ( "\t" ) ) df = pd . DataFrame ( final [ 0 : len ( final ) - 1 ] ) [ [ 0 , 1 ] ] df . column...
Gets all KEGG pathways for an organism
2,379
def RdatabasesBM ( host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) print ( biomaRt . listMarts ( host = host ) )
Lists BioMart databases through a RPY2 connection .
2,380
def RdatasetsBM ( database , host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) ensemblMart = biomaRt . useMart ( database , host = host ) print ( biomaRt . listDatasets ( ensemblMart ) )
Lists BioMart datasets through a RPY2 connection .
2,381
def RfiltersBM ( dataset , database , host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) ensemblMart = biomaRt . useMart ( database , host = host ) ensembl = biomaRt . useDataset ( dataset , mart = ensemblMart ) print ( biomaRt . listFilters ( ensembl ) )
Lists BioMart filters through a RPY2 connection .
2,382
def RattributesBM ( dataset , database , host = rbiomart_host ) : biomaRt = importr ( "biomaRt" ) ensemblMart = biomaRt . useMart ( database , host = rbiomart_host ) ensembl = biomaRt . useDataset ( dataset , mart = ensemblMart ) print ( biomaRt . listAttributes ( ensembl ) )
Lists BioMart attributes through a RPY2 connection .
2,383
def get_list_of_applications ( ) : apps = mod_prg . Programs ( 'Applications' , 'C:\\apps' ) fl = mod_fl . FileList ( [ 'C:\\apps' ] , [ '*.exe' ] , [ "\\bk\\" ] ) for f in fl . get_list ( ) : apps . add ( f , 'autogenerated list' ) apps . list ( ) apps . save ( )
Get list of applications
2,384
def add_field ( self , name , label , field_type , * args , ** kwargs ) : if name in self . _dyn_fields : raise AttributeError ( 'Field already added to the form.' ) else : self . _dyn_fields [ name ] = { 'label' : label , 'type' : field_type , 'args' : args , 'kwargs' : kwargs }
Add the field to the internal configuration dictionary .
2,385
def add_validator ( self , name , validator , * args , ** kwargs ) : if name in self . _dyn_fields : if 'validators' in self . _dyn_fields [ name ] : self . _dyn_fields [ name ] [ 'validators' ] . append ( validator ) self . _dyn_fields [ name ] [ validator . __name__ ] = { } if args : self . _dyn_fields [ name ] [ val...
Add the validator to the internal configuration dictionary .
2,386
def process ( self , form , post ) : if not isinstance ( form , FormMeta ) : raise TypeError ( 'Given form is not a valid WTForm.' ) re_field_name = re . compile ( r'\%([a-zA-Z0-9_]*)\%' ) class F ( form ) : pass for field , data in post . iteritems ( ) : if field in F ( ) : continue else : if field in self . _dyn_fiel...
Process the given WTForm Form object .
2,387
def GetBEDnarrowPeakgz ( URL_or_PATH_TO_file ) : if os . path . isfile ( URL_or_PATH_TO_file ) : response = open ( URL_or_PATH_TO_file , "r" ) compressedFile = StringIO . StringIO ( response . read ( ) ) else : response = urllib2 . urlopen ( URL_or_PATH_TO_file ) compressedFile = StringIO . StringIO ( response . read (...
Reads a gz compressed BED narrow peak file from a web address or local file
2,388
def dfTObedtool ( df ) : df = df . astype ( str ) df = df . drop_duplicates ( ) df = df . values . tolist ( ) df = [ "\t" . join ( s ) for s in df ] df = "\n" . join ( df ) df = BedTool ( df , from_string = True ) return df
Transforms a pandas dataframe into a bedtool
2,389
def configure ( ** kwargs ) : for key in kwargs : if key == 'is_logging_enabled' : Event . is_logging_enabled = kwargs [ key ] elif key == 'collector_queue' : Event . collector_queue = kwargs [ key ] else : Logger . get_logger ( __name__ ) . error ( "Unknown key %s in configure or bad type %s" , key , type ( kwargs [ k...
Global configuration for event handling .
2,390
def failed ( self , ** kwargs ) : self . finished = datetime . now ( ) self . status = 'failed' self . information . update ( kwargs ) self . logger . info ( "Failed - took %f seconds." , self . duration ( ) ) self . update_report_collector ( int ( time . mktime ( self . finished . timetuple ( ) ) ) )
Finish event as failed with optional additional information .
2,391
def update_report_collector ( self , timestamp ) : report_enabled = 'report' in self . information and self . information [ 'report' ] == 'html' report_enabled = report_enabled and 'stage' in self . information report_enabled = report_enabled and Event . collector_queue is not None if report_enabled : Event . collector...
Updating report collector for pipeline details .
2,392
def count_lines_in_file ( src_file ) : tot = 0 res = '' try : with open ( src_file , 'r' ) as f : for line in f : tot += 1 res = str ( tot ) + ' recs read' except : res = 'ERROR -couldnt open file' return res
test function .
2,393
def load_txt_to_sql ( tbl_name , src_file_and_path , src_file , op_folder ) : if op_folder == '' : pth = '' else : pth = op_folder + os . sep fname_create_script = pth + 'CREATE_' + tbl_name + '.SQL' fname_backout_file = pth + 'BACKOUT_' + tbl_name + '.SQL' fname_control_file = pth + tbl_name + '.CTL' cols = read_csv_c...
creates a SQL loader script to load a text file into a database and then executes it . Note that src_file is
2,394
async def anext ( * args ) : if not args : raise TypeError ( 'anext() expected at least 1 arguments, got 0' ) if len ( args ) > 2 : raise TypeError ( 'anext() expected at most 2 arguments, got {}' . format ( len ( args ) ) ) iterable , default , has_default = args [ 0 ] , None , False if len ( args ) == 2 : iterable , ...
Return the next item from an async iterator .
2,395
def repeat ( obj , times = None ) : if times is None : return AsyncIterWrapper ( sync_itertools . repeat ( obj ) ) return AsyncIterWrapper ( sync_itertools . repeat ( obj , times ) )
Make an iterator that returns object over and over again .
2,396
def _async_callable ( func ) : if isinstance ( func , types . CoroutineType ) : return func @ functools . wraps ( func ) async def _async_def_wrapper ( * args , ** kwargs ) : return func ( * args , ** kwargs ) return _async_def_wrapper
Ensure the callable is an async def .
2,397
def tee ( iterable , n = 2 ) : tees = tuple ( AsyncTeeIterable ( iterable ) for _ in range ( n ) ) for tee in tees : tee . _siblings = tees return tees
Return n independent iterators from a single iterable .
2,398
def _on_change ( self , obj , old , value , ** kwargs ) : kwargs [ 'property' ] = self obj . emit ( self . name , obj , value , old = old , ** kwargs )
Called internally to emit changes from the instance object
2,399
def parse_str ( self , s ) : self . object = self . parsed_class ( ) in_section = None for line in s . split ( '\n' ) : if line . lower ( ) . startswith ( '[end]' ) : in_section = None elif line . startswith ( '[' ) : in_section = line . strip ( ) . strip ( '[]' ) . lower ( ) . replace ( ' ' , '_' ) elif in_section : t...
Parse string and return relevant object