idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
5,100
def exit ( self , status = EXIT_OK , message = None ) : if not self . parser : self . parser = argparse . ArgumentParser ( ) if self . msg_on_error_only : # if msg_on_error_only is True if status != EXIT_OK : # if we have an error we'll exit with the message also. self . parser . exit ( status , message ) else : # else we'll exit with the status ongly self . parser . exit ( status , None ) else : # else if msg_on_error_only is not True # we'll exit with the status and the message self . parser . exit ( status , message )
Terminate the script .
145
5
5,101
def analyse_ligand_sasa ( self ) : i = 0 start = timer ( ) if self . trajectory == [ ] : self . trajectory = [ self . topology_data . universe . filename ] try : for traj in self . trajectory : new_traj = mdtraj . load ( traj , top = self . topology_data . universe . filename ) #Analyse only non-H ligand ligand_slice = new_traj . atom_slice ( atom_indices = self . topology_data . universe . ligand_noH . ids ) self . sasa = mdtraj . shrake_rupley ( ligand_slice ) self . atom_sasa [ i ] = self . assign_per_atom_sasa ( ) i += 1 self . total_sasa = self . get_total_per_atom_sasa ( ) except KeyError as e : print "WARNING: SASA analysis cannot be performed due to incorrect atom names in" print "the topology " , e print "SASA: " + str ( timer ( ) - start )
Analysis of ligand SASA .
241
7
5,102
def assign_per_atom_sasa ( self ) : atom_names = [ atom . name for atom in self . topology_data . universe . ligand_noH . atoms ] sasa_dict = { } for atom in range ( 0 , self . topology_data . universe . ligand_noH . n_atoms ) : sasa_dict [ atom_names [ atom ] ] = [ self . sasa [ i ] [ atom ] for i in range ( len ( self . sasa ) ) ] return sasa_dict
Make a dictionary with SASA assigned to each ligand atom stored as list of SASA values over the simulation time .
119
24
5,103
def get_total_per_atom_sasa ( self ) : total_sasa = defaultdict ( int ) for traj in range ( len ( self . atom_sasa ) ) : for atom in self . atom_sasa [ traj ] : total_sasa [ atom ] += float ( sum ( ( self . atom_sasa [ traj ] [ atom ] ) ) ) / len ( self . atom_sasa [ traj ] [ atom ] ) for atom in total_sasa : total_sasa [ atom ] = float ( total_sasa [ atom ] ) / len ( self . atom_sasa ) return total_sasa
Return average SASA of the atoms .
143
8
5,104
def run ( self , * args ) : if self . running : return self self . _mut_finished ( False ) # in case of recovery from a disaster. self . _mut_running ( True ) stream = self . target ( * args ) # noinspection SpellCheckingInspection def subr ( ) : self . _mut_running ( True ) try : for each in stream : self . _product = each desc = self . descriptor_mapping ( each ) event = self . events . get ( desc ) if event : event ( self , each , globals ) self . _mut_finished ( True ) except ThreadExit : pass finally : self . _mut_running ( False ) self . _thread = thread = threading . Thread ( target = subr , args = ( ) ) thread . start ( ) return self
You can choose whether to use lock method when running threads .
176
12
5,105
def _add_consequences ( self , variant_obj ) : consequences = set ( ) for transcript in variant_obj . transcripts : for consequence in transcript . consequence . split ( '&' ) : consequences . add ( consequence ) variant_obj . consequences = list ( consequences )
Add the consequences found in all transcripts
59
7
5,106
def _add_hgnc_symbols ( self , variant_obj ) : hgnc_symbols = set ( ) if variant_obj . transcripts : for transcript in variant_obj . transcripts : if transcript . hgnc_symbol : hgnc_symbols . add ( transcript . hgnc_symbol ) else : chrom = variant_obj . CHROM start = variant_obj . start stop = variant_obj . stop hgnc_symbols = get_gene_symbols ( chrom , start , stop ) #Make unique ids variant_obj . gene_symbols = list ( hgnc_symbols )
Add hgnc symbols to the variant If there are transcripts use the symbols found here otherwise use phizz to get the gene ids .
147
28
5,107
def _add_genes ( self , variant_obj ) : genes = [ ] ensembl_ids = [ ] hgnc_symbols = [ ] if variant_obj . transcripts : for transcript in variant_obj . transcripts : if transcript . ensembl_id : ensembl_ids . append ( transcript . ensembl_id ) if transcript . hgnc_symbol : hgnc_symbols . append ( transcript . hgnc_symbol ) else : hgnc_symbols = variant_obj . gene_symbols genes = get_gene_info ( ensembl_ids = ensembl_ids , hgnc_symbols = hgnc_symbols ) for gene in genes : variant_obj . add_gene ( gene )
Add the Gene objects for a variant
178
7
5,108
def _redis_strict_pc ( func ) : phase = "session_%s" % func . __name__ @ functools . wraps ( func ) def wrapper ( self , session , * args , * * kwargs ) : try : func ( self , session , * args , * * kwargs ) self . logger . debug ( "%s -> %s" % ( session . meepo_unique_id , phase ) ) return True except Exception as e : if self . strict : raise if isinstance ( e , redis . ConnectionError ) : self . logger . warn ( "redis connection error in %s: %s" % ( phase , session . meepo_unique_id ) ) else : self . logger . exception ( e ) return False return wrapper
Strict deco for RedisPrepareCommit
170
11
5,109
def phase ( self , session ) : sp_key , _ = self . _keygen ( session ) if self . r . sismember ( sp_key , session . meepo_unique_id ) : return "prepare" else : return "commit"
Determine the session phase in prepare commit .
57
10
5,110
def prepare ( self , session , event ) : if not event : self . logger . warn ( "event empty!" ) return sp_key , sp_hkey = self . _keygen ( session ) def _pk ( obj ) : pk_values = tuple ( getattr ( obj , c . name ) for c in obj . __mapper__ . primary_key ) if len ( pk_values ) == 1 : return pk_values [ 0 ] return pk_values def _get_dump_value ( value ) : if hasattr ( value , '__mapper__' ) : return _pk ( value ) return value pickled_event = { k : pickle . dumps ( { _get_dump_value ( obj ) for obj in objs } ) for k , objs in event . items ( ) } with self . r . pipeline ( transaction = False ) as p : p . sadd ( sp_key , session . meepo_unique_id ) p . hmset ( sp_hkey , pickled_event ) p . execute ( )
Prepare phase for session .
234
6
5,111
def commit ( self , session ) : sp_key , sp_hkey = self . _keygen ( session ) with self . r . pipeline ( transaction = False ) as p : p . srem ( sp_key , session . meepo_unique_id ) p . expire ( sp_hkey , 60 * 60 ) p . execute ( )
Commit phase for session .
76
6
5,112
def clear ( self , ts = None ) : sp_key = "%s:session_prepare" % self . namespace ( ts or int ( time . time ( ) ) ) return self . r . delete ( sp_key )
Clear all session in prepare phase .
49
7
5,113
def cases ( ctx , root ) : root = root or ctx . obj . get ( 'root' ) or os . path . expanduser ( "~/.puzzle" ) if os . path . isfile ( root ) : logger . error ( "'root' can't be a file" ) ctx . abort ( ) logger . info ( "Root directory is: {}" . format ( root ) ) db_path = os . path . join ( root , 'puzzle_db.sqlite3' ) logger . info ( "db path is: {}" . format ( db_path ) ) if not os . path . exists ( db_path ) : logger . warn ( "database not initialized, run 'puzzle init'" ) ctx . abort ( ) store = SqlStore ( db_path ) for case in store . cases ( ) : click . echo ( case )
Show all cases in the database .
189
7
5,114
def init ( name , subnames , dest , skeleton , description , project_type , skip_core ) : dest = dest or CUR_DIR skeleton = join ( skeleton or SKEL_PATH , project_type ) project = join ( dest , name ) script = join ( project , name + '.py' ) core = join ( project , name ) if project_type == 'standalone' : renames = [ ( join ( project , 'project.py' ) , script ) , ( join ( project , 'project' ) , core ) ] copy_skeleton ( name , skeleton , project , renames = renames , description = description , ignore = False ) else : renames = [ ( join ( project , 'project.py' ) , script ) , ( join ( project , 'project' ) , core ) ] exclude_dirs = [ 'submodule' ] + ( [ 'project' ] if skip_core else [ ] ) copy_skeleton ( name , skeleton , project , renames = renames , description = description , exclude_dirs = exclude_dirs , ignore = True ) for subname in subnames : renames = [ ( join ( project , 'submodule' ) , join ( project , subname ) ) ] copy_skeleton ( subname , skeleton , project , renames = renames , description = description , ignore = True , exclude_dirs = [ 'project' ] , exclude_files = [ 'project.py' ] ) return 0 , "\n{}\n" . format ( project )
Creates a standalone subprojects or submodules script sctrucure
330
15
5,115
def _check ( self ) : for k , ix in six . iteritems ( self . _indices ) : assert k is not None , 'null key' assert ix , 'Key does not map to any indices' assert ix == sorted ( ix ) , "Key's indices are not in order" for i in ix : assert i in self . _lines , 'Key index does not map to line' assert self . _lines [ i ] . key is not None , 'Key maps to comment' assert self . _lines [ i ] . key == k , 'Key does not map to itself' assert self . _lines [ i ] . value is not None , 'Key has null value' prev = None for i , line in six . iteritems ( self . _lines ) : assert prev is None or prev < i , 'Line indices out of order' prev = i if line . key is None : assert line . value is None , 'Comment/blank has value' assert line . source is not None , 'Comment source not stored' assert loads ( line . source ) == { } , 'Comment source is not comment' else : assert line . value is not None , 'Key has null value' if line . source is not None : assert loads ( line . source ) == { line . key : line . value } , 'Key source does not deserialize to itself' assert line . key in self . _indices , 'Key is missing from map' assert i in self . _indices [ line . key ] , 'Key does not map to itself'
Assert the internal consistency of the instance s data structures . This method is for debugging only .
335
19
5,116
def load ( cls , fp ) : obj = cls ( ) for i , ( k , v , src ) in enumerate ( parse ( fp ) ) : if k is not None : obj . _indices . setdefault ( k , [ ] ) . append ( i ) obj . _lines [ i ] = PropertyLine ( k , v , src ) return obj
Parse the contents of the ~io . IOBase . readline - supporting file - like object fp as a simple line - oriented . properties file and return a PropertiesFile instance .
81
39
5,117
def loads ( cls , s ) : if isinstance ( s , six . binary_type ) : fp = six . BytesIO ( s ) else : fp = six . StringIO ( s ) return cls . load ( fp )
Parse the contents of the string s as a simple line - oriented . properties file and return a PropertiesFile instance .
54
24
5,118
def dump ( self , fp , separator = '=' ) : ### TODO: Support setting the timestamp for line in six . itervalues ( self . _lines ) : if line . source is None : print ( join_key_value ( line . key , line . value , separator ) , file = fp ) else : fp . write ( line . source )
Write the mapping to a file in simple line - oriented . properties format .
82
15
5,119
def dumps ( self , separator = '=' ) : s = six . StringIO ( ) self . dump ( s , separator = separator ) return s . getvalue ( )
Convert the mapping to a text string in simple line - oriented . properties format .
39
17
5,120
def copy ( self ) : dup = type ( self ) ( ) dup . _indices = OrderedDict ( ( k , list ( v ) ) for k , v in six . iteritems ( self . _indices ) ) dup . _lines = self . _lines . copy ( ) return dup
Create a copy of the mapping including formatting information
65
9
5,121
def prepare_normal_vectors ( atomselection ) : ring_atomselection = [ atomselection . coordinates ( ) [ a ] for a in [ 0 , 2 , 4 ] ] vect1 = self . vector ( ring_atomselection [ 0 ] , ring_atomselection [ 1 ] ) vect2 = self . vector ( ring_atomselection [ 2 ] , ring_atomselection [ 0 ] ) return self . normalize_vector ( np . cross ( vect1 , vect2 ) )
Create and normalize a vector across ring plane .
107
10
5,122
def refresh_session_if_necessary ( f ) : @ functools . wraps ( f ) def wrapped ( self , * args , * * kwargs ) : try : result = f ( self , * args , * * kwargs ) except Exception as ex : if hasattr ( ex , 'code' ) and ex . code in ( 401 , 403 ) : self . refresh_session ( ) # retry now result = f ( self , * args , * * kwargs ) else : raise ex return result return wrapped
Decorator to use on methods that are allowed to retry the request after reauthenticating the client .
113
22
5,123
def init_db ( db_path ) : logger . info ( "Creating database" ) with closing ( connect_database ( db_path ) ) as db : with open ( SCHEMA , 'r' ) as f : db . cursor ( ) . executescript ( f . read ( ) ) db . commit ( ) return
Build the sqlite database
68
5
5,124
def merge ( self ) : self . merged_root = self . _recursive_merge ( self . root , self . head , self . update ) if self . conflicts : raise MergeError ( 'Conflicts Occurred in Merge Process' , self . conflicts )
Populates result members .
56
5
5,125
def hpo_genes ( phenotype_ids , username , password ) : if phenotype_ids : try : results = query_phenomizer . query ( username , password , phenotype_ids ) return [ result for result in results if result [ 'p_value' ] is not None ] except SystemExit , RuntimeError : pass return None
Return list of HGNC symbols matching HPO phenotype ids .
71
13
5,126
def mangle_form ( form ) : for field , widget in form . fields . iteritems ( ) : if type ( widget ) is forms . widgets . TextInput : form . fields [ field ] . widget = PaperTextInput ( ) form . fields [ field ] . label = '' if type ( widget ) is forms . widgets . PasswordInput : field . widget = PaperPasswordInput ( ) field . label = '' return form
Utility to monkeypatch forms into paperinputs untested
89
12
5,127
def _keygen ( self , event , ts = None ) : return "%s:%s" % ( self . namespace ( ts or time . time ( ) ) , event )
Generate redis key for event at timestamp .
38
10
5,128
def _zadd ( self , key , pk , ts = None , ttl = None ) : return self . r . eval ( self . LUA_ZADD , 1 , key , ts or self . _time ( ) , pk )
Redis lua func to add an event to the corresponding sorted set .
53
15
5,129
def add ( self , event , pk , ts = None , ttl = None ) : key = self . _keygen ( event , ts ) try : self . _zadd ( key , pk , ts , ttl ) return True except redis . ConnectionError as e : # connection error typically happens when redis server can't be # reached or timed out, the error will be silent with an error # log and return None. self . logger . error ( "redis event store failed with connection error %r" % e ) return False
Add an event to event store .
116
7
5,130
def replay ( self , event , ts = 0 , end_ts = None , with_ts = False ) : key = self . _keygen ( event , ts ) end_ts = end_ts if end_ts else "+inf" elements = self . r . zrangebyscore ( key , ts , end_ts , withscores = with_ts ) if not with_ts : return [ s ( e ) for e in elements ] else : return [ ( s ( e [ 0 ] ) , int ( e [ 1 ] ) ) for e in elements ]
Replay events based on timestamp .
121
7
5,131
def query ( self , event , pk , ts = None ) : key = self . _keygen ( event , ts ) pk_ts = self . r . zscore ( key , pk ) return int ( pk_ts ) if pk_ts else None
Query the last update timestamp of an event pk .
59
11
5,132
def clear ( self , event , ts = None ) : return self . r . delete ( self . _keygen ( event , ts ) )
Clear all stored record of event .
30
7
5,133
def add_configuration_file ( self , file_name ) : logger . info ( 'adding %s to configuration files' , file_name ) if file_name not in self . configuration_files and self . _inotify : self . _watch_manager . add_watch ( file_name , pyinotify . IN_MODIFY ) if os . access ( file_name , os . R_OK ) : self . configuration_files [ file_name ] = SafeConfigParser ( ) self . configuration_files [ file_name ] . read ( file_name ) else : logger . warn ( 'could not read %s' , file_name ) warnings . warn ( 'could not read {}' . format ( file_name ) , ResourceWarning )
Register a file path from which to read parameter values .
165
11
5,134
def add_parameter ( self , * * kwargs ) : parameter_name = max ( kwargs [ 'options' ] , key = len ) . lstrip ( '-' ) if 'dest' in kwargs : parameter_name = kwargs [ 'dest' ] group = kwargs . pop ( 'group' , 'default' ) self . groups . add ( group ) parameter_name = '.' . join ( [ group , parameter_name ] ) . lstrip ( '.' ) . replace ( '-' , '_' ) logger . info ( 'adding parameter %s' , parameter_name ) if self . parsed : logger . warn ( 'adding parameter %s after parse' , parameter_name ) warnings . warn ( 'adding parameter {} after parse' . format ( parameter_name ) , RuntimeWarning ) self . parameters [ parameter_name ] = copy . copy ( kwargs ) self . parameters [ parameter_name ] [ 'group' ] = group self . parameters [ parameter_name ] [ 'type' ] = kwargs . get ( 'type' , str ) self . parameters [ parameter_name ] [ 'environment_prefix' ] = kwargs . pop ( 'environment_prefix' , os . path . basename ( sys . argv [ 0 ] ) ) if self . parameters [ parameter_name ] [ 'environment_prefix' ] is not None : self . parameters [ parameter_name ] [ 'environment_prefix' ] = self . parameters [ parameter_name ] [ 'environment_prefix' ] . upper ( ) . replace ( '-' , '_' ) logger . info ( 'group: %s' , group ) self . grouped_parameters . setdefault ( group , { } ) . setdefault ( parameter_name . replace ( group + '.' , '' ) , self . parameters [ parameter_name ] ) action_defaults = { 'store' : kwargs . get ( 'default' ) , 'store_const' : kwargs . get ( 'const' ) , 'store_true' : False , 'store_false' : True , 'append' : [ ] , 'append_const' : [ ] , 'count' : 0 , } self . defaults [ parameter_name ] = action_defaults [ kwargs . get ( 'action' , 'store' ) ] logger . info ( 'default value: %s' , kwargs . get ( 'default' ) ) if 'argument' in kwargs . pop ( 'only' , [ 'argument' ] ) : if group not in self . _group_parsers : self . _group_parsers [ group ] = self . _group_parsers [ 'default' ] . add_argument_group ( group ) if self . _group_prefix and group != 'default' : long_option = max ( kwargs [ 'options' ] , key = len ) kwargs [ 'options' ] . remove ( long_option ) kwargs [ 'options' ] . append ( long_option . replace ( '--' , '--' + group . replace ( '_' , '-' ) + '-' ) ) logger . debug ( 'options: %s' , kwargs [ 'options' ] ) self . _group_parsers [ group ] . add_argument ( * kwargs . pop ( 'options' ) , * * kwargs )
Add the parameter to Parameters .
742
6
5,135
def parse ( self , only_known = False ) : self . parsed = not only_known or self . parsed logger . info ( 'parsing parameters' ) logger . debug ( 'sys.argv: %s' , sys . argv ) if only_known : args = [ _ for _ in copy . copy ( sys . argv ) if not re . match ( '-h|--help' , _ ) ] self . _group_parsers [ 'default' ] . parse_known_args ( args = args , namespace = self . _argument_namespace ) else : self . _group_parsers [ 'default' ] . parse_args ( namespace = self . _argument_namespace )
Ensure all sources are ready to be queried .
156
11
5,136
def read_configuration_files ( self ) : for file_name , configuration_parser in self . configuration_files . items ( ) : if os . access ( file_name , os . R_OK ) : configuration_parser . read ( file_name ) else : logger . warn ( 'could not read %s' , file_name ) warnings . warn ( 'could not read {}' . format ( file_name ) , ResourceWarning )
Explicitly read the configuration files .
95
8
5,137
def nr_genes ( self ) : if self [ 'genes' ] : nr_genes = len ( self [ 'genes' ] ) else : nr_genes = len ( self [ 'gene_symbols' ] ) return nr_genes
Return the number of genes
63
5
5,138
def display_name ( self ) : if self . is_snv : gene_ids = self . gene_symbols [ : 2 ] return ', ' . join ( gene_ids ) else : return "{this.cytoband_start} ({this.sv_len})" . format ( this = self )
Readable name for the variant .
69
7
5,139
def md5 ( self ) : return hashlib . md5 ( '_' . join ( [ self . CHROM , str ( self . POS ) , self . REF , self . ALT ] ) ) . hexdigest ( )
Return a md5 key string based on position ref and alt
51
12
5,140
def add_frequency ( self , name , value ) : logger . debug ( "Adding frequency {0} with value {1} to variant {2}" . format ( name , value , self [ 'variant_id' ] ) ) self [ 'frequencies' ] . append ( { 'label' : name , 'value' : value } )
Add a frequency that will be displayed on the variant level
75
11
5,141
def set_max_freq ( self , max_freq = None ) : if max_freq : self [ 'max_freq' ] = max_freq else : for frequency in self [ 'frequencies' ] : if self [ 'max_freq' ] : if frequency [ 'value' ] > self [ 'max_freq' ] : self [ 'max_freq' ] = frequency [ 'value' ] else : self [ 'max_freq' ] = frequency [ 'value' ] return
Set the max frequency for the variant
116
7
5,142
def add_severity ( self , name , value ) : logger . debug ( "Adding severity {0} with value {1} to variant {2}" . format ( name , value , self [ 'variant_id' ] ) ) self [ 'severities' ] . append ( { name : value } )
Add a severity to the variant
67
6
5,143
def add_individual ( self , genotype ) : logger . debug ( "Adding genotype {0} to variant {1}" . format ( genotype , self [ 'variant_id' ] ) ) self [ 'individuals' ] . append ( genotype )
Add the information for a individual
57
6
5,144
def add_transcript ( self , transcript ) : logger . debug ( "Adding transcript {0} to variant {1}" . format ( transcript , self [ 'variant_id' ] ) ) self [ 'transcripts' ] . append ( transcript )
Add the information transcript
55
4
5,145
def add_gene ( self , gene ) : logger . debug ( "Adding gene {0} to variant {1}" . format ( gene , self [ 'variant_id' ] ) ) self [ 'genes' ] . append ( gene )
Add the information of a gene
54
6
5,146
def add_compound ( self , compound ) : logger . debug ( "Adding compound {0} to variant {1}" . format ( compound , self [ 'variant_id' ] ) ) self [ 'compounds' ] . append ( compound )
Add the information of a compound variant
54
7
5,147
def _set_variant_id ( self , variant_id = None ) : if not variant_id : variant_id = '_' . join ( [ self . CHROM , str ( self . POS ) , self . REF , self . ALT ] ) logger . debug ( "Updating variant id to {0}" . format ( variant_id ) ) self [ 'variant_id' ] = variant_id
Set the variant id for this variant
92
7
5,148
def move_to_result ( self , lst_idx ) : self . in_result_idx . add ( lst_idx ) if lst_idx in self . not_in_result_root_match_idx : self . not_in_result_root_match_idx . remove ( lst_idx )
Moves element from lst available at lst_idx .
78
14
5,149
def add_root_match ( self , lst_idx , root_idx ) : self . root_matches [ lst_idx ] = root_idx if lst_idx in self . in_result_idx : return self . not_in_result_root_match_idx . add ( lst_idx )
Adds a match for the elements avaialble at lst_idx and root_idx .
79
22
5,150
def _add_transcripts ( self , variant_obj , gemini_variant ) : query = "SELECT * from variant_impacts WHERE variant_id = {0}" . format ( gemini_variant [ 'variant_id' ] ) gq = GeminiQuery ( self . db ) gq . run ( query ) for gemini_transcript in gq : transcript = Transcript ( hgnc_symbol = gemini_transcript [ 'gene' ] , transcript_id = gemini_transcript [ 'transcript' ] , consequence = gemini_transcript [ 'impact_so' ] , biotype = gemini_transcript [ 'biotype' ] , polyphen = gemini_transcript [ 'polyphen_pred' ] , sift = gemini_transcript [ 'sift_pred' ] , HGVSc = gemini_transcript [ 'codon_change' ] , HGVSp = ', ' . join ( [ gemini_transcript [ 'aa_change' ] or '' , gemini_transcript [ 'aa_length' ] or '' ] ) ) variant_obj . add_transcript ( transcript )
Add all transcripts for a variant
258
6
5,151
def mysql_pub ( mysql_dsn , tables = None , blocking = False , * * kwargs ) : # parse mysql settings parsed = urlparse ( mysql_dsn ) mysql_settings = { "host" : parsed . hostname , "port" : parsed . port or 3306 , "user" : parsed . username , "passwd" : parsed . password } # connect to binlog stream stream = pymysqlreplication . BinLogStreamReader ( mysql_settings , server_id = random . randint ( 1000000000 , 4294967295 ) , blocking = blocking , only_events = [ DeleteRowsEvent , UpdateRowsEvent , WriteRowsEvent ] , * * kwargs ) def _pk ( values ) : if isinstance ( event . primary_key , str ) : return values [ event . primary_key ] return tuple ( values [ k ] for k in event . primary_key ) for event in stream : if not event . primary_key : continue if tables and event . table not in tables : continue try : rows = event . rows except ( UnicodeDecodeError , ValueError ) as e : logger . exception ( e ) continue timestamp = datetime . datetime . fromtimestamp ( event . timestamp ) if isinstance ( event , WriteRowsEvent ) : sg_name = "%s_write" % event . table sg = signal ( sg_name ) sg_raw = signal ( "%s_raw" % sg_name ) for row in rows : pk = _pk ( row [ "values" ] ) sg . send ( pk ) sg_raw . send ( row ) logger . debug ( "%s -> %s, %s" % ( sg_name , pk , timestamp ) ) elif isinstance ( event , UpdateRowsEvent ) : sg_name = "%s_update" % event . table sg = signal ( sg_name ) sg_raw = signal ( "%s_raw" % sg_name ) for row in rows : pk = _pk ( row [ "after_values" ] ) sg . send ( pk ) sg_raw . send ( row ) logger . debug ( "%s -> %s, %s" % ( sg_name , pk , timestamp ) ) elif isinstance ( event , DeleteRowsEvent ) : sg_name = "%s_delete" % event . table sg = signal ( sg_name ) sg_raw = signal ( "%s_raw" % sg_name ) for row in rows : pk = _pk ( row [ "values" ] ) sg . send ( pk ) sg_raw . send ( row ) logger . debug ( "%s -> %s, %s" % ( sg_name , pk , timestamp ) ) signal ( "mysql_binlog_pos" ) . send ( "%s:%s" % ( stream . log_file , stream . log_pos ) )
MySQL row - based binlog events pub .
656
10
5,152
def load_molecule_in_rdkit_smiles ( self , molSize , kekulize = True , bonds = [ ] , bond_color = None , atom_color = { } , size = { } ) : mol_in_rdkit = self . topology_data . mol #need to reload without hydrogens try : mol_in_rdkit = Chem . RemoveHs ( mol_in_rdkit ) self . topology_data . smiles = Chem . MolFromSmiles ( Chem . MolToSmiles ( mol_in_rdkit ) ) except ValueError : mol_in_rdkit = Chem . RemoveHs ( mol_in_rdkit , sanitize = False ) self . topology_data . smiles = Chem . MolFromSmiles ( Chem . MolToSmiles ( mol_in_rdkit ) , sanitize = False ) self . atom_identities = { } i = 0 for atom in self . topology_data . smiles . GetAtoms ( ) : self . atom_identities [ mol_in_rdkit . GetProp ( '_smilesAtomOutputOrder' ) [ 1 : ] . rsplit ( "," ) [ i ] ] = atom . GetIdx ( ) i += 1 mc = Chem . Mol ( self . topology_data . smiles . ToBinary ( ) ) if kekulize : try : Chem . Kekulize ( mc ) except : mc = Chem . Mol ( self . topology_data . smiles . ToBinary ( ) ) if not mc . GetNumConformers ( ) : rdDepictor . Compute2DCoords ( mc ) atoms = [ ] colors = { } for i in range ( mol_in_rdkit . GetNumAtoms ( ) ) : atoms . append ( i ) if len ( atom_color ) == 0 : colors [ i ] = ( 1 , 1 , 1 ) else : colors = atom_color drawer = rdMolDraw2D . MolDraw2DSVG ( int ( molSize [ 0 ] ) , int ( molSize [ 1 ] ) ) drawer . DrawMolecule ( mc , highlightAtoms = atoms , highlightBonds = bonds , highlightAtomColors = colors , highlightAtomRadii = size , highlightBondColors = bond_color ) drawer . FinishDrawing ( ) self . svg = drawer . GetDrawingText ( ) . replace ( 'svg:' , '' ) filesvg = open ( "molecule.svg" , "w+" ) filesvg . write ( self . svg )
Loads mol file in rdkit without the hydrogens - they do not have to appear in the final figure . Once loaded the molecule is converted to SMILES format which RDKit appears to draw best - since we do not care about the actual coordinates of the original molecule it is sufficient to have just 2D information . Some molecules can be problematic to import and steps such as stopping sanitize function can be taken . This is done automatically if problems are observed . However better solutions can also be implemented and need more research . The molecule is then drawn from SMILES in 2D representation without hydrogens . The drawing is saved as an SVG file .
567
133
5,153
def calc_2d_forces ( self , x1 , y1 , x2 , y2 , width ) : #calculate a if x1 > x2 : a = x1 - x2 else : a = x2 - x1 a_sq = a * a #calculate b if y1 > y2 : b = y1 - y2 else : b = y2 - y1 b_sq = b * b #calculate c from math import sqrt c_sq = a_sq + b_sq c = sqrt ( c_sq ) if c > width : return 0 , 0 else : overlap = width - c return - overlap / 2 , overlap / 2
Calculate overlap in 2D space
150
8
5,154
def do_step ( self , values , xy_values , coeff , width ) : forces = { k : [ ] for k , i in enumerate ( xy_values ) } for ( index1 , value1 ) , ( index2 , value2 ) in combinations ( enumerate ( xy_values ) , 2 ) : f = self . calc_2d_forces ( value1 [ 0 ] , value1 [ 1 ] , value2 [ 0 ] , value2 [ 1 ] , width ) if coeff [ index1 ] < coeff [ index2 ] : if self . b_lenght - coeff [ index2 ] < self . b_lenght / 10 : #a quick and dirty solution, but works forces [ index1 ] . append ( f [ 1 ] ) # push to left (smaller projection value) forces [ index2 ] . append ( f [ 0 ] ) else : #all is normal forces [ index1 ] . append ( f [ 0 ] ) # push to left (smaller projection value) forces [ index2 ] . append ( f [ 1 ] ) else : if self . b_lenght - coeff [ index1 ] < self . b_lenght / 10 : #a quick and dirty solution, but works forces [ index1 ] . append ( f [ 0 ] ) # push to left (smaller projection value) forces [ index2 ] . append ( f [ 1 ] ) else : #if all is normal forces [ index1 ] . append ( f [ 1 ] ) # push to left (smaller projection value) forces [ index2 ] . append ( f [ 0 ] ) forces = { k : sum ( v ) for k , v in forces . items ( ) } energy = sum ( [ abs ( x ) for x in forces . values ( ) ] ) return [ ( forces [ k ] / 10 + v ) for k , v in enumerate ( values ) ] , energy
Calculates forces between two diagrams and pushes them apart by tenth of width
417
15
5,155
def variants ( self , case_id , skip = 0 , count = 1000 , filters = None ) : filters = filters or { } logger . debug ( "Fetching case with case_id: {0}" . format ( case_id ) ) case_obj = self . case ( case_id ) plugin , case_id = self . select_plugin ( case_obj ) self . filters = plugin . filters gene_lists = ( self . gene_list ( list_id ) for list_id in filters . get ( 'gene_lists' , [ ] ) ) nested_geneids = ( gene_list . gene_ids for gene_list in gene_lists ) gene_ids = set ( itertools . chain . from_iterable ( nested_geneids ) ) if filters . get ( 'gene_ids' ) : filters [ 'gene_ids' ] . extend ( gene_ids ) else : filters [ 'gene_ids' ] = gene_ids variants = plugin . variants ( case_id , skip , count , filters ) return variants
Fetch variants for a case .
232
7
5,156
def variant ( self , case_id , variant_id ) : case_obj = self . case ( case_id ) plugin , case_id = self . select_plugin ( case_obj ) variant = plugin . variant ( case_id , variant_id ) return variant
Fetch a single variant from variant source .
58
9
5,157
def redis_es_sub ( session , tables , redis_dsn , strict = False , namespace = None , ttl = 3600 * 24 * 3 , socket_timeout = 1 ) : logger = logging . getLogger ( "meepo.sub.redis_es_sub" ) if not isinstance ( tables , ( list , set ) ) : raise ValueError ( "tables should be list or set" ) # install event store hook for tables event_store = RedisEventStore ( redis_dsn , namespace = namespace , ttl = ttl , socket_timeout = socket_timeout ) def _es_event_sub ( pk , event ) : if event_store . add ( event , str ( pk ) ) : logger . info ( "%s: %s -> %s" % ( event , pk , datetime . datetime . now ( ) ) ) else : logger . error ( "event sourcing failed: %s" % pk ) events = ( "%s_%s" % ( tb , action ) for tb , action in itertools . product ( * [ tables , [ "write" , "update" , "delete" ] ] ) ) for event in events : sub_func = functools . partial ( _es_event_sub , event = event ) signal ( event ) . connect ( sub_func , weak = False ) # install prepare-commit hook prepare_commit = RedisPrepareCommit ( redis_dsn , strict = strict , namespace = namespace , socket_timeout = socket_timeout ) signal ( "session_prepare" ) . connect ( prepare_commit . prepare , sender = session , weak = False ) signal ( "session_commit" ) . connect ( prepare_commit . commit , sender = session , weak = False ) signal ( "session_rollback" ) . connect ( prepare_commit . rollback , sender = session , weak = False ) return event_store , prepare_commit
Redis EventSourcing sub .
427
7
5,158
def setup_parser ( ) : parser = argparse . ArgumentParser ( ) parser . add_argument ( '-p' , '--port' , type = int , default = 5005 ) parser . add_argument ( '-i' , '--interval' , type = int , default = 480 ) parser . add_argument ( 'host' , type = str , help = 'hostname' ) return parser
Setup an ArgumentParser .
89
5
5,159
def main ( ) : args = setup_parser ( ) . parse_args ( ) host = getattr ( args , "host" ) port = getattr ( args , "port" ) ipv4 = socket . gethostbyname ( host ) interval = getattr ( args , "interval" ) receiver = McDevice ( ipv4 , udp_port = port , mc_interval = interval ) receiver . handle_status ( ) # wait for UDP messages while True : time . sleep ( 0.2 )
Connect to a McDevice
111
5
5,160
def from_sqlite ( cls , database_path , base_url , version = 'auto' , client_id = 'ghost-admin' ) : import os import sqlite3 fd = os . open ( database_path , os . O_RDONLY ) connection = sqlite3 . connect ( '/dev/fd/%d' % fd ) os . close ( fd ) try : row = connection . execute ( 'SELECT secret FROM clients WHERE slug = ?' , ( client_id , ) ) . fetchone ( ) if row : return cls ( base_url , version = version , client_id = client_id , client_secret = row [ 0 ] ) else : raise GhostException ( 401 , [ { 'errorType' : 'InternalError' , 'message' : 'No client_secret found for client_id: %s' % client_id } ] ) finally : connection . close ( )
Initialize a new Ghost API client reading the client ID and secret from the SQlite database .
201
19
5,161
def login ( self , username , password ) : data = self . _authenticate ( grant_type = 'password' , username = username , password = password , client_id = self . _client_id , client_secret = self . _client_secret ) self . _username = username self . _password = password return data
Authenticate with the server .
70
6
5,162
def refresh_session ( self ) : if not self . _refresh_token : if self . _username and self . _password : return self . login ( self . _username , self . _password ) return return self . _authenticate ( grant_type = 'refresh_token' , refresh_token = self . _refresh_token , client_id = self . _client_id , client_secret = self . _client_secret )
Re - authenticate using the refresh token if available . Otherwise log in using the username and password if it was used to authenticate initially .
97
28
5,163
def revoke_access_token ( self ) : if not self . _access_token : return self . execute_post ( 'authentication/revoke' , json = dict ( token_type_hint = 'access_token' , token = self . _access_token ) ) self . _access_token = None
Revoke the access token currently in use .
69
9
5,164
def revoke_refresh_token ( self ) : if not self . _refresh_token : return self . execute_post ( 'authentication/revoke' , json = dict ( token_type_hint = 'refresh_token' , token = self . _refresh_token ) ) self . _refresh_token = None
Revoke the refresh token currently active .
74
8
5,165
def logout ( self ) : self . revoke_refresh_token ( ) self . revoke_access_token ( ) self . _username , self . _password = None , None
Log out revoking the access tokens and forgetting the login details if they were given .
39
17
5,166
def upload ( self , file_obj = None , file_path = None , name = None , data = None ) : close = False if file_obj : file_name , content = os . path . basename ( file_obj . name ) , file_obj elif file_path : file_name , content = os . path . basename ( file_path ) , open ( file_path , 'rb' ) close = True elif name and data : file_name , content = name , data else : raise GhostException ( 400 , 'Either `file_obj` or `file_path` or ' '`name` and `data` needs to be specified' ) try : content_type , _ = mimetypes . guess_type ( file_name ) file_arg = ( file_name , content , content_type ) response = self . execute_post ( 'uploads/' , files = { 'uploadimage' : file_arg } ) return response finally : if close : content . close ( )
Upload an image and return its path on the server . Either file_obj or file_path or name and data has to be specified .
219
28
5,167
def execute_get ( self , resource , * * kwargs ) : url = '%s/%s' % ( self . base_url , resource ) headers = kwargs . pop ( 'headers' , dict ( ) ) headers [ 'Accept' ] = 'application/json' headers [ 'Content-Type' ] = 'application/json' if kwargs : separator = '&' if '?' in url else '?' for key , value in kwargs . items ( ) : if hasattr ( value , '__iter__' ) and type ( value ) not in six . string_types : url = '%s%s%s=%s' % ( url , separator , key , ',' . join ( value ) ) else : url = '%s%s%s=%s' % ( url , separator , key , value ) separator = '&' if self . _access_token : headers [ 'Authorization' ] = 'Bearer %s' % self . _access_token else : separator = '&' if '?' in url else '?' url = '%s%sclient_id=%s&client_secret=%s' % ( url , separator , self . _client_id , self . _client_secret ) response = requests . get ( url , headers = headers ) if response . status_code // 100 != 2 : raise GhostException ( response . status_code , response . json ( ) . get ( 'errors' , [ ] ) ) return response . json ( )
Execute an HTTP GET request against the API endpoints . This method is meant for internal use .
338
20
5,168
def execute_post ( self , resource , * * kwargs ) : return self . _request ( resource , requests . post , * * kwargs ) . json ( )
Execute an HTTP POST request against the API endpoints . This method is meant for internal use .
38
20
5,169
def execute_put ( self , resource , * * kwargs ) : return self . _request ( resource , requests . put , * * kwargs ) . json ( )
Execute an HTTP PUT request against the API endpoints . This method is meant for internal use .
38
21
5,170
def execute_delete ( self , resource , * * kwargs ) : self . _request ( resource , requests . delete , * * kwargs )
Execute an HTTP DELETE request against the API endpoints . This method is meant for internal use . Does not return anything but raises an exception when failed .
33
33
5,171
def token_distance ( t1 , t2 , initial_match_penalization ) : if isinstance ( t1 , NameInitial ) or isinstance ( t2 , NameInitial ) : if t1 . token == t2 . token : return 0 if t1 == t2 : return initial_match_penalization return 1.0 return _normalized_edit_dist ( t1 . token , t2 . token )
Calculates the edit distance between two tokens .
92
10
5,172
def simple_tokenize ( name ) : last_names , first_names = name . split ( ',' ) last_names = _RE_NAME_TOKEN_SEPARATOR . split ( last_names ) first_names = _RE_NAME_TOKEN_SEPARATOR . split ( first_names ) first_names = [ NameToken ( n ) if len ( n ) > 1 else NameInitial ( n ) for n in first_names if n ] last_names = [ NameToken ( n ) if len ( n ) > 1 else NameInitial ( n ) for n in last_names if n ] return { 'lastnames' : last_names , 'nonlastnames' : first_names }
Simple tokenizer function to be used with the normalizers .
153
12
5,173
def calculate_descriptors ( self , mol ) : #make dictionary self . ligand_atoms = { index : { "name" : x . name } for index , x in enumerate ( self . topology_data . universe . ligand_noH . atoms ) } #Calculate logP and MR contribs = self . calculate_logP ( mol ) #Calculate Gasteiger charges self . calculate_Gasteiger_charges ( mol ) #Calculate formal charges fcharges = self . calculate_formal_charge ( mol ) for atom in self . ligand_atoms . keys ( ) : self . ligand_atoms [ atom ] [ "logP" ] = contribs [ atom ] [ 0 ] self . ligand_atoms [ atom ] [ "MR" ] = contribs [ atom ] [ 1 ] self . ligand_atoms [ atom ] [ "Gasteiger_ch" ] = mol . GetAtomWithIdx ( atom ) . GetProp ( "_GasteigerCharge" ) self . ligand_atoms [ atom ] [ "Formal charges" ] = fcharges [ atom ] #Determine rotatable bonds self . rot_bonds = self . get_rotatable_bonds ( mol )
Calculates descriptors such as logP charges and MR and saves that in a dictionary .
279
19
5,174
def variants ( self , case_id , skip = 0 , count = 1000 , filters = None ) : filters = filters or { } case_obj = self . case ( case_id = case_id ) limit = count + skip genes = set ( ) if filters . get ( 'gene_ids' ) : genes = set ( [ gene_id . strip ( ) for gene_id in filters [ 'gene_ids' ] ] ) frequency = None if filters . get ( 'frequency' ) : frequency = float ( filters [ 'frequency' ] ) cadd = None if filters . get ( 'cadd' ) : cadd = float ( filters [ 'cadd' ] ) genetic_models = None if filters . get ( 'genetic_models' ) : genetic_models = set ( filters [ 'genetic_models' ] ) sv_len = None if filters . get ( 'sv_len' ) : sv_len = float ( filters [ 'sv_len' ] ) impact_severities = None if filters . get ( 'impact_severities' ) : impact_severities = set ( filters [ 'impact_severities' ] ) vcf_file_path = case_obj . variant_source self . head = get_header ( vcf_file_path ) self . vep_header = self . head . vep_columns self . snpeff_header = self . head . snpeff_columns variants = self . _get_filtered_variants ( vcf_file_path , filters ) result = [ ] skip_index = 0 for index , variant in enumerate ( variants ) : index += 1 if skip_index >= skip : variant_obj = self . _format_variants ( variant = variant , index = index , case_obj = case_obj , ) if genes and variant_obj : if not set ( variant_obj [ 'gene_symbols' ] ) . intersection ( genes ) : variant_obj = None if impact_severities and variant_obj : if not variant_obj [ 'impact_severity' ] in impact_severities : variant_obj = None if frequency and variant_obj : if variant_obj . max_freq > frequency : variant_obj = None if cadd and variant_obj : if variant_obj [ 'cadd_score' ] < cadd : variant_obj = None if genetic_models and variant_obj : models = set ( variant_obj . genetic_models ) if not models . intersection ( genetic_models ) : variant_obj = None if sv_len and variant_obj : if variant_obj . sv_len < sv_len : variant_obj = None if variant_obj : skip_index += 1 if skip_index <= limit : result . append ( variant_obj ) else : break else : skip_index += 1 return Results ( result , len ( result ) )
Return all variants in the VCF .
625
8
5,175
def _get_filtered_variants ( self , vcf_file_path , filters = { } ) : genes = set ( ) consequences = set ( ) sv_types = set ( ) if filters . get ( 'gene_ids' ) : genes = set ( [ gene_id . strip ( ) for gene_id in filters [ 'gene_ids' ] ] ) if filters . get ( 'consequence' ) : consequences = set ( filters [ 'consequence' ] ) if filters . get ( 'sv_types' ) : sv_types = set ( filters [ 'sv_types' ] ) logger . info ( "Get variants from {0}" . format ( vcf_file_path ) ) if filters . get ( 'range' ) : range_str = "{0}:{1}-{2}" . format ( filters [ 'range' ] [ 'chromosome' ] , filters [ 'range' ] [ 'start' ] , filters [ 'range' ] [ 'end' ] ) vcf = VCF ( vcf_file_path ) handle = vcf ( range_str ) else : handle = VCF ( vcf_file_path ) for variant in handle : variant_line = str ( variant ) keep_variant = True if genes and keep_variant : keep_variant = False for gene in genes : if "{0}" . format ( gene ) in variant_line : keep_variant = True break if consequences and keep_variant : keep_variant = False for consequence in consequences : if consequence in variant_line : keep_variant = True break if sv_types and keep_variant : keep_variant = False for sv_type in sv_types : if sv_type in variant_line : keep_variant = True break if keep_variant : yield variant
Check if variants follows the filters
397
6
5,176
def fnv ( data , hval_init , fnv_prime , fnv_size ) : assert isinstance ( data , bytes ) hval = hval_init for byte in data : hval = ( hval * fnv_prime ) % fnv_size hval = hval ^ _get_byte ( byte ) return hval
Core FNV hash algorithm used in FNV0 and FNV1 .
75
15
5,177
def session_prepare ( self , session , _ ) : if not hasattr ( session , 'meepo_unique_id' ) : self . _session_init ( session ) evt = collections . defaultdict ( set ) for action in ( "write" , "update" , "delete" ) : objs = getattr ( session , "pending_%s" % action ) # filter tables if possible if self . tables : objs = [ o for o in objs if o . __table__ . fullname in self . tables ] for obj in objs : evt_name = "%s_%s" % ( obj . __table__ . fullname , action ) evt [ evt_name ] . add ( obj ) self . logger . debug ( "%s - session_prepare: %s -> %s" % ( session . meepo_unique_id , evt_name , evt ) ) # only trigger signal when event exists if evt : signal ( "session_prepare" ) . send ( session , event = evt )
Send session_prepare signal in session before_commit .
233
12
5,178
def session_commit ( self , session ) : # this may happen when there's nothing to commit if not hasattr ( session , 'meepo_unique_id' ) : self . logger . debug ( "skipped - session_commit" ) return # normal session pub self . logger . debug ( "%s - session_commit" % session . meepo_unique_id ) self . _session_pub ( session ) signal ( "session_commit" ) . send ( session ) self . _session_del ( session )
Send session_commit signal in sqlalchemy before_commit .
113
13
5,179
def session_rollback ( self , session ) : # this may happen when there's nothing to rollback if not hasattr ( session , 'meepo_unique_id' ) : self . logger . debug ( "skipped - session_rollback" ) return # del session meepo id after rollback self . logger . debug ( "%s - after_rollback" % session . meepo_unique_id ) signal ( "session_rollback" ) . send ( session ) self . _session_del ( session )
Send session_rollback signal in sqlalchemy after_rollback .
115
15
5,180
def process_fig_and_ax_argument ( fig , ax , default_figsize = None ) : if default_figsize is not None : assert type ( default_figsize ) in [ tuple , list ] assert len ( default_figsize ) == 2 if ( fig is None ) and ( ax is None ) : fig , ax = plt . subplots ( figsize = default_figsize ) else : assert ( is_figure ( fig ) ) and ( is_axes ( ax ) ) return fig , ax
Process fig and ax arguments .
113
6
5,181
def get_square_axes_limits ( coords , margin = 0.05 ) : #coords = [x,y,z] try : coords = [ np . array ( coord ) for coord in coords ] except : raise Exception ( "Failed to convert elements of 'coords' into numpy.array" ) lims = [ ( coord . min ( ) , coord . max ( ) ) for coord in coords ] mids = [ 0.5 * ( lim [ 0 ] + lim [ 1 ] ) for lim in lims ] widths = [ 0.5 * ( lim [ 1 ] - lim [ 0 ] ) for lim in lims ] max_width = max ( widths ) max_width += max_width * margin ax_lims = tuple ( ( mid - max_width , mid + max_width ) for mid in mids ) #xlim, ylim, zlim = ax_lims return ax_lims
Return N - dimensional square s limits
206
7
5,182
def SetPosition ( self , track_id , position ) : self . iface . SetPosition ( convert2dbus ( track_id , 'o' ) , convert2dbus ( position , 'x' ) )
Sets the current track position in microseconds .
47
10
5,183
def process_lists ( self ) : for l1_idx , obj1 in enumerate ( self . l1 ) : for l2_idx , obj2 in enumerate ( self . l2 ) : if self . equal ( obj1 , obj2 ) : self . matches . add ( ( l1_idx , l2_idx ) )
Do any preprocessing of the lists .
78
8
5,184
def get_matches ( self , src , src_idx ) : if src not in ( 'l1' , 'l2' ) : raise ValueError ( 'Must have one of "l1" or "l2" as src' ) if src == 'l1' : target_list = self . l2 else : target_list = self . l1 comparator = { 'l1' : lambda s_idx , t_idx : ( s_idx , t_idx ) in self . matches , 'l2' : lambda s_idx , t_idx : ( t_idx , s_idx ) in self . matches , } [ src ] return [ ( trg_idx , obj ) for trg_idx , obj in enumerate ( target_list ) if comparator ( src_idx , trg_idx ) ]
Get elements equal to the idx th in src from the other list .
195
15
5,185
def find_the_closest_atoms ( self , topology ) : # The measurements are made to ligand molecule without hydrogen atoms (ligand_noH) because the # hydrogen atoms are not plotted in the final graph self . universe . load_new ( topology ) self . universe . ligand_noH = self . universe . ligand . select_atoms ( "not name H*" ) ligand_positions = self . universe . ligand_noH . positions for residue in self . dict_of_plotted_res . keys ( ) : residue_selection = self . universe . select_atoms ( "resname " + residue [ 0 ] + " and resid " + residue [ 1 ] + " and segid " + residue [ 2 ] ) residue_positions = residue_selection . positions dist_array = MDAnalysis . analysis . distances . distance_array ( ligand_positions , residue_positions ) min_values_per_atom = { } i = 0 for atom in self . universe . ligand_noH : min_values_per_atom [ atom . name ] = dist_array [ i ] . min ( ) i += 1 sorted_min_values = sorted ( min_values_per_atom . items ( ) , key = operator . itemgetter ( 1 ) ) self . closest_atoms [ residue ] = [ ( sorted_min_values [ 0 ] [ 0 ] , sorted_min_values [ 0 ] [ 1 ] ) ]
This function defines the ligand atoms that are closest to the residues that will be plotted in the final graph .
325
22
5,186
def load_data ( self , topology , mol_file , ligand_name , offset = 0 ) : self . load_topology ( topology ) self . renumber_system ( offset ) self . rename_ligand ( ligand_name , mol_file ) self . load_mol ( mol_file )
This function loads all relevant data - except trajectories since those are dealt with one at a time . Therefore this process only needs to be done once and every time a trajectory needs to be loaded it can be loaded seperataly and the Data object can be shared across LINTools processes .
69
59
5,187
def analyse_topology ( self , topology , cutoff = 3.5 ) : self . define_residues_for_plotting_topology ( cutoff ) self . find_the_closest_atoms ( topology )
In case user wants to analyse only a single topology file this process will determine the residues that should be plotted and find the ligand atoms closest to these residues .
52
33
5,188
def get_header ( vcf_file_path ) : logger . info ( "Parsing header of file {0}" . format ( vcf_file_path ) ) head = HeaderParser ( ) handle = get_vcf_handle ( infile = vcf_file_path ) # Parse the header for line in handle : line = line . rstrip ( ) if line . startswith ( '#' ) : if line . startswith ( '##' ) : head . parse_meta_data ( line ) else : head . parse_header_line ( line ) else : break handle . close ( ) return head
Parse the header and return a header object
137
9
5,189
def sample_lonlat ( self , n ) : # From http://en.wikipedia.org/wiki/Ellipse#General_parametric_form # However, Martin et al. (2009) use PA theta "from North to East" # Definition of phi (position angle) is offset by pi/4 # Definition of t (eccentric anamoly) remains the same (x,y-frame usual) # In the end, everything is trouble because we use glon, glat... radius = self . sample_radius ( n ) a = radius b = self . jacobian * radius t = 2. * np . pi * np . random . rand ( n ) cost , sint = np . cos ( t ) , np . sin ( t ) phi = np . pi / 2. - np . deg2rad ( self . theta ) cosphi , sinphi = np . cos ( phi ) , np . sin ( phi ) x = a * cost * cosphi - b * sint * sinphi y = a * cost * sinphi + b * sint * cosphi if self . projector is None : logger . debug ( "Creating AITOFF projector for sampling" ) projector = Projector ( self . lon , self . lat , 'ait' ) else : projector = self . projector lon , lat = projector . imageToSphere ( x , y ) return lon , lat
Sample 2D distribution of points in lon lat
306
10
5,190
def group ( iterable , key ) : for _ , grouped in groupby ( sorted ( iterable , key = key ) , key = key ) : yield list ( grouped )
groupby which sorts the input discards the key and returns the output as a sequence of lists .
37
20
5,191
def aggregate_count ( keyname ) : def inner ( docs ) : return sum ( doc [ keyname ] for doc in docs ) return keyname , inner
Straightforward sum of the given keyname .
33
10
5,192
def aggregate_rate ( rate_key , count_key ) : def inner ( docs ) : total = sum ( doc [ count_key ] for doc in docs ) weighted_total = sum ( doc [ rate_key ] * doc [ count_key ] for doc in docs ) total_rate = weighted_total / total return total_rate return rate_key , inner
Compute an aggregate rate for rate_key weighted according to count_rate .
78
16
5,193
def make_aggregate ( docs , aggregations ) : new_doc = dict ( docs [ 0 ] ) for keyname , aggregation_function in aggregations : new_doc [ keyname ] = aggregation_function ( docs ) return new_doc
Given docs and aggregations return a single document with the aggregations applied .
52
15
5,194
def json ( value ) : uncleaned = jsonlib . dumps ( value ) clean = bleach . clean ( uncleaned ) return mark_safe ( clean )
Sanitize the JSON string using the Bleach HTML tag remover
32
13
5,195
def find_pareto_front ( population ) : pareto_front = set ( range ( len ( population ) ) ) for i in range ( len ( population ) ) : if i not in pareto_front : continue ind1 = population [ i ] for j in range ( i + 1 , len ( population ) ) : ind2 = population [ j ] # if individuals are equal on all objectives, mark one of them (the first encountered one) as dominated # to prevent excessive growth of the Pareto front if ind2 . fitness . dominates ( ind1 . fitness ) or ind1 . fitness == ind2 . fitness : pareto_front . discard ( i ) if ind1 . fitness . dominates ( ind2 . fitness ) : pareto_front . discard ( j ) return pareto_front
Finds a subset of nondominated individuals in a given list
175
12
5,196
def _to_ndarray ( self , a ) : if isinstance ( a , ( list , tuple ) ) : a = numpy . array ( a ) if not is_ndarray ( a ) : raise TypeError ( "Expected an ndarray but got object of type '{}' instead" . format ( type ( a ) ) ) return a
Casts Python lists and tuples to a numpy array or raises an AssertionError .
77
20
5,197
def fn_abs ( self , value ) : if is_ndarray ( value ) : return numpy . absolute ( value ) else : return abs ( value )
Return the absolute value of a number .
34
8
5,198
def fn_get_mask ( self , value ) : value = self . _to_ndarray ( value ) if numpy . ma . is_masked ( value ) : return value . mask else : return numpy . zeros ( value . shape ) . astype ( bool )
Return an array mask .
61
5
5,199
def fn_min ( self , a , axis = None ) : return numpy . nanmin ( self . _to_ndarray ( a ) , axis = axis )
Return the minimum of an array ignoring any NaNs .
36
11