idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
5,000
def count_by_type ( self ) : pistack = defaultdict ( int ) for contact in self . timeseries : #count by residue name not by proteinring pkey = ( contact . ligandring , contact . type , contact . resid , contact . resname , contact . segid ) pistack [ pkey ] += 1 dtype = [ ( "ligand_ring_ids" , list ) , ( "type" , "|U4" ) , ( "resid" , int ) , ( "resname" , "|U4" ) , ( "segid" , "|U8" ) , ( "frequency" , float ) ] out = np . empty ( ( len ( pistack ) , ) , dtype = dtype ) tsteps = float ( len ( self . timesteps ) ) for cursor , ( key , count ) in enumerate ( pistack . iteritems ( ) ) : out [ cursor ] = key + ( count / tsteps , ) return out . view ( np . recarray )
Count how many times each individual pi - pi interaction occured throughout the simulation . Returns numpy array .
225
21
5,001
def main ( master_dsn , slave_dsn , tables , blocking = False ) : # currently only supports mysql master assert master_dsn . startswith ( "mysql" ) logger = logging . getLogger ( __name__ ) logger . info ( "replicating tables: %s" % ", " . join ( tables ) ) repl_db_sub ( master_dsn , slave_dsn , tables ) mysql_pub ( master_dsn , blocking = blocking )
DB Replication app .
105
5
5,002
def get_text_position_in_ax_coord ( ax , pos , scale = default_text_relative_padding ) : ratio = get_axes_ratio ( ax ) x , y = scale , scale if ratio > 1 : # vertical is longer y /= ratio elif 0 < ratio : # 0 < ratio <= 1 x *= ratio pos = pos . lower ( ) if pos == 'nw' : y = 1 - y elif pos == 'ne' : x , y = 1 - x , 1 - y elif pos == 'sw' : pass elif pos == 'se' : x = 1 - x else : raise ValueError ( "Unknown value for 'pos': %s" % ( str ( pos ) ) ) return x , y
Return text position corresponding to given pos . The text alignment in the bounding box should be set accordingly in order to have a good - looking layout . This corresponding text alignment can be obtained by get_text_alignment or get_text_position_and_inner_alignment function .
164
59
5,003
def get_text_position_and_inner_alignment ( ax , pos , scale = default_text_relative_padding , with_transAxes_kwargs = True ) : xy = get_text_position_in_ax_coord ( ax , pos , scale = scale ) alignment_fontdict = get_text_alignment ( pos ) if with_transAxes_kwargs : alignment_fontdict = { * * alignment_fontdict , * * { 'transform' : ax . transAxes } } return xy , alignment_fontdict
Return text position and its alignment in its bounding box . The returned position is given in Axes coordinate as defined in matplotlib documentation on transformation .
122
31
5,004
def get_text_position ( fig , ax , ha = 'left' , va = 'top' , pad_scale = 1.0 ) : ## Check and preprocess input arguments try : pad_scale = float ( pad_scale ) except : raise TypeError ( "'pad_scale should be of type 'float'" ) for arg in [ va , ha ] : assert type ( arg ) is str arg = arg . lower ( ) # Make it lowercase to prevent case problem. ## Get axis size in inches ax_height , ax_width = get_ax_size_in_inch ( fig , ax ) ## Construct inversion factor from inch to plot coordinate length_x = ax . get_xlim ( ) [ 1 ] - ax . get_xlim ( ) [ 0 ] length_y = ax . get_ylim ( ) [ 1 ] - ax . get_ylim ( ) [ 0 ] inch2coord_x = length_x / ax_width inch2coord_y = length_y / ax_height ## Set padding size relative to the text size #pad_inch = text_bbox_inch.height * pad_scale #pad_inch = fontsize_points * point2inch * pad_scale ax_length_geom_average = ( ax_height * ax_width ) ** 0.5 pad_inch = ax_length_geom_average * 0.03 * pad_scale pad_inch_x , pad_inch_y = pad_inch , pad_inch pad_coord_x = pad_inch_x * inch2coord_x pad_coord_y = pad_inch_y * inch2coord_y if ha == 'left' : pos_x = ax . get_xlim ( ) [ 0 ] + pad_coord_x elif ha == 'right' : pos_x = ax . get_xlim ( ) [ 1 ] - pad_coord_x else : raise Exception ( "Unsupported value for 'ha'" ) if va in [ 'top' , 'up' , 'upper' ] : pos_y = ax . get_ylim ( ) [ 1 ] - pad_coord_y elif va in [ 'bottom' , 'down' , 'lower' ] : pos_y = ax . get_ylim ( ) [ 0 ] + pad_coord_y else : raise Exception ( "Unsupported value for 'va'" ) return pos_x , pos_y
Return text position inside of the given axis
528
8
5,005
def create_app ( config = None , config_obj = None ) : app = Flask ( __name__ ) # configure application from external configs configure_app ( app , config = config , config_obj = config_obj ) # register different parts of the application register_blueprints ( app ) # setup extensions bind_extensions ( app ) return app
Flask app factory function .
75
6
5,006
def configure_app ( app , config = None , config_obj = None ) : app . config . from_object ( config_obj or BaseConfig ) if config is not None : app . config . from_pyfile ( config )
Configure application instance .
50
5
5,007
def bind_extensions ( app ) : # bind plugin to app object app . db = app . config [ 'PUZZLE_BACKEND' ] app . db . init_app ( app ) # bind bootstrap blueprints bootstrap . init_app ( app ) markdown ( app ) @ app . template_filter ( 'islist' ) def islist ( object ) : return isinstance ( object , ( tuple , list ) )
Configure extensions .
93
4
5,008
def find_donors_and_acceptors_in_ligand ( self ) : atom_names = [ x . name for x in self . topology_data . universe . ligand ] try : for atom in self . topology_data . mol . GetSubstructMatches ( self . HDonorSmarts , uniquify = 1 ) : self . donors . append ( atom_names [ atom [ 0 ] ] ) for atom in self . topology_data . mol . GetSubstructMatches ( self . HAcceptorSmarts , uniquify = 1 ) : self . acceptors . append ( atom_names [ atom [ 0 ] ] ) except Exception as e : m = Chem . MolFromPDBFile ( "lig.pdb" ) self . donors = [ ] self . acceptors = [ ] for atom in m . GetSubstructMatches ( self . HDonorSmarts , uniquify = 1 ) : self . donors . append ( atom_names [ atom [ 0 ] ] ) haccep = "[$([O,S;H1;v2]-[!$(*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=!@[O,N,P,S])]),$([nH0,o,s;+0])]" self . HAcceptorSmarts = Chem . MolFromSmarts ( haccep ) for atom in m . GetSubstructMatches ( self . HAcceptorSmarts , uniquify = 1 ) : self . acceptors . append ( atom_names [ atom [ 0 ] ] )
Since MDAnalysis a pre - set list for acceptor and donor atoms for proteins and solvents from specific forcefields it is necessary to find donor and acceptor atoms for the ligand molecule . This function uses RDKit and searches through ligand atoms to find matches for pre - set list of possible donor and acceptor atoms . The resulting list is then parsed to MDAnalysis through the donors and acceptors arguments .
375
84
5,009
def count_by_type ( self , table , timesteps ) : hbonds = defaultdict ( int ) for contact in table : #count by residue name not by proteinring pkey = ( contact . donor_idx , contact . acceptor_idx , contact . donor_atom , contact . acceptor_atom , contact . donor_resnm , contact . donor_resid , contact . acceptor_resnm , contact . acceptor_resid ) hbonds [ pkey ] += 1 dtype = [ ( "donor_idx" , int ) , ( "acceptor_idx" , int ) , ( "donor_atom" , "|U4" ) , ( "acceptor_atom" , "|U4" ) , ( "donor_resnm" , "|U8" ) , ( "donor_resid" , "|U8" ) , ( "acceptor_resnm" , "|U8" ) , ( "acceptor_resid" , "|U8" ) , ( "frequency" , float ) ] out = np . empty ( ( len ( hbonds ) , ) , dtype = dtype ) tsteps = float ( len ( timesteps ) ) for cursor , ( key , count ) in enumerate ( hbonds . iteritems ( ) ) : out [ cursor ] = key + ( count / tsteps , ) return out . view ( np . recarray )
Count how many times each individual hydrogen bonds occured throughout the simulation . Returns numpy array .
320
19
5,010
def determine_hbonds_for_drawing ( self , analysis_cutoff ) : self . frequency = defaultdict ( int ) for traj in self . hbonds_by_type : for bond in self . hbonds_by_type [ traj ] : # frequency[(residue_atom_idx,ligand_atom_name,residue_atom_name)]=frequency # residue atom name will be used to determine if hydrogen bond is interacting with a sidechain or bakcbone if bond [ "donor_resnm" ] != "LIG" : self . frequency [ ( bond [ "donor_idx" ] , bond [ "acceptor_atom" ] , bond [ "donor_atom" ] , bond [ "acceptor_idx" ] ) ] += bond [ "frequency" ] #check whether ligand is donor or acceptor else : self . frequency [ ( bond [ "acceptor_idx" ] , bond [ "donor_atom" ] , bond [ "acceptor_atom" ] , bond [ "donor_idx" ] ) ] += bond [ "frequency" ] #Add the frequency counts self . frequency = { i : self . frequency [ i ] for i in self . frequency if self . frequency [ i ] > ( int ( len ( self . trajectory ) ) * analysis_cutoff ) } #change the ligand atomname to a heavy atom - required for plot since only heavy atoms shown in final image self . hbonds_for_drawing = { } for bond in self . frequency : atomname = bond [ 1 ] if atomname . startswith ( "O" , 0 ) or atomname . startswith ( "N" , 0 ) : lig_atom = atomname else : atomindex = [ index for index , atom in enumerate ( self . topology_data . universe . ligand . atoms ) if atom . name == atomname ] [ 0 ] rdkit_atom = self . topology_data . mol . GetAtomWithIdx ( atomindex ) for neigh in rdkit_atom . GetNeighbors ( ) : neigh_atom_id = neigh . GetIdx ( ) lig_atom = [ atom . name for index , atom in enumerate ( self . topology_data . universe . ligand . atoms ) if index == neigh_atom_id ] [ 0 ] self . hbonds_for_drawing [ ( bond [ 0 ] , lig_atom , bond [ 2 ] , bond [ 3 ] ) ] = self . frequency [ bond ]
Since plotting all hydrogen bonds could lead to a messy plot a cutoff has to be imple - mented . In this function the frequency of each hydrogen bond is summated and the total compared against analysis cutoff - a fraction multiplied by trajectory count . Those hydrogen bonds that are present for longer than analysis cutoff will be plotted in the final plot .
560
68
5,011
def convert2dbus ( value , signature ) : if len ( signature ) == 2 and signature . startswith ( 'a' ) : return dbus . Array ( value , signature = signature [ - 1 ] ) dbus_string_type = dbus . String if PY3 else dbus . UTF8String type_map = { 'b' : dbus . Boolean , 'y' : dbus . Byte , 'n' : dbus . Int16 , 'i' : dbus . Int32 , 'x' : dbus . Int64 , 'q' : dbus . UInt16 , 'u' : dbus . UInt32 , 't' : dbus . UInt64 , 'd' : dbus . Double , 'o' : dbus . ObjectPath , 'g' : dbus . Signature , 's' : dbus_string_type } return type_map [ signature ] ( value )
Converts value type from python to dbus according signature .
203
12
5,012
def convert ( dbus_obj ) : _isinstance = partial ( isinstance , dbus_obj ) ConvertType = namedtuple ( 'ConvertType' , 'pytype dbustypes' ) pyint = ConvertType ( int , ( dbus . Byte , dbus . Int16 , dbus . Int32 , dbus . Int64 , dbus . UInt16 , dbus . UInt32 , dbus . UInt64 ) ) pybool = ConvertType ( bool , ( dbus . Boolean , ) ) pyfloat = ConvertType ( float , ( dbus . Double , ) ) pylist = ConvertType ( lambda _obj : list ( map ( convert , dbus_obj ) ) , ( dbus . Array , ) ) pytuple = ConvertType ( lambda _obj : tuple ( map ( convert , dbus_obj ) ) , ( dbus . Struct , ) ) types_str = ( dbus . ObjectPath , dbus . Signature , dbus . String ) if not PY3 : types_str += ( dbus . UTF8String , ) pystr = ConvertType ( str if PY3 else unicode , types_str ) pydict = ConvertType ( lambda _obj : dict ( zip ( map ( convert , dbus_obj . keys ( ) ) , map ( convert , dbus_obj . values ( ) ) ) ) , ( dbus . Dictionary , ) ) for conv in ( pyint , pybool , pyfloat , pylist , pytuple , pystr , pydict ) : if any ( map ( _isinstance , conv . dbustypes ) ) : return conv . pytype ( dbus_obj ) else : return dbus_obj
Converts dbus_obj from dbus type to python type .
370
14
5,013
def converter ( f ) : @ wraps ( f ) def wrapper ( * args , * * kwds ) : return convert ( f ( * args , * * kwds ) ) return wrapper
Decorator to convert value from dbus type to python type .
41
14
5,014
def exception_wrapper ( f ) : @ wraps ( f ) def wrapper ( * args , * * kwds ) : try : return f ( * args , * * kwds ) except dbus . exceptions . DBusException as err : _args = err . args raise PyMPRISException ( * _args ) return wrapper
Decorator to convert dbus exception to pympris exception .
71
15
5,015
def available_players ( ) : bus = dbus . SessionBus ( ) players = set ( ) for name in filter ( lambda item : item . startswith ( MPRIS_NAME_PREFIX ) , bus . list_names ( ) ) : owner_name = bus . get_name_owner ( name ) players . add ( convert ( owner_name ) ) return players
Searchs and returns set of unique names of objects which implements MPRIS2 interfaces .
82
18
5,016
def signal_wrapper ( f ) : @ wraps ( f ) def wrapper ( * args , * * kwds ) : args = map ( convert , args ) kwds = { convert ( k ) : convert ( v ) for k , v in kwds . items ( ) } return f ( * args , * * kwds ) return wrapper
Decorator converts function s arguments from dbus types to python .
75
14
5,017
def filter_properties_signals ( f , signal_iface_name ) : @ wraps ( f ) def wrapper ( iface , changed_props , invalidated_props , * args , * * kwargs ) : if iface == signal_iface_name : f ( changed_props , invalidated_props ) return wrapper
Filters signals by iface name .
76
8
5,018
def distance_function_match ( l1 , l2 , thresh , dist_fn , norm_funcs = [ ] ) : common = [ ] # We will keep track of the global index in the source list as we # will successively reduce their sizes. l1 = list ( enumerate ( l1 ) ) l2 = list ( enumerate ( l2 ) ) # Use the distance function and threshold on hints given by normalization. # See _match_by_norm_func for implementation details. # Also wrap the list element function function to ignore the global list # index computed above. for norm_fn in norm_funcs : new_common , l1 , l2 = _match_by_norm_func ( l1 , l2 , lambda a : norm_fn ( a [ 1 ] ) , lambda a1 , a2 : dist_fn ( a1 [ 1 ] , a2 [ 1 ] ) , thresh ) # Keep only the global list index in the end result. common . extend ( ( c1 [ 0 ] , c2 [ 0 ] ) for c1 , c2 in new_common ) # Take any remaining umatched entries and try to match them using the # Munkres algorithm. dist_matrix = [ [ dist_fn ( e1 , e2 ) for i2 , e2 in l2 ] for i1 , e1 in l1 ] # Call Munkres on connected components on the remaining bipartite graph. # An edge links an element from l1 with an element from l2 only if # the distance between the elements is less (or equal) than the theshold. components = BipartiteConnectedComponents ( ) for l1_i in range ( len ( l1 ) ) : for l2_i in range ( len ( l2 ) ) : if dist_matrix [ l1_i ] [ l2_i ] > thresh : continue components . add_edge ( l1_i , l2_i ) for l1_indices , l2_indices in components . get_connected_components ( ) : # Build a partial distance matrix for each connected component. part_l1 = [ l1 [ i ] for i in l1_indices ] part_l2 = [ l2 [ i ] for i in l2_indices ] part_dist_matrix = [ [ dist_matrix [ l1_i ] [ l2_i ] for l2_i in l2_indices ] for l1_i in l1_indices ] part_cmn = _match_munkres ( part_l1 , part_l2 , part_dist_matrix , thresh ) common . extend ( ( c1 [ 0 ] , c2 [ 0 ] ) for c1 , c2 in part_cmn ) return common
Returns pairs of matching indices from l1 and l2 .
616
12
5,019
def _match_by_norm_func ( l1 , l2 , norm_fn , dist_fn , thresh ) : common = [ ] l1_only_idx = set ( range ( len ( l1 ) ) ) l2_only_idx = set ( range ( len ( l2 ) ) ) buckets_l1 = _group_by_fn ( enumerate ( l1 ) , lambda x : norm_fn ( x [ 1 ] ) ) buckets_l2 = _group_by_fn ( enumerate ( l2 ) , lambda x : norm_fn ( x [ 1 ] ) ) for normed , l1_elements in buckets_l1 . items ( ) : l2_elements = buckets_l2 . get ( normed , [ ] ) if not l1_elements or not l2_elements : continue _ , ( _ , e1_first ) = l1_elements [ 0 ] _ , ( _ , e2_first ) = l2_elements [ 0 ] match_is_ambiguous = not ( len ( l1_elements ) == len ( l2_elements ) and ( all ( e2 == e2_first for ( _ , ( _ , e2 ) ) in l2_elements ) or all ( e1 == e1_first for ( _ , ( _ , e1 ) ) in l1_elements ) ) ) if match_is_ambiguous : continue for ( e1_idx , e1 ) , ( e2_idx , e2 ) in zip ( l1_elements , l2_elements ) : if dist_fn ( e1 , e2 ) > thresh : continue l1_only_idx . remove ( e1_idx ) l2_only_idx . remove ( e2_idx ) common . append ( ( e1 , e2 ) ) l1_only = [ l1 [ i ] for i in l1_only_idx ] l2_only = [ l2 [ i ] for i in l2_only_idx ] return common , l1_only , l2_only
Matches elements in l1 and l2 using normalization functions .
474
14
5,020
def _match_munkres ( l1 , l2 , dist_matrix , thresh ) : equal_dist_matches = set ( ) m = Munkres ( ) indices = m . compute ( dist_matrix ) for l1_idx , l2_idx in indices : dst = dist_matrix [ l1_idx ] [ l2_idx ] if dst > thresh : continue for eq_l2_idx , eq_val in enumerate ( dist_matrix [ l1_idx ] ) : if abs ( dst - eq_val ) < 1e-9 : equal_dist_matches . add ( ( l1_idx , eq_l2_idx ) ) for eq_l1_idx , eq_row in enumerate ( dist_matrix ) : if abs ( dst - eq_row [ l2_idx ] ) < 1e-9 : equal_dist_matches . add ( ( eq_l1_idx , l2_idx ) ) return [ ( l1 [ l1_idx ] , l2 [ l2_idx ] ) for l1_idx , l2_idx in equal_dist_matches ]
Matches two lists using the Munkres algorithm .
274
11
5,021
def add_suspect ( self , case_obj , variant_obj ) : new_suspect = Suspect ( case = case_obj , variant_id = variant_obj . variant_id , name = variant_obj . display_name ) self . session . add ( new_suspect ) self . save ( ) return new_suspect
Link a suspect to a case .
78
7
5,022
def delete_suspect ( self , suspect_id ) : suspect_obj = self . suspect ( suspect_id ) logger . debug ( "Deleting suspect {0}" . format ( suspect_obj . name ) ) self . session . delete ( suspect_obj ) self . save ( )
De - link a suspect from a case .
63
9
5,023
def configure_stream ( level = 'WARNING' ) : # get the root logger root_logger = logging . getLogger ( ) # set the logger level to the same as will be used by the handler root_logger . setLevel ( level ) # customize formatter, align each column template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s" formatter = logging . Formatter ( template ) # add a basic STDERR handler to the logger console = logging . StreamHandler ( ) console . setLevel ( level ) console . setFormatter ( formatter ) root_logger . addHandler ( console ) return root_logger
Configure root logger using a standard stream handler .
151
10
5,024
def _is_same_type_as_root ( self , obj ) : if not self . ALLOWS_SAME_TYPE_AS_ROOT_COLLECT : obj_model = get_model_from_instance ( obj ) obj_key = get_key_from_instance ( obj ) is_same_type_as_root = obj_model == self . root_obj_model and obj_key != self . root_obj_key if is_same_type_as_root : self . emit_event ( type = 'same_type_as_root' , obj = obj ) return is_same_type_as_root else : return False
Testing if we try to collect an object of the same type as root . This is not really a good sign because it means that we are going to collect a whole new tree that will maybe collect a new tree that will ...
145
45
5,025
def initialize ( self , data ) : for item in data : if hasattr ( self , item ) : setattr ( self , item , data [ item ] )
initialize variable from loaded data
34
6
5,026
def _add_transcripts ( self , variant_obj , info_dict ) : vep_string = info_dict . get ( 'CSQ' ) #Check if snpeff annotation: snpeff_string = info_dict . get ( 'ANN' ) # We check one of these. # VEP has presedence over snpeff if vep_string : #Get the vep annotations vep_info = get_vep_info ( vep_string = vep_string , vep_header = self . vep_header ) for transcript_info in vep_info : transcript = self . _get_vep_transcript ( transcript_info ) variant_obj . add_transcript ( transcript ) elif snpeff_string : #Get the vep annotations snpeff_info = get_snpeff_info ( snpeff_string = snpeff_string , snpeff_header = self . snpeff_header ) for transcript_info in snpeff_info : transcript = self . _get_snpeff_transcript ( transcript_info ) variant_obj . add_transcript ( transcript )
Return all transcripts sound in the vcf file
255
9
5,027
def _get_vep_transcript ( self , transcript_info ) : transcript = Transcript ( hgnc_symbol = transcript_info . get ( 'SYMBOL' ) , transcript_id = transcript_info . get ( 'Feature' ) , ensembl_id = transcript_info . get ( 'Gene' ) , biotype = transcript_info . get ( 'BIOTYPE' ) , consequence = transcript_info . get ( 'Consequence' ) , strand = transcript_info . get ( 'STRAND' ) , sift = transcript_info . get ( 'SIFT' ) , polyphen = transcript_info . get ( 'PolyPhen' ) , exon = transcript_info . get ( 'EXON' ) , HGVSc = transcript_info . get ( 'HGVSc' ) , HGVSp = transcript_info . get ( 'HGVSp' ) , GMAF = transcript_info . get ( 'GMAF' ) , ExAC_MAF = transcript_info . get ( 'ExAC_MAF' ) ) return transcript
Create a Transcript based on the vep annotation
239
9
5,028
def _get_snpeff_transcript ( self , transcript_info ) : transcript = Transcript ( hgnc_symbol = transcript_info . get ( 'Gene_Name' ) , transcript_id = transcript_info . get ( 'Feature' ) , ensembl_id = transcript_info . get ( 'Gene_ID' ) , biotype = transcript_info . get ( 'Transcript_BioType' ) , consequence = transcript_info . get ( 'Annotation' ) , exon = transcript_info . get ( 'Rank' ) , HGVSc = transcript_info . get ( 'HGVS.c' ) , HGVSp = transcript_info . get ( 'HGVS.p' ) ) return transcript
Create a transcript based on the snpeff annotation
162
10
5,029
def _makes_clone ( _func , * args , * * kw ) : self = args [ 0 ] . _clone ( ) _func ( self , * args [ 1 : ] , * * kw ) return self
A decorator that returns a clone of the current object so that we can re - use the object for similar requests .
48
24
5,030
def _handle_response ( self , response , data ) : # Content-Type headers can include additional parameters(RFC 1521), so # we split on ; to match against only the type/subtype if data and response . get ( 'content-type' , '' ) . split ( ';' ) [ 0 ] in ( 'application/json' , 'application/x-javascript' , 'text/javascript' , 'text/x-javascript' , 'text/x-json' ) : return json . loads ( data ) else : return data
Deserializes JSON if the content - type matches otherwise returns the response body as is .
117
18
5,031
def get_url ( self , * paths , * * params ) : path_stack = self . _attribute_stack [ : ] if paths : path_stack . extend ( paths ) u = self . _stack_collapser ( path_stack ) url = self . _url_template % { "domain" : self . _api_url , "generated_url" : u , } if self . _params or params : internal_params = self . _params . copy ( ) internal_params . update ( params ) url += self . _generate_params ( internal_params ) return url
Returns the URL for this request .
128
7
5,032
def _clone ( self ) : cls = self . __class__ q = cls . __new__ ( cls ) q . __dict__ = self . __dict__ . copy ( ) q . _params = self . _params . copy ( ) q . _headers = self . _headers . copy ( ) q . _attribute_stack = self . _attribute_stack [ : ] return q
Clones the state of the current operation .
86
9
5,033
def delete ( ctx , family_id , individual_id , root ) : root = root or ctx . obj . get ( 'root' ) or os . path . expanduser ( "~/.puzzle" ) if os . path . isfile ( root ) : logger . error ( "'root' can't be a file" ) ctx . abort ( ) logger . info ( "Root directory is: {}" . format ( root ) ) db_path = os . path . join ( root , 'puzzle_db.sqlite3' ) logger . info ( "db path is: {}" . format ( db_path ) ) if not os . path . exists ( db_path ) : logger . warn ( "database not initialized, run 'puzzle init'" ) ctx . abort ( ) store = SqlStore ( db_path ) if family_id : case_obj = store . case ( case_id = family_id ) if case_obj is None : logger . warning ( "Family {0} does not exist in database" . format ( family_id ) ) ctx . abort ( ) store . delete_case ( case_obj ) elif individual_id : ind_obj = store . individual ( ind_id = individual_id ) if ind_obj . ind_id != individual_id : logger . warning ( "Individual {0} does not exist in database" . format ( individual_id ) ) ctx . abort ( ) store . delete_individual ( ind_obj ) else : logger . warning ( "Please provide a family or individual id" ) ctx . abort ( )
Delete a case or individual from the database .
345
9
5,034
def variants ( case_id ) : filters = parse_filters ( ) values = [ value for key , value in iteritems ( filters ) if not isinstance ( value , dict ) and key != 'skip' ] is_active = any ( values ) variants , nr_of_variants = app . db . variants ( case_id , skip = filters [ 'skip' ] , filters = { 'gene_ids' : filters [ 'gene_symbols' ] , 'frequency' : filters . get ( 'frequency' ) , 'cadd' : filters . get ( 'cadd' ) , 'sv_len' : filters . get ( 'sv_len' ) , 'consequence' : filters [ 'selected_consequences' ] , 'genetic_models' : filters [ 'selected_models' ] , 'sv_types' : filters [ 'selected_sv_types' ] , 'gene_lists' : filters [ 'gene_lists' ] , 'impact_severities' : filters [ 'impact_severities' ] , 'gemini_query' : filters [ 'gemini_query' ] , 'range' : filters [ 'range' ] , } ) gene_lists = ( [ gene_list . list_id for gene_list in app . db . gene_lists ( ) ] if app . config [ 'STORE_ENABLED' ] else [ ] ) queries = ( [ ( query . name or query . query , query . query ) for query in app . db . gemini_queries ( ) ] if app . config [ 'STORE_ENABLED' ] else [ ] ) kwargs = dict ( variants = variants , case_id = case_id , db = app . db , filters = filters , consequences = SO_TERMS , inheritance_models = INHERITANCE_MODELS_SHORT , gene_lists = gene_lists , impact_severities = IMPACT_LEVELS , is_active = is_active , nr_of_variants = nr_of_variants , queries = queries ) if app . db . variant_type == 'sv' : return render_template ( 'sv_variants.html' , sv_types = SV_TYPES , * * kwargs ) else : return render_template ( 'variants.html' , * * kwargs )
Show all variants for a case .
520
7
5,035
def variant ( case_id , variant_id ) : case_obj = app . db . case ( case_id ) variant = app . db . variant ( case_id , variant_id ) if variant is None : return abort ( 404 , "variant not found" ) comments = app . db . comments ( variant_id = variant . md5 ) template = 'sv_variant.html' if app . db . variant_type == 'sv' else 'variant.html' return render_template ( template , variant = variant , case_id = case_id , comments = comments , case = case_obj )
Show a single variant .
134
5
5,036
def parse_filters ( ) : genes_str = request . args . get ( 'gene_symbol' ) filters = { } for key in ( 'frequency' , 'cadd' , 'sv_len' ) : try : filters [ key ] = float ( request . args . get ( key ) ) except ( ValueError , TypeError ) : pass filters [ 'gene_symbols' ] = genes_str . split ( ',' ) if genes_str else None filters [ 'selected_models' ] = request . args . getlist ( 'inheritance_models' ) filters [ 'selected_consequences' ] = request . args . getlist ( 'consequences' ) filters [ 'selected_sv_types' ] = request . args . getlist ( 'sv_types' ) filters [ 'skip' ] = int ( request . args . get ( 'skip' , 0 ) ) filters [ 'gene_lists' ] = request . args . getlist ( 'gene_lists' ) filters [ 'gemini_query' ] = ( request . args . get ( 'gemini_query' ) or request . args . get ( 'preset_gemini_query' ) ) filters [ 'impact_severities' ] = request . args . getlist ( 'impact_severities' ) filters [ 'range' ] = None if request . args . get ( 'range' ) : chromosome , raw_pos = request . args . get ( 'range' ) . split ( ':' ) start , end = map ( int , raw_pos . split ( '-' ) ) filters [ 'range' ] = { 'chromosome' : chromosome , 'start' : start , 'end' : end } filters [ 'query_dict' ] = { key : request . args . getlist ( key ) for key in request . args . keys ( ) } filters [ 'query_dict' ] . update ( { 'skip' : ( filters [ 'skip' ] + 30 ) } ) return filters
Parse variant filters from the request object .
439
9
5,037
def suspects ( case_id , variant_id ) : case_obj = app . db . case ( case_id ) variant_obj = app . db . variant ( case_id , variant_id ) app . db . add_suspect ( case_obj , variant_obj ) return redirect ( request . referrer )
Pin a variant as a suspect for a given case .
70
11
5,038
def queries ( ) : query = request . form [ 'query' ] name = request . form . get ( 'name' ) app . db . add_gemini_query ( name , query ) return redirect ( request . referrer )
Store a new GEMINI query .
50
9
5,039
def load ( fp , object_pairs_hook = dict ) : return object_pairs_hook ( ( k , v ) for k , v , _ in parse ( fp ) if k is not None )
Parse the contents of the ~io . IOBase . readline - supporting file - like object fp as a simple line - oriented . properties file and return a dict of the key - value pairs .
47
43
5,040
def loads ( s , object_pairs_hook = dict ) : fp = BytesIO ( s ) if isinstance ( s , binary_type ) else StringIO ( s ) return load ( fp , object_pairs_hook = object_pairs_hook )
Parse the contents of the string s as a simple line - oriented . properties file and return a dict of the key - value pairs .
60
28
5,041
def _extractClipData ( self , audioClipSpec , showLogs = False ) : command = [ self . _ffmpegPath ] if not showLogs : command += [ '-nostats' , '-loglevel' , '0' ] command += [ '-i' , self . _audioFilePath , '-ss' , '%.3f' % audioClipSpec . start , '-t' , '%.3f' % audioClipSpec . duration ( ) , '-c' , 'copy' , '-map' , '0' , '-acodec' , 'libmp3lame' , '-ab' , '128k' , '-f' , 'mp3' ] # Add clip TEXT as metadata and set a few more to default metadata = { self . _textMetadataName : audioClipSpec . text } for k , v in metadata . items ( ) : command . append ( '-metadata' ) command . append ( "{}='{}'" . format ( k , v ) ) command . append ( 'pipe:1' ) return subprocess . check_output ( command )
Extracts a single clip according to audioClipSpec .
254
13
5,042
def add_phenotype ( self , ind_obj , phenotype_id ) : if phenotype_id . startswith ( 'HP:' ) or len ( phenotype_id ) == 7 : logger . debug ( 'querying on HPO term' ) hpo_results = phizz . query_hpo ( [ phenotype_id ] ) else : logger . debug ( 'querying on OMIM term' ) hpo_results = phizz . query_disease ( [ phenotype_id ] ) added_terms = [ ] if hpo_results else None existing_ids = set ( term . phenotype_id for term in ind_obj . phenotypes ) for result in hpo_results : if result [ 'hpo_term' ] not in existing_ids : term = PhenotypeTerm ( phenotype_id = result [ 'hpo_term' ] , description = result [ 'description' ] ) logger . info ( 'adding new HPO term: %s' , term . phenotype_id ) ind_obj . phenotypes . append ( term ) added_terms . append ( term ) logger . debug ( 'storing new HPO terms' ) self . save ( ) if added_terms is not None and len ( added_terms ) > 0 : for case_obj in ind_obj . cases : self . update_hpolist ( case_obj ) return added_terms
Add a phenotype term to the case .
296
8
5,043
def update_hpolist ( self , case_obj ) : hpo_list = self . case_genelist ( case_obj ) hpo_results = hpo_genes ( case_obj . phenotype_ids ( ) , * self . phenomizer_auth ) if hpo_results is None : pass # Why raise here? # raise RuntimeError("couldn't link to genes, try again") else : gene_ids = [ result [ 'gene_id' ] for result in hpo_results if result [ 'gene_id' ] ] hpo_list . gene_ids = gene_ids self . save ( )
Update the HPO gene list for a case based on current terms .
140
14
5,044
def remove_phenotype ( self , ind_obj , phenotypes = None ) : if phenotypes is None : logger . info ( "delete all phenotypes related to %s" , ind_obj . ind_id ) self . query ( PhenotypeTerm ) . filter_by ( ind_id = ind_obj . id ) . delete ( ) else : for term in ind_obj . phenotypes : if term . phenotype_id in phenotypes : logger . info ( "delete phenotype: %s from %s" , term . phenotype_id , ind_obj . ind_id ) self . session . delete ( term ) logger . debug ( 'persist removals' ) self . save ( ) for case_obj in ind_obj . cases : self . update_hpolist ( case_obj )
Remove multiple phenotypes from an individual .
173
8
5,045
def match ( mode_lst : list , obj : 'object that has __destruct__ method' ) : # noinspection PyUnresolvedReferences try : # noinspection PyUnresolvedReferences structure = obj . __destruct__ ( ) except AttributeError : return False n = len ( mode_lst ) if n > len ( structure ) : return False for i in range ( n ) : mode = mode_lst [ i ] # noinspection PyUnresolvedReferences elem = obj [ i ] if isinstance ( mode , PatternList ) : if not match ( mode , elem ) : return False elif mode is P : # noinspection PyUnresolvedReferences mode_lst [ i ] = elem elif mode is any : pass elif mode != elem : return False return True
>>> from Redy . ADT . Core import match data P >>> from Redy . ADT . traits import ConsInd Discrete >>>
176
28
5,046
def gemini_query ( self , query_id ) : logger . debug ( "Looking for query with id {0}" . format ( query_id ) ) return self . query ( GeminiQuery ) . filter_by ( id = query_id ) . first ( )
Return a gemini query
57
5
5,047
def add_gemini_query ( self , name , query ) : logger . info ( "Adding query {0} with text {1}" . format ( name , query ) ) new_query = GeminiQuery ( name = name , query = query ) self . session . add ( new_query ) self . save ( ) return new_query
Add a user defined gemini query
72
7
5,048
def delete_gemini_query ( self , query_id ) : query_obj = self . gemini_query ( query_id ) logger . debug ( "Delete query: {0}" . format ( query_obj . name_query ) ) self . session . delete ( query_obj ) self . save ( )
Delete a gemini query
68
5
5,049
def distance_to ( self , point , unit = 'km' ) : assert isinstance ( point , GeoPoint ) , ( 'Other point should also be a Point instance.' ) if self == point : return 0.0 coefficient = 69.09 theta = self . longitude - point . longitude unit = unit . lower ( ) if unit else None distance = math . degrees ( math . acos ( math . sin ( self . rad_latitude ) * math . sin ( point . rad_latitude ) + math . cos ( self . rad_latitude ) * math . cos ( point . rad_latitude ) * math . cos ( math . radians ( theta ) ) ) ) * coefficient if unit == 'km' : return utils . mi_to_km ( distance ) return distance
Calculate distance in miles or kilometers between current and other passed point .
172
15
5,050
def rad_latitude ( self ) : if self . _rad_latitude is None : self . _rad_latitude = math . radians ( self . latitude ) return self . _rad_latitude
Lazy conversion degrees latitude to radians .
45
9
5,051
def rad_longitude ( self ) : if self . _rad_longitude is None : self . _rad_longitude = math . radians ( self . longitude ) return self . _rad_longitude
Lazy conversion degrees longitude to radians .
46
10
5,052
def send ( term , stream ) : payload = erlang . term_to_binary ( term ) header = struct . pack ( '!I' , len ( payload ) ) stream . write ( header ) stream . write ( payload ) stream . flush ( )
Write an Erlang term to an output stream .
54
10
5,053
def recv ( stream ) : header = stream . read ( 4 ) if len ( header ) != 4 : return None # EOF ( length , ) = struct . unpack ( '!I' , header ) payload = stream . read ( length ) if len ( payload ) != length : return None term = erlang . binary_to_term ( payload ) return term
Read an Erlang term from an input stream .
78
10
5,054
def recv_loop ( stream ) : message = recv ( stream ) while message : yield message message = recv ( stream )
Yield Erlang terms from an input stream .
28
10
5,055
def _add_genotype_calls ( self , variant_obj , variant_line , case_obj ) : variant_line = variant_line . split ( '\t' ) #if there is gt calls we have no individuals to add if len ( variant_line ) > 8 : gt_format = variant_line [ 8 ] . split ( ':' ) for individual in case_obj . individuals : sample_id = individual . ind_id index = individual . ind_index gt_call = variant_line [ 9 + index ] . split ( ':' ) raw_call = dict ( zip ( gt_format , gt_call ) ) genotype = Genotype ( * * raw_call ) variant_obj . add_individual ( puzzle_genotype ( sample_id = sample_id , genotype = genotype . genotype , case_id = case_obj . name , phenotype = individual . phenotype , ref_depth = genotype . ref_depth , alt_depth = genotype . alt_depth , genotype_quality = genotype . genotype_quality , depth = genotype . depth_of_coverage , supporting_evidence = genotype . supporting_evidence , pe_support = genotype . pe_support , sr_support = genotype . sr_support , ) )
Add the genotype calls for the variant
283
8
5,056
def with_prefix ( self , root_path ) : return Conflict ( self . conflict_type , root_path + self . path , self . body )
Returns a new conflict with a prepended prefix as a path .
33
13
5,057
def to_json ( self ) : # map ConflictType to json-patch operator path = self . path if self . conflict_type in ( 'REORDER' , 'SET_FIELD' ) : op = 'replace' elif self . conflict_type in ( 'MANUAL_MERGE' , 'ADD_BACK_TO_HEAD' ) : op = 'add' path += ( '-' , ) elif self . conflict_type == 'REMOVE_FIELD' : op = 'remove' else : raise ValueError ( 'Conflict Type %s can not be mapped to a json-patch operation' % conflict_type ) # stringify path array json_pointer = '/' + '/' . join ( str ( el ) for el in path ) conflict_values = force_list ( self . body ) conflicts = [ ] for value in conflict_values : if value is not None or self . conflict_type == 'REMOVE_FIELD' : conflicts . append ( { 'path' : json_pointer , 'op' : op , 'value' : value , '$type' : self . conflict_type } ) return json . dumps ( conflicts )
Deserializes conflict to a JSON object .
250
9
5,058
def dump ( props , fp , separator = '=' , comments = None , timestamp = True , sort_keys = False ) : if comments is not None : print ( to_comment ( comments ) , file = fp ) if timestamp is not None and timestamp is not False : print ( to_comment ( java_timestamp ( timestamp ) ) , file = fp ) for k , v in itemize ( props , sort_keys = sort_keys ) : print ( join_key_value ( k , v , separator ) , file = fp )
Write a series of key - value pairs to a file in simple line - oriented . properties format .
120
20
5,059
def dumps ( props , separator = '=' , comments = None , timestamp = True , sort_keys = False ) : s = StringIO ( ) dump ( props , s , separator = separator , comments = comments , timestamp = timestamp , sort_keys = sort_keys ) return s . getvalue ( )
Convert a series of key - value pairs to a text string in simple line - oriented . properties format .
67
22
5,060
def join_key_value ( key , value , separator = '=' ) : # Escapes `key` and `value` the same way as java.util.Properties.store() return escape ( key ) + separator + re . sub ( r'^ +' , lambda m : r'\ ' * m . end ( ) , _base_escape ( value ) )
r Join a key and value together into a single line suitable for adding to a simple line - oriented . properties file . No trailing newline is added .
82
31
5,061
def GetPlaylists ( self , start , max_count , order , reversed ) : cv = convert2dbus return self . iface . GetPlaylists ( cv ( start , 'u' ) , cv ( max_count , 'u' ) , cv ( order , 's' ) , cv ( reversed , 'b' ) )
Gets a set of playlists .
77
8
5,062
def init_app ( self , app ) : self . algorithm = app . config . get ( 'HASHING_METHOD' , 'sha256' ) if self . algorithm not in algs : raise ValueError ( '{} not one of {}' . format ( self . algorithm , algs ) ) self . rounds = app . config . get ( 'HASHING_ROUNDS' , 1 ) if not isinstance ( self . rounds , int ) : raise TypeError ( 'HASHING_ROUNDS must be type int' )
Initializes the Flask application with this extension . It grabs the necessary configuration values from app . config those being HASHING_METHOD and HASHING_ROUNDS . HASHING_METHOD defaults to sha256 but can be any one of hashlib . algorithms . HASHING_ROUNDS specifies the number of times to hash the input with the specified algorithm . This defaults to 1 .
117
82
5,063
def hash_value ( self , value , salt = '' ) : def hashit ( value , salt ) : h = hashlib . new ( self . algorithm ) tgt = salt + value h . update ( tgt ) return h . hexdigest ( ) def fix_unicode ( value ) : if VER < 3 and isinstance ( value , unicode ) : value = str ( value ) elif VER >= 3 and isinstance ( value , str ) : value = str . encode ( value ) return value salt = fix_unicode ( salt ) for i in range ( self . rounds ) : value = fix_unicode ( value ) value = hashit ( value , salt ) return value
Hashes the specified value combined with the specified salt . The hash is done HASHING_ROUNDS times as specified by the application configuration .
147
30
5,064
def check_value ( self , value_hash , value , salt = '' ) : h = self . hash_value ( value , salt = salt ) return h == value_hash
Checks the specified hash value against the hash of the provided salt and value .
38
16
5,065
def AddTrack ( self , uri , after_track , set_as_current ) : self . iface . AddTrack ( uri , convert2dbus ( after_track , 'o' ) , convert2dbus ( set_as_current , 'b' ) )
Adds a URI in the TrackList .
61
8
5,066
def delete_gene ( self , * gene_ids ) : self . gene_ids = [ gene_id for gene_id in self . gene_ids if gene_id not in gene_ids ]
Delete one or more gene ids form the list .
44
11
5,067
def healthy_update_timer ( self ) : state = None if self . update_status_timer and self . update_status_timer . is_alive ( ) : _LOGGER . debug ( "Timer: healthy" ) state = True else : _LOGGER . debug ( "Timer: not healthy" ) state = False return state
Check state of update timer .
72
6
5,068
def initialize ( self ) : self . network_status = self . get_network_status ( ) self . name = self . network_status . get ( 'network_name' , 'Unknown' ) self . location_info = self . get_location_info ( ) self . device_info = self . get_device_info ( ) self . device_id = ( self . device_info . get ( 'device_id' ) if self . device_info else "Unknown" ) self . initialize_socket ( ) self . initialize_worker ( ) self . initialize_zones ( )
initialize the object
127
4
5,069
def initialize_socket ( self ) : try : _LOGGER . debug ( "Trying to open socket." ) self . _socket = socket . socket ( socket . AF_INET , # IPv4 socket . SOCK_DGRAM # UDP ) self . _socket . bind ( ( '' , self . _udp_port ) ) except socket . error as err : raise err else : _LOGGER . debug ( "Socket open." ) socket_thread = threading . Thread ( name = "SocketThread" , target = socket_worker , args = ( self . _socket , self . messages , ) ) socket_thread . setDaemon ( True ) socket_thread . start ( )
initialize the socket
148
4
5,070
def initialize_worker ( self ) : worker_thread = threading . Thread ( name = "WorkerThread" , target = message_worker , args = ( self , ) ) worker_thread . setDaemon ( True ) worker_thread . start ( )
initialize the worker thread
55
5
5,071
def initialize_zones ( self ) : zone_list = self . location_info . get ( 'zone_list' , { 'main' : True } ) for zone_id in zone_list : if zone_list [ zone_id ] : # Location setup is valid self . zones [ zone_id ] = Zone ( self , zone_id = zone_id ) else : # Location setup is not valid _LOGGER . debug ( "Ignoring zone: %s" , zone_id )
initialize receiver zones
107
4
5,072
def handle_status ( self ) : status = self . get_status ( ) if status : # Update main-zone self . zones [ 'main' ] . update_status ( status )
Handle status from device
40
4
5,073
def handle_netusb ( self , message ) : # _LOGGER.debug("message: {}".format(message)) needs_update = 0 if self . _yamaha : if 'play_info_updated' in message : play_info = self . get_play_info ( ) # _LOGGER.debug(play_info) if play_info : new_media_status = MediaStatus ( play_info , self . _ip_address ) if self . _yamaha . media_status != new_media_status : # we need to send an update upwards self . _yamaha . new_media_status ( new_media_status ) needs_update += 1 playback = play_info . get ( 'playback' ) # _LOGGER.debug("Playback: {}".format(playback)) if playback == "play" : new_status = STATE_PLAYING elif playback == "stop" : new_status = STATE_IDLE elif playback == "pause" : new_status = STATE_PAUSED else : new_status = STATE_UNKNOWN if self . _yamaha . status is not new_status : _LOGGER . debug ( "playback: %s" , new_status ) self . _yamaha . status = new_status needs_update += 1 return needs_update
Handles netusb in message
291
6
5,074
def handle_features ( self , device_features ) : self . device_features = device_features if device_features and 'zone' in device_features : for zone in device_features [ 'zone' ] : zone_id = zone . get ( 'id' ) if zone_id in self . zones : _LOGGER . debug ( "handle_features: %s" , zone_id ) input_list = zone . get ( 'input_list' , [ ] ) input_list . sort ( ) self . zones [ zone_id ] . source_list = input_list
Handles features of the device
126
6
5,075
def handle_event ( self , message ) : # _LOGGER.debug(message) needs_update = 0 for zone in self . zones : if zone in message : _LOGGER . debug ( "Received message for zone: %s" , zone ) self . zones [ zone ] . update_status ( message [ zone ] ) if 'netusb' in message : needs_update += self . handle_netusb ( message [ 'netusb' ] ) if needs_update > 0 : _LOGGER . debug ( "needs_update: %d" , needs_update ) self . update_hass ( )
Dispatch all event messages
132
4
5,076
def update_status ( self , reset = False ) : if self . healthy_update_timer and not reset : return # get device features only once if not self . device_features : self . handle_features ( self . get_features ( ) ) # Get status from device to register/keep alive UDP self . handle_status ( ) # Schedule next execution self . setup_update_timer ( )
Update device status .
84
4
5,077
def setup_update_timer ( self , reset = False ) : _LOGGER . debug ( "Timer: firing again in %d seconds" , self . _interval ) self . update_status_timer = threading . Timer ( self . _interval , self . update_status , [ True ] ) self . update_status_timer . setDaemon ( True ) self . update_status_timer . start ( )
Schedule a Timer Thread .
92
7
5,078
def set_playback ( self , playback ) : req_url = ENDPOINTS [ "setPlayback" ] . format ( self . _ip_address ) params = { "playback" : playback } return request ( req_url , params = params )
Send Playback command .
58
5
5,079
def build_gemini_query ( self , query , extra_info ) : if 'WHERE' in query : return "{0} AND {1}" . format ( query , extra_info ) else : return "{0} WHERE {1}" . format ( query , extra_info )
Append sql to a gemini query
60
8
5,080
def variants ( self , case_id , skip = 0 , count = 1000 , filters = None ) : filters = filters or { } logger . debug ( "Looking for variants in {0}" . format ( case_id ) ) limit = count + skip gemini_query = filters . get ( 'gemini_query' ) or "SELECT * from variants v" any_filter = False if filters . get ( 'frequency' ) : frequency = filters [ 'frequency' ] extra_info = "(v.max_aaf_all < {0} or v.max_aaf_all is" " Null)" . format ( frequency ) gemini_query = self . build_gemini_query ( gemini_query , extra_info ) if filters . get ( 'cadd' ) : cadd_score = filters [ 'cadd' ] extra_info = "(v.cadd_scaled > {0})" . format ( cadd_score ) gemini_query = self . build_gemini_query ( gemini_query , extra_info ) if filters . get ( 'gene_ids' ) : gene_list = [ gene_id . strip ( ) for gene_id in filters [ 'gene_ids' ] ] gene_string = "v.gene in (" for index , gene_id in enumerate ( gene_list ) : if index == 0 : gene_string += "'{0}'" . format ( gene_id ) else : gene_string += ", '{0}'" . format ( gene_id ) gene_string += ")" gemini_query = self . build_gemini_query ( gemini_query , gene_string ) if filters . get ( 'range' ) : chrom = filters [ 'range' ] [ 'chromosome' ] if not chrom . startswith ( 'chr' ) : chrom = "chr{0}" . format ( chrom ) range_string = "v.chrom = '{0}' AND " "((v.start BETWEEN {1} AND {2}) OR " "(v.end BETWEEN {1} AND {2}))" . format ( chrom , filters [ 'range' ] [ 'start' ] , filters [ 'range' ] [ 'end' ] ) gemini_query = self . build_gemini_query ( gemini_query , range_string ) filtered_variants = self . _variants ( case_id = case_id , gemini_query = gemini_query , ) if filters . get ( 'consequence' ) : consequences = set ( filters [ 'consequence' ] ) filtered_variants = ( variant for variant in filtered_variants if set ( variant . consequences ) . intersection ( consequences ) ) if filters . get ( 'impact_severities' ) : severities = set ( [ severity . strip ( ) for severity in filters [ 'impact_severities' ] ] ) new_filtered_variants = [ ] filtered_variants = ( variant for variant in filtered_variants if set ( [ variant . impact_severity ] ) . intersection ( severities ) ) if filters . get ( 'sv_len' ) : sv_len = int ( filters [ 'sv_len' ] ) filtered_variants = ( variant for variant in filtered_variants if variant . sv_len >= sv_len ) variants = [ ] for index , variant_obj in enumerate ( filtered_variants ) : if index >= skip : if index < limit : variants . append ( variant_obj ) else : break return Results ( variants , len ( variants ) )
Return count variants for a case .
782
7
5,081
def _variants ( self , case_id , gemini_query ) : individuals = [ ] # Get the individuals for the case case_obj = self . case ( case_id ) for individual in case_obj . individuals : individuals . append ( individual ) self . db = case_obj . variant_source self . variant_type = case_obj . variant_type gq = GeminiQuery ( self . db ) gq . run ( gemini_query ) index = 0 for gemini_variant in gq : variant = None # Check if variant is non ref in the individuals is_variant = self . _is_variant ( gemini_variant , individuals ) if self . variant_type == 'snv' and not is_variant : variant = None else : index += 1 logger . debug ( "Updating index to: {0}" . format ( index ) ) variant = self . _format_variant ( case_id = case_id , gemini_variant = gemini_variant , individual_objs = individuals , index = index ) if variant : yield variant
Return variants found in the gemini database
237
8
5,082
def _format_variant ( self , case_id , gemini_variant , individual_objs , index = 0 , add_all_info = False ) : chrom = gemini_variant [ 'chrom' ] if chrom . startswith ( 'chr' ) or chrom . startswith ( 'CHR' ) : chrom = chrom [ 3 : ] variant_dict = { 'CHROM' : chrom , 'POS' : str ( gemini_variant [ 'start' ] ) , 'ID' : gemini_variant [ 'rs_ids' ] , 'REF' : gemini_variant [ 'ref' ] , 'ALT' : gemini_variant [ 'alt' ] , 'QUAL' : gemini_variant [ 'qual' ] , 'FILTER' : gemini_variant [ 'filter' ] } variant = Variant ( * * variant_dict ) # Use the gemini id for fast search variant . update_variant_id ( gemini_variant [ 'variant_id' ] ) logger . debug ( "Creating a variant object of variant {0}" . format ( variant . variant_id ) ) variant [ 'index' ] = index # Add the most severe consequence self . _add_most_severe_consequence ( variant , gemini_variant ) #Add the impact severity self . _add_impact_severity ( variant , gemini_variant ) ### POSITON ANNOATTIONS ### variant . start = int ( gemini_variant [ 'start' ] ) variant . stop = int ( gemini_variant [ 'end' ] ) #Add the sv specific coordinates if self . variant_type == 'sv' : variant . sv_type = gemini_variant [ 'sub_type' ] variant . stop = int ( gemini_variant [ 'end' ] ) self . _add_sv_coordinates ( variant ) else : ### Consequence and region annotations #Add the transcript information self . _add_transcripts ( variant , gemini_variant ) self . _add_thousand_g ( variant , gemini_variant ) self . _add_exac ( variant , gemini_variant ) self . _add_gmaf ( variant , gemini_variant ) #### Check the impact annotations #### if gemini_variant [ 'cadd_scaled' ] : variant . cadd_score = gemini_variant [ 'cadd_scaled' ] # We use the prediction in text polyphen = gemini_variant [ 'polyphen_pred' ] if polyphen : variant . add_severity ( 'Polyphen' , polyphen ) # We use the prediction in text sift = gemini_variant [ 'sift_pred' ] if sift : variant . add_severity ( 'SIFT' , sift ) #Add the genes based on the hgnc symbols self . _add_hgnc_symbols ( variant ) if self . variant_type == 'snv' : self . _add_genes ( variant ) self . _add_consequences ( variant ) ### GENOTYPE ANNOATTIONS ### #Get the genotype info if add_all_info : self . _add_genotypes ( variant , gemini_variant , case_id , individual_objs ) if self . variant_type == 'sv' : self . _add_genes ( variant ) return variant
Make a puzzle variant from a gemini variant
758
9
5,083
def _is_variant ( self , gemini_variant , ind_objs ) : indexes = ( ind . ind_index for ind in ind_objs ) #Check if any individual have a heterozygous or homozygous variant call for index in indexes : gt_call = gemini_variant [ 'gt_types' ] [ index ] if ( gt_call == 1 or gt_call == 3 ) : return True return False
Check if the variant is a variation in any of the individuals
99
12
5,084
def is_affected ( self ) : phenotype = self . phenotype if phenotype == '1' : return False elif phenotype == '2' : return True else : return False
Boolean for telling if the sample is affected .
36
10
5,085
def gene_list ( self , list_id ) : return self . query ( GeneList ) . filter_by ( list_id = list_id ) . first ( )
Get a gene list from the database .
37
8
5,086
def add_genelist ( self , list_id , gene_ids , case_obj = None ) : new_genelist = GeneList ( list_id = list_id ) new_genelist . gene_ids = gene_ids if case_obj : new_genelist . cases . append ( case_obj ) self . session . add ( new_genelist ) self . save ( ) return new_genelist
Create a new gene list and optionally link to cases .
90
11
5,087
def remove_genelist ( self , list_id , case_obj = None ) : gene_list = self . gene_list ( list_id ) if case_obj : # remove a single link between case and gene list case_ids = [ case_obj . id ] else : # remove all links and the list itself case_ids = [ case . id for case in gene_list . cases ] self . session . delete ( gene_list ) case_links = self . query ( CaseGenelistLink ) . filter ( CaseGenelistLink . case_id . in_ ( case_ids ) , CaseGenelistLink . genelist_id == gene_list . id ) for case_link in case_links : self . session . delete ( case_link ) self . save ( )
Remove a gene list and links to cases .
169
9
5,088
def case_genelist ( self , case_obj ) : list_id = "{}-HPO" . format ( case_obj . case_id ) gene_list = self . gene_list ( list_id ) if gene_list is None : gene_list = GeneList ( list_id = list_id ) case_obj . gene_lists . append ( gene_list ) self . session . add ( gene_list ) return gene_list
Get or create a new case specific gene list record .
98
11
5,089
def add_bigger_box ( self ) : start1 = "width='" + str ( int ( self . molecule . molsize1 ) ) + "px' height='" + str ( int ( self . molecule . molsize2 ) ) + "px' >" start2 = "<rect style='opacity:1.0;fill:#FFFFFF;stroke:none' width='" + str ( int ( self . molecule . molsize1 ) ) + "' height='" + str ( int ( self . molecule . molsize2 ) ) + "' x='0' y='0'> </rect>" bigger_box = "width='100%' height='100%' viewbox='0 0 " + str ( int ( self . molecule . x_dim ) ) + " " + str ( int ( self . molecule . y_dim ) ) + "' > " big_box2 = "<rect style='opacity:1.0;fill:white;stroke:none' width='" + str ( int ( self . molecule . x_dim ) ) + "px' height='" + str ( int ( self . molecule . y_dim ) ) + "px' x='0' y='0'> </rect> <g id='molecularDrawing' transform='translate(" + str ( ( self . molecule . x_dim - self . molecule . molsize1 ) / 2 ) + "," + str ( ( self . molecule . y_dim - self . molecule . molsize2 ) / 2 ) + ")'>'<rect style='opacity:1.0;fill:#ffffff;stroke:none' width='" + str ( self . molecule . molsize1 ) + "' height='" + str ( self . molecule . molsize2 ) + "' x='0' y='0' /> " self . end_symbol = "</svg>" no_end_symbol = "</g>" #Make the lines in molecule drawing thicker to look better with the large plots linewidth1 = "stroke-width:2px" linewidth2 = "stroke-width:5px" self . change_lines_in_svg ( "molecule.svg" , linewidth1 , linewidth2 ) self . change_lines_in_svg ( "molecule.svg" , start1 , bigger_box ) self . change_lines_in_svg ( "molecule.svg" , start2 , big_box2 ) self . change_lines_in_svg ( "molecule.svg" , self . end_symbol , no_end_symbol ) with open ( "molecule.svg" , "r" ) as f : lines = f . readlines ( ) self . filestart = " " . join ( map ( str , lines [ 0 : 8 ] ) ) self . draw_molecule = "" . join ( map ( str , lines [ 8 : ] ) ) f . close ( )
Sets the size of the figure by expanding the space of molecule . svg file . These dimension have been previously determined . Also makes the lines of the molecule thicker .
656
34
5,090
def extend_with ( func ) : if not func . __name__ in ArgParseInator . _plugins : ArgParseInator . _plugins [ func . __name__ ] = func
Extends with class or function
42
6
5,091
def arg ( * args , * * kwargs ) : def decorate ( func ) : """ Decorate """ # we'll set the command name with the passed cmd_name argument, if # exist, else the command name will be the function name func . __cmd_name__ = kwargs . pop ( 'cmd_name' , getattr ( func , '__cmd_name__' , func . __name__ ) ) # retrieve the class (SillyClass) func . __cls__ = utils . check_class ( ) if not hasattr ( func , '__arguments__' ) : # if the funcion hasn't the __arguments__ yet, we'll setup them # using get_functarguments. func . __arguments__ = utils . get_functarguments ( func ) if len ( args ) or len ( kwargs ) : # if we have some argument or keyword argument # we'll try to get the destination name from the kwargs ('dest') # else we'll use the last arg name as destination arg_name = kwargs . get ( 'dest' , args [ - 1 ] . lstrip ( '-' ) . replace ( '-' , '_' ) ) try : # we try to get the command index. idx = func . __named__ . index ( arg_name ) # and delete it from the named list del func . __named__ [ idx ] # and delete it from the arguments list del func . __arguments__ [ idx ] except ValueError : pass # append the args and kwargs to the function arguments list func . __arguments__ . append ( ( args , kwargs , ) ) if func . __cls__ is None and isinstance ( func , types . FunctionType ) : # if the function don't have a class and is a FunctionType # we'll add it directly to he commands list. ap_ = ArgParseInator ( skip_init = True ) if func . __cmd_name__ not in ap_ . commands : # we'll add it if not exists ap_ . commands [ func . __cmd_name__ ] = func return func return decorate
Dcorates a function or a class method to add to the argument parser
467
15
5,092
def class_args ( cls ) : # get the Singleton ap_ = ArgParseInator ( skip_init = True ) # collect special vars (really need?) utils . collect_appendvars ( ap_ , cls ) # set class reference cls . __cls__ = cls cmds = { } # get eventual class arguments cls . __arguments__ = getattr ( cls , '__arguments__' , [ ] ) # cycle through class functions for func in [ f for f in cls . __dict__ . values ( ) if hasattr ( f , '__cmd_name__' ) and not inspect . isclass ( f ) ] : # clear subcommands func . __subcommands__ = None # set the parent class func . __cls__ = cls # assign to commands dict cmds [ func . __cmd_name__ ] = func if hasattr ( cls , '__cmd_name__' ) and cls . __cmd_name__ not in ap_ . commands : # if che class has the __cmd_name__ attribute and is not already present # in the ArgParseInator commands # set the class subcommands cls . __subcommands__ = cmds # add the class as ArgParseInator command ap_ . commands [ cls . __cmd_name__ ] = cls else : # else if we don't have a __cmd_name__ # we will add all the functions directly to the ArgParseInator commands # if it don't already exists. for name , func in cmds . items ( ) : if name not in ap_ . commands : ap_ . commands [ name ] = func return cls
Decorates a class to handle the arguments parser .
370
11
5,093
def cmd_auth ( auth_phrase = None ) : def decorate ( func ) : """ decorates the funcion """ # get the Singleton ap_ = ArgParseInator ( skip_init = True ) # set the authorization name auth_name = id ( func ) if auth_phrase is None : # if we don't have a specific auth_phrase we set the # **authorization needed** to True ap_ . auths [ auth_name ] = True else : # else if we have a specific auth_phrase we set it for the # command authorization ap_ . auths [ auth_name ] = str ( auth_phrase ) return func return decorate
set authorization for command or subcommand .
142
8
5,094
def parse_args ( self ) : # compile the parser self . _compile ( ) # clear the args self . args = None self . _self_event ( 'before_parse' , 'parse' , * sys . argv [ 1 : ] , * * { } ) # list commands/subcommands in argv cmds = [ cmd for cmd in sys . argv [ 1 : ] if not cmd . startswith ( "-" ) ] if ( len ( cmds ) > 0 and not utils . check_help ( ) and self . default_cmd and cmds [ 0 ] not in self . commands ) : # if we have at least one command which is not an help command # and we have a default command and the first command in arguments # is not in commands we insert the default command as second # argument (actually the first command) sys . argv . insert ( 1 , self . default_cmd ) # let's parse the arguments self . args = self . parser . parse_args ( ) # set up the output. if self . args : # if we have some arguments if self . add_output and self . args . output is not None : # If add_output is True and we have an output file # setup the encoding self . encoding = self . args . encoding if self . args . encoding . lower ( ) == 'raw' : # if we have passed a raw encoding we will write directly # to the output file. self . _output = open ( self . args . output , self . args . write_mode ) else : # else we will use the codecs module to write to the # output file. import codecs self . _output = codecs . open ( self . args . output , self . args . write_mode , encoding = self . args . encoding ) if self . _cfg_factory : # if we have a config factory setup the config file with the # right param self . cfg_file = self . args . config # now is parsed. self . _is_parsed = True return self
Parse our arguments .
438
5
5,095
def check_auth ( self , name ) : if name in self . auths : # if the command name is in the **need authorization list** # get the authorization for the command auth = self . auths [ name ] if self . args . auth is None : # if we didn't pass the authorization phrase raise the # appropriate exception raise exceptions . ArgParseInatorAuthorizationRequired elif ( ( auth is True and self . args . auth != self . auth_phrase ) or ( auth is not True and self . args . auth != auth ) ) : # else if the authorization phrase is wrong raise exceptions . ArgParseInatorNotValidAuthorization return True
Check the authorization for the command
140
6
5,096
def check_command ( self , * * new_attributes ) : # let's parse arguments if we didn't before. if not self . _is_parsed : self . parse_args ( ) if not self . commands : # if we don't have commands raise an Exception raise exceptions . ArgParseInatorNoCommandsFound elif self . _single : # if we have a single function we get it directly func = self . _single else : if not self . args . command : self . parser . error ( "too few arguments" ) # get the right command func = self . commands [ self . args . command ] if hasattr ( func , '__subcommands__' ) and func . __subcommands__ : # if we have subcommands get the command from them command = func . __subcommands__ [ self . args . subcommand ] else : # else the command IS the function command = func # get the command name self . cmd_name = command . __cmd_name__ # check authorization if not self . check_auth ( id ( command ) ) : return 0 # let's execute the command. return self . _execute ( func , command , * * new_attributes )
Check if was passed a valid action in the command line and if so executes it by passing parameters and returning the result .
260
24
5,097
def _call_event ( self , event_name , cmd , pargs , kwargs , * * kws ) : def get_result_params ( res ) : """return the right list of params""" if not isinstance ( res , ( list , tuple ) ) : return res , pargs , kwargs elif len ( res ) == 2 : return res , pargs , kwargs return res [ 0 ] , ( pargs [ 0 ] , ) + tuple ( res [ 1 ] ) , kwargs if hasattr ( cmd , event_name ) : return get_result_params ( getattr ( cmd , event_name ) ( pargs [ 0 ] , * pargs [ 1 : ] , * * kwargs ) ) elif hasattr ( cmd . __cls__ , event_name ) : return get_result_params ( getattr ( cmd . __cls__ , event_name ) ( pargs [ 0 ] , cmd . __cmd_name__ or cmd . __name__ , * pargs [ 1 : ] , * * kwargs ) ) return None , pargs , kwargs
Try to call events for cmd .
244
7
5,098
def _self_event ( self , event_name , cmd , * pargs , * * kwargs ) : if hasattr ( self , event_name ) : getattr ( self , event_name ) ( cmd , * pargs , * * kwargs )
Call self event
58
3
5,099
def write ( self , * string ) : self . _output . write ( ' ' . join ( [ six . text_type ( s ) for s in string ] ) ) return self
Writes to the output
39
5