idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
222,500
def get_all_edge_nodes(self):
    """Return the set of all GO IDs that appear in any edge (is_a or relationship)."""
    nodes = {node for edge in self.edges for node in edge}
    for rel_edges in self.edges_rel.values():
        nodes.update(node for edge in rel_edges for node in edge)
    return nodes
Return the set of all GO IDs that are connected to edges .
79
13
222,501
def chk_edges(self):
    """Check that every edge endpoint exists in the local GO subset."""
    goids = set(self.go2obj)
    # is_a edges first, then each optional relationship's edges
    self.chk_edges_nodes(self.edges, goids, "is_a")
    for reltype, rel_edges in self.edges_rel.items():
        self.chk_edges_nodes(rel_edges, goids, reltype)
Check that all edge nodes exist in local subset .
80
10
222,502
def chk_edges_nodes(edges, nodes, name):
    """Assert that every node referenced by *edges* is present in *nodes*."""
    edge_nodes = {node for edge in edges for node in edge}
    missing_nodes = edge_nodes.difference(nodes)
    assert not missing_nodes, "MISSING: {GOs}\n{NM} EDGES MISSING {N} NODES (OF {T})".format(
        NM=name, N=len(missing_nodes), T=len(edge_nodes), GOs=missing_nodes)
Check that user specified edges have a node which exists .
119
11
222,503
def get_c2ps(self):
    """Build a child-to-parents dict from this set of is_a edges."""
    c2ps = defaultdict(set)
    for child, parent in self.edges:
        c2ps[child].add(parent)
    return c2ps
Set child2parents dict for all parents used in this set of edges .
55
15
222,504
def _init_edges_relationships ( rel2src2dsts , rel2dst2srcs ) : edge_rel2fromto = { } relationships = set ( rel2src2dsts ) . union ( rel2dst2srcs ) for reltype in relationships : edge_from_to = [ ] if reltype in rel2src2dsts : for parent , children in rel2src2dsts [ reltype ] . items ( ) : for child in children : edge_from_to . append ( ( child , parent ) ) if reltype in rel2dst2srcs : for parent , children in rel2dst2srcs [ reltype ] . items ( ) : for child in children : edge_from_to . append ( ( child , parent ) ) edge_rel2fromto [ reltype ] = edge_from_to return edge_rel2fromto
Get the directed edges from GO term to GO term using relationships .
196
13
222,505
def _traverse_relationship_objs ( self , rel2src2dsts , goobj_child , goids_seen ) : child_id = goobj_child . id goids_seen . add ( child_id ) ##A self.go2obj[child_id] = goobj_child # Update goids_seen and go2obj with child alt_ids for goid_altid in goobj_child . alt_ids : goids_seen . add ( goid_altid ) ##A self.go2obj[goid_altid] = goobj_child # Loop through relationships of child object for reltype , recs in goobj_child . relationship . items ( ) : if reltype in self . relationships : for relationship_obj in recs : relationship_id = relationship_obj . id rel2src2dsts [ reltype ] [ relationship_id ] . add ( child_id ) # If relationship has not been seen, traverse if relationship_id not in goids_seen : self . _traverse_relationship_objs ( rel2src2dsts , relationship_obj , goids_seen )
Traverse from source GO up relationships .
252
8
222,506
def _init_rel2dst2srcs ( self , go_sources , traverse_child ) : if not traverse_child or not self . relationships : return { } rel2dst2srcs = { r : defaultdict ( set ) for r in self . relationships } goids_seen = set ( ) go2obj = self . go2obj for goid_src in go_sources : goobj_src = go2obj [ goid_src ] if goid_src not in goids_seen : self . _traverse_relationship_rev_objs ( rel2dst2srcs , goobj_src , goids_seen ) return rel2dst2srcs
Traverse through reverse relationships .
155
6
222,507
def _init_c2ps ( self , go_sources , traverse_child ) : if not traverse_child : return { } c2ps = defaultdict ( set ) goids_seen = set ( ) go2obj = self . go2obj for goid_src in go_sources : goobj_src = go2obj [ goid_src ] if goid_src not in goids_seen : ##F self._traverse_child_objs(c2ps, goobj_src, go2obj, goids_seen) self . _traverse_child_objs ( c2ps , goobj_src , goids_seen ) return c2ps
Traverse up children .
151
5
222,508
def chk(self, annotations, fout_err):
    """Validate field quantities for every annotation; log any violations.

    Returns True when no illegal lines were found.
    """
    for idx, ntd in enumerate(annotations):
        self._chk_fld(ntd, "Qualifier")        # optional: 0 or more
        self._chk_fld(ntd, "DB_Reference", 1)  # required: 1 or more
        self._chk_fld(ntd, "With_From")        # optional: 0 or more
        self._chk_fld(ntd, "DB_Name", 0, 1)    # optional: 0 or 1
        self._chk_fld(ntd, "DB_Synonym")       # optional: 0 or more
        self._chk_fld(ntd, "Taxon", 1, 2)      # required: 1 or 2
        flds = list(ntd)
        self._chk_qty_eq_1(flds)
        # self._chk_qualifier(ntd.Qualifier, flds, idx)
        if not ntd.Taxon or len(ntd.Taxon) not in {1, 2}:
            self.illegal_lines['BAD TAXON'].append(
                (idx, '**{I}) TAXON: {NT}'.format(I=idx, NT=ntd)))
    if self.illegal_lines:
        self.prt_error_summary(fout_err)
    return not self.illegal_lines
Check annotations .
318
3
222,509
def get_gafvals(self, line):
    """Convert one GAF 2.0/2.1 line's fields from strings to parsed values."""
    flds = line.split('\t')
    flds[3] = self._get_qualifier(flds[3])   # 3  Qualifier
    flds[5] = self._get_set(flds[5])         # 5  DB_Reference
    flds[7] = self._get_set(flds[7])         # 7  With_From
    flds[8] = self.aspect2ns[flds[8]]        # 8  GAF Aspect -> BP, MF, or CC
    flds[9] = self._get_set(flds[9])         # 9  DB_Name
    flds[10] = self._get_set(flds[10])       # 10 DB_Synonym
    flds[12] = self._do_taxons(flds[12])     # 12 Taxon
    flds[13] = GET_DATE_YYYYMMDD(flds[13])   # 13 Date, e.g. 20190406
    # Version 2.x has additional fields not found in v1.0
    if self.is_long:
        flds[15] = get_extensions(flds[15])  # Extensions (Annotation_Extension)
        flds[16] = self._get_set(flds[16].rstrip())
    else:
        flds[14] = self._get_set(flds[14].rstrip())
    return flds
Convert fields from string to preferred format for GAF ver 2 . 1 and 2 . 0 .
380
20
222,510
def _get_qualifier ( val ) : quals = set ( ) if val == '' : return quals for val in val . split ( '|' ) : val = val . lower ( ) quals . add ( val if val != 'not' else 'NOT' ) return quals
Get qualifiers . Correct for inconsistent capitalization in GAF files
63
12
222,511
def _chk_fld ( self , ntd , name , qty_min = 0 , qty_max = None ) : vals = getattr ( ntd , name ) num_vals = len ( vals ) if num_vals < qty_min : self . illegal_lines [ 'MIN QTY' ] . append ( ( - 1 , "FIELD({F}): MIN QUANTITY({Q}) WASN'T MET: {V}" . format ( F = name , Q = qty_min , V = vals ) ) ) if qty_max is not None : if num_vals > qty_max : self . illegal_lines [ 'MAX QTY' ] . append ( ( - 1 , "FIELD({F}): MAX QUANTITY({Q}) EXCEEDED: {V}\n{NT}" . format ( F = name , Q = qty_max , V = vals , NT = ntd ) ) )
Further split a GAF value within a single field .
211
11
222,512
def prt_line_detail(self, prt, line):
    """Split a tab-delimited line and print each field with its header."""
    self._prt_line_detail(prt, line.split('\t'))
Print line header and values in a readable format .
42
10
222,513
def _prt_line_detail ( self , prt , values , lnum = "" ) : #### data = zip(self.req_str, self.ntgafobj._fields, values) data = zip ( self . req_str , self . flds , values ) txt = [ "{:2}) {:3} {:20} {}" . format ( i , req , hdr , val ) for i , ( req , hdr , val ) in enumerate ( data ) ] prt . write ( "{LNUM}\n{TXT}\n" . format ( LNUM = lnum , TXT = "\n" . join ( txt ) ) )
Print header and field values in a readable format .
148
10
222,514
def prt_error_summary(self, fout_err):
    """Summarize ignored/illegal GAF lines, write a detail log, and echo counts."""
    # Gather a count line per error category
    errcnts = []
    if self.ignored:
        errcnts.append(" {N:9,} IGNORED associations\n".format(N=len(self.ignored)))
    if self.illegal_lines:
        for err_name, errors in self.illegal_lines.items():
            errcnts.append(" {N:9,} {ERROR}\n".format(N=len(errors), ERROR=err_name))
    # Save error details into a log file, then echo the summary to stdout
    fout_log = self._wrlog_details_illegal_gaf(fout_err, errcnts)
    sys.stdout.write(" WROTE GAF ERROR LOG: {LOG}:\n".format(LOG=fout_log))
    for err_cnt in errcnts:
        sys.stdout.write(err_cnt)
Print a summary about the GAF file that was read .
221
12
222,515
def _wrlog_details_illegal_gaf ( self , fout_err , err_cnts ) : # fout_err = "{}.log".format(fin_gaf) gaf_base = os . path . basename ( fout_err ) with open ( fout_err , 'w' ) as prt : prt . write ( "ILLEGAL GAF ERROR SUMMARY:\n\n" ) for err_cnt in err_cnts : prt . write ( err_cnt ) prt . write ( "\n\nILLEGAL GAF ERROR DETAILS:\n\n" ) for lnum , line in self . ignored : prt . write ( "**WARNING: GAF LINE IGNORED: {FIN}[{LNUM}]:\n{L}\n" . format ( FIN = gaf_base , L = line , LNUM = lnum ) ) self . prt_line_detail ( prt , line ) prt . write ( "\n\n" ) for error , lines in self . illegal_lines . items ( ) : for lnum , line in lines : prt . write ( "**WARNING: GAF LINE ILLEGAL({ERR}): {FIN}[{LNUM}]:\n{L}\n" . format ( ERR = error , FIN = gaf_base , L = line , LNUM = lnum ) ) self . prt_line_detail ( prt , line ) prt . write ( "\n\n" ) return fout_err
Print details regarding illegal GAF lines seen to a log file .
346
13
222,516
def get_relationship_dicts(self):
    """Summarize relationships per GO ID; returns None when none are loaded."""
    if not self.relationships:
        return None
    for goid, goobj in self.go2obj.items():
        for reltyp, relset in goobj.relationship.items():
            relfwd_goids = {obj.id for obj in relset}
            # for relfwd_goid in relfwd_goids:
            #     assert relfwd_goid in self.go2obj, "{GO} {REL} NOT FOUND {GO_R}".format(
            #         GO=goid, REL=reltyp, GO_R=relfwd_goid)
            # NOTE(review): debug output retained to preserve behavior
            print("CountRelativesInit RELLLLS", goid, goobj.id, reltyp, relfwd_goids)
Given GO DAG relationships return summaries per GO ID .
172
12
222,517
def get_goone2ntletter(self, go2dcnt, depth2goobjs):
    """Assign a letter to each depth-01 GO term, ordered by descendant count."""
    # 1. Group level-01/depth-01 GO terms by namespace
    ns2dcntgoobj = cx.defaultdict(list)
    for goobj in depth2goobjs[1]:
        ns2dcntgoobj[goobj.namespace].append((go2dcnt[goobj.id], goobj))
    # 2. Assign letters to level-01/depth-01 GO terms
    go2nt = {}
    ntobj = cx.namedtuple("NtGoLetters", "D1 dcnt goobj")
    _go2abc = self.go2letter
    letters = list(chain(range(ord('A'), ord('Z') + 1),
                         range(ord('a'), ord('z') + 1)))
    for list_dcnt_goobj in ns2dcntgoobj.values():
        # Largest descendant counts get the earliest letters
        for letter_idx, (dcnt, goobj) in enumerate(
                sorted(list_dcnt_goobj, key=lambda t: t[0], reverse=True)):
            letter = (chr(letters[letter_idx]) if _go2abc is None
                      else _go2abc.get(goobj.id, ''))
            go2nt[goobj.id] = ntobj._make([letter, dcnt, goobj])
    return go2nt
Assign letters to depth - 01 GO terms ordered using descendants cnt .
335
15
222,518
def _init_goslims ( self , dagslim ) : go2obj_main = self . gosubdag . go2obj go2obj_slim = { go for go , o in dagslim . items ( ) if go in go2obj_main } if self . gosubdag . relationships : return self . _get_goslimids_norel ( go2obj_slim ) return set ( dagslim . keys ( ) )
Get GO IDs in GO slims .
107
8
222,519
def _get_goslimids_norel ( self , dagslim ) : go_slims = set ( ) go2obj = self . gosubdag . go2obj for goid in dagslim : goobj = go2obj [ goid ] if not goobj . relationship : go_slims . add ( goobj . id ) return go_slims
Get all GO slim GO IDs that do not have a relationship .
88
13
222,520
def get_gosubdag(gosubdag=None):
    """Return a GoSubDag initialized for use by a Grouper object."""
    if gosubdag is None:
        # No subdag given: build one from the full GO DAG with counts
        return GoSubDag(None, get_godag(), rcntobj=True)
    if gosubdag.rcntobj is None:
        # Missing descendant-count helper: initialize auxiliary objects
        gosubdag.init_auxobjs()
    return gosubdag
Gets a GoSubDag initialized for use by a Grouper object .
99
17
222,521
def getfnc_qual_ev(self):
    """Look up the annotation-filtering function for the current settings."""
    nd_not = self.nd_not2desc[(self._keep_nd, self._keep_not)]
    incexc = self.incexc2num[(self.include_evcodes is not None,
                              self.exclude_evcodes is not None)]
    return self.param2fnc[(nd_not, incexc)]
Keep annotation if it passes the potentially modified selection .
89
11
222,522
def get_kws(self):
    """Return user keyword args, adding a default GO-name shortener if requested."""
    ret = self.kws['dict'].copy()
    act_set = self.kws['set']
    # 'shorten' without an explicit naming function gets the default shortener
    if 'shorten' in act_set and 'goobj2fncname' not in ret:
        ret['goobj2fncname'] = ShortenText().get_short_plot_name
    return ret
Only load keywords if they are specified by the user .
86
11
222,523
def get_node(self, goid, goobj):
    """Build a rounded, filled pydot Node for one GO term."""
    # pydot.Node.obj_dict holds this information, e.g. pydot.Node.obj_dict['name']
    return pydot.Node(
        self.get_node_text(goid, goobj),
        shape="box",
        style="rounded, filled",
        fillcolor=self.go2color.get(goid, "white"),
        color=self.objcolor.get_bordercolor(goid))
Return pydot node .
111
6
222,524
def str_fmthdr(self, goid, goobj):
    """Return the abbreviated GO ID shown in a GO term box (e.g. G0007608)."""
    # Shorten: Ex: GO:0007608 -> G0007608
    go_txt = goid.replace("GO:", "G")
    # Alternate GO IDs get a trailing 'a' when requested
    if 'mark_alt_id' in self.present and goid != goobj.id:
        go_txt += 'a'
    return go_txt
Return hdr line seen inside a GO Term box .
81
11
222,525
def _get_prtflds ( self ) : # User-specified print fields ntflds = self . gosubdag . prt_attr [ 'flds' ] prt_flds = self . kws . get ( 'prt_flds' ) if prt_flds : return prt_flds . intersection ( ntflds ) exclude = set ( ) # Default print fields if self . gosubdag . relationships : exclude . add ( 'level' ) return set ( f for f in ntflds if f not in exclude )
Get print fields for GO header .
133
7
222,526
def _get_hdr_childcnt ( self , goobj , ntgo ) : if 'childcnt' in self . present : return "c{N}" . format ( N = len ( goobj . children ) ) elif self . gosubdag . relationships and not goobj . children and ntgo . dcnt != 0 : return "c0"
Get string representing count of children for this GO term .
83
11
222,527
def _add_parent_cnt ( self , hdr , goobj , c2ps ) : if goobj . id in c2ps : parents = c2ps [ goobj . id ] if 'prt_pcnt' in self . present or parents and len ( goobj . parents ) != len ( parents ) : assert len ( goobj . parents ) == len ( set ( goobj . parents ) ) hdr . append ( "p{N}" . format ( N = len ( set ( goobj . parents ) ) ) )
Add the parent count to the GO term box if not all parents are plotted .
117
17
222,528
def prt_summary_anno2ev(self, prt=sys.stdout):
    """Note that this annotation format carries no evidence codes."""
    prt.write('**NOTE: No evidence codes in associations: {F}\n'.format(F=self.filename))
Print a summary of all Evidence Codes seen in annotations
52
10
222,529
def count_terms(geneset, assoc, obo_dag):
    """Count, per GO term, how many study genes are annotated to it."""
    term_cnt = Counter()
    for gene in geneset:
        if gene not in assoc:
            continue
        for goid in assoc[gene]:
            # Skip GO IDs absent from the DAG; count by the main (non-alt) ID
            if goid in obo_dag:
                term_cnt[obo_dag[goid].id] += 1
    return term_cnt
count the number of terms in the study group
87
9
222,530
def get_terms(desc, geneset, assoc, obo_dag, log):
    """Map each GO term to the set of study genes annotated to it."""
    _chk_gene2go(assoc)
    term2itemids = defaultdict(set)
    genes = [gene for gene in geneset if gene in assoc]
    for gene in genes:
        for goid in assoc[gene]:
            # Skip GO IDs absent from the DAG; key by the main (non-alt) ID
            if goid in obo_dag:
                term2itemids[obo_dag[goid].id].add(gene)
    if log is not None:
        num_stu = len(genes)
        num_pop = len(geneset)
        perc = 100.0 * num_stu / num_pop if num_pop != 0 else 0.0
        log.write("{P:3.0f}% {N:>6,} of {M:>6,} {DESC} items found in association\n".format(
            DESC=desc, N=num_stu, M=num_pop, P=perc))
    return term2itemids
Get the terms in the study group
226
7
222,531
def _chk_gene2go ( assoc ) : if not assoc : raise RuntimeError ( "NO ITEMS FOUND IN ASSOCIATIONS {A}" . format ( A = assoc ) ) for key in assoc : if isinstance ( key , str ) and key [ : 3 ] == "GO:" : raise Exception ( "ASSOCIATIONS EXPECTED TO BE gene2go, NOT go2gene: {EX}" . format ( EX = assoc . items ( ) [ : 2 ] ) ) return
Check that associations is gene2go not go2gene .
115
13
222,532
def _init_usrgos ( self , goids ) : usrgos = set ( ) goids_missing = set ( ) _go2obj = self . gosubdag . go2obj for goid in goids : if goid in _go2obj : usrgos . add ( goid ) else : goids_missing . add ( goid ) if goids_missing : print ( "MISSING GO IDs: {GOs}" . format ( GOs = goids_missing ) ) print ( "{N} of {M} GO IDs ARE MISSING" . format ( N = len ( goids_missing ) , M = len ( goids ) ) ) return usrgos
Return user GO IDs which have GO Terms .
154
9
222,533
def get_gos_all(self):
    """Return a flat set of all GO IDs (headers and user GOs) in this grouping."""
    # Header GO IDs plus the user GO IDs grouped under them
    gos_all = set(self.hdrgo2usrgos.keys())
    for usrgos in self.hdrgo2usrgos.values():
        gos_all |= usrgos
    # User GO IDs that are header GOs in groups containing no other user GO IDs
    gos_all |= self.hdrgo_is_usrgo
    assert gos_all == self.usrgos.union(set(self.hdrgo2usrgos.keys()))
    assert len(self.usrgos.difference(gos_all)) == 0, "GROUPER ERROR: {GOs}".format(
        GOs=self.usrgos.difference(gos_all))
    return gos_all
Return a flat list of all GO IDs in grouping object .
206
12
222,534
def _init_h2us(self, fnc_most_specific):
    """Group user GO IDs under their high (header) GO terms.

    Sets self.hdrgo2usrgos and self.hdrgo_is_usrgo.
    """
    # Header GO IDs are main; user GO IDs are as specified by the user
    hdrgo2usrgos = cx.defaultdict(set)
    # User GO IDs which are also header GO IDs, plus user main GO if needed
    hdrgo_is_usrgo = set()
    _go2nt = self.gosubdag.go2nt
    objhi = GrouperInit.GetGoidHigh(
        self.gosubdag, self.hdrobj.hdrgos, self.most_specific_fncs[fnc_most_specific])
    for goid_usr in self.usrgos:
        # Resolve alt IDs to the main GO ID before finding the high GO
        goid_main = _go2nt[goid_usr].id
        goid_high = objhi.get_goid_high(goid_main)
        if goid_main != goid_high:
            hdrgo2usrgos[goid_high].add(goid_usr)
        elif goid_high not in hdrgo2usrgos:
            hdrgo2usrgos[goid_high] = set()
        if goid_main == goid_high:
            hdrgo_is_usrgo.add(goid_main)
            if goid_main != goid_usr:
                hdrgo_is_usrgo.add(goid_usr)
    # Initialize data members
    self.hdrgo2usrgos = hdrgo2usrgos
    self.hdrgo_is_usrgo = hdrgo_is_usrgo
Given a set of user GO ids return GO ids grouped under the GO high terms .
387
19
222,535
def get_go2nt(self, usr_go2nt):
    """Combine user namedtuple fields, GO object fields, and format_txt."""
    gos_all = self.get_gos_all()
    # Minimum namedtuple fields needed by Sorter on grouped GO IDs
    prt_flds_all = get_hdridx_flds() + self.gosubdag.prt_attr['flds']
    if not usr_go2nt:
        return self.__init_go2nt_dflt(gos_all, prt_flds_all)
    usr_nt_flds = next(iter(usr_go2nt.values()))._fields
    # If the user namedtuple already has every needed field, augment and return it
    if len(set(prt_flds_all).difference(usr_nt_flds)) == 0:
        return self._init_go2nt_aug(usr_go2nt)
    # Otherwise, combine user fields and default Sorter fields
    return self.__init_go2nt_w_usr(gos_all, usr_go2nt, prt_flds_all)
Combine user namedtuple fields GO object fields and format_txt .
269
15
222,536
def _init_go2nt_aug ( self , go2nt ) : go2obj = self . gosubdag . go2obj # Get alt GO IDs go2nt_aug = { } # NOW for goid_usr , nt_usr in go2nt . items ( ) : goobj = go2obj [ goid_usr ] if goobj . alt_ids : alts = set ( goobj . alt_ids ) alts . add ( goobj . id ) for goid_alt in alts : if goid_alt not in go2nt : go2nt_aug [ goid_alt ] = nt_usr # WAS # Add alt GO IDs to go2nt for goid , gont in go2nt_aug . items ( ) : go2nt [ goid ] = gont return go2nt
Augment go2nt with GO ID key to account for alt GO IDs .
186
16
222,537
def _get_go2nthdridx(self, gos_all):
    """Build a GO-ID -> NtHdrIdx dict for user GO IDs and their parent GO IDs.

    NtHdrIdx namedtuple fields:
      * format_txt: used to determine the format when writing Excel cells
      * hdr_idx: value printed in an Excel cell
    """
    obj = GrouperInit.NtMaker(self)
    return {goid: obj.get_nt(goid) for goid in gos_all}
Get GO IDs header index for each user GO ID and corresponding parent GO IDs .
133
16
222,538
def _init_go2obj ( self , * * kws ) : if 'goids' in kws and 'obodag' in kws : self . godag . go_sources = kws [ 'goids' ] obo = kws [ 'obodag' ] for goid in self . godag . go_sources : self . godag . go2obj [ goid ] = obo [ goid ] elif 'goid2goobj' in kws : goid2goobj = kws [ 'goid2goobj' ] self . godag . go_sources = goid2goobj . keys ( ) for goid , goobj in goid2goobj . items ( ) : self . godag . go2obj [ goid ] = goobj elif 'goea_results' in kws : goea_results = kws [ 'goea_results' ] self . godag . go_sources = [ rec . GO for rec in goea_results ] self . godag . go2obj = { rec . GO : rec . goterm for rec in goea_results }
Initialize go2obj in small dag for source gos .
253
13
222,539
def _init ( self ) : for goid in self . godag . go_sources : goobj = self . godag . go2obj [ goid ] self . godag . go2obj [ goid ] = goobj # Traverse up parents if self . traverse_parent and goid not in self . seen_cids : self . _traverse_parent_objs ( goobj ) # Traverse down children if self . traverse_child and goid not in self . seen_pids : self . _traverse_child_objs ( goobj )
Given GO ids and GOTerm objects create mini GO dag .
125
13
222,540
def prt_hier_rec(self, item_id, depth=1):
    """Print one hierarchy line for *item_id*, then recurse into its children."""
    # Shorten the report: only print user-specified GO terms which are connected
    if self.include_only and item_id not in self.include_only:
        return
    obj = self.id2obj[item_id]
    # Optionally space the top-level branches for readability
    if self.space_branches:
        if depth == 1 and obj.children:
            self.prt.write("\n")
    # Print marks if provided
    if self.item_marks:
        self.prt.write('{MARK} '.format(MARK=self.item_marks.get(item_id, self.mark_dflt)))
    no_repeat = self.concise_prt and item_id in self.items_printed
    # Print content
    dashes = self._str_dash(depth, no_repeat, obj)
    if self.do_prtfmt:
        self._prtfmt(item_id, dashes)
    else:
        self._prtstr(obj, dashes)
    self.items_printed.add(item_id)
    self.items_list.append(item_id)
    # Do not print a sub-hierarchy that has already been printed
    if no_repeat:
        return
    depth += 1
    if self.max_indent is not None and depth > self.max_indent:
        return
    children = obj.children if self.sortby is None else sorted(obj.children, key=self.sortby)
    for child in children:
        self.prt_hier_rec(child.item_id, depth)
Write hierarchy for a GO Term record and all GO IDs down to the leaf level .
360
17
222,541
def _init_item_marks ( item_marks ) : if isinstance ( item_marks , dict ) : return item_marks if item_marks : return { item_id : '>' for item_id in item_marks }
Initialize the marked item dict .
51
9
222,542
def _add_to_obj ( self , rec_curr , typedef_curr , line ) : if rec_curr is not None : self . _add_to_ref ( rec_curr , line ) else : add_to_typedef ( typedef_curr , line )
Add information on line to GOTerm or Typedef .
67
12
222,543
def _init_obo_version ( self , line ) : if line [ 0 : 14 ] == "format-version" : self . format_version = line [ 16 : - 1 ] if line [ 0 : 12 ] == "data-version" : self . data_version = line [ 14 : - 1 ]
Save obo version and release .
67
7
222,544
def _init_optional_attrs ( optional_attrs ) : if optional_attrs is None : return None opts = OboOptionalAttrs . get_optional_attrs ( optional_attrs ) if opts : return OboOptionalAttrs ( opts )
Create OboOptionalAttrs or return None .
60
10
222,545
def has_parent(self, term):
    """Return True if *term* is an ancestor of this GO term."""
    return any(par.item_id == term or par.has_parent(term) for par in self.parents)
Return True if this GO object has a parent GO ID .
39
12
222,546
def has_child(self, term):
    """Return True if *term* is a descendant of this GO term."""
    return any(child.item_id == term or child.has_child(term) for child in self.children)
Return True if this GO object has a child GO ID .
39
12
222,547
def get_all_parents(self):
    """Return the set of all ancestor GO IDs (transitive is_a parents)."""
    all_parents = set()
    for par in self.parents:
        all_parents.add(par.item_id)
        all_parents.update(par.get_all_parents())
    return all_parents
Return all parent GO IDs .
54
6
222,548
def get_all_upper(self):
    """Return all ancestor GO IDs through both is_a and all relationships."""
    all_upper = set()
    for upper in self.get_goterms_upper():
        all_upper.add(upper.item_id)
        all_upper.update(upper.get_all_upper())
    return all_upper
Return all parent GO IDs through both is_a and all relationships .
62
14
222,549
def get_all_children(self):
    """Return the set of all descendant GO IDs (transitive is_a children)."""
    all_children = set()
    for child in self.children:
        all_children.add(child.item_id)
        all_children.update(child.get_all_children())
    return all_children
Return all children GO IDs .
54
6
222,550
def get_all_lower(self):
    """Return all descendant GO IDs through both reverse is_a and all relationships."""
    all_lower = set()
    for lower in self.get_goterms_lower():
        all_lower.add(lower.item_id)
        all_lower.update(lower.get_all_lower())
    return all_lower
Return all parent GO IDs through both reverse is_a and all relationships .
62
15
222,551
def get_all_parent_edges(self):
    """Return (child_id, parent_id) tuples for every transitive parent edge."""
    edges = set()
    for par in self.parents:
        edges.add((self.item_id, par.item_id))
        edges |= par.get_all_parent_edges()
    return edges
Return tuples for all parent GO IDs containing current GO ID and parent GO ID .
80
17
222,552
def get_all_child_edges(self):
    """Return (child_id, parent_id) tuples for every transitive child edge."""
    edges = set()
    for child in self.children:
        edges.add((child.item_id, self.item_id))
        edges |= child.get_all_child_edges()
    return edges
Return tuples for all child GO IDs containing current GO ID and child GO ID .
80
17
222,553
def load_obo_file(self, obo_file, optional_attrs, load_obsolete, prt):
    """Read an obo file into this GODag; return a one-line description."""
    reader = OBOReader(obo_file, optional_attrs)
    # alt_ids and their corresponding main GO ID; added after terms populate
    alt2rec = {}
    for rec in reader:
        # Keep obsolete terms only when load_obsolete is requested
        if load_obsolete or not rec.is_obsolete:
            self[rec.item_id] = rec
            for alt in rec.alt_ids:
                alt2rec[alt] = rec
    # Save the typedefs and parsed optional_attrs
    # self.optobj = reader.optobj
    self.typedefs = reader.typedefs
    self._populate_terms(reader.optobj)
    self._set_level_depth(reader.optobj)
    # Point alt GO IDs at their main records
    for goid_alt, rec in alt2rec.items():
        self[goid_alt] = rec
    desc = self._str_desc(reader)
    if prt is not None:
        prt.write("{DESC}\n".format(DESC=desc))
    return desc
Read obo file . Store results .
290
8
222,554
def _str_desc ( self , reader ) : data_version = reader . data_version if data_version is not None : data_version = data_version . replace ( "releases/" , "" ) desc = "{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms" . format ( OBO = reader . obo_file , FMT = reader . format_version , REL = data_version , N = len ( self ) ) if reader . optobj : desc = "{D}; optional_attrs({A})" . format ( D = desc , A = " " . join ( sorted ( reader . optobj . optional_attrs ) ) ) return desc
String containing information about the current GO DAG .
151
10
222,555
def _populate_terms ( self , optobj ) : has_relationship = optobj is not None and 'relationship' in optobj . optional_attrs # Make parents and relationships references to the actual GO terms. for rec in self . values ( ) : # Given parent GO IDs, set parent GO Term objects rec . parents = set ( [ self [ goid ] for goid in rec . _parents ] ) # For each parent GO Term object, add it's child GO Term to the children data member for parent_rec in rec . parents : parent_rec . children . add ( rec ) if has_relationship : self . _populate_relationships ( rec )
Convert GO IDs to GO Term record objects . Populate children .
145
14
222,556
def _populate_relationships ( self , rec_curr ) : for relationship_type , goids in rec_curr . relationship . items ( ) : parent_recs = set ( [ self [ goid ] for goid in goids ] ) rec_curr . relationship [ relationship_type ] = parent_recs for parent_rec in parent_recs : if relationship_type not in parent_rec . relationship_rev : parent_rec . relationship_rev [ relationship_type ] = set ( [ rec_curr ] ) else : parent_rec . relationship_rev [ relationship_type ] . add ( rec_curr )
Convert GO IDs in relationships to GO Term record objects . Populate children .
140
16
222,557
def _set_level_depth ( self , optobj ) : has_relationship = optobj is not None and 'relationship' in optobj . optional_attrs def _init_level ( rec ) : if rec . level is None : if rec . parents : rec . level = min ( _init_level ( rec ) for rec in rec . parents ) + 1 else : rec . level = 0 return rec . level def _init_depth ( rec ) : if rec . depth is None : if rec . parents : rec . depth = max ( _init_depth ( rec ) for rec in rec . parents ) + 1 else : rec . depth = 0 return rec . depth def _init_reldepth ( rec ) : if not hasattr ( rec , 'reldepth' ) : up_terms = rec . get_goterms_upper ( ) if up_terms : rec . reldepth = max ( _init_reldepth ( rec ) for rec in up_terms ) + 1 else : rec . reldepth = 0 return rec . reldepth for rec in self . values ( ) : # Add invert relationships if has_relationship : if rec . depth is None : _init_reldepth ( rec ) # print("BBBBBBBBBBB1", rec.item_id, rec.relationship) #for (typedef, terms) in rec.relationship.items(): # invert_typedef = self.typedefs[typedef].inverse_of # # print("BBBBBBBBBBB2 {} ({}) ({}) ({})".format( # # rec.item_id, rec.relationship, typedef, invert_typedef)) # if invert_typedef: # # Add inverted relationship # for term in terms: # if not hasattr(term, 'relationship'): # term.relationship = defaultdict(set) # term.relationship[invert_typedef].add(rec) # print("BBBBBBBBBBB3", rec.item_id, rec.relationship) if rec . level is None : _init_level ( rec ) if rec . depth is None : _init_depth ( rec )
Set level depth and add inverted relationships .
491
8
222,558
def write_dag(self, out=sys.stdout):
    """Print all GO Term records, sorted, to *out*."""
    for rec in sorted(self.values()):
        print(rec, file=out)
Write info for all GO Terms in obo file sorted numerically .
36
14
222,559
def query_term(self, term, verbose=False):
    """Return the GO record for *term*, or None (with a warning) if absent."""
    if term not in self:
        sys.stderr.write("Term %s not found!\n" % term)
        return
    rec = self[term]
    if verbose:
        print(rec)
        sys.stderr.write("all parents: {}\n".format(repr(rec.get_all_parents())))
        sys.stderr.write("all children: {}\n".format(repr(rec.get_all_children())))
    return rec
Given a GO ID return GO object .
125
8
222,560
def label_wrap(self, label):
    """Return plot label text: the GO ID plus its name, commas made newlines."""
    name_txt = self[label].name.replace(",", r"\n")
    return r"%s\n%s" % (label, name_txt)
Label text for plot .
50
5
222,561
def make_graph_pygraphviz(self, recs, nodecolor, edgecolor, dpi,
                          draw_parents=True, draw_children=True):
    """Draw an AMIGO-style network lineage graph containing the query records.

    Returns a pygraphviz AGraph ready to be rendered.
    """
    import pygraphviz as pgv
    grph = pgv.AGraph(name="GO tree")
    edgeset = set()
    for rec in recs:
        if draw_parents:
            edgeset.update(rec.get_all_parent_edges())
        if draw_children:
            edgeset.update(rec.get_all_child_edges())
    edgeset = [(self.label_wrap(a), self.label_wrap(b)) for (a, b) in edgeset]
    # add nodes explicitly via add_node
    # adding nodes implicitly via add_edge misses nodes
    # without at least one edge
    for rec in recs:
        grph.add_node(self.label_wrap(rec.item_id))
    for src, target in edgeset:
        # default layout in graphviz is top->bottom, so we invert
        # the direction and plot using dir="back"
        grph.add_edge(target, src)
    grph.graph_attr.update(dpi="%d" % dpi)
    grph.node_attr.update(shape="box", style="rounded,filled",
                          fillcolor="beige", color=nodecolor)
    grph.edge_attr.update(shape="normal", color=edgecolor, dir="back", label="is_a")
    # highlight the query terms
    for rec in recs:
        try:
            node = grph.get_node(self.label_wrap(rec.item_id))
            node.attr.update(fillcolor="plum")
        # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
        # narrow to Exception (AGraph.get_node raises KeyError when absent).
        except Exception:
            continue
    return grph
Draw AMIGO style network lineage containing one query record .
394
12
222,562
def draw_lineage(self, recs, nodecolor="mediumseagreen", edgecolor="lightslateblue", dpi=96,
                 lineage_img="GO_lineage.png", engine="pygraphviz", gml=False,
                 draw_parents=True, draw_children=True):
    """Draw GO DAG subplot for the given records and write it to lineage_img.

    Optionally also writes a GML file (via networkx) when gml=True.
    """
    assert engine in GraphEngines
    if engine == "pygraphviz":
        grph = self.make_graph_pygraphviz(recs, nodecolor, edgecolor, dpi,
                                          draw_parents=draw_parents, draw_children=draw_children)
    else:
        grph = self.make_graph_pydot(recs, nodecolor, edgecolor, dpi,
                                     draw_parents=draw_parents, draw_children=draw_children)
    if gml:
        import networkx as nx  # use networkx to do the conversion
        gmlbase = lineage_img.rsplit(".", 1)[0]
        obj = nx.from_agraph(grph) if engine == "pygraphviz" else nx.from_pydot(grph)
        del obj.graph['node']
        del obj.graph['edge']
        gmlfile = gmlbase + ".gml"
        # FIX: previously passed self.label_wrap (a bound method) to write_gml;
        # the converted graph object is what must be serialized.
        nx.write_gml(obj, gmlfile)
        sys.stderr.write("GML graph written to {0}\n".format(gmlfile))
    sys.stderr.write(("lineage info for terms %s written to %s\n" %
                      ([rec.item_id for rec in recs], lineage_img)))
    if engine == "pygraphviz":
        grph.draw(lineage_img, prog="dot")
    else:
        grph.write_png(lineage_img)
Draw GO DAG subplot .
415
7
222,563
def _get_ntgpadvals(self, flds, add_ns):
    """Convert fields from string to preferred format for GPAD ver 2.1 and 2.0.

    Args:
        flds: raw tab-split GPAD field strings.
        add_ns: append the GO namespace abbreviation as a trailing value.
    Returns:
        list of converted values matching the namedtuple column order.
    """
    is_set = False
    qualifiers = self._get_qualifier(flds[2])
    assert flds[3][:3] == 'GO:', 'UNRECOGNIZED GO({GO})'.format(GO=flds[3])
    db_reference = self._rd_fld_vals("DB_Reference", flds[4], is_set, 1)
    # FIX: the error message previously reported flds[3] (the GO ID)
    # instead of flds[5] (the offending ECO value).
    assert flds[5][:4] == 'ECO:', 'UNRECOGNIZED ECO({ECO})'.format(ECO=flds[5])
    with_from = self._rd_fld_vals("With_From", flds[6], is_set)
    taxons = self._get_taxon(flds[7])
    assert flds[8].isdigit(), 'UNRECOGNIZED DATE({D})'.format(D=flds[8])
    assert flds[9], '"Assigned By" VALUE WAS NOT FOUND'
    props = self._get_properties(flds[11])
    self._chk_qty_eq_1(flds, [0, 1, 3, 5, 8, 9])
    # Additional Formatting
    self._chk_qualifier(qualifiers)
    # Create list of values
    eco = flds[5]
    goid = flds[3]
    gpadvals = [
        flds[0],                       # 0 DB
        flds[1],                       # 1 DB_ID
        qualifiers,                    # 3 Qualifier
        flds[3],                       # 4 GO_ID
        db_reference,                  # 5 DB_Reference
        eco,                           # 6 ECO
        ECO2GRP[eco],
        with_from,                     # 7 With_From
        taxons,                        # 12 Taxon
        GET_DATE_YYYYMMDD(flds[8]),    # 13 Date
        flds[9],                       # 14 Assigned_By
        get_extensions(flds[10]),      # 12 Extension
        props]                         # 12 Annotation_Properties
    if add_ns:
        goobj = self.godag.get(goid, '')
        gpadvals.append(NAMESPACE2NS[goobj.namespace] if goobj else '')
    return gpadvals
Convert fields from string to preferred format for GPAD ver 2 . 1 and 2 . 0 .
539
20
222,564
def _rd_fld_vals ( name , val , set_list_ft = True , qty_min = 0 , qty_max = None ) : if not val and qty_min == 0 : return [ ] if set_list_ft else set ( ) vals = val . split ( '|' ) # Use a pipe to separate entries num_vals = len ( vals ) assert num_vals >= qty_min , "FLD({F}): MIN QUANTITY({Q}) WASN'T MET: {V}" . format ( F = name , Q = qty_min , V = vals ) if qty_max is not None : assert num_vals <= qty_max , "FLD({F}): MAX QUANTITY({Q}) EXCEEDED: {V}" . format ( F = name , Q = qty_max , V = vals ) return vals if set_list_ft else set ( vals )
Further split a GPAD value within a single field .
214
11
222,565
def _get_taxon ( taxon ) : if not taxon : return None ## assert taxon[:6] == 'taxon:', 'UNRECOGNIZED Taxon({Taxon})'.format(Taxon=taxon) ## taxid = taxon[6:] ## assert taxon[:10] == 'NCBITaxon:', 'UNRECOGNIZED Taxon({Taxon})'.format(Taxon=taxon) ## taxid = taxon[10:] # Get tzxon number: taxon:9606 NCBITaxon:9606 sep = taxon . find ( ':' ) taxid = taxon [ sep + 1 : ] assert taxid . isdigit ( ) , "UNEXPECTED TAXON({T})" . format ( T = taxid ) return int ( taxid )
Return Interacting taxon ID | optional | 0 or 1 | gaf column 13 .
185
18
222,566
def _get_ntgpadnt ( self , ver , add_ns ) : hdrs = self . gpad_columns [ ver ] if add_ns : hdrs = hdrs + [ 'NS' ] return cx . namedtuple ( "ntgpadobj" , hdrs )
Create a namedtuple object for each annotation
68
9
222,567
def _split_line ( self , line ) : line = line . rstrip ( '\r\n' ) flds = re . split ( '\t' , line ) assert len ( flds ) == self . exp_numcol , "EXPECTED({E}) COLUMNS, ACTUAL({A}): {L}" . format ( E = self . exp_numcol , A = len ( flds ) , L = line ) return flds
Split line into field values .
104
6
222,568
def chkaddhdr(self, line):
    """If this line matches the header pattern, record its captured text."""
    match = self.cmpline.search(line)
    if match is not None:
        self.gpadhdr.append(match.group(1))
If this line contains desired header info save it .
46
10
222,569
def get_dict_w_id2nts(ids, id2nts, flds, dflt_null=""):
    """Return an OrderedDict of combined namedtuples, keyed by the given ids."""
    assert len(ids) == len(set(ids)), "NOT ALL IDs ARE UNIQUE: {IDs}".format(IDs=ids)
    assert len(flds) == len(set(flds)), "DUPLICATE FIELDS: {IDs}".format(
        IDs=cx.Counter(flds).most_common())
    # One namedtuple type shared by every combined record.
    ntobj = cx.namedtuple("Nt", " ".join(flds))
    id_nt_pairs = []
    for item_id in ids:
        # Merge the values found for this id across all source dicts.
        source_nts = [id2nt.get(item_id) for id2nt in id2nts]
        id_nt_pairs.append((item_id, ntobj._make(_combine_nt_vals(source_nts, flds, dflt_null))))
    return cx.OrderedDict(id_nt_pairs)
Return a new dict of namedtuples by combining dicts of namedtuples or objects .
286
19
222,570
def get_list_w_id2nts(ids, id2nts, flds, dflt_null=""):
    """Return a list of combined namedtuples, one per requested id."""
    # One namedtuple type shared by every combined record.
    ntobj = cx.namedtuple("Nt", " ".join(flds))
    return [
        ntobj._make(_combine_nt_vals([id2nt.get(item_id) for id2nt in id2nts], flds, dflt_null))
        for item_id in ids]
Return a new list of namedtuples by combining dicts of namedtuples or objects .
183
19
222,571
def combine_nt_lists(lists, flds, dflt_null=""):
    """Return a new list of namedtuples by zipping lists of namedtuples or objects."""
    # All input lists must be the same length so zip loses nothing.
    lens = [len(lst) for lst in lists]
    assert len(set(lens)) == 1, "LIST LENGTHS MUST BE EQUAL: {Ls}".format(
        Ls=" ".join(str(num) for num in lens))
    ntobj = cx.namedtuple("Nt", " ".join(flds))
    # Each zipped tuple holds the i-th element of every input list.
    return [ntobj._make(_combine_nt_vals(grouped, flds, dflt_null))
            for grouped in zip(*lists)]
Return a new list of namedtuples by zipping lists of namedtuples or objects .
218
19
222,572
def wr_py_nts(fout_py, nts, docstring=None, varname="nts"):
    """Save namedtuples into a Python module; no-op when nts is empty."""
    if not nts:
        return
    with open(fout_py, 'w') as prt:
        prt.write('"""{DOCSTRING}"""\n\n'.format(DOCSTRING=docstring))
        prt.write("# Created: {DATE}\n".format(DATE=str(datetime.date.today())))
        prt_nts(prt, nts, varname)
        sys.stdout.write(" {N:7,} items WROTE: {PY}\n".format(N=len(nts), PY=fout_py))
Save namedtuples into a Python module .
171
9
222,573
def prt_nts(prt, nts, varname, spc=' '):
    """Print namedtuples into a Python module written to the given stream."""
    nt_name = type(nts[0]).__name__
    prt.write("import collections as cx\n\n")
    # Field-name list, one quoted name per line.
    prt.write("NT_FIELDS = [\n")
    prt.write("".join('{SPC}"{F}",\n'.format(SPC=spc, F=fld) for fld in nts[0]._fields))
    prt.write("]\n\n")
    prt.write('{NtName} = cx.namedtuple("{NtName}", " ".join(NT_FIELDS))\n\n'.format(NtName=nt_name))
    prt.write("# {N:,} items\n".format(N=len(nts)))
    prt.write("# pylint: disable=line-too-long\n")
    # The data itself, one namedtuple repr per line.
    prt.write("{VARNAME} = [\n".format(VARNAME=varname))
    for ntup in nts:
        prt.write("{SPC}{NT},\n".format(SPC=spc, NT=ntup))
    prt.write("]\n")
Print namedtuples into a Python module .
310
9
222,574
def get_unique_fields(fld_lists):
    """Get unique namedtuple fields, in first-seen order, from lists of fields."""
    ordered = []
    seen = set()
    for fld in (f for flst in fld_lists for f in flst):
        if fld not in seen:
            seen.add(fld)
            ordered.append(fld)
    # Sanity check: ordered list covers every distinct field exactly once.
    assert len(ordered) == len(set(f for flst in fld_lists for f in flst))
    return ordered
Get unique namedtuple fields despite potential duplicates in lists of fields .
143
15
222,575
def _combine_nt_vals ( lst0_lstn , flds , dflt_null ) : vals = [ ] for fld in flds : fld_seen = False # Set field value using the **first** value seen in list of nt lists(lst0_lstn) for nt_curr in lst0_lstn : if hasattr ( nt_curr , fld ) : vals . append ( getattr ( nt_curr , fld ) ) fld_seen = True break # Set default value if GO ID or nt value is not present if fld_seen is False : vals . append ( dflt_null ) return vals
Given a list of lists of nts return a single namedtuple .
161
15
222,576
def get_go2obj(self, goids):
    """Return GO Terms for each user-specified GO ID; report missing GO IDs.

    Args:
        goids: set of requested GO IDs.
    Returns:
        dict mapping each GO ID present in self.go2obj to its GO term object.
    """
    goids_found = goids.intersection(self.go2obj.keys())
    # FIX: the original rebound `goids` and then compared it with itself
    # (len(goids) != len(goids)), so missing IDs were never reported.
    if len(goids_found) != len(goids):
        goids_missing = goids.difference(goids_found)
        print(" {N} MISSING GO IDs: {GOs}".format(N=len(goids_missing), GOs=goids_missing))
    return {go: self.go2obj[go] for go in goids_found}
Return GO Terms for each user - specified GO ID . Note missing GO IDs .
112
16
222,577
def no_duplicates_sections2d(sections2d, prt=None):
    """Check for duplicate header GO IDs in the 2-D sections variable."""
    ctr = cx.Counter(goid for _, hdrgos in sections2d for goid in hdrgos)
    no_dups = True
    for goid, cnt in ctr.most_common():
        if cnt == 1:
            break  # most_common is sorted, so everything after is unique too
        no_dups = False
        if prt is not None:
            prt.write("**SECTIONS WARNING FOUND: {N:3} {GO}\n".format(N=cnt, GO=goid))
    return no_dups
Check for duplicate header GO IDs in the 2 - D sections variable .
143
14
222,578
def get_evcodes(self, inc_set=None, exc_set=None):
    """Return the selected evidence codes, excluding 'ND' (No biological Data)."""
    return self.get_evcodes_all(inc_set, exc_set) - {'ND'}
Get evidence code for all but NOT No biological data
50
10
222,579
def get_evcodes_all(self, inc_set=None, exc_set=None):
    """Get the set of evidence codes given include and exclude sets."""
    if inc_set:
        codes = self._get_grps_n_codes(inc_set)
    else:
        # No include filter: start from every known code.
        codes = set(self.code2nt)
    if exc_set:
        codes -= self._get_grps_n_codes(exc_set)
    return codes
Get set of evidence codes given include set and exclude set
84
11
222,580
def _get_grps_n_codes ( self , usr_set ) : codes = usr_set . intersection ( self . code2nt ) for grp in usr_set . intersection ( self . grp2codes ) : codes . update ( self . grp2codes [ grp ] ) return codes
Get codes given codes or groups .
70
7
222,581
def sort_nts(self, nt_list, codekey):
    """Sort namedtuples so evidence codes appear in the same order as code2nt.

    Members whose code is missing from ev2idx default to -1 (sort first),
    so py3 never has to compare None with an int.
    """
    def _rank(ntd):
        return self.ev2idx.get(getattr(ntd, codekey), -1)
    return sorted(nt_list, key=_rank)
Sort list of namedtuples such so evidence codes in same order as code2nt .
111
18
222,582
def get_grp_name(self, code):
    """Return (group, name) for an evidence code; ("", "") if unknown."""
    ntd = self.code2nt.get(code.strip())
    return (ntd.group, ntd.name) if ntd is not None else ("", "")
Return group and name for an evidence code .
61
9
222,583
def prt_ev_cnts(self, ctr, prt=sys.stdout):
    """Print evidence code counts stored in a collections Counter."""
    line_fmt = "{CNT:7,} {EV:>7} {GROUP:<15} {NAME}\n"
    for key, cnt in ctr.most_common():
        # Strip the "NOT " qualifier before looking up the code.
        grp, name = self.get_grp_name(key.replace("NOT ", ""))
        prt.write(line_fmt.format(CNT=cnt, EV=key, GROUP=grp, NAME=name))
Prints evidence code counts stored in a collections Counter .
116
11
222,584
def get_order(self, codes):
    """Return evidence codes in the order shown in code2name.

    FIX: codes absent from ev2idx now default to -1 (sorting first),
    matching sort_nts; previously ev2idx.get(e) returned None and py3
    raised TypeError when comparing None with an int. The pointless
    single-element list wrapper around the key is also removed.
    """
    return sorted(codes, key=lambda code: self.ev2idx.get(code, -1))
Return evidence codes in order shown in code2name .
34
11
222,585
def get_grp2code2nt(self):
    """Return an ordered dict: group -> OrderedDict(code -> namedtuple)."""
    # Seed with every group so empty groups still appear, in self.grps order.
    grp2pairs = cx.OrderedDict((grp, []) for grp in self.grps)
    for code, ntd in self.code2nt.items():
        grp2pairs[ntd.group].append((code, ntd))
    return cx.OrderedDict(
        (grp, cx.OrderedDict(pairs)) for grp, pairs in grp2pairs.items())
Return ordered dict for group to namedtuple
127
9
222,586
def _init_grps ( code2nt ) : seen = set ( ) seen_add = seen . add groups = [ nt . group for nt in code2nt . values ( ) ] return [ g for g in groups if not ( g in seen or seen_add ( g ) ) ]
Return list of groups in same order as in code2nt
65
12
222,587
def get_grp2codes(self):
    """Return a plain dict mapping group name to its set of evidence codes."""
    grp2codes = {}
    for code, ntd in self.code2nt.items():
        grp2codes.setdefault(ntd.group, set()).add(code)
    return grp2codes
Get dict of group name to namedtuples .
62
10
222,588
def plot_sections(self, fout_dir=".", **kws_usr):
    """Plot groups of GOs which have been placed in sections."""
    # Only the plotting kwargs are needed here; the DAG kwargs are discarded.
    kws_plt = self._get_kws_plt(None, **kws_usr)[0]
    PltGroupedGos(self).plot_sections(fout_dir, **kws_plt)
Plot groups of GOs which have been placed in sections .
78
12
222,589
def get_pltdotstr ( self , * * kws_usr ) : dotstrs = self . get_pltdotstrs ( * * kws_usr ) assert len ( dotstrs ) == 1 return dotstrs [ 0 ]
Plot one GO header group in Grouper .
55
10
222,590
def plot_groups_unplaced(self, fout_dir=".", **kws_usr):
    """Plot each GO group that was not placed into a section.

    kws: go2color max_gos upper_trigger max_upper
    """
    return PltGroupedGos(self).plot_groups_unplaced(fout_dir, **kws_usr)
Plot each GO group .
78
5
222,591
def _get_kws_plt(self, usrgos, **kws_usr):
    """Add go2color and go2bordercolor relevant to this grouping into plot kwargs.

    Args:
        usrgos: user GO IDs, used when pruning around a header GO.
        kws_usr: user plot options (may include hdrgo, go2color,
            go2bordercolor, prune, ...).
    Returns:
        (kws_plt, kws_dag): plotting kwargs and DAG-construction kwargs.
    """
    kws_plt = kws_usr.copy()
    kws_dag = {}
    hdrgo = kws_plt.get('hdrgo', None)
    objcolor = GrouperColors(self.grprobj)
    # GO term colors
    if 'go2color' not in kws_usr:
        kws_plt['go2color'] = objcolor.get_go2color_users()
    elif hdrgo is not None:
        # Copy before mutating so the caller's go2color dict is not changed.
        go2color = kws_plt.get('go2color').copy()
        go2color[hdrgo] = PltGroupedGosArgs.hdrgo_dflt_color
        kws_plt['go2color'] = go2color
    # GO term border colors
    if 'go2bordercolor' not in kws_usr:
        kws_plt['go2bordercolor'] = objcolor.get_bordercolor()
    prune = kws_usr.get('prune', None)
    if prune is True and hdrgo is not None:
        # prune=True: restrict the DAG to paths from the header GO to the
        # user GOs (plus the header GO itself as a root).
        kws_dag['dst_srcs_list'] = [(hdrgo, usrgos), (None, set([hdrgo]))]
        kws_plt['parentcnt'] = True
    elif prune:
        # Caller supplied an explicit (dst, srcs) pruning list.
        kws_dag['dst_srcs_list'] = prune
        kws_plt['parentcnt'] = True
    # Group text
    kws_plt['go2txt'] = self.get_go2txt(self.grprobj, kws_plt.get('go2color'),
                                        kws_plt.get('go2bordercolor'))
    return kws_plt, kws_dag
Add go2color and go2bordercolor relevant to this grouping into plot .
441
18
222,592
def get_go2txt(grprobj_cur, grp_go2color, grp_go2bordercolor):
    """Adds section text in all GO terms if not Misc. Adds Misc in terms of interest.

    Args:
        grprobj_cur: current Grouper for the GOs being plotted.
        grp_go2color / grp_go2bordercolor: GO IDs highlighted in the plot.
    Returns:
        dict: GO ID -> section text used as the plot label annotation.
    """
    goids_main = set(o.id for o in grprobj_cur.gosubdag.go2obj.values())
    hdrobj = grprobj_cur.hdrobj
    # Regroup over user GOs plus every GO in the plot so middle terms get text.
    grprobj_all = Grouper("all", grprobj_cur.usrgos.union(goids_main), hdrobj,
                          grprobj_cur.gosubdag)
    # Adds section text to all GO terms in plot (misses middle GO terms)
    _secdflt = hdrobj.secdflt
    _hilight = set(grp_go2color.keys()).union(grp_go2bordercolor)
    ret_go2txt = {}
    # Keep sections text only if GO header, GO user, or not Misc.
    if hdrobj.sections:
        for goid, txt in grprobj_all.get_go2sectiontxt().items():
            if txt == 'broad':
                continue
            # Default ("Misc.") section text is kept only for highlighted GOs.
            if txt != _secdflt or goid in _hilight:
                ret_go2txt[goid] = txt
    return ret_go2txt
Adds section text in all GO terms if not Misc . Adds Misc in terms of interest .
284
18
222,593
def download_go_basic_obo(obo="go-basic.obo", prt=sys.stdout, loading_bar=True):
    """Download the GO ontology file if necessary; return the local filename."""
    if os.path.isfile(obo):
        if prt is not None:
            prt.write(" EXISTS: {FILE}\n".format(FILE=obo))
        return obo
    base_url = "http://purl.obolibrary.org/obo/go"
    if "slim" in obo:
        base_url = "http://www.geneontology.org/ontology/subsets"
        # base_url = 'http://current.geneontology.org/ontology/subsets'
    obo_remote = "{HTTP}/{OBO}".format(HTTP=base_url, OBO=os.path.basename(obo))
    dnld_file(obo_remote, obo, prt, loading_bar)
    return obo
Download Ontologies if necessary .
207
6
222,594
def download_ncbi_associations(gene2go="gene2go", prt=sys.stdout, loading_bar=True):
    """Download NCBI gene2go associations if necessary; return the local filename."""
    if os.path.isfile(gene2go):
        if prt is not None:
            prt.write(" EXISTS: {FILE}\n".format(FILE=gene2go))
        return gene2go
    # Download: ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz
    gzip_file = "{GENE2GO}.gz".format(GENE2GO=gene2go)
    file_remote = "ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/{GZ}".format(
        GZ=os.path.basename(gzip_file))
    dnld_file(file_remote, gene2go, prt, loading_bar)
    return gene2go
Download associations from NCBI if necessary
213
7
222,595
def gunzip(gzip_file, file_gunzip=None):
    """Unzip a .gz file; return the filename of the unzipped file."""
    # Default destination: the .gz path with its extension stripped.
    dst = file_gunzip if file_gunzip is not None else os.path.splitext(gzip_file)[0]
    gzip_open_to(gzip_file, dst)
    return dst
Unzip . gz file . Return filename of unzipped file .
68
15
222,596
def get_godag ( fin_obo = "go-basic.obo" , prt = sys . stdout , loading_bar = True , optional_attrs = None ) : from goatools . obo_parser import GODag download_go_basic_obo ( fin_obo , prt , loading_bar ) return GODag ( fin_obo , optional_attrs , load_obsolete = False , prt = prt )
Return GODag object . Initialize if necessary .
96
10
222,597
def dnld_gaf(species_txt, prt=sys.stdout, loading_bar=True):
    """Download one GAF file if necessary; return its local path."""
    fin_gafs = dnld_gafs([species_txt], prt, loading_bar)
    return fin_gafs[0]
Download GAF file if necessary .
52
7
222,598
def dnld_gafs(species_list, prt=sys.stdout, loading_bar=True):
    """Download GAF files, if necessary; return their local paths.

    Example GAF files in http://current.geneontology.org/annotations/:
      http://current.geneontology.org/annotations/mgi.gaf.gz
      http://current.geneontology.org/annotations/fb.gaf.gz
      http://current.geneontology.org/annotations/goa_human.gaf.gz
    """
    http = "http://current.geneontology.org/annotations"
    cwd = os.getcwd()
    fin_gafs = []
    for species_txt in species_list:  # e.g., goa_human mgi fb
        gaf_base = '{ABC}.gaf'.format(ABC=species_txt)   # goa_human.gaf
        gaf_cwd = os.path.join(cwd, gaf_base)            # {CWD}/goa_human.gaf
        remote_gz = "{HTTP}/{GAF}.gz".format(HTTP=http, GAF=gaf_base)
        dnld_file(remote_gz, gaf_cwd, prt, loading_bar)
        fin_gafs.append(gaf_cwd)
    return fin_gafs
Download GAF files if necessary .
324
7
222,599
def http_get(url, fout=None):
    """Download a file over HTTP; save it to *fout* when given. Return the response."""
    print('requests.get({URL}, stream=True)'.format(URL=url))
    rsp = requests.get(url, stream=True)
    if rsp.status_code != 200 or fout is None:
        # Either the request failed or the caller did not ask for a file.
        print(rsp.status_code, rsp.reason, url)
        print(rsp.content)
        return rsp
    with open(fout, 'wb') as prt:
        for chunk in rsp:  # .iter_content(chunk_size=128):
            prt.write(chunk)
        print(' WROTE: {F}\n'.format(F=fout))
    return rsp
Download a file from http . Save it in a file named by fout
152
15