idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
17,400
def get_gene_names(self):
    """Collect the names of all genes in the network into self._gene_names.

    Group placeholder nodes (names starting with 'Group') are skipped;
    member genes of compound nodes are included individually.
    """
    names = []
    for node in self._nodes:
        data = node['data']
        members = data.get('members')
        if members:
            names.extend(members.keys())
        elif not data['name'].startswith('Group'):
            names.append(data['name'])
    self._gene_names = names
Gather gene names of all nodes and node members
117
10
17,401
def set_CCLE_context ( self , cell_types ) : self . get_gene_names ( ) # Get expression and mutations from context client exp_values = context_client . get_protein_expression ( self . _gene_names , cell_types ) mut_values = context_client . get_mutations ( self . _gene_names , cell_types ) # Make a dict of presence/absence of mutations muts = { cell_line : { } for cell_line in cell_types } for cell_line , entries in mut_values . items ( ) : if entries is not None : for gene , mutations in entries . items ( ) : if mutations : muts [ cell_line ] [ gene ] = 1 else : muts [ cell_line ] [ gene ] = 0 # Create bins for the exp values # because colorbrewer only does 3-9 bins and I don't feel like # reinventing color scheme theory, this will only bin 3-9 bins def bin_exp ( expression_dict ) : d = expression_dict exp_values = [ ] for line in d : for gene in d [ line ] : val = d [ line ] [ gene ] if val is not None : exp_values . append ( val ) thr_dict = { } for n_bins in range ( 3 , 10 ) : bin_thr = np . histogram ( np . log10 ( exp_values ) , n_bins ) [ 1 ] [ 1 : ] thr_dict [ n_bins ] = bin_thr # this dict isn't yet binned, that happens in the loop binned_dict = { x : deepcopy ( expression_dict ) for x in range ( 3 , 10 ) } for n_bins in binned_dict : for line in binned_dict [ n_bins ] : for gene in binned_dict [ n_bins ] [ line ] : # last bin is reserved for None if binned_dict [ n_bins ] [ line ] [ gene ] is None : binned_dict [ n_bins ] [ line ] [ gene ] = n_bins else : val = np . log10 ( binned_dict [ n_bins ] [ line ] [ gene ] ) for thr_idx , thr in enumerate ( thr_dict [ n_bins ] ) : if val <= thr : binned_dict [ n_bins ] [ line ] [ gene ] = thr_idx break return binned_dict binned_exp = bin_exp ( exp_values ) context = { 'bin_expression' : binned_exp , 'mutation' : muts } self . _context [ 'CCLE' ] = context
Set context of all nodes and node members from CCLE .
592
12
17,402
def print_cyjs_graph(self):
    """Return the assembled Cytoscape JS network as a JSON string."""
    graph = {'edges': self._edges, 'nodes': self._nodes}
    return json.dumps(graph, indent=1, sort_keys=True)
Return the assembled Cytoscape JS network as a json string .
68
14
17,403
def print_cyjs_context(self):
    """Return the node context dictionary as a JSON string."""
    return json.dumps(self._context, indent=1, sort_keys=True)
Return a list of node names and their respective context .
41
11
17,404
def save_json(self, fname_prefix='model'):
    """Save the assembled network and its context to two JSON files.

    Writes the graph to '<fname_prefix>.json' and the node context to
    '<fname_prefix>_context.json', both UTF-8 encoded.
    """
    # Write the graph itself
    graph_str = self.print_cyjs_graph()
    with open(fname_prefix + '.json', 'wb') as fh:
        fh.write(graph_str.encode('utf-8'))
    # Write the context of the graph nodes
    ctx_str = self.print_cyjs_context()
    with open(fname_prefix + '_context.json', 'wb') as fh:
        fh.write(ctx_str.encode('utf-8'))
Save the assembled Cytoscape JS network in a json file .
137
14
17,405
def save_model(self, fname='model.js'):
    """Save the assembled Cytoscape JS network in a js file.

    The file defines three JS variables: exp_colorscale, mut_colorscale
    and model_elements.
    """
    exp_cs = json.dumps(self._exp_colorscale)
    mut_cs = json.dumps(self._mut_colorscale)
    elements = json.dumps({'edges': self._edges, 'nodes': self._nodes},
                          indent=1, sort_keys=True)
    lines = ['var exp_colorscale = %s;\n' % exp_cs,
             'var mut_colorscale = %s;\n' % mut_cs,
             'var model_elements = %s;\n' % elements]
    with open(fname, 'wb') as fh:
        fh.write(''.join(lines).encode('utf-8'))
Save the assembled Cytoscape JS network in a js file .
288
14
17,406
def _get_edge_dict(self):
    """Return a dict keyed by (i, source, target, polarity) -> edge id."""
    edge_dict = collections.defaultdict(lambda: [])
    if len(self._edges) > 0:
        for edge in self._edges:
            data = edge['data']
            key = (data['i'], data['source'], data['target'],
                   data['polarity'])
            edge_dict[key] = data['id']
    return edge_dict
Return a dict of edges .
107
6
17,407
def _get_node_key(self, node_dict_item):
    """Return a hashable (sources, targets) key for a node dict entry."""
    sources = tuple(sorted(node_dict_item['sources']))
    targets = tuple(sorted(node_dict_item['targets']))
    return (sources, targets)
Return a tuple of sorted sources and targets given a node dict .
62
13
17,408
def _get_node_groups(self):
    """Return a list of node-id lists that are topologically identical.

    Two nodes are grouped when they have exactly the same incoming and
    outgoing edge signatures (interaction type, polarity, other node).
    """
    node_dict = {node['data']['id']: {'sources': [], 'targets': []}
                 for node in self._nodes}
    for edge in self._edges:
        data = edge['data']
        # Record this edge as a source for its target node
        node_dict[data['target']]['sources'].append(
            (data['i'], data['polarity'], data['source']))
        # Record this edge as a target for its source node
        node_dict[data['source']]['targets'].append(
            (data['i'], data['polarity'], data['target']))
    # Bucket nodes by their (sources, targets) signature
    node_key_dict = collections.defaultdict(lambda: [])
    for node_id, node_d in node_dict.items():
        node_key_dict[self._get_node_key(node_d)].append(node_id)
    # Keep only buckets with more than one member
    return [group for group in node_key_dict.values() if len(group) > 1]
Return a list of node id lists that are topologically identical .
351
13
17,409
def _group_edges(self):
    """Merge edges into virtual group-level edges for grouped nodes.

    Edges whose source or target node lives inside a parent (group) node
    are marked 'Virtual' and a deduplicated group-level copy pointing at
    the parent is appended to self._edges, with merged uuid lists.
    """
    # edit edges on parent nodes and make new edges for them
    edges_to_add = [[], []]  # [group_edges, uuid_lists]
    for e in self._edges:
        new_edge = deepcopy(e)
        # Strip identity fields so duplicates compare equal below
        new_edge['data'].pop('id', None)
        uuid_list = new_edge['data'].pop('uuid_list', [])
        # Check if edge source or target are contained in a parent
        # If source or target in parent edit edge
        # Nodes may only point within their container
        source = e['data']['source']
        target = e['data']['target']
        source_node = [x for x in self._nodes
                       if x['data']['id'] == source][0]
        target_node = [x for x in self._nodes
                       if x['data']['id'] == target][0]
        # If the source node is in a group, we change the source of this
        # edge to the group
        if source_node['data']['parent'] != '':
            new_edge['data']['source'] = source_node['data']['parent']
            e['data']['i'] = 'Virtual'
        # If the target node is in a group, we change the target of this
        # edge to the group
        if target_node['data']['parent'] != '':
            new_edge['data']['target'] = target_node['data']['parent']
            e['data']['i'] = 'Virtual'
        if e['data']['i'] == 'Virtual':
            if new_edge not in edges_to_add[0]:
                edges_to_add[0].append(new_edge)
                edges_to_add[1].append(uuid_list)
            else:
                # Duplicate group edge: merge uuid lists, deduplicated
                idx = edges_to_add[0].index(new_edge)
                edges_to_add[1][idx] += uuid_list
                edges_to_add[1][idx] = list(set(edges_to_add[1][idx]))
    for ze in zip(*edges_to_add):
        edge = ze[0]
        edge['data']['id'] = self._get_new_id()
        edge['data']['uuid_list'] = ze[1]
        self._edges.append(edge)
Group all edges that are topologically identical .
590
9
17,410
def make_stmt(stmt_cls, tf_agent, target_agent, pmid):
    """Return a Statement of the given class connecting two agents.

    The agents are deep-copied so the returned Statement owns its own
    instances; evidence is a single TRRUST Evidence with the given PMID.
    """
    evidence = Evidence(source_api='trrust', pmid=pmid)
    return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent),
                    evidence=[evidence])
Return a Statement based on its type agents and PMID .
72
12
17,411
def get_grounded_agent(gene_name):
    """Return a grounded Agent based on an HGNC symbol."""
    db_refs = {'TEXT': gene_name}
    # Map known outdated symbols to their current HGNC names
    if gene_name in hgnc_map:
        gene_name = hgnc_map[gene_name]
    hgnc_id = hgnc_client.get_hgnc_id(gene_name)
    if hgnc_id:
        db_refs['HGNC'] = hgnc_id
        up_id = hgnc_client.get_uniprot_id(hgnc_id)
        if up_id:
            db_refs['UP'] = up_id
    return Agent(gene_name, db_refs=db_refs)
Return a grounded Agent based on an HGNC symbol .
162
11
17,412
def extract_statements(self):
    """Process the table to extract Statements into self.statements."""
    # Map the table's effect labels onto Statement classes
    effect_classes = {'Activation': IncreaseAmount,
                      'Repression': DecreaseAmount}
    for _, (tf, target, effect, refs) in self.df.iterrows():
        tf_agent = get_grounded_agent(tf)
        target_agent = get_grounded_agent(target)
        stmt_cls = effect_classes.get(effect)
        if stmt_cls is None:
            # Skip rows with effects other than Activation/Repression
            continue
        # One Statement per referenced PMID
        for pmid in refs.split(';'):
            self.statements.append(
                make_stmt(stmt_cls, tf_agent, target_agent, pmid))
Process the table to extract Statements .
148
7
17,413
def process_paper(model_name, pmid):
    """Process a paper with the given pubmed identifier.

    Returns a tuple (reach processor or None, text format or None).
    Previously read papers are reloaded from their cached JSON output.
    """
    json_directory = os.path.join(model_name, 'jsons')
    json_path = os.path.join(json_directory, 'PMID%s.json' % pmid)
    # NOTE(review): an invalid PMID is only logged here; processing still
    # continues below — confirm whether an early return was intended
    if pmid.startswith('api') or pmid.startswith('PMID'):
        logger.warning('Invalid PMID: %s' % pmid)
    # If the paper has been read, use the json output file
    if os.path.exists(json_path):
        rp = reach.process_json_file(json_path, citation=pmid)
        txt_format = 'existing_json'
    # If the paper has not been read, download the text and read
    else:
        try:
            txt, txt_format = get_full_text(pmid, 'pmid')
        except Exception:
            # Text retrieval failed; signal failure to the caller
            return None, None
        if txt_format == 'pmc_oa_xml':
            rp = reach.process_nxml_str(txt, citation=pmid, offline=True,
                                        output_fname=json_path)
        elif txt_format == 'elsevier_xml':
            # Extract the raw text from the Elsevier XML
            txt = elsevier_client.extract_text(txt)
            rp = reach.process_text(txt, citation=pmid, offline=True,
                                    output_fname=json_path)
        elif txt_format == 'abstract':
            rp = reach.process_text(txt, citation=pmid, offline=True,
                                    output_fname=json_path)
        else:
            rp = None
    if rp is not None:
        check_pmids(rp.statements)
    return rp, txt_format
Process a paper with the given pubmed identifier
392
9
17,414
def process_paper_helper(model_name, pmid, start_time_local):
    """Process a paper locally or via AWS, catching uncaught exceptions.

    Dispatches to process_paper or process_paper_aws depending on AWS
    availability. Returns (reach processor, text format), or (None, None)
    if processing raised.
    """
    try:
        if not aws_available:
            rp, txt_format = process_paper(model_name, pmid)
        else:
            rp, txt_format = process_paper_aws(pmid, start_time_local)
    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary exceptions.
    except Exception:
        logger.exception('uncaught exception while processing %s', pmid)
        return None, None
    return rp, txt_format
Wraps processing a paper by either a local or remote service and catches any uncaught exceptions
106
18
17,415
def _load_data():
    """Load the data from the csv in data as a list of row dicts."""
    # Read the whole csv into memory
    csv_path = path.join(HERE, path.pardir, path.pardir, 'resources',
                         DATAFILE_NAME)
    rows = list(read_unicode_csv(csv_path))
    # Get the headers.
    headers = rows[0]
    # For some reason this heading is oddly formatted and inconsistent with
    # the rest, or with the usual key-style for dicts.
    headers[headers.index('Approved.Symbol')] = 'approved_symbol'
    return [dict(zip(headers, line)) for line in rows[1:]]
Load the data from the csv in data .
160
10
17,416
def run_eidos(endpoint, *args):
    """Run a given endpoint of Eidos through the command line."""
    # Fully qualified name of the class to invoke
    call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line and append optional args
    cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
    logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
    subprocess.call(cmd)
Run a given endpoint of Eidos through the command line .
121
13
17,417
def extract_from_directory(path_in, path_out):
    """Run Eidos on a set of text files in a folder."""
    # Normalize both paths (expand ~ and resolve symlinks)
    def _norm(p):
        return os.path.realpath(os.path.expanduser(p))
    path_in = _norm(path_in)
    path_out = _norm(path_out)
    logger.info('Running Eidos on input folder %s' % path_in)
    run_eidos('apps.ExtractFromDirectory', path_in, path_out)
Run Eidos on a set of text files in a folder .
104
13
17,418
def extract_and_process(path_in, path_out):
    """Run Eidos on a set of text files and process output with INDRA.

    Runs Eidos over the text files in path_in, then processes every
    resulting *.jsonld file in path_out and returns the combined list of
    INDRA Statements.
    """
    path_in = os.path.realpath(os.path.expanduser(path_in))
    path_out = os.path.realpath(os.path.expanduser(path_out))
    extract_from_directory(path_in, path_out)
    json_files = glob.glob(os.path.join(path_out, '*.jsonld'))
    logger.info('Found %d JSON-LD files to process in %s' %
                (len(json_files), path_out))
    stmts = []
    # Fix: the original loop variable was named `json`, shadowing the
    # json module; renamed to avoid masking it.
    for json_file in json_files:
        ep = process_json_file(json_file)
        if ep:
            stmts += ep.statements
    return stmts
Run Eidos on a set of text files and process output with INDRA .
167
16
17,419
def get_statements(subject=None, object=None, agents=None, stmt_type=None,
                   use_exact_type=False, persist=True, timeout=None,
                   simple_response=False, ev_limit=10, best_first=True,
                   tries=2, max_stmts=None):
    """Get a processor for the INDRA DB web API matching agents and type.

    With simple_response=True, return the statement list directly instead
    of the processor object.
    """
    processor = IndraDBRestProcessor(subject, object, agents, stmt_type,
                                     use_exact_type, persist, timeout,
                                     ev_limit, best_first, tries, max_stmts)
    # Format the result appropriately.
    return processor.statements if simple_response else processor
Get a processor for the INDRA DB web API matching given agents and type .
142
16
17,420
def get_statements_by_hash(hash_list, ev_limit=100, best_first=True,
                           tries=2):
    """Get fully formed statements from a list of hashes.

    Hashes may be ints or strings convertible to ints. Returns an empty
    list for an empty input. Raises ValueError for invalid input.
    """
    if not isinstance(hash_list, list):
        # Fix: original message read backwards ("input is a list, not %s")
        raise ValueError("The `hash_list` input should be a list, not %s."
                         % type(hash_list))
    if not hash_list:
        return []
    # Normalize string hashes to ints (assumes a homogeneous list)
    if isinstance(hash_list[0], str):
        hash_list = [int(h) for h in hash_list]
    if not all(isinstance(h, int) for h in hash_list):
        raise ValueError("Hashes must be ints or strings that can be "
                         "converted into ints.")
    resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit,
                                    data={'hashes': hash_list},
                                    best_first=best_first, tries=tries)
    return stmts_from_json(resp.json()['statements'].values())
Get fully formed statements from a list of hashes .
233
10
17,421
def get_statements_for_paper(ids, ev_limit=10, best_first=True, tries=2,
                             max_stmts=None):
    """Get raw Statements extracted from the papers given by ids.

    `ids` is an iterable of (id_type, id_value) pairs.
    """
    id_l = [{'id': id_val, 'type': id_type} for id_type, id_val in ids]
    resp = submit_statement_request('post', 'from_papers', data={'ids': id_l},
                                    ev_limit=ev_limit, best_first=best_first,
                                    tries=tries, max_stmts=max_stmts)
    return stmts_from_json(resp.json()['statements'].values())
Get the set of raw Statements extracted from a paper given by the id .
170
15
17,422
def submit_curation(hash_val, tag, curator, text=None,
                    source='indra_rest_client', ev_hash=None, is_test=False):
    """Submit a curation for the given statement at the relevant level."""
    data = {'tag': tag, 'text': text, 'curator': curator, 'source': source,
            'ev_hash': ev_hash}
    url = 'curation/submit/%s' % hash_val
    # Test submissions are flagged via a query string
    qstr = '?test' if is_test else ''
    return make_db_rest_request('post', url, qstr, data=data)
Submit a curation for the given statement at the relevant level .
135
13
17,423
def get_statement_queries(stmts, **params):
    """Return web API query URLs used to search based on each statement.

    For binary statements, the subject/object agents become query
    parameters; for Complex/SelfModification/ActiveForm, agents are
    enumerated as agent0, agent1, ... Extra **params are appended to
    every query.
    """
    def pick_ns(ag):
        # Prefer grounded namespaces in a fixed priority order; fall back
        # to the agent's text name
        for ns in ['HGNC', 'FPLX', 'CHEMBL', 'CHEBI', 'GO', 'MESH']:
            if ns in ag.db_refs.keys():
                dbid = ag.db_refs[ns]
                break
        else:
            ns = 'TEXT'
            dbid = ag.name
        return '%s@%s' % (dbid, ns)
    queries = []
    url_base = get_url_base('statements/from_agents')
    non_binary_statements = [Complex, SelfModification, ActiveForm]
    for stmt in stmts:
        kwargs = {}
        if type(stmt) not in non_binary_statements:
            for pos, ag in zip(['subject', 'object'], stmt.agent_list()):
                if ag is not None:
                    kwargs[pos] = pick_ns(ag)
        else:
            for i, ag in enumerate(stmt.agent_list()):
                if ag is not None:
                    kwargs['agent%d' % i] = pick_ns(ag)
        kwargs['type'] = stmt.__class__.__name__
        kwargs.update(params)
        query_str = '?' + '&'.join(['%s=%s' % (k, v)
                                    for k, v in kwargs.items()
                                    if v is not None])
        queries.append(url_base + query_str)
    return queries
Get queries used to search based on a statement .
365
10
17,424
def save(self, model_fname='model.pkl'):
    """Save the state of the IncrementalModel in a pickle file.

    Only self.stmts is persisted, using pickle protocol 4.
    """
    with open(model_fname, 'wb') as fh:
        pickle.dump(self.stmts, fh, protocol=4)
Save the state of the IncrementalModel in a pickle file .
52
14
17,425
def add_statements(self, pmid, stmts):
    """Add INDRA Statements to the incremental model, indexed by PMID."""
    if pmid in self.stmts:
        self.stmts[pmid] += stmts
    else:
        self.stmts[pmid] = stmts
Add INDRA Statements to the incremental model indexed by PMID .
55
13
17,426
def preassemble ( self , filters = None , grounding_map = None ) : stmts = self . get_statements ( ) # Filter out hypotheses stmts = ac . filter_no_hypothesis ( stmts ) # Fix grounding if grounding_map is not None : stmts = ac . map_grounding ( stmts , grounding_map = grounding_map ) else : stmts = ac . map_grounding ( stmts ) if filters and ( 'grounding' in filters ) : stmts = ac . filter_grounded_only ( stmts ) # Fix sites stmts = ac . map_sequence ( stmts ) if filters and 'human_only' in filters : stmts = ac . filter_human_only ( stmts ) # Run preassembly stmts = ac . run_preassembly ( stmts , return_toplevel = False ) # Run relevance filter stmts = self . _relevance_filter ( stmts , filters ) # Save Statements self . assembled_stmts = stmts
Preassemble the Statements collected in the model .
240
10
17,427
def get_model_agents(self):
    """Return a list of all non-None Agents from all Statements."""
    return [agent
            for stmt in self.get_statements()
            for agent in stmt.agent_list()
            if agent is not None]
Return a list of all Agents from all Statements .
64
10
17,428
def get_statements(self):
    """Return all Statements from all PMIDs as a single flat list."""
    all_stmts = []
    for stmt_list in self.stmts.values():
        all_stmts += stmt_list
    return all_stmts
Return a list of all Statements in a single list .
54
11
17,429
def get_statements_noprior(self):
    """Return all non-prior Statements in a single flat list."""
    all_stmts = []
    # Skip the special 'prior' key holding prior-knowledge statements
    for pmid, stmt_list in self.stmts.items():
        if pmid != 'prior':
            all_stmts += stmt_list
    return all_stmts
Return a list of all non - prior Statements in a single list .
65
14
17,430
def process_ndex_neighborhood(gene_names, network_id=None,
                              rdf_out='bel_output.rdf', print_output=True):
    """Return a BelRdfProcessor for an NDEx network neighborhood.

    Deprecated: prefer process_pybel_neighborhood. Queries NDEx for the
    BEL/RDF neighborhood of the given genes, writes the RDF to rdf_out
    and processes it. Returns None on any query failure.
    """
    logger.warning('This method is deprecated and the results are not '
                   'guaranteed to be correct. Please use '
                   'process_pybel_neighborhood instead.')
    if network_id is None:
        # Default public BEL network on NDEx
        network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb'
    url = ndex_bel2rdf + '/network/%s/asBELRDF/query' % network_id
    params = {'searchString': ' '.join(gene_names)}
    # The ndex_client returns the rdf as the content of a json dict
    res_json = ndex_client.send_request(url, params, is_json=True)
    if not res_json:
        logger.error('No response for NDEx neighborhood query.')
        return None
    if res_json.get('error'):
        error_msg = res_json.get('message')
        logger.error('BEL/RDF response contains error: %s' % error_msg)
        return None
    rdf = res_json.get('content')
    if not rdf:
        logger.error('BEL/RDF response is empty.')
        return None
    # Persist the raw RDF before processing
    with open(rdf_out, 'wb') as fh:
        fh.write(rdf.encode('utf-8'))
    bp = process_belrdf(rdf, print_output=print_output)
    return bp
Return a BelRdfProcessor for an NDEx network neighborhood .
369
14
17,431
def process_pybel_neighborhood(gene_names, network_file=None,
                               network_type='belscript', **kwargs):
    """Return PybelProcessor around neighborhood of given genes in a network.

    Processes the given BEL script or Node-Link JSON network and keeps
    only statements that mention at least one of the given gene names.
    Raises ValueError for an unknown network_type.
    """
    if network_file is None:
        # Use large corpus as base network
        network_file = os.path.join(os.path.dirname(
            os.path.abspath(__file__)), os.path.pardir, os.path.pardir,
            os.path.pardir, 'data', 'large_corpus.bel')
    if network_type == 'belscript':
        bp = process_belscript(network_file, **kwargs)
    elif network_type == 'json':
        bp = process_json_file(network_file)
    else:
        # Fix: an unknown type previously fell through to a NameError on
        # `bp`; raise a clear error instead.
        raise ValueError('Unknown network type: %s' % network_type)
    # Keep only statements mentioning at least one of the given genes
    filtered_stmts = []
    for stmt in bp.statements:
        if any(agent is not None and agent.name in gene_names
               for agent in stmt.agent_list()):
            filtered_stmts.append(stmt)
    bp.statements = filtered_stmts
    return bp
Return PybelProcessor around neighborhood of given genes in a network .
244
14
17,432
def process_pybel_graph(graph):
    """Return a PybelProcessor by processing a PyBEL graph."""
    bp = PybelProcessor(graph)
    bp.get_statements()
    # Report any annotation lookups that failed during processing
    failures = bp.annot_manager.failures
    if failures:
        missing = sum(len(v) for v in failures.values())
        logger.warning('missing %d annotation pairs', missing)
    return bp
Return a PybelProcessor by processing a PyBEL graph .
78
14
17,433
def process_belscript(file_name, **kwargs):
    """Return a PybelProcessor by processing a BEL script file."""
    # Default to no citation clearing and no identifier validation
    kwargs.setdefault('citation_clearing', False)
    kwargs.setdefault('no_identifier_validation', True)
    pybel_graph = pybel.from_path(file_name, **kwargs)
    return process_pybel_graph(pybel_graph)
Return a PybelProcessor by processing a BEL script file .
112
13
17,434
def process_json_file(file_name):
    """Return a PybelProcessor by processing a Node-Link JSON file."""
    with open(file_name, 'rt') as fh:
        graph = pybel.from_json_file(fh, False)
    return process_pybel_graph(graph)
Return a PybelProcessor by processing a Node - Link JSON file .
59
15
17,435
def process_cbn_jgif_file(file_name):
    """Return a PybelProcessor by processing a CBN JGIF JSON file."""
    with open(file_name, 'r') as jgf:
        jgif_dict = json.load(jgf)
    return process_pybel_graph(pybel.from_cbn_jgif(jgif_dict))
Return a PybelProcessor by processing a CBN JGIF JSON file .
61
17
17,436
def update_famplex():
    """Update all the CSV files that form the FamPlex resource."""
    base_url = ('https://raw.githubusercontent.com/sorgerlab/famplex/'
                'master/%s.csv')
    # Fetch each resource CSV and store it under the local famplex dir
    for csv_name in ('entities', 'equivalences', 'gene_prefixes',
                     'grounding_map', 'relations'):
        save_from_http(base_url % csv_name,
                       os.path.join(path, 'famplex/%s.csv' % csv_name))
Update all the CSV files that form the FamPlex resource .
137
12
17,437
def update_lincs_small_molecules():
    """Load the LINCS small molecule metadata csv and save it as JSON."""
    url = 'http://lincs.hms.harvard.edu/db/sm/'
    sm_data = load_lincs_csv(url)
    # Key each record by its HMS LINCS ID
    sm_dict = {row['HMS LINCS ID']: row.copy() for row in sm_data}
    assert len(sm_dict) == len(sm_data), "We lost data."
    fname = os.path.join(path, 'lincs_small_molecules.json')
    with open(fname, 'w') as fh:
        json.dump(sm_dict, fh, indent=1)
Load the csv of LINCS small molecule metadata into a dict .
153
14
17,438
def update_lincs_proteins():
    """Load the LINCS protein metadata csv and save it as JSON."""
    url = 'http://lincs.hms.harvard.edu/db/proteins/'
    prot_data = load_lincs_csv(url)
    # Key each record by its HMS LINCS ID
    prot_dict = {row['HMS LINCS ID']: row.copy() for row in prot_data}
    assert len(prot_dict) == len(prot_data), "We lost data."
    fname = os.path.join(path, 'lincs_proteins.json')
    with open(fname, 'w') as fh:
        json.dump(prot_dict, fh, indent=1)
Load the csv of LINCS protein metadata into a dict .
151
13
17,439
def _get_is_direct(stmt):
    """Return True if there is evidence the statement is direct.

    If any evidence marks the interaction as direct we return True; if
    no direct evidence exists but some evidence marks it indirect we
    return False; with no information either way we default to True.
    """
    saw_indirect = False
    for ev in stmt.evidence:
        direct = ev.epistemics.get('direct')
        if direct is True:
            return True
        if direct is False:
            # We have seen at least some evidence of indirectness
            saw_indirect = True
    return not saw_indirect
Returns true if there is evidence that the statement is a direct interaction . If any of the evidences associated with the statement indicates a direct interaction then we assume the interaction is direct . If there is no evidence for the interaction being indirect then we default to direct .
94
55
17,440
def make_model(self):
    """Assemble statements into index cards stored in self.cards.

    Statement types without an assembler are skipped; each assembled
    card gets statement metadata and a PMC id (possibly overridden).
    """
    for stmt in self.statements:
        # Dispatch on statement type to the appropriate assembler
        if isinstance(stmt, Modification):
            card = assemble_modification(stmt)
        elif isinstance(stmt, SelfModification):
            card = assemble_selfmodification(stmt)
        elif isinstance(stmt, Complex):
            card = assemble_complex(stmt)
        elif isinstance(stmt, Translocation):
            card = assemble_translocation(stmt)
        elif isinstance(stmt, RegulateActivity):
            card = assemble_regulate_activity(stmt)
        elif isinstance(stmt, RegulateAmount):
            card = assemble_regulate_amount(stmt)
        else:
            # Unsupported statement types are skipped
            continue
        if card is not None:
            card.card['meta'] = {'id': stmt.uuid, 'belief': stmt.belief}
            if self.pmc_override is not None:
                card.card['pmc_id'] = self.pmc_override
            else:
                card.card['pmc_id'] = get_pmc_id(stmt)
            self.cards.append(card)
Assemble statements into index cards .
255
7
17,441
def print_model(self):
    """Return the assembled cards as a JSON string."""
    cards = [c.card for c in self.cards]
    # A single card is printed bare rather than as a one-element list
    payload = cards[0] if len(cards) == 1 else cards
    return json.dumps(payload, indent=1)
Return the assembled cards as a JSON string .
72
9
17,442
def geneways_action_to_indra_statement_type(actiontype, plo):
    """Return (statement generator, is_direct) for a Geneways action type.

    The generator is None for unmapped action types. 'P' (physical)
    actions default to direct; 'bind' and 'phosphorylate' are always
    direct.
    """
    actiontype = actiontype.lower()
    statement_generator = None
    is_direct = (plo == 'P')
    if actiontype == 'bind':
        def statement_generator(substance1, substance2, evidence):
            return Complex([substance1, substance2], evidence=evidence)
        is_direct = True
    elif actiontype == 'phosphorylate':
        def statement_generator(substance1, substance2, evidence):
            return Phosphorylation(substance1, substance2,
                                   evidence=evidence)
        is_direct = True
    return (statement_generator, is_direct)
Return INDRA Statement corresponding to Geneways action type .
147
11
17,443
def make_statement(self, action, mention):
    """Make an INDRA statement from a Geneways action and action mention.

    Returns None when the Geneways action type has no INDRA equivalent.
    """
    (statement_generator, is_direct) = \
        geneways_action_to_indra_statement_type(mention.actiontype,
                                                action.plo)
    if statement_generator is None:
        # Geneways statement does not map onto an indra statement
        return None
    # Try to find the full-text sentence
    # Unfortunately, the sentence numbers in the Geneways dataset
    # don't correspond to an obvious sentence segmentation.
    # This code looks for sentences with the subject, object, and verb
    # listed by the Geneways action mention table and only includes
    # it in the evidence if there is exactly one such sentence
    text = None
    if self.get_ft_mention:
        try:
            content, content_type = get_full_text(mention.pmid, 'pmid')
            if content is not None:
                ftm = FullTextMention(mention, content)
                sentences = ftm.find_matching_sentences()
                if len(sentences) == 1:
                    text = sentences[0]
        except Exception:
            logger.warning('Could not fetch full text for PMID ' +
                           mention.pmid)
    # Make an evidence object
    epistemics = dict()
    epistemics['direct'] = is_direct
    annotations = mention.make_annotation()
    annotations['plo'] = action.plo  # plo only in action table
    evidence = Evidence(source_api='geneways',
                        source_id=mention.actionmentionid,
                        pmid=mention.pmid, text=text,
                        epistemics=epistemics,
                        annotations=annotations)
    # Construct the grounded and name standardized agents
    # Note that this involves grounding the agent by
    # converting the Entrez ID listed in the Geneways data with
    # HGNC and UniProt
    upstream_agent = get_agent(mention.upstream, action.up)
    downstream_agent = get_agent(mention.downstream, action.dn)
    # Make the statement
    return statement_generator(upstream_agent, downstream_agent, evidence)
Makes an INDRA statement from a Geneways action and action mention .
434
15
17,444
def load_from_rdf_file(self, rdf_file):
    """Initialize the hierarchy from an RDF (ntriples) file."""
    self.graph = rdflib.Graph()
    # 'nt' = ntriples serialization
    self.graph.parse(os.path.abspath(rdf_file), format='nt')
    self.initialize()
Initialize given an RDF input file representing the hierarchy .
60
12
17,445
def load_from_rdf_string(self, rdf_str):
    """Initialize the hierarchy from an RDF (ntriples) string."""
    self.graph = rdflib.Graph()
    self.graph.parse(data=rdf_str, format='nt')
    self.initialize()
Initialize given an RDF string representing the hierarchy .
54
11
17,446
def extend_with(self, rdf_file):
    """Extend this HierarchyManager's RDF graph with another RDF file.

    Re-runs initialize() so derived structures reflect the new triples.
    """
    self.graph.parse(os.path.abspath(rdf_file), format='nt')
    self.initialize()
Extend the RDF graph of this HierarchyManager with another RDF file .
43
17
17,447
def build_transitive_closures(self):
    """Build the transitive closures of the hierarchy."""
    self.component_counter = 0
    # Build closures for isa, partof, and their union
    closure_pairs = ((self.isa_objects, self.isa_closure),
                     (self.partof_objects, self.partof_closure),
                     (self.isa_or_partof_objects,
                      self.isa_or_partof_closure))
    for rel, tc_dict in closure_pairs:
        self.build_transitive_closure(rel, tc_dict)
Build the transitive closures of the hierarchy .
101
9
17,448
def build_transitive_closure(self, rel, tc_dict):
    """Build a transitive closure for a given relation into tc_dict."""
    # Adapter with the (node, graph) signature transitiveClosure expects
    rel_fun = lambda node, graph: rel(node)
    for x in self.graph.all_nodes():
        xs = x.toPython()
        for y in self.graph.transitiveClosure(rel_fun, x):
            ys = y.toPython()
            if xs == ys:
                continue
            tc_dict.setdefault(xs, []).append(ys)
            # Track connected components for the combined relation
            if rel == self.isa_or_partof_objects:
                self._add_component(xs, ys)
Build a transitive closure for a given relation in a given dict .
164
14
17,449
def directly_or_indirectly_related(self, ns1, id1, ns2, id2, closure_dict,
                                   relation_func):
    """Return True if two entities have the specified relationship.

    Uses the precomputed closure dict when available, otherwise walks
    the RDF graph's transitive closure under relation_func.
    """
    # if id2 is None, or both are None, then it's by definition isa:
    # NOTE(review): the parenthesized clause is redundant — `id2 is None`
    # already covers it
    if id2 is None or (id2 is None and id1 is None):
        return True
    # If only id1 is None, then it cannot be isa
    elif id1 is None:
        return False
    if closure_dict:
        # Fast path: look the pair up in the precomputed closure
        term1 = self.get_uri(ns1, id1)
        term2 = self.get_uri(ns2, id2)
        ec = closure_dict.get(term1)
        if ec is not None and term2 in ec:
            return True
        else:
            return False
    else:
        if not self.uri_as_name:
            e1 = self.find_entity(id1)
            e2 = self.find_entity(id2)
            if e1 is None or e2 is None:
                return False
            t1 = rdflib.term.URIRef(e1)
            t2 = rdflib.term.URIRef(e2)
        else:
            u1 = self.get_uri(ns1, id1)
            u2 = self.get_uri(ns2, id2)
            t1 = rdflib.term.URIRef(u1)
            t2 = rdflib.term.URIRef(u2)
        # Walk the graph's transitive closure from t1
        to = self.graph.transitiveClosure(relation_func, t1)
        if t2 in to:
            return True
        else:
            return False
Return True if two entities have the specified relationship .
339
12
17,450
def isa(self, ns1, id1, ns2, id2):
    """Return True if one entity has an isa relationship to another."""
    def rel_fun(node, graph):
        return self.isa_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.isa_closure, rel_fun)
Return True if one entity has an isa relationship to another .
73
13
17,451
def partof(self, ns1, id1, ns2, id2):
    """Return True if one entity is partof another."""
    def rel_fun(node, graph):
        return self.partof_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.partof_closure, rel_fun)
Return True if one entity is partof another .
73
10
17,452
def isa_or_partof(self, ns1, id1, ns2, id2):
    """Return True if two entities are in an isa or partof relationship."""
    def rel_fun(node, graph):
        return self.isa_or_partof_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.isa_or_partof_closure,
                                               rel_fun)
Return True if two entities are in an isa or partof relationship
88
14
17,453
def is_opposite(self, ns1, id1, ns2, id2):
    """Return True if two entities are in an is_opposite relationship."""
    t1 = rdflib.term.URIRef(self.get_uri(ns1, id1))
    t2 = rdflib.term.URIRef(self.get_uri(ns2, id2))
    rel = rdflib.term.URIRef(self.relations_prefix + 'is_opposite')
    # Direct lookup only; is_opposite is not transitively closed
    return t2 in self.graph.objects(t1, rel)
Return True if two entities are in an is_opposite relationship
134
13
17,454
def get_parents(self, uri, type='all'):
    """Return parents of a given entry.

    NOTE(review): the return type is inconsistent — a set for
    type='all' (or no parents), a list for 'immediate' and 'top', and
    implicitly None for any other type value; confirm callers expect this.
    """
    # First do a quick dict lookup to see if there are any parents
    all_parents = set(self.isa_or_partof_closure.get(uri, []))
    # If there are no parents or we are looking for all, we can return here
    if not all_parents or type == 'all':
        return all_parents
    # If we need immediate parents, we search again, this time knowing that
    # the uri is definitely in the graph since it has some parents
    if type == 'immediate':
        node = rdflib.term.URIRef(uri)
        immediate_parents = list(set(self.isa_or_partof_objects(node)))
        return [p.toPython() for p in immediate_parents]
    elif type == 'top':
        # Top parents are those with no parents of their own
        top_parents = [p for p in all_parents
                       if not self.isa_or_partof_closure.get(p)]
        return top_parents
Return parents of a given entry .
226
7
17,455
def _get_perf(text, msg_id):
    """Return a KQML request message to run reading on the given text."""
    content = KQMLList('run-text')
    content.sets('text', text)
    msg = KQMLPerformative('REQUEST')
    msg.set('receiver', 'READER')
    msg.set('content', content)
    msg.set('reply-with', msg_id)
    return msg
Return a request message for a given text .
92
9
17,456
def read_pmc(self, pmcid):
    """Read a given PMC article.

    Sends a KQML 'run-pmcid' request to the READER and increments the
    pending-reply counter; the reply arrives asynchronously.
    """
    msg = KQMLPerformative('REQUEST')
    msg.set('receiver', 'READER')
    content = KQMLList('run-pmcid')
    content.sets('pmcid', pmcid)
    content.set('reply-when-done', 'true')
    msg.set('content', content)
    msg.set('reply-with', 'P-%s' % pmcid)
    self.reply_counter += 1
    self.send(msg)
Read a given PMC article .
131
7
17,457
def read_text(self, text):
    """Read a given text phrase.

    Builds a 'run-text' request with a unique message id and sends it;
    both counters are bumped so outstanding replies can be tracked.
    """
    logger.info('Reading: "%s"' % text)
    msg_id = 'RT000%s' % self.msg_counter
    kqml_perf = _get_perf(text, msg_id)
    self.reply_counter += 1
    self.msg_counter += 1
    self.send(kqml_perf)
Read a given text phrase .
83
6
17,458
def receive_reply(self, msg, content):
    """Handle replies with reading results.

    Error replies are logged; successful replies have their EKB
    extraction collected. When the last outstanding reply arrives the
    agent exits with status 0.
    """
    reply_head = content.head()
    if reply_head == 'error':
        comment = content.gets('comment')
        logger.error('Got error reply: "%s"' % comment)
    else:
        extractions = content.gets('ekb')
        self.extractions.append(extractions)
    self.reply_counter -= 1
    if self.reply_counter == 0:
        self.exit(0)
Handle replies with reading results .
101
6
17,459
def split_long_sentence(sentence, words_per_line):
    """Takes a sentence and adds a newline every words_per_line words.

    Words are separated by single spaces except where a newline replaces
    the separator; no separator follows the final word (though a newline
    may, if the word count is a multiple of words_per_line).
    """
    words = sentence.split(' ')
    pieces = []
    for count, word in enumerate(words, start=1):
        pieces.append(word)
        if count % words_per_line == 0:
            pieces.append('\n')
        elif count != len(words):
            pieces.append(' ')
    return ''.join(pieces)
Takes a sentence and adds a newline every words_per_line words .
114
17
17,460
def shorter_name(key):
    """Return a shorter name for an id.

    Keeps only the part after the last '#' (then the last '/'), and
    replaces '-' and '.' with '_' so the result is identifier-friendly.
    """
    key_short = key
    for sep in ['#', '/']:
        ind = key_short.rfind(sep)
        # BUG FIX: str.rfind returns -1 when the separator is absent, never
        # None, so the original `if ind is not None` was always true and its
        # else branch was dead (it worked only because key_short[0:] is the
        # whole string).
        if ind != -1:
            key_short = key_short[ind + 1:]
    return key_short.replace('-', '_').replace('.', '_')
Return a shorter name for an id .
88
8
17,461
def add_event_property_edges(event_entity, entries):
    """Adds edges to the graph for event properties.

    NOTE(review): mutates module-level `edges` and `node_labels`
    collections instead of returning a value — confirm those globals
    exist at call time.
    """
    # Structural/provenance keys that should not become graph edges
    do_not_log = ['@type', '@id',
                  'http://worldmodelers.com/DataProvenance#sourced_from']
    for prop in event_entity:
        if prop not in do_not_log:
            value = event_entity[prop]
            value_entry = None
            value_str = None
            # assumes the property value is a non-empty list of dicts —
            # TODO confirm against the JSON-LD input schema
            if '@id' in value[0]:
                value = value[0]['@id']
                if value in entries:
                    # Pick a compact human-readable label for the target
                    value_str = get_entry_compact_text_repr(entries[value],
                                                            entries)
                #get_entry_compact_text_repr(entry, entries)
                if value_str is not None:
                    edges.append([shorter_name(event_entity['@id']),
                                  shorter_name(value),
                                  shorter_name(prop)])
                    node_labels[shorter_name(value)] = value_str
Adds edges to the graph for event properties .
216
9
17,462
def get_sourced_from(entry):
    """Get a list of '@id' values from the sourced_from attribute.

    Returns None implicitly when the attribute is absent.
    """
    key = 'http://worldmodelers.com/DataProvenance#sourced_from'
    if key in entry:
        return [item['@id'] for item in entry[key]]
Get a list of values from the sourced_from attribute.
66
11
17,463
def get_entry_compact_text_repr(entry, entries):
    """Return the entry's own text value if present; otherwise the
    shortest text value among its sources; otherwise None."""
    own_text = get_shortest_text_value(entry)
    if own_text is not None:
        return own_text
    sources = get_sourced_from(entry)
    # There are a lot of references to this entity, each of which refer
    # to it by a different text label. For the sake of visualization,
    # let's pick one of these labels (in this case, the shortest one)
    if sources is not None:
        texts = [get_shortest_text_value(entries[s]) for s in sources]
        return get_shortest_string(texts)
If the entry has a text value return that . If the entry has a source_from value return the text value of the source . Otherwise return None .
150
31
17,464
def process_text(text, output_fmt='json', outbuf=None, cleanup=True, key='',
                 **kwargs):
    """Return processor with Statements extracted by reading text with
    Sparser. The raw text is first wrapped into a minimal NXML document."""
    return process_nxml_str(make_nxml_from_text(text), output_fmt, outbuf,
                            cleanup, key, **kwargs)
Return processor with Statements extracted by reading text with Sparser .
83
12
17,465
def process_nxml_str(nxml_str, output_fmt='json', outbuf=None, cleanup=True,
                     key='', **kwargs):
    """Return processor with Statements extracted by reading an NXML string.

    The string is written to a process-unique temporary .nxml file handed
    to process_nxml_file; the temp file is removed when cleanup is True.
    """
    # Include the pid so concurrent worker processes don't clobber each
    # other's temporary files
    tmp_fname = 'PMC%s_%d.nxml' % (key, mp.current_process().pid)
    with open(tmp_fname, 'wb') as fh:
        fh.write(nxml_str.encode('utf-8'))
    try:
        sp = process_nxml_file(tmp_fname, output_fmt, outbuf, cleanup,
                               **kwargs)
    finally:
        if cleanup and os.path.exists(tmp_fname):
            os.remove(tmp_fname)
    return sp
Return processor with Statements extracted by reading an NXML string .
168
12
17,466
def process_nxml_file(fname, output_fmt='json', outbuf=None, cleanup=True,
                      **kwargs):
    """Return processor with Statements extracted by reading an NXML file.

    Runs Sparser on the file and processes its output. Returns None when
    reading fails; the Sparser output file is removed when cleanup is True.
    """
    sp = None
    out_fname = None
    try:
        out_fname = run_sparser(fname, output_fmt, outbuf, **kwargs)
        sp = process_sparser_output(out_fname, output_fmt)
    except Exception as e:
        # Errors are logged rather than propagated so batch processing of
        # many files can continue past individual failures
        logger.error("Sparser failed to run on %s." % fname)
        logger.exception(e)
    finally:
        if out_fname is not None and os.path.exists(out_fname) and cleanup:
            os.remove(out_fname)
    return sp
Return processor with Statements extracted by reading an NXML file .
158
12
17,467
def process_sparser_output(output_fname, output_fmt='json'):
    """Return a processor with Statements extracted from Sparser XML or
    JSON output; None when the format is unrecognized."""
    if output_fmt not in ['json', 'xml']:
        logger.error("Unrecognized output format '%s'." % output_fmt)
        return None
    with open(output_fname, 'rt') as fh:
        if output_fmt == 'json':
            sp = process_json_dict(json.load(fh))
        else:
            sp = process_xml(fh.read())
    return sp
Return a processor with Statements extracted from Sparser XML or JSON
138
12
17,468
def process_xml(xml_str):
    """Return processor with Statements extracted from a Sparser XML.

    Returns None when the string cannot be parsed as XML.
    """
    try:
        tree = ET.XML(xml_str, parser=UTB())
    except ET.ParseError as e:
        logger.error('Could not parse XML string')
        logger.error(e)
        return None
    return _process_elementtree(tree)
Return processor with Statements extracted from a Sparser XML .
70
11
17,469
def run_sparser(fname, output_fmt, outbuf=None, timeout=600):
    """Return the path to reading output after running Sparser reading.

    Parameters
    ----------
    fname : str
        Path of the NXML file to read.
    output_fmt : str
        Either 'xml' or 'json'.
    outbuf : Optional[file]
        If given, Sparser's stdout is written to this buffer.
    timeout : int
        Seconds to wait before the Sparser process is killed.

    Returns
    -------
    str or None
        Path of the '-semantics' output file, or None when the Sparser
        executable is not configured or the format is unknown.
    """
    if not sparser_path or not os.path.exists(sparser_path):
        logger.error('Sparser executable not set in %s' % sparser_path_var)
        return None
    if output_fmt == 'xml':
        format_flag = '-x'
        suffix = '.xml'
    elif output_fmt == 'json':
        format_flag = '-j'
        suffix = '.json'
    else:
        logger.error('Unknown output format: %s' % output_fmt)
        return None
    sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')
    # Output file name mirrors the input name with a '-semantics' suffix
    output_path = fname.split('.')[0] + '-semantics' + suffix
    for fpath in [sparser_exec_path, fname]:
        if not os.path.exists(fpath):
            raise Exception("'%s' is not a valid path." % fpath)
    cmd_list = [sparser_exec_path, format_flag, fname]
    # This is mostly a copy of the code found in subprocess.run, with the
    # key change that proc.kill is replaced with os.killpg. This allows the
    # process to be killed even if it has children. Solution developed from:
    # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure
    with sp.Popen(cmd_list, stdout=sp.PIPE) as proc:
        try:
            stdout, stderr = proc.communicate(timeout=timeout)
        except sp.TimeoutExpired:
            # Yes, this is about as bad as it looks. But it is the only way to
            # be sure the script actually dies.
            sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname])
            stdout, stderr = proc.communicate()
            raise sp.TimeoutExpired(proc.args, timeout, output=stdout,
                                    stderr=stderr)
        except BaseException:
            # See comment on above instance.
            sp.check_call(['pkill', '-f', fname])
            proc.wait()
            raise
        retcode = proc.poll()
        if retcode:
            raise sp.CalledProcessError(retcode, proc.args, output=stdout,
                                        stderr=stderr)
    if outbuf is not None:
        outbuf.write(stdout)
        outbuf.flush()
    # NOTE(review): assert is stripped under python -O; a raise would be
    # safer if this check must always run
    assert os.path.exists(output_path), \
        'No output file \"%s\" created by sparser.' % output_path
    return output_path
Return the path to reading output after running Sparser reading .
600
12
17,470
def get_version():
    """Return the version of the Sparser executable on the path.

    Reads version.txt from the configured Sparser directory.
    NOTE(review): uses assert for input validation, which is stripped
    under python -O.
    """
    assert sparser_path is not None, "Sparser path is not defined."
    with open(os.path.join(sparser_path, 'version.txt'), 'r') as f:
        version = f.read().strip()
    return version
Return the version of the Sparser executable on the path .
65
12
17,471
def make_nxml_from_text(text):
    """Return raw text wrapped in a minimal NXML document structure."""
    escaped = _escape_xml(text)
    header = ('<?xml version="1.0" encoding="UTF-8" ?>'
              '<OAI-PMH><article><body><sec id="s1"><p>')
    footer = '</p></sec></body></article></OAI-PMH>'
    return header + escaped + footer
Return raw text wrapped in NXML structure .
100
9
17,472
def get_hgnc_name(hgnc_id):
    """Return the HGNC symbol corresponding to the given HGNC ID.

    First tries the local hgnc_names lookup table; on a miss, falls back
    to the HGNC web service. Returns None when the remote entry or its
    symbol field cannot be found.
    """
    try:
        hgnc_name = hgnc_names[hgnc_id]
    except KeyError:
        xml_tree = get_hgnc_entry(hgnc_id)
        if xml_tree is None:
            return None
        hgnc_name_tag = xml_tree.find("result/doc/str[@name='symbol']")
        if hgnc_name_tag is None:
            return None
        hgnc_name = hgnc_name_tag.text.strip()
    return hgnc_name
Return the HGNC symbol corresponding to the given HGNC ID .
134
13
17,473
def get_hgnc_entry(hgnc_id):
    """Return the HGNC entry for the given HGNC ID from the web service.

    Returns the parsed XML tree, or None when the HTTP request does not
    return status 200.
    """
    url = hgnc_url + 'hgnc_id/%s' % hgnc_id
    headers = {'Accept': '*/*'}
    res = requests.get(url, headers=headers)
    if not res.status_code == 200:
        return None
    xml_tree = ET.XML(res.content, parser=UTB())
    return xml_tree
Return the HGNC entry for the given HGNC ID from the web service .
99
16
17,474
def analyze_reach_log(log_fname=None, log_str=None):
    """Return unfinished PMIDs given a log file name or a log string.

    Exactly one of log_fname or log_str must be given.

    Returns
    -------
    dict
        Keys 'started' and 'finished' map to lists of PMID strings found
        in the log; 'not_done' maps to the set difference of the two.
    """
    assert bool(log_fname) ^ bool(log_str), 'Must specify log_fname OR log_str'
    # Raw strings avoid invalid-escape warnings for \d and \[
    started_patt = re.compile(r'Starting ([\d]+)')
    # TODO: it might be interesting to get the time it took to read
    # each paper here
    finished_patt = re.compile(r'Finished ([\d]+)')

    def get_content_nums(txt):
        pat = r'Retrieved content for ([\d]+) / ([\d]+) papers to be read'
        res = re.match(pat, txt)
        # BUG FIX: the original `a, b = x if c else None, None` parsed as
        # `a, b = (x if c else None), None`, assigning the whole groups
        # tuple to has_content and always None to total.
        has_content, total = res.groups() if res else (None, None)
        return has_content, total

    if log_fname:
        with open(log_fname, 'r') as fh:
            log_str = fh.read()
    # has_content, total = get_content_nums(log_str)  # unused
    pmids = {}
    pmids['started'] = started_patt.findall(log_str)
    pmids['finished'] = finished_patt.findall(log_str)
    pmids['not_done'] = set(pmids['started']) - set(pmids['finished'])
    return pmids
Return unfinished PMIDs given a log file name.
306
12
17,475
def get_logs_from_db_reading(job_prefix, reading_queue='run_db_reading_queue'):
    """Get the logs stashed on s3 for a particular reading.

    NOTE(review): list_objects_v2 returns at most 1000 keys per call and
    omits 'Contents' when nothing matches — confirm both cases are
    acceptable for expected job sizes.
    """
    s3 = boto3.client('s3')
    gen_prefix = 'reading_results/%s/logs/%s' % (job_prefix, reading_queue)
    job_log_data = s3.list_objects_v2(Bucket='bigmech',
                                      Prefix=join(gen_prefix, job_prefix))
    # TODO: Track success/failure
    log_strs = []
    for fdict in job_log_data['Contents']:
        resp = s3.get_object(Bucket='bigmech', Key=fdict['Key'])
        log_strs.append(resp['Body'].read().decode('utf-8'))
    return log_strs
Get the logs stashed on s3 for a particular reading .
201
13
17,476
def separate_reach_logs(log_str):
    """Get the list of reach logs from the overall logs.

    Returns
    -------
    tuple
        (remaining log text, list of (status, reach log text) pairs)
        where status is 'SUCCEEDED' for a completed reach section or
        'FAILURE' for one that never saw a 'Reach finished' marker.
    """
    log_lines = log_str.splitlines()
    reach_logs = []
    reach_lines = []
    adding_reach_lines = False
    # Iterate over a copy since lines are removed from log_lines below
    for l in log_lines[:]:
        if not adding_reach_lines and 'Beginning reach' in l:
            adding_reach_lines = True
        elif adding_reach_lines and 'Reach finished' in l:
            adding_reach_lines = False
            reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines)))
            reach_lines = []
        elif adding_reach_lines:
            # assumes every line inside a reach section contains the
            # 'readers - ' marker — TODO confirm; an IndexError here would
            # abort the separation
            reach_lines.append(l.split('readers - ')[1])
            log_lines.remove(l)
    if adding_reach_lines:
        reach_logs.append(('FAILURE', '\n'.join(reach_lines)))
    return '\n'.join(log_lines), reach_logs
Get the list of reach logs from the overall logs .
221
11
17,477
def get_unyielding_tcids(log_str):
    """Extract the set of tcids for which no statements were created.

    Parameters
    ----------
    log_str : str
        The log text to scan.

    Returns
    -------
    set of int
        Text content ids mentioned in 'Got no statements for' lines.
    """
    # Raw string avoids invalid-escape warnings for \[ and \d
    tcid_strs = re.findall(r'INFO: \[.*?\].*? - Got no statements for (\d+).*',
                           log_str)
    return {int(tcid_str) for tcid_str in tcid_strs}
Extract the set of tcids for which no statements were created .
75
14
17,478
def analyze_db_reading(job_prefix, reading_queue='run_db_reading_queue'):
    """Run various analysis on a particular reading job.

    Returns
    -------
    tuple
        (set of unfinished tcids, dict of summed stats, list of per-log
        stat dicts).
    """
    # Analyze reach failures
    log_strs = get_logs_from_db_reading(job_prefix, reading_queue)
    indra_log_strs = []
    all_reach_logs = []
    log_stats = []
    for log_str in log_strs:
        log_str, reach_logs = separate_reach_logs(log_str)
        all_reach_logs.extend(reach_logs)
        indra_log_strs.append(log_str)
        log_stats.append(get_reading_stats(log_str))
    # Analyze the reach failures.
    failed_reach_logs = [reach_log_str
                         for result, reach_log_str in all_reach_logs
                         if result == 'FAILURE']
    failed_id_dicts = [analyze_reach_log(log_str=reach_log)
                       for reach_log in failed_reach_logs if bool(reach_log)]
    # BUG FIX: 'not_done' is itself a set of ids; the original set
    # comprehension tried to build a set of sets, which raises TypeError
    # (sets are unhashable). Take the union of all the id sets instead.
    tcids_unfinished = set()
    for id_dict in failed_id_dicts:
        tcids_unfinished |= id_dict['not_done']
    print("Found %d unfinished tcids." % len(tcids_unfinished))
    # Summarize the global stats.
    # BUG FIX: the original seeded sum_dict with dict.fromkeys(...), which
    # maps every key to None; the subsequent `k not in sum_dict` guards
    # therefore never initialized the accumulators and summation crashed
    # on None. Start empty so the guards actually run.
    sum_dict = {}
    for log_stat in log_stats:
        for k, v in log_stat.items():
            if isinstance(v, list):
                if k not in sum_dict:
                    sum_dict[k] = [0] * len(v)
                sum_dict[k] = [sum_dict[k][i] + v[i] for i in range(len(v))]
            else:
                if k not in sum_dict:
                    sum_dict[k] = 0
                sum_dict[k] += v
    return tcids_unfinished, sum_dict, log_stats
Run various analysis on a particular reading job .
489
9
17,479
def process_pc_neighborhood(gene_names, neighbor_limit=1,
                            database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons neighborhood query;
    None when the query yields no model."""
    model = pcc.graph_query('neighborhood', gene_names,
                            neighbor_limit=neighbor_limit,
                            database_filter=database_filter)
    if model is None:
        return None
    return process_model(model)
Returns a BiopaxProcessor for a PathwayCommons neighborhood query .
76
16
17,480
def process_pc_pathsbetween(gene_names, neighbor_limit=1,
                            database_filter=None, block_size=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-between query.

    Parameters
    ----------
    gene_names : list
        Gene names to query paths between.
    neighbor_limit : int
        Search distance passed to the graph query.
    database_filter : Optional[list]
        Databases to restrict the query to.
    block_size : Optional[int]
        If given, gene_names are split into blocks of this size and the
        query is decomposed into pairwise sub-queries.
    """
    if not block_size:
        model = pcc.graph_query('pathsbetween', gene_names,
                                neighbor_limit=neighbor_limit,
                                database_filter=database_filter)
        if model is not None:
            return process_model(model)
        return None
    gene_blocks = [gene_names[i:i + block_size]
                   for i in range(0, len(gene_names), block_size)]
    stmts = []
    # Run pathsfromto between pairs of blocks and pathsbetween
    # within each block. This breaks up a single call with N genes into
    # (N/block_size)*(N/blocksize) calls with block_size genes
    for genes1, genes2 in itertools.product(gene_blocks, repeat=2):
        if genes1 == genes2:
            bp = process_pc_pathsbetween(genes1,
                                         database_filter=database_filter,
                                         block_size=None)
        else:
            bp = process_pc_pathsfromto(genes1, genes2,
                                        database_filter=database_filter)
        # BUG FIX: sub-queries return None when no model is found; the
        # original unconditionally accessed bp.statements and crashed.
        if bp is not None:
            stmts += bp.statements
    # NOTE(review): the collected statements are never returned (the
    # block-mode branch has always fallen through to None) — confirm the
    # intended return value against callers before surfacing stmts.
Returns a BiopaxProcessor for a PathwayCommons paths - between query .
270
18
17,481
def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1,
                           database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-from-to query;
    None when the query yields no model."""
    model = pcc.graph_query('pathsfromto', source_genes, target_genes,
                            neighbor_limit=neighbor_limit,
                            database_filter=database_filter)
    if model is None:
        return None
    return process_model(model)
Returns a BiopaxProcessor for a PathwayCommons paths - from - to query .
88
20
17,482
def process_model(model):
    """Returns a BiopaxProcessor for a BioPAX model object."""
    bp = BiopaxProcessor(model)
    # Run each supported extraction pass over the model
    extraction_passes = (bp.get_modifications,
                         bp.get_regulate_activities,
                         bp.get_regulate_amounts,
                         bp.get_activity_modification,
                         bp.get_gef,
                         bp.get_gap,
                         bp.get_conversions)
    for extract in extraction_passes:
        extract()
    # bp.get_complexes()
    bp.eliminate_exact_duplicates()
    return bp
Returns a BiopaxProcessor for a BioPAX model object .
114
15
17,483
def is_background_knowledge(stmt):
    """Return True if Statement is only supported by background knowledge."""
    found_background = False
    for ev in stmt.evidence:
        if ev.epistemics is None:
            continue
        section = ev.epistemics.get('section_type')
        if section is None:
            continue
        # A single piece of evidence from a non-background section makes
        # this a non-background finding.
        if section not in background_secs:
            return False
        # Otherwise remember the explicit background evidence.
        found_background = True
    # True only when some evidence was explicitly from a background section
    # and none was from elsewhere; no section info at all yields False.
    return found_background
Return True if Statement is only supported by background knowledge .
192
11
17,484
def multiple_sources(stmt):
    """Return True if statement is supported by multiple sources."""
    distinct_apis = {ev.source_api for ev in stmt.evidence}
    return len(distinct_apis) > 1
Return True if statement is supported by multiple sources .
44
10
17,485
def id_to_symbol(self, entrez_id):
    """Gives the symbol for a given Entrez ID.

    Raises Exception when the id is not in the lookup table.
    """
    key = str(entrez_id)
    if key not in self.ids_to_symbols:
        raise Exception('Could not look up symbol for Entrez ID ' + key)
    return self.ids_to_symbols[key]
Gives the symbol for a given Entrez ID.
86
11
17,486
def make_model(self, output_file, add_curation_cols=False, up_only=False):
    """Export the statements into a tab-separated text file.

    Parameters
    ----------
    output_file : str
        Path of the TSV file to write.
    add_curation_cols : bool
        If True, append empty curation columns to the header and rows.
    up_only : bool
        Passed through to _format_agent_entries.
    """
    stmt_header = ['INDEX', 'UUID', 'TYPE', 'STR', 'AG_A_TEXT', 'AG_A_LINKS',
                   'AG_A_STR', 'AG_B_TEXT', 'AG_B_LINKS', 'AG_B_STR', 'PMID',
                   'TEXT', 'IS_HYP', 'IS_DIRECT']
    if add_curation_cols:
        stmt_header = stmt_header + ['AG_A_IDS_CORRECT', 'AG_A_STATE_CORRECT',
                                     'AG_B_IDS_CORRECT', 'AG_B_STATE_CORRECT',
                                     'EVENT_CORRECT', 'RES_CORRECT',
                                     'POS_CORRECT', 'SUBJ_ACT_CORRECT',
                                     'OBJ_ACT_CORRECT', 'HYP_CORRECT',
                                     'DIRECT_CORRECT']
    rows = [stmt_header]
    for ix, stmt in enumerate(self.statements):
        # Complexes
        if len(stmt.agent_list()) > 2:
            logger.info("Skipping statement with more than two members: %s"
                        % stmt)
            continue
        # Self-modifications, ActiveForms
        elif len(stmt.agent_list()) == 1:
            ag_a = stmt.agent_list()[0]
            ag_b = None
        # All others
        else:
            (ag_a, ag_b) = stmt.agent_list()
        # Put together the data row; only the first evidence is exported
        row = [ix + 1, stmt.uuid, stmt.__class__.__name__, str(stmt)] + \
            _format_agent_entries(ag_a, up_only) + \
            _format_agent_entries(ag_b, up_only) + \
            [stmt.evidence[0].pmid, stmt.evidence[0].text,
             stmt.evidence[0].epistemics.get('hypothesis', ''),
             stmt.evidence[0].epistemics.get('direct', '')]
        if add_curation_cols:
            # Curation columns start out blank
            row = row + ([''] * 11)
        rows.append(row)
    # Write to file
    write_unicode_csv(output_file, rows, delimiter='\t')
Export the statements into a tab - separated text file .
571
11
17,487
def get_create_base_agent(self, agent):
    """Return base agent with given name, creating it if needed.

    Also registers the agent's bound, modification, mutation, location
    and activity conditions as sites/states on the base agent, and
    merges db_refs.
    """
    try:
        base_agent = self.agents[_n(agent.name)]
    except KeyError:
        base_agent = BaseAgent(_n(agent.name))
        self.agents[_n(agent.name)] = base_agent
    # If it's a molecular agent
    if isinstance(agent, Agent):
        # Handle bound conditions
        for bc in agent.bound_conditions:
            bound_base_agent = self.get_create_base_agent(bc.agent)
            bound_base_agent.create_site(get_binding_site_name(agent))
            base_agent.create_site(get_binding_site_name(bc.agent))
        # Handle modification conditions
        for mc in agent.mods:
            base_agent.create_mod_site(mc)
        # Handle mutation conditions
        for mc in agent.mutations:
            res_from = mc.residue_from if mc.residue_from else 'mut'
            res_to = mc.residue_to if mc.residue_to else 'X'
            if mc.position is None:
                mut_site_name = res_from
            else:
                mut_site_name = res_from + mc.position
            base_agent.create_site(mut_site_name, states=['WT', res_to])
        # Handle location condition
        if agent.location is not None:
            base_agent.create_site('loc', [_n(agent.location)])
        # Handle activity
        if agent.activity is not None:
            site_name = agent.activity.activity_type
            base_agent.create_site(site_name, ['inactive', 'active'])
    # There might be overwrites here
    for db_name, db_ref in agent.db_refs.items():
        base_agent.db_refs[db_name] = db_ref
    return base_agent
Return base agent with given name creating it if needed .
415
11
17,488
def create_site(self, site, states=None):
    """Create a new site on an agent if it doesn't already exist.

    Parameters
    ----------
    site : str
        Name of the site to create.
    states : Optional[iterable]
        States to register for the site; a non-iterable value is
        silently ignored (the bare `return` below).
    """
    if site not in self.sites:
        self.sites.append(site)
    if states is not None:
        self.site_states.setdefault(site, [])
        try:
            states = list(states)
        except TypeError:
            # Non-iterable state specs are deliberately ignored
            return
        self.add_site_states(site, states)
Create a new site on an agent if it doesn t already exist .
75
14
17,489
def create_mod_site(self, mc):
    """Create modification site for the BaseAgent from a ModCondition."""
    site_name = get_mod_site_name(mc)
    unmod_state, mod_state = states[mc.mod_type]
    self.create_site(site_name, (unmod_state, mod_state))
    # Annotate the site with the modification type and, when available,
    # the residue and position it refers to.
    anns = [Annotation((site_name, mod_state), mc.mod_type,
                       'is_modification')]
    if mc.residue:
        anns.append(Annotation(site_name, mc.residue, 'is_residue'))
    if mc.position:
        anns.append(Annotation(site_name, mc.position, 'is_position'))
    self.site_annotations += anns
Create modification site for the BaseAgent from a ModCondition .
182
12
17,490
def add_site_states(self, site, states):
    """Create new states on an agent site if the state doesn't exist."""
    known_states = self.site_states[site]
    for state in states:
        if state not in known_states:
            known_states.append(state)
Create new states on an agent site if the state doesn t exist .
45
14
17,491
def add_activity_form(self, activity_pattern, is_active):
    """Adds the pattern as an active or inactive form to an Agent."""
    forms = self.active_forms if is_active else self.inactive_forms
    if activity_pattern not in forms:
        forms.append(activity_pattern)
Adds the pattern as an active or inactive form to an Agent .
73
13
17,492
def add_activity_type(self, activity_type):
    """Adds an activity type to an Agent, skipping duplicates."""
    if activity_type in self.activity_types:
        return
    self.activity_types.append(activity_type)
Adds an activity type to an Agent .
38
8
17,493
def make_annotation(self):
    """Returns a dictionary with all properties of the action and each of
    its action mentions."""
    annotation = {}
    # Collect every public, non-method attribute of the action object
    for attr in dir(self):
        if attr and not attr.startswith('_'):
            value = getattr(self, attr)
            if not inspect.ismethod(value):
                annotation[attr] = value
    # Replace action_mentions with the annotations of each mention
    annotation['action_mentions'] = [am.make_annotation()
                                     for am in self.action_mentions]
    return annotation
Returns a dictionary with all properties of the action and each of its action mentions .
145
16
17,494
def _search_path ( self , directory_name , filename ) : full_path = path . join ( directory_name , filename ) if path . exists ( full_path ) : return full_path # Could not find the requested file in any of the directories return None
Searches for a given file in the specified directory .
57
12
17,495
def _init_action_list(self, action_filename):
    """Parses the file and populates the data.

    Reads one GenewaysAction per line (latin-1 encoded, header line
    skipped) and builds a hiid -> list-index lookup, enforcing hiid
    uniqueness.
    NOTE(review): the file handle is never closed explicitly.
    """
    self.actions = list()
    self.hiid_to_action_index = dict()
    f = codecs.open(action_filename, 'r', encoding='latin-1')
    first_line = True
    for line in f:
        line = line.rstrip()
        if first_line:
            # Ignore the first line
            first_line = False
        else:
            self.actions.append(GenewaysAction(line))
            latestInd = len(self.actions) - 1
            hiid = self.actions[latestInd].hiid
            if hiid in self.hiid_to_action_index:
                raise Exception('action hiid not unique: %d' % hiid)
            self.hiid_to_action_index[hiid] = latestInd
Parses the file and populates the data .
181
11
17,496
def _link_to_action_mentions(self, actionmention_filename):
    """Add action mentions, attaching each to its action via the hiid."""
    parser = GenewaysActionMentionParser(actionmention_filename)
    self.action_mentions = parser.action_mentions
    for mention in self.action_mentions:
        hiid = mention.hiid
        if hiid not in self.hiid_to_action_index:
            raise Exception('Parsed action mention has hiid %d, which does '
                            'not exist in table of action hiids' % hiid)
        idx = self.hiid_to_action_index[hiid]
        self.actions[idx].action_mentions.append(mention)
Add action mentions
171
3
17,497
def _lookup_symbols(self, symbols_filename):
    """Look up symbols for actions and action mentions."""
    lookup = GenewaysSymbols(symbols_filename)
    for action in self.actions:
        action.up_symbol = lookup.id_to_symbol(action.up)
        action.dn_symbol = lookup.id_to_symbol(action.dn)
Look up symbols for actions and action mentions
86
8
17,498
def get_top_n_action_types(self, top_n):
    """Returns the top N action types by count.

    Parameters
    ----------
    top_n : int
        Number of distinct action types to return.

    Returns
    -------
    list
        The top_n action types, most frequent first (order among equal
        counts is unspecified).

    Raises
    ------
    Exception
        If top_n exceeds the number of distinct action types.
    """
    # Count occurrences of each action type
    type_counts = {}
    for action in self.actions:
        t = action.actiontype
        type_counts[t] = type_counts.get(t, 0) + 1
    if top_n > len(type_counts):
        # BUG FIX: in the original, '%' bound only to the second string
        # literal of the '+'-joined message while the tuple had two values,
        # so the raise itself died with a TypeError instead of the
        # intended message.
        raise Exception('Asked for top %d action types, '
                        'but there are only %d action types'
                        % (top_n, len(type_counts)))
    # Sort types by descending count and keep the first top_n
    ordered = sorted(type_counts, key=type_counts.get, reverse=True)
    return ordered[:top_n]
Returns the top N actions by count .
370
8
17,499
def get_string(self):
    """Return the assembled graph as a string.

    Literal '\\N' sequences emitted by the graph library are replaced
    with '\\n'.
    """
    rendered = self.graph.to_string()
    return rendered.replace('\\N', '\\n')
Return the assembled graph as a string .
45
8