idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
17,600
def generate_jupyter_js(self, cyjs_style=None, cyjs_layout=None):
    """Generate Javascript from a template to run in Jupyter notebooks.

    Fills the bundled ``cag_template.js`` template with the CytoscapeJS
    elements exported from this CAG plus a style and layout.

    Parameters
    ----------
    cyjs_style : Optional[dict]
        A CytoscapeJS style; defaults to the 'style' entry of the
        bundled ``cag_style.json``.
    cyjs_layout : Optional[dict]
        A CytoscapeJS layout; defaults to the 'layout' entry of the
        bundled ``cag_style.json``.

    Returns
    -------
    str
        Javascript source ready to be displayed in a notebook.
    """
    # First, export the CAG to CyJS
    cyjs_elements = self.export_to_cytoscapejs()
    # Load the Javascript template shipped next to this module
    tempf = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         'cag_template.js')
    with open(tempf, 'r') as fh:
        template = fh.read()
    # Load the default style and layout
    stylef = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          'cag_style.json')
    with open(stylef, 'r') as fh:
        style = json.load(fh)
    # Apply style and layout only if arg wasn't passed in
    if cyjs_style is None:
        cyjs_style = style['style']
    if cyjs_layout is None:
        cyjs_layout = style['layout']
    # Now fill in the template
    formatted_args = tuple(json.dumps(x, indent=2) for x in
                           (cyjs_elements, cyjs_style, cyjs_layout))
    js_str = template % formatted_args
    return js_str
Generate Javascript from a template to run in Jupyter notebooks .
295
15
17,601
def _node_name ( self , concept ) : if ( # grounding threshold is specified self . grounding_threshold is not None # The particular eidos ontology grounding (un/wdi/fao) is present and concept . db_refs [ self . grounding_ontology ] # The grounding score is above the grounding threshold and ( concept . db_refs [ self . grounding_ontology ] [ 0 ] [ 1 ] > self . grounding_threshold ) ) : entry = concept . db_refs [ self . grounding_ontology ] [ 0 ] [ 0 ] return entry . split ( '/' ) [ - 1 ] . replace ( '_' , ' ' ) . capitalize ( ) else : return concept . name . capitalize ( )
Return a standardized name for a node given a Concept .
161
11
17,602
def term_from_uri(uri):
    """Remove prepended OpenBEL URI information from a term.

    Returns None for None input; any other unmatched input is returned
    unchanged (e.g. a site "341" or a substitution "sub(V,600,E)").
    """
    if uri is None:
        return None
    # Literals with integer values (as used for modification positions)
    # must be converted to strings, not integers.
    if isinstance(uri, rdflib.Literal):
        uri = str(uri.toPython())
    # Handle both double- and single-slash namespace forms, e.g.
    # http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family
    # http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family
    # The double-slash patterns must be tried first.
    patterns = ['http://www.openbel.org/bel/namespace//(.*)',
                'http://www.openbel.org/vocabulary//(.*)',
                'http://www.openbel.org/bel//(.*)',
                'http://www.openbel.org/bel/namespace/(.*)',
                'http://www.openbel.org/vocabulary/(.*)',
                'http://www.openbel.org/bel/(.*)']
    for pattern in patterns:
        match = re.match(pattern, uri)
        if match is not None:
            # Decode percent-escapes in the captured term
            return unquote(match.groups()[0])
    # No pattern matched: the URI is already a plain term
    return uri
Removes prepended URI information from terms .
348
9
17,603
def get_activating_mods ( self ) : q_mods = prefixes + """ SELECT ?speciesName ?actType ?mod ?pos ?rel ?stmt ?species WHERE { ?stmt a belvoc:Statement . ?stmt belvoc:hasRelationship ?rel . ?stmt belvoc:hasSubject ?subject . ?stmt belvoc:hasObject ?object . ?object belvoc:hasActivityType ?actType . ?object belvoc:hasChild ?species . ?species a belvoc:ProteinAbundance . ?species belvoc:hasConcept ?speciesName . ?subject a belvoc:ModifiedProteinAbundance . ?subject belvoc:hasModificationType ?mod . ?subject belvoc:hasChild ?species . OPTIONAL { ?subject belvoc:hasModificationPosition ?pos . } FILTER (?rel = belvoc:DirectlyIncreases || ?rel = belvoc:DirectlyDecreases) } """ # Now make the PySB for the phosphorylation res_mods = self . g . query ( q_mods ) for stmt in res_mods : evidence = self . _get_evidence ( stmt [ 5 ] ) # Parse out the elements of the query species = self . _get_agent ( stmt [ 0 ] , stmt [ 6 ] ) act_type = term_from_uri ( stmt [ 1 ] ) . lower ( ) mod = term_from_uri ( stmt [ 2 ] ) mod_pos = term_from_uri ( stmt [ 3 ] ) mc = self . _get_mod_condition ( mod , mod_pos ) species . mods = [ mc ] rel = term_from_uri ( stmt [ 4 ] ) if rel == 'DirectlyDecreases' : is_active = False else : is_active = True stmt_str = strip_statement ( stmt [ 5 ] ) # Mark this as a converted statement self . converted_direct_stmts . append ( stmt_str ) st = ActiveForm ( species , act_type , is_active , evidence ) self . statements . append ( st )
Extract INDRA ActiveForm Statements with a single mod from BEL .
459
14
17,604
def get_complexes(self):
    """Extract INDRA Complex Statements from BEL.

    Collects the members of each complex abundance term across all
    statements and creates one Complex Statement per complex with at
    least two members; complexes with fewer members are skipped with a
    warning.
    """
    q_cmplx = prefixes + """
        SELECT ?complexTerm ?childName ?child ?stmt
        WHERE {
            {
                {?stmt belvoc:hasSubject ?complexTerm}
                UNION
                {?stmt belvoc:hasObject ?complexTerm .}
                UNION
                {?stmt belvoc:hasSubject ?term .
                 ?term belvoc:hasChild ?complexTerm .}
                UNION
                {?stmt belvoc:hasObject ?term .
                 ?term belvoc:hasChild ?complexTerm .}
            }
            ?complexTerm a belvoc:Term .
            ?complexTerm a belvoc:ComplexAbundance .
            ?complexTerm belvoc:hasChild ?child .
            ?child belvoc:hasConcept ?childName .
        }
    """
    # Run the query
    res_cmplx = self.g.query(q_cmplx)
    # Store the members of each complex in a dict of lists, keyed by the
    # term for the complex
    cmplx_dict = collections.defaultdict(list)
    cmplx_ev = {}
    for stmt in res_cmplx:
        stmt_uri = stmt[3]
        ev = self._get_evidence(stmt_uri)
        for e in ev:
            e.epistemics['direct'] = True
        cmplx_name = term_from_uri(stmt[0])
        # Key on statement URI + name to keep same-named complexes from
        # different statements separate
        cmplx_id = stmt_uri + '#' + cmplx_name
        child = self._get_agent(stmt[1], stmt[2])
        cmplx_dict[cmplx_id].append(child)
        # This might be written multiple times but with the same
        # evidence
        cmplx_ev[cmplx_id] = ev
    # Now iterate over the stored complex information and create binding
    # statements
    for cmplx_id, cmplx_list in cmplx_dict.items():
        if len(cmplx_list) < 2:
            # Fix: recover the name of the complex actually being
            # skipped from its id; previously this used the stale
            # cmplx_name left over from the last loop iteration above,
            # reporting the wrong complex in the warning.
            cmplx_name = cmplx_id.rsplit('#', 1)[1]
            msg = 'Complex %s has less than 2 members! Skipping.' % \
                cmplx_name
            logger.warning(msg)
        else:
            self.statements.append(Complex(cmplx_list,
                                           evidence=cmplx_ev[cmplx_id]))
Extract INDRA Complex Statements from BEL .
491
9
17,605
def get_activating_subs(self):
    """Extract INDRA ActiveForm Statements based on a mutation from BEL.

    Queries for protein abundances carrying a substitution expression
    whose activity is directly increased/decreased, and converts each to
    an ActiveForm Statement with a MutCondition.
    """
    q_mods = prefixes + """
        SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject
        WHERE {
            ?stmt a belvoc:Statement .
            ?stmt belvoc:hasRelationship ?rel .
            ?stmt belvoc:hasSubject ?subject .
            ?stmt belvoc:hasObject ?object .
            ?subject a belvoc:ProteinAbundance .
            ?subject belvoc:hasConcept ?enzyme_name .
            ?subject belvoc:hasChild ?sub_expr .
            ?sub_expr rdfs:label ?sub_label .
            ?object a belvoc:AbundanceActivity .
            ?object belvoc:hasActivityType ?act_type .
            ?object belvoc:hasChild ?enzyme .
            ?enzyme a belvoc:ProteinAbundance .
            ?enzyme belvoc:hasConcept ?enzyme_name .
        }
    """
    # Now make the PySB for the phosphorylation
    res_mods = self.g.query(q_mods)
    for stmt in res_mods:
        evidence = self._get_evidence(stmt[4])
        # Parse out the elements of the query
        enz = self._get_agent(stmt[0], stmt[5])
        sub_expr = term_from_uri(stmt[1])
        act_type = term_from_uri(stmt[2]).lower()
        # Parse the WT and substituted residues from the node label.
        # Strangely, the RDF for substituted residue doesn't break the
        # terms of the BEL expression down into their meaning, as happens
        # for modified protein abundances. Instead, the substitution
        # just comes back as a string, e.g., "sub(V,600,E)". This code
        # parses the arguments back out using a regular expression.
        # Fix: raw string — '\(' in a non-raw string is an invalid
        # escape sequence (DeprecationWarning since Python 3.6).
        match = re.match(r'sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr)
        if match:
            matches = match.groups()
            wt_residue = matches[0]
            position = matches[1]
            sub_residue = matches[2]
        else:
            logger.warning("Could not parse substitution expression %s" %
                           sub_expr)
            continue
        mc = MutCondition(position, wt_residue, sub_residue)
        enz.mutations = [mc]
        rel = strip_statement(stmt[3])
        # DirectlyDecreases means the mutated form is inactive
        if rel == 'DirectlyDecreases':
            is_active = False
        else:
            is_active = True
        stmt_str = strip_statement(stmt[4])
        # Mark this as a converted statement
        self.converted_direct_stmts.append(stmt_str)
        st = ActiveForm(enz, act_type, is_active, evidence)
        self.statements.append(st)
Extract INDRA ActiveForm Statements based on a mutation from BEL .
621
14
17,606
def get_conversions(self):
    """Extract Conversion INDRA Statements from BEL.

    Groups query rows by statement URI so that all reactants and
    products of a reaction can be collected into a single Conversion
    Statement.
    """
    query = prefixes + """
        SELECT DISTINCT ?controller ?controllerName ?controllerActivity
            ?product ?productName ?reactant ?reactantName ?stmt
        WHERE {
            ?stmt a belvoc:Statement .
            ?stmt belvoc:hasRelationship ?rel .
            ?stmt belvoc:hasSubject ?subject .
            ?stmt belvoc:hasObject ?rxn .
            ?subject a belvoc:AbundanceActivity .
            ?subject belvoc:hasActivityType ?controllerActivity .
            ?subject belvoc:hasChild ?controller .
            ?controller belvoc:hasConcept ?controllerName .
            ?rxn a belvoc:Reaction .
            ?rxn belvoc:hasChild ?reactants .
            ?reactants rdfs:label ?reactLabel .
            FILTER (regex(?reactLabel, "^reactants.*"))
            ?rxn belvoc:hasChild ?products .
            ?products rdfs:label ?prodLabel .
            FILTER (regex(?prodLabel, "^products.*"))
            ?reactants belvoc:hasChild ?reactant .
            ?products belvoc:hasChild ?product .
            ?reactant belvoc:hasConcept ?reactantName .
            ?product belvoc:hasConcept ?productName .
        }
    """
    res = self.g.query(query)
    # We need to collect all pieces of the same statement so that we can
    # collect multiple reactants and products
    stmt_map = collections.defaultdict(list)
    for stmt in res:
        stmt_map[stmt[-1]].append(stmt)
    for stmts in stmt_map.values():
        # First we get the shared part of the Statement
        stmt = stmts[0]
        subj = self._get_agent(stmt[1], stmt[0])
        evidence = self._get_evidence(stmt[-1])
        stmt_str = strip_statement(stmt[-1])
        # Now we collect the participants, deduplicated by name
        obj_from_map = {}
        obj_to_map = {}
        for stmt in stmts:
            reactant_name = stmt[6]
            product_name = stmt[4]
            if reactant_name not in obj_from_map:
                obj_from_map[reactant_name] = \
                    self._get_agent(stmt[6], stmt[5])
            if product_name not in obj_to_map:
                obj_to_map[product_name] = \
                    self._get_agent(stmt[4], stmt[3])
        obj_from = list(obj_from_map.values())
        obj_to = list(obj_to_map.values())
        st = Conversion(subj, obj_from, obj_to, evidence=evidence)
        # If we've matched a pattern, mark this as a converted statement
        self.statements.append(st)
        self.converted_direct_stmts.append(stmt_str)
Extract Conversion INDRA Statements from BEL .
653
9
17,607
def get_degenerate_statements ( self ) : logger . info ( "Checking for 'degenerate' statements...\n" ) # Get rules of type protein X -> activity Y q_stmts = prefixes + """ SELECT ?stmt WHERE { ?stmt a belvoc:Statement . ?stmt belvoc:hasSubject ?subj . ?stmt belvoc:hasObject ?obj . { { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . } UNION { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . } } { { ?subj a belvoc:ProteinAbundance . } UNION { ?subj a belvoc:ModifiedProteinAbundance . } } ?subj belvoc:hasConcept ?xName . { { ?obj a belvoc:ProteinAbundance . ?obj belvoc:hasConcept ?yName . } UNION { ?obj a belvoc:ModifiedProteinAbundance . ?obj belvoc:hasChild ?proteinY . ?proteinY belvoc:hasConcept ?yName . } UNION { ?obj a belvoc:AbundanceActivity . ?obj belvoc:hasChild ?objChild . ?objChild a belvoc:ProteinAbundance . ?objChild belvoc:hasConcept ?yName . } } FILTER (?xName != ?yName) } """ res_stmts = self . g . query ( q_stmts ) logger . info ( "Protein -> Protein/Activity statements:" ) logger . info ( "---------------------------------------" ) for stmt in res_stmts : stmt_str = strip_statement ( stmt [ 0 ] ) logger . info ( stmt_str ) self . degenerate_stmts . append ( stmt_str )
Get all degenerate BEL statements .
406
7
17,608
def print_statement_coverage(self):
    """Display how many of the direct/indirect statements were converted.

    Lazily populates the full statement lists if they have not been
    collected yet, then logs totals and lists every unhandled statement.
    """
    if not self.all_direct_stmts:
        self.get_all_direct_statements()
    if not self.degenerate_stmts:
        self.get_degenerate_statements()
    if not self.all_indirect_stmts:
        self.get_all_indirect_statements()
    logger.info('')
    logger.info("Total indirect statements: %d" %
                len(self.all_indirect_stmts))
    logger.info("Converted indirect statements: %d" %
                len(self.converted_indirect_stmts))
    logger.info(">> Unhandled indirect statements: %d" %
                (len(self.all_indirect_stmts) -
                 len(self.converted_indirect_stmts)))
    logger.info('')
    logger.info("Total direct statements: %d" % len(self.all_direct_stmts))
    logger.info("Converted direct statements: %d" %
                len(self.converted_direct_stmts))
    logger.info("Degenerate direct statements: %d" %
                len(self.degenerate_stmts))
    logger.info(">> Unhandled direct statements: %d" %
                (len(self.all_direct_stmts) -
                 len(self.converted_direct_stmts) -
                 len(self.degenerate_stmts)))
    logger.info('')
    logger.info("--- Unhandled direct statements ---------")
    for stmt in self.all_direct_stmts:
        if not (stmt in self.converted_direct_stmts or
                stmt in self.degenerate_stmts):
            logger.info(stmt)
    logger.info('')
    logger.info("--- Unhandled indirect statements ---------")
    for stmt in self.all_indirect_stmts:
        if not (stmt in self.converted_indirect_stmts or
                stmt in self.degenerate_stmts):
            logger.info(stmt)
Display how many of the direct statements have been converted .
472
11
17,609
def print_statements(self):
    """Print all extracted INDRA Statements, direct then indirect."""
    sections = (
        ('--- Direct INDRA statements ----------', self.statements),
        ('--- Indirect INDRA statements ----------', self.indirect_stmts),
    )
    for header, stmt_list in sections:
        logger.info(header)
        for idx, statement in enumerate(stmt_list):
            logger.info("%s: %s" % (idx, statement))
Print all extracted INDRA Statements .
106
7
17,610
def process_directory_statements_sorted_by_pmid(directory_name):
    """Process a directory of CSXML files into Statements keyed by PMID.

    The directory is processed lazily; each extracted Statement is
    grouped under the PMID of its first evidence.
    """
    mp = process_directory(directory_name, lazy=True)
    s_dict = defaultdict(list)
    for stmt in mp.iter_statements():
        pmid = stmt.evidence[0].pmid
        s_dict[pmid].append(stmt)
    return s_dict
Processes a directory filled with CSXML files first normalizing the character encoding to utf - 8 and then processing into INDRA statements sorted by pmid .
79
33
17,611
def process_directory(directory_name, lazy=False):
    """Return a MedscanProcessor with extractions from a CSXML directory.

    A single parent processor accumulates the extractions from every
    file in the directory.
    """
    processor = MedscanProcessor()
    processor.process_directory(directory_name, lazy)
    return processor
Processes a directory filled with CSXML files first normalizing the character encodings to utf - 8 and then processing into a list of INDRA statements .
47
34
17,612
def process_file_sorted_by_pmid(file_name):
    """Process a file and return a dict mapping PMIDs to Statements.

    The file is processed lazily; each extracted Statement is grouped
    under the PMID of its first evidence.
    """
    mp = process_file(file_name, lazy=True)
    s_dict = defaultdict(list)
    for stmt in mp.iter_statements():
        pmid = stmt.evidence[0].pmid
        s_dict[pmid].append(stmt)
    return s_dict
Processes a file and returns a dictionary mapping pmids to a list of statements corresponding to that pmid .
76
22
17,613
def process_file(filename, interval=None, lazy=False):
    """Return a MedscanProcessor for a single CSXML file."""
    processor = MedscanProcessor()
    processor.process_csxml_file(filename, interval, lazy)
    return processor
Process a CSXML file for its relevant information .
41
11
17,614
def stmts_from_path(path, model, stmts):
    """Return source Statements corresponding to a path in a model.

    For each (rule name, sign) pair on the path, one Statement is
    appended per model rule whose name matches.
    """
    path_stmts = []
    for path_rule, sign in path:
        matching_rules = [r for r in model.rules if r.name == path_rule]
        for _ in matching_rules:
            stmt = stmt_from_rule(path_rule, model, stmts)
            assert stmt is not None
            path_stmts.append(stmt)
    return path_stmts
Return source Statements corresponding to a path in a model .
94
11
17,615
def extract_context(annotations, annot_manager):
    """Return a BioContext object extracted from the annotations.

    Note: the annotations dict is mutated — recognized keys are popped.
    Returns None if no context attribute could be filled in.
    """
    def get_annot(annotations, key):
        """Return a specific annotation given a key."""
        val = annotations.pop(key, None)
        if val:
            # Only entries flagged True are considered
            val_list = [v for v, tf in val.items() if tf]
            if len(val_list) > 1:
                logger.warning('More than one "%s" in annotations' % key)
            elif not val_list:
                return None
            return val_list[0]
        return None

    bc = BioContext()
    species = get_annot(annotations, 'Species')
    if species:
        name = annot_manager.get_mapping('Species', species)
        bc.species = RefContext(name=name, db_refs={'TAXONOMY': species})
    # (BEL annotation name, BioContext attribute, fixed namespace or None)
    mappings = (('CellLine', 'cell_line', None),
                ('Disease', 'disease', None),
                ('Anatomy', 'organ', None),
                ('Cell', 'cell_type', None),
                ('CellStructure', 'location', 'MESH'))
    for bel_name, indra_name, ns in mappings:
        ann = get_annot(annotations, bel_name)
        if ann:
            ref = annot_manager.get_mapping(bel_name, ann)
            if ref is None:
                continue
            if not ns:
                # The mapped ref encodes its namespace as a prefix,
                # e.g. 'NS_id' — split off the first underscore
                db_ns, db_id = ref.split('_', 1)
            else:
                db_ns, db_id = ns, ref
            setattr(bc, indra_name,
                    RefContext(name=ann, db_refs={db_ns: db_id}))
    # Overwrite blank BioContext
    if not bc:
        bc = None
    return bc
Return a BioContext object extracted from the annotations .
393
10
17,616
def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
    """Set standardized axis formatting for a figure.

    Parameters
    ----------
    ax : matplotlib Axes
        The axis to format.
    label_padding : int
        Padding applied to both axis labels.
    tick_padding : int
        Padding applied to tick labels on both axes.
    yticks_position : str
        Side on which to draw the y ticks.

    NOTE(review): relies on a module-level ``fontsize`` value for all
    label/tick sizes — confirm it is defined in this module.
    """
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position(yticks_position)
    ax.yaxis.set_tick_params(which='both', direction='out',
                             labelsize=fontsize, pad=tick_padding,
                             length=2, width=0.5)
    ax.xaxis.set_tick_params(which='both', direction='out',
                             labelsize=fontsize, pad=tick_padding,
                             length=2, width=0.5)
    ax.xaxis.labelpad = label_padding
    ax.yaxis.labelpad = label_padding
    ax.xaxis.label.set_size(fontsize)
    ax.yaxis.label.set_size(fontsize)
Set standardized axis formatting for figure .
204
7
17,617
def make_model(self):
    """Return the assembled HTML content as a string.

    Groups and sorts the Statements, formats evidence for each, renders
    the HTML template, stores it on ``self.model`` and returns it.
    """
    stmts_formatted = []
    stmt_rows = group_and_sort_statements(
        self.statements, self.ev_totals if self.ev_totals else None)
    for key, verb, stmts in stmt_rows:
        # This will now be ordered by prevalence and entity pairs.
        stmt_info_list = []
        for stmt in stmts:
            stmt_hash = stmt.get_hash(shallow=True)
            ev_list = self._format_evidence_text(stmt)
            english = self._format_stmt_text(stmt)
            if self.ev_totals:
                # Show "shown / total" when overall totals are known
                total_evidence = self.ev_totals.get(int(stmt_hash), '?')
                if total_evidence == '?':
                    logger.warning('The hash %s was not found in the '
                                   'evidence totals dict.' % stmt_hash)
                evidence_count_str = '%s / %s' % (len(ev_list),
                                                  total_evidence)
            else:
                evidence_count_str = str(len(ev_list))
            stmt_info_list.append({
                'hash': stmt_hash,
                'english': english,
                'evidence': ev_list,
                'evidence_count': evidence_count_str})
        short_name = make_string_from_sort_key(key, verb)
        # Random key used to identify this group in the rendered page
        short_name_key = str(uuid.uuid4())
        stmts_formatted.append((short_name, short_name_key,
                                stmt_info_list))
    metadata = {k.replace('_', ' ').title(): v
                for k, v in self.metadata.items()}
    if self.db_rest_url and not self.db_rest_url.endswith('statements'):
        db_rest_url = self.db_rest_url + '/statements'
    else:
        db_rest_url = '.'
    self.model = template.render(stmt_data=stmts_formatted,
                                 metadata=metadata, title=self.title,
                                 db_rest_url=db_rest_url)
    return self.model
Return the assembled HTML content as a string .
498
9
17,618
def append_warning(self, msg):
    """Append a warning message to the model to expose issues.

    Injects a red CAUTION note right after the page title and returns
    the updated model string.
    """
    assert self.model is not None, "You must already have run make_model!"
    caution = ('\t<span style="color:red;">(CAUTION: %s occurred when '
               'creating this page.)</span>' % msg)
    annotated_title = self.title + caution
    self.model = self.model.replace(self.title, annotated_title)
    return self.model
Append a warning message to the model to expose issues .
91
12
17,619
def save_model(self, fname):
    """Save the assembled HTML into a file, assembling it first if needed."""
    if self.model is None:
        self.make_model()
    encoded = self.model.encode('utf-8')
    with open(fname, 'wb') as fh:
        fh.write(encoded)
Save the assembled HTML into a file .
57
8
17,620
def _format_evidence_text(stmt):
    """Return evidence metadata dicts with highlighted evidence text.

    For each Evidence of the Statement, builds a dict with the source
    API, PMID, text refs, source hash and (when evidence text exists)
    the text with agent mentions wrapped in role-colored badge spans.
    """
    def get_role(ag_ix):
        # Statements without a clear subject/object get 'other'
        if isinstance(stmt, Complex) or \
           isinstance(stmt, SelfModification) or \
           isinstance(stmt, ActiveForm) or \
           isinstance(stmt, Conversion) or \
           isinstance(stmt, Translocation):
            return 'other'
        else:
            assert len(stmt.agent_list()) == 2, (len(stmt.agent_list()),
                                                 type(stmt))
            return 'subject' if ag_ix == 0 else 'object'

    ev_list = []
    for ix, ev in enumerate(stmt.evidence):
        # Expand the source api to include the sub-database
        if ev.source_api == 'biopax' and \
           'source_sub_id' in ev.annotations and \
           ev.annotations['source_sub_id']:
            source_api = '%s:%s' % (ev.source_api,
                                    ev.annotations['source_sub_id'])
        else:
            source_api = ev.source_api
        # Prepare the evidence text
        if ev.text is None:
            format_text = None
        else:
            indices = []
            # NOTE(review): this inner loop shadows the outer loop
            # variable `ix` — harmless here since the outer `ix` is not
            # used after this point, but worth renaming.
            for ix, ag in enumerate(stmt.agent_list()):
                if ag is None:
                    continue
                # If the statement has been preassembled, it will have
                # this entry in annotations
                try:
                    ag_text = ev.annotations['agents']['raw_text'][ix]
                    if ag_text is None:
                        raise KeyError
                # Otherwise we try to get the agent text from db_refs
                except KeyError:
                    ag_text = ag.db_refs.get('TEXT')
                if ag_text is None:
                    continue
                role = get_role(ix)
                # Get the tag with the correct badge
                tag_start = '<span class="badge badge-%s">' % role
                tag_close = '</span>'
                # Build up a set of indices over every occurrence of the
                # agent text in the evidence sentence
                indices += [(m.start(), m.start() + len(ag_text),
                             ag_text, tag_start, tag_close)
                            for m in re.finditer(re.escape(ag_text),
                                                 ev.text)]
            format_text = tag_text(ev.text, indices)
        ev_list.append({'source_api': source_api,
                        'pmid': ev.pmid,
                        'text_refs': ev.text_refs,
                        'text': format_text,
                        'source_hash': ev.source_hash})
    return ev_list
Returns evidence metadata with highlighted evidence text .
571
8
17,621
def process_pmc(pmc_id, offline=False, output_fname=default_output_fname):
    """Return a ReachProcessor by processing a paper with a given PMC id.

    Note: writes '<pmc_id>.nxml' to the current working directory as a
    side effect. Returns None if the NXML could not be retrieved.
    """
    xml_str = pmc_client.get_xml(pmc_id)
    if xml_str is None:
        return None
    # Cache the fetched NXML locally before processing
    fname = pmc_id + '.nxml'
    with open(fname, 'wb') as fh:
        fh.write(xml_str.encode('utf-8'))
    # Use the corresponding PMID (if any) as the citation
    ids = id_lookup(pmc_id, 'pmcid')
    pmid = ids.get('pmid')
    rp = process_nxml_file(fname, citation=pmid, offline=offline,
                           output_fname=output_fname)
    return rp
Return a ReachProcessor by processing a paper with a given PMC id .
166
16
17,622
def process_pubmed_abstract(pubmed_id, offline=False,
                            output_fname=default_output_fname, **kwargs):
    """Return a ReachProcessor by processing a Pubmed abstract.

    Returns None if no abstract is available; otherwise all evidence is
    tagged as coming from the 'abstract' section.
    """
    abs_txt = pubmed_client.get_abstract(pubmed_id)
    if abs_txt is None:
        return None
    rp = process_text(abs_txt, citation=pubmed_id, offline=offline,
                      output_fname=output_fname, **kwargs)
    if rp and rp.statements:
        for statement in rp.statements:
            for evidence in statement.evidence:
                evidence.epistemics['section_type'] = 'abstract'
    return rp
Return a ReachProcessor by processing an abstract with a given Pubmed id .
147
16
17,623
def process_text(text, citation=None, offline=False,
                 output_fname=default_output_fname, timeout=None):
    """Return a ReachProcessor by processing the given text.

    When offline is True the local REACH JVM reader is used; otherwise
    the REACH web service. The raw JSON output is written to
    output_fname before being processed. Returns None on any reading or
    connection error.
    """
    if offline:
        if not try_offline:
            logger.error('Offline reading is not available.')
            return None
        try:
            api_ruler = reach_reader.get_api_ruler()
        except ReachOfflineReadingError as e:
            logger.error(e)
            logger.error('Cannot read offline because the REACH ApiRuler '
                         'could not be instantiated.')
            return None
        try:
            result_map = api_ruler.annotateText(text, 'fries')
        except JavaException as e:
            logger.error('Could not process text.')
            logger.error(e)
            return None
        # REACH version < 1.3.3
        json_str = result_map.get('resultJson')
        if not json_str:
            # REACH version >= 1.3.3
            json_str = result_map.get('result')
        if not isinstance(json_str, bytes):
            json_str = json_str.encode('utf-8')
    else:
        data = {'text': text.encode('utf-8')}
        try:
            res = requests.post(reach_text_url, data, timeout=timeout)
        except requests.exceptions.RequestException as e:
            logger.error('Could not connect to REACH service:')
            logger.error(e)
            return None
        # TODO: we could use res.json() here to get a dict
        # directly
        # This is a byte string
        json_str = res.content
    if not isinstance(json_str, bytes):
        raise TypeError('{} is {} instead of {}'.format(
            json_str, json_str.__class__, bytes))
    with open(output_fname, 'wb') as fh:
        fh.write(json_str)
    return process_json_str(json_str.decode('utf-8'), citation)
Return a ReachProcessor by processing the given text .
441
11
17,624
def process_nxml_str(nxml_str, citation=None, offline=False,
                     output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML string.

    When offline is True the local REACH JVM reader is used; otherwise
    the REACH web service (whose JSON output is written to
    output_fname). Returns None on any reading or connection error.
    """
    if offline:
        if not try_offline:
            logger.error('Offline reading is not available.')
            return None
        try:
            api_ruler = reach_reader.get_api_ruler()
        except ReachOfflineReadingError as e:
            logger.error(e)
            logger.error('Cannot read offline because the REACH ApiRuler '
                         'could not be instantiated.')
            return None
        try:
            result_map = api_ruler.annotateNxml(nxml_str, 'fries')
        except JavaException as e:
            logger.error('Could not process NXML.')
            logger.error(e)
            return None
        # REACH version < 1.3.3
        json_str = result_map.get('resultJson')
        if not json_str:
            # REACH version >= 1.3.3
            json_str = result_map.get('result')
        if json_str is None:
            logger.warning('No results retrieved')
            return None
        if isinstance(json_str, bytes):
            json_str = json_str.decode('utf-8')
        return process_json_str(json_str, citation)
    else:
        data = {'nxml': nxml_str}
        try:
            res = requests.post(reach_nxml_url, data)
        except requests.exceptions.RequestException as e:
            logger.error('Could not connect to REACH service:')
            logger.error(e)
            return None
        if res.status_code != 200:
            # NOTE(review): concatenation yields "...service.Status
            # code..." — a space is missing between the two literals
            logger.error('Could not process NXML via REACH service.' +
                         'Status code: %d' % res.status_code)
            return None
        json_str = res.text
        with open(output_fname, 'wb') as fh:
            fh.write(json_str.encode('utf-8'))
        return process_json_str(json_str, citation)
Return a ReachProcessor by processing the given NXML string .
445
13
17,625
def process_nxml_file(file_name, citation=None, offline=False,
                      output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML file.

    Parameters
    ----------
    file_name : str
        Path to the NXML file, read as UTF-8.
    citation : Optional[str]
        Citation (e.g. PMID) passed through to the processor.
    offline : Optional[bool]
        If True, use the offline REACH reader.
    output_fname : Optional[str]
        Where the intermediate REACH JSON output is written.
    """
    with open(file_name, 'rb') as f:
        nxml_str = f.read().decode('utf-8')
    # Fix: forward the offline flag. Previously False was hard-coded
    # here, so offline=True was silently ignored.
    return process_nxml_str(nxml_str, citation, offline, output_fname)
Return a ReachProcessor by processing the given NXML file .
88
13
17,626
def process_json_file(file_name, citation=None):
    """Return a ReachProcessor by processing the given REACH json file.

    Logs an error and returns None if the file cannot be read.
    """
    try:
        with open(file_name, 'rb') as fh:
            content = fh.read().decode('utf-8')
        return process_json_str(content, citation)
    except IOError:
        logger.error('Could not read file %s.' % file_name)
Return a ReachProcessor by processing the given REACH json file .
86
14
17,627
def process_json_str(json_str, citation=None):
    """Return a ReachProcessor by processing the given REACH json string.

    Normalizes REACH's hyphenated keys to underscores, parses the JSON
    and runs all supported extraction passes. Returns None if the JSON
    cannot be decoded.
    """
    # basestring is a Python 2/3 compatibility alias for str types
    if not isinstance(json_str, basestring):
        raise TypeError('{} is {} instead of {}'.format(
            json_str, json_str.__class__, basestring))
    # Rewrite hyphenated REACH keys to valid attribute-style names
    json_str = json_str.replace('frame-id', 'frame_id')
    json_str = json_str.replace('argument-label', 'argument_label')
    json_str = json_str.replace('object-meta', 'object_meta')
    json_str = json_str.replace('doc-id', 'doc_id')
    json_str = json_str.replace('is-hypothesis', 'is_hypothesis')
    json_str = json_str.replace('is-negated', 'is_negated')
    json_str = json_str.replace('is-direct', 'is_direct')
    json_str = json_str.replace('found-by', 'found_by')
    try:
        json_dict = json.loads(json_str)
    except ValueError:
        logger.error('Could not decode JSON string.')
        return None
    rp = ReachProcessor(json_dict, citation)
    # Run every supported extraction pass
    rp.get_modifications()
    rp.get_complexes()
    rp.get_activation()
    rp.get_translocation()
    rp.get_regulate_amounts()
    return rp
Return a ReachProcessor by processing the given REACH json string .
340
14
17,628
def make_parser():
    """Generate the argument parser for this script.

    Returns
    -------
    argparse.ArgumentParser
        Parser with the queue name positional and all watch/timeout/log
        options configured.
    """
    parser = ArgumentParser(
        'wait_for_complete.py',
        usage='%(prog)s [-h] queue_name [options]',
        description=('Wait for a set of batch jobs to complete, and monitor '
                     'them as they run.'),
        epilog=('Jobs can also be monitored, terminated, and otherwise '
                'managed on the AWS website. However this tool will also tag '
                'the instances, and should be run whenever a job is submitted '
                'to AWS.'))
    parser.add_argument(
        dest='queue_name',
        help=('The name of the queue to watch and wait for completion. If no '
              'jobs are specified, this will wait until all jobs in the queue '
              'are completed (either SUCCEEDED or FAILED).'))
    parser.add_argument(
        '--watch', '-w', dest='job_list', metavar='JOB_ID', nargs='+',
        help=('Specify particular jobs using their job ids, as reported by '
              'the submit command. Many ids may be specified.'))
    parser.add_argument(
        '--prefix', '-p', dest='job_name_prefix',
        help='Specify a prefix for the name of the jobs to watch and wait for.')
    parser.add_argument(
        '--interval', '-i', dest='poll_interval', default=10, type=int,
        help=('The time interval to wait between job status checks, in '
              'seconds (default: %(default)d seconds).'))
    parser.add_argument(
        '--timeout', '-T', metavar='TIMEOUT', type=int,
        help=('If the logs are not updated for %(metavar)s seconds, '
              'print a warning. If `--kill_on_log_timeout` flag is set, then '
              'the offending jobs will be automatically terminated.'))
    parser.add_argument(
        '--kill_on_timeout', '-K', action='store_true',
        help='If a log times out, terminate the offending job.')
    parser.add_argument(
        '--stash_log_method', '-l', choices=['s3', 'local'],
        metavar='METHOD',
        help=('Select a method from: [%(choices)s] to store the job logs. '
              'If no method is specified, the logs will not be '
              'loaded off of AWS. If \'s3\' is specified, then '
              '`job_name_prefix` must also be given, as this will indicate '
              'where on s3 to store the logs.'))
    return parser
Generate the parser for this script .
621
8
17,629
def id_lookup(paper_id, idtype):
    """Take an ID of type PMID, PMCID or DOI and look up the other IDs.

    Parameters
    ----------
    paper_id : str
        The identifier to look up.
    idtype : str
        One of 'pmid', 'pmcid' or 'doi'.

    Returns
    -------
    dict
        Maps 'pmid', 'pmcid' and 'doi' to identifiers or None.

    Raises
    ------
    ValueError
        If idtype is not one of the accepted values.
    """
    if idtype not in ('pmid', 'pmcid', 'doi'):
        raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
                         "or 'doi'." % idtype)
    ids = {'doi': None, 'pmid': None, 'pmcid': None}
    pmc_id_results = pmc_client.id_lookup(paper_id, idtype)
    # Start with the results of the PMC lookup and then override with the
    # provided ID
    ids['pmid'] = pmc_id_results.get('pmid')
    ids['pmcid'] = pmc_id_results.get('pmcid')
    ids['doi'] = pmc_id_results.get('doi')
    ids[idtype] = paper_id
    # If we gave a DOI, then our work is done after looking for PMID and
    # PMCID
    if idtype == 'doi':
        return ids
    # If we gave a PMID or PMCID, we need to check to see if we got a
    # DOI. If we got a DOI back, we're done.
    elif ids.get('doi'):
        return ids
    # If we get here, then we've given PMID or PMCID and don't have a DOI
    # yet. If we gave a PMCID and have neither a PMID nor a DOI, then
    # we'll run into problems later on when we try to the reverse lookup
    # using CrossRef. So we bail here and return what we have (PMCID
    # only) with a warning.
    if ids.get('pmcid') and ids.get('doi') is None and \
            ids.get('pmid') is None:
        logger.warning('%s: PMCID without PMID or DOI' % ids.get('pmcid'))
        return ids
    # To clarify the state of things at this point:
    assert ids.get('pmid') is not None
    assert ids.get('doi') is None
    # As a last result, we try to get the DOI from CrossRef (which
    # internally tries to get the DOI from Pubmed in the process of
    # collecting the necessary metadata for the lookup):
    ids['doi'] = crossref_client.doi_query(ids['pmid'])
    # It may still be None, but at this point there's nothing we can
    # do...
    return ids
Take an ID of type PMID PMCID or DOI and lookup the other IDs .
572
18
17,630
def get_api_ruler(self):
    """Return the existing REACH ApiRuler, launching one if needed.

    Raises ReachOfflineReadingError if the JVM class cannot be loaded.
    """
    if self.api_ruler is not None:
        return self.api_ruler
    try:
        self.api_ruler = \
            autoclass('org.clulab.reach.export.apis.ApiRuler')
    except JavaException as e:
        raise ReachOfflineReadingError(e)
    return self.api_ruler
Return the existing reader if it exists or launch a new one .
75
13
17,631
def _download_biogrid_data ( url ) : res = requests . get ( biogrid_file_url ) if res . status_code != 200 : raise Exception ( 'Unable to download Biogrid data: status code %s' % res . status_code ) zip_bytes = BytesIO ( res . content ) zip_file = ZipFile ( zip_bytes ) zip_info_list = zip_file . infolist ( ) # There should be only one file in this zip archive if len ( zip_info_list ) != 1 : raise Exception ( 'There should be exactly zipfile in BioGrid zip ' 'archive: %s' % str ( zip_info_list ) ) unzipped_bytes = zip_file . read ( zip_info_list [ 0 ] ) # Unzip the file biogrid_str = StringIO ( unzipped_bytes . decode ( 'utf8' ) ) # Make file-like obj csv_reader = csv . reader ( biogrid_str , delimiter = '\t' ) # Get csv reader next ( csv_reader ) # Skip the header return csv_reader
Downloads zipped tab - separated Biogrid data in . tab2 format .
253
17
17,632
def _make_agent ( self , entrez_id , text_id ) : hgnc_name , db_refs = self . _make_db_refs ( entrez_id , text_id ) if hgnc_name is not None : name = hgnc_name elif text_id is not None : name = text_id # Handle case where the name is None else : return None return Agent ( name , db_refs = db_refs )
Make an Agent object appropriately grounded .
106
7
17,633
def _make_db_refs ( self , entrez_id , text_id ) : db_refs = { } if text_id != '-' and text_id is not None : db_refs [ 'TEXT' ] = text_id hgnc_id = hgnc_client . get_hgnc_from_entrez ( entrez_id ) hgnc_name = hgnc_client . get_hgnc_name ( hgnc_id ) if hgnc_id is not None : db_refs [ 'HGNC' ] = hgnc_id up_id = hgnc_client . get_uniprot_id ( hgnc_id ) if up_id is not None : db_refs [ 'UP' ] = up_id return ( hgnc_name , db_refs )
Looks up the HGNC ID and name as well as the Uniprot ID .
195
17
17,634
def make_model ( self , policies = None , initial_conditions = True , reverse_effects = False ) : self . processed_policies = self . process_policies ( policies ) ppa = PysbPreassembler ( self . statements ) ppa . replace_activities ( ) if reverse_effects : ppa . add_reverse_effects ( ) self . statements = ppa . statements # Set local policies for this make_model call that overwrite # the global policies of the Kami assembler if policies is not None : global_policies = self . policies if isinstance ( policies , basestring ) : local_policies = { 'other' : policies } else : local_policies = { 'other' : 'default' } local_policies . update ( policies ) self . policies = local_policies self . model = { } graphs = [ ] self . model [ 'graphs' ] = graphs self . model [ 'typing' ] = [ ] # Action graph generated here action_graph = { 'id' : 'action_graph' , 'attrs' : { 'name' : 'action_graph' } } action_graph [ 'graph' ] = { 'nodes' : [ ] , 'edges' : [ ] } graphs . append ( action_graph ) # Iterate over the statements to generate rules self . _assemble ( ) # Add initial conditions #if initial_conditions: # self.add_default_initial_conditions() # If local policies were applied, revert to the global one if policies is not None : self . policies = global_policies return self . model
Assemble the Kami model from the collected INDRA Statements .
360
13
17,635
def add_agent ( self , agent ) : agent_id = self . add_node ( agent . name ) self . add_typing ( agent_id , 'agent' ) # Handle bound conditions for bc in agent . bound_conditions : # Here we make the assumption that the binding site # is simply named after the binding partner if bc . is_bound : test_type = 'is_bnd' else : test_type = 'is_free' bound_name = bc . agent . name agent_bs = get_binding_site_name ( bc . agent ) test_name = '%s_bound_to_%s_test' % ( agent_id , bound_name ) agent_bs_id = self . add_node ( agent_bs ) test_id = self . add_node ( test_name ) self . add_edge ( agent_bs_id , agent_id ) self . add_edge ( agent_bs_id , test_id ) self . add_typing ( agent_bs_id , 'locus' ) self . add_typing ( test_id , test_type ) for mod in agent . mods : mod_site_str = abbrevs [ mod . mod_type ] if mod . residue is not None : mod_site_str = mod . residue mod_pos_str = mod . position if mod . position is not None else '' mod_site = ( '%s%s' % ( mod_site_str , mod_pos_str ) ) site_states = states [ mod . mod_type ] if mod . is_modified : val = site_states [ 1 ] else : val = site_states [ 0 ] mod_site_id = self . add_node ( mod_site , { 'val' : val } ) self . add_edge ( mod_site_id , agent_id ) self . add_typing ( mod_site_id , 'state' ) return agent_id
Add an INDRA Agent and its conditions to the Nugget .
427
13
17,636
def add_node ( self , name_base , attrs = None ) : if name_base not in self . counters : node_id = name_base else : node_id = '%s_%d' % ( name_base , self . counters [ name_base ] ) node = { 'id' : node_id } if attrs : node [ 'attrs' ] = attrs self . nodes . append ( node ) self . counters [ node_id ] += 1 return node_id
Add a node with a given base name to the Nugget and return ID .
109
16
17,637
def get_nugget_dict ( self ) : nugget_dict = { 'id' : self . id , 'graph' : { 'nodes' : self . nodes , 'edges' : self . edges } , 'attrs' : { 'name' : self . name , 'rate' : self . rate } } return nugget_dict
Return the Nugget as a dictionary .
80
8
17,638
def process_text ( text , pmid = None , python2_path = None ) : # Try to locate python2 in one of the directories of the PATH environment # variable if it is not provided if python2_path is None : for path in os . environ [ "PATH" ] . split ( os . pathsep ) : proposed_python2_path = os . path . join ( path , 'python2.7' ) if os . path . isfile ( proposed_python2_path ) : python2_path = proposed_python2_path print ( 'Found python 2 interpreter at' , python2_path ) break if python2_path is None : raise Exception ( 'Could not find python2 in the directories ' + 'listed in the PATH environment variable. ' + 'Need python2 to run TEES.' ) # Run TEES a1_text , a2_text , sentence_segmentations = run_on_text ( text , python2_path ) # Run the TEES processor tp = TEESProcessor ( a1_text , a2_text , sentence_segmentations , pmid ) return tp
Processes the specified plain text with TEES and converts output to supported INDRA statements . Check for the TEES installation is the TEES_PATH environment variable and configuration file ; if not found checks candidate paths in tees_candidate_paths . Raises an exception if TEES cannot be found in any of these places .
248
68
17,639
def _list_to_seq ( lst ) : ml = autoclass ( 'scala.collection.mutable.MutableList' ) ( ) for element in lst : ml . appendElem ( element ) return ml
Return a scala . collection . Seq from a Python list .
50
14
17,640
def process_text ( self , text , format = 'json' ) : if self . eidos_reader is None : self . initialize_reader ( ) default_arg = lambda x : autoclass ( 'scala.Some' ) ( x ) today = datetime . date . today ( ) . strftime ( "%Y-%m-%d" ) fname = 'default_file_name' annot_doc = self . eidos_reader . extractFromText ( text , True , # keep text False , # CAG-relevant only default_arg ( today ) , # doc creation time default_arg ( fname ) # file name ) if format == 'json' : mentions = annot_doc . odinMentions ( ) ser = autoclass ( eidos_package + '.serialization.json.WMJSONSerializer' ) mentions_json = ser . toJsonStr ( mentions ) elif format == 'json_ld' : # We need to get a Scala Seq of annot docs here ml = _list_to_seq ( [ annot_doc ] ) # We currently do not need toinstantiate the adjective grounder # if we want to reinstate it, we would need to do the following # ag = EidosAdjectiveGrounder.fromConfig( # EidosSystem.defaultConfig.getConfig("adjectiveGrounder")) # We now create a JSON-LD corpus jc = autoclass ( eidos_package + '.serialization.json.JLDCorpus' ) corpus = jc ( ml ) # Finally, serialize the corpus into JSON string mentions_json = corpus . toJsonStr ( ) json_dict = json . loads ( mentions_json ) return json_dict
Return a mentions JSON object given text .
378
8
17,641
def process_text ( text , out_format = 'json_ld' , save_json = 'eidos_output.json' , webservice = None ) : if not webservice : if eidos_reader is None : logger . error ( 'Eidos reader is not available.' ) return None json_dict = eidos_reader . process_text ( text , out_format ) else : res = requests . post ( '%s/process_text' % webservice , json = { 'text' : text } ) json_dict = res . json ( ) if save_json : with open ( save_json , 'wt' ) as fh : json . dump ( json_dict , fh , indent = 2 ) return process_json ( json_dict )
Return an EidosProcessor by processing the given text .
173
12
17,642
def process_json_file ( file_name ) : try : with open ( file_name , 'rb' ) as fh : json_str = fh . read ( ) . decode ( 'utf-8' ) return process_json_str ( json_str ) except IOError : logger . exception ( 'Could not read file %s.' % file_name )
Return an EidosProcessor by processing the given Eidos JSON - LD file .
80
17
17,643
def process_json ( json_dict ) : ep = EidosProcessor ( json_dict ) ep . extract_causal_relations ( ) ep . extract_correlations ( ) ep . extract_events ( ) return ep
Return an EidosProcessor by processing a Eidos JSON - LD dict .
48
16
17,644
def get_drug_inhibition_stmts ( drug ) : chebi_id = drug . db_refs . get ( 'CHEBI' ) mesh_id = drug . db_refs . get ( 'MESH' ) if chebi_id : drug_chembl_id = chebi_client . get_chembl_id ( chebi_id ) elif mesh_id : drug_chembl_id = get_chembl_id ( mesh_id ) else : logger . error ( 'Drug missing ChEBI or MESH grounding.' ) return None logger . info ( 'Drug: %s' % ( drug_chembl_id ) ) query_dict = { 'query' : 'activity' , 'params' : { 'molecule_chembl_id' : drug_chembl_id , 'limit' : 10000 } } res = send_query ( query_dict ) activities = res [ 'activities' ] targ_act_dict = activities_by_target ( activities ) target_chembl_ids = [ x for x in targ_act_dict ] protein_targets = get_protein_targets_only ( target_chembl_ids ) filtered_targ_act_dict = { t : targ_act_dict [ t ] for t in [ x for x in protein_targets ] } stmts = [ ] for target_chembl_id in filtered_targ_act_dict : target_activity_ids = filtered_targ_act_dict [ target_chembl_id ] target_activites = [ x for x in activities if x [ 'activity_id' ] in target_activity_ids ] target_upids = [ ] targ_comp = protein_targets [ target_chembl_id ] [ 'target_components' ] for t_c in targ_comp : target_upids . append ( t_c [ 'accession' ] ) evidence = [ ] for assay in target_activites : ev = get_evidence ( assay ) if not ev : continue evidence . append ( ev ) if len ( evidence ) > 0 : for target_upid in target_upids : agent_name = uniprot_client . get_gene_name ( target_upid ) target_agent = Agent ( agent_name , db_refs = { 'UP' : target_upid } ) st = Inhibition ( drug , target_agent , evidence = evidence ) stmts . append ( st ) return stmts
Query ChEMBL for kinetics data given drug as Agent get back statements
563
15
17,645
def send_query ( query_dict ) : query = query_dict [ 'query' ] params = query_dict [ 'params' ] url = 'https://www.ebi.ac.uk/chembl/api/data/' + query + '.json' r = requests . get ( url , params = params ) r . raise_for_status ( ) js = r . json ( ) return js
Query ChEMBL API
89
5
17,646
def query_target ( target_chembl_id ) : query_dict = { 'query' : 'target' , 'params' : { 'target_chembl_id' : target_chembl_id , 'limit' : 1 } } res = send_query ( query_dict ) target = res [ 'targets' ] [ 0 ] return target
Query ChEMBL API target by id
82
8
17,647
def activities_by_target ( activities ) : targ_act_dict = defaultdict ( lambda : [ ] ) for activity in activities : target_chembl_id = activity [ 'target_chembl_id' ] activity_id = activity [ 'activity_id' ] targ_act_dict [ target_chembl_id ] . append ( activity_id ) for target_chembl_id in targ_act_dict : targ_act_dict [ target_chembl_id ] = list ( set ( targ_act_dict [ target_chembl_id ] ) ) return targ_act_dict
Get back lists of activities in a dict keyed by ChEMBL target id
138
16
17,648
def get_protein_targets_only ( target_chembl_ids ) : protein_targets = { } for target_chembl_id in target_chembl_ids : target = query_target ( target_chembl_id ) if 'SINGLE PROTEIN' in target [ 'target_type' ] : protein_targets [ target_chembl_id ] = target return protein_targets
Given list of ChEMBL target ids return dict of SINGLE PROTEIN targets
100
19
17,649
def get_evidence ( assay ) : kin = get_kinetics ( assay ) source_id = assay . get ( 'assay_chembl_id' ) if not kin : return None annotations = { 'kinetics' : kin } chembl_doc_id = str ( assay . get ( 'document_chembl_id' ) ) pmid = get_pmid ( chembl_doc_id ) ev = Evidence ( source_api = 'chembl' , pmid = pmid , source_id = source_id , annotations = annotations ) return ev
Given an activity return an INDRA Evidence object .
127
10
17,650
def get_kinetics ( assay ) : try : val = float ( assay . get ( 'standard_value' ) ) except TypeError : logger . warning ( 'Invalid assay value: %s' % assay . get ( 'standard_value' ) ) return None unit = assay . get ( 'standard_units' ) if unit == 'nM' : unit_sym = 1e-9 * units . mol / units . liter elif unit == 'uM' : unit_sym = 1e-6 * units . mol / units . liter else : logger . warning ( 'Unhandled unit: %s' % unit ) return None param_type = assay . get ( 'standard_type' ) if param_type not in [ 'IC50' , 'EC50' , 'INH' , 'Potency' , 'Kd' ] : logger . warning ( 'Unhandled parameter type: %s' % param_type ) logger . info ( str ( assay ) ) return None kin = { param_type : val * unit_sym } return kin
Given an activity return its kinetics values .
227
9
17,651
def get_pmid ( doc_id ) : url_pmid = 'https://www.ebi.ac.uk/chembl/api/data/document.json' params = { 'document_chembl_id' : doc_id } res = requests . get ( url_pmid , params = params ) js = res . json ( ) pmid = str ( js [ 'documents' ] [ 0 ] [ 'pubmed_id' ] ) return pmid
Get PMID from document_chembl_id
106
11
17,652
def get_target_chemblid ( target_upid ) : url = 'https://www.ebi.ac.uk/chembl/api/data/target.json' params = { 'target_components__accession' : target_upid } r = requests . get ( url , params = params ) r . raise_for_status ( ) js = r . json ( ) target_chemblid = js [ 'targets' ] [ 0 ] [ 'target_chembl_id' ] return target_chemblid
Get ChEMBL ID from UniProt upid
122
10
17,653
def get_mesh_id ( nlm_mesh ) : url_nlm2mesh = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi' params = { 'db' : 'mesh' , 'term' : nlm_mesh , 'retmode' : 'JSON' } r = requests . get ( url_nlm2mesh , params = params ) res = r . json ( ) mesh_id = res [ 'esearchresult' ] [ 'idlist' ] [ 0 ] return mesh_id
Get MESH ID from NLM MESH
134
9
17,654
def get_pcid ( mesh_id ) : url_mesh2pcid = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi' params = { 'dbfrom' : 'mesh' , 'id' : mesh_id , 'db' : 'pccompound' , 'retmode' : 'JSON' } r = requests . get ( url_mesh2pcid , params = params ) res = r . json ( ) pcid = res [ 'linksets' ] [ 0 ] [ 'linksetdbs' ] [ 0 ] [ 'links' ] [ 0 ] return pcid
Get PC ID from MESH ID
148
7
17,655
def get_chembl_id ( nlm_mesh ) : mesh_id = get_mesh_id ( nlm_mesh ) pcid = get_pcid ( mesh_id ) url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + 'cid/%s/synonyms/JSON' % pcid r = requests . get ( url_mesh2pcid ) res = r . json ( ) synonyms = res [ 'InformationList' ] [ 'Information' ] [ 0 ] [ 'Synonym' ] chembl_id = [ syn for syn in synonyms if 'CHEMBL' in syn and 'SCHEMBL' not in syn ] [ 0 ] return chembl_id
Get ChEMBL ID from NLM MESH
182
10
17,656
def get_sentences ( self , root_element , block_tags ) : sentences = [ ] for element in root_element : if not self . any_ends_with ( block_tags , element . tag ) : # tag not in block_tags if element . text is not None and not re . match ( '^\s*$' , element . text ) : sentences . extend ( self . sentence_tokenize ( element . text ) ) sentences . extend ( self . get_sentences ( element , block_tags ) ) f = open ( 'sentence_debug.txt' , 'w' ) for s in sentences : f . write ( s . lower ( ) + '\n' ) f . close ( ) return sentences
Returns a list of plain - text sentences by iterating through XML tags except for those listed in block_tags .
158
23
17,657
def any_ends_with ( self , string_list , pattern ) : try : s_base = basestring except : s_base = str is_string = isinstance ( pattern , s_base ) if not is_string : return False for s in string_list : if pattern . endswith ( s ) : return True return False
Returns true iff one of the strings in string_list ends in pattern .
74
16
17,658
def get_tag_names ( self ) : root = etree . fromstring ( self . xml_full_text . encode ( 'utf-8' ) ) return self . get_children_tag_names ( root )
Returns the set of tag names present in the XML .
48
11
17,659
def get_children_tag_names ( self , xml_element ) : tags = set ( ) tags . add ( self . remove_namespace_from_tag ( xml_element . tag ) ) for element in xml_element . iter ( tag = etree . Element ) : if element != xml_element : new_tags = self . get_children_tag_names ( element ) if new_tags is not None : tags . update ( new_tags ) return tags
Returns all tag names of xml element and its children .
101
11
17,660
def string_matches_sans_whitespace ( self , str1 , str2_fuzzy_whitespace ) : str2_fuzzy_whitespace = re . sub ( '\s+' , '\s*' , str2_fuzzy_whitespace ) return re . search ( str2_fuzzy_whitespace , str1 ) is not None
Check if two strings match modulo their whitespace .
90
11
17,661
def sentence_matches ( self , sentence_text ) : has_upstream = False has_downstream = False has_verb = False # Get the first word of the action type and assume this is the verb # (Ex. get depends for depends on) actiontype_words = word_tokenize ( self . mention . actiontype ) actiontype_verb_stemmed = stem ( actiontype_words [ 0 ] ) words = word_tokenize ( sentence_text ) if self . string_matches_sans_whitespace ( sentence_text . lower ( ) , self . mention . upstream . lower ( ) ) : has_upstream = True if self . string_matches_sans_whitespace ( sentence_text . lower ( ) , self . mention . downstream . lower ( ) ) : has_downstream = True for word in words : if actiontype_verb_stemmed == stem ( word ) : has_verb = True return has_upstream and has_downstream and has_verb
Returns true iff the sentence contains this mention s upstream and downstream participants and if one of the stemmed verbs in the sentence is the same as the stemmed action type .
219
33
17,662
def dump_statements ( stmts , fname , protocol = 4 ) : logger . info ( 'Dumping %d statements into %s...' % ( len ( stmts ) , fname ) ) with open ( fname , 'wb' ) as fh : pickle . dump ( stmts , fh , protocol = protocol )
Dump a list of statements into a pickle file .
76
12
17,663
def load_statements ( fname , as_dict = False ) : logger . info ( 'Loading %s...' % fname ) with open ( fname , 'rb' ) as fh : # Encoding argument not available in pickle for Python 2 if sys . version_info [ 0 ] < 3 : stmts = pickle . load ( fh ) # Encoding argument specified here to enable compatibility with # pickle files created with Python 2 else : stmts = pickle . load ( fh , encoding = 'latin1' ) if isinstance ( stmts , dict ) : if as_dict : return stmts st = [ ] for pmid , st_list in stmts . items ( ) : st += st_list stmts = st logger . info ( 'Loaded %d statements' % len ( stmts ) ) return stmts
Load statements from a pickle file .
193
8
17,664
def map_grounding ( stmts_in , * * kwargs ) : from indra . preassembler . grounding_mapper import GroundingMapper from indra . preassembler . grounding_mapper import gm as grounding_map from indra . preassembler . grounding_mapper import default_agent_map as agent_map logger . info ( 'Mapping grounding on %d statements...' % len ( stmts_in ) ) do_rename = kwargs . get ( 'do_rename' ) gm = kwargs . get ( 'grounding_map' , grounding_map ) if do_rename is None : do_rename = True gm = GroundingMapper ( gm , agent_map , use_deft = kwargs . get ( 'use_deft' , True ) ) stmts_out = gm . map_agents ( stmts_in , do_rename = do_rename ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Map grounding using the GroundingMapper .
261
9
17,665
def merge_groundings ( stmts_in ) : def surface_grounding ( stmt ) : # Find the "best" grounding for a given concept and its evidences # and surface that for idx , concept in enumerate ( stmt . agent_list ( ) ) : if concept is None : continue aggregate_groundings = { } for ev in stmt . evidence : if 'agents' in ev . annotations : groundings = ev . annotations [ 'agents' ] [ 'raw_grounding' ] [ idx ] for ns , value in groundings . items ( ) : if ns not in aggregate_groundings : aggregate_groundings [ ns ] = [ ] if isinstance ( value , list ) : aggregate_groundings [ ns ] += value else : aggregate_groundings [ ns ] . append ( value ) best_groundings = get_best_groundings ( aggregate_groundings ) concept . db_refs = best_groundings def get_best_groundings ( aggregate_groundings ) : best_groundings = { } for ns , values in aggregate_groundings . items ( ) : # There are 3 possibilities here # 1. All the entries in the list are scored in which case we # get unique entries and sort them by score if all ( [ isinstance ( v , ( tuple , list ) ) for v in values ] ) : best_groundings [ ns ] = [ ] for unique_value in { v [ 0 ] for v in values } : scores = [ v [ 1 ] for v in values if v [ 0 ] == unique_value ] best_groundings [ ns ] . append ( ( unique_value , max ( scores ) ) ) best_groundings [ ns ] = sorted ( best_groundings [ ns ] , key = lambda x : x [ 1 ] , reverse = True ) # 2. All the entries in the list are unscored in which case we # get the highest frequency entry elif all ( [ not isinstance ( v , ( tuple , list ) ) for v in values ] ) : best_groundings [ ns ] = max ( set ( values ) , key = values . count ) # 3. There is a mixture, which can happen when some entries were # mapped with scores and others had no scores to begin with. # In this case, we again pick the highest frequency non-scored # entry assuming that the unmapped version is more reliable. 
else : unscored_vals = [ v for v in values if not isinstance ( v , ( tuple , list ) ) ] best_groundings [ ns ] = max ( set ( unscored_vals ) , key = unscored_vals . count ) return best_groundings stmts_out = [ ] for stmt in stmts_in : if not isinstance ( stmt , ( Complex , Conversion ) ) : surface_grounding ( stmt ) stmts_out . append ( stmt ) return stmts_out
Gather and merge original grounding information from evidences .
631
11
17,666
def merge_deltas ( stmts_in ) : stmts_out = [ ] for stmt in stmts_in : # This operation is only applicable to Influences if not isinstance ( stmt , Influence ) : stmts_out . append ( stmt ) continue # At this point this is guaranteed to be an Influence deltas = { } for role in ( 'subj' , 'obj' ) : for info in ( 'polarity' , 'adjectives' ) : key = ( role , info ) deltas [ key ] = [ ] for ev in stmt . evidence : entry = ev . annotations . get ( '%s_%s' % key ) deltas [ key ] . append ( entry if entry else None ) # POLARITY # For polarity we need to work in pairs polarity_pairs = list ( zip ( deltas [ ( 'subj' , 'polarity' ) ] , deltas [ ( 'obj' , 'polarity' ) ] ) ) # If we have some fully defined pairs, we take the most common one both_pols = [ pair for pair in polarity_pairs if pair [ 0 ] is not None and pair [ 1 ] is not None ] if both_pols : subj_pol , obj_pol = max ( set ( both_pols ) , key = both_pols . count ) stmt . subj . delta [ 'polarity' ] = subj_pol stmt . obj . delta [ 'polarity' ] = obj_pol # Otherwise we prefer the case when at least one entry of the # pair is given else : one_pol = [ pair for pair in polarity_pairs if pair [ 0 ] is not None or pair [ 1 ] is not None ] if one_pol : subj_pol , obj_pol = max ( set ( one_pol ) , key = one_pol . count ) stmt . subj . delta [ 'polarity' ] = subj_pol stmt . obj . delta [ 'polarity' ] = obj_pol # ADJECTIVES for attr , role in ( ( stmt . subj . delta , 'subj' ) , ( stmt . obj . delta , 'obj' ) ) : all_adjectives = [ ] for adj in deltas [ ( role , 'adjectives' ) ] : if isinstance ( adj , list ) : all_adjectives += adj elif adj is not None : all_adjectives . append ( adj ) attr [ 'adjectives' ] = all_adjectives stmts_out . append ( stmt ) return stmts_out
Gather and merge original Influence delta information from evidence .
594
11
17,667
def map_sequence ( stmts_in , * * kwargs ) : from indra . preassembler . sitemapper import SiteMapper , default_site_map logger . info ( 'Mapping sites on %d statements...' % len ( stmts_in ) ) kwarg_list = [ 'do_methionine_offset' , 'do_orthology_mapping' , 'do_isoform_mapping' ] sm = SiteMapper ( default_site_map , use_cache = kwargs . pop ( 'use_cache' , False ) , * * _filter ( kwargs , kwarg_list ) ) valid , mapped = sm . map_sites ( stmts_in ) correctly_mapped_stmts = [ ] for ms in mapped : correctly_mapped = all ( [ mm . has_mapping ( ) for mm in ms . mapped_mods ] ) if correctly_mapped : correctly_mapped_stmts . append ( ms . mapped_stmt ) stmts_out = valid + correctly_mapped_stmts logger . info ( '%d statements with valid sites' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) del sm return stmts_out
Map sequences using the SiteMapper .
313
8
17,668
def run_preassembly ( stmts_in , * * kwargs ) : dump_pkl_unique = kwargs . get ( 'save_unique' ) belief_scorer = kwargs . get ( 'belief_scorer' ) use_hierarchies = kwargs [ 'hierarchies' ] if 'hierarchies' in kwargs else hierarchies be = BeliefEngine ( scorer = belief_scorer ) pa = Preassembler ( hierarchies , stmts_in ) run_preassembly_duplicate ( pa , be , save = dump_pkl_unique ) dump_pkl = kwargs . get ( 'save' ) return_toplevel = kwargs . get ( 'return_toplevel' , True ) poolsize = kwargs . get ( 'poolsize' , None ) size_cutoff = kwargs . get ( 'size_cutoff' , 100 ) options = { 'save' : dump_pkl , 'return_toplevel' : return_toplevel , 'poolsize' : poolsize , 'size_cutoff' : size_cutoff , 'flatten_evidence' : kwargs . get ( 'flatten_evidence' , False ) , 'flatten_evidence_collect_from' : kwargs . get ( 'flatten_evidence_collect_from' , 'supported_by' ) } stmts_out = run_preassembly_related ( pa , be , * * options ) return stmts_out
Run preassembly on a list of statements .
342
9
17,669
def run_preassembly_duplicate ( preassembler , beliefengine , * * kwargs ) : logger . info ( 'Combining duplicates on %d statements...' % len ( preassembler . stmts ) ) dump_pkl = kwargs . get ( 'save' ) stmts_out = preassembler . combine_duplicates ( ) beliefengine . set_prior_probs ( stmts_out ) logger . info ( '%d unique statements' % len ( stmts_out ) ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Run deduplication stage of preassembly on a list of statements .
148
15
17,670
def run_preassembly_related ( preassembler , beliefengine , * * kwargs ) : logger . info ( 'Combining related on %d statements...' % len ( preassembler . unique_stmts ) ) return_toplevel = kwargs . get ( 'return_toplevel' , True ) poolsize = kwargs . get ( 'poolsize' , None ) size_cutoff = kwargs . get ( 'size_cutoff' , 100 ) stmts_out = preassembler . combine_related ( return_toplevel = False , poolsize = poolsize , size_cutoff = size_cutoff ) # Calculate beliefs beliefengine . set_hierarchy_probs ( stmts_out ) # Flatten evidence if needed do_flatten_evidence = kwargs . get ( 'flatten_evidence' , False ) if do_flatten_evidence : flatten_evidences_collect_from = kwargs . get ( 'flatten_evidence_collect_from' , 'supported_by' ) stmts_out = flatten_evidence ( stmts_out , flatten_evidences_collect_from ) # Filter to top if needed stmts_top = filter_top_level ( stmts_out ) if return_toplevel : stmts_out = stmts_top logger . info ( '%d top-level statements' % len ( stmts_out ) ) else : logger . info ( '%d statements out of which %d are top-level' % ( len ( stmts_out ) , len ( stmts_top ) ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Run related stage of preassembly on a list of statements .
412
12
17,671
def filter_by_type ( stmts_in , stmt_type , * * kwargs ) : invert = kwargs . get ( 'invert' , False ) logger . info ( 'Filtering %d statements for type %s%s...' % ( len ( stmts_in ) , 'not ' if invert else '' , stmt_type . __name__ ) ) if not invert : stmts_out = [ st for st in stmts_in if isinstance ( st , stmt_type ) ] else : stmts_out = [ st for st in stmts_in if not isinstance ( st , stmt_type ) ] logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to a given statement type .
218
7
17,672
def _remove_bound_conditions ( agent , keep_criterion ) : new_bc = [ ] for ind in range ( len ( agent . bound_conditions ) ) : if keep_criterion ( agent . bound_conditions [ ind ] . agent ) : new_bc . append ( agent . bound_conditions [ ind ] ) agent . bound_conditions = new_bc
Removes bound conditions of agent such that keep_criterion is False .
84
15
17,673
def _any_bound_condition_fails_criterion ( agent , criterion ) : bc_agents = [ bc . agent for bc in agent . bound_conditions ] for b in bc_agents : if not criterion ( b ) : return True return False
Returns True if any bound condition fails to meet the specified criterion .
55
13
17,674
def filter_grounded_only ( stmts_in , * * kwargs ) : remove_bound = kwargs . get ( 'remove_bound' , False ) logger . info ( 'Filtering %d statements for grounded agents...' % len ( stmts_in ) ) stmts_out = [ ] score_threshold = kwargs . get ( 'score_threshold' ) for st in stmts_in : grounded = True for agent in st . agent_list ( ) : if agent is not None : criterion = lambda x : _agent_is_grounded ( x , score_threshold ) if not criterion ( agent ) : grounded = False break if not isinstance ( agent , Agent ) : continue if remove_bound : _remove_bound_conditions ( agent , criterion ) elif _any_bound_condition_fails_criterion ( agent , criterion ) : grounded = False break if grounded : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to statements that have grounded agents .
280
8
17,675
def _agent_is_gene ( agent , specific_only ) : if not specific_only : if not ( agent . db_refs . get ( 'HGNC' ) or agent . db_refs . get ( 'UP' ) or agent . db_refs . get ( 'FPLX' ) ) : return False else : if not ( agent . db_refs . get ( 'HGNC' ) or agent . db_refs . get ( 'UP' ) ) : return False return True
Returns whether an agent is for a gene .
114
9
17,676
def filter_genes_only ( stmts_in , * * kwargs ) : remove_bound = 'remove_bound' in kwargs and kwargs [ 'remove_bound' ] specific_only = kwargs . get ( 'specific_only' ) logger . info ( 'Filtering %d statements for ones containing genes only...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : genes_only = True for agent in st . agent_list ( ) : if agent is not None : criterion = lambda a : _agent_is_gene ( a , specific_only ) if not criterion ( agent ) : genes_only = False break if remove_bound : _remove_bound_conditions ( agent , criterion ) else : if _any_bound_condition_fails_criterion ( agent , criterion ) : genes_only = False break if genes_only : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to statements containing genes only .
283
7
17,677
def filter_belief ( stmts_in , belief_cutoff , * * kwargs ) : dump_pkl = kwargs . get ( 'save' ) logger . info ( 'Filtering %d statements to above %f belief' % ( len ( stmts_in ) , belief_cutoff ) ) # The first round of filtering is in the top-level list stmts_out = [ ] # Now we eliminate supports/supported-by for stmt in stmts_in : if stmt . belief < belief_cutoff : continue stmts_out . append ( stmt ) supp_by = [ ] supp = [ ] for st in stmt . supports : if st . belief >= belief_cutoff : supp . append ( st ) for st in stmt . supported_by : if st . belief >= belief_cutoff : supp_by . append ( st ) stmt . supports = supp stmt . supported_by = supp_by logger . info ( '%d statements after filter...' % len ( stmts_out ) ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to statements with belief above a given cutoff .
263
10
17,678
def filter_gene_list ( stmts_in , gene_list , policy , allow_families = False , * * kwargs ) : invert = kwargs . get ( 'invert' , False ) remove_bound = kwargs . get ( 'remove_bound' , False ) if policy not in ( 'one' , 'all' ) : logger . error ( 'Policy %s is invalid, not applying filter.' % policy ) else : genes_str = ', ' . join ( gene_list ) inv_str = 'not ' if invert else '' logger . info ( ( 'Filtering %d statements for ones %scontaining "%s" of: ' '%s...' ) % ( len ( stmts_in ) , inv_str , policy , genes_str ) ) # If we're allowing families, make a list of all FamPlex IDs that # contain members of the gene list, and add them to the filter list filter_list = copy ( gene_list ) if allow_families : for hgnc_name in gene_list : gene_uri = hierarchies [ 'entity' ] . get_uri ( 'HGNC' , hgnc_name ) parents = hierarchies [ 'entity' ] . get_parents ( gene_uri ) for par_uri in parents : ns , id = hierarchies [ 'entity' ] . ns_id_from_uri ( par_uri ) filter_list . append ( id ) stmts_out = [ ] if remove_bound : # If requested, remove agents whose names are not in the list from # all bound conditions if not invert : keep_criterion = lambda a : a . name in filter_list else : keep_criterion = lambda a : a . name not in filter_list for st in stmts_in : for agent in st . agent_list ( ) : _remove_bound_conditions ( agent , keep_criterion ) if policy == 'one' : for st in stmts_in : found_gene = False if not remove_bound : agent_list = st . agent_list_with_bound_condition_agents ( ) else : agent_list = st . agent_list ( ) for agent in agent_list : if agent is not None : if agent . name in filter_list : found_gene = True break if ( found_gene and not invert ) or ( not found_gene and invert ) : stmts_out . append ( st ) elif policy == 'all' : for st in stmts_in : found_genes = True if not remove_bound : agent_list = st . agent_list_with_bound_condition_agents ( ) else : agent_list = st . agent_list ( ) for agent in agent_list : if agent is not None : if agent . 
name not in filter_list : found_genes = False break if ( found_genes and not invert ) or ( not found_genes and invert ) : stmts_out . append ( st ) else : stmts_out = stmts_in logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Return statements that contain genes given in a list .
749
10
17,679
def filter_by_db_refs ( stmts_in , namespace , values , policy , * * kwargs ) : invert = kwargs . get ( 'invert' , False ) match_suffix = kwargs . get ( 'match_suffix' , False ) if policy not in ( 'one' , 'all' ) : logger . error ( 'Policy %s is invalid, not applying filter.' % policy ) return else : name_str = ', ' . join ( values ) rev_mod = 'not ' if invert else '' logger . info ( ( 'Filtering %d statements for those with %s agents %s' 'grounded to: %s in the %s namespace...' ) % ( len ( stmts_in ) , policy , rev_mod , name_str , namespace ) ) def meets_criterion ( agent ) : if namespace not in agent . db_refs : return False entry = agent . db_refs [ namespace ] if isinstance ( entry , list ) : entry = entry [ 0 ] [ 0 ] ret = False # Match suffix or entire entry if match_suffix : if any ( [ entry . endswith ( e ) for e in values ] ) : ret = True else : if entry in values : ret = True # Invert if needed if invert : return not ret else : return ret enough = all if policy == 'all' else any stmts_out = [ s for s in stmts_in if enough ( [ meets_criterion ( ag ) for ag in s . agent_list ( ) if ag is not None ] ) ] logger . info ( '%d Statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to Statements whose agents are grounded to a matching entry .
417
12
17,680
def filter_human_only ( stmts_in , * * kwargs ) : from indra . databases import uniprot_client if 'remove_bound' in kwargs and kwargs [ 'remove_bound' ] : remove_bound = True else : remove_bound = False dump_pkl = kwargs . get ( 'save' ) logger . info ( 'Filtering %d statements for human genes only...' % len ( stmts_in ) ) stmts_out = [ ] def criterion ( agent ) : upid = agent . db_refs . get ( 'UP' ) if upid and not uniprot_client . is_human ( upid ) : return False else : return True for st in stmts_in : human_genes = True for agent in st . agent_list ( ) : if agent is not None : if not criterion ( agent ) : human_genes = False break if remove_bound : _remove_bound_conditions ( agent , criterion ) elif _any_bound_condition_fails_criterion ( agent , criterion ) : human_genes = False break if human_genes : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter out statements that are grounded but not to a human gene .
317
13
17,681
def filter_direct ( stmts_in , * * kwargs ) : def get_is_direct ( stmt ) : """Returns true if there is evidence that the statement is a direct interaction. If any of the evidences associated with the statement indicates a direct interatcion then we assume the interaction is direct. If there is no evidence for the interaction being indirect then we default to direct. """ any_indirect = False for ev in stmt . evidence : if ev . epistemics . get ( 'direct' ) is True : return True elif ev . epistemics . get ( 'direct' ) is False : # This guarantees that we have seen at least # some evidence that the statement is indirect any_indirect = True if any_indirect : return False return True logger . info ( 'Filtering %d statements to direct ones...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : if get_is_direct ( st ) : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to statements that are direct interactions
295
7
17,682
def filter_no_hypothesis ( stmts_in , * * kwargs ) : logger . info ( 'Filtering %d statements to no hypothesis...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : all_hypotheses = True ev = None for ev in st . evidence : if not ev . epistemics . get ( 'hypothesis' , False ) : all_hypotheses = False break if ev is None : all_hypotheses = False if not all_hypotheses : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to statements that are not marked as hypothesis in epistemics .
203
14
17,683
def filter_evidence_source ( stmts_in , source_apis , policy = 'one' , * * kwargs ) : logger . info ( 'Filtering %d statements to evidence source "%s" of: %s...' % ( len ( stmts_in ) , policy , ', ' . join ( source_apis ) ) ) stmts_out = [ ] for st in stmts_in : sources = set ( [ ev . source_api for ev in st . evidence ] ) if policy == 'one' : if sources . intersection ( source_apis ) : stmts_out . append ( st ) if policy == 'all' : if sources . intersection ( source_apis ) == set ( source_apis ) : stmts_out . append ( st ) if policy == 'none' : if not sources . intersection ( source_apis ) : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to statements that have evidence from a given set of sources .
271
13
17,684
def filter_top_level ( stmts_in , * * kwargs ) : logger . info ( 'Filtering %d statements for top-level...' % len ( stmts_in ) ) stmts_out = [ st for st in stmts_in if not st . supports ] logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to statements that are at the top - level of the hierarchy .
133
14
17,685
def filter_inconsequential_mods ( stmts_in , whitelist = None , * * kwargs ) : if whitelist is None : whitelist = { } logger . info ( 'Filtering %d statements to remove' % len ( stmts_in ) + ' inconsequential modifications...' ) states_used = whitelist for stmt in stmts_in : for agent in stmt . agent_list ( ) : if agent is not None : if agent . mods : for mc in agent . mods : mod = ( mc . mod_type , mc . residue , mc . position ) try : states_used [ agent . name ] . append ( mod ) except KeyError : states_used [ agent . name ] = [ mod ] for k , v in states_used . items ( ) : states_used [ k ] = list ( set ( v ) ) stmts_out = [ ] for stmt in stmts_in : skip = False if isinstance ( stmt , Modification ) : mod_type = modclass_to_modtype [ stmt . __class__ ] if isinstance ( stmt , RemoveModification ) : mod_type = modtype_to_inverse [ mod_type ] mod = ( mod_type , stmt . residue , stmt . position ) used = states_used . get ( stmt . sub . name , [ ] ) if mod not in used : skip = True if not skip : stmts_out . append ( stmt ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter out Modifications that modify inconsequential sites
394
10
17,686
def filter_inconsequential_acts ( stmts_in , whitelist = None , * * kwargs ) : if whitelist is None : whitelist = { } logger . info ( 'Filtering %d statements to remove' % len ( stmts_in ) + ' inconsequential activations...' ) states_used = whitelist for stmt in stmts_in : for agent in stmt . agent_list ( ) : if agent is not None : if agent . activity : act = agent . activity . activity_type try : states_used [ agent . name ] . append ( act ) except KeyError : states_used [ agent . name ] = [ act ] for k , v in states_used . items ( ) : states_used [ k ] = list ( set ( v ) ) stmts_out = [ ] for stmt in stmts_in : skip = False if isinstance ( stmt , RegulateActivity ) : used = states_used . get ( stmt . obj . name , [ ] ) if stmt . obj_activity not in used : skip = True if not skip : stmts_out . append ( stmt ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter out Activations that modify inconsequential activities
322
10
17,687
def filter_enzyme_kinase ( stmts_in , * * kwargs ) : logger . info ( 'Filtering %d statements to remove ' % len ( stmts_in ) + 'phosphorylation by non-kinases...' ) path = os . path . dirname ( os . path . abspath ( __file__ ) ) kinase_table = read_unicode_csv ( path + '/../resources/kinases.tsv' , delimiter = '\t' ) gene_names = [ lin [ 1 ] for lin in list ( kinase_table ) [ 1 : ] ] stmts_out = [ ] for st in stmts_in : if isinstance ( st , Phosphorylation ) : if st . enz is not None : if st . enz . name in gene_names : stmts_out . append ( st ) else : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter Phosphorylations to ones where the enzyme is a known kinase .
271
16
17,688
def filter_transcription_factor ( stmts_in , * * kwargs ) : logger . info ( 'Filtering %d statements to remove ' % len ( stmts_in ) + 'amount regulations by non-transcription-factors...' ) path = os . path . dirname ( os . path . abspath ( __file__ ) ) tf_table = read_unicode_csv ( path + '/../resources/transcription_factors.csv' ) gene_names = [ lin [ 1 ] for lin in list ( tf_table ) [ 1 : ] ] stmts_out = [ ] for st in stmts_in : if isinstance ( st , RegulateAmount ) : if st . subj is not None : if st . subj . name in gene_names : stmts_out . append ( st ) else : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter out RegulateAmounts where subject is not a transcription factor .
264
14
17,689
def filter_uuid_list ( stmts_in , uuids , * * kwargs ) : invert = kwargs . get ( 'invert' , False ) logger . info ( 'Filtering %d statements for %d UUID%s...' % ( len ( stmts_in ) , len ( uuids ) , 's' if len ( uuids ) > 1 else '' ) ) stmts_out = [ ] for st in stmts_in : if not invert : if st . uuid in uuids : stmts_out . append ( st ) else : if st . uuid not in uuids : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Filter to Statements corresponding to given UUIDs
224
9
17,690
def expand_families ( stmts_in , * * kwargs ) : from indra . tools . expand_families import Expander logger . info ( 'Expanding families on %d statements...' % len ( stmts_in ) ) expander = Expander ( hierarchies ) stmts_out = expander . expand_families ( stmts_in ) logger . info ( '%d statements after expanding families...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Expand FamPlex Agents to individual genes .
151
9
17,691
def reduce_activities ( stmts_in , * * kwargs ) : logger . info ( 'Reducing activities on %d statements...' % len ( stmts_in ) ) stmts_out = [ deepcopy ( st ) for st in stmts_in ] ml = MechLinker ( stmts_out ) ml . gather_explicit_activities ( ) ml . reduce_activities ( ) stmts_out = ml . statements dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Reduce the activity types in a list of statements
147
10
17,692
def strip_agent_context ( stmts_in , * * kwargs ) : logger . info ( 'Stripping agent context on %d statements...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : new_st = deepcopy ( st ) for agent in new_st . agent_list ( ) : if agent is None : continue agent . mods = [ ] agent . mutations = [ ] agent . activity = None agent . location = None agent . bound_conditions = [ ] stmts_out . append ( new_st ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Strip any context on agents within each statement .
177
10
17,693
def standardize_names_groundings ( stmts ) : print ( 'Standardize names to groundings' ) for stmt in stmts : for concept in stmt . agent_list ( ) : db_ns , db_id = concept . get_grounding ( ) if db_id is not None : if isinstance ( db_id , list ) : db_id = db_id [ 0 ] [ 0 ] . split ( '/' ) [ - 1 ] else : db_id = db_id . split ( '/' ) [ - 1 ] db_id = db_id . replace ( '|' , ' ' ) db_id = db_id . replace ( '_' , ' ' ) db_id = db_id . replace ( 'ONT::' , '' ) db_id = db_id . capitalize ( ) concept . name = db_id return stmts
Standardize the names of Concepts with respect to an ontology .
195
13
17,694
def dump_stmt_strings ( stmts , fname ) : with open ( fname , 'wb' ) as fh : for st in stmts : fh . write ( ( '%s\n' % st ) . encode ( 'utf-8' ) )
Save printed statements in a file .
62
7
17,695
def rename_db_ref ( stmts_in , ns_from , ns_to , * * kwargs ) : logger . info ( 'Remapping "%s" to "%s" in db_refs on %d statements...' % ( ns_from , ns_to , len ( stmts_in ) ) ) stmts_out = [ deepcopy ( st ) for st in stmts_in ] for stmt in stmts_out : for agent in stmt . agent_list ( ) : if agent is not None and ns_from in agent . db_refs : agent . db_refs [ ns_to ] = agent . db_refs . pop ( ns_from ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out
Rename an entry in the db_refs of each Agent .
201
14
17,696
def align_statements ( stmts1 , stmts2 , keyfun = None ) : def name_keyfun ( stmt ) : return tuple ( a . name if a is not None else None for a in stmt . agent_list ( ) ) if not keyfun : keyfun = name_keyfun matches = [ ] keys1 = [ keyfun ( s ) for s in stmts1 ] keys2 = [ keyfun ( s ) for s in stmts2 ] for stmt , key in zip ( stmts1 , keys1 ) : try : match_idx = keys2 . index ( key ) match_stmt = stmts2 [ match_idx ] matches . append ( ( stmt , match_stmt ) ) except ValueError : matches . append ( ( stmt , None ) ) for stmt , key in zip ( stmts2 , keys2 ) : try : match_idx = keys1 . index ( key ) except ValueError : matches . append ( ( None , stmt ) ) return matches
Return alignment of two lists of statements by key .
230
10
17,697
def submit_query_request ( end_point , * args , * * kwargs ) : ev_limit = kwargs . pop ( 'ev_limit' , 10 ) best_first = kwargs . pop ( 'best_first' , True ) tries = kwargs . pop ( 'tries' , 2 ) # This isn't handled by requests because of the multiple identical agent # keys, e.g. {'agent': 'MEK', 'agent': 'ERK'} which is not supported in # python, but is allowed and necessary in these query strings. # TODO because we use the API Gateway, this feature is not longer needed. # We should just use the requests parameters dict. query_str = '?' + '&' . join ( [ '%s=%s' % ( k , v ) for k , v in kwargs . items ( ) if v is not None ] + list ( args ) ) return submit_statement_request ( 'get' , end_point , query_str , ev_limit = ev_limit , best_first = best_first , tries = tries )
Low level function to format the query string .
243
9
17,698
def submit_statement_request ( meth , end_point , query_str = '' , data = None , tries = 2 , * * params ) : full_end_point = 'statements/' + end_point . lstrip ( '/' ) return make_db_rest_request ( meth , full_end_point , query_str , data , params , tries )
Even lower level function to make the request .
81
9
17,699
def render_stmt_graph ( statements , reduce = True , english = False , rankdir = None , agent_style = None ) : from indra . assemblers . english import EnglishAssembler # Set the default agent formatting properties if agent_style is None : agent_style = { 'color' : 'lightgray' , 'style' : 'filled' , 'fontname' : 'arial' } # Sets to store all of the nodes and edges as we recursively process all # of the statements nodes = set ( [ ] ) edges = set ( [ ] ) stmt_dict = { } # Recursive function for processing all statements def process_stmt ( stmt ) : nodes . add ( str ( stmt . matches_key ( ) ) ) stmt_dict [ str ( stmt . matches_key ( ) ) ] = stmt for sby_ix , sby_stmt in enumerate ( stmt . supported_by ) : edges . add ( ( str ( stmt . matches_key ( ) ) , str ( sby_stmt . matches_key ( ) ) ) ) process_stmt ( sby_stmt ) # Process all of the top-level statements, getting the supporting statements # recursively for stmt in statements : process_stmt ( stmt ) # Create a networkx graph from the nodes nx_graph = nx . DiGraph ( ) nx_graph . add_edges_from ( edges ) # Perform transitive reduction if desired if reduce : nx_graph = nx . algorithms . dag . transitive_reduction ( nx_graph ) # Create a pygraphviz graph from the nx graph try : pgv_graph = pgv . AGraph ( name = 'statements' , directed = True , rankdir = rankdir ) except NameError : logger . error ( 'Cannot generate graph because ' 'pygraphviz could not be imported.' ) return None for node in nx_graph . nodes ( ) : stmt = stmt_dict [ node ] if english : ea = EnglishAssembler ( [ stmt ] ) stmt_str = ea . make_model ( ) else : stmt_str = str ( stmt ) pgv_graph . add_node ( node , label = '%s (%d)' % ( stmt_str , len ( stmt . evidence ) ) , * * agent_style ) pgv_graph . add_edges_from ( nx_graph . edges ( ) ) return pgv_graph
Render the statement hierarchy as a pygraphviz graph .
552
12