idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
17,800
def add_section(self, section_name):
    """Create an empty report section headed by ``section_name``.

    Parameters
    ----------
    section_name : str
        Heading under which content will later be collected.

    Raises
    ------
    ValueError
        If a section with this name already exists.
    """
    # Fix: validate before mutating, so a failed add does not leave a
    # duplicate entry in section_headings.
    if section_name in self.sections:
        raise ValueError("Section %s already exists." % section_name)
    self.section_headings.append(section_name)
    self.sections[section_name] = []
    return
Create a section of the report, headed by section_name.
62
13
17,801
def set_section_order(self, section_name_list):
    """Set the display order of the sections (unordered by default).

    Any existing section not named in `section_name_list` is appended
    at the end in dictionary iteration order.
    """
    self.section_headings = list(section_name_list)
    leftovers = [name for name in self.sections
                 if name not in section_name_list]
    self.section_headings.extend(leftovers)
    return
Set the order of the sections, which are unordered by default.
70
14
17,802
def add_text(self, text, *args, **kwargs):
    """Add a formatted paragraph of text to the document.

    A 'section' keyword argument routes the text into the named
    section; otherwise it is appended to the main story. All other
    arguments are forwarded to _preformat_text.
    """
    # Pull down some kwargs.
    section_name = kwargs.pop('section', None)
    # Actually do the formatting.
    para, sp = self._preformat_text(text, *args, **kwargs)
    # Select the appropriate list and append the new content.
    target = self.story if section_name is None \
        else self.sections[section_name]
    target.extend([para, sp])
    return
Add text to the document .
132
6
17,803
def add_image(self, image_path, width=None, height=None, section=None):
    """Add an image to the document (dimensions given in inches).

    If `section` is given the image goes into that section; otherwise
    it is appended to the main story.
    """
    # reportlab works in points, so scale the inch dimensions.
    if width is not None:
        width *= inch
    if height is not None:
        height *= inch
    image = Image(image_path, width, height)
    target = self.story if section is None else self.sections[section]
    target.append(image)
    return
Add an image to the document .
85
7
17,804
def make_report(self, sections_first=True, section_header_params=None):
    """Create the pdf document with name self.name + '.pdf'.

    `sections_first` controls whether named sections come before or
    after the unsectioned story content. Returns the file name written.
    """
    title_flowables = self._preformat_text(
        self.title, style='Title', fontsize=18, alignment='center')
    full_story = list(title_flowables)
    if section_header_params is None:
        # Default formatting for section headings.
        section_header_params = {'style': 'h1', 'fontsize': 14,
                                 'alignment': 'center'}
    # Merge the sections and the rest of the story.
    section_story = self._make_sections(**section_header_params)
    if sections_first:
        full_story += section_story + self.story
    else:
        full_story += self.story + section_story
    fname = self.name + '.pdf'
    doc = SimpleDocTemplate(fname, pagesize=letter,
                            rightMargin=72, leftMargin=72,
                            topMargin=72, bottomMargin=18)
    doc.build(full_story)
    return fname
Create the pdf document with name self . name + . pdf .
235
13
17,805
def _make_sections(self, **section_hdr_params):
    """Flatten the sections into a single story list.

    Each section is preceded by a dash-decorated heading formatted
    with `section_hdr_params`.
    """
    sect_story = []
    if not self.section_headings and len(self.sections):
        # Fix: materialize the keys as a list; in Python 3 dict.keys()
        # is a view, which cannot be appended to later by add_section.
        self.section_headings = list(self.sections.keys())
    line = '-' * 20
    for section_name in self.section_headings:
        section_story = self.sections[section_name]
        section_head_text = '%s %s %s' % (line, section_name, line)
        title, title_sp = self._preformat_text(section_head_text,
                                               **section_hdr_params)
        sect_story += [title, title_sp] + section_story
    return sect_story
Flatten the sections into a single story list .
156
10
17,806
def _preformat_text(self, text, style='Normal', space=None, fontsize=12,
                    alignment='left'):
    """Format text for addition to a story list.

    Returns a (Paragraph, Spacer) pair ready to be appended.
    """
    if space is None:
        space = (1, 12)
    ptext = ('<para alignment="%s"><font size=%d>%s</font></para>'
             % (alignment, fontsize, text))
    paragraph = Paragraph(ptext, self.styles[style])
    spacer = Spacer(*space)
    return paragraph, spacer
Format the text for addition to a story list .
111
10
17,807
def get_mesh_name_from_web(mesh_id):
    """Get the MESH label for the given MESH ID using the NLM REST API.

    Returns None if the request fails or the response does not contain
    a label in the expected place.
    """
    url = MESH_URL + mesh_id + '.json'
    resp = requests.get(url)
    if resp.status_code != 200:
        return None
    mesh_json = resp.json()
    try:
        # Fix: drop the unused `as e` binding on the handler below.
        label = mesh_json['@graph'][0]['label']['@value']
    except (KeyError, IndexError):
        # Unexpected response structure; treat as not found.
        return None
    return label
Get the MESH label for the given MESH ID using the NLM REST API .
101
18
17,808
def get_mesh_name(mesh_id, offline=False):
    """Get the MESH label for the given MESH ID.

    The local INDRA mapping is consulted first; if `offline` is False
    and no local mapping exists, the NLM web service is queried.
    """
    local_name = mesh_id_to_name.get(mesh_id)
    if local_name is not None or offline:
        return local_name
    # Look up the MESH mapping from NLM if we don't have it locally
    return get_mesh_name_from_web(mesh_id)
Get the MESH label for the given MESH ID .
97
12
17,809
def get_mesh_id_name(mesh_term, offline=False):
    """Get the MESH ID and name for the given MESH term.

    Returns an (id, name) tuple, or (None, None) when nothing matches
    (or when `offline` is True and the term is unknown locally).
    """
    # Exact name match in the local mapping.
    mesh_id = mesh_name_to_id.get(mesh_term)
    if mesh_id is not None:
        return mesh_id, mesh_term
    # Synonym match mapping to both an ID and a standardized name.
    mesh_id, standard_name = mesh_name_to_id_name.get(mesh_term,
                                                      (None, None))
    if mesh_id is not None:
        return mesh_id, standard_name
    if offline:
        return None, None
    # Look up the MESH mapping from NLM if we don't have it locally
    return get_mesh_id_name_from_web(mesh_term)
Get the MESH ID and name for the given MESH term .
165
14
17,810
def make(directory):
    """Makes a RAS Machine directory with a default configuration."""
    if os.path.exists(directory):
        # Report why the path cannot be created, then bail out.
        if os.path.isdir(directory):
            click.echo('Directory already exists')
        else:
            click.echo('Path exists and is not a directory')
        sys.exit()
    os.makedirs(directory)
    os.mkdir(os.path.join(directory, 'jsons'))
    copy_default_config(os.path.join(directory, 'config.yaml'))
Makes a RAS Machine directory
107
7
17,811
def run_with_search(model_path, config, num_days):
    """Run with PubMed search for new papers."""
    # Imported lazily so the CLI loads without the machine package.
    from indra.tools.machine.machine import run_with_search_helper
    run_with_search_helper(model_path, config, num_days=num_days)
Run with PubMed search for new papers .
59
8
17,812
def run_with_pmids(model_path, pmids):
    """Run with a given list of PMIDs."""
    # Imported lazily so the CLI loads without the machine package.
    from indra.tools.machine.machine import run_with_pmids_helper
    run_with_pmids_helper(model_path, pmids)
Run with given list of PMIDs .
52
8
17,813
def id_lookup(paper_id, idtype=None):
    """Look up all paper IDs via the Pubmed ID mapping service.

    Takes a Pubmed ID, Pubmed Central ID or DOI (optionally carrying a
    'PMC'/'PMID'/'DOI' prefix) and returns a dict with 'doi', 'pmid'
    and 'pmcid' keys, or {} on failure.
    """
    if idtype is not None and idtype not in ('pmid', 'pmcid', 'doi'):
        raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
                         "or 'doi'." % idtype)
    upper_id = paper_id.upper()
    if upper_id.startswith('PMC'):
        idtype = 'pmcid'
    elif upper_id.startswith('PMID'):
        # Strip off any prefix
        paper_id = paper_id[4:]
    elif upper_id.startswith('DOI'):
        paper_id = paper_id[3:]
    data = {'ids': paper_id}
    if idtype is not None:
        data['idtype'] = idtype
    try:
        tree = pubmed_client.send_request(pmid_convert_url, data)
    except Exception as e:
        logger.error('Error looking up PMID in PMC: %s' % e)
        return {}
    if tree is None:
        return {}
    record = tree.find('record')
    if record is None:
        return {}
    return {'doi': record.attrib.get('doi'),
            'pmid': record.attrib.get('pmid'),
            'pmcid': record.attrib.get('pmcid')}
This function takes a Pubmed ID, Pubmed Central ID, or DOI and uses the Pubmed ID mapping service to look up all other IDs from one of these. The IDs are returned in a dictionary.
350
40
17,814
def get_xml(pmc_id):
    """Returns XML for the article corresponding to a PMC ID.

    Returns the article XML as a unicode string, or None on download
    failure or an OAI error response.
    """
    if pmc_id.upper().startswith('PMC'):
        pmc_id = pmc_id[3:]
    # Request params
    params = {'verb': 'GetRecord',
              'identifier': 'oai:pubmedcentral.nih.gov:%s' % pmc_id,
              'metadataPrefix': 'pmc'}
    # Submit the request
    res = requests.get(pmc_url, params)
    if not res.status_code == 200:
        logger.warning("Couldn't download %s" % pmc_id)
        return None
    # Read the bytestream
    xml_bytes = res.content
    # Check for any XML errors; xml_str should still be bytes
    tree = ET.XML(xml_bytes, parser=UTB())
    xmlns = "http://www.openarchives.org/OAI/2.0/"
    err_tag = tree.find('{%s}error' % xmlns)
    if err_tag is not None:
        logger.warning('PMC client returned with error %s: %s'
                       % (err_tag.attrib['code'], err_tag.text))
        return None
    # If no error, return the XML as a unicode string
    return xml_bytes.decode('utf-8')
Returns XML for the article corresponding to a PMC ID .
323
12
17,815
def extract_paragraphs(xml_string):
    """Returns list of paragraphs in an NLM XML.

    In NLM xml, readable text lives in <p> and <title> elements. Tags
    may be namespace-qualified ('{<namespace>}p'), so the local name is
    matched with a regex.
    """
    tree = etree.fromstring(xml_string.encode('utf-8'))
    paragraphs = []
    # Fix: the original pattern '(^|})[p|title]$' used a character
    # class, which matches the single characters p,|,t,i,l,e — so
    # 'title' tags were never matched; use an alternation group.
    tag_pattern = re.compile(r'(^|})(p|title)$')
    for element in tree.iter():
        # Comments/processing instructions have non-string tags.
        if isinstance(element.tag, basestring) and \
                tag_pattern.search(element.tag) and element.text:
            paragraph = ' '.join(element.itertext())
            paragraphs.append(paragraph)
    return paragraphs
Returns list of paragraphs in an NLM XML .
168
10
17,816
def filter_pmids(pmid_list, source_type):
    """Filter a list of PMIDs for ones with full text from PMC.

    `source_type` must be one of 'fulltext', 'oa_xml', 'oa_txt' or
    'auth_xml'. The PMID list for each source type is loaded lazily
    from a file next to this module and cached in a module global.
    """
    global pmids_fulltext_dict
    # Check args
    valid_types = ('fulltext', 'oa_xml', 'oa_txt', 'auth_xml')
    if source_type not in valid_types:
        raise ValueError("source_type must be one of: 'fulltext', "
                         "'oa_xml', 'oa_txt', or 'auth_xml'.")
    # Check if we've loaded this type, and lazily initialize
    if pmids_fulltext_dict.get(source_type) is None:
        fulltext_list_path = os.path.join(os.path.dirname(__file__),
                                          'pmids_%s.txt' % source_type)
        with open(fulltext_list_path, 'rb') as f:
            fulltext_list = set(line.strip().decode('utf-8')
                                for line in f.readlines())
        pmids_fulltext_dict[source_type] = fulltext_list
    return list(set(pmid_list).intersection(
        pmids_fulltext_dict.get(source_type)))
Filter a list of PMIDs for ones with full text from PMC .
259
15
17,817
def get_example_extractions(fname):
    """Get extractions from one of the examples in cag_examples.

    Reads one sentence per line from `fname` and maps each sentence to
    the rdf:RDF XML extracted for it; sentences that fail are logged
    and skipped.
    """
    with open(fname, 'r') as f:
        sentences = f.read().splitlines()
    rdf_xml_dict = {}
    for sentence in sentences:
        logger.info("Reading \"%s\"..." % sentence)
        html = tc.send_query(sentence, 'cwms')
        try:
            extraction = tc.get_xml(html, 'rdf:RDF', fail_if_empty=True)
        except AssertionError as e:
            logger.error("Got error for %s." % sentence)
            logger.exception(e)
            continue
        rdf_xml_dict[sentence] = extraction
    return rdf_xml_dict
Get extractions from one of the examples in cag_examples .
152
15
17,818
def make_example_graphs():
    """Make graphs from all the examples in cag_examples."""
    cag_example_rdfs = {}
    for i, fname in enumerate(os.listdir('cag_examples')):
        # Fix: os.listdir returns bare file names; join with the
        # directory so get_example_extractions can actually open them.
        fpath = os.path.join('cag_examples', fname)
        cag_example_rdfs[i + 1] = get_example_extractions(fpath)
    return make_cag_graphs(cag_example_rdfs)
Make graphs from all the examples in cag_examples .
84
13
17,819
def _join_list(lst, oxford=False):
    """Join a list of words in a grammatically correct way.

    With `oxford` True, a comma precedes the final 'and' in lists of
    three or more items.
    """
    if not lst:
        return ''
    if len(lst) == 1:
        return lst[0]
    if len(lst) == 2:
        return lst[0] + ' and ' + lst[1]
    head = ', '.join(lst[:-1])
    if oxford:
        head += ','
    return head + ' and ' + lst[-1]
Join a list of words in a grammatically correct way.
110
12
17,820
def _assemble_activeform(stmt):
    """Assemble ActiveForm statements into text."""
    subj_str = _assemble_agent_str(stmt.agent)
    is_active_str = 'active' if stmt.is_active else 'inactive'
    # Connective phrase for each known activity type.
    connectives = {'activity': ' is ',
                   'kinase': ' is kinase-',
                   'phosphatase': ' is phosphatase-',
                   'catalytic': ' is catalytically ',
                   'transcription': ' is transcriptionally ',
                   'gtpbound': ' is GTP-bound '}
    # Fix: fall back to plain 'is (in)active' for activity types not
    # listed above instead of raising UnboundLocalError.
    stmt_str = subj_str + connectives.get(stmt.activity, ' is ') \
        + is_active_str
    return _make_sentence(stmt_str)
Assemble ActiveForm statements into text .
273
8
17,821
def _assemble_modification(stmt):
    """Assemble Modification statements into text."""
    sub_str = _assemble_agent_str(stmt.sub)
    if stmt.enz is None:
        # No enzyme: describe the substrate's modification state.
        stmt_str = sub_str + ' is ' + _mod_state_stmt(stmt)
    else:
        enz_str = _assemble_agent_str(stmt.enz)
        if _get_is_direct(stmt):
            connective = ' ' + _mod_process_verb(stmt) + ' '
        else:
            connective = ' leads to the ' + _mod_process_noun(stmt) \
                + ' of '
        stmt_str = enz_str + connective + sub_str
    # Describe the modified site, if known.
    site_str = ''
    if stmt.residue is not None:
        if stmt.position is None:
            site_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
        else:
            site_str = 'on ' + stmt.residue + stmt.position
    stmt_str += ' ' + site_str
    return _make_sentence(stmt_str)
Assemble Modification statements into text .
250
8
17,822
def _assemble_association(stmt):
    """Assemble Association statements into text."""
    member_strs = [_assemble_agent_str(member.concept)
                   for member in stmt.members]
    first, rest = member_strs[0], member_strs[1:]
    return _make_sentence(first + ' is associated with '
                          + _join_list(rest))
Assemble Association statements into text .
83
7
17,823
def _assemble_complex(stmt):
    """Assemble Complex statements into text."""
    member_strs = [_assemble_agent_str(member) for member in stmt.members]
    first, rest = member_strs[0], member_strs[1:]
    return _make_sentence(first + ' binds ' + _join_list(rest))
Assemble Complex statements into text .
78
7
17,824
def _assemble_autophosphorylation(stmt):
    """Assemble Autophosphorylation statements into text."""
    stmt_str = _assemble_agent_str(stmt.enz) + ' phosphorylates itself'
    # Describe the phosphorylated site, if known.
    site_str = ''
    if stmt.residue is not None:
        if stmt.position is None:
            site_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
        else:
            site_str = 'on ' + stmt.residue + stmt.position
    stmt_str += ' ' + site_str
    return _make_sentence(stmt_str)
Assemble Autophosphorylation statements into text .
142
11
17,825
def _assemble_regulate_activity(stmt):
    """Assemble RegulateActivity statements into text."""
    verb = ' activates ' if stmt.is_activation else ' inhibits '
    stmt_str = (_assemble_agent_str(stmt.subj) + verb
                + _assemble_agent_str(stmt.obj))
    return _make_sentence(stmt_str)
Assemble RegulateActivity statements into text .
103
9
17,826
def _assemble_regulate_amount(stmt):
    """Assemble RegulateAmount statements into text."""
    obj_str = _assemble_agent_str(stmt.obj)
    if stmt.subj is None:
        # Without a subject, just state production/degradation.
        if isinstance(stmt, ist.IncreaseAmount):
            stmt_str = obj_str + ' is produced'
        elif isinstance(stmt, ist.DecreaseAmount):
            stmt_str = obj_str + ' is degraded'
    else:
        subj_str = _assemble_agent_str(stmt.subj)
        if isinstance(stmt, ist.IncreaseAmount):
            rel_str = ' increases the amount of '
        elif isinstance(stmt, ist.DecreaseAmount):
            rel_str = ' decreases the amount of '
        stmt_str = subj_str + rel_str + obj_str
    return _make_sentence(stmt_str)
Assemble RegulateAmount statements into text .
197
9
17,827
def _assemble_translocation(stmt):
    """Assemble Translocation statements into text."""
    parts = [_assemble_agent_str(stmt.agent), ' translocates']
    if stmt.from_location is not None:
        parts.append(' from the ' + stmt.from_location)
    if stmt.to_location is not None:
        parts.append(' to the ' + stmt.to_location)
    return _make_sentence(''.join(parts))
Assemble Translocation statements into text .
109
8
17,828
def _assemble_gap(stmt):
    """Assemble Gap statements into text."""
    gap_str = _assemble_agent_str(stmt.gap)
    ras_str = _assemble_agent_str(stmt.ras)
    return _make_sentence(gap_str + ' is a GAP for ' + ras_str)
Assemble Gap statements into text .
80
7
17,829
def _assemble_gef(stmt):
    """Assemble Gef statements into text."""
    gef_str = _assemble_agent_str(stmt.gef)
    ras_str = _assemble_agent_str(stmt.ras)
    return _make_sentence(gef_str + ' is a GEF for ' + ras_str)
Assemble Gef statements into text .
82
8
17,830
def _assemble_conversion(stmt):
    """Assemble a Conversion statement into text."""
    reactants = _join_list([_assemble_agent_str(r) for r in stmt.obj_from])
    products = _join_list([_assemble_agent_str(p) for p in stmt.obj_to])
    if stmt.subj is None:
        stmt_str = '%s is converted into %s' % (reactants, products)
    else:
        subj_str = _assemble_agent_str(stmt.subj)
        stmt_str = '%s catalyzes the conversion of %s into %s' % \
            (subj_str, reactants, products)
    return _make_sentence(stmt_str)
Assemble a Conversion statement into text .
166
8
17,831
def _assemble_influence(stmt):
    """Assemble an Influence statement into text."""
    subj_str = _assemble_agent_str(stmt.subj.concept)
    obj_str = _assemble_agent_str(stmt.obj.concept)
    # Note that n is prepended to increase to make it "an increase"
    if stmt.subj.delta['polarity'] is not None:
        delta_word = ' decrease' if stmt.subj.delta['polarity'] == -1 \
            else 'n increase'
        subj_str = 'a%s in %s' % (delta_word, subj_str)
    if stmt.obj.delta['polarity'] is not None:
        delta_word = ' decrease' if stmt.obj.delta['polarity'] == -1 \
            else 'n increase'
        obj_str = 'a%s in %s' % (delta_word, obj_str)
    return _make_sentence('%s causes %s' % (subj_str, obj_str))
Assemble an Influence statement into text .
259
8
17,832
def _make_sentence(txt):
    """Make a sentence from a piece of text.

    Strips surrounding spaces, capitalizes the first letter and adds a
    terminating period.
    """
    txt = txt.strip(' ')
    return txt[0].upper() + txt[1:] + '.'
Make a sentence from a piece of text .
55
9
17,833
def _get_is_hypothesis(stmt):
    """Return True if the statement is only hypothetical.

    Only when all of the evidences associated with the statement
    indicate a hypothetical interaction do we assume the interaction is
    hypothetical. A statement with no evidence is not considered
    hypothetical.
    """
    if not stmt.evidence:
        return False
    # Fix: the original returned True as soon as one evidence was NOT
    # marked as a hypothesis — the inverse of the documented intent.
    return all(ev.epistemics.get('hypothesis') is True
               for ev in stmt.evidence)
Returns true if there is evidence that the statement is only hypothetical . If all of the evidences associated with the statement indicate a hypothetical interaction then we assume the interaction is hypothetical .
45
35
17,834
def make_model(self):
    """Assemble text from the set of collected INDRA Statements.

    Returns the assembled text as a single string ('' if no statement
    could be assembled).
    """
    # Dispatch in the same order as the original if/elif chain, since
    # some statement classes subclass others (e.g. Association is
    # checked before Complex).
    dispatch = [(ist.Modification, _assemble_modification),
                (ist.Autophosphorylation, _assemble_autophosphorylation),
                (ist.Association, _assemble_association),
                (ist.Complex, _assemble_complex),
                (ist.Influence, _assemble_influence),
                (ist.RegulateActivity, _assemble_regulate_activity),
                (ist.RegulateAmount, _assemble_regulate_amount),
                (ist.ActiveForm, _assemble_activeform),
                (ist.Translocation, _assemble_translocation),
                (ist.Gef, _assemble_gef),
                (ist.Gap, _assemble_gap),
                (ist.Conversion, _assemble_conversion)]
    stmt_strs = []
    for stmt in self.statements:
        for stmt_type, assembler in dispatch:
            if isinstance(stmt, stmt_type):
                stmt_strs.append(assembler(stmt))
                break
        else:
            logger.warning('Unhandled statement type: %s.' % type(stmt))
    return ' '.join(stmt_strs) if stmt_strs else ''
Assemble text from the set of collected INDRA Statements .
481
12
17,835
def add_statements(self, stmts):
    """Add INDRA Statements to the assembler's list of statements.

    Statements already present (per self.statement_exists) are skipped.
    """
    for stmt in stmts:
        if self.statement_exists(stmt):
            continue
        self.statements.append(stmt)
Add INDRA Statements to the assembler s list of statements .
43
13
17,836
def make_model(self):
    """Assemble the SBGN model from the collected INDRA Statements.

    Returns the serialized SBGN model as an XML string.
    """
    ppa = PysbPreassembler(self.statements)
    ppa.replace_activities()
    self.statements = ppa.statements
    # Root SBGN element with a single map inside.
    self.sbgn = emaker.sbgn()
    self._map = emaker.map()
    self.sbgn.append(self._map)
    handlers = [(Modification, self._assemble_modification),
                (RegulateActivity, self._assemble_regulateactivity),
                (RegulateAmount, self._assemble_regulateamount),
                (Complex, self._assemble_complex)]
    for stmt in self.statements:
        for stmt_type, handler in handlers:
            if isinstance(stmt, stmt_type):
                handler(stmt)
                break
        else:
            if isinstance(stmt, ActiveForm):
                # ActiveForm assembly is currently disabled.
                pass
            else:
                logger.warning("Unhandled Statement type %s" % type(stmt))
    return self.print_model()
Assemble the SBGN model from the collected INDRA Statements .
248
13
17,837
def print_model(self, pretty=True, encoding='utf8'):
    """Return the assembled SBGN model serialized as an XML string."""
    xml_bytes = lxml.etree.tostring(self.sbgn, xml_declaration=True,
                                    encoding=encoding,
                                    pretty_print=pretty)
    return xml_bytes
Return the assembled SBGN model as an XML string .
52
11
17,838
def save_model(self, file_name='model.sbgn'):
    """Save the assembled SBGN model in a file.

    The serialized model is written in binary mode since print_model
    returns bytes.
    """
    with open(file_name, 'wb') as fh:
        fh.write(self.print_model())
Save the assembled SBGN model in a file .
50
10
17,839
def _glyph_for_complex_pattern(self, pattern):
    """Add glyph and member glyphs for a PySB ComplexPattern.

    Returns the single member glyph directly when the pattern has only
    one monomer; otherwise wraps all member glyphs in a 'complex'
    glyph.
    """
    # Make the main glyph for each monomer in the pattern.
    monomer_glyphs = [self._glyph_for_monomer_pattern(mon_pat)
                      for mon_pat in pattern.monomer_patterns]
    if len(monomer_glyphs) <= 1:
        return monomer_glyphs[0]
    # Give the pattern a matches_key so _make_agent_id can key on its
    # string form.
    pattern.matches_key = lambda: str(pattern)
    agent_id = self._make_agent_id(pattern)
    complex_glyph = emaker.glyph(emaker.bbox(**self.complex_style),
                                 class_('complex'), id=agent_id)
    for glyph in monomer_glyphs:
        # Prefix member glyph ids with the complex id so they are
        # unique within the document.
        glyph.attrib['id'] = agent_id + glyph.attrib['id']
        complex_glyph.append(glyph)
    return complex_glyph
Add glyph and member glyphs for a PySB ComplexPattern .
214
13
17,840
def _glyph_for_monomer_pattern(self, pattern):
    """Add glyph for a PySB MonomerPattern.

    Returns None for the special __source/__sink monomers; otherwise a
    'macromolecule' glyph with one 'state variable' sub-glyph per
    non-trivial site condition.
    """
    pattern.matches_key = lambda: str(pattern)
    agent_id = self._make_agent_id(pattern)
    # Handle sources and sinks: they get no glyph at all.
    if pattern.monomer.name in ('__source', '__sink'):
        return None
    # Handle molecules
    glyph = emaker.glyph(emaker.label(text=pattern.monomer.name),
                         emaker.bbox(**self.monomer_style),
                         class_('macromolecule'), id=agent_id)
    for site, value in pattern.site_conditions.items():
        # Skip unspecified (None) and bond (integer) conditions.
        if value is None or isinstance(value, int):
            continue
        # Make some common abbreviations
        if site == 'phospho':
            site = 'p'
        elif site == 'activity':
            site = 'act'
            if value == 'active':
                value = 'a'
            elif value == 'inactive':
                value = 'i'
        state = emaker.state(variable=site, value=value)
        state_glyph = emaker.glyph(state,
                                   emaker.bbox(**self.entity_state_style),
                                   class_('state variable'),
                                   id=self._make_id())
        glyph.append(state_glyph)
    return glyph
Add glyph for a PySB MonomerPattern .
372
10
17,841
def load_go_graph(go_fname):
    """Load the GO data from an OWL file and parse into an RDF graph.

    The parsed graph is cached in a module-level global so the slow
    OWL parse happens at most once per process.
    """
    global _go_graph
    if _go_graph is not None:
        return _go_graph
    _go_graph = rdflib.Graph()
    logger.info("Parsing GO OWL file")
    _go_graph.parse(os.path.abspath(go_fname))
    return _go_graph
Load the GO data from an OWL file and parse into an RDF graph .
78
17
17,842
def update_id_mappings(g):
    """Compile all ID -> label mappings and save to a TSV file."""
    g = load_go_graph(go_owl_path)
    query = _prefixes + """
        SELECT ?id ?label
        WHERE {
            ?class oboInOwl:id ?id .
            ?class rdfs:label ?label
        }
        """
    logger.info("Querying for GO ID mappings")
    res = g.query(query)
    # Sort by GO ID for a deterministic file.
    mappings = [(id_lit.value, label_lit.value)
                for id_lit, label_lit in sorted(res, key=lambda r: r[0])]
    # Write to file
    write_unicode_csv(go_mappings_file, mappings, delimiter='\t')
Compile all ID - > label mappings and save to a TSV file .
162
17
17,843
def get_default_ndex_cred(ndex_cred):
    """Get NDEx credentials from the given dict, or the configuration.

    Returns a (username, password) tuple; falls back to the
    NDEX_USERNAME/NDEX_PASSWORD config values when the dict is missing
    either entry.
    """
    if ndex_cred:
        username = ndex_cred.get('user')
        password = ndex_cred.get('password')
        if username is not None and password is not None:
            return username, password
    return get_config('NDEX_USERNAME'), get_config('NDEX_PASSWORD')
Gets the NDEx credentials from the dict or tries the environment if None
105
15
17,844
def send_request(ndex_service_url, params, is_json=True, use_get=False):
    """Send a request to the NDEx server.

    Handles both immediate responses (HTTP 200) and asynchronous task
    responses (HTTP 300), in which case the task is polled until it
    completes. Returns the parsed JSON (or raw text if `is_json` is
    False), or None on error.
    """
    if use_get:
        res = requests.get(ndex_service_url, json=params)
    else:
        res = requests.post(ndex_service_url, json=params)
    status = res.status_code
    # If response is immediate, we get 200
    if status == 200:
        return res.json() if is_json else res.text
    # If there is a continuation of the message we get status 300,
    # handled below. Otherwise we return None.
    if status != 300:
        logger.error('Request returned with code %d' % status)
        return None
    # In case the response is not immediate, a task ID can be used to get
    # the result.
    task_id = res.json().get('task_id')
    logger.info('NDEx task submitted...')
    time_used = 0
    try:
        while status != 200:
            res = requests.get(ndex_base_url + '/task/' + task_id)
            status = res.status_code
            if status != 200:
                time.sleep(5)
                time_used += 5
    except KeyError:
        # Fix: the original evaluated the bare no-op expression `next`
        # here; just give up on a malformed task response.
        return None
    logger.info('NDEx task complete.')
    return res.json() if is_json else res.text
Send a request to the NDEx server .
299
9
17,845
def update_network(cx_str, network_id, ndex_cred=None):
    """Update an existing CX network on NDEx with new CX content.

    Also bumps the version in the network profile and re-applies the
    default style template. Errors are logged and abort the update.
    """
    server = 'http://public.ndexbio.org'
    username, password = get_default_ndex_cred(ndex_cred)
    nd = ndex2.client.Ndex2(server, username, password)

    try:
        logger.info('Getting network summary...')
        summary = nd.get_network_summary(network_id)
    except Exception as e:
        logger.error('Could not get NDEx network summary.')
        logger.error(e)
        return

    # Update network content
    try:
        logger.info('Updating network...')
        cx_stream = io.BytesIO(cx_str.encode('utf-8'))
        nd.update_cx_network(cx_stream, network_id)
    except Exception as e:
        logger.error('Could not update NDEx network.')
        logger.error(e)
        return

    # Update network profile with an incremented version number.
    new_ver = _increment_ndex_ver(summary.get('version'))
    profile = {'name': summary.get('name'),
               'description': summary.get('description'),
               'version': new_ver,
               }
    logger.info('Updating NDEx network (%s) profile to %s',
                network_id, profile)
    profile_retries = 5
    for _ in range(profile_retries):
        try:
            time.sleep(5)
            nd.update_network_profile(network_id, profile)
            break
        except Exception as e:
            logger.error('Could not update NDEx network profile.')
            logger.error(e)
    set_style(network_id, ndex_cred)
Update an existing CX network on NDEx with new CX content .
393
15
17,846
def set_style(network_id, ndex_cred=None, template_id=None):
    """Set the style of a network to a given template network's style.

    When no `template_id` is given, a default template network on the
    public NDEx server is used.
    """
    if not template_id:
        template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf"
    server = 'http://public.ndexbio.org'
    username, password = get_default_ndex_cred(ndex_cred)
    source_network = ndex2.create_nice_cx_from_server(
        username=username, password=password, uuid=network_id,
        server=server)
    source_network.apply_template(server, template_id)
    source_network.update_to(network_id, server=server,
                             username=username, password=password)
Set the style of the network to a given template network s style
175
13
17,847
def initialize(self, cfg_file=None, mode=None):
    """Initialize the model for simulation, possibly given a config file.

    The `cfg_file` and `mode` arguments are accepted for interface
    compatibility but are currently unused.
    """
    self.sim = ScipyOdeSimulator(self.model)
    # Start the state vector at the model's initial conditions.
    self.state = numpy.array(copy.copy(self.sim.initials)[0])
    self.time = numpy.array(0.0)
    self.status = 'initialized'
Initialize the model for simulation possibly given a config file .
76
12
17,848
def update(self, dt=None):
    """Simulate the model for a given time interval.

    EMELI passes dt = -1, so non-positive or missing values fall back
    to the default self.dt.
    """
    step = dt if (dt is not None and dt > 0) else self.dt
    # Run one simulation segment starting from the current state.
    res = self.sim.run(tspan=[0, step], initials=self.state)
    self.state = res.species[-1]
    self.time += step
    if self.time > self.stop_time:
        self.DONE = True
    print((self.time, self.state))
    self.time_course.append((self.time.copy(), self.state.copy()))
Simulate the model for a given time interval .
171
10
17,849
def set_value(self, var_name, value):
    """Set the value of a given variable to a given value."""
    # Translate an external variable name into the internal one.
    if var_name in self.outside_name_map:
        var_name = self.outside_name_map[var_name]
    print('%s=%.5f' % (var_name, 1e9 * value))
    if var_name == 'Precipitation':
        # NOTE(review): the 1e9 factor presumably converts incoming
        # precipitation into model units — confirm with the caller.
        value = 1e9 * value
    species_idx = self.species_name_map[var_name]
    self.state[species_idx] = value
Set the value of a given variable to a given value .
113
12
17,850
def get_value(self, var_name):
    """Return the value of a given variable.

    External variable names are translated through outside_name_map.
    """
    internal_name = self.outside_name_map.get(var_name, var_name)
    return self.state[self.species_name_map[internal_name]]
Return the value of a given variable .
68
8
17,851
def get_input_var_names(self):
    """Return a list of variable names that can be set as input.

    Internal names are replaced by their external counterparts where a
    mapping exists.
    """
    renamed = []
    for var in self.input_vars:
        outside_name = self._map_in_out(var)
        renamed.append(var if outside_name is None else outside_name)
    return renamed
Return a list of variables names that can be set as input .
84
13
17,852
def get_output_var_names(self):
    """Return a list of variable names that can be read as output.

    Output variables are all species that aren't input variables,
    re-mapped to external names where a mapping exists.
    """
    all_vars = set(self.species_name_map.keys())
    output_vars = list(all_vars - set(self.input_vars))
    renamed = []
    for var in output_vars:
        outside_name = self._map_in_out(var)
        renamed.append(var if outside_name is None else outside_name)
    return renamed
Return a list of variables names that can be read as output .
132
13
17,853
def make_repository_component(self):
    """Return an XML string representing this BMI in a workflow."""
    component = etree.Element('component')

    def add(tag, text):
        # Append a simple <tag>text</tag> child to the component.
        elem = etree.Element(tag)
        elem.text = text
        component.append(elem)

    add('comp_name', self.model.name)
    add('module_path', os.getcwd())
    add('module_name', self.model.name)
    add('class_name', 'model_class')
    add('model_name', self.model.name)
    add('language', 'python')
    add('version', self.get_attribute('version'))
    add('author', self.get_attribute('author_name'))
    add('help_url', 'http://github.com/sorgerlab/indra')
    # Placeholder elements whose text is just the tag name.
    for tag in ('cfg_template', 'time_step_type', 'time_units',
                'grid_type', 'description', 'comp_type', 'uses_types'):
        add(tag, tag)
    return etree.tounicode(component, pretty_print=True)
Return an XML string representing this BMI in a workflow .
411
11
17,854
def _map_in_out(self, inside_var_name):
    """Return the external name of a variable mapped from inside.

    Returns None when no external mapping exists.
    """
    matches = (out_name
               for out_name, in_name in self.outside_name_map.items()
               if in_name == inside_var_name)
    return next(matches, None)
Return the external name of a variable mapped from inside .
55
11
17,855
def read_pmid(pmid, source, cont_path, sparser_version, outbuf=None,
              cleanup=True):
    """Run sparser on a single pmid.

    Returns the list of extracted Statements, or None if the content
    could not be read or processed. Processing is aborted after 60
    seconds via SIGALRM.
    """
    signal.signal(signal.SIGALRM, _timeout_handler)
    signal.alarm(60)
    try:
        # Fix: compare strings with == instead of `is` (identity),
        # which only works by accident of string interning.
        if (source == 'content_not_found'
                or source.startswith('unhandled_content_type')
                or source.endswith('failure')):
            logger.info('No content read for %s.' % pmid)
            return  # No real content here.
        if cont_path.endswith('.nxml') and source.startswith('pmc'):
            new_fname = 'PMC%s%d.nxml' % (pmid, mp.current_process().pid)
            os.rename(cont_path, new_fname)
            try:
                sp = sparser.process_nxml_file(new_fname, outbuf=outbuf,
                                               cleanup=cleanup)
            finally:
                if cleanup and os.path.exists(new_fname):
                    os.remove(new_fname)
        elif cont_path.endswith('.txt'):
            content_str = ''
            with open(cont_path, 'r') as f:
                content_str = f.read()
            sp = sparser.process_text(content_str, outbuf=outbuf,
                                      cleanup=cleanup)
        signal.alarm(0)
    except Exception as e:
        logger.error('Failed to process data for %s.' % pmid)
        logger.exception(e)
        signal.alarm(0)
        return
    if sp is None:
        logger.error('Failed to run sparser on pmid: %s.' % pmid)
        return
    # At this point, we rewrite the PMID in the Evidence of Sparser
    # Statements according to the actual PMID that was read.
    sp.set_statements_pmid(pmid)
    s3_client.put_reader_output('sparser', sp.json_stmts, pmid,
                                sparser_version, source)
    return sp.statements
Run sparser on a single pmid .
458
9
17,856
def get_stmts(pmids_unread, cleanup=True, sparser_version=None):
    """Run sparser on the pmids in pmids_unread.

    Parameters
    ----------
    pmids_unread : dict
        Maps pmid -> dict with 'content_source' and 'content_path' keys.
    cleanup : bool
        Passed through to read_pmid to control temp-file removal.
    sparser_version : str or None
        Reader version; looked up from sparser if not given.

    Returns
    -------
    dict
        Maps each successfully-read pmid to its list of statements.
    """
    if sparser_version is None:
        sparser_version = sparser.get_version()
    stmts = {}
    now = datetime.now()
    # One log file per worker process, timestamped to avoid collisions.
    outbuf_fname = 'sparser_%s_%s.log' % (
        now.strftime('%Y%m%d-%H%M%S'),
        mp.current_process().pid,
        )
    outbuf = open(outbuf_fname, 'wb')
    try:
        for pmid, result in pmids_unread.items():
            logger.info('Reading %s' % pmid)
            source = result['content_source']
            cont_path = result['content_path']
            outbuf.write(('\nReading pmid %s from %s located at %s.\n'
                          % (pmid, source, cont_path)).encode('utf-8'))
            outbuf.flush()
            some_stmts = read_pmid(pmid, source, cont_path, sparser_version,
                                   outbuf, cleanup)
            if some_stmts is not None:
                stmts[pmid] = some_stmts
            else:
                continue  # We didn't get any new statements.
    except KeyboardInterrupt as e:
        # Allow partial results to survive a single Ctrl-C.
        logger.exception(e)
        logger.info('Caught keyboard interrupt...stopping. \n'
                    'Results so far will be pickled unless '
                    'Keyboard interupt is hit again.')
    finally:
        # Always close the log, even on interrupt.
        outbuf.close()
        print("Sparser logs may be found in %s" % outbuf_fname)
    return stmts
Run sparser on the pmids in pmids_unread .
380
14
17,857
def run_sparser(pmid_list, tmp_dir, num_cores, start_index, end_index,
                force_read, force_fulltext, cleanup=True, verbose=True):
    """Run the sparser reader on the pmids in pmid_list.

    Content is split between cached (already-read) and unread pmids;
    unread pmids are read either serially or on a multiprocessing pool.

    Returns
    -------
    tuple
        (stmts, pmids_unread) where stmts maps pmid -> statements from
        both fresh reading and cache, and pmids_unread maps each
        freshly-read pmid to its content info.
    """
    reader_version = sparser.get_version()
    _, _, _, pmids_read, pmids_unread, _ =\
        get_content_to_read(
            pmid_list, start_index, end_index, tmp_dir, num_cores,
            force_fulltext, force_read, 'sparser', reader_version
            )

    logger.info('Adjusting num cores to length of pmid_list.')
    num_cores = min(len(pmid_list), num_cores)
    logger.info('Adjusted...')
    # BUGFIX: the original tested `num_cores is 1`, relying on CPython's
    # small-int caching; use an ordinary comparison. Folding the
    # num_cores == 0 case (empty pmid_list) into the serial branch also
    # prevents `stmts` from being unbound at the return below.
    if num_cores <= 1:
        stmts = get_stmts(pmids_unread, cleanup=cleanup)
        stmts.update({pmid: get_stmts_from_cache(pmid)[pmid]
                      for pmid in pmids_read.keys()})
    else:
        logger.info("Starting a pool with %d cores." % num_cores)
        pool = mp.Pool(num_cores)
        pmids_to_read = list(pmids_unread.keys())
        N = len(pmids_unread)
        dn = int(N/num_cores)
        logger.info("Breaking pmids into batches.")
        batches = []
        for i in range(num_cores):
            # BUGFIX: the original sliced the last batch as
            # [i*dn:min((i+1)*dn, N)], which silently drops the
            # N % num_cores trailing pmids; extend the last batch to N.
            stop = N if i == num_cores - 1 else (i + 1)*dn
            batches.append({
                k: pmids_unread[k]
                for k in pmids_to_read[i*dn:stop]
                })
        get_stmts_func = functools.partial(
            get_stmts,
            cleanup=cleanup,
            sparser_version=reader_version
            )
        logger.info("Mapping get_stmts onto pool.")
        unread_res = pool.map(get_stmts_func, batches)
        logger.info('len(unread_res)=%d' % len(unread_res))
        read_res = pool.map(get_stmts_from_cache, pmids_read.keys())
        logger.info('len(read_res)=%d' % len(read_res))
        pool.close()
        logger.info('Multiprocessing pool closed.')
        pool.join()
        logger.info('Multiprocessing pool joined.')
        # Merge per-worker result dicts into one pmid -> statements map.
        stmts = {
            pmid: stmt_list for res_dict in unread_res + read_res
            for pmid, stmt_list in res_dict.items()
            }
        logger.info('len(stmts)=%d' % len(stmts))
    return (stmts, pmids_unread)
Run the sparser reader on the pmids in pmid_list .
651
15
17,858
def get_all_descendants(parent):
    """Get all the descendants of a parent class, recursively.

    Direct subclasses come first, followed by the descendants of each
    subclass in turn, matching the recursive expansion order.
    """
    direct = list(parent.__subclasses__())
    nested = [desc for child in direct for desc in get_all_descendants(child)]
    return direct + nested
Get all the descendants of a parent class recursively .
45
12
17,859
def get_type_hierarchy(s):
    """Get the sequence of parents from `s` to Statement.

    `s` may be a Statement instance or class; the result starts with
    the class itself, followed by each base class up to and including
    Statement.
    """
    cls = s if isinstance(s, type) else type(s)
    hierarchy = [cls]
    for base in cls.__bases__:
        if base is Statement:
            hierarchy.append(base)
        else:
            hierarchy.extend(get_type_hierarchy(base))
    return hierarchy
Get the sequence of parents from s to Statement .
86
10
17,860
def get_statement_by_name(stmt_name):
    """Get a statement class given the name of the statement class.

    Lookup is case-insensitive over all descendants of Statement;
    raises NotAStatementName when no class matches.
    """
    target = stmt_name.lower()
    for candidate in get_all_descendants(Statement):
        if candidate.__name__.lower() == target:
            return candidate
    raise NotAStatementName('\"%s\" is not recognized as a statement type!'
                            % stmt_name)
Get a statement class given the name of the statement class .
93
12
17,861
def get_unresolved_support_uuids(stmts):
    """Get the uuids of Unresolved entries in the support of stmts."""
    unresolved = set()
    for stmt in stmts:
        for linked in stmt.supports + stmt.supported_by:
            if isinstance(linked, Unresolved):
                unresolved.add(linked.uuid)
    return unresolved
Get the uuids of Unresolved statements found in the supports/supported_by of the given statements (as produced by stmts_from_json).
55
20
17,862
def stmt_type(obj, mk=True):
    """Return standardized, backwards-compatible object type.

    For Statement instances with mk=True, the class object itself is
    returned; in every other case, the class name string is returned.
    """
    if mk and isinstance(obj, Statement):
        return type(obj)
    return type(obj).__name__
Return standardized backwards compatible object type String .
40
8
17,863
def get_hash(self, shallow=True, refresh=False):
    """Get a hash for this Statement.

    A shallow hash covers only matches_key(); a full hash also folds in
    the sorted evidence matches keys. Hashes are cached on the instance
    (``_shallow_hash`` / ``_full_hash``) unless refresh is True.
    """
    if shallow:
        cached = getattr(self, '_shallow_hash', None)
        if cached is None or refresh:
            cached = make_hash(self.matches_key(), 14)
            self._shallow_hash = cached
        return cached
    cached = getattr(self, '_full_hash', None)
    if cached is None or refresh:
        ev_keys = sorted(ev.matches_key() for ev in self.evidence)
        cached = make_hash(self.matches_key() + str(ev_keys), 16)
        self._full_hash = cached
    return cached
Get a hash for this Statement .
170
7
17,864
def _tag_evidence ( self ) : h = self . get_hash ( shallow = False ) for ev in self . evidence : ev . stmt_tag = h return
Set all the Evidence stmt_tag to my deep matches - key hash .
37
16
17,865
def agent_list(self, deep_sorted=False):
    """Get the canonicallized agent list.

    Walks the attributes named in ``_agent_order``; single Concepts and
    None are appended as-is, lists of Concepts are extended (optionally
    deep-sorted). Raises TypeError on anything else.
    """
    agents = []
    for attr_name in self._agent_order:
        value = getattr(self, attr_name)
        if value is None or isinstance(value, Concept):
            agents.append(value)
        elif isinstance(value, list):
            if not all(isinstance(item, Concept) for item in value):
                raise TypeError("Expected all elements of list to be Agent "
                                "and/or Concept, but got: %s"
                                % {type(item) for item in value})
            agents.extend(sorted_agents(value) if deep_sorted else value)
        else:
            raise TypeError("Expected type Agent, Concept, or list, got "
                            "type %s." % type(value))
    return agents
Get the canonicallized agent list .
211
9
17,866
def to_json ( self , use_sbo = False ) : stmt_type = type ( self ) . __name__ # Original comment: For backwards compatibility, could be removed later all_stmts = [ self ] + self . supports + self . supported_by for st in all_stmts : if not hasattr ( st , 'uuid' ) : st . uuid = '%s' % uuid . uuid4 ( ) ################## json_dict = _o ( type = stmt_type ) json_dict [ 'belief' ] = self . belief if self . evidence : evidence = [ ev . to_json ( ) for ev in self . evidence ] json_dict [ 'evidence' ] = evidence json_dict [ 'id' ] = '%s' % self . uuid if self . supports : json_dict [ 'supports' ] = [ '%s' % st . uuid for st in self . supports ] if self . supported_by : json_dict [ 'supported_by' ] = [ '%s' % st . uuid for st in self . supported_by ] def get_sbo_term ( cls ) : sbo_term = stmt_sbo_map . get ( cls . __name__ . lower ( ) ) while not sbo_term : cls = cls . __bases__ [ 0 ] sbo_term = stmt_sbo_map . get ( cls . __name__ . lower ( ) ) return sbo_term if use_sbo : sbo_term = get_sbo_term ( self . __class__ ) json_dict [ 'sbo' ] = 'http://identifiers.org/sbo/SBO:%s' % sbo_term return json_dict
Return serialized Statement as a JSON dict .
397
9
17,867
def to_graph(self):
    """Return Statement as a networkx graph.

    The JSON serialization of the statement is unrolled recursively:
    each dict/list/scalar becomes a node keyed by its path from the
    root uuid, and containment becomes an edge labeled by the dict key.
    """
    def json_node(graph, element, prefix):
        # Recursively add `element` under path `prefix`; returns the
        # new node id, or None for falsy (empty) elements.
        if not element:
            return None
        node_id = '|'.join(prefix)
        if isinstance(element, list):
            graph.add_node(node_id, label='')
            # Enumerate children and add nodes and connect to anchor node
            for i, sub_element in enumerate(element):
                sub_id = json_node(graph, sub_element, prefix + ['%s' % i])
                if sub_id:
                    graph.add_edge(node_id, sub_id, label='')
        elif isinstance(element, dict):
            graph.add_node(node_id, label='')
            # Add node recursively for each element
            # Connect to this node with edge label according to key
            for k, v in element.items():
                if k == 'id':
                    continue
                elif k == 'name':
                    # 'name'/'type' values become this node's label
                    # rather than child nodes.
                    graph.node[node_id]['label'] = v
                    continue
                elif k == 'type':
                    graph.node[node_id]['label'] = v
                    continue
                sub_id = json_node(graph, v, prefix + ['%s' % k])
                if sub_id:
                    graph.add_edge(node_id, sub_id, label=('%s' % k))
        else:
            # Scalar leaf: URLs are shortened to their last path segment.
            if isinstance(element, basestring) and element.startswith('http'):
                element = element.split('/')[-1]
            graph.add_node(node_id, label=('%s' % str(element)))
        return node_id
    jd = self.to_json()
    graph = networkx.DiGraph()
    json_node(graph, jd, ['%s' % self.uuid])
    return graph
Return Statement as a networkx graph .
406
8
17,868
def make_generic_copy(self, deeply=False):
    """Make a new matching Statement with no provenance.

    All attributes besides evidence, belief, uuid, supports,
    supported_by and is_activation are carried over; agent lists are
    canonically sorted so the copy has a stable matches key.

    Parameters
    ----------
    deeply : bool
        If True, deep-copy the attribute dict so mutable attributes are
        not shared with the original statement.

    Returns
    -------
    Statement
        A new instance of the same class.
    """
    if deeply:
        kwargs = deepcopy(self.__dict__)
    else:
        kwargs = self.__dict__.copy()
    # Strip provenance-related attributes.
    for attr in ['evidence', 'belief', 'uuid', 'supports', 'supported_by',
                 'is_activation']:
        kwargs.pop(attr, None)
    # BUGFIX: the original looped over both hash keys and popped the
    # *same* key twice per iteration, so the full hash was lost and the
    # shallow hash ended up stored as the full hash. Pop each cached
    # hash into its own variable instead.
    my_hash = kwargs.pop('_full_hash', None)
    my_shallow_hash = kwargs.pop('_shallow_hash', None)
    for attr in self._agent_order:
        attr_value = kwargs.get(attr)
        if isinstance(attr_value, list):
            kwargs[attr] = sorted_agents(attr_value)
    new_instance = self.__class__(**kwargs)
    new_instance._full_hash = my_hash
    new_instance._shallow_hash = my_shallow_hash
    return new_instance
Make a new matching Statement with no provenance .
249
10
17,869
def load_lincs_csv(url):
    """Helper function to turn csv rows into dicts.

    Fetches the LINCS resource at `url` as CSV and returns one dict per
    data row, keyed by the header row. Raises on HTTP error status.
    """
    response = requests.get(url, params={'output_type': '.csv'}, timeout=120)
    response.raise_for_status()
    if sys.version_info[0] < 3:
        # Python 2's csv machinery wants a byte stream.
        csv_io = BytesIO(response.content)
    else:
        csv_io = StringIO(response.text)
    rows = list(read_unicode_csv_fileobj(csv_io, delimiter=','))
    header_row, data_lines = rows[0], rows[1:]
    return [dict(zip(header_row, line)) for line in data_lines]
Helper function to turn csv rows into dicts .
157
11
17,870
def get_small_molecule_name(self, hms_lincs_id):
    """Get the name of a small molecule from the LINCS sm metadata.

    Parameters
    ----------
    hms_lincs_id : str
        The HMS LINCS ID of the small molecule.

    Returns
    -------
    str or None
        The molecule's name, or None if no entry was found.
    """
    entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
    return entry['Name'] if entry else None
Get the name of a small molecule from the LINCS sm metadata .
68
14
17,871
def get_small_molecule_refs(self, hms_lincs_id):
    """Get the id refs of a small molecule from the LINCS sm metadata.

    Always includes 'HMS-LINCS'; other namespaces (CHEMBL, CHEBI,
    PUBCHEM, LINCS) are added when present in the metadata entry.
    """
    refs = {'HMS-LINCS': hms_lincs_id}
    entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
    if not entry:
        # No metadata entry: only the HMS-LINCS id is known.
        return refs
    # Column names in the LINCS table for each external namespace.
    column_for = {'chembl': 'ChEMBL ID', 'chebi': 'ChEBI ID',
                  'pubchem': 'PubChem CID', 'lincs': 'LINCS ID'}
    for ns, column in column_for.items():
        value = entry.get(column)
        if value:
            refs[ns.upper()] = value
    return refs
Get the id refs of a small molecule from the LINCS sm metadata .
190
16
17,872
def get_protein_refs(self, hms_lincs_id):
    """Get the refs for a protein from the LINCS protein metadata.

    Always includes 'HMS-LINCS'; EGID and UP are added when present.
    """
    # TODO: We could get phosphorylation states from the protein data.
    refs = {'HMS-LINCS': hms_lincs_id}
    entry = self._get_entry_by_id(self._prot_data, hms_lincs_id)
    if not entry:
        return refs
    # Column names in the LINCS table for each external namespace.
    column_for = {'egid': 'Gene ID', 'up': 'UniProt ID'}
    for ns, column in column_for.items():
        value = entry.get(column)
        if value:
            refs[ns.upper()] = value
    return refs
Get the refs for a protein from the LINCs protein metadata .
163
15
17,873
def get_bel_stmts(self, filter=False):
    """Get relevant statements from the BEL large corpus.

    Results are cached in '<basename>_bel_stmts.pkl' when a basename is
    configured. If `filter` is True and more than one gene is
    configured, statements are restricted to those involving only genes
    in self.gene_list.
    """
    if self.basename is not None:
        bel_stmt_path = '%s_bel_stmts.pkl' % self.basename
    # Check for cached BEL stmt file
    if self.basename is not None and os.path.isfile(bel_stmt_path):
        logger.info("Loading BEL statements from %s" % bel_stmt_path)
        with open(bel_stmt_path, 'rb') as f:
            bel_statements = pickle.load(f)
    # No cache, so perform the queries
    else:
        bel_proc = bel.process_pybel_neighborhood(
            self.gene_list, network_file=self.bel_corpus)
        bel_statements = bel_proc.statements
        # Save to pickle file if we're caching
        if self.basename is not None:
            with open(bel_stmt_path, 'wb') as f:
                pickle.dump(bel_statements, f)
    # Optionally filter out statements not involving only our gene set
    if filter:
        if len(self.gene_list) > 1:
            bel_statements = ac.filter_gene_list(
                bel_statements, self.gene_list, 'all')
    return bel_statements
Get relevant statements from the BEL large corpus .
294
9
17,874
def get_biopax_stmts(self, filter=False, query='pathsbetween',
                     database_filter=None):
    """Get relevant statements from Pathway Commons.

    Performs a 'pathsbetween' (or 'neighborhood') query for
    self.gene_list, caching both the downloaded OWL model and the
    extracted statements when a basename is configured.
    """
    # If we're using a cache, initialize the appropriate filenames
    if self.basename is not None:
        biopax_stmt_path = '%s_biopax_stmts.pkl' % self.basename
        biopax_ras_owl_path = '%s_pc_pathsbetween.owl' % self.basename
    # Check for cached Biopax stmt file at the given path
    # if it's there, return the statements from the cache
    if self.basename is not None and os.path.isfile(biopax_stmt_path):
        logger.info("Loading Biopax statements from %s" % biopax_stmt_path)
        with open(biopax_stmt_path, 'rb') as f:
            bp_statements = pickle.load(f)
        return bp_statements
    # Check for cached file before querying Pathway Commons Web API
    if self.basename is not None and os.path.isfile(biopax_ras_owl_path):
        logger.info("Loading Biopax from OWL file %s" % biopax_ras_owl_path)
        bp = biopax.process_owl(biopax_ras_owl_path)
    # OWL file not found; do query and save to file
    else:
        # A pathsbetween query needs at least two genes.
        if (len(self.gene_list) < 2) and (query == 'pathsbetween'):
            logger.warning('Using neighborhood query for one gene.')
            query = 'neighborhood'
        if query == 'pathsbetween':
            # Large gene lists are queried in blocks of 60.
            if len(self.gene_list) > 60:
                block_size = 60
            else:
                block_size = None
            bp = biopax.process_pc_pathsbetween(
                self.gene_list, database_filter=database_filter,
                block_size=block_size)
        elif query == 'neighborhood':
            bp = biopax.process_pc_neighborhood(
                self.gene_list, database_filter=database_filter)
        else:
            logger.error('Invalid query type: %s' % query)
            return []
        # Save the file if we're caching
        if self.basename is not None:
            bp.save_model(biopax_ras_owl_path)
    # Save statements to pickle file if we're caching
    if self.basename is not None:
        with open(biopax_stmt_path, 'wb') as f:
            pickle.dump(bp.statements, f)
    # Optionally filter out statements not involving only our gene set
    if filter:
        policy = 'one' if len(self.gene_list) > 1 else 'all'
        stmts = ac.filter_gene_list(bp.statements, self.gene_list, policy)
    else:
        stmts = bp.statements
    return stmts
Get relevant statements from Pathway Commons .
664
8
17,875
def get_statements(self, filter=False):
    """Return the combined list of statements from BEL and Pathway Commons.

    Parameters
    ----------
    filter : bool
        Passed through to both underlying statement getters.
    """
    biopax_statements = self.get_biopax_stmts(filter=filter)
    bel_statements = self.get_bel_stmts(filter=filter)
    return biopax_statements + bel_statements
Return the combined list of statements from BEL and Pathway Commons .
69
13
17,876
def run_preassembly(self, stmts, print_summary=True):
    """Run complete preassembly procedure on the given statements.

    Deduplicates, maps sites, deduplicates again and combines related
    statements, storing all intermediate results in self.results (and
    pickling them when a basename is configured).

    Returns
    -------
    dict
        The results dict with keys 'raw', 'duplicates1', 'valid',
        'mapped', 'mapped_stmts', 'duplicates2' and 'related2'.
    """
    # First round of preassembly: remove duplicates before sitemapping
    pa1 = Preassembler(hierarchies, stmts)
    logger.info("Combining duplicates")
    pa1.combine_duplicates()
    # Map sites
    logger.info("Mapping sites")
    (valid, mapped) = sm.map_sites(pa1.unique_stmts)
    # Combine valid and successfully mapped statements into single list
    correctly_mapped_stmts = []
    for ms in mapped:
        # Keep only statements where every modification was mapped.
        if all([True if mm[1] is not None else False
                for mm in ms.mapped_mods]):
            correctly_mapped_stmts.append(ms.mapped_stmt)
    mapped_stmts = valid + correctly_mapped_stmts
    # Second round of preassembly: de-duplicate and combine related
    pa2 = Preassembler(hierarchies, mapped_stmts)
    logger.info("Combining duplicates again")
    pa2.combine_duplicates()
    pa2.combine_related()
    # Fill out the results dict
    self.results = {}
    self.results['raw'] = stmts
    self.results['duplicates1'] = pa1.unique_stmts
    self.results['valid'] = valid
    self.results['mapped'] = mapped
    self.results['mapped_stmts'] = mapped_stmts
    self.results['duplicates2'] = pa2.unique_stmts
    self.results['related2'] = pa2.related_stmts
    # Print summary
    if print_summary:
        logger.info("\nStarting number of statements: %d" % len(stmts))
        logger.info("After duplicate removal: %d" % len(pa1.unique_stmts))
        logger.info("Unique statements with valid sites: %d" % len(valid))
        logger.info("Unique statements with invalid sites: %d" % len(mapped))
        logger.info("After post-mapping duplicate removal: %d"
                    % len(pa2.unique_stmts))
        logger.info("After combining related statements: %d"
                    % len(pa2.related_stmts))
    # Save the results if we're caching
    if self.basename is not None:
        results_filename = '%s_results.pkl' % self.basename
        with open(results_filename, 'wb') as f:
            pickle.dump(self.results, f)
    return self.results
Run complete preassembly procedure on the given statements .
585
10
17,877
def _get_grounding(entity):
    """Return Hume grounding as a db_refs dict.

    Always includes the raw 'TEXT'; when ontology groundings are
    present, a 'HUME' entry is added holding (concept, score) pairs
    sorted by score, then concept depth, then name (all descending).
    """
    db_refs = {'TEXT': entity['text']}
    groundings = entity.get('grounding')
    if not groundings:
        return db_refs

    def get_ont_concept(concept):
        """Strip slash, replace spaces and remove example leafs."""
        # In the WM context, groundings have no URL prefix and start with /
        # The following block does some special handling of these groundings.
        if concept.startswith('/'):
            concept = concept[1:]
            concept = concept.replace(' ', '_')
            # We eliminate any entries that aren't ontology categories
            # these are typically "examples" corresponding to the category
            while concept not in hume_onto_entries:
                parts = concept.split('/')
                if len(parts) == 1:
                    break
                concept = '/'.join(parts[:-1])
        # Otherwise we just return the concept as is
        return concept

    # Basic collection of grounding entries
    raw_grounding_entries = [(get_ont_concept(g['ontologyConcept']),
                              g['value']) for g in groundings]

    # Occasionally we get duplicate grounding entries, we want to
    # eliminate those here
    grounding_dict = {}
    for cat, score in raw_grounding_entries:
        if (cat not in grounding_dict) or (score > grounding_dict[cat]):
            grounding_dict[cat] = score
    # Then we sort the list in reverse order according to score
    # Sometimes the exact same score appears multiple times, in this
    # case we prioritize by the "depth" of the grounding which is
    # obtained by looking at the number of /-s in the entry.
    # However, there are still cases where the grounding depth and the score
    # are the same. In these cases we just sort alphabetically.
    grounding_entries = sorted(list(set(grounding_dict.items())),
                               key=lambda x: (x[1], x[0].count('/'), x[0]),
                               reverse=True)
    # We could get an empty list here in which case we don't add the
    # grounding
    if grounding_entries:
        db_refs['HUME'] = grounding_entries
    return db_refs
Return Hume grounding .
498
4
17,878
def _find_relations(self):
    """Find all relevant relation elements and return them in a list.

    Also populates self.relation_dict (directed relations) and
    self.concept_dict (events and entities) as a side effect, both
    keyed by extraction '@id'.
    """
    # Get all extractions
    extractions = \
        list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]"))
    # Get relations from extractions
    relations = []
    for e in extractions:
        label_set = set(e.get('labels', []))
        # If this is a DirectedRelation
        if 'DirectedRelation' in label_set:
            self.relation_dict[e['@id']] = e
            subtype = e.get('subtype')
            # Only relations whose subtype matches a known polarity
            # are returned for further processing.
            if any(t in subtype for t in polarities.keys()):
                relations.append((subtype, e))
        # If this is an Event or an Entity
        if {'Event', 'Entity'} & label_set:
            self.concept_dict[e['@id']] = e
    if not relations and not self.relation_dict:
        logger.info("No relations found.")
    else:
        logger.info('%d relations of types %s found'
                    % (len(relations), ', '.join(polarities.keys())))
        logger.info('%d relations in dict.' % len(self.relation_dict))
        logger.info('%d concepts found.' % len(self.concept_dict))
    return relations
Find all relevant relation elements and return them in a list .
297
12
17,879
def _get_documents ( self ) : documents = self . tree . execute ( "$.documents" ) for doc in documents : sentences = { s [ '@id' ] : s [ 'text' ] for s in doc . get ( 'sentences' , [ ] ) } self . document_dict [ doc [ '@id' ] ] = { 'sentences' : sentences , 'location' : doc [ 'location' ] }
Populate sentences attribute with a dict keyed by document id .
96
13
17,880
def _make_context(self, entity):
    """Get place and time info from the json for this entity.

    Returns
    -------
    WorldContext or None
        A context combining any geolocation and time interval found
        among the entity's arguments, or None if neither is present.
    """
    loc_context = None
    time_context = None
    # Look for time and place contexts.
    for argument in entity["arguments"]:
        if argument["type"] == "place":
            entity_id = argument["value"]["@id"]
            loc_entity = self.concept_dict[entity_id]
            place = loc_entity.get("canonicalName")
            # Fall back to the raw text if no canonical name exists.
            if not place:
                place = loc_entity['text']
            geo_id = loc_entity.get('geoname_id')
            loc_context = RefContext(name=place,
                                     db_refs={"GEOID": geo_id})
        if argument["type"] == "time":
            entity_id = argument["value"]["@id"]
            temporal_entity = self.concept_dict[entity_id]
            text = temporal_entity['mentions'][0]['text']
            # Without an explicit interval, keep only the raw text.
            if len(temporal_entity.get("timeInterval", [])) < 1:
                time_context = TimeContext(text=text)
                continue
            time = temporal_entity["timeInterval"][0]
            start = datetime.strptime(time['start'], '%Y-%m-%dT%H:%M')
            end = datetime.strptime(time['end'], '%Y-%m-%dT%H:%M')
            duration = int(time['duration'])
            time_context = TimeContext(text=text, start=start, end=end,
                                       duration=duration)
    # Put context together
    context = None
    if loc_context or time_context:
        context = WorldContext(time=time_context,
                               geo_location=loc_context)
    return context
Get place and time info from the json for this entity .
400
12
17,881
def _make_concept(self, entity):
    """Return Concept from a Hume entity.

    Returns
    -------
    tuple of (Concept, dict)
        The Concept plus a metadata dict mapping each argument type to
        the corresponding argument's '@id'.
    """
    # Use the canonical name as the name of the Concept by default
    name = self._sanitize(entity['canonicalName'])
    # But if there is a trigger head text, we prefer that since
    # it almost always results in a cleaner name
    # This is removed for now since the head word seems to be too
    # minimal for some concepts, e.g. it gives us only "security"
    # for "food security".
    """
    trigger = entity.get('trigger')
    if trigger is not None:
        head_text = trigger.get('head text')
        if head_text is not None:
            name = head_text
    """
    # Save raw text and Hume scored groundings as db_refs
    db_refs = _get_grounding(entity)
    concept = Concept(name, db_refs=db_refs)
    metadata = {arg['type']: arg['value']['@id']
                for arg in entity['arguments']}
    return concept, metadata
Return Concept from a Hume entity .
222
7
17,882
def _get_event_and_context(self, event, arg_type):
    """Return an INDRA Event based on an event entry.

    The argument of the given type is resolved through concept_dict,
    turned into a Concept with its states/polarity delta, and wrapped
    in an Event together with any place/time context.
    """
    concept_id = _choose_id(event, arg_type)
    concept_entry = self.concept_dict[concept_id]
    concept, metadata = self._make_concept(concept_entry)
    delta = {'adjectives': [],
             'states': get_states(concept_entry),
             'polarity': get_polarity(concept_entry)}
    context = self._make_context(concept_entry)
    return Event(concept, delta=delta, context=context)
Return an INDRA Event based on an event entry .
133
11
17,883
def _get_evidence(self, event, adjectives):
    """Return the Evidence object for the INDRA Statement.

    The sentence text and document location are looked up through the
    event's provenance record; annotations carry the rule, provenance,
    event type, adjectives and character bounds.
    """
    provenance = event.get('provenance')
    # Resolve the full sentence via the provenance record.
    doc_id = provenance[0]['document']['@id']
    sent_id = provenance[0]['sentence']
    raw_text = self.document_dict[doc_id]['sentences'][sent_id]
    text = self._sanitize(raw_text)
    bounds = [provenance[0]['documentCharPositions'][k]
              for k in ['start', 'end']]
    annotations = {
        'found_by': event.get('rule'),
        'provenance': provenance,
        'event_type': os.path.basename(event.get('type')),
        'adjectives': adjectives,
        'bounds': bounds
        }
    location = self.document_dict[doc_id]['location']
    return [Evidence(source_api='hume', text=text,
                     annotations=annotations, pmid=location)]
Return the Evidence object for the INDRA Statement .
250
10
17,884
def _is_statement_in_list(new_stmt, old_stmt_list):
    """Return True if the given statement is equivalent to one in the list.

    May mutate the matching old statement as a side effect: missing
    db_refs are merged in from the new statement, CHEBI/UMLS reference
    conflicts are repaired (triggering a recursive re-check), and
    evidence annotations may be improved.
    """
    for old_stmt in old_stmt_list:
        if old_stmt.equals(new_stmt):
            return True
        elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt):
            # If we're comparing a complex, make sure the agents are sorted.
            if isinstance(new_stmt, Complex):
                agent_pairs = zip(old_stmt.sorted_members(),
                                  new_stmt.sorted_members())
            else:
                agent_pairs = zip(old_stmt.agent_list(),
                                  new_stmt.agent_list())
            # Compare agent-by-agent.
            for ag_old, ag_new in agent_pairs:
                s_old = set(ag_old.db_refs.items())
                s_new = set(ag_new.db_refs.items())
                # If they're equal this isn't the one we're interested in.
                if s_old == s_new:
                    continue
                # If the new statement has nothing new to offer, just ignore it
                if s_old > s_new:
                    return True
                # If the new statement does have something new, add it to the
                # existing statement. And then ignore it.
                if s_new > s_old:
                    ag_old.db_refs.update(ag_new.db_refs)
                    return True
                # If this is a case where different CHEBI ids were mapped to
                # the same entity, set the agent name to the CHEBI id.
                if _fix_different_refs(ag_old, ag_new, 'CHEBI'):
                    # Check to make sure the newly described statement does
                    # not match anything.
                    return _is_statement_in_list(new_stmt, old_stmt_list)
                # If this is a case, like above, but with UMLS IDs, do the same
                # thing as above. This will likely never be improved.
                if _fix_different_refs(ag_old, ag_new, 'UMLS'):
                    # Check to make sure the newly described statement does
                    # not match anything.
                    return _is_statement_in_list(new_stmt, old_stmt_list)
                logger.warning("Found an unexpected kind of duplicate. "
                               "Ignoring it.")
                return True
            # This means all the agents matched, which can happen if the
            # original issue was the ordering of agents in a Complex.
            return True
        elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True):
            # Check to see if we can improve the annotation of the existing
            # statement.
            e_old = old_stmt.evidence[0]
            e_new = new_stmt.evidence[0]
            if e_old.annotations['last_verb'] is None:
                e_old.annotations['last_verb'] = \
                    e_new.annotations['last_verb']
            # If the evidence is "the same", modulo annotations, just ignore it
            if e_old.get_source_hash(True) == e_new.get_source_hash(True):
                return True
    return False
Return True if the given statement is equivalent to one in the list.
728
12
17,885
def normalize_medscan_name(name):
    """Remove trailing ' complex' suffixes from a Medscan agent name.

    Strips the suffix at most twice, covering both 'X complex' and
    'X complex complex', so the name better corresponds with the
    grounding map.
    """
    suffix = ' complex'
    result = name
    for _ in (0, 1):
        if result.endswith(suffix):
            result = result[:-len(suffix)]
    return result
Remove the ' complex' and ' complex complex' suffixes from a Medscan agent name so that it better corresponds with the grounding map.
48
25
17,886
def _untag_sentence(tagged_sentence):
    """Remove all Medscan annotation tags from a sentence.

    Tag markup is replaced by the tagged words, junk markers are
    deleted, and the result is stripped of surrounding whitespace.
    """
    without_tags = TAG_PATT.sub('\\2', tagged_sentence)
    without_junk = JUNK_PATT.sub('', without_tags)
    return without_junk.strip()
Removes all tags in the sentence returning the original sentence without Medscan annotations .
68
16
17,887
def _extract_sentence_tags(tagged_sentence):
    """Given a tagged sentence, extract a dictionary mapping tags to the
    words or phrases that they tag.

    Each value holds the tagged text and its (start, stop) character
    bounds within the untagged sentence.
    """
    untagged_sentence = _untag_sentence(tagged_sentence)
    decluttered_sentence = JUNK_PATT.sub('', tagged_sentence)
    tags = {}
    # Iteratively look for all matches of this pattern
    endpos = 0
    while True:
        match = TAG_PATT.search(decluttered_sentence, pos=endpos)
        if not match:
            break
        endpos = match.end()
        text = match.group(2)
        # Strip marker words that are not part of the tagged phrase.
        text = text.replace('CONTEXT', '')
        text = text.replace('GLOSSARY', '')
        text = text.strip()
        # Locate the phrase in the untagged sentence to get its bounds.
        start = untagged_sentence.index(text)
        stop = start + len(text)
        tag_key = match.group(1)
        if ',' in tag_key:
            # A comma-separated key tags the phrase with several ids;
            # '0' is a placeholder and is skipped.
            for sub_key in tag_key.split(','):
                if sub_key == '0':
                    continue
                tags[sub_key] = {'text': text, 'bounds': (start, stop)}
        else:
            tags[tag_key] = {'text': text, 'bounds': (start, stop)}
    return tags
Given a tagged sentence extracts a dictionary mapping tags to the words or phrases that they tag .
269
18
17,888
def get_sites(self):
    """Parse the site-text string and return a list of sites."""
    text = self.site_text
    # Drop trailing noise words and punctuation before parsing.
    for suffix in (' residue', ' residues', ',', '/'):
        if text.endswith(suffix):
            text = text[:-len(suffix)]
    assert (not text.endswith(','))
    # Strip parentheses and normalize 'or' to 'and' so both read as
    # site separators.
    text = text.replace('(', '').replace(')', '')
    text = text.replace(' or ', ' and ')
    sites = []
    for part in text.split(' and '):
        if part.endswith(','):
            part = part[:-1]
        piece = part.strip()
        if len(piece) > 0:
            sites.extend(ReachProcessor._parse_site_text(piece))
    return sites
Parse the site - text string and return a list of sites .
193
14
17,889
def process_csxml_file(self, filename, interval=None, lazy=False):
    """Process a Medscan csxml file into INDRA statements.

    Parameters
    ----------
    filename : str
        Path to the csxml file to process.
    interval : tuple or None
        (start, end) bounds passed to the csxml iterator; None means
        read everything.
    lazy : bool
        If True, leave the generator unconsumed so statements can be
        produced on demand; otherwise consume it now and append the
        statements to self.statements.
    """
    if interval is None:
        interval = (None, None)
    # SECURITY FIX: tempfile.mktemp only *names* a file, leaving a race
    # where another process can claim the path first; mkstemp creates
    # the file atomically. The fd is closed since the helper below
    # (re)opens the path itself.
    fd, tmp_fname = tempfile.mkstemp(suffix=os.path.basename(filename))
    os.close(fd)
    fix_character_encoding(filename, tmp_fname)
    self.__f = open(tmp_fname, 'rb')
    self._gen = self._iter_through_csxml_file_from_handle(*interval)
    if not lazy:
        for stmt in self._gen:
            self.statements.append(stmt)
    return
Processes a filehandle to MedScan csxml input into INDRA statements .
131
16
17,890
def get_parser(description, input_desc):
    """Get a parser that is generic to reading scripts.

    Parameters
    ----------
    description : str
        Description of the script's function, shown in --help.
    input_desc : str
        Help text for the positional input-file argument.

    Returns
    -------
    ArgumentParser
    """
    parser = ArgumentParser(description=description)
    parser.add_argument(dest='input_file', help=input_desc)
    # (flags, keyword arguments) for each optional argument, added in
    # order so the --help listing matches the original layout.
    optional_args = [
        (('-r', '--readers'),
         dict(choices=['reach', 'sparser', 'trips'],
              help='List of readers to be used.',
              nargs='+')),
        (('-n', '--num_procs'),
         dict(dest='n_proc', type=int, default=1,
              help='Select the number of processes to use.')),
        (('-s', '--sample'),
         dict(dest='n_samp', type=int,
              help='Read a random sample of size N_SAMP of the inputs.')),
        (('-I', '--in_range'),
         dict(dest='range_str',
              help='Only read input lines in the range given as '
                   '<start>:<end>.')),
        (('-v', '--verbose'),
         dict(action='store_true',
              help='Include output from the readers.')),
        (('-q', '--quiet'),
         dict(action='store_true',
              help='Suppress most output. Overrides -v and -d options.')),
        (('-d', '--debug'),
         dict(action='store_true',
              help='Set the logging to debug level.')),
        ]
    for flags, arg_kwargs in optional_args:
        parser.add_argument(*flags, **arg_kwargs)
    return parser
Get a parser that is generic to reading scripts .
389
10
17,891
def send_request(endpoint, **kwargs):
    """Return the response to a query as JSON from the NewsAPI web service.

    Requires the module-level api_key to be configured; returns None
    (with an error logged) otherwise. Raises on HTTP error status.
    """
    if api_key is None:
        logger.error('NewsAPI cannot be used without an API key')
        return None
    # Fill in defaults without clobbering caller-supplied values.
    kwargs.setdefault('apiKey', api_key)
    kwargs.setdefault('pageSize', 100)
    response = requests.get('%s/%s' % (newsapi_url, endpoint), params=kwargs)
    response.raise_for_status()
    return response.json()
Return the response to a query as JSON from the NewsAPI web service .
139
15
17,892
def process_cx_file(file_name, require_grounding=True):
    """Process a CX JSON file into Statements.

    Parameters
    ----------
    file_name : str
        Path to a file containing CX JSON.
    require_grounding : bool
        Passed through to the CX processor.
    """
    with open(file_name, 'rt') as fh:
        cx_json = json.load(fh)
    return process_cx(cx_json, require_grounding=require_grounding)
Process a CX JSON file into Statements .
66
9
17,893
def process_ndex_network(network_id, username=None, password=None,
                         require_grounding=True):
    """Process an NDEx network into Statements.

    Downloads the CX stream and summary for the given network id and
    hands them to the CX processor. Returns None (with errors logged)
    if the download fails.
    """
    client = ndex2.client.Ndex2(username=username, password=password)
    response = client.get_network_as_cx_stream(network_id)
    if response.status_code != 200:
        logger.error('Problem downloading network: status code %s'
                     % response.status_code)
        logger.error('Response: %s' % response.text)
        return None
    cx_json = response.json()
    summary = client.get_network_summary(network_id)
    return process_cx(cx_json, summary=summary,
                      require_grounding=require_grounding)
Process an NDEx network into Statements .
166
8
17,894
def process_cx(cx_json, summary=None, require_grounding=True):
    """Process a CX JSON object into Statements.

    Returns the NdexCxProcessor after extracting its statements.
    """
    processor = NdexCxProcessor(cx_json, summary=summary,
                                require_grounding=require_grounding)
    processor.get_statements()
    return processor
Process a CX JSON object into Statements .
63
9
17,895
def read_files(files, readers, **kwargs):
    """Read the files in `files` with the reader objects in `readers`.

    Parameters
    ----------
    files : list of str
        Paths to the files to be read.
    readers : list
        Reader objects, each with a `read` method and a `name` attribute.
    kwargs : dict
        Extra keyword arguments forwarded to each reader's `read` method.

    Returns
    -------
    list
        The combined reading results from all readers.
    """
    content_entries = [Content.from_file(fp) for fp in files]
    all_results = []
    for reader in readers:
        results = reader.read(content_entries, **kwargs)
        if results is None:
            logger.info("Nothing read by %s." % reader.name)
            continue
        logger.info("Successfully read %d content entries with %s."
                    % (len(results), reader.name))
        all_results.extend(results)
    logger.info("Read %s text content entries in all." % len(all_results))
    return all_results
Read the files in `files` with the reader objects in `readers`.
153
12
17,896
def expand_families(self, stmts):
    """Generate statements by expanding members of families and complexes.

    For each input statement, every family-level agent is replaced by each
    of its members in turn, producing one new statement per combination.
    E.g., a statement over RAF and MEK expands over the cross product of
    (BRAF, RAF1, ARAF) and (MAP2K1, MAP2K2).

    Parameters
    ----------
    stmts : list
        The statements to be expanded.

    Returns
    -------
    list
        The expanded statements.
    """
    expanded_stmts = []
    for stmt in stmts:
        # Collect, per agent, the list of family members; an agent with no
        # children stands in for itself.
        member_lists = []
        for agent in stmt.agent_list():
            children = self.get_children(agent)
            member_lists.append(children if len(children) > 0 else [agent])
        # One new statement per combination of members.
        for combo in itertools.product(*member_lists):
            new_agents = []
            for member in combo:
                if member is None or isinstance(member, Agent):
                    # No children existed, so the original agent (or None)
                    # is reused as-is.
                    new_agent = member
                elif isinstance(member, tuple):
                    # FIXME: building the agent from just (ns, id) does not
                    # reproduce agent state from the original family-level
                    # statement!
                    member_ns, member_id = member
                    new_agent = _agent_from_ns_id(member_ns, member_id)
                else:
                    raise Exception('Unrecognized agent entry type.')
                new_agents.append(new_agent)
            # Copy the statement and swap in the member-level agents.
            stmt_copy = deepcopy(stmt)
            stmt_copy.set_agent_list(new_agents)
            expanded_stmts.append(stmt_copy)
    return expanded_stmts
Generate statements by expanding members of families and complexes.
490
11
17,897
def update_ontology(ont_url, rdf_path):
    """Load an Eidos-style ontology from a URL and save it as RDF.

    Parameters
    ----------
    ont_url : str
        URL of the YAML ontology file.
    rdf_path : str
        Path where the RDF hierarchy is written.
    """
    yaml_root = load_yaml_from_url(ont_url)
    graph = rdf_graph_from_yaml(yaml_root)
    save_hierarchy(graph, rdf_path)
Load an ontology formatted like Eidos from GitHub.
64
11
17,898
def rdf_graph_from_yaml(yaml_root):
    """Convert the YAML object into an RDF Graph object.

    Parameters
    ----------
    yaml_root : list
        The loaded YAML ontology: a list of single-key mappings, each
        mapping a top-level node name to its subtree.

    Returns
    -------
    Graph
        The RDF graph built from the ontology hierarchy.
    """
    graph = Graph()
    for entry in yaml_root:
        # Each top-level entry is expected to be a single-key mapping.
        assert len(entry) == 1
        (node,) = entry.keys()
        build_relations(graph, node, entry[node], None)
    return graph
Convert the YAML object into an RDF Graph object.
75
14
17,899
def load_yaml_from_url(ont_url):
    """Return a YAML object loaded from a YAML file URL.

    Parameters
    ----------
    ont_url : str
        URL of the YAML file to fetch.

    Returns
    -------
    object
        The parsed YAML content.

    Raises
    ------
    Exception
        If the HTTP response status is not 200.
    """
    resp = requests.get(ont_url)
    if resp.status_code != 200:
        raise Exception('Could not load ontology from %s' % ont_url)
    # NOTE(review): yaml.load without an explicit Loader is deprecated in
    # PyYAML >= 5.1 and can construct arbitrary Python objects; consider
    # yaml.safe_load if the ontology files contain only plain YAML.
    return yaml.load(resp.content)
Return a YAML object loaded from a YAML file URL.
65
15