idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
17,500
def save_dot ( self , file_name = 'graph.dot' ) : s = self . get_string ( ) with open ( file_name , 'wt' ) as fh : fh . write ( s )
Save the graph in a graphviz dot file .
49
11
17,501
def save_pdf ( self , file_name = 'graph.pdf' , prog = 'dot' ) : self . graph . draw ( file_name , prog = prog )
Draw the graph and save as an image or pdf file .
38
12
17,502
def _add_edge ( self , source , target , * * kwargs ) : # Start with default edge properties edge_properties = self . edge_properties # Overwrite ones that are given in function call explicitly for k , v in kwargs . items ( ) : edge_properties [ k ] = v self . graph . add_edge ( source , target , * * edge_properties )
Add an edge to the graph .
84
7
17,503
def _add_node ( self , agent ) : if agent is None : return node_label = _get_node_label ( agent ) if isinstance ( agent , Agent ) and agent . bound_conditions : bound_agents = [ bc . agent for bc in agent . bound_conditions if bc . is_bound ] if bound_agents : bound_names = [ _get_node_label ( a ) for a in bound_agents ] node_label = _get_node_label ( agent ) + '/' + '/' . join ( bound_names ) self . _complex_nodes . append ( [ agent ] + bound_agents ) else : node_label = _get_node_label ( agent ) node_key = _get_node_key ( agent ) if node_key in self . existing_nodes : return self . existing_nodes . append ( node_key ) self . graph . add_node ( node_key , label = node_label , * * self . node_properties )
Add an Agent as a node to the graph .
220
10
17,504
def _add_stmt_edge ( self , stmt ) : # Skip statements with None in the subject position source = _get_node_key ( stmt . agent_list ( ) [ 0 ] ) target = _get_node_key ( stmt . agent_list ( ) [ 1 ] ) edge_key = ( source , target , stmt . __class__ . __name__ ) if edge_key in self . existing_edges : return self . existing_edges . append ( edge_key ) if isinstance ( stmt , RemoveModification ) or isinstance ( stmt , Inhibition ) or isinstance ( stmt , DecreaseAmount ) or isinstance ( stmt , Gap ) or ( isinstance ( stmt , Influence ) and stmt . overall_polarity ( ) == - 1 ) : color = '#ff0000' else : color = '#000000' params = { 'color' : color , 'arrowhead' : 'normal' , 'dir' : 'forward' } self . _add_edge ( source , target , * * params )
Assemble a Modification statement .
235
7
17,505
def _add_complex ( self , members , is_association = False ) : params = { 'color' : '#0000ff' , 'arrowhead' : 'dot' , 'arrowtail' : 'dot' , 'dir' : 'both' } for m1 , m2 in itertools . combinations ( members , 2 ) : if self . _has_complex_node ( m1 , m2 ) : continue if is_association : m1_key = _get_node_key ( m1 . concept ) m2_key = _get_node_key ( m2 . concept ) else : m1_key = _get_node_key ( m1 ) m2_key = _get_node_key ( m2 ) edge_key = ( set ( [ m1_key , m2_key ] ) , 'complex' ) if edge_key in self . existing_edges : return self . existing_edges . append ( edge_key ) self . _add_edge ( m1_key , m2_key , * * params )
Assemble a Complex statement .
236
6
17,506
def process_from_file ( signor_data_file , signor_complexes_file = None ) : # Get generator over the CSV file data_iter = read_unicode_csv ( signor_data_file , delimiter = ';' , skiprows = 1 ) complexes_iter = None if signor_complexes_file : complexes_iter = read_unicode_csv ( signor_complexes_file , delimiter = ';' , skiprows = 1 ) else : logger . warning ( 'Signor complex mapping file not provided, Statements ' 'involving complexes will not be expanded to members.' ) return _processor_from_data ( data_iter , complexes_iter )
Process Signor interaction data from CSV files .
153
9
17,507
def _handle_response ( res , delimiter ) : if res . status_code == 200 : # Python 2 -- csv.reader will need bytes if sys . version_info [ 0 ] < 3 : csv_io = BytesIO ( res . content ) # Python 3 -- csv.reader needs str else : csv_io = StringIO ( res . text ) data_iter = read_unicode_csv_fileobj ( csv_io , delimiter = delimiter , skiprows = 1 ) else : raise Exception ( 'Could not download Signor data.' ) return data_iter
Get an iterator over the CSV data from the response .
129
11
17,508
def get_protein_expression ( gene_names , cell_types ) : A = 0.2438361 B = 3.0957627 mrna_amounts = cbio_client . get_ccle_mrna ( gene_names , cell_types ) protein_amounts = copy ( mrna_amounts ) for cell_type in cell_types : amounts = mrna_amounts . get ( cell_type ) if amounts is None : continue for gene_name , amount in amounts . items ( ) : if amount is not None : protein_amount = 10 ** ( A * amount + B ) protein_amounts [ cell_type ] [ gene_name ] = protein_amount return protein_amounts
Return the protein expression levels of genes in cell types .
158
11
17,509
def get_aspect ( cx , aspect_name ) : if isinstance ( cx , dict ) : return cx . get ( aspect_name ) for entry in cx : if list ( entry . keys ( ) ) [ 0 ] == aspect_name : return entry [ aspect_name ]
Return an aspect given the name of the aspect
60
9
17,510
def classify_nodes ( graph , hub ) : node_stats = defaultdict ( lambda : defaultdict ( list ) ) for u , v , data in graph . edges ( data = True ) : # This means the node is downstream of the hub if hub == u : h , o = u , v if data [ 'i' ] != 'Complex' : node_stats [ o ] [ 'up' ] . append ( - 1 ) else : node_stats [ o ] [ 'up' ] . append ( 0 ) # This means the node is upstream of the hub elif hub == v : h , o = v , u if data [ 'i' ] != 'Complex' : node_stats [ o ] [ 'up' ] . append ( 1 ) else : node_stats [ o ] [ 'up' ] . append ( 0 ) else : continue node_stats [ o ] [ 'interaction' ] . append ( edge_type_to_class ( data [ 'i' ] ) ) node_classes = { } for node_id , stats in node_stats . items ( ) : up = max ( set ( stats [ 'up' ] ) , key = stats [ 'up' ] . count ) # Special case: if up is not 0 then we should exclude complexes # from the edge_type states so that we don't end up with # (-1, complex, ...) or (1, complex, ...) as the node class interactions = [ i for i in stats [ 'interaction' ] if not ( up != 0 and i == 'complex' ) ] edge_type = max ( set ( interactions ) , key = interactions . count ) node_type = graph . nodes [ node_id ] [ 'type' ] node_classes [ node_id ] = ( up , edge_type , node_type ) return node_classes
Classify each node based on its type and relationship to the hub .
397
14
17,511
def get_attributes ( aspect , id ) : attributes = { } for entry in aspect : if entry [ 'po' ] == id : attributes [ entry [ 'n' ] ] = entry [ 'v' ] return attributes
Return the attributes pointing to a given ID in a given aspect .
48
13
17,512
def cx_to_networkx ( cx ) : graph = networkx . MultiDiGraph ( ) for node_entry in get_aspect ( cx , 'nodes' ) : id = node_entry [ '@id' ] attrs = get_attributes ( get_aspect ( cx , 'nodeAttributes' ) , id ) attrs [ 'n' ] = node_entry [ 'n' ] graph . add_node ( id , * * attrs ) for edge_entry in get_aspect ( cx , 'edges' ) : id = edge_entry [ '@id' ] attrs = get_attributes ( get_aspect ( cx , 'edgeAttributes' ) , id ) attrs [ 'i' ] = edge_entry [ 'i' ] graph . add_edge ( edge_entry [ 's' ] , edge_entry [ 't' ] , key = id , * * attrs ) return graph
Return a MultiDiGraph representation of a CX network .
205
12
17,513
def get_quadrant_from_class ( node_class ) : up , edge_type , _ = node_class if up == 0 : return 0 if random . random ( ) < 0.5 else 7 mappings = { ( - 1 , 'modification' ) : 1 , ( - 1 , 'amount' ) : 2 , ( - 1 , 'activity' ) : 3 , ( 1 , 'activity' ) : 4 , ( 1 , 'amount' ) : 5 , ( 1 , 'modification' ) : 6 } return mappings [ ( up , edge_type ) ]
Return the ID of the segment of the plane corresponding to a class .
126
14
17,514
def get_coordinates ( node_class ) : quadrant_size = ( 2 * math . pi / 8.0 ) quadrant = get_quadrant_from_class ( node_class ) begin_angle = quadrant_size * quadrant r = 200 + 800 * random . random ( ) alpha = begin_angle + random . random ( ) * quadrant_size x = r * math . cos ( alpha ) y = r * math . sin ( alpha ) return x , y
Generate coordinates for a node in a given class .
105
11
17,515
def get_layout_aspect ( hub , node_classes ) : aspect = [ { 'node' : hub , 'x' : 0.0 , 'y' : 0.0 } ] for node , node_class in node_classes . items ( ) : if node == hub : continue x , y = get_coordinates ( node_class ) aspect . append ( { 'node' : node , 'x' : x , 'y' : y } ) return aspect
Get the full layout aspect with coordinates for each node .
102
11
17,516
def get_node_by_name ( graph , name ) : for id , attrs in graph . nodes ( data = True ) : if attrs [ 'n' ] == name : return id
Return a node ID given its name .
42
8
17,517
def add_semantic_hub_layout ( cx , hub ) : graph = cx_to_networkx ( cx ) hub_node = get_node_by_name ( graph , hub ) node_classes = classify_nodes ( graph , hub_node ) layout_aspect = get_layout_aspect ( hub_node , node_classes ) cx [ 'cartesianLayout' ] = layout_aspect
Attach a layout aspect to a CX network given a hub node .
90
14
17,518
def get_metadata ( doi ) : url = crossref_url + 'works/' + doi res = requests . get ( url ) if res . status_code != 200 : logger . info ( 'Could not get CrossRef metadata for DOI %s, code %d' % ( doi , res . status_code ) ) return None raw_message = res . json ( ) metadata = raw_message . get ( 'message' ) return metadata
Returns the metadata of an article given its DOI from CrossRef as a JSON dict
93
15
17,519
def get_agent_rule_str ( agent ) : rule_str_list = [ _n ( agent . name ) ] # If it's a molecular agent if isinstance ( agent , ist . Agent ) : for mod in agent . mods : mstr = abbrevs [ mod . mod_type ] if mod . residue is not None : mstr += mod . residue if mod . position is not None : mstr += mod . position rule_str_list . append ( '%s' % mstr ) for mut in agent . mutations : res_from = mut . residue_from if mut . residue_from else 'mut' res_to = mut . residue_to if mut . residue_to else 'X' if mut . position is None : mut_site_name = res_from else : mut_site_name = res_from + mut . position mstr = mut_site_name + res_to rule_str_list . append ( mstr ) if agent . bound_conditions : for b in agent . bound_conditions : if b . is_bound : rule_str_list . append ( _n ( b . agent . name ) ) else : rule_str_list . append ( 'n' + _n ( b . agent . name ) ) if agent . location is not None : rule_str_list . append ( _n ( agent . location ) ) if agent . activity is not None : if agent . activity . is_active : rule_str_list . append ( agent . activity . activity_type [ : 3 ] ) else : rule_str_list . append ( agent . activity . activity_type [ : 3 ] + '_inact' ) rule_str = '_' . join ( rule_str_list ) return rule_str
Construct a string from an Agent as part of a PySB rule name .
384
15
17,520
def add_rule_to_model ( model , rule , annotations = None ) : try : model . add_component ( rule ) # If the rule was actually added, also add the annotations if annotations : model . annotations += annotations # If this rule is already in the model, issue a warning and continue except ComponentDuplicateNameError : msg = "Rule %s already in model! Skipping." % rule . name logger . debug ( msg )
Add a Rule to a PySB model and handle duplicate component errors .
94
14
17,521
def get_create_parameter ( model , param ) : norm_name = _n ( param . name ) parameter = model . parameters . get ( norm_name ) if not param . unique and parameter is not None : return parameter if param . unique : pnum = 1 while True : pname = norm_name + '_%d' % pnum if model . parameters . get ( pname ) is None : break pnum += 1 else : pname = norm_name parameter = Parameter ( pname , param . value ) model . add_component ( parameter ) return parameter
Return parameter with given name creating it if needed .
124
10
17,522
def get_uncond_agent ( agent ) : agent_uncond = ist . Agent ( _n ( agent . name ) , mutations = agent . mutations ) return agent_uncond
Construct the unconditional state of an Agent .
40
8
17,523
def get_monomer_pattern ( model , agent , extra_fields = None ) : try : monomer = model . monomers [ _n ( agent . name ) ] except KeyError as e : logger . warning ( 'Monomer with name %s not found in model' % _n ( agent . name ) ) return None # Get the agent site pattern pattern = get_site_pattern ( agent ) if extra_fields is not None : for k , v in extra_fields . items ( ) : # This is an important assumption, it only sets the given pattern # on the monomer if that site/key is not already specified at the # Agent level. For instance, if the Agent is specified to have # 'activity', that site will not be updated here. if k not in pattern : pattern [ k ] = v # If a model is given, return the Monomer with the generated pattern, # otherwise just return the pattern try : monomer_pattern = monomer ( * * pattern ) except Exception as e : logger . info ( "Invalid site pattern %s for monomer %s" % ( pattern , monomer ) ) return None return monomer_pattern
Construct a PySB MonomerPattern from an Agent .
247
11
17,524
def get_site_pattern ( agent ) : if not isinstance ( agent , ist . Agent ) : return { } pattern = { } # Handle bound conditions for bc in agent . bound_conditions : # Here we make the assumption that the binding site # is simply named after the binding partner if bc . is_bound : pattern [ get_binding_site_name ( bc . agent ) ] = ANY else : pattern [ get_binding_site_name ( bc . agent ) ] = None # Handle modifications for mod in agent . mods : mod_site_str = abbrevs [ mod . mod_type ] if mod . residue is not None : mod_site_str = mod . residue mod_pos_str = mod . position if mod . position is not None else '' mod_site = ( '%s%s' % ( mod_site_str , mod_pos_str ) ) site_states = states [ mod . mod_type ] if mod . is_modified : pattern [ mod_site ] = ( site_states [ 1 ] , WILD ) else : pattern [ mod_site ] = ( site_states [ 0 ] , WILD ) # Handle mutations for mc in agent . mutations : res_from = mc . residue_from if mc . residue_from else 'mut' res_to = mc . residue_to if mc . residue_to else 'X' if mc . position is None : mut_site_name = res_from else : mut_site_name = res_from + mc . position pattern [ mut_site_name ] = res_to # Handle location if agent . location is not None : pattern [ 'loc' ] = _n ( agent . location ) # Handle activity if agent . activity is not None : active_site_name = agent . activity . activity_type if agent . activity . is_active : active_site_state = 'active' else : active_site_state = 'inactive' pattern [ active_site_name ] = active_site_state return pattern
Construct a dictionary of Monomer site states from an Agent .
434
12
17,525
def set_base_initial_condition ( model , monomer , value ) : # Build up monomer pattern dict sites_dict = { } for site in monomer . sites : if site in monomer . site_states : if site == 'loc' and 'cytoplasm' in monomer . site_states [ 'loc' ] : sites_dict [ 'loc' ] = 'cytoplasm' else : sites_dict [ site ] = monomer . site_states [ site ] [ 0 ] else : sites_dict [ site ] = None mp = monomer ( * * sites_dict ) pname = monomer . name + '_0' try : p = model . parameters [ pname ] p . value = value except KeyError : p = Parameter ( pname , value ) model . add_component ( p ) model . initial ( mp , p )
Set an initial condition for a monomer in its default state .
189
13
17,526
def get_annotation ( component , db_name , db_ref ) : url = get_identifiers_url ( db_name , db_ref ) if not url : return None subj = component ann = Annotation ( subj , url , 'is' ) return ann
Construct model Annotations for each component .
60
7
17,527
def make_model ( self , policies = None , initial_conditions = True , reverse_effects = False , model_name = 'indra_model' ) : ppa = PysbPreassembler ( self . statements ) self . processed_policies = self . process_policies ( policies ) ppa . replace_activities ( ) if reverse_effects : ppa . add_reverse_effects ( ) self . statements = ppa . statements self . model = Model ( ) self . model . name = model_name self . agent_set = BaseAgentSet ( ) # Collect information about the monomers/self.agent_set from the # statements self . _monomers ( ) # Add the monomers to the model based on our BaseAgentSet for agent_name , agent in self . agent_set . items ( ) : m = Monomer ( _n ( agent_name ) , agent . sites , agent . site_states ) m . site_annotations = agent . site_annotations self . model . add_component ( m ) for db_name , db_ref in agent . db_refs . items ( ) : a = get_annotation ( m , db_name , db_ref ) if a is not None : self . model . add_annotation ( a ) # Iterate over the active_forms for af in agent . active_forms : self . model . add_annotation ( Annotation ( m , af , 'has_active_pattern' ) ) for iaf in agent . inactive_forms : self . model . add_annotation ( Annotation ( m , iaf , 'has_inactive_pattern' ) ) for at in agent . activity_types : act_site_cond = { at : 'active' } self . model . add_annotation ( Annotation ( m , act_site_cond , 'has_active_pattern' ) ) inact_site_cond = { at : 'inactive' } self . model . add_annotation ( Annotation ( m , inact_site_cond , 'has_inactive_pattern' ) ) # Iterate over the statements to generate rules self . _assemble ( ) # Add initial conditions if initial_conditions : self . add_default_initial_conditions ( ) return self . model
Assemble the PySB model from the collected INDRA Statements .
502
13
17,528
def add_default_initial_conditions ( self , value = None ) : if value is not None : try : value_num = float ( value ) except ValueError : logger . error ( 'Invalid initial condition value.' ) return else : value_num = self . default_initial_amount if self . model is None : return for m in self . model . monomers : set_base_initial_condition ( self . model , m , value_num )
Set default initial conditions in the PySB model .
98
10
17,529
def set_expression ( self , expression_dict ) : if self . model is None : return monomers_found = [ ] monomers_notfound = [ ] # Iterate over all the monomers for m in self . model . monomers : if ( m . name in expression_dict and expression_dict [ m . name ] is not None ) : # Try to get the expression amount from the dict init = expression_dict [ m . name ] # We interpret nan and None as not expressed if math . isnan ( init ) : init = 0 init_round = round ( init ) set_base_initial_condition ( self . model , m , init_round ) monomers_found . append ( m . name ) else : set_base_initial_condition ( self . model , m , self . default_initial_amount ) monomers_notfound . append ( m . name ) logger . info ( 'Monomers set to given context' ) logger . info ( '-----------------------------' ) for m in monomers_found : logger . info ( '%s' % m ) if monomers_notfound : logger . info ( '' ) logger . info ( 'Monomers not found in given context' ) logger . info ( '-----------------------------------' ) for m in monomers_notfound : logger . info ( '%s' % m )
Set protein expression amounts as initial conditions
289
7
17,530
def set_context ( self , cell_type ) : if self . model is None : return monomer_names = [ m . name for m in self . model . monomers ] res = context_client . get_protein_expression ( monomer_names , [ cell_type ] ) amounts = res . get ( cell_type ) if not amounts : logger . warning ( 'Could not get context for %s cell type.' % cell_type ) self . add_default_initial_conditions ( ) return self . set_expression ( amounts )
Set protein expression amounts from CCLE as initial conditions .
117
11
17,531
def export_model ( self , format , file_name = None ) : # Handle SBGN as special case if format == 'sbgn' : exp_str = export_sbgn ( self . model ) elif format == 'kappa_im' : # NOTE: this export is not a str, rather a graph object return export_kappa_im ( self . model , file_name ) elif format == 'kappa_cm' : # NOTE: this export is not a str, rather a graph object return export_kappa_cm ( self . model , file_name ) else : try : exp_str = pysb . export . export ( self . model , format ) except KeyError : logging . error ( 'Unknown export format: %s' % format ) return None if file_name : with open ( file_name , 'wb' ) as fh : fh . write ( exp_str . encode ( 'utf-8' ) ) return exp_str
Save the assembled model in a modeling formalism other than PySB .
211
14
17,532
def save_rst ( self , file_name = 'pysb_model.rst' , module_name = 'pysb_module' ) : if self . model is not None : with open ( file_name , 'wt' ) as fh : fh . write ( '.. _%s:\n\n' % module_name ) fh . write ( 'Module\n======\n\n' ) fh . write ( 'INDRA-assembled model\n---------------------\n\n' ) fh . write ( '::\n\n' ) model_str = pysb . export . export ( self . model , 'pysb_flat' ) model_str = '\t' + model_str . replace ( '\n' , '\n\t' ) fh . write ( model_str )
Save the assembled model as an RST file for literate modeling .
189
14
17,533
def _monomers ( self ) : for stmt in self . statements : if _is_whitelisted ( stmt ) : self . _dispatch ( stmt , 'monomers' , self . agent_set )
Calls the appropriate monomers method based on policies .
49
11
17,534
def send_query ( text , service_endpoint = 'drum' , query_args = None ) : if service_endpoint in [ 'drum' , 'drum-dev' , 'cwms' , 'cwmsreader' ] : url = base_url + service_endpoint else : logger . error ( 'Invalid service endpoint: %s' % service_endpoint ) return '' if query_args is None : query_args = { } query_args . update ( { 'input' : text } ) res = requests . get ( url , query_args , timeout = 3600 ) if not res . status_code == 200 : logger . error ( 'Problem with TRIPS query: status code %s' % res . status_code ) return '' # Gets unicode content return res . text
Send a query to the TRIPS web service .
177
10
17,535
def get_xml ( html , content_tag = 'ekb' , fail_if_empty = False ) : cont = re . findall ( r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % { 'tag' : content_tag } , html , re . MULTILINE | re . DOTALL ) if cont : events_terms = '' . join ( [ l . strip ( ) for l in cont [ 0 ] [ 1 ] . splitlines ( ) ] ) if 'xmlns' in cont [ 0 ] [ 0 ] : meta = ' ' . join ( [ l . strip ( ) for l in cont [ 0 ] [ 0 ] . splitlines ( ) ] ) else : meta = '' else : events_terms = '' meta = '' if fail_if_empty : assert events_terms != '' , "Got empty string for events content from html:\n%s" % html header = ( '<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>' % ( content_tag , meta ) ) footer = '</%s>' % content_tag return header + events_terms . replace ( '\n' , '' ) + footer
Extract the content XML from the HTML output of the TRIPS web service .
272
16
17,536
def save_xml ( xml_str , file_name , pretty = True ) : try : fh = open ( file_name , 'wt' ) except IOError : logger . error ( 'Could not open %s for writing.' % file_name ) return if pretty : xmld = xml . dom . minidom . parseString ( xml_str ) xml_str_pretty = xmld . toprettyxml ( ) fh . write ( xml_str_pretty ) else : fh . write ( xml_str ) fh . close ( )
Save the TRIPS EKB XML in a file .
121
11
17,537
def process_table ( fname ) : book = openpyxl . load_workbook ( fname , read_only = True ) try : rel_sheet = book [ 'Relations' ] except Exception as e : rel_sheet = book [ 'Causal' ] event_sheet = book [ 'Events' ] entities_sheet = book [ 'Entities' ] sp = SofiaExcelProcessor ( rel_sheet . rows , event_sheet . rows , entities_sheet . rows ) return sp
Return processor by processing a given sheet of a spreadsheet file .
108
12
17,538
def process_text ( text , out_file = 'sofia_output.json' , auth = None ) : text_json = { 'text' : text } if not auth : user , password = _get_sofia_auth ( ) else : user , password = auth if not user or not password : raise ValueError ( 'Could not use SOFIA web service since' ' authentication information is missing. Please' ' set SOFIA_USERNAME and SOFIA_PASSWORD in the' ' INDRA configuration file or as environmental' ' variables.' ) json_response , status_code , process_status = _text_processing ( text_json = text_json , user = user , password = password ) # Check response status if process_status != 'Done' or status_code != 200 : return None # Cache reading output if out_file : with open ( out_file , 'w' ) as fh : json . dump ( json_response , fh , indent = 1 ) return process_json ( json_response )
Return processor by processing text given as a string .
226
10
17,539
def _get_dict_from_list ( dict_key , list_of_dicts ) : the_dict = [ cur_dict for cur_dict in list_of_dicts if cur_dict . get ( dict_key ) ] if not the_dict : raise ValueError ( 'Could not find a dict with key %s' % dict_key ) return the_dict [ 0 ] [ dict_key ]
Retrieve a specific dict from a list of dicts .
91
12
17,540
def _initialize_node_agents ( self ) : nodes = _get_dict_from_list ( 'nodes' , self . cx ) invalid_genes = [ ] for node in nodes : id = node [ '@id' ] cx_db_refs = self . get_aliases ( node ) up_id = cx_db_refs . get ( 'UP' ) if up_id : gene_name = uniprot_client . get_gene_name ( up_id ) hgnc_id = hgnc_client . get_hgnc_id ( gene_name ) db_refs = { 'UP' : up_id , 'HGNC' : hgnc_id , 'TEXT' : gene_name } agent = Agent ( gene_name , db_refs = db_refs ) self . _node_names [ id ] = gene_name self . _node_agents [ id ] = agent continue else : node_name = node [ 'n' ] self . _node_names [ id ] = node_name hgnc_id = hgnc_client . get_hgnc_id ( node_name ) db_refs = { 'TEXT' : node_name } if not hgnc_id : if not self . require_grounding : self . _node_agents [ id ] = Agent ( node_name , db_refs = db_refs ) invalid_genes . append ( node_name ) else : db_refs . update ( { 'HGNC' : hgnc_id } ) up_id = hgnc_client . get_uniprot_id ( hgnc_id ) # It's possible that a valid HGNC ID will not have a # Uniprot ID, as in the case of HOTAIR (HOX transcript # antisense RNA, HGNC:33510) if up_id : db_refs . update ( { 'UP' : up_id } ) self . _node_agents [ id ] = Agent ( node_name , db_refs = db_refs ) if invalid_genes : verb = 'Skipped' if self . require_grounding else 'Included' logger . info ( '%s invalid gene symbols: %s' % ( verb , ', ' . join ( invalid_genes ) ) )
Initialize internal dicts containing node information .
522
9
17,541
def get_pmids ( self ) : pmids = [ ] for ea in self . _edge_attributes . values ( ) : edge_pmids = ea . get ( 'pmids' ) if edge_pmids : pmids += edge_pmids return list ( set ( pmids ) )
Get list of all PMIDs associated with edges in the network .
67
13
17,542
def get_statements ( self ) : edges = _get_dict_from_list ( 'edges' , self . cx ) for edge in edges : edge_type = edge . get ( 'i' ) if not edge_type : continue stmt_type = _stmt_map . get ( edge_type ) if stmt_type : id = edge [ '@id' ] source_agent = self . _node_agents . get ( edge [ 's' ] ) target_agent = self . _node_agents . get ( edge [ 't' ] ) if not source_agent or not target_agent : logger . info ( "Skipping edge %s->%s: %s" % ( self . _node_names [ edge [ 's' ] ] , self . _node_names [ edge [ 't' ] ] , edge ) ) continue ev = self . _create_evidence ( id ) if stmt_type == Complex : stmt = stmt_type ( [ source_agent , target_agent ] , evidence = ev ) else : stmt = stmt_type ( source_agent , target_agent , evidence = ev ) self . statements . append ( stmt ) return self . statements
Convert network edges into Statements .
266
7
17,543
def node_has_edge_with_label ( self , node_name , edge_label ) : G = self . G for edge in G . edges ( node_name ) : to = edge [ 1 ] relation_name = G . edges [ node_name , to ] [ 'relation' ] if relation_name == edge_label : return to return None
Looks for an edge from node_name to some other node with the specified label . Returns the node to which this edge points if it exists or None if it doesn t .
77
35
17,544
def general_node_label ( self , node ) : G = self . G if G . node [ node ] [ 'is_event' ] : return 'event type=' + G . node [ node ] [ 'type' ] else : return 'entity text=' + G . node [ node ] [ 'text' ]
Used for debugging - gives a short text description of a graph node .
68
14
17,545
def print_parent_and_children_info ( self , node ) : G = self . G parents = G . predecessors ( node ) children = G . successors ( node ) print ( general_node_label ( G , node ) ) tabs = '\t' for parent in parents : relation = G . edges [ parent , node ] [ 'relation' ] print ( tabs + 'Parent (%s): %s' % ( relation , general_node_label ( G , parent ) ) ) for cop in G . successors ( parent ) : if cop != node : relation = G . edges [ parent , cop ] [ 'relation' ] print ( tabs + 'Child of parent (%s): %s' % ( relation , general_node_label ( G , cop ) ) ) for child in children : relation = G . edges [ node , child ] [ 'relation' ] print ( tabs + 'Child (%s): (%s)' % ( relation , general_node_label ( G , child ) ) )
Used for debugging - prints a short description of a a node its children its parents and its parents children .
212
21
17,546
def find_event_with_outgoing_edges ( self , event_name , desired_relations ) : G = self . G desired_relations = set ( desired_relations ) desired_event_nodes = [ ] for node in G . node . keys ( ) : if G . node [ node ] [ 'is_event' ] and G . node [ node ] [ 'type' ] == event_name : has_relations = [ G . edges [ node , edge [ 1 ] ] [ 'relation' ] for edge in G . edges ( node ) ] has_relations = set ( has_relations ) # Did the outgoing edges from this node have all of the # desired relations? if desired_relations . issubset ( has_relations ) : desired_event_nodes . append ( node ) return desired_event_nodes
Gets a list of event nodes with the specified event_name and outgoing edges annotated with each of the specified relations .
179
25
17,547
def get_related_node ( self , node , relation ) : G = self . G for edge in G . edges ( node ) : to = edge [ 1 ] to_relation = G . edges [ node , to ] [ 'relation' ] if to_relation == relation : return to return None
Looks for an edge from node to some other node such that the edge is annotated with the given relation . If there exists such an edge returns the name of the node it points to . Otherwise returns None .
63
42
17,548
def get_entity_text_for_relation ( self , node , relation ) : G = self . G related_node = self . get_related_node ( node , relation ) if related_node is not None : if not G . node [ related_node ] [ 'is_event' ] : return G . node [ related_node ] [ 'text' ] else : return None else : return None
Looks for an edge from node to some other node such that the edge is annotated with the given relation . If there exists such an edge and the node at the other edge is an entity return that entity s text . Otherwise returns None .
87
48
17,549
def process_increase_expression_amount ( self ) : statements = [ ] pwcs = self . find_event_parent_with_event_child ( 'Positive_regulation' , 'Gene_expression' ) for pair in pwcs : pos_reg = pair [ 0 ] expression = pair [ 1 ] cause = self . get_entity_text_for_relation ( pos_reg , 'Cause' ) target = self . get_entity_text_for_relation ( expression , 'Theme' ) if cause is not None and target is not None : theme_node = self . get_related_node ( expression , 'Theme' ) assert ( theme_node is not None ) evidence = self . node_to_evidence ( theme_node , is_direct = False ) statements . append ( IncreaseAmount ( s2a ( cause ) , s2a ( target ) , evidence = evidence ) ) return statements
Looks for Positive_Regulation events with a specified Cause and a Gene_Expression theme and processes them into INDRA statements .
195
26
17,550
def process_phosphorylation_statements(self):
    """Extract Phosphorylation/Dephosphorylation INDRA Statements.

    Looks for Positive_regulation events whose child is a Phosphorylation
    event. The trigger word of the phosphorylation decides whether the
    extracted statement is a Phosphorylation or a Dephosphorylation.

    Fixes over the original: removed a leftover debug ``print`` of the
    cause/theme pair, and guarded against a None trigger word, on which
    ``'dephos' in trigger_word`` raised TypeError.
    """
    statements = []
    pwcs = self.find_event_parent_with_event_child('Positive_regulation',
                                                   'Phosphorylation')
    for pos_reg, phos in pwcs:
        cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
        theme = self.get_entity_text_for_relation(phos, 'Theme')
        # If the trigger word is dephosphorylate or similar, we extract a
        # Dephosphorylation statement instead. The trigger may be missing
        # (None), in which case we default to phosphorylation.
        trigger_word = self.get_entity_text_for_relation(
            phos, 'Phosphorylation')
        deph = trigger_word is not None and 'dephos' in trigger_word
        site = self.get_entity_text_for_relation(phos, 'Site')
        theme_node = self.get_related_node(phos, 'Theme')
        assert theme_node is not None
        evidence = self.node_to_evidence(theme_node, is_direct=False)
        if theme is not None:
            stmt_cls = Dephosphorylation if deph else Phosphorylation
            statements.append(stmt_cls(s2a(cause), s2a(theme), site,
                                       evidence=evidence))
    return statements
Looks for Phosphorylation events in the graph and extracts them into INDRA statements .
343
18
17,551
def process_binding_statements(self):
    """Return Complex statements extracted from Binding events that have
    both a Theme and a Theme2 argument."""
    statements = []
    binding_nodes = self.find_event_with_outgoing_edges(
        'Binding', ['Theme', 'Theme2'])
    for node in binding_nodes:
        theme1 = self.get_entity_text_for_relation(node, 'Theme')
        theme1_node = self.get_related_node(node, 'Theme')
        theme2 = self.get_entity_text_for_relation(node, 'Theme2')
        assert theme1 is not None
        assert theme2 is not None
        evidence = self.node_to_evidence(theme1_node, is_direct=True)
        statements.append(Complex([s2a(theme1), s2a(theme2)],
                                  evidence=evidence))
    return statements
Looks for Binding events in the graph and extracts them into INDRA statements .
193
15
17,552
def node_to_evidence(self, entity_node, is_direct):
    """Return an Evidence object for a statement around ``entity_node``.

    The evidence text is the sentence containing the entity (the whole
    event is assumed to lie within a single sentence), and the fully
    connected subgraph around the node is stored in the annotations.

    Fix over the original: removed a dead ``epistemics = dict()`` local
    that was assigned but never used.
    """
    sentence_text = self.G.node[entity_node]['sentence_text']
    # Store the connected subgraph's node and edge properties so the
    # original parse structure can be recovered from the annotations.
    subgraph = self.connected_subgraph(entity_node)
    edge_properties = {edge: subgraph.edges[edge]
                       for edge in subgraph.edges()}
    annotations = {'node_properties': subgraph.node,
                   'edge_properties': edge_properties}
    evidence = Evidence(source_api='tees', pmid=self.pmid,
                        text=sentence_text,
                        epistemics={'direct': is_direct},
                        annotations=annotations)
    return evidence
Computes an evidence object for a statement .
208
9
17,553
def connected_subgraph(self, node):
    """Return the subgraph containing ``node``, its ancestors, and its
    descendants, closed under repeated ancestor/descendant expansion.

    Improvement over the original: instead of re-expanding the entire
    accumulated node set on every pass until a fixed point is reached,
    only the frontier of newly discovered nodes is expanded. The result
    (the same transitive closure) is identical, but redundant
    ancestors/descendants computations are avoided.
    """
    G = self.G
    subgraph_nodes = {node}
    frontier = {node}
    while frontier:
        discovered = set()
        for n in frontier:
            discovered |= dag.ancestors(G, n)
            discovered |= dag.descendants(G, n)
        # Only genuinely new nodes need another expansion pass.
        frontier = discovered - subgraph_nodes
        subgraph_nodes |= frontier
    return G.subgraph(subgraph_nodes)
Returns the subgraph containing the given node its ancestors and its descendants .
205
14
17,554
def process_text(text, save_xml_name='trips_output.xml',
                 save_xml_pretty=True, offline=False,
                 service_endpoint='drum'):
    """Return a TripsProcessor by processing the given text.

    Parameters
    ----------
    text : str
        The text to be read.
    save_xml_name : Optional[str]
        File name to save the TRIPS EKB XML output in; no file is saved
        if None/empty.
    save_xml_pretty : Optional[bool]
        Whether to pretty-print the saved XML.
    offline : Optional[bool]
        If True, a local DRUM instance is used instead of the web service.
    service_endpoint : Optional[str]
        The web service endpoint to use ('drum' by default).

    Fix over the original: removed the dead ``if dr is None`` check after
    ``DrumReader()`` — a constructor cannot return None, so that branch
    was unreachable.
    """
    if not offline:
        html = client.send_query(text, service_endpoint)
        xml = client.get_xml(html)
    else:
        if not offline_reading:
            logger.error('Offline reading with TRIPS/DRUM not available.')
            logger.error('Error message was: %s' % offline_err)
            msg = """
To install DRUM locally, follow instructions at
https://github.com/wdebeaum/drum.
Next, install the pykqml package either from pip or from
https://github.com/bgyori/pykqml.
Once installed, run drum/bin/trips-drum in a separate process.
"""
            logger.error(msg)
            return None
        try:
            dr = DrumReader()
        except BaseException as e:
            logger.error(e)
            logger.error('Make sure drum/bin/trips-drum is running in'
                         ' a separate process')
            return None
        try:
            dr.read_text(text)
            dr.start()
        except SystemExit:
            pass
        xml = dr.extractions[0]
    if save_xml_name:
        client.save_xml(xml, save_xml_name, save_xml_pretty)
    return process_xml(xml)
Return a TripsProcessor by processing text .
329
10
17,555
def process_xml_file(file_name):
    """Return a TripsProcessor built from a TRIPS EKB XML file on disk."""
    # Read as bytes and decode explicitly so the file's UTF-8 content is
    # handled the same way on every platform.
    with open(file_name, 'rb') as xml_file:
        ekb_str = xml_file.read().decode('utf-8')
    return process_xml(ekb_str)
Return a TripsProcessor by processing a TRIPS EKB XML file .
53
16
17,556
def process_xml(xml_string):
    """Return a TripsProcessor after running all extraction passes on a
    TRIPS EKB XML string, or None if the XML could not be parsed."""
    tp = TripsProcessor(xml_string)
    if tp.tree is None:
        return None
    # Run every extraction pass in the established order.
    extraction_passes = [
        tp.get_modifications_indirect,
        tp.get_activations_causal,
        tp.get_activations_stimulate,
        tp.get_complexes,
        tp.get_modifications,
        tp.get_active_forms,
        tp.get_active_forms_state,
        tp.get_activations,
        tp.get_translocation,
        tp.get_regulate_amounts,
        tp.get_degradations,
        tp.get_syntheses,
        tp.get_conversions,
        tp.get_simple_increase_decrease,
    ]
    for extract in extraction_passes:
        extract()
    return tp
Return a TripsProcessor by processing a TRIPS EKB XML string .
185
16
17,557
def load_eidos_curation_table():
    """Return a pandas DataFrame of Eidos rule curation data downloaded
    from the Eidos GitHub repository.

    Fix over the original: ``pandas.read_table`` is deprecated; the
    equivalent ``pandas.read_csv(..., sep='\\t')`` is used instead.
    """
    url = ('https://raw.githubusercontent.com/clulab/eidos/master/'
           'src/main/resources/org/clulab/wm/eidos/english/confidence/'
           'rule_summary.tsv')
    # Load the table of scores from the URL above into a data frame
    res = StringIO(requests.get(url).text)
    table = pandas.read_csv(res, sep='\t')
    # Drop the last "Grand total" summary row
    table = table.drop(table.index[len(table) - 1])
    return table
Return a pandas table of Eidos curation data .
146
12
17,558
def get_eidos_bayesian_scorer(prior_counts=None):
    """Return a BayesianScorer seeded with per-rule Eidos curation counts.

    prior_counts defaults to a deep copy of the module-level default
    priors when not provided (or falsy).
    """
    table = load_eidos_curation_table()
    rule_counts = {}
    for rule, n_correct, n_incorrect in zip(table['RULE'],
                                            table['Num correct'],
                                            table['Num incorrect']):
        rule_counts[rule] = [n_correct, n_incorrect]
    subtype_counts = {'eidos': rule_counts}
    if not prior_counts:
        prior_counts = copy.deepcopy(default_priors)
    return BayesianScorer(prior_counts=prior_counts,
                          subtype_counts=subtype_counts)
Return a BayesianScorer based on Eidos curation counts .
147
14
17,559
def get_eidos_scorer():
    """Return a SimpleScorer built from Eidos curated precision data."""
    table = load_eidos_curation_table()
    # Overall precision: rule-count-weighted average of '% correct'.
    total_num = table['COUNT of RULE'].sum()
    precision = table['COUNT of RULE'].dot(table['% correct']) / total_num
    # Split the total error into systematic and random components in an
    # ad-hoc manner for now.
    syst_error = 0.05
    rand_error = 1 - precision - syst_error
    prior_probs = {'rand': {'eidos': rand_error},
                   'syst': {'eidos': syst_error}}
    # Rule-specific random error, with precision capped at 0.95.
    subtype_probs = {'eidos':
                     {rule: 1.0 - min(pct, 0.95) - syst_error
                      for rule, pct in zip(table['RULE'],
                                           table['% correct'])}}
    return SimpleScorer(prior_probs, subtype_probs)
Return a SimpleScorer based on Eidos curated precision estimates .
253
13
17,560
def process_from_web():
    """Return a TrrustProcessor based on the online interaction table.

    Downloads the TRRUST human TSV table, parses it into a DataFrame and
    extracts Statements from it.

    Fix over the original: ``pandas.read_table`` is deprecated; the
    equivalent ``pandas.read_csv(..., sep='\\t')`` is used instead.
    """
    logger.info('Downloading table from %s' % trrust_human_url)
    res = requests.get(trrust_human_url)
    res.raise_for_status()
    df = pandas.read_csv(io.StringIO(res.text), sep='\t')
    tp = TrrustProcessor(df)
    tp.extract_statements()
    return tp
Return a TrrustProcessor based on the online interaction table .
92
13
17,561
def process_from_webservice(id_val, id_type='pmcid', source='pmc',
                            with_grounding=True):
    """Return a RlimspProcessor with extractions for the given paper ID.

    Raises RLIMSP_Error when the service responds with a non-200 status.
    """
    fmt = '%s.normed/%s/%s' if with_grounding else '%s/%s/%s'
    resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val))
    if resp.status_code != 200:
        raise RLIMSP_Error("Bad status code: %d - %s"
                           % (resp.status_code, resp.reason))
    rp = RlimspProcessor(resp.json())
    rp.extract_statements()
    return rp
Return an output from RLIMS - p for the given PubMed ID or PMC ID .
167
19
17,562
def process_from_json_file(filename, doc_id_type=None):
    """Process RLIMS-P extractions from a bulk-download JSON-lines file.

    Improvements over the original: the file is streamed line by line
    instead of being read fully into memory with ``readlines``, and blank
    lines (e.g. a trailing newline at end of file) are skipped — the
    original passed them to ``json.loads``, which raised a decode error.
    """
    json_list = []
    with open(filename, 'rt') as f:
        for line in f:
            line = line.strip()
            if line:
                json_list.append(json.loads(line))
    rp = RlimspProcessor(json_list, doc_id_type=doc_id_type)
    rp.extract_statements()
    return rp
Process RLIMSP extractions from a bulk - download JSON file .
101
14
17,563
def get(self, key):
    """Return the first value found for ``key`` anywhere in the tree.

    The current level is checked first; otherwise child mappings are
    searched in order. Note: a stored value that is legitimately None is
    indistinguishable from a miss; get_path(s) resolves such ambiguity.
    """
    if key in self.keys():
        return self[key]
    found = None
    for child in self.values():
        if hasattr(child, 'get'):
            found = child.get(key)
            if found is not None:
                break
    return found
Find the first value within the tree which has the key .
100
12
17,564
def get_path(self, key):
    """Like ``get``, but also return the path of keys to the value.

    Returns a (key_path, value) pair; both elements are None when the
    key is not found anywhere in the tree.
    """
    if key in self.keys():
        return (key,), self[key]
    key_path, res = None, None
    for sub_key, child in self.items():
        if isinstance(child, self.__class__):
            # Recurse into sub-trees of the same type.
            key_path, res = child.get_path(key)
        elif hasattr(child, 'get'):
            res = child.get(key)
            key_path = (key,) if res is not None else None
        if res is not None and key_path is not None:
            key_path = (sub_key,) + key_path
            break
    return key_path, res
Like get but also return the path taken to the value .
151
12
17,565
def gets(self, key):
    """Like ``get``, but return a list of all matches, not just the
    first one found."""
    matches = []
    if key in self.keys():
        matches.append(self[key])
    for child in self.values():
        if isinstance(child, self.__class__):
            # Recurse into sub-trees of the same type.
            matches.extend(child.gets(key))
        elif isinstance(child, dict) and key in child.keys():
            matches.append(child[key])
    return matches
Like get but return all matches not just the first .
119
11
17,566
def get_paths(self, key):
    """Like ``gets``, but pair each match with its key path, as in
    ``get_path``. Returns a list of (key_path, value) tuples."""
    results = []
    if key in self.keys():
        results.append(((key,), self[key]))
    for sub_key, child in self.items():
        if isinstance(child, self.__class__):
            # Prefix each recursive result with the current sub-key.
            for key_path, res in child.get_paths(key):
                results.append(((sub_key,) + key_path, res))
        elif isinstance(child, dict) and key in child.keys():
            results.append(((sub_key, key), child[key]))
    return results
Like gets but include the paths like get_path for all matches .
163
14
17,567
def get_leaves(self):
    """Return the deepest stored entries of the tree as a flat set.

    Sub-trees are flattened recursively; dict values contribute their
    values, lists and sets their elements, scalars themselves.
    """
    leaves = set()
    for val in self.values():
        if isinstance(val, self.__class__):
            leaves |= val.get_leaves()
        elif isinstance(val, dict):
            leaves |= set(val.values())
        elif isinstance(val, list):
            leaves.update(val)
        elif isinstance(val, set):
            leaves |= val
        else:
            leaves.add(val)
    return leaves
Get the deepest entries as a flat set .
123
9
17,568
def determine_reach_subtype(event_name):
    """Return the REACH rule regexp category matching the given rule
    instance name, preferring the longest matching pattern; None when
    nothing matches."""
    best_match = None
    best_len = -1
    for pattern in reach_rule_regexps:
        if re.search(pattern, event_name) and len(pattern) > best_len:
            best_match = pattern
            best_len = len(pattern)
    return best_match
Returns the category of reach rule from the reach rule instance .
85
12
17,569
def print_event_statistics(self):
    """Log the number of extracted REACH events of each type."""
    logger.info('All events by type')
    logger.info('-------------------')
    for event_type, frame_ids in self.all_events.items():
        logger.info('%s, %s' % (event_type, len(frame_ids)))
    logger.info('-------------------')
Print the number of events in the REACH output by type .
76
13
17,570
def get_all_events(self):
    """Gather all event frame IDs in the REACH output into
    ``self.all_events``, keyed by event type.

    Improvement over the original: grouping uses ``dict.setdefault``
    instead of a per-item ``try/except KeyError``, which is both
    idiomatic and avoids the cost of raising an exception for every new
    event type.
    """
    self.all_events = {}
    events = self.tree.execute("$.events.frames")
    if events is None:
        return
    for event in events:
        event_type = event.get('type')
        frame_id = event.get('frame_id')
        self.all_events.setdefault(event_type, []).append(frame_id)
Gather all event IDs in the REACH output by type .
109
13
17,571
def get_modifications(self):
    """Extract Modification INDRA Statements.

    Finds protein-modification event frames, then for each modification
    site, looks up the regulation events controlling it to identify the
    enzyme.

    Fix over the original: the effective modification type for a negative
    regulation is now computed in a per-regulation local variable. The
    original reassigned ``modification_type`` in place, so a second
    negative regulation of the same event saw the already-inverted type
    and inverted it back; the warning for an unhandled type also printed
    None instead of the offending type.
    """
    # Find all event frames that are a type of protein modification
    qstr = "$.events.frames[(@.type is 'protein-modification')]"
    res = self.tree.execute(qstr)
    if res is None:
        return
    for r in res:
        # The subtype of the modification
        modification_type = r.get('subtype')
        # Skip negated events (i.e. something doesn't happen)
        epistemics = self._get_epistemics(r)
        if epistemics.get('negated'):
            continue
        annotations, context = self._get_annot_context(r)
        frame_id = r['frame_id']
        args = r['arguments']
        site = None
        theme = None
        # Find the substrate (the "theme" agent here) and the site and
        # position it is modified on
        for a in args:
            if self._get_arg_type(a) == 'theme':
                theme = a['arg']
            elif self._get_arg_type(a) == 'site':
                site = a['text']
        theme_agent, theme_coords = self._get_agent_from_entity(theme)
        if site is not None:
            mods = self._parse_site_text(site)
        else:
            mods = [(None, None)]
        # Add up to one statement for each site
        for residue, pos in mods:
            # Look for all regulation events to get to the enzymes
            # (the "controller" here)
            qstr = "$.events.frames[(@.type is 'regulation') and " + \
                   "(@.arguments[0].arg is '%s')]" % frame_id
            reg_res = list(self.tree.execute(qstr))
            for reg in reg_res:
                controller_agent, controller_coords = None, None
                for a in reg['arguments']:
                    if self._get_arg_type(a) == 'controller':
                        controller = a.get('arg')
                        if controller is not None:
                            controller_agent, controller_coords = \
                                self._get_agent_from_entity(controller)
                            break
                # Check the polarity of the regulation and if negative,
                # flip the modification type: a negative-regulation of a
                # phosphorylation becomes an (indirect) dephosphorylation.
                effective_type = modification_type
                if reg.get('subtype') == 'negative-regulation':
                    effective_type = \
                        modtype_to_inverse.get(modification_type)
                    if not effective_type:
                        logger.warning('Unhandled modification type: %s'
                                       % modification_type)
                        continue
                sentence = reg['verbose-text']
                annotations['agents']['coords'] = [controller_coords,
                                                   theme_coords]
                ev = Evidence(source_api='reach', text=sentence,
                              annotations=annotations, pmid=self.citation,
                              context=context, epistemics=epistemics)
                stmt_args = [controller_agent, theme_agent, residue, pos,
                             ev]
                # Here ModStmt is a sub-class of Modification
                ModStmt = modtype_to_modclass.get(effective_type)
                if ModStmt is None:
                    logger.warning('Unhandled modification type: %s'
                                   % effective_type)
                else:
                    # Special case: autophosphorylation only takes the
                    # enzyme argument
                    if effective_type == 'autophosphorylation':
                        stmt_args = [theme_agent, residue, pos, ev]
                    self.statements.append(ModStmt(*stmt_args))
Extract Modification INDRA Statements .
813
8
17,572
def get_regulate_amounts(self):
    """Extract IncreaseAmount/DecreaseAmount INDRA Statements from
    transcription and amount events and their regulations."""
    candidates = []
    res = self.tree.execute(
        "$.events.frames[(@.type is 'transcription')]")
    if res is not None:
        candidates.extend(list(res))
    res = self.tree.execute("$.events.frames[(@.type is 'amount')]")
    if res is not None:
        candidates.extend(list(res))
    for frame in candidates:
        # Skip negated events.
        epistemics = self._get_epistemics(frame)
        if epistemics.get('negated'):
            continue
        annotations, context = self._get_annot_context(frame)
        frame_id = frame['frame_id']
        theme = None
        for arg in frame['arguments']:
            if self._get_arg_type(arg) == 'theme':
                theme = arg['arg']
                break
        if theme is None:
            continue
        theme_agent, theme_coords = self._get_agent_from_entity(theme)
        # Find regulation events whose controlled argument is this frame.
        qstr = "$.events.frames[(@.type is 'regulation') and " + \
               "(@.arguments[0].arg is '%s')]" % frame_id
        for reg in self.tree.execute(qstr):
            controller_agent, controller_coords = None, None
            for arg in reg['arguments']:
                if self._get_arg_type(arg) == 'controller':
                    controller_agent, controller_coords = \
                        self._get_controller_agent(arg)
            sentence = reg['verbose-text']
            annotations['agents']['coords'] = [controller_coords,
                                               theme_coords]
            ev = Evidence(source_api='reach', text=sentence,
                          annotations=annotations, pmid=self.citation,
                          context=context, epistemics=epistemics)
            # Positive regulations increase, all others decrease amount.
            if reg.get('subtype') == 'positive-regulation':
                stmt = IncreaseAmount(controller_agent, theme_agent, ev)
            else:
                stmt = DecreaseAmount(controller_agent, theme_agent, ev)
            self.statements.append(stmt)
Extract RegulateAmount INDRA Statements .
507
9
17,573
def get_complexes(self):
    """Extract INDRA Complex Statements from complex-assembly events."""
    res = self.tree.execute(
        "$.events.frames[@.type is 'complex-assembly']")
    if res is None:
        return
    for frame in res:
        epistemics = self._get_epistemics(frame)
        if epistemics.get('negated'):
            continue
        # Due to an issue with the REACH output serialization (though
        # seemingly not with the raw mentions), a redundant
        # complex-assembly event is sometimes reported; it can be
        # recognized by a missing 'direct' flag, which we filter on here.
        if epistemics.get('direct') is None:
            continue
        annotations, context = self._get_annot_context(frame)
        sentence = frame['verbose-text']
        members = []
        agent_coordinates = []
        for arg in frame['arguments']:
            agent, coords = self._get_agent_from_entity(arg['arg'])
            members.append(agent)
            agent_coordinates.append(coords)
        annotations['agents']['coords'] = agent_coordinates
        ev = Evidence(source_api='reach', text=sentence,
                      annotations=annotations, pmid=self.citation,
                      context=context, epistemics=epistemics)
        self.statements.append(Complex(members, ev))
Extract INDRA Complex Statements .
309
7
17,574
def get_activation(self):
    """Extract INDRA Activation/Inhibition Statements from activation
    events.

    Fix over the original: the controller and controlled agents are now
    initialized before the argument loop, and frames missing either
    argument are skipped. The original left the variables unassigned in
    that case and raised UnboundLocalError.
    """
    res = self.tree.execute("$.events.frames[@.type is 'activation']")
    if res is None:
        return
    for r in res:
        epistemics = self._get_epistemics(r)
        if epistemics.get('negated'):
            continue
        sentence = r['verbose-text']
        annotations, context = self._get_annot_context(r)
        ev = Evidence(source_api='reach', text=sentence,
                      pmid=self.citation, annotations=annotations,
                      context=context, epistemics=epistemics)
        # Sentinel distinguishes "argument never seen" from a
        # legitimately-None agent returned by the helpers.
        missing = object()
        controller_agent = controlled_agent = missing
        controller_coords = controlled_coords = None
        for a in r['arguments']:
            if self._get_arg_type(a) == 'controller':
                controller_agent, controller_coords = \
                    self._get_controller_agent(a)
            if self._get_arg_type(a) == 'controlled':
                controlled_agent, controlled_coords = \
                    self._get_agent_from_entity(a['arg'])
        if controller_agent is missing or controlled_agent is missing:
            # Malformed frame lacking a controller/controlled argument.
            continue
        annotations['agents']['coords'] = [controller_coords,
                                           controlled_coords]
        if r['subtype'] == 'positive-activation':
            st = Activation(controller_agent, controlled_agent,
                            evidence=ev)
        else:
            st = Inhibition(controller_agent, controlled_agent,
                            evidence=ev)
        self.statements.append(st)
Extract INDRA Activation Statements .
323
8
17,575
def get_translocation(self):
    """Extract INDRA Translocation Statements from translocation events.

    Fix over the original: ``agent`` is initialized to None and events
    with no resolvable theme agent are skipped after the argument loop.
    The original raised UnboundLocalError when the theme argument was
    absent, and its ``continue`` on an unresolvable theme only skipped
    the remaining arguments, not the event.
    """
    res = self.tree.execute("$.events.frames[@.type is 'translocation']")
    if res is None:
        return
    for r in res:
        epistemics = self._get_epistemics(r)
        if epistemics.get('negated'):
            continue
        sentence = r['verbose-text']
        annotations, context = self._get_annot_context(r)
        agent = None
        theme_coords = None
        from_location = None
        to_location = None
        for a in r['arguments']:
            arg_type = self._get_arg_type(a)
            if arg_type == 'theme':
                agent, theme_coords = \
                    self._get_agent_from_entity(a['arg'])
            elif arg_type == 'source':
                from_location = self._get_location_by_id(a['arg'])
            elif arg_type == 'destination':
                to_location = self._get_location_by_id(a['arg'])
        # Skip events without a resolvable theme agent.
        if agent is None:
            continue
        annotations['agents']['coords'] = [theme_coords]
        ev = Evidence(source_api='reach', text=sentence,
                      pmid=self.citation, annotations=annotations,
                      context=context, epistemics=epistemics)
        st = Translocation(agent, from_location, to_location,
                           evidence=ev)
        self.statements.append(st)
Extract INDRA Translocation Statements .
343
8
17,576
def _get_mod_conditions(self, mod_term):
    """Return a list of ModConditions given a modification term dict.

    Unmapped modification types produce a warning per parsed site and no
    ModCondition.
    """
    site = mod_term.get('site')
    mods = self._parse_site_text(site) if site is not None \
        else [Site(None, None)]
    # The modification type mapping is the same for every parsed site,
    # so look it up once outside the loop.
    mod_type_str = mod_term['type'].lower()
    mod_state = agent_mod_map.get(mod_type_str)
    mcs = []
    for mod_res, mod_pos in mods:
        if mod_state is not None:
            mcs.append(ModCondition(mod_state[0], residue=mod_res,
                                    position=mod_pos,
                                    is_modified=mod_state[1]))
        else:
            logger.warning('Unhandled entity modification type: %s'
                           % mod_type_str)
    return mcs
Return a list of ModConditions given a mod term dict .
190
13
17,577
def _get_entity_coordinates(self, entity_term):
    """Return (start, stop) coordinates of an entity relative to its
    containing sentence, or None if any required offset is missing."""
    def offset_of(pos_dict):
        # Pull the integer 'offset' out of a position dict, if present.
        return None if pos_dict is None else pos_dict.get('offset')

    # Locate the frame of the sentence containing the entity.
    sent_id = entity_term.get('sentence')
    if sent_id is None:
        return None
    qstr = "$.sentences.frames[(@.frame_id is \'%s')]" % sent_id
    res = self.tree.execute(qstr)
    if res is None:
        return None
    try:
        sentence = next(res)
    except StopIteration:
        return None
    sent_start = offset_of(sentence.get('start-pos'))
    if sent_start is None:
        return None
    # Entity offsets are document-wide; subtract the sentence's starting
    # offset to get sentence-relative coordinates.
    entity_start = offset_of(entity_term.get('start-pos'))
    entity_stop = offset_of(entity_term.get('end-pos'))
    if entity_start is None or entity_stop is None:
        return None
    return (entity_start - sent_start, entity_stop - sent_start)
Return sentence coordinates for a given entity .
309
8
17,578
def _get_section(self, event):
    """Return the normalized section of the paper the event is from, or
    None if the section cannot be determined.

    Fix over the original: when no section could be resolved (missing
    sentence ID, failed frame lookups, or a passage without a
    'section-id'), ``section`` remained None and the original fell
    through to ``section.startswith(...)``, raising AttributeError. An
    explicit None guard now returns early.
    """
    sentence_id = event.get('sentence')
    section = None
    if sentence_id:
        qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" % sentence_id
        res = self.tree.execute(qstr)
        if res:
            sentence_frame = list(res)[0]
            passage_id = sentence_frame.get('passage')
            if passage_id:
                qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" \
                    % passage_id
                res = self.tree.execute(qstr)
                if res:
                    passage_frame = list(res)[0]
                    section = passage_frame.get('section-id')
    if section is None:
        return None
    # If the section is in the standard list, return as is
    if section in self._section_list:
        return section
    # Next, handle a few special cases that come up in practice
    if section.startswith('fig'):
        return 'figure'
    if section.startswith('supm'):
        return 'supplementary'
    if section == 'article-title':
        return 'title'
    if section in ['subjects|methods', 'methods|subjects']:
        return 'methods'
    if section == 'conclusions':
        return 'conclusion'
    if section == 'intro':
        return 'introduction'
    return None
Get the section of the paper that the event is from .
318
12
17,579
def _get_controller_agent(self, arg):
    """Return an (agent, coords) pair for a single or complex controller
    argument.

    Fix over the original: ``coords`` is initialized to None, so an
    argument that is neither a single entity nor a complex no longer
    raises UnboundLocalError on return.
    """
    controller_agent = None
    coords = None
    controller = arg.get('arg')
    if controller is not None:
        # A single controller entity
        controller_agent, coords = \
            self._get_agent_from_entity(controller)
    elif arg['argument-type'] == 'complex':
        # The controller is a complex: the first member becomes the
        # primary agent and the remaining members its bound conditions.
        controllers = list(arg.get('args').values())
        controller_agent, coords = \
            self._get_agent_from_entity(controllers[0])
        bound_agents = [self._get_agent_from_entity(c)[0]
                        for c in controllers[1:]]
        controller_agent.bound_conditions = \
            [BoundCondition(ba, True) for ba in bound_agents]
    return controller_agent, coords
Return a single or a complex controller agent .
194
9
17,580
def _sanitize ( text ) : d = { '-LRB-' : '(' , '-RRB-' : ')' } return re . sub ( '|' . join ( d . keys ( ) ) , lambda m : d [ m . group ( 0 ) ] , text )
Return sanitized Eidos text field for human readability .
63
12
17,581
def ref_context_from_geoloc(geoloc):
    """Return a RefContext object built from a geoloc entry's text and
    GeoNames ID."""
    return RefContext(name=geoloc.get('text'),
                      db_refs={'GEOID': geoloc.get('geoID')})
Return a RefContext object given a geoloc entry .
70
12
17,582
def time_context_from_timex(timex):
    """Return a TimeContext object given a timex entry, using its first
    interval's start/end/duration."""
    interval = timex['intervals'][0]
    return TimeContext(text=timex.get('text'),
                       start=_get_time_stamp(interval.get('start')),
                       end=_get_time_stamp(interval.get('end')),
                       duration=interval['duration'])
Return a TimeContext object given a timex entry .
111
11
17,583
def find_args(event, arg_type):
    """Return the IDs of all arguments of the event with the given type;
    an empty list when the event has no matching arguments."""
    arguments = event.get('arguments', {})
    return [arg['value']['@id'] for arg in arguments
            if arg['type'] == arg_type]
Return IDs of all arguments of a given type
79
9
17,584
def extract_causal_relations(self):
    """Convert all directed causal relation extractions into INDRA
    Statements and append them to self.statements."""
    for extraction in self.doc.extractions:
        # Only extractions labeled both directed and causal are handled.
        labels = extraction['labels']
        if 'DirectedRelation' not in labels or 'Causal' not in labels:
            continue
        stmt = self.get_causal_relation(extraction)
        if stmt is not None:
            self.statements.append(stmt)
Extract causal relations as Statements .
120
7
17,585
def get_evidence(self, relation):
    """Return the Evidence object for an INDRA Statement extracted from
    the given Eidos relation.

    Fix over the original: the text fallback at the end referenced an
    undefined name ``event`` (``event.get('text')``), which raised
    NameError whenever the provenance-based sentence lookup failed;
    ``relation`` is the intended object.
    """
    provenance = relation.get('provenance')
    # First try looking up the full sentence through provenance
    text = None
    context = None
    if provenance:
        sentence_tag = provenance[0].get('sentence')
        if sentence_tag and '@id' in sentence_tag:
            sentence_id = sentence_tag['@id']
            sentence = self.doc.sentences.get(sentence_id)
            if sentence is not None:
                text = _sanitize(sentence['text'])
                # Get temporal constraints if available
                timexes = sentence.get('timexes', [])
                if timexes:
                    # We currently handle just one timex per statement
                    tc = time_context_from_timex(timexes[0])
                    context = WorldContext(time=tc)
                # Get geolocation if available
                geolocs = sentence.get('geolocs', [])
                if geolocs:
                    rc = ref_context_from_geoloc(geolocs[0])
                    if context:
                        context.geo_location = rc
                    else:
                        context = WorldContext(geo_location=rc)
        # Here we try to get the title of the document and set it in the
        # provenance
        doc_id = provenance[0].get('document', {}).get('@id')
        if doc_id:
            title = self.doc.documents.get(doc_id, {}).get('title')
            if title:
                provenance[0]['document']['title'] = title
    annotations = {'found_by': relation.get('rule'),
                   'provenance': provenance}
    if self.doc.dct is not None:
        annotations['document_creation_time'] = self.doc.dct.to_json()
    epistemics = {}
    negations = self.get_negation(relation)
    hedgings = self.get_hedging(relation)
    if hedgings:
        epistemics['hedgings'] = hedgings
    if negations:
        # This is the INDRA standard to show negation
        epistemics['negated'] = True
        # We also save the texts associated with the negation under
        # annotations, just in case they are needed
        annotations['negated_texts'] = negations
    # If the provenance lookup failed, fall back on the relation's own
    # text field
    if text is None:
        text = _sanitize(relation.get('text'))
    ev = Evidence(source_api='eidos', text=text, annotations=annotations,
                  context=context, epistemics=epistemics)
    return ev
Return the Evidence object for the INDRA Statement.
607
11
17,586
def get_negation(event):
    """Return the texts of all NEGATION states attached to an event;
    an empty list when there are none."""
    states = event.get('states', [])
    return [state['text'] for state in states
            if state.get('type') == 'NEGATION']
Return negation attached to an event .
77
8
17,587
def get_hedging(event):
    """Return the texts of all HEDGE states attached to an event;
    an empty list when there are none."""
    states = event.get('states', [])
    return [state['text'] for state in states
            if state.get('type') == 'HEDGE']
Return hedging markers attached to an event .
83
9
17,588
def get_groundings(entity):
    """Return groundings as db_refs for an entity.

    The raw text is always stored under 'TEXT'; each grounding source
    with actual scored values is stored under its upper-cased name. For
    the 'UN' source, spaces in ontology concepts are replaced by
    underscores.
    """
    def entries_of(grounding):
        # Collect (ontologyConcept, value) pairs from one grounding dict;
        # None when the grounding itself is empty/missing.
        if not grounding:
            return None
        pairs = []
        # 'values' may be present but None.
        for item in grounding.get('values', []) or []:
            concept = item.get('ontologyConcept')
            score = item.get('value')
            if concept is not None and score is not None:
                pairs.append((concept, score))
        return pairs

    # Save raw text and scored groundings as db_refs
    db_refs = {'TEXT': entity['text']}
    for grounding in entity.get('groundings') or []:
        entries = entries_of(grounding)
        # Only add groundings with actual values listed
        if not entries:
            continue
        key = grounding['name'].upper()
        if key == 'UN':
            db_refs[key] = [(concept.replace(' ', '_'), score)
                            for concept, score in entries]
        else:
            db_refs[key] = entries
    return db_refs
Return groundings as db_refs for an entity .
270
12
17,589
def get_concept(entity):
    """Return an INDRA Concept constructed from an Eidos entity, named by
    its canonical name and grounded via get_groundings."""
    db_refs = EidosProcessor.get_groundings(entity)
    return Concept(entity['canonicalName'], db_refs=db_refs)
Return Concept from an Eidos entity .
64
8
17,590
def time_context_from_ref(self, timex):
    """Return the TimeContext a timex reference entry points to, or None.

    A reference carries a 'value' naming a DCT or TimeExpression, e.g.
    {"value": {"@id": "_:DCT_1"}}; the TimeContext itself is looked up
    in the document's stashed timex dictionary.
    """
    value = timex.get('value')
    if value:
        return self.doc.timexes.get(value['@id'])
    return None
Return a time context object given a timex reference entry .
121
12
17,591
def geo_context_from_ref(self, ref):
    """Return the RefContext a location reference entry points to, or
    None if the reference has no value."""
    value = ref.get('value')
    if value:
        # Look the RefContext up in the stashed geoloc dictionary.
        return self.doc.geolocs.get(value['@id'])
    return None
Return a ref context object given a location reference entry .
67
11
17,592
def time_context_from_dct(dct):
    """Return a TimeContext built from a document creation time entry."""
    return TimeContext(text=dct.get('text'),
                       start=_get_time_stamp(dct.get('start')),
                       end=_get_time_stamp(dct.get('end')),
                       duration=dct.get('duration'))
Return a time context object given a DCT entry .
103
11
17,593
def make_hash(s, n_bytes):
    """Return a signed integer hash of a matches-key string.

    The hash is derived from the first ``n_bytes`` hex digits of the
    string's MD5 digest, shifted so the result spans a signed range.
    """
    digest_prefix = md5(s.encode('utf-8')).hexdigest()[:n_bytes]
    raw_hash = int(digest_prefix, 16)
    # Center around the midpoint of the n_bytes hex-digit space so the
    # result can be negative (a signed int).
    return 16 ** n_bytes // 2 - raw_hash
Make the hash from a matches key .
66
8
17,594
def parse_a1(a1_text):
    """Parse an a1 file (TEES output) listing the entities in extracted events.

    Parameters
    ----------
    a1_text : str
        Contents of the a1 file: one entity per line with three
        tab-separated columns (identifier, entity info, entity name).

    Returns
    -------
    dict
        Mapping from entity identifier to TEESEntity.

    Raises
    ------
    Exception
        If a line does not have three tab-separated columns, or the
        entity info column does not have three space-separated tokens.
    """
    entities = {}
    for line in a1_text.split('\n'):
        if len(line) == 0:
            continue
        tokens = line.rstrip().split('\t')
        if len(tokens) != 3:
            # Fixed typo: "tab-separated" (was "tab-seperated").
            raise Exception('Expected three tab-separated tokens per line '
                            'in the a1 file output from TEES.')
        identifier, entity_info, entity_name = tokens
        info_tokens = entity_info.split()
        if len(info_tokens) != 3:
            # Fixed: this check concerns the a1 file (the message wrongly
            # said "a2"), and "space-separated" was misspelled.
            raise Exception('Expected three space-separated tokens in the '
                            'second column of the a1 file output from TEES.')
        entity_type = info_tokens[0]
        # Character offsets of the entity mention in the source text.
        offsets = (int(info_tokens[1]), int(info_tokens[2]))
        entities[identifier] = TEESEntity(identifier, entity_type,
                                          entity_name, offsets)
    return entities
Parses an a1 file, the file TEES outputs, that lists the entities in the extracted events.
248
21
17,595
def parse_output(a1_text, a2_text, sentence_segmentations):
    """Parse TEES reader output and return a networkx graph of events.

    The sentence segmentation document is parsed first, then the a1
    (entities) file, and finally the a2 (events) file, which needs both
    of the former.
    """
    sentences = TEESSentences(sentence_segmentations)
    entities = parse_a1(a1_text)
    return parse_a2(a2_text, entities, sentences)
Parses the output of the TEES reader and returns a networkx graph with the event information .
102
21
17,596
def tees_parse_networkx_to_dot(G, output_file, subgraph_nodes):
    """Convert TEES extractions in a networkx graph to a graphviz .dot file.

    Writes one edge line for every edge leaving the requested nodes and
    one label line for every node touched by those edges.
    """
    with codecs.open(output_file, 'w', encoding='utf-8') as f:
        f.write('digraph teesParse {\n')
        seen = set()
        for src in subgraph_nodes:
            for _, dst in G.edges(src):
                seen.add(src)
                seen.add(dst)
                relation = G.edges[src, dst]['relation']
                f.write('%s -> %s [ label = "%s" ];\n' % (src, dst, relation))
        for node in seen:
            attrs = G.node[node]
            if attrs['is_event']:
                # Tag event labels when negated and/or speculative.
                if attrs['negated'] and attrs['speculation']:
                    suffix = ' {NS}'
                elif attrs['negated']:
                    suffix = ' {N}'
                elif attrs['speculation']:
                    suffix = ' {S}'
                else:
                    suffix = ''
                label = attrs['type'] + suffix
            else:
                label = attrs['text']
            f.write('%s [label="%s"];\n' % (node, label))
        f.write('}\n')
Converts TEES extractions stored in a networkx graph into a graphviz .dot file.
370
21
17,597
def _get_event(self, event, find_str):
    """Get a concept referred from the event by the given string.

    Looks up the element matching find_str inside the event, resolves
    its term in the whole tree by id, and builds an Event whose Concept
    carries the term's text and (optionally) CWMS type grounding, plus
    any time/location context extracted from the term. Returns None at
    any step where the required element is missing.
    """
    # Find the referenced element inside the event.
    element = event.find(find_str)
    if element is None:
        return None
    element_id = element.attrib.get('id')
    # Resolve the term anywhere in the tree by its id.
    element_term = self.tree.find("*[@id='%s']" % element_id)
    if element_term is None:
        return None
    time, location = self._extract_time_loc(element_term)
    # See if there is a modifier like assoc-with connected to the
    # main concept.
    assoc_with = self._get_assoc_with(element_term)
    # The term's text is required to construct a Concept.
    element_text_element = element_term.find('text')
    if element_text_element is None:
        return None
    element_text = element_text_element.text
    element_db_refs = {'TEXT': element_text}
    element_name = sanitize_name(element_text)
    element_type_element = element_term.find('type')
    if element_type_element is not None:
        element_db_refs['CWMS'] = element_type_element.text
    # If there's an assoc-with, we tack it on as extra grounding.
    # Fixed: the original did an unconditional += on 'CWMS', which
    # raised KeyError when the term had no <type> element; fall back
    # to using the assoc-with alone in that case.
    if assoc_with is not None:
        if 'CWMS' in element_db_refs:
            element_db_refs['CWMS'] += ('|%s' % assoc_with)
        else:
            element_db_refs['CWMS'] = assoc_with
    concept = Concept(element_name, db_refs=element_db_refs)
    if time or location:
        context = WorldContext(time=time, geo_location=location)
    else:
        context = None
    event_obj = Event(concept, context=context)
    return event_obj
Get a concept referred from the event by the given string .
385
12
17,598
def make_model(self, grounding_ontology='UN', grounding_threshold=None):
    """Return a networkx MultiDiGraph representing a causal analysis graph.

    Nodes are Influence subject/object concepts (marked simulable when
    both polarities are known); edges carry polarity, adjective, style
    and provenance metadata for rendering.
    """
    if grounding_threshold is not None:
        self.grounding_threshold = grounding_threshold
    self.grounding_ontology = grounding_ontology
    # Only Influence Statements are currently supported.
    influences = [s for s in self.statements if isinstance(s, Influence)]
    self.CAG = nx.MultiDiGraph()
    for stmt in influences:
        subj_delta = stmt.subj.delta
        obj_delta = stmt.obj.delta
        # Simulable only when both subject and object polarity are given.
        simulable = (subj_delta['polarity'] is not None
                     and obj_delta['polarity'] is not None)
        # Register both endpoints as nodes.
        for concept, delta in ((stmt.subj.concept, subj_delta),
                               (stmt.obj.concept, obj_delta)):
            self.CAG.add_node(self._node_name(concept),
                              simulable=simulable,
                              mods=delta['adjectives'])
        # Edge is solid iff both polarities are known.
        linestyle = 'solid' if simulable else 'dotted'
        if not simulable:
            target_arrow_shape, linecolor = ('triangle', 'maroon')
        elif subj_delta['polarity'] == obj_delta['polarity']:
            target_arrow_shape, linecolor = ('circle', 'green')
        else:
            target_arrow_shape, linecolor = ('tee', 'maroon')
        # Attach provenance metadata from the first evidence, if any.
        provenance = []
        if stmt.evidence:
            provenance = stmt.evidence[0].annotations.get('provenance', [])
            if provenance:
                provenance[0]['text'] = stmt.evidence[0].text
        self.CAG.add_edge(
            self._node_name(stmt.subj.concept),
            self._node_name(stmt.obj.concept),
            subj_polarity=subj_delta['polarity'],
            subj_adjectives=subj_delta['adjectives'],
            obj_polarity=obj_delta['polarity'],
            obj_adjectives=obj_delta['adjectives'],
            linestyle=linestyle,
            linecolor=linecolor,
            targetArrowShape=target_arrow_shape,
            provenance=provenance,
        )
    return self.CAG
Return a networkx MultiDiGraph representing a causal analysis graph .
651
13
17,599
def export_to_cytoscapejs(self):
    """Return the CAG in a format readable by CytoscapeJS."""
    def _edge_data(edge):
        """Build the CytoscapeJS data dict for one MultiDiGraph edge."""
        src, dst, _, attrs = edge
        # Use the first provenance entry as the tooltip, stripping the
        # redundant '@type' label when present.
        if attrs.get('provenance'):
            tooltip = attrs['provenance'][0]
            if tooltip.get('@type'):
                del tooltip['@type']
        else:
            tooltip = None
        simulable = (attrs['subj_polarity'] is not None
                     and attrs['obj_polarity'] is not None)
        return {
            'id': src + '_' + dst,
            'source': src,
            'target': dst,
            'linestyle': attrs['linestyle'],
            'linecolor': attrs['linecolor'],
            'targetArrowShape': attrs['targetArrowShape'],
            'subj_adjectives': attrs['subj_adjectives'],
            'subj_polarity': attrs['subj_polarity'],
            'obj_adjectives': attrs['obj_adjectives'],
            'obj_polarity': attrs['obj_polarity'],
            'tooltip': tooltip,
            'simulable': simulable,
        }

    nodes = [{'data': {'id': name,
                       'simulable': attrs['simulable'],
                       'tooltip': 'Modifiers: ' + json.dumps(attrs['mods'])}}
             for name, attrs in self.CAG.nodes(data=True)]
    edges = [{'data': _edge_data(e)}
             for e in self.CAG.edges(data=True, keys=True)]
    return {'nodes': nodes, 'edges': edges}
Return CAG in format readable by CytoscapeJS .
488
13