idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
17,200
def submit_reading ( basename , pmid_list_filename , readers , start_ix = None , end_ix = None , pmids_per_job = 3000 , num_tries = 2 , force_read = False , force_fulltext = False , project_name = None ) : sub = PmidSubmitter ( basename , readers , project_name ) sub . set_options ( force_read , force_fulltext ) sub . submit_reading ( pmid_list_filename , start_ix , end_ix , pmids_per_job , num_tries ) return sub . job_list
Submit an old - style pmid - centered no - database s3 only reading job .
135
18
17,201
def submit_combine ( basename , readers , job_ids = None , project_name = None ) : sub = PmidSubmitter ( basename , readers , project_name ) sub . job_list = job_ids sub . submit_combine ( ) return sub
Submit a batch job to combine the outputs of a reading job .
59
13
17,202
def submit_reading ( self , input_fname , start_ix , end_ix , ids_per_job , num_tries = 1 , stagger = 0 ) : # stash this for later. self . ids_per_job = ids_per_job # Upload the pmid_list to Amazon S3 id_list_key = 'reading_results/%s/%s' % ( self . basename , self . _s3_input_name ) s3_client = boto3 . client ( 's3' ) s3_client . upload_file ( input_fname , bucket_name , id_list_key ) # If no end index is specified, read all the PMIDs if end_ix is None : with open ( input_fname , 'rt' ) as f : lines = f . readlines ( ) end_ix = len ( lines ) if start_ix is None : start_ix = 0 # Get environment variables environment_vars = get_environment ( ) # Iterate over the list of PMIDs and submit the job in chunks batch_client = boto3 . client ( 'batch' , region_name = 'us-east-1' ) job_list = [ ] for job_start_ix in range ( start_ix , end_ix , ids_per_job ) : sleep ( stagger ) job_end_ix = job_start_ix + ids_per_job if job_end_ix > end_ix : job_end_ix = end_ix job_name , cmd = self . _make_command ( job_start_ix , job_end_ix ) command_list = get_batch_command ( cmd , purpose = self . _purpose , project = self . project_name ) logger . info ( 'Command list: %s' % str ( command_list ) ) job_info = batch_client . submit_job ( jobName = job_name , jobQueue = self . _job_queue , jobDefinition = self . _job_def , containerOverrides = { 'environment' : environment_vars , 'command' : command_list } , retryStrategy = { 'attempts' : num_tries } ) logger . info ( "submitted..." ) job_list . append ( { 'jobId' : job_info [ 'jobId' ] } ) self . job_list = job_list return job_list
Submit a batch of reading jobs
533
6
17,203
def watch_and_wait ( self , poll_interval = 10 , idle_log_timeout = None , kill_on_timeout = False , stash_log_method = None , tag_instances = False , * * kwargs ) : return wait_for_complete ( self . _job_queue , job_list = self . job_list , job_name_prefix = self . basename , poll_interval = poll_interval , idle_log_timeout = idle_log_timeout , kill_on_log_timeout = kill_on_timeout , stash_log_method = stash_log_method , tag_instances = tag_instances , * * kwargs )
This provides shortcut access to the wait_for_complete_function .
153
14
17,204
def run ( self , input_fname , ids_per_job , stagger = 0 , * * wait_params ) : submit_thread = Thread ( target = self . submit_reading , args = ( input_fname , 0 , None , ids_per_job ) , kwargs = { 'stagger' : stagger } , daemon = True ) submit_thread . start ( ) self . watch_and_wait ( * * wait_params ) submit_thread . join ( 0 ) if submit_thread . is_alive ( ) : logger . warning ( "Submit thread is still running even after job" "completion." ) return
Run this submission all the way .
140
7
17,205
def set_options ( self , force_read = False , force_fulltext = False ) : self . options [ 'force_read' ] = force_read self . options [ 'force_fulltext' ] = force_fulltext return
Set the options for this run .
52
7
17,206
def get_chebi_name_from_id ( chebi_id , offline = False ) : chebi_name = chebi_id_to_name . get ( chebi_id ) if chebi_name is None and not offline : chebi_name = get_chebi_name_from_id_web ( chebi_id ) return chebi_name
Return a ChEBI name corresponding to the given ChEBI ID .
82
15
17,207
def get_chebi_name_from_id_web ( chebi_id ) : url_base = 'http://www.ebi.ac.uk/webservices/chebi/2.0/test/' url_fmt = url_base + 'getCompleteEntity?chebiId=%s' resp = requests . get ( url_fmt % chebi_id ) if resp . status_code != 200 : logger . warning ( "Got bad code form CHEBI client: %s" % resp . status_code ) return None tree = etree . fromstring ( resp . content ) # Get rid of the namespaces. # Credit: https://stackoverflow.com/questions/18159221/remove-namespace-and-prefix-from-xml-in-python-using-lxml for elem in tree . getiterator ( ) : if not hasattr ( elem . tag , 'find' ) : continue # (1) i = elem . tag . find ( '}' ) if i >= 0 : elem . tag = elem . tag [ i + 1 : ] objectify . deannotate ( tree , cleanup_namespaces = True ) elem = tree . find ( 'Body/getCompleteEntityResponse/return/chebiAsciiName' ) if elem is not None : return elem . text return None
Return a ChEBI mame corresponding to a given ChEBI ID using a REST API .
301
20
17,208
def get_subnetwork ( statements , nodes , relevance_network = None , relevance_node_lim = 10 ) : if relevance_network is not None : relevant_nodes = _find_relevant_nodes ( nodes , relevance_network , relevance_node_lim ) all_nodes = nodes + relevant_nodes else : all_nodes = nodes filtered_statements = _filter_statements ( statements , all_nodes ) pa = PysbAssembler ( ) pa . add_statements ( filtered_statements ) model = pa . make_model ( ) return model
Return a PySB model based on a subset of given INDRA Statements .
128
15
17,209
def _filter_statements ( statements , agents ) : filtered_statements = [ ] for s in stmts : if all ( [ a is not None for a in s . agent_list ( ) ] ) and all ( [ a . name in agents for a in s . agent_list ( ) ] ) : filtered_statements . append ( s ) return filtered_statements
Return INDRA Statements which have Agents in the given list .
82
12
17,210
def _find_relevant_nodes ( query_nodes , relevance_network , relevance_node_lim ) : all_nodes = relevance_client . get_relevant_nodes ( relevance_network , query_nodes ) nodes = [ n [ 0 ] for n in all_nodes [ : relevance_node_lim ] ] return nodes
Return a list of nodes that are relevant for the query .
75
12
17,211
def process_jsonld_file ( fname ) : with open ( fname , 'r' ) as fh : json_dict = json . load ( fh ) return process_jsonld ( json_dict )
Process a JSON - LD file in the new format to extract Statements .
47
14
17,212
def tag_instance ( instance_id , * * tags ) : logger . debug ( "Got request to add tags %s to instance %s." % ( str ( tags ) , instance_id ) ) ec2 = boto3 . resource ( 'ec2' ) instance = ec2 . Instance ( instance_id ) # Remove None's from `tags` filtered_tags = { k : v for k , v in tags . items ( ) if v and k } # Check for existing tags if instance . tags is not None : existing_tags = { tag . get ( 'Key' ) : tag . get ( 'Value' ) for tag in instance . tags } logger . debug ( "Ignoring existing tags; %s" % str ( existing_tags ) ) for tag_key in existing_tags . keys ( ) : filtered_tags . pop ( tag_key , None ) # If we have new tags to add, add them. tag_list = [ { 'Key' : k , 'Value' : v } for k , v in filtered_tags . items ( ) ] if len ( tag_list ) : logger . info ( 'Adding project tags "%s" to instance %s' % ( filtered_tags , instance_id ) ) instance . create_tags ( Tags = tag_list ) else : logger . info ( 'No new tags from: %s' % str ( tags ) ) return
Tag a single ec2 instance .
300
7
17,213
def tag_myself ( project = 'cwc' , * * other_tags ) : base_url = "http://169.254.169.254" try : resp = requests . get ( base_url + "/latest/meta-data/instance-id" ) except requests . exceptions . ConnectionError : logger . warning ( "Could not connect to service. Note this should only " "be run from within a batch job." ) return instance_id = resp . text tag_instance ( instance_id , project = project , * * other_tags ) return
Function run when indra is used in an EC2 instance to apply tags .
121
16
17,214
def get_batch_command ( command_list , project = None , purpose = None ) : command_str = ' ' . join ( command_list ) ret = [ 'python' , '-m' , 'indra.util.aws' , 'run_in_batch' , command_str ] if not project and has_config ( 'DEFAULT_AWS_PROJECT' ) : project = get_config ( 'DEFAULT_AWS_PROJECT' ) if project : ret += [ '--project' , project ] if purpose : ret += [ '--purpose' , purpose ] return ret
Get the command appropriate for running something on batch .
131
10
17,215
def get_jobs ( job_queue = 'run_reach_queue' , job_status = 'RUNNING' ) : batch = boto3 . client ( 'batch' ) jobs = batch . list_jobs ( jobQueue = job_queue , jobStatus = job_status ) return jobs . get ( 'jobSummaryList' )
Returns a list of dicts with jobName and jobId for each job with the given status .
73
20
17,216
def get_job_log ( job_info , log_group_name = '/aws/batch/job' , write_file = True , verbose = False ) : job_name = job_info [ 'jobName' ] job_id = job_info [ 'jobId' ] logs = boto3 . client ( 'logs' ) batch = boto3 . client ( 'batch' ) resp = batch . describe_jobs ( jobs = [ job_id ] ) job_desc = resp [ 'jobs' ] [ 0 ] job_def_name = job_desc [ 'jobDefinition' ] . split ( '/' ) [ - 1 ] . split ( ':' ) [ 0 ] task_arn_id = job_desc [ 'container' ] [ 'taskArn' ] . split ( '/' ) [ - 1 ] log_stream_name = '%s/default/%s' % ( job_def_name , task_arn_id ) stream_resp = logs . describe_log_streams ( logGroupName = log_group_name , logStreamNamePrefix = log_stream_name ) streams = stream_resp . get ( 'logStreams' ) if not streams : logger . warning ( 'No streams for job' ) return None elif len ( streams ) > 1 : logger . warning ( 'More than 1 stream for job, returning first' ) log_stream_name = streams [ 0 ] [ 'logStreamName' ] if verbose : logger . info ( "Getting log for %s/%s" % ( job_name , job_id ) ) out_file = ( '%s_%s.log' % ( job_name , job_id ) ) if write_file else None lines = get_log_by_name ( log_group_name , log_stream_name , out_file , verbose ) return lines
Gets the Cloudwatch log associated with the given job .
410
12
17,217
def get_log_by_name ( log_group_name , log_stream_name , out_file = None , verbose = True ) : logs = boto3 . client ( 'logs' ) kwargs = { 'logGroupName' : log_group_name , 'logStreamName' : log_stream_name , 'startFromHead' : True } lines = [ ] while True : response = logs . get_log_events ( * * kwargs ) # If we've gotten all the events already, the nextForwardToken for # this call will be the same as the last one if response . get ( 'nextForwardToken' ) == kwargs . get ( 'nextToken' ) : break else : events = response . get ( 'events' ) if events : lines += [ '%s: %s\n' % ( evt [ 'timestamp' ] , evt [ 'message' ] ) for evt in events ] kwargs [ 'nextToken' ] = response . get ( 'nextForwardToken' ) if verbose : logger . info ( '%d %s' % ( len ( lines ) , lines [ - 1 ] ) ) if out_file : with open ( out_file , 'wt' ) as f : for line in lines : f . write ( line ) return lines
Download a log given the log s group and stream name .
289
12
17,218
def dump_logs ( job_queue = 'run_reach_queue' , job_status = 'RUNNING' ) : jobs = get_jobs ( job_queue , job_status ) for job in jobs : get_job_log ( job , write_file = True )
Write logs for all jobs with given the status to files .
62
12
17,219
def get_s3_file_tree ( s3 , bucket , prefix ) : def get_some_keys ( keys , marker = None ) : if marker : relevant_files = s3 . list_objects ( Bucket = bucket , Prefix = prefix , Marker = marker ) else : relevant_files = s3 . list_objects ( Bucket = bucket , Prefix = prefix ) keys . extend ( [ entry [ 'Key' ] for entry in relevant_files [ 'Contents' ] if entry [ 'Key' ] != marker ] ) return relevant_files [ 'IsTruncated' ] file_keys = [ ] marker = None while get_some_keys ( file_keys , marker ) : marker = file_keys [ - 1 ] file_tree = NestedDict ( ) pref_path = prefix . split ( '/' ) [ : - 1 ] # avoid the trailing empty str. for key in file_keys : full_path = key . split ( '/' ) relevant_path = full_path [ len ( pref_path ) : ] curr = file_tree for step in relevant_path : curr = curr [ step ] curr [ 'key' ] = key return file_tree
Overcome s3 response limit and return NestedDict tree of paths .
260
16
17,220
def print_model ( self , include_unsigned_edges = False ) : sif_str = '' for edge in self . graph . edges ( data = True ) : n1 = edge [ 0 ] n2 = edge [ 1 ] data = edge [ 2 ] polarity = data . get ( 'polarity' ) if polarity == 'negative' : rel = '-1' elif polarity == 'positive' : rel = '1' elif include_unsigned_edges : rel = '0' else : continue sif_str += '%s %s %s\n' % ( n1 , rel , n2 ) return sif_str
Return a SIF string of the assembled model .
145
10
17,221
def save_model ( self , fname , include_unsigned_edges = False ) : sif_str = self . print_model ( include_unsigned_edges ) with open ( fname , 'wb' ) as fh : fh . write ( sif_str . encode ( 'utf-8' ) )
Save the assembled model s SIF string into a file .
71
12
17,222
def print_boolean_net ( self , out_file = None ) : init_str = '' for node_key in self . graph . nodes ( ) : node_name = self . graph . node [ node_key ] [ 'name' ] init_str += '%s = False\n' % node_name rule_str = '' for node_key in self . graph . nodes ( ) : node_name = self . graph . node [ node_key ] [ 'name' ] in_edges = self . graph . in_edges ( node_key ) if not in_edges : continue parents = [ e [ 0 ] for e in in_edges ] polarities = [ self . graph . edge [ e [ 0 ] ] [ node_key ] [ 'polarity' ] for e in in_edges ] pos_parents = [ par for par , pol in zip ( parents , polarities ) if pol == 'positive' ] neg_parents = [ par for par , pol in zip ( parents , polarities ) if pol == 'negative' ] rhs_pos_parts = [ ] for par in pos_parents : rhs_pos_parts . append ( self . graph . node [ par ] [ 'name' ] ) rhs_pos_str = ' or ' . join ( rhs_pos_parts ) rhs_neg_parts = [ ] for par in neg_parents : rhs_neg_parts . append ( self . graph . node [ par ] [ 'name' ] ) rhs_neg_str = ' or ' . join ( rhs_neg_parts ) if rhs_pos_str : if rhs_neg_str : rhs_str = '(' + rhs_pos_str + ') and not (' + rhs_neg_str + ')' else : rhs_str = rhs_pos_str else : rhs_str = 'not (' + rhs_neg_str + ')' node_eq = '%s* = %s\n' % ( node_name , rhs_str ) rule_str += node_eq full_str = init_str + '\n' + rule_str if out_file is not None : with open ( out_file , 'wt' ) as fh : fh . write ( full_str ) return full_str
Return a Boolean network from the assembled graph .
514
9
17,223
def _ensure_api_keys ( task_desc , failure_ret = None ) : def check_func_wrapper ( func ) : @ wraps ( func ) def check_api_keys ( * args , * * kwargs ) : global ELSEVIER_KEYS if ELSEVIER_KEYS is None : ELSEVIER_KEYS = { } # Try to read in Elsevier API keys. For each key, first check # the environment variables, then check the INDRA config file. if not has_config ( INST_KEY_ENV_NAME ) : logger . warning ( 'Institution API key %s not found in config ' 'file or environment variable: this will ' 'limit access for %s' % ( INST_KEY_ENV_NAME , task_desc ) ) ELSEVIER_KEYS [ 'X-ELS-Insttoken' ] = get_config ( INST_KEY_ENV_NAME ) if not has_config ( API_KEY_ENV_NAME ) : logger . error ( 'API key %s not found in configuration file ' 'or environment variable: cannot %s' % ( API_KEY_ENV_NAME , task_desc ) ) return failure_ret ELSEVIER_KEYS [ 'X-ELS-APIKey' ] = get_config ( API_KEY_ENV_NAME ) elif 'X-ELS-APIKey' not in ELSEVIER_KEYS . keys ( ) : logger . error ( 'No Elsevier API key %s found: cannot %s' % ( API_KEY_ENV_NAME , task_desc ) ) return failure_ret return func ( * args , * * kwargs ) return check_api_keys return check_func_wrapper
Wrap Elsevier methods which directly use the API keys .
382
12
17,224
def check_entitlement ( doi ) : if doi . lower ( ) . startswith ( 'doi:' ) : doi = doi [ 4 : ] url = '%s/%s' % ( elsevier_entitlement_url , doi ) params = { 'httpAccept' : 'text/xml' } res = requests . get ( url , params , headers = ELSEVIER_KEYS ) if not res . status_code == 200 : logger . error ( 'Could not check entitlements for article %s: ' 'status code %d' % ( doi , res . status_code ) ) logger . error ( 'Response content: %s' % res . text ) return False return True
Check whether IP and credentials enable access to content for a doi .
152
13
17,225
def download_article ( id_val , id_type = 'doi' , on_retry = False ) : if id_type == 'pmid' : id_type = 'pubmed_id' url = '%s/%s' % ( elsevier_article_url_fmt % id_type , id_val ) params = { 'httpAccept' : 'text/xml' } res = requests . get ( url , params , headers = ELSEVIER_KEYS ) if res . status_code == 404 : logger . info ( "Resource for %s not available on elsevier." % url ) return None elif res . status_code == 429 : if not on_retry : logger . warning ( "Broke the speed limit. Waiting half a second then " "trying again..." ) sleep ( 0.5 ) return download_article ( id_val , id_type , True ) else : logger . error ( "Still breaking speed limit after waiting." ) logger . error ( "Elsevier response: %s" % res . text ) return None elif res . status_code != 200 : logger . error ( 'Could not download article %s: status code %d' % ( url , res . status_code ) ) logger . error ( 'Elsevier response: %s' % res . text ) return None else : content_str = res . content . decode ( 'utf-8' ) if content_str . startswith ( '<service-error>' ) : logger . error ( 'Got a service error with 200 status: %s' % content_str ) return None # Return the XML content as a unicode string, assuming UTF-8 encoding return content_str
Low level function to get an XML article for a particular id .
368
13
17,226
def download_article_from_ids ( * * id_dict ) : valid_id_types = [ 'eid' , 'doi' , 'pmid' , 'pii' ] assert all ( [ k in valid_id_types for k in id_dict . keys ( ) ] ) , ( "One of these id keys is invalid: %s Valid keys are: %s." % ( list ( id_dict . keys ( ) ) , valid_id_types ) ) if 'doi' in id_dict . keys ( ) and id_dict [ 'doi' ] . lower ( ) . startswith ( 'doi:' ) : id_dict [ 'doi' ] = id_dict [ 'doi' ] [ 4 : ] content = None for id_type in valid_id_types : if id_type in id_dict . keys ( ) : content = download_article ( id_dict [ id_type ] , id_type ) if content is not None : break else : logger . error ( "Could not download article with any of the ids: %s." % str ( id_dict ) ) return content
Download an article in XML format from Elsevier matching the set of ids .
245
16
17,227
def get_abstract ( doi ) : xml_string = download_article ( doi ) if xml_string is None : return None assert isinstance ( xml_string , str ) xml_tree = ET . XML ( xml_string . encode ( 'utf-8' ) , parser = UTB ( ) ) if xml_tree is None : return None coredata = xml_tree . find ( 'article:coredata' , elsevier_ns ) abstract = coredata . find ( 'dc:description' , elsevier_ns ) abs_text = abstract . text return abs_text
Get the abstract text of an article from Elsevier given a doi .
128
14
17,228
def get_article ( doi , output_format = 'txt' ) : xml_string = download_article ( doi ) if output_format == 'txt' and xml_string is not None : text = extract_text ( xml_string ) return text return xml_string
Get the full body of an article from Elsevier .
58
11
17,229
def extract_paragraphs ( xml_string ) : assert isinstance ( xml_string , str ) xml_tree = ET . XML ( xml_string . encode ( 'utf-8' ) , parser = UTB ( ) ) full_text = xml_tree . find ( 'article:originalText' , elsevier_ns ) if full_text is None : logger . info ( 'Could not find full text element article:originalText' ) return None article_body = _get_article_body ( full_text ) if article_body : return article_body raw_text = _get_raw_text ( full_text ) if raw_text : return [ raw_text ] return None
Get paragraphs from the body of the given Elsevier xml .
149
12
17,230
def get_dois ( query_str , count = 100 ) : url = '%s/%s' % ( elsevier_search_url , query_str ) params = { 'query' : query_str , 'count' : count , 'httpAccept' : 'application/xml' , 'sort' : '-coverdate' , 'field' : 'doi' } res = requests . get ( url , params ) if not res . status_code == 200 : return None tree = ET . XML ( res . content , parser = UTB ( ) ) doi_tags = tree . findall ( 'atom:entry/prism:doi' , elsevier_ns ) dois = [ dt . text for dt in doi_tags ] return dois
Search ScienceDirect through the API for articles .
166
9
17,231
def get_piis ( query_str ) : dates = range ( 1960 , datetime . datetime . now ( ) . year ) all_piis = flatten ( [ get_piis_for_date ( query_str , date ) for date in dates ] ) return all_piis
Search ScienceDirect through the API for articles and return PIIs .
64
13
17,232
def get_piis_for_date ( query_str , date ) : count = 200 params = { 'query' : query_str , 'count' : count , 'start' : 0 , 'sort' : '-coverdate' , 'date' : date , 'field' : 'pii' } all_piis = [ ] while True : res = requests . get ( elsevier_search_url , params , headers = ELSEVIER_KEYS ) if not res . status_code == 200 : logger . info ( 'Got status code: %d' % res . status_code ) break res_json = res . json ( ) entries = res_json [ 'search-results' ] [ 'entry' ] logger . info ( res_json [ 'search-results' ] [ 'opensearch:totalResults' ] ) if entries == [ { '@_fa' : 'true' , 'error' : 'Result set was empty' } ] : logger . info ( 'Search result was empty' ) return [ ] piis = [ entry [ 'pii' ] for entry in entries ] all_piis += piis # Get next batch links = res_json [ 'search-results' ] . get ( 'link' , [ ] ) cont = False for link in links : if link . get ( '@ref' ) == 'next' : logger . info ( 'Found link to next batch of results.' ) params [ 'start' ] += count cont = True break if not cont : break return all_piis
Search ScienceDirect with a query string constrained to a given year .
334
13
17,233
def download_from_search ( query_str , folder , do_extract_text = True , max_results = None ) : piis = get_piis ( query_str ) for pii in piis [ : max_results ] : if os . path . exists ( os . path . join ( folder , '%s.txt' % pii ) ) : continue logger . info ( 'Downloading %s' % pii ) xml = download_article ( pii , 'pii' ) sleep ( 1 ) if do_extract_text : txt = extract_text ( xml ) if not txt : continue with open ( os . path . join ( folder , '%s.txt' % pii ) , 'wb' ) as fh : fh . write ( txt . encode ( 'utf-8' ) ) else : with open ( os . path . join ( folder , '%s.xml' % pii ) , 'wb' ) as fh : fh . write ( xml . encode ( 'utf-8' ) ) return
Save raw text files based on a search for papers on ScienceDirect .
233
14
17,234
def extract_statement_from_query_result ( self , res ) : agent_start , agent_end , affected_start , affected_end = res # Convert from rdflib literals to python integers so we can use # them to index strings agent_start = int ( agent_start ) agent_end = int ( agent_end ) affected_start = int ( affected_start ) affected_end = int ( affected_end ) # Find the text corresponding to these indices agent = self . text [ agent_start : agent_end ] affected = self . text [ affected_start : affected_end ] # Strip off surrounding whitespace agent = agent . lstrip ( ) . rstrip ( ) affected = affected . lstrip ( ) . rstrip ( ) # Make an Agent object for both the subject and the object subj = Agent ( agent , db_refs = { 'TEXT' : agent } ) obj = Agent ( affected , db_refs = { 'TEXT' : affected } ) statement = Influence ( subj = subj , obj = obj ) # Add the statement to the list of statements self . statements . append ( statement )
Adds a statement based on one element of a rdflib SPARQL query .
244
18
17,235
def extract_statements ( self ) : # Look for events that have an AGENT and an AFFECTED, and get the # start and ending text indices for each. query = prefixes + """ SELECT ?agent_start ?agent_end ?affected_start ?affected_end WHERE { ?rel role:AGENT ?agent . ?rel role:AFFECTED ?affected . ?agent lf:start ?agent_start . ?agent lf:end ?agent_end . ?affected lf:start ?affected_start . ?affected lf:end ?affected_end . } """ results = self . graph . query ( query ) for res in results : # Make a statement for each query match self . extract_statement_from_query_result ( res ) # Look for events that have an AGENT and a RESULT, and get the start # and ending text indices for each. query = query . replace ( 'role:AFFECTED' , 'role:RESULT' ) results = self . graph . query ( query ) for res in results : # Make a statement for each query match self . extract_statement_from_query_result ( res )
Extracts INDRA statements from the RDF graph via SPARQL queries .
250
17
17,236
def _recursively_lookup_complex ( self , complex_id ) : assert complex_id in self . complex_map expanded_agent_strings = [ ] expand_these_next = [ complex_id ] while len ( expand_these_next ) > 0 : # Pop next element c = expand_these_next [ 0 ] expand_these_next = expand_these_next [ 1 : ] # If a complex, add expanding it to the end of the queue # If an agent string, add it to the agent string list immediately assert c in self . complex_map for s in self . complex_map [ c ] : if s in self . complex_map : expand_these_next . append ( s ) else : expanded_agent_strings . append ( s ) return expanded_agent_strings
Looks up the constitutents of a complex . If any constituent is itself a complex recursively expands until all constituents are not complexes .
173
28
17,237
def _get_complex_agents ( self , complex_id ) : agents = [ ] components = self . _recursively_lookup_complex ( complex_id ) for c in components : db_refs = { } name = uniprot_client . get_gene_name ( c ) if name is None : db_refs [ 'SIGNOR' ] = c else : db_refs [ 'UP' ] = c hgnc_id = hgnc_client . get_hgnc_id ( name ) if hgnc_id : db_refs [ 'HGNC' ] = hgnc_id famplex_key = ( 'SIGNOR' , c ) if famplex_key in famplex_map : db_refs [ 'FPLX' ] = famplex_map [ famplex_key ] if not name : name = db_refs [ 'FPLX' ] # Set agent name to Famplex name if # the Uniprot name is not available elif not name : # We neither have a Uniprot nor Famplex grounding logger . info ( 'Have neither a Uniprot nor Famplex grounding ' + 'for ' + c ) if not name : name = db_refs [ 'SIGNOR' ] # Set the agent name to the # Signor name if neither the # Uniprot nor Famplex names are # available assert ( name is not None ) agents . append ( Agent ( name , db_refs = db_refs ) ) return agents
Returns a list of agents corresponding to each of the constituents in a SIGNOR complex .
341
17
17,238
def stmts_from_json ( json_in , on_missing_support = 'handle' ) : stmts = [ ] uuid_dict = { } for json_stmt in json_in : try : st = Statement . _from_json ( json_stmt ) except Exception as e : logger . warning ( "Error creating statement: %s" % e ) continue stmts . append ( st ) uuid_dict [ st . uuid ] = st for st in stmts : _promote_support ( st . supports , uuid_dict , on_missing_support ) _promote_support ( st . supported_by , uuid_dict , on_missing_support ) return stmts
Get a list of Statements from Statement jsons .
159
10
17,239
def stmts_to_json_file ( stmts , fname ) : with open ( fname , 'w' ) as fh : json . dump ( stmts_to_json ( stmts ) , fh , indent = 1 )
Serialize a list of INDRA Statements into a JSON file .
57
13
17,240
def stmts_to_json ( stmts_in , use_sbo = False ) : if not isinstance ( stmts_in , list ) : json_dict = stmts_in . to_json ( use_sbo = use_sbo ) return json_dict else : json_dict = [ st . to_json ( use_sbo = use_sbo ) for st in stmts_in ] return json_dict
Return the JSON - serialized form of one or more INDRA Statements .
101
15
17,241
def _promote_support ( sup_list , uuid_dict , on_missing = 'handle' ) : valid_handling_choices = [ 'handle' , 'error' , 'ignore' ] if on_missing not in valid_handling_choices : raise InputError ( 'Invalid option for `on_missing_support`: \'%s\'\n' 'Choices are: %s.' % ( on_missing , str ( valid_handling_choices ) ) ) for idx , uuid in enumerate ( sup_list ) : if uuid in uuid_dict . keys ( ) : sup_list [ idx ] = uuid_dict [ uuid ] elif on_missing == 'handle' : sup_list [ idx ] = Unresolved ( uuid ) elif on_missing == 'ignore' : sup_list . remove ( uuid ) elif on_missing == 'error' : raise UnresolvedUuidError ( "Uuid %s not found in stmt jsons." % uuid ) return
Promote the list of support - related uuids to Statements if possible .
233
16
17,242
def draw_stmt_graph ( stmts ) : import networkx try : import matplotlib . pyplot as plt except Exception : logger . error ( 'Could not import matplotlib, not drawing graph.' ) return try : # This checks whether networkx has this package to work with. import pygraphviz except Exception : logger . error ( 'Could not import pygraphviz, not drawing graph.' ) return import numpy g = networkx . compose_all ( [ stmt . to_graph ( ) for stmt in stmts ] ) plt . figure ( ) plt . ion ( ) g . graph [ 'graph' ] = { 'rankdir' : 'LR' } pos = networkx . drawing . nx_agraph . graphviz_layout ( g , prog = 'dot' ) g = g . to_undirected ( ) # Draw nodes options = { 'marker' : 'o' , 's' : 200 , 'c' : [ 0.85 , 0.85 , 1 ] , 'facecolor' : '0.5' , 'lw' : 0 , } ax = plt . gca ( ) nodelist = list ( g ) xy = numpy . asarray ( [ pos [ v ] for v in nodelist ] ) node_collection = ax . scatter ( xy [ : , 0 ] , xy [ : , 1 ] , * * options ) node_collection . set_zorder ( 2 ) # Draw edges networkx . draw_networkx_edges ( g , pos , arrows = False , edge_color = '0.5' ) # Draw labels edge_labels = { ( e [ 0 ] , e [ 1 ] ) : e [ 2 ] . get ( 'label' ) for e in g . edges ( data = True ) } networkx . draw_networkx_edge_labels ( g , pos , edge_labels = edge_labels ) node_labels = { n [ 0 ] : n [ 1 ] . get ( 'label' ) for n in g . nodes ( data = True ) } for key , label in node_labels . items ( ) : if len ( label ) > 25 : parts = label . split ( ' ' ) parts . insert ( int ( len ( parts ) / 2 ) , '\n' ) label = ' ' . join ( parts ) node_labels [ key ] = label networkx . draw_networkx_labels ( g , pos , labels = node_labels ) ax . get_xaxis ( ) . set_visible ( False ) ax . get_yaxis ( ) . set_visible ( False ) plt . show ( )
Render the attributes of a list of Statements as directed graphs .
584
12
17,243
def _fix_json_agents ( ag_obj ) : if isinstance ( ag_obj , str ) : logger . info ( "Fixing string agent: %s." % ag_obj ) ret = { 'name' : ag_obj , 'db_refs' : { 'TEXT' : ag_obj } } elif isinstance ( ag_obj , list ) : # Recursive for complexes and similar. ret = [ _fix_json_agents ( ag ) for ag in ag_obj ] elif isinstance ( ag_obj , dict ) and 'TEXT' in ag_obj . keys ( ) : ret = deepcopy ( ag_obj ) text = ret . pop ( 'TEXT' ) ret [ 'db_refs' ] [ 'TEXT' ] = text else : ret = ag_obj return ret
Fix the json representation of an agent .
177
8
17,244
def set_statements_pmid ( self , pmid ) : # Replace PMID value in JSON dict first for stmt in self . json_stmts : evs = stmt . get ( 'evidence' , [ ] ) for ev in evs : ev [ 'pmid' ] = pmid # Replace PMID value in extracted Statements next for stmt in self . statements : for ev in stmt . evidence : ev . pmid = pmid
Set the evidence PMID of Statements that have been extracted .
99
12
17,245
def get_args ( node ) : arg_roles = { } args = node . findall ( 'arg' ) + [ node . find ( 'arg1' ) , node . find ( 'arg2' ) , node . find ( 'arg3' ) ] for arg in args : if arg is not None : id = arg . attrib . get ( 'id' ) if id is not None : arg_roles [ arg . attrib [ 'role' ] ] = ( arg . attrib [ 'id' ] , arg ) # Now look at possible inevent links if node . find ( 'features' ) is not None : inevents = node . findall ( 'features/inevent' ) for inevent in inevents : if 'id' in inevent . attrib : arg_roles [ 'inevent' ] = ( inevent . attrib [ 'id' ] , inevent ) ptms = node . findall ( 'features/ptm' ) + node . findall ( 'features/no-ptm' ) for ptm in ptms : if 'id' in inevent . attrib : arg_roles [ 'ptm' ] = ( inevent . attrib [ 'id' ] , ptm ) # And also look for assoc-with links aw = node . find ( 'assoc-with' ) if aw is not None : aw_id = aw . attrib [ 'id' ] arg_roles [ 'assoc-with' ] = ( aw_id , aw ) return arg_roles
Return the arguments of a node in the event graph .
338
11
17,246
def type_match ( a , b ) : # If the types are the same, return True if a [ 'type' ] == b [ 'type' ] : return True # Otherwise, look at some special cases eq_groups = [ { 'ONT::GENE-PROTEIN' , 'ONT::GENE' , 'ONT::PROTEIN' } , { 'ONT::PHARMACOLOGIC-SUBSTANCE' , 'ONT::CHEMICAL' } ] for eq_group in eq_groups : if a [ 'type' ] in eq_group and b [ 'type' ] in eq_group : return True return False
Return True of the types of a and b are compatible False otherwise .
139
14
17,247
def add_graph ( patterns , G ) : if not patterns : patterns . append ( [ G ] ) return for i , graphs in enumerate ( patterns ) : if networkx . is_isomorphic ( graphs [ 0 ] , G , node_match = type_match , edge_match = type_match ) : patterns [ i ] . append ( G ) return patterns . append ( [ G ] )
Add a graph to a set of unique patterns .
85
10
17,248
def draw ( graph , fname ) : ag = networkx . nx_agraph . to_agraph ( graph ) ag . draw ( fname , prog = 'dot' )
Draw a graph and save it into a file
38
9
17,249
def build_event_graph ( graph , tree , node ) : # If we have already added this node then let's return if node_key ( node ) in graph : return type = get_type ( node ) text = get_text ( node ) label = '%s (%s)' % ( type , text ) graph . add_node ( node_key ( node ) , type = type , label = label , text = text ) args = get_args ( node ) for arg_role , ( arg_id , arg_tag ) in args . items ( ) : arg = get_node_by_id ( tree , arg_id ) if arg is None : arg = arg_tag build_event_graph ( graph , tree , arg ) graph . add_edge ( node_key ( node ) , node_key ( arg ) , type = arg_role , label = arg_role )
Return a DiGraph of a specific event structure built recursively
191
13
17,250
def get_extracted_events ( fnames ) : event_list = [ ] for fn in fnames : tp = trips . process_xml_file ( fn ) ed = tp . extracted_events for k , v in ed . items ( ) : event_list += v return event_list
Get a full list of all extracted event IDs from a list of EKB files
65
16
17,251
def check_event_coverage ( patterns , event_list ) : proportions = [ ] for pattern_list in patterns : proportion = 0 for pattern in pattern_list : for node in pattern . nodes ( ) : if node in event_list : proportion += 1.0 / len ( pattern_list ) break proportions . append ( proportion ) return proportions
Calculate the ratio of patterns that were extracted .
73
11
17,252
def map_statements ( self ) : for stmt in self . statements : for agent in stmt . agent_list ( ) : if agent is None : continue all_mappings = [ ] for db_name , db_id in agent . db_refs . items ( ) : if isinstance ( db_id , list ) : db_id = db_id [ 0 ] [ 0 ] mappings = self . _map_id ( db_name , db_id ) all_mappings += mappings for map_db_name , map_db_id , score , orig_db_name in all_mappings : if map_db_name in agent . db_refs : continue if self . scored : # If the original one is a scored grounding, # we take that score and multiply it with the mapping # score. Otherwise we assume the original score is 1. try : orig_score = agent . db_refs [ orig_db_name ] [ 0 ] [ 1 ] except Exception : orig_score = 1.0 agent . db_refs [ map_db_name ] = [ ( map_db_id , score * orig_score ) ] else : if map_db_name in ( 'UN' , 'HUME' ) : agent . db_refs [ map_db_name ] = [ ( map_db_id , 1.0 ) ] else : agent . db_refs [ map_db_name ] = map_db_id
Run the ontology mapping on the statements .
322
9
17,253
def load_grounding_map ( grounding_map_path , ignore_path = None , lineterminator = '\r\n' ) : g_map = { } map_rows = read_unicode_csv ( grounding_map_path , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = '\r\n' ) if ignore_path and os . path . exists ( ignore_path ) : ignore_rows = read_unicode_csv ( ignore_path , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = lineterminator ) else : ignore_rows = [ ] csv_rows = chain ( map_rows , ignore_rows ) for row in csv_rows : key = row [ 0 ] db_refs = { 'TEXT' : key } keys = [ entry for entry in row [ 1 : : 2 ] if entry != '' ] values = [ entry for entry in row [ 2 : : 2 ] if entry != '' ] if len ( keys ) != len ( values ) : logger . info ( 'ERROR: Mismatched keys and values in row %s' % str ( row ) ) continue else : db_refs . update ( dict ( zip ( keys , values ) ) ) if len ( db_refs . keys ( ) ) > 1 : g_map [ key ] = db_refs else : g_map [ key ] = None return g_map
Return a grounding map dictionary loaded from a csv file .
335
12
17,254
def all_agents ( stmts ) : agents = [ ] for stmt in stmts : for agent in stmt . agent_list ( ) : # Agents don't always have a TEXT db_refs entry (for instance # in the case of Statements from databases) so we check for this. if agent is not None and agent . db_refs . get ( 'TEXT' ) is not None : agents . append ( agent ) return agents
Return a list of all of the agents from a list of statements .
96
14
17,255
def get_sentences_for_agent ( text , stmts , max_sentences = None ) : sentences = [ ] for stmt in stmts : for agent in stmt . agent_list ( ) : if agent is not None and agent . db_refs . get ( 'TEXT' ) == text : sentences . append ( ( stmt . evidence [ 0 ] . pmid , stmt . evidence [ 0 ] . text ) ) if max_sentences is not None and len ( sentences ) >= max_sentences : return sentences return sentences
Returns evidence sentences with a given agent text from a list of statements
120
13
17,256
def agent_texts_with_grounding ( stmts ) : allag = all_agents ( stmts ) # Convert PFAM-DEF lists into tuples so that they are hashable and can # be tabulated with a Counter for ag in allag : pfam_def = ag . db_refs . get ( 'PFAM-DEF' ) if pfam_def is not None : ag . db_refs [ 'PFAM-DEF' ] = tuple ( pfam_def ) refs = [ tuple ( ag . db_refs . items ( ) ) for ag in allag ] refs_counter = Counter ( refs ) refs_counter_dict = [ ( dict ( entry [ 0 ] ) , entry [ 1 ] ) for entry in refs_counter . items ( ) ] # First, sort by text so that we can do a groupby refs_counter_dict . sort ( key = lambda x : x [ 0 ] . get ( 'TEXT' ) ) # Then group by text grouped_by_text = [ ] for k , g in groupby ( refs_counter_dict , key = lambda x : x [ 0 ] . get ( 'TEXT' ) ) : # Total occurrences of this agent text total = 0 entry = [ k ] db_ref_list = [ ] for db_refs , count in g : # Check if TEXT is our only key, indicating no grounding if list ( db_refs . keys ( ) ) == [ 'TEXT' ] : db_ref_list . append ( ( None , None , count ) ) # Add any other db_refs (not TEXT) for db , db_id in db_refs . items ( ) : if db == 'TEXT' : continue else : db_ref_list . append ( ( db , db_id , count ) ) total += count # Sort the db_ref_list by the occurrences of each grounding entry . append ( tuple ( sorted ( db_ref_list , key = lambda x : x [ 2 ] , reverse = True ) ) ) # Now add the total frequency to the entry entry . append ( total ) # And add the entry to the overall list grouped_by_text . append ( tuple ( entry ) ) # Sort the list by the total number of occurrences of each unique key grouped_by_text . sort ( key = lambda x : x [ 2 ] , reverse = True ) return grouped_by_text
Return agent text groundings in a list of statements with their counts
525
13
17,257
def ungrounded_texts ( stmts ) : ungrounded = [ ag . db_refs [ 'TEXT' ] for s in stmts for ag in s . agent_list ( ) if ag is not None and list ( ag . db_refs . keys ( ) ) == [ 'TEXT' ] ] ungroundc = Counter ( ungrounded ) ungroundc = ungroundc . items ( ) ungroundc = sorted ( ungroundc , key = lambda x : x [ 1 ] , reverse = True ) return ungroundc
Return a list of all ungrounded entities ordered by number of mentions
120
14
17,258
def get_agents_with_name ( name , stmts ) : return [ ag for stmt in stmts for ag in stmt . agent_list ( ) if ag is not None and ag . name == name ]
Return all agents within a list of statements with a particular name .
49
13
17,259
def save_base_map ( filename , grouped_by_text ) : rows = [ ] for group in grouped_by_text : text_string = group [ 0 ] for db , db_id , count in group [ 1 ] : if db == 'UP' : name = uniprot_client . get_mnemonic ( db_id ) else : name = '' row = [ text_string , db , db_id , count , name ] rows . append ( row ) write_unicode_csv ( filename , rows , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = '\r\n' )
Dump a list of agents along with groundings and counts into a csv file
149
17
17,260
def protein_map_from_twg ( twg ) : protein_map = { } unmatched = 0 matched = 0 logger . info ( 'Building grounding map for human proteins' ) for agent_text , grounding_list , _ in twg : # If 'UP' (Uniprot) not one of the grounding entries for this text, # then we skip it. if 'UP' not in [ entry [ 0 ] for entry in grounding_list ] : continue # Otherwise, collect all the Uniprot IDs for this protein. uniprot_ids = [ entry [ 1 ] for entry in grounding_list if entry [ 0 ] == 'UP' ] # For each Uniprot ID, look up the species for uniprot_id in uniprot_ids : # If it's not a human protein, skip it mnemonic = uniprot_client . get_mnemonic ( uniprot_id ) if mnemonic is None or not mnemonic . endswith ( '_HUMAN' ) : continue # Otherwise, look up the gene name in HGNC and match against the # agent text gene_name = uniprot_client . get_gene_name ( uniprot_id ) if gene_name is None : unmatched += 1 continue if agent_text . upper ( ) == gene_name . upper ( ) : matched += 1 protein_map [ agent_text ] = { 'TEXT' : agent_text , 'UP' : uniprot_id } else : unmatched += 1 logger . info ( 'Exact matches for %d proteins' % matched ) logger . info ( 'No match (or no gene name) for %d proteins' % unmatched ) return protein_map
Build map of entity texts to validate protein grounding .
368
10
17,261
def save_sentences ( twg , stmts , filename , agent_limit = 300 ) : sentences = [ ] unmapped_texts = [ t [ 0 ] for t in twg ] counter = 0 logger . info ( 'Getting sentences for top %d unmapped agent texts.' % agent_limit ) for text in unmapped_texts : agent_sentences = get_sentences_for_agent ( text , stmts ) sentences += map ( lambda tup : ( text , ) + tup , agent_sentences ) counter += 1 if counter >= agent_limit : break # Write sentences to CSV file write_unicode_csv ( filename , sentences , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = '\r\n' )
Write evidence sentences for stmts with ungrounded agents to csv file .
180
17
17,262
def _get_text_for_grounding ( stmt , agent_text ) : text = None # First we will try to get content from the DB try : from indra_db . util . content_scripts import get_text_content_from_text_refs from indra . literature . deft_tools import universal_extract_text refs = stmt . evidence [ 0 ] . text_refs # Prioritize the pmid attribute if given if stmt . evidence [ 0 ] . pmid : refs [ 'PMID' ] = stmt . evidence [ 0 ] . pmid logger . info ( 'Obtaining text for disambiguation with refs: %s' % refs ) content = get_text_content_from_text_refs ( refs ) text = universal_extract_text ( content , contains = agent_text ) if text : return text except Exception as e : logger . info ( 'Could not get text for disambiguation from DB.' ) # If that doesn't work, we try PubMed next if text is None : from indra . literature import pubmed_client pmid = stmt . evidence [ 0 ] . pmid if pmid : logger . info ( 'Obtaining abstract for disambiguation for PMID%s' % pmid ) text = pubmed_client . get_abstract ( pmid ) if text : return text # Finally, falling back on the evidence sentence if text is None : logger . info ( 'Falling back on sentence-based disambiguation' ) text = stmt . evidence [ 0 ] . text return text return None
Get text context for Deft disambiguation
351
10
17,263
def update_agent_db_refs ( self , agent , agent_text , do_rename = True ) : map_db_refs = deepcopy ( self . gm . get ( agent_text ) ) self . standardize_agent_db_refs ( agent , map_db_refs , do_rename )
Update db_refs of agent using the grounding map
74
11
17,264
def map_agents_for_stmt ( self , stmt , do_rename = True ) : mapped_stmt = deepcopy ( stmt ) # Iterate over the agents # Update agents directly participating in the statement agent_list = mapped_stmt . agent_list ( ) for idx , agent in enumerate ( agent_list ) : if agent is None : continue agent_txt = agent . db_refs . get ( 'TEXT' ) if agent_txt is None : continue new_agent , maps_to_none = self . map_agent ( agent , do_rename ) # Check if a deft model exists for agent text if self . use_deft and agent_txt in deft_disambiguators : try : run_deft_disambiguation ( mapped_stmt , agent_list , idx , new_agent , agent_txt ) except Exception as e : logger . error ( 'There was an error during Deft' ' disambiguation.' ) logger . error ( e ) if maps_to_none : # Skip the entire statement if the agent maps to None in the # grounding map return None # If the old agent had bound conditions, but the new agent does # not, copy the bound conditions over if new_agent is not None and len ( new_agent . bound_conditions ) == 0 : new_agent . bound_conditions = agent . bound_conditions agent_list [ idx ] = new_agent mapped_stmt . set_agent_list ( agent_list ) # Update agents in the bound conditions for agent in agent_list : if agent is not None : for bc in agent . bound_conditions : bc . agent , maps_to_none = self . map_agent ( bc . agent , do_rename ) if maps_to_none : # Skip the entire statement if the agent maps to None # in the grounding map return None return mapped_stmt
Return a new Statement whose agents have been grounding mapped .
418
11
17,265
def map_agent ( self , agent , do_rename ) : agent_text = agent . db_refs . get ( 'TEXT' ) mapped_to_agent_json = self . agent_map . get ( agent_text ) if mapped_to_agent_json : mapped_to_agent = Agent . _from_json ( mapped_to_agent_json [ 'agent' ] ) return mapped_to_agent , False # Look this string up in the grounding map # If not in the map, leave agent alone and continue if agent_text in self . gm . keys ( ) : map_db_refs = self . gm [ agent_text ] else : return agent , False # If it's in the map but it maps to None, then filter out # this statement by skipping it if map_db_refs is None : # Increase counter if this statement has not already # been skipped via another agent logger . debug ( "Skipping %s" % agent_text ) return None , True # If it has a value that's not None, map it and add it else : # Otherwise, update the agent's db_refs field self . update_agent_db_refs ( agent , agent_text , do_rename ) return agent , False
Return the given Agent with its grounding mapped .
275
9
17,266
def map_agents ( self , stmts , do_rename = True ) : # Make a copy of the stmts mapped_stmts = [ ] num_skipped = 0 # Iterate over the statements for stmt in stmts : mapped_stmt = self . map_agents_for_stmt ( stmt , do_rename ) # Check if we should skip the statement if mapped_stmt is not None : mapped_stmts . append ( mapped_stmt ) else : num_skipped += 1 logger . info ( '%s statements filtered out' % num_skipped ) return mapped_stmts
Return a new list of statements whose agents have been mapped
141
11
17,267
def rename_agents ( self , stmts ) : # Make a copy of the stmts mapped_stmts = deepcopy ( stmts ) # Iterate over the statements for _ , stmt in enumerate ( mapped_stmts ) : # Iterate over the agents for agent in stmt . agent_list ( ) : if agent is None : continue # If there's a FamPlex ID, prefer that for the name if agent . db_refs . get ( 'FPLX' ) : agent . name = agent . db_refs . get ( 'FPLX' ) # Take a HGNC name from Uniprot next elif agent . db_refs . get ( 'UP' ) : # Try for the gene name gene_name = uniprot_client . get_gene_name ( agent . db_refs . get ( 'UP' ) , web_fallback = False ) if gene_name : agent . name = gene_name hgnc_id = hgnc_client . get_hgnc_id ( gene_name ) if hgnc_id : agent . db_refs [ 'HGNC' ] = hgnc_id # Take the text string #if agent.db_refs.get('TEXT'): # agent.name = agent.db_refs.get('TEXT') # If this fails, then we continue with no change # Fall back to the text string #elif agent.db_refs.get('TEXT'): # agent.name = agent.db_refs.get('TEXT') return mapped_stmts
Return a list of mapped statements with updated agent names .
353
11
17,268
def get_complexes ( self , cplx_df ) : # Group the agents for the complex logger . info ( 'Processing complexes...' ) for cplx_id , this_cplx in cplx_df . groupby ( 'CPLX_ID' ) : agents = [ ] for hprd_id in this_cplx . HPRD_ID : ag = self . _make_agent ( hprd_id ) if ag is not None : agents . append ( ag ) # Make sure we got some agents! if not agents : continue # Get evidence info from first member of complex row0 = this_cplx . iloc [ 0 ] isoform_id = '%s_1' % row0 . HPRD_ID ev_list = self . _get_evidence ( row0 . HPRD_ID , isoform_id , row0 . PMIDS , row0 . EVIDENCE , 'interactions' ) stmt = Complex ( agents , evidence = ev_list ) self . statements . append ( stmt )
Generate Complex Statements from the HPRD protein complexes data .
235
13
17,269
def get_ptms ( self , ptm_df ) : logger . info ( 'Processing PTMs...' ) # Iterate over the rows of the dataframe for ix , row in ptm_df . iterrows ( ) : # Check the modification type; if we can't make an INDRA statement # for it, then skip it ptm_class = _ptm_map [ row [ 'MOD_TYPE' ] ] if ptm_class is None : continue # Use the Refseq protein ID for the substrate to make sure that # we get the right Uniprot ID for the isoform sub_ag = self . _make_agent ( row [ 'HPRD_ID' ] , refseq_id = row [ 'REFSEQ_PROTEIN' ] ) # If we couldn't get the substrate, skip the statement if sub_ag is None : continue enz_id = _nan_to_none ( row [ 'ENZ_HPRD_ID' ] ) enz_ag = self . _make_agent ( enz_id ) res = _nan_to_none ( row [ 'RESIDUE' ] ) pos = _nan_to_none ( row [ 'POSITION' ] ) if pos is not None and ';' in pos : pos , dash = pos . split ( ';' ) assert dash == '-' # As a fallback for later site mapping, we also get the protein # sequence information in case there was a problem with the # RefSeq->Uniprot mapping assert res assert pos motif_dict = self . _get_seq_motif ( row [ 'REFSEQ_PROTEIN' ] , res , pos ) # Get evidence ev_list = self . _get_evidence ( row [ 'HPRD_ID' ] , row [ 'HPRD_ISOFORM' ] , row [ 'PMIDS' ] , row [ 'EVIDENCE' ] , 'ptms' , motif_dict ) stmt = ptm_class ( enz_ag , sub_ag , res , pos , evidence = ev_list ) self . statements . append ( stmt )
Generate Modification statements from the HPRD PTM data .
462
14
17,270
def get_ppis ( self , ppi_df ) : logger . info ( 'Processing PPIs...' ) for ix , row in ppi_df . iterrows ( ) : agA = self . _make_agent ( row [ 'HPRD_ID_A' ] ) agB = self . _make_agent ( row [ 'HPRD_ID_B' ] ) # If don't get valid agents for both, skip this PPI if agA is None or agB is None : continue isoform_id = '%s_1' % row [ 'HPRD_ID_A' ] ev_list = self . _get_evidence ( row [ 'HPRD_ID_A' ] , isoform_id , row [ 'PMIDS' ] , row [ 'EVIDENCE' ] , 'interactions' ) stmt = Complex ( [ agA , agB ] , evidence = ev_list ) self . statements . append ( stmt )
Generate Complex Statements from the HPRD PPI data .
217
13
17,271
def _build_verb_statement_mapping ( ) : path_this = os . path . dirname ( os . path . abspath ( __file__ ) ) map_path = os . path . join ( path_this , 'isi_verb_to_indra_statement_type.tsv' ) with open ( map_path , 'r' ) as f : first_line = True verb_to_statement_type = { } for line in f : if not first_line : line = line [ : - 1 ] tokens = line . split ( '\t' ) if len ( tokens ) == 2 and len ( tokens [ 1 ] ) > 0 : verb = tokens [ 0 ] s_type = tokens [ 1 ] try : statement_class = getattr ( ist , s_type ) verb_to_statement_type [ verb ] = statement_class except Exception : pass else : first_line = False return verb_to_statement_type
Build the mapping between ISI verb strings and INDRA statement classes .
208
13
17,272
def get_statements ( self ) : for k , v in self . reader_output . items ( ) : for interaction in v [ 'interactions' ] : self . _process_interaction ( k , interaction , v [ 'text' ] , self . pmid , self . extra_annotations )
Process reader output to produce INDRA Statements .
66
9
17,273
def _process_interaction ( self , source_id , interaction , text , pmid , extra_annotations ) : verb = interaction [ 0 ] . lower ( ) subj = interaction [ - 2 ] obj = interaction [ - 1 ] # Make ungrounded agent objects for the subject and object # Grounding will happen after all statements are extracted in __init__ subj = self . _make_agent ( subj ) obj = self . _make_agent ( obj ) # Make an evidence object annotations = deepcopy ( extra_annotations ) if 'interaction' in extra_annotations : logger . warning ( "'interaction' key of extra_annotations ignored" + " since this is reserved for storing the raw ISI " + "input." ) annotations [ 'source_id' ] = source_id annotations [ 'interaction' ] = interaction ev = ist . Evidence ( source_api = 'isi' , pmid = pmid , text = text . rstrip ( ) , annotations = annotations ) # For binding time interactions, it is said that a catayst might be # specified. We don't use this for now, but extract in case we want # to in the future cataylst_specified = False if len ( interaction ) == 4 : catalyst = interaction [ 1 ] if catalyst is not None : cataylst_specified = True self . verbs . add ( verb ) statement = None if verb in verb_to_statement_type : statement_class = verb_to_statement_type [ verb ] if statement_class == ist . Complex : statement = ist . Complex ( [ subj , obj ] , evidence = ev ) else : statement = statement_class ( subj , obj , evidence = ev ) if statement is not None : # For Complex statements, the ISI reader produces two events: # binds(A, B) and binds(B, A) # We want only one Complex statement for each sentence, so check # to see if we already have a Complex for this source_id with the # same members already_have = False if type ( statement ) == ist . Complex : for old_s in self . statements : old_id = statement . evidence [ 0 ] . source_id new_id = old_s . evidence [ 0 ] . source_id if type ( old_s ) == ist . Complex and old_id == new_id : old_statement_members = [ m . db_refs [ 'TEXT' ] for m in old_s . members ] old_statement_members = sorted ( old_statement_members ) new_statement_members = [ m . db_refs [ 'TEXT' ] for m in statement . members ] new_statement_members = sorted ( new_statement_members ) if old_statement_members == new_statement_members : already_have = True break if not already_have : self . statements . append ( statement )
Process an interaction JSON tuple from the ISI output and adds up to one statement to the list of extracted statements .
623
22
17,274
def make_annotation ( self ) : annotation = dict ( ) # Put all properties of the action object into the annotation for item in dir ( self ) : if len ( item ) > 0 and item [ 0 ] != '_' and not inspect . ismethod ( getattr ( self , item ) ) : annotation [ item ] = getattr ( self , item ) return annotation
Returns a dictionary with all properties of the action mention .
79
11
17,275
def _match_to_array ( m ) : return [ _cast_biopax_element ( m . get ( i ) ) for i in range ( m . varSize ( ) ) ]
Returns an array consisting of the elements obtained from a pattern search cast into their appropriate classes .
42
18
17,276
def _is_complex ( pe ) : val = isinstance ( pe , _bp ( 'Complex' ) ) or isinstance ( pe , _bpimpl ( 'Complex' ) ) return val
Return True if the physical entity is a complex
43
9
17,277
def _is_protein ( pe ) : val = isinstance ( pe , _bp ( 'Protein' ) ) or isinstance ( pe , _bpimpl ( 'Protein' ) ) or isinstance ( pe , _bp ( 'ProteinReference' ) ) or isinstance ( pe , _bpimpl ( 'ProteinReference' ) ) return val
Return True if the element is a protein
76
8
17,278
def _is_rna ( pe ) : val = isinstance ( pe , _bp ( 'Rna' ) ) or isinstance ( pe , _bpimpl ( 'Rna' ) ) return val
Return True if the element is an RNA
44
8
17,279
def _is_small_molecule ( pe ) : val = isinstance ( pe , _bp ( 'SmallMolecule' ) ) or isinstance ( pe , _bpimpl ( 'SmallMolecule' ) ) or isinstance ( pe , _bp ( 'SmallMoleculeReference' ) ) or isinstance ( pe , _bpimpl ( 'SmallMoleculeReference' ) ) return val
Return True if the element is a small molecule
88
9
17,280
def _is_physical_entity ( pe ) : val = isinstance ( pe , _bp ( 'PhysicalEntity' ) ) or isinstance ( pe , _bpimpl ( 'PhysicalEntity' ) ) return val
Return True if the element is a physical entity
45
9
17,281
def _is_modification_or_activity ( feature ) : if not ( isinstance ( feature , _bp ( 'ModificationFeature' ) ) or isinstance ( feature , _bpimpl ( 'ModificationFeature' ) ) ) : return None mf_type = feature . getModificationType ( ) if mf_type is None : return None mf_type_terms = mf_type . getTerm ( ) . toArray ( ) for term in mf_type_terms : if term in ( 'residue modification, active' , 'residue modification, inactive' , 'active' , 'inactive' ) : return 'activity' return 'modification'
Return True if the feature is a modification
148
8
17,282
def _is_reference ( bpe ) : if isinstance ( bpe , _bp ( 'ProteinReference' ) ) or isinstance ( bpe , _bpimpl ( 'ProteinReference' ) ) or isinstance ( bpe , _bp ( 'SmallMoleculeReference' ) ) or isinstance ( bpe , _bpimpl ( 'SmallMoleculeReference' ) ) or isinstance ( bpe , _bp ( 'RnaReference' ) ) or isinstance ( bpe , _bpimpl ( 'RnaReference' ) ) or isinstance ( bpe , _bp ( 'EntityReference' ) ) or isinstance ( bpe , _bpimpl ( 'EntityReference' ) ) : return True else : return False
Return True if the element is an entity reference .
159
10
17,283
def _is_entity ( bpe ) : if isinstance ( bpe , _bp ( 'Protein' ) ) or isinstance ( bpe , _bpimpl ( 'Protein' ) ) or isinstance ( bpe , _bp ( 'SmallMolecule' ) ) or isinstance ( bpe , _bpimpl ( 'SmallMolecule' ) ) or isinstance ( bpe , _bp ( 'Complex' ) ) or isinstance ( bpe , _bpimpl ( 'Complex' ) ) or isinstance ( bpe , _bp ( 'Rna' ) ) or isinstance ( bpe , _bpimpl ( 'Rna' ) ) or isinstance ( bpe , _bp ( 'RnaRegion' ) ) or isinstance ( bpe , _bpimpl ( 'RnaRegion' ) ) or isinstance ( bpe , _bp ( 'DnaRegion' ) ) or isinstance ( bpe , _bpimpl ( 'DnaRegion' ) ) or isinstance ( bpe , _bp ( 'PhysicalEntity' ) ) or isinstance ( bpe , _bpimpl ( 'PhysicalEntity' ) ) : return True else : return False
Return True if the element is a physical entity .
256
10
17,284
def _is_catalysis ( bpe ) : if isinstance ( bpe , _bp ( 'Catalysis' ) ) or isinstance ( bpe , _bpimpl ( 'Catalysis' ) ) : return True else : return False
Return True if the element is Catalysis .
51
9
17,285
def print_statements ( self ) : for i , stmt in enumerate ( self . statements ) : print ( "%s: %s" % ( i , stmt ) )
Print all INDRA Statements collected by the processors .
39
10
17,286
def save_model ( self , file_name = None ) : if file_name is None : logger . error ( 'Missing file name' ) return pcc . model_to_owl ( self . model , file_name )
Save the BioPAX model object in an OWL file .
49
13
17,287
def eliminate_exact_duplicates ( self ) : # Here we use the deep hash of each Statement, and by making a dict, # we effectively keep only one Statement with a given deep hash self . statements = list ( { stmt . get_hash ( shallow = False , refresh = True ) : stmt for stmt in self . statements } . values ( ) )
Eliminate Statements that were extracted multiple times .
80
10
17,288
def get_complexes ( self ) : for obj in self . model . getObjects ( ) . toArray ( ) : bpe = _cast_biopax_element ( obj ) if not _is_complex ( bpe ) : continue ev = self . _get_evidence ( bpe ) members = self . _get_complex_members ( bpe ) if members is not None : if len ( members ) > 10 : logger . debug ( 'Skipping complex with more than 10 members.' ) continue complexes = _get_combinations ( members ) for c in complexes : self . statements . append ( decode_obj ( Complex ( c , ev ) , encoding = 'utf-8' ) )
Extract INDRA Complex Statements from the BioPAX model .
151
13
17,289
def get_modifications ( self ) : for modtype , modclass in modtype_to_modclass . items ( ) : # TODO: we could possibly try to also extract generic # modifications here if modtype == 'modification' : continue stmts = self . _get_generic_modification ( modclass ) self . statements += stmts
Extract INDRA Modification Statements from the BioPAX model .
77
14
17,290
def get_activity_modification ( self ) : mod_filter = 'residue modification, active' for is_active in [ True , False ] : p = self . _construct_modification_pattern ( ) rel = mcct . GAIN if is_active else mcct . LOSS p . add ( mcc ( rel , mod_filter ) , "input simple PE" , "output simple PE" ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] for r in res_array : reaction = r [ p . indexOf ( 'Conversion' ) ] activity = 'activity' input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] # Get the modifications mod_in = BiopaxProcessor . _get_entity_mods ( input_spe ) mod_out = BiopaxProcessor . _get_entity_mods ( output_spe ) mod_shared = _get_mod_intersection ( mod_in , mod_out ) gained_mods = _get_mod_difference ( mod_out , mod_in ) # Here we get the evidence for the BiochemicalReaction ev = self . _get_evidence ( reaction ) agents = self . _get_agents_from_entity ( output_spe ) for agent in _listify ( agents ) : static_mods = _get_mod_difference ( agent . mods , gained_mods ) # NOTE: with the ActiveForm representation we cannot # separate static_mods and gained_mods. We assume here # that the static_mods are inconsequential and therefore # are not mentioned as an Agent condition, following # don't care don't write semantics. Therefore only the # gained_mods are listed in the ActiveForm as Agent # conditions. if gained_mods : agent . mods = gained_mods stmt = ActiveForm ( agent , activity , is_active , evidence = ev ) self . statements . append ( decode_obj ( stmt , encoding = 'utf-8' ) )
Extract INDRA ActiveForm statements from the BioPAX model .
482
14
17,291
def get_regulate_amounts ( self ) : p = pb . controlsExpressionWithTemplateReac ( ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] stmts = [ ] for res in res_array : # FIXME: for some reason labels are not accessible # for these queries. It would be more reliable # to get results by label instead of index. ''' controller_er = res[p.indexOf('controller ER')] generic_controller_er = res[p.indexOf('generic controller ER')] controller_simple_pe = res[p.indexOf('controller simple PE')] controller_pe = res[p.indexOf('controller PE')] control = res[p.indexOf('Control')] conversion = res[p.indexOf('Conversion')] input_pe = res[p.indexOf('input PE')] input_simple_pe = res[p.indexOf('input simple PE')] changed_generic_er = res[p.indexOf('changed generic ER')] output_pe = res[p.indexOf('output PE')] output_simple_pe = res[p.indexOf('output simple PE')] changed_er = res[p.indexOf('changed ER')] ''' # TODO: here, res[3] is the complex physical entity # for instance http://pathwaycommons.org/pc2/ # Complex_43c6b8330562c1b411d21e9d1185bae9 # consists of 3 components: JUN, FOS and NFAT # where NFAT further contains 3 member physical entities. # # However, res[2] iterates over all 5 member physical entities # of the complex which doesn't represent the underlying # structure faithfully. It would be better to use res[3] # (the complex itself) and look at components and then # members. However, then, it would not be clear how to # construct an INDRA Agent for the controller. controller = self . _get_agents_from_entity ( res [ 2 ] ) controlled_pe = res [ 6 ] controlled = self . _get_agents_from_entity ( controlled_pe ) conversion = res [ 5 ] direction = conversion . getTemplateDirection ( ) if direction is not None : direction = direction . name ( ) if direction != 'FORWARD' : logger . warning ( 'Unhandled conversion direction %s' % direction ) continue # Sometimes interaction type is annotated as # term=='TRANSCRIPTION'. Other times this is not # annotated. int_type = conversion . getInteractionType ( ) . toArray ( ) if int_type : for it in int_type : for term in it . getTerm ( ) . toArray ( ) : pass control = res [ 4 ] control_type = control . getControlType ( ) if control_type : control_type = control_type . name ( ) ev = self . _get_evidence ( control ) for subj , obj in itertools . product ( _listify ( controller ) , _listify ( controlled ) ) : subj_act = ActivityCondition ( 'transcription' , True ) subj . activity = subj_act if control_type == 'ACTIVATION' : st = IncreaseAmount ( subj , obj , evidence = ev ) elif control_type == 'INHIBITION' : st = DecreaseAmount ( subj , obj , evidence = ev ) else : logger . warning ( 'Unhandled control type %s' % control_type ) continue st_dec = decode_obj ( st , encoding = 'utf-8' ) self . statements . append ( st_dec )
Extract INDRA RegulateAmount Statements from the BioPAX model .
832
15
17,292
def get_gef ( self ) : p = self . _gef_gap_base ( ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] for r in res_array : controller_pe = r [ p . indexOf ( 'controller PE' ) ] input_pe = r [ p . indexOf ( 'input PE' ) ] input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_pe = r [ p . indexOf ( 'output PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] reaction = r [ p . indexOf ( 'Conversion' ) ] control = r [ p . indexOf ( 'Control' ) ] # Make sure the GEF is not a complex # TODO: it could be possible to extract certain complexes here, for # instance ones that only have a single protein if _is_complex ( controller_pe ) : continue members_in = self . _get_complex_members ( input_pe ) members_out = self . _get_complex_members ( output_pe ) if not ( members_in and members_out ) : continue # Make sure the outgoing complex has exactly 2 members # TODO: by finding matching proteins on either side, in principle # it would be possible to find Gef relationships in complexes # with more members if len ( members_out ) != 2 : continue # Make sure complex starts with GDP that becomes GTP gdp_in = False for member in members_in : if isinstance ( member , Agent ) and member . name == 'GDP' : gdp_in = True gtp_out = False for member in members_out : if isinstance ( member , Agent ) and member . name == 'GTP' : gtp_out = True if not ( gdp_in and gtp_out ) : continue ras_list = self . _get_agents_from_entity ( input_spe ) gef_list = self . _get_agents_from_entity ( controller_pe ) ev = self . _get_evidence ( control ) for gef , ras in itertools . product ( _listify ( gef_list ) , _listify ( ras_list ) ) : st = Gef ( gef , ras , evidence = ev ) st_dec = decode_obj ( st , encoding = 'utf-8' ) self . statements . append ( st_dec )
Extract Gef INDRA Statements from the BioPAX model .
566
14
17,293
def get_gap ( self ) : p = self . _gef_gap_base ( ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] for r in res_array : controller_pe = r [ p . indexOf ( 'controller PE' ) ] input_pe = r [ p . indexOf ( 'input PE' ) ] input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_pe = r [ p . indexOf ( 'output PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] reaction = r [ p . indexOf ( 'Conversion' ) ] control = r [ p . indexOf ( 'Control' ) ] # Make sure the GAP is not a complex # TODO: it could be possible to extract certain complexes here, for # instance ones that only have a single protein if _is_complex ( controller_pe ) : continue members_in = self . _get_complex_members ( input_pe ) members_out = self . _get_complex_members ( output_pe ) if not ( members_in and members_out ) : continue # Make sure the outgoing complex has exactly 2 members # TODO: by finding matching proteins on either side, in principle # it would be possible to find Gap relationships in complexes # with more members if len ( members_out ) != 2 : continue # Make sure complex starts with GDP that becomes GTP gtp_in = False for member in members_in : if isinstance ( member , Agent ) and member . name == 'GTP' : gtp_in = True gdp_out = False for member in members_out : if isinstance ( member , Agent ) and member . name == 'GDP' : gdp_out = True if not ( gtp_in and gdp_out ) : continue ras_list = self . _get_agents_from_entity ( input_spe ) gap_list = self . _get_agents_from_entity ( controller_pe ) ev = self . _get_evidence ( control ) for gap , ras in itertools . product ( _listify ( gap_list ) , _listify ( ras_list ) ) : st = Gap ( gap , ras , evidence = ev ) st_dec = decode_obj ( st , encoding = 'utf-8' ) self . statements . append ( st_dec )
Extract Gap INDRA Statements from the BioPAX model .
559
13
17,294
def _get_entity_mods ( bpe ) : if _is_entity ( bpe ) : features = bpe . getFeature ( ) . toArray ( ) else : features = bpe . getEntityFeature ( ) . toArray ( ) mods = [ ] for feature in features : if not _is_modification ( feature ) : continue mc = BiopaxProcessor . _extract_mod_from_feature ( feature ) if mc is not None : mods . append ( mc ) return mods
Get all the modifications of an entity in INDRA format
108
11
17,295
def _get_generic_modification ( self , mod_class ) : mod_type = modclass_to_modtype [ mod_class ] if issubclass ( mod_class , RemoveModification ) : mod_gain_const = mcct . LOSS mod_type = modtype_to_inverse [ mod_type ] else : mod_gain_const = mcct . GAIN mod_filter = mod_type [ : 5 ] # Start with a generic modification pattern p = BiopaxProcessor . _construct_modification_pattern ( ) p . add ( mcc ( mod_gain_const , mod_filter ) , "input simple PE" , "output simple PE" ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] stmts = [ ] for r in res_array : controller_pe = r [ p . indexOf ( 'controller PE' ) ] input_pe = r [ p . indexOf ( 'input PE' ) ] input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] reaction = r [ p . indexOf ( 'Conversion' ) ] control = r [ p . indexOf ( 'Control' ) ] if not _is_catalysis ( control ) : continue cat_dir = control . getCatalysisDirection ( ) if cat_dir is not None and cat_dir . name ( ) != 'LEFT_TO_RIGHT' : logger . debug ( 'Unexpected catalysis direction: %s.' % control . getCatalysisDirection ( ) ) continue enzs = BiopaxProcessor . _get_primary_controller ( controller_pe ) if not enzs : continue ''' if _is_complex(input_pe): sub_members_in = self._get_complex_members(input_pe) sub_members_out = self._get_complex_members(output_pe) # TODO: It is possible to find which member of the complex is # actually modified. That member will be the substrate and # all other members of the complex will be bound to it. logger.info('Cannot handle complex substrates.') continue ''' subs = BiopaxProcessor . _get_agents_from_entity ( input_spe , expand_pe = False ) ev = self . _get_evidence ( control ) for enz , sub in itertools . product ( _listify ( enzs ) , _listify ( subs ) ) : # Get the modifications mod_in = BiopaxProcessor . _get_entity_mods ( input_spe ) mod_out = BiopaxProcessor . _get_entity_mods ( output_spe ) sub . mods = _get_mod_intersection ( mod_in , mod_out ) if issubclass ( mod_class , AddModification ) : gained_mods = _get_mod_difference ( mod_out , mod_in ) else : gained_mods = _get_mod_difference ( mod_in , mod_out ) for mod in gained_mods : # Is it guaranteed that these are all modifications # of the type we are extracting? if mod . mod_type not in ( mod_type , modtype_to_inverse [ mod_type ] ) : continue stmt = mod_class ( enz , sub , mod . residue , mod . position , evidence = ev ) stmts . append ( decode_obj ( stmt , encoding = 'utf-8' ) ) return stmts
Get all modification reactions given a Modification class .
805
10
17,296
def _construct_modification_pattern ( ) : # The following constraints were pieced together based on the # following two higher level constrains: pb.controlsStateChange(), # pb.controlsPhosphorylation(). p = _bpp ( 'Pattern' ) ( _bpimpl ( 'PhysicalEntity' ) ( ) . getModelInterface ( ) , 'controller PE' ) # Getting the control itself p . add ( cb . peToControl ( ) , "controller PE" , "Control" ) # Link the control to the conversion that it controls p . add ( cb . controlToConv ( ) , "Control" , "Conversion" ) # The controller shouldn't be a participant of the conversion p . add ( _bpp ( 'constraint.NOT' ) ( cb . participant ( ) ) , "Conversion" , "controller PE" ) # Get the input participant of the conversion p . add ( pt ( rt . INPUT , True ) , "Control" , "Conversion" , "input PE" ) # Get the specific PhysicalEntity p . add ( cb . linkToSpecific ( ) , "input PE" , "input simple PE" ) # Link to ER p . add ( cb . peToER ( ) , "input simple PE" , "input simple ER" ) # Make sure the participant is a protein p . add ( tp ( _bpimpl ( 'Protein' ) ( ) . getModelInterface ( ) ) , "input simple PE" ) # Link to the other side of the conversion p . add ( cs ( cst . OTHER_SIDE ) , "input PE" , "Conversion" , "output PE" ) # Make sure the two sides are not the same p . add ( _bpp ( 'constraint.Equality' ) ( False ) , "input PE" , "output PE" ) # Get the specific PhysicalEntity p . add ( cb . linkToSpecific ( ) , "output PE" , "output simple PE" ) # Link to ER p . add ( cb . peToER ( ) , "output simple PE" , "output simple ER" ) p . add ( _bpp ( 'constraint.Equality' ) ( True ) , "input simple ER" , "output simple ER" ) # Make sure the output is a Protein p . add ( tp ( _bpimpl ( 'Protein' ) ( ) . getModelInterface ( ) ) , "output simple PE" ) p . add ( _bpp ( 'constraint.NOT' ) ( cb . linkToSpecific ( ) ) , "input PE" , "output simple PE" ) p . add ( _bpp ( 'constraint.NOT' ) ( cb . linkToSpecific ( ) ) , "output PE" , "input simple PE" ) return p
Construct the BioPAX pattern to extract modification reactions .
616
11
17,297
def _extract_mod_from_feature ( mf ) : # ModificationFeature / SequenceModificationVocabulary mf_type = mf . getModificationType ( ) if mf_type is None : return None mf_type_terms = mf_type . getTerm ( ) . toArray ( ) known_mf_type = None for t in mf_type_terms : if t . startswith ( 'MOD_RES ' ) : t = t [ 8 : ] mf_type_indra = _mftype_dict . get ( t ) if mf_type_indra is not None : known_mf_type = mf_type_indra break if not known_mf_type : logger . debug ( 'Skipping modification with unknown terms: %s' % ', ' . join ( mf_type_terms ) ) return None mod_type , residue = known_mf_type # getFeatureLocation returns SequenceLocation, which is the # generic parent class of SequenceSite and SequenceInterval. # Here we need to cast to SequenceSite in order to get to # the sequence position. mf_pos = mf . getFeatureLocation ( ) if mf_pos is not None : # If it is not a SequenceSite we can't handle it if not mf_pos . modelInterface . getName ( ) == 'org.biopax.paxtools.model.level3.SequenceSite' : mod_pos = None else : mf_site = cast ( _bp ( 'SequenceSite' ) , mf_pos ) mf_pos_status = mf_site . getPositionStatus ( ) if mf_pos_status is None : mod_pos = None elif mf_pos_status and mf_pos_status . toString ( ) != 'EQUAL' : logger . debug ( 'Modification site position is %s' % mf_pos_status . toString ( ) ) else : mod_pos = mf_site . getSequencePosition ( ) mod_pos = '%s' % mod_pos else : mod_pos = None mc = ModCondition ( mod_type , residue , mod_pos , True ) return mc
Extract the type of modification and the position from a ModificationFeature object in the INDRA format .
491
21
17,298
def _get_entref ( bpe ) : if not _is_reference ( bpe ) : try : er = bpe . getEntityReference ( ) except AttributeError : return None return er else : return bpe
Returns the entity reference of an entity if it exists or return the entity reference that was passed in as argument .
48
22
17,299
def _stmt_location_to_agents ( stmt , location ) : if location is None : return agents = stmt . agent_list ( ) for a in agents : if a is not None : a . location = location
Apply an event location to the Agents in the corresponding Statement .
49
12