id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
19,000
sorgerlab/indra
indra/sources/sparser/api.py
process_sparser_output
def process_sparser_output(output_fname, output_fmt='json'): """Return a processor with Statements extracted from Sparser XML or JSON Parameters ---------- output_fname : str The path to the Sparser output file to be processed. The file can either be JSON or XML output from Sparser, with the output_fmt parameter defining what format is assumed to be processed. output_fmt : Optional[str] The format of the Sparser output to be processed, can either be 'json' or 'xml'. Default: 'json' Returns ------- sp : SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ if output_fmt not in ['json', 'xml']: logger.error("Unrecognized output format '%s'." % output_fmt) return None sp = None with open(output_fname, 'rt') as fh: if output_fmt == 'json': json_dict = json.load(fh) sp = process_json_dict(json_dict) else: xml_str = fh.read() sp = process_xml(xml_str) return sp
python
def process_sparser_output(output_fname, output_fmt='json'): if output_fmt not in ['json', 'xml']: logger.error("Unrecognized output format '%s'." % output_fmt) return None sp = None with open(output_fname, 'rt') as fh: if output_fmt == 'json': json_dict = json.load(fh) sp = process_json_dict(json_dict) else: xml_str = fh.read() sp = process_xml(xml_str) return sp
[ "def", "process_sparser_output", "(", "output_fname", ",", "output_fmt", "=", "'json'", ")", ":", "if", "output_fmt", "not", "in", "[", "'json'", ",", "'xml'", "]", ":", "logger", ".", "error", "(", "\"Unrecognized output format '%s'.\"", "%", "output_fmt", ")",...
Return a processor with Statements extracted from Sparser XML or JSON Parameters ---------- output_fname : str The path to the Sparser output file to be processed. The file can either be JSON or XML output from Sparser, with the output_fmt parameter defining what format is assumed to be processed. output_fmt : Optional[str] The format of the Sparser output to be processed, can either be 'json' or 'xml'. Default: 'json' Returns ------- sp : SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen.
[ "Return", "a", "processor", "with", "Statements", "extracted", "from", "Sparser", "XML", "or", "JSON" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L137-L167
19,001
sorgerlab/indra
indra/sources/sparser/api.py
process_xml
def process_xml(xml_str): """Return processor with Statements extracted from a Sparser XML. Parameters ---------- xml_str : str The XML string obtained by reading content with Sparser, using the 'xml' output mode. Returns ------- sp : SparserXMLProcessor A SparserXMLProcessor which has extracted Statements as its statements attribute. """ try: tree = ET.XML(xml_str, parser=UTB()) except ET.ParseError as e: logger.error('Could not parse XML string') logger.error(e) return None sp = _process_elementtree(tree) return sp
python
def process_xml(xml_str): try: tree = ET.XML(xml_str, parser=UTB()) except ET.ParseError as e: logger.error('Could not parse XML string') logger.error(e) return None sp = _process_elementtree(tree) return sp
[ "def", "process_xml", "(", "xml_str", ")", ":", "try", ":", "tree", "=", "ET", ".", "XML", "(", "xml_str", ",", "parser", "=", "UTB", "(", ")", ")", "except", "ET", ".", "ParseError", "as", "e", ":", "logger", ".", "error", "(", "'Could not parse XML...
Return processor with Statements extracted from a Sparser XML. Parameters ---------- xml_str : str The XML string obtained by reading content with Sparser, using the 'xml' output mode. Returns ------- sp : SparserXMLProcessor A SparserXMLProcessor which has extracted Statements as its statements attribute.
[ "Return", "processor", "with", "Statements", "extracted", "from", "a", "Sparser", "XML", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L190-L212
19,002
sorgerlab/indra
indra/sources/sparser/api.py
run_sparser
def run_sparser(fname, output_fmt, outbuf=None, timeout=600): """Return the path to reading output after running Sparser reading. Parameters ---------- fname : str The path to an input file to be processed. Due to the Spaser executable's assumptions, the file name needs to start with PMC and should be an NXML formatted file. output_fmt : Optional[str] The format in which Sparser should produce its output, can either be 'json' or 'xml'. outbuf : Optional[file] A file like object that the Sparser output is written to. timeout : int The number of seconds to wait until giving up on this one reading. The default is 600 seconds (i.e. 10 minutes). Sparcer is a fast reader and the typical type to read a single full text is a matter of seconds. Returns ------- output_path : str The path to the output file created by Sparser. """ if not sparser_path or not os.path.exists(sparser_path): logger.error('Sparser executable not set in %s' % sparser_path_var) return None if output_fmt == 'xml': format_flag = '-x' suffix = '.xml' elif output_fmt == 'json': format_flag = '-j' suffix = '.json' else: logger.error('Unknown output format: %s' % output_fmt) return None sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh') output_path = fname.split('.')[0] + '-semantics' + suffix for fpath in [sparser_exec_path, fname]: if not os.path.exists(fpath): raise Exception("'%s' is not a valid path." % fpath) cmd_list = [sparser_exec_path, format_flag, fname] # This is mostly a copy of the code found in subprocess.run, with the # key change that proc.kill is replaced with os.killpg. This allows the # process to be killed even if it has children. Solution developed from: # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure with sp.Popen(cmd_list, stdout=sp.PIPE) as proc: try: stdout, stderr = proc.communicate(timeout=timeout) except sp.TimeoutExpired: # Yes, this is about as bad as it looks. But it is the only way to # be sure the script actually dies. 
sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname]) stdout, stderr = proc.communicate() raise sp.TimeoutExpired(proc.args, timeout, output=stdout, stderr=stderr) except BaseException: # See comment on above instance. sp.check_call(['pkill', '-f', fname]) proc.wait() raise retcode = proc.poll() if retcode: raise sp.CalledProcessError(retcode, proc.args, output=stdout, stderr=stderr) if outbuf is not None: outbuf.write(stdout) outbuf.flush() assert os.path.exists(output_path),\ 'No output file \"%s\" created by sparser.' % output_path return output_path
python
def run_sparser(fname, output_fmt, outbuf=None, timeout=600): if not sparser_path or not os.path.exists(sparser_path): logger.error('Sparser executable not set in %s' % sparser_path_var) return None if output_fmt == 'xml': format_flag = '-x' suffix = '.xml' elif output_fmt == 'json': format_flag = '-j' suffix = '.json' else: logger.error('Unknown output format: %s' % output_fmt) return None sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh') output_path = fname.split('.')[0] + '-semantics' + suffix for fpath in [sparser_exec_path, fname]: if not os.path.exists(fpath): raise Exception("'%s' is not a valid path." % fpath) cmd_list = [sparser_exec_path, format_flag, fname] # This is mostly a copy of the code found in subprocess.run, with the # key change that proc.kill is replaced with os.killpg. This allows the # process to be killed even if it has children. Solution developed from: # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure with sp.Popen(cmd_list, stdout=sp.PIPE) as proc: try: stdout, stderr = proc.communicate(timeout=timeout) except sp.TimeoutExpired: # Yes, this is about as bad as it looks. But it is the only way to # be sure the script actually dies. sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname]) stdout, stderr = proc.communicate() raise sp.TimeoutExpired(proc.args, timeout, output=stdout, stderr=stderr) except BaseException: # See comment on above instance. sp.check_call(['pkill', '-f', fname]) proc.wait() raise retcode = proc.poll() if retcode: raise sp.CalledProcessError(retcode, proc.args, output=stdout, stderr=stderr) if outbuf is not None: outbuf.write(stdout) outbuf.flush() assert os.path.exists(output_path),\ 'No output file \"%s\" created by sparser.' % output_path return output_path
[ "def", "run_sparser", "(", "fname", ",", "output_fmt", ",", "outbuf", "=", "None", ",", "timeout", "=", "600", ")", ":", "if", "not", "sparser_path", "or", "not", "os", ".", "path", ".", "exists", "(", "sparser_path", ")", ":", "logger", ".", "error", ...
Return the path to reading output after running Sparser reading. Parameters ---------- fname : str The path to an input file to be processed. Due to the Sparser executable's assumptions, the file name needs to start with PMC and should be an NXML formatted file. output_fmt : Optional[str] The format in which Sparser should produce its output, can either be 'json' or 'xml'. outbuf : Optional[file] A file like object that the Sparser output is written to. timeout : int The number of seconds to wait until giving up on this one reading. The default is 600 seconds (i.e. 10 minutes). Sparser is a fast reader and the typical time to read a single full text is a matter of seconds. Returns ------- output_path : str The path to the output file created by Sparser.
[ "Return", "the", "path", "to", "reading", "output", "after", "running", "Sparser", "reading", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L215-L287
19,003
sorgerlab/indra
indra/sources/sparser/api.py
get_version
def get_version(): """Return the version of the Sparser executable on the path. Returns ------- version : str The version of Sparser that is found on the Sparser path. """ assert sparser_path is not None, "Sparser path is not defined." with open(os.path.join(sparser_path, 'version.txt'), 'r') as f: version = f.read().strip() return version
python
def get_version(): assert sparser_path is not None, "Sparser path is not defined." with open(os.path.join(sparser_path, 'version.txt'), 'r') as f: version = f.read().strip() return version
[ "def", "get_version", "(", ")", ":", "assert", "sparser_path", "is", "not", "None", ",", "\"Sparser path is not defined.\"", "with", "open", "(", "os", ".", "path", ".", "join", "(", "sparser_path", ",", "'version.txt'", ")", ",", "'r'", ")", "as", "f", ":...
Return the version of the Sparser executable on the path. Returns ------- version : str The version of Sparser that is found on the Sparser path.
[ "Return", "the", "version", "of", "the", "Sparser", "executable", "on", "the", "path", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L290-L301
19,004
sorgerlab/indra
indra/sources/sparser/api.py
make_nxml_from_text
def make_nxml_from_text(text): """Return raw text wrapped in NXML structure. Parameters ---------- text : str The raw text content to be wrapped in an NXML structure. Returns ------- nxml_str : str The NXML string wrapping the raw text input. """ text = _escape_xml(text) header = '<?xml version="1.0" encoding="UTF-8" ?>' + \ '<OAI-PMH><article><body><sec id="s1"><p>' footer = '</p></sec></body></article></OAI-PMH>' nxml_str = header + text + footer return nxml_str
python
def make_nxml_from_text(text): text = _escape_xml(text) header = '<?xml version="1.0" encoding="UTF-8" ?>' + \ '<OAI-PMH><article><body><sec id="s1"><p>' footer = '</p></sec></body></article></OAI-PMH>' nxml_str = header + text + footer return nxml_str
[ "def", "make_nxml_from_text", "(", "text", ")", ":", "text", "=", "_escape_xml", "(", "text", ")", "header", "=", "'<?xml version=\"1.0\" encoding=\"UTF-8\" ?>'", "+", "'<OAI-PMH><article><body><sec id=\"s1\"><p>'", "footer", "=", "'</p></sec></body></article></OAI-PMH>'", "n...
Return raw text wrapped in NXML structure. Parameters ---------- text : str The raw text content to be wrapped in an NXML structure. Returns ------- nxml_str : str The NXML string wrapping the raw text input.
[ "Return", "raw", "text", "wrapped", "in", "NXML", "structure", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/api.py#L304-L322
19,005
sorgerlab/indra
indra/databases/hgnc_client.py
get_hgnc_name
def get_hgnc_name(hgnc_id): """Return the HGNC symbol corresponding to the given HGNC ID. Parameters ---------- hgnc_id : str The HGNC ID to be converted. Returns ------- hgnc_name : str The HGNC symbol corresponding to the given HGNC ID. """ try: hgnc_name = hgnc_names[hgnc_id] except KeyError: xml_tree = get_hgnc_entry(hgnc_id) if xml_tree is None: return None hgnc_name_tag =\ xml_tree.find("result/doc/str[@name='symbol']") if hgnc_name_tag is None: return None hgnc_name = hgnc_name_tag.text.strip() return hgnc_name
python
def get_hgnc_name(hgnc_id): try: hgnc_name = hgnc_names[hgnc_id] except KeyError: xml_tree = get_hgnc_entry(hgnc_id) if xml_tree is None: return None hgnc_name_tag =\ xml_tree.find("result/doc/str[@name='symbol']") if hgnc_name_tag is None: return None hgnc_name = hgnc_name_tag.text.strip() return hgnc_name
[ "def", "get_hgnc_name", "(", "hgnc_id", ")", ":", "try", ":", "hgnc_name", "=", "hgnc_names", "[", "hgnc_id", "]", "except", "KeyError", ":", "xml_tree", "=", "get_hgnc_entry", "(", "hgnc_id", ")", "if", "xml_tree", "is", "None", ":", "return", "None", "hg...
Return the HGNC symbol corresponding to the given HGNC ID. Parameters ---------- hgnc_id : str The HGNC ID to be converted. Returns ------- hgnc_name : str The HGNC symbol corresponding to the given HGNC ID.
[ "Return", "the", "HGNC", "symbol", "corresponding", "to", "the", "given", "HGNC", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/hgnc_client.py#L83-L107
19,006
sorgerlab/indra
indra/databases/hgnc_client.py
get_hgnc_entry
def get_hgnc_entry(hgnc_id): """Return the HGNC entry for the given HGNC ID from the web service. Parameters ---------- hgnc_id : str The HGNC ID to be converted. Returns ------- xml_tree : ElementTree The XML ElementTree corresponding to the entry for the given HGNC ID. """ url = hgnc_url + 'hgnc_id/%s' % hgnc_id headers = {'Accept': '*/*'} res = requests.get(url, headers=headers) if not res.status_code == 200: return None xml_tree = ET.XML(res.content, parser=UTB()) return xml_tree
python
def get_hgnc_entry(hgnc_id): url = hgnc_url + 'hgnc_id/%s' % hgnc_id headers = {'Accept': '*/*'} res = requests.get(url, headers=headers) if not res.status_code == 200: return None xml_tree = ET.XML(res.content, parser=UTB()) return xml_tree
[ "def", "get_hgnc_entry", "(", "hgnc_id", ")", ":", "url", "=", "hgnc_url", "+", "'hgnc_id/%s'", "%", "hgnc_id", "headers", "=", "{", "'Accept'", ":", "'*/*'", "}", "res", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "if...
Return the HGNC entry for the given HGNC ID from the web service. Parameters ---------- hgnc_id : str The HGNC ID to be converted. Returns ------- xml_tree : ElementTree The XML ElementTree corresponding to the entry for the given HGNC ID.
[ "Return", "the", "HGNC", "entry", "for", "the", "given", "HGNC", "ID", "from", "the", "web", "service", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/hgnc_client.py#L224-L244
19,007
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
analyze_reach_log
def analyze_reach_log(log_fname=None, log_str=None): """Return unifinished PMIDs given a log file name.""" assert bool(log_fname) ^ bool(log_str), 'Must specify log_fname OR log_str' started_patt = re.compile('Starting ([\d]+)') # TODO: it might be interesting to get the time it took to read # each paper here finished_patt = re.compile('Finished ([\d]+)') def get_content_nums(txt): pat = 'Retrieved content for ([\d]+) / ([\d]+) papers to be read' res = re.match(pat, txt) has_content, total = res.groups() if res else None, None return has_content, total if log_fname: with open(log_fname, 'r') as fh: log_str = fh.read() # has_content, total = get_content_nums(log_str) # unused pmids = {} pmids['started'] = started_patt.findall(log_str) pmids['finished'] = finished_patt.findall(log_str) pmids['not_done'] = set(pmids['started']) - set(pmids['finished']) return pmids
python
def analyze_reach_log(log_fname=None, log_str=None): assert bool(log_fname) ^ bool(log_str), 'Must specify log_fname OR log_str' started_patt = re.compile('Starting ([\d]+)') # TODO: it might be interesting to get the time it took to read # each paper here finished_patt = re.compile('Finished ([\d]+)') def get_content_nums(txt): pat = 'Retrieved content for ([\d]+) / ([\d]+) papers to be read' res = re.match(pat, txt) has_content, total = res.groups() if res else None, None return has_content, total if log_fname: with open(log_fname, 'r') as fh: log_str = fh.read() # has_content, total = get_content_nums(log_str) # unused pmids = {} pmids['started'] = started_patt.findall(log_str) pmids['finished'] = finished_patt.findall(log_str) pmids['not_done'] = set(pmids['started']) - set(pmids['finished']) return pmids
[ "def", "analyze_reach_log", "(", "log_fname", "=", "None", ",", "log_str", "=", "None", ")", ":", "assert", "bool", "(", "log_fname", ")", "^", "bool", "(", "log_str", ")", ",", "'Must specify log_fname OR log_str'", "started_patt", "=", "re", ".", "compile", ...
Return unfinished PMIDs given a log file name.
[ "Return", "unfinished", "PMIDs", "given", "a", "log", "file", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L6-L28
19,008
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
get_logs_from_db_reading
def get_logs_from_db_reading(job_prefix, reading_queue='run_db_reading_queue'): """Get the logs stashed on s3 for a particular reading.""" s3 = boto3.client('s3') gen_prefix = 'reading_results/%s/logs/%s' % (job_prefix, reading_queue) job_log_data = s3.list_objects_v2(Bucket='bigmech', Prefix=join(gen_prefix, job_prefix)) # TODO: Track success/failure log_strs = [] for fdict in job_log_data['Contents']: resp = s3.get_object(Bucket='bigmech', Key=fdict['Key']) log_strs.append(resp['Body'].read().decode('utf-8')) return log_strs
python
def get_logs_from_db_reading(job_prefix, reading_queue='run_db_reading_queue'): s3 = boto3.client('s3') gen_prefix = 'reading_results/%s/logs/%s' % (job_prefix, reading_queue) job_log_data = s3.list_objects_v2(Bucket='bigmech', Prefix=join(gen_prefix, job_prefix)) # TODO: Track success/failure log_strs = [] for fdict in job_log_data['Contents']: resp = s3.get_object(Bucket='bigmech', Key=fdict['Key']) log_strs.append(resp['Body'].read().decode('utf-8')) return log_strs
[ "def", "get_logs_from_db_reading", "(", "job_prefix", ",", "reading_queue", "=", "'run_db_reading_queue'", ")", ":", "s3", "=", "boto3", ".", "client", "(", "'s3'", ")", "gen_prefix", "=", "'reading_results/%s/logs/%s'", "%", "(", "job_prefix", ",", "reading_queue",...
Get the logs stashed on s3 for a particular reading.
[ "Get", "the", "logs", "stashed", "on", "s3", "for", "a", "particular", "reading", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L36-L47
19,009
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
separate_reach_logs
def separate_reach_logs(log_str): """Get the list of reach logs from the overall logs.""" log_lines = log_str.splitlines() reach_logs = [] reach_lines = [] adding_reach_lines = False for l in log_lines[:]: if not adding_reach_lines and 'Beginning reach' in l: adding_reach_lines = True elif adding_reach_lines and 'Reach finished' in l: adding_reach_lines = False reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines))) reach_lines = [] elif adding_reach_lines: reach_lines.append(l.split('readers - ')[1]) log_lines.remove(l) if adding_reach_lines: reach_logs.append(('FAILURE', '\n'.join(reach_lines))) return '\n'.join(log_lines), reach_logs
python
def separate_reach_logs(log_str): log_lines = log_str.splitlines() reach_logs = [] reach_lines = [] adding_reach_lines = False for l in log_lines[:]: if not adding_reach_lines and 'Beginning reach' in l: adding_reach_lines = True elif adding_reach_lines and 'Reach finished' in l: adding_reach_lines = False reach_logs.append(('SUCCEEDED', '\n'.join(reach_lines))) reach_lines = [] elif adding_reach_lines: reach_lines.append(l.split('readers - ')[1]) log_lines.remove(l) if adding_reach_lines: reach_logs.append(('FAILURE', '\n'.join(reach_lines))) return '\n'.join(log_lines), reach_logs
[ "def", "separate_reach_logs", "(", "log_str", ")", ":", "log_lines", "=", "log_str", ".", "splitlines", "(", ")", "reach_logs", "=", "[", "]", "reach_lines", "=", "[", "]", "adding_reach_lines", "=", "False", "for", "l", "in", "log_lines", "[", ":", "]", ...
Get the list of reach logs from the overall logs.
[ "Get", "the", "list", "of", "reach", "logs", "from", "the", "overall", "logs", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L50-L68
19,010
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
get_unyielding_tcids
def get_unyielding_tcids(log_str): """Extract the set of tcids for which no statements were created.""" tcid_strs = re.findall('INFO: \[.*?\].*? - Got no statements for (\d+).*', log_str) return {int(tcid_str) for tcid_str in tcid_strs}
python
def get_unyielding_tcids(log_str): tcid_strs = re.findall('INFO: \[.*?\].*? - Got no statements for (\d+).*', log_str) return {int(tcid_str) for tcid_str in tcid_strs}
[ "def", "get_unyielding_tcids", "(", "log_str", ")", ":", "tcid_strs", "=", "re", ".", "findall", "(", "'INFO: \\[.*?\\].*? - Got no statements for (\\d+).*'", ",", "log_str", ")", "return", "{", "int", "(", "tcid_str", ")", "for", "tcid_str", "in", "tcid_strs", "}...
Extract the set of tcids for which no statements were created.
[ "Extract", "the", "set", "of", "tcids", "for", "which", "no", "statements", "were", "created", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L115-L119
19,011
sorgerlab/indra
indra/tools/reading/util/log_analysis_tools.py
analyze_db_reading
def analyze_db_reading(job_prefix, reading_queue='run_db_reading_queue'): """Run various analysis on a particular reading job.""" # Analyze reach failures log_strs = get_logs_from_db_reading(job_prefix, reading_queue) indra_log_strs = [] all_reach_logs = [] log_stats = [] for log_str in log_strs: log_str, reach_logs = separate_reach_logs(log_str) all_reach_logs.extend(reach_logs) indra_log_strs.append(log_str) log_stats.append(get_reading_stats(log_str)) # Analayze the reach failures. failed_reach_logs = [reach_log_str for result, reach_log_str in all_reach_logs if result == 'FAILURE'] failed_id_dicts = [analyze_reach_log(log_str=reach_log) for reach_log in failed_reach_logs if bool(reach_log)] tcids_unfinished = {id_dict['not_done'] for id_dict in failed_id_dicts} print("Found %d unfinished tcids." % len(tcids_unfinished)) # Summarize the global stats if log_stats: sum_dict = dict.fromkeys(log_stats[0].keys()) for log_stat in log_stats: for k in log_stat.keys(): if isinstance(log_stat[k], list): if k not in sum_dict.keys(): sum_dict[k] = [0]*len(log_stat[k]) sum_dict[k] = [sum_dict[k][i] + log_stat[k][i] for i in range(len(log_stat[k]))] else: if k not in sum_dict.keys(): sum_dict[k] = 0 sum_dict[k] += log_stat[k] else: sum_dict = {} return tcids_unfinished, sum_dict, log_stats
python
def analyze_db_reading(job_prefix, reading_queue='run_db_reading_queue'): # Analyze reach failures log_strs = get_logs_from_db_reading(job_prefix, reading_queue) indra_log_strs = [] all_reach_logs = [] log_stats = [] for log_str in log_strs: log_str, reach_logs = separate_reach_logs(log_str) all_reach_logs.extend(reach_logs) indra_log_strs.append(log_str) log_stats.append(get_reading_stats(log_str)) # Analayze the reach failures. failed_reach_logs = [reach_log_str for result, reach_log_str in all_reach_logs if result == 'FAILURE'] failed_id_dicts = [analyze_reach_log(log_str=reach_log) for reach_log in failed_reach_logs if bool(reach_log)] tcids_unfinished = {id_dict['not_done'] for id_dict in failed_id_dicts} print("Found %d unfinished tcids." % len(tcids_unfinished)) # Summarize the global stats if log_stats: sum_dict = dict.fromkeys(log_stats[0].keys()) for log_stat in log_stats: for k in log_stat.keys(): if isinstance(log_stat[k], list): if k not in sum_dict.keys(): sum_dict[k] = [0]*len(log_stat[k]) sum_dict[k] = [sum_dict[k][i] + log_stat[k][i] for i in range(len(log_stat[k]))] else: if k not in sum_dict.keys(): sum_dict[k] = 0 sum_dict[k] += log_stat[k] else: sum_dict = {} return tcids_unfinished, sum_dict, log_stats
[ "def", "analyze_db_reading", "(", "job_prefix", ",", "reading_queue", "=", "'run_db_reading_queue'", ")", ":", "# Analyze reach failures", "log_strs", "=", "get_logs_from_db_reading", "(", "job_prefix", ",", "reading_queue", ")", "indra_log_strs", "=", "[", "]", "all_re...
Run various analysis on a particular reading job.
[ "Run", "various", "analysis", "on", "a", "particular", "reading", "job", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/util/log_analysis_tools.py#L156-L195
19,012
sorgerlab/indra
indra/sources/biopax/api.py
process_pc_neighborhood
def process_pc_neighborhood(gene_names, neighbor_limit=1, database_filter=None): """Returns a BiopaxProcessor for a PathwayCommons neighborhood query. The neighborhood query finds the neighborhood around a set of source genes. http://www.pathwaycommons.org/pc2/#graph http://www.pathwaycommons.org/pc2/#graph_kind Parameters ---------- gene_names : list A list of HGNC gene symbols to search the neighborhood of. Examples: ['BRAF'], ['BRAF', 'MAP2K1'] neighbor_limit : Optional[int] The number of steps to limit the size of the neighborhood around the gene names being queried. Default: 1 database_filter : Optional[list] A list of database identifiers to which the query is restricted. Examples: ['reactome'], ['biogrid', 'pid', 'psp'] If not given, all databases are used in the query. For a full list of databases see http://www.pathwaycommons.org/pc2/datasources Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model. """ model = pcc.graph_query('neighborhood', gene_names, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model)
python
def process_pc_neighborhood(gene_names, neighbor_limit=1, database_filter=None): model = pcc.graph_query('neighborhood', gene_names, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model)
[ "def", "process_pc_neighborhood", "(", "gene_names", ",", "neighbor_limit", "=", "1", ",", "database_filter", "=", "None", ")", ":", "model", "=", "pcc", ".", "graph_query", "(", "'neighborhood'", ",", "gene_names", ",", "neighbor_limit", "=", "neighbor_limit", ...
Returns a BiopaxProcessor for a PathwayCommons neighborhood query. The neighborhood query finds the neighborhood around a set of source genes. http://www.pathwaycommons.org/pc2/#graph http://www.pathwaycommons.org/pc2/#graph_kind Parameters ---------- gene_names : list A list of HGNC gene symbols to search the neighborhood of. Examples: ['BRAF'], ['BRAF', 'MAP2K1'] neighbor_limit : Optional[int] The number of steps to limit the size of the neighborhood around the gene names being queried. Default: 1 database_filter : Optional[list] A list of database identifiers to which the query is restricted. Examples: ['reactome'], ['biogrid', 'pid', 'psp'] If not given, all databases are used in the query. For a full list of databases see http://www.pathwaycommons.org/pc2/datasources Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "PathwayCommons", "neighborhood", "query", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L8-L41
19,013
sorgerlab/indra
indra/sources/biopax/api.py
process_pc_pathsbetween
def process_pc_pathsbetween(gene_names, neighbor_limit=1, database_filter=None, block_size=None): """Returns a BiopaxProcessor for a PathwayCommons paths-between query. The paths-between query finds the paths between a set of genes. Here source gene names are given in a single list and all directions of paths between these genes are considered. http://www.pathwaycommons.org/pc2/#graph http://www.pathwaycommons.org/pc2/#graph_kind Parameters ---------- gene_names : list A list of HGNC gene symbols to search for paths between. Examples: ['BRAF', 'MAP2K1'] neighbor_limit : Optional[int] The number of steps to limit the length of the paths between the gene names being queried. Default: 1 database_filter : Optional[list] A list of database identifiers to which the query is restricted. Examples: ['reactome'], ['biogrid', 'pid', 'psp'] If not given, all databases are used in the query. For a full list of databases see http://www.pathwaycommons.org/pc2/datasources block_size : Optional[int] Large paths-between queries (above ~60 genes) can error on the server side. In this case, the query can be replaced by a series of smaller paths-between and paths-from-to queries each of which contains block_size genes. Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model. """ if not block_size: model = pcc.graph_query('pathsbetween', gene_names, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model) else: gene_blocks = [gene_names[i:i + block_size] for i in range(0, len(gene_names), block_size)] stmts = [] # Run pathsfromto between pairs of blocks and pathsbetween # within each block. 
This breaks up a single call with N genes into # (N/block_size)*(N/blocksize) calls with block_size genes for genes1, genes2 in itertools.product(gene_blocks, repeat=2): if genes1 == genes2: bp = process_pc_pathsbetween(genes1, database_filter=database_filter, block_size=None) else: bp = process_pc_pathsfromto(genes1, genes2, database_filter=database_filter) stmts += bp.statements
python
def process_pc_pathsbetween(gene_names, neighbor_limit=1, database_filter=None, block_size=None): if not block_size: model = pcc.graph_query('pathsbetween', gene_names, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model) else: gene_blocks = [gene_names[i:i + block_size] for i in range(0, len(gene_names), block_size)] stmts = [] # Run pathsfromto between pairs of blocks and pathsbetween # within each block. This breaks up a single call with N genes into # (N/block_size)*(N/blocksize) calls with block_size genes for genes1, genes2 in itertools.product(gene_blocks, repeat=2): if genes1 == genes2: bp = process_pc_pathsbetween(genes1, database_filter=database_filter, block_size=None) else: bp = process_pc_pathsfromto(genes1, genes2, database_filter=database_filter) stmts += bp.statements
[ "def", "process_pc_pathsbetween", "(", "gene_names", ",", "neighbor_limit", "=", "1", ",", "database_filter", "=", "None", ",", "block_size", "=", "None", ")", ":", "if", "not", "block_size", ":", "model", "=", "pcc", ".", "graph_query", "(", "'pathsbetween'",...
Returns a BiopaxProcessor for a PathwayCommons paths-between query. The paths-between query finds the paths between a set of genes. Here source gene names are given in a single list and all directions of paths between these genes are considered. http://www.pathwaycommons.org/pc2/#graph http://www.pathwaycommons.org/pc2/#graph_kind Parameters ---------- gene_names : list A list of HGNC gene symbols to search for paths between. Examples: ['BRAF', 'MAP2K1'] neighbor_limit : Optional[int] The number of steps to limit the length of the paths between the gene names being queried. Default: 1 database_filter : Optional[list] A list of database identifiers to which the query is restricted. Examples: ['reactome'], ['biogrid', 'pid', 'psp'] If not given, all databases are used in the query. For a full list of databases see http://www.pathwaycommons.org/pc2/datasources block_size : Optional[int] Large paths-between queries (above ~60 genes) can error on the server side. In this case, the query can be replaced by a series of smaller paths-between and paths-from-to queries each of which contains block_size genes. Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "PathwayCommons", "paths", "-", "between", "query", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L44-L101
19,014
sorgerlab/indra
indra/sources/biopax/api.py
process_pc_pathsfromto
def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1, database_filter=None): """Returns a BiopaxProcessor for a PathwayCommons paths-from-to query. The paths-from-to query finds the paths from a set of source genes to a set of target genes. http://www.pathwaycommons.org/pc2/#graph http://www.pathwaycommons.org/pc2/#graph_kind Parameters ---------- source_genes : list A list of HGNC gene symbols that are the sources of paths being searched for. Examples: ['BRAF', 'RAF1', 'ARAF'] target_genes : list A list of HGNC gene symbols that are the targets of paths being searched for. Examples: ['MAP2K1', 'MAP2K2'] neighbor_limit : Optional[int] The number of steps to limit the length of the paths between the source genes and target genes being queried. Default: 1 database_filter : Optional[list] A list of database identifiers to which the query is restricted. Examples: ['reactome'], ['biogrid', 'pid', 'psp'] If not given, all databases are used in the query. For a full list of databases see http://www.pathwaycommons.org/pc2/datasources Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model. """ model = pcc.graph_query('pathsfromto', source_genes, target_genes, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model)
python
def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1, database_filter=None): model = pcc.graph_query('pathsfromto', source_genes, target_genes, neighbor_limit=neighbor_limit, database_filter=database_filter) if model is not None: return process_model(model)
[ "def", "process_pc_pathsfromto", "(", "source_genes", ",", "target_genes", ",", "neighbor_limit", "=", "1", ",", "database_filter", "=", "None", ")", ":", "model", "=", "pcc", ".", "graph_query", "(", "'pathsfromto'", ",", "source_genes", ",", "target_genes", ",...
Returns a BiopaxProcessor for a PathwayCommons paths-from-to query. The paths-from-to query finds the paths from a set of source genes to a set of target genes. http://www.pathwaycommons.org/pc2/#graph http://www.pathwaycommons.org/pc2/#graph_kind Parameters ---------- source_genes : list A list of HGNC gene symbols that are the sources of paths being searched for. Examples: ['BRAF', 'RAF1', 'ARAF'] target_genes : list A list of HGNC gene symbols that are the targets of paths being searched for. Examples: ['MAP2K1', 'MAP2K2'] neighbor_limit : Optional[int] The number of steps to limit the length of the paths between the source genes and target genes being queried. Default: 1 database_filter : Optional[list] A list of database identifiers to which the query is restricted. Examples: ['reactome'], ['biogrid', 'pid', 'psp'] If not given, all databases are used in the query. For a full list of databases see http://www.pathwaycommons.org/pc2/datasources Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "PathwayCommons", "paths", "-", "from", "-", "to", "query", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L104-L143
19,015
sorgerlab/indra
indra/sources/biopax/api.py
process_model
def process_model(model): """Returns a BiopaxProcessor for a BioPAX model object. Parameters ---------- model : org.biopax.paxtools.model.Model A BioPAX model object. Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model. """ bp = BiopaxProcessor(model) bp.get_modifications() bp.get_regulate_activities() bp.get_regulate_amounts() bp.get_activity_modification() bp.get_gef() bp.get_gap() bp.get_conversions() # bp.get_complexes() bp.eliminate_exact_duplicates() return bp
python
def process_model(model): bp = BiopaxProcessor(model) bp.get_modifications() bp.get_regulate_activities() bp.get_regulate_amounts() bp.get_activity_modification() bp.get_gef() bp.get_gap() bp.get_conversions() # bp.get_complexes() bp.eliminate_exact_duplicates() return bp
[ "def", "process_model", "(", "model", ")", ":", "bp", "=", "BiopaxProcessor", "(", "model", ")", "bp", ".", "get_modifications", "(", ")", "bp", ".", "get_regulate_activities", "(", ")", "bp", ".", "get_regulate_amounts", "(", ")", "bp", ".", "get_activity_m...
Returns a BiopaxProcessor for a BioPAX model object. Parameters ---------- model : org.biopax.paxtools.model.Model A BioPAX model object. Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model.
[ "Returns", "a", "BiopaxProcessor", "for", "a", "BioPAX", "model", "object", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/api.py#L163-L186
19,016
sorgerlab/indra
indra/benchmarks/assembly_eval/batch4/assembly_eval.py
is_background_knowledge
def is_background_knowledge(stmt): '''Return True if Statement is only supported by background knowledge.''' any_background = False # Iterate over all evidence for the statement for ev in stmt.evidence: epi = ev.epistemics if epi is not None: sec = epi.get('section_type') # If there is at least one evidence not from a # background section then we consider this to be # a non-background knowledge finding. if sec is not None and sec not in background_secs: return False # If there is at least one evidence that is explicitly # from a background section then we keep track of that. elif sec in background_secs: any_background = True # If there is any explicit evidence for this statement being # background info (and no evidence otherwise) then we return # True, otherwise (for instnace of there is no section info at all) # we return False. return any_background
python
def is_background_knowledge(stmt): '''Return True if Statement is only supported by background knowledge.''' any_background = False # Iterate over all evidence for the statement for ev in stmt.evidence: epi = ev.epistemics if epi is not None: sec = epi.get('section_type') # If there is at least one evidence not from a # background section then we consider this to be # a non-background knowledge finding. if sec is not None and sec not in background_secs: return False # If there is at least one evidence that is explicitly # from a background section then we keep track of that. elif sec in background_secs: any_background = True # If there is any explicit evidence for this statement being # background info (and no evidence otherwise) then we return # True, otherwise (for instnace of there is no section info at all) # we return False. return any_background
[ "def", "is_background_knowledge", "(", "stmt", ")", ":", "any_background", "=", "False", "# Iterate over all evidence for the statement", "for", "ev", "in", "stmt", ".", "evidence", ":", "epi", "=", "ev", ".", "epistemics", "if", "epi", "is", "not", "None", ":",...
Return True if Statement is only supported by background knowledge.
[ "Return", "True", "if", "Statement", "is", "only", "supported", "by", "background", "knowledge", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/benchmarks/assembly_eval/batch4/assembly_eval.py#L45-L66
19,017
sorgerlab/indra
indra/benchmarks/assembly_eval/batch4/assembly_eval.py
multiple_sources
def multiple_sources(stmt): '''Return True if statement is supported by multiple sources. Note: this is currently not used and replaced by BeliefEngine score cutoff ''' sources = list(set([e.source_api for e in stmt.evidence])) if len(sources) > 1: return True return False
python
def multiple_sources(stmt): '''Return True if statement is supported by multiple sources. Note: this is currently not used and replaced by BeliefEngine score cutoff ''' sources = list(set([e.source_api for e in stmt.evidence])) if len(sources) > 1: return True return False
[ "def", "multiple_sources", "(", "stmt", ")", ":", "sources", "=", "list", "(", "set", "(", "[", "e", ".", "source_api", "for", "e", "in", "stmt", ".", "evidence", "]", ")", ")", "if", "len", "(", "sources", ")", ">", "1", ":", "return", "True", "...
Return True if statement is supported by multiple sources. Note: this is currently not used and replaced by BeliefEngine score cutoff
[ "Return", "True", "if", "statement", "is", "supported", "by", "multiple", "sources", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/benchmarks/assembly_eval/batch4/assembly_eval.py#L68-L76
19,018
sorgerlab/indra
indra/sources/geneways/symbols_parser.py
GenewaysSymbols.id_to_symbol
def id_to_symbol(self, entrez_id): """Gives the symbol for a given entrez id)""" entrez_id = str(entrez_id) if entrez_id not in self.ids_to_symbols: m = 'Could not look up symbol for Entrez ID ' + entrez_id raise Exception(m) return self.ids_to_symbols[entrez_id]
python
def id_to_symbol(self, entrez_id): entrez_id = str(entrez_id) if entrez_id not in self.ids_to_symbols: m = 'Could not look up symbol for Entrez ID ' + entrez_id raise Exception(m) return self.ids_to_symbols[entrez_id]
[ "def", "id_to_symbol", "(", "self", ",", "entrez_id", ")", ":", "entrez_id", "=", "str", "(", "entrez_id", ")", "if", "entrez_id", "not", "in", "self", ".", "ids_to_symbols", ":", "m", "=", "'Could not look up symbol for Entrez ID '", "+", "entrez_id", "raise", ...
Gives the symbol for a given entrez id)
[ "Gives", "the", "symbol", "for", "a", "given", "entrez", "id", ")" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/symbols_parser.py#L59-L66
19,019
sorgerlab/indra
indra/assemblers/tsv/assembler.py
TsvAssembler.make_model
def make_model(self, output_file, add_curation_cols=False, up_only=False): """Export the statements into a tab-separated text file. Parameters ---------- output_file : str Name of the output file. add_curation_cols : bool Whether to add columns to facilitate statement curation. Default is False (no additional columns). up_only : bool Whether to include identifiers.org links *only* for the Uniprot grounding of an agent when one is available. Because most spreadsheets allow only a single hyperlink per cell, this can makes it easier to link to Uniprot information pages for curation purposes. Default is False. """ stmt_header = ['INDEX', 'UUID', 'TYPE', 'STR', 'AG_A_TEXT', 'AG_A_LINKS', 'AG_A_STR', 'AG_B_TEXT', 'AG_B_LINKS', 'AG_B_STR', 'PMID', 'TEXT', 'IS_HYP', 'IS_DIRECT'] if add_curation_cols: stmt_header = stmt_header + \ ['AG_A_IDS_CORRECT', 'AG_A_STATE_CORRECT', 'AG_B_IDS_CORRECT', 'AG_B_STATE_CORRECT', 'EVENT_CORRECT', 'RES_CORRECT', 'POS_CORRECT', 'SUBJ_ACT_CORRECT', 'OBJ_ACT_CORRECT', 'HYP_CORRECT', 'DIRECT_CORRECT'] rows = [stmt_header] for ix, stmt in enumerate(self.statements): # Complexes if len(stmt.agent_list()) > 2: logger.info("Skipping statement with more than two members: %s" % stmt) continue # Self-modifications, ActiveForms elif len(stmt.agent_list()) == 1: ag_a = stmt.agent_list()[0] ag_b = None # All others else: (ag_a, ag_b) = stmt.agent_list() # Put together the data row row = [ix+1, stmt.uuid, stmt.__class__.__name__, str(stmt)] + \ _format_agent_entries(ag_a, up_only) + \ _format_agent_entries(ag_b, up_only) + \ [stmt.evidence[0].pmid, stmt.evidence[0].text, stmt.evidence[0].epistemics.get('hypothesis', ''), stmt.evidence[0].epistemics.get('direct', '')] if add_curation_cols: row = row + ([''] * 11) rows.append(row) # Write to file write_unicode_csv(output_file, rows, delimiter='\t')
python
def make_model(self, output_file, add_curation_cols=False, up_only=False): stmt_header = ['INDEX', 'UUID', 'TYPE', 'STR', 'AG_A_TEXT', 'AG_A_LINKS', 'AG_A_STR', 'AG_B_TEXT', 'AG_B_LINKS', 'AG_B_STR', 'PMID', 'TEXT', 'IS_HYP', 'IS_DIRECT'] if add_curation_cols: stmt_header = stmt_header + \ ['AG_A_IDS_CORRECT', 'AG_A_STATE_CORRECT', 'AG_B_IDS_CORRECT', 'AG_B_STATE_CORRECT', 'EVENT_CORRECT', 'RES_CORRECT', 'POS_CORRECT', 'SUBJ_ACT_CORRECT', 'OBJ_ACT_CORRECT', 'HYP_CORRECT', 'DIRECT_CORRECT'] rows = [stmt_header] for ix, stmt in enumerate(self.statements): # Complexes if len(stmt.agent_list()) > 2: logger.info("Skipping statement with more than two members: %s" % stmt) continue # Self-modifications, ActiveForms elif len(stmt.agent_list()) == 1: ag_a = stmt.agent_list()[0] ag_b = None # All others else: (ag_a, ag_b) = stmt.agent_list() # Put together the data row row = [ix+1, stmt.uuid, stmt.__class__.__name__, str(stmt)] + \ _format_agent_entries(ag_a, up_only) + \ _format_agent_entries(ag_b, up_only) + \ [stmt.evidence[0].pmid, stmt.evidence[0].text, stmt.evidence[0].epistemics.get('hypothesis', ''), stmt.evidence[0].epistemics.get('direct', '')] if add_curation_cols: row = row + ([''] * 11) rows.append(row) # Write to file write_unicode_csv(output_file, rows, delimiter='\t')
[ "def", "make_model", "(", "self", ",", "output_file", ",", "add_curation_cols", "=", "False", ",", "up_only", "=", "False", ")", ":", "stmt_header", "=", "[", "'INDEX'", ",", "'UUID'", ",", "'TYPE'", ",", "'STR'", ",", "'AG_A_TEXT'", ",", "'AG_A_LINKS'", "...
Export the statements into a tab-separated text file. Parameters ---------- output_file : str Name of the output file. add_curation_cols : bool Whether to add columns to facilitate statement curation. Default is False (no additional columns). up_only : bool Whether to include identifiers.org links *only* for the Uniprot grounding of an agent when one is available. Because most spreadsheets allow only a single hyperlink per cell, this can makes it easier to link to Uniprot information pages for curation purposes. Default is False.
[ "Export", "the", "statements", "into", "a", "tab", "-", "separated", "text", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/tsv/assembler.py#L109-L163
19,020
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgentSet.get_create_base_agent
def get_create_base_agent(self, agent): """Return base agent with given name, creating it if needed.""" try: base_agent = self.agents[_n(agent.name)] except KeyError: base_agent = BaseAgent(_n(agent.name)) self.agents[_n(agent.name)] = base_agent # If it's a molecular agent if isinstance(agent, Agent): # Handle bound conditions for bc in agent.bound_conditions: bound_base_agent = self.get_create_base_agent(bc.agent) bound_base_agent.create_site(get_binding_site_name(agent)) base_agent.create_site(get_binding_site_name(bc.agent)) # Handle modification conditions for mc in agent.mods: base_agent.create_mod_site(mc) # Handle mutation conditions for mc in agent.mutations: res_from = mc.residue_from if mc.residue_from else 'mut' res_to = mc.residue_to if mc.residue_to else 'X' if mc.position is None: mut_site_name = res_from else: mut_site_name = res_from + mc.position base_agent.create_site(mut_site_name, states=['WT', res_to]) # Handle location condition if agent.location is not None: base_agent.create_site('loc', [_n(agent.location)]) # Handle activity if agent.activity is not None: site_name = agent.activity.activity_type base_agent.create_site(site_name, ['inactive', 'active']) # There might be overwrites here for db_name, db_ref in agent.db_refs.items(): base_agent.db_refs[db_name] = db_ref return base_agent
python
def get_create_base_agent(self, agent): try: base_agent = self.agents[_n(agent.name)] except KeyError: base_agent = BaseAgent(_n(agent.name)) self.agents[_n(agent.name)] = base_agent # If it's a molecular agent if isinstance(agent, Agent): # Handle bound conditions for bc in agent.bound_conditions: bound_base_agent = self.get_create_base_agent(bc.agent) bound_base_agent.create_site(get_binding_site_name(agent)) base_agent.create_site(get_binding_site_name(bc.agent)) # Handle modification conditions for mc in agent.mods: base_agent.create_mod_site(mc) # Handle mutation conditions for mc in agent.mutations: res_from = mc.residue_from if mc.residue_from else 'mut' res_to = mc.residue_to if mc.residue_to else 'X' if mc.position is None: mut_site_name = res_from else: mut_site_name = res_from + mc.position base_agent.create_site(mut_site_name, states=['WT', res_to]) # Handle location condition if agent.location is not None: base_agent.create_site('loc', [_n(agent.location)]) # Handle activity if agent.activity is not None: site_name = agent.activity.activity_type base_agent.create_site(site_name, ['inactive', 'active']) # There might be overwrites here for db_name, db_ref in agent.db_refs.items(): base_agent.db_refs[db_name] = db_ref return base_agent
[ "def", "get_create_base_agent", "(", "self", ",", "agent", ")", ":", "try", ":", "base_agent", "=", "self", ".", "agents", "[", "_n", "(", "agent", ".", "name", ")", "]", "except", "KeyError", ":", "base_agent", "=", "BaseAgent", "(", "_n", "(", "agent...
Return base agent with given name, creating it if needed.
[ "Return", "base", "agent", "with", "given", "name", "creating", "it", "if", "needed", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L13-L57
19,021
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.create_site
def create_site(self, site, states=None): """Create a new site on an agent if it doesn't already exist.""" if site not in self.sites: self.sites.append(site) if states is not None: self.site_states.setdefault(site, []) try: states = list(states) except TypeError: return self.add_site_states(site, states)
python
def create_site(self, site, states=None): if site not in self.sites: self.sites.append(site) if states is not None: self.site_states.setdefault(site, []) try: states = list(states) except TypeError: return self.add_site_states(site, states)
[ "def", "create_site", "(", "self", ",", "site", ",", "states", "=", "None", ")", ":", "if", "site", "not", "in", "self", ".", "sites", ":", "self", ".", "sites", ".", "append", "(", "site", ")", "if", "states", "is", "not", "None", ":", "self", "...
Create a new site on an agent if it doesn't already exist.
[ "Create", "a", "new", "site", "on", "an", "agent", "if", "it", "doesn", "t", "already", "exist", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L90-L100
19,022
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.create_mod_site
def create_mod_site(self, mc): """Create modification site for the BaseAgent from a ModCondition.""" site_name = get_mod_site_name(mc) (unmod_site_state, mod_site_state) = states[mc.mod_type] self.create_site(site_name, (unmod_site_state, mod_site_state)) site_anns = [Annotation((site_name, mod_site_state), mc.mod_type, 'is_modification')] if mc.residue: site_anns.append(Annotation(site_name, mc.residue, 'is_residue')) if mc.position: site_anns.append(Annotation(site_name, mc.position, 'is_position')) self.site_annotations += site_anns
python
def create_mod_site(self, mc): site_name = get_mod_site_name(mc) (unmod_site_state, mod_site_state) = states[mc.mod_type] self.create_site(site_name, (unmod_site_state, mod_site_state)) site_anns = [Annotation((site_name, mod_site_state), mc.mod_type, 'is_modification')] if mc.residue: site_anns.append(Annotation(site_name, mc.residue, 'is_residue')) if mc.position: site_anns.append(Annotation(site_name, mc.position, 'is_position')) self.site_annotations += site_anns
[ "def", "create_mod_site", "(", "self", ",", "mc", ")", ":", "site_name", "=", "get_mod_site_name", "(", "mc", ")", "(", "unmod_site_state", ",", "mod_site_state", ")", "=", "states", "[", "mc", ".", "mod_type", "]", "self", ".", "create_site", "(", "site_n...
Create modification site for the BaseAgent from a ModCondition.
[ "Create", "modification", "site", "for", "the", "BaseAgent", "from", "a", "ModCondition", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L102-L113
19,023
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.add_site_states
def add_site_states(self, site, states): """Create new states on an agent site if the state doesn't exist.""" for state in states: if state not in self.site_states[site]: self.site_states[site].append(state)
python
def add_site_states(self, site, states): for state in states: if state not in self.site_states[site]: self.site_states[site].append(state)
[ "def", "add_site_states", "(", "self", ",", "site", ",", "states", ")", ":", "for", "state", "in", "states", ":", "if", "state", "not", "in", "self", ".", "site_states", "[", "site", "]", ":", "self", ".", "site_states", "[", "site", "]", ".", "appen...
Create new states on an agent site if the state doesn't exist.
[ "Create", "new", "states", "on", "an", "agent", "site", "if", "the", "state", "doesn", "t", "exist", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L115-L119
19,024
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.add_activity_form
def add_activity_form(self, activity_pattern, is_active): """Adds the pattern as an active or inactive form to an Agent. Parameters ---------- activity_pattern : dict A dictionary of site names and their states. is_active : bool Is True if the given pattern corresponds to an active state. """ if is_active: if activity_pattern not in self.active_forms: self.active_forms.append(activity_pattern) else: if activity_pattern not in self.inactive_forms: self.inactive_forms.append(activity_pattern)
python
def add_activity_form(self, activity_pattern, is_active): if is_active: if activity_pattern not in self.active_forms: self.active_forms.append(activity_pattern) else: if activity_pattern not in self.inactive_forms: self.inactive_forms.append(activity_pattern)
[ "def", "add_activity_form", "(", "self", ",", "activity_pattern", ",", "is_active", ")", ":", "if", "is_active", ":", "if", "activity_pattern", "not", "in", "self", ".", "active_forms", ":", "self", ".", "active_forms", ".", "append", "(", "activity_pattern", ...
Adds the pattern as an active or inactive form to an Agent. Parameters ---------- activity_pattern : dict A dictionary of site names and their states. is_active : bool Is True if the given pattern corresponds to an active state.
[ "Adds", "the", "pattern", "as", "an", "active", "or", "inactive", "form", "to", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L121-L136
19,025
sorgerlab/indra
indra/assemblers/pysb/base_agents.py
BaseAgent.add_activity_type
def add_activity_type(self, activity_type): """Adds an activity type to an Agent. Parameters ---------- activity_type : str The type of activity to add such as 'activity', 'kinase', 'gtpbound' """ if activity_type not in self.activity_types: self.activity_types.append(activity_type)
python
def add_activity_type(self, activity_type): if activity_type not in self.activity_types: self.activity_types.append(activity_type)
[ "def", "add_activity_type", "(", "self", ",", "activity_type", ")", ":", "if", "activity_type", "not", "in", "self", ".", "activity_types", ":", "self", ".", "activity_types", ".", "append", "(", "activity_type", ")" ]
Adds an activity type to an Agent. Parameters ---------- activity_type : str The type of activity to add such as 'activity', 'kinase', 'gtpbound'
[ "Adds", "an", "activity", "type", "to", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L138-L148
19,026
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysAction.make_annotation
def make_annotation(self): """Returns a dictionary with all properties of the action and each of its action mentions.""" annotation = dict() # Put all properties of the action object into the annotation for item in dir(self): if len(item) > 0 and item[0] != '_' and \ not inspect.ismethod(getattr(self, item)): annotation[item] = getattr(self, item) # Add properties of each action mention annotation['action_mentions'] = list() for action_mention in self.action_mentions: annotation_mention = action_mention.make_annotation() annotation['action_mentions'].append(annotation_mention) return annotation
python
def make_annotation(self): annotation = dict() # Put all properties of the action object into the annotation for item in dir(self): if len(item) > 0 and item[0] != '_' and \ not inspect.ismethod(getattr(self, item)): annotation[item] = getattr(self, item) # Add properties of each action mention annotation['action_mentions'] = list() for action_mention in self.action_mentions: annotation_mention = action_mention.make_annotation() annotation['action_mentions'].append(annotation_mention) return annotation
[ "def", "make_annotation", "(", "self", ")", ":", "annotation", "=", "dict", "(", ")", "# Put all properties of the action object into the annotation", "for", "item", "in", "dir", "(", "self", ")", ":", "if", "len", "(", "item", ")", ">", "0", "and", "item", ...
Returns a dictionary with all properties of the action and each of its action mentions.
[ "Returns", "a", "dictionary", "with", "all", "properties", "of", "the", "action", "and", "each", "of", "its", "action", "mentions", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L35-L52
19,027
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._search_path
def _search_path(self, directory_name, filename): """Searches for a given file in the specified directory.""" full_path = path.join(directory_name, filename) if path.exists(full_path): return full_path # Could not find the requested file in any of the directories return None
python
def _search_path(self, directory_name, filename): full_path = path.join(directory_name, filename) if path.exists(full_path): return full_path # Could not find the requested file in any of the directories return None
[ "def", "_search_path", "(", "self", ",", "directory_name", ",", "filename", ")", ":", "full_path", "=", "path", ".", "join", "(", "directory_name", ",", "filename", ")", "if", "path", ".", "exists", "(", "full_path", ")", ":", "return", "full_path", "# Cou...
Searches for a given file in the specified directory.
[ "Searches", "for", "a", "given", "file", "in", "the", "specified", "directory", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L96-L103
19,028
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._init_action_list
def _init_action_list(self, action_filename): """Parses the file and populates the data.""" self.actions = list() self.hiid_to_action_index = dict() f = codecs.open(action_filename, 'r', encoding='latin-1') first_line = True for line in f: line = line.rstrip() if first_line: # Ignore the first line first_line = False else: self.actions.append(GenewaysAction(line)) latestInd = len(self.actions)-1 hiid = self.actions[latestInd].hiid if hiid in self.hiid_to_action_index: raise Exception('action hiid not unique: %d' % hiid) self.hiid_to_action_index[hiid] = latestInd
python
def _init_action_list(self, action_filename): self.actions = list() self.hiid_to_action_index = dict() f = codecs.open(action_filename, 'r', encoding='latin-1') first_line = True for line in f: line = line.rstrip() if first_line: # Ignore the first line first_line = False else: self.actions.append(GenewaysAction(line)) latestInd = len(self.actions)-1 hiid = self.actions[latestInd].hiid if hiid in self.hiid_to_action_index: raise Exception('action hiid not unique: %d' % hiid) self.hiid_to_action_index[hiid] = latestInd
[ "def", "_init_action_list", "(", "self", ",", "action_filename", ")", ":", "self", ".", "actions", "=", "list", "(", ")", "self", ".", "hiid_to_action_index", "=", "dict", "(", ")", "f", "=", "codecs", ".", "open", "(", "action_filename", ",", "'r'", ","...
Parses the file and populates the data.
[ "Parses", "the", "file", "and", "populates", "the", "data", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L105-L125
19,029
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._link_to_action_mentions
def _link_to_action_mentions(self, actionmention_filename): """Add action mentions""" parser = GenewaysActionMentionParser(actionmention_filename) self.action_mentions = parser.action_mentions for action_mention in self.action_mentions: hiid = action_mention.hiid if hiid not in self.hiid_to_action_index: m1 = 'Parsed action mention has hiid %d, which does not exist' m2 = ' in table of action hiids' raise Exception((m1 + m2) % hiid) else: idx = self.hiid_to_action_index[hiid] self.actions[idx].action_mentions.append(action_mention)
python
def _link_to_action_mentions(self, actionmention_filename): parser = GenewaysActionMentionParser(actionmention_filename) self.action_mentions = parser.action_mentions for action_mention in self.action_mentions: hiid = action_mention.hiid if hiid not in self.hiid_to_action_index: m1 = 'Parsed action mention has hiid %d, which does not exist' m2 = ' in table of action hiids' raise Exception((m1 + m2) % hiid) else: idx = self.hiid_to_action_index[hiid] self.actions[idx].action_mentions.append(action_mention)
[ "def", "_link_to_action_mentions", "(", "self", ",", "actionmention_filename", ")", ":", "parser", "=", "GenewaysActionMentionParser", "(", "actionmention_filename", ")", "self", ".", "action_mentions", "=", "parser", ".", "action_mentions", "for", "action_mention", "in...
Add action mentions
[ "Add", "action", "mentions" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L127-L140
19,030
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser._lookup_symbols
def _lookup_symbols(self, symbols_filename): """Look up symbols for actions and action mentions""" symbol_lookup = GenewaysSymbols(symbols_filename) for action in self.actions: action.up_symbol = symbol_lookup.id_to_symbol(action.up) action.dn_symbol = symbol_lookup.id_to_symbol(action.dn)
python
def _lookup_symbols(self, symbols_filename): symbol_lookup = GenewaysSymbols(symbols_filename) for action in self.actions: action.up_symbol = symbol_lookup.id_to_symbol(action.up) action.dn_symbol = symbol_lookup.id_to_symbol(action.dn)
[ "def", "_lookup_symbols", "(", "self", ",", "symbols_filename", ")", ":", "symbol_lookup", "=", "GenewaysSymbols", "(", "symbols_filename", ")", "for", "action", "in", "self", ".", "actions", ":", "action", ".", "up_symbol", "=", "symbol_lookup", ".", "id_to_sym...
Look up symbols for actions and action mentions
[ "Look", "up", "symbols", "for", "actions", "and", "action", "mentions" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L142-L147
19,031
sorgerlab/indra
indra/sources/geneways/action_parser.py
GenewaysActionParser.get_top_n_action_types
def get_top_n_action_types(self, top_n): """Returns the top N actions by count.""" # Count action types action_type_to_counts = dict() for action in self.actions: actiontype = action.actiontype if actiontype not in action_type_to_counts: action_type_to_counts[actiontype] = 1 else: action_type_to_counts[actiontype] = \ action_type_to_counts[actiontype] + 1 # Convert the dictionary representation into a pair of lists action_types = list() counts = list() for actiontype in action_type_to_counts.keys(): action_types.append(actiontype) counts.append(action_type_to_counts[actiontype]) # How many actions in total? num_actions = len(self.actions) num_actions2 = 0 for count in counts: num_actions2 = num_actions2 + count if num_actions != num_actions2: raise(Exception('Problem counting everything up!')) # Sort action types by count (lowest to highest) sorted_inds = np.argsort(counts) last_ind = len(sorted_inds)-1 # Return the top N actions top_actions = list() if top_n > len(sorted_inds): raise Exception('Asked for top %d action types, ' + 'but there are only %d action types' % (top_n, len(sorted_inds))) for i in range(top_n): top_actions.append(action_types[sorted_inds[last_ind-i]]) return top_actions
python
def get_top_n_action_types(self, top_n): # Count action types action_type_to_counts = dict() for action in self.actions: actiontype = action.actiontype if actiontype not in action_type_to_counts: action_type_to_counts[actiontype] = 1 else: action_type_to_counts[actiontype] = \ action_type_to_counts[actiontype] + 1 # Convert the dictionary representation into a pair of lists action_types = list() counts = list() for actiontype in action_type_to_counts.keys(): action_types.append(actiontype) counts.append(action_type_to_counts[actiontype]) # How many actions in total? num_actions = len(self.actions) num_actions2 = 0 for count in counts: num_actions2 = num_actions2 + count if num_actions != num_actions2: raise(Exception('Problem counting everything up!')) # Sort action types by count (lowest to highest) sorted_inds = np.argsort(counts) last_ind = len(sorted_inds)-1 # Return the top N actions top_actions = list() if top_n > len(sorted_inds): raise Exception('Asked for top %d action types, ' + 'but there are only %d action types' % (top_n, len(sorted_inds))) for i in range(top_n): top_actions.append(action_types[sorted_inds[last_ind-i]]) return top_actions
[ "def", "get_top_n_action_types", "(", "self", ",", "top_n", ")", ":", "# Count action types", "action_type_to_counts", "=", "dict", "(", ")", "for", "action", "in", "self", ".", "actions", ":", "actiontype", "=", "action", ".", "actiontype", "if", "actiontype", ...
Returns the top N actions by count.
[ "Returns", "the", "top", "N", "actions", "by", "count", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/action_parser.py#L149-L188
19,032
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler.get_string
def get_string(self): """Return the assembled graph as a string. Returns ------- graph_string : str The assembled graph as a string. """ graph_string = self.graph.to_string() graph_string = graph_string.replace('\\N', '\\n') return graph_string
python
def get_string(self): graph_string = self.graph.to_string() graph_string = graph_string.replace('\\N', '\\n') return graph_string
[ "def", "get_string", "(", "self", ")", ":", "graph_string", "=", "self", ".", "graph", ".", "to_string", "(", ")", "graph_string", "=", "graph_string", ".", "replace", "(", "'\\\\N'", ",", "'\\\\n'", ")", "return", "graph_string" ]
Return the assembled graph as a string. Returns ------- graph_string : str The assembled graph as a string.
[ "Return", "the", "assembled", "graph", "as", "a", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L146-L156
19,033
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler.save_dot
def save_dot(self, file_name='graph.dot'): """Save the graph in a graphviz dot file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph dot string to. """ s = self.get_string() with open(file_name, 'wt') as fh: fh.write(s)
python
def save_dot(self, file_name='graph.dot'): s = self.get_string() with open(file_name, 'wt') as fh: fh.write(s)
[ "def", "save_dot", "(", "self", ",", "file_name", "=", "'graph.dot'", ")", ":", "s", "=", "self", ".", "get_string", "(", ")", "with", "open", "(", "file_name", ",", "'wt'", ")", "as", "fh", ":", "fh", ".", "write", "(", "s", ")" ]
Save the graph in a graphviz dot file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph dot string to.
[ "Save", "the", "graph", "in", "a", "graphviz", "dot", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L158-L168
19,034
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler.save_pdf
def save_pdf(self, file_name='graph.pdf', prog='dot'): """Draw the graph and save as an image or pdf file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph as. Default: graph.pdf prog : Optional[str] The graphviz program to use for graph layout. Default: dot """ self.graph.draw(file_name, prog=prog)
python
def save_pdf(self, file_name='graph.pdf', prog='dot'): self.graph.draw(file_name, prog=prog)
[ "def", "save_pdf", "(", "self", ",", "file_name", "=", "'graph.pdf'", ",", "prog", "=", "'dot'", ")", ":", "self", ".", "graph", ".", "draw", "(", "file_name", ",", "prog", "=", "prog", ")" ]
Draw the graph and save as an image or pdf file. Parameters ---------- file_name : Optional[str] The name of the file to save the graph as. Default: graph.pdf prog : Optional[str] The graphviz program to use for graph layout. Default: dot
[ "Draw", "the", "graph", "and", "save", "as", "an", "image", "or", "pdf", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L170-L180
19,035
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_edge
def _add_edge(self, source, target, **kwargs): """Add an edge to the graph.""" # Start with default edge properties edge_properties = self.edge_properties # Overwrite ones that are given in function call explicitly for k, v in kwargs.items(): edge_properties[k] = v self.graph.add_edge(source, target, **edge_properties)
python
def _add_edge(self, source, target, **kwargs): # Start with default edge properties edge_properties = self.edge_properties # Overwrite ones that are given in function call explicitly for k, v in kwargs.items(): edge_properties[k] = v self.graph.add_edge(source, target, **edge_properties)
[ "def", "_add_edge", "(", "self", ",", "source", ",", "target", ",", "*", "*", "kwargs", ")", ":", "# Start with default edge properties", "edge_properties", "=", "self", ".", "edge_properties", "# Overwrite ones that are given in function call explicitly", "for", "k", "...
Add an edge to the graph.
[ "Add", "an", "edge", "to", "the", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L182-L189
19,036
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_node
def _add_node(self, agent): """Add an Agent as a node to the graph.""" if agent is None: return node_label = _get_node_label(agent) if isinstance(agent, Agent) and agent.bound_conditions: bound_agents = [bc.agent for bc in agent.bound_conditions if bc.is_bound] if bound_agents: bound_names = [_get_node_label(a) for a in bound_agents] node_label = _get_node_label(agent) + '/' + \ '/'.join(bound_names) self._complex_nodes.append([agent] + bound_agents) else: node_label = _get_node_label(agent) node_key = _get_node_key(agent) if node_key in self.existing_nodes: return self.existing_nodes.append(node_key) self.graph.add_node(node_key, label=node_label, **self.node_properties)
python
def _add_node(self, agent): if agent is None: return node_label = _get_node_label(agent) if isinstance(agent, Agent) and agent.bound_conditions: bound_agents = [bc.agent for bc in agent.bound_conditions if bc.is_bound] if bound_agents: bound_names = [_get_node_label(a) for a in bound_agents] node_label = _get_node_label(agent) + '/' + \ '/'.join(bound_names) self._complex_nodes.append([agent] + bound_agents) else: node_label = _get_node_label(agent) node_key = _get_node_key(agent) if node_key in self.existing_nodes: return self.existing_nodes.append(node_key) self.graph.add_node(node_key, label=node_label, **self.node_properties)
[ "def", "_add_node", "(", "self", ",", "agent", ")", ":", "if", "agent", "is", "None", ":", "return", "node_label", "=", "_get_node_label", "(", "agent", ")", "if", "isinstance", "(", "agent", ",", "Agent", ")", "and", "agent", ".", "bound_conditions", ":...
Add an Agent as a node to the graph.
[ "Add", "an", "Agent", "as", "a", "node", "to", "the", "graph", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L191-L212
19,037
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_stmt_edge
def _add_stmt_edge(self, stmt): """Assemble a Modification statement.""" # Skip statements with None in the subject position source = _get_node_key(stmt.agent_list()[0]) target = _get_node_key(stmt.agent_list()[1]) edge_key = (source, target, stmt.__class__.__name__) if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) if isinstance(stmt, RemoveModification) or \ isinstance(stmt, Inhibition) or \ isinstance(stmt, DecreaseAmount) or \ isinstance(stmt, Gap) or \ (isinstance(stmt, Influence) and stmt.overall_polarity() == -1): color = '#ff0000' else: color = '#000000' params = {'color': color, 'arrowhead': 'normal', 'dir': 'forward'} self._add_edge(source, target, **params)
python
def _add_stmt_edge(self, stmt): # Skip statements with None in the subject position source = _get_node_key(stmt.agent_list()[0]) target = _get_node_key(stmt.agent_list()[1]) edge_key = (source, target, stmt.__class__.__name__) if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) if isinstance(stmt, RemoveModification) or \ isinstance(stmt, Inhibition) or \ isinstance(stmt, DecreaseAmount) or \ isinstance(stmt, Gap) or \ (isinstance(stmt, Influence) and stmt.overall_polarity() == -1): color = '#ff0000' else: color = '#000000' params = {'color': color, 'arrowhead': 'normal', 'dir': 'forward'} self._add_edge(source, target, **params)
[ "def", "_add_stmt_edge", "(", "self", ",", "stmt", ")", ":", "# Skip statements with None in the subject position", "source", "=", "_get_node_key", "(", "stmt", ".", "agent_list", "(", ")", "[", "0", "]", ")", "target", "=", "_get_node_key", "(", "stmt", ".", ...
Assemble a Modification statement.
[ "Assemble", "a", "Modification", "statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L214-L234
19,038
sorgerlab/indra
indra/assemblers/graph/assembler.py
GraphAssembler._add_complex
def _add_complex(self, members, is_association=False): """Assemble a Complex statement.""" params = {'color': '#0000ff', 'arrowhead': 'dot', 'arrowtail': 'dot', 'dir': 'both'} for m1, m2 in itertools.combinations(members, 2): if self._has_complex_node(m1, m2): continue if is_association: m1_key = _get_node_key(m1.concept) m2_key = _get_node_key(m2.concept) else: m1_key = _get_node_key(m1) m2_key = _get_node_key(m2) edge_key = (set([m1_key, m2_key]), 'complex') if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) self._add_edge(m1_key, m2_key, **params)
python
def _add_complex(self, members, is_association=False): params = {'color': '#0000ff', 'arrowhead': 'dot', 'arrowtail': 'dot', 'dir': 'both'} for m1, m2 in itertools.combinations(members, 2): if self._has_complex_node(m1, m2): continue if is_association: m1_key = _get_node_key(m1.concept) m2_key = _get_node_key(m2.concept) else: m1_key = _get_node_key(m1) m2_key = _get_node_key(m2) edge_key = (set([m1_key, m2_key]), 'complex') if edge_key in self.existing_edges: return self.existing_edges.append(edge_key) self._add_edge(m1_key, m2_key, **params)
[ "def", "_add_complex", "(", "self", ",", "members", ",", "is_association", "=", "False", ")", ":", "params", "=", "{", "'color'", ":", "'#0000ff'", ",", "'arrowhead'", ":", "'dot'", ",", "'arrowtail'", ":", "'dot'", ",", "'dir'", ":", "'both'", "}", "for...
Assemble a Complex statement.
[ "Assemble", "a", "Complex", "statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/graph/assembler.py#L236-L255
19,039
sorgerlab/indra
indra/sources/signor/api.py
process_from_file
def process_from_file(signor_data_file, signor_complexes_file=None): """Process Signor interaction data from CSV files. Parameters ---------- signor_data_file : str Path to the Signor interaction data file in CSV format. signor_complexes_file : str Path to the Signor complexes data in CSV format. If unspecified, Signor complexes will not be expanded to their constitutents. Returns ------- indra.sources.signor.SignorProcessor SignorProcessor containing Statements extracted from the Signor data. """ # Get generator over the CSV file data_iter = read_unicode_csv(signor_data_file, delimiter=';', skiprows=1) complexes_iter = None if signor_complexes_file: complexes_iter = read_unicode_csv(signor_complexes_file, delimiter=';', skiprows=1) else: logger.warning('Signor complex mapping file not provided, Statements ' 'involving complexes will not be expanded to members.') return _processor_from_data(data_iter, complexes_iter)
python
def process_from_file(signor_data_file, signor_complexes_file=None): # Get generator over the CSV file data_iter = read_unicode_csv(signor_data_file, delimiter=';', skiprows=1) complexes_iter = None if signor_complexes_file: complexes_iter = read_unicode_csv(signor_complexes_file, delimiter=';', skiprows=1) else: logger.warning('Signor complex mapping file not provided, Statements ' 'involving complexes will not be expanded to members.') return _processor_from_data(data_iter, complexes_iter)
[ "def", "process_from_file", "(", "signor_data_file", ",", "signor_complexes_file", "=", "None", ")", ":", "# Get generator over the CSV file", "data_iter", "=", "read_unicode_csv", "(", "signor_data_file", ",", "delimiter", "=", "';'", ",", "skiprows", "=", "1", ")", ...
Process Signor interaction data from CSV files. Parameters ---------- signor_data_file : str Path to the Signor interaction data file in CSV format. signor_complexes_file : str Path to the Signor complexes data in CSV format. If unspecified, Signor complexes will not be expanded to their constitutents. Returns ------- indra.sources.signor.SignorProcessor SignorProcessor containing Statements extracted from the Signor data.
[ "Process", "Signor", "interaction", "data", "from", "CSV", "files", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/signor/api.py#L47-L72
19,040
sorgerlab/indra
indra/sources/signor/api.py
_handle_response
def _handle_response(res, delimiter): """Get an iterator over the CSV data from the response.""" if res.status_code == 200: # Python 2 -- csv.reader will need bytes if sys.version_info[0] < 3: csv_io = BytesIO(res.content) # Python 3 -- csv.reader needs str else: csv_io = StringIO(res.text) data_iter = read_unicode_csv_fileobj(csv_io, delimiter=delimiter, skiprows=1) else: raise Exception('Could not download Signor data.') return data_iter
python
def _handle_response(res, delimiter): if res.status_code == 200: # Python 2 -- csv.reader will need bytes if sys.version_info[0] < 3: csv_io = BytesIO(res.content) # Python 3 -- csv.reader needs str else: csv_io = StringIO(res.text) data_iter = read_unicode_csv_fileobj(csv_io, delimiter=delimiter, skiprows=1) else: raise Exception('Could not download Signor data.') return data_iter
[ "def", "_handle_response", "(", "res", ",", "delimiter", ")", ":", "if", "res", ".", "status_code", "==", "200", ":", "# Python 2 -- csv.reader will need bytes", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "csv_io", "=", "BytesIO", "(", ...
Get an iterator over the CSV data from the response.
[ "Get", "an", "iterator", "over", "the", "CSV", "data", "from", "the", "response", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/signor/api.py#L89-L102
19,041
sorgerlab/indra
indra/databases/context_client.py
get_protein_expression
def get_protein_expression(gene_names, cell_types): """Return the protein expression levels of genes in cell types. Parameters ---------- gene_names : list HGNC gene symbols for which expression levels are queried. cell_types : list List of cell type names in which expression levels are queried. The cell type names follow the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST Returns ------- res : dict[dict[float]] A dictionary keyed by cell line, which contains another dictionary that is keyed by gene name, with estimated protein amounts as values. """ A = 0.2438361 B = 3.0957627 mrna_amounts = cbio_client.get_ccle_mrna(gene_names, cell_types) protein_amounts = copy(mrna_amounts) for cell_type in cell_types: amounts = mrna_amounts.get(cell_type) if amounts is None: continue for gene_name, amount in amounts.items(): if amount is not None: protein_amount = 10**(A * amount + B) protein_amounts[cell_type][gene_name] = protein_amount return protein_amounts
python
def get_protein_expression(gene_names, cell_types): A = 0.2438361 B = 3.0957627 mrna_amounts = cbio_client.get_ccle_mrna(gene_names, cell_types) protein_amounts = copy(mrna_amounts) for cell_type in cell_types: amounts = mrna_amounts.get(cell_type) if amounts is None: continue for gene_name, amount in amounts.items(): if amount is not None: protein_amount = 10**(A * amount + B) protein_amounts[cell_type][gene_name] = protein_amount return protein_amounts
[ "def", "get_protein_expression", "(", "gene_names", ",", "cell_types", ")", ":", "A", "=", "0.2438361", "B", "=", "3.0957627", "mrna_amounts", "=", "cbio_client", ".", "get_ccle_mrna", "(", "gene_names", ",", "cell_types", ")", "protein_amounts", "=", "copy", "(...
Return the protein expression levels of genes in cell types. Parameters ---------- gene_names : list HGNC gene symbols for which expression levels are queried. cell_types : list List of cell type names in which expression levels are queried. The cell type names follow the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST Returns ------- res : dict[dict[float]] A dictionary keyed by cell line, which contains another dictionary that is keyed by gene name, with estimated protein amounts as values.
[ "Return", "the", "protein", "expression", "levels", "of", "genes", "in", "cell", "types", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/context_client.py#L13-L44
19,042
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_aspect
def get_aspect(cx, aspect_name): """Return an aspect given the name of the aspect""" if isinstance(cx, dict): return cx.get(aspect_name) for entry in cx: if list(entry.keys())[0] == aspect_name: return entry[aspect_name]
python
def get_aspect(cx, aspect_name): if isinstance(cx, dict): return cx.get(aspect_name) for entry in cx: if list(entry.keys())[0] == aspect_name: return entry[aspect_name]
[ "def", "get_aspect", "(", "cx", ",", "aspect_name", ")", ":", "if", "isinstance", "(", "cx", ",", "dict", ")", ":", "return", "cx", ".", "get", "(", "aspect_name", ")", "for", "entry", "in", "cx", ":", "if", "list", "(", "entry", ".", "keys", "(", ...
Return an aspect given the name of the aspect
[ "Return", "an", "aspect", "given", "the", "name", "of", "the", "aspect" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L13-L19
19,043
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
classify_nodes
def classify_nodes(graph, hub): """Classify each node based on its type and relationship to the hub.""" node_stats = defaultdict(lambda: defaultdict(list)) for u, v, data in graph.edges(data=True): # This means the node is downstream of the hub if hub == u: h, o = u, v if data['i'] != 'Complex': node_stats[o]['up'].append(-1) else: node_stats[o]['up'].append(0) # This means the node is upstream of the hub elif hub == v: h, o = v, u if data['i'] != 'Complex': node_stats[o]['up'].append(1) else: node_stats[o]['up'].append(0) else: continue node_stats[o]['interaction'].append(edge_type_to_class(data['i'])) node_classes = {} for node_id, stats in node_stats.items(): up = max(set(stats['up']), key=stats['up'].count) # Special case: if up is not 0 then we should exclude complexes # from the edge_type states so that we don't end up with # (-1, complex, ...) or (1, complex, ...) as the node class interactions = [i for i in stats['interaction'] if not (up != 0 and i == 'complex')] edge_type = max(set(interactions), key=interactions.count) node_type = graph.nodes[node_id]['type'] node_classes[node_id] = (up, edge_type, node_type) return node_classes
python
def classify_nodes(graph, hub): node_stats = defaultdict(lambda: defaultdict(list)) for u, v, data in graph.edges(data=True): # This means the node is downstream of the hub if hub == u: h, o = u, v if data['i'] != 'Complex': node_stats[o]['up'].append(-1) else: node_stats[o]['up'].append(0) # This means the node is upstream of the hub elif hub == v: h, o = v, u if data['i'] != 'Complex': node_stats[o]['up'].append(1) else: node_stats[o]['up'].append(0) else: continue node_stats[o]['interaction'].append(edge_type_to_class(data['i'])) node_classes = {} for node_id, stats in node_stats.items(): up = max(set(stats['up']), key=stats['up'].count) # Special case: if up is not 0 then we should exclude complexes # from the edge_type states so that we don't end up with # (-1, complex, ...) or (1, complex, ...) as the node class interactions = [i for i in stats['interaction'] if not (up != 0 and i == 'complex')] edge_type = max(set(interactions), key=interactions.count) node_type = graph.nodes[node_id]['type'] node_classes[node_id] = (up, edge_type, node_type) return node_classes
[ "def", "classify_nodes", "(", "graph", ",", "hub", ")", ":", "node_stats", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "list", ")", ")", "for", "u", ",", "v", ",", "data", "in", "graph", ".", "edges", "(", "data", "=", "True", ")", ...
Classify each node based on its type and relationship to the hub.
[ "Classify", "each", "node", "based", "on", "its", "type", "and", "relationship", "to", "the", "hub", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L34-L67
19,044
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_attributes
def get_attributes(aspect, id): """Return the attributes pointing to a given ID in a given aspect.""" attributes = {} for entry in aspect: if entry['po'] == id: attributes[entry['n']] = entry['v'] return attributes
python
def get_attributes(aspect, id): attributes = {} for entry in aspect: if entry['po'] == id: attributes[entry['n']] = entry['v'] return attributes
[ "def", "get_attributes", "(", "aspect", ",", "id", ")", ":", "attributes", "=", "{", "}", "for", "entry", "in", "aspect", ":", "if", "entry", "[", "'po'", "]", "==", "id", ":", "attributes", "[", "entry", "[", "'n'", "]", "]", "=", "entry", "[", ...
Return the attributes pointing to a given ID in a given aspect.
[ "Return", "the", "attributes", "pointing", "to", "a", "given", "ID", "in", "a", "given", "aspect", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L70-L76
19,045
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
cx_to_networkx
def cx_to_networkx(cx): """Return a MultiDiGraph representation of a CX network.""" graph = networkx.MultiDiGraph() for node_entry in get_aspect(cx, 'nodes'): id = node_entry['@id'] attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id) attrs['n'] = node_entry['n'] graph.add_node(id, **attrs) for edge_entry in get_aspect(cx, 'edges'): id = edge_entry['@id'] attrs = get_attributes(get_aspect(cx, 'edgeAttributes'), id) attrs['i'] = edge_entry['i'] graph.add_edge(edge_entry['s'], edge_entry['t'], key=id, **attrs) return graph
python
def cx_to_networkx(cx): graph = networkx.MultiDiGraph() for node_entry in get_aspect(cx, 'nodes'): id = node_entry['@id'] attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id) attrs['n'] = node_entry['n'] graph.add_node(id, **attrs) for edge_entry in get_aspect(cx, 'edges'): id = edge_entry['@id'] attrs = get_attributes(get_aspect(cx, 'edgeAttributes'), id) attrs['i'] = edge_entry['i'] graph.add_edge(edge_entry['s'], edge_entry['t'], key=id, **attrs) return graph
[ "def", "cx_to_networkx", "(", "cx", ")", ":", "graph", "=", "networkx", ".", "MultiDiGraph", "(", ")", "for", "node_entry", "in", "get_aspect", "(", "cx", ",", "'nodes'", ")", ":", "id", "=", "node_entry", "[", "'@id'", "]", "attrs", "=", "get_attributes...
Return a MultiDiGraph representation of a CX network.
[ "Return", "a", "MultiDiGraph", "representation", "of", "a", "CX", "network", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L79-L92
19,046
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_quadrant_from_class
def get_quadrant_from_class(node_class): """Return the ID of the segment of the plane corresponding to a class.""" up, edge_type, _ = node_class if up == 0: return 0 if random.random() < 0.5 else 7 mappings = {(-1, 'modification'): 1, (-1, 'amount'): 2, (-1, 'activity'): 3, (1, 'activity'): 4, (1, 'amount'): 5, (1, 'modification'): 6} return mappings[(up, edge_type)]
python
def get_quadrant_from_class(node_class): up, edge_type, _ = node_class if up == 0: return 0 if random.random() < 0.5 else 7 mappings = {(-1, 'modification'): 1, (-1, 'amount'): 2, (-1, 'activity'): 3, (1, 'activity'): 4, (1, 'amount'): 5, (1, 'modification'): 6} return mappings[(up, edge_type)]
[ "def", "get_quadrant_from_class", "(", "node_class", ")", ":", "up", ",", "edge_type", ",", "_", "=", "node_class", "if", "up", "==", "0", ":", "return", "0", "if", "random", ".", "random", "(", ")", "<", "0.5", "else", "7", "mappings", "=", "{", "("...
Return the ID of the segment of the plane corresponding to a class.
[ "Return", "the", "ID", "of", "the", "segment", "of", "the", "plane", "corresponding", "to", "a", "class", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L95-L106
19,047
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_coordinates
def get_coordinates(node_class): """Generate coordinates for a node in a given class.""" quadrant_size = (2 * math.pi / 8.0) quadrant = get_quadrant_from_class(node_class) begin_angle = quadrant_size * quadrant r = 200 + 800*random.random() alpha = begin_angle + random.random() * quadrant_size x = r * math.cos(alpha) y = r * math.sin(alpha) return x, y
python
def get_coordinates(node_class): quadrant_size = (2 * math.pi / 8.0) quadrant = get_quadrant_from_class(node_class) begin_angle = quadrant_size * quadrant r = 200 + 800*random.random() alpha = begin_angle + random.random() * quadrant_size x = r * math.cos(alpha) y = r * math.sin(alpha) return x, y
[ "def", "get_coordinates", "(", "node_class", ")", ":", "quadrant_size", "=", "(", "2", "*", "math", ".", "pi", "/", "8.0", ")", "quadrant", "=", "get_quadrant_from_class", "(", "node_class", ")", "begin_angle", "=", "quadrant_size", "*", "quadrant", "r", "="...
Generate coordinates for a node in a given class.
[ "Generate", "coordinates", "for", "a", "node", "in", "a", "given", "class", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L109-L118
19,048
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_layout_aspect
def get_layout_aspect(hub, node_classes): """Get the full layout aspect with coordinates for each node.""" aspect = [{'node': hub, 'x': 0.0, 'y': 0.0}] for node, node_class in node_classes.items(): if node == hub: continue x, y = get_coordinates(node_class) aspect.append({'node': node, 'x': x, 'y': y}) return aspect
python
def get_layout_aspect(hub, node_classes): aspect = [{'node': hub, 'x': 0.0, 'y': 0.0}] for node, node_class in node_classes.items(): if node == hub: continue x, y = get_coordinates(node_class) aspect.append({'node': node, 'x': x, 'y': y}) return aspect
[ "def", "get_layout_aspect", "(", "hub", ",", "node_classes", ")", ":", "aspect", "=", "[", "{", "'node'", ":", "hub", ",", "'x'", ":", "0.0", ",", "'y'", ":", "0.0", "}", "]", "for", "node", ",", "node_class", "in", "node_classes", ".", "items", "(",...
Get the full layout aspect with coordinates for each node.
[ "Get", "the", "full", "layout", "aspect", "with", "coordinates", "for", "each", "node", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L121-L129
19,049
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
get_node_by_name
def get_node_by_name(graph, name): """Return a node ID given its name.""" for id, attrs in graph.nodes(data=True): if attrs['n'] == name: return id
python
def get_node_by_name(graph, name): for id, attrs in graph.nodes(data=True): if attrs['n'] == name: return id
[ "def", "get_node_by_name", "(", "graph", ",", "name", ")", ":", "for", "id", ",", "attrs", "in", "graph", ".", "nodes", "(", "data", "=", "True", ")", ":", "if", "attrs", "[", "'n'", "]", "==", "name", ":", "return", "id" ]
Return a node ID given its name.
[ "Return", "a", "node", "ID", "given", "its", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L132-L136
19,050
sorgerlab/indra
indra/assemblers/cx/hub_layout.py
add_semantic_hub_layout
def add_semantic_hub_layout(cx, hub): """Attach a layout aspect to a CX network given a hub node.""" graph = cx_to_networkx(cx) hub_node = get_node_by_name(graph, hub) node_classes = classify_nodes(graph, hub_node) layout_aspect = get_layout_aspect(hub_node, node_classes) cx['cartesianLayout'] = layout_aspect
python
def add_semantic_hub_layout(cx, hub): graph = cx_to_networkx(cx) hub_node = get_node_by_name(graph, hub) node_classes = classify_nodes(graph, hub_node) layout_aspect = get_layout_aspect(hub_node, node_classes) cx['cartesianLayout'] = layout_aspect
[ "def", "add_semantic_hub_layout", "(", "cx", ",", "hub", ")", ":", "graph", "=", "cx_to_networkx", "(", "cx", ")", "hub_node", "=", "get_node_by_name", "(", "graph", ",", "hub", ")", "node_classes", "=", "classify_nodes", "(", "graph", ",", "hub_node", ")", ...
Attach a layout aspect to a CX network given a hub node.
[ "Attach", "a", "layout", "aspect", "to", "a", "CX", "network", "given", "a", "hub", "node", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cx/hub_layout.py#L139-L145
19,051
sorgerlab/indra
indra/literature/crossref_client.py
get_metadata
def get_metadata(doi): """Returns the metadata of an article given its DOI from CrossRef as a JSON dict""" url = crossref_url + 'works/' + doi res = requests.get(url) if res.status_code != 200: logger.info('Could not get CrossRef metadata for DOI %s, code %d' % (doi, res.status_code)) return None raw_message = res.json() metadata = raw_message.get('message') return metadata
python
def get_metadata(doi): url = crossref_url + 'works/' + doi res = requests.get(url) if res.status_code != 200: logger.info('Could not get CrossRef metadata for DOI %s, code %d' % (doi, res.status_code)) return None raw_message = res.json() metadata = raw_message.get('message') return metadata
[ "def", "get_metadata", "(", "doi", ")", ":", "url", "=", "crossref_url", "+", "'works/'", "+", "doi", "res", "=", "requests", ".", "get", "(", "url", ")", "if", "res", ".", "status_code", "!=", "200", ":", "logger", ".", "info", "(", "'Could not get Cr...
Returns the metadata of an article given its DOI from CrossRef as a JSON dict
[ "Returns", "the", "metadata", "of", "an", "article", "given", "its", "DOI", "from", "CrossRef", "as", "a", "JSON", "dict" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/crossref_client.py#L30-L41
19,052
sorgerlab/indra
indra/literature/crossref_client.py
doi_query
def doi_query(pmid, search_limit=10): """Get the DOI for a PMID by matching CrossRef and Pubmed metadata. Searches CrossRef using the article title and then accepts search hits only if they have a matching journal ISSN and page number with what is obtained from the Pubmed database. """ # Get article metadata from PubMed pubmed_meta_dict = pubmed_client.get_metadata_for_ids([pmid], get_issns_from_nlm=True) if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None: logger.warning('No metadata found in Pubmed for PMID%s' % pmid) return None # The test above ensures we've got this now pubmed_meta = pubmed_meta_dict[pmid] # Check if we already got a DOI from Pubmed itself! if pubmed_meta.get('doi'): return pubmed_meta.get('doi') # Check for the title, which we'll need for the CrossRef search pm_article_title = pubmed_meta.get('title') if pm_article_title is None: logger.warning('No article title found in Pubmed for PMID%s' % pmid) return None # Get the ISSN list pm_issn_list = pubmed_meta.get('issn_list') if not pm_issn_list: logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid) return None # Get the page number pm_page = pubmed_meta.get('page') if not pm_page: logger.debug('No page number found in Pubmed for PMID%s' % pmid) return None # Now query CrossRef using the title we've got url = crossref_search_url params = {'q': pm_article_title, 'sort': 'score'} try: res = requests.get(crossref_search_url, params) except requests.exceptions.ConnectionError as e: logger.error('CrossRef service could not be reached.') logger.error(e) return None except Exception as e: logger.error('Error accessing CrossRef service: %s' % str(e)) return None if res.status_code != 200: logger.info('PMID%s: no search results from CrossRef, code %d' % (pmid, res.status_code)) return None raw_message = res.json() mapped_doi = None # Iterate over the search results, looking up XREF metadata for result_ix, result in enumerate(raw_message): if result_ix > search_limit: 
logger.info('PMID%s: No match found within first %s results, ' 'giving up!' % (pmid, search_limit)) break xref_doi_url = result['doi'] # Strip the URL prefix off of the DOI m = re.match('^http://dx.doi.org/(.*)$', xref_doi_url) xref_doi = m.groups()[0] # Get the XREF metadata using the DOI xref_meta = get_metadata(xref_doi) if xref_meta is None: continue xref_issn_list = xref_meta.get('ISSN') xref_page = xref_meta.get('page') # If there's no ISSN info for this article, skip to the next result if not xref_issn_list: logger.debug('No ISSN found for DOI %s, skipping' % xref_doi_url) continue # If there's no page info for this article, skip to the next result if not xref_page: logger.debug('No page number found for DOI %s, skipping' % xref_doi_url) continue # Now check for an ISSN match by looking for the set intersection # between the Pubmed ISSN list and the CrossRef ISSN list. matching_issns = set(pm_issn_list).intersection(set(xref_issn_list)) # Before comparing page numbers, regularize the page numbers a bit. # Note that we only compare the first page number, since frequently # the final page number will simply be missing in one of the data # sources. We also canonicalize page numbers of the form '14E' to # 'E14' (which is the format used by Pubmed). pm_start_page = pm_page.split('-')[0].upper() xr_start_page = xref_page.split('-')[0].upper() if xr_start_page.endswith('E'): xr_start_page = 'E' + xr_start_page[:-1] # Now compare the ISSN list and page numbers if matching_issns and pm_start_page == xr_start_page: # We found a match! mapped_doi = xref_doi break # Otherwise, keep looking through the results... # Return a DOI, or None if we didn't find one that met our matching # criteria return mapped_doi
python
def doi_query(pmid, search_limit=10): # Get article metadata from PubMed pubmed_meta_dict = pubmed_client.get_metadata_for_ids([pmid], get_issns_from_nlm=True) if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None: logger.warning('No metadata found in Pubmed for PMID%s' % pmid) return None # The test above ensures we've got this now pubmed_meta = pubmed_meta_dict[pmid] # Check if we already got a DOI from Pubmed itself! if pubmed_meta.get('doi'): return pubmed_meta.get('doi') # Check for the title, which we'll need for the CrossRef search pm_article_title = pubmed_meta.get('title') if pm_article_title is None: logger.warning('No article title found in Pubmed for PMID%s' % pmid) return None # Get the ISSN list pm_issn_list = pubmed_meta.get('issn_list') if not pm_issn_list: logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid) return None # Get the page number pm_page = pubmed_meta.get('page') if not pm_page: logger.debug('No page number found in Pubmed for PMID%s' % pmid) return None # Now query CrossRef using the title we've got url = crossref_search_url params = {'q': pm_article_title, 'sort': 'score'} try: res = requests.get(crossref_search_url, params) except requests.exceptions.ConnectionError as e: logger.error('CrossRef service could not be reached.') logger.error(e) return None except Exception as e: logger.error('Error accessing CrossRef service: %s' % str(e)) return None if res.status_code != 200: logger.info('PMID%s: no search results from CrossRef, code %d' % (pmid, res.status_code)) return None raw_message = res.json() mapped_doi = None # Iterate over the search results, looking up XREF metadata for result_ix, result in enumerate(raw_message): if result_ix > search_limit: logger.info('PMID%s: No match found within first %s results, ' 'giving up!' 
% (pmid, search_limit)) break xref_doi_url = result['doi'] # Strip the URL prefix off of the DOI m = re.match('^http://dx.doi.org/(.*)$', xref_doi_url) xref_doi = m.groups()[0] # Get the XREF metadata using the DOI xref_meta = get_metadata(xref_doi) if xref_meta is None: continue xref_issn_list = xref_meta.get('ISSN') xref_page = xref_meta.get('page') # If there's no ISSN info for this article, skip to the next result if not xref_issn_list: logger.debug('No ISSN found for DOI %s, skipping' % xref_doi_url) continue # If there's no page info for this article, skip to the next result if not xref_page: logger.debug('No page number found for DOI %s, skipping' % xref_doi_url) continue # Now check for an ISSN match by looking for the set intersection # between the Pubmed ISSN list and the CrossRef ISSN list. matching_issns = set(pm_issn_list).intersection(set(xref_issn_list)) # Before comparing page numbers, regularize the page numbers a bit. # Note that we only compare the first page number, since frequently # the final page number will simply be missing in one of the data # sources. We also canonicalize page numbers of the form '14E' to # 'E14' (which is the format used by Pubmed). pm_start_page = pm_page.split('-')[0].upper() xr_start_page = xref_page.split('-')[0].upper() if xr_start_page.endswith('E'): xr_start_page = 'E' + xr_start_page[:-1] # Now compare the ISSN list and page numbers if matching_issns and pm_start_page == xr_start_page: # We found a match! mapped_doi = xref_doi break # Otherwise, keep looking through the results... # Return a DOI, or None if we didn't find one that met our matching # criteria return mapped_doi
[ "def", "doi_query", "(", "pmid", ",", "search_limit", "=", "10", ")", ":", "# Get article metadata from PubMed", "pubmed_meta_dict", "=", "pubmed_client", ".", "get_metadata_for_ids", "(", "[", "pmid", "]", ",", "get_issns_from_nlm", "=", "True", ")", "if", "pubme...
Get the DOI for a PMID by matching CrossRef and Pubmed metadata. Searches CrossRef using the article title and then accepts search hits only if they have a matching journal ISSN and page number with what is obtained from the Pubmed database.
[ "Get", "the", "DOI", "for", "a", "PMID", "by", "matching", "CrossRef", "and", "Pubmed", "metadata", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/crossref_client.py#L81-L177
19,053
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_agent_rule_str
def get_agent_rule_str(agent): """Construct a string from an Agent as part of a PySB rule name.""" rule_str_list = [_n(agent.name)] # If it's a molecular agent if isinstance(agent, ist.Agent): for mod in agent.mods: mstr = abbrevs[mod.mod_type] if mod.residue is not None: mstr += mod.residue if mod.position is not None: mstr += mod.position rule_str_list.append('%s' % mstr) for mut in agent.mutations: res_from = mut.residue_from if mut.residue_from else 'mut' res_to = mut.residue_to if mut.residue_to else 'X' if mut.position is None: mut_site_name = res_from else: mut_site_name = res_from + mut.position mstr = mut_site_name + res_to rule_str_list.append(mstr) if agent.bound_conditions: for b in agent.bound_conditions: if b.is_bound: rule_str_list.append(_n(b.agent.name)) else: rule_str_list.append('n' + _n(b.agent.name)) if agent.location is not None: rule_str_list.append(_n(agent.location)) if agent.activity is not None: if agent.activity.is_active: rule_str_list.append(agent.activity.activity_type[:3]) else: rule_str_list.append(agent.activity.activity_type[:3] + '_inact') rule_str = '_'.join(rule_str_list) return rule_str
python
def get_agent_rule_str(agent): rule_str_list = [_n(agent.name)] # If it's a molecular agent if isinstance(agent, ist.Agent): for mod in agent.mods: mstr = abbrevs[mod.mod_type] if mod.residue is not None: mstr += mod.residue if mod.position is not None: mstr += mod.position rule_str_list.append('%s' % mstr) for mut in agent.mutations: res_from = mut.residue_from if mut.residue_from else 'mut' res_to = mut.residue_to if mut.residue_to else 'X' if mut.position is None: mut_site_name = res_from else: mut_site_name = res_from + mut.position mstr = mut_site_name + res_to rule_str_list.append(mstr) if agent.bound_conditions: for b in agent.bound_conditions: if b.is_bound: rule_str_list.append(_n(b.agent.name)) else: rule_str_list.append('n' + _n(b.agent.name)) if agent.location is not None: rule_str_list.append(_n(agent.location)) if agent.activity is not None: if agent.activity.is_active: rule_str_list.append(agent.activity.activity_type[:3]) else: rule_str_list.append(agent.activity.activity_type[:3] + '_inact') rule_str = '_'.join(rule_str_list) return rule_str
[ "def", "get_agent_rule_str", "(", "agent", ")", ":", "rule_str_list", "=", "[", "_n", "(", "agent", ".", "name", ")", "]", "# If it's a molecular agent", "if", "isinstance", "(", "agent", ",", "ist", ".", "Agent", ")", ":", "for", "mod", "in", "agent", "...
Construct a string from an Agent as part of a PySB rule name.
[ "Construct", "a", "string", "from", "an", "Agent", "as", "part", "of", "a", "PySB", "rule", "name", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L55-L90
19,054
sorgerlab/indra
indra/assemblers/pysb/assembler.py
add_rule_to_model
def add_rule_to_model(model, rule, annotations=None): """Add a Rule to a PySB model and handle duplicate component errors.""" try: model.add_component(rule) # If the rule was actually added, also add the annotations if annotations: model.annotations += annotations # If this rule is already in the model, issue a warning and continue except ComponentDuplicateNameError: msg = "Rule %s already in model! Skipping." % rule.name logger.debug(msg)
python
def add_rule_to_model(model, rule, annotations=None): try: model.add_component(rule) # If the rule was actually added, also add the annotations if annotations: model.annotations += annotations # If this rule is already in the model, issue a warning and continue except ComponentDuplicateNameError: msg = "Rule %s already in model! Skipping." % rule.name logger.debug(msg)
[ "def", "add_rule_to_model", "(", "model", ",", "rule", ",", "annotations", "=", "None", ")", ":", "try", ":", "model", ".", "add_component", "(", "rule", ")", "# If the rule was actually added, also add the annotations", "if", "annotations", ":", "model", ".", "an...
Add a Rule to a PySB model and handle duplicate component errors.
[ "Add", "a", "Rule", "to", "a", "PySB", "model", "and", "handle", "duplicate", "component", "errors", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L93-L103
19,055
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_create_parameter
def get_create_parameter(model, param): """Return parameter with given name, creating it if needed. If unique is false and the parameter exists, the value is not changed; if it does not exist, it will be created. If unique is true then upon conflict a number is added to the end of the parameter name. Parameters ---------- model : pysb.Model The model to add the parameter to param : Param An assembly parameter object """ norm_name = _n(param.name) parameter = model.parameters.get(norm_name) if not param.unique and parameter is not None: return parameter if param.unique: pnum = 1 while True: pname = norm_name + '_%d' % pnum if model.parameters.get(pname) is None: break pnum += 1 else: pname = norm_name parameter = Parameter(pname, param.value) model.add_component(parameter) return parameter
python
def get_create_parameter(model, param): norm_name = _n(param.name) parameter = model.parameters.get(norm_name) if not param.unique and parameter is not None: return parameter if param.unique: pnum = 1 while True: pname = norm_name + '_%d' % pnum if model.parameters.get(pname) is None: break pnum += 1 else: pname = norm_name parameter = Parameter(pname, param.value) model.add_component(parameter) return parameter
[ "def", "get_create_parameter", "(", "model", ",", "param", ")", ":", "norm_name", "=", "_n", "(", "param", ".", "name", ")", "parameter", "=", "model", ".", "parameters", ".", "get", "(", "norm_name", ")", "if", "not", "param", ".", "unique", "and", "p...
Return parameter with given name, creating it if needed. If unique is false and the parameter exists, the value is not changed; if it does not exist, it will be created. If unique is true then upon conflict a number is added to the end of the parameter name. Parameters ---------- model : pysb.Model The model to add the parameter to param : Param An assembly parameter object
[ "Return", "parameter", "with", "given", "name", "creating", "it", "if", "needed", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L106-L138
19,056
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_uncond_agent
def get_uncond_agent(agent): """Construct the unconditional state of an Agent. The unconditional Agent is a copy of the original agent but without any bound conditions and modification conditions. Mutation conditions, however, are preserved since they are static. """ agent_uncond = ist.Agent(_n(agent.name), mutations=agent.mutations) return agent_uncond
python
def get_uncond_agent(agent): agent_uncond = ist.Agent(_n(agent.name), mutations=agent.mutations) return agent_uncond
[ "def", "get_uncond_agent", "(", "agent", ")", ":", "agent_uncond", "=", "ist", ".", "Agent", "(", "_n", "(", "agent", ".", "name", ")", ",", "mutations", "=", "agent", ".", "mutations", ")", "return", "agent_uncond" ]
Construct the unconditional state of an Agent. The unconditional Agent is a copy of the original agent but without any bound conditions and modification conditions. Mutation conditions, however, are preserved since they are static.
[ "Construct", "the", "unconditional", "state", "of", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L141-L149
19,057
sorgerlab/indra
indra/assemblers/pysb/assembler.py
grounded_monomer_patterns
def grounded_monomer_patterns(model, agent, ignore_activities=False): """Get monomer patterns for the agent accounting for grounding information. Parameters ---------- model : pysb.core.Model The model to search for MonomerPatterns matching the given Agent. agent : indra.statements.Agent The Agent to find matching MonomerPatterns for. ignore_activites : bool Whether to ignore any ActivityConditions on the agent when determining the required site conditions for the MonomerPattern. For example, if set to True, will find a match for the agent `MAPK1(activity=kinase)` even if the corresponding MAPK1 Monomer in the model has no site named `kinase`. Default is False (more stringent matching). Returns ------- generator of MonomerPatterns """ # If it's not a molecular agent if not isinstance(agent, ist.Agent): monomer = model.monomers.get(agent.name) if not monomer: return yield monomer() # Iterate over all model annotations to identify the monomer associated # with this agent monomer = None for ann in model.annotations: if monomer: break if not ann.predicate == 'is': continue if not isinstance(ann.subject, Monomer): continue (ns, id) = parse_identifiers_url(ann.object) if ns is None and id is None: continue # We now have an identifiers.org namespace/ID for a given monomer; # we check to see if there is a matching identifier in the db_refs # for this agent for db_ns, db_id in agent.db_refs.items(): # We've found a match! Return first match # FIXME Could also update this to check for alternative # FIXME matches, or make sure that all grounding IDs match, # FIXME etc. if db_ns == ns and db_id == id: monomer = ann.subject break # We looked at all the annotations in the model and didn't find a # match if monomer is None: logger.info('No monomer found corresponding to agent %s' % agent) return # Now that we have a monomer for the agent, look for site/state # combinations corresponding to the state of the agent. 
For every one of # the modifications specified in the agent signature, check to see if it # can be satisfied based on the agent's annotations. For every one we find # that is consistent, we yield it--there may be more than one. # FIXME # Create a list of tuples, each one representing the site conditions # that can satisfy a particular agent condition. Each entry in the list # will contain a list of dicts associated with a particular mod/activity # condition. Each dict will represent a site/state combination satisfying # the constraints imposed by that mod/activity condition. sc_list = [] for mod in agent.mods: # Find all site/state combinations that have the appropriate # modification type # As we iterate, build up a dict identifying the annotations of # particular sites mod_sites = {} res_sites = set([]) pos_sites = set([]) for ann in monomer.site_annotations: # Don't forget to handle Nones! if ann.predicate == 'is_modification' and \ ann.object == mod.mod_type: site_state = ann.subject assert isinstance(site_state, tuple) assert len(site_state) == 2 mod_sites[site_state[0]] = site_state[1] elif ann.predicate == 'is_residue' and \ ann.object == mod.residue: res_sites.add(ann.subject) elif ann.predicate == 'is_position' and \ ann.object == mod.position: pos_sites.add(ann.subject) # If the residue field of the agent is specified, viable_sites = set(mod_sites.keys()) if mod.residue is not None: viable_sites = viable_sites.intersection(res_sites) if mod.position is not None: viable_sites = viable_sites.intersection(pos_sites) # If there are no viable sites annotated in the model matching the # available info in the mod condition, then we won't be able to # satisfy the conditions on this agent if not viable_sites: return # Otherwise, update the # If there are any sites left after we subject them to residue # and position constraints, then return the relevant monomer patterns! 
pattern_list = [] for site_name in viable_sites: pattern_list.append({site_name: (mod_sites[site_name], WILD)}) sc_list.append(pattern_list) # Now check for monomer patterns satisfying the agent's activity condition if agent.activity and not ignore_activities: # Iterate through annotations with this monomer as the subject # and a has_active_pattern or has_inactive_pattern relationship # FIXME: Currently activity type is not annotated/checked # FIXME act_type = agent.activity.activity_type rel_type = 'has_active_pattern' if agent.activity.is_active \ else 'has_inactive_pattern' active_form_list = [] for ann in model.annotations: if ann.subject == monomer and ann.predicate == rel_type: # The annotation object contains the active/inactive pattern active_form_list.append(ann.object) sc_list.append(active_form_list) # Now that we've got a list of conditions for pattern_combo in itertools.product(*sc_list): mp_sc = {} for pattern in pattern_combo: mp_sc.update(pattern) if mp_sc: yield monomer(**mp_sc) if not sc_list: yield monomer()
python
def grounded_monomer_patterns(model, agent, ignore_activities=False): # If it's not a molecular agent if not isinstance(agent, ist.Agent): monomer = model.monomers.get(agent.name) if not monomer: return yield monomer() # Iterate over all model annotations to identify the monomer associated # with this agent monomer = None for ann in model.annotations: if monomer: break if not ann.predicate == 'is': continue if not isinstance(ann.subject, Monomer): continue (ns, id) = parse_identifiers_url(ann.object) if ns is None and id is None: continue # We now have an identifiers.org namespace/ID for a given monomer; # we check to see if there is a matching identifier in the db_refs # for this agent for db_ns, db_id in agent.db_refs.items(): # We've found a match! Return first match # FIXME Could also update this to check for alternative # FIXME matches, or make sure that all grounding IDs match, # FIXME etc. if db_ns == ns and db_id == id: monomer = ann.subject break # We looked at all the annotations in the model and didn't find a # match if monomer is None: logger.info('No monomer found corresponding to agent %s' % agent) return # Now that we have a monomer for the agent, look for site/state # combinations corresponding to the state of the agent. For every one of # the modifications specified in the agent signature, check to see if it # can be satisfied based on the agent's annotations. For every one we find # that is consistent, we yield it--there may be more than one. # FIXME # Create a list of tuples, each one representing the site conditions # that can satisfy a particular agent condition. Each entry in the list # will contain a list of dicts associated with a particular mod/activity # condition. Each dict will represent a site/state combination satisfying # the constraints imposed by that mod/activity condition. 
sc_list = [] for mod in agent.mods: # Find all site/state combinations that have the appropriate # modification type # As we iterate, build up a dict identifying the annotations of # particular sites mod_sites = {} res_sites = set([]) pos_sites = set([]) for ann in monomer.site_annotations: # Don't forget to handle Nones! if ann.predicate == 'is_modification' and \ ann.object == mod.mod_type: site_state = ann.subject assert isinstance(site_state, tuple) assert len(site_state) == 2 mod_sites[site_state[0]] = site_state[1] elif ann.predicate == 'is_residue' and \ ann.object == mod.residue: res_sites.add(ann.subject) elif ann.predicate == 'is_position' and \ ann.object == mod.position: pos_sites.add(ann.subject) # If the residue field of the agent is specified, viable_sites = set(mod_sites.keys()) if mod.residue is not None: viable_sites = viable_sites.intersection(res_sites) if mod.position is not None: viable_sites = viable_sites.intersection(pos_sites) # If there are no viable sites annotated in the model matching the # available info in the mod condition, then we won't be able to # satisfy the conditions on this agent if not viable_sites: return # Otherwise, update the # If there are any sites left after we subject them to residue # and position constraints, then return the relevant monomer patterns! 
pattern_list = [] for site_name in viable_sites: pattern_list.append({site_name: (mod_sites[site_name], WILD)}) sc_list.append(pattern_list) # Now check for monomer patterns satisfying the agent's activity condition if agent.activity and not ignore_activities: # Iterate through annotations with this monomer as the subject # and a has_active_pattern or has_inactive_pattern relationship # FIXME: Currently activity type is not annotated/checked # FIXME act_type = agent.activity.activity_type rel_type = 'has_active_pattern' if agent.activity.is_active \ else 'has_inactive_pattern' active_form_list = [] for ann in model.annotations: if ann.subject == monomer and ann.predicate == rel_type: # The annotation object contains the active/inactive pattern active_form_list.append(ann.object) sc_list.append(active_form_list) # Now that we've got a list of conditions for pattern_combo in itertools.product(*sc_list): mp_sc = {} for pattern in pattern_combo: mp_sc.update(pattern) if mp_sc: yield monomer(**mp_sc) if not sc_list: yield monomer()
[ "def", "grounded_monomer_patterns", "(", "model", ",", "agent", ",", "ignore_activities", "=", "False", ")", ":", "# If it's not a molecular agent", "if", "not", "isinstance", "(", "agent", ",", "ist", ".", "Agent", ")", ":", "monomer", "=", "model", ".", "mon...
Get monomer patterns for the agent accounting for grounding information. Parameters ---------- model : pysb.core.Model The model to search for MonomerPatterns matching the given Agent. agent : indra.statements.Agent The Agent to find matching MonomerPatterns for. ignore_activites : bool Whether to ignore any ActivityConditions on the agent when determining the required site conditions for the MonomerPattern. For example, if set to True, will find a match for the agent `MAPK1(activity=kinase)` even if the corresponding MAPK1 Monomer in the model has no site named `kinase`. Default is False (more stringent matching). Returns ------- generator of MonomerPatterns
[ "Get", "monomer", "patterns", "for", "the", "agent", "accounting", "for", "grounding", "information", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L152-L281
19,058
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_monomer_pattern
def get_monomer_pattern(model, agent, extra_fields=None): """Construct a PySB MonomerPattern from an Agent.""" try: monomer = model.monomers[_n(agent.name)] except KeyError as e: logger.warning('Monomer with name %s not found in model' % _n(agent.name)) return None # Get the agent site pattern pattern = get_site_pattern(agent) if extra_fields is not None: for k, v in extra_fields.items(): # This is an important assumption, it only sets the given pattern # on the monomer if that site/key is not already specified at the # Agent level. For instance, if the Agent is specified to have # 'activity', that site will not be updated here. if k not in pattern: pattern[k] = v # If a model is given, return the Monomer with the generated pattern, # otherwise just return the pattern try: monomer_pattern = monomer(**pattern) except Exception as e: logger.info("Invalid site pattern %s for monomer %s" % (pattern, monomer)) return None return monomer_pattern
python
def get_monomer_pattern(model, agent, extra_fields=None): try: monomer = model.monomers[_n(agent.name)] except KeyError as e: logger.warning('Monomer with name %s not found in model' % _n(agent.name)) return None # Get the agent site pattern pattern = get_site_pattern(agent) if extra_fields is not None: for k, v in extra_fields.items(): # This is an important assumption, it only sets the given pattern # on the monomer if that site/key is not already specified at the # Agent level. For instance, if the Agent is specified to have # 'activity', that site will not be updated here. if k not in pattern: pattern[k] = v # If a model is given, return the Monomer with the generated pattern, # otherwise just return the pattern try: monomer_pattern = monomer(**pattern) except Exception as e: logger.info("Invalid site pattern %s for monomer %s" % (pattern, monomer)) return None return monomer_pattern
[ "def", "get_monomer_pattern", "(", "model", ",", "agent", ",", "extra_fields", "=", "None", ")", ":", "try", ":", "monomer", "=", "model", ".", "monomers", "[", "_n", "(", "agent", ".", "name", ")", "]", "except", "KeyError", "as", "e", ":", "logger", ...
Construct a PySB MonomerPattern from an Agent.
[ "Construct", "a", "PySB", "MonomerPattern", "from", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L293-L319
19,059
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_site_pattern
def get_site_pattern(agent): """Construct a dictionary of Monomer site states from an Agent. This crates the mapping to the associated PySB monomer from an INDRA Agent object.""" if not isinstance(agent, ist.Agent): return {} pattern = {} # Handle bound conditions for bc in agent.bound_conditions: # Here we make the assumption that the binding site # is simply named after the binding partner if bc.is_bound: pattern[get_binding_site_name(bc.agent)] = ANY else: pattern[get_binding_site_name(bc.agent)] = None # Handle modifications for mod in agent.mods: mod_site_str = abbrevs[mod.mod_type] if mod.residue is not None: mod_site_str = mod.residue mod_pos_str = mod.position if mod.position is not None else '' mod_site = ('%s%s' % (mod_site_str, mod_pos_str)) site_states = states[mod.mod_type] if mod.is_modified: pattern[mod_site] = (site_states[1], WILD) else: pattern[mod_site] = (site_states[0], WILD) # Handle mutations for mc in agent.mutations: res_from = mc.residue_from if mc.residue_from else 'mut' res_to = mc.residue_to if mc.residue_to else 'X' if mc.position is None: mut_site_name = res_from else: mut_site_name = res_from + mc.position pattern[mut_site_name] = res_to # Handle location if agent.location is not None: pattern['loc'] = _n(agent.location) # Handle activity if agent.activity is not None: active_site_name = agent.activity.activity_type if agent.activity.is_active: active_site_state = 'active' else: active_site_state = 'inactive' pattern[active_site_name] = active_site_state return pattern
python
def get_site_pattern(agent): if not isinstance(agent, ist.Agent): return {} pattern = {} # Handle bound conditions for bc in agent.bound_conditions: # Here we make the assumption that the binding site # is simply named after the binding partner if bc.is_bound: pattern[get_binding_site_name(bc.agent)] = ANY else: pattern[get_binding_site_name(bc.agent)] = None # Handle modifications for mod in agent.mods: mod_site_str = abbrevs[mod.mod_type] if mod.residue is not None: mod_site_str = mod.residue mod_pos_str = mod.position if mod.position is not None else '' mod_site = ('%s%s' % (mod_site_str, mod_pos_str)) site_states = states[mod.mod_type] if mod.is_modified: pattern[mod_site] = (site_states[1], WILD) else: pattern[mod_site] = (site_states[0], WILD) # Handle mutations for mc in agent.mutations: res_from = mc.residue_from if mc.residue_from else 'mut' res_to = mc.residue_to if mc.residue_to else 'X' if mc.position is None: mut_site_name = res_from else: mut_site_name = res_from + mc.position pattern[mut_site_name] = res_to # Handle location if agent.location is not None: pattern['loc'] = _n(agent.location) # Handle activity if agent.activity is not None: active_site_name = agent.activity.activity_type if agent.activity.is_active: active_site_state = 'active' else: active_site_state = 'inactive' pattern[active_site_name] = active_site_state return pattern
[ "def", "get_site_pattern", "(", "agent", ")", ":", "if", "not", "isinstance", "(", "agent", ",", "ist", ".", "Agent", ")", ":", "return", "{", "}", "pattern", "=", "{", "}", "# Handle bound conditions", "for", "bc", "in", "agent", ".", "bound_conditions", ...
Construct a dictionary of Monomer site states from an Agent. This crates the mapping to the associated PySB monomer from an INDRA Agent object.
[ "Construct", "a", "dictionary", "of", "Monomer", "site", "states", "from", "an", "Agent", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L322-L375
19,060
sorgerlab/indra
indra/assemblers/pysb/assembler.py
set_base_initial_condition
def set_base_initial_condition(model, monomer, value): """Set an initial condition for a monomer in its 'default' state.""" # Build up monomer pattern dict sites_dict = {} for site in monomer.sites: if site in monomer.site_states: if site == 'loc' and 'cytoplasm' in monomer.site_states['loc']: sites_dict['loc'] = 'cytoplasm' else: sites_dict[site] = monomer.site_states[site][0] else: sites_dict[site] = None mp = monomer(**sites_dict) pname = monomer.name + '_0' try: p = model.parameters[pname] p.value = value except KeyError: p = Parameter(pname, value) model.add_component(p) model.initial(mp, p)
python
def set_base_initial_condition(model, monomer, value): # Build up monomer pattern dict sites_dict = {} for site in monomer.sites: if site in monomer.site_states: if site == 'loc' and 'cytoplasm' in monomer.site_states['loc']: sites_dict['loc'] = 'cytoplasm' else: sites_dict[site] = monomer.site_states[site][0] else: sites_dict[site] = None mp = monomer(**sites_dict) pname = monomer.name + '_0' try: p = model.parameters[pname] p.value = value except KeyError: p = Parameter(pname, value) model.add_component(p) model.initial(mp, p)
[ "def", "set_base_initial_condition", "(", "model", ",", "monomer", ",", "value", ")", ":", "# Build up monomer pattern dict", "sites_dict", "=", "{", "}", "for", "site", "in", "monomer", ".", "sites", ":", "if", "site", "in", "monomer", ".", "site_states", ":"...
Set an initial condition for a monomer in its 'default' state.
[ "Set", "an", "initial", "condition", "for", "a", "monomer", "in", "its", "default", "state", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L378-L398
19,061
sorgerlab/indra
indra/assemblers/pysb/assembler.py
get_annotation
def get_annotation(component, db_name, db_ref): """Construct model Annotations for each component. Annotation formats follow guidelines at http://identifiers.org/. """ url = get_identifiers_url(db_name, db_ref) if not url: return None subj = component ann = Annotation(subj, url, 'is') return ann
python
def get_annotation(component, db_name, db_ref): url = get_identifiers_url(db_name, db_ref) if not url: return None subj = component ann = Annotation(subj, url, 'is') return ann
[ "def", "get_annotation", "(", "component", ",", "db_name", ",", "db_ref", ")", ":", "url", "=", "get_identifiers_url", "(", "db_name", ",", "db_ref", ")", "if", "not", "url", ":", "return", "None", "subj", "=", "component", "ann", "=", "Annotation", "(", ...
Construct model Annotations for each component. Annotation formats follow guidelines at http://identifiers.org/.
[ "Construct", "model", "Annotations", "for", "each", "component", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L432-L442
19,062
sorgerlab/indra
indra/assemblers/pysb/assembler.py
PysbAssembler.make_model
def make_model(self, policies=None, initial_conditions=True, reverse_effects=False, model_name='indra_model'): """Assemble the PySB model from the collected INDRA Statements. This method assembles a PySB model from the set of INDRA Statements. The assembled model is both returned and set as the assembler's model argument. Parameters ---------- policies : Optional[Union[str, dict]] A string or dictionary that defines one or more assembly policies. If policies is a string, it defines a global assembly policy that applies to all Statement types. Example: one_step, interactions_only A dictionary of policies has keys corresponding to Statement types and values to the policy to be applied to that type of Statement. For Statement types whose policy is undefined, the 'default' policy is applied. Example: {'Phosphorylation': 'two_step'} initial_conditions : Optional[bool] If True, default initial conditions are generated for the Monomers in the model. Default: True reverse_effects : Optional[bool] If True, reverse rules are added to the model for activity, modification and amount regulations that have no corresponding reverse effects. Default: False model_name : Optional[str] The name attribute assigned to the PySB Model object. Default: "indra_model" Returns ------- model : pysb.Model The assembled PySB model object. 
""" ppa = PysbPreassembler(self.statements) self.processed_policies = self.process_policies(policies) ppa.replace_activities() if reverse_effects: ppa.add_reverse_effects() self.statements = ppa.statements self.model = Model() self.model.name = model_name self.agent_set = BaseAgentSet() # Collect information about the monomers/self.agent_set from the # statements self._monomers() # Add the monomers to the model based on our BaseAgentSet for agent_name, agent in self.agent_set.items(): m = Monomer(_n(agent_name), agent.sites, agent.site_states) m.site_annotations = agent.site_annotations self.model.add_component(m) for db_name, db_ref in agent.db_refs.items(): a = get_annotation(m, db_name, db_ref) if a is not None: self.model.add_annotation(a) # Iterate over the active_forms for af in agent.active_forms: self.model.add_annotation(Annotation(m, af, 'has_active_pattern')) for iaf in agent.inactive_forms: self.model.add_annotation(Annotation(m, iaf, 'has_inactive_pattern')) for at in agent.activity_types: act_site_cond = {at: 'active'} self.model.add_annotation(Annotation(m, act_site_cond, 'has_active_pattern')) inact_site_cond = {at: 'inactive'} self.model.add_annotation(Annotation(m, inact_site_cond, 'has_inactive_pattern')) # Iterate over the statements to generate rules self._assemble() # Add initial conditions if initial_conditions: self.add_default_initial_conditions() return self.model
python
def make_model(self, policies=None, initial_conditions=True, reverse_effects=False, model_name='indra_model'): ppa = PysbPreassembler(self.statements) self.processed_policies = self.process_policies(policies) ppa.replace_activities() if reverse_effects: ppa.add_reverse_effects() self.statements = ppa.statements self.model = Model() self.model.name = model_name self.agent_set = BaseAgentSet() # Collect information about the monomers/self.agent_set from the # statements self._monomers() # Add the monomers to the model based on our BaseAgentSet for agent_name, agent in self.agent_set.items(): m = Monomer(_n(agent_name), agent.sites, agent.site_states) m.site_annotations = agent.site_annotations self.model.add_component(m) for db_name, db_ref in agent.db_refs.items(): a = get_annotation(m, db_name, db_ref) if a is not None: self.model.add_annotation(a) # Iterate over the active_forms for af in agent.active_forms: self.model.add_annotation(Annotation(m, af, 'has_active_pattern')) for iaf in agent.inactive_forms: self.model.add_annotation(Annotation(m, iaf, 'has_inactive_pattern')) for at in agent.activity_types: act_site_cond = {at: 'active'} self.model.add_annotation(Annotation(m, act_site_cond, 'has_active_pattern')) inact_site_cond = {at: 'inactive'} self.model.add_annotation(Annotation(m, inact_site_cond, 'has_inactive_pattern')) # Iterate over the statements to generate rules self._assemble() # Add initial conditions if initial_conditions: self.add_default_initial_conditions() return self.model
[ "def", "make_model", "(", "self", ",", "policies", "=", "None", ",", "initial_conditions", "=", "True", ",", "reverse_effects", "=", "False", ",", "model_name", "=", "'indra_model'", ")", ":", "ppa", "=", "PysbPreassembler", "(", "self", ".", "statements", "...
Assemble the PySB model from the collected INDRA Statements. This method assembles a PySB model from the set of INDRA Statements. The assembled model is both returned and set as the assembler's model argument. Parameters ---------- policies : Optional[Union[str, dict]] A string or dictionary that defines one or more assembly policies. If policies is a string, it defines a global assembly policy that applies to all Statement types. Example: one_step, interactions_only A dictionary of policies has keys corresponding to Statement types and values to the policy to be applied to that type of Statement. For Statement types whose policy is undefined, the 'default' policy is applied. Example: {'Phosphorylation': 'two_step'} initial_conditions : Optional[bool] If True, default initial conditions are generated for the Monomers in the model. Default: True reverse_effects : Optional[bool] If True, reverse rules are added to the model for activity, modification and amount regulations that have no corresponding reverse effects. Default: False model_name : Optional[str] The name attribute assigned to the PySB Model object. Default: "indra_model" Returns ------- model : pysb.Model The assembled PySB model object.
[ "Assemble", "the", "PySB", "model", "from", "the", "collected", "INDRA", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L546-L626
19,063
sorgerlab/indra
indra/assemblers/pysb/assembler.py
PysbAssembler.add_default_initial_conditions
def add_default_initial_conditions(self, value=None): """Set default initial conditions in the PySB model. Parameters ---------- value : Optional[float] Optionally a value can be supplied which will be the initial amount applied. Otherwise a built-in default is used. """ if value is not None: try: value_num = float(value) except ValueError: logger.error('Invalid initial condition value.') return else: value_num = self.default_initial_amount if self.model is None: return for m in self.model.monomers: set_base_initial_condition(self.model, m, value_num)
python
def add_default_initial_conditions(self, value=None): if value is not None: try: value_num = float(value) except ValueError: logger.error('Invalid initial condition value.') return else: value_num = self.default_initial_amount if self.model is None: return for m in self.model.monomers: set_base_initial_condition(self.model, m, value_num)
[ "def", "add_default_initial_conditions", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value_num", "=", "float", "(", "value", ")", "except", "ValueError", ":", "logger", ".", "error", "(", "'Inva...
Set default initial conditions in the PySB model. Parameters ---------- value : Optional[float] Optionally a value can be supplied which will be the initial amount applied. Otherwise a built-in default is used.
[ "Set", "default", "initial", "conditions", "in", "the", "PySB", "model", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L628-L648
19,064
sorgerlab/indra
indra/assemblers/pysb/assembler.py
PysbAssembler.set_expression
def set_expression(self, expression_dict): """Set protein expression amounts as initial conditions Parameters ---------- expression_dict : dict A dictionary in which the keys are gene names and the values are numbers representing the absolute amount (count per cell) of proteins expressed. Proteins that are not expressed can be represented as nan. Entries that are not in the dict or are in there but resolve to None, are set to the default initial amount. Example: {'EGFR': 12345, 'BRAF': 4567, 'ESR1': nan} """ if self.model is None: return monomers_found = [] monomers_notfound = [] # Iterate over all the monomers for m in self.model.monomers: if (m.name in expression_dict and expression_dict[m.name] is not None): # Try to get the expression amount from the dict init = expression_dict[m.name] # We interpret nan and None as not expressed if math.isnan(init): init = 0 init_round = round(init) set_base_initial_condition(self.model, m, init_round) monomers_found.append(m.name) else: set_base_initial_condition(self.model, m, self.default_initial_amount) monomers_notfound.append(m.name) logger.info('Monomers set to given context') logger.info('-----------------------------') for m in monomers_found: logger.info('%s' % m) if monomers_notfound: logger.info('') logger.info('Monomers not found in given context') logger.info('-----------------------------------') for m in monomers_notfound: logger.info('%s' % m)
python
def set_expression(self, expression_dict): if self.model is None: return monomers_found = [] monomers_notfound = [] # Iterate over all the monomers for m in self.model.monomers: if (m.name in expression_dict and expression_dict[m.name] is not None): # Try to get the expression amount from the dict init = expression_dict[m.name] # We interpret nan and None as not expressed if math.isnan(init): init = 0 init_round = round(init) set_base_initial_condition(self.model, m, init_round) monomers_found.append(m.name) else: set_base_initial_condition(self.model, m, self.default_initial_amount) monomers_notfound.append(m.name) logger.info('Monomers set to given context') logger.info('-----------------------------') for m in monomers_found: logger.info('%s' % m) if monomers_notfound: logger.info('') logger.info('Monomers not found in given context') logger.info('-----------------------------------') for m in monomers_notfound: logger.info('%s' % m)
[ "def", "set_expression", "(", "self", ",", "expression_dict", ")", ":", "if", "self", ".", "model", "is", "None", ":", "return", "monomers_found", "=", "[", "]", "monomers_notfound", "=", "[", "]", "# Iterate over all the monomers", "for", "m", "in", "self", ...
Set protein expression amounts as initial conditions Parameters ---------- expression_dict : dict A dictionary in which the keys are gene names and the values are numbers representing the absolute amount (count per cell) of proteins expressed. Proteins that are not expressed can be represented as nan. Entries that are not in the dict or are in there but resolve to None, are set to the default initial amount. Example: {'EGFR': 12345, 'BRAF': 4567, 'ESR1': nan}
[ "Set", "protein", "expression", "amounts", "as", "initial", "conditions" ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L650-L694
19,065
sorgerlab/indra
indra/assemblers/pysb/assembler.py
PysbAssembler.set_context
def set_context(self, cell_type): """Set protein expression amounts from CCLE as initial conditions. This method uses :py:mod:`indra.databases.context_client` to get protein expression levels for a given cell type and set initial conditions for Monomers in the model accordingly. Parameters ---------- cell_type : str Cell type name for which expression levels are queried. The cell type name follows the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST """ if self.model is None: return monomer_names = [m.name for m in self.model.monomers] res = context_client.get_protein_expression(monomer_names, [cell_type]) amounts = res.get(cell_type) if not amounts: logger.warning('Could not get context for %s cell type.' % cell_type) self.add_default_initial_conditions() return self.set_expression(amounts)
python
def set_context(self, cell_type): if self.model is None: return monomer_names = [m.name for m in self.model.monomers] res = context_client.get_protein_expression(monomer_names, [cell_type]) amounts = res.get(cell_type) if not amounts: logger.warning('Could not get context for %s cell type.' % cell_type) self.add_default_initial_conditions() return self.set_expression(amounts)
[ "def", "set_context", "(", "self", ",", "cell_type", ")", ":", "if", "self", ".", "model", "is", "None", ":", "return", "monomer_names", "=", "[", "m", ".", "name", "for", "m", "in", "self", ".", "model", ".", "monomers", "]", "res", "=", "context_cl...
Set protein expression amounts from CCLE as initial conditions. This method uses :py:mod:`indra.databases.context_client` to get protein expression levels for a given cell type and set initial conditions for Monomers in the model accordingly. Parameters ---------- cell_type : str Cell type name for which expression levels are queried. The cell type name follows the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST
[ "Set", "protein", "expression", "amounts", "from", "CCLE", "as", "initial", "conditions", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L696-L721
19,066
sorgerlab/indra
indra/assemblers/pysb/assembler.py
PysbAssembler.export_model
def export_model(self, format, file_name=None): """Save the assembled model in a modeling formalism other than PySB. For more details on exporting PySB models, see http://pysb.readthedocs.io/en/latest/modules/export/index.html Parameters ---------- format : str The format to export into, for instance "kappa", "bngl", "sbml", "matlab", "mathematica", "potterswheel". See http://pysb.readthedocs.io/en/latest/modules/export/index.html for a list of supported formats. In addition to the formats supported by PySB itself, this method also provides "sbgn" output. file_name : Optional[str] An optional file name to save the exported model into. Returns ------- exp_str : str or object The exported model string or object """ # Handle SBGN as special case if format == 'sbgn': exp_str = export_sbgn(self.model) elif format == 'kappa_im': # NOTE: this export is not a str, rather a graph object return export_kappa_im(self.model, file_name) elif format == 'kappa_cm': # NOTE: this export is not a str, rather a graph object return export_kappa_cm(self.model, file_name) else: try: exp_str = pysb.export.export(self.model, format) except KeyError: logging.error('Unknown export format: %s' % format) return None if file_name: with open(file_name, 'wb') as fh: fh.write(exp_str.encode('utf-8')) return exp_str
python
def export_model(self, format, file_name=None): # Handle SBGN as special case if format == 'sbgn': exp_str = export_sbgn(self.model) elif format == 'kappa_im': # NOTE: this export is not a str, rather a graph object return export_kappa_im(self.model, file_name) elif format == 'kappa_cm': # NOTE: this export is not a str, rather a graph object return export_kappa_cm(self.model, file_name) else: try: exp_str = pysb.export.export(self.model, format) except KeyError: logging.error('Unknown export format: %s' % format) return None if file_name: with open(file_name, 'wb') as fh: fh.write(exp_str.encode('utf-8')) return exp_str
[ "def", "export_model", "(", "self", ",", "format", ",", "file_name", "=", "None", ")", ":", "# Handle SBGN as special case", "if", "format", "==", "'sbgn'", ":", "exp_str", "=", "export_sbgn", "(", "self", ".", "model", ")", "elif", "format", "==", "'kappa_i...
Save the assembled model in a modeling formalism other than PySB. For more details on exporting PySB models, see http://pysb.readthedocs.io/en/latest/modules/export/index.html Parameters ---------- format : str The format to export into, for instance "kappa", "bngl", "sbml", "matlab", "mathematica", "potterswheel". See http://pysb.readthedocs.io/en/latest/modules/export/index.html for a list of supported formats. In addition to the formats supported by PySB itself, this method also provides "sbgn" output. file_name : Optional[str] An optional file name to save the exported model into. Returns ------- exp_str : str or object The exported model string or object
[ "Save", "the", "assembled", "model", "in", "a", "modeling", "formalism", "other", "than", "PySB", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L746-L789
19,067
sorgerlab/indra
indra/assemblers/pysb/assembler.py
PysbAssembler.save_rst
def save_rst(self, file_name='pysb_model.rst', module_name='pysb_module'): """Save the assembled model as an RST file for literate modeling. Parameters ---------- file_name : Optional[str] The name of the file to save the RST in. Default: pysb_model.rst module_name : Optional[str] The name of the python function defining the module. Default: pysb_module """ if self.model is not None: with open(file_name, 'wt') as fh: fh.write('.. _%s:\n\n' % module_name) fh.write('Module\n======\n\n') fh.write('INDRA-assembled model\n---------------------\n\n') fh.write('::\n\n') model_str = pysb.export.export(self.model, 'pysb_flat') model_str = '\t' + model_str.replace('\n', '\n\t') fh.write(model_str)
python
def save_rst(self, file_name='pysb_model.rst', module_name='pysb_module'): if self.model is not None: with open(file_name, 'wt') as fh: fh.write('.. _%s:\n\n' % module_name) fh.write('Module\n======\n\n') fh.write('INDRA-assembled model\n---------------------\n\n') fh.write('::\n\n') model_str = pysb.export.export(self.model, 'pysb_flat') model_str = '\t' + model_str.replace('\n', '\n\t') fh.write(model_str)
[ "def", "save_rst", "(", "self", ",", "file_name", "=", "'pysb_model.rst'", ",", "module_name", "=", "'pysb_module'", ")", ":", "if", "self", ".", "model", "is", "not", "None", ":", "with", "open", "(", "file_name", ",", "'wt'", ")", "as", "fh", ":", "f...
Save the assembled model as an RST file for literate modeling. Parameters ---------- file_name : Optional[str] The name of the file to save the RST in. Default: pysb_model.rst module_name : Optional[str] The name of the python function defining the module. Default: pysb_module
[ "Save", "the", "assembled", "model", "as", "an", "RST", "file", "for", "literate", "modeling", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L792-L812
19,068
sorgerlab/indra
indra/assemblers/pysb/assembler.py
PysbAssembler._monomers
def _monomers(self): """Calls the appropriate monomers method based on policies.""" for stmt in self.statements: if _is_whitelisted(stmt): self._dispatch(stmt, 'monomers', self.agent_set)
python
def _monomers(self): for stmt in self.statements: if _is_whitelisted(stmt): self._dispatch(stmt, 'monomers', self.agent_set)
[ "def", "_monomers", "(", "self", ")", ":", "for", "stmt", "in", "self", ".", "statements", ":", "if", "_is_whitelisted", "(", "stmt", ")", ":", "self", ".", "_dispatch", "(", "stmt", ",", "'monomers'", ",", "self", ".", "agent_set", ")" ]
Calls the appropriate monomers method based on policies.
[ "Calls", "the", "appropriate", "monomers", "method", "based", "on", "policies", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/assembler.py#L844-L848
19,069
sorgerlab/indra
indra/sources/trips/client.py
send_query
def send_query(text, service_endpoint='drum', query_args=None): """Send a query to the TRIPS web service. Parameters ---------- text : str The text to be processed. service_endpoint : Optional[str] Selects the TRIPS/DRUM web service endpoint to use. Is a choice between "drum" (default), "drum-dev", a nightly build, and "cwms" for use with more general knowledge extraction. query_args : Optional[dict] A dictionary of arguments to be passed with the query. Returns ------- html : str The HTML result returned by the web service. """ if service_endpoint in ['drum', 'drum-dev', 'cwms', 'cwmsreader']: url = base_url + service_endpoint else: logger.error('Invalid service endpoint: %s' % service_endpoint) return '' if query_args is None: query_args = {} query_args.update({'input': text}) res = requests.get(url, query_args, timeout=3600) if not res.status_code == 200: logger.error('Problem with TRIPS query: status code %s' % res.status_code) return '' # Gets unicode content return res.text
python
def send_query(text, service_endpoint='drum', query_args=None): if service_endpoint in ['drum', 'drum-dev', 'cwms', 'cwmsreader']: url = base_url + service_endpoint else: logger.error('Invalid service endpoint: %s' % service_endpoint) return '' if query_args is None: query_args = {} query_args.update({'input': text}) res = requests.get(url, query_args, timeout=3600) if not res.status_code == 200: logger.error('Problem with TRIPS query: status code %s' % res.status_code) return '' # Gets unicode content return res.text
[ "def", "send_query", "(", "text", ",", "service_endpoint", "=", "'drum'", ",", "query_args", "=", "None", ")", ":", "if", "service_endpoint", "in", "[", "'drum'", ",", "'drum-dev'", ",", "'cwms'", ",", "'cwmsreader'", "]", ":", "url", "=", "base_url", "+",...
Send a query to the TRIPS web service. Parameters ---------- text : str The text to be processed. service_endpoint : Optional[str] Selects the TRIPS/DRUM web service endpoint to use. Is a choice between "drum" (default), "drum-dev", a nightly build, and "cwms" for use with more general knowledge extraction. query_args : Optional[dict] A dictionary of arguments to be passed with the query. Returns ------- html : str The HTML result returned by the web service.
[ "Send", "a", "query", "to", "the", "TRIPS", "web", "service", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/client.py#L15-L48
19,070
sorgerlab/indra
indra/sources/trips/client.py
get_xml
def get_xml(html, content_tag='ekb', fail_if_empty=False): """Extract the content XML from the HTML output of the TRIPS web service. Parameters ---------- html : str The HTML output from the TRIPS web service. content_tag : str The xml tag used to label the content. Default is 'ekb'. fail_if_empty : bool If True, and if the xml content found is an empty string, raise an exception. Default is False. Returns ------- The extraction knowledge base (e.g. EKB) XML that contains the event and term extractions. """ cont = re.findall(r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % {'tag': content_tag}, html, re.MULTILINE | re.DOTALL) if cont: events_terms = ''.join([l.strip() for l in cont[0][1].splitlines()]) if 'xmlns' in cont[0][0]: meta = ' '.join([l.strip() for l in cont[0][0].splitlines()]) else: meta = '' else: events_terms = '' meta = '' if fail_if_empty: assert events_terms != '',\ "Got empty string for events content from html:\n%s" % html header = ('<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>' % (content_tag, meta)) footer = '</%s>' % content_tag return header + events_terms.replace('\n', '') + footer
python
def get_xml(html, content_tag='ekb', fail_if_empty=False): cont = re.findall(r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % {'tag': content_tag}, html, re.MULTILINE | re.DOTALL) if cont: events_terms = ''.join([l.strip() for l in cont[0][1].splitlines()]) if 'xmlns' in cont[0][0]: meta = ' '.join([l.strip() for l in cont[0][0].splitlines()]) else: meta = '' else: events_terms = '' meta = '' if fail_if_empty: assert events_terms != '',\ "Got empty string for events content from html:\n%s" % html header = ('<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>' % (content_tag, meta)) footer = '</%s>' % content_tag return header + events_terms.replace('\n', '') + footer
[ "def", "get_xml", "(", "html", ",", "content_tag", "=", "'ekb'", ",", "fail_if_empty", "=", "False", ")", ":", "cont", "=", "re", ".", "findall", "(", "r'<%(tag)s(.*?)>(.*?)</%(tag)s>'", "%", "{", "'tag'", ":", "content_tag", "}", ",", "html", ",", "re", ...
Extract the content XML from the HTML output of the TRIPS web service. Parameters ---------- html : str The HTML output from the TRIPS web service. content_tag : str The xml tag used to label the content. Default is 'ekb'. fail_if_empty : bool If True, and if the xml content found is an empty string, raise an exception. Default is False. Returns ------- The extraction knowledge base (e.g. EKB) XML that contains the event and term extractions.
[ "Extract", "the", "content", "XML", "from", "the", "HTML", "output", "of", "the", "TRIPS", "web", "service", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/client.py#L51-L88
19,071
sorgerlab/indra
indra/sources/trips/client.py
save_xml
def save_xml(xml_str, file_name, pretty=True): """Save the TRIPS EKB XML in a file. Parameters ---------- xml_str : str The TRIPS EKB XML string to be saved. file_name : str The name of the file to save the result in. pretty : Optional[bool] If True, the XML is pretty printed. """ try: fh = open(file_name, 'wt') except IOError: logger.error('Could not open %s for writing.' % file_name) return if pretty: xmld = xml.dom.minidom.parseString(xml_str) xml_str_pretty = xmld.toprettyxml() fh.write(xml_str_pretty) else: fh.write(xml_str) fh.close()
python
def save_xml(xml_str, file_name, pretty=True): try: fh = open(file_name, 'wt') except IOError: logger.error('Could not open %s for writing.' % file_name) return if pretty: xmld = xml.dom.minidom.parseString(xml_str) xml_str_pretty = xmld.toprettyxml() fh.write(xml_str_pretty) else: fh.write(xml_str) fh.close()
[ "def", "save_xml", "(", "xml_str", ",", "file_name", ",", "pretty", "=", "True", ")", ":", "try", ":", "fh", "=", "open", "(", "file_name", ",", "'wt'", ")", "except", "IOError", ":", "logger", ".", "error", "(", "'Could not open %s for writing.'", "%", ...
Save the TRIPS EKB XML in a file. Parameters ---------- xml_str : str The TRIPS EKB XML string to be saved. file_name : str The name of the file to save the result in. pretty : Optional[bool] If True, the XML is pretty printed.
[ "Save", "the", "TRIPS", "EKB", "XML", "in", "a", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/client.py#L91-L114
19,072
sorgerlab/indra
indra/sources/sofia/api.py
process_table
def process_table(fname): """Return processor by processing a given sheet of a spreadsheet file. Parameters ---------- fname : str The name of the Excel file (typically .xlsx extension) to process Returns ------- sp : indra.sources.sofia.processor.SofiaProcessor A SofiaProcessor object which has a list of extracted INDRA Statements as its statements attribute. """ book = openpyxl.load_workbook(fname, read_only=True) try: rel_sheet = book['Relations'] except Exception as e: rel_sheet = book['Causal'] event_sheet = book['Events'] entities_sheet = book['Entities'] sp = SofiaExcelProcessor(rel_sheet.rows, event_sheet.rows, entities_sheet.rows) return sp
python
def process_table(fname): book = openpyxl.load_workbook(fname, read_only=True) try: rel_sheet = book['Relations'] except Exception as e: rel_sheet = book['Causal'] event_sheet = book['Events'] entities_sheet = book['Entities'] sp = SofiaExcelProcessor(rel_sheet.rows, event_sheet.rows, entities_sheet.rows) return sp
[ "def", "process_table", "(", "fname", ")", ":", "book", "=", "openpyxl", ".", "load_workbook", "(", "fname", ",", "read_only", "=", "True", ")", "try", ":", "rel_sheet", "=", "book", "[", "'Relations'", "]", "except", "Exception", "as", "e", ":", "rel_sh...
Return processor by processing a given sheet of a spreadsheet file. Parameters ---------- fname : str The name of the Excel file (typically .xlsx extension) to process Returns ------- sp : indra.sources.sofia.processor.SofiaProcessor A SofiaProcessor object which has a list of extracted INDRA Statements as its statements attribute.
[ "Return", "processor", "by", "processing", "a", "given", "sheet", "of", "a", "spreadsheet", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sofia/api.py#L9-L32
19,073
sorgerlab/indra
indra/sources/sofia/api.py
process_text
def process_text(text, out_file='sofia_output.json', auth=None): """Return processor by processing text given as a string. Parameters ---------- text : str A string containing the text to be processed with Sofia. out_file : Optional[str] The path to a file to save the reader's output into. Default: sofia_output.json auth : Optional[list] A username/password pair for the Sofia web service. If not given, the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either the INDRA config or the environment. Returns ------- sp : indra.sources.sofia.processor.SofiaProcessor A SofiaProcessor object which has a list of extracted INDRA Statements as its statements attribute. If the API did not process the text, None is returned. """ text_json = {'text': text} if not auth: user, password = _get_sofia_auth() else: user, password = auth if not user or not password: raise ValueError('Could not use SOFIA web service since' ' authentication information is missing. Please' ' set SOFIA_USERNAME and SOFIA_PASSWORD in the' ' INDRA configuration file or as environmental' ' variables.') json_response, status_code, process_status = \ _text_processing(text_json=text_json, user=user, password=password) # Check response status if process_status != 'Done' or status_code != 200: return None # Cache reading output if out_file: with open(out_file, 'w') as fh: json.dump(json_response, fh, indent=1) return process_json(json_response)
python
def process_text(text, out_file='sofia_output.json', auth=None): text_json = {'text': text} if not auth: user, password = _get_sofia_auth() else: user, password = auth if not user or not password: raise ValueError('Could not use SOFIA web service since' ' authentication information is missing. Please' ' set SOFIA_USERNAME and SOFIA_PASSWORD in the' ' INDRA configuration file or as environmental' ' variables.') json_response, status_code, process_status = \ _text_processing(text_json=text_json, user=user, password=password) # Check response status if process_status != 'Done' or status_code != 200: return None # Cache reading output if out_file: with open(out_file, 'w') as fh: json.dump(json_response, fh, indent=1) return process_json(json_response)
[ "def", "process_text", "(", "text", ",", "out_file", "=", "'sofia_output.json'", ",", "auth", "=", "None", ")", ":", "text_json", "=", "{", "'text'", ":", "text", "}", "if", "not", "auth", ":", "user", ",", "password", "=", "_get_sofia_auth", "(", ")", ...
Return processor by processing text given as a string. Parameters ---------- text : str A string containing the text to be processed with Sofia. out_file : Optional[str] The path to a file to save the reader's output into. Default: sofia_output.json auth : Optional[list] A username/password pair for the Sofia web service. If not given, the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either the INDRA config or the environment. Returns ------- sp : indra.sources.sofia.processor.SofiaProcessor A SofiaProcessor object which has a list of extracted INDRA Statements as its statements attribute. If the API did not process the text, None is returned.
[ "Return", "processor", "by", "processing", "text", "given", "as", "a", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sofia/api.py#L35-L80
19,074
sorgerlab/indra
indra/sources/ndex_cx/processor.py
_get_dict_from_list
def _get_dict_from_list(dict_key, list_of_dicts): """Retrieve a specific dict from a list of dicts. Parameters ---------- dict_key : str The (single) key of the dict to be retrieved from the list. list_of_dicts : list The list of dicts to search for the specific dict. Returns ------- dict value The value associated with the dict_key (e.g., a list of nodes or edges). """ the_dict = [cur_dict for cur_dict in list_of_dicts if cur_dict.get(dict_key)] if not the_dict: raise ValueError('Could not find a dict with key %s' % dict_key) return the_dict[0][dict_key]
python
def _get_dict_from_list(dict_key, list_of_dicts): the_dict = [cur_dict for cur_dict in list_of_dicts if cur_dict.get(dict_key)] if not the_dict: raise ValueError('Could not find a dict with key %s' % dict_key) return the_dict[0][dict_key]
[ "def", "_get_dict_from_list", "(", "dict_key", ",", "list_of_dicts", ")", ":", "the_dict", "=", "[", "cur_dict", "for", "cur_dict", "in", "list_of_dicts", "if", "cur_dict", ".", "get", "(", "dict_key", ")", "]", "if", "not", "the_dict", ":", "raise", "ValueE...
Retrieve a specific dict from a list of dicts. Parameters ---------- dict_key : str The (single) key of the dict to be retrieved from the list. list_of_dicts : list The list of dicts to search for the specific dict. Returns ------- dict value The value associated with the dict_key (e.g., a list of nodes or edges).
[ "Retrieve", "a", "specific", "dict", "from", "a", "list", "of", "dicts", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L22-L42
19,075
sorgerlab/indra
indra/sources/ndex_cx/processor.py
NdexCxProcessor._initialize_node_agents
def _initialize_node_agents(self): """Initialize internal dicts containing node information.""" nodes = _get_dict_from_list('nodes', self.cx) invalid_genes = [] for node in nodes: id = node['@id'] cx_db_refs = self.get_aliases(node) up_id = cx_db_refs.get('UP') if up_id: gene_name = uniprot_client.get_gene_name(up_id) hgnc_id = hgnc_client.get_hgnc_id(gene_name) db_refs = {'UP': up_id, 'HGNC': hgnc_id, 'TEXT': gene_name} agent = Agent(gene_name, db_refs=db_refs) self._node_names[id] = gene_name self._node_agents[id] = agent continue else: node_name = node['n'] self._node_names[id] = node_name hgnc_id = hgnc_client.get_hgnc_id(node_name) db_refs = {'TEXT': node_name} if not hgnc_id: if not self.require_grounding: self._node_agents[id] = \ Agent(node_name, db_refs=db_refs) invalid_genes.append(node_name) else: db_refs.update({'HGNC': hgnc_id}) up_id = hgnc_client.get_uniprot_id(hgnc_id) # It's possible that a valid HGNC ID will not have a # Uniprot ID, as in the case of HOTAIR (HOX transcript # antisense RNA, HGNC:33510) if up_id: db_refs.update({'UP': up_id}) self._node_agents[id] = Agent(node_name, db_refs=db_refs) if invalid_genes: verb = 'Skipped' if self.require_grounding else 'Included' logger.info('%s invalid gene symbols: %s' % (verb, ', '.join(invalid_genes)))
python
def _initialize_node_agents(self): nodes = _get_dict_from_list('nodes', self.cx) invalid_genes = [] for node in nodes: id = node['@id'] cx_db_refs = self.get_aliases(node) up_id = cx_db_refs.get('UP') if up_id: gene_name = uniprot_client.get_gene_name(up_id) hgnc_id = hgnc_client.get_hgnc_id(gene_name) db_refs = {'UP': up_id, 'HGNC': hgnc_id, 'TEXT': gene_name} agent = Agent(gene_name, db_refs=db_refs) self._node_names[id] = gene_name self._node_agents[id] = agent continue else: node_name = node['n'] self._node_names[id] = node_name hgnc_id = hgnc_client.get_hgnc_id(node_name) db_refs = {'TEXT': node_name} if not hgnc_id: if not self.require_grounding: self._node_agents[id] = \ Agent(node_name, db_refs=db_refs) invalid_genes.append(node_name) else: db_refs.update({'HGNC': hgnc_id}) up_id = hgnc_client.get_uniprot_id(hgnc_id) # It's possible that a valid HGNC ID will not have a # Uniprot ID, as in the case of HOTAIR (HOX transcript # antisense RNA, HGNC:33510) if up_id: db_refs.update({'UP': up_id}) self._node_agents[id] = Agent(node_name, db_refs=db_refs) if invalid_genes: verb = 'Skipped' if self.require_grounding else 'Included' logger.info('%s invalid gene symbols: %s' % (verb, ', '.join(invalid_genes)))
[ "def", "_initialize_node_agents", "(", "self", ")", ":", "nodes", "=", "_get_dict_from_list", "(", "'nodes'", ",", "self", ".", "cx", ")", "invalid_genes", "=", "[", "]", "for", "node", "in", "nodes", ":", "id", "=", "node", "[", "'@id'", "]", "cx_db_ref...
Initialize internal dicts containing node information.
[ "Initialize", "internal", "dicts", "containing", "node", "information", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L78-L116
19,076
sorgerlab/indra
indra/sources/ndex_cx/processor.py
NdexCxProcessor.get_pmids
def get_pmids(self): """Get list of all PMIDs associated with edges in the network.""" pmids = [] for ea in self._edge_attributes.values(): edge_pmids = ea.get('pmids') if edge_pmids: pmids += edge_pmids return list(set(pmids))
python
def get_pmids(self): pmids = [] for ea in self._edge_attributes.values(): edge_pmids = ea.get('pmids') if edge_pmids: pmids += edge_pmids return list(set(pmids))
[ "def", "get_pmids", "(", "self", ")", ":", "pmids", "=", "[", "]", "for", "ea", "in", "self", ".", "_edge_attributes", ".", "values", "(", ")", ":", "edge_pmids", "=", "ea", ".", "get", "(", "'pmids'", ")", "if", "edge_pmids", ":", "pmids", "+=", "...
Get list of all PMIDs associated with edges in the network.
[ "Get", "list", "of", "all", "PMIDs", "associated", "with", "edges", "in", "the", "network", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L166-L173
19,077
sorgerlab/indra
indra/sources/ndex_cx/processor.py
NdexCxProcessor.get_statements
def get_statements(self): """Convert network edges into Statements. Returns ------- list of Statements Converted INDRA Statements. """ edges = _get_dict_from_list('edges', self.cx) for edge in edges: edge_type = edge.get('i') if not edge_type: continue stmt_type = _stmt_map.get(edge_type) if stmt_type: id = edge['@id'] source_agent = self._node_agents.get(edge['s']) target_agent = self._node_agents.get(edge['t']) if not source_agent or not target_agent: logger.info("Skipping edge %s->%s: %s" % (self._node_names[edge['s']], self._node_names[edge['t']], edge)) continue ev = self._create_evidence(id) if stmt_type == Complex: stmt = stmt_type([source_agent, target_agent], evidence=ev) else: stmt = stmt_type(source_agent, target_agent, evidence=ev) self.statements.append(stmt) return self.statements
python
def get_statements(self): edges = _get_dict_from_list('edges', self.cx) for edge in edges: edge_type = edge.get('i') if not edge_type: continue stmt_type = _stmt_map.get(edge_type) if stmt_type: id = edge['@id'] source_agent = self._node_agents.get(edge['s']) target_agent = self._node_agents.get(edge['t']) if not source_agent or not target_agent: logger.info("Skipping edge %s->%s: %s" % (self._node_names[edge['s']], self._node_names[edge['t']], edge)) continue ev = self._create_evidence(id) if stmt_type == Complex: stmt = stmt_type([source_agent, target_agent], evidence=ev) else: stmt = stmt_type(source_agent, target_agent, evidence=ev) self.statements.append(stmt) return self.statements
[ "def", "get_statements", "(", "self", ")", ":", "edges", "=", "_get_dict_from_list", "(", "'edges'", ",", "self", ".", "cx", ")", "for", "edge", "in", "edges", ":", "edge_type", "=", "edge", ".", "get", "(", "'i'", ")", "if", "not", "edge_type", ":", ...
Convert network edges into Statements. Returns ------- list of Statements Converted INDRA Statements.
[ "Convert", "network", "edges", "into", "Statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L175-L204
19,078
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.node_has_edge_with_label
def node_has_edge_with_label(self, node_name, edge_label): """Looks for an edge from node_name to some other node with the specified label. Returns the node to which this edge points if it exists, or None if it doesn't. Parameters ---------- G : The graph object node_name : Node that the edge starts at edge_label : The text in the relation property of the edge """ G = self.G for edge in G.edges(node_name): to = edge[1] relation_name = G.edges[node_name, to]['relation'] if relation_name == edge_label: return to return None
python
def node_has_edge_with_label(self, node_name, edge_label): G = self.G for edge in G.edges(node_name): to = edge[1] relation_name = G.edges[node_name, to]['relation'] if relation_name == edge_label: return to return None
[ "def", "node_has_edge_with_label", "(", "self", ",", "node_name", ",", "edge_label", ")", ":", "G", "=", "self", ".", "G", "for", "edge", "in", "G", ".", "edges", "(", "node_name", ")", ":", "to", "=", "edge", "[", "1", "]", "relation_name", "=", "G"...
Looks for an edge from node_name to some other node with the specified label. Returns the node to which this edge points if it exists, or None if it doesn't. Parameters ---------- G : The graph object node_name : Node that the edge starts at edge_label : The text in the relation property of the edge
[ "Looks", "for", "an", "edge", "from", "node_name", "to", "some", "other", "node", "with", "the", "specified", "label", ".", "Returns", "the", "node", "to", "which", "this", "edge", "points", "if", "it", "exists", "or", "None", "if", "it", "doesn", "t", ...
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L83-L104
19,079
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.general_node_label
def general_node_label(self, node): """Used for debugging - gives a short text description of a graph node.""" G = self.G if G.node[node]['is_event']: return 'event type=' + G.node[node]['type'] else: return 'entity text=' + G.node[node]['text']
python
def general_node_label(self, node): G = self.G if G.node[node]['is_event']: return 'event type=' + G.node[node]['type'] else: return 'entity text=' + G.node[node]['text']
[ "def", "general_node_label", "(", "self", ",", "node", ")", ":", "G", "=", "self", ".", "G", "if", "G", ".", "node", "[", "node", "]", "[", "'is_event'", "]", ":", "return", "'event type='", "+", "G", ".", "node", "[", "node", "]", "[", "'type'", ...
Used for debugging - gives a short text description of a graph node.
[ "Used", "for", "debugging", "-", "gives", "a", "short", "text", "description", "of", "a", "graph", "node", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L106-L113
19,080
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.print_parent_and_children_info
def print_parent_and_children_info(self, node): """Used for debugging - prints a short description of a a node, its children, its parents, and its parents' children.""" G = self.G parents = G.predecessors(node) children = G.successors(node) print(general_node_label(G, node)) tabs = '\t' for parent in parents: relation = G.edges[parent, node]['relation'] print(tabs + 'Parent (%s): %s' % (relation, general_node_label(G, parent))) for cop in G.successors(parent): if cop != node: relation = G.edges[parent, cop]['relation'] print(tabs + 'Child of parent (%s): %s' % (relation, general_node_label(G, cop))) for child in children: relation = G.edges[node, child]['relation'] print(tabs + 'Child (%s): (%s)' % (relation, general_node_label(G, child)))
python
def print_parent_and_children_info(self, node): G = self.G parents = G.predecessors(node) children = G.successors(node) print(general_node_label(G, node)) tabs = '\t' for parent in parents: relation = G.edges[parent, node]['relation'] print(tabs + 'Parent (%s): %s' % (relation, general_node_label(G, parent))) for cop in G.successors(parent): if cop != node: relation = G.edges[parent, cop]['relation'] print(tabs + 'Child of parent (%s): %s' % (relation, general_node_label(G, cop))) for child in children: relation = G.edges[node, child]['relation'] print(tabs + 'Child (%s): (%s)' % (relation, general_node_label(G, child)))
[ "def", "print_parent_and_children_info", "(", "self", ",", "node", ")", ":", "G", "=", "self", ".", "G", "parents", "=", "G", ".", "predecessors", "(", "node", ")", "children", "=", "G", ".", "successors", "(", "node", ")", "print", "(", "general_node_la...
Used for debugging - prints a short description of a a node, its children, its parents, and its parents' children.
[ "Used", "for", "debugging", "-", "prints", "a", "short", "description", "of", "a", "a", "node", "its", "children", "its", "parents", "and", "its", "parents", "children", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L115-L136
19,081
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.find_event_with_outgoing_edges
def find_event_with_outgoing_edges(self, event_name, desired_relations): """Gets a list of event nodes with the specified event_name and outgoing edges annotated with each of the specified relations. Parameters ---------- event_name : str Look for event nodes with this name desired_relations : list[str] Look for event nodes with outgoing edges annotated with each of these relations Returns ------- event_nodes : list[str] Event nodes that fit the desired criteria """ G = self.G desired_relations = set(desired_relations) desired_event_nodes = [] for node in G.node.keys(): if G.node[node]['is_event'] and G.node[node]['type'] == event_name: has_relations = [G.edges[node, edge[1]]['relation'] for edge in G.edges(node)] has_relations = set(has_relations) # Did the outgoing edges from this node have all of the # desired relations? if desired_relations.issubset(has_relations): desired_event_nodes.append(node) return desired_event_nodes
python
def find_event_with_outgoing_edges(self, event_name, desired_relations): G = self.G desired_relations = set(desired_relations) desired_event_nodes = [] for node in G.node.keys(): if G.node[node]['is_event'] and G.node[node]['type'] == event_name: has_relations = [G.edges[node, edge[1]]['relation'] for edge in G.edges(node)] has_relations = set(has_relations) # Did the outgoing edges from this node have all of the # desired relations? if desired_relations.issubset(has_relations): desired_event_nodes.append(node) return desired_event_nodes
[ "def", "find_event_with_outgoing_edges", "(", "self", ",", "event_name", ",", "desired_relations", ")", ":", "G", "=", "self", ".", "G", "desired_relations", "=", "set", "(", "desired_relations", ")", "desired_event_nodes", "=", "[", "]", "for", "node", "in", ...
Gets a list of event nodes with the specified event_name and outgoing edges annotated with each of the specified relations. Parameters ---------- event_name : str Look for event nodes with this name desired_relations : list[str] Look for event nodes with outgoing edges annotated with each of these relations Returns ------- event_nodes : list[str] Event nodes that fit the desired criteria
[ "Gets", "a", "list", "of", "event", "nodes", "with", "the", "specified", "event_name", "and", "outgoing", "edges", "annotated", "with", "each", "of", "the", "specified", "relations", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L154-L186
19,082
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.get_related_node
def get_related_node(self, node, relation): """Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If there exists such an edge, returns the name of the node it points to. Otherwise, returns None.""" G = self.G for edge in G.edges(node): to = edge[1] to_relation = G.edges[node, to]['relation'] if to_relation == relation: return to return None
python
def get_related_node(self, node, relation): G = self.G for edge in G.edges(node): to = edge[1] to_relation = G.edges[node, to]['relation'] if to_relation == relation: return to return None
[ "def", "get_related_node", "(", "self", ",", "node", ",", "relation", ")", ":", "G", "=", "self", ".", "G", "for", "edge", "in", "G", ".", "edges", "(", "node", ")", ":", "to", "=", "edge", "[", "1", "]", "to_relation", "=", "G", ".", "edges", ...
Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If there exists such an edge, returns the name of the node it points to. Otherwise, returns None.
[ "Looks", "for", "an", "edge", "from", "node", "to", "some", "other", "node", "such", "that", "the", "edge", "is", "annotated", "with", "the", "given", "relation", ".", "If", "there", "exists", "such", "an", "edge", "returns", "the", "name", "of", "the", ...
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L188-L199
19,083
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.get_entity_text_for_relation
def get_entity_text_for_relation(self, node, relation): """Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If there exists such an edge, and the node at the other edge is an entity, return that entity's text. Otherwise, returns None.""" G = self.G related_node = self.get_related_node(node, relation) if related_node is not None: if not G.node[related_node]['is_event']: return G.node[related_node]['text'] else: return None else: return None
python
def get_entity_text_for_relation(self, node, relation): G = self.G related_node = self.get_related_node(node, relation) if related_node is not None: if not G.node[related_node]['is_event']: return G.node[related_node]['text'] else: return None else: return None
[ "def", "get_entity_text_for_relation", "(", "self", ",", "node", ",", "relation", ")", ":", "G", "=", "self", ".", "G", "related_node", "=", "self", ".", "get_related_node", "(", "node", ",", "relation", ")", "if", "related_node", "is", "not", "None", ":",...
Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If there exists such an edge, and the node at the other edge is an entity, return that entity's text. Otherwise, returns None.
[ "Looks", "for", "an", "edge", "from", "node", "to", "some", "other", "node", "such", "that", "the", "edge", "is", "annotated", "with", "the", "given", "relation", ".", "If", "there", "exists", "such", "an", "edge", "and", "the", "node", "at", "the", "o...
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L201-L215
19,084
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.process_increase_expression_amount
def process_increase_expression_amount(self): """Looks for Positive_Regulation events with a specified Cause and a Gene_Expression theme, and processes them into INDRA statements. """ statements = [] pwcs = self.find_event_parent_with_event_child( 'Positive_regulation', 'Gene_expression') for pair in pwcs: pos_reg = pair[0] expression = pair[1] cause = self.get_entity_text_for_relation(pos_reg, 'Cause') target = self.get_entity_text_for_relation(expression, 'Theme') if cause is not None and target is not None: theme_node = self.get_related_node(expression, 'Theme') assert(theme_node is not None) evidence = self.node_to_evidence(theme_node, is_direct=False) statements.append(IncreaseAmount(s2a(cause), s2a(target), evidence=evidence)) return statements
python
def process_increase_expression_amount(self): statements = [] pwcs = self.find_event_parent_with_event_child( 'Positive_regulation', 'Gene_expression') for pair in pwcs: pos_reg = pair[0] expression = pair[1] cause = self.get_entity_text_for_relation(pos_reg, 'Cause') target = self.get_entity_text_for_relation(expression, 'Theme') if cause is not None and target is not None: theme_node = self.get_related_node(expression, 'Theme') assert(theme_node is not None) evidence = self.node_to_evidence(theme_node, is_direct=False) statements.append(IncreaseAmount(s2a(cause), s2a(target), evidence=evidence)) return statements
[ "def", "process_increase_expression_amount", "(", "self", ")", ":", "statements", "=", "[", "]", "pwcs", "=", "self", ".", "find_event_parent_with_event_child", "(", "'Positive_regulation'", ",", "'Gene_expression'", ")", "for", "pair", "in", "pwcs", ":", "pos_reg",...
Looks for Positive_Regulation events with a specified Cause and a Gene_Expression theme, and processes them into INDRA statements.
[ "Looks", "for", "Positive_Regulation", "events", "with", "a", "specified", "Cause", "and", "a", "Gene_Expression", "theme", "and", "processes", "them", "into", "INDRA", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L217-L239
19,085
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.process_phosphorylation_statements
def process_phosphorylation_statements(self): """Looks for Phosphorylation events in the graph and extracts them into INDRA statements. In particular, looks for a Positive_regulation event node with a child Phosphorylation event node. If Positive_regulation has an outgoing Cause edge, that's the subject If Phosphorylation has an outgoing Theme edge, that's the object If Phosphorylation has an outgoing Site edge, that's the site """ G = self.G statements = [] pwcs = self.find_event_parent_with_event_child('Positive_regulation', 'Phosphorylation') for pair in pwcs: (pos_reg, phos) = pair cause = self.get_entity_text_for_relation(pos_reg, 'Cause') theme = self.get_entity_text_for_relation(phos, 'Theme') print('Cause:', cause, 'Theme:', theme) # If the trigger word is dephosphorylate or similar, then we # extract a dephosphorylation statement trigger_word = self.get_entity_text_for_relation(phos, 'Phosphorylation') if 'dephos' in trigger_word: deph = True else: deph = False site = self.get_entity_text_for_relation(phos, 'Site') theme_node = self.get_related_node(phos, 'Theme') assert(theme_node is not None) evidence = self.node_to_evidence(theme_node, is_direct=False) if theme is not None: if deph: statements.append(Dephosphorylation(s2a(cause), s2a(theme), site, evidence=evidence)) else: statements.append(Phosphorylation(s2a(cause), s2a(theme), site, evidence=evidence)) return statements
python
def process_phosphorylation_statements(self): G = self.G statements = [] pwcs = self.find_event_parent_with_event_child('Positive_regulation', 'Phosphorylation') for pair in pwcs: (pos_reg, phos) = pair cause = self.get_entity_text_for_relation(pos_reg, 'Cause') theme = self.get_entity_text_for_relation(phos, 'Theme') print('Cause:', cause, 'Theme:', theme) # If the trigger word is dephosphorylate or similar, then we # extract a dephosphorylation statement trigger_word = self.get_entity_text_for_relation(phos, 'Phosphorylation') if 'dephos' in trigger_word: deph = True else: deph = False site = self.get_entity_text_for_relation(phos, 'Site') theme_node = self.get_related_node(phos, 'Theme') assert(theme_node is not None) evidence = self.node_to_evidence(theme_node, is_direct=False) if theme is not None: if deph: statements.append(Dephosphorylation(s2a(cause), s2a(theme), site, evidence=evidence)) else: statements.append(Phosphorylation(s2a(cause), s2a(theme), site, evidence=evidence)) return statements
[ "def", "process_phosphorylation_statements", "(", "self", ")", ":", "G", "=", "self", ".", "G", "statements", "=", "[", "]", "pwcs", "=", "self", ".", "find_event_parent_with_event_child", "(", "'Positive_regulation'", ",", "'Phosphorylation'", ")", "for", "pair",...
Looks for Phosphorylation events in the graph and extracts them into INDRA statements. In particular, looks for a Positive_regulation event node with a child Phosphorylation event node. If Positive_regulation has an outgoing Cause edge, that's the subject If Phosphorylation has an outgoing Theme edge, that's the object If Phosphorylation has an outgoing Site edge, that's the site
[ "Looks", "for", "Phosphorylation", "events", "in", "the", "graph", "and", "extracts", "them", "into", "INDRA", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L265-L309
19,086
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.process_binding_statements
def process_binding_statements(self): """Looks for Binding events in the graph and extracts them into INDRA statements. In particular, looks for a Binding event node with outgoing edges with relations Theme and Theme2 - the entities these edges point to are the two constituents of the Complex INDRA statement. """ G = self.G statements = [] binding_nodes = self.find_event_with_outgoing_edges('Binding', ['Theme', 'Theme2']) for node in binding_nodes: theme1 = self.get_entity_text_for_relation(node, 'Theme') theme1_node = self.get_related_node(node, 'Theme') theme2 = self.get_entity_text_for_relation(node, 'Theme2') assert(theme1 is not None) assert(theme2 is not None) evidence = self.node_to_evidence(theme1_node, is_direct=True) statements.append(Complex([s2a(theme1), s2a(theme2)], evidence=evidence)) return statements
python
def process_binding_statements(self): G = self.G statements = [] binding_nodes = self.find_event_with_outgoing_edges('Binding', ['Theme', 'Theme2']) for node in binding_nodes: theme1 = self.get_entity_text_for_relation(node, 'Theme') theme1_node = self.get_related_node(node, 'Theme') theme2 = self.get_entity_text_for_relation(node, 'Theme2') assert(theme1 is not None) assert(theme2 is not None) evidence = self.node_to_evidence(theme1_node, is_direct=True) statements.append(Complex([s2a(theme1), s2a(theme2)], evidence=evidence)) return statements
[ "def", "process_binding_statements", "(", "self", ")", ":", "G", "=", "self", ".", "G", "statements", "=", "[", "]", "binding_nodes", "=", "self", ".", "find_event_with_outgoing_edges", "(", "'Binding'", ",", "[", "'Theme'", ",", "'Theme2'", "]", ")", "for",...
Looks for Binding events in the graph and extracts them into INDRA statements. In particular, looks for a Binding event node with outgoing edges with relations Theme and Theme2 - the entities these edges point to are the two constituents of the Complex INDRA statement.
[ "Looks", "for", "Binding", "events", "in", "the", "graph", "and", "extracts", "them", "into", "INDRA", "statements", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L311-L338
19,087
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.node_to_evidence
def node_to_evidence(self, entity_node, is_direct): """Computes an evidence object for a statement. We assume that the entire event happens within a single statement, and get the text of the sentence by getting the text of the sentence containing the provided node that corresponds to one of the entities participanting in the event. The Evidence's pmid is whatever was provided to the constructor (perhaps None), and the annotations are the subgraph containing the provided node, its ancestors, and its descendants. """ # We assume that the entire event is within a single sentence, and # get this sentence by getting the sentence containing one of the # entities sentence_text = self.G.node[entity_node]['sentence_text'] # Make annotations object containing the fully connected subgraph # containing these nodes subgraph = self.connected_subgraph(entity_node) edge_properties = {} for edge in subgraph.edges(): edge_properties[edge] = subgraph.edges[edge] annotations = {'node_properties': subgraph.node, 'edge_properties': edge_properties} # Make evidence object epistemics = dict() evidence = Evidence(source_api='tees', pmid=self.pmid, text=sentence_text, epistemics={'direct': is_direct}, annotations=annotations) return evidence
python
def node_to_evidence(self, entity_node, is_direct): # We assume that the entire event is within a single sentence, and # get this sentence by getting the sentence containing one of the # entities sentence_text = self.G.node[entity_node]['sentence_text'] # Make annotations object containing the fully connected subgraph # containing these nodes subgraph = self.connected_subgraph(entity_node) edge_properties = {} for edge in subgraph.edges(): edge_properties[edge] = subgraph.edges[edge] annotations = {'node_properties': subgraph.node, 'edge_properties': edge_properties} # Make evidence object epistemics = dict() evidence = Evidence(source_api='tees', pmid=self.pmid, text=sentence_text, epistemics={'direct': is_direct}, annotations=annotations) return evidence
[ "def", "node_to_evidence", "(", "self", ",", "entity_node", ",", "is_direct", ")", ":", "# We assume that the entire event is within a single sentence, and", "# get this sentence by getting the sentence containing one of the", "# entities", "sentence_text", "=", "self", ".", "G", ...
Computes an evidence object for a statement. We assume that the entire event happens within a single statement, and get the text of the sentence by getting the text of the sentence containing the provided node that corresponds to one of the entities participanting in the event. The Evidence's pmid is whatever was provided to the constructor (perhaps None), and the annotations are the subgraph containing the provided node, its ancestors, and its descendants.
[ "Computes", "an", "evidence", "object", "for", "a", "statement", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L340-L375
19,088
sorgerlab/indra
indra/sources/tees/processor.py
TEESProcessor.connected_subgraph
def connected_subgraph(self, node): """Returns the subgraph containing the given node, its ancestors, and its descendants. Parameters ---------- node : str We want to create the subgraph containing this node. Returns ------- subgraph : networkx.DiGraph The subgraph containing the specified node. """ G = self.G subgraph_nodes = set() subgraph_nodes.add(node) subgraph_nodes.update(dag.ancestors(G, node)) subgraph_nodes.update(dag.descendants(G, node)) # Keep adding the ancesotrs and descendants on nodes of the graph # until we can't do so any longer graph_changed = True while graph_changed: initial_count = len(subgraph_nodes) old_nodes = set(subgraph_nodes) for n in old_nodes: subgraph_nodes.update(dag.ancestors(G, n)) subgraph_nodes.update(dag.descendants(G, n)) current_count = len(subgraph_nodes) graph_changed = current_count > initial_count return G.subgraph(subgraph_nodes)
python
def connected_subgraph(self, node): G = self.G subgraph_nodes = set() subgraph_nodes.add(node) subgraph_nodes.update(dag.ancestors(G, node)) subgraph_nodes.update(dag.descendants(G, node)) # Keep adding the ancesotrs and descendants on nodes of the graph # until we can't do so any longer graph_changed = True while graph_changed: initial_count = len(subgraph_nodes) old_nodes = set(subgraph_nodes) for n in old_nodes: subgraph_nodes.update(dag.ancestors(G, n)) subgraph_nodes.update(dag.descendants(G, n)) current_count = len(subgraph_nodes) graph_changed = current_count > initial_count return G.subgraph(subgraph_nodes)
[ "def", "connected_subgraph", "(", "self", ",", "node", ")", ":", "G", "=", "self", ".", "G", "subgraph_nodes", "=", "set", "(", ")", "subgraph_nodes", ".", "add", "(", "node", ")", "subgraph_nodes", ".", "update", "(", "dag", ".", "ancestors", "(", "G"...
Returns the subgraph containing the given node, its ancestors, and its descendants. Parameters ---------- node : str We want to create the subgraph containing this node. Returns ------- subgraph : networkx.DiGraph The subgraph containing the specified node.
[ "Returns", "the", "subgraph", "containing", "the", "given", "node", "its", "ancestors", "and", "its", "descendants", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/tees/processor.py#L377-L412
19,089
sorgerlab/indra
indra/sources/trips/api.py
process_text
def process_text(text, save_xml_name='trips_output.xml', save_xml_pretty=True, offline=False, service_endpoint='drum'): """Return a TripsProcessor by processing text. Parameters ---------- text : str The text to be processed. save_xml_name : Optional[str] The name of the file to save the returned TRIPS extraction knowledge base XML. Default: trips_output.xml save_xml_pretty : Optional[bool] If True, the saved XML is pretty-printed. Some third-party tools require non-pretty-printed XMLs which can be obtained by setting this to False. Default: True offline : Optional[bool] If True, offline reading is used with a local instance of DRUM, if available. Default: False service_endpoint : Optional[str] Selects the TRIPS/DRUM web service endpoint to use. Is a choice between "drum" (default) and "drum-dev", a nightly build. Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements. """ if not offline: html = client.send_query(text, service_endpoint) xml = client.get_xml(html) else: if offline_reading: try: dr = DrumReader() if dr is None: raise Exception('DrumReader could not be instantiated.') except BaseException as e: logger.error(e) logger.error('Make sure drum/bin/trips-drum is running in' ' a separate process') return None try: dr.read_text(text) dr.start() except SystemExit: pass xml = dr.extractions[0] else: logger.error('Offline reading with TRIPS/DRUM not available.') logger.error('Error message was: %s' % offline_err) msg = """ To install DRUM locally, follow instructions at https://github.com/wdebeaum/drum. Next, install the pykqml package either from pip or from https://github.com/bgyori/pykqml. Once installed, run drum/bin/trips-drum in a separate process. """ logger.error(msg) return None if save_xml_name: client.save_xml(xml, save_xml_name, save_xml_pretty) return process_xml(xml)
python
def process_text(text, save_xml_name='trips_output.xml', save_xml_pretty=True, offline=False, service_endpoint='drum'): if not offline: html = client.send_query(text, service_endpoint) xml = client.get_xml(html) else: if offline_reading: try: dr = DrumReader() if dr is None: raise Exception('DrumReader could not be instantiated.') except BaseException as e: logger.error(e) logger.error('Make sure drum/bin/trips-drum is running in' ' a separate process') return None try: dr.read_text(text) dr.start() except SystemExit: pass xml = dr.extractions[0] else: logger.error('Offline reading with TRIPS/DRUM not available.') logger.error('Error message was: %s' % offline_err) msg = """ To install DRUM locally, follow instructions at https://github.com/wdebeaum/drum. Next, install the pykqml package either from pip or from https://github.com/bgyori/pykqml. Once installed, run drum/bin/trips-drum in a separate process. """ logger.error(msg) return None if save_xml_name: client.save_xml(xml, save_xml_name, save_xml_pretty) return process_xml(xml)
[ "def", "process_text", "(", "text", ",", "save_xml_name", "=", "'trips_output.xml'", ",", "save_xml_pretty", "=", "True", ",", "offline", "=", "False", ",", "service_endpoint", "=", "'drum'", ")", ":", "if", "not", "offline", ":", "html", "=", "client", ".",...
Return a TripsProcessor by processing text. Parameters ---------- text : str The text to be processed. save_xml_name : Optional[str] The name of the file to save the returned TRIPS extraction knowledge base XML. Default: trips_output.xml save_xml_pretty : Optional[bool] If True, the saved XML is pretty-printed. Some third-party tools require non-pretty-printed XMLs which can be obtained by setting this to False. Default: True offline : Optional[bool] If True, offline reading is used with a local instance of DRUM, if available. Default: False service_endpoint : Optional[str] Selects the TRIPS/DRUM web service endpoint to use. Is a choice between "drum" (default) and "drum-dev", a nightly build. Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements.
[ "Return", "a", "TripsProcessor", "by", "processing", "text", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/api.py#L18-L80
19,090
sorgerlab/indra
indra/sources/trips/api.py
process_xml_file
def process_xml_file(file_name): """Return a TripsProcessor by processing a TRIPS EKB XML file. Parameters ---------- file_name : str Path to a TRIPS extraction knowledge base (EKB) file to be processed. Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements. """ with open(file_name, 'rb') as fh: ekb = fh.read().decode('utf-8') return process_xml(ekb)
python
def process_xml_file(file_name): with open(file_name, 'rb') as fh: ekb = fh.read().decode('utf-8') return process_xml(ekb)
[ "def", "process_xml_file", "(", "file_name", ")", ":", "with", "open", "(", "file_name", ",", "'rb'", ")", "as", "fh", ":", "ekb", "=", "fh", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "return", "process_xml", "(", "ekb", ")" ]
Return a TripsProcessor by processing a TRIPS EKB XML file. Parameters ---------- file_name : str Path to a TRIPS extraction knowledge base (EKB) file to be processed. Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements.
[ "Return", "a", "TripsProcessor", "by", "processing", "a", "TRIPS", "EKB", "XML", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/api.py#L83-L99
19,091
sorgerlab/indra
indra/sources/trips/api.py
process_xml
def process_xml(xml_string): """Return a TripsProcessor by processing a TRIPS EKB XML string. Parameters ---------- xml_string : str A TRIPS extraction knowledge base (EKB) string to be processed. http://trips.ihmc.us/parser/api.html Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements. """ tp = TripsProcessor(xml_string) if tp.tree is None: return None tp.get_modifications_indirect() tp.get_activations_causal() tp.get_activations_stimulate() tp.get_complexes() tp.get_modifications() tp.get_active_forms() tp.get_active_forms_state() tp.get_activations() tp.get_translocation() tp.get_regulate_amounts() tp.get_degradations() tp.get_syntheses() tp.get_conversions() tp.get_simple_increase_decrease() return tp
python
def process_xml(xml_string): tp = TripsProcessor(xml_string) if tp.tree is None: return None tp.get_modifications_indirect() tp.get_activations_causal() tp.get_activations_stimulate() tp.get_complexes() tp.get_modifications() tp.get_active_forms() tp.get_active_forms_state() tp.get_activations() tp.get_translocation() tp.get_regulate_amounts() tp.get_degradations() tp.get_syntheses() tp.get_conversions() tp.get_simple_increase_decrease() return tp
[ "def", "process_xml", "(", "xml_string", ")", ":", "tp", "=", "TripsProcessor", "(", "xml_string", ")", "if", "tp", ".", "tree", "is", "None", ":", "return", "None", "tp", ".", "get_modifications_indirect", "(", ")", "tp", ".", "get_activations_causal", "(",...
Return a TripsProcessor by processing a TRIPS EKB XML string. Parameters ---------- xml_string : str A TRIPS extraction knowledge base (EKB) string to be processed. http://trips.ihmc.us/parser/api.html Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements.
[ "Return", "a", "TripsProcessor", "by", "processing", "a", "TRIPS", "EKB", "XML", "string", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/api.py#L102-L134
19,092
sorgerlab/indra
indra/belief/wm_scorer.py
load_eidos_curation_table
def load_eidos_curation_table(): """Return a pandas table of Eidos curation data.""" url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \ 'src/main/resources/org/clulab/wm/eidos/english/confidence/' + \ 'rule_summary.tsv' # Load the table of scores from the URL above into a data frame res = StringIO(requests.get(url).text) table = pandas.read_table(res, sep='\t') # Drop the last "Grant total" row table = table.drop(table.index[len(table)-1]) return table
python
def load_eidos_curation_table(): url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \ 'src/main/resources/org/clulab/wm/eidos/english/confidence/' + \ 'rule_summary.tsv' # Load the table of scores from the URL above into a data frame res = StringIO(requests.get(url).text) table = pandas.read_table(res, sep='\t') # Drop the last "Grant total" row table = table.drop(table.index[len(table)-1]) return table
[ "def", "load_eidos_curation_table", "(", ")", ":", "url", "=", "'https://raw.githubusercontent.com/clulab/eidos/master/'", "+", "'src/main/resources/org/clulab/wm/eidos/english/confidence/'", "+", "'rule_summary.tsv'", "# Load the table of scores from the URL above into a data frame", "res...
Return a pandas table of Eidos curation data.
[ "Return", "a", "pandas", "table", "of", "Eidos", "curation", "data", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/wm_scorer.py#L11-L21
19,093
sorgerlab/indra
indra/belief/wm_scorer.py
get_eidos_bayesian_scorer
def get_eidos_bayesian_scorer(prior_counts=None): """Return a BayesianScorer based on Eidos curation counts.""" table = load_eidos_curation_table() subtype_counts = {'eidos': {r: [c, i] for r, c, i in zip(table['RULE'], table['Num correct'], table['Num incorrect'])}} prior_counts = prior_counts if prior_counts else copy.deepcopy( default_priors) scorer = BayesianScorer(prior_counts=prior_counts, subtype_counts=subtype_counts) return scorer
python
def get_eidos_bayesian_scorer(prior_counts=None): table = load_eidos_curation_table() subtype_counts = {'eidos': {r: [c, i] for r, c, i in zip(table['RULE'], table['Num correct'], table['Num incorrect'])}} prior_counts = prior_counts if prior_counts else copy.deepcopy( default_priors) scorer = BayesianScorer(prior_counts=prior_counts, subtype_counts=subtype_counts) return scorer
[ "def", "get_eidos_bayesian_scorer", "(", "prior_counts", "=", "None", ")", ":", "table", "=", "load_eidos_curation_table", "(", ")", "subtype_counts", "=", "{", "'eidos'", ":", "{", "r", ":", "[", "c", ",", "i", "]", "for", "r", ",", "c", ",", "i", "in...
Return a BayesianScorer based on Eidos curation counts.
[ "Return", "a", "BayesianScorer", "based", "on", "Eidos", "curation", "counts", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/wm_scorer.py#L24-L35
19,094
sorgerlab/indra
indra/belief/wm_scorer.py
get_eidos_scorer
def get_eidos_scorer(): """Return a SimpleScorer based on Eidos curated precision estimates.""" table = load_eidos_curation_table() # Get the overall precision total_num = table['COUNT of RULE'].sum() weighted_sum = table['COUNT of RULE'].dot(table['% correct']) precision = weighted_sum / total_num # We have to divide this into a random and systematic component, for now # in an ad-hoc manner syst_error = 0.05 rand_error = 1 - precision - syst_error prior_probs = {'rand': {'eidos': rand_error}, 'syst': {'eidos': syst_error}} # Get a dict of rule-specific errors. subtype_probs = {'eidos': {k: 1.0-min(v, 0.95)-syst_error for k, v in zip(table['RULE'], table['% correct'])}} scorer = SimpleScorer(prior_probs, subtype_probs) return scorer
python
def get_eidos_scorer(): table = load_eidos_curation_table() # Get the overall precision total_num = table['COUNT of RULE'].sum() weighted_sum = table['COUNT of RULE'].dot(table['% correct']) precision = weighted_sum / total_num # We have to divide this into a random and systematic component, for now # in an ad-hoc manner syst_error = 0.05 rand_error = 1 - precision - syst_error prior_probs = {'rand': {'eidos': rand_error}, 'syst': {'eidos': syst_error}} # Get a dict of rule-specific errors. subtype_probs = {'eidos': {k: 1.0-min(v, 0.95)-syst_error for k, v in zip(table['RULE'], table['% correct'])}} scorer = SimpleScorer(prior_probs, subtype_probs) return scorer
[ "def", "get_eidos_scorer", "(", ")", ":", "table", "=", "load_eidos_curation_table", "(", ")", "# Get the overall precision", "total_num", "=", "table", "[", "'COUNT of RULE'", "]", ".", "sum", "(", ")", "weighted_sum", "=", "table", "[", "'COUNT of RULE'", "]", ...
Return a SimpleScorer based on Eidos curated precision estimates.
[ "Return", "a", "SimpleScorer", "based", "on", "Eidos", "curated", "precision", "estimates", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/wm_scorer.py#L38-L57
19,095
sorgerlab/indra
indra/sources/trrust/api.py
process_from_web
def process_from_web(): """Return a TrrustProcessor based on the online interaction table. Returns ------- TrrustProcessor A TrrustProcessor object that has a list of INDRA Statements in its statements attribute. """ logger.info('Downloading table from %s' % trrust_human_url) res = requests.get(trrust_human_url) res.raise_for_status() df = pandas.read_table(io.StringIO(res.text)) tp = TrrustProcessor(df) tp.extract_statements() return tp
python
def process_from_web(): logger.info('Downloading table from %s' % trrust_human_url) res = requests.get(trrust_human_url) res.raise_for_status() df = pandas.read_table(io.StringIO(res.text)) tp = TrrustProcessor(df) tp.extract_statements() return tp
[ "def", "process_from_web", "(", ")", ":", "logger", ".", "info", "(", "'Downloading table from %s'", "%", "trrust_human_url", ")", "res", "=", "requests", ".", "get", "(", "trrust_human_url", ")", "res", ".", "raise_for_status", "(", ")", "df", "=", "pandas", ...
Return a TrrustProcessor based on the online interaction table. Returns ------- TrrustProcessor A TrrustProcessor object that has a list of INDRA Statements in its statements attribute.
[ "Return", "a", "TrrustProcessor", "based", "on", "the", "online", "interaction", "table", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trrust/api.py#L18-L33
19,096
sorgerlab/indra
indra/sources/rlimsp/api.py
process_from_webservice
def process_from_webservice(id_val, id_type='pmcid', source='pmc', with_grounding=True): """Return an output from RLIMS-p for the given PubMed ID or PMC ID. Parameters ---------- id_val : str A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to be "read". id_type : str Either 'pmid' or 'pmcid'. The default is 'pmcid'. source : str Either 'pmc' or 'medline', whether you want pmc fulltext or medline abstracts. with_grounding : bool The RLIMS-P web service provides two endpoints, one pre-grounded, the other not so much. The grounded endpoint returns far less content, and may perform some grounding that can be handled by the grounding mapper. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute. """ if with_grounding: fmt = '%s.normed/%s/%s' else: fmt = '%s/%s/%s' resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val)) if resp.status_code != 200: raise RLIMSP_Error("Bad status code: %d - %s" % (resp.status_code, resp.reason)) rp = RlimspProcessor(resp.json()) rp.extract_statements() return rp
python
def process_from_webservice(id_val, id_type='pmcid', source='pmc', with_grounding=True): if with_grounding: fmt = '%s.normed/%s/%s' else: fmt = '%s/%s/%s' resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val)) if resp.status_code != 200: raise RLIMSP_Error("Bad status code: %d - %s" % (resp.status_code, resp.reason)) rp = RlimspProcessor(resp.json()) rp.extract_statements() return rp
[ "def", "process_from_webservice", "(", "id_val", ",", "id_type", "=", "'pmcid'", ",", "source", "=", "'pmc'", ",", "with_grounding", "=", "True", ")", ":", "if", "with_grounding", ":", "fmt", "=", "'%s.normed/%s/%s'", "else", ":", "fmt", "=", "'%s/%s/%s'", "...
Return an output from RLIMS-p for the given PubMed ID or PMC ID. Parameters ---------- id_val : str A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to be "read". id_type : str Either 'pmid' or 'pmcid'. The default is 'pmcid'. source : str Either 'pmc' or 'medline', whether you want pmc fulltext or medline abstracts. with_grounding : bool The RLIMS-P web service provides two endpoints, one pre-grounded, the other not so much. The grounded endpoint returns far less content, and may perform some grounding that can be handled by the grounding mapper. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute.
[ "Return", "an", "output", "from", "RLIMS", "-", "p", "for", "the", "given", "PubMed", "ID", "or", "PMC", "ID", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/api.py#L21-L59
19,097
sorgerlab/indra
indra/sources/rlimsp/api.py
process_from_json_file
def process_from_json_file(filename, doc_id_type=None): """Process RLIMSP extractions from a bulk-download JSON file. Parameters ---------- filename : str Path to the JSON file. doc_id_type : Optional[str] In some cases the RLIMS-P paragraph info doesn't contain 'pmid' or 'pmcid' explicitly, instead if contains a 'docId' key. This parameter allows defining what ID type 'docId' sould be interpreted as. Its values should be 'pmid' or 'pmcid' or None if not used. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute. """ with open(filename, 'rt') as f: lines = f.readlines() json_list = [] for line in lines: json_list.append(json.loads(line)) rp = RlimspProcessor(json_list, doc_id_type=doc_id_type) rp.extract_statements() return rp
python
def process_from_json_file(filename, doc_id_type=None): with open(filename, 'rt') as f: lines = f.readlines() json_list = [] for line in lines: json_list.append(json.loads(line)) rp = RlimspProcessor(json_list, doc_id_type=doc_id_type) rp.extract_statements() return rp
[ "def", "process_from_json_file", "(", "filename", ",", "doc_id_type", "=", "None", ")", ":", "with", "open", "(", "filename", ",", "'rt'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "json_list", "=", "[", "]", "for", "line", ...
Process RLIMSP extractions from a bulk-download JSON file. Parameters ---------- filename : str Path to the JSON file. doc_id_type : Optional[str] In some cases the RLIMS-P paragraph info doesn't contain 'pmid' or 'pmcid' explicitly, instead if contains a 'docId' key. This parameter allows defining what ID type 'docId' sould be interpreted as. Its values should be 'pmid' or 'pmcid' or None if not used. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute.
[ "Process", "RLIMSP", "extractions", "from", "a", "bulk", "-", "download", "JSON", "file", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/api.py#L62-L88
19,098
sorgerlab/indra
indra/util/nested_dict.py
NestedDict.get
def get(self, key): "Find the first value within the tree which has the key." if key in self.keys(): return self[key] else: res = None for v in self.values(): # This could get weird if the actual expected returned value # is None, especially in teh case of overlap. Any ambiguity # would be resolved by get_path(s). if hasattr(v, 'get'): res = v.get(key) if res is not None: break return res
python
def get(self, key): "Find the first value within the tree which has the key." if key in self.keys(): return self[key] else: res = None for v in self.values(): # This could get weird if the actual expected returned value # is None, especially in teh case of overlap. Any ambiguity # would be resolved by get_path(s). if hasattr(v, 'get'): res = v.get(key) if res is not None: break return res
[ "def", "get", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "keys", "(", ")", ":", "return", "self", "[", "key", "]", "else", ":", "res", "=", "None", "for", "v", "in", "self", ".", "values", "(", ")", ":", "# This could ge...
Find the first value within the tree which has the key.
[ "Find", "the", "first", "value", "within", "the", "tree", "which", "has", "the", "key", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/nested_dict.py#L58-L72
19,099
sorgerlab/indra
indra/util/nested_dict.py
NestedDict.get_path
def get_path(self, key): "Like `get`, but also return the path taken to the value." if key in self.keys(): return (key,), self[key] else: key_path, res = (None, None) for sub_key, v in self.items(): if isinstance(v, self.__class__): key_path, res = v.get_path(key) elif hasattr(v, 'get'): res = v.get(key) key_path = (key,) if res is not None else None if res is not None and key_path is not None: key_path = (sub_key,) + key_path break return key_path, res
python
def get_path(self, key): "Like `get`, but also return the path taken to the value." if key in self.keys(): return (key,), self[key] else: key_path, res = (None, None) for sub_key, v in self.items(): if isinstance(v, self.__class__): key_path, res = v.get_path(key) elif hasattr(v, 'get'): res = v.get(key) key_path = (key,) if res is not None else None if res is not None and key_path is not None: key_path = (sub_key,) + key_path break return key_path, res
[ "def", "get_path", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "keys", "(", ")", ":", "return", "(", "key", ",", ")", ",", "self", "[", "key", "]", "else", ":", "key_path", ",", "res", "=", "(", "None", ",", "None", ")"...
Like `get`, but also return the path taken to the value.
[ "Like", "get", "but", "also", "return", "the", "path", "taken", "to", "the", "value", "." ]
79a70415832c5702d7a820c7c9ccc8e25010124b
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/nested_dict.py#L74-L89