text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_scigraph_nodes(id_list)-> Iterator[Dict]: """ Queries scigraph neighbors to get a list of nodes back We use the scigraph neighbors function because ids can be sent in batch which is faster than iteratively querying solr search or the scigraph graph/id function :return: json decoded result from scigraph_ontology._neighbors_graph :raises ValueError: If id is not in scigraph """ |
scigraph = OntologyFactory().create('scigraph:data')
chunks = [id_list[i:i + 400] for i in range(0, len(list(id_list)), 400)]
for chunk in chunks:
params = {
'id': chunk,
'depth': 0
}
try:
result_graph = scigraph._neighbors_graph(**params)
for node in result_graph['nodes']:
yield node
except JSONDecodeError as exception:
# Assume json decode is due to an incorrect class ID
# Should we handle this?
raise ValueError(exception.doc) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_taxon(id: str) -> Optional[Node]: """ get taxon for id Currently via hardcoding, should replace when scigraph when taxa are more universally annotated (having these as node properties would also be more performant) :param id: curie formatted id :return: Node where id is the NCBITaxon curie and label is the scientific name """ |
taxon = None
namespace = id.split(":")[0]
if namespace in namespace_to_taxon():
taxon = namespace_to_taxon()[namespace]
return taxon |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def typed_node_from_id(id: str) -> TypedNode: """ Get typed node from id :param id: id as curie :return: TypedNode object """ |
filter_out_types = [
'cliqueLeader',
'Class',
'Node',
'Individual',
'quality',
'sequence feature'
]
node = next(get_scigraph_nodes([id]))
if 'lbl' in node:
label = node['lbl']
else:
label = None # Empty string or None?
types = [typ.lower() for typ in node['meta']['types']
if typ not in filter_out_types]
return TypedNode(
id=node['id'],
label=label,
type=types[0],
taxon = get_taxon(id)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_report_json(self):
""" Generate a summary in json format """ |
return self.reporter.json(self.n_lines, self.n_assocs, self.skipped) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_markdown(self):
""" Generate a summary in markdown format """ |
json = self.to_report_json()
# summary = json['summary']
s = "# Group: {group} - Dataset: {dataset}\n".format(group=json["group"], dataset=json["dataset"])
s += "\n## SUMMARY\n\n"
s += "This report generated on {}\n\n".format(datetime.date.today())
s += " * Associations: {}\n" . format(json["associations"])
s += " * Lines in file (incl headers): {}\n" . format(json["lines"])
s += " * Lines skipped: {}\n" . format(json["skipped_lines"])
# Header from GAF
s += "## Header From Original Association File\n\n"
s += "\n".join(["> {} ".format(head) for head in self.header])
## Table of Contents
s += "\n\n## Contents\n\n"
for rule, messages in sorted(json["messages"].items(), key=lambda t: t[0]):
any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags])
# For each tag we say to suppress output for, check if it matches any tag in the rule. If any matches
if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
print("Skipping {rule_num} because the tag(s) '{tag}' are suppressed".format(rule_num=rule, tag=", ".join(self.config.suppress_rule_reporting_tags)))
continue
s += "[{rule}](#{rule})\n\n".format(rule=rule)
s += "\n## MESSAGES\n\n"
for (rule, messages) in sorted(json["messages"].items(), key=lambda t: t[0]):
any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags])
# Skip if the rule metadata has "silent" as a tag
if self.config.rule_metadata and any_suppress_tag_in_rule_metadata:
# If there is a rule metadata, and the rule ID is in the config,
# get the list of tags if present and check for existence of "silent".
# If contained, continue to the next rule.
continue
s += "### {rule}\n\n".format(rule=rule)
if rule != "other" and self.config.rule_metadata:
s += "{title}\n\n".format(title=self.config.rule_metadata.get(rule, {}).get("title", ""))
s += "* total: {amount}\n".format(amount=len(messages))
if len(messages) > 0:
s += "#### Messages\n"
for message in messages:
obj = " ({})".format(message["obj"]) if message["obj"] else ""
s += "* {level} - {type}: {message}{obj} -- `{line}`\n".format(level=message["level"], type=message["type"], message=message["message"], line=message["line"], obj=obj)
# for g in json['groups']:
# s += " * {}: {}\n".format(g['level'], g['count'])
# s += "\n\n"
# for g in json['groups']:
# level = g['level']
# msgs = g['messages']
# if len(msgs) > 0:
# s += "### {}\n\n".format(level)
# for m in msgs:
# s += " * {}: obj:'{}' \"{}\" `{}`\n".format(m['type'],m['obj'],m['message'],m['line'])
return s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, file, skipheader=False, outfile=None):
"""Parse a line-oriented association file into a list of association dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Associations generated from the file """ |
associations = self.association_generator(file, skipheader=skipheader, outfile=outfile)
a = list(associations)
return a |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def association_generator(self, file, skipheader=False, outfile=None) -> Dict: """ Returns a generator that yields successive associations from file Yields ------ association """ |
file = self._ensure_file(file)
for line in file:
parsed_result = self.parse_line(line)
self.report.report_parsed_result(parsed_result, outfile, self.config.filtered_evidence_file, self.config.filter_out_evidence)
for association in parsed_result.associations:
# yield association if we don't care if it's a header or if it's definitely a real gaf line
if not skipheader or "header" not in association:
yield association
logging.info(self.report.short_summary())
file.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_to_subset(self, file, outfile=None, ontology=None, subset=None, class_map=None, relations=None):
""" Map a file to a subset, writing out results You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings Arguments --------- file: file Name or file object for input assoc file outfile: file Name or file object for output (mapped) assoc file; writes to stdout if not set subset: str Optional name of subset to map to, e.g. goslim_generic class_map: dict Mapping between asserted class ids and ids to map to. Many to many ontology: `Ontology` Ontology to extract subset from """ |
if subset is not None:
logging.info("Creating mapping for subset: {}".format(subset))
class_map = ontology.create_slim_mapping(subset=subset, relations=relations)
if class_map is None:
raise ValueError("Neither class_map not subset is set")
col = self.ANNOTATION_CLASS_COLUMN
file = self._ensure_file(file)
tuples = []
for line in file:
if line.startswith("!"):
continue
vals = line.split("\t")
logging.info("LINE: {} VALS: {}".format(line, vals))
if len(vals) < col:
raise ValueError("Line: {} has too few cols, expect class id in col {}".format(line, col))
cid = vals[col]
if cid not in class_map or len(class_map[cid]) == 0:
self.report.error(line, Report.UNMAPPED_ID, cid)
continue
else:
for mcid in class_map[cid]:
vals[col] = mcid
line = "\t".join(vals)
if outfile is not None:
outfile.write(line)
else:
print(line) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_config():
""" Return configuration for current session. When called for the first time, this will create a config object, using whatever is the default load path to find the config yaml """ |
if session.config is None:
path = session.default_config_path
if os.path.isfile(path):
logging.info("LOADING FROM: {}".format(path))
session.config = load_config(path)
else:
session.config = Config()
logging.info("using default session: {}, path does not exist: {}".format(session, path))
else:
logging.info("Using pre-loaded object: {}".format(session.config))
return session.config |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_config(path):
""" Set configuration for current session. """ |
logging.info("LOADING FROM: {}".format(path))
session.config = load_config(path)
return session.config |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_solr_search_url(self, use_amigo=False):
""" Return solr URL to be used for lexical entity searches A solr search URL is used to search entities/concepts based on a limited set of parameters. Arguments --------- use_amigo : bool If true, get the URL for the GO/AmiGO instance of GOlr. This is typically used for category='function' queries """ |
url = self.endpoint_url(self.solr_search)
if use_amigo:
url = self.endpoint_url(self.amigo_solr_search)
return url |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_source_gafs(group_metadata, target_dir, exclusions=[], base_download_url=None):
""" This looks at a group metadata dictionary and downloads each GAF source that is not in the exclusions list. For each downloaded file, keep track of the path of the file. If the file is zipped, it will unzip it here. This function returns a list of tuples of the dataset dictionary mapped to the downloaded source path. """ |
gaf_urls = [ (data, data["source"]) for data in group_metadata["datasets"] if data["type"] == "gaf" and data["dataset"] not in exclusions ]
# List of dataset metadata to gaf download url
click.echo("Found {}".format(", ".join( [ kv[0]["dataset"] for kv in gaf_urls ] )))
downloaded_paths = []
for dataset_metadata, gaf_url in gaf_urls:
dataset = dataset_metadata["dataset"]
# Local target download path setup - path and then directories
path = download_a_dataset_source(group_metadata["id"], dataset_metadata, target_dir, gaf_url, base_download_url=base_download_url)
if dataset_metadata["compression"] == "gzip":
# Unzip any downloaded file that has gzip, strip of the gzip extension
unzipped = os.path.splitext(path)[0]
unzip(path, unzipped)
path = unzipped
else:
# otherwise file is coming in uncompressed. But we want to make sure
# to zip up the original source also
zipup(path)
downloaded_paths.append((dataset_metadata, path))
return downloaded_paths |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_annotation_sufficiency( self, profile: List[str], negated_classes: List[str], categories: Optional[List] = None, negation_weight: Optional[float] = .25, category_weight: Optional[float] = .5) -> AnnotationSufficiency: """ Given a list of individuals, return the simple, scaled, and categorical scores """ |
if categories is None:
categories = [enum.value for enum in HpoUpperLevel]
ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
# Simple score is the weighted average of the present and
# explicitly stated negative/absent phenotypes
#
# Note that we're deviating from the publication
# to match the reference java implementation where
# mean_max_ic is replaced with max_max_ic:
# https://github.com/owlcollab/owltools/blob/452b4a/
# OWLTools-Sim/src/main/java/owltools/sim2/AbstractOwlSim.java#L1038
simple_score = self._get_simple_score(
profile, negated_classes, self.ic_store.statistics.mean_mean_ic,
self.ic_store.statistics.max_max_ic, self.ic_store.statistics.mean_sum_ic,
negation_weight, ic_map
)
categorical_score = self._get_categorical_score(
profile, negated_classes, categories,
negation_weight, ic_map
)
scaled_score = self._get_scaled_score(
simple_score, categorical_score, category_weight)
return AnnotationSufficiency(
simple_score=simple_score,
scaled_score=scaled_score,
categorical_score=categorical_score
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_scaled_score( simple_score: float, categorical_score: float, category_weight: Optional[float] = .5) -> float: """ Scaled score is the weighted average of the simple score and categorical score """ |
return np.average(
[simple_score, categorical_score], weights=[1, category_weight]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_categorical_score( self, profile: List, negated_classes: List, categories: List, negation_weight: Optional[float] = 1, ic_map: Optional[Dict[str, float]] = None) -> float: """ The average of the simple scores across a list of categories """ |
if ic_map is None:
ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
scores = []
for cat in categories:
if cat not in self.ic_store.category_statistics:
raise ValueError("statistics for {} not indexed".format(cat))
pos_profile = [cls for cls in profile
if cls in self.ic_store.category_statistics[cat].descendants]
neg_profile = [cls for cls in negated_classes
if cls in self.ic_store.category_statistics[cat].descendants]
# Note that we're deviating from the publication
# to match the reference java implementation where
# mean_max_ic is replaced by max_max_ic
scores.append(self._get_simple_score(
pos_profile, neg_profile,
self.ic_store.category_statistics[cat].mean_mean_ic,
self.ic_store.category_statistics[cat].max_max_ic,
self.ic_store.category_statistics[cat].mean_sum_ic,
negation_weight, ic_map
))
return mean(scores) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_entity(self, entity):
""" Write a single entity to a line in the output file """ |
db, db_object_id = self._split_prefix(entity)
taxon = normalize_taxon(entity["taxon"]["id"])
vals = [
db,
db_object_id,
entity.get('label'),
entity.get('full_name'),
entity.get('synonyms'),
entity.get('type'),
taxon,
entity.get('parents'),
entity.get('xrefs'),
entity.get('properties')
]
self._write_row(vals) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self, id_list: Iterable, negated_classes: Iterable, limit: Optional[int], method: Optional) -> List[SimResult]: """ Given an input list of classes, searches for similar lists of classes and provides a ranked list of matches """ |
pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filtered_search(self, id_list: Iterable, negated_classes: Iterable, limit: Optional[int], taxon_filter: Optional, category_filter: Optional, method: Optional) -> SimResult: """ Given an input iterable of classes or individuals, provides a ranking of similar profiles """ |
pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index_ontology(self, ont):
""" Adds an ontology to the index This iterates through all labels and synonyms in the ontology, creating an index """ |
self.merged_ontology.merge([ont])
syns = ont.all_synonyms(include_label=True)
include_id = self._is_meaningful_ids()
logging.info("Include IDs as synonyms: {}".format(include_id))
if include_id:
for n in ont.nodes():
v = n
# Get fragment
if v.startswith('http'):
v = re.sub('.*/','',v)
v = re.sub('.*#','',v)
syns.append(Synonym(n, val=v, pred='label'))
logging.info("Indexing {} syns in {}".format(len(syns),ont))
logging.info("Distinct lexical values: {}".format(len(self.lmap.keys())))
for syn in syns:
self.index_synonym(syn, ont)
for nid in ont.nodes():
self.id_to_ontology_map[nid].append(ont) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index_synonym(self, syn, ont):
""" Index a synonym Typically not called from outside this object; called by `index_ontology` """ |
if not syn.val:
if syn.pred == 'label':
if not self._is_meaningful_ids():
if not ont.is_obsolete(syn.class_id):
pass
#logging.error('Use meaningful ids if label not present: {}'.format(syn))
else:
logging.warning("Incomplete syn: {}".format(syn))
return
if self.exclude_obsolete and ont.is_obsolete(syn.class_id):
return
syn.ontology = ont
prefix,_ = ont.prefix_fragment(syn.class_id)
v = syn.val
caps_match = re.match('[A-Z]+',v)
if caps_match:
# if > 75% of length is caps, assume abbreviation
if caps_match.span()[1] >= len(v)/3:
syn.is_abbreviation(True)
# chebi 'synonyms' are often not real synonyms
# https://github.com/ebi-chebi/ChEBI/issues/3294
if not re.match('.*[a-zA-Z]',v):
if prefix != 'CHEBI':
logging.warning('Ignoring suspicous synonym: {}'.format(syn))
return
v = self._standardize_label(v)
# TODO: do this once ahead of time
wsmap = {}
for w,s in self.wsmap.items():
wsmap[w] = s
for ss in self._get_config_val(prefix,'synsets',[]):
# TODO: weights
wsmap[ss['synonym']] = ss['word']
nv = self._normalize_label(v, wsmap)
self._index_synonym_val(syn, v)
nweight = self._get_config_val(prefix, 'normalized_form_confidence', 0.8)
if nweight > 0 and not syn.is_abbreviation():
if nv != v:
nsyn = Synonym(syn.class_id,
val=syn.val,
pred=syn.pred,
lextype=syn.lextype,
ontology=ont,
confidence=syn.confidence * nweight)
self._index_synonym_val(nsyn, nv) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _normalize_label(self, s, wsmap):
""" normalized form of a synonym """ |
toks = []
for tok in list(set(self.npattern.sub(' ', s).split(' '))):
if tok in wsmap:
tok=wsmap[tok]
if tok != "":
toks.append(tok)
toks.sort()
return " ".join(toks) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sim(self, xg, ancs1, ancs2, pfx1, pfx2):
""" Compare two lineages """ |
xancs1 = set()
for a in ancs1:
if a in xg:
# TODO: restrict this to neighbors in single ontology
for n in xg.neighbors(a):
pfx = self._id_to_ontology(n)
if pfx == pfx2:
xancs1.add(n)
logging.debug('SIM={}/{} ## {}'.format(len(xancs1.intersection(ancs2)), len(xancs1), xancs1.intersection(ancs2), xancs1))
n_shared = len(xancs1.intersection(ancs2))
n_total = len(xancs1)
return (1+n_shared) / (1+n_total), n_shared, n_total |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compare_to_xrefs(self, xg1, xg2):
""" Compares a base xref graph with another one """ |
ont = self.merged_ontology
for (i,j,d) in xg1.edges(data=True):
ont_left = self._id_to_ontology(i)
ont_right = self._id_to_ontology(j)
unique_lr = True
num_xrefs_left = 0
same_left = False
if i in xg2:
for j2 in xg2.neighbors(i):
ont_right2 = self._id_to_ontology(j2)
if ont_right2 == ont_right:
unique_lr = False
num_xrefs_left += 1
if j2 == j:
same_left = True
unique_rl = True
num_xrefs_right = 0
same_right = False
if j in xg2:
for i2 in xg2.neighbors(j):
ont_left2 = self._id_to_ontology(i2)
if ont_left2 == ont_left:
unique_rl = False
num_xrefs_right += 1
if i2 == i:
same_right = True
(x,y) = d['idpair']
xg1[x][y]['left_novel'] = num_xrefs_left==0
xg1[x][y]['right_novel'] = num_xrefs_right==0
xg1[x][y]['left_consistent'] = same_left
xg1[x][y]['right_consistent'] = same_right |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assign_best_matches(self, xg):
""" For each node in the xref graph, tag best match edges """ |
logging.info("assigning best matches for {} nodes".format(len(xg.nodes())))
for i in xg.nodes():
xrefmap = self._neighborscores_by_ontology(xg, i)
for (ontid,score_node_pairs) in xrefmap.items():
score_node_pairs.sort(reverse=True)
(best_score,best_node) = score_node_pairs[0]
logging.info("BEST for {}: {} in {} from {}".format(i, best_node, ontid, score_node_pairs))
edge = xg[i][best_node]
dirn = self._dirn(edge, i, best_node)
best_kwd = 'best_' + dirn
if len(score_node_pairs) == 1 or score_node_pairs[0] > score_node_pairs[1]:
edge[best_kwd] = 2
else:
edge[best_kwd] = 1
for (score,j) in score_node_pairs:
edge_ij = xg[i][j]
dirn_ij = self._dirn(edge_ij, i, j)
edge_ij['cpr_'+dirn_ij] = score / sum([s for s,_ in score_node_pairs])
for (i,j,edge) in xg.edges(data=True):
# reciprocal score is set if (A) i is best for j, and (B) j is best for i
rs = 0
if 'best_fwd' in edge and 'best_rev' in edge:
rs = edge['best_fwd'] * edge['best_rev']
edge['reciprocal_score'] = rs
edge['cpr'] = edge['cpr_fwd'] * edge['cpr_rev'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _best_match_syn(self, sx, sys, scope_map):
""" The best match is determined by the highest magnitude weight """ |
SUBSTRING_WEIGHT = 0.2
WBEST = None
sbest = None
sxv = self._standardize_label(sx.val)
sxp = self._id_to_ontology(sx.class_id)
for sy in sys:
syv = self._standardize_label(sy.val)
syp = self._id_to_ontology(sy.class_id)
W = None
if sxv == syv:
confidence = sx.confidence * sy.confidence
if sx.is_abbreviation() or sy.is_abbreviation:
confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5)
confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5)
W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2)
elif sxv in syv:
W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0))
elif syv in sxv:
W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0))
if W is not None:
# The best match is determined by the highest magnitude weight
if WBEST is None or max(abs(W)) > max(abs(WBEST)):
WBEST = W
sbest = sy
return WBEST, sbest |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grouped_mappings(self,id):
""" return all mappings for a node, grouped by ID prefix """ |
g = self.get_xref_graph()
m = {}
for n in g.neighbors(id):
[prefix, local] = n.split(':')
if prefix not in m:
m[prefix] = []
m[prefix].append(n)
return m |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cliques(self, xg):
""" Return all equivalence set cliques, assuming each edge in the xref graph is treated as equivalent, and all edges in ontology are subClassOf Arguments --------- xg : Graph an xref graph Returns ------- list of sets """ |
g = nx.DiGraph()
for (x,y) in self.merged_ontology.get_graph().edges():
g.add_edge(x,y)
for (x,y) in xg.edges():
g.add_edge(x,y)
g.add_edge(y,x)
return list(strongly_connected_components(g)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_triples(self, ontol):
""" Adds triples to an ontology object. Currently assumes gocam/lego-style """ |
rg = self.rdfgraph
g = ontol.get_graph()
typemap = {}
inds = rg.subjects(RDF.type, OWL.NamedIndividual)
for s in inds:
for (s,p,o) in rg.triples((s,None,None)):
s_id = id(s)
p_id = id(p)
g.add_node(s_id)
if isinstance(o,URIRef):
o_id = id(o)
if p == RDF.type:
if o != OWL.NamedIndividual:
if s_id not in typemap:
typemap[s_id] = []
typemap[s_id].append(o_id)
else:
g.add_edge(o_id,s_id,pred=p_id)
# propagate label from type
for s in typemap.keys():
g.nodes[s]['types'] = typemap[s]
if self.tbox_ontology is not None:
if 'label' not in g.nodes[s]:
g.nodes[s]['label'] = ";".join([self.tbox_ontology.label(x) for x in typemap[s] if self.tbox_ontology.label(x) is not None]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, ontol, **args):
""" Write a `ontology` object """ |
s = self.render(ontol, **args)
if self.outfile is None:
print(s)
else:
f = open(self.outfile, 'w')
f.write(s)
f.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_subgraph(self, ontol, nodes, **args):
""" Render a `ontology` object after inducing a subgraph """ |
subont = ontol.subontology(nodes, **args)
return self.render(subont, **args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_subgraph(self, ontol, nodes, **args):
""" Write a `ontology` object after inducing a subgraph """ |
subont = ontol.subontology(nodes, **args)
self.write(subont, **args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_relation(self, r, **args):
""" Render an object property """ |
if r is None:
return "."
m = self.config.relsymbolmap
if r in m:
return m[r]
return r |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_noderef(self, ontol, n, query_ids=None, **args):
""" Render a node object """ |
if query_ids is None:
query_ids = []
marker = ""
if n in query_ids:
marker = " * "
label = ontol.label(n)
s = None
if label is not None:
s = '{} ! {}{}'.format(n,
label,
marker)
else:
s = str(n)
if self.config.show_text_definition:
td = ontol.text_definition(n)
if td:
s += ' "{}"'.format(td.val)
return s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(fmt):
""" Creates a GraphRenderer """ |
w = None
if fmt == 'tree':
w = AsciiTreeGraphRenderer()
elif fmt == 'dot':
w = DotGraphRenderer(image_format='dot')
elif fmt == 'png':
w = DotGraphRenderer(image_format='png')
elif fmt == 'ndot':
w = NativeDotGraphRenderer()
elif fmt == 'obo':
w = OboFormatGraphRenderer()
elif fmt == 'obog':
w = OboJsonGraphRenderer()
else:
w = SimpleListGraphRenderer()
return w |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user_agent(name="ontobio", version=ontobio_version, modules=None, caller_name=None):
""" Create a User-Agent string """ |
user_agent_array = ["{}/{}".format(name, version)]
if modules:
module_info_array = []
for m in modules:
mod_name = m.__name__
mod_version = None
if hasattr(m, 'get_version'):
mod_version = m.get_version()
else:
mod_version = m.__version__
module_info_array.append("{}/{}".format(mod_name, mod_version))
if caller_name:
module_info_array.append(caller_name)
user_agent_array.append("({})".format('; '.join(module_info_array)))
else:
if caller_name:
user_agent_array.append("({})".format(caller_name))
return ' '.join(user_agent_array) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self, id_list: List, negated_classes: List, limit: Optional[int], method: Optional) -> List[SimResult]: """ Given an input list of classes or individuals, provides a ranking of similar profiles """ |
raise NotImplementedError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_association(self, association: Association) -> Entity: """ 'id' is already `join`ed in both the Association and the Entity, so we don't have to worry about what that looks like. We assume it's correct. """ |
if "header" not in association or association["header"] == False:
# print(json.dumps(association, indent=4))
gpi_obj = {
'id': association["subject"]["id"],
'label': association["subject"]["label"], # db_object_symbol,
'full_name': association["subject"]["fullname"], # db_object_name,
'synonyms': association["subject"]["synonyms"],
'type': association["subject"]["type"], #db_object_type,
'parents': "", # GAF does not have this field, but it's optional in GPI
'xrefs': "", # GAF does not have this field, but it's optional in GPI
'taxon': {
'id': association["subject"]["taxon"]["id"]
}
}
return Entity(gpi_obj)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filtered_graph(self, relations=None, prefix=None):
""" Returns a networkx graph for the whole ontology, for a subset of relations Only implemented for eager methods. Implementation notes: currently this is not cached Arguments --------- - relations : list list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all. - prefix : String if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO Return ------ nx.MultiDiGraph A networkx MultiDiGraph object representing the filtered ontology """ |
# trigger synonym cache
self.all_synonyms()
self.all_obsoletes()
# default method - wrap get_graph
srcg = self.get_graph()
if prefix is not None:
srcg = srcg.subgraph([n for n in srcg.nodes() if n.startswith(prefix+":")])
if relations is None:
logger.info("No filtering on "+str(self))
return srcg
logger.info("Filtering {} for {}".format(self, relations))
g = nx.MultiDiGraph()
# TODO: copy full metadata
logger.info("copying nodes")
for (n,d) in srcg.nodes(data=True):
g.add_node(n, **d)
logger.info("copying edges")
num_edges = 0
for (x,y,d) in srcg.edges(data=True):
if d['pred'] in relations:
num_edges += 1
g.add_edge(x,y,**d)
logger.info("Filtered edges: {}".format(num_edges))
return g |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, ontologies):
    """Merge each ontology in *ontologies* into this one, in place.

    Copies nodes, edges and xref edges; creates an empty xref graph
    first if this ontology has none.
    """
    if self.xref_graph is None:
        self.xref_graph = nx.MultiGraph()
    logger.info("Merging source: {} xrefs: {}".format(self, len(self.xref_graph.edges())))
    for ont in ontologies:
        logger.info("Merging {} into {}".format(ont, self))
        target = self.get_graph()
        incoming = ont.get_graph()
        for node_id in incoming.nodes():
            target.add_node(node_id, **incoming.node[node_id])
        for subj, obj, data in incoming.edges(data=True):
            target.add_edge(subj, obj, **data)
        if ont.xref_graph is not None:
            for subj, obj, data in ont.xref_graph.edges(data=True):
                self.xref_graph.add_edge(subj, obj, **data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subontology(self, nodes=None, minimal=False, relations=None):
    """Return a new Ontology extracted from this one.

    Args:
        nodes: node IDs to include; None means all nodes.
        minimal: if True, reduce to a minimal connecting subgraph.
        relations: relation IDs to include; None means all relations.
    """
    if nodes is not None:
        g = self.subgraph(nodes)
    else:
        g = self.get_graph()
    if minimal:
        from ontobio.slimmer import get_minimal_subgraph
        g = get_minimal_subgraph(g, nodes)
    # TODO - add metadata
    ont = Ontology(graph=g, xref_graph=self.xref_graph)
    if relations is not None:
        filtered = ont.get_filtered_graph(relations)
        ont = Ontology(graph=filtered, xref_graph=self.xref_graph)
    return ont
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_slim_mapping(self, subset=None, subset_nodes=None, relations=None, disable_checks=False):
    """Map every ontology node to its non-redundant ancestors within a subset.

    Args:
        subset: name of a named subset to map to, e.g. goslim_generic.
        subset_nodes: explicit node IDs, used when *subset* is None.
        relations: relation IDs used to restrict traversal.
        disable_checks: unless True, refuse to propagate over
            non-standard relations (e.g. transports, has-part), which it
            is inappropriate to propagate gene products over.

    Returns:
        dict mapping each node ID to a list of subset node IDs.

    Raises:
        ValueError: if the subset is empty, or an unsafe relation is used.
    """
    if subset is not None:
        subset_nodes = self.extract_subset(subset)
        logger.info("Extracting subset: {} -> {}".format(subset, subset_nodes))
    if not subset_nodes:
        raise ValueError("subset nodes is blank")
    subset_nodes = set(subset_nodes)
    logger.debug("SUBSET: {}".format(subset_nodes))
    # map over a sub-ontology when a relation filter is requested
    subont = self if relations is None else self.subontology(relations=relations)
    if not disable_checks:
        safe_relations = {'subClassOf', 'BFO:0000050', 'subPropertyOf'}
        for rel in subont.relations_used():
            if rel not in safe_relations:
                raise ValueError("Not safe to propagate over a graph with edge type: {}".format(rel))
    mapping = {}
    for node_id in subont.nodes():
        ancs = subont.ancestors(node_id, reflexive=True)
        mapping[node_id] = list(subont.filter_redundant(subset_nodes.intersection(ancs)))
    return mapping
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_redundant(self, ids):
    """Return the members of *ids* that are not ancestors of other members."""
    remaining = set(ids)
    for term in ids:
        remaining -= set(self.ancestors(term, reflexive=False))
    return remaining
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_subset(self, subset, contract=True):
    """Return all node IDs belonging to the named oboInOwl subset."""
    members = []
    for node_id in self.nodes():
        if subset in self.subsets(node_id, contract=contract):
            members.append(node_id)
    return members
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subsets(self, nid, contract=True):
    """Return the subset IDs for a class or ontology object.

    Args:
        nid: node identifier.
        contract: if True, contract each subset IRI to its short form.
    """
    # NOTE(review): the original also called self.node(nid) and discarded
    # the result; removed as dead code — confirm node() had no side effect.
    meta = self._meta(nid)
    subsets = meta.get('subsets', [])
    if contract:
        subsets = [self._contract_subset(s) for s in subsets]
    return subsets
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prefixes(self):
    """Return the distinct CURIE prefixes used by nodes in this ontology."""
    distinct = {self.prefix(n) for n in self.nodes()}
    distinct.discard(None)
    return list(distinct)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relations_used(self):
    """Return the list of all edge predicate IDs used in the graph."""
    g = self.get_graph()
    preds = {data['pred'] for _, _, data in g.edges(data=True)}
    return list(preds)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def child_parent_relations(self, subj, obj, graph=None):
    """Return the set of relation IDs linking child *subj* to parent *obj*.

    Typically a single relation, but multiple are possible.
    """
    if graph is None:
        graph = self.get_graph()
    preds = {attrs['pred'] for attrs in graph[obj][subj].values()}
    logger.debug('{}->{} = {}'.format(subj, obj, preds))
    return preds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parents(self, node, relations=None):
    """Return direct parents of *node*, optionally filtered by relation IDs."""
    g = self.get_graph()
    if node not in g:
        return []
    parent_ids = list(g.predecessors(node))
    if relations is None:
        return parent_ids
    wanted = set(relations)
    return [p for p in parent_ids
            if self.child_parent_relations(node, p, graph=g).intersection(wanted)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def children(self, node, relations=None):
    """Return direct children of *node*, optionally filtered by relation IDs."""
    g = self.get_graph()
    if node not in g:
        return []
    child_ids = list(g.successors(node))
    if relations is None:
        return child_ids
    wanted = set(relations)
    return [c for c in child_ids
            if self.child_parent_relations(c, node, graph=g).intersection(wanted)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ancestors(self, node, relations=None, reflexive=False):
    """Return all ancestor node IDs of *node*.

    Args:
        node: node identifier.
        relations: relation IDs used to filter edges; None keeps all.
        reflexive: if True, include *node* itself in the result.

    Returns:
        list of ancestor node IDs (empty when *node* is absent).
    """
    if reflexive:
        result = self.ancestors(node, relations, reflexive=False)
        result.append(node)
        return result
    g = self.get_graph() if relations is None else self.get_filtered_graph(relations)
    return list(nx.ancestors(g, node)) if node in g else []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def descendants(self, node, relations=None, reflexive=False):
    """Return all descendant node IDs of *node*.

    Args:
        node: node identifier.
        relations: relation IDs used to filter edges; None keeps all.
        reflexive: if True, include *node* itself in the result.

    Returns:
        list of descendant node IDs (empty when *node* is absent).
    """
    if reflexive:
        result = self.descendants(node, relations, reflexive=False)
        result.append(node)
        return result
    g = self.get_graph() if relations is None else self.get_filtered_graph(relations)
    return list(nx.descendants(g, node)) if node in g else []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_roots(self, relations=None, prefix=None):
    """Return nodes that have no parents but at least one child.

    Singletons (which include obsolete classes) are thereby excluded.
    """
    g = self.get_filtered_graph(relations=relations, prefix=prefix)
    return [n for n in g.nodes()
            if not list(g.predecessors(n)) and list(g.successors(n))]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_level(self, level, relations=None, **args):
    """Return all nodes exactly *level* steps below the roots."""
    g = self.get_filtered_graph(relations)
    frontier = self.get_roots(relations=relations, **args)
    for depth in range(level):
        logger.info(" ITERATING TO LEVEL: {} NODES: {}".format(depth, frontier))
        frontier = [child for parent in frontier for child in g.successors(parent)]
    logger.info(" FINAL: {}".format(frontier))
    return frontier
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parent_index(self, relations=None):
    """Return a list of [node, parent1, parent2, ...] rows for every node."""
    if relations is None:
        g = self.get_graph()
    else:
        g = self.get_filtered_graph(relations)
    return [[n, *g.predecessors(n)] for n in g]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def text_definition(self, nid):
    """Return the TextDefinition for a class or relation, or None.

    Args:
        nid: node identifier for the entity to be queried.
    """
    # removed dead local `tdefs = []` from the original — it was never used
    meta = self._meta(nid)
    if 'definition' in meta:
        return TextDefinition(nid, **meta['definition'])
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logical_definitions(self, nid):
    """Return all LogicalDefinitions whose class_id equals *nid*."""
    ldefs = self.all_logical_definitions
    if ldefs is None:
        return []
    return [ld for ld in ldefs if ld.class_id == nid]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_property_chain_axioms(self, nid):
    """Return all PropertyChainAxioms whose predicate_id equals *nid*."""
    pcas = self.all_property_chain_axioms
    if pcas is None:
        return []
    return [pca for pca in pcas if pca.predicate_id == nid]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def synonyms(self, nid, include_label=False):
    """Return Synonym objects for a class.

    Args:
        nid: node identifier for the entity to be queried.
        include_label: if True, also wrap the node's label as a Synonym.
    """
    node = self.node(nid)
    result = []
    if 'meta' in node:
        for obj in node['meta'].get('synonyms', []):
            result.append(Synonym(nid, **obj))
    if include_label:
        result.append(Synonym(nid, val=self.label(nid), pred='label'))
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_node(self, id, label=None, type='CLASS', meta=None):
    """Add a new node to the ontology graph."""
    if meta is None:
        meta = {}
    self.get_graph().add_node(id, label=label, type=type, meta=meta)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inline_xref_graph(self):
    """Copy xref_graph contents into each node's inlined meta object."""
    xg = self.xref_graph
    for node_id in self.nodes():
        if node_id in xg:
            xref_objs = [{'val': x} for x in xg.neighbors(node_id)]
            self._add_meta_element(node_id, 'xrefs', xref_objs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_parent(self, id, pid, relation='subClassOf'):
    """Add an edge making *pid* a parent of *id* via *relation*."""
    self.get_graph().add_edge(pid, id, pred=relation)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_xref(self, id, xref):
    """Record an xref edge in the xref graph.

    Note: the node's inlined meta object is not updated.
    """
    if self.xref_graph is None:
        self.xref_graph = nx.MultiGraph()
    self.xref_graph.add_edge(xref, id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_synonym(self, syn):
    """Attach a Synonym object to its class's meta block."""
    node = self.node(syn.class_id)
    meta = node.setdefault('meta', {})
    meta.setdefault('synonyms', []).append(syn.as_dict())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_to_subset(self, id, s):
    """Add node *id* to subset *s* in the node's meta block."""
    node = self.node(id)
    meta = node.setdefault('meta', {})
    meta.setdefault('subsets', []).append(s)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_synonyms(self, include_label=False):
    """Return Synonym objects for every node in the ontology.

    Args:
        include_label: if True, labels/names are included as Synonyms.
    """
    result = []
    for node_id in self.nodes():
        result.extend(self.synonyms(node_id, include_label=include_label))
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def label(self, nid, id_if_null=False):
    """Return the label for a node.

    Args:
        nid: node identifier for the entity to be queried.
        id_if_null: if True, fall back to *nid* when no label exists.
    """
    g = self.get_graph()
    if nid in g:
        # g.node is the networkx 1.x node-attribute API used throughout
        # this file
        node = g.node[nid]
        if 'label' in node:
            return node['label']
    # missing node and missing label used duplicated branches in the
    # original; both share the same fallback
    return nid if id_if_null else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xrefs(self, nid, bidirectional=False):
    """Return xref IDs for a node.

    Args:
        nid: node identifier for the entity to be queried.
        bidirectional: if True, also include nodes xreffed to *nid*.
    """
    xg = self.xref_graph
    if xg is None or nid not in xg:
        return []
    if bidirectional:
        return list(xg.neighbors(nid))
    return [x for x in xg.neighbors(nid) if xg[nid][x][0]['source'] == nid]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_names(self, names, synonyms=False, **args):
    """Resolve labels/identifiers to node IDs.

    Args:
        names: search terms; '%' is treated as a wildcard.
        synonyms: if True, also match against synonyms.
        **args: passed to the matcher (is_regex, is_partial_match).
    """
    g = self.get_graph()
    resolved = []
    for name in names:
        logger.debug("Searching for {} syns={}".format(name, synonyms))
        if len(name.split(":")) == 2:
            # already a CURIE — accept as-is
            resolved.append(name)
            continue
        matches = {nid for nid in g.nodes()
                   if self._is_match(self.label(nid), name, **args)}
        if synonyms:
            logger.debug("Searching syns for {}".format(names))
            for nid in g.nodes():
                if any(self._is_match(s.val, name, **args) for s in self.synonyms(nid)):
                    matches.add(nid)
        resolved += list(matches)
    return resolved
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_golr_wrap(id, category, **args):
    """Search associations in both directions and merge the results."""
    fwd_assocs, fwd_facets = search_compact_wrap(object=id, subject_category=category, **args)
    rev_assocs, rev_facets = search_compact_wrap(subject=id, object_category=category, **args)
    # prefer the facets of the reverse direction when it returned results
    facets = rev_facets if len(rev_assocs) > 0 else fwd_facets
    return fwd_assocs + rev_assocs, facets
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_gpi(self, gpi_path):
    """Load GPI entities, keyed by id, from `config.gpi_authority_path`.

    NOTE(review): the *gpi_path* argument is ignored; the path always
    comes from self.config.gpi_authority_path — confirm this is intended.

    Returns:
        dict of id -> {symbol, name, synonyms, type}, or None when no
        path is configured.
    """
    if self.config.gpi_authority_path is None:
        return None
    parser = entityparser.GpiParser()
    gpis = dict()
    with open(self.config.gpi_authority_path) as gpi_f:
        entities = parser.parse(file=gpi_f)
    for entity in entities:
        gpis[entity["id"]] = {
            "symbol": entity["label"],
            "name": entity["full_name"],
            "synonyms": entitywriter.stringify(entity["synonyms"]),
            "type": entity["type"],
        }
    return gpis
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, file, outfile=None):
    """Parse a line-oriented entity file into a list of entity dicts.

    Args:
        file: http URL, filename or file-like object to parse.
        outfile: optional file-like object to which processed lines
            are echoed.

    Returns:
        list of entity dicts generated from the file.
    """
    file = self._ensure_file(file)
    entities = []
    skipped = []
    line_count = 0
    for line in file:
        line_count += 1
        if line.startswith("!"):
            # comment/header line: echo unchanged and move on
            if outfile is not None:
                outfile.write(line)
            continue
        line = line.strip("\n")
        if line == "":
            logging.warning("EMPTY LINE")
            continue
        parsed_line, new_entities = self.parse_line(line)
        if self._skipping_line(new_entities):  # skip if no entities produced
            logging.warning("SKIPPING: {}".format(line))
            skipped.append(line)
        else:
            entities += new_entities
            if outfile is not None:
                outfile.write(parsed_line + "\n")
    self.report.skipped += len(skipped)
    self.report.n_lines += line_count
    logging.info("Parsed {} ents from {} lines. Skipped: {}".
                 format(len(entities), line_count, len(skipped)))
    file.close()
    return entities
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_line(self, line):
    """Parse a single tab-separated GPI line.

    Most applications should use parse() over the whole file; call this
    directly only for fine-grained control.

    Args:
        line: one tab-separated GPI line.

    Returns:
        (processed_line, entities) — entities is a list with zero or
        one entity dicts.
    """
    vals = line.split("\t")
    if len(vals) < 7:
        self.report.error(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "")
        return line, []
    if len(vals) < 10:
        # pad the optional trailing columns (7..9 are mandatory)
        vals += [""] * (10 - len(vals))
    (db, db_object_id, db_object_symbol, db_object_name, db_object_synonym,
     db_object_type, taxon, parent_object_id, xrefs, properties) = vals
    split_line = assocparser.SplitLine(line=line, values=vals, taxon=taxon)
    ## db + db_object_id. CARD=1
    id = self._pair_to_id(db, db_object_id)
    if not self._validate_id(id, split_line, context=assocparser.Report):
        return line, []
    ## db_object_synonym CARD=0..*
    synonyms = db_object_synonym.split("|") if db_object_synonym != "" else []
    # TODO: DRY
    if parent_object_id == "":
        parents = []
    else:
        parents = [self._normalize_id(x) for x in parent_object_id.split("|")]
        for parent in parents:
            self._validate_id(parent, split_line, context=assocparser.Report)
    xref_ids = xrefs.split("|") if xrefs != "" else []
    entity = {
        'id': id,
        'label': db_object_symbol,
        'full_name': db_object_name,
        'synonyms': synonyms,
        'type': db_object_type,
        'parents': parents,
        'xrefs': xref_ids,
        'taxon': {'id': self._taxon_id(taxon, split_line)},
    }
    return line, [entity]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform_item(self, item):
    """Transform one JSON gene object into an internal entity dict."""
    entity = {
        'id': item['primaryId'],
        'label': item['symbol'],
        'full_name': item['name'],
        'type': item['soTermId'],
        'taxon': {'id': item['taxonId']},
    }
    if 'synonyms' in item:
        entity['synonyms'] = item['synonyms']
    if 'crossReferenceIds' in item:
        entity['xrefs'] = [self._normalize_id(x) for x in item['crossReferenceIds']]
    # TODO: genomeLocations
    # TODO: geneLiteratureUrl
    return entity
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index(self):
    """Build the inferred-term index.

    Called automatically on initialization; not needed by user code.
    """
    self.subjects = list(self.association_map.keys())
    # de-duplicate annotations per subject
    for subj in self.association_map:
        self.association_map[subj] = list(set(self.association_map[subj]))
    logging.info("Indexing {} items".format(len(self.subjects)))
    count = 0
    all_objs = set()
    for subj, terms in self.association_map.items():
        ancs = self.termset_ancestors(terms)
        all_objs.update(ancs)
        self.subject_to_inferred_map[subj] = ancs
        count += 1
        if count < 5:
            logging.info(" Indexed: {} -> {}".format(subj, ancs))
        elif count == 6:
            logging.info("[TRUNCATING>5]....")
    self.objects = all_objs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_associations(self, subjects=None, infer_subjects=True, include_xrefs=True):
    """Query associations for a set of subjects.

    Associations to any subject, its descendants (infer_subjects=True)
    and their xrefs (include_xrefs=True) are returned.

    Returns:
        list of (subject_id, class_id) tuples.
    """
    if subjects is None:
        subjects = []
    candidates = set(subjects)
    if infer_subjects:
        for subj in subjects:
            candidates.update(self.ontology.descendants(subj))
    if include_xrefs:
        expanded = set()
        for subj in candidates:
            xrefs = self.ontology.xrefs(subj, bidirectional=True)
            if xrefs is not None:
                expanded.update(xrefs)
        candidates.update(expanded)
    logging.debug("Matching subjects: {}".format(candidates))
    candidates = candidates.intersection(self.subjects)
    logging.debug("Matching subjects with anns: {}".format(candidates))
    amap = self.association_map
    return [(subj, term) for subj in candidates if subj in amap for term in amap[subj]]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query(self, terms=None, negated_terms=None):
    """Boolean query over inferred annotations.

    Args:
        terms: class IDs that every returned subject must be inferred-
            annotated to ('owl:Thing' matches everything).
        negated_terms: class IDs that no returned subject may be
            inferred-annotated to.
    """
    if terms is None:
        terms = []
    if negated_terms is None:
        negated_terms = []
    matches_all = 'owl:Thing' in terms
    termset = set(terms)
    negated = set(negated_terms)
    required = len(termset)
    hits = []
    for subj in self.subjects:
        inferred = self.inferred_types(subj)
        if not (matches_all or len(termset.intersection(inferred)) == required):
            continue
        if negated.intersection(inferred):
            continue
        hits.append(subj)
    return hits
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_intersections(self, x_terms=None, y_terms=None, symmetric=False):
    """Compute pairwise intersections between two term lists.

    Returns:
        list of dicts with keys x, y, shared, c (count) and j (jaccard).
    """
    x_terms = x_terms if x_terms is not None else []
    y_terms = y_terms if y_terms is not None else []
    zset = set(x_terms) | set(y_terms)
    # map each term to the subjects annotated to it (via inference);
    # this could be precomputed for all terms, but may be
    # space-expensive. TODO: benchmark
    term_to_subjects = {z: [] for z in zset}
    for subj in self.subjects:
        for term in self.inferred_types(subj).intersection(zset):
            term_to_subjects[term].append(subj)
    term_to_subjects = {z: set(subjs) for z, subjs in term_to_subjects.items()}
    results = []
    for x in x_terms:
        for y in y_terms:
            if symmetric and not x < y:
                continue
            shared = term_to_subjects[x].intersection(term_to_subjects[y])
            union = term_to_subjects[x].union(term_to_subjects[y])
            jaccard = len(shared) / len(union) if len(union) > 0 else 0
            results.append({'x': x, 'y': y, 'shared': shared, 'c': len(shared), 'j': jaccard})
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def intersectionlist_to_matrix(ilist, xterms, yterms):
    """WILL BE DEPRECATED — convert an intersection list to a jaccard matrix.

    Returns:
        (z, xterms, yterms) where z[yi][xi] holds the jaccard score.
    """
    xmap = {x: i for i, x in enumerate(xterms)}
    ymap = {y: i for i, y in enumerate(yterms)}
    z = [[0] * len(xterms) for _ in yterms]
    for entry in ilist:
        z[ymap[entry['y']]][xmap[entry['x']]] = entry['j']
    logging.debug("Z={}".format(z))
    return (z, xterms, yterms)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_dataframe(self, fillna=True, subjects=None):
    """Return the association set as a pandas DataFrame.

    Rows are subjects (e.g. genes); columns are inferred classes, with
    1 marking an annotation (0 after fillna).
    """
    selected = self.subjects if subjects is None else subjects
    rows = [{cls: 1 for cls in self.inferred_types(subj)} for subj in selected]
    logging.debug("Creating DataFrame")
    df = pd.DataFrame(rows, index=selected)
    if fillna:
        logging.debug("Performing fillna...")
        df = df.fillna(0)
    return df
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def label(self, id):
    """Return a label for a subject id.

    Tries the ontology first, then falls back to the subject label map.
    """
    if self.ontology is not None:
        lbl = self.ontology.label(id)
        if lbl is not None:
            return lbl
    label_map = self.subject_label_map
    if label_map is not None and id in label_map:
        return label_map[id]
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subontology(self, minimal=False):
    """Return a sub-ontology covering only classes used by this association set."""
    return self.ontology.subontology(self.objects, minimal=minimal)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def similarity_matrix(self, x_subjects=None, y_subjects=None, symmetric=False):
    """Compute a pairwise similarity matrix between two subject groups.

    Each cell records the shared inferred types, their count (c) and
    the jaccard score (j).
    """
    x_subjects = x_subjects if x_subjects is not None else []
    y_subjects = y_subjects if y_subjects is not None else []
    zset = set(x_subjects) | set(y_subjects)
    # subject -> inferred type closure; could be precomputed for all
    # subjects, but may be space-expensive. TODO: benchmark
    closure = {z: self.inferred_types(z) for z in zset}
    ilist = []
    for x in x_subjects:
        for y in y_subjects:
            if symmetric and not x < y:
                continue
            shared = closure[x].intersection(closure[y])
            union = closure[x].union(closure[y])
            jaccard = len(shared) / len(union) if len(union) > 0 else 0
            ilist.append({'x': x, 'y': y, 'shared': shared, 'c': len(shared), 'j': jaccard})
    return self.intersectionlist_to_matrix(ilist, x_subjects, y_subjects)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_edges(ont):
    """Fetch all basic edges (is_a, SVF, subPropertyOf) from a remote ontology."""
    logging.info("QUERYING:" + ont)
    edges = [(c, SUBCLASS_OF, d) for (c, d) in fetchall_isa(ont)]
    edges += fetchall_svf(ont)
    edges += [(c, SUBPROPERTY_OF, d) for (c, d) in fetchall_subPropertyOf(ont)]
    if len(edges) == 0:
        # logging.warn is a deprecated alias; use logging.warning
        logging.warning("No edges for {}".format(ont))
    return edges
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transformArray(data, keysToSplit=None):
    """Transform a SPARQL json array by applying transform() to each item.

    Arguments
    ---------
    data : list
        list of SPARQL result bindings
    keysToSplit : list, optional
        keys whose values should be split; forwarded to transform()

    Return
    ------
    list
        transformed items, in input order
    """
    # A fresh list per call avoids the shared-mutable-default-argument pitfall.
    if keysToSplit is None:
        keysToSplit = []
    return [transform(item, keysToSplit) for item in data]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coderef_to_ecoclass(self, code, reference=None):
    """Map a GAF evidence code (e.g. ISS, IDA) to an ECO class.

    If `reference` (a CURIE such as GO_REF:0000001) is given and a mapping
    exists for that exact (code, reference) pair, the more specific ECO
    class is returned; otherwise the reference-less default mapping is used.

    Return the ECO class CURIE/ID, or None if the code is unknown.
    """
    default_cls = None
    for mapped_code, mapped_ref, eco_cls in self.mappings():
        if str(mapped_code) != str(code):
            continue
        if mapped_ref == reference:
            # Exact (code, reference) match wins immediately.
            return eco_cls
        if mapped_ref is None:
            # Remember the generic mapping as a fallback.
            default_cls = eco_cls
    return default_cls
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ecoclass_to_coderef(self, cls):
    """Map an ECO class to a GAF (code, reference) pair.

    This is the reciprocal of :ref:`coderef_to_ecoclass`.

    Arguments
    ---------
    cls : str
        ECO class CURIE/ID

    Return
    ------
    (str, str)
        (code, reference) tuple, or (None, None) if no mapping exists
    """
    # The original pre-loop initializers (code='', ref=None) were dead:
    # the loop variables shadowed them and the not-found path returns
    # (None, None) explicitly.
    for code, ref, mapped_cls in self.mappings():
        if cls == mapped_cls:
            return code, ref
    return None, None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_checksum(file):
    """Return the SHA256 hex digest of the contents of `file`."""
    hasher = hashlib.sha256()
    with open(file, 'rb') as fh:
        hasher.update(fh.read())
    return hasher.hexdigest()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, handle=None, handle_type=None, **args):
    """Create an ontology based on a handle.

    Handle is one of the following:
      - `FILENAME.json` : creates an ontology from an obographs json file
      - `obo:ONTID`     : e.g. obo:pato - from an obolibrary PURL (requires owltools)
      - `ONTID`         : e.g. 'pato' - from a remote SPARQL query

    When no handle is given, a lazily-created module-level default
    ontology is shared across calls.
    """
    global default_ontology
    if handle is not None:
        return create_ontology(handle, **args)
    self.test = self.test + 1
    logging.info("T: " + str(self.test))
    if default_ontology is None:
        logging.info("Creating new instance of default ontology")
        default_ontology = create_ontology(default_ontology_handle)
    logging.info("Using default_ontology")
    return default_ontology
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rule_id(self, id: int) -> str: """ Convert an integer into a gorule key id. """ |
if id is None or id == 0 or id >= 10000000:
return "other"
return "gorule-{:0>7}".format(id) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_json_file(obographfile, **args):
    """Return a networkx MultiDiGraph of the ontologies serialized as a json file.

    Arguments
    ---------
    obographfile : str
        path to an obographs json file; extra kwargs are forwarded to
        convert_json_object
    """
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original leaked the handle on a read/parse error).
    with open(obographfile, 'r') as f:
        obographdoc = json.load(f)
    return convert_json_object(obographdoc, **args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_json_object(obographdoc, **args):
    """Return a networkx MultiDiGraph of the ontologies serialized as a json object."""
    xref_graph = networkx.MultiGraph()
    logical_definitions = []
    property_chain_axioms = []
    context = obographdoc.get('@context', {})
    logging.info("CONTEXT: {}".format(context))
    mapper = OboJsonMapper(digraph=networkx.MultiDiGraph(), context=context)
    graphs = obographdoc['graphs']
    # id/meta for the combined result come from the first graph document.
    base_og = graphs[0]
    for og in graphs:
        # TODO: refactor this
        mapper.add_obograph_digraph(og,
                                    xref_graph=xref_graph,
                                    logical_definitions=logical_definitions,
                                    property_chain_axioms=property_chain_axioms,
                                    **args)
    return {
        'id': base_og.get('id'),
        'meta': base_og.get('meta'),
        'graph': mapper.digraph,
        'xref_graph': xref_graph,
        'graphdoc': obographdoc,
        'logical_definitions': logical_definitions,
        'property_chain_axioms': property_chain_axioms,
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def infer_module_name(filename, fspath):
    """Convert a python filename to a module relative to pythonpath."""
    stem, _ = os.path.splitext(filename)
    for entry in fspath:
        rel = entry.relative_path(stem)
        if not rel:
            continue
        # An __init__.py module is named after its directory.
        init_suffix = os.path.sep + "__init__"
        if rel.endswith(init_suffix):
            rel = rel[:rel.rfind(os.path.sep)]
        return rel.replace(os.path.sep, '.')
    # filename is not relative to anything in pythonpath.
    return ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_absolute_name(package, relative_name):
    """Joins a package name and a relative name.

    Args:
      package: A dotted package name, e.g. foo.bar
      relative_name: A dotted name with possibly some leading dots, e.g. ..x.y

    Returns:
      The relative name resolved against the package: one leading dot means
      the current package, and each additional dot goes up one level,
      e.g. foo.bar + ..hello.world -> foo.hello.world.
      The unchanged relative_name if it has more leading dots than the
      package has components.

    (Doc fix: the previous docstring's example and its "unchanged if no
    leading dot" claim did not match the implemented behavior.)
    """
    path = package.split('.') if package else []
    name = relative_name.lstrip('.')
    ndots = len(relative_name) - len(name)
    if ndots > len(path):
        # Too many dots: cannot resolve above the top-level package.
        return relative_name
    # One dot keeps the full package; each extra dot strips one component.
    absolute_path = path[:len(path) + 1 - ndots]
    if name:
        absolute_path.append(name)
    return '.'.join(absolute_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_import(self, item):
    """Simulate how Python resolves imports.

    Returns the filename of the source file Python would load when
    processing a statement like 'import name' in the module we're
    currently under.

    Args:
        item: An instance of ImportItem

    Returns:
        A filename

    Raises:
        ImportException: If the module doesn't exist.
    """
    name = item.name
    # The last part in `from a.b.c import d` might be a symbol rather than a
    # module, so we try a.b.c and a.b.c.d as names.
    short_name = None
    if item.is_from and not item.is_star:
        if '.' in name.lstrip('.'):
            # The name is something like `a.b.c`, so strip off `.c`.
            rindex = name.rfind('.')
        else:
            # The name is something like `..c`, so strip off just `c`.
            rindex = name.rfind('.') + 1
        short_name = name[:rindex]
    if import_finder.is_builtin(name):
        filename = name + '.so'
        return Builtin(filename, name)
    filename, level = convert_to_path(name)
    if level:
        # This is a relative import; we need to resolve the filename
        # relative to the importing file path.
        filename = os.path.normpath(
            os.path.join(self.current_directory, filename))
    # Candidate (module_name, path) pairs: try the full name first, then the
    # symbol-stripped short name (its path is the containing directory).
    files = [(name, filename)]
    if short_name:
        short_filename = os.path.dirname(filename)
        files.append((short_name, short_filename))
    for module_name, path in files:
        for fs in self.fs_path:
            f = self._find_file(fs, path)
            if not f or f == self.current_module.path:
                # We cannot import a file from itself.
                continue
            if item.is_relative():
                package_name = self.current_module.package_name
                if package_name is None:
                    # Relative import in non-package
                    raise ImportException(name)
                module_name = get_absolute_name(package_name, module_name)
            if isinstance(self.current_module, System):
                return System(f, module_name)
            return Local(f, module_name, fs)
    # If the module isn't found in the explicit pythonpath, see if python
    # itself resolved it.
    if item.source:
        prefix, ext = os.path.splitext(item.source)
        mod_name = name
        # We need to check for importing a symbol here too.
        if short_name:
            mod = prefix.replace(os.path.sep, '.')
            mod = utils.strip_suffix(mod, '.__init__')
            if not mod.endswith(name) and mod.endswith(short_name):
                mod_name = short_name
        if ext == '.pyc':
            # Map a compiled file back to its .py source when it exists.
            pyfile = prefix + '.py'
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        elif not ext:
            # A bare directory resolves to its package __init__.py.
            pyfile = os.path.join(prefix, "__init__.py")
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        return System(item.source, mod_name)
    raise ImportException(name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_all(self, import_items):
    """Resolve a list of imports, yielding the resolved file objects.

    Unresolvable modules are logged at info level and skipped rather
    than raised.
    """
    for item in import_items:
        try:
            resolved = self.resolve_import(item)
        except ImportException as err:
            logging.info('unknown module %s', err.module_name)
        else:
            yield resolved
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def path_from_pythonpath(pythonpath):
    """Create an fs.Path object from a pythonpath string."""
    result = fs.Path()
    # pythonpath entries are separated by the OS path separator (':' / ';').
    for entry in pythonpath.split(os.pathsep):
        result.add_path(utils.expand_path(entry), 'os')
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_file_node(import_graph, node, indent):
    """Prettyprint a node based on its provenance (direct/local/system/builtin)."""
    provenance = import_graph.provenance[node]
    # NOTE(review): isinstance order is kept as-is in case the provenance
    # classes form a hierarchy — confirm before reordering.
    if isinstance(provenance, resolve.Direct):
        text = '+ ' + provenance.short_path
    elif isinstance(provenance, resolve.Local):
        text = ' ' + provenance.short_path
    elif isinstance(provenance, resolve.System):
        text = ':: ' + provenance.short_path
    elif isinstance(provenance, resolve.Builtin):
        text = '(%s)' % provenance.module_name
    else:
        text = '%r' % node
    return ' ' * indent + text
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_node(import_graph, node, indent):
    """Helper function for print_tree: format a single node or a cycle NodeSet."""
    if not isinstance(node, graph.NodeSet):
        return format_file_node(import_graph, node, indent)
    # A NodeSet represents an import cycle; render its members indented
    # inside a `cycle { ... }` wrapper.
    pad = ' ' * indent
    lines = [pad + 'cycle {']
    lines.extend(format_file_node(import_graph, member, indent + 1)
                 for member in node.nodes)
    lines.append(pad + '}')
    return '\n'.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_package(parts):
"""Helper function for _resolve_import_versioned.""" |
for i in range(len(parts), 0, -1):
prefix = '.'.join(parts[0:i])
if prefix in sys.modules:
return i, sys.modules[prefix]
return 0, None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_import(name):
"""Helper function for resolve_import.""" |
if name in sys.modules:
return getattr(sys.modules[name], '__file__', name + '.so')
return _resolve_import_versioned(name) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.