repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
biolink/ontobio
ontobio/sim/api/owlsim2.py
OwlSim2Api.filtered_search
def filtered_search(
        self,
        id_list: List,
        negated_classes: List,
        limit: Optional[int] = 100,
        taxon_filter: Optional[int] = None,
        category_filter: Optional[str] = None,
        method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
    """
    Owlsim2 filtered search: resolves taxon and category to a namespace,
    calls search_by_attribute_set, and converts the response to a
    SimResult object
    """
    # owlsim2 has no notion of negation; warn rather than fail
    if negated_classes:
        logging.warning("Owlsim2 does not support negation, ignoring neg classes")

    namespace_filter = self._get_namespace_filter(taxon_filter, category_filter)
    owlsim_results = search_by_attribute_set(
        self.url, tuple(id_list), limit, namespace_filter)
    return self._simsearch_to_simresult(owlsim_results, method)
python
def filtered_search( self, id_list: List, negated_classes: List, limit: Optional[int] = 100, taxon_filter: Optional[int] = None, category_filter: Optional[str] = None, method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult: """ Owlsim2 filtered search, resolves taxon and category to a namespace, calls search_by_attribute_set, and converts to SimResult object """ if len(negated_classes) > 0: logging.warning("Owlsim2 does not support negation, ignoring neg classes") namespace_filter = self._get_namespace_filter(taxon_filter, category_filter) owlsim_results = search_by_attribute_set(self.url, tuple(id_list), limit, namespace_filter) return self._simsearch_to_simresult(owlsim_results, method)
[ "def", "filtered_search", "(", "self", ",", "id_list", ":", "List", ",", "negated_classes", ":", "List", ",", "limit", ":", "Optional", "[", "int", "]", "=", "100", ",", "taxon_filter", ":", "Optional", "[", "int", "]", "=", "None", ",", "category_filter...
Owlsim2 filtered search, resolves taxon and category to a namespace, calls search_by_attribute_set, and converts to SimResult object
[ "Owlsim2", "filtered", "search", "resolves", "taxon", "and", "category", "to", "a", "namespace", "calls", "search_by_attribute_set", "and", "converts", "to", "SimResult", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L249-L266
train
204,800
biolink/ontobio
ontobio/sim/api/owlsim2.py
OwlSim2Api.matchers
def matchers() -> List[SimAlgorithm]:
    """
    Matchers in owlsim2

    Returns the similarity algorithms this API supports; callers use
    this to validate a requested method before querying.
    """
    return [
        SimAlgorithm.PHENODIGM,
        SimAlgorithm.JACCARD,
        SimAlgorithm.SIM_GIC,
        SimAlgorithm.RESNIK,
        SimAlgorithm.SYMMETRIC_RESNIK
    ]
python
def matchers() -> List[SimAlgorithm]: """ Matchers in owlsim2 """ return [ SimAlgorithm.PHENODIGM, SimAlgorithm.JACCARD, SimAlgorithm.SIM_GIC, SimAlgorithm.RESNIK, SimAlgorithm.SYMMETRIC_RESNIK ]
[ "def", "matchers", "(", ")", "->", "List", "[", "SimAlgorithm", "]", ":", "return", "[", "SimAlgorithm", ".", "PHENODIGM", ",", "SimAlgorithm", ".", "JACCARD", ",", "SimAlgorithm", ".", "SIM_GIC", ",", "SimAlgorithm", ".", "RESNIK", ",", "SimAlgorithm", ".",...
Matchers in owlsim2
[ "Matchers", "in", "owlsim2" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L280-L290
train
204,801
biolink/ontobio
ontobio/sim/api/owlsim2.py
OwlSim2Api.get_profile_ic
def get_profile_ic(self, profile: List) -> Dict:
    """
    Given a list of individuals, return their information content

    :param profile: list of class/individual ids to look up
    :return: dict mapping each input id to its information content ('IC')
    :raises JSONDecodeError: re-raised with context when the owlsim2
        response cannot be parsed
    """
    # BUGFIX: the request/parse call must be inside the try block —
    # previously only the loop over the already-parsed dict was wrapped,
    # so the JSONDecodeError handler could never fire (dict indexing
    # cannot raise JSONDecodeError).
    try:
        sim_response = get_attribute_information_profile(self.url, tuple(profile))
        # the 'input' section echoes each query class along with its IC
        profile_ic = {cls['id']: cls['IC'] for cls in sim_response['input']}
    except JSONDecodeError as json_exc:
        raise JSONDecodeError(
            "Cannot parse owlsim2 response: {}".format(json_exc.msg),
            json_exc.doc,
            json_exc.pos
        )

    return profile_ic
python
def get_profile_ic(self, profile: List) -> Dict: """ Given a list of individuals, return their information content """ sim_response = get_attribute_information_profile(self.url, tuple(profile)) profile_ic = {} try: for cls in sim_response['input']: profile_ic[cls['id']] = cls['IC'] except JSONDecodeError as json_exc: raise JSONDecodeError( "Cannot parse owlsim2 response: {}".format(json_exc.msg), json_exc.doc, json_exc.pos ) return profile_ic
[ "def", "get_profile_ic", "(", "self", ",", "profile", ":", "List", ")", "->", "Dict", ":", "sim_response", "=", "get_attribute_information_profile", "(", "self", ".", "url", ",", "tuple", "(", "profile", ")", ")", "profile_ic", "=", "{", "}", "try", ":", ...
Given a list of individuals, return their information content
[ "Given", "a", "list", "of", "individuals", "return", "their", "information", "content" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L292-L309
train
204,802
biolink/ontobio
ontobio/sim/api/owlsim2.py
OwlSim2Api._simsearch_to_simresult
def _simsearch_to_simresult(self, sim_resp: Dict, method: SimAlgorithm) -> SimResult:
    """
    Convert owlsim json to SimResult object

    :param sim_resp: owlsim response from search_by_attribute_set()
    :param method: SimAlgorithm
    :return: SimResult object
    """
    # resolve the query IRIs to node objects
    sim_ids = get_nodes_from_ids(sim_resp['query_IRIs'])

    # NOTE: mutates sim_resp in place — results are re-sorted/re-ranked
    # by the score field corresponding to `method`
    sim_resp['results'] = OwlSim2Api._rank_results(sim_resp['results'], method)

    # get id type map:
    ids = [result['j']['id'] for result in sim_resp['results']]
    id_type_map = get_id_type_map(ids)

    matches = []

    for result in sim_resp['results']:
        matches.append(
            SimMatch(
                id=result['j']['id'],
                label=result['j']['label'],
                rank=result['rank'],
                # method2key maps the SimAlgorithm to the score field name
                score=result[OwlSim2Api.method2key[method]],
                # assumes every result id has at least one type — TODO confirm
                type=id_type_map[result['j']['id']][0],
                taxon=get_taxon(result['j']['id']),
                # owlsim2 does not report significance; placeholder value
                significance="NaN",
                pairwise_match=OwlSim2Api._make_pairwise_matches(result)
            )
        )

    return SimResult(
        query=SimQuery(
            ids=sim_ids,
            unresolved_ids=sim_resp['unresolved'],
            target_ids=[[]]
        ),
        matches=matches,
        metadata=SimMetadata(
            max_max_ic=self.statistics.max_max_ic
        )
    )
python
def _simsearch_to_simresult(self, sim_resp: Dict, method: SimAlgorithm) -> SimResult: """ Convert owlsim json to SimResult object :param sim_resp: owlsim response from search_by_attribute_set() :param method: SimAlgorithm :return: SimResult object """ sim_ids = get_nodes_from_ids(sim_resp['query_IRIs']) sim_resp['results'] = OwlSim2Api._rank_results(sim_resp['results'], method) # get id type map: ids = [result['j']['id'] for result in sim_resp['results']] id_type_map = get_id_type_map(ids) matches = [] for result in sim_resp['results']: matches.append( SimMatch( id=result['j']['id'], label=result['j']['label'], rank=result['rank'], score=result[OwlSim2Api.method2key[method]], type=id_type_map[result['j']['id']][0], taxon=get_taxon(result['j']['id']), significance="NaN", pairwise_match=OwlSim2Api._make_pairwise_matches(result) ) ) return SimResult( query=SimQuery( ids=sim_ids, unresolved_ids=sim_resp['unresolved'], target_ids=[[]] ), matches=matches, metadata=SimMetadata( max_max_ic=self.statistics.max_max_ic ) )
[ "def", "_simsearch_to_simresult", "(", "self", ",", "sim_resp", ":", "Dict", ",", "method", ":", "SimAlgorithm", ")", "->", "SimResult", ":", "sim_ids", "=", "get_nodes_from_ids", "(", "sim_resp", "[", "'query_IRIs'", "]", ")", "sim_resp", "[", "'results'", "]...
Convert owlsim json to SimResult object :param sim_resp: owlsim response from search_by_attribute_set() :param method: SimAlgorithm :return: SimResult object
[ "Convert", "owlsim", "json", "to", "SimResult", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L311-L353
train
204,803
biolink/ontobio
ontobio/sim/api/owlsim2.py
OwlSim2Api._rank_results
def _rank_results(results: List[Dict], method: SimAlgorithm) -> List[Dict]:
    """
    Ranks results - for phenodigm results are ranks but ties need to be
    accounted for; for other methods, results need to be reranked

    Ties share a rank and the next distinct score increments the rank
    by one (dense ranking).

    :param results: Results from search_by_attribute_set()['results']
                    or compare_attribute_sets()['results']
    :param method: sim method used to rank results
    :return: Sorted results list (each dict gains a 'rank' key)
    """
    # field holding the score for this algorithm
    score_key = OwlSim2Api.method2key[method]

    # https://stackoverflow.com/a/73050
    ranked = sorted(results, key=lambda res: res[score_key], reverse=True)

    if ranked:
        rank = 1
        previous_score = ranked[0][score_key]
        for entry in ranked:
            current_score = entry[score_key]
            if previous_score > current_score:
                rank += 1
            entry['rank'] = rank
            previous_score = current_score

    return ranked
python
def _rank_results(results: List[Dict], method: SimAlgorithm) -> List[Dict]: """ Ranks results - for phenodigm results are ranks but ties need to accounted for for other methods, results need to be reranked :param results: Results from search_by_attribute_set()['results'] or compare_attribute_sets()['results'] :param method: sim method used to rank results :return: Sorted results list """ # https://stackoverflow.com/a/73050 sorted_results = sorted( results, reverse=True, key=lambda k: k[OwlSim2Api.method2key[method]] ) if len(sorted_results) > 0: rank = 1 previous_score = sorted_results[0][OwlSim2Api.method2key[method]] for result in sorted_results: if previous_score > result[OwlSim2Api.method2key[method]]: rank += 1 result['rank'] = rank previous_score = result[OwlSim2Api.method2key[method]] return sorted_results
[ "def", "_rank_results", "(", "results", ":", "List", "[", "Dict", "]", ",", "method", ":", "SimAlgorithm", ")", "->", "List", "[", "Dict", "]", ":", "# https://stackoverflow.com/a/73050", "sorted_results", "=", "sorted", "(", "results", ",", "reverse", "=", ...
Ranks results - for phenodigm results are ranks but ties need to accounted for for other methods, results need to be reranked :param results: Results from search_by_attribute_set()['results'] or compare_attribute_sets()['results'] :param method: sim method used to rank results :return: Sorted results list
[ "Ranks", "results", "-", "for", "phenodigm", "results", "are", "ranks", "but", "ties", "need", "to", "accounted", "for", "for", "other", "methods", "results", "need", "to", "be", "reranked" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L416-L439
train
204,804
biolink/ontobio
ontobio/golr/golr_query.py
translate_facet_field
def translate_facet_field(fcs, invert_subject_object=False):
    """
    Translates solr facet_fields results into something easier to manipulate

    A solr facet field looks like this:

     [field1, count1, field2, count2, ..., fieldN, countN]

    We translate this to a dict {f1: c1, ..., fn: cn}

    This has slightly higher overhead for sending over the wire, but is
    easier to use
    """
    if 'facet_fields' not in fcs:
        return {}

    translated = {}
    for facet, flat_counts in fcs['facet_fields'].items():
        if invert_subject_object:
            # swap subject-oriented facet names with their object-oriented
            # counterparts (and vice versa) via the inversion map
            for subj_field, obj_field in INVERT_FIELDS_MAP.items():
                if facet == subj_field:
                    facet = obj_field
                    break
                elif facet == obj_field:
                    facet = subj_field
                    break
        # solr interleaves values and counts: even indices are values,
        # odd indices are their counts
        translated[facet] = dict(zip(flat_counts[::2], flat_counts[1::2]))
    return translated
python
def translate_facet_field(fcs, invert_subject_object = False): """ Translates solr facet_fields results into something easier to manipulate A solr facet field looks like this: [field1, count1, field2, count2, ..., fieldN, countN] We translate this to a dict {f1: c1, ..., fn: cn} This has slightly higher overhead for sending over the wire, but is easier to use """ if 'facet_fields' not in fcs: return {} ffs = fcs['facet_fields'] rs={} for (facet, facetresults) in ffs.items(): if invert_subject_object: for (k,v) in INVERT_FIELDS_MAP.items(): if facet == k: facet = v break elif facet == v: facet = k break pairs = {} rs[facet] = pairs for i in range(int(len(facetresults)/2)): (fv,fc) = (facetresults[i*2],facetresults[i*2+1]) pairs[fv] = fc return rs
[ "def", "translate_facet_field", "(", "fcs", ",", "invert_subject_object", "=", "False", ")", ":", "if", "'facet_fields'", "not", "in", "fcs", ":", "return", "{", "}", "ffs", "=", "fcs", "[", "'facet_fields'", "]", "rs", "=", "{", "}", "for", "(", "facet"...
Translates solr facet_fields results into something easier to manipulate A solr facet field looks like this: [field1, count1, field2, count2, ..., fieldN, countN] We translate this to a dict {f1: c1, ..., fn: cn} This has slightly higher overhead for sending over the wire, but is easier to use
[ "Translates", "solr", "facet_fields", "results", "into", "something", "easier", "to", "manipulate" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L205-L234
train
204,805
biolink/ontobio
ontobio/golr/golr_query.py
goassoc_fieldmap
def goassoc_fieldmap(relationship_type=ACTS_UPSTREAM_OF_OR_WITHIN):
    """
    Returns a mapping of canonical monarch fields to amigo-golr.

    See: https://github.com/geneontology/amigo/blob/master/metadata/ann-config.yaml

    :param relationship_type: relation used to pick the object closure
        field; the regulates closure is used only for
        ACTS_UPSTREAM_OF_OR_WITHIN, otherwise the is_a/part_of closure
    :return: dict mapping canonical field constants (M.*) to amigo-golr
        field names (None means the field has no usable equivalent)
    """
    return {
        M.SUBJECT: 'bioentity',
        M.SUBJECT_CLOSURE: 'bioentity',
        ## In the GO AmiGO instance, the type field is not correctly populated
        ## See above in the code for hack that restores this for planteome instance
        ## M.SUBJECT_CATEGORY: 'type',
        M.SUBJECT_CATEGORY: None,
        M.SUBJECT_LABEL: 'bioentity_label',
        M.SUBJECT_TAXON: 'taxon',
        M.SUBJECT_TAXON_LABEL: 'taxon_label',
        M.SUBJECT_TAXON_CLOSURE: 'taxon_closure',
        M.RELATION: 'qualifier',
        M.OBJECT: 'annotation_class',
        # closure choice depends on the requested relationship type
        M.OBJECT_CLOSURE: REGULATES_CLOSURE if relationship_type == ACTS_UPSTREAM_OF_OR_WITHIN else ISA_PARTOF_CLOSURE,
        M.OBJECT_LABEL: 'annotation_class_label',
        M.OBJECT_TAXON: 'object_taxon',
        M.OBJECT_TAXON_LABEL: 'object_taxon_label',
        M.OBJECT_TAXON_CLOSURE: 'object_taxon_closure',
        M.OBJECT_CATEGORY: None,
        M.EVIDENCE_OBJECT_CLOSURE: 'evidence_subset_closure',
        M.IS_DEFINED_BY: 'assigned_by'
    }
python
def goassoc_fieldmap(relationship_type=ACTS_UPSTREAM_OF_OR_WITHIN): """ Returns a mapping of canonical monarch fields to amigo-golr. See: https://github.com/geneontology/amigo/blob/master/metadata/ann-config.yaml """ return { M.SUBJECT: 'bioentity', M.SUBJECT_CLOSURE: 'bioentity', ## In the GO AmiGO instance, the type field is not correctly populated ## See above in the code for hack that restores this for planteome instance ## M.SUBJECT_CATEGORY: 'type', M.SUBJECT_CATEGORY: None, M.SUBJECT_LABEL: 'bioentity_label', M.SUBJECT_TAXON: 'taxon', M.SUBJECT_TAXON_LABEL: 'taxon_label', M.SUBJECT_TAXON_CLOSURE: 'taxon_closure', M.RELATION: 'qualifier', M.OBJECT: 'annotation_class', M.OBJECT_CLOSURE: REGULATES_CLOSURE if relationship_type == ACTS_UPSTREAM_OF_OR_WITHIN else ISA_PARTOF_CLOSURE, M.OBJECT_LABEL: 'annotation_class_label', M.OBJECT_TAXON: 'object_taxon', M.OBJECT_TAXON_LABEL: 'object_taxon_label', M.OBJECT_TAXON_CLOSURE: 'object_taxon_closure', M.OBJECT_CATEGORY: None, M.EVIDENCE_OBJECT_CLOSURE: 'evidence_subset_closure', M.IS_DEFINED_BY: 'assigned_by' }
[ "def", "goassoc_fieldmap", "(", "relationship_type", "=", "ACTS_UPSTREAM_OF_OR_WITHIN", ")", ":", "return", "{", "M", ".", "SUBJECT", ":", "'bioentity'", ",", "M", ".", "SUBJECT_CLOSURE", ":", "'bioentity'", ",", "## In the GO AmiGO instance, the type field is not correct...
Returns a mapping of canonical monarch fields to amigo-golr. See: https://github.com/geneontology/amigo/blob/master/metadata/ann-config.yaml
[ "Returns", "a", "mapping", "of", "canonical", "monarch", "fields", "to", "amigo", "-", "golr", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L241-L269
train
204,806
biolink/ontobio
ontobio/golr/golr_query.py
map_field
def map_field(fn, m):
    """
    Maps a field name, given a mapping file.
    Returns input if fieldname is unmapped.
    """
    if m is None:
        return fn
    # fall back to the input name when no mapping entry exists
    return m.get(fn, fn)
python
def map_field(fn, m) : """ Maps a field name, given a mapping file. Returns input if fieldname is unmapped. """ if m is None: return fn if fn in m: return m[fn] else: return fn
[ "def", "map_field", "(", "fn", ",", "m", ")", ":", "if", "m", "is", "None", ":", "return", "fn", "if", "fn", "in", "m", ":", "return", "m", "[", "fn", "]", "else", ":", "return", "fn" ]
Maps a field name, given a mapping file. Returns input if fieldname is unmapped.
[ "Maps", "a", "field", "name", "given", "a", "mapping", "file", ".", "Returns", "input", "if", "fieldname", "is", "unmapped", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L271-L281
train
204,807
biolink/ontobio
ontobio/golr/golr_query.py
GolrSearchQuery.search
def search(self):
    """
    Execute solr search query
    """
    query_params = self.solr_params()
    logging.info("PARAMS=" + str(query_params))
    solr_results = self.solr.search(**query_params)
    logging.info("Docs found: {}".format(solr_results.hits))
    return self._process_search_results(solr_results)
python
def search(self): """ Execute solr search query """ params = self.solr_params() logging.info("PARAMS=" + str(params)) results = self.solr.search(**params) logging.info("Docs found: {}".format(results.hits)) return self._process_search_results(results)
[ "def", "search", "(", "self", ")", ":", "params", "=", "self", ".", "solr_params", "(", ")", "logging", ".", "info", "(", "\"PARAMS=\"", "+", "str", "(", "params", ")", ")", "results", "=", "self", ".", "solr", ".", "search", "(", "*", "*", "params...
Execute solr search query
[ "Execute", "solr", "search", "query" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L485-L493
train
204,808
biolink/ontobio
ontobio/golr/golr_query.py
GolrSearchQuery.autocomplete
def autocomplete(self):
    """
    Execute solr autocomplete
    """
    # facet counts are not used for autocomplete responses
    self.facet = False
    query_params = self.solr_params()
    logging.info("PARAMS=" + str(query_params))
    solr_results = self.solr.search(**query_params)
    logging.info("Docs found: {}".format(solr_results.hits))
    return self._process_autocomplete_results(solr_results)
python
def autocomplete(self): """ Execute solr autocomplete """ self.facet = False params = self.solr_params() logging.info("PARAMS=" + str(params)) results = self.solr.search(**params) logging.info("Docs found: {}".format(results.hits)) return self._process_autocomplete_results(results)
[ "def", "autocomplete", "(", "self", ")", ":", "self", ".", "facet", "=", "False", "params", "=", "self", ".", "solr_params", "(", ")", "logging", ".", "info", "(", "\"PARAMS=\"", "+", "str", "(", "params", ")", ")", "results", "=", "self", ".", "solr...
Execute solr autocomplete
[ "Execute", "solr", "autocomplete" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L495-L504
train
204,809
biolink/ontobio
ontobio/golr/golr_query.py
GolrSearchQuery._process_search_results
def _process_search_results(self, results: pysolr.Results) -> SearchResults: """ Convert solr docs to biolink object :param results: pysolr.Results :return: model.GolrResults.SearchResults """ # map go-golr fields to standard for doc in results.docs: if 'entity' in doc: doc['id'] = doc['entity'] doc['label'] = doc['entity_label'] highlighting = { doc['id']: self._process_highlight(results, doc)._asdict() for doc in results.docs if results.highlighting } payload = SearchResults( facet_counts=translate_facet_field(results.facets), highlighting=highlighting, docs=results.docs, numFound=results.hits ) logging.debug('Docs: {}'.format(len(results.docs))) return payload
python
def _process_search_results(self, results: pysolr.Results) -> SearchResults: """ Convert solr docs to biolink object :param results: pysolr.Results :return: model.GolrResults.SearchResults """ # map go-golr fields to standard for doc in results.docs: if 'entity' in doc: doc['id'] = doc['entity'] doc['label'] = doc['entity_label'] highlighting = { doc['id']: self._process_highlight(results, doc)._asdict() for doc in results.docs if results.highlighting } payload = SearchResults( facet_counts=translate_facet_field(results.facets), highlighting=highlighting, docs=results.docs, numFound=results.hits ) logging.debug('Docs: {}'.format(len(results.docs))) return payload
[ "def", "_process_search_results", "(", "self", ",", "results", ":", "pysolr", ".", "Results", ")", "->", "SearchResults", ":", "# map go-golr fields to standard", "for", "doc", "in", "results", ".", "docs", ":", "if", "'entity'", "in", "doc", ":", "doc", "[", ...
Convert solr docs to biolink object :param results: pysolr.Results :return: model.GolrResults.SearchResults
[ "Convert", "solr", "docs", "to", "biolink", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L506-L533
train
204,810
biolink/ontobio
ontobio/golr/golr_query.py
GolrLayPersonSearch.autocomplete
def autocomplete(self):
    """
    Execute solr query for autocomplete
    """
    lay_params = self.set_lay_params()
    logging.info("PARAMS=" + str(lay_params))
    solr_results = self.solr.search(**lay_params)
    logging.info("Docs found: {}".format(solr_results.hits))
    return self._process_layperson_results(solr_results)
python
def autocomplete(self): """ Execute solr query for autocomplete """ params = self.set_lay_params() logging.info("PARAMS="+str(params)) results = self.solr.search(**params) logging.info("Docs found: {}".format(results.hits)) return self._process_layperson_results(results)
[ "def", "autocomplete", "(", "self", ")", ":", "params", "=", "self", ".", "set_lay_params", "(", ")", "logging", ".", "info", "(", "\"PARAMS=\"", "+", "str", "(", "params", ")", ")", "results", "=", "self", ".", "solr", ".", "search", "(", "*", "*", ...
Execute solr query for autocomplete
[ "Execute", "solr", "query", "for", "autocomplete" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L680-L688
train
204,811
biolink/ontobio
ontobio/golr/golr_query.py
GolrAssociationQuery.translate_objs
def translate_objs(self, d, fname):
    """
    Translate a field whose value is expected to be a list
    """
    if fname not in d:
        # TODO: consider adding arg for failure on null
        return None

    values = d[fname]
    # scalar values are promoted to a single-element list
    if not isinstance(values, list):
        values = [values]

    # todo - labels
    return [{'id': idval} for idval in values]
python
def translate_objs(self,d,fname): """ Translate a field whose value is expected to be a list """ if fname not in d: # TODO: consider adding arg for failure on null return None #lf = M.label_field(fname) v = d[fname] if not isinstance(v,list): v = [v] objs = [{'id': idval} for idval in v] # todo - labels return objs
[ "def", "translate_objs", "(", "self", ",", "d", ",", "fname", ")", ":", "if", "fname", "not", "in", "d", ":", "# TODO: consider adding arg for failure on null", "return", "None", "#lf = M.label_field(fname)", "v", "=", "d", "[", "fname", "]", "if", "not", "isi...
Translate a field whose value is expected to be a list
[ "Translate", "a", "field", "whose", "value", "is", "expected", "to", "be", "a", "list" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1415-L1431
train
204,812
biolink/ontobio
ontobio/golr/golr_query.py
GolrAssociationQuery.translate_obj
def translate_obj(self,d,fname):
    """
    Translate a field value from a solr document.

    This includes special logic for when the field value
    denotes an object, here we nest it

    :param d: solr document (dict); may be mutated ('aspect' is deleted)
    :param fname: field name to translate
    :return: nested object dict with id/iri/label/category, or None if
        the field is absent
    """
    if fname not in d:
        # TODO: consider adding arg for failure on null
        return None

    # companion label field for this field name
    lf = M.label_field(fname)

    # NOTE(review): `id` shadows the builtin; left as-is here
    id = d[fname]
    id = self.make_canonical_identifier(id)
    #if id.startswith('MGI:MGI:'):
    #    id = id.replace('MGI:MGI:','MGI:')
    obj = {'id': id}

    if id:
        # amigo schema uses default curie expansion; otherwise the curie
        # map is fetched from the scigraph data service
        if self._use_amigo_schema(self.object_category):
            iri = expand_uri(id)
        else:
            iri = expand_uri(id, [get_curie_map('{}/cypher/curies'.format(self.config.scigraph_data.url))])
        obj['iri'] = iri

    if lf in d:
        obj['label'] = d[lf]

    cf = fname + "_category"
    if cf in d:
        obj['category'] = [d[cf]]

    # GO terms: derive category from the annotation aspect (and consume
    # the aspect key so it is not reused)
    if 'aspect' in d and id.startswith('GO:'):
        obj['category'] = [ASPECT_MAP[d['aspect']]]
        del d['aspect']

    return obj
python
def translate_obj(self,d,fname): """ Translate a field value from a solr document. This includes special logic for when the field value denotes an object, here we nest it """ if fname not in d: # TODO: consider adding arg for failure on null return None lf = M.label_field(fname) id = d[fname] id = self.make_canonical_identifier(id) #if id.startswith('MGI:MGI:'): # id = id.replace('MGI:MGI:','MGI:') obj = {'id': id} if id: if self._use_amigo_schema(self.object_category): iri = expand_uri(id) else: iri = expand_uri(id, [get_curie_map('{}/cypher/curies'.format(self.config.scigraph_data.url))]) obj['iri'] = iri if lf in d: obj['label'] = d[lf] cf = fname + "_category" if cf in d: obj['category'] = [d[cf]] if 'aspect' in d and id.startswith('GO:'): obj['category'] = [ASPECT_MAP[d['aspect']]] del d['aspect'] return obj
[ "def", "translate_obj", "(", "self", ",", "d", ",", "fname", ")", ":", "if", "fname", "not", "in", "d", ":", "# TODO: consider adding arg for failure on null", "return", "None", "lf", "=", "M", ".", "label_field", "(", "fname", ")", "id", "=", "d", "[", ...
Translate a field value from a solr document. This includes special logic for when the field value denotes an object, here we nest it
[ "Translate", "a", "field", "value", "from", "a", "solr", "document", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1434-L1470
train
204,813
biolink/ontobio
ontobio/golr/golr_query.py
GolrAssociationQuery.translate_docs
def translate_docs(self, ds, **kwargs):
    """
    Translate a set of solr results
    """
    # normalize each doc in place before translating
    for doc in ds:
        self.map_doc(doc, {}, self.invert_subject_object)
    return [self.translate_doc(doc, **kwargs) for doc in ds]
python
def translate_docs(self, ds, **kwargs): """ Translate a set of solr results """ for d in ds: self.map_doc(d, {}, self.invert_subject_object) return [self.translate_doc(d, **kwargs) for d in ds]
[ "def", "translate_docs", "(", "self", ",", "ds", ",", "*", "*", "kwargs", ")", ":", "for", "d", "in", "ds", ":", "self", ".", "map_doc", "(", "d", ",", "{", "}", ",", "self", ".", "invert_subject_object", ")", "return", "[", "self", ".", "translate...
Translate a set of solr results
[ "Translate", "a", "set", "of", "solr", "results" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1560-L1567
train
204,814
biolink/ontobio
ontobio/golr/golr_query.py
GolrAssociationQuery.translate_docs_compact
def translate_docs_compact(self, ds, field_mapping=None, slim=None, map_identifiers=None, invert_subject_object=False, **kwargs):
    """
    Translate golr association documents to a compact representation

    Documents are grouped by (subject, relation); each group accumulates
    a de-duplicated list of object ids.

    :param ds: list of solr docs
    :param field_mapping: optional mapping passed through to map_doc
    :param slim: optional collection of class ids; when non-empty,
        objects are replaced by the members of the slim found in each
        doc's object closure
    :param map_identifiers: optional id prefix; when given, subjects are
        re-mapped to an equivalent id with that prefix via the subject
        closure
    :param invert_subject_object: swap subject/object during map_doc
    :return: list of dicts with keys subject, subject_label, relation, objects
    """
    amap = {}
    logging.info("Translating docs to compact form. Slim={}".format(slim))
    for d in ds:
        self.map_doc(d, field_mapping, invert_subject_object=invert_subject_object)

        subject = d[M.SUBJECT]
        subject_label = d[M.SUBJECT_LABEL]

        # TODO: use a more robust method; we need equivalence as separate field in solr
        if map_identifiers is not None:
            if M.SUBJECT_CLOSURE in d:
                subject = self.map_id(subject, map_identifiers, d[M.SUBJECT_CLOSURE])
            else:
                logging.debug("NO SUBJECT CLOSURE IN: "+str(d))

        rel = d.get(M.RELATION)
        skip = False

        # TODO
        if rel == 'not' or rel == 'NOT':
            skip = True

        # this is a list in GO
        if isinstance(rel, list):
            if 'not' in rel or 'NOT' in rel:
                skip = True
            if len(rel) > 1:
                # FIX: logging.warn is a deprecated alias of logging.warning
                logging.warning(">1 relation: {}".format(rel))
            rel = ";".join(rel)

        if skip:
            logging.debug("Skipping: {}".format(d))
            continue

        subject = self.make_canonical_identifier(subject)

        k = (subject, rel)
        if k not in amap:
            amap[k] = {'subject': subject,
                       'subject_label': subject_label,
                       'relation': rel,
                       'objects': []}

        if slim is not None and len(slim) > 0:
            # replace objects with slim members reachable via the closure
            mapped_objects = [x for x in d[M.OBJECT_CLOSURE] if x in slim]
            logging.debug("Mapped objects: {}".format(mapped_objects))
            amap[k]['objects'] += mapped_objects
        else:
            amap[k]['objects'].append(d[M.OBJECT])

    # de-duplicate accumulated object lists
    for k in amap.keys():
        amap[k]['objects'] = list(set(amap[k]['objects']))

    return list(amap.values())
python
def translate_docs_compact(self, ds, field_mapping=None, slim=None, map_identifiers=None, invert_subject_object=False, **kwargs): """ Translate golr association documents to a compact representation """ amap = {} logging.info("Translating docs to compact form. Slim={}".format(slim)) for d in ds: self.map_doc(d, field_mapping, invert_subject_object=invert_subject_object) subject = d[M.SUBJECT] subject_label = d[M.SUBJECT_LABEL] # TODO: use a more robust method; we need equivalence as separate field in solr if map_identifiers is not None: if M.SUBJECT_CLOSURE in d: subject = self.map_id(subject, map_identifiers, d[M.SUBJECT_CLOSURE]) else: logging.debug("NO SUBJECT CLOSURE IN: "+str(d)) rel = d.get(M.RELATION) skip = False # TODO if rel == 'not' or rel == 'NOT': skip = True # this is a list in GO if isinstance(rel,list): if 'not' in rel or 'NOT' in rel: skip = True if len(rel) > 1: logging.warn(">1 relation: {}".format(rel)) rel = ";".join(rel) if skip: logging.debug("Skipping: {}".format(d)) continue subject = self.make_canonical_identifier(subject) #if subject.startswith('MGI:MGI:'): # subject = subject.replace('MGI:MGI:','MGI:') k = (subject,rel) if k not in amap: amap[k] = {'subject':subject, 'subject_label':subject_label, 'relation':rel, 'objects': []} if slim is not None and len(slim)>0: mapped_objects = [x for x in d[M.OBJECT_CLOSURE] if x in slim] logging.debug("Mapped objects: {}".format(mapped_objects)) amap[k]['objects'] += mapped_objects else: amap[k]['objects'].append(d[M.OBJECT]) for k in amap.keys(): amap[k]['objects'] = list(set(amap[k]['objects'])) return list(amap.values())
[ "def", "translate_docs_compact", "(", "self", ",", "ds", ",", "field_mapping", "=", "None", ",", "slim", "=", "None", ",", "map_identifiers", "=", "None", ",", "invert_subject_object", "=", "False", ",", "*", "*", "kwargs", ")", ":", "amap", "=", "{", "}...
Translate golr association documents to a compact representation
[ "Translate", "golr", "association", "documents", "to", "a", "compact", "representation" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1570-L1627
train
204,815
biolink/ontobio
ontobio/golr/golr_query.py
GolrAssociationQuery.map_id
def map_id(self, id, prefix, closure_list):
    """
    Map identifiers based on an equivalence closure list.

    Returns the first member of closure_list carrying the given prefix,
    or the input id when none matches.
    """
    wanted_prefix = prefix + ':'
    # TODO: add option to fail if no mapping, or if >1 mapping
    for eid in closure_list:
        if eid.startswith(wanted_prefix):
            return eid
    # default to input
    return id
python
def map_id(self,id, prefix, closure_list): """ Map identifiers based on an equivalence closure list. """ prefixc = prefix + ':' ids = [eid for eid in closure_list if eid.startswith(prefixc)] # TODO: add option to fail if no mapping, or if >1 mapping if len(ids) == 0: # default to input return id return ids[0]
[ "def", "map_id", "(", "self", ",", "id", ",", "prefix", ",", "closure_list", ")", ":", "prefixc", "=", "prefix", "+", "':'", "ids", "=", "[", "eid", "for", "eid", "in", "closure_list", "if", "eid", ".", "startswith", "(", "prefixc", ")", "]", "# TODO...
Map identifiers based on an equivalence closure list.
[ "Map", "identifiers", "based", "on", "an", "equivalence", "closure", "list", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1629-L1639
train
204,816
biolink/ontobio
ontobio/assoc_factory.py
AssociationSetFactory.create
def create(self, ontology=None,subject_category=None,object_category=None,evidence=None,taxon=None,relation=None, file=None, fmt=None, skim=True): """ creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID """ meta = AssociationSetMetadata(subject_category=subject_category, object_category=object_category, taxon=taxon) if file is not None: return self.create_from_file(file=file, fmt=fmt, ontology=ontology, meta=meta, skim=skim) logging.info("Fetching assocs from store") assocs = bulk_fetch_cached(subject_category=subject_category, object_category=object_category, evidence=evidence, taxon=taxon) logging.info("Creating map for {} subjects".format(len(assocs))) amap = {} subject_label_map = {} for a in assocs: rel = a['relation'] subj = a['subject'] subject_label_map[subj] = a['subject_label'] amap[subj] = a['objects'] aset = AssociationSet(ontology=ontology, meta=meta, subject_label_map=subject_label_map, association_map=amap) return aset
python
def create(self, ontology=None,subject_category=None,object_category=None,evidence=None,taxon=None,relation=None, file=None, fmt=None, skim=True): """ creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID """ meta = AssociationSetMetadata(subject_category=subject_category, object_category=object_category, taxon=taxon) if file is not None: return self.create_from_file(file=file, fmt=fmt, ontology=ontology, meta=meta, skim=skim) logging.info("Fetching assocs from store") assocs = bulk_fetch_cached(subject_category=subject_category, object_category=object_category, evidence=evidence, taxon=taxon) logging.info("Creating map for {} subjects".format(len(assocs))) amap = {} subject_label_map = {} for a in assocs: rel = a['relation'] subj = a['subject'] subject_label_map[subj] = a['subject_label'] amap[subj] = a['objects'] aset = AssociationSet(ontology=ontology, meta=meta, subject_label_map=subject_label_map, association_map=amap) return aset
[ "def", "create", "(", "self", ",", "ontology", "=", "None", ",", "subject_category", "=", "None", ",", "object_category", "=", "None", ",", "evidence", "=", "None", ",", "taxon", "=", "None", ",", "relation", "=", "None", ",", "file", "=", "None", ",",...
creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID
[ "creates", "an", "AssociationSet" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assoc_factory.py#L36-L86
train
204,817
biolink/ontobio
ontobio/assoc_factory.py
AssociationSetFactory.create_from_assocs
def create_from_assocs(self, assocs, **args): """ Creates from a list of association objects """ amap = defaultdict(list) subject_label_map = {} for a in assocs: subj = a['subject'] subj_id = subj['id'] subj_label = subj['label'] subject_label_map[subj_id] = subj_label if not a['negated']: amap[subj_id].append(a['object']['id']) aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args) aset.associations_by_subj = defaultdict(list) aset.associations_by_subj_obj = defaultdict(list) for a in assocs: sub_id = a['subject']['id'] obj_id = a['object']['id'] aset.associations_by_subj[sub_id].append(a) aset.associations_by_subj_obj[(sub_id,obj_id)].append(a) return aset
python
def create_from_assocs(self, assocs, **args): """ Creates from a list of association objects """ amap = defaultdict(list) subject_label_map = {} for a in assocs: subj = a['subject'] subj_id = subj['id'] subj_label = subj['label'] subject_label_map[subj_id] = subj_label if not a['negated']: amap[subj_id].append(a['object']['id']) aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args) aset.associations_by_subj = defaultdict(list) aset.associations_by_subj_obj = defaultdict(list) for a in assocs: sub_id = a['subject']['id'] obj_id = a['object']['id'] aset.associations_by_subj[sub_id].append(a) aset.associations_by_subj_obj[(sub_id,obj_id)].append(a) return aset
[ "def", "create_from_assocs", "(", "self", ",", "assocs", ",", "*", "*", "args", ")", ":", "amap", "=", "defaultdict", "(", "list", ")", "subject_label_map", "=", "{", "}", "for", "a", "in", "assocs", ":", "subj", "=", "a", "[", "'subject'", "]", "sub...
Creates from a list of association objects
[ "Creates", "from", "a", "list", "of", "association", "objects" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assoc_factory.py#L104-L127
train
204,818
biolink/ontobio
ontobio/assoc_factory.py
AssociationSetFactory.create_from_file
def create_from_file(self, file=None, fmt='gaf', skim=True, **args): """ Creates from a file. If fmt is set to None then the file suffixes will be used to choose a parser. Arguments --------- file : str or file input file or filename fmt : str name of format e.g. gaf """ if fmt is not None and not fmt.startswith('.'): fmt = '.{}'.format(fmt) d = { '.gaf' : GafParser, '.gpad' : GpadParser, '.hpoa' : HpoaParser, } if fmt is None: filename = file if isinstance(file, str) else file.name suffixes = pathlib.Path(filename).suffixes iterator = (fn() for ext, fn in d.items() if ext in suffixes) else: iterator = (fn() for ext, fn in d.items() if ext == fmt) try: parser = next(iterator) except StopIteration: logging.error("Format not recognized: {}".format(fmt)) logging.info("Parsing {} with {}/{}".format(file, fmt, parser)) if skim: results = parser.skim(file) return self.create_from_tuples(results, **args) else: assocs = parser.parse(file, skipheader=True) return self.create_from_assocs(assocs, **args)
python
def create_from_file(self, file=None, fmt='gaf', skim=True, **args): """ Creates from a file. If fmt is set to None then the file suffixes will be used to choose a parser. Arguments --------- file : str or file input file or filename fmt : str name of format e.g. gaf """ if fmt is not None and not fmt.startswith('.'): fmt = '.{}'.format(fmt) d = { '.gaf' : GafParser, '.gpad' : GpadParser, '.hpoa' : HpoaParser, } if fmt is None: filename = file if isinstance(file, str) else file.name suffixes = pathlib.Path(filename).suffixes iterator = (fn() for ext, fn in d.items() if ext in suffixes) else: iterator = (fn() for ext, fn in d.items() if ext == fmt) try: parser = next(iterator) except StopIteration: logging.error("Format not recognized: {}".format(fmt)) logging.info("Parsing {} with {}/{}".format(file, fmt, parser)) if skim: results = parser.skim(file) return self.create_from_tuples(results, **args) else: assocs = parser.parse(file, skipheader=True) return self.create_from_assocs(assocs, **args)
[ "def", "create_from_file", "(", "self", ",", "file", "=", "None", ",", "fmt", "=", "'gaf'", ",", "skim", "=", "True", ",", "*", "*", "args", ")", ":", "if", "fmt", "is", "not", "None", "and", "not", "fmt", ".", "startswith", "(", "'.'", ")", ":",...
Creates from a file. If fmt is set to None then the file suffixes will be used to choose a parser. Arguments --------- file : str or file input file or filename fmt : str name of format e.g. gaf
[ "Creates", "from", "a", "file", ".", "If", "fmt", "is", "set", "to", "None", "then", "the", "file", "suffixes", "will", "be", "used", "to", "choose", "a", "parser", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assoc_factory.py#L129-L170
train
204,819
biolink/ontobio
ontobio/assoc_factory.py
AssociationSetFactory.create_from_remote_file
def create_from_remote_file(self, group, snapshot=True, **args): """ Creates from remote GAF """ import requests url = "http://snapshot.geneontology.org/annotations/{}.gaf.gz".format(group) r = requests.get(url, stream=True, headers={'User-Agent': get_user_agent(modules=[requests], caller_name=__name__)}) p = GafParser() results = p.skim(r.raw) return self.create_from_tuples(results, **args)
python
def create_from_remote_file(self, group, snapshot=True, **args): """ Creates from remote GAF """ import requests url = "http://snapshot.geneontology.org/annotations/{}.gaf.gz".format(group) r = requests.get(url, stream=True, headers={'User-Agent': get_user_agent(modules=[requests], caller_name=__name__)}) p = GafParser() results = p.skim(r.raw) return self.create_from_tuples(results, **args)
[ "def", "create_from_remote_file", "(", "self", ",", "group", ",", "snapshot", "=", "True", ",", "*", "*", "args", ")", ":", "import", "requests", "url", "=", "\"http://snapshot.geneontology.org/annotations/{}.gaf.gz\"", ".", "format", "(", "group", ")", "r", "="...
Creates from remote GAF
[ "Creates", "from", "remote", "GAF" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assoc_factory.py#L190-L199
train
204,820
biolink/ontobio
bin/ogr.py
render
def render(ont, query_ids, args): """ Writes or displays graph """ if args.slim.find('m') > -1: logging.info("SLIMMING") g = get_minimal_subgraph(g, query_ids) w = GraphRenderer.create(args.to) if args.showdefs: w.config.show_text_definition = True if args.render: if 'd' in args.render: logging.info("Showing text defs") w.config.show_text_definition = True if args.outfile is not None: w.outfile = args.outfile w.write(ont, query_ids=query_ids, container_predicates=args.container_properties)
python
def render(ont, query_ids, args): """ Writes or displays graph """ if args.slim.find('m') > -1: logging.info("SLIMMING") g = get_minimal_subgraph(g, query_ids) w = GraphRenderer.create(args.to) if args.showdefs: w.config.show_text_definition = True if args.render: if 'd' in args.render: logging.info("Showing text defs") w.config.show_text_definition = True if args.outfile is not None: w.outfile = args.outfile w.write(ont, query_ids=query_ids, container_predicates=args.container_properties)
[ "def", "render", "(", "ont", ",", "query_ids", ",", "args", ")", ":", "if", "args", ".", "slim", ".", "find", "(", "'m'", ")", ">", "-", "1", ":", "logging", ".", "info", "(", "\"SLIMMING\"", ")", "g", "=", "get_minimal_subgraph", "(", "g", ",", ...
Writes or displays graph
[ "Writes", "or", "displays", "graph" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/bin/ogr.py#L165-L181
train
204,821
biolink/ontobio
ontobio/golr/golr_sim.py
get_object_closure
def get_object_closure(subject, object_category=None, **kwargs): """ Find all terms used to annotate subject plus ancestors """ results = search_associations(subject=subject, object_category=object_category, select_fields=[], facet_fields=[M.OBJECT_CLOSURE], facet_limit=-1, rows=0, **kwargs) return set(results['facet_counts'][M.OBJECT_CLOSURE].keys())
python
def get_object_closure(subject, object_category=None, **kwargs): """ Find all terms used to annotate subject plus ancestors """ results = search_associations(subject=subject, object_category=object_category, select_fields=[], facet_fields=[M.OBJECT_CLOSURE], facet_limit=-1, rows=0, **kwargs) return set(results['facet_counts'][M.OBJECT_CLOSURE].keys())
[ "def", "get_object_closure", "(", "subject", ",", "object_category", "=", "None", ",", "*", "*", "kwargs", ")", ":", "results", "=", "search_associations", "(", "subject", "=", "subject", ",", "object_category", "=", "object_category", ",", "select_fields", "=",...
Find all terms used to annotate subject plus ancestors
[ "Find", "all", "terms", "used", "to", "annotate", "subject", "plus", "ancestors" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_sim.py#L13-L24
train
204,822
biolink/ontobio
ontobio/util/scigraph_util.py
namespace_to_taxon
def namespace_to_taxon() -> Dict[str, Node]: """ namespace to taxon mapping """ human_taxon = Node( id='NCBITaxon:9606', label='Homo sapiens' ) return { 'MGI': Node( id='NCBITaxon:10090', label='Mus musculus' ), 'MONDO': human_taxon, 'OMIM': human_taxon, 'MONARCH': human_taxon, 'HGNC': human_taxon, 'FlyBase': Node( id='NCBITaxon:7227', label='Drosophila melanogaster' ), 'WormBase': Node( id='NCBITaxon:6239', label='Caenorhabditis elegans' ), 'ZFIN': Node( id='NCBITaxon:7955', label='Danio rerio' ) }
python
def namespace_to_taxon() -> Dict[str, Node]: """ namespace to taxon mapping """ human_taxon = Node( id='NCBITaxon:9606', label='Homo sapiens' ) return { 'MGI': Node( id='NCBITaxon:10090', label='Mus musculus' ), 'MONDO': human_taxon, 'OMIM': human_taxon, 'MONARCH': human_taxon, 'HGNC': human_taxon, 'FlyBase': Node( id='NCBITaxon:7227', label='Drosophila melanogaster' ), 'WormBase': Node( id='NCBITaxon:6239', label='Caenorhabditis elegans' ), 'ZFIN': Node( id='NCBITaxon:7955', label='Danio rerio' ) }
[ "def", "namespace_to_taxon", "(", ")", "->", "Dict", "[", "str", ",", "Node", "]", ":", "human_taxon", "=", "Node", "(", "id", "=", "'NCBITaxon:9606'", ",", "label", "=", "'Homo sapiens'", ")", "return", "{", "'MGI'", ":", "Node", "(", "id", "=", "'NCB...
namespace to taxon mapping
[ "namespace", "to", "taxon", "mapping" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/util/scigraph_util.py#L11-L40
train
204,823
biolink/ontobio
ontobio/util/scigraph_util.py
get_scigraph_nodes
def get_scigraph_nodes(id_list)-> Iterator[Dict]: """ Queries scigraph neighbors to get a list of nodes back We use the scigraph neighbors function because ids can be sent in batch which is faster than iteratively querying solr search or the scigraph graph/id function :return: json decoded result from scigraph_ontology._neighbors_graph :raises ValueError: If id is not in scigraph """ scigraph = OntologyFactory().create('scigraph:data') chunks = [id_list[i:i + 400] for i in range(0, len(list(id_list)), 400)] for chunk in chunks: params = { 'id': chunk, 'depth': 0 } try: result_graph = scigraph._neighbors_graph(**params) for node in result_graph['nodes']: yield node except JSONDecodeError as exception: # Assume json decode is due to an incorrect class ID # Should we handle this? raise ValueError(exception.doc)
python
def get_scigraph_nodes(id_list)-> Iterator[Dict]: """ Queries scigraph neighbors to get a list of nodes back We use the scigraph neighbors function because ids can be sent in batch which is faster than iteratively querying solr search or the scigraph graph/id function :return: json decoded result from scigraph_ontology._neighbors_graph :raises ValueError: If id is not in scigraph """ scigraph = OntologyFactory().create('scigraph:data') chunks = [id_list[i:i + 400] for i in range(0, len(list(id_list)), 400)] for chunk in chunks: params = { 'id': chunk, 'depth': 0 } try: result_graph = scigraph._neighbors_graph(**params) for node in result_graph['nodes']: yield node except JSONDecodeError as exception: # Assume json decode is due to an incorrect class ID # Should we handle this? raise ValueError(exception.doc)
[ "def", "get_scigraph_nodes", "(", "id_list", ")", "->", "Iterator", "[", "Dict", "]", ":", "scigraph", "=", "OntologyFactory", "(", ")", ".", "create", "(", "'scigraph:data'", ")", "chunks", "=", "[", "id_list", "[", "i", ":", "i", "+", "400", "]", "fo...
Queries scigraph neighbors to get a list of nodes back We use the scigraph neighbors function because ids can be sent in batch which is faster than iteratively querying solr search or the scigraph graph/id function :return: json decoded result from scigraph_ontology._neighbors_graph :raises ValueError: If id is not in scigraph
[ "Queries", "scigraph", "neighbors", "to", "get", "a", "list", "of", "nodes", "back" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/util/scigraph_util.py#L43-L70
train
204,824
biolink/ontobio
ontobio/util/scigraph_util.py
get_taxon
def get_taxon(id: str) -> Optional[Node]: """ get taxon for id Currently via hardcoding, should replace when scigraph when taxa are more universally annotated (having these as node properties would also be more performant) :param id: curie formatted id :return: Node where id is the NCBITaxon curie and label is the scientific name """ taxon = None namespace = id.split(":")[0] if namespace in namespace_to_taxon(): taxon = namespace_to_taxon()[namespace] return taxon
python
def get_taxon(id: str) -> Optional[Node]: """ get taxon for id Currently via hardcoding, should replace when scigraph when taxa are more universally annotated (having these as node properties would also be more performant) :param id: curie formatted id :return: Node where id is the NCBITaxon curie and label is the scientific name """ taxon = None namespace = id.split(":")[0] if namespace in namespace_to_taxon(): taxon = namespace_to_taxon()[namespace] return taxon
[ "def", "get_taxon", "(", "id", ":", "str", ")", "->", "Optional", "[", "Node", "]", ":", "taxon", "=", "None", "namespace", "=", "id", ".", "split", "(", "\":\"", ")", "[", "0", "]", "if", "namespace", "in", "namespace_to_taxon", "(", ")", ":", "ta...
get taxon for id Currently via hardcoding, should replace when scigraph when taxa are more universally annotated (having these as node properties would also be more performant) :param id: curie formatted id :return: Node where id is the NCBITaxon curie and label is the scientific name
[ "get", "taxon", "for", "id" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/util/scigraph_util.py#L116-L132
train
204,825
biolink/ontobio
ontobio/util/scigraph_util.py
typed_node_from_id
def typed_node_from_id(id: str) -> TypedNode: """ Get typed node from id :param id: id as curie :return: TypedNode object """ filter_out_types = [ 'cliqueLeader', 'Class', 'Node', 'Individual', 'quality', 'sequence feature' ] node = next(get_scigraph_nodes([id])) if 'lbl' in node: label = node['lbl'] else: label = None # Empty string or None? types = [typ.lower() for typ in node['meta']['types'] if typ not in filter_out_types] return TypedNode( id=node['id'], label=label, type=types[0], taxon = get_taxon(id) )
python
def typed_node_from_id(id: str) -> TypedNode: """ Get typed node from id :param id: id as curie :return: TypedNode object """ filter_out_types = [ 'cliqueLeader', 'Class', 'Node', 'Individual', 'quality', 'sequence feature' ] node = next(get_scigraph_nodes([id])) if 'lbl' in node: label = node['lbl'] else: label = None # Empty string or None? types = [typ.lower() for typ in node['meta']['types'] if typ not in filter_out_types] return TypedNode( id=node['id'], label=label, type=types[0], taxon = get_taxon(id) )
[ "def", "typed_node_from_id", "(", "id", ":", "str", ")", "->", "TypedNode", ":", "filter_out_types", "=", "[", "'cliqueLeader'", ",", "'Class'", ",", "'Node'", ",", "'Individual'", ",", "'quality'", ",", "'sequence feature'", "]", "node", "=", "next", "(", "...
Get typed node from id :param id: id as curie :return: TypedNode object
[ "Get", "typed", "node", "from", "id" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/util/scigraph_util.py#L135-L165
train
204,826
biolink/ontobio
ontobio/io/assocparser.py
Report.to_report_json
def to_report_json(self): """ Generate a summary in json format """ return self.reporter.json(self.n_lines, self.n_assocs, self.skipped)
python
def to_report_json(self): """ Generate a summary in json format """ return self.reporter.json(self.n_lines, self.n_assocs, self.skipped)
[ "def", "to_report_json", "(", "self", ")", ":", "return", "self", ".", "reporter", ".", "json", "(", "self", ".", "n_lines", ",", "self", ".", "n_assocs", ",", "self", ".", "skipped", ")" ]
Generate a summary in json format
[ "Generate", "a", "summary", "in", "json", "format" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/assocparser.py#L264-L269
train
204,827
biolink/ontobio
ontobio/io/assocparser.py
Report.to_markdown
def to_markdown(self): """ Generate a summary in markdown format """ json = self.to_report_json() # summary = json['summary'] s = "# Group: {group} - Dataset: {dataset}\n".format(group=json["group"], dataset=json["dataset"]) s += "\n## SUMMARY\n\n" s += "This report generated on {}\n\n".format(datetime.date.today()) s += " * Associations: {}\n" . format(json["associations"]) s += " * Lines in file (incl headers): {}\n" . format(json["lines"]) s += " * Lines skipped: {}\n" . format(json["skipped_lines"]) # Header from GAF s += "## Header From Original Association File\n\n" s += "\n".join(["> {} ".format(head) for head in self.header]) ## Table of Contents s += "\n\n## Contents\n\n" for rule, messages in sorted(json["messages"].items(), key=lambda t: t[0]): any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags]) # For each tag we say to suppress output for, check if it matches any tag in the rule. If any matches if self.config.rule_metadata and any_suppress_tag_in_rule_metadata: print("Skipping {rule_num} because the tag(s) '{tag}' are suppressed".format(rule_num=rule, tag=", ".join(self.config.suppress_rule_reporting_tags))) continue s += "[{rule}](#{rule})\n\n".format(rule=rule) s += "\n## MESSAGES\n\n" for (rule, messages) in sorted(json["messages"].items(), key=lambda t: t[0]): any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags]) # Skip if the rule metadata has "silent" as a tag if self.config.rule_metadata and any_suppress_tag_in_rule_metadata: # If there is a rule metadata, and the rule ID is in the config, # get the list of tags if present and check for existence of "silent". # If contained, continue to the next rule. 
continue s += "### {rule}\n\n".format(rule=rule) if rule != "other" and self.config.rule_metadata: s += "{title}\n\n".format(title=self.config.rule_metadata.get(rule, {}).get("title", "")) s += "* total: {amount}\n".format(amount=len(messages)) if len(messages) > 0: s += "#### Messages\n" for message in messages: obj = " ({})".format(message["obj"]) if message["obj"] else "" s += "* {level} - {type}: {message}{obj} -- `{line}`\n".format(level=message["level"], type=message["type"], message=message["message"], line=message["line"], obj=obj) # for g in json['groups']: # s += " * {}: {}\n".format(g['level'], g['count']) # s += "\n\n" # for g in json['groups']: # level = g['level'] # msgs = g['messages'] # if len(msgs) > 0: # s += "### {}\n\n".format(level) # for m in msgs: # s += " * {}: obj:'{}' \"{}\" `{}`\n".format(m['type'],m['obj'],m['message'],m['line']) return s
python
def to_markdown(self): """ Generate a summary in markdown format """ json = self.to_report_json() # summary = json['summary'] s = "# Group: {group} - Dataset: {dataset}\n".format(group=json["group"], dataset=json["dataset"]) s += "\n## SUMMARY\n\n" s += "This report generated on {}\n\n".format(datetime.date.today()) s += " * Associations: {}\n" . format(json["associations"]) s += " * Lines in file (incl headers): {}\n" . format(json["lines"]) s += " * Lines skipped: {}\n" . format(json["skipped_lines"]) # Header from GAF s += "## Header From Original Association File\n\n" s += "\n".join(["> {} ".format(head) for head in self.header]) ## Table of Contents s += "\n\n## Contents\n\n" for rule, messages in sorted(json["messages"].items(), key=lambda t: t[0]): any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags]) # For each tag we say to suppress output for, check if it matches any tag in the rule. If any matches if self.config.rule_metadata and any_suppress_tag_in_rule_metadata: print("Skipping {rule_num} because the tag(s) '{tag}' are suppressed".format(rule_num=rule, tag=", ".join(self.config.suppress_rule_reporting_tags))) continue s += "[{rule}](#{rule})\n\n".format(rule=rule) s += "\n## MESSAGES\n\n" for (rule, messages) in sorted(json["messages"].items(), key=lambda t: t[0]): any_suppress_tag_in_rule_metadata = any([tag in self.config.rule_metadata.get(rule, {}).get("tags", []) for tag in self.config.suppress_rule_reporting_tags]) # Skip if the rule metadata has "silent" as a tag if self.config.rule_metadata and any_suppress_tag_in_rule_metadata: # If there is a rule metadata, and the rule ID is in the config, # get the list of tags if present and check for existence of "silent". # If contained, continue to the next rule. 
continue s += "### {rule}\n\n".format(rule=rule) if rule != "other" and self.config.rule_metadata: s += "{title}\n\n".format(title=self.config.rule_metadata.get(rule, {}).get("title", "")) s += "* total: {amount}\n".format(amount=len(messages)) if len(messages) > 0: s += "#### Messages\n" for message in messages: obj = " ({})".format(message["obj"]) if message["obj"] else "" s += "* {level} - {type}: {message}{obj} -- `{line}`\n".format(level=message["level"], type=message["type"], message=message["message"], line=message["line"], obj=obj) # for g in json['groups']: # s += " * {}: {}\n".format(g['level'], g['count']) # s += "\n\n" # for g in json['groups']: # level = g['level'] # msgs = g['messages'] # if len(msgs) > 0: # s += "### {}\n\n".format(level) # for m in msgs: # s += " * {}: obj:'{}' \"{}\" `{}`\n".format(m['type'],m['obj'],m['message'],m['line']) return s
[ "def", "to_markdown", "(", "self", ")", ":", "json", "=", "self", ".", "to_report_json", "(", ")", "# summary = json['summary']", "s", "=", "\"# Group: {group} - Dataset: {dataset}\\n\"", ".", "format", "(", "group", "=", "json", "[", "\"group\"", "]", ",", "dat...
Generate a summary in markdown format
[ "Generate", "a", "summary", "in", "markdown", "format" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/assocparser.py#L271-L330
train
204,828
biolink/ontobio
ontobio/io/assocparser.py
AssocParser.parse
def parse(self, file, skipheader=False, outfile=None): """Parse a line-oriented association file into a list of association dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Associations generated from the file """ associations = self.association_generator(file, skipheader=skipheader, outfile=outfile) a = list(associations) return a
python
def parse(self, file, skipheader=False, outfile=None): """Parse a line-oriented association file into a list of association dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Associations generated from the file """ associations = self.association_generator(file, skipheader=skipheader, outfile=outfile) a = list(associations) return a
[ "def", "parse", "(", "self", ",", "file", ",", "skipheader", "=", "False", ",", "outfile", "=", "None", ")", ":", "associations", "=", "self", ".", "association_generator", "(", "file", ",", "skipheader", "=", "skipheader", ",", "outfile", "=", "outfile", ...
Parse a line-oriented association file into a list of association dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into association objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Associations generated from the file
[ "Parse", "a", "line", "-", "oriented", "association", "file", "into", "a", "list", "of", "association", "dict", "objects" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/assocparser.py#L341-L362
train
204,829
biolink/ontobio
ontobio/io/assocparser.py
AssocParser.association_generator
def association_generator(self, file, skipheader=False, outfile=None) -> Dict: """ Returns a generator that yields successive associations from file Yields ------ association """ file = self._ensure_file(file) for line in file: parsed_result = self.parse_line(line) self.report.report_parsed_result(parsed_result, outfile, self.config.filtered_evidence_file, self.config.filter_out_evidence) for association in parsed_result.associations: # yield association if we don't care if it's a header or if it's definitely a real gaf line if not skipheader or "header" not in association: yield association logging.info(self.report.short_summary()) file.close()
python
def association_generator(self, file, skipheader=False, outfile=None) -> Dict: """ Returns a generator that yields successive associations from file Yields ------ association """ file = self._ensure_file(file) for line in file: parsed_result = self.parse_line(line) self.report.report_parsed_result(parsed_result, outfile, self.config.filtered_evidence_file, self.config.filter_out_evidence) for association in parsed_result.associations: # yield association if we don't care if it's a header or if it's definitely a real gaf line if not skipheader or "header" not in association: yield association logging.info(self.report.short_summary()) file.close()
[ "def", "association_generator", "(", "self", ",", "file", ",", "skipheader", "=", "False", ",", "outfile", "=", "None", ")", "->", "Dict", ":", "file", "=", "self", ".", "_ensure_file", "(", "file", ")", "for", "line", "in", "file", ":", "parsed_result",...
Returns a generator that yields successive associations from file Yields ------ association
[ "Returns", "a", "generator", "that", "yields", "successive", "associations", "from", "file" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/assocparser.py#L364-L382
train
204,830
biolink/ontobio
ontobio/io/assocparser.py
AssocParser.map_to_subset
def map_to_subset(self, file, outfile=None, ontology=None, subset=None, class_map=None, relations=None): """ Map a file to a subset, writing out results You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings Arguments --------- file: file Name or file object for input assoc file outfile: file Name or file object for output (mapped) assoc file; writes to stdout if not set subset: str Optional name of subset to map to, e.g. goslim_generic class_map: dict Mapping between asserted class ids and ids to map to. Many to many ontology: `Ontology` Ontology to extract subset from """ if subset is not None: logging.info("Creating mapping for subset: {}".format(subset)) class_map = ontology.create_slim_mapping(subset=subset, relations=relations) if class_map is None: raise ValueError("Neither class_map not subset is set") col = self.ANNOTATION_CLASS_COLUMN file = self._ensure_file(file) tuples = [] for line in file: if line.startswith("!"): continue vals = line.split("\t") logging.info("LINE: {} VALS: {}".format(line, vals)) if len(vals) < col: raise ValueError("Line: {} has too few cols, expect class id in col {}".format(line, col)) cid = vals[col] if cid not in class_map or len(class_map[cid]) == 0: self.report.error(line, Report.UNMAPPED_ID, cid) continue else: for mcid in class_map[cid]: vals[col] = mcid line = "\t".join(vals) if outfile is not None: outfile.write(line) else: print(line)
python
def map_to_subset(self, file, outfile=None, ontology=None, subset=None, class_map=None, relations=None): """ Map a file to a subset, writing out results You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings Arguments --------- file: file Name or file object for input assoc file outfile: file Name or file object for output (mapped) assoc file; writes to stdout if not set subset: str Optional name of subset to map to, e.g. goslim_generic class_map: dict Mapping between asserted class ids and ids to map to. Many to many ontology: `Ontology` Ontology to extract subset from """ if subset is not None: logging.info("Creating mapping for subset: {}".format(subset)) class_map = ontology.create_slim_mapping(subset=subset, relations=relations) if class_map is None: raise ValueError("Neither class_map not subset is set") col = self.ANNOTATION_CLASS_COLUMN file = self._ensure_file(file) tuples = [] for line in file: if line.startswith("!"): continue vals = line.split("\t") logging.info("LINE: {} VALS: {}".format(line, vals)) if len(vals) < col: raise ValueError("Line: {} has too few cols, expect class id in col {}".format(line, col)) cid = vals[col] if cid not in class_map or len(class_map[cid]) == 0: self.report.error(line, Report.UNMAPPED_ID, cid) continue else: for mcid in class_map[cid]: vals[col] = mcid line = "\t".join(vals) if outfile is not None: outfile.write(line) else: print(line)
[ "def", "map_to_subset", "(", "self", ",", "file", ",", "outfile", "=", "None", ",", "ontology", "=", "None", ",", "subset", "=", "None", ",", "class_map", "=", "None", ",", "relations", "=", "None", ")", ":", "if", "subset", "is", "not", "None", ":",...
Map a file to a subset, writing out results You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings Arguments --------- file: file Name or file object for input assoc file outfile: file Name or file object for output (mapped) assoc file; writes to stdout if not set subset: str Optional name of subset to map to, e.g. goslim_generic class_map: dict Mapping between asserted class ids and ids to map to. Many to many ontology: `Ontology` Ontology to extract subset from
[ "Map", "a", "file", "to", "a", "subset", "writing", "out", "results" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/assocparser.py#L395-L442
train
204,831
biolink/ontobio
ontobio/config.py
get_config
def get_config(): """ Return configuration for current session. When called for the first time, this will create a config object, using whatever is the default load path to find the config yaml """ if session.config is None: path = session.default_config_path if os.path.isfile(path): logging.info("LOADING FROM: {}".format(path)) session.config = load_config(path) else: session.config = Config() logging.info("using default session: {}, path does not exist: {}".format(session, path)) else: logging.info("Using pre-loaded object: {}".format(session.config)) return session.config
python
def get_config(): """ Return configuration for current session. When called for the first time, this will create a config object, using whatever is the default load path to find the config yaml """ if session.config is None: path = session.default_config_path if os.path.isfile(path): logging.info("LOADING FROM: {}".format(path)) session.config = load_config(path) else: session.config = Config() logging.info("using default session: {}, path does not exist: {}".format(session, path)) else: logging.info("Using pre-loaded object: {}".format(session.config)) return session.config
[ "def", "get_config", "(", ")", ":", "if", "session", ".", "config", "is", "None", ":", "path", "=", "session", ".", "default_config_path", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "logging", ".", "info", "(", "\"LOADING FROM: {}\"",...
Return configuration for current session. When called for the first time, this will create a config object, using whatever is the default load path to find the config yaml
[ "Return", "configuration", "for", "current", "session", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/config.py#L195-L212
train
204,832
biolink/ontobio
ontobio/config.py
set_config
def set_config(path): """ Set configuration for current session. """ logging.info("LOADING FROM: {}".format(path)) session.config = load_config(path) return session.config
python
def set_config(path): """ Set configuration for current session. """ logging.info("LOADING FROM: {}".format(path)) session.config = load_config(path) return session.config
[ "def", "set_config", "(", "path", ")", ":", "logging", ".", "info", "(", "\"LOADING FROM: {}\"", ".", "format", "(", "path", ")", ")", "session", ".", "config", "=", "load_config", "(", "path", ")", "return", "session", ".", "config" ]
Set configuration for current session.
[ "Set", "configuration", "for", "current", "session", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/config.py#L214-L220
train
204,833
biolink/ontobio
ontobio/config.py
Config.get_solr_search_url
def get_solr_search_url(self, use_amigo=False): """ Return solr URL to be used for lexical entity searches A solr search URL is used to search entities/concepts based on a limited set of parameters. Arguments --------- use_amigo : bool If true, get the URL for the GO/AmiGO instance of GOlr. This is typically used for category='function' queries """ url = self.endpoint_url(self.solr_search) if use_amigo: url = self.endpoint_url(self.amigo_solr_search) return url
python
def get_solr_search_url(self, use_amigo=False): """ Return solr URL to be used for lexical entity searches A solr search URL is used to search entities/concepts based on a limited set of parameters. Arguments --------- use_amigo : bool If true, get the URL for the GO/AmiGO instance of GOlr. This is typically used for category='function' queries """ url = self.endpoint_url(self.solr_search) if use_amigo: url = self.endpoint_url(self.amigo_solr_search) return url
[ "def", "get_solr_search_url", "(", "self", ",", "use_amigo", "=", "False", ")", ":", "url", "=", "self", ".", "endpoint_url", "(", "self", ".", "solr_search", ")", "if", "use_amigo", ":", "url", "=", "self", ".", "endpoint_url", "(", "self", ".", "amigo_...
Return solr URL to be used for lexical entity searches A solr search URL is used to search entities/concepts based on a limited set of parameters. Arguments --------- use_amigo : bool If true, get the URL for the GO/AmiGO instance of GOlr. This is typically used for category='function' queries
[ "Return", "solr", "URL", "to", "be", "used", "for", "lexical", "entity", "searches" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/config.py#L152-L166
train
204,834
biolink/ontobio
bin/validate.py
download_source_gafs
def download_source_gafs(group_metadata, target_dir, exclusions=[], base_download_url=None): """ This looks at a group metadata dictionary and downloads each GAF source that is not in the exclusions list. For each downloaded file, keep track of the path of the file. If the file is zipped, it will unzip it here. This function returns a list of tuples of the dataset dictionary mapped to the downloaded source path. """ gaf_urls = [ (data, data["source"]) for data in group_metadata["datasets"] if data["type"] == "gaf" and data["dataset"] not in exclusions ] # List of dataset metadata to gaf download url click.echo("Found {}".format(", ".join( [ kv[0]["dataset"] for kv in gaf_urls ] ))) downloaded_paths = [] for dataset_metadata, gaf_url in gaf_urls: dataset = dataset_metadata["dataset"] # Local target download path setup - path and then directories path = download_a_dataset_source(group_metadata["id"], dataset_metadata, target_dir, gaf_url, base_download_url=base_download_url) if dataset_metadata["compression"] == "gzip": # Unzip any downloaded file that has gzip, strip of the gzip extension unzipped = os.path.splitext(path)[0] unzip(path, unzipped) path = unzipped else: # otherwise file is coming in uncompressed. But we want to make sure # to zip up the original source also zipup(path) downloaded_paths.append((dataset_metadata, path)) return downloaded_paths
python
def download_source_gafs(group_metadata, target_dir, exclusions=[], base_download_url=None): """ This looks at a group metadata dictionary and downloads each GAF source that is not in the exclusions list. For each downloaded file, keep track of the path of the file. If the file is zipped, it will unzip it here. This function returns a list of tuples of the dataset dictionary mapped to the downloaded source path. """ gaf_urls = [ (data, data["source"]) for data in group_metadata["datasets"] if data["type"] == "gaf" and data["dataset"] not in exclusions ] # List of dataset metadata to gaf download url click.echo("Found {}".format(", ".join( [ kv[0]["dataset"] for kv in gaf_urls ] ))) downloaded_paths = [] for dataset_metadata, gaf_url in gaf_urls: dataset = dataset_metadata["dataset"] # Local target download path setup - path and then directories path = download_a_dataset_source(group_metadata["id"], dataset_metadata, target_dir, gaf_url, base_download_url=base_download_url) if dataset_metadata["compression"] == "gzip": # Unzip any downloaded file that has gzip, strip of the gzip extension unzipped = os.path.splitext(path)[0] unzip(path, unzipped) path = unzipped else: # otherwise file is coming in uncompressed. But we want to make sure # to zip up the original source also zipup(path) downloaded_paths.append((dataset_metadata, path)) return downloaded_paths
[ "def", "download_source_gafs", "(", "group_metadata", ",", "target_dir", ",", "exclusions", "=", "[", "]", ",", "base_download_url", "=", "None", ")", ":", "gaf_urls", "=", "[", "(", "data", ",", "data", "[", "\"source\"", "]", ")", "for", "data", "in", ...
This looks at a group metadata dictionary and downloads each GAF source that is not in the exclusions list. For each downloaded file, keep track of the path of the file. If the file is zipped, it will unzip it here. This function returns a list of tuples of the dataset dictionary mapped to the downloaded source path.
[ "This", "looks", "at", "a", "group", "metadata", "dictionary", "and", "downloads", "each", "GAF", "source", "that", "is", "not", "in", "the", "exclusions", "list", ".", "For", "each", "downloaded", "file", "keep", "track", "of", "the", "path", "of", "the",...
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/bin/validate.py#L152-L180
train
204,835
biolink/ontobio
ontobio/sim/annotation_scorer.py
AnnotationScorer.get_annotation_sufficiency
def get_annotation_sufficiency( self, profile: List[str], negated_classes: List[str], categories: Optional[List] = None, negation_weight: Optional[float] = .25, category_weight: Optional[float] = .5) -> AnnotationSufficiency: """ Given a list of individuals, return the simple, scaled, and categorical scores """ if categories is None: categories = [enum.value for enum in HpoUpperLevel] ic_map = self.ic_store.get_profile_ic(profile + negated_classes) # Simple score is the weighted average of the present and # explicitly stated negative/absent phenotypes # # Note that we're deviating from the publication # to match the reference java implementation where # mean_max_ic is replaced with max_max_ic: # https://github.com/owlcollab/owltools/blob/452b4a/ # OWLTools-Sim/src/main/java/owltools/sim2/AbstractOwlSim.java#L1038 simple_score = self._get_simple_score( profile, negated_classes, self.ic_store.statistics.mean_mean_ic, self.ic_store.statistics.max_max_ic, self.ic_store.statistics.mean_sum_ic, negation_weight, ic_map ) categorical_score = self._get_categorical_score( profile, negated_classes, categories, negation_weight, ic_map ) scaled_score = self._get_scaled_score( simple_score, categorical_score, category_weight) return AnnotationSufficiency( simple_score=simple_score, scaled_score=scaled_score, categorical_score=categorical_score )
python
def get_annotation_sufficiency( self, profile: List[str], negated_classes: List[str], categories: Optional[List] = None, negation_weight: Optional[float] = .25, category_weight: Optional[float] = .5) -> AnnotationSufficiency: """ Given a list of individuals, return the simple, scaled, and categorical scores """ if categories is None: categories = [enum.value for enum in HpoUpperLevel] ic_map = self.ic_store.get_profile_ic(profile + negated_classes) # Simple score is the weighted average of the present and # explicitly stated negative/absent phenotypes # # Note that we're deviating from the publication # to match the reference java implementation where # mean_max_ic is replaced with max_max_ic: # https://github.com/owlcollab/owltools/blob/452b4a/ # OWLTools-Sim/src/main/java/owltools/sim2/AbstractOwlSim.java#L1038 simple_score = self._get_simple_score( profile, negated_classes, self.ic_store.statistics.mean_mean_ic, self.ic_store.statistics.max_max_ic, self.ic_store.statistics.mean_sum_ic, negation_weight, ic_map ) categorical_score = self._get_categorical_score( profile, negated_classes, categories, negation_weight, ic_map ) scaled_score = self._get_scaled_score( simple_score, categorical_score, category_weight) return AnnotationSufficiency( simple_score=simple_score, scaled_score=scaled_score, categorical_score=categorical_score )
[ "def", "get_annotation_sufficiency", "(", "self", ",", "profile", ":", "List", "[", "str", "]", ",", "negated_classes", ":", "List", "[", "str", "]", ",", "categories", ":", "Optional", "[", "List", "]", "=", "None", ",", "negation_weight", ":", "Optional"...
Given a list of individuals, return the simple, scaled, and categorical scores
[ "Given", "a", "list", "of", "individuals", "return", "the", "simple", "scaled", "and", "categorical", "scores" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/annotation_scorer.py#L18-L58
train
204,836
biolink/ontobio
ontobio/sim/annotation_scorer.py
AnnotationScorer._get_scaled_score
def _get_scaled_score( simple_score: float, categorical_score: float, category_weight: Optional[float] = .5) -> float: """ Scaled score is the weighted average of the simple score and categorical score """ return np.average( [simple_score, categorical_score], weights=[1, category_weight] )
python
def _get_scaled_score( simple_score: float, categorical_score: float, category_weight: Optional[float] = .5) -> float: """ Scaled score is the weighted average of the simple score and categorical score """ return np.average( [simple_score, categorical_score], weights=[1, category_weight] )
[ "def", "_get_scaled_score", "(", "simple_score", ":", "float", ",", "categorical_score", ":", "float", ",", "category_weight", ":", "Optional", "[", "float", "]", "=", ".5", ")", "->", "float", ":", "return", "np", ".", "average", "(", "[", "simple_score", ...
Scaled score is the weighted average of the simple score and categorical score
[ "Scaled", "score", "is", "the", "weighted", "average", "of", "the", "simple", "score", "and", "categorical", "score" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/annotation_scorer.py#L106-L116
train
204,837
biolink/ontobio
ontobio/sim/annotation_scorer.py
AnnotationScorer._get_categorical_score
def _get_categorical_score( self, profile: List, negated_classes: List, categories: List, negation_weight: Optional[float] = 1, ic_map: Optional[Dict[str, float]] = None) -> float: """ The average of the simple scores across a list of categories """ if ic_map is None: ic_map = self.ic_store.get_profile_ic(profile + negated_classes) scores = [] for cat in categories: if cat not in self.ic_store.category_statistics: raise ValueError("statistics for {} not indexed".format(cat)) pos_profile = [cls for cls in profile if cls in self.ic_store.category_statistics[cat].descendants] neg_profile = [cls for cls in negated_classes if cls in self.ic_store.category_statistics[cat].descendants] # Note that we're deviating from the publication # to match the reference java implementation where # mean_max_ic is replaced by max_max_ic scores.append(self._get_simple_score( pos_profile, neg_profile, self.ic_store.category_statistics[cat].mean_mean_ic, self.ic_store.category_statistics[cat].max_max_ic, self.ic_store.category_statistics[cat].mean_sum_ic, negation_weight, ic_map )) return mean(scores)
python
def _get_categorical_score( self, profile: List, negated_classes: List, categories: List, negation_weight: Optional[float] = 1, ic_map: Optional[Dict[str, float]] = None) -> float: """ The average of the simple scores across a list of categories """ if ic_map is None: ic_map = self.ic_store.get_profile_ic(profile + negated_classes) scores = [] for cat in categories: if cat not in self.ic_store.category_statistics: raise ValueError("statistics for {} not indexed".format(cat)) pos_profile = [cls for cls in profile if cls in self.ic_store.category_statistics[cat].descendants] neg_profile = [cls for cls in negated_classes if cls in self.ic_store.category_statistics[cat].descendants] # Note that we're deviating from the publication # to match the reference java implementation where # mean_max_ic is replaced by max_max_ic scores.append(self._get_simple_score( pos_profile, neg_profile, self.ic_store.category_statistics[cat].mean_mean_ic, self.ic_store.category_statistics[cat].max_max_ic, self.ic_store.category_statistics[cat].mean_sum_ic, negation_weight, ic_map )) return mean(scores)
[ "def", "_get_categorical_score", "(", "self", ",", "profile", ":", "List", ",", "negated_classes", ":", "List", ",", "categories", ":", "List", ",", "negation_weight", ":", "Optional", "[", "float", "]", "=", "1", ",", "ic_map", ":", "Optional", "[", "Dict...
The average of the simple scores across a list of categories
[ "The", "average", "of", "the", "simple", "scores", "across", "a", "list", "of", "categories" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/annotation_scorer.py#L118-L153
train
204,838
biolink/ontobio
ontobio/io/entitywriter.py
GpiWriter.write_entity
def write_entity(self, entity): """ Write a single entity to a line in the output file """ db, db_object_id = self._split_prefix(entity) taxon = normalize_taxon(entity["taxon"]["id"]) vals = [ db, db_object_id, entity.get('label'), entity.get('full_name'), entity.get('synonyms'), entity.get('type'), taxon, entity.get('parents'), entity.get('xrefs'), entity.get('properties') ] self._write_row(vals)
python
def write_entity(self, entity): """ Write a single entity to a line in the output file """ db, db_object_id = self._split_prefix(entity) taxon = normalize_taxon(entity["taxon"]["id"]) vals = [ db, db_object_id, entity.get('label'), entity.get('full_name'), entity.get('synonyms'), entity.get('type'), taxon, entity.get('parents'), entity.get('xrefs'), entity.get('properties') ] self._write_row(vals)
[ "def", "write_entity", "(", "self", ",", "entity", ")", ":", "db", ",", "db_object_id", "=", "self", ".", "_split_prefix", "(", "entity", ")", "taxon", "=", "normalize_taxon", "(", "entity", "[", "\"taxon\"", "]", "[", "\"id\"", "]", ")", "vals", "=", ...
Write a single entity to a line in the output file
[ "Write", "a", "single", "entity", "to", "a", "line", "in", "the", "output", "file" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entitywriter.py#L95-L115
train
204,839
biolink/ontobio
ontobio/sim/api/interfaces.py
SimApi.search
def search(self, id_list: Iterable, negated_classes: Iterable, limit: Optional[int], method: Optional) -> List[SimResult]: """ Given an input list of classes, searches for similar lists of classes and provides a ranked list of matches """ pass
python
def search(self, id_list: Iterable, negated_classes: Iterable, limit: Optional[int], method: Optional) -> List[SimResult]: """ Given an input list of classes, searches for similar lists of classes and provides a ranked list of matches """ pass
[ "def", "search", "(", "self", ",", "id_list", ":", "Iterable", ",", "negated_classes", ":", "Iterable", ",", "limit", ":", "Optional", "[", "int", "]", ",", "method", ":", "Optional", ")", "->", "List", "[", "SimResult", "]", ":", "pass" ]
Given an input list of classes, searches for similar lists of classes and provides a ranked list of matches
[ "Given", "an", "input", "list", "of", "classes", "searches", "for", "similar", "lists", "of", "classes", "and", "provides", "a", "ranked", "list", "of", "matches" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/interfaces.py#L10-L19
train
204,840
biolink/ontobio
ontobio/sim/api/interfaces.py
FilteredSearchable.filtered_search
def filtered_search(self, id_list: Iterable, negated_classes: Iterable, limit: Optional[int], taxon_filter: Optional, category_filter: Optional, method: Optional) -> SimResult: """ Given an input iterable of classes or individuals, provides a ranking of similar profiles """ pass
python
def filtered_search(self, id_list: Iterable, negated_classes: Iterable, limit: Optional[int], taxon_filter: Optional, category_filter: Optional, method: Optional) -> SimResult: """ Given an input iterable of classes or individuals, provides a ranking of similar profiles """ pass
[ "def", "filtered_search", "(", "self", ",", "id_list", ":", "Iterable", ",", "negated_classes", ":", "Iterable", ",", "limit", ":", "Optional", "[", "int", "]", ",", "taxon_filter", ":", "Optional", ",", "category_filter", ":", "Optional", ",", "method", ":"...
Given an input iterable of classes or individuals, provides a ranking of similar profiles
[ "Given", "an", "input", "iterable", "of", "classes", "or", "individuals", "provides", "a", "ranking", "of", "similar", "profiles" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/interfaces.py#L44-L55
train
204,841
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine.index_ontology
def index_ontology(self, ont): """ Adds an ontology to the index This iterates through all labels and synonyms in the ontology, creating an index """ self.merged_ontology.merge([ont]) syns = ont.all_synonyms(include_label=True) include_id = self._is_meaningful_ids() logging.info("Include IDs as synonyms: {}".format(include_id)) if include_id: for n in ont.nodes(): v = n # Get fragment if v.startswith('http'): v = re.sub('.*/','',v) v = re.sub('.*#','',v) syns.append(Synonym(n, val=v, pred='label')) logging.info("Indexing {} syns in {}".format(len(syns),ont)) logging.info("Distinct lexical values: {}".format(len(self.lmap.keys()))) for syn in syns: self.index_synonym(syn, ont) for nid in ont.nodes(): self.id_to_ontology_map[nid].append(ont)
python
def index_ontology(self, ont): """ Adds an ontology to the index This iterates through all labels and synonyms in the ontology, creating an index """ self.merged_ontology.merge([ont]) syns = ont.all_synonyms(include_label=True) include_id = self._is_meaningful_ids() logging.info("Include IDs as synonyms: {}".format(include_id)) if include_id: for n in ont.nodes(): v = n # Get fragment if v.startswith('http'): v = re.sub('.*/','',v) v = re.sub('.*#','',v) syns.append(Synonym(n, val=v, pred='label')) logging.info("Indexing {} syns in {}".format(len(syns),ont)) logging.info("Distinct lexical values: {}".format(len(self.lmap.keys()))) for syn in syns: self.index_synonym(syn, ont) for nid in ont.nodes(): self.id_to_ontology_map[nid].append(ont)
[ "def", "index_ontology", "(", "self", ",", "ont", ")", ":", "self", ".", "merged_ontology", ".", "merge", "(", "[", "ont", "]", ")", "syns", "=", "ont", ".", "all_synonyms", "(", "include_label", "=", "True", ")", "include_id", "=", "self", ".", "_is_m...
Adds an ontology to the index This iterates through all labels and synonyms in the ontology, creating an index
[ "Adds", "an", "ontology", "to", "the", "index" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L113-L138
train
204,842
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine.index_synonym
def index_synonym(self, syn, ont): """ Index a synonym Typically not called from outside this object; called by `index_ontology` """ if not syn.val: if syn.pred == 'label': if not self._is_meaningful_ids(): if not ont.is_obsolete(syn.class_id): pass #logging.error('Use meaningful ids if label not present: {}'.format(syn)) else: logging.warning("Incomplete syn: {}".format(syn)) return if self.exclude_obsolete and ont.is_obsolete(syn.class_id): return syn.ontology = ont prefix,_ = ont.prefix_fragment(syn.class_id) v = syn.val caps_match = re.match('[A-Z]+',v) if caps_match: # if > 75% of length is caps, assume abbreviation if caps_match.span()[1] >= len(v)/3: syn.is_abbreviation(True) # chebi 'synonyms' are often not real synonyms # https://github.com/ebi-chebi/ChEBI/issues/3294 if not re.match('.*[a-zA-Z]',v): if prefix != 'CHEBI': logging.warning('Ignoring suspicous synonym: {}'.format(syn)) return v = self._standardize_label(v) # TODO: do this once ahead of time wsmap = {} for w,s in self.wsmap.items(): wsmap[w] = s for ss in self._get_config_val(prefix,'synsets',[]): # TODO: weights wsmap[ss['synonym']] = ss['word'] nv = self._normalize_label(v, wsmap) self._index_synonym_val(syn, v) nweight = self._get_config_val(prefix, 'normalized_form_confidence', 0.8) if nweight > 0 and not syn.is_abbreviation(): if nv != v: nsyn = Synonym(syn.class_id, val=syn.val, pred=syn.pred, lextype=syn.lextype, ontology=ont, confidence=syn.confidence * nweight) self._index_synonym_val(nsyn, nv)
python
def index_synonym(self, syn, ont): """ Index a synonym Typically not called from outside this object; called by `index_ontology` """ if not syn.val: if syn.pred == 'label': if not self._is_meaningful_ids(): if not ont.is_obsolete(syn.class_id): pass #logging.error('Use meaningful ids if label not present: {}'.format(syn)) else: logging.warning("Incomplete syn: {}".format(syn)) return if self.exclude_obsolete and ont.is_obsolete(syn.class_id): return syn.ontology = ont prefix,_ = ont.prefix_fragment(syn.class_id) v = syn.val caps_match = re.match('[A-Z]+',v) if caps_match: # if > 75% of length is caps, assume abbreviation if caps_match.span()[1] >= len(v)/3: syn.is_abbreviation(True) # chebi 'synonyms' are often not real synonyms # https://github.com/ebi-chebi/ChEBI/issues/3294 if not re.match('.*[a-zA-Z]',v): if prefix != 'CHEBI': logging.warning('Ignoring suspicous synonym: {}'.format(syn)) return v = self._standardize_label(v) # TODO: do this once ahead of time wsmap = {} for w,s in self.wsmap.items(): wsmap[w] = s for ss in self._get_config_val(prefix,'synsets',[]): # TODO: weights wsmap[ss['synonym']] = ss['word'] nv = self._normalize_label(v, wsmap) self._index_synonym_val(syn, v) nweight = self._get_config_val(prefix, 'normalized_form_confidence', 0.8) if nweight > 0 and not syn.is_abbreviation(): if nv != v: nsyn = Synonym(syn.class_id, val=syn.val, pred=syn.pred, lextype=syn.lextype, ontology=ont, confidence=syn.confidence * nweight) self._index_synonym_val(nsyn, nv)
[ "def", "index_synonym", "(", "self", ",", "syn", ",", "ont", ")", ":", "if", "not", "syn", ".", "val", ":", "if", "syn", ".", "pred", "==", "'label'", ":", "if", "not", "self", ".", "_is_meaningful_ids", "(", ")", ":", "if", "not", "ont", ".", "i...
Index a synonym Typically not called from outside this object; called by `index_ontology`
[ "Index", "a", "synonym" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L143-L200
train
204,843
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine._normalize_label
def _normalize_label(self, s, wsmap): """ normalized form of a synonym """ toks = [] for tok in list(set(self.npattern.sub(' ', s).split(' '))): if tok in wsmap: tok=wsmap[tok] if tok != "": toks.append(tok) toks.sort() return " ".join(toks)
python
def _normalize_label(self, s, wsmap): """ normalized form of a synonym """ toks = [] for tok in list(set(self.npattern.sub(' ', s).split(' '))): if tok in wsmap: tok=wsmap[tok] if tok != "": toks.append(tok) toks.sort() return " ".join(toks)
[ "def", "_normalize_label", "(", "self", ",", "s", ",", "wsmap", ")", ":", "toks", "=", "[", "]", "for", "tok", "in", "list", "(", "set", "(", "self", ".", "npattern", ".", "sub", "(", "' '", ",", "s", ")", ".", "split", "(", "' '", ")", ")", ...
normalized form of a synonym
[ "normalized", "form", "of", "a", "synonym" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L222-L233
train
204,844
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine._sim
def _sim(self, xg, ancs1, ancs2, pfx1, pfx2): """ Compare two lineages """ xancs1 = set() for a in ancs1: if a in xg: # TODO: restrict this to neighbors in single ontology for n in xg.neighbors(a): pfx = self._id_to_ontology(n) if pfx == pfx2: xancs1.add(n) logging.debug('SIM={}/{} ## {}'.format(len(xancs1.intersection(ancs2)), len(xancs1), xancs1.intersection(ancs2), xancs1)) n_shared = len(xancs1.intersection(ancs2)) n_total = len(xancs1) return (1+n_shared) / (1+n_total), n_shared, n_total
python
def _sim(self, xg, ancs1, ancs2, pfx1, pfx2): """ Compare two lineages """ xancs1 = set() for a in ancs1: if a in xg: # TODO: restrict this to neighbors in single ontology for n in xg.neighbors(a): pfx = self._id_to_ontology(n) if pfx == pfx2: xancs1.add(n) logging.debug('SIM={}/{} ## {}'.format(len(xancs1.intersection(ancs2)), len(xancs1), xancs1.intersection(ancs2), xancs1)) n_shared = len(xancs1.intersection(ancs2)) n_total = len(xancs1) return (1+n_shared) / (1+n_total), n_shared, n_total
[ "def", "_sim", "(", "self", ",", "xg", ",", "ancs1", ",", "ancs2", ",", "pfx1", ",", "pfx2", ")", ":", "xancs1", "=", "set", "(", ")", "for", "a", "in", "ancs1", ":", "if", "a", "in", "xg", ":", "# TODO: restrict this to neighbors in single ontology", ...
Compare two lineages
[ "Compare", "two", "lineages" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L390-L405
train
204,845
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine.compare_to_xrefs
def compare_to_xrefs(self, xg1, xg2): """ Compares a base xref graph with another one """ ont = self.merged_ontology for (i,j,d) in xg1.edges(data=True): ont_left = self._id_to_ontology(i) ont_right = self._id_to_ontology(j) unique_lr = True num_xrefs_left = 0 same_left = False if i in xg2: for j2 in xg2.neighbors(i): ont_right2 = self._id_to_ontology(j2) if ont_right2 == ont_right: unique_lr = False num_xrefs_left += 1 if j2 == j: same_left = True unique_rl = True num_xrefs_right = 0 same_right = False if j in xg2: for i2 in xg2.neighbors(j): ont_left2 = self._id_to_ontology(i2) if ont_left2 == ont_left: unique_rl = False num_xrefs_right += 1 if i2 == i: same_right = True (x,y) = d['idpair'] xg1[x][y]['left_novel'] = num_xrefs_left==0 xg1[x][y]['right_novel'] = num_xrefs_right==0 xg1[x][y]['left_consistent'] = same_left xg1[x][y]['right_consistent'] = same_right
python
def compare_to_xrefs(self, xg1, xg2): """ Compares a base xref graph with another one """ ont = self.merged_ontology for (i,j,d) in xg1.edges(data=True): ont_left = self._id_to_ontology(i) ont_right = self._id_to_ontology(j) unique_lr = True num_xrefs_left = 0 same_left = False if i in xg2: for j2 in xg2.neighbors(i): ont_right2 = self._id_to_ontology(j2) if ont_right2 == ont_right: unique_lr = False num_xrefs_left += 1 if j2 == j: same_left = True unique_rl = True num_xrefs_right = 0 same_right = False if j in xg2: for i2 in xg2.neighbors(j): ont_left2 = self._id_to_ontology(i2) if ont_left2 == ont_left: unique_rl = False num_xrefs_right += 1 if i2 == i: same_right = True (x,y) = d['idpair'] xg1[x][y]['left_novel'] = num_xrefs_left==0 xg1[x][y]['right_novel'] = num_xrefs_right==0 xg1[x][y]['left_consistent'] = same_left xg1[x][y]['right_consistent'] = same_right
[ "def", "compare_to_xrefs", "(", "self", ",", "xg1", ",", "xg2", ")", ":", "ont", "=", "self", ".", "merged_ontology", "for", "(", "i", ",", "j", ",", "d", ")", "in", "xg1", ".", "edges", "(", "data", "=", "True", ")", ":", "ont_left", "=", "self"...
Compares a base xref graph with another one
[ "Compares", "a", "base", "xref", "graph", "with", "another", "one" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L432-L467
train
204,846
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine.assign_best_matches
def assign_best_matches(self, xg): """ For each node in the xref graph, tag best match edges """ logging.info("assigning best matches for {} nodes".format(len(xg.nodes()))) for i in xg.nodes(): xrefmap = self._neighborscores_by_ontology(xg, i) for (ontid,score_node_pairs) in xrefmap.items(): score_node_pairs.sort(reverse=True) (best_score,best_node) = score_node_pairs[0] logging.info("BEST for {}: {} in {} from {}".format(i, best_node, ontid, score_node_pairs)) edge = xg[i][best_node] dirn = self._dirn(edge, i, best_node) best_kwd = 'best_' + dirn if len(score_node_pairs) == 1 or score_node_pairs[0] > score_node_pairs[1]: edge[best_kwd] = 2 else: edge[best_kwd] = 1 for (score,j) in score_node_pairs: edge_ij = xg[i][j] dirn_ij = self._dirn(edge_ij, i, j) edge_ij['cpr_'+dirn_ij] = score / sum([s for s,_ in score_node_pairs]) for (i,j,edge) in xg.edges(data=True): # reciprocal score is set if (A) i is best for j, and (B) j is best for i rs = 0 if 'best_fwd' in edge and 'best_rev' in edge: rs = edge['best_fwd'] * edge['best_rev'] edge['reciprocal_score'] = rs edge['cpr'] = edge['cpr_fwd'] * edge['cpr_rev']
python
def assign_best_matches(self, xg): """ For each node in the xref graph, tag best match edges """ logging.info("assigning best matches for {} nodes".format(len(xg.nodes()))) for i in xg.nodes(): xrefmap = self._neighborscores_by_ontology(xg, i) for (ontid,score_node_pairs) in xrefmap.items(): score_node_pairs.sort(reverse=True) (best_score,best_node) = score_node_pairs[0] logging.info("BEST for {}: {} in {} from {}".format(i, best_node, ontid, score_node_pairs)) edge = xg[i][best_node] dirn = self._dirn(edge, i, best_node) best_kwd = 'best_' + dirn if len(score_node_pairs) == 1 or score_node_pairs[0] > score_node_pairs[1]: edge[best_kwd] = 2 else: edge[best_kwd] = 1 for (score,j) in score_node_pairs: edge_ij = xg[i][j] dirn_ij = self._dirn(edge_ij, i, j) edge_ij['cpr_'+dirn_ij] = score / sum([s for s,_ in score_node_pairs]) for (i,j,edge) in xg.edges(data=True): # reciprocal score is set if (A) i is best for j, and (B) j is best for i rs = 0 if 'best_fwd' in edge and 'best_rev' in edge: rs = edge['best_fwd'] * edge['best_rev'] edge['reciprocal_score'] = rs edge['cpr'] = edge['cpr_fwd'] * edge['cpr_rev']
[ "def", "assign_best_matches", "(", "self", ",", "xg", ")", ":", "logging", ".", "info", "(", "\"assigning best matches for {} nodes\"", ".", "format", "(", "len", "(", "xg", ".", "nodes", "(", ")", ")", ")", ")", "for", "i", "in", "xg", ".", "nodes", "...
For each node in the xref graph, tag best match edges
[ "For", "each", "node", "in", "the", "xref", "graph", "tag", "best", "match", "edges" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L471-L499
train
204,847
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine._best_match_syn
def _best_match_syn(self, sx, sys, scope_map): """ The best match is determined by the highest magnitude weight """ SUBSTRING_WEIGHT = 0.2 WBEST = None sbest = None sxv = self._standardize_label(sx.val) sxp = self._id_to_ontology(sx.class_id) for sy in sys: syv = self._standardize_label(sy.val) syp = self._id_to_ontology(sy.class_id) W = None if sxv == syv: confidence = sx.confidence * sy.confidence if sx.is_abbreviation() or sy.is_abbreviation: confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5) confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5) W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2) elif sxv in syv: W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0)) elif syv in sxv: W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0)) if W is not None: # The best match is determined by the highest magnitude weight if WBEST is None or max(abs(W)) > max(abs(WBEST)): WBEST = W sbest = sy return WBEST, sbest
python
def _best_match_syn(self, sx, sys, scope_map): """ The best match is determined by the highest magnitude weight """ SUBSTRING_WEIGHT = 0.2 WBEST = None sbest = None sxv = self._standardize_label(sx.val) sxp = self._id_to_ontology(sx.class_id) for sy in sys: syv = self._standardize_label(sy.val) syp = self._id_to_ontology(sy.class_id) W = None if sxv == syv: confidence = sx.confidence * sy.confidence if sx.is_abbreviation() or sy.is_abbreviation: confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5) confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5) W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2) elif sxv in syv: W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0)) elif syv in sxv: W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0)) if W is not None: # The best match is determined by the highest magnitude weight if WBEST is None or max(abs(W)) > max(abs(WBEST)): WBEST = W sbest = sy return WBEST, sbest
[ "def", "_best_match_syn", "(", "self", ",", "sx", ",", "sys", ",", "scope_map", ")", ":", "SUBSTRING_WEIGHT", "=", "0.2", "WBEST", "=", "None", "sbest", "=", "None", "sxv", "=", "self", ".", "_standardize_label", "(", "sx", ".", "val", ")", "sxp", "=",...
The best match is determined by the highest magnitude weight
[ "The", "best", "match", "is", "determined", "by", "the", "highest", "magnitude", "weight" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L501-L529
train
204,848
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine.grouped_mappings
def grouped_mappings(self,id): """ return all mappings for a node, grouped by ID prefix """ g = self.get_xref_graph() m = {} for n in g.neighbors(id): [prefix, local] = n.split(':') if prefix not in m: m[prefix] = [] m[prefix].append(n) return m
python
def grouped_mappings(self,id): """ return all mappings for a node, grouped by ID prefix """ g = self.get_xref_graph() m = {} for n in g.neighbors(id): [prefix, local] = n.split(':') if prefix not in m: m[prefix] = [] m[prefix].append(n) return m
[ "def", "grouped_mappings", "(", "self", ",", "id", ")", ":", "g", "=", "self", ".", "get_xref_graph", "(", ")", "m", "=", "{", "}", "for", "n", "in", "g", ".", "neighbors", "(", "id", ")", ":", "[", "prefix", ",", "local", "]", "=", "n", ".", ...
return all mappings for a node, grouped by ID prefix
[ "return", "all", "mappings", "for", "a", "node", "grouped", "by", "ID", "prefix" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L714-L725
train
204,849
biolink/ontobio
ontobio/lexmap.py
LexicalMapEngine.cliques
def cliques(self, xg): """ Return all equivalence set cliques, assuming each edge in the xref graph is treated as equivalent, and all edges in ontology are subClassOf Arguments --------- xg : Graph an xref graph Returns ------- list of sets """ g = nx.DiGraph() for (x,y) in self.merged_ontology.get_graph().edges(): g.add_edge(x,y) for (x,y) in xg.edges(): g.add_edge(x,y) g.add_edge(y,x) return list(strongly_connected_components(g))
python
def cliques(self, xg): """ Return all equivalence set cliques, assuming each edge in the xref graph is treated as equivalent, and all edges in ontology are subClassOf Arguments --------- xg : Graph an xref graph Returns ------- list of sets """ g = nx.DiGraph() for (x,y) in self.merged_ontology.get_graph().edges(): g.add_edge(x,y) for (x,y) in xg.edges(): g.add_edge(x,y) g.add_edge(y,x) return list(strongly_connected_components(g))
[ "def", "cliques", "(", "self", ",", "xg", ")", ":", "g", "=", "nx", ".", "DiGraph", "(", ")", "for", "(", "x", ",", "y", ")", "in", "self", ".", "merged_ontology", ".", "get_graph", "(", ")", ".", "edges", "(", ")", ":", "g", ".", "add_edge", ...
Return all equivalence set cliques, assuming each edge in the xref graph is treated as equivalent, and all edges in ontology are subClassOf Arguments --------- xg : Graph an xref graph Returns ------- list of sets
[ "Return", "all", "equivalence", "set", "cliques", "assuming", "each", "edge", "in", "the", "xref", "graph", "is", "treated", "as", "equivalent", "and", "all", "edges", "in", "ontology", "are", "subClassOf" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L856-L876
train
204,850
biolink/ontobio
ontobio/sparql/rdf2nx.py
RdfMapper.add_triples
def add_triples(self, ontol): """ Adds triples to an ontology object. Currently assumes gocam/lego-style """ rg = self.rdfgraph g = ontol.get_graph() typemap = {} inds = rg.subjects(RDF.type, OWL.NamedIndividual) for s in inds: for (s,p,o) in rg.triples((s,None,None)): s_id = id(s) p_id = id(p) g.add_node(s_id) if isinstance(o,URIRef): o_id = id(o) if p == RDF.type: if o != OWL.NamedIndividual: if s_id not in typemap: typemap[s_id] = [] typemap[s_id].append(o_id) else: g.add_edge(o_id,s_id,pred=p_id) # propagate label from type for s in typemap.keys(): g.nodes[s]['types'] = typemap[s] if self.tbox_ontology is not None: if 'label' not in g.nodes[s]: g.nodes[s]['label'] = ";".join([self.tbox_ontology.label(x) for x in typemap[s] if self.tbox_ontology.label(x) is not None])
python
def add_triples(self, ontol): """ Adds triples to an ontology object. Currently assumes gocam/lego-style """ rg = self.rdfgraph g = ontol.get_graph() typemap = {} inds = rg.subjects(RDF.type, OWL.NamedIndividual) for s in inds: for (s,p,o) in rg.triples((s,None,None)): s_id = id(s) p_id = id(p) g.add_node(s_id) if isinstance(o,URIRef): o_id = id(o) if p == RDF.type: if o != OWL.NamedIndividual: if s_id not in typemap: typemap[s_id] = [] typemap[s_id].append(o_id) else: g.add_edge(o_id,s_id,pred=p_id) # propagate label from type for s in typemap.keys(): g.nodes[s]['types'] = typemap[s] if self.tbox_ontology is not None: if 'label' not in g.nodes[s]: g.nodes[s]['label'] = ";".join([self.tbox_ontology.label(x) for x in typemap[s] if self.tbox_ontology.label(x) is not None])
[ "def", "add_triples", "(", "self", ",", "ontol", ")", ":", "rg", "=", "self", ".", "rdfgraph", "g", "=", "ontol", ".", "get_graph", "(", ")", "typemap", "=", "{", "}", "inds", "=", "rg", ".", "subjects", "(", "RDF", ".", "type", ",", "OWL", ".", ...
Adds triples to an ontology object. Currently assumes gocam/lego-style
[ "Adds", "triples", "to", "an", "ontology", "object", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/rdf2nx.py#L38-L68
train
204,851
biolink/ontobio
ontobio/io/ontol_renderers.py
GraphRenderer.write
def write(self, ontol, **args): """ Write a `ontology` object """ s = self.render(ontol, **args) if self.outfile is None: print(s) else: f = open(self.outfile, 'w') f.write(s) f.close()
python
def write(self, ontol, **args): """ Write a `ontology` object """ s = self.render(ontol, **args) if self.outfile is None: print(s) else: f = open(self.outfile, 'w') f.write(s) f.close()
[ "def", "write", "(", "self", ",", "ontol", ",", "*", "*", "args", ")", ":", "s", "=", "self", ".", "render", "(", "ontol", ",", "*", "*", "args", ")", "if", "self", ".", "outfile", "is", "None", ":", "print", "(", "s", ")", "else", ":", "f", ...
Write a `ontology` object
[ "Write", "a", "ontology", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/ontol_renderers.py#L51-L61
train
204,852
biolink/ontobio
ontobio/io/ontol_renderers.py
GraphRenderer.render_subgraph
def render_subgraph(self, ontol, nodes, **args): """ Render a `ontology` object after inducing a subgraph """ subont = ontol.subontology(nodes, **args) return self.render(subont, **args)
python
def render_subgraph(self, ontol, nodes, **args): """ Render a `ontology` object after inducing a subgraph """ subont = ontol.subontology(nodes, **args) return self.render(subont, **args)
[ "def", "render_subgraph", "(", "self", ",", "ontol", ",", "nodes", ",", "*", "*", "args", ")", ":", "subont", "=", "ontol", ".", "subontology", "(", "nodes", ",", "*", "*", "args", ")", "return", "self", ".", "render", "(", "subont", ",", "*", "*",...
Render a `ontology` object after inducing a subgraph
[ "Render", "a", "ontology", "object", "after", "inducing", "a", "subgraph" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/ontol_renderers.py#L63-L68
train
204,853
biolink/ontobio
ontobio/io/ontol_renderers.py
GraphRenderer.write_subgraph
def write_subgraph(self, ontol, nodes, **args): """ Write a `ontology` object after inducing a subgraph """ subont = ontol.subontology(nodes, **args) self.write(subont, **args)
python
def write_subgraph(self, ontol, nodes, **args): """ Write a `ontology` object after inducing a subgraph """ subont = ontol.subontology(nodes, **args) self.write(subont, **args)
[ "def", "write_subgraph", "(", "self", ",", "ontol", ",", "nodes", ",", "*", "*", "args", ")", ":", "subont", "=", "ontol", ".", "subontology", "(", "nodes", ",", "*", "*", "args", ")", "self", ".", "write", "(", "subont", ",", "*", "*", "args", "...
Write a `ontology` object after inducing a subgraph
[ "Write", "a", "ontology", "object", "after", "inducing", "a", "subgraph" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/ontol_renderers.py#L70-L75
train
204,854
biolink/ontobio
ontobio/io/ontol_renderers.py
GraphRenderer.render_relation
def render_relation(self, r, **args): """ Render an object property """ if r is None: return "." m = self.config.relsymbolmap if r in m: return m[r] return r
python
def render_relation(self, r, **args): """ Render an object property """ if r is None: return "." m = self.config.relsymbolmap if r in m: return m[r] return r
[ "def", "render_relation", "(", "self", ",", "r", ",", "*", "*", "args", ")", ":", "if", "r", "is", "None", ":", "return", "\".\"", "m", "=", "self", ".", "config", ".", "relsymbolmap", "if", "r", "in", "m", ":", "return", "m", "[", "r", "]", "r...
Render an object property
[ "Render", "an", "object", "property" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/ontol_renderers.py#L77-L86
train
204,855
biolink/ontobio
ontobio/io/ontol_renderers.py
GraphRenderer.render_noderef
def render_noderef(self, ontol, n, query_ids=None, **args): """ Render a node object """ if query_ids is None: query_ids = [] marker = "" if n in query_ids: marker = " * " label = ontol.label(n) s = None if label is not None: s = '{} ! {}{}'.format(n, label, marker) else: s = str(n) if self.config.show_text_definition: td = ontol.text_definition(n) if td: s += ' "{}"'.format(td.val) return s
python
def render_noderef(self, ontol, n, query_ids=None, **args): """ Render a node object """ if query_ids is None: query_ids = [] marker = "" if n in query_ids: marker = " * " label = ontol.label(n) s = None if label is not None: s = '{} ! {}{}'.format(n, label, marker) else: s = str(n) if self.config.show_text_definition: td = ontol.text_definition(n) if td: s += ' "{}"'.format(td.val) return s
[ "def", "render_noderef", "(", "self", ",", "ontol", ",", "n", ",", "query_ids", "=", "None", ",", "*", "*", "args", ")", ":", "if", "query_ids", "is", "None", ":", "query_ids", "=", "[", "]", "marker", "=", "\"\"", "if", "n", "in", "query_ids", ":"...
Render a node object
[ "Render", "a", "node", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/ontol_renderers.py#L88-L109
train
204,856
biolink/ontobio
ontobio/io/ontol_renderers.py
GraphRenderer.create
def create(fmt): """ Creates a GraphRenderer """ w = None if fmt == 'tree': w = AsciiTreeGraphRenderer() elif fmt == 'dot': w = DotGraphRenderer(image_format='dot') elif fmt == 'png': w = DotGraphRenderer(image_format='png') elif fmt == 'ndot': w = NativeDotGraphRenderer() elif fmt == 'obo': w = OboFormatGraphRenderer() elif fmt == 'obog': w = OboJsonGraphRenderer() else: w = SimpleListGraphRenderer() return w
python
def create(fmt): """ Creates a GraphRenderer """ w = None if fmt == 'tree': w = AsciiTreeGraphRenderer() elif fmt == 'dot': w = DotGraphRenderer(image_format='dot') elif fmt == 'png': w = DotGraphRenderer(image_format='png') elif fmt == 'ndot': w = NativeDotGraphRenderer() elif fmt == 'obo': w = OboFormatGraphRenderer() elif fmt == 'obog': w = OboJsonGraphRenderer() else: w = SimpleListGraphRenderer() return w
[ "def", "create", "(", "fmt", ")", ":", "w", "=", "None", "if", "fmt", "==", "'tree'", ":", "w", "=", "AsciiTreeGraphRenderer", "(", ")", "elif", "fmt", "==", "'dot'", ":", "w", "=", "DotGraphRenderer", "(", "image_format", "=", "'dot'", ")", "elif", ...
Creates a GraphRenderer
[ "Creates", "a", "GraphRenderer" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/ontol_renderers.py#L112-L131
train
204,857
biolink/ontobio
ontobio/util/user_agent.py
get_user_agent
def get_user_agent(name="ontobio", version=ontobio_version, modules=None, caller_name=None): """ Create a User-Agent string """ user_agent_array = ["{}/{}".format(name, version)] if modules: module_info_array = [] for m in modules: mod_name = m.__name__ mod_version = None if hasattr(m, 'get_version'): mod_version = m.get_version() else: mod_version = m.__version__ module_info_array.append("{}/{}".format(mod_name, mod_version)) if caller_name: module_info_array.append(caller_name) user_agent_array.append("({})".format('; '.join(module_info_array))) else: if caller_name: user_agent_array.append("({})".format(caller_name)) return ' '.join(user_agent_array)
python
def get_user_agent(name="ontobio", version=ontobio_version, modules=None, caller_name=None): """ Create a User-Agent string """ user_agent_array = ["{}/{}".format(name, version)] if modules: module_info_array = [] for m in modules: mod_name = m.__name__ mod_version = None if hasattr(m, 'get_version'): mod_version = m.get_version() else: mod_version = m.__version__ module_info_array.append("{}/{}".format(mod_name, mod_version)) if caller_name: module_info_array.append(caller_name) user_agent_array.append("({})".format('; '.join(module_info_array))) else: if caller_name: user_agent_array.append("({})".format(caller_name)) return ' '.join(user_agent_array)
[ "def", "get_user_agent", "(", "name", "=", "\"ontobio\"", ",", "version", "=", "ontobio_version", ",", "modules", "=", "None", ",", "caller_name", "=", "None", ")", ":", "user_agent_array", "=", "[", "\"{}/{}\"", ".", "format", "(", "name", ",", "version", ...
Create a User-Agent string
[ "Create", "a", "User", "-", "Agent", "string" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/util/user_agent.py#L3-L28
train
204,858
biolink/ontobio
ontobio/sim/api/owlsim3.py
OwlSim3Api.search
def search(self, id_list: List, negated_classes: List, limit: Optional[int], method: Optional) -> List[SimResult]: """ Given an input list of classes or individuals, provides a ranking of similar profiles """ raise NotImplementedError
python
def search(self, id_list: List, negated_classes: List, limit: Optional[int], method: Optional) -> List[SimResult]: """ Given an input list of classes or individuals, provides a ranking of similar profiles """ raise NotImplementedError
[ "def", "search", "(", "self", ",", "id_list", ":", "List", ",", "negated_classes", ":", "List", ",", "limit", ":", "Optional", "[", "int", "]", ",", "method", ":", "Optional", ")", "->", "List", "[", "SimResult", "]", ":", "raise", "NotImplementedError" ...
Given an input list of classes or individuals, provides a ranking of similar profiles
[ "Given", "an", "input", "list", "of", "classes", "or", "individuals", "provides", "a", "ranking", "of", "similar", "profiles" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim3.py#L24-L33
train
204,859
biolink/ontobio
ontobio/io/gafgpibridge.py
GafGpiBridge.convert_association
def convert_association(self, association: Association) -> Entity: """ 'id' is already `join`ed in both the Association and the Entity, so we don't have to worry about what that looks like. We assume it's correct. """ if "header" not in association or association["header"] == False: # print(json.dumps(association, indent=4)) gpi_obj = { 'id': association["subject"]["id"], 'label': association["subject"]["label"], # db_object_symbol, 'full_name': association["subject"]["fullname"], # db_object_name, 'synonyms': association["subject"]["synonyms"], 'type': association["subject"]["type"], #db_object_type, 'parents': "", # GAF does not have this field, but it's optional in GPI 'xrefs': "", # GAF does not have this field, but it's optional in GPI 'taxon': { 'id': association["subject"]["taxon"]["id"] } } return Entity(gpi_obj) return None
python
def convert_association(self, association: Association) -> Entity: """ 'id' is already `join`ed in both the Association and the Entity, so we don't have to worry about what that looks like. We assume it's correct. """ if "header" not in association or association["header"] == False: # print(json.dumps(association, indent=4)) gpi_obj = { 'id': association["subject"]["id"], 'label': association["subject"]["label"], # db_object_symbol, 'full_name': association["subject"]["fullname"], # db_object_name, 'synonyms': association["subject"]["synonyms"], 'type': association["subject"]["type"], #db_object_type, 'parents': "", # GAF does not have this field, but it's optional in GPI 'xrefs': "", # GAF does not have this field, but it's optional in GPI 'taxon': { 'id': association["subject"]["taxon"]["id"] } } return Entity(gpi_obj) return None
[ "def", "convert_association", "(", "self", ",", "association", ":", "Association", ")", "->", "Entity", ":", "if", "\"header\"", "not", "in", "association", "or", "association", "[", "\"header\"", "]", "==", "False", ":", "# print(json.dumps(association, indent=4))"...
'id' is already `join`ed in both the Association and the Entity, so we don't have to worry about what that looks like. We assume it's correct.
[ "id", "is", "already", "join", "ed", "in", "both", "the", "Association", "and", "the", "Entity", "so", "we", "don", "t", "have", "to", "worry", "about", "what", "that", "looks", "like", ".", "We", "assume", "it", "s", "correct", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/gafgpibridge.py#L21-L43
train
204,860
biolink/ontobio
ontobio/ontol.py
Ontology.get_filtered_graph
def get_filtered_graph(self, relations=None, prefix=None): """ Returns a networkx graph for the whole ontology, for a subset of relations Only implemented for eager methods. Implementation notes: currently this is not cached Arguments --------- - relations : list list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all. - prefix : String if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO Return ------ nx.MultiDiGraph A networkx MultiDiGraph object representing the filtered ontology """ # trigger synonym cache self.all_synonyms() self.all_obsoletes() # default method - wrap get_graph srcg = self.get_graph() if prefix is not None: srcg = srcg.subgraph([n for n in srcg.nodes() if n.startswith(prefix+":")]) if relations is None: logger.info("No filtering on "+str(self)) return srcg logger.info("Filtering {} for {}".format(self, relations)) g = nx.MultiDiGraph() # TODO: copy full metadata logger.info("copying nodes") for (n,d) in srcg.nodes(data=True): g.add_node(n, **d) logger.info("copying edges") num_edges = 0 for (x,y,d) in srcg.edges(data=True): if d['pred'] in relations: num_edges += 1 g.add_edge(x,y,**d) logger.info("Filtered edges: {}".format(num_edges)) return g
python
def get_filtered_graph(self, relations=None, prefix=None): """ Returns a networkx graph for the whole ontology, for a subset of relations Only implemented for eager methods. Implementation notes: currently this is not cached Arguments --------- - relations : list list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all. - prefix : String if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO Return ------ nx.MultiDiGraph A networkx MultiDiGraph object representing the filtered ontology """ # trigger synonym cache self.all_synonyms() self.all_obsoletes() # default method - wrap get_graph srcg = self.get_graph() if prefix is not None: srcg = srcg.subgraph([n for n in srcg.nodes() if n.startswith(prefix+":")]) if relations is None: logger.info("No filtering on "+str(self)) return srcg logger.info("Filtering {} for {}".format(self, relations)) g = nx.MultiDiGraph() # TODO: copy full metadata logger.info("copying nodes") for (n,d) in srcg.nodes(data=True): g.add_node(n, **d) logger.info("copying edges") num_edges = 0 for (x,y,d) in srcg.edges(data=True): if d['pred'] in relations: num_edges += 1 g.add_edge(x,y,**d) logger.info("Filtered edges: {}".format(num_edges)) return g
[ "def", "get_filtered_graph", "(", "self", ",", "relations", "=", "None", ",", "prefix", "=", "None", ")", ":", "# trigger synonym cache", "self", ".", "all_synonyms", "(", ")", "self", ".", "all_obsoletes", "(", ")", "# default method - wrap get_graph", "srcg", ...
Returns a networkx graph for the whole ontology, for a subset of relations Only implemented for eager methods. Implementation notes: currently this is not cached Arguments --------- - relations : list list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all. - prefix : String if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO Return ------ nx.MultiDiGraph A networkx MultiDiGraph object representing the filtered ontology
[ "Returns", "a", "networkx", "graph", "for", "the", "whole", "ontology", "for", "a", "subset", "of", "relations" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L88-L134
train
204,861
biolink/ontobio
ontobio/ontol.py
Ontology.merge
def merge(self, ontologies): """ Merges specified ontology into current ontology """ if self.xref_graph is None: self.xref_graph = nx.MultiGraph() logger.info("Merging source: {} xrefs: {}".format(self, len(self.xref_graph.edges()))) for ont in ontologies: logger.info("Merging {} into {}".format(ont, self)) g = self.get_graph() srcg = ont.get_graph() for n in srcg.nodes(): g.add_node(n, **srcg.node[n]) for (o,s,m) in srcg.edges(data=True): g.add_edge(o,s,**m) if ont.xref_graph is not None: for (o,s,m) in ont.xref_graph.edges(data=True): self.xref_graph.add_edge(o,s,**m)
python
def merge(self, ontologies): """ Merges specified ontology into current ontology """ if self.xref_graph is None: self.xref_graph = nx.MultiGraph() logger.info("Merging source: {} xrefs: {}".format(self, len(self.xref_graph.edges()))) for ont in ontologies: logger.info("Merging {} into {}".format(ont, self)) g = self.get_graph() srcg = ont.get_graph() for n in srcg.nodes(): g.add_node(n, **srcg.node[n]) for (o,s,m) in srcg.edges(data=True): g.add_edge(o,s,**m) if ont.xref_graph is not None: for (o,s,m) in ont.xref_graph.edges(data=True): self.xref_graph.add_edge(o,s,**m)
[ "def", "merge", "(", "self", ",", "ontologies", ")", ":", "if", "self", ".", "xref_graph", "is", "None", ":", "self", ".", "xref_graph", "=", "nx", ".", "MultiGraph", "(", ")", "logger", ".", "info", "(", "\"Merging source: {} xrefs: {}\"", ".", "format", ...
Merges specified ontology into current ontology
[ "Merges", "specified", "ontology", "into", "current", "ontology" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L136-L153
train
204,862
biolink/ontobio
ontobio/ontol.py
Ontology.subontology
def subontology(self, nodes=None, minimal=False, relations=None): """ Return a new ontology that is an extract of this one Arguments --------- - nodes: list list of node IDs to include in subontology. If None, all are used - relations: list list of relation IDs to include in subontology. If None, all are used """ g = None if nodes is not None: g = self.subgraph(nodes) else: g = self.get_graph() if minimal: from ontobio.slimmer import get_minimal_subgraph g = get_minimal_subgraph(g, nodes) ont = Ontology(graph=g, xref_graph=self.xref_graph) # TODO - add metadata if relations is not None: g = ont.get_filtered_graph(relations) ont = Ontology(graph=g, xref_graph=self.xref_graph) return ont
python
def subontology(self, nodes=None, minimal=False, relations=None): """ Return a new ontology that is an extract of this one Arguments --------- - nodes: list list of node IDs to include in subontology. If None, all are used - relations: list list of relation IDs to include in subontology. If None, all are used """ g = None if nodes is not None: g = self.subgraph(nodes) else: g = self.get_graph() if minimal: from ontobio.slimmer import get_minimal_subgraph g = get_minimal_subgraph(g, nodes) ont = Ontology(graph=g, xref_graph=self.xref_graph) # TODO - add metadata if relations is not None: g = ont.get_filtered_graph(relations) ont = Ontology(graph=g, xref_graph=self.xref_graph) return ont
[ "def", "subontology", "(", "self", ",", "nodes", "=", "None", ",", "minimal", "=", "False", ",", "relations", "=", "None", ")", ":", "g", "=", "None", "if", "nodes", "is", "not", "None", ":", "g", "=", "self", ".", "subgraph", "(", "nodes", ")", ...
Return a new ontology that is an extract of this one Arguments --------- - nodes: list list of node IDs to include in subontology. If None, all are used - relations: list list of relation IDs to include in subontology. If None, all are used
[ "Return", "a", "new", "ontology", "that", "is", "an", "extract", "of", "this", "one" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L166-L191
train
204,863
biolink/ontobio
ontobio/ontol.py
Ontology.create_slim_mapping
def create_slim_mapping(self, subset=None, subset_nodes=None, relations=None, disable_checks=False): """ Create a dictionary that maps between all nodes in an ontology to a subset Arguments --------- ont : `Ontology` Complete ontology to be mapped. Assumed pre-filtered for relationship types subset : str Name of subset to map to, e.g. goslim_generic nodes : list If no named subset provided, subset is passed in as list of node ids relations : list List of relations to filter on disable_checks: bool Unless this is set, this will prevent a mapping being generated with non-standard relations. The motivation here is that the ontology graph may include relations that it is inappropriate to propagate gene products over, e.g. transports, has-part Return ------ dict maps all nodes in ont to one or more non-redundant nodes in subset Raises ------ ValueError if the subset is empty """ if subset is not None: subset_nodes = self.extract_subset(subset) logger.info("Extracting subset: {} -> {}".format(subset, subset_nodes)) if subset_nodes is None or len(subset_nodes) == 0: raise ValueError("subset nodes is blank") subset_nodes = set(subset_nodes) logger.debug("SUBSET: {}".format(subset_nodes)) # Use a sub-ontology for mapping subont = self if relations is not None: subont = self.subontology(relations=relations) if not disable_checks: for r in subont.relations_used(): if r != 'subClassOf' and r != 'BFO:0000050' and r != 'subPropertyOf': raise ValueError("Not safe to propagate over a graph with edge type: {}".format(r)) m = {} for n in subont.nodes(): ancs = subont.ancestors(n, reflexive=True) ancs_in_subset = subset_nodes.intersection(ancs) m[n] = list(subont.filter_redundant(ancs_in_subset)) return m
python
def create_slim_mapping(self, subset=None, subset_nodes=None, relations=None, disable_checks=False): """ Create a dictionary that maps between all nodes in an ontology to a subset Arguments --------- ont : `Ontology` Complete ontology to be mapped. Assumed pre-filtered for relationship types subset : str Name of subset to map to, e.g. goslim_generic nodes : list If no named subset provided, subset is passed in as list of node ids relations : list List of relations to filter on disable_checks: bool Unless this is set, this will prevent a mapping being generated with non-standard relations. The motivation here is that the ontology graph may include relations that it is inappropriate to propagate gene products over, e.g. transports, has-part Return ------ dict maps all nodes in ont to one or more non-redundant nodes in subset Raises ------ ValueError if the subset is empty """ if subset is not None: subset_nodes = self.extract_subset(subset) logger.info("Extracting subset: {} -> {}".format(subset, subset_nodes)) if subset_nodes is None or len(subset_nodes) == 0: raise ValueError("subset nodes is blank") subset_nodes = set(subset_nodes) logger.debug("SUBSET: {}".format(subset_nodes)) # Use a sub-ontology for mapping subont = self if relations is not None: subont = self.subontology(relations=relations) if not disable_checks: for r in subont.relations_used(): if r != 'subClassOf' and r != 'BFO:0000050' and r != 'subPropertyOf': raise ValueError("Not safe to propagate over a graph with edge type: {}".format(r)) m = {} for n in subont.nodes(): ancs = subont.ancestors(n, reflexive=True) ancs_in_subset = subset_nodes.intersection(ancs) m[n] = list(subont.filter_redundant(ancs_in_subset)) return m
[ "def", "create_slim_mapping", "(", "self", ",", "subset", "=", "None", ",", "subset_nodes", "=", "None", ",", "relations", "=", "None", ",", "disable_checks", "=", "False", ")", ":", "if", "subset", "is", "not", "None", ":", "subset_nodes", "=", "self", ...
Create a dictionary that maps between all nodes in an ontology to a subset Arguments --------- ont : `Ontology` Complete ontology to be mapped. Assumed pre-filtered for relationship types subset : str Name of subset to map to, e.g. goslim_generic nodes : list If no named subset provided, subset is passed in as list of node ids relations : list List of relations to filter on disable_checks: bool Unless this is set, this will prevent a mapping being generated with non-standard relations. The motivation here is that the ontology graph may include relations that it is inappropriate to propagate gene products over, e.g. transports, has-part Return ------ dict maps all nodes in ont to one or more non-redundant nodes in subset Raises ------ ValueError if the subset is empty
[ "Create", "a", "dictionary", "that", "maps", "between", "all", "nodes", "in", "an", "ontology", "to", "a", "subset" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L193-L246
train
204,864
biolink/ontobio
ontobio/ontol.py
Ontology.filter_redundant
def filter_redundant(self, ids): """ Return all non-redundant ids from a list """ sids = set(ids) for id in ids: sids = sids.difference(self.ancestors(id, reflexive=False)) return sids
python
def filter_redundant(self, ids): """ Return all non-redundant ids from a list """ sids = set(ids) for id in ids: sids = sids.difference(self.ancestors(id, reflexive=False)) return sids
[ "def", "filter_redundant", "(", "self", ",", "ids", ")", ":", "sids", "=", "set", "(", "ids", ")", "for", "id", "in", "ids", ":", "sids", "=", "sids", ".", "difference", "(", "self", ".", "ancestors", "(", "id", ",", "reflexive", "=", "False", ")",...
Return all non-redundant ids from a list
[ "Return", "all", "non", "-", "redundant", "ids", "from", "a", "list" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L248-L255
train
204,865
biolink/ontobio
ontobio/ontol.py
Ontology.extract_subset
def extract_subset(self, subset, contract=True): """ Return all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs, or IR fragments """ return [n for n in self.nodes() if subset in self.subsets(n, contract=contract)]
python
def extract_subset(self, subset, contract=True): """ Return all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs, or IR fragments """ return [n for n in self.nodes() if subset in self.subsets(n, contract=contract)]
[ "def", "extract_subset", "(", "self", ",", "subset", ",", "contract", "=", "True", ")", ":", "return", "[", "n", "for", "n", "in", "self", ".", "nodes", "(", ")", "if", "subset", "in", "self", ".", "subsets", "(", "n", ",", "contract", "=", "contra...
Return all nodes in a subset. We assume the oboInOwl encoding of subsets, and subset IDs are IRIs, or IR fragments
[ "Return", "all", "nodes", "in", "a", "subset", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L257-L263
train
204,866
biolink/ontobio
ontobio/ontol.py
Ontology.subsets
def subsets(self, nid, contract=True): """ Retrieves subset ids for a class or ontology object """ n = self.node(nid) subsets = [] meta = self._meta(nid) if 'subsets' in meta: subsets = meta['subsets'] else: subsets = [] if contract: subsets = [self._contract_subset(s) for s in subsets] return subsets
python
def subsets(self, nid, contract=True): """ Retrieves subset ids for a class or ontology object """ n = self.node(nid) subsets = [] meta = self._meta(nid) if 'subsets' in meta: subsets = meta['subsets'] else: subsets = [] if contract: subsets = [self._contract_subset(s) for s in subsets] return subsets
[ "def", "subsets", "(", "self", ",", "nid", ",", "contract", "=", "True", ")", ":", "n", "=", "self", ".", "node", "(", "nid", ")", "subsets", "=", "[", "]", "meta", "=", "self", ".", "_meta", "(", "nid", ")", "if", "'subsets'", "in", "meta", ":...
Retrieves subset ids for a class or ontology object
[ "Retrieves", "subset", "ids", "for", "a", "class", "or", "ontology", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L265-L278
train
204,867
biolink/ontobio
ontobio/ontol.py
Ontology.prefixes
def prefixes(self): """ list all prefixes used """ pset = set() for n in self.nodes(): pfx = self.prefix(n) if pfx is not None: pset.add(pfx) return list(pset)
python
def prefixes(self): """ list all prefixes used """ pset = set() for n in self.nodes(): pfx = self.prefix(n) if pfx is not None: pset.add(pfx) return list(pset)
[ "def", "prefixes", "(", "self", ")", ":", "pset", "=", "set", "(", ")", "for", "n", "in", "self", ".", "nodes", "(", ")", ":", "pfx", "=", "self", ".", "prefix", "(", "n", ")", "if", "pfx", "is", "not", "None", ":", "pset", ".", "add", "(", ...
list all prefixes used
[ "list", "all", "prefixes", "used" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L294-L303
train
204,868
biolink/ontobio
ontobio/ontol.py
Ontology.relations_used
def relations_used(self): """ Return list of all relations used to connect edges """ g = self.get_graph() types = set() for (x,y,d) in g.edges(data=True): types.add(d['pred']) return list(types)
python
def relations_used(self): """ Return list of all relations used to connect edges """ g = self.get_graph() types = set() for (x,y,d) in g.edges(data=True): types.add(d['pred']) return list(types)
[ "def", "relations_used", "(", "self", ")", ":", "g", "=", "self", ".", "get_graph", "(", ")", "types", "=", "set", "(", ")", "for", "(", "x", ",", "y", ",", "d", ")", "in", "g", ".", "edges", "(", "data", "=", "True", ")", ":", "types", ".", ...
Return list of all relations used to connect edges
[ "Return", "list", "of", "all", "relations", "used", "to", "connect", "edges" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L362-L370
train
204,869
biolink/ontobio
ontobio/ontol.py
Ontology.child_parent_relations
def child_parent_relations(self, subj, obj, graph=None): """ Get all relationship type ids between a subject and a parent. Typically only one relation ID returned, but in some cases there may be more than one Arguments --------- subj: string Child (subject) id obj: string Parent (object) id Returns ------- list """ if graph is None: graph = self.get_graph() preds = set() for _,ea in graph[obj][subj].items(): preds.add(ea['pred']) logger.debug('{}->{} = {}'.format(subj,obj,preds)) return preds
python
def child_parent_relations(self, subj, obj, graph=None): """ Get all relationship type ids between a subject and a parent. Typically only one relation ID returned, but in some cases there may be more than one Arguments --------- subj: string Child (subject) id obj: string Parent (object) id Returns ------- list """ if graph is None: graph = self.get_graph() preds = set() for _,ea in graph[obj][subj].items(): preds.add(ea['pred']) logger.debug('{}->{} = {}'.format(subj,obj,preds)) return preds
[ "def", "child_parent_relations", "(", "self", ",", "subj", ",", "obj", ",", "graph", "=", "None", ")", ":", "if", "graph", "is", "None", ":", "graph", "=", "self", ".", "get_graph", "(", ")", "preds", "=", "set", "(", ")", "for", "_", ",", "ea", ...
Get all relationship type ids between a subject and a parent. Typically only one relation ID returned, but in some cases there may be more than one Arguments --------- subj: string Child (subject) id obj: string Parent (object) id Returns ------- list
[ "Get", "all", "relationship", "type", "ids", "between", "a", "subject", "and", "a", "parent", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L375-L398
train
204,870
biolink/ontobio
ontobio/ontol.py
Ontology.parents
def parents(self, node, relations=None): """ Return all direct parents of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter """ g = self.get_graph() if node in g: parents = list(g.predecessors(node)) if relations is None: return parents else: rset = set(relations) return [p for p in parents if len(self.child_parent_relations(node, p, graph=g).intersection(rset)) > 0 ] else: return []
python
def parents(self, node, relations=None): """ Return all direct parents of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter """ g = self.get_graph() if node in g: parents = list(g.predecessors(node)) if relations is None: return parents else: rset = set(relations) return [p for p in parents if len(self.child_parent_relations(node, p, graph=g).intersection(rset)) > 0 ] else: return []
[ "def", "parents", "(", "self", ",", "node", ",", "relations", "=", "None", ")", ":", "g", "=", "self", ".", "get_graph", "(", ")", "if", "node", "in", "g", ":", "parents", "=", "list", "(", "g", ".", "predecessors", "(", "node", ")", ")", "if", ...
Return all direct parents of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter
[ "Return", "all", "direct", "parents", "of", "specified", "node", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L400-L423
train
204,871
biolink/ontobio
ontobio/ontol.py
Ontology.children
def children(self, node, relations=None): """ Return all direct children of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter """ g = self.get_graph() if node in g: children = list(g.successors(node)) if relations is None: return children else: rset = set(relations) return [c for c in children if len(self.child_parent_relations(c, node, graph=g).intersection(rset)) > 0 ] else: return []
python
def children(self, node, relations=None): """ Return all direct children of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter """ g = self.get_graph() if node in g: children = list(g.successors(node)) if relations is None: return children else: rset = set(relations) return [c for c in children if len(self.child_parent_relations(c, node, graph=g).intersection(rset)) > 0 ] else: return []
[ "def", "children", "(", "self", ",", "node", ",", "relations", "=", "None", ")", ":", "g", "=", "self", ".", "get_graph", "(", ")", "if", "node", "in", "g", ":", "children", "=", "list", "(", "g", ".", "successors", "(", "node", ")", ")", "if", ...
Return all direct children of specified node. Wraps networkx by default. Arguments --------- node: string identifier for node in ontology relations: list of strings list of relation (object property) IDs used to filter
[ "Return", "all", "direct", "children", "of", "specified", "node", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L425-L452
train
204,872
biolink/ontobio
ontobio/ontol.py
Ontology.ancestors
def ancestors(self, node, relations=None, reflexive=False): """Return all ancestors of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] ancestor node IDs """ if reflexive: ancs = self.ancestors(node, relations, reflexive=False) ancs.append(node) return ancs g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) if node in g: return list(nx.ancestors(g, node)) else: return []
python
def ancestors(self, node, relations=None, reflexive=False): """Return all ancestors of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] ancestor node IDs """ if reflexive: ancs = self.ancestors(node, relations, reflexive=False) ancs.append(node) return ancs g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) if node in g: return list(nx.ancestors(g, node)) else: return []
[ "def", "ancestors", "(", "self", ",", "node", ",", "relations", "=", "None", ",", "reflexive", "=", "False", ")", ":", "if", "reflexive", ":", "ancs", "=", "self", ".", "ancestors", "(", "node", ",", "relations", ",", "reflexive", "=", "False", ")", ...
Return all ancestors of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] ancestor node IDs
[ "Return", "all", "ancestors", "of", "specified", "node", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L454-L489
train
204,873
biolink/ontobio
ontobio/ontol.py
Ontology.descendants
def descendants(self, node, relations=None, reflexive=False): """ Returns all descendants of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] descendant node IDs """ if reflexive: decs = self.descendants(node, relations, reflexive=False) decs.append(node) return decs g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) if node in g: return list(nx.descendants(g, node)) else: return []
python
def descendants(self, node, relations=None, reflexive=False): """ Returns all descendants of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] descendant node IDs """ if reflexive: decs = self.descendants(node, relations, reflexive=False) decs.append(node) return decs g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) if node in g: return list(nx.descendants(g, node)) else: return []
[ "def", "descendants", "(", "self", ",", "node", ",", "relations", "=", "None", ",", "reflexive", "=", "False", ")", ":", "if", "reflexive", ":", "decs", "=", "self", ".", "descendants", "(", "node", ",", "relations", ",", "reflexive", "=", "False", ")"...
Returns all descendants of specified node. The default implementation is to use networkx, but some implementations of the Ontology class may use a database or service backed implementation, for large graphs. Arguments --------- node : str identifier for node in ontology reflexive : bool if true, return query node in graph relations : list relation (object property) IDs used to filter Returns ------- list[str] descendant node IDs
[ "Returns", "all", "descendants", "of", "specified", "node", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L491-L526
train
204,874
biolink/ontobio
ontobio/ontol.py
Ontology.get_roots
def get_roots(self, relations=None, prefix=None): """ Get all nodes that lack parents Arguments --------- relations : list[str] list of relations used to filter prefix : str E.g. GO. Exclude nodes that lack this prefix when testing parentage """ g = self.get_filtered_graph(relations=relations, prefix=prefix) # note: we also eliminate any singletons, which includes obsolete classes roots = [n for n in g.nodes() if len(list(g.predecessors(n))) == 0 and len(list(g.successors(n))) > 0] return roots
python
def get_roots(self, relations=None, prefix=None): """ Get all nodes that lack parents Arguments --------- relations : list[str] list of relations used to filter prefix : str E.g. GO. Exclude nodes that lack this prefix when testing parentage """ g = self.get_filtered_graph(relations=relations, prefix=prefix) # note: we also eliminate any singletons, which includes obsolete classes roots = [n for n in g.nodes() if len(list(g.predecessors(n))) == 0 and len(list(g.successors(n))) > 0] return roots
[ "def", "get_roots", "(", "self", ",", "relations", "=", "None", ",", "prefix", "=", "None", ")", ":", "g", "=", "self", ".", "get_filtered_graph", "(", "relations", "=", "relations", ",", "prefix", "=", "prefix", ")", "# note: we also eliminate any singletons,...
Get all nodes that lack parents Arguments --------- relations : list[str] list of relations used to filter prefix : str E.g. GO. Exclude nodes that lack this prefix when testing parentage
[ "Get", "all", "nodes", "that", "lack", "parents" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L574-L588
train
204,875
biolink/ontobio
ontobio/ontol.py
Ontology.get_level
def get_level(self, level, relations=None, **args): """ Get all nodes at a particular level Arguments --------- relations : list[str] list of relations used to filter """ g = self.get_filtered_graph(relations) nodes = self.get_roots(relations=relations, **args) for i in range(level): logger.info(" ITERATING TO LEVEL: {} NODES: {}".format(i, nodes)) nodes = [c for n in nodes for c in g.successors(n)] logger.info(" FINAL: {}".format(nodes)) return nodes
python
def get_level(self, level, relations=None, **args): """ Get all nodes at a particular level Arguments --------- relations : list[str] list of relations used to filter """ g = self.get_filtered_graph(relations) nodes = self.get_roots(relations=relations, **args) for i in range(level): logger.info(" ITERATING TO LEVEL: {} NODES: {}".format(i, nodes)) nodes = [c for n in nodes for c in g.successors(n)] logger.info(" FINAL: {}".format(nodes)) return nodes
[ "def", "get_level", "(", "self", ",", "level", ",", "relations", "=", "None", ",", "*", "*", "args", ")", ":", "g", "=", "self", ".", "get_filtered_graph", "(", "relations", ")", "nodes", "=", "self", ".", "get_roots", "(", "relations", "=", "relations...
Get all nodes at a particular level Arguments --------- relations : list[str] list of relations used to filter
[ "Get", "all", "nodes", "at", "a", "particular", "level" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L590-L606
train
204,876
biolink/ontobio
ontobio/ontol.py
Ontology.parent_index
def parent_index(self, relations=None): """ Returns a mapping of nodes to all direct parents Arguments --------- relations : list[str] list of relations used to filter Returns: list list of lists [[CLASS_1, PARENT_1,1, ..., PARENT_1,N], [CLASS_2, PARENT_2,1, PARENT_2,2, ... ] ... ] """ g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) l = [] for n in g: l.append([n] + list(g.predecessors(n))) return l
python
def parent_index(self, relations=None): """ Returns a mapping of nodes to all direct parents Arguments --------- relations : list[str] list of relations used to filter Returns: list list of lists [[CLASS_1, PARENT_1,1, ..., PARENT_1,N], [CLASS_2, PARENT_2,1, PARENT_2,2, ... ] ... ] """ g = None if relations is None: g = self.get_graph() else: g = self.get_filtered_graph(relations) l = [] for n in g: l.append([n] + list(g.predecessors(n))) return l
[ "def", "parent_index", "(", "self", ",", "relations", "=", "None", ")", ":", "g", "=", "None", "if", "relations", "is", "None", ":", "g", "=", "self", ".", "get_graph", "(", ")", "else", ":", "g", "=", "self", ".", "get_filtered_graph", "(", "relatio...
Returns a mapping of nodes to all direct parents Arguments --------- relations : list[str] list of relations used to filter Returns: list list of lists [[CLASS_1, PARENT_1,1, ..., PARENT_1,N], [CLASS_2, PARENT_2,1, PARENT_2,2, ... ] ... ]
[ "Returns", "a", "mapping", "of", "nodes", "to", "all", "direct", "parents" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L608-L629
train
204,877
biolink/ontobio
ontobio/ontol.py
Ontology.text_definition
def text_definition(self, nid): """ Retrieves logical definitions for a class or relation id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- TextDefinition """ tdefs = [] meta = self._meta(nid) if 'definition' in meta: obj = meta['definition'] return TextDefinition(nid, **obj) else: return None
python
def text_definition(self, nid): """ Retrieves logical definitions for a class or relation id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- TextDefinition """ tdefs = [] meta = self._meta(nid) if 'definition' in meta: obj = meta['definition'] return TextDefinition(nid, **obj) else: return None
[ "def", "text_definition", "(", "self", ",", "nid", ")", ":", "tdefs", "=", "[", "]", "meta", "=", "self", ".", "_meta", "(", "nid", ")", "if", "'definition'", "in", "meta", ":", "obj", "=", "meta", "[", "'definition'", "]", "return", "TextDefinition", ...
Retrieves logical definitions for a class or relation id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- TextDefinition
[ "Retrieves", "logical", "definitions", "for", "a", "class", "or", "relation", "id" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L631-L650
train
204,878
biolink/ontobio
ontobio/ontol.py
Ontology.logical_definitions
def logical_definitions(self, nid): """ Retrieves logical definitions for a class id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- LogicalDefinition """ ldefs = self.all_logical_definitions if ldefs is not None: #print("TESTING: {} AGAINST LD: {}".format(nid, str(ldefs))) return [x for x in ldefs if x.class_id == nid] else: return []
python
def logical_definitions(self, nid): """ Retrieves logical definitions for a class id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- LogicalDefinition """ ldefs = self.all_logical_definitions if ldefs is not None: #print("TESTING: {} AGAINST LD: {}".format(nid, str(ldefs))) return [x for x in ldefs if x.class_id == nid] else: return []
[ "def", "logical_definitions", "(", "self", ",", "nid", ")", ":", "ldefs", "=", "self", ".", "all_logical_definitions", "if", "ldefs", "is", "not", "None", ":", "#print(\"TESTING: {} AGAINST LD: {}\".format(nid, str(ldefs)))", "return", "[", "x", "for", "x", "in", ...
Retrieves logical definitions for a class id Arguments --------- nid : str Node identifier for entity to be queried Returns ------- LogicalDefinition
[ "Retrieves", "logical", "definitions", "for", "a", "class", "id" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L653-L671
train
204,879
biolink/ontobio
ontobio/ontol.py
Ontology.get_property_chain_axioms
def get_property_chain_axioms(self, nid): """ Retrieves property chain axioms for a class id Arguments --------- nid : str Node identifier for relation to be queried Returns ------- PropertyChainAxiom """ pcas = self.all_property_chain_axioms if pcas is not None: return [x for x in pcas if x.predicate_id == nid] else: return []
python
def get_property_chain_axioms(self, nid): """ Retrieves property chain axioms for a class id Arguments --------- nid : str Node identifier for relation to be queried Returns ------- PropertyChainAxiom """ pcas = self.all_property_chain_axioms if pcas is not None: return [x for x in pcas if x.predicate_id == nid] else: return []
[ "def", "get_property_chain_axioms", "(", "self", ",", "nid", ")", ":", "pcas", "=", "self", ".", "all_property_chain_axioms", "if", "pcas", "is", "not", "None", ":", "return", "[", "x", "for", "x", "in", "pcas", "if", "x", ".", "predicate_id", "==", "nid...
Retrieves property chain axioms for a class id Arguments --------- nid : str Node identifier for relation to be queried Returns ------- PropertyChainAxiom
[ "Retrieves", "property", "chain", "axioms", "for", "a", "class", "id" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L673-L690
train
204,880
biolink/ontobio
ontobio/ontol.py
Ontology.synonyms
def synonyms(self, nid, include_label=False): """ Retrieves synonym objects for a class Arguments --------- nid : str Node identifier for entity to be queried include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects """ n = self.node(nid) syns = [] if 'meta' in n: meta = n['meta'] if 'synonyms' in meta: for obj in meta['synonyms']: syns.append(Synonym(nid, **obj)) if include_label: syns.append(Synonym(nid, val=self.label(nid), pred='label')) return syns
python
def synonyms(self, nid, include_label=False): """ Retrieves synonym objects for a class Arguments --------- nid : str Node identifier for entity to be queried include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects """ n = self.node(nid) syns = [] if 'meta' in n: meta = n['meta'] if 'synonyms' in meta: for obj in meta['synonyms']: syns.append(Synonym(nid, **obj)) if include_label: syns.append(Synonym(nid, val=self.label(nid), pred='label')) return syns
[ "def", "synonyms", "(", "self", ",", "nid", ",", "include_label", "=", "False", ")", ":", "n", "=", "self", ".", "node", "(", "nid", ")", "syns", "=", "[", "]", "if", "'meta'", "in", "n", ":", "meta", "=", "n", "[", "'meta'", "]", "if", "'synon...
Retrieves synonym objects for a class Arguments --------- nid : str Node identifier for entity to be queried include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects
[ "Retrieves", "synonym", "objects", "for", "a", "class" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L761-L786
train
204,881
biolink/ontobio
ontobio/ontol.py
Ontology.add_node
def add_node(self, id, label=None, type='CLASS', meta=None): """ Add a new node to the ontology """ g = self.get_graph() if meta is None: meta={} g.add_node(id, label=label, type=type, meta=meta)
python
def add_node(self, id, label=None, type='CLASS', meta=None): """ Add a new node to the ontology """ g = self.get_graph() if meta is None: meta={} g.add_node(id, label=label, type=type, meta=meta)
[ "def", "add_node", "(", "self", ",", "id", ",", "label", "=", "None", ",", "type", "=", "'CLASS'", ",", "meta", "=", "None", ")", ":", "g", "=", "self", ".", "get_graph", "(", ")", "if", "meta", "is", "None", ":", "meta", "=", "{", "}", "g", ...
Add a new node to the ontology
[ "Add", "a", "new", "node", "to", "the", "ontology" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L788-L795
train
204,882
biolink/ontobio
ontobio/ontol.py
Ontology.inline_xref_graph
def inline_xref_graph(self): """ Copy contents of xref_graph to inlined meta object for each node """ xg = self.xref_graph for n in self.nodes(): if n in xg: self._add_meta_element(n, 'xrefs', [{'val':x} for x in xg.neighbors(n)])
python
def inline_xref_graph(self): """ Copy contents of xref_graph to inlined meta object for each node """ xg = self.xref_graph for n in self.nodes(): if n in xg: self._add_meta_element(n, 'xrefs', [{'val':x} for x in xg.neighbors(n)])
[ "def", "inline_xref_graph", "(", "self", ")", ":", "xg", "=", "self", ".", "xref_graph", "for", "n", "in", "self", ".", "nodes", "(", ")", ":", "if", "n", "in", "xg", ":", "self", ".", "_add_meta_element", "(", "n", ",", "'xrefs'", ",", "[", "{", ...
Copy contents of xref_graph to inlined meta object for each node
[ "Copy", "contents", "of", "xref_graph", "to", "inlined", "meta", "object", "for", "each", "node" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L816-L823
train
204,883
biolink/ontobio
ontobio/ontol.py
Ontology.add_parent
def add_parent(self, id, pid, relation='subClassOf'): """ Add a new edge to the ontology """ g = self.get_graph() g.add_edge(pid, id, pred=relation)
python
def add_parent(self, id, pid, relation='subClassOf'): """ Add a new edge to the ontology """ g = self.get_graph() g.add_edge(pid, id, pred=relation)
[ "def", "add_parent", "(", "self", ",", "id", ",", "pid", ",", "relation", "=", "'subClassOf'", ")", ":", "g", "=", "self", ".", "get_graph", "(", ")", "g", ".", "add_edge", "(", "pid", ",", "id", ",", "pred", "=", "relation", ")" ]
Add a new edge to the ontology
[ "Add", "a", "new", "edge", "to", "the", "ontology" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L825-L830
train
204,884
biolink/ontobio
ontobio/ontol.py
Ontology.add_xref
def add_xref(self, id, xref): """ Adds an xref to the xref graph """ # note: does not update meta object if self.xref_graph is None: self.xref_graph = nx.MultiGraph() self.xref_graph.add_edge(xref, id)
python
def add_xref(self, id, xref): """ Adds an xref to the xref graph """ # note: does not update meta object if self.xref_graph is None: self.xref_graph = nx.MultiGraph() self.xref_graph.add_edge(xref, id)
[ "def", "add_xref", "(", "self", ",", "id", ",", "xref", ")", ":", "# note: does not update meta object", "if", "self", ".", "xref_graph", "is", "None", ":", "self", ".", "xref_graph", "=", "nx", ".", "MultiGraph", "(", ")", "self", ".", "xref_graph", ".", ...
Adds an xref to the xref graph
[ "Adds", "an", "xref", "to", "the", "xref", "graph" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L832-L839
train
204,885
biolink/ontobio
ontobio/ontol.py
Ontology.add_synonym
def add_synonym(self, syn): """ Adds a synonym for a node """ n = self.node(syn.class_id) if 'meta' not in n: n['meta'] = {} meta = n['meta'] if 'synonyms' not in meta: meta['synonyms'] = [] meta['synonyms'].append(syn.as_dict())
python
def add_synonym(self, syn): """ Adds a synonym for a node """ n = self.node(syn.class_id) if 'meta' not in n: n['meta'] = {} meta = n['meta'] if 'synonyms' not in meta: meta['synonyms'] = [] meta['synonyms'].append(syn.as_dict())
[ "def", "add_synonym", "(", "self", ",", "syn", ")", ":", "n", "=", "self", ".", "node", "(", "syn", ".", "class_id", ")", "if", "'meta'", "not", "in", "n", ":", "n", "[", "'meta'", "]", "=", "{", "}", "meta", "=", "n", "[", "'meta'", "]", "if...
Adds a synonym for a node
[ "Adds", "a", "synonym", "for", "a", "node" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L841-L851
train
204,886
biolink/ontobio
ontobio/ontol.py
Ontology.add_to_subset
def add_to_subset(self, id, s): """ Adds a node to a subset """ n = self.node(id) if 'meta' not in n: n['meta'] = {} meta = n['meta'] if 'subsets' not in meta: meta['subsets'] = [] meta['subsets'].append(s)
python
def add_to_subset(self, id, s): """ Adds a node to a subset """ n = self.node(id) if 'meta' not in n: n['meta'] = {} meta = n['meta'] if 'subsets' not in meta: meta['subsets'] = [] meta['subsets'].append(s)
[ "def", "add_to_subset", "(", "self", ",", "id", ",", "s", ")", ":", "n", "=", "self", ".", "node", "(", "id", ")", "if", "'meta'", "not", "in", "n", ":", "n", "[", "'meta'", "]", "=", "{", "}", "meta", "=", "n", "[", "'meta'", "]", "if", "'...
Adds a node to a subset
[ "Adds", "a", "node", "to", "a", "subset" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L853-L863
train
204,887
biolink/ontobio
ontobio/ontol.py
Ontology.all_synonyms
def all_synonyms(self, include_label=False): """ Retrieves all synonyms Arguments --------- include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects """ syns = [] for n in self.nodes(): syns = syns + self.synonyms(n, include_label=include_label) return syns
python
def all_synonyms(self, include_label=False): """ Retrieves all synonyms Arguments --------- include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects """ syns = [] for n in self.nodes(): syns = syns + self.synonyms(n, include_label=include_label) return syns
[ "def", "all_synonyms", "(", "self", ",", "include_label", "=", "False", ")", ":", "syns", "=", "[", "]", "for", "n", "in", "self", ".", "nodes", "(", ")", ":", "syns", "=", "syns", "+", "self", ".", "synonyms", "(", "n", ",", "include_label", "=", ...
Retrieves all synonyms Arguments --------- include_label : bool If True, include label/names as Synonym objects Returns ------- list[Synonym] :class:`Synonym` objects
[ "Retrieves", "all", "synonyms" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L865-L882
train
204,888
biolink/ontobio
ontobio/ontol.py
Ontology.label
def label(self, nid, id_if_null=False): """ Fetches label for a node Arguments --------- nid : str Node identifier for entity to be queried id_if_null : bool If True and node has no label return id as label Return ------ str """ g = self.get_graph() if nid in g: n = g.node[nid] if 'label' in n: return n['label'] else: if id_if_null: return nid else: return None else: if id_if_null: return nid else: return None
python
def label(self, nid, id_if_null=False): """ Fetches label for a node Arguments --------- nid : str Node identifier for entity to be queried id_if_null : bool If True and node has no label return id as label Return ------ str """ g = self.get_graph() if nid in g: n = g.node[nid] if 'label' in n: return n['label'] else: if id_if_null: return nid else: return None else: if id_if_null: return nid else: return None
[ "def", "label", "(", "self", ",", "nid", ",", "id_if_null", "=", "False", ")", ":", "g", "=", "self", ".", "get_graph", "(", ")", "if", "nid", "in", "g", ":", "n", "=", "g", ".", "node", "[", "nid", "]", "if", "'label'", "in", "n", ":", "retu...
Fetches label for a node Arguments --------- nid : str Node identifier for entity to be queried id_if_null : bool If True and node has no label return id as label Return ------ str
[ "Fetches", "label", "for", "a", "node" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L890-L919
train
204,889
biolink/ontobio
ontobio/ontol.py
Ontology.xrefs
def xrefs(self, nid, bidirectional=False): """ Fetches xrefs for a node Arguments --------- nid : str Node identifier for entity to be queried bidirection : bool If True, include nodes xreffed to nid Return ------ list[str] """ if self.xref_graph is not None: xg = self.xref_graph if nid not in xg: return [] if bidirectional: return list(xg.neighbors(nid)) else: return [x for x in xg.neighbors(nid) if xg[nid][x][0]['source'] == nid] return []
python
def xrefs(self, nid, bidirectional=False): """ Fetches xrefs for a node Arguments --------- nid : str Node identifier for entity to be queried bidirection : bool If True, include nodes xreffed to nid Return ------ list[str] """ if self.xref_graph is not None: xg = self.xref_graph if nid not in xg: return [] if bidirectional: return list(xg.neighbors(nid)) else: return [x for x in xg.neighbors(nid) if xg[nid][x][0]['source'] == nid] return []
[ "def", "xrefs", "(", "self", ",", "nid", ",", "bidirectional", "=", "False", ")", ":", "if", "self", ".", "xref_graph", "is", "not", "None", ":", "xg", "=", "self", ".", "xref_graph", "if", "nid", "not", "in", "xg", ":", "return", "[", "]", "if", ...
Fetches xrefs for a node Arguments --------- nid : str Node identifier for entity to be queried bidirection : bool If True, include nodes xreffed to nid Return ------ list[str]
[ "Fetches", "xrefs", "for", "a", "node" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L921-L945
train
204,890
biolink/ontobio
ontobio/ontol.py
Ontology.resolve_names
def resolve_names(self, names, synonyms=False, **args): """ returns a list of identifiers based on an input list of labels and identifiers. Arguments --------- names: list search terms. '%' treated as wildcard synonyms: bool if true, search on synonyms in addition to labels is_regex : bool if true, treats each name as a regular expression is_partial_match : bool if true, treats each name as a regular expression .*name.* """ g = self.get_graph() r_ids = [] for n in names: logger.debug("Searching for {} syns={}".format(n,synonyms)) if len(n.split(":")) == 2: r_ids.append(n) else: matches = set([nid for nid in g.nodes() if self._is_match(self.label(nid), n, **args)]) if synonyms: logger.debug("Searching syns for {}".format(names)) for nid in g.nodes(): for s in self.synonyms(nid): if self._is_match(s.val, n, **args): matches.add(nid) r_ids += list(matches) return r_ids
python
def resolve_names(self, names, synonyms=False, **args): """ returns a list of identifiers based on an input list of labels and identifiers. Arguments --------- names: list search terms. '%' treated as wildcard synonyms: bool if true, search on synonyms in addition to labels is_regex : bool if true, treats each name as a regular expression is_partial_match : bool if true, treats each name as a regular expression .*name.* """ g = self.get_graph() r_ids = [] for n in names: logger.debug("Searching for {} syns={}".format(n,synonyms)) if len(n.split(":")) == 2: r_ids.append(n) else: matches = set([nid for nid in g.nodes() if self._is_match(self.label(nid), n, **args)]) if synonyms: logger.debug("Searching syns for {}".format(names)) for nid in g.nodes(): for s in self.synonyms(nid): if self._is_match(s.val, n, **args): matches.add(nid) r_ids += list(matches) return r_ids
[ "def", "resolve_names", "(", "self", ",", "names", ",", "synonyms", "=", "False", ",", "*", "*", "args", ")", ":", "g", "=", "self", ".", "get_graph", "(", ")", "r_ids", "=", "[", "]", "for", "n", "in", "names", ":", "logger", ".", "debug", "(", ...
returns a list of identifiers based on an input list of labels and identifiers. Arguments --------- names: list search terms. '%' treated as wildcard synonyms: bool if true, search on synonyms in addition to labels is_regex : bool if true, treats each name as a regular expression is_partial_match : bool if true, treats each name as a regular expression .*name.*
[ "returns", "a", "list", "of", "identifiers", "based", "on", "an", "input", "list", "of", "labels", "and", "identifiers", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L948-L978
train
204,891
biolink/ontobio
bin/qbiogolr.py
search_golr_wrap
def search_golr_wrap(id, category, **args): """ performs searches in both directions """ #assocs1 = search_associations_compact(object=id, subject_category=category, **args) #assocs2 = search_associations_compact(subject=id, object_category=category, **args) assocs1, facets1 = search_compact_wrap(object=id, subject_category=category, **args) assocs2, facets2 = search_compact_wrap(subject=id, object_category=category, **args) facets = facets1 if len(assocs2) > 0: facets = facets2 return assocs1 + assocs2, facets
python
def search_golr_wrap(id, category, **args): """ performs searches in both directions """ #assocs1 = search_associations_compact(object=id, subject_category=category, **args) #assocs2 = search_associations_compact(subject=id, object_category=category, **args) assocs1, facets1 = search_compact_wrap(object=id, subject_category=category, **args) assocs2, facets2 = search_compact_wrap(subject=id, object_category=category, **args) facets = facets1 if len(assocs2) > 0: facets = facets2 return assocs1 + assocs2, facets
[ "def", "search_golr_wrap", "(", "id", ",", "category", ",", "*", "*", "args", ")", ":", "#assocs1 = search_associations_compact(object=id, subject_category=category, **args)", "#assocs2 = search_associations_compact(subject=id, object_category=category, **args)", "assocs1", ",", "fac...
performs searches in both directions
[ "performs", "searches", "in", "both", "directions" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/bin/qbiogolr.py#L188-L199
train
204,892
biolink/ontobio
ontobio/io/entityparser.py
load_gpi
def load_gpi(self, gpi_path): """ Loads a GPI as a file from the `config.gpi_authority_path` """ if self.config.gpi_authority_path is not None: gpis = dict() parser = entityparser.GpiParser() with open(self.config.gpi_authority_path) as gpi_f: entities = parser.parse(file=gpi_f) for entity in entities: gpis[entity["id"]] = { "symbol": entity["label"], "name": entity["full_name"], "synonyms": entitywriter.stringify(entity["synonyms"]), "type": entity["type"] } return gpis # If there is no config file path, return None return None
python
def load_gpi(self, gpi_path): """ Loads a GPI as a file from the `config.gpi_authority_path` """ if self.config.gpi_authority_path is not None: gpis = dict() parser = entityparser.GpiParser() with open(self.config.gpi_authority_path) as gpi_f: entities = parser.parse(file=gpi_f) for entity in entities: gpis[entity["id"]] = { "symbol": entity["label"], "name": entity["full_name"], "synonyms": entitywriter.stringify(entity["synonyms"]), "type": entity["type"] } return gpis # If there is no config file path, return None return None
[ "def", "load_gpi", "(", "self", ",", "gpi_path", ")", ":", "if", "self", ".", "config", ".", "gpi_authority_path", "is", "not", "None", ":", "gpis", "=", "dict", "(", ")", "parser", "=", "entityparser", ".", "GpiParser", "(", ")", "with", "open", "(", ...
Loads a GPI as a file from the `config.gpi_authority_path`
[ "Loads", "a", "GPI", "as", "a", "file", "from", "the", "config", ".", "gpi_authority_path" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entityparser.py#L62-L81
train
204,893
biolink/ontobio
ontobio/io/entityparser.py
EntityParser.parse
def parse(self, file, outfile=None): """Parse a line-oriented entity file into a list of entity dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into entity objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Entities generated from the file """ file = self._ensure_file(file) ents = [] skipped = [] n_lines = 0 for line in file: n_lines += 1 if line.startswith("!"): if outfile is not None: outfile.write(line) continue line = line.strip("\n") if line == "": logging.warning("EMPTY LINE") continue parsed_line, new_ents = self.parse_line(line) if self._skipping_line(new_ents): # Skip if there were no ents logging.warning("SKIPPING: {}".format(line)) skipped.append(line) else: ents += new_ents if outfile is not None: outfile.write(parsed_line + "\n") self.report.skipped += len(skipped) self.report.n_lines += n_lines #self.report.n_associations += len(ents) logging.info("Parsed {} ents from {} lines. Skipped: {}". format(len(ents), n_lines, len(skipped))) file.close() return ents
python
def parse(self, file, outfile=None): """Parse a line-oriented entity file into a list of entity dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into entity objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Entities generated from the file """ file = self._ensure_file(file) ents = [] skipped = [] n_lines = 0 for line in file: n_lines += 1 if line.startswith("!"): if outfile is not None: outfile.write(line) continue line = line.strip("\n") if line == "": logging.warning("EMPTY LINE") continue parsed_line, new_ents = self.parse_line(line) if self._skipping_line(new_ents): # Skip if there were no ents logging.warning("SKIPPING: {}".format(line)) skipped.append(line) else: ents += new_ents if outfile is not None: outfile.write(parsed_line + "\n") self.report.skipped += len(skipped) self.report.n_lines += n_lines #self.report.n_associations += len(ents) logging.info("Parsed {} ents from {} lines. Skipped: {}". format(len(ents), n_lines, len(skipped))) file.close() return ents
[ "def", "parse", "(", "self", ",", "file", ",", "outfile", "=", "None", ")", ":", "file", "=", "self", ".", "_ensure_file", "(", "file", ")", "ents", "=", "[", "]", "skipped", "=", "[", "]", "n_lines", "=", "0", "for", "line", "in", "file", ":", ...
Parse a line-oriented entity file into a list of entity dict objects Note the returned list is of dict objects. TODO: These will later be specified using marshmallow and it should be possible to generate objects Arguments --------- file : file or string The file is parsed into entity objects. Can be a http URL, filename or `file-like-object`, for input assoc file outfile : file Optional output file in which processed lines are written. This a file or `file-like-object` Return ------ list Entities generated from the file
[ "Parse", "a", "line", "-", "oriented", "entity", "file", "into", "a", "list", "of", "entity", "dict", "objects" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entityparser.py#L9-L60
train
204,894
biolink/ontobio
ontobio/io/entityparser.py
GpiParser.parse_line
def parse_line(self, line): """Parses a single line of a GPI. Return a tuple `(processed_line, entities)`. Typically there will be a single entity, but in some cases there may be none (invalid line) or multiple (disjunctive clause in annotation extensions) Note: most applications will only need to call this directly if they require fine-grained control of parsing. For most purposes, :method:`parse_file` can be used over the whole file Arguments --------- line : str A single tab-seperated line from a GPAD file """ vals = line.split("\t") if len(vals) < 7: self.report.error(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "") return line, [] if len(vals) < 10 and len(vals) >= 7: missing_columns = 10 - len(vals) vals += ["" for i in range(missing_columns)] [ db, db_object_id, db_object_symbol, db_object_name, db_object_synonym, db_object_type, taxon, parent_object_id, xrefs, properties ] = vals split_line = assocparser.SplitLine(line=line, values=vals, taxon=taxon) ## -- ## db + db_object_id. CARD=1 ## -- id = self._pair_to_id(db, db_object_id) if not self._validate_id(id, split_line, context=assocparser.Report): return line, [] ## -- ## db_object_synonym CARD=0..* ## -- synonyms = db_object_synonym.split("|") if db_object_synonym == "": synonyms = [] # TODO: DRY parents = parent_object_id.split("|") if parent_object_id == "": parents = [] else: parents = [self._normalize_id(x) for x in parents] for p in parents: self._validate_id(p, split_line, context=assocparser.Report) xref_ids = xrefs.split("|") if xrefs == "": xref_ids = [] obj = { 'id': id, 'label': db_object_symbol, 'full_name': db_object_name, 'synonyms': synonyms, 'type': db_object_type, 'parents': parents, 'xrefs': xref_ids, 'taxon': { 'id': self._taxon_id(taxon, split_line) } } return line, [obj]
python
def parse_line(self, line): """Parses a single line of a GPI. Return a tuple `(processed_line, entities)`. Typically there will be a single entity, but in some cases there may be none (invalid line) or multiple (disjunctive clause in annotation extensions) Note: most applications will only need to call this directly if they require fine-grained control of parsing. For most purposes, :method:`parse_file` can be used over the whole file Arguments --------- line : str A single tab-seperated line from a GPAD file """ vals = line.split("\t") if len(vals) < 7: self.report.error(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "") return line, [] if len(vals) < 10 and len(vals) >= 7: missing_columns = 10 - len(vals) vals += ["" for i in range(missing_columns)] [ db, db_object_id, db_object_symbol, db_object_name, db_object_synonym, db_object_type, taxon, parent_object_id, xrefs, properties ] = vals split_line = assocparser.SplitLine(line=line, values=vals, taxon=taxon) ## -- ## db + db_object_id. CARD=1 ## -- id = self._pair_to_id(db, db_object_id) if not self._validate_id(id, split_line, context=assocparser.Report): return line, [] ## -- ## db_object_synonym CARD=0..* ## -- synonyms = db_object_synonym.split("|") if db_object_synonym == "": synonyms = [] # TODO: DRY parents = parent_object_id.split("|") if parent_object_id == "": parents = [] else: parents = [self._normalize_id(x) for x in parents] for p in parents: self._validate_id(p, split_line, context=assocparser.Report) xref_ids = xrefs.split("|") if xrefs == "": xref_ids = [] obj = { 'id': id, 'label': db_object_symbol, 'full_name': db_object_name, 'synonyms': synonyms, 'type': db_object_type, 'parents': parents, 'xrefs': xref_ids, 'taxon': { 'id': self._taxon_id(taxon, split_line) } } return line, [obj]
[ "def", "parse_line", "(", "self", ",", "line", ")", ":", "vals", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "vals", ")", "<", "7", ":", "self", ".", "report", ".", "error", "(", "line", ",", "assocparser", ".", "Report", "....
Parses a single line of a GPI. Return a tuple `(processed_line, entities)`. Typically there will be a single entity, but in some cases there may be none (invalid line) or multiple (disjunctive clause in annotation extensions) Note: most applications will only need to call this directly if they require fine-grained control of parsing. For most purposes, :method:`parse_file` can be used over the whole file Arguments --------- line : str A single tab-seperated line from a GPAD file
[ "Parses", "a", "single", "line", "of", "a", "GPI", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entityparser.py#L97-L178
train
204,895
biolink/ontobio
ontobio/io/entityparser.py
BgiParser.transform_item
def transform_item(self, item): """ Transforms JSON object """ obj = { 'id': item['primaryId'], 'label': item['symbol'], 'full_name': item['name'], 'type': item['soTermId'], 'taxon': {'id': item['taxonId']}, } if 'synonyms' in item: obj['synonyms'] = item['synonyms'] if 'crossReferenceIds' in item: obj['xrefs'] = [self._normalize_id(x) for x in item['crossReferenceIds']] # TODO: synonyms # TODO: genomeLocations # TODO: geneLiteratureUrl return obj
python
def transform_item(self, item): """ Transforms JSON object """ obj = { 'id': item['primaryId'], 'label': item['symbol'], 'full_name': item['name'], 'type': item['soTermId'], 'taxon': {'id': item['taxonId']}, } if 'synonyms' in item: obj['synonyms'] = item['synonyms'] if 'crossReferenceIds' in item: obj['xrefs'] = [self._normalize_id(x) for x in item['crossReferenceIds']] # TODO: synonyms # TODO: genomeLocations # TODO: geneLiteratureUrl return obj
[ "def", "transform_item", "(", "self", ",", "item", ")", ":", "obj", "=", "{", "'id'", ":", "item", "[", "'primaryId'", "]", ",", "'label'", ":", "item", "[", "'symbol'", "]", ",", "'full_name'", ":", "item", "[", "'name'", "]", ",", "'type'", ":", ...
Transforms JSON object
[ "Transforms", "JSON", "object" ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entityparser.py#L205-L224
train
204,896
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.index
def index(self): """ Creates indexes based on inferred terms. You do not need to call this yourself; called on initialization """ self.subjects = list(self.association_map.keys()) # ensure annotations unique for (subj,terms) in self.association_map.items(): self.association_map[subj] = list(set(self.association_map[subj])) logging.info("Indexing {} items".format(len(self.subjects))) n = 0 all_objs = set() for (subj,terms) in self.association_map.items(): ancs = self.termset_ancestors(terms) all_objs.update(ancs) self.subject_to_inferred_map[subj] = ancs n = n+1 if n<5: logging.info(" Indexed: {} -> {}".format(subj, ancs)) elif n == 6: logging.info("[TRUNCATING>5]....") self.objects = all_objs
python
def index(self): """ Creates indexes based on inferred terms. You do not need to call this yourself; called on initialization """ self.subjects = list(self.association_map.keys()) # ensure annotations unique for (subj,terms) in self.association_map.items(): self.association_map[subj] = list(set(self.association_map[subj])) logging.info("Indexing {} items".format(len(self.subjects))) n = 0 all_objs = set() for (subj,terms) in self.association_map.items(): ancs = self.termset_ancestors(terms) all_objs.update(ancs) self.subject_to_inferred_map[subj] = ancs n = n+1 if n<5: logging.info(" Indexed: {} -> {}".format(subj, ancs)) elif n == 6: logging.info("[TRUNCATING>5]....") self.objects = all_objs
[ "def", "index", "(", "self", ")", ":", "self", ".", "subjects", "=", "list", "(", "self", ".", "association_map", ".", "keys", "(", ")", ")", "# ensure annotations unique", "for", "(", "subj", ",", "terms", ")", "in", "self", ".", "association_map", ".",...
Creates indexes based on inferred terms. You do not need to call this yourself; called on initialization
[ "Creates", "indexes", "based", "on", "inferred", "terms", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L61-L85
train
204,897
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.query_associations
def query_associations(self, subjects=None, infer_subjects=True, include_xrefs=True): """ Query for a set of associations. Note: only a minimal association model is stored, so all results are returned as (subject_id,class_id) tuples Arguments: subjects: list list of subjects (e.g. genes, diseases) used to query associations. Any association to one of these subjects or a descendant of these subjects (assuming infer_subjects=True) are returned. infer_subjects: boolean (default true) See above include_xrefs: boolean (default true) If true, then expand inferred subject set to include all xrefs of those subjects. Example: if a high level disease node (e.g. DOID:14330 Parkinson disease) is specified, then the default behavior (infer_subjects=True, include_xrefs=True) and the ontology includes DO, results will include associations from both descendant DOID classes, and all xrefs (e.g. OMIM) """ if subjects is None: subjects = [] mset = set() if infer_subjects: for subj in subjects: mset.update(self.ontology.descendants(subj)) mset.update(set(subjects)) if include_xrefs: xset = set() for m in mset: xrefs = self.ontology.xrefs(m, bidirectional=True) if xrefs is not None: xset.update(xrefs) mset.update(xset) logging.debug("Matching subjects: {}".format(mset)) mset = mset.intersection(self.subjects) logging.debug("Matching subjects with anns: {}".format(mset)) amap = self.association_map results = [] for m in mset: if m in amap: for t in amap[m]: results.append( (m,t) ) return results
python
def query_associations(self, subjects=None, infer_subjects=True, include_xrefs=True): """ Query for a set of associations. Note: only a minimal association model is stored, so all results are returned as (subject_id,class_id) tuples Arguments: subjects: list list of subjects (e.g. genes, diseases) used to query associations. Any association to one of these subjects or a descendant of these subjects (assuming infer_subjects=True) are returned. infer_subjects: boolean (default true) See above include_xrefs: boolean (default true) If true, then expand inferred subject set to include all xrefs of those subjects. Example: if a high level disease node (e.g. DOID:14330 Parkinson disease) is specified, then the default behavior (infer_subjects=True, include_xrefs=True) and the ontology includes DO, results will include associations from both descendant DOID classes, and all xrefs (e.g. OMIM) """ if subjects is None: subjects = [] mset = set() if infer_subjects: for subj in subjects: mset.update(self.ontology.descendants(subj)) mset.update(set(subjects)) if include_xrefs: xset = set() for m in mset: xrefs = self.ontology.xrefs(m, bidirectional=True) if xrefs is not None: xset.update(xrefs) mset.update(xset) logging.debug("Matching subjects: {}".format(mset)) mset = mset.intersection(self.subjects) logging.debug("Matching subjects with anns: {}".format(mset)) amap = self.association_map results = [] for m in mset: if m in amap: for t in amap[m]: results.append( (m,t) ) return results
[ "def", "query_associations", "(", "self", ",", "subjects", "=", "None", ",", "infer_subjects", "=", "True", ",", "include_xrefs", "=", "True", ")", ":", "if", "subjects", "is", "None", ":", "subjects", "=", "[", "]", "mset", "=", "set", "(", ")", "if",...
Query for a set of associations. Note: only a minimal association model is stored, so all results are returned as (subject_id,class_id) tuples Arguments: subjects: list list of subjects (e.g. genes, diseases) used to query associations. Any association to one of these subjects or a descendant of these subjects (assuming infer_subjects=True) are returned. infer_subjects: boolean (default true) See above include_xrefs: boolean (default true) If true, then expand inferred subject set to include all xrefs of those subjects. Example: if a high level disease node (e.g. DOID:14330 Parkinson disease) is specified, then the default behavior (infer_subjects=True, include_xrefs=True) and the ontology includes DO, results will include associations from both descendant DOID classes, and all xrefs (e.g. OMIM)
[ "Query", "for", "a", "set", "of", "associations", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L125-L174
train
204,898
biolink/ontobio
ontobio/assocmodel.py
AssociationSet.query
def query(self, terms=None, negated_terms=None): """ Basic boolean query, using inference. Arguments: - terms: list list of class ids. Returns the set of subjects that have at least one inferred annotation to each of the specified classes. - negated_terms: list list of class ids. Filters the set of subjects so that there are no inferred annotations to any of the specified classes """ if terms is None: terms = [] matches_all = 'owl:Thing' in terms if negated_terms is None: negated_terms = [] termset = set(terms) negated_termset = set(negated_terms) matches = [] n_terms = len(termset) for subj in self.subjects: if matches_all or len(termset.intersection(self.inferred_types(subj))) == n_terms: if len(negated_termset.intersection(self.inferred_types(subj))) == 0: matches.append(subj) return matches
python
def query(self, terms=None, negated_terms=None): """ Basic boolean query, using inference. Arguments: - terms: list list of class ids. Returns the set of subjects that have at least one inferred annotation to each of the specified classes. - negated_terms: list list of class ids. Filters the set of subjects so that there are no inferred annotations to any of the specified classes """ if terms is None: terms = [] matches_all = 'owl:Thing' in terms if negated_terms is None: negated_terms = [] termset = set(terms) negated_termset = set(negated_terms) matches = [] n_terms = len(termset) for subj in self.subjects: if matches_all or len(termset.intersection(self.inferred_types(subj))) == n_terms: if len(negated_termset.intersection(self.inferred_types(subj))) == 0: matches.append(subj) return matches
[ "def", "query", "(", "self", ",", "terms", "=", "None", ",", "negated_terms", "=", "None", ")", ":", "if", "terms", "is", "None", ":", "terms", "=", "[", "]", "matches_all", "=", "'owl:Thing'", "in", "terms", "if", "negated_terms", "is", "None", ":", ...
Basic boolean query, using inference. Arguments: - terms: list list of class ids. Returns the set of subjects that have at least one inferred annotation to each of the specified classes. - negated_terms: list list of class ids. Filters the set of subjects so that there are no inferred annotations to any of the specified classes
[ "Basic", "boolean", "query", "using", "inference", "." ]
4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L196-L224
train
204,899