idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
15,900
def _regex_strings(self):
    """Proxy to the default domain's IntentEngine ``_regex_strings``.

    The default domain (key 0) is created on demand.
    """
    default = 0
    if default not in self.domains:
        self.register_domain(domain=default)
    return self.domains[default]._regex_strings
A property to link into IntentEngine s _regex_strings .
15,901
def regular_expressions_entities(self):
    """Proxy to the default domain's ``regular_expressions_entities``.

    The default domain (key 0) is created on demand.
    """
    default = 0
    if default not in self.domains:
        self.register_domain(domain=default)
    return self.domains[default].regular_expressions_entities
A property to link into IntentEngine s regular_expressions_entities .
15,902
def register_domain(self, domain=0, tokenizer=None, trie=None):
    """Create and store an IntentDeterminationEngine under *domain*.

    Any engine previously registered under the same key is replaced.
    """
    engine = IntentDeterminationEngine(tokenizer=tokenizer, trie=trie)
    self.domains[domain] = engine
Register a domain with the intent engine .
15,903
def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):
    """Register an entity with *domain*'s engine, creating the domain on
    first use.

    :param entity_value: text value to tag.
    :param entity_type: type label for the entity.
    :param alias_of: canonical value this is an alias for, if any.
    :param domain: domain key (default 0).
    """
    if domain not in self.domains:
        self.register_domain(domain=domain)
    engine = self.domains[domain]
    engine.register_entity(entity_value=entity_value,
                           entity_type=entity_type,
                           alias_of=alias_of)
Register an entity to be tagged in potential parse results .
15,904
def register_intent_parser(self, intent_parser, domain=0):
    """Attach *intent_parser* to *domain*'s engine (domain auto-created)."""
    try:
        engine = self.domains[domain]
    except KeyError:
        self.register_domain(domain=domain)
        engine = self.domains[domain]
    engine.register_intent_parser(intent_parser=intent_parser)
Register an intent parser with a domain.
15,905
def tokenize(self, string):
    """Split *string* into word tokens, separating punctuation and clitics.

    Relies on module-level regex fragments (regex_separator,
    regex_not_letter_number, regex_clitics, regex_letter_number) and
    abbreviations_list -- assumed defined at module scope; TODO confirm.
    """
    s = string
    # Tabs become plain spaces.
    s = re.sub('\t', " ", s)
    # Pad separator characters with spaces so they become tokens.
    s = re.sub("(" + regex_separator + ")", " \g<1> ", s)
    # Commas not adjacent to digits are split off (keeps "1,000" intact).
    s = re.sub("([^0-9]),", "\g<1> , ", s)
    s = re.sub(",([^0-9])", " , \g<1>", s)
    # A leading apostrophe is detached.
    s = re.sub("^(')", "\g<1> ", s)
    # Apostrophes after non-alphanumerics are detached.
    s = re.sub("(" + regex_not_letter_number + ")'", "\g<1> '", s)
    # Clitics ('s, n't, ...) at end-of-string or before punctuation
    # are split into their own tokens.
    s = re.sub("(" + regex_clitics + ")$", " \g<1>", s)
    s = re.sub("(" + regex_clitics + ")(" + regex_not_letter_number + ")", " \g<1> \g<2>", s)
    words = s.strip().split()
    # p1: word contains letter/number followed by a period.
    p1 = re.compile(".*" + regex_letter_number + "\\.")
    # p2: initialisms (U.S.A.) or consonant-only abbreviations (Mr. etc.).
    p2 = re.compile("^([A-Za-z]\\.([A-Za-z]\\.)+|[A-Z][bcdfghj-nptvxz]+\\.)$")
    token_list = []
    for word in words:
        m1 = p1.match(word)
        m2 = p2.match(word)
        if m1 and word not in abbreviations_list and not m2:
            # Split the word at its period; the part before the period...
            token_list.append(word[0:word.find('.')])
            # ...then the period itself. NOTE(review): find() locates the
            # FIRST '.', and this appends only that single character --
            # presumably the sentence-final period; confirm this is the
            # intended behavior for words with an interior period.
            token_list.append(word[word.find('.')])
        else:
            token_list.append(word)
    return token_list
Used to parse a string into tokens.
15,906
def parse(self, utterance, context=None, N=1):
    """Yield up to *N* scored parse results for *utterance*.

    :param utterance: raw input text (lowercased before tagging).
    :param context: optional list of context entity dicts (each with
        'data', 'confidence'); they are loaded into a weighted Trie that
        biases the tagger.
    :param N: maximum number of clique expansions to yield.
    :yields: dicts with 'utterance', 'tags', 'time' and 'confidence'.
    """
    start = time.time()
    context_trie = None
    if context and isinstance(context, list):
        # Sort ascending so higher-confidence entities are inserted last.
        context.sort(key=lambda x: x.get('confidence'))
        context_trie = Trie()
        for entity in context:
            entity_value, entity_type = entity.get('data')[0]
            context_trie.insert(entity_value.lower(),
                                data=(entity_value, entity_type),
                                weight=entity.get('confidence'))
    tagged = self._tagger.tag(utterance.lower(), context_trie=context_trie)
    self.emit("tagged_entities",
              {'utterance': utterance,
               'tags': list(tagged),
               'time': time.time() - start})
    start = time.time()
    bke = BronKerboschExpander(self._tokenizer)

    def score_clique(clique):
        # Clique score: confidence-weighted coverage of the utterance.
        score = 0.0
        for tagged_entity in clique:
            ec = tagged_entity.get('entities', [{'confidence': 0.0}])[0].get('confidence')
            score += ec * len(tagged_entity.get('entities', [{'match': ''}])[0].get('match')) / (len(utterance) + 1)
        return score

    parse_results = bke.expand(tagged, clique_scoring_func=score_clique)
    count = 0
    for result in parse_results:
        count += 1
        parse_confidence = 0.0
        for tag in result:
            sample_entity = tag['entities'][0]
            # Per-tag contribution: confidence scaled by match coverage.
            entity_confidence = sample_entity.get('confidence', 0.0) * float(len(sample_entity.get('match'))) / len(utterance)
            parse_confidence += entity_confidence
        yield {'utterance': utterance,
               'tags': result,
               'time': time.time() - start,
               'confidence': parse_confidence}
        if count >= N:
            break
Used to find tags within utterance with a given confidence
15,907
def metadata_matches(self, query=None):
    """Return True iff *query* is non-empty and every key/value pair in it
    equals the corresponding entry of ``self.metadata``.

    :param query: dict of key -> expected value; an empty or omitted
        query never matches.
    """
    # BUG FIX: the original used a shared mutable default (query={});
    # use None as the sentinel instead.
    if query is None:
        query = {}
    result = len(query) > 0  # empty query matches nothing
    for key in query:
        result = result and query[key] == self.metadata.get(key)
    return result
Returns key matches to metadata
15,908
def merge_context(self, tag, metadata):
    """Merge a new entity *tag* and its *metadata* into this frame.

    :param tag: entity dict, appended to ``self.entities``.
    :param metadata: dict merged into ``self.metadata``; keys already
        present in the frame keep their existing values.
    """
    self.entities.append(tag)
    for key, value in metadata.items():
        # BUG FIX: the original stored the key as its own value
        # (self.metadata[k] = k); store the metadata value instead.
        if key not in self.metadata:
            self.metadata[key] = value
merge into contextManagerFrame new entity and metadata .
15,909
def get_context(self, max_frames=None, missing_entities=None):
    """Build a list of entity dicts from the frame stack.

    Entities from deeper (older) frames are down-weighted: confidence is
    divided by (2.0 + frame_index).

    :param max_frames: max number of frames to consider (defaults to all).
    :param missing_entities: optional iterable of entity 'data' values;
        when given, only the first matching entity per value is returned.
    """
    # BUG FIX: avoid the mutable default argument ([]) of the original.
    missing_entities = list(missing_entities) if missing_entities else []
    if not max_frames or max_frames > len(self.frame_stack):
        max_frames = len(self.frame_stack)
    context = []
    # BUG FIX: range() replaces the Python-2-only xrange().
    for i in range(max_frames):
        frame_entities = [entity.copy() for entity in self.frame_stack[i].entities]
        for entity in frame_entities:
            entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i)
        context += frame_entities
    if not missing_entities:
        return context
    result = []
    for entity in context:
        if entity.get('data') in missing_entities:
            result.append(entity)
            # keep only the first hit per requested value
            missing_entities.remove(entity.get('data'))
    return result
Constructs a list of entities from the context .
15,910
def find_first_tag(tags, entity_type, after_index=-1):
    """Scan *tags* for the first entity of *entity_type* starting after
    *after_index*.

    :return: (tag, canonical_value, confidence), or (None, None, None)
        when nothing matches. Type comparison is case-insensitive.
    """
    wanted = entity_type.lower()
    for candidate in tags:
        starts_late_enough = candidate.get('start_token', 0) > after_index
        for ent in candidate.get('entities'):
            for value, etype in ent.get('data'):
                if etype.lower() == wanted and starts_late_enough:
                    return candidate, value, ent.get('confidence')
    return None, None, None
Searches tags for entity type after given index
15,911
def choose_1_from_each(lists):
    """Yield every combination taking exactly one element from each list.

    Yields len(l1)*len(l2)*... results, each of length len(lists).
    An empty input yields a single empty list.
    """
    if not lists:
        yield []
        return
    head, tail = lists[0], lists[1:]
    for item in head:
        for combo in choose_1_from_each(tail):
            yield [item] + combo
Takes a list of lists and yields lists with one item drawn from each input list. The number of results is the product of the input lists' lengths (e.g. 18 for lists of length 3, 2 and 3), and each result has the same length as the list of lists passed in.
15,912
def resolve_one_of(tags, at_least_one):
    """Try to satisfy one alternative combination from *at_least_one*.

    :param tags: tagged entities, as produced by the tagger.
    :param at_least_one: list of lists of entity types; one entity for
        each type in some combination must be found in *tags*.
    :return: dict mapping entity_type -> [tag, ...] for the first fully
        resolved combination, else None.
    """
    if len(tags) < len(at_least_one):
        return None
    for combination in choose_1_from_each(at_least_one):
        resolution = {}
        for entity_type in list(combination):
            last_end_index = -1
            if entity_type in resolution:
                # BUG FIX: the original subscripted the bound method
                # (resolution.get[entity_type]), raising TypeError here.
                last_end_index = resolution[entity_type][-1].get('end_token')
            tag, value, confidence = find_first_tag(tags, entity_type,
                                                    after_index=last_end_index)
            if not tag:
                break
            if entity_type not in resolution:
                resolution[entity_type] = []
            resolution[entity_type].append(tag)
        # Fully resolved only when every type in the combination matched.
        if len(resolution) == len(combination):
            return resolution
    return None
This searches tags for entities in at_least_one and returns any match.
15,913
def validate(self, tags, confidence):
    """Like validate_with_tags(), but return only the intent dict."""
    intent, _ = self.validate_with_tags(tags, confidence)
    return intent
Using this method removes tags from the result of validate_with_tags
15,914
def validate_with_tags(self, tags, confidence):
    """Validate whether *tags* hold the entities this intent requires.

    :param tags: candidate tagged entities.
    :param confidence: caller-supplied weight applied to the final score.
    :return: (intent-result dict, list of consumed tags). The result's
        'confidence' is 0.0 when a required entity is missing.
    """
    result = {'intent_type': self.name}
    intent_confidence = 0.0
    local_tags = tags[:]
    used_tags = []
    for require_type, attribute_name in self.requires:
        # BUG FIX: the original unpacked into `confidence`, clobbering the
        # caller-supplied parameter used in the final score below.
        required_tag, canonical_form, tag_confidence = \
            find_first_tag(local_tags, require_type)
        if not required_tag:
            result['confidence'] = 0.0
            return result, []
        result[attribute_name] = canonical_form
        if required_tag in local_tags:
            local_tags.remove(required_tag)
        used_tags.append(required_tag)
        intent_confidence += tag_confidence
    if len(self.at_least_one) > 0:
        best_resolution = resolve_one_of(tags, self.at_least_one)
        if not best_resolution:
            result['confidence'] = 0.0
            return result, []
        for key in best_resolution:
            result[key] = best_resolution[key][0].get('key')
            intent_confidence += 1.0
        used_tags.append(best_resolution)
        if best_resolution in local_tags:
            local_tags.remove(best_resolution)
    for optional_type, attribute_name in self.optional:
        optional_tag, canonical_form, _conf = \
            find_first_tag(local_tags, optional_type)
        if not optional_tag or attribute_name in result:
            continue
        result[attribute_name] = canonical_form
        if optional_tag in local_tags:
            local_tags.remove(optional_tag)
        used_tags.append(optional_tag)
        intent_confidence += 1.0
    # NOTE(review): raises ZeroDivisionError when *tags* is empty and
    # there are no required entities -- confirm callers never pass [].
    total_confidence = intent_confidence / len(tags) * confidence
    target_client, canonical_form, _conf = \
        find_first_tag(local_tags, CLIENT_ENTITY_NAME)
    result['target'] = target_client.get('key') if target_client else None
    result['confidence'] = total_confidence
    return result, used_tags
Validate whether tags contain the required entities for this intent to fire.
15,915
def require(self, entity_type, attribute_name=None):
    """Declare a required entity type; returns self for chaining.

    *attribute_name* (the key used in the parsed result) defaults to the
    entity type itself.
    """
    self.requires += [(entity_type, attribute_name or entity_type)]
    return self
The intent parser should require an entity of the provided type .
15,916
def optionally(self, entity_type, attribute_name=None):
    """Declare an optional entity type; returns self for chaining.

    *attribute_name* (the key used in the parsed result) defaults to the
    entity type itself.
    """
    self.optional += [(entity_type, attribute_name or entity_type)]
    return self
Parsed intents from this parser can optionally include an entity of the provided type .
15,917
def build(self):
    """Assemble and return the Intent described by this builder."""
    spec = (self.name, self.requires, self.at_least_one, self.optional)
    return Intent(*spec)
Constructs an intent from the builder s specifications .
15,918
def term_matrix(idlist, subject_category, taxon, **kwargs):
    """Pairwise intersection/enrichment matrix between annotated term sets.

    For every ordered pair (cx, dx) of ids in *idlist*, counts the
    subjects annotated to both and scores under/over-representation with
    one-sided Fisher exact tests.

    :param idlist: list of term ids (also used to filter the closure).
    :param subject_category: golr subject category.
    :param taxon: subject taxon filter.
    :return: list of cell dicts with keys c, d, nc, nd, n, p_l, p_g.
    """
    # Fetch raw association docs; rows=-1 means "all rows".
    results = search_associations(objects=idlist,
                                  subject_taxon=taxon,
                                  subject_category=subject_category,
                                  select_fields=[M.SUBJECT, M.OBJECT_CLOSURE],
                                  facet_fields=[],
                                  rows=-1,
                                  include_raw=True,
                                  **kwargs)
    docs = results['raw'].docs
    subjects_per_term = {}
    smap = {}
    for d in docs:
        smap[d[M.SUBJECT]] = 1
        # Only terms from the query idlist are tracked.
        for c in d[M.OBJECT_CLOSURE]:
            if c in idlist:
                if c not in subjects_per_term:
                    subjects_per_term[c] = []
                subjects_per_term[c].append(d[M.SUBJECT])
    # Population size = number of distinct subjects seen.
    pop_n = len(smap.keys())
    cells = []
    for cx in idlist:
        csubjs = set(subjects_per_term[cx])
        for dx in idlist:
            dsubjs = set(subjects_per_term[dx])
            # 2x2 contingency: a=both, b=cx only, c=dx only, d=neither
            # (d = pop_n - |dsubjs| - b = pop_n - |csubjs| - |dsubjs| + a).
            a = len(csubjs.intersection(dsubjs))
            b = len(csubjs) - a
            c = len(dsubjs) - a
            d = pop_n - len(dsubjs) - b
            ctable = [[a, b], [c, d]]
            # One-sided Fisher tests: depletion ('less'), enrichment ('greater').
            _, p_under = sp.stats.fisher_exact(ctable, 'less')
            _, p_over = sp.stats.fisher_exact(ctable, 'greater')
            cells.append({'c': cx,
                          'd': dx,
                          'nc': len(csubjs),
                          'nd': len(dsubjs),
                          'n': a,
                          'p_l': p_under,
                          'p_g': p_over})
    return cells
Intersection between annotated objects
15,919
def get_ancestors_through_subont(self, go_term, relations):
    """Ancestors of *go_term* restricted to *relations*, computed over the
    sub-ontology spanned by all (reflexive) ancestors of the term."""
    closure = self.ontology.ancestors(go_term, reflexive=True)
    restricted = self.ontology.subontology(closure)
    return restricted.ancestors(go_term, relations)
Returns the ancestors from the relation filtered GO subontology of go_term s ancestors .
15,920
def go_aspect(self, go_term):
    """Return 'F', 'C' or 'P' for a GO term's aspect.

    Non-GO identifiers (no "GO:" prefix) yield None, as does a GO id
    matching none of the three aspects.
    """
    if not go_term.startswith("GO:"):
        return None
    if self.is_molecular_function(go_term):
        return 'F'
    if self.is_cellular_component(go_term):
        return 'C'
    if self.is_biological_process(go_term):
        return 'P'
    return None
For GO terms returns F C or P corresponding to its aspect
15,921
def _neighbors_graph(self, **params) -> Dict:
    """GET graph/neighbors for a node and return the decoded JSON body."""
    resp = self._get_response("graph/neighbors", format="json", **params)
    return resp.json()
Get neighbors of a node
15,922
def rdfgraph_to_ontol(rg):
    """Build an Ontology (handle 'wd') from an rdflib graph of OWL classes.

    Only rdfs:label and rdfs:subClassOf are materialized; subclass edges
    run superclass -> subclass in the MultiDiGraph payload.
    NOTE(review): nodes without a label get no digraph node here --
    confirm that is intended.
    """
    digraph = networkx.MultiDiGraph()
    from rdflib.namespace import RDF
    label_map = {}
    for c in rg.subjects(RDF.type, OWL.Class):
        cid = contract_uri_wrap(c)
        logging.info("C={}".format(cid))
        for lit in rg.objects(c, RDFS.label):
            label_map[cid] = lit.value
            digraph.add_node(cid, label=lit.value)
        for s in rg.objects(c, RDFS.subClassOf):
            sid = contract_uri_wrap(s)
            # Edge direction: superclass -> subclass.
            digraph.add_edge(sid, cid, pred='subClassOf')
    logging.info("G={}".format(digraph))
    payload = {
        'graph': digraph,
    }
    ont = Ontology(handle='wd', payload=payload)
    return ont
Return an Ontology object from an rdflib graph object
15,923
def get_association(id, **kwargs):
    """Fetch a single association object by its *id*.

    Returns an empty dict when the search yields no associations.
    """
    results = search_associations(id=id, **kwargs)
    matches = results['associations']
    return matches[0] if matches else {}
Fetch an association object by ID
15,924
def search_associations(**kwargs):
    """Run an association query against golr and return the result set."""
    logging.info("CREATING_GOLR_QUERY {}".format(kwargs))
    query = GolrAssociationQuery(**kwargs)
    return query.exec()
Fetch a set of association objects based on a query .
15,925
def bulk_fetch(subject_category, object_category, taxon, rows=MAX_ROWS, **kwargs):
    """Bulk-fetch compact associations for a taxon and category pair.

    Sleeps 1s before querying (crude rate limiting); logs an error but
    still returns the (empty) list when nothing comes back.
    """
    assert subject_category is not None
    assert object_category is not None
    time.sleep(1)
    logging.info("Bulk query: {} {} {}".format(subject_category, object_category, taxon))
    assocs = search_associations_compact(subject_category=subject_category,
                                         object_category=object_category,
                                         subject_taxon=taxon,
                                         rows=rows,
                                         iterate=True,
                                         **kwargs)
    logging.info("Rows retrieved: {}".format(len(assocs)))
    if len(assocs) == 0:
        logging.error("No associations returned for query: {} {} {}".format(subject_category, object_category, taxon))
    return assocs
Fetch associations for a species and pair of categories in bulk .
15,926
def search_associations_go(subject_category=None, object_category=None, relation=None, subject=None, **kwargs):
    """Association search routed to the GO golr instance (amigo schema)."""
    go_golr_url = "http://golr.geneontology.org/solr/"
    go_solr = pysolr.Solr(go_golr_url, timeout=5)
    # Identify ourselves to the GO service.
    go_solr.get_session().headers['User-Agent'] = get_user_agent(caller_name=__name__)
    return search_associations(subject_category,
                               object_category,
                               relation,
                               subject,
                               solr=go_solr,
                               field_mapping=goassoc_fieldmap(),
                               **kwargs)
Perform association search using Monarch golr
15,927
def select_distinct(distinct_field=None, **kwargs):
    """Distinct values of *distinct_field* matching the query, obtained
    via an unlimited facet on a zero-row search."""
    facets = search_associations(rows=0,
                                 select_fields=[],
                                 facet_field_limits={distinct_field: -1},
                                 facet_fields=[distinct_field],
                                 **kwargs)
    return list(facets['facet_counts'][distinct_field].keys())
Select distinct values of a given field for a given query.
15,928
def pw_score_cosine(self, s1: ClassId, s2: ClassId) -> SimScore:
    """Cosine similarity (1 - cosine distance) between the association
    vectors of two subjects."""
    frame = self.assoc_df
    vec1 = frame.loc[s1].values
    vec2 = frame.loc[s2].values
    return 1 - cosine(vec1, vec2)
Cosine similarity of two subjects
15,929
def calculate_mrcas(self, c1: ClassId, c2: ClassId) -> Set[ClassId]:
    """Minimal common ancestors of c1 and c2: the reflexive common
    ancestors minus any that are themselves ancestors of another common
    ancestor."""
    graph = self.G
    common = (self._ancestors(c1) | {c1}) & (self._ancestors(c2) | {c2})
    non_minimal = set()
    for anc in common:
        non_minimal = non_minimal | nx.ancestors(graph, anc)
    return common - non_minimal
Calculate the MRCA for a class pair
15,930
def pw_compare_class_sets(self, cset1: Set[ClassId], cset2: Set[ClassId]) -> Tuple[ICValue, ICValue, ICValue]:
    """Best-match-average comparison of two class profiles.

    For each class in one set, take the max MICA-IC against the other
    set; average per direction.

    :return: (symmetric mean, cset2-direction mean, cset1-direction mean).
    """
    pairs = self.mica_ic_df.loc[cset1, cset2]
    # The original also computed idxmax along both axes; those results
    # were never used, so that work is dropped here.
    mean0 = pairs.max(axis=0).mean()
    mean1 = pairs.max(axis=1).mean()
    return (mean0 + mean1) / 2, mean0, mean1
Compare two class profiles
15,931
def process_file(self, filename=None, format=None):
    """Parse *filename* with rdflib and hand the graph to
    process_rdfgraph().

    When *format* is None it is guessed from the suffix
    (.ttl -> turtle, .rdf -> xml).
    """
    if format is None:
        if filename.endswith(".ttl"):
            format = 'turtle'
        elif filename.endswith(".rdf"):
            format = 'xml'
    graph = rdflib.Graph()
    graph.parse(filename, format=format)
    return self.process_rdfgraph(graph)
Parse a file into an ontology object using rdflib
15,932
def process_rdfgraph(self, rg, ont=None):
    """Translate a SKOS terminology in rdflib graph *rg* into an Ontology.

    Mapping: ConceptScheme -> ontology id; Concept -> node;
    broader -> parent edge; related -> typed parent edge;
    exactMatch -> xref; altLabel -> synonym; scheme membership -> subset.

    :param rg: rdflib Graph holding the SKOS vocabulary.
    :param ont: Ontology to populate (a fresh one is created if None).
    """
    if ont is None:
        ont = Ontology()
    subjs = list(rg.subjects(RDF.type, SKOS.ConceptScheme))
    if len(subjs) == 0:
        logging.warning("No ConceptScheme")
    else:
        # Only the first scheme found provides the ontology id.
        ont.id = self._uri2id(subjs[0])
    subset_map = {}
    for concept in rg.subjects(RDF.type, SKOS.Concept):
        for s in self._get_schemes(rg, concept):
            subset_map[self._uri2id(s)] = s
    for concept in sorted(list(rg.subjects(RDF.type, SKOS.Concept))):
        # NOTE(review): concept_uri is computed but unused.
        concept_uri = str(concept)
        id = self._uri2id(concept)
        logging.info("ADDING: {}".format(id))
        ont.add_node(id, self._get_label(rg, concept))
        for defn in rg.objects(concept, SKOS.definition):
            # Keep only definitions in the configured language.
            if (defn.language == self.lang):
                td = TextDefinition(id, escape_value(defn.value))
                ont.add_text_definition(td)
        for s in rg.objects(concept, SKOS.broader):
            ont.add_parent(id, self._uri2id(s))
        for s in rg.objects(concept, SKOS.related):
            # 'related' edges carry an explicit predicate id.
            ont.add_parent(id, self._uri2id(s), self._uri2id(SKOS.related))
        for m in rg.objects(concept, SKOS.exactMatch):
            ont.add_xref(id, self._uri2id(m))
        for m in rg.objects(concept, SKOS.altLabel):
            syn = Synonym(id, val=self._uri2id(m))
            ont.add_synonym(syn)
        for s in self._get_schemes(rg, concept):
            ont.add_to_subset(id, self._uri2id(s))
    return ont
Transform a skos terminology expressed in an rdf graph into an Ontology object
15,933
def get_attribute_information_profile(url: str,
                                      profile: Optional[Tuple[str]] = None,
                                      categories: Optional[Tuple[str]] = None) -> Dict:
    """Call owlsim's getAttributeInformationProfile and return the parsed
    JSON: information content per phenotype, plus annotation-sufficiency
    scores when categories are given.

    Query params: 'a' carries the profile, 'r' the category terms.
    """
    endpoint = url + 'getAttributeInformationProfile'
    payload = {'a': profile, 'r': categories}
    return requests.get(endpoint, params=payload, timeout=TIMEOUT).json()
Get the information content for a list of phenotypes, and the annotation-sufficiency simple and categorical scores if categories are provided.
15,934
def search(self, id_list: List, negated_classes: List,
           limit: Optional[int] = 100,
           method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
    """Unfiltered owlsim2 search: delegates to filtered_search with no
    taxon or category restriction."""
    return self.filtered_search(
        id_list=id_list,
        negated_classes=negated_classes,
        limit=limit,
        taxon_filter=None,
        category_filter=None,
        method=method,
    )
Owlsim2 search calls search_by_attribute_set and converts to SimResult object
15,935
def filtered_search(self, id_list: List, negated_classes: List,
                    limit: Optional[int] = 100,
                    taxon_filter: Optional[int] = None,
                    category_filter: Optional[str] = None,
                    method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
    """Owlsim2 search restricted by taxon/category (resolved to a
    namespace filter); converts the raw response to a SimResult.

    Negated classes are not supported by owlsim2 and are ignored.
    """
    if negated_classes:
        logging.warning("Owlsim2 does not support negation, ignoring neg classes")
    namespace_filter = self._get_namespace_filter(taxon_filter, category_filter)
    raw = search_by_attribute_set(self.url, tuple(id_list), limit, namespace_filter)
    return self._simsearch_to_simresult(raw, method)
Owlsim2 filtered search resolves taxon and category to a namespace calls search_by_attribute_set and converts to SimResult object
15,936
def matchers() -> List[SimAlgorithm]:
    """Similarity algorithms supported by owlsim2."""
    return [
        SimAlgorithm.PHENODIGM,
        SimAlgorithm.JACCARD,
        SimAlgorithm.SIM_GIC,
        SimAlgorithm.RESNIK,
        SimAlgorithm.SYMMETRIC_RESNIK,
    ]
Matchers in owlsim2
15,937
def get_profile_ic(self, profile: List) -> Dict:
    """Map each class in *profile* to its information content as reported
    by the owlsim server.

    :raises JSONDecodeError: re-raised with context when the owlsim
        response cannot be parsed.
    """
    response = get_attribute_information_profile(self.url, tuple(profile))
    try:
        return {cls['id']: cls['IC'] for cls in response['input']}
    except JSONDecodeError as json_exc:
        raise JSONDecodeError(
            "Cannot parse owlsim2 response: {}".format(json_exc.msg),
            json_exc.doc,
            json_exc.pos)
Given a list of individuals return their information content
15,938
def _simsearch_to_simresult(self, sim_resp: Dict, method: SimAlgorithm) -> SimResult:
    """Convert a raw owlsim2 search response into a SimResult.

    Results are (re)ranked for *method*, node types and taxa are resolved
    in bulk, and each hit becomes a SimMatch.
    """
    sim_ids = get_nodes_from_ids(sim_resp['query_IRIs'])
    sim_resp['results'] = OwlSim2Api._rank_results(sim_resp['results'], method)
    # Resolve all hit types in a single bulk call.
    ids = [result['j']['id'] for result in sim_resp['results']]
    id_type_map = get_id_type_map(ids)
    matches = []
    for result in sim_resp['results']:
        matches.append(SimMatch(
            id=result['j']['id'],
            label=result['j']['label'],
            rank=result['rank'],
            # The score field read depends on the requested algorithm.
            score=result[OwlSim2Api.method2key[method]],
            type=id_type_map[result['j']['id']][0],
            taxon=get_taxon(result['j']['id']),
            significance="NaN",
            pairwise_match=OwlSim2Api._make_pairwise_matches(result)))
    return SimResult(
        query=SimQuery(ids=sim_ids,
                       unresolved_ids=sim_resp['unresolved'],
                       target_ids=[[]]),
        matches=matches,
        metadata=SimMetadata(max_max_ic=self.statistics.max_max_ic))
Convert owlsim json to SimResult object
15,939
def _rank_results(results: List[Dict], method: SimAlgorithm) -> List[Dict]:
    """Sort *results* by the score field for *method* (descending) and
    attach a dense 'rank': ties share a rank and the next distinct score
    gets rank + 1 (not its ordinal position).
    """
    sorted_results = sorted(results,
                            reverse=True,
                            key=lambda k: k[OwlSim2Api.method2key[method]])
    if len(sorted_results) > 0:
        rank = 1
        previous_score = sorted_results[0][OwlSim2Api.method2key[method]]
        for result in sorted_results:
            # Dense ranking: bump only when the score actually drops.
            if previous_score > result[OwlSim2Api.method2key[method]]:
                rank += 1
            result['rank'] = rank
            previous_score = result[OwlSim2Api.method2key[method]]
    return sorted_results
Ranks results: phenodigm results already arrive ranked, but ties need to be accounted for; for other methods the results must be re-ranked.
15,940
def translate_facet_field(fcs, invert_subject_object=False):
    """Convert solr's flat facet lists ([value, count, value, count, ...])
    into {facet_name: {value: count}}.

    When *invert_subject_object* is set, facet names are swapped via
    INVERT_FIELDS_MAP (in either direction).
    """
    if 'facet_fields' not in fcs:
        return {}
    translated = {}
    for facet, flat in fcs['facet_fields'].items():
        if invert_subject_object:
            for k, v in INVERT_FIELDS_MAP.items():
                if facet == k:
                    facet = v
                    break
                elif facet == v:
                    facet = k
                    break
        # solr alternates value, count, value, count ...
        translated[facet] = {flat[i]: flat[i + 1]
                             for i in range(0, len(flat) - 1, 2)}
    return translated
Translates solr facet_fields results into something easier to manipulate
15,941
def goassoc_fieldmap(relationship_type=ACTS_UPSTREAM_OF_OR_WITHIN):
    """Mapping of canonical monarch field names to amigo-golr field names.

    A value of None means the field has no golr equivalent. The object
    closure field depends on the relationship type queried.
    """
    return {
        M.SUBJECT: 'bioentity',
        M.SUBJECT_CLOSURE: 'bioentity',
        # subject categories have no amigo-golr counterpart
        M.SUBJECT_CATEGORY: None,
        M.SUBJECT_LABEL: 'bioentity_label',
        M.SUBJECT_TAXON: 'taxon',
        M.SUBJECT_TAXON_LABEL: 'taxon_label',
        M.SUBJECT_TAXON_CLOSURE: 'taxon_closure',
        M.RELATION: 'qualifier',
        M.OBJECT: 'annotation_class',
        # regulates closure applies only to acts-upstream-of-or-within
        M.OBJECT_CLOSURE: REGULATES_CLOSURE if relationship_type == ACTS_UPSTREAM_OF_OR_WITHIN else ISA_PARTOF_CLOSURE,
        M.OBJECT_LABEL: 'annotation_class_label',
        M.OBJECT_TAXON: 'object_taxon',
        M.OBJECT_TAXON_LABEL: 'object_taxon_label',
        M.OBJECT_TAXON_CLOSURE: 'object_taxon_closure',
        # object categories have no amigo-golr counterpart
        M.OBJECT_CATEGORY: None,
        M.EVIDENCE_OBJECT_CLOSURE: 'evidence_subset_closure',
        M.IS_DEFINED_BY: 'assigned_by'
    }
Returns a mapping of canonical monarch fields to amigo - golr .
15,942
def map_field(fn, m):
    """Translate field name *fn* through mapping *m*.

    Returns *fn* unchanged when *m* is None or has no entry for it.
    """
    if m is None:
        return fn
    return m.get(fn, fn)
Maps a field name given a mapping file . Returns input if fieldname is unmapped .
15,943
def search(self):
    """Run the configured solr query and post-process the hits."""
    params = self.solr_params()
    logging.info("PARAMS=" + str(params))
    hits = self.solr.search(**params)
    logging.info("Docs found: {}".format(hits.hits))
    return self._process_search_results(hits)
Execute solr search query
15,944
def autocomplete(self):
    """Run the solr query in autocomplete mode (faceting disabled)."""
    self.facet = False
    params = self.solr_params()
    logging.info("PARAMS=" + str(params))
    hits = self.solr.search(**params)
    logging.info("Docs found: {}".format(hits.hits))
    return self._process_autocomplete_results(hits)
Execute solr autocomplete
15,945
def _process_search_results(self, results: pysolr.Results) -> SearchResults:
    """Normalize solr docs (entity -> id/label), collect highlighting, and
    wrap everything in a SearchResults payload."""
    for doc in results.docs:
        if 'entity' in doc:
            doc['id'] = doc['entity']
            doc['label'] = doc['entity_label']
    highlighting = {}
    if results.highlighting:
        for doc in results.docs:
            highlighting[doc['id']] = \
                self._process_highlight(results, doc)._asdict()
    payload = SearchResults(
        facet_counts=translate_facet_field(results.facets),
        highlighting=highlighting,
        docs=results.docs,
        numFound=results.hits)
    logging.debug('Docs: {}'.format(len(results.docs)))
    return payload
Convert solr docs to biolink object
15,946
def autocomplete(self):
    """Run the layperson solr query for autocomplete suggestions."""
    params = self.set_lay_params()
    logging.info("PARAMS=" + str(params))
    hits = self.solr.search(**params)
    logging.info("Docs found: {}".format(hits.hits))
    return self._process_layperson_results(hits)
Execute solr query for autocomplete
15,947
def translate_objs(self, d, fname):
    """Translate field *fname* of solr doc *d* (expected to hold a list)
    into a list of {'id': value} objects.

    Returns None when the field is absent; a scalar value is treated as
    a one-element list.
    """
    if fname not in d:
        return None
    values = d[fname]
    if not isinstance(values, list):
        values = [values]
    return [{'id': value} for value in values]
Translate a field whose value is expected to be a list
15,948
def translate_obj(self, d, fname):
    """Translate field *fname* of solr doc *d* into an object dict with
    'id', 'iri' and, when available, 'label' and 'category'.

    Returns None when the field is absent. GO ids accompanied by an
    'aspect' field get their category overridden from ASPECT_MAP, and
    'aspect' is removed from the doc (single use).
    """
    if fname not in d:
        return None
    lf = M.label_field(fname)
    id = d[fname]
    id = self.make_canonical_identifier(id)
    obj = {'id': id}
    if id:
        if self._use_amigo_schema(self.object_category):
            iri = expand_uri(id)
        else:
            # Monarch instances resolve curies through scigraph.
            iri = expand_uri(id, [get_curie_map('{}/cypher/curies'.format(self.config.scigraph_data.url))])
        obj['iri'] = iri
    if lf in d:
        obj['label'] = d[lf]
    cf = fname + "_category"
    if cf in d:
        obj['category'] = [d[cf]]
    if 'aspect' in d and id.startswith('GO:'):
        obj['category'] = [ASPECT_MAP[d['aspect']]]
        # aspect applies once; drop it so later fields don't reuse it
        del d['aspect']
    return obj
Translate a field value from a solr document .
15,949
def translate_docs(self, ds, **kwargs):
    """Map each solr doc in place, then translate them all."""
    for doc in ds:
        self.map_doc(doc, {}, self.invert_subject_object)
    return [self.translate_doc(doc, **kwargs) for doc in ds]
Translate a set of solr results
15,950
def translate_docs_compact(self, ds, field_mapping=None, slim=None, map_identifiers=None, invert_subject_object=False, **kwargs):
    """Compact golr association docs into one record per (subject, relation).

    Each record holds subject, subject_label, relation and a deduplicated
    object list. Negated ('not'/'NOT') associations are skipped. With
    *slim*, objects come from the object closure intersected with the slim
    set; with *map_identifiers*, subjects are re-mapped through their
    closure to the given prefix.
    """
    amap = {}
    logging.info("Translating docs to compact form. Slim={}".format(slim))
    for d in ds:
        self.map_doc(d, field_mapping, invert_subject_object=invert_subject_object)
        subject = d[M.SUBJECT]
        subject_label = d[M.SUBJECT_LABEL]
        # Optionally re-map the subject id via its equivalence closure.
        if map_identifiers is not None:
            if M.SUBJECT_CLOSURE in d:
                subject = self.map_id(subject, map_identifiers, d[M.SUBJECT_CLOSURE])
            else:
                logging.debug("NO SUBJECT CLOSURE IN: " + str(d))
        rel = d.get(M.RELATION)
        skip = False
        # Negated associations are dropped entirely.
        if rel == 'not' or rel == 'NOT':
            skip = True
        if isinstance(rel, list):
            if 'not' in rel or 'NOT' in rel:
                skip = True
            if len(rel) > 1:
                logging.warn(">1 relation: {}".format(rel))
            # Multiple relations collapse to a ';'-joined string key.
            rel = ";".join(rel)
        if skip:
            logging.debug("Skipping: {}".format(d))
            continue
        subject = self.make_canonical_identifier(subject)
        k = (subject, rel)
        if k not in amap:
            amap[k] = {'subject': subject,
                       'subject_label': subject_label,
                       'relation': rel,
                       'objects': []}
        if slim is not None and len(slim) > 0:
            mapped_objects = [x for x in d[M.OBJECT_CLOSURE] if x in slim]
            logging.debug("Mapped objects: {}".format(mapped_objects))
            amap[k]['objects'] += mapped_objects
        else:
            amap[k]['objects'].append(d[M.OBJECT])
    # Deduplicate the object lists (order is not preserved).
    for k in amap.keys():
        amap[k]['objects'] = list(set(amap[k]['objects']))
    return list(amap.values())
Translate golr association documents to a compact representation
15,951
def map_id(self, id, prefix, closure_list):
    """Return the first member of *closure_list* carrying *prefix*, or the
    original *id* when the closure holds no such equivalent."""
    wanted = prefix + ':'
    for eid in closure_list:
        if eid.startswith(wanted):
            return eid
    return id
Map identifiers based on an equivalence closure list .
15,952
def create(self, ontology=None, subject_category=None, object_category=None, evidence=None, taxon=None, relation=None, file=None, fmt=None, skim=True):
    """Create an AssociationSet, either from *file* or from the store.

    When *file* is given it is parsed directly; otherwise associations
    are bulk-fetched (cached) for the category pair and taxon.

    NOTE(review): *relation* is accepted but never used here -- confirm
    whether it should be forwarded to the fetch.
    """
    meta = AssociationSetMetadata(
        subject_category=subject_category,
        object_category=object_category,
        taxon=taxon)
    if file is not None:
        return self.create_from_file(file=file,
                                     fmt=fmt,
                                     ontology=ontology,
                                     meta=meta,
                                     skim=skim)
    logging.info("Fetching assocs from store")
    assocs = bulk_fetch_cached(subject_category=subject_category,
                               object_category=object_category,
                               evidence=evidence,
                               taxon=taxon)
    logging.info("Creating map for {} subjects".format(len(assocs)))
    amap = {}
    subject_label_map = {}
    for a in assocs:
        # NOTE(review): rel is computed but unused.
        rel = a['relation']
        subj = a['subject']
        subject_label_map[subj] = a['subject_label']
        amap[subj] = a['objects']
    aset = AssociationSet(ontology=ontology,
                          meta=meta,
                          subject_label_map=subject_label_map,
                          association_map=amap)
    return aset
creates an AssociationSet
15,953
def create_from_assocs(self, assocs, **args):
    """Build an AssociationSet from association objects.

    Negated associations are excluded from the association map, and all
    originals (negated included) are indexed by subject and by
    (subject, object).
    """
    amap = defaultdict(list)
    subject_label_map = {}
    for assoc in assocs:
        subject = assoc['subject']
        subject_label_map[subject['id']] = subject['label']
        if not assoc['negated']:
            amap[subject['id']].append(assoc['object']['id'])
    aset = AssociationSet(subject_label_map=subject_label_map,
                          association_map=amap, **args)
    aset.associations_by_subj = defaultdict(list)
    aset.associations_by_subj_obj = defaultdict(list)
    for assoc in assocs:
        s_id = assoc['subject']['id']
        o_id = assoc['object']['id']
        aset.associations_by_subj[s_id].append(assoc)
        aset.associations_by_subj_obj[(s_id, o_id)].append(assoc)
    return aset
Creates from a list of association objects
15,954
def create_from_file(self, file=None, fmt='gaf', skim=True, **args):
    """Create an AssociationSet from a GAF/GPAD/HPOA file.

    :param file: path or file-like object.
    :param fmt: 'gaf', 'gpad' or 'hpoa' (with or without leading dot);
        None means "guess from the file suffix".
    :param skim: when True, use the parser's fast skim() tuples; else
        fully parse the associations.
    :raises ValueError: when the format cannot be determined.
    """
    if fmt is not None and not fmt.startswith('.'):
        fmt = '.{}'.format(fmt)
    parsers = {
        '.gaf': GafParser,
        '.gpad': GpadParser,
        '.hpoa': HpoaParser,
    }
    if fmt is None:
        filename = file if isinstance(file, str) else file.name
        suffixes = pathlib.Path(filename).suffixes
        candidates = (p() for ext, p in parsers.items() if ext in suffixes)
    else:
        candidates = (p() for ext, p in parsers.items() if ext == fmt)
    try:
        parser = next(candidates)
    except StopIteration:
        # BUG FIX: the original only logged here and then crashed with an
        # UnboundLocalError on `parser`; fail with a clear exception.
        raise ValueError("Format not recognized: {}".format(fmt))
    logging.info("Parsing {} with {}/{}".format(file, fmt, parser))
    if skim:
        return self.create_from_tuples(parser.skim(file), **args)
    return self.create_from_assocs(parser.parse(file, skipheader=True), **args)
Creates from a file . If fmt is set to None then the file suffixes will be used to choose a parser .
15,955
def create_from_remote_file(self, group, snapshot=True, **args):
    """Create an AssociationSet from the remote snapshot GAF for *group*.

    NOTE: the *snapshot* flag is currently unused; the snapshot URL is
    always fetched.
    """
    import requests
    url = "http://snapshot.geneontology.org/annotations/{}.gaf.gz".format(group)
    headers = {'User-Agent': get_user_agent(modules=[requests],
                                            caller_name=__name__)}
    response = requests.get(url, stream=True, headers=headers)
    tuples = GafParser().skim(response.raw)
    return self.create_from_tuples(tuples, **args)
Creates from remote GAF
15,956
def render(ont, query_ids, args):
    """Write or display ontology *ont* restricted to *query_ids*, honoring
    the CLI options in *args* (slim, to, showdefs, render, outfile, ...).
    """
    if args.slim.find('m') > -1:
        logging.info("SLIMMING")
        # BUG FIX: the original passed the unbound local `g` to
        # get_minimal_subgraph (UnboundLocalError); the ontology is the
        # intended source graph.
        # NOTE(review): the result is still unused by the writer below --
        # confirm whether the minimal subgraph should replace `ont`.
        g = get_minimal_subgraph(ont, query_ids)
    w = GraphRenderer.create(args.to)
    if args.showdefs:
        w.config.show_text_definition = True
    if args.render:
        if 'd' in args.render:
            logging.info("Showing text defs")
            w.config.show_text_definition = True
    if args.outfile is not None:
        w.outfile = args.outfile
    w.write(ont, query_ids=query_ids, container_predicates=args.container_properties)
Writes or displays graph
15,957
def get_object_closure ( subject , object_category = None , ** kwargs ) : results = search_associations ( subject = subject , object_category = object_category , select_fields = [ ] , facet_fields = [ M . OBJECT_CLOSURE ] , facet_limit = - 1 , rows = 0 , ** kwargs ) return set ( results [ 'facet_counts' ] [ M . OBJECT_CLOSURE ] . keys ( ) )
Find all terms used to annotate subject plus ancestors
15,958
def namespace_to_taxon ( ) -> Dict [ str , Node ] : human_taxon = Node ( id = 'NCBITaxon:9606' , label = 'Homo sapiens' ) return { 'MGI' : Node ( id = 'NCBITaxon:10090' , label = 'Mus musculus' ) , 'MONDO' : human_taxon , 'OMIM' : human_taxon , 'MONARCH' : human_taxon , 'HGNC' : human_taxon , 'FlyBase' : Node ( id = 'NCBITaxon:7227' , label = 'Drosophila melanogaster' ) , 'WormBase' : Node ( id = 'NCBITaxon:6239' , label = 'Caenorhabditis elegans' ) , 'ZFIN' : Node ( id = 'NCBITaxon:7955' , label = 'Danio rerio' ) }
namespace to taxon mapping
15,959
def get_scigraph_nodes ( id_list ) -> Iterator [ Dict ] : scigraph = OntologyFactory ( ) . create ( 'scigraph:data' ) chunks = [ id_list [ i : i + 400 ] for i in range ( 0 , len ( list ( id_list ) ) , 400 ) ] for chunk in chunks : params = { 'id' : chunk , 'depth' : 0 } try : result_graph = scigraph . _neighbors_graph ( ** params ) for node in result_graph [ 'nodes' ] : yield node except JSONDecodeError as exception : raise ValueError ( exception . doc )
Queries scigraph neighbors to get a list of nodes back
15,960
def get_taxon ( id : str ) -> Optional [ Node ] : taxon = None namespace = id . split ( ":" ) [ 0 ] if namespace in namespace_to_taxon ( ) : taxon = namespace_to_taxon ( ) [ namespace ] return taxon
get taxon for id
15,961
def typed_node_from_id ( id : str ) -> TypedNode : filter_out_types = [ 'cliqueLeader' , 'Class' , 'Node' , 'Individual' , 'quality' , 'sequence feature' ] node = next ( get_scigraph_nodes ( [ id ] ) ) if 'lbl' in node : label = node [ 'lbl' ] else : label = None types = [ typ . lower ( ) for typ in node [ 'meta' ] [ 'types' ] if typ not in filter_out_types ] return TypedNode ( id = node [ 'id' ] , label = label , type = types [ 0 ] , taxon = get_taxon ( id ) )
Get typed node from id
15,962
def to_report_json ( self ) : return self . reporter . json ( self . n_lines , self . n_assocs , self . skipped )
Generate a summary in json format
15,963
def to_markdown ( self ) : json = self . to_report_json ( ) s = "# Group: {group} - Dataset: {dataset}\n" . format ( group = json [ "group" ] , dataset = json [ "dataset" ] ) s += "\n## SUMMARY\n\n" s += "This report generated on {}\n\n" . format ( datetime . date . today ( ) ) s += " * Associations: {}\n" . format ( json [ "associations" ] ) s += " * Lines in file (incl headers): {}\n" . format ( json [ "lines" ] ) s += " * Lines skipped: {}\n" . format ( json [ "skipped_lines" ] ) s += "## Header From Original Association File\n\n" s += "\n" . join ( [ "> {} " . format ( head ) for head in self . header ] ) s += "\n\n## Contents\n\n" for rule , messages in sorted ( json [ "messages" ] . items ( ) , key = lambda t : t [ 0 ] ) : any_suppress_tag_in_rule_metadata = any ( [ tag in self . config . rule_metadata . get ( rule , { } ) . get ( "tags" , [ ] ) for tag in self . config . suppress_rule_reporting_tags ] ) if self . config . rule_metadata and any_suppress_tag_in_rule_metadata : print ( "Skipping {rule_num} because the tag(s) '{tag}' are suppressed" . format ( rule_num = rule , tag = ", " . join ( self . config . suppress_rule_reporting_tags ) ) ) continue s += "[{rule}](#{rule})\n\n" . format ( rule = rule ) s += "\n## MESSAGES\n\n" for ( rule , messages ) in sorted ( json [ "messages" ] . items ( ) , key = lambda t : t [ 0 ] ) : any_suppress_tag_in_rule_metadata = any ( [ tag in self . config . rule_metadata . get ( rule , { } ) . get ( "tags" , [ ] ) for tag in self . config . suppress_rule_reporting_tags ] ) if self . config . rule_metadata and any_suppress_tag_in_rule_metadata : continue s += "### {rule}\n\n" . format ( rule = rule ) if rule != "other" and self . config . rule_metadata : s += "{title}\n\n" . format ( title = self . config . rule_metadata . get ( rule , { } ) . get ( "title" , "" ) ) s += "* total: {amount}\n" . format ( amount = len ( messages ) ) if len ( messages ) > 0 : s += "#### Messages\n" for message in messages : obj = " ({})" . 
format ( message [ "obj" ] ) if message [ "obj" ] else "" s += "* {level} - {type}: {message}{obj} -- `{line}`\n" . format ( level = message [ "level" ] , type = message [ "type" ] , message = message [ "message" ] , line = message [ "line" ] , obj = obj ) return s
Generate a summary in markdown format
15,964
def parse ( self , file , skipheader = False , outfile = None ) : associations = self . association_generator ( file , skipheader = skipheader , outfile = outfile ) a = list ( associations ) return a
Parse a line - oriented association file into a list of association dict objects
15,965
def association_generator ( self , file , skipheader = False , outfile = None ) -> Dict : file = self . _ensure_file ( file ) for line in file : parsed_result = self . parse_line ( line ) self . report . report_parsed_result ( parsed_result , outfile , self . config . filtered_evidence_file , self . config . filter_out_evidence ) for association in parsed_result . associations : if not skipheader or "header" not in association : yield association logging . info ( self . report . short_summary ( ) ) file . close ( )
Returns a generator that yields successive associations from file
15,966
def map_to_subset ( self , file , outfile = None , ontology = None , subset = None , class_map = None , relations = None ) : if subset is not None : logging . info ( "Creating mapping for subset: {}" . format ( subset ) ) class_map = ontology . create_slim_mapping ( subset = subset , relations = relations ) if class_map is None : raise ValueError ( "Neither class_map not subset is set" ) col = self . ANNOTATION_CLASS_COLUMN file = self . _ensure_file ( file ) tuples = [ ] for line in file : if line . startswith ( "!" ) : continue vals = line . split ( "\t" ) logging . info ( "LINE: {} VALS: {}" . format ( line , vals ) ) if len ( vals ) < col : raise ValueError ( "Line: {} has too few cols, expect class id in col {}" . format ( line , col ) ) cid = vals [ col ] if cid not in class_map or len ( class_map [ cid ] ) == 0 : self . report . error ( line , Report . UNMAPPED_ID , cid ) continue else : for mcid in class_map [ cid ] : vals [ col ] = mcid line = "\t" . join ( vals ) if outfile is not None : outfile . write ( line ) else : print ( line )
Map a file to a subset writing out results
15,967
def get_config ( ) : if session . config is None : path = session . default_config_path if os . path . isfile ( path ) : logging . info ( "LOADING FROM: {}" . format ( path ) ) session . config = load_config ( path ) else : session . config = Config ( ) logging . info ( "using default session: {}, path does not exist: {}" . format ( session , path ) ) else : logging . info ( "Using pre-loaded object: {}" . format ( session . config ) ) return session . config
Return configuration for current session .
15,968
def set_config ( path ) : logging . info ( "LOADING FROM: {}" . format ( path ) ) session . config = load_config ( path ) return session . config
Set configuration for current session .
15,969
def get_solr_search_url ( self , use_amigo = False ) : url = self . endpoint_url ( self . solr_search ) if use_amigo : url = self . endpoint_url ( self . amigo_solr_search ) return url
Return solr URL to be used for lexical entity searches
15,970
def download_source_gafs ( group_metadata , target_dir , exclusions = [ ] , base_download_url = None ) : gaf_urls = [ ( data , data [ "source" ] ) for data in group_metadata [ "datasets" ] if data [ "type" ] == "gaf" and data [ "dataset" ] not in exclusions ] click . echo ( "Found {}" . format ( ", " . join ( [ kv [ 0 ] [ "dataset" ] for kv in gaf_urls ] ) ) ) downloaded_paths = [ ] for dataset_metadata , gaf_url in gaf_urls : dataset = dataset_metadata [ "dataset" ] path = download_a_dataset_source ( group_metadata [ "id" ] , dataset_metadata , target_dir , gaf_url , base_download_url = base_download_url ) if dataset_metadata [ "compression" ] == "gzip" : unzipped = os . path . splitext ( path ) [ 0 ] unzip ( path , unzipped ) path = unzipped else : zipup ( path ) downloaded_paths . append ( ( dataset_metadata , path ) ) return downloaded_paths
This looks at a group metadata dictionary and downloads each GAF source that is not in the exclusions list . For each downloaded file keep track of the path of the file . If the file is zipped it will unzip it here . This function returns a list of tuples of the dataset dictionary mapped to the downloaded source path .
15,971
def get_annotation_sufficiency ( self , profile : List [ str ] , negated_classes : List [ str ] , categories : Optional [ List ] = None , negation_weight : Optional [ float ] = .25 , category_weight : Optional [ float ] = .5 ) -> AnnotationSufficiency : if categories is None : categories = [ enum . value for enum in HpoUpperLevel ] ic_map = self . ic_store . get_profile_ic ( profile + negated_classes ) simple_score = self . _get_simple_score ( profile , negated_classes , self . ic_store . statistics . mean_mean_ic , self . ic_store . statistics . max_max_ic , self . ic_store . statistics . mean_sum_ic , negation_weight , ic_map ) categorical_score = self . _get_categorical_score ( profile , negated_classes , categories , negation_weight , ic_map ) scaled_score = self . _get_scaled_score ( simple_score , categorical_score , category_weight ) return AnnotationSufficiency ( simple_score = simple_score , scaled_score = scaled_score , categorical_score = categorical_score )
Given a list of individuals return the simple scaled and categorical scores
15,972
def _get_scaled_score ( simple_score : float , categorical_score : float , category_weight : Optional [ float ] = .5 ) -> float : return np . average ( [ simple_score , categorical_score ] , weights = [ 1 , category_weight ] )
Scaled score is the weighted average of the simple score and categorical score
15,973
def _get_categorical_score ( self , profile : List , negated_classes : List , categories : List , negation_weight : Optional [ float ] = 1 , ic_map : Optional [ Dict [ str , float ] ] = None ) -> float : if ic_map is None : ic_map = self . ic_store . get_profile_ic ( profile + negated_classes ) scores = [ ] for cat in categories : if cat not in self . ic_store . category_statistics : raise ValueError ( "statistics for {} not indexed" . format ( cat ) ) pos_profile = [ cls for cls in profile if cls in self . ic_store . category_statistics [ cat ] . descendants ] neg_profile = [ cls for cls in negated_classes if cls in self . ic_store . category_statistics [ cat ] . descendants ] scores . append ( self . _get_simple_score ( pos_profile , neg_profile , self . ic_store . category_statistics [ cat ] . mean_mean_ic , self . ic_store . category_statistics [ cat ] . max_max_ic , self . ic_store . category_statistics [ cat ] . mean_sum_ic , negation_weight , ic_map ) ) return mean ( scores )
The average of the simple scores across a list of categories
15,974
def write_entity ( self , entity ) : db , db_object_id = self . _split_prefix ( entity ) taxon = normalize_taxon ( entity [ "taxon" ] [ "id" ] ) vals = [ db , db_object_id , entity . get ( 'label' ) , entity . get ( 'full_name' ) , entity . get ( 'synonyms' ) , entity . get ( 'type' ) , taxon , entity . get ( 'parents' ) , entity . get ( 'xrefs' ) , entity . get ( 'properties' ) ] self . _write_row ( vals )
Write a single entity to a line in the output file
15,975
def search ( self , id_list : Iterable , negated_classes : Iterable , limit : Optional [ int ] , method : Optional ) -> List [ SimResult ] : pass
Given an input list of classes searches for similar lists of classes and provides a ranked list of matches
15,976
def filtered_search ( self , id_list : Iterable , negated_classes : Iterable , limit : Optional [ int ] , taxon_filter : Optional , category_filter : Optional , method : Optional ) -> SimResult : pass
Given an input iterable of classes or individuals provides a ranking of similar profiles
15,977
def index_ontology ( self , ont ) : self . merged_ontology . merge ( [ ont ] ) syns = ont . all_synonyms ( include_label = True ) include_id = self . _is_meaningful_ids ( ) logging . info ( "Include IDs as synonyms: {}" . format ( include_id ) ) if include_id : for n in ont . nodes ( ) : v = n if v . startswith ( 'http' ) : v = re . sub ( '.*/' , '' , v ) v = re . sub ( '.*#' , '' , v ) syns . append ( Synonym ( n , val = v , pred = 'label' ) ) logging . info ( "Indexing {} syns in {}" . format ( len ( syns ) , ont ) ) logging . info ( "Distinct lexical values: {}" . format ( len ( self . lmap . keys ( ) ) ) ) for syn in syns : self . index_synonym ( syn , ont ) for nid in ont . nodes ( ) : self . id_to_ontology_map [ nid ] . append ( ont )
Adds an ontology to the index
15,978
def index_synonym ( self , syn , ont ) : if not syn . val : if syn . pred == 'label' : if not self . _is_meaningful_ids ( ) : if not ont . is_obsolete ( syn . class_id ) : pass else : logging . warning ( "Incomplete syn: {}" . format ( syn ) ) return if self . exclude_obsolete and ont . is_obsolete ( syn . class_id ) : return syn . ontology = ont prefix , _ = ont . prefix_fragment ( syn . class_id ) v = syn . val caps_match = re . match ( '[A-Z]+' , v ) if caps_match : if caps_match . span ( ) [ 1 ] >= len ( v ) / 3 : syn . is_abbreviation ( True ) if not re . match ( '.*[a-zA-Z]' , v ) : if prefix != 'CHEBI' : logging . warning ( 'Ignoring suspicous synonym: {}' . format ( syn ) ) return v = self . _standardize_label ( v ) wsmap = { } for w , s in self . wsmap . items ( ) : wsmap [ w ] = s for ss in self . _get_config_val ( prefix , 'synsets' , [ ] ) : wsmap [ ss [ 'synonym' ] ] = ss [ 'word' ] nv = self . _normalize_label ( v , wsmap ) self . _index_synonym_val ( syn , v ) nweight = self . _get_config_val ( prefix , 'normalized_form_confidence' , 0.8 ) if nweight > 0 and not syn . is_abbreviation ( ) : if nv != v : nsyn = Synonym ( syn . class_id , val = syn . val , pred = syn . pred , lextype = syn . lextype , ontology = ont , confidence = syn . confidence * nweight ) self . _index_synonym_val ( nsyn , nv )
Index a synonym
15,979
def _normalize_label ( self , s , wsmap ) : toks = [ ] for tok in list ( set ( self . npattern . sub ( ' ' , s ) . split ( ' ' ) ) ) : if tok in wsmap : tok = wsmap [ tok ] if tok != "" : toks . append ( tok ) toks . sort ( ) return " " . join ( toks )
normalized form of a synonym
15,980
def _sim ( self , xg , ancs1 , ancs2 , pfx1 , pfx2 ) : xancs1 = set ( ) for a in ancs1 : if a in xg : for n in xg . neighbors ( a ) : pfx = self . _id_to_ontology ( n ) if pfx == pfx2 : xancs1 . add ( n ) logging . debug ( 'SIM={}/{} ## {}' . format ( len ( xancs1 . intersection ( ancs2 ) ) , len ( xancs1 ) , xancs1 . intersection ( ancs2 ) , xancs1 ) ) n_shared = len ( xancs1 . intersection ( ancs2 ) ) n_total = len ( xancs1 ) return ( 1 + n_shared ) / ( 1 + n_total ) , n_shared , n_total
Compare two lineages
15,981
def compare_to_xrefs ( self , xg1 , xg2 ) : ont = self . merged_ontology for ( i , j , d ) in xg1 . edges ( data = True ) : ont_left = self . _id_to_ontology ( i ) ont_right = self . _id_to_ontology ( j ) unique_lr = True num_xrefs_left = 0 same_left = False if i in xg2 : for j2 in xg2 . neighbors ( i ) : ont_right2 = self . _id_to_ontology ( j2 ) if ont_right2 == ont_right : unique_lr = False num_xrefs_left += 1 if j2 == j : same_left = True unique_rl = True num_xrefs_right = 0 same_right = False if j in xg2 : for i2 in xg2 . neighbors ( j ) : ont_left2 = self . _id_to_ontology ( i2 ) if ont_left2 == ont_left : unique_rl = False num_xrefs_right += 1 if i2 == i : same_right = True ( x , y ) = d [ 'idpair' ] xg1 [ x ] [ y ] [ 'left_novel' ] = num_xrefs_left == 0 xg1 [ x ] [ y ] [ 'right_novel' ] = num_xrefs_right == 0 xg1 [ x ] [ y ] [ 'left_consistent' ] = same_left xg1 [ x ] [ y ] [ 'right_consistent' ] = same_right
Compares a base xref graph with another one
15,982
def assign_best_matches ( self , xg ) : logging . info ( "assigning best matches for {} nodes" . format ( len ( xg . nodes ( ) ) ) ) for i in xg . nodes ( ) : xrefmap = self . _neighborscores_by_ontology ( xg , i ) for ( ontid , score_node_pairs ) in xrefmap . items ( ) : score_node_pairs . sort ( reverse = True ) ( best_score , best_node ) = score_node_pairs [ 0 ] logging . info ( "BEST for {}: {} in {} from {}" . format ( i , best_node , ontid , score_node_pairs ) ) edge = xg [ i ] [ best_node ] dirn = self . _dirn ( edge , i , best_node ) best_kwd = 'best_' + dirn if len ( score_node_pairs ) == 1 or score_node_pairs [ 0 ] > score_node_pairs [ 1 ] : edge [ best_kwd ] = 2 else : edge [ best_kwd ] = 1 for ( score , j ) in score_node_pairs : edge_ij = xg [ i ] [ j ] dirn_ij = self . _dirn ( edge_ij , i , j ) edge_ij [ 'cpr_' + dirn_ij ] = score / sum ( [ s for s , _ in score_node_pairs ] ) for ( i , j , edge ) in xg . edges ( data = True ) : rs = 0 if 'best_fwd' in edge and 'best_rev' in edge : rs = edge [ 'best_fwd' ] * edge [ 'best_rev' ] edge [ 'reciprocal_score' ] = rs edge [ 'cpr' ] = edge [ 'cpr_fwd' ] * edge [ 'cpr_rev' ]
For each node in the xref graph tag best match edges
15,983
def _best_match_syn ( self , sx , sys , scope_map ) : SUBSTRING_WEIGHT = 0.2 WBEST = None sbest = None sxv = self . _standardize_label ( sx . val ) sxp = self . _id_to_ontology ( sx . class_id ) for sy in sys : syv = self . _standardize_label ( sy . val ) syp = self . _id_to_ontology ( sy . class_id ) W = None if sxv == syv : confidence = sx . confidence * sy . confidence if sx . is_abbreviation ( ) or sy . is_abbreviation : confidence *= self . _get_config_val ( sxp , 'abbreviation_confidence' , 0.5 ) confidence *= self . _get_config_val ( syp , 'abbreviation_confidence' , 0.5 ) W = scope_map [ sx . scope ( ) ] [ sy . scope ( ) ] + logit ( confidence / 2 ) elif sxv in syv : W = np . array ( ( - SUBSTRING_WEIGHT , SUBSTRING_WEIGHT , 0 , 0 ) ) elif syv in sxv : W = np . array ( ( SUBSTRING_WEIGHT , - SUBSTRING_WEIGHT , 0 , 0 ) ) if W is not None : if WBEST is None or max ( abs ( W ) ) > max ( abs ( WBEST ) ) : WBEST = W sbest = sy return WBEST , sbest
The best match is determined by the highest magnitude weight
15,984
def grouped_mappings ( self , id ) : g = self . get_xref_graph ( ) m = { } for n in g . neighbors ( id ) : [ prefix , local ] = n . split ( ':' ) if prefix not in m : m [ prefix ] = [ ] m [ prefix ] . append ( n ) return m
return all mappings for a node grouped by ID prefix
15,985
def cliques ( self , xg ) : g = nx . DiGraph ( ) for ( x , y ) in self . merged_ontology . get_graph ( ) . edges ( ) : g . add_edge ( x , y ) for ( x , y ) in xg . edges ( ) : g . add_edge ( x , y ) g . add_edge ( y , x ) return list ( strongly_connected_components ( g ) )
Return all equivalence set cliques assuming each edge in the xref graph is treated as equivalent and all edges in ontology are subClassOf
15,986
def add_triples ( self , ontol ) : rg = self . rdfgraph g = ontol . get_graph ( ) typemap = { } inds = rg . subjects ( RDF . type , OWL . NamedIndividual ) for s in inds : for ( s , p , o ) in rg . triples ( ( s , None , None ) ) : s_id = id ( s ) p_id = id ( p ) g . add_node ( s_id ) if isinstance ( o , URIRef ) : o_id = id ( o ) if p == RDF . type : if o != OWL . NamedIndividual : if s_id not in typemap : typemap [ s_id ] = [ ] typemap [ s_id ] . append ( o_id ) else : g . add_edge ( o_id , s_id , pred = p_id ) for s in typemap . keys ( ) : g . nodes [ s ] [ 'types' ] = typemap [ s ] if self . tbox_ontology is not None : if 'label' not in g . nodes [ s ] : g . nodes [ s ] [ 'label' ] = ";" . join ( [ self . tbox_ontology . label ( x ) for x in typemap [ s ] if self . tbox_ontology . label ( x ) is not None ] )
Adds triples to an ontology object .
15,987
def write ( self , ontol , ** args ) : s = self . render ( ontol , ** args ) if self . outfile is None : print ( s ) else : f = open ( self . outfile , 'w' ) f . write ( s ) f . close ( )
Write a ontology object
15,988
def render_subgraph ( self , ontol , nodes , ** args ) : subont = ontol . subontology ( nodes , ** args ) return self . render ( subont , ** args )
Render a ontology object after inducing a subgraph
15,989
def write_subgraph ( self , ontol , nodes , ** args ) : subont = ontol . subontology ( nodes , ** args ) self . write ( subont , ** args )
Write a ontology object after inducing a subgraph
15,990
def render_relation ( self , r , ** args ) : if r is None : return "." m = self . config . relsymbolmap if r in m : return m [ r ] return r
Render an object property
15,991
def render_noderef ( self , ontol , n , query_ids = None , ** args ) : if query_ids is None : query_ids = [ ] marker = "" if n in query_ids : marker = " * " label = ontol . label ( n ) s = None if label is not None : s = '{} ! {}{}' . format ( n , label , marker ) else : s = str ( n ) if self . config . show_text_definition : td = ontol . text_definition ( n ) if td : s += ' "{}"' . format ( td . val ) return s
Render a node object
15,992
def create ( fmt ) : w = None if fmt == 'tree' : w = AsciiTreeGraphRenderer ( ) elif fmt == 'dot' : w = DotGraphRenderer ( image_format = 'dot' ) elif fmt == 'png' : w = DotGraphRenderer ( image_format = 'png' ) elif fmt == 'ndot' : w = NativeDotGraphRenderer ( ) elif fmt == 'obo' : w = OboFormatGraphRenderer ( ) elif fmt == 'obog' : w = OboJsonGraphRenderer ( ) else : w = SimpleListGraphRenderer ( ) return w
Creates a GraphRenderer
15,993
def get_user_agent ( name = "ontobio" , version = ontobio_version , modules = None , caller_name = None ) : user_agent_array = [ "{}/{}" . format ( name , version ) ] if modules : module_info_array = [ ] for m in modules : mod_name = m . __name__ mod_version = None if hasattr ( m , 'get_version' ) : mod_version = m . get_version ( ) else : mod_version = m . __version__ module_info_array . append ( "{}/{}" . format ( mod_name , mod_version ) ) if caller_name : module_info_array . append ( caller_name ) user_agent_array . append ( "({})" . format ( '; ' . join ( module_info_array ) ) ) else : if caller_name : user_agent_array . append ( "({})" . format ( caller_name ) ) return ' ' . join ( user_agent_array )
Create a User - Agent string
15,994
def search ( self , id_list : List , negated_classes : List , limit : Optional [ int ] , method : Optional ) -> List [ SimResult ] : raise NotImplementedError
Given an input list of classes or individuals provides a ranking of similar profiles
15,995
def convert_association ( self , association : Association ) -> Entity : if "header" not in association or association [ "header" ] == False : gpi_obj = { 'id' : association [ "subject" ] [ "id" ] , 'label' : association [ "subject" ] [ "label" ] , 'full_name' : association [ "subject" ] [ "fullname" ] , 'synonyms' : association [ "subject" ] [ "synonyms" ] , 'type' : association [ "subject" ] [ "type" ] , 'parents' : "" , 'xrefs' : "" , 'taxon' : { 'id' : association [ "subject" ] [ "taxon" ] [ "id" ] } } return Entity ( gpi_obj ) return None
id is already join ed in both the Association and the Entity so we don t have to worry about what that looks like . We assume it s correct .
15,996
def get_filtered_graph ( self , relations = None , prefix = None ) : self . all_synonyms ( ) self . all_obsoletes ( ) srcg = self . get_graph ( ) if prefix is not None : srcg = srcg . subgraph ( [ n for n in srcg . nodes ( ) if n . startswith ( prefix + ":" ) ] ) if relations is None : logger . info ( "No filtering on " + str ( self ) ) return srcg logger . info ( "Filtering {} for {}" . format ( self , relations ) ) g = nx . MultiDiGraph ( ) logger . info ( "copying nodes" ) for ( n , d ) in srcg . nodes ( data = True ) : g . add_node ( n , ** d ) logger . info ( "copying edges" ) num_edges = 0 for ( x , y , d ) in srcg . edges ( data = True ) : if d [ 'pred' ] in relations : num_edges += 1 g . add_edge ( x , y , ** d ) logger . info ( "Filtered edges: {}" . format ( num_edges ) ) return g
Returns a networkx graph for the whole ontology for a subset of relations
15,997
def merge ( self , ontologies ) : if self . xref_graph is None : self . xref_graph = nx . MultiGraph ( ) logger . info ( "Merging source: {} xrefs: {}" . format ( self , len ( self . xref_graph . edges ( ) ) ) ) for ont in ontologies : logger . info ( "Merging {} into {}" . format ( ont , self ) ) g = self . get_graph ( ) srcg = ont . get_graph ( ) for n in srcg . nodes ( ) : g . add_node ( n , ** srcg . node [ n ] ) for ( o , s , m ) in srcg . edges ( data = True ) : g . add_edge ( o , s , ** m ) if ont . xref_graph is not None : for ( o , s , m ) in ont . xref_graph . edges ( data = True ) : self . xref_graph . add_edge ( o , s , ** m )
Merges specified ontology into current ontology
15,998
def subontology ( self , nodes = None , minimal = False , relations = None ) : g = None if nodes is not None : g = self . subgraph ( nodes ) else : g = self . get_graph ( ) if minimal : from ontobio . slimmer import get_minimal_subgraph g = get_minimal_subgraph ( g , nodes ) ont = Ontology ( graph = g , xref_graph = self . xref_graph ) if relations is not None : g = ont . get_filtered_graph ( relations ) ont = Ontology ( graph = g , xref_graph = self . xref_graph ) return ont
Return a new ontology that is an extract of this one
15,999
def create_slim_mapping ( self , subset = None , subset_nodes = None , relations = None , disable_checks = False ) : if subset is not None : subset_nodes = self . extract_subset ( subset ) logger . info ( "Extracting subset: {} -> {}" . format ( subset , subset_nodes ) ) if subset_nodes is None or len ( subset_nodes ) == 0 : raise ValueError ( "subset nodes is blank" ) subset_nodes = set ( subset_nodes ) logger . debug ( "SUBSET: {}" . format ( subset_nodes ) ) subont = self if relations is not None : subont = self . subontology ( relations = relations ) if not disable_checks : for r in subont . relations_used ( ) : if r != 'subClassOf' and r != 'BFO:0000050' and r != 'subPropertyOf' : raise ValueError ( "Not safe to propagate over a graph with edge type: {}" . format ( r ) ) m = { } for n in subont . nodes ( ) : ancs = subont . ancestors ( n , reflexive = True ) ancs_in_subset = subset_nodes . intersection ( ancs ) m [ n ] = list ( subont . filter_redundant ( ancs_in_subset ) ) return m
Create a dictionary that maps between all nodes in an ontology to a subset