idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
16,000
def filter_redundant(self, ids):
    """Return the subset of `ids` that is not an ancestor of any other id in the list."""
    remaining = set(ids)
    for i in ids:
        remaining.difference_update(self.ancestors(i, reflexive=False))
    return remaining
Return all non - redundant ids from a list
16,001
def extract_subset(self, subset, contract=True):
    """Return all node ids whose subsets include `subset`."""
    matches = []
    for n in self.nodes():
        if subset in self.subsets(n, contract=contract):
            matches.append(n)
    return matches
Return all nodes in a subset .
16,002
def subsets(self, nid, contract=True):
    """Retrieve subset ids for a class or ontology object.

    Returns a (possibly empty) list of subset ids; when `contract` is True,
    each id is passed through self._contract_subset first.
    """
    # validate the node exists (retained from original; may raise for unknown ids)
    self.node(nid)
    meta = self._meta(nid)
    # original assigned an unused `n` and an unconditional dead `subsets = []`
    subsets = meta.get('subsets', [])
    if contract:
        subsets = [self._contract_subset(s) for s in subsets]
    return subsets
Retrieves subset ids for a class or ontology object
16,003
def prefixes(self):
    """List all distinct prefixes used by nodes in this ontology."""
    found = {self.prefix(n) for n in self.nodes()}
    found.discard(None)
    return list(found)
list all prefixes used
16,004
def relations_used(self):
    """Return the list of all relation (predicate) ids used on edges."""
    preds = {attrs['pred'] for (_, _, attrs) in self.get_graph().edges(data=True)}
    return list(preds)
Return list of all relations used to connect edges
16,005
def child_parent_relations(self, subj, obj, graph=None):
    """Get the set of relationship type ids linking `subj` to its parent `obj`."""
    if graph is None:
        graph = self.get_graph()
    preds = {attrs['pred'] for attrs in graph[obj][subj].values()}
    logger.debug('{}->{} = {}'.format(subj, obj, preds))
    return preds
Get all relationship type ids between a subject and a parent .
16,006
def parents(self, node, relations=None):
    """Return all direct parents of `node`, optionally filtered by relation ids."""
    g = self.get_graph()
    if node not in g:
        return []
    all_parents = list(g.predecessors(node))
    if relations is None:
        return all_parents
    wanted = set(relations)
    return [p for p in all_parents
            if len(self.child_parent_relations(node, p, graph=g).intersection(wanted)) > 0]
Return all direct parents of specified node .
16,007
def children(self, node, relations=None):
    """Return all direct children of `node`, optionally filtered by relation ids."""
    g = self.get_graph()
    if node not in g:
        return []
    all_children = list(g.successors(node))
    if relations is None:
        return all_children
    wanted = set(relations)
    return [c for c in all_children
            if len(self.child_parent_relations(c, node, graph=g).intersection(wanted)) > 0]
Return all direct children of specified node .
16,008
def ancestors ( self , node , relations = None , reflexive = False ) : if reflexive : ancs = self . ancestors ( node , relations , reflexive = False ) ancs . append ( node ) return ancs g = None if relations is None : g = self . get_graph ( ) else : g = self . get_filtered_graph ( relations ) if node in g : return list ( nx . ancestors ( g , node ) ) else : return [ ]
Return all ancestors of specified node .
16,009
def descendants ( self , node , relations = None , reflexive = False ) : if reflexive : decs = self . descendants ( node , relations , reflexive = False ) decs . append ( node ) return decs g = None if relations is None : g = self . get_graph ( ) else : g = self . get_filtered_graph ( relations ) if node in g : return list ( nx . descendants ( g , node ) ) else : return [ ]
Returns all descendants of specified node .
16,010
def get_roots(self, relations=None, prefix=None):
    """Return nodes that have no parents but at least one child."""
    g = self.get_filtered_graph(relations=relations, prefix=prefix)
    def _is_root(n):
        has_parent = len(list(g.predecessors(n))) > 0
        has_child = len(list(g.successors(n))) > 0
        return (not has_parent) and has_child
    return [n for n in g.nodes() if _is_root(n)]
Get all nodes that lack parents
16,011
def get_level(self, level, relations=None, **args):
    """Return all nodes exactly `level` steps below the root nodes."""
    g = self.get_filtered_graph(relations)
    nodes = self.get_roots(relations=relations, **args)
    for depth in range(level):
        logger.info(" ITERATING TO LEVEL: {} NODES: {}".format(depth, nodes))
        next_nodes = []
        for n in nodes:
            next_nodes.extend(g.successors(n))
        nodes = next_nodes
    logger.info(" FINAL: {}".format(nodes))
    return nodes
Get all nodes at a particular level
16,012
def parent_index(self, relations=None):
    """Return a list of [node, parent1, parent2, ...] entries for every node."""
    if relations is None:
        g = self.get_graph()
    else:
        g = self.get_filtered_graph(relations)
    return [[n] + list(g.predecessors(n)) for n in g]
Returns a mapping of nodes to all direct parents
16,013
def text_definition(self, nid):
    """Retrieve the text definition for a class or relation id.

    Returns a TextDefinition object, or None if the node has no definition.
    (Removed the unused `tdefs` accumulator from the original.)
    """
    meta = self._meta(nid)
    if 'definition' in meta:
        return TextDefinition(nid, **meta['definition'])
    return None
Retrieves logical definitions for a class or relation id
16,014
def logical_definitions(self, nid):
    """Retrieve logical definitions whose defined class is `nid`."""
    ldefs = self.all_logical_definitions
    if ldefs is None:
        return []
    return [ld for ld in ldefs if ld.class_id == nid]
Retrieves logical definitions for a class id
16,015
def get_property_chain_axioms(self, nid):
    """Retrieve property chain axioms whose predicate is `nid`."""
    pcas = self.all_property_chain_axioms
    if pcas is None:
        return []
    return [pca for pca in pcas if pca.predicate_id == nid]
Retrieves property chain axioms for a class id
16,016
def synonyms(self, nid, include_label=False):
    """Retrieve Synonym objects for a class; optionally add the label as a pseudo-synonym."""
    node = self.node(nid)
    syns = []
    meta = node.get('meta', {})
    for obj in meta.get('synonyms', []):
        syns.append(Synonym(nid, **obj))
    if include_label:
        syns.append(Synonym(nid, val=self.label(nid), pred='label'))
    return syns
Retrieves synonym objects for a class
16,017
def add_node(self, id, label=None, type='CLASS', meta=None):
    """Add a new node to the ontology graph."""
    meta = {} if meta is None else meta
    self.get_graph().add_node(id, label=label, type=type, meta=meta)
Add a new node to the ontology
16,018
def inline_xref_graph(self):
    """Copy xref_graph contents into each node's inline meta object."""
    xg = self.xref_graph
    for n in self.nodes():
        if n not in xg:
            continue
        xref_objs = [{'val': x} for x in xg.neighbors(n)]
        self._add_meta_element(n, 'xrefs', xref_objs)
Copy contents of xref_graph to inlined meta object for each node
16,019
def add_parent(self, id, pid, relation='subClassOf'):
    """Add an edge making `pid` a parent of `id` via `relation`."""
    self.get_graph().add_edge(pid, id, pred=relation)
Add a new edge to the ontology
16,020
def add_xref(self, id, xref):
    """Add an xref edge between `xref` and `id`, creating the xref graph if needed."""
    if self.xref_graph is None:
        self.xref_graph = nx.MultiGraph()
    self.xref_graph.add_edge(xref, id)
Adds an xref to the xref graph
16,021
def add_synonym(self, syn):
    """Record a synonym on the node identified by syn.class_id."""
    node = self.node(syn.class_id)
    meta = node.setdefault('meta', {})
    meta.setdefault('synonyms', []).append(syn.as_dict())
Adds a synonym for a node
16,022
def add_to_subset(self, id, s):
    """Add node `id` to subset `s` via its inline meta object."""
    node = self.node(id)
    meta = node.setdefault('meta', {})
    meta.setdefault('subsets', []).append(s)
Adds a node to a subset
16,023
def all_synonyms(self, include_label=False):
    """Retrieve synonyms across all nodes.

    Uses extend() instead of the original repeated list concatenation,
    which was quadratic in the total number of synonyms.
    """
    syns = []
    for n in self.nodes():
        syns.extend(self.synonyms(n, include_label=include_label))
    return syns
Retrieves all synonyms
16,024
def label(self, nid, id_if_null=False):
    """Fetch the label for a node; fall back to the id itself when id_if_null."""
    fallback = nid if id_if_null else None
    g = self.get_graph()
    if nid not in g:
        return fallback
    n = g.node[nid]  # networkx 1.x/2.x-era node-attribute access, kept as-is
    return n['label'] if 'label' in n else fallback
Fetches label for a node
16,025
def xrefs(self, nid, bidirectional=False):
    """Fetch xrefs for a node; restrict to outgoing xrefs unless bidirectional."""
    xg = self.xref_graph
    if xg is None:
        return []
    if nid not in xg:
        return []
    if bidirectional:
        return list(xg.neighbors(nid))
    return [x for x in xg.neighbors(nid) if xg[nid][x][0]['source'] == nid]
Fetches xrefs for a node
16,026
def resolve_names(self, names, synonyms=False, **args):
    """Return identifiers matching a list of labels and/or identifiers.

    Inputs that already look like CURIEs (prefix:localid) pass through
    unchanged; other inputs are matched against labels and, optionally,
    against synonyms.
    """
    g = self.get_graph()
    resolved = []
    for name in names:
        logger.debug("Searching for {} syns={}".format(name, synonyms))
        if len(name.split(":")) == 2:
            resolved.append(name)
            continue
        matches = {nid for nid in g.nodes()
                   if self._is_match(self.label(nid), name, **args)}
        if synonyms:
            logger.debug("Searching syns for {}".format(names))
            for nid in g.nodes():
                if any(self._is_match(s.val, name, **args)
                       for s in self.synonyms(nid)):
                    matches.add(nid)
        resolved += list(matches)
    return resolved
Returns a list of identifiers based on an input list of labels and identifiers.
16,027
def search_golr_wrap(id, category, **args):
    """Search associations in both directions (id as object, then as subject)."""
    assocs1, facets1 = search_compact_wrap(object=id, subject_category=category, **args)
    assocs2, facets2 = search_compact_wrap(subject=id, object_category=category, **args)
    # prefer the reverse-direction facets when that search returned results
    facets = facets2 if len(assocs2) > 0 else facets1
    return assocs1 + assocs2, facets
performs searches in both directions
16,028
def load_gpi(self, gpi_path):
    """Load the GPI file from config.gpi_authority_path into a dict keyed by entity id.

    NOTE(review): the `gpi_path` parameter is never used; the path always comes
    from self.config.gpi_authority_path — confirm whether that is intentional.
    Returns None when no gpi_authority_path is configured.
    """
    if self.config.gpi_authority_path is not None:
        gpis = dict()
        parser = entityparser.GpiParser()
        with open(self.config.gpi_authority_path) as gpi_f:
            entities = parser.parse(file=gpi_f)
            for entity in entities:
                gpis[entity["id"]] = {
                    "symbol": entity["label"],
                    "name": entity["full_name"],
                    "synonyms": entitywriter.stringify(entity["synonyms"]),
                    "type": entity["type"]
                }
            return gpis
    return None
Loads a GPI as a file from the config . gpi_authority_path
16,029
def parse(self, file, outfile=None):
    """Parse a line-oriented entity file into a list of entity dict objects.

    Comment lines (starting with "!") and successfully parsed lines are
    optionally echoed to `outfile`; lines that fail to parse are counted in
    the report and skipped. The input file handle is closed when done.
    """
    file = self._ensure_file(file)
    ents = []
    skipped = []
    n_lines = 0
    for line in file:
        n_lines += 1
        if line.startswith("!"):
            # pass comment/header lines through to outfile unchanged
            if outfile is not None:
                outfile.write(line)
            continue
        line = line.strip("\n")
        if line == "":
            logging.warning("EMPTY LINE")
            continue
        parsed_line, new_ents = self.parse_line(line)
        if self._skipping_line(new_ents):
            logging.warning("SKIPPING: {}".format(line))
            skipped.append(line)
        else:
            ents += new_ents
            if outfile is not None:
                outfile.write(parsed_line + "\n")
    self.report.skipped += len(skipped)
    self.report.n_lines += n_lines
    logging.info("Parsed {} ents from {} lines. Skipped: {}".format(
        len(ents), n_lines, len(skipped)))
    file.close()
    return ents
Parse a line - oriented entity file into a list of entity dict objects
16,030
def parse_line(self, line):
    """Parse a single tab-separated GPI line.

    Returns (line, [entity_dict]) on success, or (line, []) when the line has
    too few columns or an invalid id. Lines with 7-9 columns are padded to 10.
    """
    vals = line.split("\t")
    if len(vals) < 7:
        self.report.error(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "")
        return line, []
    if len(vals) < 10 and len(vals) >= 7:
        # pad optional trailing columns so the 10-way unpack below succeeds
        missing_columns = 10 - len(vals)
        vals += ["" for i in range(missing_columns)]
    [db, db_object_id, db_object_symbol, db_object_name, db_object_synonym,
     db_object_type, taxon, parent_object_id, xrefs, properties] = vals
    split_line = assocparser.SplitLine(line=line, values=vals, taxon=taxon)
    id = self._pair_to_id(db, db_object_id)
    if not self._validate_id(id, split_line, context=assocparser.Report):
        return line, []
    # pipe-separated multi-valued fields; empty string means "no values"
    synonyms = db_object_synonym.split("|")
    if db_object_synonym == "":
        synonyms = []
    parents = parent_object_id.split("|")
    if parent_object_id == "":
        parents = []
    else:
        parents = [self._normalize_id(x) for x in parents]
    for p in parents:
        self._validate_id(p, split_line, context=assocparser.Report)
    xref_ids = xrefs.split("|")
    if xrefs == "":
        xref_ids = []
    obj = {
        'id': id,
        'label': db_object_symbol,
        'full_name': db_object_name,
        'synonyms': synonyms,
        'type': db_object_type,
        'parents': parents,
        'xrefs': xref_ids,
        'taxon': {'id': self._taxon_id(taxon, split_line)}
    }
    return line, [obj]
Parses a single line of a GPI .
16,031
def transform_item(self, item):
    """Transform a JSON item into the internal entity dict representation."""
    taxon = {'id': item['taxonId']}
    obj = {
        'id': item['primaryId'],
        'label': item['symbol'],
        'full_name': item['name'],
        'type': item['soTermId'],
        'taxon': taxon
    }
    if 'synonyms' in item:
        obj['synonyms'] = item['synonyms']
    if 'crossReferenceIds' in item:
        obj['xrefs'] = [self._normalize_id(x) for x in item['crossReferenceIds']]
    return obj
Transforms JSON object
16,032
def index(self):
    """Build indexes of inferred (ancestor-closed) terms for every subject."""
    self.subjects = list(self.association_map.keys())
    # dedupe the direct annotations per subject
    for subj in self.association_map:
        self.association_map[subj] = list(set(self.association_map[subj]))
    logging.info("Indexing {} items".format(len(self.subjects)))
    count = 0
    all_objs = set()
    for subj, terms in self.association_map.items():
        ancs = self.termset_ancestors(terms)
        all_objs.update(ancs)
        self.subject_to_inferred_map[subj] = ancs
        count += 1
        if count < 5:
            logging.info(" Indexed: {} -> {}".format(subj, ancs))
        elif count == 6:
            logging.info("[TRUNCATING>5]....")
    self.objects = all_objs
Creates indexes based on inferred terms .
16,033
def query_associations(self, subjects=None, infer_subjects=True, include_xrefs=True):
    """Query for associations whose subject is in (or implied by) `subjects`.

    Subjects may be expanded via ontology descendants and xrefs.
    Returns a list of (subject, term) tuples.
    """
    if subjects is None:
        subjects = []
    matched = set(subjects)
    if infer_subjects:
        for subj in subjects:
            matched.update(self.ontology.descendants(subj))
    if include_xrefs:
        expansions = set()
        for m in matched:
            xs = self.ontology.xrefs(m, bidirectional=True)
            if xs is not None:
                expansions.update(xs)
        matched.update(expansions)
    logging.debug("Matching subjects: {}".format(matched))
    matched = matched.intersection(self.subjects)
    logging.debug("Matching subjects with anns: {}".format(matched))
    amap = self.association_map
    return [(m, t) for m in matched if m in amap for t in amap[m]]
Query for a set of associations .
16,034
def query(self, terms=None, negated_terms=None):
    """Boolean query: subjects inferred to have all `terms` and none of `negated_terms`."""
    terms = terms if terms is not None else []
    negated_terms = negated_terms if negated_terms is not None else []
    matches_all = 'owl:Thing' in terms
    wanted = set(terms)
    unwanted = set(negated_terms)
    n_wanted = len(wanted)
    results = []
    for subj in self.subjects:
        if matches_all or len(wanted.intersection(self.inferred_types(subj))) == n_wanted:
            if len(unwanted.intersection(self.inferred_types(subj))) == 0:
                results.append(subj)
    return results
Basic boolean query using inference .
16,035
def query_intersections(self, x_terms=None, y_terms=None, symmetric=False):
    """Query pairwise intersections between two term lists.

    Returns a list of dicts with x, y, the shared subject set, the shared
    count c, and the Jaccard index j. With symmetric=True, only pairs with
    x < y are emitted.
    """
    if x_terms is None:
        x_terms = []
    if y_terms is None:
        y_terms = []
    zset = set(x_terms) | set(y_terms)
    # map each term to the subjects annotated (by inference) to it
    gmap = {z: [] for z in zset}
    for subj in self.subjects:
        for term in self.inferred_types(subj).intersection(zset):
            gmap[term].append(subj)
    gmap = {z: set(subjs) for z, subjs in gmap.items()}
    results = []
    for x in x_terms:
        for y in y_terms:
            if symmetric and not x < y:
                continue
            shared = gmap[x].intersection(gmap[y])
            union = gmap[x].union(gmap[y])
            jaccard = len(shared) / len(union) if len(union) > 0 else 0
            results.append({'x': x, 'y': y, 'shared': shared, 'c': len(shared), 'j': jaccard})
    return results
Query for intersections of terms in two lists
16,036
def intersectionlist_to_matrix(ilist, xterms, yterms):
    """WILL BE DEPRECATED: convert an intersection list into a Jaccard matrix.

    Returns (z, xterms, yterms) where z[yi][xi] is the 'j' value for the pair.
    """
    z = [[0] * len(xterms) for _ in range(len(yterms))]
    xmap = {x: i for i, x in enumerate(xterms)}
    ymap = {y: i for i, y in enumerate(yterms)}
    for entry in ilist:
        z[ymap[entry['y']]][xmap[entry['x']]] = entry['j']
    logging.debug("Z={}".format(z))
    return (z, xterms, yterms)
WILL BE DEPRECATED
16,037
def as_dataframe(self, fillna=True, subjects=None):
    """Return the association set as a subject-by-term pandas DataFrame of 0/1 values."""
    selected = subjects if subjects is not None else self.subjects
    rows = [{term: 1 for term in self.inferred_types(s)} for s in selected]
    logging.debug("Creating DataFrame")
    df = pd.DataFrame(rows, index=selected)
    if fillna:
        logging.debug("Performing fillna...")
        df = df.fillna(0)
    return df
Return association set as pandas DataFrame
16,038
def label(self, id):
    """Return a label for a subject id, preferring the ontology, then the subject label map."""
    if self.ontology is not None:
        lbl = self.ontology.label(id)
        if lbl is not None:
            return lbl
    slm = self.subject_label_map
    if slm is not None and id in slm:
        return slm[id]
    return None
return label for a subject id
16,039
def subontology(self, minimal=False):
    """Generate a sub-ontology spanning the objects used by this association set."""
    objs = self.objects
    return self.ontology.subontology(objs, minimal=minimal)
Generates a sub - ontology based on associations
16,040
def similarity_matrix(self, x_subjects=None, y_subjects=None, symmetric=False):
    """Compute a pairwise Jaccard similarity matrix between two groups of subjects."""
    if x_subjects is None:
        x_subjects = []
    if y_subjects is None:
        y_subjects = []
    all_subjects = set(x_subjects) | set(y_subjects)
    type_map = {s: self.inferred_types(s) for s in all_subjects}
    ilist = []
    for x in x_subjects:
        for y in y_subjects:
            if symmetric and not x < y:
                continue
            shared = type_map[x].intersection(type_map[y])
            union = type_map[x].union(type_map[y])
            jaccard = len(shared) / len(union) if len(union) > 0 else 0
            ilist.append({'x': x, 'y': y, 'shared': shared, 'c': len(shared), 'j': jaccard})
    return self.intersectionlist_to_matrix(ilist, x_subjects, y_subjects)
Query for similarity matrix between groups of subjects
16,041
def get_edges(ont):
    """Fetch all basic edges (is_a, svf, subPropertyOf) from a remote ontology."""
    logging.info("QUERYING:" + ont)
    edges = [(c, SUBCLASS_OF, d) for (c, d) in fetchall_isa(ont)]
    edges += fetchall_svf(ont)
    edges += [(c, SUBPROPERTY_OF, d) for (c, d) in fetchall_subPropertyOf(ont)]
    if len(edges) == 0:
        # logging.warn is deprecated; use logging.warning
        logging.warning("No edges for {}".format(ont))
    return edges
Fetches all basic edges from a remote ontology
16,042
def transformArray(data, keysToSplit=None):
    """Transform a SPARQL JSON result array by applying transform() to each item.

    `keysToSplit` defaults to an empty list; the None default replaces the
    original mutable default argument, which is shared across calls.
    """
    if keysToSplit is None:
        keysToSplit = []
    return [transform(item, keysToSplit) for item in data]
Transform a SPARQL json array based on the rules of transform
16,043
def coderef_to_ecoclass(self, code, reference=None):
    """Map a GAF evidence code (and optional reference) to an ECO class id.

    An exact (code, reference) match wins immediately; otherwise the mapping
    for the code with no reference is used as a fallback.
    """
    fallback = None
    for mapped_code, mapped_ref, cls in self.mappings():
        if str(mapped_code) != str(code):
            continue
        if mapped_ref == reference:
            return cls
        if mapped_ref is None:
            fallback = cls
    return fallback
Map a GAF code to an ECO class
16,044
def ecoclass_to_coderef(self, cls):
    """Map an ECO class to a (GAF code, reference) pair; (None, None) if unmapped.

    The original pre-initialized `code`/`ref` locals that were immediately
    shadowed by the loop variables; those dead assignments are removed.
    """
    for code, ref, mapped_cls in self.mappings():
        if cls == mapped_cls:
            return code, ref
    return None, None
Map an ECO class to a GAF code
16,045
def get_checksum(file):
    """Return the SHA256 hex digest of the contents of `file`."""
    with open(file, 'rb') as fh:
        return hashlib.sha256(fh.read()).hexdigest()
Get SHA256 hash from the contents of a given file
16,046
def create(self, handle=None, handle_type=None, **args):
    """Create (or reuse) an ontology for `handle`.

    With no handle, a module-level default ontology is lazily created and
    shared across calls (simple singleton cache). `handle_type` is unused
    here — presumably reserved for future dispatch; confirm before removing.
    """
    if handle is None:
        # NOTE(review): looks like leftover debug/counter state
        self.test = self.test + 1
        logging.info("T: " + str(self.test))
        global default_ontology
        if default_ontology is None:
            logging.info("Creating new instance of default ontology")
            default_ontology = create_ontology(default_ontology_handle)
        logging.info("Using default_ontology")
        return default_ontology
    return create_ontology(handle, **args)
Creates an ontology based on a handle
16,047
def _rule_id ( self , id : int ) -> str : if id is None or id == 0 or id >= 10000000 : return "other" return "gorule-{:0>7}" . format ( id )
Convert an integer into a gorule key id .
16,048
def convert_json_file(obographfile, **args):
    """Return a networkx MultiDiGraph bundle parsed from an obograph JSON file.

    Uses a context manager so the file handle is closed even if reading or
    parsing raises (the original leaked the handle on exception).
    """
    with open(obographfile, 'r') as f:
        jsonstr = f.read()
    return convert_json_object(json.loads(jsonstr), **args)
Return a networkx MultiDiGraph of the ontologies serialized as a json string
16,049
def convert_json_object(obographdoc, **args):
    """Translate an obograph JSON document into a bundle of networkx graphs.

    All graphs in the document are merged into one MultiDiGraph; the returned
    dict also carries an xref MultiGraph, logical definitions, property chain
    axioms, and the original document. id/meta come from the first graph.
    """
    digraph = networkx.MultiDiGraph()
    xref_graph = networkx.MultiGraph()
    logical_definitions = []
    property_chain_axioms = []
    context = obographdoc.get('@context', {})
    logging.info("CONTEXT: {}".format(context))
    mapper = OboJsonMapper(digraph=digraph, context=context)
    ogs = obographdoc['graphs']
    base_og = ogs[0]
    for og in ogs:
        # every graph is accumulated into the same mapper/digraph
        mapper.add_obograph_digraph(og, xref_graph=xref_graph,
                                    logical_definitions=logical_definitions,
                                    property_chain_axioms=property_chain_axioms,
                                    **args)
    return {
        'id': base_og.get('id'),
        'meta': base_og.get('meta'),
        'graph': mapper.digraph,
        'xref_graph': xref_graph,
        'graphdoc': obographdoc,
        'logical_definitions': logical_definitions,
        'property_chain_axioms': property_chain_axioms
    }
Return a networkx MultiDiGraph of the ontologies serialized as a json object
16,050
def infer_module_name(filename, fspath):
    """Convert a python filename to a dotted module name relative to fspath entries."""
    stem, _ = os.path.splitext(filename)
    for entry in fspath:
        rel = entry.relative_path(stem)
        if not rel:
            continue
        init_suffix = os.path.sep + "__init__"
        if rel.endswith(init_suffix):
            # a package's __init__ module is named after the package itself
            rel = rel[:rel.rfind(os.path.sep)]
        return rel.replace(os.path.sep, '.')
    return ''
Convert a python filename to a module relative to pythonpath .
16,051
def get_absolute_name(package, relative_name):
    """Join a package name and a (possibly dot-relative) name into an absolute dotted name."""
    parts = package.split('.') if package else []
    stripped = relative_name.lstrip('.')
    ndots = len(relative_name) - len(stripped)
    if ndots > len(parts):
        # more leading dots than package depth: cannot resolve, return unchanged
        return relative_name
    prefix = parts[:len(parts) + 1 - ndots]
    if stripped:
        prefix.append(stripped)
    return '.'.join(prefix)
Joins a package name and a relative name .
16,052
def resolve_import(self, item):
    """Simulate how Python resolves an import item to a file.

    Tries the full dotted name and, for "from x.y import z", also the parent
    package prefix; returns a Builtin/System/Local resolved module, or raises
    ImportException when nothing matches.
    """
    name = item.name
    # For "from a.b import c", `c` might be a symbol rather than a module, so
    # also try resolving the prefix (short_name).
    short_name = None
    if item.is_from and not item.is_star:
        if '.' in name.lstrip('.'):
            # e.g. "from a.b import c" -> try "a.b" too
            rindex = name.rfind('.')
        else:
            # e.g. "from . import a" -> keep the leading dots
            rindex = name.rfind('.') + 1
        short_name = name[:rindex]
    if import_finder.is_builtin(name):
        filename = name + '.so'
        return Builtin(filename, name)
    filename, level = convert_to_path(name)
    if level:
        # relative import: anchor the path at the current directory
        filename = os.path.normpath(
            os.path.join(self.current_directory, filename))
    files = [(name, filename)]
    if short_name:
        short_filename = os.path.dirname(filename)
        files.append((short_name, short_filename))
    for module_name, path in files:
        for fs in self.fs_path:
            f = self._find_file(fs, path)
            if not f or f == self.current_module.path:
                # skip unresolved paths and self-imports
                continue
            if item.is_relative():
                package_name = self.current_module.package_name
                if package_name is None:
                    # a relative import outside a package cannot be resolved
                    raise ImportException(name)
                module_name = get_absolute_name(package_name, module_name)
            if isinstance(self.current_module, System):
                return System(f, module_name)
            return Local(f, module_name, fs)
    # Fall back to the path recorded by the import finder, if any.
    if item.source:
        prefix, ext = os.path.splitext(item.source)
        mod_name = name
        if short_name:
            mod = prefix.replace(os.path.sep, '.')
            mod = utils.strip_suffix(mod, '.__init__')
            if not mod.endswith(name) and mod.endswith(short_name):
                # `name` refers to a symbol inside module `short_name`
                mod_name = short_name
        if ext == '.pyc':
            # prefer the .py source next to a .pyc when it exists
            pyfile = prefix + '.py'
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        elif not ext:
            pyfile = os.path.join(prefix, "__init__.py")
            if os.path.exists(pyfile):
                return System(pyfile, mod_name)
        return System(item.source, mod_name)
    raise ImportException(name)
Simulate how Python resolves imports .
16,053
def resolve_all(self, import_items):
    """Resolve each import item in turn, yielding results and logging unknown modules."""
    for item in import_items:
        try:
            yield self.resolve_import(item)
        except ImportException as err:
            logging.info('unknown module %s', err.module_name)
Resolves a list of imports .
16,054
def path_from_pythonpath(pythonpath):
    """Create an fs.Path object from an os.pathsep-separated pythonpath string."""
    path = fs.Path()
    for entry in pythonpath.split(os.pathsep):
        path.add_path(utils.expand_path(entry), 'os')
    return path
Create an fs . Path object from a pythonpath string .
16,055
def format_file_node(import_graph, node, indent):
    """Prettyprint a node with a prefix that indicates its provenance."""
    f = import_graph.provenance[node]
    if isinstance(f, resolve.Direct):
        text = '+ ' + f.short_path
    elif isinstance(f, resolve.Local):
        text = ' ' + f.short_path
    elif isinstance(f, resolve.System):
        text = ':: ' + f.short_path
    elif isinstance(f, resolve.Builtin):
        text = '(%s)' % f.module_name
    else:
        text = '%r' % node
    return ' ' * indent + text
Prettyprint nodes based on their provenance .
16,056
def format_node(import_graph, node, indent):
    """Helper for print_tree: expand NodeSets as cycle blocks, else delegate."""
    if not isinstance(node, graph.NodeSet):
        return format_file_node(import_graph, node, indent)
    pad = ' ' * indent
    lines = [pad + 'cycle {']
    lines += [format_file_node(import_graph, n, indent + 1) for n in node.nodes]
    lines.append(pad + '}')
    return '\n'.join(lines)
Helper function for print_tree
16,057
def _find_package ( parts ) : for i in range ( len ( parts ) , 0 , - 1 ) : prefix = '.' . join ( parts [ 0 : i ] ) if prefix in sys . modules : return i , sys . modules [ prefix ] return 0 , None
Helper function for _resolve_import_versioned .
16,058
def _resolve_import ( name ) : if name in sys . modules : return getattr ( sys . modules [ name ] , '__file__' , name + '.so' ) return _resolve_import_versioned ( name )
Helper function for resolve_import .
16,059
def resolve_import(name, is_from, is_star):
    """Use the running Python to resolve an import to a filename.

    Relative and builtin imports resolve to None. For "from a.b import c"
    style imports, the parent package is tried when the full name fails.
    """
    if name.startswith('.') or is_builtin(name):
        return None
    filename = _resolve_import(name)
    if filename is None and is_from and not is_star:
        parent, _ = name.rsplit('.', 1)
        filename = _resolve_import(parent)
    return filename
Use python to resolve an import .
16,060
def get_imports(filename):
    """Parse a file and return its imports, each augmented with a resolved path."""
    with open(filename, "rb") as f:
        src = f.read()
    finder = ImportFinder()
    finder.visit(ast.parse(src, filename=filename))
    results = []
    for imp in finder.imports:
        name, _, is_from, is_star = imp
        results.append(imp + (resolve_import(name, is_from, is_star),))
    return results
Get all the imports in a file .
16,061
def add_file(self, filename):
    """Add a file and all its immediate dependencies to the graph."""
    assert not self.final, 'Trying to mutate a final graph.'
    self.add_source_file(filename)
    resolved, unresolved = self.get_file_deps(filename)
    self.graph.add_node(filename)
    for dep in resolved:
        self.graph.add_node(dep)
        self.graph.add_edge(filename, dep)
    for imp in unresolved:
        self.broken_deps[filename].add(imp)
Add a file and all its immediate dependencies to the graph .
16,062
def follow_file(self, f, seen, trim):
    """Decide whether to recurse into a file's dependencies."""
    if f in self.graph.nodes or f in seen:
        return False
    if trim and isinstance(self.provenance[f], (resolve.Builtin, resolve.System)):
        # trimming: don't follow builtin/system modules
        return False
    return True
Whether to recurse into a file's dependencies.
16,063
def add_file_recursive(self, filename, trim=False):
    """Add a file and all its recursive dependencies to the graph.

    Breadth-first traversal over dependencies. Unparseable .py files are
    recorded in unreadable_files (node kept); other unparseable files are
    removed from the graph. With trim=True, dependencies of builtin/system
    modules are not followed (see follow_file).
    """
    assert not self.final, 'Trying to mutate a final graph.'
    self.add_source_file(filename)
    queue = collections.deque([filename])
    seen = set()
    while queue:
        filename = queue.popleft()
        self.graph.add_node(filename)
        try:
            deps, broken = self.get_file_deps(filename)
        except parsepy.ParseError:
            if filename.endswith('.py'):
                # keep the node but remember we could not read it
                self.unreadable_files.add(filename)
            else:
                # non-Python files that fail to parse are dropped entirely
                self.graph.remove_node(filename)
            continue
        for f in broken:
            self.broken_deps[filename].add(f)
        for f in deps:
            if self.follow_file(f, seen, trim):
                queue.append(f)
                seen.add(f)
            # edges are added even for files we do not recurse into
            self.graph.add_node(f)
            self.graph.add_edge(filename, f)
Add a file and all its recursive dependencies to the graph .
16,064
def shrink_to_node(self, scc):
    """Collapse a strongly connected component into a single NodeSet node.

    Edges crossing the SCC boundary are re-pointed at the new collapsed node;
    the member nodes (and with them their internal edges) are then removed.
    """
    assert not self.final, 'Trying to mutate a final graph.'
    self.graph.add_node(scc)
    # iterate over a snapshot since edges are mutated while scanning
    edges = list(self.graph.edges)
    for k, v in edges:
        if k not in scc and v in scc:
            # incoming edge: redirect to the collapsed node
            self.graph.remove_edge(k, v)
            self.graph.add_edge(k, scc)
        elif k in scc and v not in scc:
            # outgoing edge: re-source from the collapsed node
            self.graph.remove_edge(k, v)
            self.graph.add_edge(scc, v)
    for node in scc.nodes:
        self.graph.remove_node(node)
Shrink a strongly connected component into a node .
16,065
def build(self):
    """Finalise the graph: collapse every non-trivial SCC into a NodeSet node."""
    assert not self.final, 'Trying to mutate a final graph.'
    sccs = sorted(nx.kosaraju_strongly_connected_components(self.graph),
                  key=len, reverse=True)
    for scc in sccs:
        if len(scc) == 1:
            # sorted by size descending, so all remaining SCCs are singletons
            break
        self.shrink_to_node(NodeSet(scc))
    self.final = True
Finalise the graph after adding all input files to it .
16,066
def sorted_source_files(self):
    """Return targets as a reverse-topologically-sorted list of file groups."""
    assert self.final, 'Call build() before using the graph.'
    groups = []
    for node in nx.topological_sort(self.graph):
        group = node.nodes if isinstance(node, NodeSet) else [node]
        groups.append(group)
    return list(reversed(groups))
Returns a list of targets in topologically sorted order .
16,067
def get_all_unresolved(self):
    """Return the union of all unresolved imports across files."""
    assert self.final, 'Call build() before using the graph.'
    unresolved = set()
    for deps in self.broken_deps.values():
        unresolved.update(deps)
    return unresolved
Returns a set of all unresolved imports .
16,068
def create(cls, env, filenames, trim=False):
    """Create, populate with files, and finalise an import graph."""
    import_graph = cls(env)
    for filename in filenames:
        import_graph.add_file_recursive(os.path.abspath(filename), trim)
    import_graph.build()
    return import_graph
Create and return a final graph .
16,069
def get_source_file_provenance(self, filename):
    """Return Direct provenance for a source file, inferring its module name if possible."""
    module_name = resolve.infer_module_name(filename, self.path)
    return resolve.Direct(filename, module_name)
Infer the module name if possible .
16,070
def collect_files(path, extension):
    """Recursively collect all files under `path` ending with `extension`."""
    assert os.path.isdir(path)
    matches = []
    for root, _, files in os.walk(path):
        matches.extend(os.path.join(root, name)
                       for name in files if name.endswith(extension))
    return matches
Collect all the files with extension in a directory tree .
16,071
def expand_source_files(filenames, cwd=None):
    """Expand a list of source paths: directories become their .py contents."""
    expanded = []
    for path in expand_paths(filenames, cwd):
        if os.path.isdir(path):
            expanded += collect_files(path, ".py")
        elif path.endswith(".py"):
            expanded.append(path)
    return sorted(set(expanded))
Expand a list of filenames passed in as sources .
16,072
def strip_suffix(string, suffix):
    """Remove `suffix` from the end of `string` if present.

    Guards against an empty suffix: the original's `string[:-0]` slice
    would have returned an empty string instead of the unchanged input.
    """
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
Remove a suffix from a string if it exists .
16,073
def create_directory(self, filename):
    """Create (and return the path of) a subdirectory inside the temporary directory."""
    path = os.path.join(self.path, filename)
    makedirs(path)
    return path
Create a subdirectory in the temporary directory .
16,074
def create_file(self, filename, indented_data=None):
    """Create a file (dedenting text data) in the temporary directory; return its path."""
    filedir, basename = os.path.split(filename)
    if filedir:
        self.create_directory(filedir)
    path = os.path.join(self.path, filedir, basename)
    data = indented_data
    is_binary = isinstance(data, bytes) and not isinstance(data, str)
    mode = 'wb' if is_binary else 'w'
    if data:
        data = textwrap.dedent(data)
    with open(path, mode) as out:
        if data:
            out.write(data)
    return path
Create a file in the temporary directory .
16,075
def new_reply(cls, thread, user, content):
    """Create a reply on an existing thread, marking it unread for everyone else."""
    msg = cls.objects.create(thread=thread, sender=user, content=content)
    others = thread.userthread_set.exclude(user=user)
    others.update(deleted=False, unread=True)
    own = thread.userthread_set.filter(user=user)
    own.update(deleted=False, unread=False)
    message_sent.send(sender=cls, message=msg, thread=thread, reply=True)
    return msg
Create a new reply for an existing Thread .
16,076
def new_message(cls, from_user, to_users, subject, content):
    """Create a new Thread and its first Message, setting per-user read flags."""
    thread = Thread.objects.create(subject=subject)
    for recipient in to_users:
        thread.userthread_set.create(user=recipient, deleted=False, unread=True)
    # sender's own entry starts deleted/read — presumably hidden until a reply
    thread.userthread_set.create(user=from_user, deleted=True, unread=False)
    msg = cls.objects.create(thread=thread, sender=from_user, content=content)
    message_sent.send(sender=cls, message=msg, thread=thread, reply=False)
    return msg
Create a new Message and Thread .
16,077
def unread(thread, user):
    """Return True if the user has any unread messages in the thread."""
    unread_entries = thread.userthread_set.filter(user=user, unread=True)
    return bool(unread_entries)
Check whether there are any unread messages for a particular thread for a user .
16,078
def split(self, text):
    """Split text into a list of sentences."""
    cleaned = cleanup(text)
    return self.sent_detector.tokenize(cleaned.strip())
Splits text and returns a list of the resulting sentences .
16,079
def str_from_file(path):
    """Return the stripped contents of a file as a string."""
    with open(path) as f:
        return f.read().strip()
Return file contents as string .
16,080
def xml_equal(xml_file1, xml_file2):
    """Parse XML and compare canonical string representations so we
    don't have to worry about semantically meaningless differences.

    NOTE(review): the canonical form sorts the *characters* of the
    serialized document, so two structurally different documents made
    of the same character multiset also compare equal; this lossy
    behavior is kept as-is.
    """
    def canonical(xml_file):
        root = et.parse(xml_file).getroot()
        s = et.tostring(root).decode("UTF-8")
        # Raw strings: "\s" in a plain literal is an invalid escape
        # sequence (SyntaxWarning in modern Python).  The original
        # char class [\n|\t] also stripped literal '|' by accident.
        s = re.sub(r"[\n\t]*", "", s)
        s = re.sub(r"\s+", " ", s)
        return "".join(sorted(s)).strip()

    return canonical(xml_file1) == canonical(xml_file2)
Parse XML and convert to a canonical string representation so we don't have to worry about semantically meaningless differences
16,081
def list_files(dir_path, recursive=True):
    """Return a list of files in *dir_path*.

    With ``recursive=True`` (the default), files in all subdirectories
    are included.  Returns an empty list for a missing/empty directory
    instead of the implicit ``None`` the old code produced, and lets
    ``os.walk`` do the recursion instead of re-walking every subtree.
    """
    file_list = []
    for root, _dirs, files in os.walk(dir_path):
        file_list.extend(os.path.join(root, f) for f in files)
        if not recursive:
            # os.walk yields the top directory first; stop there.
            break
    return file_list
Return a list of files in dir_path .
16,082
def process(input_dir, output_dir, function):
    """Apply *function* to all files in *input_dir* and save the
    resulting output files in *output_dir*.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    logger = log.get_global_console_logger()
    logger.info("Processing files in {}.".format(input_dir))
    for name in os.listdir(input_dir):
        logger.info("Processing {}.".format(name))
        source_path = os.path.join(input_dir, name)
        with codecs.open(source_path, "r", encoding="UTF-8") as infile:
            transformed = function(infile.read())
        target_path = os.path.join(output_dir, name)
        with codecs.open(target_path, "w", encoding="UTF-8") as outfile:
            outfile.write(transformed)
    logger.info("Saved processed files to {}.".format(output_dir))
Apply function to all files in input_dir and save the resulting output files in output_dir .
16,083
def split_sentences(self):
    """ROUGE requires texts split into sentences.  In case the texts
    are not already split, this method can be used.
    """
    from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
    self.log.info("Splitting sentences.")
    splitter = PunktSentenceSplitter()

    def split_to_string(text):
        return "\n".join(splitter.split(text))

    process_func = partial(DirectoryProcessor.process, function=split_to_string)
    self.__process_summaries(process_func)
ROUGE requires texts split into sentences . In case the texts are not already split this method can be used .
16,084
def write_config_static(system_dir, system_filename_pattern,
                        model_dir, model_filename_pattern,
                        config_file_path, system_id=None):
    """Write the ROUGE configuration file, which is basically a list of
    system summary files and their corresponding model summary files.

    system_dir:               directory holding the system summaries
    system_filename_pattern:  regex with one capture group matching the
                              task id in a system summary filename
    model_dir:                directory holding the model summaries
    model_filename_pattern:   pattern used to find model files per id
    config_file_path:         path of the XML configuration file to write
    system_id:                optional system identifier written per task

    Raises Exception when no system file matches the pattern.
    """
    system_filenames = [f for f in os.listdir(system_dir)]
    system_models_tuples = []
    system_filename_pattern = re.compile(system_filename_pattern)
    for system_filename in sorted(system_filenames):
        match = system_filename_pattern.match(system_filename)
        if match:
            # The first capture group is the task id shared between a
            # system summary and its model summaries.
            id = match.groups(0)[0]
            model_filenames = Rouge155.__get_model_filenames_for_id(
                id, model_dir, model_filename_pattern)
            system_models_tuples.append(
                (system_filename, sorted(model_filenames)))
    if not system_models_tuples:
        raise Exception(
            "Did not find any files matching the pattern {} "
            "in the system summaries directory {}.".format(
                system_filename_pattern.pattern, system_dir))
    with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
        f.write('<ROUGE-EVAL version="1.55">')
        # One <EVAL> entry per (system, models) pair, numbered from 1.
        for task_id, (system_filename, model_filenames) in enumerate(
                system_models_tuples, start=1):
            eval_string = Rouge155.__get_eval_string(
                task_id, system_id,
                system_dir, system_filename,
                model_dir, model_filenames)
            f.write(eval_string)
        f.write("</ROUGE-EVAL>")
Write the ROUGE configuration file which is basically a list of system summary files and their corresponding model summary files .
16,085
def write_config(self, config_file_path=None, system_id=None):
    """Write the ROUGE configuration file, which is basically a list of
    system summary files and their matching model summary files.

    config_file_path:  where to write the configuration; when omitted,
                       a temporary directory and a default name are used
    system_id:         optional system identifier (defaults to 1)
    """
    if not system_id:
        system_id = 1
    if (not config_file_path) or (not self._config_dir):
        self._config_dir = mkdtemp()
        config_filename = "rouge_conf.xml"
    else:
        config_dir, config_filename = os.path.split(config_file_path)
        verify_dir(config_dir, "configuration file")
        # Bug fix: remember the requested directory.  Previously the
        # stale self._config_dir was reused, so the configuration file
        # ended up in the wrong directory when config_file_path was
        # supplied.
        self._config_dir = config_dir
    self._config_file = os.path.join(self._config_dir, config_filename)
    Rouge155.write_config_static(
        self._system_dir, self._system_filename_pattern,
        self._model_dir, self._model_filename_pattern,
        self._config_file, system_id)
    self.log.info(
        "Written ROUGE configuration to {}".format(self._config_file))
Write the ROUGE configuration file which is basically a list of system summary files and their matching model summary files .
16,086
def evaluate(self, system_id=1, rouge_args=None):
    """Run ROUGE to evaluate the system summaries in system_dir against
    the model summaries in model_dir.  The summaries are assumed to be
    in the one-sentence-per-line HTML format ROUGE understands.

    system_id:   optional system identifier recorded in the config file
    rouge_args:  command-line arguments passed through to ROUGE
    Returns the raw ROUGE output as text.
    """
    self.write_config(system_id=system_id)
    options = self.__get_options(rouge_args)
    command = [self._bin_path] + options
    env = None
    if hasattr(self, "_home_dir") and self._home_dir:
        # Merge with the current environment instead of replacing it;
        # passing a one-entry dict stripped PATH and friends from the
        # ROUGE subprocess.
        env = dict(os.environ, ROUGE_EVAL_HOME=self._home_dir)
    self.log.info("Running ROUGE with command {}".format(" ".join(command)))
    rouge_output = check_output(command, env=env).decode("UTF-8")
    return rouge_output
Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir . The summaries are assumed to be in the one - sentence - per - line HTML format ROUGE understands .
16,087
def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None):
    """Convert plain-text summaries to ROUGE format and run ROUGE to
    evaluate the system summaries in system_dir against the model
    summaries in model_dir.  Optionally split texts into sentences in
    case they aren't already.
    """
    if split_sentences:
        self.split_sentences()
    self.__write_summaries()
    return self.evaluate(system_id, rouge_args)
Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir . Optionally split texts into sentences in case they aren't already .
16,088
def output_to_dict(self, output):
    """Convert the ROUGE output into a python dictionary for further
    processing.

    Keys look like ``rouge_1_recall`` with ``_cb``/``_ce`` variants for
    the 95% confidence-interval bounds; values are floats.
    """
    # Bug fix: literal dots are now escaped.  The old pattern used a
    # bare '.', which matches any character.
    pattern = re.compile(
        r"(\d+) (ROUGE-\S+) (Average_\w): (\d\.\d+) "
        r"\(95%-conf\.int\. (\d\.\d+) - (\d\.\d+)\)")
    results = {}
    for line in output.split("\n"):
        match = pattern.match(line)
        if match:
            sys_id, rouge_type, measure, result, conf_begin, conf_end = \
                match.groups()
            measure = {
                'Average_R': 'recall',
                'Average_P': 'precision',
                'Average_F': 'f_score'}[measure]
            rouge_type = rouge_type.lower().replace("-", '_')
            key = "{}_{}".format(rouge_type, measure)
            results[key] = float(result)
            results["{}_cb".format(key)] = float(conf_begin)
            results["{}_ce".format(key)] = float(conf_end)
    return results
Convert the ROUGE output into a Python dictionary for further processing .
16,089
def __set_rouge_dir(self, home_dir=None):
    """Verify presence of ROUGE-1.5.5.pl and the data folder and set
    those paths.
    """
    if home_dir:
        self._home_dir = home_dir
        self.save_home_dir()
    else:
        self._home_dir = self.__get_rouge_home_dir_from_settings()
    self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
    self.data_dir = os.path.join(self._home_dir, 'data')
    if not os.path.exists(self._bin_path):
        raise Exception(
            "ROUGE binary not found at {}. Please set the "
            "correct path by running pyrouge_set_rouge_path "
            "/path/to/rouge/home.".format(self._bin_path))
Verify presence of ROUGE-1.5.5.pl and the data folder and set those paths .
16,090
def __process_summaries(self, process_func):
    """Helper method that applies *process_func* to the files in the
    system and model folders and saves the resulting files to new
    system and model folders.
    """
    temp_dir = mkdtemp()
    new_system_dir = os.path.join(temp_dir, "system")
    new_model_dir = os.path.join(temp_dir, "model")
    os.mkdir(new_system_dir)
    os.mkdir(new_model_dir)
    self.log.info(
        "Processing summaries. Saving system files to {} and "
        "model files to {}.".format(new_system_dir, new_model_dir))
    process_func(self._system_dir, new_system_dir)
    process_func(self._model_dir, new_model_dir)
    # Point the instance at the processed copies from here on.
    self._system_dir = new_system_dir
    self._model_dir = new_model_dir
Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders .
16,091
def __get_options(self, rouge_args=None):
    """Get supplied command line arguments for ROUGE or use default
    ones.
    """
    if self.args:
        options = self.args.split()
    elif rouge_args:
        options = rouge_args.split()
    else:
        options = [
            '-e', self._data_dir,
            '-c', 95,
            '-2',
            '-1',
            '-U',
            '-r', 1000,
            '-n', 4,
            '-w', 1.2,
            '-a',
        ]
    # ROUGE expects every argument as a string.
    options = [str(opt) for opt in options]
    return self.__add_config_option(options)
Get supplied command line arguments for ROUGE or use default ones .
16,092
def __create_dir_property(self, dir_name, docstring):
    """Generate getter and setter for a directory property.

    Creates a property named ``<dir_name>_dir`` on the class, backed by
    the private attribute ``_<dir_name>_dir``; the setter runs the
    assigned path through ``verify_dir`` before storing it.
    """
    property_name = "{}_dir".format(dir_name)
    private_name = "_" + property_name
    # Initialize the backing attribute on the instance.
    setattr(self, private_name, None)

    def fget(self):
        return getattr(self, private_name)

    def fset(self, path):
        # Validate before storing; verify_dir is expected to raise on
        # an invalid directory (defined elsewhere in this module).
        verify_dir(path, dir_name)
        setattr(self, private_name, path)

    p = property(fget=fget, fset=fset, doc=docstring)
    # Properties only take effect when defined on the class itself,
    # not on an instance.
    setattr(self.__class__, property_name, p)
Generate getter and setter for a directory property .
16,093
def __set_dir_properties(self):
    """Automatically generate the properties for directories."""
    self.__create_dir_property("home", "The ROUGE home directory.")
    self.__create_dir_property("data", "The path of the ROUGE 'data' directory.")
    self.__create_dir_property("system", "Path of the directory containing system summaries.")
    self.__create_dir_property("model", "Path of the directory containing model summaries.")
Automatically generate the properties for directories .
16,094
def __clean_rouge_args(self, rouge_args):
    """Remove enclosing quotation marks, if any.

    Returns None for an empty/None argument string, otherwise the
    argument string with a surrounding pair of double quotes stripped.
    """
    if not rouge_args:
        return
    match = re.match('"(.+)"', rouge_args)
    if match:
        return match.group(1)
    return rouge_args
Remove enclosing quotation marks if any .
16,095
def check_auth(self, username, password):
    """This function is called to check if a username/password
    combination is valid.

    Uses a constant-time comparison so timing differences do not leak
    information about the expected credentials.  Assumes the stored
    and supplied credentials are strings.
    """
    import hmac  # local import keeps the module-level imports untouched
    user_ok = hmac.compare_digest(
        username.encode("utf-8"), self.queryname.encode("utf-8"))
    pass_ok = hmac.compare_digest(
        password.encode("utf-8"), self.querypw.encode("utf-8"))
    return user_ok and pass_ok
This function is called to check if a username/password combination is valid .
16,096
def requires_auth(self, func):
    """Decorator to prompt for user name and password.  Useful for data
    dumps etc. that you don't want to be public.
    """
    @wraps(func)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if auth and self.check_auth(auth.username, auth.password):
            return func(*args, **kwargs)
        # Missing or invalid credentials: challenge the client.
        return self.authenticate()
    return decorated
Decorator to prompt for user name and password . Useful for data dumps etc . that you don't want to be public .
16,097
def handle_exp_error(exception):
    """Handle errors by sending an error page."""
    app.logger.error(
        "%s (%s) %s",
        exception.value, exception.errornum, str(dict(request.args)))
    contact = CONFIG.get('HIT Configuration', 'contact_email_on_error')
    return exception.error_page(request, contact)
Handle errors by sending an error page .
16,098
def check_worker_status():
    """Check worker status route.

    Looks up the worker's participation status from the database and
    returns it as JSON: ``{"status": <value>}``, or
    ``{"status": "bad request"}`` when no workerId was supplied.
    """
    if 'workerId' not in request.args:
        resp = {"status": "bad request"}
        return jsonify(**resp)
    else:
        worker_id = request.args['workerId']
        # NOTE(review): assignmentId is read without a presence check;
        # a request carrying workerId but no assignmentId raises here.
        assignment_id = request.args['assignmentId']
        allow_repeats = CONFIG.getboolean('HIT Configuration', 'allow_repeats')
        if allow_repeats:
            # Repeats allowed: the status is tied to this specific
            # worker/assignment pair; any lookup failure (no row, >1
            # rows, DB error) falls back to NOT_ACCEPTED.
            try:
                part = Participant.query.filter(Participant.workerid == worker_id).filter(Participant.assignmentid == assignment_id).one()
                status = part.status
            except exc.SQLAlchemyError:
                status = NOT_ACCEPTED
        else:
            # Repeats disallowed: any prior participation by this
            # worker counts; report the maximum status value seen
            # across all of their records.
            try:
                matches = Participant.query.filter(Participant.workerid == worker_id).all()
                numrecs = len(matches)
                if numrecs == 0:
                    status = NOT_ACCEPTED
                else:
                    status = max([record.status for record in matches])
            except exc.SQLAlchemyError:
                status = NOT_ACCEPTED
        resp = {"status": status}
        return jsonify(**resp)
Check worker status route
16,099
def give_consent():
    """Serves up the consent in the popup window."""
    required = ('hitId', 'assignmentId', 'workerId')
    if not all(key in request.args for key in required):
        raise ExperimentError('hit_assign_worker_id_not_set_in_consent')
    hit_id = request.args['hitId']
    assignment_id = request.args['assignmentId']
    worker_id = request.args['workerId']
    mode = request.args['mode']
    with open('templates/consent.html', 'r') as temp_file:
        consent_string = temp_file.read()
    consent_string = insert_mode(consent_string, mode)
    return render_template_string(
        consent_string,
        hitid=hit_id,
        assignmentid=assignment_id,
        workerid=worker_id)
Serves up the consent in the popup window .