idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
39,000
def seq_int_arr(seqs):
    """Convert a list of ACGT strings into a 2D integer numpy array via NT_TO_INT."""
    rows = []
    for seq in seqs:
        # NT_TO_INT maps each uppercase nucleotide character to its int code
        rows.append([NT_TO_INT[nt] for nt in seq.upper()])
    return np.array(rows)
Convert a list of ACGT strings to a matrix of integers 1-4
39,001
def group_alleles_by_start_end_Xbp(arr, bp=28):
    """Group allele row indices by identical first and last `bp` columns.

    arr: 2D int array, one allele per row.
    Returns a defaultdict mapping "<start digits><end digits>" -> list of row indices.
    """
    head = arr[:, :bp]
    tail = arr[:, -bp:]
    groups = defaultdict(list)
    n_rows = arr.shape[0]
    for idx in range(n_rows):
        key = ''.join(map(str, head[idx])) + ''.join(map(str, tail[idx]))
        groups[key].append(idx)
    return groups
Group alleles by matching ends
39,002
def allele_clusters(dists, t=0.025):
    """Compute flat clusters from a condensed distance matrix.

    Args:
        dists: condensed (pdist-style) distance matrix.
        t: flat-clustering distance threshold passed to fcluster.

    Returns:
        defaultdict mapping cluster label -> list of observation indices.

    Bug fix: the `t` parameter was previously ignored and a hard-coded 0.025
    was used; `t` is now passed through to fcluster.
    """
    clusters = fcluster(linkage(dists), t, criterion='distance')
    cluster_idx = defaultdict(list)
    for idx, cl in enumerate(clusters):
        cluster_idx[cl].append(idx)
    return cluster_idx
Flat clusters from distance matrix
39,003
def min_row_dist_sum_idx(dists):
    """Return the index of the row with the minimum distance sum.

    Sums along axis 0 (columns), matching the original; for a symmetric
    distance matrix this equals the row sums.
    """
    return dists.sum(axis=0).argmin()
Find the index of the row with the minimum row distance sum
39,004
def find_centroid_alleles(alleles, bp=28, t=0.025):
    """Reduce a list of alleles to a set of centroid alleles.

    Alleles are grouped by length, then by identical first/last `bp` bases;
    within each group hierarchical clustering (threshold `t`, hamming distance)
    selects one representative (minimum row-distance-sum) allele per cluster.

    Bug fix: `min_row_dist_sum_idx` returns an index into the cluster-subset
    distance matrix (i.e. a position within `cl_idxs`), but it was used to
    index `seq_arr_subset` directly; it is now mapped back through `cl_idxs`.
    """
    centroid_alleles = set()
    len_allele = group_alleles_by_size(alleles)
    for length, seqs in len_allele.items():
        if len(seqs) == 1:
            centroid_alleles.add(seqs[0])
            continue
        seq_arr = seq_int_arr(seqs)
        starts_ends_idxs = group_alleles_by_start_end_Xbp(seq_arr, bp=bp)
        for k, idxs in starts_ends_idxs.items():
            if len(idxs) == 1:
                centroid_alleles.add(seqs[idxs[0]])
                continue
            seq_arr_subset = seq_arr[idxs]
            dists = pdist(seq_arr_subset, 'hamming')
            cl = allele_clusters(dists, t=t)
            dm_sq = squareform(dists)
            for cl_key, cl_idxs in cl.items():
                if len(cl_idxs) in (1, 2):
                    # 1- or 2-member cluster: first member is the centroid
                    centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[cl_idxs[0]]))
                    continue
                dm_sub = dm_subset(dm_sq, cl_idxs)
                min_idx = min_row_dist_sum_idx(dm_sub)
                # map subset-local index back to seq_arr_subset row (bug fix)
                centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[cl_idxs[min_idx]]))
    return centroid_alleles
Reduce a list of alleles to a set of centroid alleles based on size grouping, matching ends, and hierarchical clustering
39,005
def mash_dist_trusted(fasta_path):
    """Run `mash dist` of the RefSeq sketch DB against a genome fasta.

    Returns mash's stdout; raises Exception on a non-zero exit code.
    """
    cmd = [MASH_BIN, 'dist', MASH_SKETCH_FILE, fasta_path]
    proc = Popen(cmd, stderr=PIPE, stdout=PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise Exception('Could not run Mash dist {}'.format(stderr))
    return stdout
Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB .
39,006
def nr_profiles(arr, genomes):
    """Collapse genomes with identical profile rows into single representatives.

    Args:
        arr: 2D numpy array, one profile row per genome.
        genomes: list of genome names, parallel to arr's rows.

    Returns:
        (arr subset containing only the first occurrence of each unique row,
         list of genome-name groups parallel to that subset).

    Bug fix: `ndarray.tostring()` is deprecated and removed in newer numpy;
    replaced with the equivalent `tobytes()`.
    """
    gs_collapse = []
    genome_idx_dict = {}
    indices = []
    patt_dict = {}
    for i, g in enumerate(genomes):
        p = arr[i, :].tobytes()  # row bytes as a hashable dedup key
        if p in patt_dict:
            parent = patt_dict[p]
            idx = genome_idx_dict[parent]
            gs_collapse[idx].append(g)
        else:
            indices.append(i)
            patt_dict[p] = g
            genome_idx_dict[g] = len(gs_collapse)
            gs_collapse.append([g])
    return arr[indices, :], gs_collapse
Get a condensed cgMLST pairwise distance matrix for the specified genomes, where condensed means redundant cgMLST profiles are only represented once in the distance matrix.
39,007
def overall_serovar_call(serovar_prediction, antigen_predictor):
    """Combine antigen BLAST, cgMLST and Mash evidence into a final serovar call.

    Mutates and returns `serovar_prediction`. Nesting below is reconstructed
    from a collapsed one-line source; flow within the two main branches should
    be confirmed against the upstream project.
    """
    assert isinstance(serovar_prediction, SerovarPrediction)
    assert isinstance(antigen_predictor, SerovarPredictor)
    h1 = antigen_predictor.h1
    h2 = antigen_predictor.h2
    sg = antigen_predictor.serogroup
    spp = serovar_prediction.cgmlst_subspecies
    if spp is None:
        # fall back to Mash-derived subspecies when cgMLST gave none
        if 'mash_match' in serovar_prediction.__dict__:
            spp = serovar_prediction.__dict__['mash_subspecies']
    serovar_prediction.serovar_antigen = antigen_predictor.serovar
    cgmlst_serovar = serovar_prediction.serovar_cgmlst
    cgmlst_distance = float(serovar_prediction.cgmlst_distance)
    null_result = '-:-:-'
    try:
        spp_roman = spp_name_to_roman[spp]
    except:
        spp_roman = None
    is_antigen_null = lambda x: (x is None or x == '' or x == '-')
    if antigen_predictor.serovar is None:
        # no antigen-based serovar: build formula string or use cgMLST/Mash
        if is_antigen_null(sg) and is_antigen_null(h1) and is_antigen_null(h2):
            if spp_roman is not None:
                serovar_prediction.serovar = '{} {}:{}:{}'.format(spp_roman, sg, h1, h2)
            else:
                # NOTE(review): 3 placeholders but 4 args — spp_roman (None here)
                # is formatted first and h2 is dropped; likely intended
                # '{}:{}:{}'.format(sg, h1, h2). Confirm upstream.
                serovar_prediction.serovar = '{}:{}:{}'.format(spp_roman, sg, h1, h2)
        elif cgmlst_serovar is not None and cgmlst_distance <= CGMLST_DISTANCE_THRESHOLD:
            serovar_prediction.serovar = cgmlst_serovar
        else:
            serovar_prediction.serovar = null_result
            # last resort: Mash serovar when within distance threshold
            if 'mash_match' in serovar_prediction.__dict__:
                spd = serovar_prediction.__dict__
                mash_dist = float(spd['mash_distance'])
                if mash_dist <= MASH_DISTANCE_THRESHOLD:
                    serovar_prediction.serovar = spd['mash_serovar']
    else:
        # antigen analysis yielded one or more candidates ('|'-delimited)
        serovars_from_antigen = antigen_predictor.serovar.split('|')
        if not isinstance(serovars_from_antigen, list):
            serovars_from_antigen = [serovars_from_antigen]
        if cgmlst_serovar is not None:
            if cgmlst_serovar in serovars_from_antigen:
                serovar_prediction.serovar = cgmlst_serovar
            else:
                if float(cgmlst_distance) <= CGMLST_DISTANCE_THRESHOLD:
                    serovar_prediction.serovar = cgmlst_serovar
        elif 'mash_match' in serovar_prediction.__dict__:
            spd = serovar_prediction.__dict__
            mash_serovar = spd['mash_serovar']
            mash_dist = float(spd['mash_distance'])
            if mash_serovar in serovars_from_antigen:
                serovar_prediction.serovar = mash_serovar
            else:
                if mash_dist <= MASH_DISTANCE_THRESHOLD:
                    serovar_prediction.serovar = mash_serovar
    if serovar_prediction.serovar is None:
        serovar_prediction.serovar = serovar_prediction.serovar_antigen
    # normalize missing antigen fields to '-'
    if serovar_prediction.h1 is None:
        serovar_prediction.h1 = '-'
    if serovar_prediction.h2 is None:
        serovar_prediction.h2 = '-'
    if serovar_prediction.serogroup is None:
        serovar_prediction.serogroup = '-'
    if serovar_prediction.serovar_antigen is None:
        if spp_roman is not None:
            serovar_prediction.serovar_antigen = '{} -:-:-'.format(spp_roman)
        else:
            serovar_prediction.serovar_antigen = '-:-:-'
    if serovar_prediction.serovar is None:
        serovar_prediction.serovar = serovar_prediction.serovar_antigen
    return serovar_prediction
Predict serovar from cgMLST cluster membership analysis and antigen BLAST results . SerovarPrediction object is assigned H1 H2 and Serogroup from the antigen BLAST results . Antigen BLAST results will predict a particular serovar or list of serovars however the cgMLST membership may be able to help narrow down the list of potential serovars .
39,008
def process_cgmlst_results(df):
    """Append informative columns to a cgMLST BLAST results DataFrame in place.

    Adds marker/allele parsed from 'qseqid' ("marker|allele"), match-quality
    flags, extension bookkeeping columns, and gap placeholders; returns `df`.
    """
    assert isinstance(df, pd.DataFrame)
    markers = []
    alleles = []
    for x in df['qseqid']:
        # qseqid format is "<marker>|<allele number>"
        marker, allele = x.split('|')
        markers.append(marker)
        alleles.append(int(allele))
    df.loc[:, 'marker'] = markers
    df.loc[:, 'allele'] = alleles
    # full-coverage, >=90% identity, not truncated
    df.loc[:, 'is_match'] = (df['coverage'] >= 1.0) & (df['pident'] >= 90.0) & ~(df['is_trunc'])
    # allele name hashed from the gap-stripped subject sequence
    df.loc[:, 'allele_name'] = df.apply(lambda x: allele_name(x.sseq.replace('-', '')), axis=1)
    df.loc[:, 'is_perfect'] = (df['coverage'] == 1.0) & (df['pident'] == 100.0)
    df_perf = df[df['is_perfect']]
    perf_markers = df_perf['marker'].unique()
    df.loc[:, 'has_perfect_match'] = df['marker'].isin(perf_markers)
    # vectorized computation of subject-match extension coordinates
    start_idxs, end_idxs, needs_revcomps, trunc, is_extended = extend_subj_match_vec(df)
    df.loc[:, 'start_idx'] = start_idxs
    df.loc[:, 'end_idx'] = end_idxs
    df.loc[:, 'needs_revcomp'] = needs_revcomps
    df.loc[:, 'trunc'] = trunc
    df.loc[:, 'is_extended'] = is_extended
    # gap counters initialized to zero; filled later in the pipeline
    df.loc[:, 'sseq_msa_gaps'] = np.zeros(df.shape[0], dtype=np.int64)
    df.loc[:, 'sseq_msa_p_gaps'] = np.zeros(df.shape[0], dtype=np.float64)
    df.loc[:, 'too_many_gaps'] = trunc
    return df
Append informative fields to cgMLST330 BLAST results DataFrame
39,009
def alleles_to_retrieve(df):
    """Collect, keyed by contig title, the partial-coverage BLAST record
    (coverage < 1.0) found first for each marker.

    Returns a defaultdict mapping contig title -> list of matching rows.
    """
    contig_blastn_records = defaultdict(list)
    for marker in df.marker.unique():
        marker_rows = df[df.marker == marker]
        for _, rec in marker_rows.iterrows():
            if rec.coverage < 1.0:
                contig_blastn_records[rec.stitle].append(rec)
                break
    return contig_blastn_records
Alleles to retrieve from genome fasta
39,010
def matches_to_marker_results(df):
    """Convert BLAST match rows to a per-marker allele result dict.

    When a marker has multiple matches, the match on the longest contig is
    selected. Raises on a marker with an empty match list.
    """
    assert isinstance(df, pd.DataFrame)
    from collections import defaultdict
    # group rows by marker
    d = defaultdict(list)
    for idx, row in df.iterrows():
        marker = row['marker']
        d[marker].append(row)
    marker_results = {}
    for k, v in d.items():
        if len(v) > 1:
            logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)
            df_marker = pd.DataFrame(v)
            df_marker.sort_values('slen', ascending=False, inplace=True)
            # take only the first (longest-contig) row, then break
            for i, r in df_marker.iterrows():
                allele = r['allele_name']
                slen = r['slen']
                logging.debug('Selecting allele %s from contig with length %s', allele, slen)
                seq = r['sseq']
                if '-' in seq:
                    # recompute allele name after stripping alignment gaps
                    logging.warning('Gaps found in allele. Removing gaps. %s', r)
                    seq = seq.replace('-', '').upper()
                    allele = allele_name(seq)
                marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
                break
        elif len(v) == 1:
            row = v[0]
            seq = row['sseq']
            if '-' in seq:
                logging.warning('Gaps found in allele. Removing gaps. %s', row)
                seq = seq.replace('-', '').upper()
            allele = allele_name(seq)
            marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
        else:
            err_msg = 'Empty list of matches for marker {}'.format(k)
            logging.error(err_msg)
            raise Exception(err_msg)
    return marker_results
Perfect BLAST matches to marker results dict
39,011
def cgmlst_subspecies_call(df_relatives):
    """Call Salmonella subspecies by majority vote among close cgMLST relatives.

    Returns (most common subspecies, closest distance, subspecies counts dict),
    or None when no relative is within the subspeciation distance threshold.
    """
    closest_distance = df_relatives['distance'].min()
    if closest_distance > CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD:
        logging.warning('Min cgMLST distance (%s) above subspeciation distance threshold (%s)', closest_distance, CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD)
        return None
    else:
        # restrict to relatives within the threshold, closest first
        df_relatives = df_relatives.loc[df_relatives.distance <= CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD, :]
        df_relatives = df_relatives.sort_values('distance', ascending=True)
        logging.debug('df_relatives by cgmlst %s', df_relatives.head())
        genome_spp = genomes_to_subspecies()
        # genomes without a known subspecies become None and are filtered out
        subspecies_below_threshold = [genome_spp[member_genome] if member_genome in genome_spp else None for member_genome in df_relatives.index]
        subspecies_below_threshold = filter(None, subspecies_below_threshold)
        subspecies_counter = Counter(subspecies_below_threshold)
        logging.debug('Subspecies counter: %s', subspecies_counter)
        return (subspecies_counter.most_common(1)[0][0], closest_distance, dict(subspecies_counter))
Call Salmonella subspecies based on cgMLST results
39,012
def genome_name_from_fasta_path(fasta_path):
    """Extract the genome name from a fasta path by stripping the directory
    and the final extension (.fa/.fas/.fasta/.fna or any other .ext)."""
    base = os.path.basename(fasta_path)
    ext_pattern = r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)'
    return re.sub(ext_pattern, '', base)
Extract genome name from fasta filename
39,013
def df_first_row_to_dict(df):
    """Return the first row of `df` as a dict; None when `df` is None.

    Note: raises IndexError on an empty DataFrame (preserved behavior).
    """
    if df is None:
        return None
    first_rows = [dict(row) for _, row in df.head(1).iterrows()]
    return first_rows[0]
First DataFrame row to list of dict
39,014
def sketch_fasta(fasta_path, outdir):
    """Create a Mash sketch (<genome>.msh) for `fasta_path` inside `outdir`.

    Returns the sketch path; asserts that mash actually produced the file.
    """
    genome_name = genome_name_from_fasta_path(fasta_path)
    outpath = os.path.join(outdir, genome_name)
    cmd = ['mash', 'sketch', '-o', outpath, fasta_path]
    logging.info('Running Mash sketch with command: %s', ' '.join(cmd))
    proc = Popen(cmd)
    proc.wait()
    sketch_path = outpath + '.msh'
    assert os.path.exists(sketch_path), \
        'Mash sketch for genome {} was not created at {}'.format(genome_name, sketch_path)
    return sketch_path
Create a Mash sketch from an input fasta file
39,015
def merge_sketches(outdir, sketch_paths):
    """Paste the given sketches together with the bundled SISTR sketch DB.

    Returns the merged sketch path ('sistr.msh' in `outdir`).
    """
    merge_sketch_path = os.path.join(outdir, 'sistr.msh')
    cmd = ['mash', 'paste', merge_sketch_path] + list(sketch_paths) + [MASH_SKETCH_FILE]
    logging.info('Running Mash paste with command: %s', ' '.join(cmd))
    proc = Popen(cmd)
    proc.wait()
    assert os.path.exists(merge_sketch_path), \
        'Merged sketch was not created at {}'.format(merge_sketch_path)
    return merge_sketch_path
Merge new Mash sketches with current Mash sketches
39,016
def get_file(self, key, file):
    """Write the value stored at `key` into `file`.

    `file` may be a filename string or an open file-like object; the key is
    validated first.
    """
    self._check_valid_key(key)
    if isinstance(file, str):
        return self._get_filename(key, file)
    return self._get_file(key, file)
Write contents of key to file
39,017
def put_file(self, key, file):
    """Store data read from `file` (filename string or file-like) under `key`.

    Consistency fix: the key is now validated via `_check_valid_key` first,
    matching the sibling `get_file`/`copy` methods and the simplekv
    KeyValueStore convention.
    """
    self._check_valid_key(key)
    if isinstance(file, str):
        return self._put_filename(key, file)
    return self._put_file(key, file)
Store into key from file on disk
39,018
def copy(self, source, dest):
    """Copy the value at `source` to `dest`, validating both keys first.

    The destination is overwritten if it already exists.
    """
    for key in (source, dest):
        self._check_valid_key(key)
    return self._copy(source, dest)
Copies a key. The destination is overwritten if it already exists.
39,019
def compare_config(self):
    """NAPALM compare_config for EOS.

    Returns the diff of the pending session config (first two header lines
    dropped), or '' when no config session is open.
    """
    if self.config_session is None:
        return ''
    cmd = 'show session-config named %s diffs' % self.config_session
    output = self.device.run_commands([cmd], encoding='text')[0]['output']
    return '\n'.join(output.splitlines()[2:]).strip()
Implementation of NAPALM method compare_config .
39,020
def discard_config(self):
    """NAPALM discard_config: abort the pending EOS config session, if any."""
    if self.config_session is not None:
        self.device.run_commands([
            'configure session {}'.format(self.config_session),
            'abort',
        ])
        self.config_session = None
Implementation of NAPALM method discard_config .
39,021
def rollback(self):
    """NAPALM rollback: replace the running config with flash:rollback-0 and save."""
    self.device.run_commands([
        'configure replace flash:rollback-0',
        'write memory',
    ])
Implementation of NAPALM method rollback .
39,022
def get_facts(self):
    """NAPALM get_facts for EOS: hostname, model, serial, uptime and interfaces."""
    version, hostname, iface_result = self.device.run_commands([
        'show version',
        'show hostname',
        'show interfaces',
    ])
    interfaces_dict = iface_result['interfaces']
    uptime = time.time() - version['bootupTimestamp']
    # sub-interfaces (names containing '.') are excluded from the list
    interfaces = string_parsers.sorted_nicely(
        [i for i in interfaces_dict.keys() if '.' not in i])
    return {
        'hostname': hostname['hostname'],
        'fqdn': hostname['fqdn'],
        'vendor': u'Arista',
        'model': version['modelName'],
        'serial_number': version['serialNumber'],
        'os_version': version['internalVersion'],
        'uptime': int(uptime),
        'interface_list': interfaces,
    }
Implementation of NAPALM method get_facts .
39,023
def get_config(self, retrieve="all"):
    """get_config implementation for EOS.

    `retrieve` selects which configs to fetch: "all", "startup", "running" or
    "candidate". The candidate config is only available when a config session
    is open; otherwise it is returned as an empty string.
    """
    get_startup = retrieve == "all" or retrieve == "startup"
    get_running = retrieve == "all" or retrieve == "running"
    # candidate requires an open config session
    get_candidate = (retrieve == "all" or retrieve == "candidate") and self.config_session
    if retrieve == "all":
        commands = ['show startup-config', 'show running-config']
        if self.config_session:
            commands.append('show session-config named {}'.format(self.config_session))
        output = self.device.run_commands(commands, encoding="text")
        # output[2] is only accessed when get_candidate is truthy
        return {
            'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
            'running': py23_compat.text_type(output[1]['output']) if get_running else u"",
            'candidate': py23_compat.text_type(output[2]['output']) if get_candidate else u"",
        }
    elif get_startup or get_running:
        commands = ['show {}-config'.format(retrieve)]
        output = self.device.run_commands(commands, encoding="text")
        return {
            'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
            'running': py23_compat.text_type(output[0]['output']) if get_running else u"",
            'candidate': "",
        }
    elif get_candidate:
        commands = ['show session-config named {}'.format(self.config_session)]
        output = self.device.run_commands(commands, encoding="text")
        return {
            'startup': "",
            'running': "",
            'candidate': py23_compat.text_type(output[0]['output']),
        }
    elif retrieve == "candidate":
        # candidate requested but no session open: everything empty
        return {
            'startup': "",
            'running': "",
            'candidate': "",
        }
    else:
        raise Exception("Wrong retrieve filter: {}".format(retrieve))
get_config implementation for EOS .
39,024
def map_boto_exceptions(key=None, exc_pass=()):
    """Map boto-specific exceptions to the simplekv API.

    Generator intended for use as a context manager (presumably wrapped with
    @contextmanager at the definition site — confirm in the full file).
    A missing key becomes KeyError(key); other boto errors become IOError
    unless their class name is listed in `exc_pass`.
    """
    from boto.exception import BotoClientError, BotoServerError, StorageResponseError
    try:
        yield
    except StorageResponseError as e:
        if e.code == 'NoSuchKey':
            raise KeyError(key)
        raise IOError(str(e))
    except (BotoClientError, BotoServerError) as e:
        # exceptions whose class name is in exc_pass are swallowed
        if e.__class__.__name__ not in exc_pass:
            raise IOError(str(e))
Map boto - specific exceptions to the simplekv - API .
39,025
def _file_md5 ( file_ ) : md5 = hashlib . md5 ( ) chunk_size = 128 * md5 . block_size for chunk in iter ( lambda : file_ . read ( chunk_size ) , b'' ) : md5 . update ( chunk ) file_ . seek ( 0 ) byte_digest = md5 . digest ( ) return base64 . b64encode ( byte_digest ) . decode ( )
Compute the md5 digest of a file in base64 encoding .
39,026
def _byte_buffer_md5 ( buffer_ ) : md5 = hashlib . md5 ( buffer_ ) byte_digest = md5 . digest ( ) return base64 . b64encode ( byte_digest ) . decode ( )
Computes the md5 digest of a byte buffer in base64 encoding .
39,027
def map_azure_exceptions(key=None, exc_pass=()):
    """Map Azure-specific exceptions to the simplekv API.

    Generator intended for use as a context manager (presumably wrapped with
    @contextmanager at the definition site — confirm in the full file).
    Missing blobs become KeyError(key) (missing containers become IOError);
    other Azure errors become IOError unless listed in `exc_pass`.
    """
    from azure.common import AzureMissingResourceHttpError, AzureHttpError, AzureException
    try:
        yield
    except AzureMissingResourceHttpError as ex:
        if ex.__class__.__name__ not in exc_pass:
            s = str(ex)
            # a missing container is an I/O problem, not a missing key
            if s.startswith(u"The specified container does not exist."):
                raise IOError(s)
            raise KeyError(key)
    except AzureHttpError as ex:
        if ex.__class__.__name__ not in exc_pass:
            raise IOError(str(ex))
    except AzureException as ex:
        if ex.__class__.__name__ not in exc_pass:
            raise IOError(str(ex))
Map Azure - specific exceptions to the simplekv - API .
39,028
def read(self, size=-1):
    """Return up to `size` bytes from the blob at the current position.

    A negative `size` (default) reads to the end. Returns b'' at EOF.
    Raises ValueError if the file is closed.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")
    with map_azure_exceptions(key=self.key):
        if size < 0:
            size = self.size - self.pos
        # end_range is inclusive, hence the -1
        end = min(self.pos + size - 1, self.size - 1)
        if self.pos > end:
            return b''
        b = self.block_blob_service.get_blob_to_bytes(
            container_name=self.container_name,
            blob_name=self.key,
            start_range=self.pos,
            end_range=end,
            max_connections=self.max_connections,
        )
        # advance by what was actually returned, not what was requested
        self.pos += len(b.content)
        return b.content
Returns up to `size` bytes, or fewer if there is no more data. If no size is given, all remaining data is returned. `size` may be >= 0.
39,029
def seek(self, offset, whence=0):
    """Move to a new position.

    whence=0: absolute; whence=1: relative to current position; whence=2:
    relative to the end. Returns the new position.

    Raises IOError when the target position would be negative, ValueError for
    a closed file or an unknown `whence`. Fix: an unknown `whence` previously
    left the position unchanged and returned silently; it now raises
    ValueError, matching the io module convention.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")
    if whence == 0:
        new_pos = offset
    elif whence == 1:
        new_pos = self.pos + offset
    elif whence == 2:
        new_pos = self.size + offset
    else:
        raise ValueError('invalid whence {!r}'.format(whence))
    if new_pos < 0:
        raise IOError('seek would move position outside the file')
    self.pos = new_pos
    return self.pos
Move to a new offset either relative or absolute . whence = 0 is absolute whence = 1 is relative whence = 2 is relative to the end .
39,030
def _on_tree(repo, tree, components, obj):
    """Mount `obj` (a Blob or Tree) on `tree` at the path given by `components`.

    Passing obj=None deletes the entry at that path. Returns the list of
    modified trees, leaf-most first (the mounted-on tree is last).
    Nesting reconstructed from a collapsed one-line source — verify against
    the upstream project.
    """
    if len(components) == 1:
        # base case: operate directly on this tree
        if isinstance(obj, Blob):
            mode = 0o100644  # regular file mode
        elif isinstance(obj, Tree):
            mode = 0o040000  # directory mode
        elif obj is None:
            mode = None      # deletion marker
        else:
            raise TypeError('Can only mount Blobs or Trees')
        name = components[0]
        if mode is not None:
            tree[name] = mode, obj.id
            return [tree]
        if name in tree:
            del tree[name]
        return [tree]
    elif len(components) > 1:
        # recurse into (or create) the subtree for the first component
        a, bc = components[0], components[1:]
        if a in tree:
            a_tree = repo[tree[a][1]]
            if not isinstance(a_tree, Tree):
                # path component exists but is not a tree: replace it
                a_tree = Tree()
        else:
            a_tree = Tree()
        res = _on_tree(repo, a_tree, bc, obj)
        a_tree_new = res[-1]
        if a_tree_new.items():
            tree[a] = 0o040000, a_tree_new.id
            return res + [tree]
        # subtree became empty: drop it from this tree
        if a in tree:
            del tree[a]
        return [tree]
    else:
        raise ValueError('Components can\'t be empty.')
Mounts an object on a tree using the given path components .
39,031
def _connect(self):
    """Create a boto3 EC2 resource for the configured region.

    Makes a cheap describe call to confirm credentials/region actually work;
    raises EC2CloudException on any failure.
    """
    try:
        resource = boto3.resource(
            'ec2',
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.secret_access_key,
            region_name=self.region,
        )
        resource.meta.client.describe_account_attributes()
    except Exception:
        raise EC2CloudException('Could not connect to region: %s' % self.region)
    return resource
Connect to ec2 resource .
39,032
def _get_user_data(self):
    """Build the bash user-data script that installs the test SSH public key."""
    public_key = ipa_utils.generate_public_ssh_key(self.ssh_private_key_file).decode()
    return BASH_SSH_SCRIPT.format(user=self.ssh_user, key=public_key)
Return formatted bash script string .
39,033
def _set_instance_ip ( self ) : instance = self . _get_instance ( ) try : ipv6 = instance . network_interfaces [ 0 ] . ipv6_addresses [ 0 ] except ( IndexError , TypeError ) : ipv6 = None self . instance_ip = instance . public_ip_address or ipv6 or instance . private_ip_address if not self . instance_ip : raise EC2CloudException ( 'IP address for instance cannot be found.' )
Retrieve instance ip and cache it .
39,034
def _process_worker(call_queue, result_queue):
    """Worker wrapper that ignores SIGINT so KeyboardInterrupt is only
    delivered to (and handled by) the parent process."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    return _process_worker_base(call_queue, result_queue)
This worker is wrapped to block KeyboardInterrupt
39,035
def _set_init_system(self, client):
    """Detect the remote init system (comm name of PID 1) and cache it.

    No-op when already detected; raises IpaDistroException on SSH failure.
    """
    if self.init_system:
        return
    try:
        out = ipa_utils.execute_ssh_command(client, 'ps -p 1 -o comm=')
    except Exception as e:
        raise IpaDistroException(
            'An error occurred while retrieving'
            ' the distro init system: %s' % e
        )
    if out:
        self.init_system = out.strip()
Determine the init system of distribution .
39,036
def get_vm_info(self, client):
    """Collect boot/startup diagnostics from the instance.

    Only gathers output when systemd is the detected init system; on any SSH
    failure the partial output is replaced by an error message.
    """
    out = ''
    self._set_init_system(client)
    if self.init_system == 'systemd':
        sections = (
            ('systemd-analyze:\n\n', 'systemd-analyze'),
            ('systemd-analyze blame:\n\n', 'systemd-analyze blame'),
            ('journalctl -b:\n\n', 'sudo journalctl -b'),
        )
        try:
            for header, cmd in sections:
                out += header
                out += ipa_utils.execute_ssh_command(client, cmd)
        except Exception as error:
            out = 'Failed to collect VM info: {0}.'.format(error)
    return out
Return vm info .
39,037
def install_package(self, client, package):
    """Install `package` on the instance via the distro's package manager.

    Returns the command output; raises IpaDistroException on failure.
    """
    install_cmd = "{sudo} '{install} {package}'".format(
        sudo=self.get_sudo_exec_wrapper(),
        install=self.get_install_cmd(),
        package=package,
    )
    try:
        return ipa_utils.execute_ssh_command(client, install_cmd)
    except Exception as error:
        raise IpaDistroException(
            'An error occurred installing package {package} '
            'on instance: {error}'.format(package=package, error=error)
        )
Install package on instance .
39,038
def reboot(self, client):
    """Stop sshd and reboot the instance, then clear the local SSH cache."""
    self._set_init_system(client)
    reboot_cmd = "{sudo} '{stop_ssh};{reboot}'".format(
        sudo=self.get_sudo_exec_wrapper(),
        stop_ssh=self.get_stop_ssh_service_cmd(),
        reboot=self.get_reboot_cmd(),
    )
    try:
        ipa_utils.execute_ssh_command(client, reboot_cmd)
    except Exception as error:
        raise IpaDistroException(
            'An error occurred rebooting instance: %s' % error
        )
    ipa_utils.clear_cache()
Execute reboot command on instance .
39,039
def update(self, client):
    """Refresh repos and update all packages on the instance.

    Returns the command output; raises IpaDistroException on failure.
    """
    update_cmd = "{sudo} '{refresh};{update}'".format(
        sudo=self.get_sudo_exec_wrapper(),
        refresh=self.get_refresh_repo_cmd(),
        update=self.get_update_cmd(),
    )
    try:
        return ipa_utils.execute_ssh_command(client, update_cmd)
    except Exception as error:
        raise IpaDistroException(
            'An error occurred updating instance: %s' % error
        )
Execute update command on instance .
39,040
def ordered(start, edges, predicate=None, inverse=False):
    """Yield edges depth-first from `start` out of a SciGraph edge list.

    Args:
        start: node id to start from.
        edges: iterable of edge dicts with 'sub', 'pred', 'obj' keys.
        predicate: if given, only follow edges with this predicate.
        inverse: walk obj->sub instead of sub->obj.

    Bug fixes: removed a leftover debug print on the predicate-skip path, and
    `inverse` is now propagated into the recursive call (previously deeper
    levels always walked in the forward direction).
    """
    s, o = ('obj', 'sub') if inverse else ('sub', 'obj')
    for edge in edges:
        if predicate is not None and edge['pred'] != predicate:
            continue
        if edge[s] == start:
            yield edge
            yield from Graph.ordered(edge[o], edges,
                                     predicate=predicate, inverse=inverse)
Depth first edges from a SciGraph response .
39,041
def tcsort(item):
    """Size of the transitive closure of a (name, children-dict) tree node.

    Counts direct children plus, recursively, all of their descendants.
    """
    _, children = item
    return len(children) + sum(tcsort(child) for child in children.items())
Get the size of the transitive closure of an item, assuming the items form a tree.
39,042
def get_node(start, tree, pnames):
    """For `start`, follow a single parent branch to the root and descend
    `tree` along it; returns (subtree containing start, branch node list).

    NOTE(review): the print() calls look like leftover debug output; their
    exact placement is reconstructed from a collapsed source — confirm.
    """
    def get_first_branch(node):
        # walk the first parent (second parent when the first forms a cycle)
        if node not in pnames:
            return []
        if pnames[node]:
            fp = pnames[node][0]
            if cycle_check(node, fp, pnames):
                fp = pnames[node][1]
            print(fp)
            return [fp] + get_first_branch(fp)
        else:
            return []
    branch = get_first_branch(start)
    # descend from the root end of the branch down to start's parent
    for n in branch[::-1]:
        tree = tree[n]
    assert start in tree, "our start wasnt in the tree! OH NO!"
    branch = [start] + branch
    print('branch', branch)
    return tree, branch
for each parent find a single branch to root
39,043
def dematerialize(parent_name, parent_node):
    """Remove nodes higher in the tree that recur further down the SAME branch;
    occurrences on OTHER branches are left alone.

    Returns a mapping of descendant name -> parent name ("lleaves").
    Nesting reconstructed from a collapsed one-line source — confirm the
    placement of the two trailing loop-body statements upstream.
    """
    lleaves = {}
    children = parent_node[parent_name]
    if not children:
        # leaf node: record it with a None marker
        lleaves[parent_name] = None
        return lleaves
    # order children by transitive-closure size (desc), alpha as tiebreak
    children_ord = reversed(sorted(sorted(((k, v) for k, v in children.items()), key=alphasortkey), key=tcsort))
    for child_name, _ in children_ord:
        new_lleaves = dematerialize(child_name, children)
        if child_name == 'magnetic resonance imaging':
            # NOTE(review): leftover breakpoint/debug hook; does nothing
            pass
        if child_name in new_lleaves or all(l in lleaves for l in new_lleaves):
            # child already appears deeper in this branch: drop the duplicate
            if child_name in lleaves:
                children.pop(child_name)
        lleaves[child_name] = parent_name
        lleaves.update(new_lleaves)
    return lleaves
Remove nodes higher in the tree that occur further down the SAME branch. If they occur down OTHER branches, leave them alone.
39,044
def get_terms(self):
    """Return the terms DataFrame, from cache, pickle backup, or the database."""
    if not self.terms.empty:
        return self.terms
    if self.from_backup:
        self.terms = open_pickle(TERMS_BACKUP_PATH)
        return self.terms
    engine = create_engine(self.db_url)
    # BUG(review): `data` is referenced on the right-hand side before it is
    # ever assigned — this line raises NameError as written. The SQL query
    # string that should be passed to pd.read_sql appears to have been lost;
    # restore it from the upstream project.
    data = self.terms = pd.read_sql(data, engine)
    create_pickle(self.terms, TERMS_BACKUP_PATH)
    return self.terms
GROUP BY is a shortcut to getting only the first item in every group
39,045
def superclasses_bug_fix(data):
    """Rename each superclass entry's 'id' key to 'superclass_tid'.

    Works around the PHP API returning 'id' but only accepting
    'superclass_tid'. Mutates and returns `data`.
    """
    for entry in data['superclasses']:
        entry['superclass_tid'] = entry.pop('id')
    return data
PHP returns id in superclass but only accepts superclass_tid
39,046
def log_info(self, data):
    """Log and return a one-line summary of a successful entity response."""
    summary = 'label={label}, id={id}, ilx={ilx}, superclass_tid={super_id}'.format(
        label=data['label'],
        id=data['id'],
        ilx=data['ilx'],
        super_id=data['superclasses'][0]['id'],
    )
    logging.info(summary)
    return summary
Logs successful responses
39,047
def process_request(self, req):
    """Validate an HTTP response from the database and return its JSON payload.

    Exits the program when the body is not JSON or when the payload carries an
    'errormsg' (either under 'data' or at the top level).

    Fixes: the original's bare excepts could leave `error` unbound and raise
    NameError from its `finally` clause when the payload was not a dict; the
    lookups are now guarded explicitly and only JSON decode errors are caught.
    """
    req.raise_for_status()
    try:
        output = req.json()
    except ValueError:
        # body was not JSON at all
        exit(req.text)
    error = None
    data = output.get('data') if isinstance(output, dict) else None
    if isinstance(data, dict):
        error = data.get('errormsg')
    elif isinstance(output, dict):
        error = output.get('errormsg')
    if error:
        exit(error)
    return output
Checks to see if data returned from database is useable
39,048
def is_equal(self, string1, string2):
    """Compare two strings ignoring case and surrounding whitespace."""
    def normalize(s):
        return s.strip().lower()
    return normalize(string1) == normalize(string2)
Simple string comparator
39,049
def are_ilx(self, ilx_ids):
    """Fetch data for each candidate ILX ID.

    IDs are cleaned of 'http', '.' and '/' characters before lookup; failed
    lookups contribute an empty dict so the result stays parallel to input.
    """
    total_data = []
    for raw_id in ilx_ids:
        cleaned = raw_id.replace('http', '').replace('.', '').replace('/', '')
        data, success = self.get_data_from_ilx(cleaned)
        total_data.append(data['data'] if success else {})
    return total_data
Checks list of objects to see if they are usable ILX IDs
39,050
def add_triple(self, subj, pred, obj):
    """Add a property triple to an existing entity.

    Dispatch depends on which of (subj, pred, obj) resolve to ILX entities:
    all three -> relationship; subj+pred -> annotation with literal value;
    subj only -> direct field update via the ttl->sci predicate map.
    """
    subj_data, pred_data, obj_data = self.are_ilx([subj, pred, obj])
    if subj_data.get('id') and pred_data.get('id') and obj_data.get('id'):
        # term1 -[relationship]-> term2
        if pred_data['type'] != 'relationship':
            return self.test_check('Adding a relationship as formate "term1_ilx relationship_ilx term2_ilx"')
        return self.add_relationship(term1=subj_data, relationship=pred_data, term2=obj_data)
    elif subj_data.get('id') and pred_data.get('id'):
        # entity -[annotation]-> literal value
        if pred_data['type'] != 'annotation':
            return self.test_check('Adding a relationship as formate "term_ilx annotation_ilx value"')
        return self.add_annotation(entity=subj_data, annotation=pred_data, value=obj)
    elif subj_data.get('id'):
        # direct field update on the subject entity
        data = subj_data
        _pred = self.ttl2sci_map.get(pred)
        if not _pred:
            error = pred + " doesnt not have correct RDF format or It is not an option"
            return self.test_check(error)
        data = self.custom_update(data, _pred, obj)
        if data == 'failed':
            return data
        data = superclasses_bug_fix(data)
        url_base = self.base_path + 'term/edit/{id}'
        url = url_base.format(id=data['id'])
        return self.post(url, data)
    else:
        return self.test_check('The ILX ID(s) provided do not exist')
Adds an entity property to an existing entity
39,051
def add_relationship(self, term1, relationship, term2):
    """POST a term1-[relationship]-term2 link to the database."""
    payload = {
        'term1_id': term1['id'],
        'relationship_tid': relationship['id'],
        'term2_id': term2['id'],
        'term1_version': term1['version'],
        'relationship_term_version': relationship['version'],
        'term2_version': term2['version'],
    }
    return self.post(self.base_path + 'term/add-relationship', payload)
Creates a relationship between 3 entities in database
39,052
def add_annotation(self, entity, annotation, value):
    """POST an annotation (entity, annotation-term, literal value) to the database."""
    payload = {
        'tid': entity['id'],
        'annotation_tid': annotation['id'],
        'value': value,
        'term_version': entity['version'],
        'annotation_term_version': annotation['version'],
    }
    return self.post(self.base_path + 'term/add-annotation', payload)
Adds an annotation property to an existing entity
39,053
def custom_update(self, data, pred, obj):
    """Update an existing entity's field `pred` with `obj`.

    String fields are overwritten; list fields (synonyms, superclasses,
    existing_ids) get deduplicated appends. Structure reconstructed from a
    collapsed one-line source; exact string-literal spacing and the placement
    of the preferred_change call should be confirmed upstream.
    """
    if isinstance(data[pred], str):
        # scalar field: simple overwrite
        data[pred] = str(obj)
    else:
        if pred == 'synonyms':
            literals = [d['literal'] for d in data[pred]]
            if obj not in literals:
                data[pred].append({'literal': obj})
        elif pred == 'superclasses':
            ilx_ids = [d['ilx'] for d in data[pred]]
            if obj not in ilx_ids:
                # normalize curie form to the ilx_ identifier form
                _obj = obj.replace('ILX:', 'ilx_')
                super_data, success = self.get_data_from_ilx(ilx_id=_obj)
                super_data = super_data['data']
                if success:
                    data[pred].append({'id': super_data['id'], 'ilx': _obj})
                else:
                    return self.test_check('Your superclass ILX ID ' + _obj + ' does not exist.')
        elif pred == 'existing_ids':
            iris = [d['iri'] for d in data[pred]]
            if obj not in iris:
                if 'http' not in obj:
                    return self.test_check('exisiting id value must be a uri containing "http"')
                data[pred].append({'curie': self.qname(obj), 'iri': obj, 'preferred': '0'})
                # re-evaluate which existing id is preferred after the append
                data = self.preferred_change(data)
        else:
            return self.test_check(pred + ' Has slipped through the cracks')
    return data
Updates an existing entity property based on the predicate input
39,054
def sortProperties(self, properties):
    """Sort each predicate's object list in place, then return the predicates
    ordered by their rank in self.predicate_rank."""
    for object_list in properties.values():
        object_list.sort(key=self._globalSortKey)
    return sorted(properties, key=lambda pred: self.predicate_rank[pred])
Take a hash from predicate uris to lists of values . Sort the lists of values . Return a sorted list of properties .
39,055
def _buildPredicateHash ( self , subject ) : properties = { } for s , p , o in self . store . triples ( ( subject , None , None ) ) : oList = properties . get ( p , [ ] ) oList . append ( o ) properties [ p ] = oList return properties
Build a hash key by predicate to a list of objects for the given subject
39,056
def isValidList ( self , l ) : try : if self . store . value ( l , RDF . first ) is None : return False except : return False while l : if l != RDF . nil : po = list ( self . store . predicate_objects ( l ) ) if ( RDF . type , RDF . List ) in po and len ( po ) == 3 : pass elif len ( po ) != 2 : return False l = self . store . value ( l , RDF . rest ) return True
Checks if l is a valid RDF list i . e . no nodes have other properties .
39,057
def _write ( self , value ) : if ' ' in value : s = inspect . stack ( ) fn = s [ 1 ] . function super ( ) . write ( '%%DEBUG {} %%' . format ( fn ) ) super ( ) . write ( value )
rename to write and import inspect to debug the callstack
39,058
def serialize ( self , * args , ** kwargs ) : if 'labels' in kwargs : self . _labels . update ( kwargs [ 'labels' ] ) super ( HtmlTurtleSerializer , self ) . serialize ( * args , ** kwargs )
Modified to allow additional labels to be passed in .
39,059
def addLNT ( LocalName , phenoId , predicate , g = None ) : if g is None : s = inspect . stack ( 0 ) checkCalledInside ( 'LocalNameManager' , s ) g = s [ 1 ] [ 0 ] . f_locals addLN ( LocalName , Phenotype ( phenoId , predicate ) , g )
Add a local name for a phenotype from a pair of identifiers
39,060
def load_existing ( self ) : from pyontutils . closed_namespaces import rdfs try : next ( iter ( self . neurons ( ) ) ) raise self . ExistingNeuronsError ( 'Existing neurons detected. Please ' 'load from file before creating neurons!' ) except StopIteration : pass def getClassType ( s ) : graph = self . load_graph Class = infixowl . Class ( s , graph = graph ) for ec in Class . equivalentClass : if isinstance ( ec . identifier , rdflib . BNode ) : bc = infixowl . CastClass ( ec , graph = graph ) if isinstance ( bc , infixowl . BooleanClass ) : for id_ in bc . _rdfList : if isinstance ( id_ , rdflib . URIRef ) : yield id_ if not graphBase . ignore_existing : ogp = Path ( graphBase . ng . filename ) if ogp . exists ( ) : from itertools import chain from rdflib import Graph self . load_graph = Graph ( ) . parse ( graphBase . ng . filename , format = 'turtle' ) graphBase . load_graph = self . load_graph _ = [ graphBase . in_graph . add ( t ) for t in graphBase . load_graph ] if len ( graphBase . python_subclasses ) == 2 : ebms = [ type ( OntId ( s ) . suffix , ( NeuronCUT , ) , dict ( owlClass = s ) ) for s in self . load_graph [ : rdfs . subClassOf : NeuronEBM . owlClass ] if not graphBase . knownClasses . append ( s ) ] else : ebms = [ ] class_types = [ ( type , s ) for s in self . load_graph [ : rdf . type : owl . Class ] for type in getClassType ( s ) if type ] sc = None for sc in chain ( graphBase . python_subclasses , ebms ) : sc . owlClass iris = [ s for type , s in class_types if type == sc . owlClass ] if iris : sc . _load_existing ( iris ) if sc is None : raise ImportError ( f'Failed to find any neurons to load in {graphBase.ng.filename}' )
advanced usage allows loading multiple sets of neurons and using a config object to keep track of the different graphs
39,061
def label_maker ( self ) : if ( not hasattr ( graphBase , '_label_maker' ) or graphBase . _label_maker . local_conventions != graphBase . local_conventions ) : graphBase . _label_maker = LabelMaker ( graphBase . local_conventions ) return graphBase . _label_maker
needed to defer loading of local conventions to avoid circular dependency issue
39,062
def _graphify ( self , * args , graph = None ) : if graph is None : graph = self . out_graph gl = self . genLabel ll = self . localLabel ol = self . origLabel graph . add ( ( self . id_ , ilxtr . genLabel , rdflib . Literal ( gl ) ) ) if ll != gl : graph . add ( ( self . id_ , ilxtr . localLabel , rdflib . Literal ( ll ) ) ) if ol and ol != gl : graph . add ( ( self . id_ , ilxtr . origLabel , rdflib . Literal ( ol ) ) ) members = [ self . expand ( self . owlClass ) ] for pe in self . pes : target = pe . _graphify ( graph = graph ) if isinstance ( pe , NegPhenotype ) : djc = infixowl . Class ( graph = graph ) djc . complementOf = target members . append ( djc ) else : members . append ( target ) intersection = infixowl . BooleanClass ( members = members , graph = graph ) ec = [ intersection ] self . Class . equivalentClass = ec return self . Class
Lift phenotypeEdges to Restrictions
39,063
def qname ( uri , warning = False ) : if warning : print ( tc . red ( 'WARNING:' ) , tc . yellow ( f'qname({uri}) is deprecated! please use OntId({uri}).curie' ) ) return __helper_graph . qname ( uri )
compute qname from defaults
39,064
def cull_prefixes ( graph , prefixes = { k : v for k , v in uPREFIXES . items ( ) if k != 'NIFTTL' } , cleanup = lambda ps , graph : None , keep = False ) : prefs = [ '' ] if keep : prefixes . update ( { p : str ( n ) for p , n in graph . namespaces ( ) } ) if '' not in prefixes : prefixes [ '' ] = null_prefix pi = { v : k for k , v in prefixes . items ( ) } asdf = { } asdf . update ( pi ) for uri in set ( ( e for t in graph for e in t ) ) : if uri . endswith ( '.owl' ) or uri . endswith ( '.ttl' ) or uri . endswith ( '$$ID$$' ) : continue for rn , rp in sorted ( asdf . items ( ) , key = lambda a : - len ( a [ 0 ] ) ) : lrn = len ( rn ) if type ( uri ) == rdflib . BNode : continue elif uri . startswith ( rn ) and '#' not in uri [ lrn : ] and '/' not in uri [ lrn : ] : prefs . append ( rp ) break ps = { p : prefixes [ p ] for p in prefs } cleanup ( ps , graph ) ng = makeGraph ( '' , prefixes = ps ) [ ng . g . add ( t ) for t in graph ] return ng
Remove unused curie prefixes and normalize to a standard set .
39,065
def displayTriples ( triples , qname = qname ) : [ print ( * ( e [ : 5 ] if isinstance ( e , rdflib . BNode ) else qname ( e ) for e in t ) , '.' ) for t in sorted ( triples ) ]
triples can also be an rdflib Graph instance
39,066
def write ( self , cull = False ) : if cull : cull_prefixes ( self ) . write ( ) else : ser = self . g . serialize ( format = 'nifttl' ) with open ( self . filename , 'wb' ) as f : f . write ( ser )
Serialize self . g and write to self . filename set cull to true to remove unwanted prefixes
39,067
def add_hierarchy ( self , parent , edge , child ) : if type ( parent ) != rdflib . URIRef : parent = self . check_thing ( parent ) if type ( edge ) != rdflib . URIRef : edge = self . check_thing ( edge ) if type ( child ) != infixowl . Class : if type ( child ) != rdflib . URIRef : child = self . check_thing ( child ) child = infixowl . Class ( child , graph = self . g ) restriction = infixowl . Restriction ( edge , graph = self . g , someValuesFrom = parent ) child . subClassOf = [ restriction ] + [ c for c in child . subClassOf ]
Helper function to simplify the addition of part_of style objectProperties to graphs . FIXME make a method of makeGraph?
39,068
def add_restriction ( self , subject , predicate , object_ ) : if type ( object_ ) != rdflib . URIRef : object_ = self . check_thing ( object_ ) if type ( predicate ) != rdflib . URIRef : predicate = self . check_thing ( predicate ) if type ( subject ) != infixowl . Class : if type ( subject ) != rdflib . URIRef : subject = self . check_thing ( subject ) subject = infixowl . Class ( subject , graph = self . g ) restriction = infixowl . Restriction ( predicate , graph = self . g , someValuesFrom = object_ ) subject . subClassOf = [ restriction ] + [ c for c in subject . subClassOf ]
Lift normal triples into restrictions using someValuesFrom .
39,069
def qname ( self , uri , generate = False ) : try : prefix , namespace , name = self . g . namespace_manager . compute_qname ( uri , generate = generate ) qname = ':' . join ( ( prefix , name ) ) return qname except ( KeyError , ValueError ) as e : return uri . toPython ( ) if isinstance ( uri , rdflib . URIRef ) else uri
Given a uri return the qname if it exists otherwise return the uri .
39,070
def archive_history_item ( item , destination , no_color ) : log_src , description = split_history_item ( item . strip ( ) ) log_dest = os . path . sep . join ( log_src . rsplit ( os . path . sep , 4 ) [ 1 : ] ) results_src = log_src . rsplit ( '.' , 1 ) [ 0 ] + '.results' results_dest = log_dest . rsplit ( '.' , 1 ) [ 0 ] + '.results' destination_path = os . path . join ( destination , log_dest ) log_dir = os . path . dirname ( destination_path ) try : if not os . path . isdir ( log_dir ) : os . makedirs ( log_dir ) shutil . copyfile ( log_src , destination_path ) shutil . copyfile ( results_src , os . path . join ( destination , results_dest ) ) except Exception as error : echo_style ( 'Unable to archive history item: %s' % error , no_color , fg = 'red' ) sys . exit ( 1 ) else : update_history_log ( os . path . join ( destination , '.history' ) , description = description , test_log = log_dest )
Archive the log and results file for the given history item .
39,071
def echo_verbose_results ( data , no_color ) : click . echo ( ) click . echo ( '\n' . join ( '{}: {}' . format ( key , val ) for key , val in data [ 'info' ] . items ( ) ) ) click . echo ( ) for test in data [ 'tests' ] : if test [ 'outcome' ] == 'passed' : fg = 'green' elif test [ 'outcome' ] == 'skipped' : fg = 'yellow' else : fg = 'red' name = parse_test_name ( test [ 'name' ] ) echo_style ( '{} {}' . format ( name , test [ 'outcome' ] . upper ( ) ) , no_color , fg = fg )
Print list of tests and result of each test .
39,072
def get_log_file_from_item ( history ) : try : log_file , description = shlex . split ( history ) except ValueError : log_file = history . strip ( ) return log_file
Return the log file based on provided history item .
39,073
def results_history ( history_log , no_color ) : try : with open ( history_log , 'r' ) as f : lines = f . readlines ( ) except Exception as error : echo_style ( 'Unable to process results history log: %s' % error , no_color , fg = 'red' ) sys . exit ( 1 ) index = len ( lines ) for item in lines : click . echo ( '{} {}' . format ( index , item ) , nl = False ) index -= 1
Display a list of ipa test results history .
39,074
def split_history_item ( history ) : try : log_file , description = shlex . split ( history ) except ValueError : log_file = history . strip ( ) description = None return log_file , description
Return the log file and optional description for item .
39,075
def get_working_dir ( script__file__ ) : start = Path ( script__file__ ) . resolve ( ) _root = Path ( start . root ) working_dir = start while not list ( working_dir . glob ( '.git' ) ) : if working_dir == _root : return working_dir = working_dir . parent return working_dir
hardcoded sets the equivalent working directory if not in git
39,076
def sysidpath ( ignore_options = False ) : failover = Path ( '/tmp/machine-id' ) if not ignore_options : options = ( Path ( '/etc/machine-id' ) , failover , ) for option in options : if ( option . exists ( ) and os . access ( option , os . R_OK ) and option . stat ( ) . st_size > 0 ) : return option uuid = uuid4 ( ) with open ( failover , 'wt' ) as f : f . write ( uuid . hex ) return failover
get a unique identifier for the machine running this function
39,077
def gencode ( self ) : ledict = requests . get ( self . api_url ) . json ( ) ledict = self . dotopdict ( ledict ) out = self . dodict ( ledict ) self . _code = out
Run this to generate the code
39,078
def dotopdict ( self , dict_ ) : mlookup = { 'get' : 'GET' , 'post' : 'POST' } def rearrange ( path , method_dict , method ) : oid = method_dict [ 'operationId' ] self . _paths [ oid ] = path method_dict [ 'nickname' ] = oid method_dict [ 'method' ] = mlookup [ method ] paths = dict_ [ 'paths' ] for path , path_dict in paths . items ( ) : if self . path_prefix and self . path_prefix not in path : continue path_dict [ 'operations' ] = [ ] for method , method_dict in sorted ( path_dict . items ( ) ) : if method == 'operations' : continue rearrange ( path , method_dict , method ) path_dict [ 'operations' ] . append ( method_dict ) path_dict [ 'path' ] = path def setp ( v , lenp = len ( self . path_prefix ) ) : v [ 'path' ] = v [ 'path' ] [ lenp : ] return v dict_ [ 'apis' ] = [ ] for tag_dict in dict_ [ 'tags' ] : path = '/' + tag_dict [ 'name' ] d = { 'path' : path , 'description' : tag_dict [ 'description' ] , 'class_json' : { 'docstring' : tag_dict [ 'description' ] , 'resourcePath' : path , 'apis' : [ setp ( v ) for k , v in paths . items ( ) if k . startswith ( self . path_prefix + path ) ] } , } dict_ [ 'apis' ] . append ( d ) self . _swagger ( dict_ [ 'swagger' ] ) self . _info ( dict_ [ 'info' ] ) self . _definitions ( dict_ [ 'definitions' ] ) return dict_
Rewrite the 2 . 0 json to match what we feed the code for 1 . 2
39,079
def write ( self , filename , type_ = 'obo' ) : if os . path . exists ( filename ) : name , ext = filename . rsplit ( '.' , 1 ) try : prefix , num = name . rsplit ( '_' , 1 ) n = int ( num ) n += 1 filename = prefix + '_' + str ( n ) + '.' + ext except ValueError : filename = name + '_1.' + ext print ( 'file exists, renaming to %s' % filename ) self . write ( filename , type_ ) else : with open ( filename , 'wt' , encoding = 'utf-8' ) as f : if type_ == 'obo' : f . write ( str ( self ) ) elif type_ == 'ttl' : f . write ( self . __ttl__ ( ) ) else : raise TypeError ( 'No exporter for file type %s!' % type_ )
Write file will not overwrite files with the same name outputs to obo by default but can also output to ttl if passed type_ = ttl when called .
39,080
def main ( ) : from IPython import embed from time import time rdflib . plugin . register ( 'librdfxml' , rdflib . parser . Parser , 'librdflib' , 'libRdfxmlParser' ) rdflib . plugin . register ( 'libttl' , rdflib . parser . Parser , 'librdflib' , 'libTurtleParser' ) p1 = Path ( '~/git/NIF-Ontology/ttl/NIF-Molecule.ttl' ) . expanduser ( ) start = time ( ) graph = rdflib . Graph ( ) . parse ( p1 . as_posix ( ) , format = 'libttl' ) stop = time ( ) lttime = stop - start print ( 'libttl' , lttime ) start = time ( ) graph = rdflib . Graph ( ) . parse ( p1 . as_posix ( ) , format = 'turtle' ) stop = time ( ) ttltime = stop - start print ( 'ttl' , ttltime ) print ( 'diff lt - ttl' , lttime - ttltime ) p2 = Path ( '~/git/NIF-Ontology/ttl/external/uberon.owl' ) . expanduser ( ) start = time ( ) graph2 = rdflib . Graph ( ) . parse ( p2 . as_posix ( ) , format = 'librdfxml' ) stop = time ( ) lrtime = stop - start print ( 'librdfxml' , lrtime ) if True : start = time ( ) graph2 = rdflib . Graph ( ) . parse ( p2 . as_posix ( ) , format = 'xml' ) stop = time ( ) rltime = stop - start print ( 'rdfxml' , rltime ) print ( 'diff lr - rl' , lrtime - rltime ) if True : file_uri = p2 . as_uri ( ) parser = RDF . Parser ( name = 'rdfxml' ) stream = parser . parse_as_stream ( file_uri ) start = time ( ) t = tuple ( statement_to_tuple ( statement ) for statement in stream ) stop = time ( ) stime = stop - start print ( 'simple time' , stime ) embed ( )
Python 3 . 6 . 6 ibttl 2 . 605194091796875 ttl 3 . 8316309452056885 diff lt - ttl - 1 . 2264368534088135 librdfxml 31 . 267616748809814 rdfxml 58 . 25124502182007 diff lr - rl - 26 . 983628273010254 simple time 17 . 405116319656372
39,081
def make_predicate_object_combinator ( function , p , o ) : def predicate_object_combinator ( subject ) : return function ( subject , p , o ) return predicate_object_combinator
Combinator to hold predicate object pairs until a subject is supplied and then call a function that accepts a subject predicate and object .
39,082
def serialize ( self , subject , * objects_or_combinators ) : ec_s = rdflib . BNode ( ) if self . operator is not None : if subject is not None : yield subject , self . predicate , ec_s yield from oc ( ec_s ) yield from self . _list . serialize ( ec_s , self . operator , * objects_or_combinators ) else : for thing in objects_or_combinators : if isinstance ( thing , Combinator ) : object = rdflib . BNode ( ) hasType = False for t in thing ( object ) : if t [ 1 ] == rdf . type : hasType = True yield t if not hasType : yield object , rdf . type , owl . Class else : object = thing yield subject , self . predicate , object
object_combinators may also be URIRefs or Literals
39,083
def update_with ( self , ** query ) : for k , v in self . _filter_attrs ( query ) . items ( ) : setattr ( self , k , v ) return self . save ( )
secure update mass assignment protected
39,084
def _validate ( self ) : errors = { } for name , validator in self . _validators . items ( ) : value = getattr ( self , name ) try : validator ( self , value ) except ValidationError as e : errors [ name ] = str ( e ) self . _validate_errors = errors
Validate model data and save errors
39,085
def penn_to_wn ( tag ) : if tag . startswith ( 'N' ) : return 'n' if tag . startswith ( 'V' ) : return 'v' if tag . startswith ( 'J' ) : return 'a' if tag . startswith ( 'R' ) : return 'r' return None
Convert between a Penn Treebank tag to a simplified Wordnet tag
39,086
def sentence_similarity ( sentence1 , sentence2 ) : sentence1 = pos_tag ( word_tokenize ( sentence1 ) ) sentence2 = pos_tag ( word_tokenize ( sentence2 ) ) synsets1 = [ tagged_to_synset ( * tagged_word ) for tagged_word in sentence1 ] synsets2 = [ tagged_to_synset ( * tagged_word ) for tagged_word in sentence2 ] synsets1 = [ ss for ss in synsets1 if ss ] synsets2 = [ ss for ss in synsets2 if ss ] score , count = 0.0 , 0.0 for synset in synsets1 : best_score = [ synset . path_similarity ( ss ) for ss in synsets2 if synset . path_similarity ( ss ) ] if best_score : score += max ( best_score ) count += 1 if count > 0 : score /= count else : score = 0 return score
compute the sentence similarity using Wordnet
39,087
def command_line ( ) : from docopt import docopt doc = docopt ( __doc__ , version = VERSION ) args = pd . Series ( { k . replace ( '--' , '' ) : v for k , v in doc . items ( ) } ) if args . all : graph = Graph2Pandas ( args . file , _type = 'all' ) elif args . type : graph = Graph2Pandas ( args . file , _type = args . type ) else : graph = Graph2Pandas ( args . file ) graph . save ( args . output )
If you want to use the command line
39,088
def save ( self , foldername : str , path_to_folder : str = None ) -> None : self . create_pickle ( ( self . g . namespaces , ) ) self . df . to_pickle ( output )
Saves entities into multiple files within the same folder because of pickle - recursive errors that would happen if squeezed into one
39,089
def qname ( self , uri : str ) -> str : try : prefix , namespace , name = self . g . compute_qname ( uri ) qname = prefix + ':' + name return qname except : try : print ( 'prefix:' , prefix ) print ( 'namespace:' , namespace ) print ( 'name:' , name ) except : print ( 'Could not print from compute_qname' ) exit ( 'No qname for ' + uri )
Returns qname of uri in rdflib graph while also saving it
39,090
def get_sparql_dataframe ( self ) : self . result = self . g . query ( self . query ) cols = set ( ) indx = set ( ) data = { } curr_subj = None bindings = [ ] for i , binding in enumerate ( self . result . bindings ) : subj_binding = binding [ rdflib . term . Variable ( 'subj' ) ] pred_binding = binding [ rdflib . term . Variable ( 'pred' ) ] obj_binding = binding [ rdflib . term . Variable ( 'obj' ) ] subj = subj_binding pred = pred_binding obj = obj_binding if isinstance ( subj , BNode ) : continue elif isinstance ( pred , BNode ) : continue elif isinstance ( obj , BNode ) and obj : continue cols . add ( pred ) indx . add ( subj ) bindings . append ( binding ) bindings = sorted ( bindings , key = lambda k : k [ rdflib . term . Variable ( 'subj' ) ] ) df = pd . DataFrame ( columns = cols , index = indx ) for i , binding in enumerate ( bindings ) : subj_binding = binding [ rdflib . term . Variable ( 'subj' ) ] pred_binding = binding [ rdflib . term . Variable ( 'pred' ) ] obj_binding = binding [ rdflib . term . Variable ( 'obj' ) ] subj = subj_binding pred = pred_binding obj = obj_binding if isinstance ( subj , BNode ) : continue elif isinstance ( pred , BNode ) : continue elif isinstance ( obj , BNode ) and obj : continue if curr_subj == None : curr_subj = subj if not data . get ( subj ) : data [ subj ] = defaultdict ( list ) data [ subj ] [ pred ] . append ( obj ) elif curr_subj != subj : curr_subj = subj for data_subj , data_pred_objs in data . items ( ) : for data_pred , data_objs in data_pred_objs . items ( ) : if len ( data_objs ) == 1 : data_pred_objs [ data_pred ] = data_objs [ 0 ] df . loc [ data_subj ] = pd . Series ( data_pred_objs ) data = { } if not data . get ( subj ) : data [ subj ] = defaultdict ( list ) data [ subj ] [ pred ] . append ( obj ) else : if not data . get ( subj ) : data [ subj ] = defaultdict ( list ) data [ subj ] [ pred ] . append ( obj ) for data_subj , data_pred_objs in data . 
items ( ) : for data_pred , data_objs in data_pred_objs . items ( ) : if len ( data_objs ) == 1 : data_pred_objs [ data_pred ] = data_objs [ 0 ] df . loc [ data_subj ] = pd . Series ( data_pred_objs ) df = df . where ( ( pd . notnull ( df ) ) , None ) return df
Iterates through the sparql table and condenses it into a Pandas DataFrame
39,091
def df ( self , qname_predicates : bool = False , keep_variable_type : bool = True ) -> pd . DataFrame : local_df = self . df . copy ( ) if qname_predicates : for col in self . columns : local_df . rename ( { col : self . g . qname ( col ) } ) if not keep_variable_type : pass return local_df
Multi functional DataFrame with settings
39,092
def qname ( self , iri : str ) -> str : prefix , namespace , name = self . g . compute_qname ( uri ) qname = prefix + ':' + name self . rqname [ qname ] = iri return qname
Get qualified name of uri in rdflib graph while also saving it
39,093
def find_prefix ( self , iri : Union [ URIRef , Literal , str ] ) -> Union [ None , str ] : iri = str ( iri ) max_iri_len = 0 max_prefix = None for prefix , uri in common_namespaces . items ( ) : if uri in iri and max_iri_len < len ( uri ) : max_prefix = prefix max_iri_len = len ( uri ) return max_prefix
Finds if uri is in common_namespaces
39,094
def add_annotation ( self , subj : URIRef , pred : URIRef , obj : Union [ Literal , URIRef ] , a_p : URIRef , a_o : Union [ Literal , URIRef ] , ) -> BNode : bnode : BNode = self . triple2annotation_bnode . get ( ( subj , pred , obj ) ) if not bnode : a_s : BNode = BNode ( ) self . triple2annotation_bnode [ ( subj , pred , obj ) ] : BNode = a_s self . g . add ( ( a_s , RDF . type , OWL . Axiom ) ) self . g . add ( ( a_s , OWL . annotatedSource , self . process_subj_or_pred ( subj ) ) ) self . g . add ( ( a_s , OWL . annotatedProperty , self . process_subj_or_pred ( pred ) ) ) self . g . add ( ( a_s , OWL . annotatedTarget , self . process_obj ( obj ) ) ) else : a_s : BNode = bnode self . g . add ( ( a_s , self . process_subj_or_pred ( a_p ) , self . process_obj ( a_o ) ) ) return bnode
Adds annotation to rdflib graph .
39,095
def add_triple ( self , subj : Union [ URIRef , str ] , pred : Union [ URIRef , str ] , obj : Union [ URIRef , Literal , str ] ) -> None : if obj in [ None , "" , " " ] : return _subj = self . process_subj_or_pred ( subj ) _pred = self . process_subj_or_pred ( pred ) _obj = self . process_obj ( obj ) self . g . add ( ( _subj , _pred , _obj ) )
Adds triple to rdflib Graph
39,096
def process_prefix ( self , prefix : str ) -> Union [ Namespace , None ] : if self . namespaces . get ( prefix ) : return self . namespaces [ prefix ] iri : str = common_namespaces . get ( prefix ) if iri : return self . add_namespace ( prefix , iri )
Add namespace to graph if it has a local match
39,097
def process_subj_or_pred ( self , component : Union [ URIRef , str ] ) -> URIRef : if 'http' in component : prefix = self . find_prefix ( component ) if prefix : self . process_prefix ( prefix ) return URIRef ( component ) elif ':' in component : presumed_prefix , info = component . split ( ':' , 1 ) namespace : Union [ Namespace , None ] = self . process_prefix ( presumed_prefix ) if not namespace : exit ( component + ': qname namespace does\'t exist yet.' ) return namespace [ info ] exit ( component + ': is not a valid subject or predicate' )
Adds viable uri from iri or expands viable qname to iri to be triple ready
39,098
def process_obj ( self , obj : Union [ URIRef , Literal , str ] ) -> Union [ URIRef , Literal ] : if isinstance ( obj , dict ) or isinstance ( obj , list ) : exit ( str ( obj ) + ': should be str or intended to be a URIRef or Literal.' ) if isinstance ( obj , Literal ) or isinstance ( obj , URIRef ) : prefix = self . find_prefix ( obj ) if prefix : self . process_prefix ( prefix ) return obj if len ( obj ) > 8 : if 'http' == obj [ : 4 ] and '://' in obj and ' ' not in obj : prefix = self . find_prefix ( obj ) if prefix : self . process_prefix ( prefix ) return URIRef ( obj ) if ':' in str ( obj ) : presumed_prefix , info = obj . split ( ':' , 1 ) namespace : Union [ Namespace , None ] = self . process_prefix ( presumed_prefix ) if namespace : return namespace [ info ] return Literal ( obj )
Gives component the proper node type
39,099
def remove_triple ( self , subj : URIRef , pred : URIRef , obj : Union [ URIRef , Literal ] ) -> None : self . g . remove ( ( subj , pred , obj ) )
Removes triple from rdflib Graph