repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
phac-nml/sistr_cmd | sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py | seq_int_arr | def seq_int_arr(seqs):
"""Convert list of ACGT strings to matix of 1-4 ints
Args:
seqs (list of str): nucleotide sequences with only 'ACGT' characters
Returns:
numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T
str: nucleotide sequence string
"""
return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs]) | python | def seq_int_arr(seqs):
"""Convert list of ACGT strings to matix of 1-4 ints
Args:
seqs (list of str): nucleotide sequences with only 'ACGT' characters
Returns:
numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T
str: nucleotide sequence string
"""
return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs]) | [
"def",
"seq_int_arr",
"(",
"seqs",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"NT_TO_INT",
"[",
"c",
"]",
"for",
"c",
"in",
"x",
".",
"upper",
"(",
")",
"]",
"for",
"x",
"in",
"seqs",
"]",
")"
] | Convert list of ACGT strings to matix of 1-4 ints
Args:
seqs (list of str): nucleotide sequences with only 'ACGT' characters
Returns:
numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T
str: nucleotide sequence string | [
"Convert",
"list",
"of",
"ACGT",
"strings",
"to",
"matix",
"of",
"1",
"-",
"4",
"ints"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py#L19-L29 | train | 39,000 |
phac-nml/sistr_cmd | sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py | group_alleles_by_start_end_Xbp | def group_alleles_by_start_end_Xbp(arr, bp=28):
"""Group alleles by matching ends
Args:
arr (numpy.array): 2D int matrix of alleles
bp (int): length of ends to group by
Returns:
dict of lists: key of start + end strings to list of indices of alleles with matching ends
"""
starts = arr[:,0:bp]
ends = arr[:,-bp:]
starts_ends_idxs = defaultdict(list)
l, seq_len = arr.shape
for i in range(l):
start_i = starts[i]
end_i = ends[i]
start_i_str = ''.join([str(x) for x in start_i])
end_i_str = ''.join([str(x) for x in end_i])
starts_ends_idxs[start_i_str + end_i_str].append(i)
return starts_ends_idxs | python | def group_alleles_by_start_end_Xbp(arr, bp=28):
"""Group alleles by matching ends
Args:
arr (numpy.array): 2D int matrix of alleles
bp (int): length of ends to group by
Returns:
dict of lists: key of start + end strings to list of indices of alleles with matching ends
"""
starts = arr[:,0:bp]
ends = arr[:,-bp:]
starts_ends_idxs = defaultdict(list)
l, seq_len = arr.shape
for i in range(l):
start_i = starts[i]
end_i = ends[i]
start_i_str = ''.join([str(x) for x in start_i])
end_i_str = ''.join([str(x) for x in end_i])
starts_ends_idxs[start_i_str + end_i_str].append(i)
return starts_ends_idxs | [
"def",
"group_alleles_by_start_end_Xbp",
"(",
"arr",
",",
"bp",
"=",
"28",
")",
":",
"starts",
"=",
"arr",
"[",
":",
",",
"0",
":",
"bp",
"]",
"ends",
"=",
"arr",
"[",
":",
",",
"-",
"bp",
":",
"]",
"starts_ends_idxs",
"=",
"defaultdict",
"(",
"lis... | Group alleles by matching ends
Args:
arr (numpy.array): 2D int matrix of alleles
bp (int): length of ends to group by
Returns:
dict of lists: key of start + end strings to list of indices of alleles with matching ends | [
"Group",
"alleles",
"by",
"matching",
"ends"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py#L32-L52 | train | 39,001 |
phac-nml/sistr_cmd | sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py | allele_clusters | def allele_clusters(dists, t=0.025):
"""Flat clusters from distance matrix
Args:
dists (numpy.array): pdist distance matrix
t (float): fcluster (tree cutting) distance threshold
Returns:
dict of lists: cluster number to list of indices of distances in cluster
"""
clusters = fcluster(linkage(dists), 0.025, criterion='distance')
cluster_idx = defaultdict(list)
for idx, cl in enumerate(clusters):
cluster_idx[cl].append(idx)
return cluster_idx | python | def allele_clusters(dists, t=0.025):
"""Flat clusters from distance matrix
Args:
dists (numpy.array): pdist distance matrix
t (float): fcluster (tree cutting) distance threshold
Returns:
dict of lists: cluster number to list of indices of distances in cluster
"""
clusters = fcluster(linkage(dists), 0.025, criterion='distance')
cluster_idx = defaultdict(list)
for idx, cl in enumerate(clusters):
cluster_idx[cl].append(idx)
return cluster_idx | [
"def",
"allele_clusters",
"(",
"dists",
",",
"t",
"=",
"0.025",
")",
":",
"clusters",
"=",
"fcluster",
"(",
"linkage",
"(",
"dists",
")",
",",
"0.025",
",",
"criterion",
"=",
"'distance'",
")",
"cluster_idx",
"=",
"defaultdict",
"(",
"list",
")",
"for",
... | Flat clusters from distance matrix
Args:
dists (numpy.array): pdist distance matrix
t (float): fcluster (tree cutting) distance threshold
Returns:
dict of lists: cluster number to list of indices of distances in cluster | [
"Flat",
"clusters",
"from",
"distance",
"matrix"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py#L55-L69 | train | 39,002 |
phac-nml/sistr_cmd | sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py | min_row_dist_sum_idx | def min_row_dist_sum_idx(dists):
"""Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum
"""
row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum)
return row_sums.argmin() | python | def min_row_dist_sum_idx(dists):
"""Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum
"""
row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum)
return row_sums.argmin() | [
"def",
"min_row_dist_sum_idx",
"(",
"dists",
")",
":",
"row_sums",
"=",
"np",
".",
"apply_along_axis",
"(",
"arr",
"=",
"dists",
",",
"axis",
"=",
"0",
",",
"func1d",
"=",
"np",
".",
"sum",
")",
"return",
"row_sums",
".",
"argmin",
"(",
")"
] | Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum | [
"Find",
"the",
"index",
"of",
"the",
"row",
"with",
"the",
"minimum",
"row",
"distance",
"sum"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py#L84-L97 | train | 39,003 |
phac-nml/sistr_cmd | sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py | find_centroid_alleles | def find_centroid_alleles(alleles, bp=28, t=0.025):
"""Reduce list of alleles to set of centroid alleles based on size grouping, ends matching and hierarchical clustering
Workflow for finding centroid alleles:
- grouping by size (e.g. 100bp, 101bp, 103bp, etc)
- then grouped by `bp` nucleotides at ends matching
- size and ends grouped alleles hierarchically clustered (Hamming distance, complete linkage)
- tree cutting at threshold `t`
- select allele with minimum distance to other alleles in cluster as centroid
Args:
alleles (iterable): collection of allele nucleotide sequences
bp (int): number of bp matching at allele ends for size grouping (default=28 due to default blastn megablast word size)
t (float): cluster generation (tree cutting) distance threshold for size grouped alleles
Returns:
set of str: centroid alleles
"""
centroid_alleles = set()
len_allele = group_alleles_by_size(alleles)
for length, seqs in len_allele.items():
# if only one alelle of a particular size, add as centroid, move onto next size group
if len(seqs) == 1:
centroid_alleles.add(seqs[0])
continue
# convert allele nucleotide sequences to integer matrix
seq_arr = seq_int_arr(seqs)
# group alleles by matching ends
starts_ends_idxs = group_alleles_by_start_end_Xbp(seq_arr, bp=bp)
for k, idxs in starts_ends_idxs.items():
# if only one allele for a particular matching ends group, then add as centroid and move onto next ends group
if len(idxs) == 1:
centroid_alleles.add(seqs[idxs[0]])
continue
# fetch subset of int allele sequences for a matching ends group
seq_arr_subset = seq_arr[idxs]
# Hamming distances between alleles
dists = pdist(seq_arr_subset, 'hamming')
# create flat clusters (tree cut) at t threshold
cl = allele_clusters(dists, t=t)
# for each allele cluster
dm_sq = squareform(dists)
for cl_key, cl_idxs in cl.items():
# if only 1 or 2 alleles in cluster then return first
if len(cl_idxs) == 1 or len(cl_idxs) == 2:
# get first cluster index and get nt seq for that index
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[cl_idxs[0]]))
continue
# else find allele with min distances to all other alleles in cluster
dm_sub = dm_subset(dm_sq, cl_idxs)
min_idx = min_row_dist_sum_idx(dm_sub)
# add nucleotide seq for cluster centroid allele to centroids set
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[min_idx]))
#end for cl_key, cl_idxs in cl.iteritems():
#end for k, idxs in starts_ends_idxs.iteritems():
#end for length, seqs in alleles.iteritems():
return centroid_alleles | python | def find_centroid_alleles(alleles, bp=28, t=0.025):
"""Reduce list of alleles to set of centroid alleles based on size grouping, ends matching and hierarchical clustering
Workflow for finding centroid alleles:
- grouping by size (e.g. 100bp, 101bp, 103bp, etc)
- then grouped by `bp` nucleotides at ends matching
- size and ends grouped alleles hierarchically clustered (Hamming distance, complete linkage)
- tree cutting at threshold `t`
- select allele with minimum distance to other alleles in cluster as centroid
Args:
alleles (iterable): collection of allele nucleotide sequences
bp (int): number of bp matching at allele ends for size grouping (default=28 due to default blastn megablast word size)
t (float): cluster generation (tree cutting) distance threshold for size grouped alleles
Returns:
set of str: centroid alleles
"""
centroid_alleles = set()
len_allele = group_alleles_by_size(alleles)
for length, seqs in len_allele.items():
# if only one alelle of a particular size, add as centroid, move onto next size group
if len(seqs) == 1:
centroid_alleles.add(seqs[0])
continue
# convert allele nucleotide sequences to integer matrix
seq_arr = seq_int_arr(seqs)
# group alleles by matching ends
starts_ends_idxs = group_alleles_by_start_end_Xbp(seq_arr, bp=bp)
for k, idxs in starts_ends_idxs.items():
# if only one allele for a particular matching ends group, then add as centroid and move onto next ends group
if len(idxs) == 1:
centroid_alleles.add(seqs[idxs[0]])
continue
# fetch subset of int allele sequences for a matching ends group
seq_arr_subset = seq_arr[idxs]
# Hamming distances between alleles
dists = pdist(seq_arr_subset, 'hamming')
# create flat clusters (tree cut) at t threshold
cl = allele_clusters(dists, t=t)
# for each allele cluster
dm_sq = squareform(dists)
for cl_key, cl_idxs in cl.items():
# if only 1 or 2 alleles in cluster then return first
if len(cl_idxs) == 1 or len(cl_idxs) == 2:
# get first cluster index and get nt seq for that index
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[cl_idxs[0]]))
continue
# else find allele with min distances to all other alleles in cluster
dm_sub = dm_subset(dm_sq, cl_idxs)
min_idx = min_row_dist_sum_idx(dm_sub)
# add nucleotide seq for cluster centroid allele to centroids set
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[min_idx]))
#end for cl_key, cl_idxs in cl.iteritems():
#end for k, idxs in starts_ends_idxs.iteritems():
#end for length, seqs in alleles.iteritems():
return centroid_alleles | [
"def",
"find_centroid_alleles",
"(",
"alleles",
",",
"bp",
"=",
"28",
",",
"t",
"=",
"0.025",
")",
":",
"centroid_alleles",
"=",
"set",
"(",
")",
"len_allele",
"=",
"group_alleles_by_size",
"(",
"alleles",
")",
"for",
"length",
",",
"seqs",
"in",
"len_alle... | Reduce list of alleles to set of centroid alleles based on size grouping, ends matching and hierarchical clustering
Workflow for finding centroid alleles:
- grouping by size (e.g. 100bp, 101bp, 103bp, etc)
- then grouped by `bp` nucleotides at ends matching
- size and ends grouped alleles hierarchically clustered (Hamming distance, complete linkage)
- tree cutting at threshold `t`
- select allele with minimum distance to other alleles in cluster as centroid
Args:
alleles (iterable): collection of allele nucleotide sequences
bp (int): number of bp matching at allele ends for size grouping (default=28 due to default blastn megablast word size)
t (float): cluster generation (tree cutting) distance threshold for size grouped alleles
Returns:
set of str: centroid alleles | [
"Reduce",
"list",
"of",
"alleles",
"to",
"set",
"of",
"centroid",
"alleles",
"based",
"on",
"size",
"grouping",
"ends",
"matching",
"and",
"hierarchical",
"clustering"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py#L112-L169 | train | 39,004 |
phac-nml/sistr_cmd | sistr/src/mash.py | mash_dist_trusted | def mash_dist_trusted(fasta_path):
"""
Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string
"""
args = [MASH_BIN,
'dist',
MASH_SKETCH_FILE,
fasta_path]
p = Popen(args, stderr=PIPE, stdout=PIPE)
(stdout, stderr) = p.communicate()
retcode = p.returncode
if retcode != 0:
raise Exception('Could not run Mash dist {}'.format(stderr))
return stdout | python | def mash_dist_trusted(fasta_path):
"""
Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string
"""
args = [MASH_BIN,
'dist',
MASH_SKETCH_FILE,
fasta_path]
p = Popen(args, stderr=PIPE, stdout=PIPE)
(stdout, stderr) = p.communicate()
retcode = p.returncode
if retcode != 0:
raise Exception('Could not run Mash dist {}'.format(stderr))
return stdout | [
"def",
"mash_dist_trusted",
"(",
"fasta_path",
")",
":",
"args",
"=",
"[",
"MASH_BIN",
",",
"'dist'",
",",
"MASH_SKETCH_FILE",
",",
"fasta_path",
"]",
"p",
"=",
"Popen",
"(",
"args",
",",
"stderr",
"=",
"PIPE",
",",
"stdout",
"=",
"PIPE",
")",
"(",
"st... | Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string | [
"Compute",
"Mash",
"distances",
"of",
"sketch",
"file",
"of",
"genome",
"fasta",
"to",
"RefSeq",
"sketch",
"DB",
"."
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/mash.py#L14-L34 | train | 39,005 |
phac-nml/sistr_cmd | sistr/src/cgmlst/extras/hclust_cutree.py | nr_profiles | def nr_profiles(arr, genomes):
"""
Get a condensed cgMLST pairwise distance matrix for specified Genomes_
where condensed means redundant cgMLST profiles are only represented once in the distance matrix.
Args:
user_name (list): List of Genome_ names to retrieve condensed distance matrix for
Returns:
(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_
"""
gs_collapse = []
genome_idx_dict = {}
indices = []
patt_dict = {}
for i, g in enumerate(genomes):
p = arr[i, :].tostring()
if p in patt_dict:
parent = patt_dict[p]
idx = genome_idx_dict[parent]
gs_collapse[idx].append(g)
else:
indices.append(i)
patt_dict[p] = g
genome_idx_dict[g] = len(gs_collapse)
gs_collapse.append([g])
return arr[indices, :], gs_collapse | python | def nr_profiles(arr, genomes):
"""
Get a condensed cgMLST pairwise distance matrix for specified Genomes_
where condensed means redundant cgMLST profiles are only represented once in the distance matrix.
Args:
user_name (list): List of Genome_ names to retrieve condensed distance matrix for
Returns:
(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_
"""
gs_collapse = []
genome_idx_dict = {}
indices = []
patt_dict = {}
for i, g in enumerate(genomes):
p = arr[i, :].tostring()
if p in patt_dict:
parent = patt_dict[p]
idx = genome_idx_dict[parent]
gs_collapse[idx].append(g)
else:
indices.append(i)
patt_dict[p] = g
genome_idx_dict[g] = len(gs_collapse)
gs_collapse.append([g])
return arr[indices, :], gs_collapse | [
"def",
"nr_profiles",
"(",
"arr",
",",
"genomes",
")",
":",
"gs_collapse",
"=",
"[",
"]",
"genome_idx_dict",
"=",
"{",
"}",
"indices",
"=",
"[",
"]",
"patt_dict",
"=",
"{",
"}",
"for",
"i",
",",
"g",
"in",
"enumerate",
"(",
"genomes",
")",
":",
"p"... | Get a condensed cgMLST pairwise distance matrix for specified Genomes_
where condensed means redundant cgMLST profiles are only represented once in the distance matrix.
Args:
user_name (list): List of Genome_ names to retrieve condensed distance matrix for
Returns:
(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_ | [
"Get",
"a",
"condensed",
"cgMLST",
"pairwise",
"distance",
"matrix",
"for",
"specified",
"Genomes_",
"where",
"condensed",
"means",
"redundant",
"cgMLST",
"profiles",
"are",
"only",
"represented",
"once",
"in",
"the",
"distance",
"matrix",
"."
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/hclust_cutree.py#L19-L45 | train | 39,006 |
phac-nml/sistr_cmd | sistr/src/serovar_prediction/__init__.py | overall_serovar_call | def overall_serovar_call(serovar_prediction, antigen_predictor):
"""
Predict serovar from cgMLST cluster membership analysis and antigen BLAST results.
SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results.
Antigen BLAST results will predict a particular serovar or list of serovars, however,
the cgMLST membership may be able to help narrow down the list of potential serovars.
Notes:
If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars,
then the serovar is assigned the cgMLST predicted serovar.
If all antigens are found, but an antigen serovar is not found then the serovar is assigned
a pseudo-antigenic formula (Serogroup:H1:H2), otherwise the serovar is assigned the cgMLST prediction.
If the antigen predicted serovar does not match the cgMLST predicted serovar,
- the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less)
- otherwise, the serovar is antigen predicted serovar(s)
Args:
serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash])
antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results
Returns:
src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST
"""
assert isinstance(serovar_prediction, SerovarPrediction)
assert isinstance(antigen_predictor, SerovarPredictor)
h1 = antigen_predictor.h1
h2 = antigen_predictor.h2
sg = antigen_predictor.serogroup
spp = serovar_prediction.cgmlst_subspecies
if spp is None:
if 'mash_match' in serovar_prediction.__dict__:
spp = serovar_prediction.__dict__['mash_subspecies']
serovar_prediction.serovar_antigen = antigen_predictor.serovar
cgmlst_serovar = serovar_prediction.serovar_cgmlst
cgmlst_distance = float(serovar_prediction.cgmlst_distance)
null_result = '-:-:-'
try:
spp_roman = spp_name_to_roman[spp]
except:
spp_roman = None
is_antigen_null = lambda x: (x is None or x == '' or x == '-')
if antigen_predictor.serovar is None:
if is_antigen_null(sg) and is_antigen_null(h1) and is_antigen_null(h2):
if spp_roman is not None:
serovar_prediction.serovar = '{} {}:{}:{}'.format(spp_roman, sg, h1, h2)
else:
serovar_prediction.serovar = '{}:{}:{}'.format(spp_roman, sg, h1, h2)
elif cgmlst_serovar is not None and cgmlst_distance <= CGMLST_DISTANCE_THRESHOLD:
serovar_prediction.serovar = cgmlst_serovar
else:
serovar_prediction.serovar = null_result
if 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_dist = float(spd['mash_distance'])
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = spd['mash_serovar']
else:
serovars_from_antigen = antigen_predictor.serovar.split('|')
if not isinstance(serovars_from_antigen, list):
serovars_from_antigen = [serovars_from_antigen]
if cgmlst_serovar is not None:
if cgmlst_serovar in serovars_from_antigen:
serovar_prediction.serovar = cgmlst_serovar
else:
if float(cgmlst_distance) <= CGMLST_DISTANCE_THRESHOLD:
serovar_prediction.serovar = cgmlst_serovar
elif 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_serovar = spd['mash_serovar']
mash_dist = float(spd['mash_distance'])
if mash_serovar in serovars_from_antigen:
serovar_prediction.serovar = mash_serovar
else:
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = mash_serovar
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
if serovar_prediction.h1 is None:
serovar_prediction.h1 = '-'
if serovar_prediction.h2 is None:
serovar_prediction.h2 = '-'
if serovar_prediction.serogroup is None:
serovar_prediction.serogroup = '-'
if serovar_prediction.serovar_antigen is None:
if spp_roman is not None:
serovar_prediction.serovar_antigen = '{} -:-:-'.format(spp_roman)
else:
serovar_prediction.serovar_antigen = '-:-:-'
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
return serovar_prediction | python | def overall_serovar_call(serovar_prediction, antigen_predictor):
"""
Predict serovar from cgMLST cluster membership analysis and antigen BLAST results.
SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results.
Antigen BLAST results will predict a particular serovar or list of serovars, however,
the cgMLST membership may be able to help narrow down the list of potential serovars.
Notes:
If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars,
then the serovar is assigned the cgMLST predicted serovar.
If all antigens are found, but an antigen serovar is not found then the serovar is assigned
a pseudo-antigenic formula (Serogroup:H1:H2), otherwise the serovar is assigned the cgMLST prediction.
If the antigen predicted serovar does not match the cgMLST predicted serovar,
- the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less)
- otherwise, the serovar is antigen predicted serovar(s)
Args:
serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash])
antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results
Returns:
src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST
"""
assert isinstance(serovar_prediction, SerovarPrediction)
assert isinstance(antigen_predictor, SerovarPredictor)
h1 = antigen_predictor.h1
h2 = antigen_predictor.h2
sg = antigen_predictor.serogroup
spp = serovar_prediction.cgmlst_subspecies
if spp is None:
if 'mash_match' in serovar_prediction.__dict__:
spp = serovar_prediction.__dict__['mash_subspecies']
serovar_prediction.serovar_antigen = antigen_predictor.serovar
cgmlst_serovar = serovar_prediction.serovar_cgmlst
cgmlst_distance = float(serovar_prediction.cgmlst_distance)
null_result = '-:-:-'
try:
spp_roman = spp_name_to_roman[spp]
except:
spp_roman = None
is_antigen_null = lambda x: (x is None or x == '' or x == '-')
if antigen_predictor.serovar is None:
if is_antigen_null(sg) and is_antigen_null(h1) and is_antigen_null(h2):
if spp_roman is not None:
serovar_prediction.serovar = '{} {}:{}:{}'.format(spp_roman, sg, h1, h2)
else:
serovar_prediction.serovar = '{}:{}:{}'.format(spp_roman, sg, h1, h2)
elif cgmlst_serovar is not None and cgmlst_distance <= CGMLST_DISTANCE_THRESHOLD:
serovar_prediction.serovar = cgmlst_serovar
else:
serovar_prediction.serovar = null_result
if 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_dist = float(spd['mash_distance'])
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = spd['mash_serovar']
else:
serovars_from_antigen = antigen_predictor.serovar.split('|')
if not isinstance(serovars_from_antigen, list):
serovars_from_antigen = [serovars_from_antigen]
if cgmlst_serovar is not None:
if cgmlst_serovar in serovars_from_antigen:
serovar_prediction.serovar = cgmlst_serovar
else:
if float(cgmlst_distance) <= CGMLST_DISTANCE_THRESHOLD:
serovar_prediction.serovar = cgmlst_serovar
elif 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_serovar = spd['mash_serovar']
mash_dist = float(spd['mash_distance'])
if mash_serovar in serovars_from_antigen:
serovar_prediction.serovar = mash_serovar
else:
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = mash_serovar
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
if serovar_prediction.h1 is None:
serovar_prediction.h1 = '-'
if serovar_prediction.h2 is None:
serovar_prediction.h2 = '-'
if serovar_prediction.serogroup is None:
serovar_prediction.serogroup = '-'
if serovar_prediction.serovar_antigen is None:
if spp_roman is not None:
serovar_prediction.serovar_antigen = '{} -:-:-'.format(spp_roman)
else:
serovar_prediction.serovar_antigen = '-:-:-'
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
return serovar_prediction | [
"def",
"overall_serovar_call",
"(",
"serovar_prediction",
",",
"antigen_predictor",
")",
":",
"assert",
"isinstance",
"(",
"serovar_prediction",
",",
"SerovarPrediction",
")",
"assert",
"isinstance",
"(",
"antigen_predictor",
",",
"SerovarPredictor",
")",
"h1",
"=",
"... | Predict serovar from cgMLST cluster membership analysis and antigen BLAST results.
SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results.
Antigen BLAST results will predict a particular serovar or list of serovars, however,
the cgMLST membership may be able to help narrow down the list of potential serovars.
Notes:
If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars,
then the serovar is assigned the cgMLST predicted serovar.
If all antigens are found, but an antigen serovar is not found then the serovar is assigned
a pseudo-antigenic formula (Serogroup:H1:H2), otherwise the serovar is assigned the cgMLST prediction.
If the antigen predicted serovar does not match the cgMLST predicted serovar,
- the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less)
- otherwise, the serovar is antigen predicted serovar(s)
Args:
serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash])
antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results
Returns:
src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST | [
"Predict",
"serovar",
"from",
"cgMLST",
"cluster",
"membership",
"analysis",
"and",
"antigen",
"BLAST",
"results",
".",
"SerovarPrediction",
"object",
"is",
"assigned",
"H1",
"H2",
"and",
"Serogroup",
"from",
"the",
"antigen",
"BLAST",
"results",
".",
"Antigen",
... | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/serovar_prediction/__init__.py#L434-L540 | train | 39,007 |
phac-nml/sistr_cmd | sistr/src/cgmlst/__init__.py | process_cgmlst_results | def process_cgmlst_results(df):
"""Append informative fields to cgMLST330 BLAST results DataFrame
The `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.
The `qseqid` parsed allele numbers and marker names are appended as new fields.
`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.
`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.
The top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present
at that marker locus.
Args:
df (pandas.DataFrame): DataFrame of cgMLST330 BLAST results
Returns:
pandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`)
"""
assert isinstance(df, pd.DataFrame)
markers = []
alleles = []
for x in df['qseqid']:
marker, allele = x.split('|')
markers.append(marker)
alleles.append(int(allele))
df.loc[:, 'marker'] = markers
df.loc[:, 'allele'] = alleles
df.loc[:, 'is_match'] = (df['coverage'] >= 1.0) & (df['pident'] >= 90.0) & ~(df['is_trunc'])
df.loc[:, 'allele_name'] = df.apply(lambda x: allele_name(x.sseq.replace('-', '')), axis=1)
df.loc[:, 'is_perfect'] = (df['coverage'] == 1.0) & (df['pident'] == 100.0)
df_perf = df[df['is_perfect']]
perf_markers = df_perf['marker'].unique()
df.loc[:, 'has_perfect_match'] = df['marker'].isin(perf_markers)
start_idxs, end_idxs, needs_revcomps, trunc, is_extended = extend_subj_match_vec(df)
df.loc[:, 'start_idx'] = start_idxs
df.loc[:, 'end_idx'] = end_idxs
df.loc[:, 'needs_revcomp'] = needs_revcomps
df.loc[:, 'trunc'] = trunc
df.loc[:, 'is_extended'] = is_extended
df.loc[:, 'sseq_msa_gaps'] = np.zeros(df.shape[0], dtype=np.int64)
df.loc[:, 'sseq_msa_p_gaps'] = np.zeros(df.shape[0], dtype=np.float64)
df.loc[:, 'too_many_gaps'] = trunc
return df | python | def process_cgmlst_results(df):
"""Append informative fields to cgMLST330 BLAST results DataFrame
The `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.
The `qseqid` parsed allele numbers and marker names are appended as new fields.
`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.
`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.
The top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present
at that marker locus.
Args:
df (pandas.DataFrame): DataFrame of cgMLST330 BLAST results
Returns:
pandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`)
"""
assert isinstance(df, pd.DataFrame)
markers = []
alleles = []
for x in df['qseqid']:
marker, allele = x.split('|')
markers.append(marker)
alleles.append(int(allele))
df.loc[:, 'marker'] = markers
df.loc[:, 'allele'] = alleles
df.loc[:, 'is_match'] = (df['coverage'] >= 1.0) & (df['pident'] >= 90.0) & ~(df['is_trunc'])
df.loc[:, 'allele_name'] = df.apply(lambda x: allele_name(x.sseq.replace('-', '')), axis=1)
df.loc[:, 'is_perfect'] = (df['coverage'] == 1.0) & (df['pident'] == 100.0)
df_perf = df[df['is_perfect']]
perf_markers = df_perf['marker'].unique()
df.loc[:, 'has_perfect_match'] = df['marker'].isin(perf_markers)
start_idxs, end_idxs, needs_revcomps, trunc, is_extended = extend_subj_match_vec(df)
df.loc[:, 'start_idx'] = start_idxs
df.loc[:, 'end_idx'] = end_idxs
df.loc[:, 'needs_revcomp'] = needs_revcomps
df.loc[:, 'trunc'] = trunc
df.loc[:, 'is_extended'] = is_extended
df.loc[:, 'sseq_msa_gaps'] = np.zeros(df.shape[0], dtype=np.int64)
df.loc[:, 'sseq_msa_p_gaps'] = np.zeros(df.shape[0], dtype=np.float64)
df.loc[:, 'too_many_gaps'] = trunc
return df | [
"def",
"process_cgmlst_results",
"(",
"df",
")",
":",
"assert",
"isinstance",
"(",
"df",
",",
"pd",
".",
"DataFrame",
")",
"markers",
"=",
"[",
"]",
"alleles",
"=",
"[",
"]",
"for",
"x",
"in",
"df",
"[",
"'qseqid'",
"]",
":",
"marker",
",",
"allele",... | Append informative fields to cgMLST330 BLAST results DataFrame
The `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.
The `qseqid` parsed allele numbers and marker names are appended as new fields.
`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.
`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.
The top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present
at that marker locus.
Args:
df (pandas.DataFrame): DataFrame of cgMLST330 BLAST results
Returns:
pandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`) | [
"Append",
"informative",
"fields",
"to",
"cgMLST330",
"BLAST",
"results",
"DataFrame"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/__init__.py#L40-L82 | train | 39,008 |
phac-nml/sistr_cmd | sistr/src/cgmlst/__init__.py | alleles_to_retrieve | def alleles_to_retrieve(df):
"""Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence.
"""
contig_blastn_records = defaultdict(list)
markers = df.marker.unique()
for m in markers:
dfsub = df[df.marker == m]
for i, r in dfsub.iterrows():
if r.coverage < 1.0:
contig_blastn_records[r.stitle].append(r)
break
return contig_blastn_records | python | def alleles_to_retrieve(df):
"""Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence.
"""
contig_blastn_records = defaultdict(list)
markers = df.marker.unique()
for m in markers:
dfsub = df[df.marker == m]
for i, r in dfsub.iterrows():
if r.coverage < 1.0:
contig_blastn_records[r.stitle].append(r)
break
return contig_blastn_records | [
"def",
"alleles_to_retrieve",
"(",
"df",
")",
":",
"contig_blastn_records",
"=",
"defaultdict",
"(",
"list",
")",
"markers",
"=",
"df",
".",
"marker",
".",
"unique",
"(",
")",
"for",
"m",
"in",
"markers",
":",
"dfsub",
"=",
"df",
"[",
"df",
".",
"marke... | Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence. | [
"Alleles",
"to",
"retrieve",
"from",
"genome",
"fasta"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/__init__.py#L85-L106 | train | 39,009 |
phac-nml/sistr_cmd | sistr/src/cgmlst/__init__.py | matches_to_marker_results | def matches_to_marker_results(df):
"""Perfect BLAST matches to marker results dict
Parse perfect BLAST matches to marker results dict.
Args:
df (pandas.DataFrame): DataFrame of perfect BLAST matches
Returns:
dict: cgMLST330 marker names to matching allele numbers
"""
assert isinstance(df, pd.DataFrame)
from collections import defaultdict
d = defaultdict(list)
for idx, row in df.iterrows():
marker = row['marker']
d[marker].append(row)
marker_results = {}
for k,v in d.items():
if len(v) > 1:
logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)
df_marker = pd.DataFrame(v)
df_marker.sort_values('slen', ascending=False, inplace=True)
for i,r in df_marker.iterrows():
allele = r['allele_name']
slen = r['slen']
logging.debug('Selecting allele %s from contig with length %s', allele, slen)
seq = r['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', r)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
break
elif len(v) == 1:
row = v[0]
seq = row['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', row)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
else:
err_msg = 'Empty list of matches for marker {}'.format(k)
logging.error(err_msg)
raise Exception(err_msg)
return marker_results | python | def matches_to_marker_results(df):
"""Perfect BLAST matches to marker results dict
Parse perfect BLAST matches to marker results dict.
Args:
df (pandas.DataFrame): DataFrame of perfect BLAST matches
Returns:
dict: cgMLST330 marker names to matching allele numbers
"""
assert isinstance(df, pd.DataFrame)
from collections import defaultdict
d = defaultdict(list)
for idx, row in df.iterrows():
marker = row['marker']
d[marker].append(row)
marker_results = {}
for k,v in d.items():
if len(v) > 1:
logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)
df_marker = pd.DataFrame(v)
df_marker.sort_values('slen', ascending=False, inplace=True)
for i,r in df_marker.iterrows():
allele = r['allele_name']
slen = r['slen']
logging.debug('Selecting allele %s from contig with length %s', allele, slen)
seq = r['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', r)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
break
elif len(v) == 1:
row = v[0]
seq = row['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', row)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
else:
err_msg = 'Empty list of matches for marker {}'.format(k)
logging.error(err_msg)
raise Exception(err_msg)
return marker_results | [
"def",
"matches_to_marker_results",
"(",
"df",
")",
":",
"assert",
"isinstance",
"(",
"df",
",",
"pd",
".",
"DataFrame",
")",
"from",
"collections",
"import",
"defaultdict",
"d",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"idx",
",",
"row",
"in",
"df",
... | Perfect BLAST matches to marker results dict
Parse perfect BLAST matches to marker results dict.
Args:
df (pandas.DataFrame): DataFrame of perfect BLAST matches
Returns:
dict: cgMLST330 marker names to matching allele numbers | [
"Perfect",
"BLAST",
"matches",
"to",
"marker",
"results",
"dict"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/__init__.py#L186-L234 | train | 39,010 |
phac-nml/sistr_cmd | sistr/src/cgmlst/__init__.py | cgmlst_subspecies_call | def cgmlst_subspecies_call(df_relatives):
"""Call Salmonella subspecies based on cgMLST results
This method attempts to find the majority subspecies type within curated
public genomes above a cgMLST allelic profile distance threshold.
Note:
``CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD`` is the cgMLST distance
threshold used to determine the subspecies by cgMLST. It is set at a
distance of 0.9 which translates to a cgMLST allelic similarity of 10%.
A threshold of 0.9 is generous and reasonable given the congruence
between subspecies designations and 10% cgMLST clusters by Adjusted
Rand (~0.850) and Adjusted Wallace metrics (~0.850 both ways).
Args:
df_relatives (pandas.DataFrame): Table of genomes related by cgMLST to input genome
Returns:
None: if no curated public genomes found to have a cgMLST profile similarity of 10% or greater
(string, float, dict): most common subspecies, closest related public genome distance, subspecies frequencies
"""
closest_distance = df_relatives['distance'].min()
if closest_distance > CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD:
logging.warning('Min cgMLST distance (%s) above subspeciation distance threshold (%s)',
closest_distance,
CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD)
return None
else:
df_relatives = df_relatives.loc[df_relatives.distance <= CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD, :]
df_relatives = df_relatives.sort_values('distance', ascending=True)
logging.debug('df_relatives by cgmlst %s', df_relatives.head())
genome_spp = genomes_to_subspecies()
subspecies_below_threshold = [genome_spp[member_genome] if member_genome in genome_spp else None for member_genome in df_relatives.index]
subspecies_below_threshold = filter(None, subspecies_below_threshold)
subspecies_counter = Counter(subspecies_below_threshold)
logging.debug('Subspecies counter: %s', subspecies_counter)
return (subspecies_counter.most_common(1)[0][0], closest_distance, dict(subspecies_counter)) | python | def cgmlst_subspecies_call(df_relatives):
"""Call Salmonella subspecies based on cgMLST results
This method attempts to find the majority subspecies type within curated
public genomes above a cgMLST allelic profile distance threshold.
Note:
``CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD`` is the cgMLST distance
threshold used to determine the subspecies by cgMLST. It is set at a
distance of 0.9 which translates to a cgMLST allelic similarity of 10%.
A threshold of 0.9 is generous and reasonable given the congruence
between subspecies designations and 10% cgMLST clusters by Adjusted
Rand (~0.850) and Adjusted Wallace metrics (~0.850 both ways).
Args:
df_relatives (pandas.DataFrame): Table of genomes related by cgMLST to input genome
Returns:
None: if no curated public genomes found to have a cgMLST profile similarity of 10% or greater
(string, float, dict): most common subspecies, closest related public genome distance, subspecies frequencies
"""
closest_distance = df_relatives['distance'].min()
if closest_distance > CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD:
logging.warning('Min cgMLST distance (%s) above subspeciation distance threshold (%s)',
closest_distance,
CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD)
return None
else:
df_relatives = df_relatives.loc[df_relatives.distance <= CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD, :]
df_relatives = df_relatives.sort_values('distance', ascending=True)
logging.debug('df_relatives by cgmlst %s', df_relatives.head())
genome_spp = genomes_to_subspecies()
subspecies_below_threshold = [genome_spp[member_genome] if member_genome in genome_spp else None for member_genome in df_relatives.index]
subspecies_below_threshold = filter(None, subspecies_below_threshold)
subspecies_counter = Counter(subspecies_below_threshold)
logging.debug('Subspecies counter: %s', subspecies_counter)
return (subspecies_counter.most_common(1)[0][0], closest_distance, dict(subspecies_counter)) | [
"def",
"cgmlst_subspecies_call",
"(",
"df_relatives",
")",
":",
"closest_distance",
"=",
"df_relatives",
"[",
"'distance'",
"]",
".",
"min",
"(",
")",
"if",
"closest_distance",
">",
"CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD",
":",
"logging",
".",
"warning",
"(",
"'Mi... | Call Salmonella subspecies based on cgMLST results
This method attempts to find the majority subspecies type within curated
public genomes above a cgMLST allelic profile distance threshold.
Note:
``CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD`` is the cgMLST distance
threshold used to determine the subspecies by cgMLST. It is set at a
distance of 0.9 which translates to a cgMLST allelic similarity of 10%.
A threshold of 0.9 is generous and reasonable given the congruence
between subspecies designations and 10% cgMLST clusters by Adjusted
Rand (~0.850) and Adjusted Wallace metrics (~0.850 both ways).
Args:
df_relatives (pandas.DataFrame): Table of genomes related by cgMLST to input genome
Returns:
None: if no curated public genomes found to have a cgMLST profile similarity of 10% or greater
(string, float, dict): most common subspecies, closest related public genome distance, subspecies frequencies | [
"Call",
"Salmonella",
"subspecies",
"based",
"on",
"cgMLST",
"results"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/__init__.py#L262-L300 | train | 39,011 |
phac-nml/sistr_cmd | sistr/sistr_cmd.py | genome_name_from_fasta_path | def genome_name_from_fasta_path(fasta_path):
"""Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name
"""
filename = os.path.basename(fasta_path)
return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename) | python | def genome_name_from_fasta_path(fasta_path):
"""Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name
"""
filename = os.path.basename(fasta_path)
return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename) | [
"def",
"genome_name_from_fasta_path",
"(",
"fasta_path",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fasta_path",
")",
"return",
"re",
".",
"sub",
"(",
"r'(\\.fa$)|(\\.fas$)|(\\.fasta$)|(\\.fna$)|(\\.\\w{1,}$)'",
",",
"''",
",",
"filename",
... | Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name | [
"Extract",
"genome",
"name",
"from",
"fasta",
"filename"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/sistr_cmd.py#L234-L254 | train | 39,012 |
phac-nml/sistr_cmd | sistr/src/blast_wrapper/__init__.py | BlastReader.df_first_row_to_dict | def df_first_row_to_dict(df):
"""First DataFrame row to list of dict
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A list of dict that looks like:
[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None`
"""
if df is not None:
return [dict(r) for i, r in df.head(1).iterrows()][0] | python | def df_first_row_to_dict(df):
"""First DataFrame row to list of dict
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A list of dict that looks like:
[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None`
"""
if df is not None:
return [dict(r) for i, r in df.head(1).iterrows()][0] | [
"def",
"df_first_row_to_dict",
"(",
"df",
")",
":",
"if",
"df",
"is",
"not",
"None",
":",
"return",
"[",
"dict",
"(",
"r",
")",
"for",
"i",
",",
"r",
"in",
"df",
".",
"head",
"(",
"1",
")",
".",
"iterrows",
"(",
")",
"]",
"[",
"0",
"]"
] | First DataFrame row to list of dict
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A list of dict that looks like:
[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None` | [
"First",
"DataFrame",
"row",
"to",
"list",
"of",
"dict"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/blast_wrapper/__init__.py#L200-L219 | train | 39,013 |
phac-nml/sistr_cmd | sistr/misc/add_ref_genomes.py | sketch_fasta | def sketch_fasta(fasta_path, outdir):
"""Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path
"""
genome_name = genome_name_from_fasta_path(fasta_path)
outpath = os.path.join(outdir, genome_name)
args = ['mash', 'sketch', '-o', outpath, fasta_path]
logging.info('Running Mash sketch with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
sketch_path = outpath + '.msh'
assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(
genome_name,
sketch_path)
return sketch_path | python | def sketch_fasta(fasta_path, outdir):
"""Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path
"""
genome_name = genome_name_from_fasta_path(fasta_path)
outpath = os.path.join(outdir, genome_name)
args = ['mash', 'sketch', '-o', outpath, fasta_path]
logging.info('Running Mash sketch with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
sketch_path = outpath + '.msh'
assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(
genome_name,
sketch_path)
return sketch_path | [
"def",
"sketch_fasta",
"(",
"fasta_path",
",",
"outdir",
")",
":",
"genome_name",
"=",
"genome_name_from_fasta_path",
"(",
"fasta_path",
")",
"outpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"genome_name",
")",
"args",
"=",
"[",
"'mash'",
... | Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path | [
"Create",
"a",
"Mash",
"sketch",
"from",
"an",
"input",
"fasta",
"file"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/misc/add_ref_genomes.py#L65-L85 | train | 39,014 |
phac-nml/sistr_cmd | sistr/misc/add_ref_genomes.py | merge_sketches | def merge_sketches(outdir, sketch_paths):
"""Merge new Mash sketches with current Mash sketches
Args:
outdir (str): output directory to write merged Mash sketch file
sketch_paths (list of str): Mash sketch file paths for input fasta files
Returns:
str: output path for Mash sketch file with new and old sketches
"""
merge_sketch_path = os.path.join(outdir, 'sistr.msh')
args = ['mash', 'paste', merge_sketch_path]
for x in sketch_paths:
args.append(x)
args.append(MASH_SKETCH_FILE)
logging.info('Running Mash paste with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)
return merge_sketch_path | python | def merge_sketches(outdir, sketch_paths):
"""Merge new Mash sketches with current Mash sketches
Args:
outdir (str): output directory to write merged Mash sketch file
sketch_paths (list of str): Mash sketch file paths for input fasta files
Returns:
str: output path for Mash sketch file with new and old sketches
"""
merge_sketch_path = os.path.join(outdir, 'sistr.msh')
args = ['mash', 'paste', merge_sketch_path]
for x in sketch_paths:
args.append(x)
args.append(MASH_SKETCH_FILE)
logging.info('Running Mash paste with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)
return merge_sketch_path | [
"def",
"merge_sketches",
"(",
"outdir",
",",
"sketch_paths",
")",
":",
"merge_sketch_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"'sistr.msh'",
")",
"args",
"=",
"[",
"'mash'",
",",
"'paste'",
",",
"merge_sketch_path",
"]",
"for",
"x",
... | Merge new Mash sketches with current Mash sketches
Args:
outdir (str): output directory to write merged Mash sketch file
sketch_paths (list of str): Mash sketch file paths for input fasta files
Returns:
str: output path for Mash sketch file with new and old sketches | [
"Merge",
"new",
"Mash",
"sketches",
"with",
"current",
"Mash",
"sketches"
] | 4630fae72439723b354a94b94fbe76ad2f9f6295 | https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/misc/add_ref_genomes.py#L88-L107 | train | 39,015 |
mbr/simplekv | simplekv/__init__.py | KeyValueStore.get_file | def get_file(self, key, file):
"""Write contents of key to file
Like :meth:`.KeyValueStore.put_file`, this method allows backends to
implement a specialized function if data needs to be written to disk or
streamed.
If *file* is a string, contents of *key* are written to a newly
created file with the filename *file*. Otherwise, the data will be
written using the *write* method of *file*.
:param key: The key to be read
:param file: Output filename or an object with a *write* method.
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem reading or writing
data.
:raises exceptions.KeyError: If the key was not found.
"""
self._check_valid_key(key)
if isinstance(file, str):
return self._get_filename(key, file)
else:
return self._get_file(key, file) | python | def get_file(self, key, file):
"""Write contents of key to file
Like :meth:`.KeyValueStore.put_file`, this method allows backends to
implement a specialized function if data needs to be written to disk or
streamed.
If *file* is a string, contents of *key* are written to a newly
created file with the filename *file*. Otherwise, the data will be
written using the *write* method of *file*.
:param key: The key to be read
:param file: Output filename or an object with a *write* method.
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem reading or writing
data.
:raises exceptions.KeyError: If the key was not found.
"""
self._check_valid_key(key)
if isinstance(file, str):
return self._get_filename(key, file)
else:
return self._get_file(key, file) | [
"def",
"get_file",
"(",
"self",
",",
"key",
",",
"file",
")",
":",
"self",
".",
"_check_valid_key",
"(",
"key",
")",
"if",
"isinstance",
"(",
"file",
",",
"str",
")",
":",
"return",
"self",
".",
"_get_filename",
"(",
"key",
",",
"file",
")",
"else",
... | Write contents of key to file
Like :meth:`.KeyValueStore.put_file`, this method allows backends to
implement a specialized function if data needs to be written to disk or
streamed.
If *file* is a string, contents of *key* are written to a newly
created file with the filename *file*. Otherwise, the data will be
written using the *write* method of *file*.
:param key: The key to be read
:param file: Output filename or an object with a *write* method.
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem reading or writing
data.
:raises exceptions.KeyError: If the key was not found. | [
"Write",
"contents",
"of",
"key",
"to",
"file"
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/__init__.py#L77-L100 | train | 39,016 |
mbr/simplekv | simplekv/__init__.py | KeyValueStore.put_file | def put_file(self, key, file):
"""Store into key from file on disk
Stores data from a source into key. *file* can either be a string,
which will be interpretet as a filename, or an object with a *read()*
method.
If the passed object has a *fileno()* method, it may be used to speed
up the operation.
The file specified by *file*, if it is a filename, may be removed in
the process, to avoid copying if possible. If you need to make a copy,
pass the opened file instead.
:param key: The key under which the data is to be stored
:param file: A filename or an object with a read method. If a filename,
may be removed
:returns: The key under which data was stored
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem moving the file in.
"""
# FIXME: shouldn't we call self._check_valid_key here?
if isinstance(file, str):
return self._put_filename(key, file)
else:
return self._put_file(key, file) | python | def put_file(self, key, file):
"""Store into key from file on disk
Stores data from a source into key. *file* can either be a string,
which will be interpretet as a filename, or an object with a *read()*
method.
If the passed object has a *fileno()* method, it may be used to speed
up the operation.
The file specified by *file*, if it is a filename, may be removed in
the process, to avoid copying if possible. If you need to make a copy,
pass the opened file instead.
:param key: The key under which the data is to be stored
:param file: A filename or an object with a read method. If a filename,
may be removed
:returns: The key under which data was stored
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem moving the file in.
"""
# FIXME: shouldn't we call self._check_valid_key here?
if isinstance(file, str):
return self._put_filename(key, file)
else:
return self._put_file(key, file) | [
"def",
"put_file",
"(",
"self",
",",
"key",
",",
"file",
")",
":",
"# FIXME: shouldn't we call self._check_valid_key here?",
"if",
"isinstance",
"(",
"file",
",",
"str",
")",
":",
"return",
"self",
".",
"_put_filename",
"(",
"key",
",",
"file",
")",
"else",
... | Store into key from file on disk
Stores data from a source into key. *file* can either be a string,
which will be interpretet as a filename, or an object with a *read()*
method.
If the passed object has a *fileno()* method, it may be used to speed
up the operation.
The file specified by *file*, if it is a filename, may be removed in
the process, to avoid copying if possible. If you need to make a copy,
pass the opened file instead.
:param key: The key under which the data is to be stored
:param file: A filename or an object with a read method. If a filename,
may be removed
:returns: The key under which data was stored
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem moving the file in. | [
"Store",
"into",
"key",
"from",
"file",
"on",
"disk"
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/__init__.py#L152-L179 | train | 39,017 |
mbr/simplekv | simplekv/__init__.py | CopyMixin.copy | def copy(self, source, dest):
"""Copies a key. The destination is overwritten if does exist.
:param source: The source key to copy
:param dest: The destination for the copy
:returns: The destination key
:raises: exceptions.ValueError: If the source or target key are not valid
:raises: exceptions.KeyError: If the source key was not found"""
self._check_valid_key(source)
self._check_valid_key(dest)
return self._copy(source, dest) | python | def copy(self, source, dest):
"""Copies a key. The destination is overwritten if does exist.
:param source: The source key to copy
:param dest: The destination for the copy
:returns: The destination key
:raises: exceptions.ValueError: If the source or target key are not valid
:raises: exceptions.KeyError: If the source key was not found"""
self._check_valid_key(source)
self._check_valid_key(dest)
return self._copy(source, dest) | [
"def",
"copy",
"(",
"self",
",",
"source",
",",
"dest",
")",
":",
"self",
".",
"_check_valid_key",
"(",
"source",
")",
"self",
".",
"_check_valid_key",
"(",
"dest",
")",
"return",
"self",
".",
"_copy",
"(",
"source",
",",
"dest",
")"
] | Copies a key. The destination is overwritten if does exist.
:param source: The source key to copy
:param dest: The destination for the copy
:returns: The destination key
:raises: exceptions.ValueError: If the source or target key are not valid
:raises: exceptions.KeyError: If the source key was not found | [
"Copies",
"a",
"key",
".",
"The",
"destination",
"is",
"overwritten",
"if",
"does",
"exist",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/__init__.py#L441-L453 | train | 39,018 |
napalm-automation/napalm-eos | napalm_eos/eos.py | EOSDriver.compare_config | def compare_config(self):
"""Implementation of NAPALM method compare_config."""
if self.config_session is None:
return ''
else:
commands = ['show session-config named %s diffs' % self.config_session]
result = self.device.run_commands(commands, encoding='text')[0]['output']
result = '\n'.join(result.splitlines()[2:])
return result.strip() | python | def compare_config(self):
"""Implementation of NAPALM method compare_config."""
if self.config_session is None:
return ''
else:
commands = ['show session-config named %s diffs' % self.config_session]
result = self.device.run_commands(commands, encoding='text')[0]['output']
result = '\n'.join(result.splitlines()[2:])
return result.strip() | [
"def",
"compare_config",
"(",
"self",
")",
":",
"if",
"self",
".",
"config_session",
"is",
"None",
":",
"return",
"''",
"else",
":",
"commands",
"=",
"[",
"'show session-config named %s diffs'",
"%",
"self",
".",
"config_session",
"]",
"result",
"=",
"self",
... | Implementation of NAPALM method compare_config. | [
"Implementation",
"of",
"NAPALM",
"method",
"compare_config",
"."
] | a3b37d6ee353e326ab9ea1a09ecc14045b12928b | https://github.com/napalm-automation/napalm-eos/blob/a3b37d6ee353e326ab9ea1a09ecc14045b12928b/napalm_eos/eos.py#L178-L188 | train | 39,019 |
napalm-automation/napalm-eos | napalm_eos/eos.py | EOSDriver.discard_config | def discard_config(self):
"""Implementation of NAPALM method discard_config."""
if self.config_session is not None:
commands = []
commands.append('configure session {}'.format(self.config_session))
commands.append('abort')
self.device.run_commands(commands)
self.config_session = None | python | def discard_config(self):
"""Implementation of NAPALM method discard_config."""
if self.config_session is not None:
commands = []
commands.append('configure session {}'.format(self.config_session))
commands.append('abort')
self.device.run_commands(commands)
self.config_session = None | [
"def",
"discard_config",
"(",
"self",
")",
":",
"if",
"self",
".",
"config_session",
"is",
"not",
"None",
":",
"commands",
"=",
"[",
"]",
"commands",
".",
"append",
"(",
"'configure session {}'",
".",
"format",
"(",
"self",
".",
"config_session",
")",
")",... | Implementation of NAPALM method discard_config. | [
"Implementation",
"of",
"NAPALM",
"method",
"discard_config",
"."
] | a3b37d6ee353e326ab9ea1a09ecc14045b12928b | https://github.com/napalm-automation/napalm-eos/blob/a3b37d6ee353e326ab9ea1a09ecc14045b12928b/napalm_eos/eos.py#L201-L208 | train | 39,020 |
napalm-automation/napalm-eos | napalm_eos/eos.py | EOSDriver.rollback | def rollback(self):
"""Implementation of NAPALM method rollback."""
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) | python | def rollback(self):
"""Implementation of NAPALM method rollback."""
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) | [
"def",
"rollback",
"(",
"self",
")",
":",
"commands",
"=",
"[",
"]",
"commands",
".",
"append",
"(",
"'configure replace flash:rollback-0'",
")",
"commands",
".",
"append",
"(",
"'write memory'",
")",
"self",
".",
"device",
".",
"run_commands",
"(",
"commands"... | Implementation of NAPALM method rollback. | [
"Implementation",
"of",
"NAPALM",
"method",
"rollback",
"."
] | a3b37d6ee353e326ab9ea1a09ecc14045b12928b | https://github.com/napalm-automation/napalm-eos/blob/a3b37d6ee353e326ab9ea1a09ecc14045b12928b/napalm_eos/eos.py#L210-L215 | train | 39,021 |
napalm-automation/napalm-eos | napalm_eos/eos.py | EOSDriver.get_facts | def get_facts(self):
"""Implementation of NAPALM method get_facts."""
commands = []
commands.append('show version')
commands.append('show hostname')
commands.append('show interfaces')
result = self.device.run_commands(commands)
version = result[0]
hostname = result[1]
interfaces_dict = result[2]['interfaces']
uptime = time.time() - version['bootupTimestamp']
interfaces = [i for i in interfaces_dict.keys() if '.' not in i]
interfaces = string_parsers.sorted_nicely(interfaces)
return {
'hostname': hostname['hostname'],
'fqdn': hostname['fqdn'],
'vendor': u'Arista',
'model': version['modelName'],
'serial_number': version['serialNumber'],
'os_version': version['internalVersion'],
'uptime': int(uptime),
'interface_list': interfaces,
} | python | def get_facts(self):
"""Implementation of NAPALM method get_facts."""
commands = []
commands.append('show version')
commands.append('show hostname')
commands.append('show interfaces')
result = self.device.run_commands(commands)
version = result[0]
hostname = result[1]
interfaces_dict = result[2]['interfaces']
uptime = time.time() - version['bootupTimestamp']
interfaces = [i for i in interfaces_dict.keys() if '.' not in i]
interfaces = string_parsers.sorted_nicely(interfaces)
return {
'hostname': hostname['hostname'],
'fqdn': hostname['fqdn'],
'vendor': u'Arista',
'model': version['modelName'],
'serial_number': version['serialNumber'],
'os_version': version['internalVersion'],
'uptime': int(uptime),
'interface_list': interfaces,
} | [
"def",
"get_facts",
"(",
"self",
")",
":",
"commands",
"=",
"[",
"]",
"commands",
".",
"append",
"(",
"'show version'",
")",
"commands",
".",
"append",
"(",
"'show hostname'",
")",
"commands",
".",
"append",
"(",
"'show interfaces'",
")",
"result",
"=",
"s... | Implementation of NAPALM method get_facts. | [
"Implementation",
"of",
"NAPALM",
"method",
"get_facts",
"."
] | a3b37d6ee353e326ab9ea1a09ecc14045b12928b | https://github.com/napalm-automation/napalm-eos/blob/a3b37d6ee353e326ab9ea1a09ecc14045b12928b/napalm_eos/eos.py#L217-L244 | train | 39,022 |
napalm-automation/napalm-eos | napalm_eos/eos.py | EOSDriver.get_config | def get_config(self, retrieve="all"):
"""get_config implementation for EOS."""
get_startup = retrieve == "all" or retrieve == "startup"
get_running = retrieve == "all" or retrieve == "running"
get_candidate = (retrieve == "all" or retrieve == "candidate") and self.config_session
if retrieve == "all":
commands = ['show startup-config',
'show running-config']
if self.config_session:
commands.append('show session-config named {}'.format(self.config_session))
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[1]['output']) if get_running else u"",
'candidate': py23_compat.text_type(output[2]['output']) if get_candidate else u"",
}
elif get_startup or get_running:
commands = ['show {}-config'.format(retrieve)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[0]['output']) if get_running else u"",
'candidate': "",
}
elif get_candidate:
commands = ['show session-config named {}'.format(self.config_session)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': "",
'running': "",
'candidate': py23_compat.text_type(output[0]['output']),
}
elif retrieve == "candidate":
# If we get here it means that we want the candidate but there is none.
return {
'startup': "",
'running': "",
'candidate': "",
}
else:
raise Exception("Wrong retrieve filter: {}".format(retrieve)) | python | def get_config(self, retrieve="all"):
"""get_config implementation for EOS."""
get_startup = retrieve == "all" or retrieve == "startup"
get_running = retrieve == "all" or retrieve == "running"
get_candidate = (retrieve == "all" or retrieve == "candidate") and self.config_session
if retrieve == "all":
commands = ['show startup-config',
'show running-config']
if self.config_session:
commands.append('show session-config named {}'.format(self.config_session))
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[1]['output']) if get_running else u"",
'candidate': py23_compat.text_type(output[2]['output']) if get_candidate else u"",
}
elif get_startup or get_running:
commands = ['show {}-config'.format(retrieve)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[0]['output']) if get_running else u"",
'candidate': "",
}
elif get_candidate:
commands = ['show session-config named {}'.format(self.config_session)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': "",
'running': "",
'candidate': py23_compat.text_type(output[0]['output']),
}
elif retrieve == "candidate":
# If we get here it means that we want the candidate but there is none.
return {
'startup': "",
'running': "",
'candidate': "",
}
else:
raise Exception("Wrong retrieve filter: {}".format(retrieve)) | [
"def",
"get_config",
"(",
"self",
",",
"retrieve",
"=",
"\"all\"",
")",
":",
"get_startup",
"=",
"retrieve",
"==",
"\"all\"",
"or",
"retrieve",
"==",
"\"startup\"",
"get_running",
"=",
"retrieve",
"==",
"\"all\"",
"or",
"retrieve",
"==",
"\"running\"",
"get_ca... | get_config implementation for EOS. | [
"get_config",
"implementation",
"for",
"EOS",
"."
] | a3b37d6ee353e326ab9ea1a09ecc14045b12928b | https://github.com/napalm-automation/napalm-eos/blob/a3b37d6ee353e326ab9ea1a09ecc14045b12928b/napalm_eos/eos.py#L1517-L1560 | train | 39,023 |
mbr/simplekv | simplekv/net/botostore.py | map_boto_exceptions | def map_boto_exceptions(key=None, exc_pass=()):
"""Map boto-specific exceptions to the simplekv-API."""
from boto.exception import BotoClientError, BotoServerError, \
StorageResponseError
try:
yield
except StorageResponseError as e:
if e.code == 'NoSuchKey':
raise KeyError(key)
raise IOError(str(e))
except (BotoClientError, BotoServerError) as e:
if e.__class__.__name__ not in exc_pass:
raise IOError(str(e)) | python | def map_boto_exceptions(key=None, exc_pass=()):
"""Map boto-specific exceptions to the simplekv-API."""
from boto.exception import BotoClientError, BotoServerError, \
StorageResponseError
try:
yield
except StorageResponseError as e:
if e.code == 'NoSuchKey':
raise KeyError(key)
raise IOError(str(e))
except (BotoClientError, BotoServerError) as e:
if e.__class__.__name__ not in exc_pass:
raise IOError(str(e)) | [
"def",
"map_boto_exceptions",
"(",
"key",
"=",
"None",
",",
"exc_pass",
"=",
"(",
")",
")",
":",
"from",
"boto",
".",
"exception",
"import",
"BotoClientError",
",",
"BotoServerError",
",",
"StorageResponseError",
"try",
":",
"yield",
"except",
"StorageResponseEr... | Map boto-specific exceptions to the simplekv-API. | [
"Map",
"boto",
"-",
"specific",
"exceptions",
"to",
"the",
"simplekv",
"-",
"API",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/net/botostore.py#L11-L23 | train | 39,024 |
mbr/simplekv | simplekv/net/azurestore.py | _file_md5 | def _file_md5(file_):
"""
Compute the md5 digest of a file in base64 encoding.
"""
md5 = hashlib.md5()
chunk_size = 128 * md5.block_size
for chunk in iter(lambda: file_.read(chunk_size), b''):
md5.update(chunk)
file_.seek(0)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | python | def _file_md5(file_):
"""
Compute the md5 digest of a file in base64 encoding.
"""
md5 = hashlib.md5()
chunk_size = 128 * md5.block_size
for chunk in iter(lambda: file_.read(chunk_size), b''):
md5.update(chunk)
file_.seek(0)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | [
"def",
"_file_md5",
"(",
"file_",
")",
":",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"chunk_size",
"=",
"128",
"*",
"md5",
".",
"block_size",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
"file_",
".",
"read",
"(",
"chunk_size",
")",
",",
"... | Compute the md5 digest of a file in base64 encoding. | [
"Compute",
"the",
"md5",
"digest",
"of",
"a",
"file",
"in",
"base64",
"encoding",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/net/azurestore.py#L25-L35 | train | 39,025 |
mbr/simplekv | simplekv/net/azurestore.py | _byte_buffer_md5 | def _byte_buffer_md5(buffer_):
"""
Computes the md5 digest of a byte buffer in base64 encoding.
"""
md5 = hashlib.md5(buffer_)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | python | def _byte_buffer_md5(buffer_):
"""
Computes the md5 digest of a byte buffer in base64 encoding.
"""
md5 = hashlib.md5(buffer_)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | [
"def",
"_byte_buffer_md5",
"(",
"buffer_",
")",
":",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
"buffer_",
")",
"byte_digest",
"=",
"md5",
".",
"digest",
"(",
")",
"return",
"base64",
".",
"b64encode",
"(",
"byte_digest",
")",
".",
"decode",
"(",
")"
] | Computes the md5 digest of a byte buffer in base64 encoding. | [
"Computes",
"the",
"md5",
"digest",
"of",
"a",
"byte",
"buffer",
"in",
"base64",
"encoding",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/net/azurestore.py#L46-L52 | train | 39,026 |
mbr/simplekv | simplekv/net/azurestore.py | map_azure_exceptions | def map_azure_exceptions(key=None, exc_pass=()):
"""Map Azure-specific exceptions to the simplekv-API."""
from azure.common import AzureMissingResourceHttpError, AzureHttpError,\
AzureException
try:
yield
except AzureMissingResourceHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
s = str(ex)
if s.startswith(u"The specified container does not exist."):
raise IOError(s)
raise KeyError(key)
except AzureHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex))
except AzureException as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex)) | python | def map_azure_exceptions(key=None, exc_pass=()):
"""Map Azure-specific exceptions to the simplekv-API."""
from azure.common import AzureMissingResourceHttpError, AzureHttpError,\
AzureException
try:
yield
except AzureMissingResourceHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
s = str(ex)
if s.startswith(u"The specified container does not exist."):
raise IOError(s)
raise KeyError(key)
except AzureHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex))
except AzureException as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex)) | [
"def",
"map_azure_exceptions",
"(",
"key",
"=",
"None",
",",
"exc_pass",
"=",
"(",
")",
")",
":",
"from",
"azure",
".",
"common",
"import",
"AzureMissingResourceHttpError",
",",
"AzureHttpError",
",",
"AzureException",
"try",
":",
"yield",
"except",
"AzureMissin... | Map Azure-specific exceptions to the simplekv-API. | [
"Map",
"Azure",
"-",
"specific",
"exceptions",
"to",
"the",
"simplekv",
"-",
"API",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/net/azurestore.py#L56-L73 | train | 39,027 |
mbr/simplekv | simplekv/net/azurestore.py | IOInterface.read | def read(self, size=-1):
"""Returns 'size' amount of bytes or less if there is no more data.
If no size is given all data is returned. size can be >= 0."""
if self.closed:
raise ValueError("I/O operation on closed file")
with map_azure_exceptions(key=self.key):
if size < 0:
size = self.size - self.pos
end = min(self.pos + size - 1, self.size - 1)
if self.pos > end:
return b''
b = self.block_blob_service.get_blob_to_bytes(
container_name=self.container_name,
blob_name=self.key,
start_range=self.pos,
end_range=end, # end_range is inclusive
max_connections=self.max_connections,
)
self.pos += len(b.content)
return b.content | python | def read(self, size=-1):
"""Returns 'size' amount of bytes or less if there is no more data.
If no size is given all data is returned. size can be >= 0."""
if self.closed:
raise ValueError("I/O operation on closed file")
with map_azure_exceptions(key=self.key):
if size < 0:
size = self.size - self.pos
end = min(self.pos + size - 1, self.size - 1)
if self.pos > end:
return b''
b = self.block_blob_service.get_blob_to_bytes(
container_name=self.container_name,
blob_name=self.key,
start_range=self.pos,
end_range=end, # end_range is inclusive
max_connections=self.max_connections,
)
self.pos += len(b.content)
return b.content | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"-",
"1",
")",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"ValueError",
"(",
"\"I/O operation on closed file\"",
")",
"with",
"map_azure_exceptions",
"(",
"key",
"=",
"self",
".",
"key",
")",
":",
"if",
... | Returns 'size' amount of bytes or less if there is no more data.
If no size is given all data is returned. size can be >= 0. | [
"Returns",
"size",
"amount",
"of",
"bytes",
"or",
"less",
"if",
"there",
"is",
"no",
"more",
"data",
".",
"If",
"no",
"size",
"is",
"given",
"all",
"data",
"is",
"returned",
".",
"size",
"can",
"be",
">",
"=",
"0",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/net/azurestore.py#L231-L251 | train | 39,028 |
mbr/simplekv | simplekv/net/azurestore.py | IOInterface.seek | def seek(self, offset, whence=0):
"""Move to a new offset either relative or absolute. whence=0 is
absolute, whence=1 is relative, whence=2 is relative to the end.
Any relative or absolute seek operation which would result in a
negative position is undefined and that case can be ignored
in the implementation.
Any seek operation which moves the position after the stream
should succeed. tell() should report that position and read()
should return an empty bytes object."""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == 0:
if offset < 0:
raise IOError('seek would move position outside the file')
self.pos = offset
elif whence == 1:
if self.pos + offset < 0:
raise IOError('seek would move position outside the file')
self.pos += offset
elif whence == 2:
if self.size + offset < 0:
raise IOError('seek would move position outside the file')
self.pos = self.size + offset
return self.pos | python | def seek(self, offset, whence=0):
"""Move to a new offset either relative or absolute. whence=0 is
absolute, whence=1 is relative, whence=2 is relative to the end.
Any relative or absolute seek operation which would result in a
negative position is undefined and that case can be ignored
in the implementation.
Any seek operation which moves the position after the stream
should succeed. tell() should report that position and read()
should return an empty bytes object."""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == 0:
if offset < 0:
raise IOError('seek would move position outside the file')
self.pos = offset
elif whence == 1:
if self.pos + offset < 0:
raise IOError('seek would move position outside the file')
self.pos += offset
elif whence == 2:
if self.size + offset < 0:
raise IOError('seek would move position outside the file')
self.pos = self.size + offset
return self.pos | [
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
"=",
"0",
")",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"ValueError",
"(",
"\"I/O operation on closed file\"",
")",
"if",
"whence",
"==",
"0",
":",
"if",
"offset",
"<",
"0",
":",
"raise",... | Move to a new offset either relative or absolute. whence=0 is
absolute, whence=1 is relative, whence=2 is relative to the end.
Any relative or absolute seek operation which would result in a
negative position is undefined and that case can be ignored
in the implementation.
Any seek operation which moves the position after the stream
should succeed. tell() should report that position and read()
should return an empty bytes object. | [
"Move",
"to",
"a",
"new",
"offset",
"either",
"relative",
"or",
"absolute",
".",
"whence",
"=",
"0",
"is",
"absolute",
"whence",
"=",
"1",
"is",
"relative",
"whence",
"=",
"2",
"is",
"relative",
"to",
"the",
"end",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/net/azurestore.py#L253-L278 | train | 39,029 |
mbr/simplekv | simplekv/git.py | _on_tree | def _on_tree(repo, tree, components, obj):
"""Mounts an object on a tree, using the given path components.
:param tree: Tree object to mount on.
:param components: A list of strings of subpaths (i.e. ['foo', 'bar'] is
equivalent to '/foo/bar')
:param obj: Object to mount. If None, removes the object found at path
and prunes the tree downwards.
:return: A list of new entities that need to be added to the object store,
where the last one is the new tree.
"""
# pattern-matching:
if len(components) == 1:
if isinstance(obj, Blob):
mode = 0o100644
elif isinstance(obj, Tree):
mode = 0o040000
elif obj is None:
mode = None
else:
raise TypeError('Can only mount Blobs or Trees')
name = components[0]
if mode is not None:
tree[name] = mode, obj.id
return [tree]
if name in tree:
del tree[name]
return [tree]
elif len(components) > 1:
a, bc = components[0], components[1:]
if a in tree:
a_tree = repo[tree[a][1]]
if not isinstance(a_tree, Tree):
a_tree = Tree()
else:
a_tree = Tree()
res = _on_tree(repo, a_tree, bc, obj)
a_tree_new = res[-1]
if a_tree_new.items():
tree[a] = 0o040000, a_tree_new.id
return res + [tree]
# tree is empty
if a in tree:
del tree[a]
return [tree]
else:
raise ValueError('Components can\'t be empty.') | python | def _on_tree(repo, tree, components, obj):
"""Mounts an object on a tree, using the given path components.
:param tree: Tree object to mount on.
:param components: A list of strings of subpaths (i.e. ['foo', 'bar'] is
equivalent to '/foo/bar')
:param obj: Object to mount. If None, removes the object found at path
and prunes the tree downwards.
:return: A list of new entities that need to be added to the object store,
where the last one is the new tree.
"""
# pattern-matching:
if len(components) == 1:
if isinstance(obj, Blob):
mode = 0o100644
elif isinstance(obj, Tree):
mode = 0o040000
elif obj is None:
mode = None
else:
raise TypeError('Can only mount Blobs or Trees')
name = components[0]
if mode is not None:
tree[name] = mode, obj.id
return [tree]
if name in tree:
del tree[name]
return [tree]
elif len(components) > 1:
a, bc = components[0], components[1:]
if a in tree:
a_tree = repo[tree[a][1]]
if not isinstance(a_tree, Tree):
a_tree = Tree()
else:
a_tree = Tree()
res = _on_tree(repo, a_tree, bc, obj)
a_tree_new = res[-1]
if a_tree_new.items():
tree[a] = 0o040000, a_tree_new.id
return res + [tree]
# tree is empty
if a in tree:
del tree[a]
return [tree]
else:
raise ValueError('Components can\'t be empty.') | [
"def",
"_on_tree",
"(",
"repo",
",",
"tree",
",",
"components",
",",
"obj",
")",
":",
"# pattern-matching:",
"if",
"len",
"(",
"components",
")",
"==",
"1",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Blob",
")",
":",
"mode",
"=",
"0o100644",
"elif",
... | Mounts an object on a tree, using the given path components.
:param tree: Tree object to mount on.
:param components: A list of strings of subpaths (i.e. ['foo', 'bar'] is
equivalent to '/foo/bar')
:param obj: Object to mount. If None, removes the object found at path
and prunes the tree downwards.
:return: A list of new entities that need to be added to the object store,
where the last one is the new tree. | [
"Mounts",
"an",
"object",
"on",
"a",
"tree",
"using",
"the",
"given",
"path",
"components",
"."
] | fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6 | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/git.py#L12-L62 | train | 39,030 |
SUSE-Enceladus/ipa | ipa/ipa_ec2.py | EC2Cloud._connect | def _connect(self):
"""Connect to ec2 resource."""
resource = None
try:
resource = boto3.resource(
'ec2',
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
region_name=self.region
)
# boto3 resource is lazy so attempt method to test connection
resource.meta.client.describe_account_attributes()
except Exception:
raise EC2CloudException(
'Could not connect to region: %s' % self.region
)
return resource | python | def _connect(self):
"""Connect to ec2 resource."""
resource = None
try:
resource = boto3.resource(
'ec2',
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
region_name=self.region
)
# boto3 resource is lazy so attempt method to test connection
resource.meta.client.describe_account_attributes()
except Exception:
raise EC2CloudException(
'Could not connect to region: %s' % self.region
)
return resource | [
"def",
"_connect",
"(",
"self",
")",
":",
"resource",
"=",
"None",
"try",
":",
"resource",
"=",
"boto3",
".",
"resource",
"(",
"'ec2'",
",",
"aws_access_key_id",
"=",
"self",
".",
"access_key_id",
",",
"aws_secret_access_key",
"=",
"self",
".",
"secret_acces... | Connect to ec2 resource. | [
"Connect",
"to",
"ec2",
"resource",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_ec2.py#L168-L184 | train | 39,031 |
SUSE-Enceladus/ipa | ipa/ipa_ec2.py | EC2Cloud._get_user_data | def _get_user_data(self):
"""
Return formatted bash script string.
The public ssh key is added by cloud init to the instance based on
the ssh user and private key file.
"""
key = ipa_utils.generate_public_ssh_key(
self.ssh_private_key_file
).decode()
script = BASH_SSH_SCRIPT.format(user=self.ssh_user, key=key)
return script | python | def _get_user_data(self):
"""
Return formatted bash script string.
The public ssh key is added by cloud init to the instance based on
the ssh user and private key file.
"""
key = ipa_utils.generate_public_ssh_key(
self.ssh_private_key_file
).decode()
script = BASH_SSH_SCRIPT.format(user=self.ssh_user, key=key)
return script | [
"def",
"_get_user_data",
"(",
"self",
")",
":",
"key",
"=",
"ipa_utils",
".",
"generate_public_ssh_key",
"(",
"self",
".",
"ssh_private_key_file",
")",
".",
"decode",
"(",
")",
"script",
"=",
"BASH_SSH_SCRIPT",
".",
"format",
"(",
"user",
"=",
"self",
".",
... | Return formatted bash script string.
The public ssh key is added by cloud init to the instance based on
the ssh user and private key file. | [
"Return",
"formatted",
"bash",
"script",
"string",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_ec2.py#L221-L232 | train | 39,032 |
SUSE-Enceladus/ipa | ipa/ipa_ec2.py | EC2Cloud._set_instance_ip | def _set_instance_ip(self):
"""
Retrieve instance ip and cache it.
Current preference is for public ipv4, ipv6 and private.
"""
instance = self._get_instance()
# ipv6
try:
ipv6 = instance.network_interfaces[0].ipv6_addresses[0]
except (IndexError, TypeError):
ipv6 = None
self.instance_ip = instance.public_ip_address or \
ipv6 or instance.private_ip_address
if not self.instance_ip:
raise EC2CloudException(
'IP address for instance cannot be found.'
) | python | def _set_instance_ip(self):
"""
Retrieve instance ip and cache it.
Current preference is for public ipv4, ipv6 and private.
"""
instance = self._get_instance()
# ipv6
try:
ipv6 = instance.network_interfaces[0].ipv6_addresses[0]
except (IndexError, TypeError):
ipv6 = None
self.instance_ip = instance.public_ip_address or \
ipv6 or instance.private_ip_address
if not self.instance_ip:
raise EC2CloudException(
'IP address for instance cannot be found.'
) | [
"def",
"_set_instance_ip",
"(",
"self",
")",
":",
"instance",
"=",
"self",
".",
"_get_instance",
"(",
")",
"# ipv6",
"try",
":",
"ipv6",
"=",
"instance",
".",
"network_interfaces",
"[",
"0",
"]",
".",
"ipv6_addresses",
"[",
"0",
"]",
"except",
"(",
"Inde... | Retrieve instance ip and cache it.
Current preference is for public ipv4, ipv6 and private. | [
"Retrieve",
"instance",
"ip",
"and",
"cache",
"it",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_ec2.py#L293-L313 | train | 39,033 |
tgbugs/pyontutils | pyontutils/process_fixed.py | _process_worker | def _process_worker(call_queue, result_queue):
""" This worker is wrapped to block KeyboardInterrupt """
signal.signal(signal.SIGINT, signal.SIG_IGN) #block ctrl-c
return _process_worker_base(call_queue, result_queue) | python | def _process_worker(call_queue, result_queue):
""" This worker is wrapped to block KeyboardInterrupt """
signal.signal(signal.SIGINT, signal.SIG_IGN) #block ctrl-c
return _process_worker_base(call_queue, result_queue) | [
"def",
"_process_worker",
"(",
"call_queue",
",",
"result_queue",
")",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_IGN",
")",
"#block ctrl-c",
"return",
"_process_worker_base",
"(",
"call_queue",
",",
"result_queue",
")"
... | This worker is wrapped to block KeyboardInterrupt | [
"This",
"worker",
"is",
"wrapped",
"to",
"block",
"KeyboardInterrupt"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/process_fixed.py#L6-L9 | train | 39,034 |
SUSE-Enceladus/ipa | ipa/ipa_distro.py | Distro._set_init_system | def _set_init_system(self, client):
"""Determine the init system of distribution."""
if not self.init_system:
try:
out = ipa_utils.execute_ssh_command(
client,
'ps -p 1 -o comm='
)
except Exception as e:
raise IpaDistroException(
'An error occurred while retrieving'
' the distro init system: %s' % e
)
if out:
self.init_system = out.strip() | python | def _set_init_system(self, client):
"""Determine the init system of distribution."""
if not self.init_system:
try:
out = ipa_utils.execute_ssh_command(
client,
'ps -p 1 -o comm='
)
except Exception as e:
raise IpaDistroException(
'An error occurred while retrieving'
' the distro init system: %s' % e
)
if out:
self.init_system = out.strip() | [
"def",
"_set_init_system",
"(",
"self",
",",
"client",
")",
":",
"if",
"not",
"self",
".",
"init_system",
":",
"try",
":",
"out",
"=",
"ipa_utils",
".",
"execute_ssh_command",
"(",
"client",
",",
"'ps -p 1 -o comm='",
")",
"except",
"Exception",
"as",
"e",
... | Determine the init system of distribution. | [
"Determine",
"the",
"init",
"system",
"of",
"distribution",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_distro.py#L36-L50 | train | 39,035 |
SUSE-Enceladus/ipa | ipa/ipa_distro.py | Distro.get_vm_info | def get_vm_info(self, client):
"""Return vm info."""
out = ''
self._set_init_system(client)
if self.init_system == 'systemd':
try:
out += 'systemd-analyze:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'systemd-analyze'
)
out += 'systemd-analyze blame:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'systemd-analyze blame'
)
out += 'journalctl -b:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'sudo journalctl -b'
)
except Exception as error:
out = 'Failed to collect VM info: {0}.'.format(error)
return out | python | def get_vm_info(self, client):
"""Return vm info."""
out = ''
self._set_init_system(client)
if self.init_system == 'systemd':
try:
out += 'systemd-analyze:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'systemd-analyze'
)
out += 'systemd-analyze blame:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'systemd-analyze blame'
)
out += 'journalctl -b:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'sudo journalctl -b'
)
except Exception as error:
out = 'Failed to collect VM info: {0}.'.format(error)
return out | [
"def",
"get_vm_info",
"(",
"self",
",",
"client",
")",
":",
"out",
"=",
"''",
"self",
".",
"_set_init_system",
"(",
"client",
")",
"if",
"self",
".",
"init_system",
"==",
"'systemd'",
":",
"try",
":",
"out",
"+=",
"'systemd-analyze:\\n\\n'",
"out",
"+=",
... | Return vm info. | [
"Return",
"vm",
"info",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_distro.py#L76-L103 | train | 39,036 |
SUSE-Enceladus/ipa | ipa/ipa_distro.py | Distro.install_package | def install_package(self, client, package):
"""Install package on instance."""
install_cmd = "{sudo} '{install} {package}'".format(
sudo=self.get_sudo_exec_wrapper(),
install=self.get_install_cmd(),
package=package
)
try:
out = ipa_utils.execute_ssh_command(
client,
install_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred installing package {package} '
'on instance: {error}'.format(
package=package,
error=error
)
)
else:
return out | python | def install_package(self, client, package):
"""Install package on instance."""
install_cmd = "{sudo} '{install} {package}'".format(
sudo=self.get_sudo_exec_wrapper(),
install=self.get_install_cmd(),
package=package
)
try:
out = ipa_utils.execute_ssh_command(
client,
install_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred installing package {package} '
'on instance: {error}'.format(
package=package,
error=error
)
)
else:
return out | [
"def",
"install_package",
"(",
"self",
",",
"client",
",",
"package",
")",
":",
"install_cmd",
"=",
"\"{sudo} '{install} {package}'\"",
".",
"format",
"(",
"sudo",
"=",
"self",
".",
"get_sudo_exec_wrapper",
"(",
")",
",",
"install",
"=",
"self",
".",
"get_inst... | Install package on instance. | [
"Install",
"package",
"on",
"instance",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_distro.py#L105-L127 | train | 39,037 |
SUSE-Enceladus/ipa | ipa/ipa_distro.py | Distro.reboot | def reboot(self, client):
"""Execute reboot command on instance."""
self._set_init_system(client)
reboot_cmd = "{sudo} '{stop_ssh};{reboot}'".format(
sudo=self.get_sudo_exec_wrapper(),
stop_ssh=self.get_stop_ssh_service_cmd(),
reboot=self.get_reboot_cmd()
)
try:
ipa_utils.execute_ssh_command(
client,
reboot_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred rebooting instance: %s' % error
)
ipa_utils.clear_cache() | python | def reboot(self, client):
"""Execute reboot command on instance."""
self._set_init_system(client)
reboot_cmd = "{sudo} '{stop_ssh};{reboot}'".format(
sudo=self.get_sudo_exec_wrapper(),
stop_ssh=self.get_stop_ssh_service_cmd(),
reboot=self.get_reboot_cmd()
)
try:
ipa_utils.execute_ssh_command(
client,
reboot_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred rebooting instance: %s' % error
)
ipa_utils.clear_cache() | [
"def",
"reboot",
"(",
"self",
",",
"client",
")",
":",
"self",
".",
"_set_init_system",
"(",
"client",
")",
"reboot_cmd",
"=",
"\"{sudo} '{stop_ssh};{reboot}'\"",
".",
"format",
"(",
"sudo",
"=",
"self",
".",
"get_sudo_exec_wrapper",
"(",
")",
",",
"stop_ssh",... | Execute reboot command on instance. | [
"Execute",
"reboot",
"command",
"on",
"instance",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_distro.py#L129-L148 | train | 39,038 |
SUSE-Enceladus/ipa | ipa/ipa_distro.py | Distro.update | def update(self, client):
"""Execute update command on instance."""
update_cmd = "{sudo} '{refresh};{update}'".format(
sudo=self.get_sudo_exec_wrapper(),
refresh=self.get_refresh_repo_cmd(),
update=self.get_update_cmd()
)
out = ''
try:
out = ipa_utils.execute_ssh_command(
client,
update_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred updating instance: %s' % error
)
return out | python | def update(self, client):
"""Execute update command on instance."""
update_cmd = "{sudo} '{refresh};{update}'".format(
sudo=self.get_sudo_exec_wrapper(),
refresh=self.get_refresh_repo_cmd(),
update=self.get_update_cmd()
)
out = ''
try:
out = ipa_utils.execute_ssh_command(
client,
update_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred updating instance: %s' % error
)
return out | [
"def",
"update",
"(",
"self",
",",
"client",
")",
":",
"update_cmd",
"=",
"\"{sudo} '{refresh};{update}'\"",
".",
"format",
"(",
"sudo",
"=",
"self",
".",
"get_sudo_exec_wrapper",
"(",
")",
",",
"refresh",
"=",
"self",
".",
"get_refresh_repo_cmd",
"(",
")",
... | Execute update command on instance. | [
"Execute",
"update",
"command",
"on",
"instance",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_distro.py#L150-L168 | train | 39,039 |
tgbugs/pyontutils | pyontutils/scigraph_client.py | Graph.ordered | def ordered(start, edges, predicate=None, inverse=False):
""" Depth first edges from a SciGraph response. """
s, o = 'sub', 'obj'
if inverse:
s, o = o, s
for edge in edges:
if predicate is not None and edge['pred'] != predicate:
print('scoop!')
continue
if edge[s] == start:
yield edge
yield from Graph.ordered(edge[o], edges, predicate=predicate) | python | def ordered(start, edges, predicate=None, inverse=False):
""" Depth first edges from a SciGraph response. """
s, o = 'sub', 'obj'
if inverse:
s, o = o, s
for edge in edges:
if predicate is not None and edge['pred'] != predicate:
print('scoop!')
continue
if edge[s] == start:
yield edge
yield from Graph.ordered(edge[o], edges, predicate=predicate) | [
"def",
"ordered",
"(",
"start",
",",
"edges",
",",
"predicate",
"=",
"None",
",",
"inverse",
"=",
"False",
")",
":",
"s",
",",
"o",
"=",
"'sub'",
",",
"'obj'",
"if",
"inverse",
":",
"s",
",",
"o",
"=",
"o",
",",
"s",
"for",
"edge",
"in",
"edges... | Depth first edges from a SciGraph response. | [
"Depth",
"first",
"edges",
"from",
"a",
"SciGraph",
"response",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/scigraph_client.py#L741-L753 | train | 39,040 |
tgbugs/pyontutils | pyontutils/hierarchies.py | tcsort | def tcsort(item): # FIXME SUCH WOW SO INEFFICIENT O_O
""" get len of transitive closure assume type items is tree... """
return len(item[1]) + sum(tcsort(kv) for kv in item[1].items()) | python | def tcsort(item): # FIXME SUCH WOW SO INEFFICIENT O_O
""" get len of transitive closure assume type items is tree... """
return len(item[1]) + sum(tcsort(kv) for kv in item[1].items()) | [
"def",
"tcsort",
"(",
"item",
")",
":",
"# FIXME SUCH WOW SO INEFFICIENT O_O",
"return",
"len",
"(",
"item",
"[",
"1",
"]",
")",
"+",
"sum",
"(",
"tcsort",
"(",
"kv",
")",
"for",
"kv",
"in",
"item",
"[",
"1",
"]",
".",
"items",
"(",
")",
")"
] | get len of transitive closure assume type items is tree... | [
"get",
"len",
"of",
"transitive",
"closure",
"assume",
"type",
"items",
"is",
"tree",
"..."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/hierarchies.py#L43-L45 | train | 39,041 |
tgbugs/pyontutils | pyontutils/hierarchies.py | get_node | def get_node(start, tree, pnames):
""" for each parent find a single branch to root """
def get_first_branch(node):
if node not in pnames: # one way to hit a root
return []
if pnames[node]: # mmmm names
fp = pnames[node][0]
if cycle_check(node, fp, pnames):
fp = pnames[node][1] # if there are double cycles I WILL KILL FOR THE PLEASURE IF IT
print(fp)
return [fp] + get_first_branch(fp)
else:
return []
branch = get_first_branch(start)
for n in branch[::-1]:
tree = tree[n]
assert start in tree, "our start wasnt in the tree! OH NO!"
branch = [start] + branch
print('branch', branch)
return tree, branch | python | def get_node(start, tree, pnames):
""" for each parent find a single branch to root """
def get_first_branch(node):
if node not in pnames: # one way to hit a root
return []
if pnames[node]: # mmmm names
fp = pnames[node][0]
if cycle_check(node, fp, pnames):
fp = pnames[node][1] # if there are double cycles I WILL KILL FOR THE PLEASURE IF IT
print(fp)
return [fp] + get_first_branch(fp)
else:
return []
branch = get_first_branch(start)
for n in branch[::-1]:
tree = tree[n]
assert start in tree, "our start wasnt in the tree! OH NO!"
branch = [start] + branch
print('branch', branch)
return tree, branch | [
"def",
"get_node",
"(",
"start",
",",
"tree",
",",
"pnames",
")",
":",
"def",
"get_first_branch",
"(",
"node",
")",
":",
"if",
"node",
"not",
"in",
"pnames",
":",
"# one way to hit a root",
"return",
"[",
"]",
"if",
"pnames",
"[",
"node",
"]",
":",
"# ... | for each parent find a single branch to root | [
"for",
"each",
"parent",
"find",
"a",
"single",
"branch",
"to",
"root"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/hierarchies.py#L58-L80 | train | 39,042 |
tgbugs/pyontutils | pyontutils/hierarchies.py | dematerialize | def dematerialize(parent_name, parent_node): # FIXME we need to demat more than just leaves!
#FIXME still an issue: Fornix, Striatum, Diagonal Band
""" Remove nodes higher in the tree that occur further down the
SAME branch. If they occur down OTHER branchs leave them alone.
NOTE: modifies in place!
"""
lleaves = {}
children = parent_node[parent_name]
if not children: # children could be empty ? i think this only happens @ root?
#print('at bottom', parent_name)
lleaves[parent_name] = None
return lleaves
children_ord = reversed(sorted(sorted(((k, v)
for k, v in children.items()),
key=alphasortkey),
#key=lambda a: f'{a[0]}'.split('>')[1] if '>' in f'{a[0]}' else f'a[0]'),
#key=lambda a: a[0].split('>') if '>' in a[0] else a[0]),
key=tcsort)) # make sure we hit deepest first
for child_name, _ in children_ord: # get list so we can go ahead and pop
#print(child_name)
new_lleaves = dematerialize(child_name, children)
if child_name == 'magnetic resonance imaging': # debugging failing demat
pass
#embed()
if child_name in new_lleaves or all(l in lleaves for l in new_lleaves):
# if it is a leaf or all childs are leaves as well
if child_name in lleaves: # if it has previously been identified as a leaf!
#print('MATERIALIZATION DETECTED! LOWER PARENT:',
#lleaves[child_name],'ZAPPING!:', child_name,
#'OF PARENT:', parent_name)
children.pop(child_name)
#print('cn', child_name, 'pn', parent_name, 'BOTTOM')
#else: # if it has NOT previously been identified as a leaf, add the parent!
#new_lleaves[child_name] = parent_name # pass it back up to nodes above
#print('cn', child_name, 'pn', parent_name)
#else: # it is a node but we want to dematerizlize them too!
lleaves[child_name] = parent_name
lleaves.update(new_lleaves)
return lleaves | python | def dematerialize(parent_name, parent_node): # FIXME we need to demat more than just leaves!
#FIXME still an issue: Fornix, Striatum, Diagonal Band
""" Remove nodes higher in the tree that occur further down the
SAME branch. If they occur down OTHER branchs leave them alone.
NOTE: modifies in place!
"""
lleaves = {}
children = parent_node[parent_name]
if not children: # children could be empty ? i think this only happens @ root?
#print('at bottom', parent_name)
lleaves[parent_name] = None
return lleaves
children_ord = reversed(sorted(sorted(((k, v)
for k, v in children.items()),
key=alphasortkey),
#key=lambda a: f'{a[0]}'.split('>')[1] if '>' in f'{a[0]}' else f'a[0]'),
#key=lambda a: a[0].split('>') if '>' in a[0] else a[0]),
key=tcsort)) # make sure we hit deepest first
for child_name, _ in children_ord: # get list so we can go ahead and pop
#print(child_name)
new_lleaves = dematerialize(child_name, children)
if child_name == 'magnetic resonance imaging': # debugging failing demat
pass
#embed()
if child_name in new_lleaves or all(l in lleaves for l in new_lleaves):
# if it is a leaf or all childs are leaves as well
if child_name in lleaves: # if it has previously been identified as a leaf!
#print('MATERIALIZATION DETECTED! LOWER PARENT:',
#lleaves[child_name],'ZAPPING!:', child_name,
#'OF PARENT:', parent_name)
children.pop(child_name)
#print('cn', child_name, 'pn', parent_name, 'BOTTOM')
#else: # if it has NOT previously been identified as a leaf, add the parent!
#new_lleaves[child_name] = parent_name # pass it back up to nodes above
#print('cn', child_name, 'pn', parent_name)
#else: # it is a node but we want to dematerizlize them too!
lleaves[child_name] = parent_name
lleaves.update(new_lleaves)
return lleaves | [
"def",
"dematerialize",
"(",
"parent_name",
",",
"parent_node",
")",
":",
"# FIXME we need to demat more than just leaves!",
"#FIXME still an issue: Fornix, Striatum, Diagonal Band",
"lleaves",
"=",
"{",
"}",
"children",
"=",
"parent_node",
"[",
"parent_name",
"]",
"if",
"n... | Remove nodes higher in the tree that occur further down the
SAME branch. If they occur down OTHER branchs leave them alone.
NOTE: modifies in place! | [
"Remove",
"nodes",
"higher",
"in",
"the",
"tree",
"that",
"occur",
"further",
"down",
"the",
"SAME",
"branch",
".",
"If",
"they",
"occur",
"down",
"OTHER",
"branchs",
"leave",
"them",
"alone",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/hierarchies.py#L115-L160 | train | 39,043 |
tgbugs/pyontutils | ilxutils/ilxutils/interlex_sql.py | IlxSql.get_terms | def get_terms(self):
''' GROUP BY is a shortcut to only getting the first in every list of group '''
if not self.terms.empty:
return self.terms
if self.from_backup:
self.terms = open_pickle(TERMS_BACKUP_PATH)
return self.terms
engine = create_engine(self.db_url)
data = """
SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version
FROM terms t
GROUP BY t.ilx
"""
self.terms = pd.read_sql(data, engine)
create_pickle(self.terms, TERMS_BACKUP_PATH)
return self.terms | python | def get_terms(self):
''' GROUP BY is a shortcut to only getting the first in every list of group '''
if not self.terms.empty:
return self.terms
if self.from_backup:
self.terms = open_pickle(TERMS_BACKUP_PATH)
return self.terms
engine = create_engine(self.db_url)
data = """
SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version
FROM terms t
GROUP BY t.ilx
"""
self.terms = pd.read_sql(data, engine)
create_pickle(self.terms, TERMS_BACKUP_PATH)
return self.terms | [
"def",
"get_terms",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"terms",
".",
"empty",
":",
"return",
"self",
".",
"terms",
"if",
"self",
".",
"from_backup",
":",
"self",
".",
"terms",
"=",
"open_pickle",
"(",
"TERMS_BACKUP_PATH",
")",
"return",
"... | GROUP BY is a shortcut to only getting the first in every list of group | [
"GROUP",
"BY",
"is",
"a",
"shortcut",
"to",
"only",
"getting",
"the",
"first",
"in",
"every",
"list",
"of",
"group"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/interlex_sql.py#L67-L82 | train | 39,044 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | superclasses_bug_fix | def superclasses_bug_fix(data):
''' PHP returns "id" in superclass but only accepts superclass_tid '''
for i, value in enumerate(data['superclasses']):
data['superclasses'][i]['superclass_tid'] = data['superclasses'][i].pop('id')
return data | python | def superclasses_bug_fix(data):
''' PHP returns "id" in superclass but only accepts superclass_tid '''
for i, value in enumerate(data['superclasses']):
data['superclasses'][i]['superclass_tid'] = data['superclasses'][i].pop('id')
return data | [
"def",
"superclasses_bug_fix",
"(",
"data",
")",
":",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"data",
"[",
"'superclasses'",
"]",
")",
":",
"data",
"[",
"'superclasses'",
"]",
"[",
"i",
"]",
"[",
"'superclass_tid'",
"]",
"=",
"data",
"[",
"'s... | PHP returns "id" in superclass but only accepts superclass_tid | [
"PHP",
"returns",
"id",
"in",
"superclass",
"but",
"only",
"accepts",
"superclass_tid"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L47-L51 | train | 39,045 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.log_info | def log_info(self, data):
''' Logs successful responses '''
info = 'label={label}, id={id}, ilx={ilx}, superclass_tid={super_id}'
info_filled = info.format(label = data['label'],
id = data['id'],
ilx = data['ilx'],
super_id = data['superclasses'][0]['id'])
logging.info(info_filled)
return info_filled | python | def log_info(self, data):
''' Logs successful responses '''
info = 'label={label}, id={id}, ilx={ilx}, superclass_tid={super_id}'
info_filled = info.format(label = data['label'],
id = data['id'],
ilx = data['ilx'],
super_id = data['superclasses'][0]['id'])
logging.info(info_filled)
return info_filled | [
"def",
"log_info",
"(",
"self",
",",
"data",
")",
":",
"info",
"=",
"'label={label}, id={id}, ilx={ilx}, superclass_tid={super_id}'",
"info_filled",
"=",
"info",
".",
"format",
"(",
"label",
"=",
"data",
"[",
"'label'",
"]",
",",
"id",
"=",
"data",
"[",
"'id'"... | Logs successful responses | [
"Logs",
"successful",
"responses"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L80-L88 | train | 39,046 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.process_request | def process_request(self, req):
''' Checks to see if data returned from database is useable '''
# Check status code of request
req.raise_for_status() # if codes not in 200s; error raise
# Proper status code, but check if server returned a warning
try:
output = req.json()
except:
exit(req.text) # server returned html error
# Try to find an error msg in the server response
try:
error = output['data'].get('errormsg')
except:
error = output.get('errormsg') # server has 2 variations of errormsg
finally:
if error:
exit(error)
return output | python | def process_request(self, req):
''' Checks to see if data returned from database is useable '''
# Check status code of request
req.raise_for_status() # if codes not in 200s; error raise
# Proper status code, but check if server returned a warning
try:
output = req.json()
except:
exit(req.text) # server returned html error
# Try to find an error msg in the server response
try:
error = output['data'].get('errormsg')
except:
error = output.get('errormsg') # server has 2 variations of errormsg
finally:
if error:
exit(error)
return output | [
"def",
"process_request",
"(",
"self",
",",
"req",
")",
":",
"# Check status code of request",
"req",
".",
"raise_for_status",
"(",
")",
"# if codes not in 200s; error raise",
"# Proper status code, but check if server returned a warning",
"try",
":",
"output",
"=",
"req",
... | Checks to see if data returned from database is useable | [
"Checks",
"to",
"see",
"if",
"data",
"returned",
"from",
"database",
"is",
"useable"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L111-L128 | train | 39,047 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.is_equal | def is_equal(self, string1, string2):
''' Simple string comparator '''
return string1.lower().strip() == string2.lower().strip() | python | def is_equal(self, string1, string2):
''' Simple string comparator '''
return string1.lower().strip() == string2.lower().strip() | [
"def",
"is_equal",
"(",
"self",
",",
"string1",
",",
"string2",
")",
":",
"return",
"string1",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"==",
"string2",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")"
] | Simple string comparator | [
"Simple",
"string",
"comparator"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L130-L132 | train | 39,048 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.are_ilx | def are_ilx(self, ilx_ids):
''' Checks list of objects to see if they are usable ILX IDs '''
total_data = []
for ilx_id in ilx_ids:
ilx_id = ilx_id.replace('http', '').replace('.', '').replace('/', '')
data, success = self.get_data_from_ilx(ilx_id)
if success:
total_data.append(data['data'])
else:
total_data.append({})
return total_data | python | def are_ilx(self, ilx_ids):
''' Checks list of objects to see if they are usable ILX IDs '''
total_data = []
for ilx_id in ilx_ids:
ilx_id = ilx_id.replace('http', '').replace('.', '').replace('/', '')
data, success = self.get_data_from_ilx(ilx_id)
if success:
total_data.append(data['data'])
else:
total_data.append({})
return total_data | [
"def",
"are_ilx",
"(",
"self",
",",
"ilx_ids",
")",
":",
"total_data",
"=",
"[",
"]",
"for",
"ilx_id",
"in",
"ilx_ids",
":",
"ilx_id",
"=",
"ilx_id",
".",
"replace",
"(",
"'http'",
",",
"''",
")",
".",
"replace",
"(",
"'.'",
",",
"''",
")",
".",
... | Checks list of objects to see if they are usable ILX IDs | [
"Checks",
"list",
"of",
"objects",
"to",
"see",
"if",
"they",
"are",
"usable",
"ILX",
"IDs"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L168-L178 | train | 39,049 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.add_triple | def add_triple(self, subj, pred, obj):
''' Adds an entity property to an existing entity '''
subj_data, pred_data, obj_data = self.are_ilx([subj, pred, obj])
# RELATIONSHIP PROPERTY
if subj_data.get('id') and pred_data.get('id') and obj_data.get('id'):
if pred_data['type'] != 'relationship':
return self.test_check('Adding a relationship as formate \
"term1_ilx relationship_ilx term2_ilx"')
return self.add_relationship(term1=subj_data,
relationship=pred_data,
term2=obj_data)
# ANNOTATION PROPERTY
elif subj_data.get('id') and pred_data.get('id'):
if pred_data['type'] != 'annotation':
return self.test_check('Adding a relationship as formate \
"term_ilx annotation_ilx value"')
return self.add_annotation(entity=subj_data,
annotation=pred_data,
value=obj)
# UPDATE ENTITY
elif subj_data.get('id'):
data = subj_data
_pred = self.ttl2sci_map.get(pred)
if not _pred:
error = pred + " doesnt not have correct RDF format or It is not an option"
return self.test_check(error)
data = self.custom_update(data, _pred, obj)
if data == 'failed': # for debugging custom_update
return data
data = superclasses_bug_fix(data)
url_base = self.base_path + 'term/edit/{id}'
url = url_base.format(id=data['id'])
return self.post(url, data)
else:
return self.test_check('The ILX ID(s) provided do not exist') | python | def add_triple(self, subj, pred, obj):
''' Adds an entity property to an existing entity '''
subj_data, pred_data, obj_data = self.are_ilx([subj, pred, obj])
# RELATIONSHIP PROPERTY
if subj_data.get('id') and pred_data.get('id') and obj_data.get('id'):
if pred_data['type'] != 'relationship':
return self.test_check('Adding a relationship as formate \
"term1_ilx relationship_ilx term2_ilx"')
return self.add_relationship(term1=subj_data,
relationship=pred_data,
term2=obj_data)
# ANNOTATION PROPERTY
elif subj_data.get('id') and pred_data.get('id'):
if pred_data['type'] != 'annotation':
return self.test_check('Adding a relationship as formate \
"term_ilx annotation_ilx value"')
return self.add_annotation(entity=subj_data,
annotation=pred_data,
value=obj)
# UPDATE ENTITY
elif subj_data.get('id'):
data = subj_data
_pred = self.ttl2sci_map.get(pred)
if not _pred:
error = pred + " doesnt not have correct RDF format or It is not an option"
return self.test_check(error)
data = self.custom_update(data, _pred, obj)
if data == 'failed': # for debugging custom_update
return data
data = superclasses_bug_fix(data)
url_base = self.base_path + 'term/edit/{id}'
url = url_base.format(id=data['id'])
return self.post(url, data)
else:
return self.test_check('The ILX ID(s) provided do not exist') | [
"def",
"add_triple",
"(",
"self",
",",
"subj",
",",
"pred",
",",
"obj",
")",
":",
"subj_data",
",",
"pred_data",
",",
"obj_data",
"=",
"self",
".",
"are_ilx",
"(",
"[",
"subj",
",",
"pred",
",",
"obj",
"]",
")",
"# RELATIONSHIP PROPERTY",
"if",
"subj_d... | Adds an entity property to an existing entity | [
"Adds",
"an",
"entity",
"property",
"to",
"an",
"existing",
"entity"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L180-L214 | train | 39,050 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.add_relationship | def add_relationship(self, term1, relationship, term2):
''' Creates a relationship between 3 entities in database '''
url = self.base_path + 'term/add-relationship'
data = {'term1_id': term1['id'],
'relationship_tid': relationship['id'],
'term2_id': term2['id'],
'term1_version': term1['version'],
'relationship_term_version': relationship['version'],
'term2_version': term2['version']}
return self.post(url, data) | python | def add_relationship(self, term1, relationship, term2):
''' Creates a relationship between 3 entities in database '''
url = self.base_path + 'term/add-relationship'
data = {'term1_id': term1['id'],
'relationship_tid': relationship['id'],
'term2_id': term2['id'],
'term1_version': term1['version'],
'relationship_term_version': relationship['version'],
'term2_version': term2['version']}
return self.post(url, data) | [
"def",
"add_relationship",
"(",
"self",
",",
"term1",
",",
"relationship",
",",
"term2",
")",
":",
"url",
"=",
"self",
".",
"base_path",
"+",
"'term/add-relationship'",
"data",
"=",
"{",
"'term1_id'",
":",
"term1",
"[",
"'id'",
"]",
",",
"'relationship_tid'"... | Creates a relationship between 3 entities in database | [
"Creates",
"a",
"relationship",
"between",
"3",
"entities",
"in",
"database"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L216-L225 | train | 39,051 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.add_annotation | def add_annotation(self, entity, annotation, value):
''' Adds an annotation proprty to existing entity '''
url = self.base_path + 'term/add-annotation'
data = {'tid': entity['id'],
'annotation_tid': annotation['id'],
'value': value,
'term_version': entity['version'],
'annotation_term_version': annotation['version']}
return self.post(url, data) | python | def add_annotation(self, entity, annotation, value):
''' Adds an annotation proprty to existing entity '''
url = self.base_path + 'term/add-annotation'
data = {'tid': entity['id'],
'annotation_tid': annotation['id'],
'value': value,
'term_version': entity['version'],
'annotation_term_version': annotation['version']}
return self.post(url, data) | [
"def",
"add_annotation",
"(",
"self",
",",
"entity",
",",
"annotation",
",",
"value",
")",
":",
"url",
"=",
"self",
".",
"base_path",
"+",
"'term/add-annotation'",
"data",
"=",
"{",
"'tid'",
":",
"entity",
"[",
"'id'",
"]",
",",
"'annotation_tid'",
":",
... | Adds an annotation proprty to existing entity | [
"Adds",
"an",
"annotation",
"proprty",
"to",
"existing",
"entity"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L227-L235 | train | 39,052 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.custom_update | def custom_update(self, data, pred, obj):
''' Updates existing entity proprty based on the predicate input '''
if isinstance(data[pred], str): # for all simple properties of str value
data[pred] = str(obj)
else: # synonyms, superclasses, and existing_ids have special requirements
if pred == 'synonyms':
literals = [d['literal'] for d in data[pred]]
if obj not in literals:
data[pred].append({'literal': obj}) # synonyms req for post
elif pred == 'superclasses':
ilx_ids = [d['ilx'] for d in data[pred]]
if obj not in ilx_ids:
_obj = obj.replace('ILX:', 'ilx_')
super_data, success = self.get_data_from_ilx(ilx_id=_obj)
super_data = super_data['data']
if success:
# superclass req post
data[pred].append({'id': super_data['id'], 'ilx': _obj})
else:
return self.test_check('Your superclass ILX ID '
+ _obj + ' does not exist.')
elif pred == 'existing_ids': # FIXME need to autogenerate curies from a map
iris = [d['iri'] for d in data[pred]]
if obj not in iris:
if 'http' not in obj:
return self.test_check('exisiting id value must \
be a uri containing "http"')
data[pred].append({
'curie': self.qname(obj),
'iri': obj,
'preferred': '0' # preferred is auto generated by preferred_change
})
#data[pred] = []
data = self.preferred_change(data) # One ex id is determined to be preferred
else:
# Somehow broke this code
return self.test_check(pred + ' Has slipped through the cracks')
return data | python | def custom_update(self, data, pred, obj):
''' Updates existing entity proprty based on the predicate input '''
if isinstance(data[pred], str): # for all simple properties of str value
data[pred] = str(obj)
else: # synonyms, superclasses, and existing_ids have special requirements
if pred == 'synonyms':
literals = [d['literal'] for d in data[pred]]
if obj not in literals:
data[pred].append({'literal': obj}) # synonyms req for post
elif pred == 'superclasses':
ilx_ids = [d['ilx'] for d in data[pred]]
if obj not in ilx_ids:
_obj = obj.replace('ILX:', 'ilx_')
super_data, success = self.get_data_from_ilx(ilx_id=_obj)
super_data = super_data['data']
if success:
# superclass req post
data[pred].append({'id': super_data['id'], 'ilx': _obj})
else:
return self.test_check('Your superclass ILX ID '
+ _obj + ' does not exist.')
elif pred == 'existing_ids': # FIXME need to autogenerate curies from a map
iris = [d['iri'] for d in data[pred]]
if obj not in iris:
if 'http' not in obj:
return self.test_check('exisiting id value must \
be a uri containing "http"')
data[pred].append({
'curie': self.qname(obj),
'iri': obj,
'preferred': '0' # preferred is auto generated by preferred_change
})
#data[pred] = []
data = self.preferred_change(data) # One ex id is determined to be preferred
else:
# Somehow broke this code
return self.test_check(pred + ' Has slipped through the cracks')
return data | [
"def",
"custom_update",
"(",
"self",
",",
"data",
",",
"pred",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"data",
"[",
"pred",
"]",
",",
"str",
")",
":",
"# for all simple properties of str value",
"data",
"[",
"pred",
"]",
"=",
"str",
"(",
"obj",
... | Updates existing entity proprty based on the predicate input | [
"Updates",
"existing",
"entity",
"proprty",
"based",
"on",
"the",
"predicate",
"input"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L237-L274 | train | 39,053 |
tgbugs/pyontutils | ttlser/ttlser/serializers.py | CustomTurtleSerializer.sortProperties | def sortProperties(self, properties): # modified to sort objects using their global rank
"""Take a hash from predicate uris to lists of values.
Sort the lists of values. Return a sorted list of properties."""
# Sort object lists
for prop, objects in properties.items():
objects.sort(key=self._globalSortKey)
# Make sorted list of properties
return sorted(properties, key=lambda p: self.predicate_rank[p]) | python | def sortProperties(self, properties): # modified to sort objects using their global rank
"""Take a hash from predicate uris to lists of values.
Sort the lists of values. Return a sorted list of properties."""
# Sort object lists
for prop, objects in properties.items():
objects.sort(key=self._globalSortKey)
# Make sorted list of properties
return sorted(properties, key=lambda p: self.predicate_rank[p]) | [
"def",
"sortProperties",
"(",
"self",
",",
"properties",
")",
":",
"# modified to sort objects using their global rank",
"# Sort object lists",
"for",
"prop",
",",
"objects",
"in",
"properties",
".",
"items",
"(",
")",
":",
"objects",
".",
"sort",
"(",
"key",
"=",... | Take a hash from predicate uris to lists of values.
Sort the lists of values. Return a sorted list of properties. | [
"Take",
"a",
"hash",
"from",
"predicate",
"uris",
"to",
"lists",
"of",
"values",
".",
"Sort",
"the",
"lists",
"of",
"values",
".",
"Return",
"a",
"sorted",
"list",
"of",
"properties",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ttlser/ttlser/serializers.py#L561-L569 | train | 39,054 |
tgbugs/pyontutils | ttlser/ttlser/serializers.py | CustomTurtleSerializer._buildPredicateHash | def _buildPredicateHash(self, subject): # XXX unmodified
"""
Build a hash key by predicate to a list of objects for the given
subject
"""
properties = {}
for s, p, o in self.store.triples((subject, None, None)):
oList = properties.get(p, [])
oList.append(o)
properties[p] = oList
return properties | python | def _buildPredicateHash(self, subject): # XXX unmodified
"""
Build a hash key by predicate to a list of objects for the given
subject
"""
properties = {}
for s, p, o in self.store.triples((subject, None, None)):
oList = properties.get(p, [])
oList.append(o)
properties[p] = oList
return properties | [
"def",
"_buildPredicateHash",
"(",
"self",
",",
"subject",
")",
":",
"# XXX unmodified",
"properties",
"=",
"{",
"}",
"for",
"s",
",",
"p",
",",
"o",
"in",
"self",
".",
"store",
".",
"triples",
"(",
"(",
"subject",
",",
"None",
",",
"None",
")",
")",... | Build a hash key by predicate to a list of objects for the given
subject | [
"Build",
"a",
"hash",
"key",
"by",
"predicate",
"to",
"a",
"list",
"of",
"objects",
"for",
"the",
"given",
"subject"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ttlser/ttlser/serializers.py#L571-L582 | train | 39,055 |
tgbugs/pyontutils | ttlser/ttlser/serializers.py | CustomTurtleSerializer.isValidList | def isValidList(self, l): # modified to flatten lists specified using [ a rdf:List; ] syntax
"""
Checks if l is a valid RDF list, i.e. no nodes have other properties.
"""
try:
if self.store.value(l, RDF.first) is None:
return False
except:
return False
while l:
if l != RDF.nil:
po = list(self.store.predicate_objects(l))
if (RDF.type, RDF.List) in po and len(po) == 3:
pass
elif len(po) != 2:
return False
l = self.store.value(l, RDF.rest)
return True | python | def isValidList(self, l): # modified to flatten lists specified using [ a rdf:List; ] syntax
"""
Checks if l is a valid RDF list, i.e. no nodes have other properties.
"""
try:
if self.store.value(l, RDF.first) is None:
return False
except:
return False
while l:
if l != RDF.nil:
po = list(self.store.predicate_objects(l))
if (RDF.type, RDF.List) in po and len(po) == 3:
pass
elif len(po) != 2:
return False
l = self.store.value(l, RDF.rest)
return True | [
"def",
"isValidList",
"(",
"self",
",",
"l",
")",
":",
"# modified to flatten lists specified using [ a rdf:List; ] syntax",
"try",
":",
"if",
"self",
".",
"store",
".",
"value",
"(",
"l",
",",
"RDF",
".",
"first",
")",
"is",
"None",
":",
"return",
"False",
... | Checks if l is a valid RDF list, i.e. no nodes have other properties. | [
"Checks",
"if",
"l",
"is",
"a",
"valid",
"RDF",
"list",
"i",
".",
"e",
".",
"no",
"nodes",
"have",
"other",
"properties",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ttlser/ttlser/serializers.py#L620-L637 | train | 39,056 |
tgbugs/pyontutils | ttlser/ttlser/serializers.py | CustomTurtleSerializer._write | def _write(self, value):
""" rename to write and import inspect to debut the callstack """
if ' ' in value:
s = inspect.stack()
fn = s[1].function
super().write('%%DEBUG {} %%'.format(fn))
super().write(value) | python | def _write(self, value):
""" rename to write and import inspect to debut the callstack """
if ' ' in value:
s = inspect.stack()
fn = s[1].function
super().write('%%DEBUG {} %%'.format(fn))
super().write(value) | [
"def",
"_write",
"(",
"self",
",",
"value",
")",
":",
"if",
"' '",
"in",
"value",
":",
"s",
"=",
"inspect",
".",
"stack",
"(",
")",
"fn",
"=",
"s",
"[",
"1",
"]",
".",
"function",
"super",
"(",
")",
".",
"write",
"(",
"'%%DEBUG {} %%'",
".",
"f... | rename to write and import inspect to debut the callstack | [
"rename",
"to",
"write",
"and",
"import",
"inspect",
"to",
"debut",
"the",
"callstack"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ttlser/ttlser/serializers.py#L737-L743 | train | 39,057 |
tgbugs/pyontutils | ttlser/ttlser/serializers.py | HtmlTurtleSerializer.serialize | def serialize(self, *args, **kwargs):
""" Modified to allow additional labels to be passed in. """
if 'labels' in kwargs:
# populate labels from outside the local graph
self._labels.update(kwargs['labels'])
super(HtmlTurtleSerializer, self).serialize(*args, **kwargs) | python | def serialize(self, *args, **kwargs):
""" Modified to allow additional labels to be passed in. """
if 'labels' in kwargs:
# populate labels from outside the local graph
self._labels.update(kwargs['labels'])
super(HtmlTurtleSerializer, self).serialize(*args, **kwargs) | [
"def",
"serialize",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'labels'",
"in",
"kwargs",
":",
"# populate labels from outside the local graph",
"self",
".",
"_labels",
".",
"update",
"(",
"kwargs",
"[",
"'labels'",
"]",
")",
... | Modified to allow additional labels to be passed in. | [
"Modified",
"to",
"allow",
"additional",
"labels",
"to",
"be",
"passed",
"in",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ttlser/ttlser/serializers.py#L818-L823 | train | 39,058 |
tgbugs/pyontutils | neurondm/neurondm/core.py | addLNT | def addLNT(LocalName, phenoId, predicate, g=None): # XXX deprecated
""" Add a local name for a phenotype from a pair of identifiers """
if g is None:
s = inspect.stack(0) # horribly inefficient
checkCalledInside('LocalNameManager', s)
g = s[1][0].f_locals # get globals of calling scope
addLN(LocalName, Phenotype(phenoId, predicate), g) | python | def addLNT(LocalName, phenoId, predicate, g=None): # XXX deprecated
""" Add a local name for a phenotype from a pair of identifiers """
if g is None:
s = inspect.stack(0) # horribly inefficient
checkCalledInside('LocalNameManager', s)
g = s[1][0].f_locals # get globals of calling scope
addLN(LocalName, Phenotype(phenoId, predicate), g) | [
"def",
"addLNT",
"(",
"LocalName",
",",
"phenoId",
",",
"predicate",
",",
"g",
"=",
"None",
")",
":",
"# XXX deprecated",
"if",
"g",
"is",
"None",
":",
"s",
"=",
"inspect",
".",
"stack",
"(",
"0",
")",
"# horribly inefficient",
"checkCalledInside",
"(",
... | Add a local name for a phenotype from a pair of identifiers | [
"Add",
"a",
"local",
"name",
"for",
"a",
"phenotype",
"from",
"a",
"pair",
"of",
"identifiers"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/neurondm/neurondm/core.py#L2468-L2474 | train | 39,059 |
tgbugs/pyontutils | neurondm/neurondm/core.py | Config.load_existing | def load_existing(self):
""" advanced usage allows loading multiple sets of neurons and using a config
object to keep track of the different graphs """
from pyontutils.closed_namespaces import rdfs
# bag existing
try:
next(iter(self.neurons()))
raise self.ExistingNeuronsError('Existing neurons detected. Please '
'load from file before creating neurons!')
except StopIteration:
pass
def getClassType(s):
graph = self.load_graph
Class = infixowl.Class(s, graph=graph)
for ec in Class.equivalentClass:
if isinstance(ec.identifier, rdflib.BNode):
bc = infixowl.CastClass(ec, graph=graph)
if isinstance(bc, infixowl.BooleanClass):
for id_ in bc._rdfList:
if isinstance(id_, rdflib.URIRef):
yield id_ # its one of our types
# bug is that I am not wiping graphBase.knownClasses and swapping it for each config
# OR the bug is that self.load_graph is persisting, either way the call to type()
# below seems to be the primary suspect for the issue
if not graphBase.ignore_existing:
ogp = Path(graphBase.ng.filename) # FIXME ng.filename <-> out_graph_path property ...
if ogp.exists():
from itertools import chain
from rdflib import Graph # FIXME
self.load_graph = Graph().parse(graphBase.ng.filename, format='turtle')
graphBase.load_graph = self.load_graph
# FIXME memory inefficiency here ...
_ = [graphBase.in_graph.add(t) for t in graphBase.load_graph] # FIXME use conjuctive ...
if len(graphBase.python_subclasses) == 2: # FIXME magic number for Neuron and NeuronCUT
ebms = [type(OntId(s).suffix, (NeuronCUT,), dict(owlClass=s))
for s in self.load_graph[:rdfs.subClassOf:NeuronEBM.owlClass]
if not graphBase.knownClasses.append(s)]
else:
ebms = []
class_types = [(type, s) for s in self.load_graph[:rdf.type:owl.Class]
for type in getClassType(s) if type]
sc = None
for sc in chain(graphBase.python_subclasses, ebms):
sc.owlClass
iris = [s for type, s in class_types if type == sc.owlClass]
if iris:
sc._load_existing(iris)
if sc is None:
raise ImportError(f'Failed to find any neurons to load in {graphBase.ng.filename}') | python | def load_existing(self):
""" advanced usage allows loading multiple sets of neurons and using a config
object to keep track of the different graphs """
from pyontutils.closed_namespaces import rdfs
# bag existing
try:
next(iter(self.neurons()))
raise self.ExistingNeuronsError('Existing neurons detected. Please '
'load from file before creating neurons!')
except StopIteration:
pass
def getClassType(s):
graph = self.load_graph
Class = infixowl.Class(s, graph=graph)
for ec in Class.equivalentClass:
if isinstance(ec.identifier, rdflib.BNode):
bc = infixowl.CastClass(ec, graph=graph)
if isinstance(bc, infixowl.BooleanClass):
for id_ in bc._rdfList:
if isinstance(id_, rdflib.URIRef):
yield id_ # its one of our types
# bug is that I am not wiping graphBase.knownClasses and swapping it for each config
# OR the bug is that self.load_graph is persisting, either way the call to type()
# below seems to be the primary suspect for the issue
if not graphBase.ignore_existing:
ogp = Path(graphBase.ng.filename) # FIXME ng.filename <-> out_graph_path property ...
if ogp.exists():
from itertools import chain
from rdflib import Graph # FIXME
self.load_graph = Graph().parse(graphBase.ng.filename, format='turtle')
graphBase.load_graph = self.load_graph
# FIXME memory inefficiency here ...
_ = [graphBase.in_graph.add(t) for t in graphBase.load_graph] # FIXME use conjuctive ...
if len(graphBase.python_subclasses) == 2: # FIXME magic number for Neuron and NeuronCUT
ebms = [type(OntId(s).suffix, (NeuronCUT,), dict(owlClass=s))
for s in self.load_graph[:rdfs.subClassOf:NeuronEBM.owlClass]
if not graphBase.knownClasses.append(s)]
else:
ebms = []
class_types = [(type, s) for s in self.load_graph[:rdf.type:owl.Class]
for type in getClassType(s) if type]
sc = None
for sc in chain(graphBase.python_subclasses, ebms):
sc.owlClass
iris = [s for type, s in class_types if type == sc.owlClass]
if iris:
sc._load_existing(iris)
if sc is None:
raise ImportError(f'Failed to find any neurons to load in {graphBase.ng.filename}') | [
"def",
"load_existing",
"(",
"self",
")",
":",
"from",
"pyontutils",
".",
"closed_namespaces",
"import",
"rdfs",
"# bag existing",
"try",
":",
"next",
"(",
"iter",
"(",
"self",
".",
"neurons",
"(",
")",
")",
")",
"raise",
"self",
".",
"ExistingNeuronsError",... | advanced usage allows loading multiple sets of neurons and using a config
object to keep track of the different graphs | [
"advanced",
"usage",
"allows",
"loading",
"multiple",
"sets",
"of",
"neurons",
"and",
"using",
"a",
"config",
"object",
"to",
"keep",
"track",
"of",
"the",
"different",
"graphs"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/neurondm/neurondm/core.py#L583-L636 | train | 39,060 |
tgbugs/pyontutils | neurondm/neurondm/core.py | graphBase.label_maker | def label_maker(self):
""" needed to defer loading of local conventions to avoid circular dependency issue """
if (not hasattr(graphBase, '_label_maker') or
graphBase._label_maker.local_conventions != graphBase.local_conventions):
graphBase._label_maker = LabelMaker(graphBase.local_conventions)
return graphBase._label_maker | python | def label_maker(self):
""" needed to defer loading of local conventions to avoid circular dependency issue """
if (not hasattr(graphBase, '_label_maker') or
graphBase._label_maker.local_conventions != graphBase.local_conventions):
graphBase._label_maker = LabelMaker(graphBase.local_conventions)
return graphBase._label_maker | [
"def",
"label_maker",
"(",
"self",
")",
":",
"if",
"(",
"not",
"hasattr",
"(",
"graphBase",
",",
"'_label_maker'",
")",
"or",
"graphBase",
".",
"_label_maker",
".",
"local_conventions",
"!=",
"graphBase",
".",
"local_conventions",
")",
":",
"graphBase",
".",
... | needed to defer loading of local conventions to avoid circular dependency issue | [
"needed",
"to",
"defer",
"loading",
"of",
"local",
"conventions",
"to",
"avoid",
"circular",
"dependency",
"issue"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/neurondm/neurondm/core.py#L1123-L1129 | train | 39,061 |
tgbugs/pyontutils | neurondm/neurondm/core.py | Neuron._graphify | def _graphify(self, *args, graph=None): # defined
""" Lift phenotypeEdges to Restrictions """
if graph is None:
graph = self.out_graph
################## LABELS ARE DEFINED HERE ##################
gl = self.genLabel
ll = self.localLabel
ol = self.origLabel
graph.add((self.id_, ilxtr.genLabel, rdflib.Literal(gl)))
if ll != gl:
graph.add((self.id_, ilxtr.localLabel, rdflib.Literal(ll)))
if ol and ol != gl:
graph.add((self.id_, ilxtr.origLabel, rdflib.Literal(ol)))
members = [self.expand(self.owlClass)]
for pe in self.pes:
target = pe._graphify(graph=graph)
if isinstance(pe, NegPhenotype): # isinstance will match NegPhenotype -> Phenotype
#self.Class.disjointWith = [target] # FIXME for defined neurons this is what we need and I think it is strong than the complementOf version
djc = infixowl.Class(graph=graph) # TODO for generic neurons this is what we need
djc.complementOf = target
members.append(djc)
else:
members.append(target) # FIXME negative logical phenotypes :/
intersection = infixowl.BooleanClass(members=members, graph=graph) # FIXME dupes
#existing = list(self.Class.equivalentClass)
#if existing or str(pe.pLabel) == 'Htr3a':
#embed()
ec = [intersection]
self.Class.equivalentClass = ec
return self.Class | python | def _graphify(self, *args, graph=None): # defined
""" Lift phenotypeEdges to Restrictions """
if graph is None:
graph = self.out_graph
################## LABELS ARE DEFINED HERE ##################
gl = self.genLabel
ll = self.localLabel
ol = self.origLabel
graph.add((self.id_, ilxtr.genLabel, rdflib.Literal(gl)))
if ll != gl:
graph.add((self.id_, ilxtr.localLabel, rdflib.Literal(ll)))
if ol and ol != gl:
graph.add((self.id_, ilxtr.origLabel, rdflib.Literal(ol)))
members = [self.expand(self.owlClass)]
for pe in self.pes:
target = pe._graphify(graph=graph)
if isinstance(pe, NegPhenotype): # isinstance will match NegPhenotype -> Phenotype
#self.Class.disjointWith = [target] # FIXME for defined neurons this is what we need and I think it is strong than the complementOf version
djc = infixowl.Class(graph=graph) # TODO for generic neurons this is what we need
djc.complementOf = target
members.append(djc)
else:
members.append(target) # FIXME negative logical phenotypes :/
intersection = infixowl.BooleanClass(members=members, graph=graph) # FIXME dupes
#existing = list(self.Class.equivalentClass)
#if existing or str(pe.pLabel) == 'Htr3a':
#embed()
ec = [intersection]
self.Class.equivalentClass = ec
return self.Class | [
"def",
"_graphify",
"(",
"self",
",",
"*",
"args",
",",
"graph",
"=",
"None",
")",
":",
"# defined",
"if",
"graph",
"is",
"None",
":",
"graph",
"=",
"self",
".",
"out_graph",
"################## LABELS ARE DEFINED HERE ##################",
"gl",
"=",
"self",
... | Lift phenotypeEdges to Restrictions | [
"Lift",
"phenotypeEdges",
"to",
"Restrictions"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/neurondm/neurondm/core.py#L2131-L2163 | train | 39,062 |
tgbugs/pyontutils | pyontutils/core.py | qname | def qname(uri, warning=False):
""" compute qname from defaults """
if warning:
print(tc.red('WARNING:'), tc.yellow(f'qname({uri}) is deprecated! please use OntId({uri}).curie'))
return __helper_graph.qname(uri) | python | def qname(uri, warning=False):
""" compute qname from defaults """
if warning:
print(tc.red('WARNING:'), tc.yellow(f'qname({uri}) is deprecated! please use OntId({uri}).curie'))
return __helper_graph.qname(uri) | [
"def",
"qname",
"(",
"uri",
",",
"warning",
"=",
"False",
")",
":",
"if",
"warning",
":",
"print",
"(",
"tc",
".",
"red",
"(",
"'WARNING:'",
")",
",",
"tc",
".",
"yellow",
"(",
"f'qname({uri}) is deprecated! please use OntId({uri}).curie'",
")",
")",
"return... | compute qname from defaults | [
"compute",
"qname",
"from",
"defaults"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L447-L451 | train | 39,063 |
tgbugs/pyontutils | pyontutils/core.py | cull_prefixes | def cull_prefixes(graph, prefixes={k:v for k, v in uPREFIXES.items() if k != 'NIFTTL'},
cleanup=lambda ps, graph: None, keep=False):
""" Remove unused curie prefixes and normalize to a standard set. """
prefs = ['']
if keep:
prefixes.update({p:str(n) for p, n in graph.namespaces()})
if '' not in prefixes:
prefixes[''] = null_prefix # null prefix
pi = {v:k for k, v in prefixes.items()}
asdf = {} #{v:k for k, v in ps.items()}
asdf.update(pi)
# determine which prefixes we need
for uri in set((e for t in graph for e in t)):
if uri.endswith('.owl') or uri.endswith('.ttl') or uri.endswith('$$ID$$'):
continue # don't prefix imports or templates
for rn, rp in sorted(asdf.items(), key=lambda a: -len(a[0])): # make sure we get longest first
lrn = len(rn)
if type(uri) == rdflib.BNode:
continue
elif uri.startswith(rn) and '#' not in uri[lrn:] and '/' not in uri[lrn:]: # prevent prefixing when there is another sep
prefs.append(rp)
break
ps = {p:prefixes[p] for p in prefs}
cleanup(ps, graph)
ng = makeGraph('', prefixes=ps)
[ng.g.add(t) for t in graph]
return ng | python | def cull_prefixes(graph, prefixes={k:v for k, v in uPREFIXES.items() if k != 'NIFTTL'},
cleanup=lambda ps, graph: None, keep=False):
""" Remove unused curie prefixes and normalize to a standard set. """
prefs = ['']
if keep:
prefixes.update({p:str(n) for p, n in graph.namespaces()})
if '' not in prefixes:
prefixes[''] = null_prefix # null prefix
pi = {v:k for k, v in prefixes.items()}
asdf = {} #{v:k for k, v in ps.items()}
asdf.update(pi)
# determine which prefixes we need
for uri in set((e for t in graph for e in t)):
if uri.endswith('.owl') or uri.endswith('.ttl') or uri.endswith('$$ID$$'):
continue # don't prefix imports or templates
for rn, rp in sorted(asdf.items(), key=lambda a: -len(a[0])): # make sure we get longest first
lrn = len(rn)
if type(uri) == rdflib.BNode:
continue
elif uri.startswith(rn) and '#' not in uri[lrn:] and '/' not in uri[lrn:]: # prevent prefixing when there is another sep
prefs.append(rp)
break
ps = {p:prefixes[p] for p in prefs}
cleanup(ps, graph)
ng = makeGraph('', prefixes=ps)
[ng.g.add(t) for t in graph]
return ng | [
"def",
"cull_prefixes",
"(",
"graph",
",",
"prefixes",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"uPREFIXES",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"'NIFTTL'",
"}",
",",
"cleanup",
"=",
"lambda",
"ps",
",",
"graph",
":",
"None",
","... | Remove unused curie prefixes and normalize to a standard set. | [
"Remove",
"unused",
"curie",
"prefixes",
"and",
"normalize",
"to",
"a",
"standard",
"set",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L455-L486 | train | 39,064 |
tgbugs/pyontutils | pyontutils/core.py | displayTriples | def displayTriples(triples, qname=qname):
""" triples can also be an rdflib Graph instance """
[print(*(e[:5]
if isinstance(e, rdflib.BNode) else
qname(e)
for e in t), '.')
for t in sorted(triples)] | python | def displayTriples(triples, qname=qname):
""" triples can also be an rdflib Graph instance """
[print(*(e[:5]
if isinstance(e, rdflib.BNode) else
qname(e)
for e in t), '.')
for t in sorted(triples)] | [
"def",
"displayTriples",
"(",
"triples",
",",
"qname",
"=",
"qname",
")",
":",
"[",
"print",
"(",
"*",
"(",
"e",
"[",
":",
"5",
"]",
"if",
"isinstance",
"(",
"e",
",",
"rdflib",
".",
"BNode",
")",
"else",
"qname",
"(",
"e",
")",
"for",
"e",
"in... | triples can also be an rdflib Graph instance | [
"triples",
"can",
"also",
"be",
"an",
"rdflib",
"Graph",
"instance"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L1171-L1177 | train | 39,065 |
tgbugs/pyontutils | pyontutils/core.py | makeGraph.write | def write(self, cull=False):
""" Serialize self.g and write to self.filename, set cull to true to remove unwanted prefixes """
if cull:
cull_prefixes(self).write()
else:
ser = self.g.serialize(format='nifttl')
with open(self.filename, 'wb') as f:
f.write(ser) | python | def write(self, cull=False):
""" Serialize self.g and write to self.filename, set cull to true to remove unwanted prefixes """
if cull:
cull_prefixes(self).write()
else:
ser = self.g.serialize(format='nifttl')
with open(self.filename, 'wb') as f:
f.write(ser) | [
"def",
"write",
"(",
"self",
",",
"cull",
"=",
"False",
")",
":",
"if",
"cull",
":",
"cull_prefixes",
"(",
"self",
")",
".",
"write",
"(",
")",
"else",
":",
"ser",
"=",
"self",
".",
"g",
".",
"serialize",
"(",
"format",
"=",
"'nifttl'",
")",
"wit... | Serialize self.g and write to self.filename, set cull to true to remove unwanted prefixes | [
"Serialize",
"self",
".",
"g",
"and",
"write",
"to",
"self",
".",
"filename",
"set",
"cull",
"to",
"true",
"to",
"remove",
"unwanted",
"prefixes"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L177-L184 | train | 39,066 |
tgbugs/pyontutils | pyontutils/core.py | makeGraph.add_hierarchy | def add_hierarchy(self, parent, edge, child): # XXX DEPRECATED
""" Helper function to simplify the addition of part_of style
objectProperties to graphs. FIXME make a method of makeGraph?
"""
if type(parent) != rdflib.URIRef:
parent = self.check_thing(parent)
if type(edge) != rdflib.URIRef:
edge = self.check_thing(edge)
if type(child) != infixowl.Class:
if type(child) != rdflib.URIRef:
child = self.check_thing(child)
child = infixowl.Class(child, graph=self.g)
restriction = infixowl.Restriction(edge, graph=self.g, someValuesFrom=parent)
child.subClassOf = [restriction] + [c for c in child.subClassOf] | python | def add_hierarchy(self, parent, edge, child): # XXX DEPRECATED
""" Helper function to simplify the addition of part_of style
objectProperties to graphs. FIXME make a method of makeGraph?
"""
if type(parent) != rdflib.URIRef:
parent = self.check_thing(parent)
if type(edge) != rdflib.URIRef:
edge = self.check_thing(edge)
if type(child) != infixowl.Class:
if type(child) != rdflib.URIRef:
child = self.check_thing(child)
child = infixowl.Class(child, graph=self.g)
restriction = infixowl.Restriction(edge, graph=self.g, someValuesFrom=parent)
child.subClassOf = [restriction] + [c for c in child.subClassOf] | [
"def",
"add_hierarchy",
"(",
"self",
",",
"parent",
",",
"edge",
",",
"child",
")",
":",
"# XXX DEPRECATED",
"if",
"type",
"(",
"parent",
")",
"!=",
"rdflib",
".",
"URIRef",
":",
"parent",
"=",
"self",
".",
"check_thing",
"(",
"parent",
")",
"if",
"typ... | Helper function to simplify the addition of part_of style
objectProperties to graphs. FIXME make a method of makeGraph? | [
"Helper",
"function",
"to",
"simplify",
"the",
"addition",
"of",
"part_of",
"style",
"objectProperties",
"to",
"graphs",
".",
"FIXME",
"make",
"a",
"method",
"of",
"makeGraph?"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L282-L298 | train | 39,067 |
tgbugs/pyontutils | pyontutils/core.py | makeGraph.add_restriction | def add_restriction(self, subject, predicate, object_):
""" Lift normal triples into restrictions using someValuesFrom. """
if type(object_) != rdflib.URIRef:
object_ = self.check_thing(object_)
if type(predicate) != rdflib.URIRef:
predicate = self.check_thing(predicate)
if type(subject) != infixowl.Class:
if type(subject) != rdflib.URIRef:
subject = self.check_thing(subject)
subject = infixowl.Class(subject, graph=self.g)
restriction = infixowl.Restriction(predicate, graph=self.g, someValuesFrom=object_)
subject.subClassOf = [restriction] + [c for c in subject.subClassOf] | python | def add_restriction(self, subject, predicate, object_):
""" Lift normal triples into restrictions using someValuesFrom. """
if type(object_) != rdflib.URIRef:
object_ = self.check_thing(object_)
if type(predicate) != rdflib.URIRef:
predicate = self.check_thing(predicate)
if type(subject) != infixowl.Class:
if type(subject) != rdflib.URIRef:
subject = self.check_thing(subject)
subject = infixowl.Class(subject, graph=self.g)
restriction = infixowl.Restriction(predicate, graph=self.g, someValuesFrom=object_)
subject.subClassOf = [restriction] + [c for c in subject.subClassOf] | [
"def",
"add_restriction",
"(",
"self",
",",
"subject",
",",
"predicate",
",",
"object_",
")",
":",
"if",
"type",
"(",
"object_",
")",
"!=",
"rdflib",
".",
"URIRef",
":",
"object_",
"=",
"self",
".",
"check_thing",
"(",
"object_",
")",
"if",
"type",
"("... | Lift normal triples into restrictions using someValuesFrom. | [
"Lift",
"normal",
"triples",
"into",
"restrictions",
"using",
"someValuesFrom",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L300-L314 | train | 39,068 |
tgbugs/pyontutils | pyontutils/core.py | makeGraph.qname | def qname(self, uri, generate=False):
""" Given a uri return the qname if it exists, otherwise return the uri. """
try:
prefix, namespace, name = self.g.namespace_manager.compute_qname(uri, generate=generate)
qname = ':'.join((prefix, name))
return qname
except (KeyError, ValueError) as e:
return uri.toPython() if isinstance(uri, rdflib.URIRef) else uri | python | def qname(self, uri, generate=False):
""" Given a uri return the qname if it exists, otherwise return the uri. """
try:
prefix, namespace, name = self.g.namespace_manager.compute_qname(uri, generate=generate)
qname = ':'.join((prefix, name))
return qname
except (KeyError, ValueError) as e:
return uri.toPython() if isinstance(uri, rdflib.URIRef) else uri | [
"def",
"qname",
"(",
"self",
",",
"uri",
",",
"generate",
"=",
"False",
")",
":",
"try",
":",
"prefix",
",",
"namespace",
",",
"name",
"=",
"self",
".",
"g",
".",
"namespace_manager",
".",
"compute_qname",
"(",
"uri",
",",
"generate",
"=",
"generate",
... | Given a uri return the qname if it exists, otherwise return the uri. | [
"Given",
"a",
"uri",
"return",
"the",
"qname",
"if",
"it",
"exists",
"otherwise",
"return",
"the",
"uri",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L353-L360 | train | 39,069 |
SUSE-Enceladus/ipa | ipa/scripts/cli_utils.py | archive_history_item | def archive_history_item(item, destination, no_color):
"""
Archive the log and results file for the given history item.
Copy the files and update the results file in destination directory.
"""
log_src, description = split_history_item(item.strip())
# Get relative path for log:
# {provider}/{image}/{instance}/{timestamp}.log
log_dest = os.path.sep.join(log_src.rsplit(os.path.sep, 4)[1:])
# Get results src and destination based on log paths.
results_src = log_src.rsplit('.', 1)[0] + '.results'
results_dest = log_dest.rsplit('.', 1)[0] + '.results'
destination_path = os.path.join(destination, log_dest)
log_dir = os.path.dirname(destination_path)
try:
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# Copy results and log files to archive directory.
shutil.copyfile(log_src, destination_path)
shutil.copyfile(results_src, os.path.join(destination, results_dest))
except Exception as error:
echo_style(
'Unable to archive history item: %s' % error,
no_color,
fg='red'
)
sys.exit(1)
else:
# Only update the archive results log if no error occur.
update_history_log(
os.path.join(destination, '.history'),
description=description,
test_log=log_dest
) | python | def archive_history_item(item, destination, no_color):
"""
Archive the log and results file for the given history item.
Copy the files and update the results file in destination directory.
"""
log_src, description = split_history_item(item.strip())
# Get relative path for log:
# {provider}/{image}/{instance}/{timestamp}.log
log_dest = os.path.sep.join(log_src.rsplit(os.path.sep, 4)[1:])
# Get results src and destination based on log paths.
results_src = log_src.rsplit('.', 1)[0] + '.results'
results_dest = log_dest.rsplit('.', 1)[0] + '.results'
destination_path = os.path.join(destination, log_dest)
log_dir = os.path.dirname(destination_path)
try:
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# Copy results and log files to archive directory.
shutil.copyfile(log_src, destination_path)
shutil.copyfile(results_src, os.path.join(destination, results_dest))
except Exception as error:
echo_style(
'Unable to archive history item: %s' % error,
no_color,
fg='red'
)
sys.exit(1)
else:
# Only update the archive results log if no error occur.
update_history_log(
os.path.join(destination, '.history'),
description=description,
test_log=log_dest
) | [
"def",
"archive_history_item",
"(",
"item",
",",
"destination",
",",
"no_color",
")",
":",
"log_src",
",",
"description",
"=",
"split_history_item",
"(",
"item",
".",
"strip",
"(",
")",
")",
"# Get relative path for log:",
"# {provider}/{image}/{instance}/{timestamp}.lo... | Archive the log and results file for the given history item.
Copy the files and update the results file in destination directory. | [
"Archive",
"the",
"log",
"and",
"results",
"file",
"for",
"the",
"given",
"history",
"item",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/scripts/cli_utils.py#L34-L73 | train | 39,070 |
SUSE-Enceladus/ipa | ipa/scripts/cli_utils.py | echo_verbose_results | def echo_verbose_results(data, no_color):
"""Print list of tests and result of each test."""
click.echo()
click.echo(
'\n'.join(
'{}: {}'.format(key, val) for key, val in data['info'].items()
)
)
click.echo()
for test in data['tests']:
if test['outcome'] == 'passed':
fg = 'green'
elif test['outcome'] == 'skipped':
fg = 'yellow'
else:
fg = 'red'
name = parse_test_name(test['name'])
echo_style(
'{} {}'.format(name, test['outcome'].upper()),
no_color,
fg=fg
) | python | def echo_verbose_results(data, no_color):
"""Print list of tests and result of each test."""
click.echo()
click.echo(
'\n'.join(
'{}: {}'.format(key, val) for key, val in data['info'].items()
)
)
click.echo()
for test in data['tests']:
if test['outcome'] == 'passed':
fg = 'green'
elif test['outcome'] == 'skipped':
fg = 'yellow'
else:
fg = 'red'
name = parse_test_name(test['name'])
echo_style(
'{} {}'.format(name, test['outcome'].upper()),
no_color,
fg=fg
) | [
"def",
"echo_verbose_results",
"(",
"data",
",",
"no_color",
")",
":",
"click",
".",
"echo",
"(",
")",
"click",
".",
"echo",
"(",
"'\\n'",
".",
"join",
"(",
"'{}: {}'",
".",
"format",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"da... | Print list of tests and result of each test. | [
"Print",
"list",
"of",
"tests",
"and",
"result",
"of",
"each",
"test",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/scripts/cli_utils.py#L152-L174 | train | 39,071 |
SUSE-Enceladus/ipa | ipa/scripts/cli_utils.py | get_log_file_from_item | def get_log_file_from_item(history):
"""
Return the log file based on provided history item.
Description is optional.
"""
try:
log_file, description = shlex.split(history)
except ValueError:
log_file = history.strip()
return log_file | python | def get_log_file_from_item(history):
"""
Return the log file based on provided history item.
Description is optional.
"""
try:
log_file, description = shlex.split(history)
except ValueError:
log_file = history.strip()
return log_file | [
"def",
"get_log_file_from_item",
"(",
"history",
")",
":",
"try",
":",
"log_file",
",",
"description",
"=",
"shlex",
".",
"split",
"(",
"history",
")",
"except",
"ValueError",
":",
"log_file",
"=",
"history",
".",
"strip",
"(",
")",
"return",
"log_file"
] | Return the log file based on provided history item.
Description is optional. | [
"Return",
"the",
"log",
"file",
"based",
"on",
"provided",
"history",
"item",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/scripts/cli_utils.py#L177-L188 | train | 39,072 |
SUSE-Enceladus/ipa | ipa/scripts/cli_utils.py | results_history | def results_history(history_log, no_color):
"""Display a list of ipa test results history."""
try:
with open(history_log, 'r') as f:
lines = f.readlines()
except Exception as error:
echo_style(
'Unable to process results history log: %s' % error,
no_color,
fg='red'
)
sys.exit(1)
index = len(lines)
for item in lines:
click.echo('{} {}'.format(index, item), nl=False)
index -= 1 | python | def results_history(history_log, no_color):
"""Display a list of ipa test results history."""
try:
with open(history_log, 'r') as f:
lines = f.readlines()
except Exception as error:
echo_style(
'Unable to process results history log: %s' % error,
no_color,
fg='red'
)
sys.exit(1)
index = len(lines)
for item in lines:
click.echo('{} {}'.format(index, item), nl=False)
index -= 1 | [
"def",
"results_history",
"(",
"history_log",
",",
"no_color",
")",
":",
"try",
":",
"with",
"open",
"(",
"history_log",
",",
"'r'",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"except",
"Exception",
"as",
"error",
":",
"echo_st... | Display a list of ipa test results history. | [
"Display",
"a",
"list",
"of",
"ipa",
"test",
"results",
"history",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/scripts/cli_utils.py#L191-L207 | train | 39,073 |
SUSE-Enceladus/ipa | ipa/scripts/cli_utils.py | split_history_item | def split_history_item(history):
"""
Return the log file and optional description for item.
"""
try:
log_file, description = shlex.split(history)
except ValueError:
log_file = history.strip()
description = None
return log_file, description | python | def split_history_item(history):
"""
Return the log file and optional description for item.
"""
try:
log_file, description = shlex.split(history)
except ValueError:
log_file = history.strip()
description = None
return log_file, description | [
"def",
"split_history_item",
"(",
"history",
")",
":",
"try",
":",
"log_file",
",",
"description",
"=",
"shlex",
".",
"split",
"(",
"history",
")",
"except",
"ValueError",
":",
"log_file",
"=",
"history",
".",
"strip",
"(",
")",
"description",
"=",
"None",... | Return the log file and optional description for item. | [
"Return",
"the",
"log",
"file",
"and",
"optional",
"description",
"for",
"item",
"."
] | 0845eed0ea25a27dbb059ad1016105fa60002228 | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/scripts/cli_utils.py#L210-L220 | train | 39,074 |
tgbugs/pyontutils | pyontutils/utils.py | get_working_dir | def get_working_dir(script__file__):
""" hardcoded sets the 'equivalent' working directory if not in git """
start = Path(script__file__).resolve()
_root = Path(start.root)
working_dir = start
while not list(working_dir.glob('.git')):
if working_dir == _root:
return
working_dir = working_dir.parent
return working_dir | python | def get_working_dir(script__file__):
""" hardcoded sets the 'equivalent' working directory if not in git """
start = Path(script__file__).resolve()
_root = Path(start.root)
working_dir = start
while not list(working_dir.glob('.git')):
if working_dir == _root:
return
working_dir = working_dir.parent
return working_dir | [
"def",
"get_working_dir",
"(",
"script__file__",
")",
":",
"start",
"=",
"Path",
"(",
"script__file__",
")",
".",
"resolve",
"(",
")",
"_root",
"=",
"Path",
"(",
"start",
".",
"root",
")",
"working_dir",
"=",
"start",
"while",
"not",
"list",
"(",
"workin... | hardcoded sets the 'equivalent' working directory if not in git | [
"hardcoded",
"sets",
"the",
"equivalent",
"working",
"directory",
"if",
"not",
"in",
"git"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/utils.py#L23-L34 | train | 39,075 |
tgbugs/pyontutils | pyontutils/utils.py | sysidpath | def sysidpath(ignore_options=False):
""" get a unique identifier for the machine running this function """
# in the event we have to make our own
# this should not be passed in a as a parameter
# since we need these definitions to be more or less static
failover = Path('/tmp/machine-id')
if not ignore_options:
options = (
Path('/etc/machine-id'),
failover, # always read to see if we somehow managed to persist this
)
for option in options:
if (option.exists() and
os.access(option, os.R_OK) and
option.stat().st_size > 0):
return option
uuid = uuid4()
with open(failover, 'wt') as f:
f.write(uuid.hex)
return failover | python | def sysidpath(ignore_options=False):
""" get a unique identifier for the machine running this function """
# in the event we have to make our own
# this should not be passed in a as a parameter
# since we need these definitions to be more or less static
failover = Path('/tmp/machine-id')
if not ignore_options:
options = (
Path('/etc/machine-id'),
failover, # always read to see if we somehow managed to persist this
)
for option in options:
if (option.exists() and
os.access(option, os.R_OK) and
option.stat().st_size > 0):
return option
uuid = uuid4()
with open(failover, 'wt') as f:
f.write(uuid.hex)
return failover | [
"def",
"sysidpath",
"(",
"ignore_options",
"=",
"False",
")",
":",
"# in the event we have to make our own",
"# this should not be passed in a as a parameter",
"# since we need these definitions to be more or less static",
"failover",
"=",
"Path",
"(",
"'/tmp/machine-id'",
")",
"if... | get a unique identifier for the machine running this function | [
"get",
"a",
"unique",
"identifier",
"for",
"the",
"machine",
"running",
"this",
"function"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/utils.py#L45-L67 | train | 39,076 |
tgbugs/pyontutils | pyontutils/scigraph_codegen.py | State.gencode | def gencode(self):
""" Run this to generate the code """
ledict = requests.get(self.api_url).json()
ledict = self.dotopdict(ledict)
out = self.dodict(ledict)
self._code = out | python | def gencode(self):
""" Run this to generate the code """
ledict = requests.get(self.api_url).json()
ledict = self.dotopdict(ledict)
out = self.dodict(ledict)
self._code = out | [
"def",
"gencode",
"(",
"self",
")",
":",
"ledict",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"api_url",
")",
".",
"json",
"(",
")",
"ledict",
"=",
"self",
".",
"dotopdict",
"(",
"ledict",
")",
"out",
"=",
"self",
".",
"dodict",
"(",
"ledict",
... | Run this to generate the code | [
"Run",
"this",
"to",
"generate",
"the",
"code"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/scigraph_codegen.py#L613-L618 | train | 39,077 |
tgbugs/pyontutils | pyontutils/scigraph_codegen.py | State2.dotopdict | def dotopdict(self, dict_):
""" Rewrite the 2.0 json to match what we feed the code for 1.2 """
mlookup = {'get':'GET', 'post':'POST'}
def rearrange(path, method_dict, method):
oid = method_dict['operationId']
self._paths[oid] = path
method_dict['nickname'] = oid
method_dict['method'] = mlookup[method]
paths = dict_['paths']
for path, path_dict in paths.items():
if self.path_prefix and self.path_prefix not in path:
continue
path_dict['operations'] = []
for method, method_dict in sorted(path_dict.items()):
if method == 'operations':
continue
rearrange(path, method_dict, method)
#print(self.operation(method_dict))
path_dict['operations'].append(method_dict)
path_dict['path'] = path
def setp(v, lenp=len(self.path_prefix)):
v['path'] = v['path'][lenp:]
return v
dict_['apis'] = []
for tag_dict in dict_['tags']:
path = '/' + tag_dict['name']
d = {'path':path,
'description':tag_dict['description'],
'class_json':{
'docstring':tag_dict['description'],
'resourcePath':path,
'apis':[setp(v) for k, v in paths.items()
if k.startswith(self.path_prefix + path)]},
}
dict_['apis'].append(d)
# make sure this is run first so we don't get key errors
self._swagger(dict_['swagger'])
self._info(dict_['info'])
self._definitions(dict_['definitions'])
return dict_ | python | def dotopdict(self, dict_):
""" Rewrite the 2.0 json to match what we feed the code for 1.2 """
mlookup = {'get':'GET', 'post':'POST'}
def rearrange(path, method_dict, method):
oid = method_dict['operationId']
self._paths[oid] = path
method_dict['nickname'] = oid
method_dict['method'] = mlookup[method]
paths = dict_['paths']
for path, path_dict in paths.items():
if self.path_prefix and self.path_prefix not in path:
continue
path_dict['operations'] = []
for method, method_dict in sorted(path_dict.items()):
if method == 'operations':
continue
rearrange(path, method_dict, method)
#print(self.operation(method_dict))
path_dict['operations'].append(method_dict)
path_dict['path'] = path
def setp(v, lenp=len(self.path_prefix)):
v['path'] = v['path'][lenp:]
return v
dict_['apis'] = []
for tag_dict in dict_['tags']:
path = '/' + tag_dict['name']
d = {'path':path,
'description':tag_dict['description'],
'class_json':{
'docstring':tag_dict['description'],
'resourcePath':path,
'apis':[setp(v) for k, v in paths.items()
if k.startswith(self.path_prefix + path)]},
}
dict_['apis'].append(d)
# make sure this is run first so we don't get key errors
self._swagger(dict_['swagger'])
self._info(dict_['info'])
self._definitions(dict_['definitions'])
return dict_ | [
"def",
"dotopdict",
"(",
"self",
",",
"dict_",
")",
":",
"mlookup",
"=",
"{",
"'get'",
":",
"'GET'",
",",
"'post'",
":",
"'POST'",
"}",
"def",
"rearrange",
"(",
"path",
",",
"method_dict",
",",
"method",
")",
":",
"oid",
"=",
"method_dict",
"[",
"'op... | Rewrite the 2.0 json to match what we feed the code for 1.2 | [
"Rewrite",
"the",
"2",
".",
"0",
"json",
"to",
"match",
"what",
"we",
"feed",
"the",
"code",
"for",
"1",
".",
"2"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/scigraph_codegen.py#L622-L666 | train | 39,078 |
tgbugs/pyontutils | pyontutils/obo_io.py | OboFile.write | def write(self, filename, type_='obo'): #FIXME this is bugged
""" Write file, will not overwrite files with the same name
outputs to obo by default but can also output to ttl if
passed type_='ttl' when called.
"""
if os.path.exists(filename):
name, ext = filename.rsplit('.',1)
try:
prefix, num = name.rsplit('_',1)
n = int(num)
n += 1
filename = prefix + '_' + str(n) + '.' + ext
except ValueError:
filename = name + '_1.' + ext
print('file exists, renaming to %s' % filename)
self.write(filename, type_)
else:
with open(filename, 'wt', encoding='utf-8') as f:
if type_ == 'obo':
f.write(str(self)) # FIXME this is incredibly slow for big files :/
elif type_ == 'ttl':
f.write(self.__ttl__())
else:
raise TypeError('No exporter for file type %s!' % type_) | python | def write(self, filename, type_='obo'): #FIXME this is bugged
""" Write file, will not overwrite files with the same name
outputs to obo by default but can also output to ttl if
passed type_='ttl' when called.
"""
if os.path.exists(filename):
name, ext = filename.rsplit('.',1)
try:
prefix, num = name.rsplit('_',1)
n = int(num)
n += 1
filename = prefix + '_' + str(n) + '.' + ext
except ValueError:
filename = name + '_1.' + ext
print('file exists, renaming to %s' % filename)
self.write(filename, type_)
else:
with open(filename, 'wt', encoding='utf-8') as f:
if type_ == 'obo':
f.write(str(self)) # FIXME this is incredibly slow for big files :/
elif type_ == 'ttl':
f.write(self.__ttl__())
else:
raise TypeError('No exporter for file type %s!' % type_) | [
"def",
"write",
"(",
"self",
",",
"filename",
",",
"type_",
"=",
"'obo'",
")",
":",
"#FIXME this is bugged",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"name",
",",
"ext",
"=",
"filename",
".",
"rsplit",
"(",
"'.'",
",",
"1",
... | Write file, will not overwrite files with the same name
outputs to obo by default but can also output to ttl if
passed type_='ttl' when called. | [
"Write",
"file",
"will",
"not",
"overwrite",
"files",
"with",
"the",
"same",
"name",
"outputs",
"to",
"obo",
"by",
"default",
"but",
"can",
"also",
"output",
"to",
"ttl",
"if",
"passed",
"type_",
"=",
"ttl",
"when",
"called",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/obo_io.py#L189-L213 | train | 39,079 |
tgbugs/pyontutils | librdflib/librdflib/__init__.py | main | def main():
from IPython import embed
""" Python 3.6.6
ibttl 2.605194091796875
ttl 3.8316309452056885
diff lt - ttl -1.2264368534088135
librdfxml 31.267616748809814
rdfxml 58.25124502182007
diff lr - rl -26.983628273010254
simple time 17.405116319656372
"""
""" Python 3.5.3 (pypy3)
libttl 2.387338638305664
ttl 1.3430471420288086
diff lt - ttl 1.0442914962768555
librdfxml 24.70371127128601
rdfxml 17.85916304588318
diff lr - rl 6.844548225402832
simple time 18.32300615310669
"""
# well I guess that answers that question ...
# librdf much faster for cpython, not for pypy3
from time import time
rdflib.plugin.register('librdfxml', rdflib.parser.Parser,
'librdflib', 'libRdfxmlParser')
rdflib.plugin.register('libttl', rdflib.parser.Parser,
'librdflib', 'libTurtleParser')
p1 = Path('~/git/NIF-Ontology/ttl/NIF-Molecule.ttl').expanduser()
start = time()
graph = rdflib.Graph().parse(p1.as_posix(), format='libttl')
stop = time()
lttime = stop - start
print('libttl', lttime)
#serialize(graph)
start = time()
graph = rdflib.Graph().parse(p1.as_posix(), format='turtle')
stop = time()
ttltime = stop - start
print('ttl', ttltime)
print('diff lt - ttl', lttime - ttltime)
p2 = Path('~/git/NIF-Ontology/ttl/external/uberon.owl').expanduser()
start = time()
graph2 = rdflib.Graph().parse(p2.as_posix(), format='librdfxml')
stop = time()
lrtime = stop - start
print('librdfxml', lrtime)
if True:
start = time()
graph2 = rdflib.Graph().parse(p2.as_posix(), format='xml')
stop = time()
rltime = stop - start
print('rdfxml', rltime)
print('diff lr - rl', lrtime - rltime)
if True:
file_uri = p2.as_uri()
parser = RDF.Parser(name='rdfxml')
stream = parser.parse_as_stream(file_uri)
start = time()
# t = list(stream)
t = tuple(statement_to_tuple(statement) for statement in stream)
stop = time()
stime = stop - start
print('simple time', stime)
embed() | python | def main():
from IPython import embed
""" Python 3.6.6
ibttl 2.605194091796875
ttl 3.8316309452056885
diff lt - ttl -1.2264368534088135
librdfxml 31.267616748809814
rdfxml 58.25124502182007
diff lr - rl -26.983628273010254
simple time 17.405116319656372
"""
""" Python 3.5.3 (pypy3)
libttl 2.387338638305664
ttl 1.3430471420288086
diff lt - ttl 1.0442914962768555
librdfxml 24.70371127128601
rdfxml 17.85916304588318
diff lr - rl 6.844548225402832
simple time 18.32300615310669
"""
# well I guess that answers that question ...
# librdf much faster for cpython, not for pypy3
from time import time
rdflib.plugin.register('librdfxml', rdflib.parser.Parser,
'librdflib', 'libRdfxmlParser')
rdflib.plugin.register('libttl', rdflib.parser.Parser,
'librdflib', 'libTurtleParser')
p1 = Path('~/git/NIF-Ontology/ttl/NIF-Molecule.ttl').expanduser()
start = time()
graph = rdflib.Graph().parse(p1.as_posix(), format='libttl')
stop = time()
lttime = stop - start
print('libttl', lttime)
#serialize(graph)
start = time()
graph = rdflib.Graph().parse(p1.as_posix(), format='turtle')
stop = time()
ttltime = stop - start
print('ttl', ttltime)
print('diff lt - ttl', lttime - ttltime)
p2 = Path('~/git/NIF-Ontology/ttl/external/uberon.owl').expanduser()
start = time()
graph2 = rdflib.Graph().parse(p2.as_posix(), format='librdfxml')
stop = time()
lrtime = stop - start
print('librdfxml', lrtime)
if True:
start = time()
graph2 = rdflib.Graph().parse(p2.as_posix(), format='xml')
stop = time()
rltime = stop - start
print('rdfxml', rltime)
print('diff lr - rl', lrtime - rltime)
if True:
file_uri = p2.as_uri()
parser = RDF.Parser(name='rdfxml')
stream = parser.parse_as_stream(file_uri)
start = time()
# t = list(stream)
t = tuple(statement_to_tuple(statement) for statement in stream)
stop = time()
stime = stop - start
print('simple time', stime)
embed() | [
"def",
"main",
"(",
")",
":",
"from",
"IPython",
"import",
"embed",
"\"\"\" Python 3.5.3 (pypy3)\n libttl 2.387338638305664\n ttl 1.3430471420288086\n diff lt - ttl 1.0442914962768555\n librdfxml 24.70371127128601\n rdfxml 17.85916304588318\n diff lr - rl 6.844548225402832\n s... | Python 3.6.6
ibttl 2.605194091796875
ttl 3.8316309452056885
diff lt - ttl -1.2264368534088135
librdfxml 31.267616748809814
rdfxml 58.25124502182007
diff lr - rl -26.983628273010254
simple time 17.405116319656372 | [
"Python",
"3",
".",
"6",
".",
"6",
"ibttl",
"2",
".",
"605194091796875",
"ttl",
"3",
".",
"8316309452056885",
"diff",
"lt",
"-",
"ttl",
"-",
"1",
".",
"2264368534088135",
"librdfxml",
"31",
".",
"267616748809814",
"rdfxml",
"58",
".",
"25124502182007",
"di... | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/librdflib/librdflib/__init__.py#L125-L196 | train | 39,080 |
tgbugs/pyontutils | pyontutils/combinators.py | make_predicate_object_combinator | def make_predicate_object_combinator(function, p, o):
""" Combinator to hold predicate object pairs until a subject is supplied and then
call a function that accepts a subject, predicate, and object.
Create a combinator to defer production of a triple until the missing pieces are supplied.
Note that the naming here tells you what is stored IN the combinator. The argument to the
combinator is the piece that is missing. """
def predicate_object_combinator(subject):
return function(subject, p, o)
return predicate_object_combinator | python | def make_predicate_object_combinator(function, p, o):
""" Combinator to hold predicate object pairs until a subject is supplied and then
call a function that accepts a subject, predicate, and object.
Create a combinator to defer production of a triple until the missing pieces are supplied.
Note that the naming here tells you what is stored IN the combinator. The argument to the
combinator is the piece that is missing. """
def predicate_object_combinator(subject):
return function(subject, p, o)
return predicate_object_combinator | [
"def",
"make_predicate_object_combinator",
"(",
"function",
",",
"p",
",",
"o",
")",
":",
"def",
"predicate_object_combinator",
"(",
"subject",
")",
":",
"return",
"function",
"(",
"subject",
",",
"p",
",",
"o",
")",
"return",
"predicate_object_combinator"
] | Combinator to hold predicate object pairs until a subject is supplied and then
call a function that accepts a subject, predicate, and object.
Create a combinator to defer production of a triple until the missing pieces are supplied.
Note that the naming here tells you what is stored IN the combinator. The argument to the
combinator is the piece that is missing. | [
"Combinator",
"to",
"hold",
"predicate",
"object",
"pairs",
"until",
"a",
"subject",
"is",
"supplied",
"and",
"then",
"call",
"a",
"function",
"that",
"accepts",
"a",
"subject",
"predicate",
"and",
"object",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/combinators.py#L17-L26 | train | 39,081 |
tgbugs/pyontutils | pyontutils/combinators.py | EquivalentClass.serialize | def serialize(self, subject, *objects_or_combinators):
""" object_combinators may also be URIRefs or Literals """
ec_s = rdflib.BNode()
if self.operator is not None:
if subject is not None:
yield subject, self.predicate, ec_s
yield from oc(ec_s)
yield from self._list.serialize(ec_s, self.operator, *objects_or_combinators)
else:
for thing in objects_or_combinators:
if isinstance(thing, Combinator):
object = rdflib.BNode()
#anything = list(thing(object))
#if anything:
#[print(_) for _ in anything]
hasType = False
for t in thing(object):
if t[1] == rdf.type:
hasType = True
yield t
if not hasType:
yield object, rdf.type, owl.Class
else:
object = thing
yield subject, self.predicate, object | python | def serialize(self, subject, *objects_or_combinators):
""" object_combinators may also be URIRefs or Literals """
ec_s = rdflib.BNode()
if self.operator is not None:
if subject is not None:
yield subject, self.predicate, ec_s
yield from oc(ec_s)
yield from self._list.serialize(ec_s, self.operator, *objects_or_combinators)
else:
for thing in objects_or_combinators:
if isinstance(thing, Combinator):
object = rdflib.BNode()
#anything = list(thing(object))
#if anything:
#[print(_) for _ in anything]
hasType = False
for t in thing(object):
if t[1] == rdf.type:
hasType = True
yield t
if not hasType:
yield object, rdf.type, owl.Class
else:
object = thing
yield subject, self.predicate, object | [
"def",
"serialize",
"(",
"self",
",",
"subject",
",",
"*",
"objects_or_combinators",
")",
":",
"ec_s",
"=",
"rdflib",
".",
"BNode",
"(",
")",
"if",
"self",
".",
"operator",
"is",
"not",
"None",
":",
"if",
"subject",
"is",
"not",
"None",
":",
"yield",
... | object_combinators may also be URIRefs or Literals | [
"object_combinators",
"may",
"also",
"be",
"URIRefs",
"or",
"Literals"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/combinators.py#L681-L707 | train | 39,082 |
shanbay/peeweext | peeweext/model.py | Model.update_with | def update_with(self, **query):
"""
secure update, mass assignment protected
"""
for k, v in self._filter_attrs(query).items():
setattr(self, k, v)
return self.save() | python | def update_with(self, **query):
"""
secure update, mass assignment protected
"""
for k, v in self._filter_attrs(query).items():
setattr(self, k, v)
return self.save() | [
"def",
"update_with",
"(",
"self",
",",
"*",
"*",
"query",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_filter_attrs",
"(",
"query",
")",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"k",
",",
"v",
")",
"return",
"self",
".... | secure update, mass assignment protected | [
"secure",
"update",
"mass",
"assignment",
"protected"
] | ff62a3d01e4584d50fde1944b9616c3b4236ecf0 | https://github.com/shanbay/peeweext/blob/ff62a3d01e4584d50fde1944b9616c3b4236ecf0/peeweext/model.py#L53-L59 | train | 39,083 |
shanbay/peeweext | peeweext/model.py | Model._validate | def _validate(self):
"""Validate model data and save errors
"""
errors = {}
for name, validator in self._validators.items():
value = getattr(self, name)
try:
validator(self, value)
except ValidationError as e:
errors[name] = str(e)
self._validate_errors = errors | python | def _validate(self):
"""Validate model data and save errors
"""
errors = {}
for name, validator in self._validators.items():
value = getattr(self, name)
try:
validator(self, value)
except ValidationError as e:
errors[name] = str(e)
self._validate_errors = errors | [
"def",
"_validate",
"(",
"self",
")",
":",
"errors",
"=",
"{",
"}",
"for",
"name",
",",
"validator",
"in",
"self",
".",
"_validators",
".",
"items",
"(",
")",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"name",
")",
"try",
":",
"validator",
"("... | Validate model data and save errors | [
"Validate",
"model",
"data",
"and",
"save",
"errors"
] | ff62a3d01e4584d50fde1944b9616c3b4236ecf0 | https://github.com/shanbay/peeweext/blob/ff62a3d01e4584d50fde1944b9616c3b4236ecf0/peeweext/model.py#L115-L128 | train | 39,084 |
tgbugs/pyontutils | ilxutils/ilxutils/nltklib.py | penn_to_wn | def penn_to_wn(tag):
""" Convert between a Penn Treebank tag to a simplified Wordnet tag """
if tag.startswith('N'):
return 'n'
if tag.startswith('V'):
return 'v'
if tag.startswith('J'):
return 'a'
if tag.startswith('R'):
return 'r'
return None | python | def penn_to_wn(tag):
""" Convert between a Penn Treebank tag to a simplified Wordnet tag """
if tag.startswith('N'):
return 'n'
if tag.startswith('V'):
return 'v'
if tag.startswith('J'):
return 'a'
if tag.startswith('R'):
return 'r'
return None | [
"def",
"penn_to_wn",
"(",
"tag",
")",
":",
"if",
"tag",
".",
"startswith",
"(",
"'N'",
")",
":",
"return",
"'n'",
"if",
"tag",
".",
"startswith",
"(",
"'V'",
")",
":",
"return",
"'v'",
"if",
"tag",
".",
"startswith",
"(",
"'J'",
")",
":",
"return",... | Convert between a Penn Treebank tag to a simplified Wordnet tag | [
"Convert",
"between",
"a",
"Penn",
"Treebank",
"tag",
"to",
"a",
"simplified",
"Wordnet",
"tag"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/nltklib.py#L9-L23 | train | 39,085 |
tgbugs/pyontutils | ilxutils/ilxutils/nltklib.py | sentence_similarity | def sentence_similarity(sentence1, sentence2):
""" compute the sentence similarity using Wordnet """
# Tokenize and tag
sentence1 = pos_tag(word_tokenize(sentence1))
sentence2 = pos_tag(word_tokenize(sentence2))
# Get the synsets for the tagged words
synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]
# Filter out the Nones
synsets1 = [ss for ss in synsets1 if ss]
synsets2 = [ss for ss in synsets2 if ss]
#print(synsets1)
#print(synsets2)
score, count = 0.0, 0.0
# For each word in the first sentence
for synset in synsets1:
# Get the similarity value of the most similar word in the other sentence
best_score=[synset.path_similarity(ss) for ss in synsets2 if synset.path_similarity(ss)]
# Check that the similarity could have been computed
if best_score:
score += max(best_score)
count += 1
# Average the values
if count > 0:
score /= count
else:
score = 0
return score | python | def sentence_similarity(sentence1, sentence2):
""" compute the sentence similarity using Wordnet """
# Tokenize and tag
sentence1 = pos_tag(word_tokenize(sentence1))
sentence2 = pos_tag(word_tokenize(sentence2))
# Get the synsets for the tagged words
synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]
# Filter out the Nones
synsets1 = [ss for ss in synsets1 if ss]
synsets2 = [ss for ss in synsets2 if ss]
#print(synsets1)
#print(synsets2)
score, count = 0.0, 0.0
# For each word in the first sentence
for synset in synsets1:
# Get the similarity value of the most similar word in the other sentence
best_score=[synset.path_similarity(ss) for ss in synsets2 if synset.path_similarity(ss)]
# Check that the similarity could have been computed
if best_score:
score += max(best_score)
count += 1
# Average the values
if count > 0:
score /= count
else:
score = 0
return score | [
"def",
"sentence_similarity",
"(",
"sentence1",
",",
"sentence2",
")",
":",
"# Tokenize and tag",
"sentence1",
"=",
"pos_tag",
"(",
"word_tokenize",
"(",
"sentence1",
")",
")",
"sentence2",
"=",
"pos_tag",
"(",
"word_tokenize",
"(",
"sentence2",
")",
")",
"# Get... | compute the sentence similarity using Wordnet | [
"compute",
"the",
"sentence",
"similarity",
"using",
"Wordnet"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/nltklib.py#L36-L70 | train | 39,086 |
tgbugs/pyontutils | ilxutils/ilxutils/ontopandas.py | command_line | def command_line():
''' If you want to use the command line '''
from docopt import docopt
doc = docopt( __doc__, version=VERSION )
args = pd.Series({k.replace('--', ''): v for k, v in doc.items()})
if args.all:
graph = Graph2Pandas(args.file, _type='all')
elif args.type:
graph = Graph2Pandas(args.file, _type=args.type)
else:
graph = Graph2Pandas(args.file)
graph.save(args.output) | python | def command_line():
''' If you want to use the command line '''
from docopt import docopt
doc = docopt( __doc__, version=VERSION )
args = pd.Series({k.replace('--', ''): v for k, v in doc.items()})
if args.all:
graph = Graph2Pandas(args.file, _type='all')
elif args.type:
graph = Graph2Pandas(args.file, _type=args.type)
else:
graph = Graph2Pandas(args.file)
graph.save(args.output) | [
"def",
"command_line",
"(",
")",
":",
"from",
"docopt",
"import",
"docopt",
"doc",
"=",
"docopt",
"(",
"__doc__",
",",
"version",
"=",
"VERSION",
")",
"args",
"=",
"pd",
".",
"Series",
"(",
"{",
"k",
".",
"replace",
"(",
"'--'",
",",
"''",
")",
":"... | If you want to use the command line | [
"If",
"you",
"want",
"to",
"use",
"the",
"command",
"line"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/ontopandas.py#L279-L290 | train | 39,087 |
tgbugs/pyontutils | ilxutils/ilxutils/ontopandas.py | OntoPandas.save | def save(self, foldername: str, path_to_folder: str=None) -> None:
''' Saves entities into multiple files within the same folder because of pickle-recursive
errors that would happen if squeezed into one '''
self.create_pickle((self.g.namespaces, ))
self.df.to_pickle(output) | python | def save(self, foldername: str, path_to_folder: str=None) -> None:
''' Saves entities into multiple files within the same folder because of pickle-recursive
errors that would happen if squeezed into one '''
self.create_pickle((self.g.namespaces, ))
self.df.to_pickle(output) | [
"def",
"save",
"(",
"self",
",",
"foldername",
":",
"str",
",",
"path_to_folder",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"create_pickle",
"(",
"(",
"self",
".",
"g",
".",
"namespaces",
",",
")",
")",
"self",
".",
"df",
".",
... | Saves entities into multiple files within the same folder because of pickle-recursive
errors that would happen if squeezed into one | [
"Saves",
"entities",
"into",
"multiple",
"files",
"within",
"the",
"same",
"folder",
"because",
"of",
"pickle",
"-",
"recursive",
"errors",
"that",
"would",
"happen",
"if",
"squeezed",
"into",
"one"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/ontopandas.py#L54-L58 | train | 39,088 |
tgbugs/pyontutils | ilxutils/ilxutils/ontopandas.py | OntoPandas.qname | def qname(self, uri: str) -> str:
''' Returns qname of uri in rdflib graph while also saving it '''
try:
prefix, namespace, name = self.g.compute_qname(uri)
qname = prefix + ':' + name
return qname
except:
try:
print('prefix:', prefix)
print('namespace:', namespace)
print('name:', name)
except:
print('Could not print from compute_qname')
exit('No qname for ' + uri) | python | def qname(self, uri: str) -> str:
''' Returns qname of uri in rdflib graph while also saving it '''
try:
prefix, namespace, name = self.g.compute_qname(uri)
qname = prefix + ':' + name
return qname
except:
try:
print('prefix:', prefix)
print('namespace:', namespace)
print('name:', name)
except:
print('Could not print from compute_qname')
exit('No qname for ' + uri) | [
"def",
"qname",
"(",
"self",
",",
"uri",
":",
"str",
")",
"->",
"str",
":",
"try",
":",
"prefix",
",",
"namespace",
",",
"name",
"=",
"self",
".",
"g",
".",
"compute_qname",
"(",
"uri",
")",
"qname",
"=",
"prefix",
"+",
"':'",
"+",
"name",
"retur... | Returns qname of uri in rdflib graph while also saving it | [
"Returns",
"qname",
"of",
"uri",
"in",
"rdflib",
"graph",
"while",
"also",
"saving",
"it"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/ontopandas.py#L60-L73 | train | 39,089 |
tgbugs/pyontutils | ilxutils/ilxutils/ontopandas.py | OntoPandas.get_sparql_dataframe | def get_sparql_dataframe( self ):
''' Iterates through the sparql table and condenses it into a Pandas DataFrame '''
self.result = self.g.query(self.query)
cols = set() # set(['qname'])
indx = set()
data = {}
curr_subj = None # place marker for first subj to be processed
bindings = []
for i, binding in enumerate(self.result.bindings):
subj_binding = binding[rdflib.term.Variable('subj')]
pred_binding = binding[rdflib.term.Variable('pred')]
obj_binding = binding[rdflib.term.Variable('obj')]
subj = subj_binding
pred = pred_binding # self.qname(pred_binding) if self.predicate_as_qname else pred_binding
obj = obj_binding
# stops at BNodes; could be exanded here
if isinstance(subj, BNode):
continue
elif isinstance(pred, BNode):
continue
elif isinstance(obj, BNode) and obj:
continue
# else:
# subj = str(subj)
# pred = str(pred)
# obj = str(obj)
cols.add(pred)
indx.add(subj)
bindings.append(binding)
bindings = sorted(bindings, key=lambda k: k[rdflib.term.Variable('subj')])
df = pd.DataFrame(columns=cols, index=indx)
for i, binding in enumerate(bindings):
subj_binding = binding[rdflib.term.Variable('subj')]
pred_binding = binding[rdflib.term.Variable('pred')]
obj_binding = binding[rdflib.term.Variable('obj')]
subj = subj_binding
pred = pred_binding # self.qname(pred_binding) if self.qname_predicates else pred_binding
obj = obj_binding
# stops at BNodes; could be exanded here
if isinstance(subj, BNode):
continue
elif isinstance(pred, BNode):
continue
elif isinstance(obj, BNode) and obj:
continue
# elif self.value_type:
# subj = str(subj)
# pred = str(pred)
# obj = str(obj)
if curr_subj == None:
curr_subj = subj
if not data.get(subj): # Prepare defaultdict home if it doesn't exist
data[subj] = defaultdict(list)
data[subj][pred].append(obj)
elif curr_subj != subj:
curr_subj = subj
for data_subj, data_pred_objs in data.items():
for data_pred, data_objs in data_pred_objs.items():
if len(data_objs) == 1: # clean lists of just 1 value
data_pred_objs[data_pred] = data_objs[0]
df.loc[data_subj] = pd.Series(data_pred_objs)
data = {}
if not data.get(subj): # Prepare defaultdict home if it doesn't exist
data[subj] = defaultdict(list)
data[subj][pred].append(obj)
else:
if not data.get(subj): # Prepare defaultdict home if it doesn't exist
data[subj] = defaultdict(list)
data[subj][pred].append(obj)
for data_subj, data_pred_objs in data.items():
for data_pred, data_objs in data_pred_objs.items():
if len(data_objs) == 1: # clean lists of just 1 value
data_pred_objs[data_pred] = data_objs[0]
df.loc[data_subj] = pd.Series(data_pred_objs)
df = df.where((pd.notnull(df)), None) # default Null is fricken Float NaN
def get_sparql_dataframe( self ):
    ''' Iterates through the sparql table and condenses it into a Pandas DataFrame.

    Assumes the query binds the variables ?subj, ?pred and ?obj -- TODO
    confirm against self.query. The resulting frame is indexed by subject
    with one column per predicate; a cell holds the object value, a list
    when the (subject, predicate) pair had several objects, and None when
    the subject has no value for that predicate. Bindings containing a
    BNode in any position are skipped.
    '''
    self.result = self.g.query(self.query)
    cols = set() # set(['qname'])
    indx = set()
    data = {}
    curr_subj = None # place marker for first subj to be processed
    bindings = []
    # First pass: keep the usable bindings and collect the distinct
    # predicates (columns) and subjects (index) so the DataFrame can be
    # pre-allocated with the right shape.
    for i, binding in enumerate(self.result.bindings):
        subj_binding = binding[rdflib.term.Variable('subj')]
        pred_binding = binding[rdflib.term.Variable('pred')]
        obj_binding = binding[rdflib.term.Variable('obj')]
        subj = subj_binding
        pred = pred_binding # self.qname(pred_binding) if self.predicate_as_qname else pred_binding
        obj = obj_binding
        # stops at BNodes; could be exanded here
        if isinstance(subj, BNode):
            continue
        elif isinstance(pred, BNode):
            continue
        elif isinstance(obj, BNode) and obj:
            continue
        # else:
        #     subj = str(subj)
        #     pred = str(pred)
        #     obj = str(obj)
        cols.add(pred)
        indx.add(subj)
        bindings.append(binding)
    # Sort by subject so each subject's bindings are contiguous; the second
    # pass relies on that to know when a subject's row is complete.
    bindings = sorted(bindings, key=lambda k: k[rdflib.term.Variable('subj')])
    df = pd.DataFrame(columns=cols, index=indx)
    # Second pass: accumulate pred -> [obj, ...] per subject, flushing a row
    # into the frame whenever the subject changes.
    for i, binding in enumerate(bindings):
        subj_binding = binding[rdflib.term.Variable('subj')]
        pred_binding = binding[rdflib.term.Variable('pred')]
        obj_binding = binding[rdflib.term.Variable('obj')]
        subj = subj_binding
        pred = pred_binding # self.qname(pred_binding) if self.qname_predicates else pred_binding
        obj = obj_binding
        # stops at BNodes; could be exanded here
        if isinstance(subj, BNode):
            continue
        elif isinstance(pred, BNode):
            continue
        elif isinstance(obj, BNode) and obj:
            continue
        # elif self.value_type:
        #     subj = str(subj)
        #     pred = str(pred)
        #     obj = str(obj)
        if curr_subj == None:
            # Very first subject seen: start accumulating.
            curr_subj = subj
            if not data.get(subj): # Prepare defaultdict home if it doesn't exist
                data[subj] = defaultdict(list)
            data[subj][pred].append(obj)
        elif curr_subj != subj:
            # Subject changed: flush everything accumulated so far, then
            # start a fresh accumulator for the new subject.
            curr_subj = subj
            for data_subj, data_pred_objs in data.items():
                for data_pred, data_objs in data_pred_objs.items():
                    if len(data_objs) == 1: # clean lists of just 1 value
                        data_pred_objs[data_pred] = data_objs[0]
                df.loc[data_subj] = pd.Series(data_pred_objs)
            data = {}
            if not data.get(subj): # Prepare defaultdict home if it doesn't exist
                data[subj] = defaultdict(list)
            data[subj][pred].append(obj)
        else:
            # Same subject as the previous binding: keep accumulating.
            if not data.get(subj): # Prepare defaultdict home if it doesn't exist
                data[subj] = defaultdict(list)
            data[subj][pred].append(obj)
    # Flush the final subject, which the change-detection above never reaches.
    for data_subj, data_pred_objs in data.items():
        for data_pred, data_objs in data_pred_objs.items():
            if len(data_objs) == 1: # clean lists of just 1 value
                data_pred_objs[data_pred] = data_objs[0]
        df.loc[data_subj] = pd.Series(data_pred_objs)
    df = df.where((pd.notnull(df)), None) # default Null is fricken Float NaN
    return df
"def",
"get_sparql_dataframe",
"(",
"self",
")",
":",
"self",
".",
"result",
"=",
"self",
".",
"g",
".",
"query",
"(",
"self",
".",
"query",
")",
"cols",
"=",
"set",
"(",
")",
"# set(['qname'])",
"indx",
"=",
"set",
"(",
")",
"data",
"=",
"{",
"}",... | Iterates through the sparql table and condenses it into a Pandas DataFrame | [
"Iterates",
"through",
"the",
"sparql",
"table",
"and",
"condenses",
"it",
"into",
"a",
"Pandas",
"DataFrame"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/ontopandas.py#L171-L259 | train | 39,090 |
tgbugs/pyontutils | ilxutils/ilxutils/ontopandas.py | OntoPandas.df | def df(self, qname_predicates:bool=False, keep_variable_type:bool=True) -> pd.DataFrame:
''' Multi funcitonal DataFrame with settings '''
local_df = self.df.copy()
if qname_predicates:
for col in self.columns:
local_df.rename({col: self.g.qname(col)})
if not keep_variable_type:
pass
# convert all to strings, watch out for lists
def df(self, qname_predicates: bool = False, keep_variable_type: bool = True) -> pd.DataFrame:
    ''' Return a copy of the stored DataFrame, optionally with qname column headers.

    Args:
        qname_predicates: when True, rename each predicate column to its
            qualified name via ``self.g.qname``.
        keep_variable_type: when False, values would be coerced to plain
            strings; not implemented yet.

    Returns:
        A copy of ``self.df``; the stored frame is never mutated.

    NOTE(review): assumes ``self.df`` is shadowed by an instance attribute
    holding the DataFrame and that ``self.columns`` lists its predicate
    columns -- confirm against the class __init__.
    '''
    local_df = self.df.copy()
    if qname_predicates:
        # BUG FIX: DataFrame.rename returns a new frame (and defaults to the
        # index axis); the original discarded the result per column, so no
        # column was ever renamed. Rename all columns in one pass instead.
        local_df = local_df.rename(columns={col: self.g.qname(col) for col in self.columns})
    if not keep_variable_type:
        pass
        # convert all to strings, watch out for lists
    return local_df
"def",
"df",
"(",
"self",
",",
"qname_predicates",
":",
"bool",
"=",
"False",
",",
"keep_variable_type",
":",
"bool",
"=",
"True",
")",
"->",
"pd",
".",
"DataFrame",
":",
"local_df",
"=",
"self",
".",
"df",
".",
"copy",
"(",
")",
"if",
"qname_predicate... | Multi funcitonal DataFrame with settings | [
"Multi",
"funcitonal",
"DataFrame",
"with",
"settings"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/ontopandas.py#L261-L270 | train | 39,091 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.qname | def qname(self, iri: str) -> str:
""" Get qualified name of uri in rdflib graph while also saving it
Args: iri: The iri that you want to replace the uri with a known prefix with
Returns: qualified name of the iri to be used as the new predicate
"""
prefix, namespace, name = self.g.compute_qname(uri)
qname = prefix + ':' + name
self.rqname[qname] = iri
def qname(self, iri: str) -> str:
    """ Return the qualified name (prefix:name) of *iri*, caching the reverse mapping.

    Args:
        iri: full IRI to compress into a prefixed qualified name.

    Returns:
        The qualified name; also records ``self.rqname[qname] = iri`` so the
        qname can later be expanded back to the original IRI.
    """
    # BUG FIX: the original called self.g.compute_qname(uri), but the
    # parameter is named `iri`, so every call raised NameError.
    prefix, namespace, name = self.g.compute_qname(iri)
    qname = prefix + ':' + name
    self.rqname[qname] = iri
    return qname
"def",
"qname",
"(",
"self",
",",
"iri",
":",
"str",
")",
"->",
"str",
":",
"prefix",
",",
"namespace",
",",
"name",
"=",
"self",
".",
"g",
".",
"compute_qname",
"(",
"uri",
")",
"qname",
"=",
"prefix",
"+",
"':'",
"+",
"name",
"self",
".",
"rqna... | Get qualified name of uri in rdflib graph while also saving it
Args: iri: The iri that you want to replace the uri with a known prefix with
Returns: qualified name of the iri to be used as the new predicate | [
"Get",
"qualified",
"name",
"of",
"uri",
"in",
"rdflib",
"graph",
"while",
"also",
"saving",
"it"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L61-L71 | train | 39,092 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.find_prefix | def find_prefix(self, iri: Union[URIRef, Literal, str]) -> Union[None, str]:
""" Finds if uri is in common_namespaces
Auto adds prefix if incoming iri has a uri in common_namespaces. If its not in the local
library, then it will just be spit out as the iri and not saved/condensed into qualified
names.
The reason for the maxes is find the longest string match. This is to avoid accidently
matching iris with small uris when really is a more complete uri that is the match.
Args: iri: iri to be searched to find a known uri in it.
Eample:
In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
Out [1]: "http://www.w3.org/2000/01/rdf-schema#"
In [2]: print(find_prefix("http://made_up_uri/label"))
Out [2]: None
"""
iri = str(iri)
max_iri_len = 0
max_prefix = None
for prefix, uri in common_namespaces.items():
if uri in iri and max_iri_len < len(uri): # if matched uri is larger; replace as king
max_prefix = prefix
max_iri_len = len(uri)
def find_prefix(self, iri: Union[URIRef, Literal, str]) -> Union[None, str]:
    """ Return the prefix whose namespace is the longest match inside *iri*.

    Scans ``common_namespaces`` and keeps the prefix whose namespace string
    appears in *iri* and is the longest such match, so a short namespace can
    never shadow a more specific one. Returns None when nothing matches.

    Example:
        In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
        Out [1]: "rdfs"  # assuming rdfs -> "http://www.w3.org/2000/01/rdf-schema#"
        In [2]: print(find_prefix("http://made_up_uri/label"))
        Out [2]: None
    """
    target = str(iri)
    best_prefix = None
    longest = 0
    for candidate, namespace_iri in common_namespaces.items():
        # Strict '>' keeps the first-seen prefix when two namespaces tie.
        if namespace_iri in target and len(namespace_iri) > longest:
            best_prefix = candidate
            longest = len(namespace_iri)
    return best_prefix
"def",
"find_prefix",
"(",
"self",
",",
"iri",
":",
"Union",
"[",
"URIRef",
",",
"Literal",
",",
"str",
"]",
")",
"->",
"Union",
"[",
"None",
",",
"str",
"]",
":",
"iri",
"=",
"str",
"(",
"iri",
")",
"max_iri_len",
"=",
"0",
"max_prefix",
"=",
"N... | Finds if uri is in common_namespaces
Auto adds prefix if incoming iri has a uri in common_namespaces. If its not in the local
library, then it will just be spit out as the iri and not saved/condensed into qualified
names.
The reason for the maxes is find the longest string match. This is to avoid accidently
matching iris with small uris when really is a more complete uri that is the match.
Args: iri: iri to be searched to find a known uri in it.
Example:
In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
Out [1]: "http://www.w3.org/2000/01/rdf-schema#"
In [2]: print(find_prefix("http://made_up_uri/label"))
Out [2]: None | [
"Finds",
"if",
"uri",
"is",
"in",
"common_namespaces"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L102-L127 | train | 39,093 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.add_annotation | def add_annotation(
self,
subj: URIRef,
pred: URIRef,
obj: Union[Literal, URIRef],
a_p: URIRef ,
a_o: Union[Literal, URIRef],
) -> BNode:
""" Adds annotation to rdflib graph.
The annotation axiom will filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pref: Entities Predicate Anchor to be annotated
obj: Entities Object Anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information.
"""
bnode: BNode = self.triple2annotation_bnode.get( (subj, pred, obj) )
if not bnode:
a_s: BNode = BNode()
self.triple2annotation_bnode[(subj, pred, obj)]: BNode = a_s
self.g.add((a_s, RDF.type, OWL.Axiom))
self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))
self.g.add((a_s, OWL.annotatedProperty,self.process_subj_or_pred(pred)))
self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))
else:
a_s: BNode = bnode
self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))
def add_annotation(
        self,
        subj: URIRef,
        pred: URIRef,
        obj: Union[Literal, URIRef],
        a_p: URIRef,
        a_o: Union[Literal, URIRef],
    ) -> BNode:
    """ Annotate the triple (subj, pred, obj) with the pair (a_p, a_o).

    An owl:Axiom node is created (and cached in
    ``self.triple2annotation_bnode``) the first time a given triple is
    annotated; later calls reuse the cached node so every annotation of one
    triple hangs off the same axiom.

    Args:
        subj: Entity subject to be annotated
        pred: Entities Predicate Anchor to be annotated
        obj: Entities Object Anchor to be annotated
        a_p: Annotation predicate
        a_o: Annotation object

    Returns:
        The BNode anchoring the annotation information in the RDF graph.
    """
    a_s: BNode = self.triple2annotation_bnode.get((subj, pred, obj))
    if not a_s:
        # First annotation for this triple: build the owl:Axiom scaffold.
        a_s = BNode()
        self.triple2annotation_bnode[(subj, pred, obj)] = a_s
        self.g.add((a_s, RDF.type, OWL.Axiom))
        self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))
        self.g.add((a_s, OWL.annotatedProperty, self.process_subj_or_pred(pred)))
        self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))
    self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))
    # BUG FIX: the original returned `bnode`, which is None whenever the
    # axiom node was just created, contradicting the documented contract.
    # Always return the annotation BNode.
    return a_s
return bnode | [
"def",
"add_annotation",
"(",
"self",
",",
"subj",
":",
"URIRef",
",",
"pred",
":",
"URIRef",
",",
"obj",
":",
"Union",
"[",
"Literal",
",",
"URIRef",
"]",
",",
"a_p",
":",
"URIRef",
",",
"a_o",
":",
"Union",
"[",
"Literal",
",",
"URIRef",
"]",
","... | Adds annotation to rdflib graph.
The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pref: Entities Predicate Anchor to be annotated
obj: Entities Object Anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information. | [
"Adds",
"annotation",
"to",
"rdflib",
"graph",
"."
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L129-L163 | train | 39,094 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.add_triple | def add_triple(
self,
subj: Union[URIRef, str],
pred: Union[URIRef, str],
obj: Union[URIRef, Literal, str]
) -> None:
""" Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: )
"""
if obj in [None, "", " "]: return # Empty objects are bad practice
_subj = self.process_subj_or_pred(subj)
_pred = self.process_subj_or_pred(pred)
_obj = self.process_obj(obj)
def add_triple(
        self,
        subj: Union[URIRef, str],
        pred: Union[URIRef, str],
        obj: Union[URIRef, Literal, str]
    ) -> None:
    """ Normalize a (subj, pred, obj) triple and insert it into the graph.

    Subject and predicate are resolved through process_subj_or_pred and the
    object through process_obj, so IRIs, qnames and plain strings are
    accepted in any position, in any order of registration.

    Args:
        subj: Entity subject
        pred: Entity predicate
        obj: Entity object

    Example:
        In [1]: add_triple(
        ...:     'http://uri.interlex.org/base/ilx_0101431',
        ...:     RDF.type,
        ...:     'http://www.w3.org/2002/07/owl#Class')
        ...: )
    """
    # Guard clause: empty objects are bad practice, so drop them silently.
    if obj in [None, "", " "]:
        return
    triple = (
        self.process_subj_or_pred(subj),
        self.process_subj_or_pred(pred),
        self.process_obj(obj),
    )
    self.g.add(triple)
"def",
"add_triple",
"(",
"self",
",",
"subj",
":",
"Union",
"[",
"URIRef",
",",
"str",
"]",
",",
"pred",
":",
"Union",
"[",
"URIRef",
",",
"str",
"]",
",",
"obj",
":",
"Union",
"[",
"URIRef",
",",
"Literal",
",",
"str",
"]",
")",
"->",
"None",
... | Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: ) | [
"Adds",
"triple",
"to",
"rdflib",
"Graph"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L165-L191 | train | 39,095 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.process_prefix | def process_prefix(self, prefix: str) -> Union[Namespace, None]:
""" Add namespace to graph if it has a local match
This allows qnames to be used without adding their respected namespaces if they are in
the common_namespaces local dict. This is is to save a butt-ton of time trying to see what
the ontology has as far as uris go.
Args: prefix: prefix of the uri in the rdflib namespace to be checked if it exists in
the local dict of common_namespaces.
Returns: Namespace of uri if add or already exists; else None
"""
if self.namespaces.get(prefix):
return self.namespaces[prefix]
iri: str = common_namespaces.get(prefix)
if iri:
def process_prefix(self, prefix: str) -> Union[Namespace, None]:
    """ Resolve *prefix* to a Namespace, registering it on demand.

    Already-registered prefixes come straight from ``self.namespaces``.
    Otherwise, when the prefix is known to the local ``common_namespaces``
    table, its namespace is added to the graph via ``add_namespace`` and
    returned -- this saves probing the ontology for its uris. Unknown
    prefixes yield None.

    Args: prefix: prefix of the uri to look up.
    Returns: the Namespace if registered or registrable; else None.
    """
    registered = self.namespaces.get(prefix)
    if registered:
        return registered
    known_iri = common_namespaces.get(prefix)
    if not known_iri:
        return None
    return self.add_namespace(prefix, known_iri)
"def",
"process_prefix",
"(",
"self",
",",
"prefix",
":",
"str",
")",
"->",
"Union",
"[",
"Namespace",
",",
"None",
"]",
":",
"if",
"self",
".",
"namespaces",
".",
"get",
"(",
"prefix",
")",
":",
"return",
"self",
".",
"namespaces",
"[",
"prefix",
"]... | Add namespace to graph if it has a local match
This allows qnames to be used without adding their respected namespaces if they are in
the common_namespaces local dict. This is is to save a butt-ton of time trying to see what
the ontology has as far as uris go.
Args: prefix: prefix of the uri in the rdflib namespace to be checked if it exists in
the local dict of common_namespaces.
Returns: Namespace of uri if add or already exists; else None | [
"Add",
"namespace",
"to",
"graph",
"if",
"it",
"has",
"a",
"local",
"match"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L193-L209 | train | 39,096 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.process_subj_or_pred | def process_subj_or_pred(self, component: Union[URIRef, str]) -> URIRef:
""" Adds viable uri from iri or expands viable qname to iri to be triple ready
Need to have a viable qualified name (qname) in order to use a qname. You can make it
viable by either add the namespace beforehand with add_namespace(s) or if its already
in the local common_namespaces preloaded.
Args:
component: entity subject or predicate to be expanded or have its uri saved.
Returns:
rdflib URIRef ready subject or predicate to be put into a triple.
Raises:
SystemExit: When expecting a qname to be expanded, but is not valid or if
component is not a qualified name or a iri.
"""
if 'http' in component:
prefix = self.find_prefix(component) # Find uri in iri based on common_namespaces
if prefix: self.process_prefix(prefix) # if match, will add to Graph namespaces
return URIRef(component)
elif ':' in component:
presumed_prefix, info = component.split(':', 1)
namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix)
if not namespace: exit(component + ': qname namespace does\'t exist yet.')
return namespace[info]
def process_subj_or_pred(self, component: Union[URIRef, str]) -> URIRef:
    """ Turn an IRI or a resolvable qname into a URIRef ready for a triple.

    Anything containing 'http' is treated as an IRI and wrapped in URIRef
    directly, registering a namespace when a known one is embedded in it.
    Otherwise a 'prefix:rest' qname is expanded through its namespace, which
    must already be resolvable (pre-registered or in common_namespaces).

    Args:
        component: entity subject or predicate to be expanded or passed through.

    Returns:
        rdflib URIRef ready subject or predicate to be put into a triple.

    Raises:
        SystemExit: when a qname prefix cannot be resolved, or the value is
            neither an IRI nor a qname.
    """
    if 'http' in component:
        # IRI: pass through, remembering its namespace when it is a known one.
        matched = self.find_prefix(component)
        if matched:
            self.process_prefix(matched)
        return URIRef(component)
    if ':' in component:
        presumed_prefix, info = component.split(':', 1)
        namespace = self.process_prefix(presumed_prefix)
        if not namespace:
            exit(component + ': qname namespace does\'t exist yet.')
        return namespace[info]
    exit(component + ': is not a valid subject or predicate')
"def",
"process_subj_or_pred",
"(",
"self",
",",
"component",
":",
"Union",
"[",
"URIRef",
",",
"str",
"]",
")",
"->",
"URIRef",
":",
"if",
"'http'",
"in",
"component",
":",
"prefix",
"=",
"self",
".",
"find_prefix",
"(",
"component",
")",
"# Find uri in i... | Adds viable uri from iri or expands viable qname to iri to be triple ready
Need to have a viable qualified name (qname) in order to use a qname. You can make it
viable by either add the namespace beforehand with add_namespace(s) or if its already
in the local common_namespaces preloaded.
Args:
component: entity subject or predicate to be expanded or have its uri saved.
Returns:
rdflib URIRef ready subject or predicate to be put into a triple.
Raises:
SystemExit: When expecting a qname to be expanded, but is not valid or if
component is not a qualified name or a iri. | [
"Adds",
"viable",
"uri",
"from",
"iri",
"or",
"expands",
"viable",
"qname",
"to",
"iri",
"to",
"be",
"triple",
"ready"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L211-L237 | train | 39,097 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.process_obj | def process_obj(self, obj: Union[URIRef, Literal, str]) -> Union[URIRef, Literal]:
""" Gives component the proper node type
Args:
obj: Entity object to be converted to its appropriate node type
Returns:
URIRef or Literal type of the object provided.
Raises:
SystemExit: If object is a dict or list it becomes str with broken data. Needs to
come in one object at a time.
"""
if isinstance(obj, dict) or isinstance(obj, list):
exit(str(obj) + ': should be str or intended to be a URIRef or Literal.')
if isinstance(obj, Literal) or isinstance(obj, URIRef):
prefix = self.find_prefix(obj)
if prefix: self.process_prefix(prefix)
return obj
if len(obj) > 8:
if 'http' == obj[:4] and '://' in obj and ' ' not in obj:
prefix = self.find_prefix(obj)
if prefix: self.process_prefix(prefix)
return URIRef(obj)
if ':' in str(obj):
presumed_prefix, info = obj.split(':', 1)
namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix)
if namespace: return namespace[info]
def process_obj(self, obj: Union[URIRef, Literal, str]) -> Union[URIRef, Literal]:
    """ Coerce an entity object into the node type a triple expects.

    Containers are rejected outright; existing rdflib terms pass through
    (registering any known namespace found in them); http(s)-looking strings
    longer than 8 characters become URIRefs; strings with a resolvable
    'prefix:rest' qname are expanded; everything else falls back to a
    plain Literal.

    Args:
        obj: Entity object to be converted to its appropriate node type.

    Returns:
        URIRef or Literal form of the object provided.

    Raises:
        SystemExit: when *obj* is a dict or list, which would be silently
            stringified into broken data -- pass one object at a time.
    """
    if isinstance(obj, (dict, list)):
        exit(str(obj) + ': should be str or intended to be a URIRef or Literal.')
    if isinstance(obj, (Literal, URIRef)):
        # Already a term: just make sure its namespace is registered.
        known = self.find_prefix(obj)
        if known:
            self.process_prefix(known)
        return obj
    if len(obj) > 8 and 'http' == obj[:4] and '://' in obj and ' ' not in obj:
        known = self.find_prefix(obj)
        if known:
            self.process_prefix(known)
        return URIRef(obj)
    if ':' in str(obj):
        maybe_prefix, rest = obj.split(':', 1)
        expansion = self.process_prefix(maybe_prefix)
        if expansion:
            return expansion[rest]
    return Literal(obj)
"def",
"process_obj",
"(",
"self",
",",
"obj",
":",
"Union",
"[",
"URIRef",
",",
"Literal",
",",
"str",
"]",
")",
"->",
"Union",
"[",
"URIRef",
",",
"Literal",
"]",
":",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
"or",
"isinstance",
"(",
"ob... | Gives component the proper node type
Args:
obj: Entity object to be converted to its appropriate node type
Returns:
URIRef or Literal type of the object provided.
Raises:
SystemExit: If object is a dict or list it becomes str with broken data. Needs to
come in one object at a time. | [
"Gives",
"component",
"the",
"proper",
"node",
"type"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L239-L271 | train | 39,098 |
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | SimpleGraph.remove_triple | def remove_triple(
self,
subj: URIRef,
pred: URIRef,
obj: Union[URIRef, Literal]
) -> None:
""" Removes triple from rdflib Graph
You must input the triple in its URIRef or Literal form for each node exactly the way it
was inputed or it will not delete the triple.
Args:
subj: Entity subject to be removed it its the only node with this subject; else this is
just going to delete a desciption I.E. predicate_object of this entity.
pred: Entity predicate to be removed
obj: Entity object to be removed
"""
def remove_triple(
        self,
        subj: URIRef,
        pred: URIRef,
        obj: Union[URIRef, Literal]
    ) -> None:
    """ Delete the exact triple (subj, pred, obj) from the graph.

    Each node must be passed in the very URIRef/Literal form it was stored
    with, or the triple will not be found and nothing is removed. Removing
    the only triple for a subject removes the subject entirely; otherwise
    just that one description of the entity is dropped.

    Args:
        subj: Entity subject of the triple to remove
        pred: Entity predicate to be removed
        obj: Entity object to be removed
    """
    self.g.remove((subj, pred, obj))
"def",
"remove_triple",
"(",
"self",
",",
"subj",
":",
"URIRef",
",",
"pred",
":",
"URIRef",
",",
"obj",
":",
"Union",
"[",
"URIRef",
",",
"Literal",
"]",
")",
"->",
"None",
":",
"self",
".",
"g",
".",
"remove",
"(",
"(",
"subj",
",",
"pred",
","... | Removes triple from rdflib Graph
You must input the triple in its URIRef or Literal form for each node exactly the way it
was inputed or it will not delete the triple.
Args:
subj: Entity subject to be removed it its the only node with this subject; else this is
just going to delete a desciption I.E. predicate_object of this entity.
pred: Entity predicate to be removed
obj: Entity object to be removed | [
"Removes",
"triple",
"from",
"rdflib",
"Graph"
] | 3d913db29c177db39151592909a4f56170ef8b35 | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L291-L308 | train | 39,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.