repository_name stringlengths 5 67 | func_path_in_repository stringlengths 4 234 | func_name stringlengths 0 314 | whole_func_string stringlengths 52 3.87M | language stringclasses 6
values | func_code_string stringlengths 52 3.87M | func_code_tokens listlengths 15 672k | func_documentation_string stringlengths 1 47.2k | func_documentation_tokens listlengths 1 3.92k | split_name stringclasses 1
value | func_code_url stringlengths 85 339 |
|---|---|---|---|---|---|---|---|---|---|---|
biocore/burrito-fillings | bfillings/usearch.py | get_retained_chimeras | def get_retained_chimeras(output_fp_de_novo_nonchimeras,
output_fp_ref_nonchimeras,
output_combined_fp,
chimeras_retention='union'):
""" Gets union or intersection of two supplied fasta files
output_fp_de_novo_nonchimeras: filepath of nonchimeras from de novo
usearch detection.
output_fp_ref_nonchimeras: filepath of nonchimeras from reference based
usearch detection.
output_combined_fp: filepath to write retained sequences to.
chimeras_retention: accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection)."""
de_novo_non_chimeras = []
reference_non_chimeras = []
de_novo_nonchimeras_f = open(output_fp_de_novo_nonchimeras, "U")
reference_nonchimeras_f = open(output_fp_ref_nonchimeras, "U")
output_combined_f = open(output_combined_fp, "w")
for label, seq in parse_fasta(de_novo_nonchimeras_f):
de_novo_non_chimeras.append(label)
de_novo_nonchimeras_f.close()
for label, seq in parse_fasta(reference_nonchimeras_f):
reference_non_chimeras.append(label)
reference_nonchimeras_f.close()
de_novo_non_chimeras = set(de_novo_non_chimeras)
reference_non_chimeras = set(reference_non_chimeras)
if chimeras_retention == 'union':
all_non_chimeras = de_novo_non_chimeras.union(reference_non_chimeras)
elif chimeras_retention == 'intersection':
all_non_chimeras =\
de_novo_non_chimeras.intersection(reference_non_chimeras)
de_novo_nonchimeras_f = open(output_fp_de_novo_nonchimeras, "U")
reference_nonchimeras_f = open(output_fp_ref_nonchimeras, "U")
# Save a list of already-written labels
labels_written = []
for label, seq in parse_fasta(de_novo_nonchimeras_f):
if label in all_non_chimeras:
if label not in labels_written:
output_combined_f.write('>%s\n%s\n' % (label, seq))
labels_written.append(label)
de_novo_nonchimeras_f.close()
for label, seq in parse_fasta(reference_nonchimeras_f):
if label in all_non_chimeras:
if label not in labels_written:
output_combined_f.write('>%s\n%s\n' % (label, seq))
labels_written.append(label)
reference_nonchimeras_f.close()
output_combined_f.close()
return output_combined_fp | python | def get_retained_chimeras(output_fp_de_novo_nonchimeras,
output_fp_ref_nonchimeras,
output_combined_fp,
chimeras_retention='union'):
""" Gets union or intersection of two supplied fasta files
output_fp_de_novo_nonchimeras: filepath of nonchimeras from de novo
usearch detection.
output_fp_ref_nonchimeras: filepath of nonchimeras from reference based
usearch detection.
output_combined_fp: filepath to write retained sequences to.
chimeras_retention: accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection)."""
de_novo_non_chimeras = []
reference_non_chimeras = []
de_novo_nonchimeras_f = open(output_fp_de_novo_nonchimeras, "U")
reference_nonchimeras_f = open(output_fp_ref_nonchimeras, "U")
output_combined_f = open(output_combined_fp, "w")
for label, seq in parse_fasta(de_novo_nonchimeras_f):
de_novo_non_chimeras.append(label)
de_novo_nonchimeras_f.close()
for label, seq in parse_fasta(reference_nonchimeras_f):
reference_non_chimeras.append(label)
reference_nonchimeras_f.close()
de_novo_non_chimeras = set(de_novo_non_chimeras)
reference_non_chimeras = set(reference_non_chimeras)
if chimeras_retention == 'union':
all_non_chimeras = de_novo_non_chimeras.union(reference_non_chimeras)
elif chimeras_retention == 'intersection':
all_non_chimeras =\
de_novo_non_chimeras.intersection(reference_non_chimeras)
de_novo_nonchimeras_f = open(output_fp_de_novo_nonchimeras, "U")
reference_nonchimeras_f = open(output_fp_ref_nonchimeras, "U")
# Save a list of already-written labels
labels_written = []
for label, seq in parse_fasta(de_novo_nonchimeras_f):
if label in all_non_chimeras:
if label not in labels_written:
output_combined_f.write('>%s\n%s\n' % (label, seq))
labels_written.append(label)
de_novo_nonchimeras_f.close()
for label, seq in parse_fasta(reference_nonchimeras_f):
if label in all_non_chimeras:
if label not in labels_written:
output_combined_f.write('>%s\n%s\n' % (label, seq))
labels_written.append(label)
reference_nonchimeras_f.close()
output_combined_f.close()
return output_combined_fp | [
"def",
"get_retained_chimeras",
"(",
"output_fp_de_novo_nonchimeras",
",",
"output_fp_ref_nonchimeras",
",",
"output_combined_fp",
",",
"chimeras_retention",
"=",
"'union'",
")",
":",
"de_novo_non_chimeras",
"=",
"[",
"]",
"reference_non_chimeras",
"=",
"[",
"]",
"de_novo... | Gets union or intersection of two supplied fasta files
output_fp_de_novo_nonchimeras: filepath of nonchimeras from de novo
usearch detection.
output_fp_ref_nonchimeras: filepath of nonchimeras from reference based
usearch detection.
output_combined_fp: filepath to write retained sequences to.
chimeras_retention: accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection). | [
"Gets",
"union",
"or",
"intersection",
"of",
"two",
"supplied",
"fasta",
"files"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L1039-L1100 |
biocore/burrito-fillings | bfillings/usearch.py | assign_reads_to_otus | def assign_reads_to_otus(original_fasta,
filtered_fasta,
output_filepath=None,
log_name="assign_reads_to_otus.log",
perc_id_blast=0.97,
global_alignment=True,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Uses original fasta file, blasts to assign reads to filtered fasta
original_fasta = filepath to original query fasta
filtered_fasta = filepath to enumerated, filtered fasta
output_filepath = output path to clusters (uc) file
log_name = string specifying output log name
perc_id_blast = percent ID for blasting original seqs against filtered set
usersort = Enable if input fasta not sorted by length purposefully, lest
usearch will raise an error. In post chimera checked sequences, the seqs
are sorted by abundance, so this should be set to True.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
# Not sure if I feel confortable using blast as a way to recapitulate
# original read ids....
if not output_filepath:
_, output_filepath = mkstemp(prefix='assign_reads_to_otus',
suffix='.uc')
log_filepath = join(working_dir, log_name)
params = {'--id': perc_id_blast,
'--global': global_alignment}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
data = {'--query': original_fasta,
'--db': filtered_fasta,
'--uc': output_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
return app_result, output_filepath | python | def assign_reads_to_otus(original_fasta,
filtered_fasta,
output_filepath=None,
log_name="assign_reads_to_otus.log",
perc_id_blast=0.97,
global_alignment=True,
HALT_EXEC=False,
save_intermediate_files=False,
remove_usearch_logs=False,
working_dir=None):
""" Uses original fasta file, blasts to assign reads to filtered fasta
original_fasta = filepath to original query fasta
filtered_fasta = filepath to enumerated, filtered fasta
output_filepath = output path to clusters (uc) file
log_name = string specifying output log name
perc_id_blast = percent ID for blasting original seqs against filtered set
usersort = Enable if input fasta not sorted by length purposefully, lest
usearch will raise an error. In post chimera checked sequences, the seqs
are sorted by abundance, so this should be set to True.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
"""
# Not sure if I feel confortable using blast as a way to recapitulate
# original read ids....
if not output_filepath:
_, output_filepath = mkstemp(prefix='assign_reads_to_otus',
suffix='.uc')
log_filepath = join(working_dir, log_name)
params = {'--id': perc_id_blast,
'--global': global_alignment}
app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
data = {'--query': original_fasta,
'--db': filtered_fasta,
'--uc': output_filepath
}
if not remove_usearch_logs:
data['--log'] = log_filepath
app_result = app(data)
return app_result, output_filepath | [
"def",
"assign_reads_to_otus",
"(",
"original_fasta",
",",
"filtered_fasta",
",",
"output_filepath",
"=",
"None",
",",
"log_name",
"=",
"\"assign_reads_to_otus.log\"",
",",
"perc_id_blast",
"=",
"0.97",
",",
"global_alignment",
"=",
"True",
",",
"HALT_EXEC",
"=",
"F... | Uses original fasta file, blasts to assign reads to filtered fasta
original_fasta = filepath to original query fasta
filtered_fasta = filepath to enumerated, filtered fasta
output_filepath = output path to clusters (uc) file
log_name = string specifying output log name
perc_id_blast = percent ID for blasting original seqs against filtered set
usersort = Enable if input fasta not sorted by length purposefully, lest
usearch will raise an error. In post chimera checked sequences, the seqs
are sorted by abundance, so this should be set to True.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created. | [
"Uses",
"original",
"fasta",
"file",
"blasts",
"to",
"assign",
"reads",
"to",
"filtered",
"fasta"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L1103-L1150 |
biocore/burrito-fillings | bfillings/usearch.py | usearch_qf | def usearch_qf(
fasta_filepath,
refseqs_fp=None,
output_dir=None,
percent_id=0.97,
percent_id_err=0.97,
minsize=4,
abundance_skew=2.0,
db_filepath=None,
rev=False,
label_prefix="",
label_suffix="",
retain_label_as_comment=False,
count_start=0,
perc_id_blast=0.97,
save_intermediate_files=False,
HALT_EXEC=False,
global_alignment=True,
sizein=True,
sizeout=True,
w=64,
slots=16769023,
maxrejects=64,
minlen=64,
de_novo_chimera_detection=True,
derep_fullseq=False,
reference_chimera_detection=True,
cluster_size_filtering=True,
remove_usearch_logs=False,
usersort=True,
suppress_new_clusters=False,
chimeras_retention="union",
verbose=False
):
""" Main convenience wrapper for using usearch to filter/cluster seqs
The complete 'usearch_qf' process is a multistep process with many calls
to usearch with various parameters. It is likely to change from the
original implementation. A lot.
fasta_filepath = fasta filepath to filtering/clustering (e.g., output
seqs.fna file from split_libraries.py)
refseqs_fp = fasta filepath for ref-based otu picking.
output_dir = directory to store the otu mapping file, as well logs and
the intermediate files created if save_intermediate_files is True.
percent_ID = percent ID for clustering sequences.
percent_ID_err = percent ID for filtering out chimeras
minsize = Minimum size of cluster for retention after chimera removal.
abundance_skew = threshold setting for chimera removal with de novo
chimera detection.
db_filepath = filepath of reference fasta sequence set for ref based
chimera detection.
rev = search plus and minus strands of sequences, used in ref based chimera
detection.
label_prefix = optional prefix added to filtered fasta file.
label_suffix = optional suffix added to filtered fasta file.
retain_label_as_comment = option to add usearch generated label to
enumerated fasta labels.
count_start = integer to begin counting at for sequence enumeration.
perc_id_blast = percent identity setting for using blast algorithm to
assign original sequence labels to filtered fasta.
global_alignment = Setting for assignment of original seq labels to filtered
seqs.
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
save_intermediate_files = retain all the intermediate files created during
this process.
minlen = (not specified in usearch helpstring), but seems like a good bet
that this refers to the minimum length of the sequences for dereplication.
HALT_EXEC = used to debug app controller problems.
de_novo_chimera_detection = If True, will detect chimeras de novo
reference_chimera_detection = If True, will detect chimeras ref based
cluster_size_filtering = If True, will filter OTUs according to seq counts.
remove_usearch_logs = If True, will not call the --log function for each
usearch call.
usersort = Used for specifying custom sorting (i.e., non-length based
sorting) with usearch/uclust.
suppress_new_clusters = with reference based OTU picking, if enabled,
will prevent new clusters that do not match the reference from being
clustered.
chimeras_retention = accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection).
"""
# Save a list of intermediate filepaths in case they are to be removed.
intermediate_files = []
# Need absolute paths to avoid problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
fasta_filepath = abspath(fasta_filepath)
try:
if verbose:
print "Sorting sequences by length..."
# Sort seqs by length
app_result, output_filepath_len_sorted =\
usearch_fasta_sort_from_filepath(fasta_filepath, output_filepath=
join(
output_dir,
'len_sorted.fasta'),
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath_len_sorted)
if verbose:
print "Dereplicating sequences..."
# Dereplicate sequences
app_result, output_filepath_dereplicated =\
usearch_dereplicate_exact_subseqs(output_filepath_len_sorted,
output_filepath=join(
output_dir,
'dereplicated_seqs.fasta'),
minlen=minlen, w=w, slots=slots, sizeout=sizeout,
maxrejects=maxrejects, save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath_dereplicated)
if verbose:
print "Sorting by abundance..."
# Sort by abundance, initially no filter based on seqs/otu
app_result, output_fp =\
usearch_sort_by_abundance(output_filepath_dereplicated,
output_filepath=join(
output_dir,
'abundance_sorted.fasta'),
usersort=True, sizein=sizein, sizeout=sizeout, minsize=0,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp)
if verbose:
print "Clustering sequences for error correction..."
# Create .uc file of clusters file, to identify original sequences
# later
output_uc_filepath = output_dir + 'err_corrected_clusters.uc'
app_result, error_clustered_output_fp =\
usearch_cluster_error_correction(output_fp,
output_filepath=join(output_dir,
'clustered_error_corrected.fasta'),
output_uc_filepath=output_uc_filepath,
usersort=True, percent_id_err=percent_id_err, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
remove_usearch_logs=remove_usearch_logs,
save_intermediate_files=save_intermediate_files,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(error_clustered_output_fp)
intermediate_files.append(output_uc_filepath)
# Series of conditional tests, using generic 'output_fp' name so the
# conditional filtering, if any/all are selected, do not matter.
if de_novo_chimera_detection:
if verbose:
print "Performing de novo chimera detection..."
app_result, output_fp_de_novo_nonchimeras =\
usearch_chimera_filter_de_novo(error_clustered_output_fp,
abundance_skew=abundance_skew, output_chimera_filepath=
join(
output_dir,
'de_novo_chimeras.fasta'),
output_non_chimera_filepath=join(
output_dir,
'de_novo_non_chimeras.fasta'), usersort=True,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp_de_novo_nonchimeras)
output_fp = output_fp_de_novo_nonchimeras
if reference_chimera_detection:
if verbose:
print "Performing reference based chimera detection..."
app_result, output_fp_ref_nonchimeras =\
usearch_chimera_filter_ref_based(error_clustered_output_fp,
db_filepath=db_filepath, output_chimera_filepath=
join(
output_dir,
'reference_chimeras.fasta'),
output_non_chimera_filepath=
join(output_dir, 'reference_non_chimeras.fasta'), usersort=True,
save_intermediate_files=save_intermediate_files, rev=rev,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp_ref_nonchimeras)
output_fp = output_fp_ref_nonchimeras
# get intersection or union if both ref and de novo chimera detection
if de_novo_chimera_detection and reference_chimera_detection:
if verbose:
print "Finding %s of non-chimeras..." % chimeras_retention
output_fp = get_retained_chimeras(
output_fp_de_novo_nonchimeras, output_fp_ref_nonchimeras,
output_combined_fp=
join(output_dir, 'combined_non_chimeras.fasta'),
chimeras_retention=chimeras_retention)
intermediate_files.append(output_fp)
if cluster_size_filtering:
# Test for empty filepath following filters, raise error if all seqs
# have been removed
if verbose:
print "Filtering by cluster size..."
# chimera detection was not performed, use output file of step 4 as input
# to filtering by cluster size
if not (reference_chimera_detection and de_novo_chimera_detection):
output_fp = error_clustered_output_fp
app_result, output_fp =\
usearch_sort_by_abundance(output_fp, output_filepath=
join(output_dir, 'abundance_sorted_minsize_' + str(minsize) +
'.fasta'),
minsize=minsize, sizein=sizein, sizeout=sizeout,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp)
# cluster seqs
# Should we add in option to use alternative OTU picking here?
# Seems like it will be a bit of a mess...maybe after we determine
# if usearch_qf should become standard.
if refseqs_fp:
if verbose:
print "Clustering against reference sequences..."
app_result, output_filepath =\
usearch_cluster_seqs_ref(output_fp, output_filepath=
join(
output_dir,
'ref_clustered_seqs.uc'),
percent_id=percent_id, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
suppress_new_clusters=suppress_new_clusters, refseqs_fp=refseqs_fp,
output_dir=output_dir, working_dir=output_dir, rev=rev,
HALT_EXEC=HALT_EXEC
)
else:
if verbose:
print "De novo clustering sequences..."
app_result, output_filepath =\
usearch_cluster_seqs(output_fp, output_filepath=
join(output_dir, 'clustered_seqs.fasta'),
percent_id=percent_id, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath)
# Enumerate the OTUs in the clusters
if not suppress_new_clusters:
if verbose:
print "Enumerating OTUs..."
output_filepath =\
enumerate_otus(output_filepath, output_filepath=
join(output_dir, 'enumerated_otus.fasta'),
label_prefix=label_prefix,
label_suffix=label_suffix, count_start=count_start,
retain_label_as_comment=retain_label_as_comment)
intermediate_files.append(output_filepath)
# Get original sequence label identities
if verbose:
print "Assigning sequences to clusters..."
app_result, clusters_file = assign_reads_to_otus(fasta_filepath,
filtered_fasta=output_filepath, output_filepath=join(
output_dir,
'assign_reads_to_otus.uc'), perc_id_blast=percent_id,
global_alignment=global_alignment,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(clusters_file)
except ApplicationError:
raise ApplicationError('Error running usearch. Possible causes are '
'unsupported version (current supported version is usearch ' +
'v5.2.236) is installed or improperly formatted input file was ' +
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch not found, is it properly ' +
'installed?')
# Get dict of clusters, list of failures
# Set OTU ID field to 9 for the case of closed reference OTU picking
if suppress_new_clusters:
otu_id_field = 9
else:
otu_id_field = 1
clusters, failures = clusters_from_blast_uc_file(open(clusters_file, "U"),
otu_id_field)
# Remove temp files unless user specifies output filepath
if not save_intermediate_files:
remove_files(intermediate_files)
return clusters, failures | python | def usearch_qf(
fasta_filepath,
refseqs_fp=None,
output_dir=None,
percent_id=0.97,
percent_id_err=0.97,
minsize=4,
abundance_skew=2.0,
db_filepath=None,
rev=False,
label_prefix="",
label_suffix="",
retain_label_as_comment=False,
count_start=0,
perc_id_blast=0.97,
save_intermediate_files=False,
HALT_EXEC=False,
global_alignment=True,
sizein=True,
sizeout=True,
w=64,
slots=16769023,
maxrejects=64,
minlen=64,
de_novo_chimera_detection=True,
derep_fullseq=False,
reference_chimera_detection=True,
cluster_size_filtering=True,
remove_usearch_logs=False,
usersort=True,
suppress_new_clusters=False,
chimeras_retention="union",
verbose=False
):
""" Main convenience wrapper for using usearch to filter/cluster seqs
The complete 'usearch_qf' process is a multistep process with many calls
to usearch with various parameters. It is likely to change from the
original implementation. A lot.
fasta_filepath = fasta filepath to filtering/clustering (e.g., output
seqs.fna file from split_libraries.py)
refseqs_fp = fasta filepath for ref-based otu picking.
output_dir = directory to store the otu mapping file, as well logs and
the intermediate files created if save_intermediate_files is True.
percent_ID = percent ID for clustering sequences.
percent_ID_err = percent ID for filtering out chimeras
minsize = Minimum size of cluster for retention after chimera removal.
abundance_skew = threshold setting for chimera removal with de novo
chimera detection.
db_filepath = filepath of reference fasta sequence set for ref based
chimera detection.
rev = search plus and minus strands of sequences, used in ref based chimera
detection.
label_prefix = optional prefix added to filtered fasta file.
label_suffix = optional suffix added to filtered fasta file.
retain_label_as_comment = option to add usearch generated label to
enumerated fasta labels.
count_start = integer to begin counting at for sequence enumeration.
perc_id_blast = percent identity setting for using blast algorithm to
assign original sequence labels to filtered fasta.
global_alignment = Setting for assignment of original seq labels to filtered
seqs.
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
save_intermediate_files = retain all the intermediate files created during
this process.
minlen = (not specified in usearch helpstring), but seems like a good bet
that this refers to the minimum length of the sequences for dereplication.
HALT_EXEC = used to debug app controller problems.
de_novo_chimera_detection = If True, will detect chimeras de novo
reference_chimera_detection = If True, will detect chimeras ref based
cluster_size_filtering = If True, will filter OTUs according to seq counts.
remove_usearch_logs = If True, will not call the --log function for each
usearch call.
usersort = Used for specifying custom sorting (i.e., non-length based
sorting) with usearch/uclust.
suppress_new_clusters = with reference based OTU picking, if enabled,
will prevent new clusters that do not match the reference from being
clustered.
chimeras_retention = accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection).
"""
# Save a list of intermediate filepaths in case they are to be removed.
intermediate_files = []
# Need absolute paths to avoid problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
fasta_filepath = abspath(fasta_filepath)
try:
if verbose:
print "Sorting sequences by length..."
# Sort seqs by length
app_result, output_filepath_len_sorted =\
usearch_fasta_sort_from_filepath(fasta_filepath, output_filepath=
join(
output_dir,
'len_sorted.fasta'),
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath_len_sorted)
if verbose:
print "Dereplicating sequences..."
# Dereplicate sequences
app_result, output_filepath_dereplicated =\
usearch_dereplicate_exact_subseqs(output_filepath_len_sorted,
output_filepath=join(
output_dir,
'dereplicated_seqs.fasta'),
minlen=minlen, w=w, slots=slots, sizeout=sizeout,
maxrejects=maxrejects, save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath_dereplicated)
if verbose:
print "Sorting by abundance..."
# Sort by abundance, initially no filter based on seqs/otu
app_result, output_fp =\
usearch_sort_by_abundance(output_filepath_dereplicated,
output_filepath=join(
output_dir,
'abundance_sorted.fasta'),
usersort=True, sizein=sizein, sizeout=sizeout, minsize=0,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp)
if verbose:
print "Clustering sequences for error correction..."
# Create .uc file of clusters file, to identify original sequences
# later
output_uc_filepath = output_dir + 'err_corrected_clusters.uc'
app_result, error_clustered_output_fp =\
usearch_cluster_error_correction(output_fp,
output_filepath=join(output_dir,
'clustered_error_corrected.fasta'),
output_uc_filepath=output_uc_filepath,
usersort=True, percent_id_err=percent_id_err, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
remove_usearch_logs=remove_usearch_logs,
save_intermediate_files=save_intermediate_files,
working_dir=output_dir, HALT_EXEC=HALT_EXEC)
intermediate_files.append(error_clustered_output_fp)
intermediate_files.append(output_uc_filepath)
# Series of conditional tests, using generic 'output_fp' name so the
# conditional filtering, if any/all are selected, do not matter.
if de_novo_chimera_detection:
if verbose:
print "Performing de novo chimera detection..."
app_result, output_fp_de_novo_nonchimeras =\
usearch_chimera_filter_de_novo(error_clustered_output_fp,
abundance_skew=abundance_skew, output_chimera_filepath=
join(
output_dir,
'de_novo_chimeras.fasta'),
output_non_chimera_filepath=join(
output_dir,
'de_novo_non_chimeras.fasta'), usersort=True,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp_de_novo_nonchimeras)
output_fp = output_fp_de_novo_nonchimeras
if reference_chimera_detection:
if verbose:
print "Performing reference based chimera detection..."
app_result, output_fp_ref_nonchimeras =\
usearch_chimera_filter_ref_based(error_clustered_output_fp,
db_filepath=db_filepath, output_chimera_filepath=
join(
output_dir,
'reference_chimeras.fasta'),
output_non_chimera_filepath=
join(output_dir, 'reference_non_chimeras.fasta'), usersort=True,
save_intermediate_files=save_intermediate_files, rev=rev,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp_ref_nonchimeras)
output_fp = output_fp_ref_nonchimeras
# get intersection or union if both ref and de novo chimera detection
if de_novo_chimera_detection and reference_chimera_detection:
if verbose:
print "Finding %s of non-chimeras..." % chimeras_retention
output_fp = get_retained_chimeras(
output_fp_de_novo_nonchimeras, output_fp_ref_nonchimeras,
output_combined_fp=
join(output_dir, 'combined_non_chimeras.fasta'),
chimeras_retention=chimeras_retention)
intermediate_files.append(output_fp)
if cluster_size_filtering:
# Test for empty filepath following filters, raise error if all seqs
# have been removed
if verbose:
print "Filtering by cluster size..."
# chimera detection was not performed, use output file of step 4 as input
# to filtering by cluster size
if not (reference_chimera_detection and de_novo_chimera_detection):
output_fp = error_clustered_output_fp
app_result, output_fp =\
usearch_sort_by_abundance(output_fp, output_filepath=
join(output_dir, 'abundance_sorted_minsize_' + str(minsize) +
'.fasta'),
minsize=minsize, sizein=sizein, sizeout=sizeout,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_fp)
# cluster seqs
# Should we add in option to use alternative OTU picking here?
# Seems like it will be a bit of a mess...maybe after we determine
# if usearch_qf should become standard.
if refseqs_fp:
if verbose:
print "Clustering against reference sequences..."
app_result, output_filepath =\
usearch_cluster_seqs_ref(output_fp, output_filepath=
join(
output_dir,
'ref_clustered_seqs.uc'),
percent_id=percent_id, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs,
suppress_new_clusters=suppress_new_clusters, refseqs_fp=refseqs_fp,
output_dir=output_dir, working_dir=output_dir, rev=rev,
HALT_EXEC=HALT_EXEC
)
else:
if verbose:
print "De novo clustering sequences..."
app_result, output_filepath =\
usearch_cluster_seqs(output_fp, output_filepath=
join(output_dir, 'clustered_seqs.fasta'),
percent_id=percent_id, sizein=sizein,
sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
save_intermediate_files=save_intermediate_files,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(output_filepath)
# Enumerate the OTUs in the clusters
if not suppress_new_clusters:
if verbose:
print "Enumerating OTUs..."
output_filepath =\
enumerate_otus(output_filepath, output_filepath=
join(output_dir, 'enumerated_otus.fasta'),
label_prefix=label_prefix,
label_suffix=label_suffix, count_start=count_start,
retain_label_as_comment=retain_label_as_comment)
intermediate_files.append(output_filepath)
# Get original sequence label identities
if verbose:
print "Assigning sequences to clusters..."
app_result, clusters_file = assign_reads_to_otus(fasta_filepath,
filtered_fasta=output_filepath, output_filepath=join(
output_dir,
'assign_reads_to_otus.uc'), perc_id_blast=percent_id,
global_alignment=global_alignment,
remove_usearch_logs=remove_usearch_logs, working_dir=output_dir,
HALT_EXEC=HALT_EXEC)
intermediate_files.append(clusters_file)
except ApplicationError:
raise ApplicationError('Error running usearch. Possible causes are '
'unsupported version (current supported version is usearch ' +
'v5.2.236) is installed or improperly formatted input file was ' +
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch not found, is it properly ' +
'installed?')
# Get dict of clusters, list of failures
# Set OTU ID field to 9 for the case of closed reference OTU picking
if suppress_new_clusters:
otu_id_field = 9
else:
otu_id_field = 1
clusters, failures = clusters_from_blast_uc_file(open(clusters_file, "U"),
otu_id_field)
# Remove temp files unless user specifies output filepath
if not save_intermediate_files:
remove_files(intermediate_files)
return clusters, failures | [
"def",
"usearch_qf",
"(",
"fasta_filepath",
",",
"refseqs_fp",
"=",
"None",
",",
"output_dir",
"=",
"None",
",",
"percent_id",
"=",
"0.97",
",",
"percent_id_err",
"=",
"0.97",
",",
"minsize",
"=",
"4",
",",
"abundance_skew",
"=",
"2.0",
",",
"db_filepath",
... | Main convenience wrapper for using usearch to filter/cluster seqs
The complete 'usearch_qf' process is a multistep process with many calls
to usearch with various parameters. It is likely to change from the
original implementation. A lot.
fasta_filepath = fasta filepath to filtering/clustering (e.g., output
seqs.fna file from split_libraries.py)
refseqs_fp = fasta filepath for ref-based otu picking.
output_dir = directory to store the otu mapping file, as well logs and
the intermediate files created if save_intermediate_files is True.
percent_ID = percent ID for clustering sequences.
percent_ID_err = percent ID for filtering out chimeras
minsize = Minimum size of cluster for retention after chimera removal.
abundance_skew = threshold setting for chimera removal with de novo
chimera detection.
db_filepath = filepath of reference fasta sequence set for ref based
chimera detection.
rev = search plus and minus strands of sequences, used in ref based chimera
detection.
label_prefix = optional prefix added to filtered fasta file.
label_suffix = optional suffix added to filtered fasta file.
retain_label_as_comment = option to add usearch generated label to
enumerated fasta labels.
count_start = integer to begin counting at for sequence enumeration.
perc_id_blast = percent identity setting for using blast algorithm to
assign original sequence labels to filtered fasta.
global_alignment = Setting for assignment of original seq labels to filtered
seqs.
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
save_intermediate_files = retain all the intermediate files created during
this process.
minlen = (not specified in usearch helpstring), but seems like a good bet
that this refers to the minimum length of the sequences for dereplication.
HALT_EXEC = used to debug app controller problems.
de_novo_chimera_detection = If True, will detect chimeras de novo
reference_chimera_detection = If True, will detect chimeras ref based
cluster_size_filtering = If True, will filter OTUs according to seq counts.
remove_usearch_logs = If True, will not call the --log function for each
usearch call.
usersort = Used for specifying custom sorting (i.e., non-length based
sorting) with usearch/uclust.
suppress_new_clusters = with reference based OTU picking, if enabled,
will prevent new clusters that do not match the reference from being
clustered.
chimeras_retention = accepts either 'intersection' or 'union'. Will test
for chimeras against the full input error clustered sequence set, and
retain sequences flagged as non-chimeras by either (union) or
only those flagged as non-chimeras by both (intersection). | [
"Main",
"convenience",
"wrapper",
"for",
"using",
"usearch",
"to",
"filter",
"/",
"cluster",
"seqs"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L1153-L1476 |
biocore/burrito-fillings | bfillings/usearch.py | usearch61_ref_cluster | def usearch61_ref_cluster(seq_path,
refseqs_fp,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
suppress_new_clusters=False,
threads=1.0,
HALT_EXEC=False
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for reference-based clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds (only applies
when doing open reference de novo clustering)
suppress_new_clusters: If True, will allow de novo clustering on top of
reference clusters.
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
Description of analysis workflows
---------------------------------
closed-reference approach:
dereplicate sequences first, do reference based clustering,
merge clusters/failures and dereplicated data,
write OTU mapping and failures file.
open-reference approach:
dereplicate sequences first, do reference based clustering, parse failures,
sort failures fasta according to chosen method, cluster failures, merge
reference clustering results/de novo results/dereplicated data, write
OTU mapping file.
Dereplication should save processing time for large datasets.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = join(abspath(output_dir), '')
seq_path = abspath(seq_path)
try:
if verbose:
print "Presorting sequences according to abundance..."
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
'abundance_sorted.fna'),
output_uc_filepath=join(
output_dir,
'abundance_sorted.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
if verbose:
print "Performing reference based clustering..."
clusters_fp, app_result = usearch61_cluster_ref(intermediate_fasta,
refseqs_fp, percent_id, rev, minlen, output_dir,
remove_usearch_logs, wordlength, usearch61_maxrejects,
usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
'ref_clustered.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix="",
ref_clustered=True)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(clusters,
dereplicated_clusters)
failures = merge_failures_dereplicated_seqs(failures,
dereplicated_clusters)
if not suppress_new_clusters and failures:
if verbose:
print "Parsing out sequences that failed to cluster..."
failures_fasta = parse_usearch61_failures(seq_path, set(failures),
output_fasta_fp=join(output_dir, "failures_parsed.fna"))
if not save_intermediate_files:
files_to_remove.append(failures_fasta)
denovo_clusters = usearch61_denovo_cluster(failures_fasta,
percent_id, rev, save_intermediate_files, minlen, output_dir,
remove_usearch_logs, verbose, wordlength, usearch_fast_cluster,
usearch61_sort_method, otu_prefix, usearch61_maxrejects,
usearch61_maxaccepts, sizeorder, threads, HALT_EXEC)
failures = []
# Merge ref and denovo clusters
clusters.update(denovo_clusters)
except ApplicationError:
raise ApplicationError('Error running usearch61. Possible causes are '
'unsupported version (current supported version is usearch '
'v6.1.544) is installed or improperly formatted input file was '
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly '
'installed?')
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters, failures | python | def usearch61_ref_cluster(seq_path,
refseqs_fp,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
suppress_new_clusters=False,
threads=1.0,
HALT_EXEC=False
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for reference-based clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds (only applies
when doing open reference de novo clustering)
suppress_new_clusters: If True, will allow de novo clustering on top of
reference clusters.
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
Description of analysis workflows
---------------------------------
closed-reference approach:
dereplicate sequences first, do reference based clustering,
merge clusters/failures and dereplicated data,
write OTU mapping and failures file.
open-reference approach:
dereplicate sequences first, do reference based clustering, parse failures,
sort failures fasta according to chosen method, cluster failures, merge
reference clustering results/de novo results/dereplicated data, write
OTU mapping file.
Dereplication should save processing time for large datasets.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = join(abspath(output_dir), '')
seq_path = abspath(seq_path)
try:
if verbose:
print "Presorting sequences according to abundance..."
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
'abundance_sorted.fna'),
output_uc_filepath=join(
output_dir,
'abundance_sorted.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
if verbose:
print "Performing reference based clustering..."
clusters_fp, app_result = usearch61_cluster_ref(intermediate_fasta,
refseqs_fp, percent_id, rev, minlen, output_dir,
remove_usearch_logs, wordlength, usearch61_maxrejects,
usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
'ref_clustered.uc'),
threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix="",
ref_clustered=True)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(clusters,
dereplicated_clusters)
failures = merge_failures_dereplicated_seqs(failures,
dereplicated_clusters)
if not suppress_new_clusters and failures:
if verbose:
print "Parsing out sequences that failed to cluster..."
failures_fasta = parse_usearch61_failures(seq_path, set(failures),
output_fasta_fp=join(output_dir, "failures_parsed.fna"))
if not save_intermediate_files:
files_to_remove.append(failures_fasta)
denovo_clusters = usearch61_denovo_cluster(failures_fasta,
percent_id, rev, save_intermediate_files, minlen, output_dir,
remove_usearch_logs, verbose, wordlength, usearch_fast_cluster,
usearch61_sort_method, otu_prefix, usearch61_maxrejects,
usearch61_maxaccepts, sizeorder, threads, HALT_EXEC)
failures = []
# Merge ref and denovo clusters
clusters.update(denovo_clusters)
except ApplicationError:
raise ApplicationError('Error running usearch61. Possible causes are '
'unsupported version (current supported version is usearch '
'v6.1.544) is installed or improperly formatted input file was '
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly '
'installed?')
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters, failures | [
"def",
"usearch61_ref_cluster",
"(",
"seq_path",
",",
"refseqs_fp",
",",
"percent_id",
"=",
"0.97",
",",
"rev",
"=",
"False",
",",
"save_intermediate_files",
"=",
"True",
",",
"minlen",
"=",
"64",
",",
"output_dir",
"=",
"'.'",
",",
"remove_usearch_logs",
"=",... | Returns dictionary of cluster IDs:seq IDs
Overall function for reference-based clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds (only applies
when doing open reference de novo clustering)
suppress_new_clusters: If True, will allow de novo clustering on top of
reference clusters.
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
Description of analysis workflows
---------------------------------
closed-reference approach:
dereplicate sequences first, do reference based clustering,
merge clusters/failures and dereplicated data,
write OTU mapping and failures file.
open-reference approach:
dereplicate sequences first, do reference based clustering, parse failures,
sort failures fasta according to chosen method, cluster failures, merge
reference clustering results/de novo results/dereplicated data, write
OTU mapping file.
Dereplication should save processing time for large datasets. | [
"Returns",
"dictionary",
"of",
"cluster",
"IDs",
":",
"seq",
"IDs"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L1710-L1857 |
biocore/burrito-fillings | bfillings/usearch.py | usearch61_denovo_cluster | def usearch61_denovo_cluster(seq_path,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
threads=1.0,
HALT_EXEC=False,
file_prefix="denovo_"
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for de novo clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
seq_path = abspath(seq_path)
try:
if verbose and usearch61_sort_method is not None and\
not usearch_fast_cluster:
print "Sorting sequences according to %s..." % usearch61_sort_method
# fast sorting option automatically performs length sorting
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
file_prefix + 'abundance_sorted.fna'),
output_uc_filepath=join(output_dir,
file_prefix + 'abundance_sorted.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
elif usearch61_sort_method == 'length' and not usearch_fast_cluster:
intermediate_fasta, app_result =\
sort_by_length_usearch61(seq_path, output_dir, minlen,
remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(output_dir,
file_prefix + 'length_sorted.fna'))
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
else:
intermediate_fasta = seq_path
if verbose:
print "Clustering sequences de novo..."
if usearch_fast_cluster:
clusters_fp, app_result = usearch61_fast_cluster(
intermediate_fasta,
percent_id, minlen, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
file_prefix + 'fast_clustered.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
else:
clusters_fp, app_result =\
usearch61_smallmem_cluster(intermediate_fasta, percent_id,
minlen, rev, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, sizeorder, HALT_EXEC,
output_uc_filepath=join(output_dir,
file_prefix + 'smallmem_clustered.uc'))
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
except ApplicationError:
raise ApplicationError('Error running usearch61. Possible causes are '
'unsupported version (current supported version is usearch ' +
'v6.1.544) is installed or improperly formatted input file was ' +
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly ' +
'installed?')
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
de_novo_clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters)
else:
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters | python | def usearch61_denovo_cluster(seq_path,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
threads=1.0,
HALT_EXEC=False,
file_prefix="denovo_"
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for de novo clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
seq_path = abspath(seq_path)
try:
if verbose and usearch61_sort_method is not None and\
not usearch_fast_cluster:
print "Sorting sequences according to %s..." % usearch61_sort_method
# fast sorting option automatically performs length sorting
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
file_prefix + 'abundance_sorted.fna'),
output_uc_filepath=join(output_dir,
file_prefix + 'abundance_sorted.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
elif usearch61_sort_method == 'length' and not usearch_fast_cluster:
intermediate_fasta, app_result =\
sort_by_length_usearch61(seq_path, output_dir, minlen,
remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(output_dir,
file_prefix + 'length_sorted.fna'))
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
else:
intermediate_fasta = seq_path
if verbose:
print "Clustering sequences de novo..."
if usearch_fast_cluster:
clusters_fp, app_result = usearch61_fast_cluster(
intermediate_fasta,
percent_id, minlen, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
file_prefix + 'fast_clustered.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
else:
clusters_fp, app_result =\
usearch61_smallmem_cluster(intermediate_fasta, percent_id,
minlen, rev, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, sizeorder, HALT_EXEC,
output_uc_filepath=join(output_dir,
file_prefix + 'smallmem_clustered.uc'))
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
except ApplicationError:
raise ApplicationError('Error running usearch61. Possible causes are '
'unsupported version (current supported version is usearch ' +
'v6.1.544) is installed or improperly formatted input file was ' +
'provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly ' +
'installed?')
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
de_novo_clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters)
else:
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters | [
"def",
"usearch61_denovo_cluster",
"(",
"seq_path",
",",
"percent_id",
"=",
"0.97",
",",
"rev",
"=",
"False",
",",
"save_intermediate_files",
"=",
"True",
",",
"minlen",
"=",
"64",
",",
"output_dir",
"=",
"'.'",
",",
"remove_usearch_logs",
"=",
"False",
",",
... | Returns dictionary of cluster IDs:seq IDs
Overall function for de novo clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution. | [
"Returns",
"dictionary",
"of",
"cluster",
"IDs",
":",
"seq",
"IDs"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L1860-L1991 |
biocore/burrito-fillings | bfillings/usearch.py | sort_by_abundance_usearch61 | def sort_by_abundance_usearch61(seq_path,
output_dir='.',
rev=False,
minlen=64,
remove_usearch_logs=False,
HALT_EXEC=False,
output_fna_filepath=None,
output_uc_filepath=None,
log_name="abundance_sorted.log",
threads=1.0):
""" usearch61 application call to sort fasta file by abundance.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
rev: enable reverse strand matching for clustering/sorting
minlen: minimum sequence length
remove_usearch_logs: Saves usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
output_uc_filepath: path to write usearch61 generated .uc file
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU
"""
if not output_fna_filepath:
_, output_fna_filepath = mkstemp(prefix='abundance_sorted',
suffix='.fna')
if not output_uc_filepath:
_, output_uc_filepath = mkstemp(prefix='abundance_sorted',
suffix='.uc')
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--sizeout': True,
'--derep_fulllength': seq_path,
'--output': output_fna_filepath,
'--uc': output_uc_filepath,
'--threads': threads
}
if rev:
params['--strand'] = 'both'
if not remove_usearch_logs:
params['--log'] = log_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return output_fna_filepath, output_uc_filepath, app_result | python | def sort_by_abundance_usearch61(seq_path,
output_dir='.',
rev=False,
minlen=64,
remove_usearch_logs=False,
HALT_EXEC=False,
output_fna_filepath=None,
output_uc_filepath=None,
log_name="abundance_sorted.log",
threads=1.0):
""" usearch61 application call to sort fasta file by abundance.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
rev: enable reverse strand matching for clustering/sorting
minlen: minimum sequence length
remove_usearch_logs: Saves usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
output_uc_filepath: path to write usearch61 generated .uc file
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU
"""
if not output_fna_filepath:
_, output_fna_filepath = mkstemp(prefix='abundance_sorted',
suffix='.fna')
if not output_uc_filepath:
_, output_uc_filepath = mkstemp(prefix='abundance_sorted',
suffix='.uc')
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--sizeout': True,
'--derep_fulllength': seq_path,
'--output': output_fna_filepath,
'--uc': output_uc_filepath,
'--threads': threads
}
if rev:
params['--strand'] = 'both'
if not remove_usearch_logs:
params['--log'] = log_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return output_fna_filepath, output_uc_filepath, app_result | [
"def",
"sort_by_abundance_usearch61",
"(",
"seq_path",
",",
"output_dir",
"=",
"'.'",
",",
"rev",
"=",
"False",
",",
"minlen",
"=",
"64",
",",
"remove_usearch_logs",
"=",
"False",
",",
"HALT_EXEC",
"=",
"False",
",",
"output_fna_filepath",
"=",
"None",
",",
... | usearch61 application call to sort fasta file by abundance.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
rev: enable reverse strand matching for clustering/sorting
minlen: minimum sequence length
remove_usearch_logs: Saves usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
output_uc_filepath: path to write usearch61 generated .uc file
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU | [
"usearch61",
"application",
"call",
"to",
"sort",
"fasta",
"file",
"by",
"abundance",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L1995-L2046 |
biocore/burrito-fillings | bfillings/usearch.py | sort_by_length_usearch61 | def sort_by_length_usearch61(seq_path,
output_dir=".",
minlen=64,
remove_usearch_logs=False,
HALT_EXEC=False,
output_fna_filepath=None,
log_name="length_sorted.log"):
""" usearch61 application call to sort fasta file by length.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
minlen: minimum sequence length
remove_usearch_logs: Saves usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
log_name: filepath to write usearch61 generated log file
"""
if not output_fna_filepath:
_, output_fna_filepath = mkstemp(prefix='length_sorted', suffix='.fna')
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--sortbylength': seq_path,
'--output': output_fna_filepath
}
if not remove_usearch_logs:
params['--log'] = log_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return output_fna_filepath, app_result | python | def sort_by_length_usearch61(seq_path,
output_dir=".",
minlen=64,
remove_usearch_logs=False,
HALT_EXEC=False,
output_fna_filepath=None,
log_name="length_sorted.log"):
""" usearch61 application call to sort fasta file by length.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
minlen: minimum sequence length
remove_usearch_logs: Saves usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
log_name: filepath to write usearch61 generated log file
"""
if not output_fna_filepath:
_, output_fna_filepath = mkstemp(prefix='length_sorted', suffix='.fna')
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--sortbylength': seq_path,
'--output': output_fna_filepath
}
if not remove_usearch_logs:
params['--log'] = log_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return output_fna_filepath, app_result | [
"def",
"sort_by_length_usearch61",
"(",
"seq_path",
",",
"output_dir",
"=",
"\".\"",
",",
"minlen",
"=",
"64",
",",
"remove_usearch_logs",
"=",
"False",
",",
"HALT_EXEC",
"=",
"False",
",",
"output_fna_filepath",
"=",
"None",
",",
"log_name",
"=",
"\"length_sort... | usearch61 application call to sort fasta file by length.
seq_path: fasta filepath to be clustered with usearch61
output_dir: directory to output log, OTU mapping, and intermediate files
minlen: minimum sequence length
remove_usearch_logs: Saves usearch log files
HALT_EXEC: application controller option to halt execution
output_fna_filepath: path to write sorted fasta filepath
log_name: filepath to write usearch61 generated log file | [
"usearch61",
"application",
"call",
"to",
"sort",
"fasta",
"file",
"by",
"length",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2049-L2083 |
biocore/burrito-fillings | bfillings/usearch.py | usearch61_cluster_ref | def usearch61_cluster_ref(intermediate_fasta,
refseqs_fp,
percent_id=0.97,
rev=False,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
HALT_EXEC=False,
output_uc_filepath=None,
log_filepath="ref_clustered.log",
threads=1.0
):
""" Cluster input fasta seqs against reference database
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for clustering
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
output_uc_filepath: path to write usearch61 generated .uc file
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
log_filepath = join(output_dir, log_filepath)
params = {
'--usearch_global': intermediate_fasta,
'--db': refseqs_fp,
'--minseqlength': minlen,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result | python | def usearch61_cluster_ref(intermediate_fasta,
refseqs_fp,
percent_id=0.97,
rev=False,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
HALT_EXEC=False,
output_uc_filepath=None,
log_filepath="ref_clustered.log",
threads=1.0
):
""" Cluster input fasta seqs against reference database
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for clustering
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
output_uc_filepath: path to write usearch61 generated .uc file
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
log_filepath = join(output_dir, log_filepath)
params = {
'--usearch_global': intermediate_fasta,
'--db': refseqs_fp,
'--minseqlength': minlen,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result | [
"def",
"usearch61_cluster_ref",
"(",
"intermediate_fasta",
",",
"refseqs_fp",
",",
"percent_id",
"=",
"0.97",
",",
"rev",
"=",
"False",
",",
"minlen",
"=",
"64",
",",
"output_dir",
"=",
"\".\"",
",",
"remove_usearch_logs",
"=",
"False",
",",
"wordlength",
"=",... | Cluster input fasta seqs against reference database
seq_path: fasta filepath to be clustered with usearch61
refseqs_fp: reference fasta filepath, used to cluster sequences against.
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for clustering
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
output_uc_filepath: path to write usearch61 generated .uc file
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution. | [
"Cluster",
"input",
"fasta",
"seqs",
"against",
"reference",
"database"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2090-L2149 |
biocore/burrito-fillings | bfillings/usearch.py | usearch61_fast_cluster | def usearch61_fast_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=8,
usearch61_maxaccepts=1,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="fast_clustered.log",
threads=1.0):
""" Performs usearch61 de novo fast clustering via cluster_fast option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU
"""
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_fast': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True,
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = log_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result | python | def usearch61_fast_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=8,
usearch61_maxaccepts=1,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="fast_clustered.log",
threads=1.0):
""" Performs usearch61 de novo fast clustering via cluster_fast option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU
"""
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_fast': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True,
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = log_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result | [
"def",
"usearch61_fast_cluster",
"(",
"intermediate_fasta",
",",
"percent_id",
"=",
"0.97",
",",
"minlen",
"=",
"64",
",",
"output_dir",
"=",
"\".\"",
",",
"remove_usearch_logs",
"=",
"False",
",",
"wordlength",
"=",
"8",
",",
"usearch61_maxrejects",
"=",
"8",
... | Performs usearch61 de novo fast clustering via cluster_fast option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
threads: Specify number of threads used per core per CPU | [
"Performs",
"usearch61",
"de",
"novo",
"fast",
"clustering",
"via",
"cluster_fast",
"option"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2156-L2210 |
biocore/burrito-fillings | bfillings/usearch.py | usearch61_smallmem_cluster | def usearch61_smallmem_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
rev=False,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="smallmem_clustered.log",
sizeout=False,
consout_filepath=None):
""" Performs usearch61 de novo clustering via cluster_smallmem option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
rev: will enable reverse strand matching if True
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
sizeout: If True, will save abundance data in output fasta labels.
consout_filepath: Needs to be set to save clustered consensus fasta
filepath used for chimera checking.
"""
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_smallmem': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True
}
if sizeorder:
params['--sizeorder'] = True
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
if sizeout:
params['--sizeout'] = True
if consout_filepath:
params['--consout'] = consout_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result | python | def usearch61_smallmem_cluster(intermediate_fasta,
percent_id=0.97,
minlen=64,
rev=False,
output_dir=".",
remove_usearch_logs=False,
wordlength=8,
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
HALT_EXEC=False,
output_uc_filepath=None,
log_name="smallmem_clustered.log",
sizeout=False,
consout_filepath=None):
""" Performs usearch61 de novo clustering via cluster_smallmem option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
rev: will enable reverse strand matching if True
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
sizeout: If True, will save abundance data in output fasta labels.
consout_filepath: Needs to be set to save clustered consensus fasta
filepath used for chimera checking.
"""
log_filepath = join(output_dir, log_name)
params = {'--minseqlength': minlen,
'--cluster_smallmem': intermediate_fasta,
'--id': percent_id,
'--uc': output_uc_filepath,
'--wordlength': wordlength,
'--maxrejects': usearch61_maxrejects,
'--maxaccepts': usearch61_maxaccepts,
'--usersort': True
}
if sizeorder:
params['--sizeorder'] = True
if not remove_usearch_logs:
params['--log'] = log_filepath
if rev:
params['--strand'] = 'both'
else:
params['--strand'] = 'plus'
if sizeout:
params['--sizeout'] = True
if consout_filepath:
params['--consout'] = consout_filepath
clusters_fp = output_uc_filepath
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return clusters_fp, app_result | [
"def",
"usearch61_smallmem_cluster",
"(",
"intermediate_fasta",
",",
"percent_id",
"=",
"0.97",
",",
"minlen",
"=",
"64",
",",
"rev",
"=",
"False",
",",
"output_dir",
"=",
"\".\"",
",",
"remove_usearch_logs",
"=",
"False",
",",
"wordlength",
"=",
"8",
",",
"... | Performs usearch61 de novo clustering via cluster_smallmem option
Only supposed to be used with length sorted data (and performs length
sorting automatically) and does not support reverse strand matching
intermediate_fasta: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
minlen: minimum sequence length
rev: will enable reverse strand matching if True
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
wordlength: word length to use for initial high probability sequence matches
usearch61_maxrejects: Set to 'default' or an int value specifying max
rejects
usearch61_maxaccepts: Number of accepts allowed by usearch61
HALT_EXEC: application controller option to halt execution
output_uc_filepath: Path to write clusters (.uc) file.
log_name: filepath to write usearch61 generated log file
sizeout: If True, will save abundance data in output fasta labels.
consout_filepath: Needs to be set to save clustered consensus fasta
filepath used for chimera checking. | [
"Performs",
"usearch61",
"de",
"novo",
"clustering",
"via",
"cluster_smallmem",
"option"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2213-L2282 |
biocore/burrito-fillings | bfillings/usearch.py | usearch61_chimera_check_denovo | def usearch61_chimera_check_denovo(abundance_fp,
uchime_denovo_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
uchime_denovo_log_fp="uchime_denovo.log",
usearch61_minh=0.28,
usearch61_xn=8.0,
usearch61_dn=1.4,
usearch61_mindiffs=3,
usearch61_mindiv=0.8,
usearch61_abundance_skew=2.0,
HALT_EXEC=False):
""" Does de novo, abundance based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_denovo_fp: output uchime file for chimera results.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
removed_usearch_logs: suppresses creation of log file.
uchime_denovo_log_fp: output filepath for log file.
usearch61_minh: Minimum score (h) to be classified as chimera.
Increasing this value tends to the number of false positives (and also
sensitivity).
usearch61_xn: Weight of "no" vote. Increasing this value tends to the
number of false positives (and also sensitivity).
usearch61_dn: Pseudo-count prior for "no" votes. (n). Increasing this
value tends to the number of false positives (and also sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
usearch61_abundance_skew: abundance skew for de novo chimera comparisons.
HALTEXEC: halt execution and returns command used for app controller.
"""
params = {'--minseqlength': minlen,
'--uchime_denovo': abundance_fp,
'--uchimeout': uchime_denovo_fp,
'--minh': usearch61_minh,
'--xn': usearch61_xn,
'--dn': usearch61_dn,
'--mindiffs': usearch61_mindiffs,
'--mindiv': usearch61_mindiv,
'--abskew': usearch61_abundance_skew
}
if not remove_usearch_logs:
params['--log'] = uchime_denovo_log_fp
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return uchime_denovo_fp, app_result | python | def usearch61_chimera_check_denovo(abundance_fp,
uchime_denovo_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
uchime_denovo_log_fp="uchime_denovo.log",
usearch61_minh=0.28,
usearch61_xn=8.0,
usearch61_dn=1.4,
usearch61_mindiffs=3,
usearch61_mindiv=0.8,
usearch61_abundance_skew=2.0,
HALT_EXEC=False):
""" Does de novo, abundance based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_denovo_fp: output uchime file for chimera results.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
removed_usearch_logs: suppresses creation of log file.
uchime_denovo_log_fp: output filepath for log file.
usearch61_minh: Minimum score (h) to be classified as chimera.
Increasing this value tends to the number of false positives (and also
sensitivity).
usearch61_xn: Weight of "no" vote. Increasing this value tends to the
number of false positives (and also sensitivity).
usearch61_dn: Pseudo-count prior for "no" votes. (n). Increasing this
value tends to the number of false positives (and also sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
usearch61_abundance_skew: abundance skew for de novo chimera comparisons.
HALTEXEC: halt execution and returns command used for app controller.
"""
params = {'--minseqlength': minlen,
'--uchime_denovo': abundance_fp,
'--uchimeout': uchime_denovo_fp,
'--minh': usearch61_minh,
'--xn': usearch61_xn,
'--dn': usearch61_dn,
'--mindiffs': usearch61_mindiffs,
'--mindiv': usearch61_mindiv,
'--abskew': usearch61_abundance_skew
}
if not remove_usearch_logs:
params['--log'] = uchime_denovo_log_fp
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return uchime_denovo_fp, app_result | [
"def",
"usearch61_chimera_check_denovo",
"(",
"abundance_fp",
",",
"uchime_denovo_fp",
",",
"minlen",
"=",
"64",
",",
"output_dir",
"=",
"\".\"",
",",
"remove_usearch_logs",
"=",
"False",
",",
"uchime_denovo_log_fp",
"=",
"\"uchime_denovo.log\"",
",",
"usearch61_minh",
... | Does de novo, abundance based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_denovo_fp: output uchime file for chimera results.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
removed_usearch_logs: suppresses creation of log file.
uchime_denovo_log_fp: output filepath for log file.
usearch61_minh: Minimum score (h) to be classified as chimera.
Increasing this value tends to the number of false positives (and also
sensitivity).
usearch61_xn: Weight of "no" vote. Increasing this value tends to the
number of false positives (and also sensitivity).
usearch61_dn: Pseudo-count prior for "no" votes. (n). Increasing this
value tends to the number of false positives (and also sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
usearch61_abundance_skew: abundance skew for de novo chimera comparisons.
HALTEXEC: halt execution and returns command used for app controller. | [
"Does",
"de",
"novo",
"abundance",
"based",
"chimera",
"checking",
"with",
"usearch61"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2289-L2347 |
biocore/burrito-fillings | bfillings/usearch.py | usearch61_chimera_check_ref | def usearch61_chimera_check_ref(abundance_fp,
uchime_ref_fp,
reference_seqs_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
uchime_ref_log_fp="uchime_ref.log",
usearch61_minh=0.28,
usearch61_xn=8.0,
usearch61_dn=1.4,
usearch61_mindiffs=3,
usearch61_mindiv=0.8,
threads=1.0,
HALT_EXEC=False):
""" Does reference based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_ref_fp: output uchime filepath for reference results
reference_seqs_fp: reference fasta database for chimera checking.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
removed_usearch_logs: suppresses creation of log file.
uchime_denovo_log_fp: output filepath for log file.
usearch61_minh: Minimum score (h) to be classified as chimera.
Increasing this value tends to the number of false positives (and also
sensitivity).
usearch61_xn: Weight of "no" vote. Increasing this value tends to the
number of false positives (and also sensitivity).
usearch61_dn: Pseudo-count prior for "no" votes. (n). Increasing this
value tends to the number of false positives (and also sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
threads: Specify number of threads used per core per CPU
HALTEXEC: halt execution and returns command used for app controller.
"""
params = {'--minseqlength': minlen,
'--uchime_ref': abundance_fp,
'--uchimeout': uchime_ref_fp,
'--db': reference_seqs_fp,
'--minh': usearch61_minh,
'--xn': usearch61_xn,
'--dn': usearch61_dn,
'--mindiffs': usearch61_mindiffs,
'--mindiv': usearch61_mindiv,
# Only works in plus according to usearch doc
'--strand': 'plus',
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = uchime_ref_log_fp
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return uchime_ref_fp, app_result | python | def usearch61_chimera_check_ref(abundance_fp,
uchime_ref_fp,
reference_seqs_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
uchime_ref_log_fp="uchime_ref.log",
usearch61_minh=0.28,
usearch61_xn=8.0,
usearch61_dn=1.4,
usearch61_mindiffs=3,
usearch61_mindiv=0.8,
threads=1.0,
HALT_EXEC=False):
""" Does reference based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_ref_fp: output uchime filepath for reference results
reference_seqs_fp: reference fasta database for chimera checking.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
removed_usearch_logs: suppresses creation of log file.
uchime_denovo_log_fp: output filepath for log file.
usearch61_minh: Minimum score (h) to be classified as chimera.
Increasing this value tends to the number of false positives (and also
sensitivity).
usearch61_xn: Weight of "no" vote. Increasing this value tends to the
number of false positives (and also sensitivity).
usearch61_dn: Pseudo-count prior for "no" votes. (n). Increasing this
value tends to the number of false positives (and also sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
threads: Specify number of threads used per core per CPU
HALTEXEC: halt execution and returns command used for app controller.
"""
params = {'--minseqlength': minlen,
'--uchime_ref': abundance_fp,
'--uchimeout': uchime_ref_fp,
'--db': reference_seqs_fp,
'--minh': usearch61_minh,
'--xn': usearch61_xn,
'--dn': usearch61_dn,
'--mindiffs': usearch61_mindiffs,
'--mindiv': usearch61_mindiv,
# Only works in plus according to usearch doc
'--strand': 'plus',
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = uchime_ref_log_fp
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return uchime_ref_fp, app_result | [
"def",
"usearch61_chimera_check_ref",
"(",
"abundance_fp",
",",
"uchime_ref_fp",
",",
"reference_seqs_fp",
",",
"minlen",
"=",
"64",
",",
"output_dir",
"=",
"\".\"",
",",
"remove_usearch_logs",
"=",
"False",
",",
"uchime_ref_log_fp",
"=",
"\"uchime_ref.log\"",
",",
... | Does reference based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_ref_fp: output uchime filepath for reference results
reference_seqs_fp: reference fasta database for chimera checking.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
removed_usearch_logs: suppresses creation of log file.
uchime_denovo_log_fp: output filepath for log file.
usearch61_minh: Minimum score (h) to be classified as chimera.
Increasing this value tends to the number of false positives (and also
sensitivity).
usearch61_xn: Weight of "no" vote. Increasing this value tends to the
number of false positives (and also sensitivity).
usearch61_dn: Pseudo-count prior for "no" votes. (n). Increasing this
value tends to the number of false positives (and also sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
threads: Specify number of threads used per core per CPU
HALTEXEC: halt execution and returns command used for app controller. | [
"Does",
"reference",
"based",
"chimera",
"checking",
"with",
"usearch61"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2350-L2413 |
biocore/burrito-fillings | bfillings/usearch.py | parse_dereplicated_uc | def parse_dereplicated_uc(dereplicated_uc_lines):
""" Return dict of seq ID:dereplicated seq IDs from dereplicated .uc lines
dereplicated_uc_lines: list of lines of .uc file from dereplicated seqs from
usearch61 (i.e. open file of abundance sorted .uc data)
"""
dereplicated_clusters = {}
seed_hit_ix = 0
seq_id_ix = 8
seed_id_ix = 9
for line in dereplicated_uc_lines:
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.strip().split('\t')
if curr_line[seed_hit_ix] == "S":
dereplicated_clusters[curr_line[seq_id_ix]] = []
if curr_line[seed_hit_ix] == "H":
curr_seq_id = curr_line[seq_id_ix]
dereplicated_clusters[curr_line[seed_id_ix]].append(curr_seq_id)
return dereplicated_clusters | python | def parse_dereplicated_uc(dereplicated_uc_lines):
""" Return dict of seq ID:dereplicated seq IDs from dereplicated .uc lines
dereplicated_uc_lines: list of lines of .uc file from dereplicated seqs from
usearch61 (i.e. open file of abundance sorted .uc data)
"""
dereplicated_clusters = {}
seed_hit_ix = 0
seq_id_ix = 8
seed_id_ix = 9
for line in dereplicated_uc_lines:
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.strip().split('\t')
if curr_line[seed_hit_ix] == "S":
dereplicated_clusters[curr_line[seq_id_ix]] = []
if curr_line[seed_hit_ix] == "H":
curr_seq_id = curr_line[seq_id_ix]
dereplicated_clusters[curr_line[seed_id_ix]].append(curr_seq_id)
return dereplicated_clusters | [
"def",
"parse_dereplicated_uc",
"(",
"dereplicated_uc_lines",
")",
":",
"dereplicated_clusters",
"=",
"{",
"}",
"seed_hit_ix",
"=",
"0",
"seq_id_ix",
"=",
"8",
"seed_id_ix",
"=",
"9",
"for",
"line",
"in",
"dereplicated_uc_lines",
":",
"if",
"line",
".",
"startsw... | Return dict of seq ID:dereplicated seq IDs from dereplicated .uc lines
dereplicated_uc_lines: list of lines of .uc file from dereplicated seqs from
usearch61 (i.e. open file of abundance sorted .uc data) | [
"Return",
"dict",
"of",
"seq",
"ID",
":",
"dereplicated",
"seq",
"IDs",
"from",
"dereplicated",
".",
"uc",
"lines"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2420-L2443 |
biocore/burrito-fillings | bfillings/usearch.py | parse_usearch61_clusters | def parse_usearch61_clusters(clustered_uc_lines,
otu_prefix='denovo',
ref_clustered=False):
""" Returns dict of cluster ID:seq IDs
clustered_uc_lines: lines from .uc file resulting from de novo clustering
otu_prefix: string added to beginning of OTU ID.
ref_clustered: If True, will attempt to create dict keys for clusters as
they are read from the .uc file, rather than from seed lines.
"""
clusters = {}
failures = []
seed_hit_ix = 0
otu_id_ix = 1
seq_id_ix = 8
ref_id_ix = 9
for line in clustered_uc_lines:
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.strip().split('\t')
if curr_line[seed_hit_ix] == "S":
# Need to split on semicolons for sequence IDs to handle case of
# abundance sorted data
clusters[otu_prefix + curr_line[otu_id_ix]] =\
[curr_line[seq_id_ix].split(';')[0].split()[0]]
if curr_line[seed_hit_ix] == "H":
curr_id = curr_line[seq_id_ix].split(';')[0].split()[0]
if ref_clustered:
try:
clusters[otu_prefix + curr_line[ref_id_ix]].append(curr_id)
except KeyError:
clusters[otu_prefix + curr_line[ref_id_ix]] = [curr_id]
else:
clusters[otu_prefix +
curr_line[otu_id_ix]].append(curr_id)
if curr_line[seed_hit_ix] == "N":
failures.append(curr_line[seq_id_ix].split(';')[0])
return clusters, failures | python | def parse_usearch61_clusters(clustered_uc_lines,
otu_prefix='denovo',
ref_clustered=False):
""" Returns dict of cluster ID:seq IDs
clustered_uc_lines: lines from .uc file resulting from de novo clustering
otu_prefix: string added to beginning of OTU ID.
ref_clustered: If True, will attempt to create dict keys for clusters as
they are read from the .uc file, rather than from seed lines.
"""
clusters = {}
failures = []
seed_hit_ix = 0
otu_id_ix = 1
seq_id_ix = 8
ref_id_ix = 9
for line in clustered_uc_lines:
if line.startswith("#") or len(line.strip()) == 0:
continue
curr_line = line.strip().split('\t')
if curr_line[seed_hit_ix] == "S":
# Need to split on semicolons for sequence IDs to handle case of
# abundance sorted data
clusters[otu_prefix + curr_line[otu_id_ix]] =\
[curr_line[seq_id_ix].split(';')[0].split()[0]]
if curr_line[seed_hit_ix] == "H":
curr_id = curr_line[seq_id_ix].split(';')[0].split()[0]
if ref_clustered:
try:
clusters[otu_prefix + curr_line[ref_id_ix]].append(curr_id)
except KeyError:
clusters[otu_prefix + curr_line[ref_id_ix]] = [curr_id]
else:
clusters[otu_prefix +
curr_line[otu_id_ix]].append(curr_id)
if curr_line[seed_hit_ix] == "N":
failures.append(curr_line[seq_id_ix].split(';')[0])
return clusters, failures | [
"def",
"parse_usearch61_clusters",
"(",
"clustered_uc_lines",
",",
"otu_prefix",
"=",
"'denovo'",
",",
"ref_clustered",
"=",
"False",
")",
":",
"clusters",
"=",
"{",
"}",
"failures",
"=",
"[",
"]",
"seed_hit_ix",
"=",
"0",
"otu_id_ix",
"=",
"1",
"seq_id_ix",
... | Returns dict of cluster ID:seq IDs
clustered_uc_lines: lines from .uc file resulting from de novo clustering
otu_prefix: string added to beginning of OTU ID.
ref_clustered: If True, will attempt to create dict keys for clusters as
they are read from the .uc file, rather than from seed lines. | [
"Returns",
"dict",
"of",
"cluster",
"ID",
":",
"seq",
"IDs"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2446-L2487 |
biocore/burrito-fillings | bfillings/usearch.py | merge_clusters_dereplicated_seqs | def merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters):
""" combines de novo clusters and dereplicated seqs to OTU id:seqs dict
de_novo_clusters: dict of OTU ID:clustered sequences
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs
"""
clusters = {}
for curr_denovo_key in de_novo_clusters.keys():
clusters[curr_denovo_key] = de_novo_clusters[curr_denovo_key]
curr_clusters = []
for curr_denovo_id in de_novo_clusters[curr_denovo_key]:
curr_clusters += dereplicated_clusters[curr_denovo_id]
clusters[curr_denovo_key] += curr_clusters
return clusters | python | def merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters):
""" combines de novo clusters and dereplicated seqs to OTU id:seqs dict
de_novo_clusters: dict of OTU ID:clustered sequences
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs
"""
clusters = {}
for curr_denovo_key in de_novo_clusters.keys():
clusters[curr_denovo_key] = de_novo_clusters[curr_denovo_key]
curr_clusters = []
for curr_denovo_id in de_novo_clusters[curr_denovo_key]:
curr_clusters += dereplicated_clusters[curr_denovo_id]
clusters[curr_denovo_key] += curr_clusters
return clusters | [
"def",
"merge_clusters_dereplicated_seqs",
"(",
"de_novo_clusters",
",",
"dereplicated_clusters",
")",
":",
"clusters",
"=",
"{",
"}",
"for",
"curr_denovo_key",
"in",
"de_novo_clusters",
".",
"keys",
"(",
")",
":",
"clusters",
"[",
"curr_denovo_key",
"]",
"=",
"de... | combines de novo clusters and dereplicated seqs to OTU id:seqs dict
de_novo_clusters: dict of OTU ID:clustered sequences
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs | [
"combines",
"de",
"novo",
"clusters",
"and",
"dereplicated",
"seqs",
"to",
"OTU",
"id",
":",
"seqs",
"dict"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2490-L2507 |
biocore/burrito-fillings | bfillings/usearch.py | merge_failures_dereplicated_seqs | def merge_failures_dereplicated_seqs(failures,
dereplicated_clusters):
""" Appends failures from dereplicated seqs to failures list
failures: list of failures
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs
"""
curr_failures = set(failures)
dereplicated_ids = set(dereplicated_clusters)
for curr_failure in curr_failures:
if curr_failure in dereplicated_ids:
failures += dereplicated_clusters[curr_failure]
return failures | python | def merge_failures_dereplicated_seqs(failures,
dereplicated_clusters):
""" Appends failures from dereplicated seqs to failures list
failures: list of failures
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs
"""
curr_failures = set(failures)
dereplicated_ids = set(dereplicated_clusters)
for curr_failure in curr_failures:
if curr_failure in dereplicated_ids:
failures += dereplicated_clusters[curr_failure]
return failures | [
"def",
"merge_failures_dereplicated_seqs",
"(",
"failures",
",",
"dereplicated_clusters",
")",
":",
"curr_failures",
"=",
"set",
"(",
"failures",
")",
"dereplicated_ids",
"=",
"set",
"(",
"dereplicated_clusters",
")",
"for",
"curr_failure",
"in",
"curr_failures",
":",... | Appends failures from dereplicated seqs to failures list
failures: list of failures
dereplicated_clusters: dict of seq IDs: dereplicated seq IDs | [
"Appends",
"failures",
"from",
"dereplicated",
"seqs",
"to",
"failures",
"list"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2510-L2525 |
biocore/burrito-fillings | bfillings/usearch.py | parse_usearch61_failures | def parse_usearch61_failures(seq_path,
failures,
output_fasta_fp):
""" Parses seq IDs from failures list, writes to output_fasta_fp
seq_path: filepath of original input fasta file.
failures: list/set of failure seq IDs
output_fasta_fp: path to write parsed sequences
"""
parsed_out = open(output_fasta_fp, "w")
for label, seq in parse_fasta(open(seq_path), "U"):
curr_label = label.split()[0]
if curr_label in failures:
parsed_out.write(">%s\n%s\n" % (label, seq))
parsed_out.close()
return output_fasta_fp | python | def parse_usearch61_failures(seq_path,
failures,
output_fasta_fp):
""" Parses seq IDs from failures list, writes to output_fasta_fp
seq_path: filepath of original input fasta file.
failures: list/set of failure seq IDs
output_fasta_fp: path to write parsed sequences
"""
parsed_out = open(output_fasta_fp, "w")
for label, seq in parse_fasta(open(seq_path), "U"):
curr_label = label.split()[0]
if curr_label in failures:
parsed_out.write(">%s\n%s\n" % (label, seq))
parsed_out.close()
return output_fasta_fp | [
"def",
"parse_usearch61_failures",
"(",
"seq_path",
",",
"failures",
",",
"output_fasta_fp",
")",
":",
"parsed_out",
"=",
"open",
"(",
"output_fasta_fp",
",",
"\"w\"",
")",
"for",
"label",
",",
"seq",
"in",
"parse_fasta",
"(",
"open",
"(",
"seq_path",
")",
"... | Parses seq IDs from failures list, writes to output_fasta_fp
seq_path: filepath of original input fasta file.
failures: list/set of failure seq IDs
output_fasta_fp: path to write parsed sequences | [
"Parses",
"seq",
"IDs",
"from",
"failures",
"list",
"writes",
"to",
"output_fasta_fp"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L2528-L2545 |
dailymuse/oz | oz/error_pages/middleware.py | ErrorPageMiddleware._on_error_page_write_error | def _on_error_page_write_error(self, status_code, **kwargs):
"""Replaces the default Tornado error page with a Django-styled one"""
if oz.settings.get('debug'):
exception_type, exception_value, tback = sys.exc_info()
is_breakpoint = isinstance(exception_value, oz.error_pages.DebugBreakException)
frames = oz.error_pages.get_frames(tback, is_breakpoint)
frames.reverse()
if is_breakpoint:
exception_type = 'Debug breakpoint'
exception_value = ''
self.render(oz.settings["error_pages_template"],
exception_type=exception_type,
exception_value=exception_value,
frames=frames,
request_input=self.request.body,
request_cookies=self.cookies,
request_headers=self.request.headers,
request_path=self.request.uri,
request_method=self.request.method,
response_output="".join(self._write_buffer),
response_headers=self._headers,
prettify_object=oz.error_pages.prettify_object,
)
return oz.break_trigger | python | def _on_error_page_write_error(self, status_code, **kwargs):
"""Replaces the default Tornado error page with a Django-styled one"""
if oz.settings.get('debug'):
exception_type, exception_value, tback = sys.exc_info()
is_breakpoint = isinstance(exception_value, oz.error_pages.DebugBreakException)
frames = oz.error_pages.get_frames(tback, is_breakpoint)
frames.reverse()
if is_breakpoint:
exception_type = 'Debug breakpoint'
exception_value = ''
self.render(oz.settings["error_pages_template"],
exception_type=exception_type,
exception_value=exception_value,
frames=frames,
request_input=self.request.body,
request_cookies=self.cookies,
request_headers=self.request.headers,
request_path=self.request.uri,
request_method=self.request.method,
response_output="".join(self._write_buffer),
response_headers=self._headers,
prettify_object=oz.error_pages.prettify_object,
)
return oz.break_trigger | [
"def",
"_on_error_page_write_error",
"(",
"self",
",",
"status_code",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"oz",
".",
"settings",
".",
"get",
"(",
"'debug'",
")",
":",
"exception_type",
",",
"exception_value",
",",
"tback",
"=",
"sys",
".",
"exc_info",... | Replaces the default Tornado error page with a Django-styled one | [
"Replaces",
"the",
"default",
"Tornado",
"error",
"page",
"with",
"a",
"Django",
"-",
"styled",
"one"
] | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/error_pages/middleware.py#L16-L44 |
dailymuse/oz | oz/blinks/middleware.py | BlinkMiddleware.get_blink_cookie | def get_blink_cookie(self, name):
"""Gets a blink cookie value"""
value = self.get_cookie(name)
if value != None:
self.clear_cookie(name)
return escape.url_unescape(value) | python | def get_blink_cookie(self, name):
"""Gets a blink cookie value"""
value = self.get_cookie(name)
if value != None:
self.clear_cookie(name)
return escape.url_unescape(value) | [
"def",
"get_blink_cookie",
"(",
"self",
",",
"name",
")",
":",
"value",
"=",
"self",
".",
"get_cookie",
"(",
"name",
")",
"if",
"value",
"!=",
"None",
":",
"self",
".",
"clear_cookie",
"(",
"name",
")",
"return",
"escape",
".",
"url_unescape",
"(",
"va... | Gets a blink cookie value | [
"Gets",
"a",
"blink",
"cookie",
"value"
] | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/blinks/middleware.py#L14-L20 |
dailymuse/oz | oz/blinks/middleware.py | BlinkMiddleware.set_blink | def set_blink(self, message, type="info"):
"""
Sets the blink, a one-time transactional message that is shown on the
next page load
"""
self.set_cookie("blink_message", escape.url_escape(message), httponly=True)
self.set_cookie("blink_type", escape.url_escape(type), httponly=True) | python | def set_blink(self, message, type="info"):
"""
Sets the blink, a one-time transactional message that is shown on the
next page load
"""
self.set_cookie("blink_message", escape.url_escape(message), httponly=True)
self.set_cookie("blink_type", escape.url_escape(type), httponly=True) | [
"def",
"set_blink",
"(",
"self",
",",
"message",
",",
"type",
"=",
"\"info\"",
")",
":",
"self",
".",
"set_cookie",
"(",
"\"blink_message\"",
",",
"escape",
".",
"url_escape",
"(",
"message",
")",
",",
"httponly",
"=",
"True",
")",
"self",
".",
"set_cook... | Sets the blink, a one-time transactional message that is shown on the
next page load | [
"Sets",
"the",
"blink",
"a",
"one",
"-",
"time",
"transactional",
"message",
"that",
"is",
"shown",
"on",
"the",
"next",
"page",
"load"
] | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/blinks/middleware.py#L22-L28 |
biocore/burrito-fillings | bfillings/cd_hit.py | cdhit_clusters_from_seqs | def cdhit_clusters_from_seqs(seqs, moltype=DNA, params=None):
"""Returns the CD-HIT clusters given seqs
seqs : dict like collection of sequences
moltype : cogent.core.moltype object
params : cd-hit parameters
NOTE: This method will call CD_HIT if moltype is PROTEIN,
CD_HIT_EST if moltype is RNA/DNA, and raise if any other
moltype is passed.
"""
# keys are not remapped. Tested against seq_ids of 100char length
seqs = SequenceCollection(seqs, MolType=moltype)
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
# setup params and make sure the output argument is set
if params is None:
params = {}
if '-o' not in params:
_, params['-o'] = mkstemp()
# call the correct version of cd-hit base on moltype
working_dir = mkdtemp()
if moltype is PROTEIN:
app = CD_HIT(WorkingDir=working_dir, params=params)
elif moltype is RNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
elif moltype is DNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
else:
raise ValueError, "Moltype must be either PROTEIN, RNA, or DNA"
# grab result
res = app(int_map.toFasta())
clusters = parse_cdhit_clstr_file(res['CLSTR'])
remapped_clusters = []
for c in clusters:
curr = [int_keys[i] for i in c]
remapped_clusters.append(curr)
# perform cleanup
res.cleanUp()
shutil.rmtree(working_dir)
remove(params['-o'] + '.bak.clstr')
return remapped_clusters | python | def cdhit_clusters_from_seqs(seqs, moltype=DNA, params=None):
"""Returns the CD-HIT clusters given seqs
seqs : dict like collection of sequences
moltype : cogent.core.moltype object
params : cd-hit parameters
NOTE: This method will call CD_HIT if moltype is PROTEIN,
CD_HIT_EST if moltype is RNA/DNA, and raise if any other
moltype is passed.
"""
# keys are not remapped. Tested against seq_ids of 100char length
seqs = SequenceCollection(seqs, MolType=moltype)
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
# setup params and make sure the output argument is set
if params is None:
params = {}
if '-o' not in params:
_, params['-o'] = mkstemp()
# call the correct version of cd-hit base on moltype
working_dir = mkdtemp()
if moltype is PROTEIN:
app = CD_HIT(WorkingDir=working_dir, params=params)
elif moltype is RNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
elif moltype is DNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
else:
raise ValueError, "Moltype must be either PROTEIN, RNA, or DNA"
# grab result
res = app(int_map.toFasta())
clusters = parse_cdhit_clstr_file(res['CLSTR'])
remapped_clusters = []
for c in clusters:
curr = [int_keys[i] for i in c]
remapped_clusters.append(curr)
# perform cleanup
res.cleanUp()
shutil.rmtree(working_dir)
remove(params['-o'] + '.bak.clstr')
return remapped_clusters | [
"def",
"cdhit_clusters_from_seqs",
"(",
"seqs",
",",
"moltype",
"=",
"DNA",
",",
"params",
"=",
"None",
")",
":",
"# keys are not remapped. Tested against seq_ids of 100char length",
"seqs",
"=",
"SequenceCollection",
"(",
"seqs",
",",
"MolType",
"=",
"moltype",
")",
... | Returns the CD-HIT clusters given seqs
seqs : dict like collection of sequences
moltype : cogent.core.moltype object
params : cd-hit parameters
NOTE: This method will call CD_HIT if moltype is PROTIEN,
CD_HIT_EST if moltype is RNA/DNA, and raise if any other
moltype is passed. | [
"Returns",
"the",
"CD",
"-",
"HIT",
"clusters",
"given",
"seqs"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L225-L274 |
biocore/burrito-fillings | bfillings/cd_hit.py | cdhit_from_seqs | def cdhit_from_seqs(seqs, moltype, params=None):
"""Returns the CD-HIT results given seqs
seqs : dict like collection of sequences
moltype : cogent.core.moltype object
params : cd-hit parameters
NOTE: This method will call CD_HIT if moltype is PROTEIN,
CD_HIT_EST if moltype is RNA/DNA, and raise if any other
moltype is passed.
"""
# keys are not remapped. Tested against seq_ids of 100char length
seqs = SequenceCollection(seqs, MolType=moltype)
# setup params and make sure the output argument is set
if params is None:
params = {}
if '-o' not in params:
_, params['-o'] = mkstemp()
# call the correct version of cd-hit base on moltype
working_dir = mkdtemp()
if moltype is PROTEIN:
app = CD_HIT(WorkingDir=working_dir, params=params)
elif moltype is RNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
elif moltype is DNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
else:
raise ValueError, "Moltype must be either PROTEIN, RNA, or DNA"
# grab result
res = app(seqs.toFasta())
new_seqs = dict(parse_fasta(res['FASTA']))
# perform cleanup
res.cleanUp()
shutil.rmtree(working_dir)
remove(params['-o'] + '.bak.clstr')
return SequenceCollection(new_seqs, MolType=moltype) | python | def cdhit_from_seqs(seqs, moltype, params=None):
"""Returns the CD-HIT results given seqs
seqs : dict like collection of sequences
moltype : cogent.core.moltype object
params : cd-hit parameters
NOTE: This method will call CD_HIT if moltype is PROTEIN,
CD_HIT_EST if moltype is RNA/DNA, and raise if any other
moltype is passed.
"""
# keys are not remapped. Tested against seq_ids of 100char length
seqs = SequenceCollection(seqs, MolType=moltype)
# setup params and make sure the output argument is set
if params is None:
params = {}
if '-o' not in params:
_, params['-o'] = mkstemp()
# call the correct version of cd-hit base on moltype
working_dir = mkdtemp()
if moltype is PROTEIN:
app = CD_HIT(WorkingDir=working_dir, params=params)
elif moltype is RNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
elif moltype is DNA:
app = CD_HIT_EST(WorkingDir=working_dir, params=params)
else:
raise ValueError, "Moltype must be either PROTEIN, RNA, or DNA"
# grab result
res = app(seqs.toFasta())
new_seqs = dict(parse_fasta(res['FASTA']))
# perform cleanup
res.cleanUp()
shutil.rmtree(working_dir)
remove(params['-o'] + '.bak.clstr')
return SequenceCollection(new_seqs, MolType=moltype) | [
"def",
"cdhit_from_seqs",
"(",
"seqs",
",",
"moltype",
",",
"params",
"=",
"None",
")",
":",
"# keys are not remapped. Tested against seq_ids of 100char length",
"seqs",
"=",
"SequenceCollection",
"(",
"seqs",
",",
"MolType",
"=",
"moltype",
")",
"# setup params and mak... | Returns the CD-HIT results given seqs
seqs : dict like collection of sequences
moltype : cogent.core.moltype object
params : cd-hit parameters
NOTE: This method will call CD_HIT if moltype is PROTIEN,
CD_HIT_EST if moltype is RNA/DNA, and raise if any other
moltype is passed. | [
"Returns",
"the",
"CD",
"-",
"HIT",
"results",
"given",
"seqs"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L276-L316 |
biocore/burrito-fillings | bfillings/cd_hit.py | parse_cdhit_clstr_file | def parse_cdhit_clstr_file(lines):
"""Returns a list of list of sequence ids representing clusters"""
clusters = []
curr_cluster = []
for l in lines:
if l.startswith('>Cluster'):
if not curr_cluster:
continue
clusters.append(curr_cluster)
curr_cluster = []
else:
curr_cluster.append(clean_cluster_seq_id(l.split()[2]))
if curr_cluster:
clusters.append(curr_cluster)
return clusters | python | def parse_cdhit_clstr_file(lines):
"""Returns a list of list of sequence ids representing clusters"""
clusters = []
curr_cluster = []
for l in lines:
if l.startswith('>Cluster'):
if not curr_cluster:
continue
clusters.append(curr_cluster)
curr_cluster = []
else:
curr_cluster.append(clean_cluster_seq_id(l.split()[2]))
if curr_cluster:
clusters.append(curr_cluster)
return clusters | [
"def",
"parse_cdhit_clstr_file",
"(",
"lines",
")",
":",
"clusters",
"=",
"[",
"]",
"curr_cluster",
"=",
"[",
"]",
"for",
"l",
"in",
"lines",
":",
"if",
"l",
".",
"startswith",
"(",
"'>Cluster'",
")",
":",
"if",
"not",
"curr_cluster",
":",
"continue",
... | Returns a list of list of sequence ids representing clusters | [
"Returns",
"a",
"list",
"of",
"list",
"of",
"sequence",
"ids",
"representing",
"clusters"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L326-L343 |
biocore/burrito-fillings | bfillings/cd_hit.py | CD_HIT._input_as_multiline_string | def _input_as_multiline_string(self, data):
"""Writes data to tempfile and sets -i parameter
data -- list of lines
"""
if data:
self.Parameters['-i']\
.on(super(CD_HIT,self)._input_as_multiline_string(data))
return '' | python | def _input_as_multiline_string(self, data):
"""Writes data to tempfile and sets -i parameter
data -- list of lines
"""
if data:
self.Parameters['-i']\
.on(super(CD_HIT,self)._input_as_multiline_string(data))
return '' | [
"def",
"_input_as_multiline_string",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
":",
"self",
".",
"Parameters",
"[",
"'-i'",
"]",
".",
"on",
"(",
"super",
"(",
"CD_HIT",
",",
"self",
")",
".",
"_input_as_multiline_string",
"(",
"data",
")",
")",
... | Writes data to tempfile and sets -i parameter
data -- list of lines | [
"Writes",
"data",
"to",
"tempfile",
"and",
"sets",
"-",
"i",
"parameter"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L151-L159 |
biocore/burrito-fillings | bfillings/cd_hit.py | CD_HIT._input_as_lines | def _input_as_lines(self, data):
"""Writes data to tempfile and sets -i parameter
data -- list of lines, ready to be written to file
"""
if data:
self.Parameters['-i']\
.on(super(CD_HIT,self)._input_as_lines(data))
return '' | python | def _input_as_lines(self, data):
"""Writes data to tempfile and sets -i parameter
data -- list of lines, ready to be written to file
"""
if data:
self.Parameters['-i']\
.on(super(CD_HIT,self)._input_as_lines(data))
return '' | [
"def",
"_input_as_lines",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
":",
"self",
".",
"Parameters",
"[",
"'-i'",
"]",
".",
"on",
"(",
"super",
"(",
"CD_HIT",
",",
"self",
")",
".",
"_input_as_lines",
"(",
"data",
")",
")",
"return",
"''"
] | Writes data to tempfile and sets -i parameter
data -- list of lines, ready to be written to file | [
"Writes",
"data",
"to",
"tempfile",
"and",
"sets",
"-",
"i",
"parameter"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L161-L169 |
biocore/burrito-fillings | bfillings/cd_hit.py | CD_HIT._get_clstr_outfile | def _get_clstr_outfile(self):
"""Returns the absolute path to the clstr outfile"""
if self.Parameters['-o'].isOn():
return ''.join([self.Parameters['-o'].Value, '.clstr'])
else:
raise ValueError, "No output file specified" | python | def _get_clstr_outfile(self):
"""Returns the absolute path to the clstr outfile"""
if self.Parameters['-o'].isOn():
return ''.join([self.Parameters['-o'].Value, '.clstr'])
else:
raise ValueError, "No output file specified" | [
"def",
"_get_clstr_outfile",
"(",
"self",
")",
":",
"if",
"self",
".",
"Parameters",
"[",
"'-o'",
"]",
".",
"isOn",
"(",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"self",
".",
"Parameters",
"[",
"'-o'",
"]",
".",
"Value",
",",
"'.clstr'",
"]... | Returns the absolute path to the clstr outfile | [
"Returns",
"the",
"absolute",
"path",
"to",
"the",
"clstr",
"outfile"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L196-L201 |
biocore/burrito-fillings | bfillings/cd_hit.py | CD_HIT._get_result_paths | def _get_result_paths(self, data):
"""Return dict of {key: ResultPath}"""
result = {}
result['FASTA'] = ResultPath(Path=self._get_seqs_outfile())
result['CLSTR'] = ResultPath(Path=self._get_clstr_outfile())
return result | python | def _get_result_paths(self, data):
"""Return dict of {key: ResultPath}"""
result = {}
result['FASTA'] = ResultPath(Path=self._get_seqs_outfile())
result['CLSTR'] = ResultPath(Path=self._get_clstr_outfile())
return result | [
"def",
"_get_result_paths",
"(",
"self",
",",
"data",
")",
":",
"result",
"=",
"{",
"}",
"result",
"[",
"'FASTA'",
"]",
"=",
"ResultPath",
"(",
"Path",
"=",
"self",
".",
"_get_seqs_outfile",
"(",
")",
")",
"result",
"[",
"'CLSTR'",
"]",
"=",
"ResultPat... | Return dict of {key: ResultPath} | [
"Return",
"dict",
"of",
"{",
"key",
":",
"ResultPath",
"}"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/cd_hit.py#L203-L208 |
halfak/deltas | deltas/segmenters/paragraphs_sentences_and_whitespace.py | ParagraphsSentencesAndWhitespace.segment | def segment(self, tokens):
"""
Segments a sequence of tokens into a sequence of segments.
:Parameters:
tokens : `list` ( :class:`~deltas.Token` )
"""
look_ahead = LookAhead(tokens)
segments = Segment()
while not look_ahead.empty():
if look_ahead.peek().type not in self.whitespace: # Paragraph!
paragraph = MatchableSegment(look_ahead.i)
while not look_ahead.empty() and \
look_ahead.peek().type not in self.paragraph_end:
if look_ahead.peek().type == "tab_open": # Table
tab_depth = 1
sentence = MatchableSegment(
look_ahead.i, [next(look_ahead)])
while not look_ahead.empty() and tab_depth > 0:
tab_depth += look_ahead.peek().type == "tab_open"
tab_depth -= look_ahead.peek().type == "tab_close"
sentence.append(next(look_ahead))
paragraph.append(sentence)
elif look_ahead.peek().type not in self.whitespace: # Sentence!
sentence = MatchableSegment(
look_ahead.i, [next(look_ahead)])
sub_depth = int(sentence[0].type in SUB_OPEN)
while not look_ahead.empty():
sub_depth += look_ahead.peek().type in SUB_OPEN
sub_depth -= look_ahead.peek().type in SUB_CLOSE
sentence.append(next(look_ahead))
if sentence[-1].type in self.sentence_end and sub_depth <= 0:
non_whitespace = sum(s.type not in self.whitespace for s in sentence)
if non_whitespace >= self.min_sentence:
break
paragraph.append(sentence)
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
paragraph.append(whitespace)
segments.append(paragraph)
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
segments.append(whitespace)
return segments | python | def segment(self, tokens):
"""
Segments a sequence of tokens into a sequence of segments.
:Parameters:
tokens : `list` ( :class:`~deltas.Token` )
"""
look_ahead = LookAhead(tokens)
segments = Segment()
while not look_ahead.empty():
if look_ahead.peek().type not in self.whitespace: # Paragraph!
paragraph = MatchableSegment(look_ahead.i)
while not look_ahead.empty() and \
look_ahead.peek().type not in self.paragraph_end:
if look_ahead.peek().type == "tab_open": # Table
tab_depth = 1
sentence = MatchableSegment(
look_ahead.i, [next(look_ahead)])
while not look_ahead.empty() and tab_depth > 0:
tab_depth += look_ahead.peek().type == "tab_open"
tab_depth -= look_ahead.peek().type == "tab_close"
sentence.append(next(look_ahead))
paragraph.append(sentence)
elif look_ahead.peek().type not in self.whitespace: # Sentence!
sentence = MatchableSegment(
look_ahead.i, [next(look_ahead)])
sub_depth = int(sentence[0].type in SUB_OPEN)
while not look_ahead.empty():
sub_depth += look_ahead.peek().type in SUB_OPEN
sub_depth -= look_ahead.peek().type in SUB_CLOSE
sentence.append(next(look_ahead))
if sentence[-1].type in self.sentence_end and sub_depth <= 0:
non_whitespace = sum(s.type not in self.whitespace for s in sentence)
if non_whitespace >= self.min_sentence:
break
paragraph.append(sentence)
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
paragraph.append(whitespace)
segments.append(paragraph)
else: # look_ahead.peek().type in self.whitespace
whitespace = Segment(look_ahead.i, [next(look_ahead)])
segments.append(whitespace)
return segments | [
"def",
"segment",
"(",
"self",
",",
"tokens",
")",
":",
"look_ahead",
"=",
"LookAhead",
"(",
"tokens",
")",
"segments",
"=",
"Segment",
"(",
")",
"while",
"not",
"look_ahead",
".",
"empty",
"(",
")",
":",
"if",
"look_ahead",
".",
"peek",
"(",
")",
".... | Segments a sequence of tokens into a sequence of segments.
:Parameters:
tokens : `list` ( :class:`~deltas.Token` ) | [
"Segments",
"a",
"sequence",
"of",
"tokens",
"into",
"a",
"sequence",
"of",
"segments",
"."
] | train | https://github.com/halfak/deltas/blob/4173f4215b93426a877f4bb4a7a3547834e60ac3/deltas/segmenters/paragraphs_sentences_and_whitespace.py#L63-L120 |
matrix-org/pushbaby | pushbaby/__init__.py | PushBaby.send | def send(self, payload, token, expiration=None, priority=None, identifier=None):
"""
Attempts to send a push message. On network failures, progagates the exception.
It is advised to make all text in the payload dictionary unicode objects and not
mix unicode objects and str objects. If str objects are used, they must be
in UTF-8 encoding.
Args:
payload (dict): The dictionary payload of the push to send
token (str): token to send the push to (raw, unencoded bytes)
expiration (int, seconds): When the message becomes irrelevant (time in seconds, as from time.time())
priority (int): Integer priority for the message as per Apple's documentation
identifier (any): optional identifier that will be returned if the push fails.
This is opaque to the library and not limited to 4 bytes.
Throws:
BodyTooLongException: If the payload body is too long and cannot be truncated to fit
"""
# we only use one conn at a time currently but we may as well do this...
created_conn = False
while not created_conn:
if len(self.conns) == 0:
self.conns.append(PushConnection(self, self.address, self.certfile, self.keyfile))
created_conn = True
conn = random.choice(self.conns)
try:
conn.send(payload, token, expiration=expiration, priority=priority, identifier=identifier)
return
except:
logger.info("Connection died: removing")
self.conns.remove(conn)
raise SendFailedException() | python | def send(self, payload, token, expiration=None, priority=None, identifier=None):
"""
Attempts to send a push message. On network failures, progagates the exception.
It is advised to make all text in the payload dictionary unicode objects and not
mix unicode objects and str objects. If str objects are used, they must be
in UTF-8 encoding.
Args:
payload (dict): The dictionary payload of the push to send
token (str): token to send the push to (raw, unencoded bytes)
expiration (int, seconds): When the message becomes irrelevant (time in seconds, as from time.time())
priority (int): Integer priority for the message as per Apple's documentation
identifier (any): optional identifier that will be returned if the push fails.
This is opaque to the library and not limited to 4 bytes.
Throws:
BodyTooLongException: If the payload body is too long and cannot be truncated to fit
"""
# we only use one conn at a time currently but we may as well do this...
created_conn = False
while not created_conn:
if len(self.conns) == 0:
self.conns.append(PushConnection(self, self.address, self.certfile, self.keyfile))
created_conn = True
conn = random.choice(self.conns)
try:
conn.send(payload, token, expiration=expiration, priority=priority, identifier=identifier)
return
except:
logger.info("Connection died: removing")
self.conns.remove(conn)
raise SendFailedException() | [
"def",
"send",
"(",
"self",
",",
"payload",
",",
"token",
",",
"expiration",
"=",
"None",
",",
"priority",
"=",
"None",
",",
"identifier",
"=",
"None",
")",
":",
"# we only use one conn at a time currently but we may as well do this...",
"created_conn",
"=",
"False"... | Attempts to send a push message. On network failures, progagates the exception.
It is advised to make all text in the payload dictionary unicode objects and not
mix unicode objects and str objects. If str objects are used, they must be
in UTF-8 encoding.
Args:
payload (dict): The dictionary payload of the push to send
token (str): token to send the push to (raw, unencoded bytes)
expiration (int, seconds): When the message becomes irrelevant (time in seconds, as from time.time())
priority (int): Integer priority for the message as per Apple's documentation
identifier (any): optional identifier that will be returned if the push fails.
This is opaque to the library and not limited to 4 bytes.
Throws:
BodyTooLongException: If the payload body is too long and cannot be truncated to fit | [
"Attempts",
"to",
"send",
"a",
"push",
"message",
".",
"On",
"network",
"failures",
"progagates",
"the",
"exception",
".",
"It",
"is",
"advised",
"to",
"make",
"all",
"text",
"in",
"the",
"payload",
"dictionary",
"unicode",
"objects",
"and",
"not",
"mix",
... | train | https://github.com/matrix-org/pushbaby/blob/d3265e32dba12cb25474cb9383481def4a8b3bbe/pushbaby/__init__.py#L81-L111 |
matrix-org/pushbaby | pushbaby/__init__.py | PushBaby.get_all_feedback | def get_all_feedback(self):
"""
Connects to the feedback service and returns any feedback that is sent
as a list of FeedbackItem objects.
Blocks the current greenlet until all feedback is returned.
If a network error occurs before any feedback is received, it is
propagated to the caller. Otherwise, it is ignored and the feedback
that had arrived is returned.
"""
if not self.fbaddress:
raise Exception("Attempted to fetch feedback but no feedback_address supplied")
fbconn = FeedbackConnection(self, self.fbaddress, self.certfile, self.keyfile)
return fbconn.get_all() | python | def get_all_feedback(self):
"""
Connects to the feedback service and returns any feedback that is sent
as a list of FeedbackItem objects.
Blocks the current greenlet until all feedback is returned.
If a network error occurs before any feedback is received, it is
propagated to the caller. Otherwise, it is ignored and the feedback
that had arrived is returned.
"""
if not self.fbaddress:
raise Exception("Attempted to fetch feedback but no feedback_address supplied")
fbconn = FeedbackConnection(self, self.fbaddress, self.certfile, self.keyfile)
return fbconn.get_all() | [
"def",
"get_all_feedback",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"fbaddress",
":",
"raise",
"Exception",
"(",
"\"Attempted to fetch feedback but no feedback_address supplied\"",
")",
"fbconn",
"=",
"FeedbackConnection",
"(",
"self",
",",
"self",
".",
"fba... | Connects to the feedback service and returns any feedback that is sent
as a list of FeedbackItem objects.
Blocks the current greenlet until all feedback is returned.
If a network error occurs before any feedback is received, it is
propagated to the caller. Otherwise, it is ignored and the feedback
that had arrived is returned. | [
"Connects",
"to",
"the",
"feedback",
"service",
"and",
"returns",
"any",
"feedback",
"that",
"is",
"sent",
"as",
"a",
"list",
"of",
"FeedbackItem",
"objects",
"."
] | train | https://github.com/matrix-org/pushbaby/blob/d3265e32dba12cb25474cb9383481def4a8b3bbe/pushbaby/__init__.py#L125-L140 |
andrewgross/pyrelic | pyrelic/client.py | Client._parse_xml | def _parse_xml(self, response):
"""
Run our XML parser (lxml in this case) over our response text. Lxml
doesn't enjoy having xml/encoding information in the header so we strip
that out if necessary. We return a parsed XML object that can be
used by the calling API method and massaged into a more appropriate
format.
"""
if response.startswith('\n'):
response = response[1:]
tree = etree.fromstring(response)
return tree | python | def _parse_xml(self, response):
"""
Run our XML parser (lxml in this case) over our response text. Lxml
doesn't enjoy having xml/encoding information in the header so we strip
that out if necessary. We return a parsed XML object that can be
used by the calling API method and massaged into a more appropriate
format.
"""
if response.startswith('\n'):
response = response[1:]
tree = etree.fromstring(response)
return tree | [
"def",
"_parse_xml",
"(",
"self",
",",
"response",
")",
":",
"if",
"response",
".",
"startswith",
"(",
"'\\n'",
")",
":",
"response",
"=",
"response",
"[",
"1",
":",
"]",
"tree",
"=",
"etree",
".",
"fromstring",
"(",
"response",
")",
"return",
"tree"
] | Run our XML parser (lxml in this case) over our response text. Lxml
doesn't enjoy having xml/encoding information in the header so we strip
that out if necessary. We return a parsed XML object that can be
used by the calling API method and massaged into a more appropriate
format. | [
"Run",
"our",
"XML",
"parser",
"(",
"lxml",
"in",
"this",
"case",
")",
"over",
"our",
"response",
"text",
".",
"Lxml",
"doesn",
"t",
"enjoy",
"having",
"xml",
"/",
"encoding",
"information",
"in",
"the",
"header",
"so",
"we",
"strip",
"that",
"out",
"i... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L45-L56 |
andrewgross/pyrelic | pyrelic/client.py | Client._handle_api_error | def _handle_api_error(self, error):
"""
New Relic cheerfully provides expected API error codes depending on your
API call deficiencies so we convert these to exceptions and raise them
for the user to handle as they see fit.
"""
status_code = error.response.status_code
message = error.message
if 403 == status_code:
raise NewRelicInvalidApiKeyException(message)
elif 404 == status_code:
raise NewRelicUnknownApplicationException(message)
elif 422 == status_code:
raise NewRelicInvalidParameterException(message)
else:
raise NewRelicApiException(message) | python | def _handle_api_error(self, error):
"""
New Relic cheerfully provides expected API error codes depending on your
API call deficiencies so we convert these to exceptions and raise them
for the user to handle as they see fit.
"""
status_code = error.response.status_code
message = error.message
if 403 == status_code:
raise NewRelicInvalidApiKeyException(message)
elif 404 == status_code:
raise NewRelicUnknownApplicationException(message)
elif 422 == status_code:
raise NewRelicInvalidParameterException(message)
else:
raise NewRelicApiException(message) | [
"def",
"_handle_api_error",
"(",
"self",
",",
"error",
")",
":",
"status_code",
"=",
"error",
".",
"response",
".",
"status_code",
"message",
"=",
"error",
".",
"message",
"if",
"403",
"==",
"status_code",
":",
"raise",
"NewRelicInvalidApiKeyException",
"(",
"... | New Relic cheerfully provides expected API error codes depending on your
API call deficiencies so we convert these to exceptions and raise them
for the user to handle as they see fit. | [
"New",
"Relic",
"cheerfully",
"provides",
"expected",
"API",
"error",
"codes",
"depending",
"on",
"your",
"API",
"call",
"deficiencies",
"so",
"we",
"convert",
"these",
"to",
"exceptions",
"and",
"raise",
"them",
"for",
"the",
"user",
"to",
"handle",
"as",
"... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L58-L74 |
andrewgross/pyrelic | pyrelic/client.py | Client._api_rate_limit_exceeded | def _api_rate_limit_exceeded(self, api_call, window=60):
"""
We want to keep track of the last time we sent a request to the NewRelic
API, but only for certain operations. This method will dynamically add
an attribute to the Client class with a unix timestamp with the name of
the API api_call we make so that we can check it later. We return the
amount of time until we can perform another API call so that appropriate
waiting can be implemented.
"""
current = datetime.datetime.now()
try:
previous = getattr(self, api_call.__name__ + "_window")
# Force the calling of our property so we can
# handle not having set it yet.
previous.__str__
except AttributeError:
now = datetime.datetime.now()
outside_window = datetime.timedelta(seconds=window+1)
previous = now - outside_window
if current - previous > datetime.timedelta(seconds=window):
setattr(self, api_call.__name__ + "_window", current)
else:
timeout = window - (current - previous).seconds
raise NewRelicApiRateLimitException(str(timeout)) | python | def _api_rate_limit_exceeded(self, api_call, window=60):
"""
We want to keep track of the last time we sent a request to the NewRelic
API, but only for certain operations. This method will dynamically add
an attribute to the Client class with a unix timestamp with the name of
the API api_call we make so that we can check it later. We return the
amount of time until we can perform another API call so that appropriate
waiting can be implemented.
"""
current = datetime.datetime.now()
try:
previous = getattr(self, api_call.__name__ + "_window")
# Force the calling of our property so we can
# handle not having set it yet.
previous.__str__
except AttributeError:
now = datetime.datetime.now()
outside_window = datetime.timedelta(seconds=window+1)
previous = now - outside_window
if current - previous > datetime.timedelta(seconds=window):
setattr(self, api_call.__name__ + "_window", current)
else:
timeout = window - (current - previous).seconds
raise NewRelicApiRateLimitException(str(timeout)) | [
"def",
"_api_rate_limit_exceeded",
"(",
"self",
",",
"api_call",
",",
"window",
"=",
"60",
")",
":",
"current",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"previous",
"=",
"getattr",
"(",
"self",
",",
"api_call",
".",
"__name__",... | We want to keep track of the last time we sent a request to the NewRelic
API, but only for certain operations. This method will dynamically add
an attribute to the Client class with a unix timestamp with the name of
the API api_call we make so that we can check it later. We return the
amount of time until we can perform another API call so that appropriate
waiting can be implemented. | [
"We",
"want",
"to",
"keep",
"track",
"of",
"the",
"last",
"time",
"we",
"sent",
"a",
"request",
"to",
"the",
"NewRelic",
"API",
"but",
"only",
"for",
"certain",
"operations",
".",
"This",
"method",
"will",
"dynamically",
"add",
"an",
"attribute",
"to",
"... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L76-L100 |
andrewgross/pyrelic | pyrelic/client.py | Client.view_applications | def view_applications(self):
"""
Requires: account ID (taken from Client object)
Returns: a list of Application objects
Endpoint: rpm.newrelic.com
Errors: 403 Invalid API Key
Method: Get
"""
endpoint = "https://rpm.newrelic.com"
uri = "{endpoint}/accounts/{id}/applications.xml".format(endpoint=endpoint, id=self.account_id)
response = self._make_get_request(uri)
applications = []
for application in response.findall('.//application'):
application_properties = {}
for field in application:
application_properties[field.tag] = field.text
applications.append(Application(application_properties))
return applications | python | def view_applications(self):
"""
Requires: account ID (taken from Client object)
Returns: a list of Application objects
Endpoint: rpm.newrelic.com
Errors: 403 Invalid API Key
Method: Get
"""
endpoint = "https://rpm.newrelic.com"
uri = "{endpoint}/accounts/{id}/applications.xml".format(endpoint=endpoint, id=self.account_id)
response = self._make_get_request(uri)
applications = []
for application in response.findall('.//application'):
application_properties = {}
for field in application:
application_properties[field.tag] = field.text
applications.append(Application(application_properties))
return applications | [
"def",
"view_applications",
"(",
"self",
")",
":",
"endpoint",
"=",
"\"https://rpm.newrelic.com\"",
"uri",
"=",
"\"{endpoint}/accounts/{id}/applications.xml\"",
".",
"format",
"(",
"endpoint",
"=",
"endpoint",
",",
"id",
"=",
"self",
".",
"account_id",
")",
"respons... | Requires: account ID (taken from Client object)
Returns: a list of Application objects
Endpoint: rpm.newrelic.com
Errors: 403 Invalid API Key
Method: Get | [
"Requires",
":",
"account",
"ID",
"(",
"taken",
"from",
"Client",
"object",
")",
"Returns",
":",
"a",
"list",
"of",
"Application",
"objects",
"Endpoint",
":",
"rpm",
".",
"newrelic",
".",
"com",
"Errors",
":",
"403",
"Invalid",
"API",
"Key",
"Method",
":... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L102-L120 |
andrewgross/pyrelic | pyrelic/client.py | Client.delete_applications | def delete_applications(self, applications):
"""
Requires: account ID, application ID (or name).
Input should be a dictionary { 'app_id': 1234 , 'app': 'My Application'}
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: None Explicit, failed deletions will be in XML
Method: Post
"""
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/applications/delete.xml"\
.format(endpoint=endpoint, account_id=self.account_id)
payload = applications
response = self._make_post_request(uri, payload)
failed_deletions = {}
for application in response.findall('.//application'):
if not 'deleted' in application.findall('.//result')[0].text:
failed_deletions['app_id'] = application.get('id')
return failed_deletions | python | def delete_applications(self, applications):
"""
Requires: account ID, application ID (or name).
Input should be a dictionary { 'app_id': 1234 , 'app': 'My Application'}
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: None Explicit, failed deletions will be in XML
Method: Post
"""
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/applications/delete.xml"\
.format(endpoint=endpoint, account_id=self.account_id)
payload = applications
response = self._make_post_request(uri, payload)
failed_deletions = {}
for application in response.findall('.//application'):
if not 'deleted' in application.findall('.//result')[0].text:
failed_deletions['app_id'] = application.get('id')
return failed_deletions | [
"def",
"delete_applications",
"(",
"self",
",",
"applications",
")",
":",
"endpoint",
"=",
"\"https://api.newrelic.com\"",
"uri",
"=",
"\"{endpoint}/api/v1/accounts/{account_id}/applications/delete.xml\"",
".",
"format",
"(",
"endpoint",
"=",
"endpoint",
",",
"account_id",
... | Requires: account ID, application ID (or name).
Input should be a dictionary { 'app_id': 1234 , 'app': 'My Application'}
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: None Explicit, failed deletions will be in XML
Method: Post | [
"Requires",
":",
"account",
"ID",
"application",
"ID",
"(",
"or",
"name",
")",
".",
"Input",
"should",
"be",
"a",
"dictionary",
"{",
"app_id",
":",
"1234",
"app",
":",
"My",
"Application",
"}",
"Returns",
":",
"list",
"of",
"failed",
"deletions",
"(",
... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L122-L142 |
andrewgross/pyrelic | pyrelic/client.py | Client.notify_deployment | def notify_deployment(self, application_id=None, application_name=None, description=None, revision=None, changelog=None, user=None):
"""
Notify NewRelic of a deployment.
http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html
:param description:
:param revision:
:param changelog:
:param user:
:return: A dictionary containing all of the returned keys from the API
"""
endpoint = "https://rpm.newrelic.com"
uri = "{endpoint}/deployments.xml".format(endpoint=endpoint)
deploy_event = {}
if not application_id is None:
deploy_event['deployment[application_id]'] = application_id
elif not application_name is None:
deploy_event['deployment[app_name]'] = application_name
else:
raise NewRelicInvalidParameterException("Must specify either application_id or application_name.")
if not description is None:
deploy_event['deployment[description]'] = description
if not revision is None:
deploy_event['deployment[revision]'] = revision
if not changelog is None:
deploy_event['deployment[changelog]'] = changelog
if not user is None:
deploy_event['deployment[user]'] = user
response = self._make_post_request(uri, deploy_event)
result = {}
for value in response:
result[value.tag] = value.text
return result | python | def notify_deployment(self, application_id=None, application_name=None, description=None, revision=None, changelog=None, user=None):
"""
Notify NewRelic of a deployment.
http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html
:param description:
:param revision:
:param changelog:
:param user:
:return: A dictionary containing all of the returned keys from the API
"""
endpoint = "https://rpm.newrelic.com"
uri = "{endpoint}/deployments.xml".format(endpoint=endpoint)
deploy_event = {}
if not application_id is None:
deploy_event['deployment[application_id]'] = application_id
elif not application_name is None:
deploy_event['deployment[app_name]'] = application_name
else:
raise NewRelicInvalidParameterException("Must specify either application_id or application_name.")
if not description is None:
deploy_event['deployment[description]'] = description
if not revision is None:
deploy_event['deployment[revision]'] = revision
if not changelog is None:
deploy_event['deployment[changelog]'] = changelog
if not user is None:
deploy_event['deployment[user]'] = user
response = self._make_post_request(uri, deploy_event)
result = {}
for value in response:
result[value.tag] = value.text
return result | [
"def",
"notify_deployment",
"(",
"self",
",",
"application_id",
"=",
"None",
",",
"application_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"revision",
"=",
"None",
",",
"changelog",
"=",
"None",
",",
"user",
"=",
"None",
")",
":",
"endpoint",... | Notify NewRelic of a deployment.
http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html
:param description:
:param revision:
:param changelog:
:param user:
:return: A dictionary containing all of the returned keys from the API | [
"Notify",
"NewRelic",
"of",
"a",
"deployment",
".",
"http",
":",
"//",
"newrelic",
".",
"github",
".",
"io",
"/",
"newrelic_api",
"/",
"NewRelicApi",
"/",
"Deployment",
".",
"html"
] | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L167-L209 |
andrewgross/pyrelic | pyrelic/client.py | Client.get_metric_names | def get_metric_names(self, agent_id, re=None, limit=5000):
"""
Requires: application ID
Optional: Regex to filter metric names, limit of results
Returns: A dictionary,
key: metric name,
value: list of fields available for a given metric
Method: Get
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API Key, 422 Invalid Parameters
Endpoint: api.newrelic.com
"""
# Make sure we play it slow
self._api_rate_limit_exceeded(self.get_metric_names)
# Construct our GET request parameters into a nice dictionary
parameters = {'re': re, 'limit': limit}
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/applications/{agent_id}/metrics.xml"\
.format(endpoint=endpoint, agent_id=agent_id)
# A longer timeout is needed due to the amount of
# data that can be returned without a regex search
response = self._make_get_request(uri, parameters=parameters, timeout=max(self.timeout, 5.0))
# Parse the response. It seems clearer to return a dict of
# metrics/fields instead of a list of metric objects. It might be more
# consistent with the retrieval of metric data to make them objects but
# since the attributes in each type of metric object are different
# (and we aren't going to make heavyweight objects) we don't want to.
metrics = {}
for metric in response.findall('.//metric'):
fields = []
for field in metric.findall('.//field'):
fields.append(field.get('name'))
metrics[metric.get('name')] = fields
return metrics | python | def get_metric_names(self, agent_id, re=None, limit=5000):
"""
Requires: application ID
Optional: Regex to filter metric names, limit of results
Returns: A dictionary,
key: metric name,
value: list of fields available for a given metric
Method: Get
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API Key, 422 Invalid Parameters
Endpoint: api.newrelic.com
"""
# Make sure we play it slow
self._api_rate_limit_exceeded(self.get_metric_names)
# Construct our GET request parameters into a nice dictionary
parameters = {'re': re, 'limit': limit}
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/applications/{agent_id}/metrics.xml"\
.format(endpoint=endpoint, agent_id=agent_id)
# A longer timeout is needed due to the amount of
# data that can be returned without a regex search
response = self._make_get_request(uri, parameters=parameters, timeout=max(self.timeout, 5.0))
# Parse the response. It seems clearer to return a dict of
# metrics/fields instead of a list of metric objects. It might be more
# consistent with the retrieval of metric data to make them objects but
# since the attributes in each type of metric object are different
# (and we aren't going to make heavyweight objects) we don't want to.
metrics = {}
for metric in response.findall('.//metric'):
fields = []
for field in metric.findall('.//field'):
fields.append(field.get('name'))
metrics[metric.get('name')] = fields
return metrics | [
"def",
"get_metric_names",
"(",
"self",
",",
"agent_id",
",",
"re",
"=",
"None",
",",
"limit",
"=",
"5000",
")",
":",
"# Make sure we play it slow",
"self",
".",
"_api_rate_limit_exceeded",
"(",
"self",
".",
"get_metric_names",
")",
"# Construct our GET request para... | Requires: application ID
Optional: Regex to filter metric names, limit of results
Returns: A dictionary,
key: metric name,
value: list of fields available for a given metric
Method: Get
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API Key, 422 Invalid Parameters
Endpoint: api.newrelic.com | [
"Requires",
":",
"application",
"ID",
"Optional",
":",
"Regex",
"to",
"filter",
"metric",
"names",
"limit",
"of",
"results",
"Returns",
":",
"A",
"dictionary",
"key",
":",
"metric",
"name",
"value",
":",
"list",
"of",
"fields",
"available",
"for",
"a",
"gi... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L211-L248 |
andrewgross/pyrelic | pyrelic/client.py | Client.get_metric_data | def get_metric_data(self, applications, metrics, field, begin, end, summary=False):
"""
Requires: account ID,
list of application IDs,
list of metrics,
metric fields,
begin,
end
Method: Get
Endpoint: api.newrelic.com
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of metric objects, each will have information about its
start/end time, application, metric name and any associated
values
"""
# TODO: it may be nice to have some helper methods that make it easier
# to query by common time frames based off the time period folding
# of the metrics returned by the New Relic API.
# Make sure we aren't going to hit an API timeout
self._api_rate_limit_exceeded(self.get_metric_data)
# Just in case the API needs parameters to be in order
parameters = {}
# Figure out what we were passed and set our parameter correctly
# TODO: allow querying by something other than an application name/id,
# such as server id or agent id
try:
int(applications[0])
except ValueError:
app_string = "app"
else:
app_string = "app_id"
if len(applications) > 1:
app_string = app_string + "[]"
# Set our parameters
parameters[app_string] = applications
parameters['metrics[]'] = metrics
parameters['field'] = field
parameters['begin'] = begin
parameters['end'] = end
parameters['summary'] = int(summary)
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/metrics/data.xml"\
.format(endpoint=endpoint, account_id=self.account_id)
# A longer timeout is needed due to the
# amount of data that can be returned
response = self._make_get_request(uri, parameters=parameters, timeout=max(self.timeout, 5.0))
# Parsing our response into lightweight objects and creating a list.
# The dividing factor is the time period covered by the metric,
# there should be no overlaps in time.
metrics = []
for metric in response.findall('.//metric'):
metrics.append(Metric(metric))
return metrics | python | def get_metric_data(self, applications, metrics, field, begin, end, summary=False):
"""
Requires: account ID,
list of application IDs,
list of metrics,
metric fields,
begin,
end
Method: Get
Endpoint: api.newrelic.com
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of metric objects, each will have information about its
start/end time, application, metric name and any associated
values
"""
# TODO: it may be nice to have some helper methods that make it easier
# to query by common time frames based off the time period folding
# of the metrics returned by the New Relic API.
# Make sure we aren't going to hit an API timeout
self._api_rate_limit_exceeded(self.get_metric_data)
# Just in case the API needs parameters to be in order
parameters = {}
# Figure out what we were passed and set our parameter correctly
# TODO: allow querying by something other than an application name/id,
# such as server id or agent id
try:
int(applications[0])
except ValueError:
app_string = "app"
else:
app_string = "app_id"
if len(applications) > 1:
app_string = app_string + "[]"
# Set our parameters
parameters[app_string] = applications
parameters['metrics[]'] = metrics
parameters['field'] = field
parameters['begin'] = begin
parameters['end'] = end
parameters['summary'] = int(summary)
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/metrics/data.xml"\
.format(endpoint=endpoint, account_id=self.account_id)
# A longer timeout is needed due to the
# amount of data that can be returned
response = self._make_get_request(uri, parameters=parameters, timeout=max(self.timeout, 5.0))
# Parsing our response into lightweight objects and creating a list.
# The dividing factor is the time period covered by the metric,
# there should be no overlaps in time.
metrics = []
for metric in response.findall('.//metric'):
metrics.append(Metric(metric))
return metrics | [
"def",
"get_metric_data",
"(",
"self",
",",
"applications",
",",
"metrics",
",",
"field",
",",
"begin",
",",
"end",
",",
"summary",
"=",
"False",
")",
":",
"# TODO: it may be nice to have some helper methods that make it easier",
"# to query by common time frames base... | Requires: account ID,
list of application IDs,
list of metrics,
metric fields,
begin,
end
Method: Get
Endpoint: api.newrelic.com
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of metric objects, each will have information about its
start/end time, application, metric name and any associated
values | [
"Requires",
":",
"account",
"ID",
"list",
"of",
"application",
"IDs",
"list",
"of",
"metrics",
"metric",
"fields",
"begin",
"end",
"Method",
":",
"Get",
"Endpoint",
":",
"api",
".",
"newrelic",
".",
"com",
"Restrictions",
":",
"Rate",
"limit",
"to",
"1x",
... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L250-L310 |
andrewgross/pyrelic | pyrelic/client.py | Client.get_threshold_values | def get_threshold_values(self, application_id):
"""
Requires: account ID, list of application ID
Method: Get
Endpoint: api.newrelic.com
Restrictions: ???
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of threshold_value objects, each will have information
about its start/end time, metric name, metric value, and the
current threshold
"""
endpoint = "https://rpm.newrelic.com"
remote_file = "threshold_values.xml"
uri = "{endpoint}/accounts/{account_id}/applications/{app_id}/{xml}".format(endpoint=endpoint, account_id=self.account_id, app_id=application_id, xml=remote_file)
response = self._make_get_request(uri)
thresholds = []
for threshold_value in response.findall('.//threshold_value'):
properties = {}
# a little ugly, but the output works fine.
for tag, text in threshold_value.items():
properties[tag] = text
thresholds.append(Threshold(properties))
return thresholds | python | def get_threshold_values(self, application_id):
"""
Requires: account ID, list of application ID
Method: Get
Endpoint: api.newrelic.com
Restrictions: ???
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of threshold_value objects, each will have information
about its start/end time, metric name, metric value, and the
current threshold
"""
endpoint = "https://rpm.newrelic.com"
remote_file = "threshold_values.xml"
uri = "{endpoint}/accounts/{account_id}/applications/{app_id}/{xml}".format(endpoint=endpoint, account_id=self.account_id, app_id=application_id, xml=remote_file)
response = self._make_get_request(uri)
thresholds = []
for threshold_value in response.findall('.//threshold_value'):
properties = {}
# a little ugly, but the output works fine.
for tag, text in threshold_value.items():
properties[tag] = text
thresholds.append(Threshold(properties))
return thresholds | [
"def",
"get_threshold_values",
"(",
"self",
",",
"application_id",
")",
":",
"endpoint",
"=",
"\"https://rpm.newrelic.com\"",
"remote_file",
"=",
"\"threshold_values.xml\"",
"uri",
"=",
"\"{endpoint}/accounts/{account_id}/applications/{app_id}/{xml}\"",
".",
"format",
"(",
"e... | Requires: account ID, list of application ID
Method: Get
Endpoint: api.newrelic.com
Restrictions: ???
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of threshold_value objects, each will have information
about its start/end time, metric name, metric value, and the
current threshold | [
"Requires",
":",
"account",
"ID",
"list",
"of",
"application",
"ID",
"Method",
":",
"Get",
"Endpoint",
":",
"api",
".",
"newrelic",
".",
"com",
"Restrictions",
":",
"???",
"Errors",
":",
"403",
"Invalid",
"API",
"key",
"422",
"Invalid",
"Parameters",
"Retu... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L312-L335 |
andrewgross/pyrelic | pyrelic/client.py | Client.view_servers | def view_servers(self):
"""
Requires: account ID (taken from Client object)
Returns: a list of Server objects
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Get
"""
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{id}/servers.xml".format(endpoint=endpoint, id=self.account_id)
response = self._make_get_request(uri)
servers = []
for server in response.findall('.//server'):
server_properties = {}
for field in server:
server_properties[field.tag] = field.text
servers.append(Server(server_properties))
return servers | python | def view_servers(self):
"""
Requires: account ID (taken from Client object)
Returns: a list of Server objects
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Get
"""
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{id}/servers.xml".format(endpoint=endpoint, id=self.account_id)
response = self._make_get_request(uri)
servers = []
for server in response.findall('.//server'):
server_properties = {}
for field in server:
server_properties[field.tag] = field.text
servers.append(Server(server_properties))
return servers | [
"def",
"view_servers",
"(",
"self",
")",
":",
"endpoint",
"=",
"\"https://api.newrelic.com\"",
"uri",
"=",
"\"{endpoint}/api/v1/accounts/{id}/servers.xml\"",
".",
"format",
"(",
"endpoint",
"=",
"endpoint",
",",
"id",
"=",
"self",
".",
"account_id",
")",
"response",... | Requires: account ID (taken from Client object)
Returns: a list of Server objects
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Get | [
"Requires",
":",
"account",
"ID",
"(",
"taken",
"from",
"Client",
"object",
")",
"Returns",
":",
"a",
"list",
"of",
"Server",
"objects",
"Endpoint",
":",
"api",
".",
"newrelic",
".",
"com",
"Errors",
":",
"403",
"Invalid",
"API",
"Key",
"Method",
":",
... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L337-L355 |
andrewgross/pyrelic | pyrelic/client.py | Client.delete_servers | def delete_servers(self, server_id):
"""
Requires: account ID, server ID
Input should be server id
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Delete
"""
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml".format(
endpoint=endpoint,
account_id=self.account_id,
server_id=server_id)
response = self._make_delete_request(uri)
failed_deletions = []
for server in response.findall('.//server'):
if not 'deleted' in server.findall('.//result')[0].text:
failed_deletions.append({'server_id': server.get('id')})
return failed_deletions | python | def delete_servers(self, server_id):
"""
Requires: account ID, server ID
Input should be server id
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Delete
"""
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml".format(
endpoint=endpoint,
account_id=self.account_id,
server_id=server_id)
response = self._make_delete_request(uri)
failed_deletions = []
for server in response.findall('.//server'):
if not 'deleted' in server.findall('.//result')[0].text:
failed_deletions.append({'server_id': server.get('id')})
return failed_deletions | [
"def",
"delete_servers",
"(",
"self",
",",
"server_id",
")",
":",
"endpoint",
"=",
"\"https://api.newrelic.com\"",
"uri",
"=",
"\"{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml\"",
".",
"format",
"(",
"endpoint",
"=",
"endpoint",
",",
"account_id",
"=",
... | Requires: account ID, server ID
Input should be server id
Returns: list of failed deletions (if any)
Endpoint: api.newrelic.com
Errors: 403 Invalid API Key
Method: Delete | [
"Requires",
":",
"account",
"ID",
"server",
"ID",
"Input",
"should",
"be",
"server",
"id",
"Returns",
":",
"list",
"of",
"failed",
"deletions",
"(",
"if",
"any",
")",
"Endpoint",
":",
"api",
".",
"newrelic",
".",
"com",
"Errors",
":",
"403",
"Invalid",
... | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L357-L377 |
biocore/burrito-fillings | bfillings/fastq_join.py | join_paired_end_reads_fastqjoin | def join_paired_end_reads_fastqjoin(
reads1_infile_path,
reads2_infile_path,
perc_max_diff=None, # typical default is 8
min_overlap=None, # typical default is 6
outfile_label='fastqjoin',
params={},
working_dir=tempfile.gettempdir(),
SuppressStderr=True,
SuppressStdout=True,
HALT_EXEC=False):
""" Runs fastq-join, with default parameters to assemble paired-end reads.
Returns file path string.
-reads1_infile_path : reads1.fastq infile path
-reads2_infile_path : reads2.fastq infile path
-perc_max_diff : maximum % diff of overlap differences allowed
-min_overlap : minimum allowed overlap required to assemble reads
-outfile_label : base name for output files.
-params : dictionary of application controller parameters
"""
abs_r1_path = os.path.abspath(reads1_infile_path)
abs_r2_path = os.path.abspath(reads2_infile_path)
infile_paths = [abs_r1_path, abs_r2_path]
# check / make absolute infile paths
for p in infile_paths:
if not os.path.exists(p):
raise IOError('File not found at: %s' % p)
fastq_join_app = FastqJoin(params=params,
WorkingDir=working_dir,
SuppressStderr=SuppressStderr,
SuppressStdout=SuppressStdout,
HALT_EXEC=HALT_EXEC)
# set param. Helps with QIIME integration to have these values
# set to None by default. This way we do not have to worry
# about changes in default behaviour of the wrapped
# application
if perc_max_diff is not None:
if isinstance(perc_max_diff, int) and 0 <= perc_max_diff <= 100:
fastq_join_app.Parameters['-p'].on(perc_max_diff)
else:
raise ValueError("perc_max_diff must be int between 0-100!")
if min_overlap is not None:
if isinstance(min_overlap, int) and 0 < min_overlap:
fastq_join_app.Parameters['-m'].on(min_overlap)
else:
raise ValueError("min_overlap must be an int >= 0!")
if outfile_label is not None:
if isinstance(outfile_label, str):
fastq_join_app.Parameters['-o'].on(outfile_label + '.')
else:
raise ValueError("outfile_label must be a string!")
else:
pass
# run assembler
result = fastq_join_app(infile_paths)
# Store output file path data to dict
path_dict = {}
path_dict['Assembled'] = result['Assembled'].name
path_dict['UnassembledReads1'] = result['UnassembledReads1'].name
path_dict['UnassembledReads2'] = result['UnassembledReads2'].name
# sanity check that files actually exist in path lcoations
for path in path_dict.values():
if not os.path.exists(path):
raise IOError('Output file not found at: %s' % path)
# fastq-join automatically appends: 'join', 'un1', or 'un2'
# to the end of the file names. But we want to rename them so
# they end in '.fastq'. So, we iterate through path_dict to
# rename the files and overwrite the dict values.
for key, file_path in path_dict.items():
new_file_path = file_path + '.fastq'
shutil.move(file_path, new_file_path)
path_dict[key] = new_file_path
return path_dict | python | def join_paired_end_reads_fastqjoin(
reads1_infile_path,
reads2_infile_path,
perc_max_diff=None, # typical default is 8
min_overlap=None, # typical default is 6
outfile_label='fastqjoin',
params={},
working_dir=tempfile.gettempdir(),
SuppressStderr=True,
SuppressStdout=True,
HALT_EXEC=False):
""" Runs fastq-join, with default parameters to assemble paired-end reads.
Returns file path string.
-reads1_infile_path : reads1.fastq infile path
-reads2_infile_path : reads2.fastq infile path
-perc_max_diff : maximum % diff of overlap differences allowed
-min_overlap : minimum allowed overlap required to assemble reads
-outfile_label : base name for output files.
-params : dictionary of application controller parameters
"""
abs_r1_path = os.path.abspath(reads1_infile_path)
abs_r2_path = os.path.abspath(reads2_infile_path)
infile_paths = [abs_r1_path, abs_r2_path]
# check / make absolute infile paths
for p in infile_paths:
if not os.path.exists(p):
raise IOError('File not found at: %s' % p)
fastq_join_app = FastqJoin(params=params,
WorkingDir=working_dir,
SuppressStderr=SuppressStderr,
SuppressStdout=SuppressStdout,
HALT_EXEC=HALT_EXEC)
# set param. Helps with QIIME integration to have these values
# set to None by default. This way we do not have to worry
# about changes in default behaviour of the wrapped
# application
if perc_max_diff is not None:
if isinstance(perc_max_diff, int) and 0 <= perc_max_diff <= 100:
fastq_join_app.Parameters['-p'].on(perc_max_diff)
else:
raise ValueError("perc_max_diff must be int between 0-100!")
if min_overlap is not None:
if isinstance(min_overlap, int) and 0 < min_overlap:
fastq_join_app.Parameters['-m'].on(min_overlap)
else:
raise ValueError("min_overlap must be an int >= 0!")
if outfile_label is not None:
if isinstance(outfile_label, str):
fastq_join_app.Parameters['-o'].on(outfile_label + '.')
else:
raise ValueError("outfile_label must be a string!")
else:
pass
# run assembler
result = fastq_join_app(infile_paths)
# Store output file path data to dict
path_dict = {}
path_dict['Assembled'] = result['Assembled'].name
path_dict['UnassembledReads1'] = result['UnassembledReads1'].name
path_dict['UnassembledReads2'] = result['UnassembledReads2'].name
# sanity check that files actually exist in path lcoations
for path in path_dict.values():
if not os.path.exists(path):
raise IOError('Output file not found at: %s' % path)
# fastq-join automatically appends: 'join', 'un1', or 'un2'
# to the end of the file names. But we want to rename them so
# they end in '.fastq'. So, we iterate through path_dict to
# rename the files and overwrite the dict values.
for key, file_path in path_dict.items():
new_file_path = file_path + '.fastq'
shutil.move(file_path, new_file_path)
path_dict[key] = new_file_path
return path_dict | [
"def",
"join_paired_end_reads_fastqjoin",
"(",
"reads1_infile_path",
",",
"reads2_infile_path",
",",
"perc_max_diff",
"=",
"None",
",",
"# typical default is 8",
"min_overlap",
"=",
"None",
",",
"# typical default is 6",
"outfile_label",
"=",
"'fastqjoin'",
",",
"params",
... | Runs fastq-join, with default parameters to assemble paired-end reads.
Returns file path string.
-reads1_infile_path : reads1.fastq infile path
-reads2_infile_path : reads2.fastq infile path
-perc_max_diff : maximum % diff of overlap differences allowed
-min_overlap : minimum allowed overlap required to assemble reads
-outfile_label : base name for output files.
-params : dictionary of application controller parameters | [
"Runs",
"fastq",
"-",
"join",
"with",
"default",
"parameters",
"to",
"assemble",
"paired",
"-",
"end",
"reads",
".",
"Returns",
"file",
"path",
"string",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/fastq_join.py#L144-L229 |
biocore/burrito-fillings | bfillings/fastq_join.py | FastqJoin._get_output_path | def _get_output_path(self):
"""Checks if a base file label / path is set. Returns absolute path."""
if self.Parameters['-o'].isOn():
output_path = self._absolute(str(self.Parameters['-o'].Value))
else:
raise ValueError("No output path specified.")
return output_path | python | def _get_output_path(self):
"""Checks if a base file label / path is set. Returns absolute path."""
if self.Parameters['-o'].isOn():
output_path = self._absolute(str(self.Parameters['-o'].Value))
else:
raise ValueError("No output path specified.")
return output_path | [
"def",
"_get_output_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"Parameters",
"[",
"'-o'",
"]",
".",
"isOn",
"(",
")",
":",
"output_path",
"=",
"self",
".",
"_absolute",
"(",
"str",
"(",
"self",
".",
"Parameters",
"[",
"'-o'",
"]",
".",
"Value",... | Checks if a base file label / path is set. Returns absolute path. | [
"Checks",
"if",
"a",
"base",
"file",
"label",
"/",
"path",
"is",
"set",
".",
"Returns",
"absolute",
"path",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/fastq_join.py#L61-L67 |
biocore/burrito-fillings | bfillings/fastq_join.py | FastqJoin._get_stitch_report_path | def _get_stitch_report_path(self):
"""Checks if stitch report label / path is set. Returns absolute path."""
if self.Parameters['-r'].isOn():
stitch_path = self._absolute(str(self.Parameters['-r'].Value))
return stitch_path
elif self.Parameters['-r'].isOff():
return None | python | def _get_stitch_report_path(self):
"""Checks if stitch report label / path is set. Returns absolute path."""
if self.Parameters['-r'].isOn():
stitch_path = self._absolute(str(self.Parameters['-r'].Value))
return stitch_path
elif self.Parameters['-r'].isOff():
return None | [
"def",
"_get_stitch_report_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"Parameters",
"[",
"'-r'",
"]",
".",
"isOn",
"(",
")",
":",
"stitch_path",
"=",
"self",
".",
"_absolute",
"(",
"str",
"(",
"self",
".",
"Parameters",
"[",
"'-r'",
"]",
".",
"... | Checks if stitch report label / path is set. Returns absolute path. | [
"Checks",
"if",
"stitch",
"report",
"label",
"/",
"path",
"is",
"set",
".",
"Returns",
"absolute",
"path",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/fastq_join.py#L69-L75 |
biocore/burrito-fillings | bfillings/fastq_join.py | FastqJoin._get_result_paths | def _get_result_paths(self, data):
"""Capture fastq-join output.
Three output files are produced, in the form of
outputjoin : assembled paired reads
outputun1 : unassembled reads_1
outputun2 : unassembled reads_2
If a barcode / mate-pairs file is also provided then the following
additional files are output:
outputjoin2
outputun3
If a verbose stitch length report (-r) is chosen to be written by the
user then use a user specified filename.
"""
output_path = self._get_output_path()
result = {}
# always output:
result['Assembled'] = ResultPath(Path=output_path + 'join',
IsWritten=True)
result['UnassembledReads1'] = ResultPath(Path=output_path + 'un1',
IsWritten=True)
result['UnassembledReads2'] = ResultPath(Path=output_path + 'un2',
IsWritten=True)
# check if stitch report is requested:
stitch_path = self._get_stitch_report_path()
if stitch_path:
result['Report'] = ResultPath(Path=stitch_path,
IsWritten=True)
# Check if mate file / barcode file is present.
# If not, return result
# We need to check this way becuase there are no infile parameters.
mate_path_string = output_path + 'join2'
mate_unassembled_path_string = output_path + 'un3'
if os.path.exists(mate_path_string) and \
os.path.exists(mate_unassembled_path_string):
result['Mate'] = ResultPath(Path=mate_path_string,
IsWritten=True)
result['MateUnassembled'] = ResultPath(Path=
mate_unassembled_path_string,
IsWritten=True)
else:
pass
return result | python | def _get_result_paths(self, data):
"""Capture fastq-join output.
Three output files are produced, in the form of
outputjoin : assembled paired reads
outputun1 : unassembled reads_1
outputun2 : unassembled reads_2
If a barcode / mate-pairs file is also provided then the following
additional files are output:
outputjoin2
outputun3
If a verbose stitch length report (-r) is chosen to be written by the
user then use a user specified filename.
"""
output_path = self._get_output_path()
result = {}
# always output:
result['Assembled'] = ResultPath(Path=output_path + 'join',
IsWritten=True)
result['UnassembledReads1'] = ResultPath(Path=output_path + 'un1',
IsWritten=True)
result['UnassembledReads2'] = ResultPath(Path=output_path + 'un2',
IsWritten=True)
# check if stitch report is requested:
stitch_path = self._get_stitch_report_path()
if stitch_path:
result['Report'] = ResultPath(Path=stitch_path,
IsWritten=True)
# Check if mate file / barcode file is present.
# If not, return result
# We need to check this way becuase there are no infile parameters.
mate_path_string = output_path + 'join2'
mate_unassembled_path_string = output_path + 'un3'
if os.path.exists(mate_path_string) and \
os.path.exists(mate_unassembled_path_string):
result['Mate'] = ResultPath(Path=mate_path_string,
IsWritten=True)
result['MateUnassembled'] = ResultPath(Path=
mate_unassembled_path_string,
IsWritten=True)
else:
pass
return result | [
"def",
"_get_result_paths",
"(",
"self",
",",
"data",
")",
":",
"output_path",
"=",
"self",
".",
"_get_output_path",
"(",
")",
"result",
"=",
"{",
"}",
"# always output:",
"result",
"[",
"'Assembled'",
"]",
"=",
"ResultPath",
"(",
"Path",
"=",
"output_path",... | Capture fastq-join output.
Three output files are produced, in the form of
outputjoin : assembled paired reads
outputun1 : unassembled reads_1
outputun2 : unassembled reads_2
If a barcode / mate-pairs file is also provided then the following
additional files are output:
outputjoin2
outputun3
If a verbose stitch length report (-r) is chosen to be written by the
user then use a user specified filename. | [
"Capture",
"fastq",
"-",
"join",
"output",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/fastq_join.py#L77-L125 |
kejbaly2/metrique | metrique/parse.py | parse_fields | def parse_fields(fields, as_dict=False):
'''
Given a list of fields (or several other variants of the same),
return back a consistent, normalized form of the same.
To forms are currently supported:
dictionary form: dict 'key' is the field name
and dict 'value' is either 1 (include)
or 0 (exclude).
list form (other): list values are field names to be included
If fields passed is one of the following values, it will be assumed
the user wants to include all fields and thus, we return an empty
dict or list to indicate this, accordingly:
* all fields: ['~', None, False, True, {}, []]
'''
_fields = {}
if fields in ['~', None, False, True, {}, []]:
# all these signify 'all fields'
_fields = {}
elif isinstance(fields, dict):
_fields.update(
{unicode(k).strip(): int(v) for k, v in fields.iteritems()})
elif isinstance(fields, basestring):
_fields.update({unicode(s).strip(): 1 for s in fields.split(',')})
elif isinstance(fields, (list, tuple)):
_fields.update({unicode(s).strip(): 1 for s in fields})
else:
raise ValueError("invalid fields value")
if as_dict:
return _fields
else:
return sorted(_fields.keys()) | python | def parse_fields(fields, as_dict=False):
'''
Given a list of fields (or several other variants of the same),
return back a consistent, normalized form of the same.
To forms are currently supported:
dictionary form: dict 'key' is the field name
and dict 'value' is either 1 (include)
or 0 (exclude).
list form (other): list values are field names to be included
If fields passed is one of the following values, it will be assumed
the user wants to include all fields and thus, we return an empty
dict or list to indicate this, accordingly:
* all fields: ['~', None, False, True, {}, []]
'''
_fields = {}
if fields in ['~', None, False, True, {}, []]:
# all these signify 'all fields'
_fields = {}
elif isinstance(fields, dict):
_fields.update(
{unicode(k).strip(): int(v) for k, v in fields.iteritems()})
elif isinstance(fields, basestring):
_fields.update({unicode(s).strip(): 1 for s in fields.split(',')})
elif isinstance(fields, (list, tuple)):
_fields.update({unicode(s).strip(): 1 for s in fields})
else:
raise ValueError("invalid fields value")
if as_dict:
return _fields
else:
return sorted(_fields.keys()) | [
"def",
"parse_fields",
"(",
"fields",
",",
"as_dict",
"=",
"False",
")",
":",
"_fields",
"=",
"{",
"}",
"if",
"fields",
"in",
"[",
"'~'",
",",
"None",
",",
"False",
",",
"True",
",",
"{",
"}",
",",
"[",
"]",
"]",
":",
"# all these signify 'all fields... | Given a list of fields (or several other variants of the same),
return back a consistent, normalized form of the same.
To forms are currently supported:
dictionary form: dict 'key' is the field name
and dict 'value' is either 1 (include)
or 0 (exclude).
list form (other): list values are field names to be included
If fields passed is one of the following values, it will be assumed
the user wants to include all fields and thus, we return an empty
dict or list to indicate this, accordingly:
* all fields: ['~', None, False, True, {}, []] | [
"Given",
"a",
"list",
"of",
"fields",
"(",
"or",
"several",
"other",
"variants",
"of",
"the",
"same",
")",
"return",
"back",
"a",
"consistent",
"normalized",
"form",
"of",
"the",
"same",
"."
] | train | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/parse.py#L37-L71 |
kejbaly2/metrique | metrique/parse.py | date_range | def date_range(date, func='date'):
'''
Return back start and end dates given date string
:param date: metrique date (range) to apply to pql query
The tilde '~' symbol is used as a date range separated.
A tilde by itself will mean 'all dates ranges possible'
and will therefore search all objects irrelevant of it's
_end date timestamp.
A date on the left with a tilde but no date on the right
will generate a query where the date range starts
at the date provide and ends 'today'.
ie, from date -> now.
A date on the right with a tilde but no date on the left
will generate a query where the date range starts from
the first date available in the past (oldest) and ends
on the date provided.
ie, from beginning of known time -> date.
A date on both the left and right will be a simple date
range query where the date range starts from the date
on the left and ends on the date on the right.
ie, from date to date.
'''
if isinstance(date, basestring):
date = date.strip()
if not date:
return '_end == None'
if date == '~':
return ''
# don't include objects which have start EXACTLY on the
# date in question, since we're looking for objects
# which were true BEFORE the given date, not before or on.
before = lambda d: '_start < %s("%s")' % (func, ts2dt(d) if d else None)
after = lambda d: '(_end >= %s("%s") or _end == None)' % \
(func, ts2dt(d) if d else None)
split = date.split('~')
# replace all occurances of 'T' with ' '
# this is used for when datetime is passed in
# like YYYY-MM-DDTHH:MM:SS instead of
# YYYY-MM-DD HH:MM:SS as expected
# and drop all occurances of 'timezone' like substring
# FIXME: need to adjust (to UTC) for the timezone info we're dropping!
split = [re.sub('\+\d\d:\d\d', '', d.replace('T', ' ')) for d in split]
if len(split) == 1: # 'dt'
return '%s and %s' % (before(split[0]), after(split[0]))
elif split[0] in ['', None]: # '~dt'
return before(split[1])
elif split[1] in ['', None]: # 'dt~'
return after(split[0])
else: # 'dt~dt'
return '%s and %s' % (before(split[1]), after(split[0])) | python | def date_range(date, func='date'):
'''
Return back start and end dates given date string
:param date: metrique date (range) to apply to pql query
The tilde '~' symbol is used as a date range separated.
A tilde by itself will mean 'all dates ranges possible'
and will therefore search all objects irrelevant of it's
_end date timestamp.
A date on the left with a tilde but no date on the right
will generate a query where the date range starts
at the date provide and ends 'today'.
ie, from date -> now.
A date on the right with a tilde but no date on the left
will generate a query where the date range starts from
the first date available in the past (oldest) and ends
on the date provided.
ie, from beginning of known time -> date.
A date on both the left and right will be a simple date
range query where the date range starts from the date
on the left and ends on the date on the right.
ie, from date to date.
'''
if isinstance(date, basestring):
date = date.strip()
if not date:
return '_end == None'
if date == '~':
return ''
# don't include objects which have start EXACTLY on the
# date in question, since we're looking for objects
# which were true BEFORE the given date, not before or on.
before = lambda d: '_start < %s("%s")' % (func, ts2dt(d) if d else None)
after = lambda d: '(_end >= %s("%s") or _end == None)' % \
(func, ts2dt(d) if d else None)
split = date.split('~')
# replace all occurances of 'T' with ' '
# this is used for when datetime is passed in
# like YYYY-MM-DDTHH:MM:SS instead of
# YYYY-MM-DD HH:MM:SS as expected
# and drop all occurances of 'timezone' like substring
# FIXME: need to adjust (to UTC) for the timezone info we're dropping!
split = [re.sub('\+\d\d:\d\d', '', d.replace('T', ' ')) for d in split]
if len(split) == 1: # 'dt'
return '%s and %s' % (before(split[0]), after(split[0]))
elif split[0] in ['', None]: # '~dt'
return before(split[1])
elif split[1] in ['', None]: # 'dt~'
return after(split[0])
else: # 'dt~dt'
return '%s and %s' % (before(split[1]), after(split[0])) | [
"def",
"date_range",
"(",
"date",
",",
"func",
"=",
"'date'",
")",
":",
"if",
"isinstance",
"(",
"date",
",",
"basestring",
")",
":",
"date",
"=",
"date",
".",
"strip",
"(",
")",
"if",
"not",
"date",
":",
"return",
"'_end == None'",
"if",
"date",
"==... | Return back start and end dates given date string
:param date: metrique date (range) to apply to pql query
The tilde '~' symbol is used as a date range separated.
A tilde by itself will mean 'all dates ranges possible'
and will therefore search all objects irrelevant of it's
_end date timestamp.
A date on the left with a tilde but no date on the right
will generate a query where the date range starts
at the date provide and ends 'today'.
ie, from date -> now.
A date on the right with a tilde but no date on the left
will generate a query where the date range starts from
the first date available in the past (oldest) and ends
on the date provided.
ie, from beginning of known time -> date.
A date on both the left and right will be a simple date
range query where the date range starts from the date
on the left and ends on the date on the right.
ie, from date to date. | [
"Return",
"back",
"start",
"and",
"end",
"dates",
"given",
"date",
"string"
] | train | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/parse.py#L74-L130 |
kejbaly2/metrique | metrique/parse.py | parse | def parse(table, query=None, date=None, fields=None,
distinct=False, limit=None, alias=None):
'''
Given a SQLAlchemy Table() instance, generate a SQLAlchemy
Query() instance with the given parameters.
:param table: SQLAlchemy Table() instance
:param query: MQL query
:param date: metrique date range query
:param date: metrique date range query element
:param fields: list of field names to return as columns
:param distinct: apply DISTINCT to this query
:param limit: apply LIMIT to this query
:param alias: apply ALIAS AS to this query
'''
date = date_range(date)
limit = int(limit or -1)
if query and date:
query = '%s and %s' % (query, date)
elif date:
query = date
elif query:
pass
else: # date is null, query is not
query = None
fields = parse_fields(fields=fields) or None
# we must pass in the table column objects themselves to ensure
# our bind / result processors are mapped properly
fields = fields if fields else table.columns
msg = 'parse(query=%s, fields=%s)' % (query, fields)
#msg = re.sub(' in \[[^\]]+\]', ' in [...]', msg)
logger.debug(msg)
kwargs = {}
if query:
interpreter = MQLInterpreter(table)
query = interpreter.parse(query)
kwargs['whereclause'] = query
if distinct:
kwargs['distinct'] = distinct
query = select(fields, from_obj=table, **kwargs)
if limit >= 1:
query = query.limit(limit)
if alias:
query = query.alias(alias)
return query | python | def parse(table, query=None, date=None, fields=None,
distinct=False, limit=None, alias=None):
'''
Given a SQLAlchemy Table() instance, generate a SQLAlchemy
Query() instance with the given parameters.
:param table: SQLAlchemy Table() instance
:param query: MQL query
:param date: metrique date range query
:param date: metrique date range query element
:param fields: list of field names to return as columns
:param distinct: apply DISTINCT to this query
:param limit: apply LIMIT to this query
:param alias: apply ALIAS AS to this query
'''
date = date_range(date)
limit = int(limit or -1)
if query and date:
query = '%s and %s' % (query, date)
elif date:
query = date
elif query:
pass
else: # date is null, query is not
query = None
fields = parse_fields(fields=fields) or None
# we must pass in the table column objects themselves to ensure
# our bind / result processors are mapped properly
fields = fields if fields else table.columns
msg = 'parse(query=%s, fields=%s)' % (query, fields)
#msg = re.sub(' in \[[^\]]+\]', ' in [...]', msg)
logger.debug(msg)
kwargs = {}
if query:
interpreter = MQLInterpreter(table)
query = interpreter.parse(query)
kwargs['whereclause'] = query
if distinct:
kwargs['distinct'] = distinct
query = select(fields, from_obj=table, **kwargs)
if limit >= 1:
query = query.limit(limit)
if alias:
query = query.alias(alias)
return query | [
"def",
"parse",
"(",
"table",
",",
"query",
"=",
"None",
",",
"date",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"distinct",
"=",
"False",
",",
"limit",
"=",
"None",
",",
"alias",
"=",
"None",
")",
":",
"date",
"=",
"date_range",
"(",
"date",
... | Given a SQLAlchemy Table() instance, generate a SQLAlchemy
Query() instance with the given parameters.
:param table: SQLAlchemy Table() instance
:param query: MQL query
:param date: metrique date range query
:param date: metrique date range query element
:param fields: list of field names to return as columns
:param distinct: apply DISTINCT to this query
:param limit: apply LIMIT to this query
:param alias: apply ALIAS AS to this query | [
"Given",
"a",
"SQLAlchemy",
"Table",
"()",
"instance",
"generate",
"a",
"SQLAlchemy",
"Query",
"()",
"instance",
"with",
"the",
"given",
"parameters",
"."
] | train | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/parse.py#L290-L336 |
michaelpb/omnic | omnic/worker/manager.py | WorkerManager.enqueue_sync | def enqueue_sync(self, func, *func_args):
'''
Enqueue an arbitrary synchronous function.
Deprecated: Use async version instead
'''
worker = self.pick_sticky(0) # just pick first always
args = (func,) + func_args
coro = worker.enqueue(enums.Task.FUNC, args)
asyncio.ensure_future(coro) | python | def enqueue_sync(self, func, *func_args):
'''
Enqueue an arbitrary synchronous function.
Deprecated: Use async version instead
'''
worker = self.pick_sticky(0) # just pick first always
args = (func,) + func_args
coro = worker.enqueue(enums.Task.FUNC, args)
asyncio.ensure_future(coro) | [
"def",
"enqueue_sync",
"(",
"self",
",",
"func",
",",
"*",
"func_args",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"0",
")",
"# just pick first always",
"args",
"=",
"(",
"func",
",",
")",
"+",
"func_args",
"coro",
"=",
"worker",
".",
"e... | Enqueue an arbitrary synchronous function.
Deprecated: Use async version instead | [
"Enqueue",
"an",
"arbitrary",
"synchronous",
"function",
"."
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/worker/manager.py#L32-L41 |
michaelpb/omnic | omnic/worker/manager.py | WorkerManager.async_enqueue_sync | async def async_enqueue_sync(self, func, *func_args):
'''
Enqueue an arbitrary synchronous function.
'''
worker = self.pick_sticky(0) # just pick first always
args = (func,) + func_args
await worker.enqueue(enums.Task.FUNC, args) | python | async def async_enqueue_sync(self, func, *func_args):
'''
Enqueue an arbitrary synchronous function.
'''
worker = self.pick_sticky(0) # just pick first always
args = (func,) + func_args
await worker.enqueue(enums.Task.FUNC, args) | [
"async",
"def",
"async_enqueue_sync",
"(",
"self",
",",
"func",
",",
"*",
"func_args",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"0",
")",
"# just pick first always",
"args",
"=",
"(",
"func",
",",
")",
"+",
"func_args",
"await",
"worker",
... | Enqueue an arbitrary synchronous function. | [
"Enqueue",
"an",
"arbitrary",
"synchronous",
"function",
"."
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/worker/manager.py#L43-L49 |
michaelpb/omnic | omnic/worker/manager.py | WorkerManager.enqueue_download | def enqueue_download(self, resource):
'''
Enqueue the download of the given foreign resource.
Deprecated: Use async version instead
'''
worker = self.pick_sticky(resource.url_string)
coro = worker.enqueue(enums.Task.DOWNLOAD, (resource,))
asyncio.ensure_future(coro) | python | def enqueue_download(self, resource):
'''
Enqueue the download of the given foreign resource.
Deprecated: Use async version instead
'''
worker = self.pick_sticky(resource.url_string)
coro = worker.enqueue(enums.Task.DOWNLOAD, (resource,))
asyncio.ensure_future(coro) | [
"def",
"enqueue_download",
"(",
"self",
",",
"resource",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"resource",
".",
"url_string",
")",
"coro",
"=",
"worker",
".",
"enqueue",
"(",
"enums",
".",
"Task",
".",
"DOWNLOAD",
",",
"(",
"resource"... | Enqueue the download of the given foreign resource.
Deprecated: Use async version instead | [
"Enqueue",
"the",
"download",
"of",
"the",
"given",
"foreign",
"resource",
"."
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/worker/manager.py#L51-L59 |
michaelpb/omnic | omnic/worker/manager.py | WorkerManager.async_enqueue_download | async def async_enqueue_download(self, resource):
'''
Enqueue the download of the given foreign resource.
'''
worker = self.pick_sticky(resource.url_string)
await worker.enqueue(enums.Task.DOWNLOAD, (resource,)) | python | async def async_enqueue_download(self, resource):
'''
Enqueue the download of the given foreign resource.
'''
worker = self.pick_sticky(resource.url_string)
await worker.enqueue(enums.Task.DOWNLOAD, (resource,)) | [
"async",
"def",
"async_enqueue_download",
"(",
"self",
",",
"resource",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"resource",
".",
"url_string",
")",
"await",
"worker",
".",
"enqueue",
"(",
"enums",
".",
"Task",
".",
"DOWNLOAD",
",",
"(",
... | Enqueue the download of the given foreign resource. | [
"Enqueue",
"the",
"download",
"of",
"the",
"given",
"foreign",
"resource",
"."
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/worker/manager.py#L61-L66 |
michaelpb/omnic | omnic/worker/manager.py | WorkerManager.enqueue_convert | def enqueue_convert(self, converter, from_resource, to_resource):
'''
Enqueue use of the given converter to convert to given
resources.
Deprecated: Use async version instead
'''
worker = self.pick_sticky(from_resource.url_string)
args = (converter, from_resource, to_resource)
coro = worker.enqueue(enums.Task.CONVERT, args)
asyncio.ensure_future(coro) | python | def enqueue_convert(self, converter, from_resource, to_resource):
'''
Enqueue use of the given converter to convert to given
resources.
Deprecated: Use async version instead
'''
worker = self.pick_sticky(from_resource.url_string)
args = (converter, from_resource, to_resource)
coro = worker.enqueue(enums.Task.CONVERT, args)
asyncio.ensure_future(coro) | [
"def",
"enqueue_convert",
"(",
"self",
",",
"converter",
",",
"from_resource",
",",
"to_resource",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"from_resource",
".",
"url_string",
")",
"args",
"=",
"(",
"converter",
",",
"from_resource",
",",
"t... | Enqueue use of the given converter to convert to given
resources.
Deprecated: Use async version instead | [
"Enqueue",
"use",
"of",
"the",
"given",
"converter",
"to",
"convert",
"to",
"given",
"resources",
"."
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/worker/manager.py#L68-L78 |
michaelpb/omnic | omnic/worker/manager.py | WorkerManager.async_enqueue_convert | async def async_enqueue_convert(self, converter, from_, to):
'''
Enqueue use of the given converter to convert to given
from and to resources.
'''
worker = self.pick_sticky(from_.url_string)
args = (converter, from_, to)
await worker.enqueue(enums.Task.CONVERT, args) | python | async def async_enqueue_convert(self, converter, from_, to):
'''
Enqueue use of the given converter to convert to given
from and to resources.
'''
worker = self.pick_sticky(from_.url_string)
args = (converter, from_, to)
await worker.enqueue(enums.Task.CONVERT, args) | [
"async",
"def",
"async_enqueue_convert",
"(",
"self",
",",
"converter",
",",
"from_",
",",
"to",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"from_",
".",
"url_string",
")",
"args",
"=",
"(",
"converter",
",",
"from_",
",",
"to",
")",
"aw... | Enqueue use of the given converter to convert to given
from and to resources. | [
"Enqueue",
"use",
"of",
"the",
"given",
"converter",
"to",
"convert",
"to",
"given",
"from",
"and",
"to",
"resources",
"."
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/worker/manager.py#L80-L87 |
michaelpb/omnic | omnic/worker/manager.py | WorkerManager.async_enqueue_multiconvert | async def async_enqueue_multiconvert(self, url_string, to_type):
'''
Enqueue a multi-step conversion process, from the given URL string
(which is assumed to have been downloaded / resolved)
'''
worker = self.pick_sticky(url_string)
args = (url_string, to_type)
await worker.enqueue(enums.Task.MULTICONVERT, args) | python | async def async_enqueue_multiconvert(self, url_string, to_type):
'''
Enqueue a multi-step conversion process, from the given URL string
(which is assumed to have been downloaded / resolved)
'''
worker = self.pick_sticky(url_string)
args = (url_string, to_type)
await worker.enqueue(enums.Task.MULTICONVERT, args) | [
"async",
"def",
"async_enqueue_multiconvert",
"(",
"self",
",",
"url_string",
",",
"to_type",
")",
":",
"worker",
"=",
"self",
".",
"pick_sticky",
"(",
"url_string",
")",
"args",
"=",
"(",
"url_string",
",",
"to_type",
")",
"await",
"worker",
".",
"enqueue",... | Enqueue a multi-step conversion process, from the given URL string
(which is assumed to have been downloaded / resolved) | [
"Enqueue",
"a",
"multi",
"-",
"step",
"conversion",
"process",
"from",
"the",
"given",
"URL",
"string",
"(",
"which",
"is",
"assumed",
"to",
"have",
"been",
"downloaded",
"/",
"resolved",
")"
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/worker/manager.py#L89-L96 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.strain_in_plane | def strain_in_plane(self, **kwargs):
'''
Returns the in-plane strain assuming no lattice relaxation, which
is positive for tensile strain and negative for compressive strain.
'''
if self._strain_out_of_plane is not None:
return ((self._strain_out_of_plane / -2.) *
(self.unstrained.c11(**kwargs) /
self.unstrained.c12(**kwargs) ) )
else:
return 1 - self.unstrained.a(**kwargs) / self.substrate.a(**kwargs) | python | def strain_in_plane(self, **kwargs):
'''
Returns the in-plane strain assuming no lattice relaxation, which
is positive for tensile strain and negative for compressive strain.
'''
if self._strain_out_of_plane is not None:
return ((self._strain_out_of_plane / -2.) *
(self.unstrained.c11(**kwargs) /
self.unstrained.c12(**kwargs) ) )
else:
return 1 - self.unstrained.a(**kwargs) / self.substrate.a(**kwargs) | [
"def",
"strain_in_plane",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_strain_out_of_plane",
"is",
"not",
"None",
":",
"return",
"(",
"(",
"self",
".",
"_strain_out_of_plane",
"/",
"-",
"2.",
")",
"*",
"(",
"self",
".",
"unstrai... | Returns the in-plane strain assuming no lattice relaxation, which
is positive for tensile strain and negative for compressive strain. | [
"Returns",
"the",
"in",
"-",
"plane",
"strain",
"assuming",
"no",
"lattice",
"relaxation",
"which",
"is",
"positive",
"for",
"tensile",
"strain",
"and",
"negative",
"for",
"compressive",
"strain",
"."
] | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L86-L96 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.strain_out_of_plane | def strain_out_of_plane(self, **kwargs):
'''
Returns the out-of-plane strain assuming no lattice relaxation, which
is negative for tensile strain and positive for compressive strain.
This is the strain measured by X-ray diffraction (XRD) symmetric
omega-2theta scans.
'''
if self._strain_out_of_plane is not None:
return self._strain_out_of_plane
else:
return (-2 * self.unstrained.c12(**kwargs) /
self.unstrained.c11(**kwargs) *
self.strain_in_plane(**kwargs) ) | python | def strain_out_of_plane(self, **kwargs):
'''
Returns the out-of-plane strain assuming no lattice relaxation, which
is negative for tensile strain and positive for compressive strain.
This is the strain measured by X-ray diffraction (XRD) symmetric
omega-2theta scans.
'''
if self._strain_out_of_plane is not None:
return self._strain_out_of_plane
else:
return (-2 * self.unstrained.c12(**kwargs) /
self.unstrained.c11(**kwargs) *
self.strain_in_plane(**kwargs) ) | [
"def",
"strain_out_of_plane",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_strain_out_of_plane",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_strain_out_of_plane",
"else",
":",
"return",
"(",
"-",
"2",
"*",
"self",
".",
"uns... | Returns the out-of-plane strain assuming no lattice relaxation, which
is negative for tensile strain and positive for compressive strain.
This is the strain measured by X-ray diffraction (XRD) symmetric
omega-2theta scans. | [
"Returns",
"the",
"out",
"-",
"of",
"-",
"plane",
"strain",
"assuming",
"no",
"lattice",
"relaxation",
"which",
"is",
"negative",
"for",
"tensile",
"strain",
"and",
"positive",
"for",
"compressive",
"strain",
".",
"This",
"is",
"the",
"strain",
"measured",
"... | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L100-L112 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.substrate_a | def substrate_a(self, **kwargs):
'''
Returns the substrate's lattice parameter.
'''
if self.substrate is not None:
return self.substrate.a(**kwargs)
else:
return (self.unstrained.a(**kwargs) /
(1. - self.strain_in_plane(**kwargs))) | python | def substrate_a(self, **kwargs):
'''
Returns the substrate's lattice parameter.
'''
if self.substrate is not None:
return self.substrate.a(**kwargs)
else:
return (self.unstrained.a(**kwargs) /
(1. - self.strain_in_plane(**kwargs))) | [
"def",
"substrate_a",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"substrate",
"is",
"not",
"None",
":",
"return",
"self",
".",
"substrate",
".",
"a",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"(",
"self",
".",
"... | Returns the substrate's lattice parameter. | [
"Returns",
"the",
"substrate",
"s",
"lattice",
"parameter",
"."
] | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L116-L124 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.CBO | def CBO(self, **kwargs):
'''
Returns the strain-shifted conduction band offset (CBO), assuming
the strain affects all conduction band valleys equally.
'''
return self.unstrained.CBO(**kwargs) + self.CBO_strain_shift(**kwargs) | python | def CBO(self, **kwargs):
'''
Returns the strain-shifted conduction band offset (CBO), assuming
the strain affects all conduction band valleys equally.
'''
return self.unstrained.CBO(**kwargs) + self.CBO_strain_shift(**kwargs) | [
"def",
"CBO",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"unstrained",
".",
"CBO",
"(",
"*",
"*",
"kwargs",
")",
"+",
"self",
".",
"CBO_strain_shift",
"(",
"*",
"*",
"kwargs",
")"
] | Returns the strain-shifted conduction band offset (CBO), assuming
the strain affects all conduction band valleys equally. | [
"Returns",
"the",
"strain",
"-",
"shifted",
"conduction",
"band",
"offset",
"(",
"CBO",
")",
"assuming",
"the",
"strain",
"affects",
"all",
"conduction",
"band",
"valleys",
"equally",
"."
] | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L141-L146 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.CBO_Gamma | def CBO_Gamma(self, **kwargs):
'''
Returns the strain-shifted Gamma-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_Gamma(**kwargs) +
self.CBO_strain_shift(**kwargs)) | python | def CBO_Gamma(self, **kwargs):
'''
Returns the strain-shifted Gamma-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_Gamma(**kwargs) +
self.CBO_strain_shift(**kwargs)) | [
"def",
"CBO_Gamma",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"self",
".",
"unstrained",
".",
"CBO_Gamma",
"(",
"*",
"*",
"kwargs",
")",
"+",
"self",
".",
"CBO_strain_shift",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Returns the strain-shifted Gamma-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally. | [
"Returns",
"the",
"strain",
"-",
"shifted",
"Gamma",
"-",
"valley",
"conduction",
"band",
"offset",
"(",
"CBO",
")",
"assuming",
"the",
"strain",
"affects",
"all",
"conduction",
"band",
"valleys",
"equally",
"."
] | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L150-L156 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.CBO_L | def CBO_L(self, **kwargs):
'''
Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_L(**kwargs) +
self.CBO_strain_shift(**kwargs)) | python | def CBO_L(self, **kwargs):
'''
Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_L(**kwargs) +
self.CBO_strain_shift(**kwargs)) | [
"def",
"CBO_L",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"self",
".",
"unstrained",
".",
"CBO_L",
"(",
"*",
"*",
"kwargs",
")",
"+",
"self",
".",
"CBO_strain_shift",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally. | [
"Returns",
"the",
"strain",
"-",
"shifted",
"L",
"-",
"valley",
"conduction",
"band",
"offset",
"(",
"CBO",
")",
"assuming",
"the",
"strain",
"affects",
"all",
"conduction",
"band",
"valleys",
"equally",
"."
] | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L160-L166 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.CBO_X | def CBO_X(self, **kwargs):
'''
Returns the strain-shifted X-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_X(**kwargs) +
self.CBO_strain_shift(**kwargs)) | python | def CBO_X(self, **kwargs):
'''
Returns the strain-shifted X-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_X(**kwargs) +
self.CBO_strain_shift(**kwargs)) | [
"def",
"CBO_X",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"self",
".",
"unstrained",
".",
"CBO_X",
"(",
"*",
"*",
"kwargs",
")",
"+",
"self",
".",
"CBO_strain_shift",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Returns the strain-shifted X-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally. | [
"Returns",
"the",
"strain",
"-",
"shifted",
"X",
"-",
"valley",
"conduction",
"band",
"offset",
"(",
"CBO",
")",
"assuming",
"the",
"strain",
"affects",
"all",
"conduction",
"band",
"valleys",
"equally",
"."
] | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L170-L176 |
scott-maddox/openbandparams | src/openbandparams/iii_v_zinc_blende_strained.py | IIIVZincBlendeStrained001.Eg | def Eg(self, **kwargs):
'''
Returns the strain-shifted bandgap, ``Eg``.
'''
return self.unstrained.Eg(**kwargs) + self.Eg_strain_shift(**kwargs) | python | def Eg(self, **kwargs):
'''
Returns the strain-shifted bandgap, ``Eg``.
'''
return self.unstrained.Eg(**kwargs) + self.Eg_strain_shift(**kwargs) | [
"def",
"Eg",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"unstrained",
".",
"Eg",
"(",
"*",
"*",
"kwargs",
")",
"+",
"self",
".",
"Eg_strain_shift",
"(",
"*",
"*",
"kwargs",
")"
] | Returns the strain-shifted bandgap, ``Eg``. | [
"Returns",
"the",
"strain",
"-",
"shifted",
"bandgap",
"Eg",
"."
] | train | https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L180-L184 |
halfak/deltas | deltas/segmenters/segments.py | Segment.tokens | def tokens(self):
"""
`generator` : the tokens in this segment
"""
for subsegment_or_token in self:
if isinstance(subsegment_or_token, Segment):
subsegment = subsegment_or_token
for token in subsegment.tokens():
yield token
else:
token = subsegment_or_token
yield token | python | def tokens(self):
"""
`generator` : the tokens in this segment
"""
for subsegment_or_token in self:
if isinstance(subsegment_or_token, Segment):
subsegment = subsegment_or_token
for token in subsegment.tokens():
yield token
else:
token = subsegment_or_token
yield token | [
"def",
"tokens",
"(",
"self",
")",
":",
"for",
"subsegment_or_token",
"in",
"self",
":",
"if",
"isinstance",
"(",
"subsegment_or_token",
",",
"Segment",
")",
":",
"subsegment",
"=",
"subsegment_or_token",
"for",
"token",
"in",
"subsegment",
".",
"tokens",
"(",... | `generator` : the tokens in this segment | [
"generator",
":",
"the",
"tokens",
"in",
"this",
"segment"
] | train | https://github.com/halfak/deltas/blob/4173f4215b93426a877f4bb4a7a3547834e60ac3/deltas/segmenters/segments.py#L51-L62 |
dailymuse/oz | oz/json_api/middleware.py | ApiMiddleware._api_on_write_error | def _api_on_write_error(self, status_code, **kwargs):
"""
Catches errors and renders it as a JSON message. Adds the traceback if
debug is enabled.
"""
return_error = { "code": self.get_status() }
exc_info = kwargs.get("exc_info")
if exc_info and isinstance(exc_info[1], oz.json_api.ApiError):
return_error["error"] = exc_info[1].message
else:
return_error["error"] = API_ERROR_CODE_MAP.get(self.get_status(), "Unknown error")
if oz.settings.get("debug"):
return_error["trace"] = "".join(traceback.format_exception(*exc_info))
self.finish(return_error)
return oz.break_trigger | python | def _api_on_write_error(self, status_code, **kwargs):
"""
Catches errors and renders it as a JSON message. Adds the traceback if
debug is enabled.
"""
return_error = { "code": self.get_status() }
exc_info = kwargs.get("exc_info")
if exc_info and isinstance(exc_info[1], oz.json_api.ApiError):
return_error["error"] = exc_info[1].message
else:
return_error["error"] = API_ERROR_CODE_MAP.get(self.get_status(), "Unknown error")
if oz.settings.get("debug"):
return_error["trace"] = "".join(traceback.format_exception(*exc_info))
self.finish(return_error)
return oz.break_trigger | [
"def",
"_api_on_write_error",
"(",
"self",
",",
"status_code",
",",
"*",
"*",
"kwargs",
")",
":",
"return_error",
"=",
"{",
"\"code\"",
":",
"self",
".",
"get_status",
"(",
")",
"}",
"exc_info",
"=",
"kwargs",
".",
"get",
"(",
"\"exc_info\"",
")",
"if",
... | Catches errors and renders it as a JSON message. Adds the traceback if
debug is enabled. | [
"Catches",
"errors",
"and",
"renders",
"it",
"as",
"a",
"JSON",
"message",
".",
"Adds",
"the",
"traceback",
"if",
"debug",
"is",
"enabled",
"."
] | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/json_api/middleware.py#L29-L47 |
dailymuse/oz | oz/json_api/middleware.py | ApiMiddleware.respond | def respond(self, obj):
"""Gives a response JSON(P) message"""
# Get the callback argument if JSONP is allowed
callback = self.get_argument("callback", None) if oz.settings["allow_jsonp"] else None
# We're pretty strict with what callback names are allowed, just in case
if callback and not CALLBACK_VALIDATOR.match(callback):
raise oz.json_api.ApiError("Invalid callback identifier - only functions with ASCII characters are allowed")
# Provide the response in a different manner depending on whether a
# JSONP callback is specified
json = escape.json_encode(obj)
if callback:
self.set_header("Content-Type", "application/javascript; charset=UTF-8")
self.finish("%s(%s)" % (callback, json))
else:
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.finish(json) | python | def respond(self, obj):
"""Gives a response JSON(P) message"""
# Get the callback argument if JSONP is allowed
callback = self.get_argument("callback", None) if oz.settings["allow_jsonp"] else None
# We're pretty strict with what callback names are allowed, just in case
if callback and not CALLBACK_VALIDATOR.match(callback):
raise oz.json_api.ApiError("Invalid callback identifier - only functions with ASCII characters are allowed")
# Provide the response in a different manner depending on whether a
# JSONP callback is specified
json = escape.json_encode(obj)
if callback:
self.set_header("Content-Type", "application/javascript; charset=UTF-8")
self.finish("%s(%s)" % (callback, json))
else:
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.finish(json) | [
"def",
"respond",
"(",
"self",
",",
"obj",
")",
":",
"# Get the callback argument if JSONP is allowed",
"callback",
"=",
"self",
".",
"get_argument",
"(",
"\"callback\"",
",",
"None",
")",
"if",
"oz",
".",
"settings",
"[",
"\"allow_jsonp\"",
"]",
"else",
"None",... | Gives a response JSON(P) message | [
"Gives",
"a",
"response",
"JSON",
"(",
"P",
")",
"message"
] | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/json_api/middleware.py#L49-L68 |
dailymuse/oz | oz/json_api/middleware.py | ApiMiddleware.body | def body(self):
"""Gets the JSON body of the request"""
if self._decoded_body == None:
# Try to decode the JSON body. But raise an error if the
# content-type is unexpected, or the JSON is invalid.
raw_content_type = self.request.headers.get("content-type") or ""
content_type = raw_content_type.split(";")[0].strip().lower()
if content_type == "application/json":
try:
self._decoded_body = escape.json_decode(self.request.body)
except:
raise oz.json_api.ApiError("Bad JSON body")
else:
raise oz.json_api.ApiError("JSON body expected")
return self._decoded_body | python | def body(self):
"""Gets the JSON body of the request"""
if self._decoded_body == None:
# Try to decode the JSON body. But raise an error if the
# content-type is unexpected, or the JSON is invalid.
raw_content_type = self.request.headers.get("content-type") or ""
content_type = raw_content_type.split(";")[0].strip().lower()
if content_type == "application/json":
try:
self._decoded_body = escape.json_decode(self.request.body)
except:
raise oz.json_api.ApiError("Bad JSON body")
else:
raise oz.json_api.ApiError("JSON body expected")
return self._decoded_body | [
"def",
"body",
"(",
"self",
")",
":",
"if",
"self",
".",
"_decoded_body",
"==",
"None",
":",
"# Try to decode the JSON body. But raise an error if the",
"# content-type is unexpected, or the JSON is invalid.",
"raw_content_type",
"=",
"self",
".",
"request",
".",
"headers",... | Gets the JSON body of the request | [
"Gets",
"the",
"JSON",
"body",
"of",
"the",
"request"
] | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/json_api/middleware.py#L70-L88 |
biocore/burrito | burrito/parameters.py | Parameter._get_id | def _get_id(self):
"""Construct and return the identifier"""
return ''.join(map(str,
filter(is_not_None,
[self.Prefix, self.Name]))) | python | def _get_id(self):
"""Construct and return the identifier"""
return ''.join(map(str,
filter(is_not_None,
[self.Prefix, self.Name]))) | [
"def",
"_get_id",
"(",
"self",
")",
":",
"return",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"filter",
"(",
"is_not_None",
",",
"[",
"self",
".",
"Prefix",
",",
"self",
".",
"Name",
"]",
")",
")",
")"
] | Construct and return the identifier | [
"Construct",
"and",
"return",
"the",
"identifier"
] | train | https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/parameters.py#L130-L134 |
biocore/burrito | burrito/parameters.py | MixedParameter.on | def on(self, val=None):
"""Turns the MixedParameter ON by setting its Value to val
An attempt to turn the parameter on with value 'False' will result
in an error, since this is the same as turning the parameter off.
Turning the MixedParameter ON without a value or with value 'None'
will let the parameter behave as a flag.
"""
if val is False:
raise ParameterError("Turning the ValuedParameter on with value "
"False is the same as turning it off. Use "
"another value.")
elif self.IsPath:
self.Value = FilePath(val)
else:
self.Value = val | python | def on(self, val=None):
"""Turns the MixedParameter ON by setting its Value to val
An attempt to turn the parameter on with value 'False' will result
in an error, since this is the same as turning the parameter off.
Turning the MixedParameter ON without a value or with value 'None'
will let the parameter behave as a flag.
"""
if val is False:
raise ParameterError("Turning the ValuedParameter on with value "
"False is the same as turning it off. Use "
"another value.")
elif self.IsPath:
self.Value = FilePath(val)
else:
self.Value = val | [
"def",
"on",
"(",
"self",
",",
"val",
"=",
"None",
")",
":",
"if",
"val",
"is",
"False",
":",
"raise",
"ParameterError",
"(",
"\"Turning the ValuedParameter on with value \"",
"\"False is the same as turning it off. Use \"",
"\"another value.\"",
")",
"elif",
"self",
... | Turns the MixedParameter ON by setting its Value to val
An attempt to turn the parameter on with value 'False' will result
in an error, since this is the same as turning the parameter off.
Turning the MixedParameter ON without a value or with value 'None'
will let the parameter behave as a flag. | [
"Turns",
"the",
"MixedParameter",
"ON",
"by",
"setting",
"its",
"Value",
"to",
"val"
] | train | https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/parameters.py#L433-L449 |
michaelpb/omnic | omnic/web/viewer.py | ViewerManager.get_assets | def get_assets(self):
'''
Return a flat list of absolute paths to all assets required by this
viewer
'''
return sum([
[self.prefix_asset(viewer, relpath) for relpath in viewer.assets]
for viewer in self.viewers
], []) | python | def get_assets(self):
'''
Return a flat list of absolute paths to all assets required by this
viewer
'''
return sum([
[self.prefix_asset(viewer, relpath) for relpath in viewer.assets]
for viewer in self.viewers
], []) | [
"def",
"get_assets",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"[",
"[",
"self",
".",
"prefix_asset",
"(",
"viewer",
",",
"relpath",
")",
"for",
"relpath",
"in",
"viewer",
".",
"assets",
"]",
"for",
"viewer",
"in",
"self",
".",
"viewers",
"]",
","... | Return a flat list of absolute paths to all assets required by this
viewer | [
"Return",
"a",
"flat",
"list",
"of",
"absolute",
"paths",
"to",
"all",
"assets",
"required",
"by",
"this",
"viewer"
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/web/viewer.py#L20-L28 |
michaelpb/omnic | omnic/web/viewer.py | ViewerManager.get_resource | def get_resource(self):
'''
Returns a BytesResource to build the viewers JavaScript
'''
# Basename could be used for controlling caching
# basename = 'viewers_%s' % settings.get_cache_string()
node_packages = self.get_node_packages()
# sort_keys is essential to ensure resulting string is
# deterministic (and thus hashable)
viewers_data_str = json.dumps(node_packages, sort_keys=True)
viewers_data = viewers_data_str.encode('utf8')
viewers_resource = ForeignBytesResource(
viewers_data,
extension=VIEWER_EXT,
# basename=basename,
)
return viewers_resource | python | def get_resource(self):
'''
Returns a BytesResource to build the viewers JavaScript
'''
# Basename could be used for controlling caching
# basename = 'viewers_%s' % settings.get_cache_string()
node_packages = self.get_node_packages()
# sort_keys is essential to ensure resulting string is
# deterministic (and thus hashable)
viewers_data_str = json.dumps(node_packages, sort_keys=True)
viewers_data = viewers_data_str.encode('utf8')
viewers_resource = ForeignBytesResource(
viewers_data,
extension=VIEWER_EXT,
# basename=basename,
)
return viewers_resource | [
"def",
"get_resource",
"(",
"self",
")",
":",
"# Basename could be used for controlling caching",
"# basename = 'viewers_%s' % settings.get_cache_string()",
"node_packages",
"=",
"self",
".",
"get_node_packages",
"(",
")",
"# sort_keys is essential to ensure resulting string is",
"# ... | Returns a BytesResource to build the viewers JavaScript | [
"Returns",
"a",
"BytesResource",
"to",
"build",
"the",
"viewers",
"JavaScript"
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/web/viewer.py#L40-L56 |
9b/frisbee | frisbee/cli/client.py | main | def main():
"""Run the core."""
parser = ArgumentParser()
subs = parser.add_subparsers(dest='cmd')
setup_parser = subs.add_parser('search')
setup_parser.add_argument('-e', '--engine', dest='engine', required=True,
help='Search engine to use.',
choices=['bing'])
setup_parser.add_argument('-d', '--domain', dest='domain', required=True,
help='Email domain to collect upon.', type=str)
setup_parser.add_argument('-l', '--limit', dest='limit', required=False,
help='Limit number of results.', type=int,
default=100)
setup_parser.add_argument('-m', '--modifier', dest='modifier', required=False,
help='Search modifier to add to the query.',
type=str, default=None)
setup_parser.add_argument('-s', '--save', dest='to_save', required=False,
help='Save results to a file.', default=False,
action='store_true')
setup_parser.add_argument('-g', '--greedy', dest='greedy', required=False,
help='Use found results to search more.', default=False,
action='store_true')
setup_parser.add_argument('-f', '--fuzzy', dest='fuzzy', required=False,
help='Use keyword instead of domain.', default=False,
action='store_true')
args = parser.parse_args()
if args.cmd == 'search':
frisbee = Frisbee(log_level=logging.DEBUG, save=args.to_save)
jobs = [{'engine': args.engine, 'modifier': args.modifier,
'domain': args.domain, 'limit': args.limit,
'greedy': args.greedy, 'fuzzy': args.fuzzy}]
frisbee.search(jobs)
results = frisbee.get_results()
for job in results:
print("-= %s Details =-" % job['project'].upper())
print("\t[*] Engine: %s" % job['engine'])
print("\t[*] Domain: %s" % job['domain'])
print("\t[*] Modifer: %s" % job['modifier'])
print("\t[*] Limit: %d" % job['limit'])
print("\t[*] Duration: %s seconds" % job['duration'])
print("\n-= Email Results=-")
for email in job['results']['emails']:
print(email)
print("")
sys.exit(1) | python | def main():
"""Run the core."""
parser = ArgumentParser()
subs = parser.add_subparsers(dest='cmd')
setup_parser = subs.add_parser('search')
setup_parser.add_argument('-e', '--engine', dest='engine', required=True,
help='Search engine to use.',
choices=['bing'])
setup_parser.add_argument('-d', '--domain', dest='domain', required=True,
help='Email domain to collect upon.', type=str)
setup_parser.add_argument('-l', '--limit', dest='limit', required=False,
help='Limit number of results.', type=int,
default=100)
setup_parser.add_argument('-m', '--modifier', dest='modifier', required=False,
help='Search modifier to add to the query.',
type=str, default=None)
setup_parser.add_argument('-s', '--save', dest='to_save', required=False,
help='Save results to a file.', default=False,
action='store_true')
setup_parser.add_argument('-g', '--greedy', dest='greedy', required=False,
help='Use found results to search more.', default=False,
action='store_true')
setup_parser.add_argument('-f', '--fuzzy', dest='fuzzy', required=False,
help='Use keyword instead of domain.', default=False,
action='store_true')
args = parser.parse_args()
if args.cmd == 'search':
frisbee = Frisbee(log_level=logging.DEBUG, save=args.to_save)
jobs = [{'engine': args.engine, 'modifier': args.modifier,
'domain': args.domain, 'limit': args.limit,
'greedy': args.greedy, 'fuzzy': args.fuzzy}]
frisbee.search(jobs)
results = frisbee.get_results()
for job in results:
print("-= %s Details =-" % job['project'].upper())
print("\t[*] Engine: %s" % job['engine'])
print("\t[*] Domain: %s" % job['domain'])
print("\t[*] Modifer: %s" % job['modifier'])
print("\t[*] Limit: %d" % job['limit'])
print("\t[*] Duration: %s seconds" % job['duration'])
print("\n-= Email Results=-")
for email in job['results']['emails']:
print(email)
print("")
sys.exit(1) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"ArgumentParser",
"(",
")",
"subs",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'cmd'",
")",
"setup_parser",
"=",
"subs",
".",
"add_parser",
"(",
"'search'",
")",
"setup_parser",
".",
"add_argument"... | Run the core. | [
"Run",
"the",
"core",
"."
] | train | https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/cli/client.py#L10-L57 |
thibault/django-nexmo | djexmo/views.py | callback | def callback(request):
"""Callback URL for Nexmo."""
message_id = request.GET.get('messageId')
status_id = request.GET.get('status')
status_msg = NEXMO_STATUSES.get(status_id, UNKNOWN_STATUS)
error_id = int(request.GET.get('err-code'))
error_msg = NEXMO_MESSAGES.get(error_id, UNKNOWN_MESSAGE)
logger.info(u'Nexmo callback: Sms = %s, Status = %s, message = %s' % (
message_id,
status_msg,
error_msg
))
# Nexmo expects a 200 response code
return HttpResponse('') | python | def callback(request):
"""Callback URL for Nexmo."""
message_id = request.GET.get('messageId')
status_id = request.GET.get('status')
status_msg = NEXMO_STATUSES.get(status_id, UNKNOWN_STATUS)
error_id = int(request.GET.get('err-code'))
error_msg = NEXMO_MESSAGES.get(error_id, UNKNOWN_MESSAGE)
logger.info(u'Nexmo callback: Sms = %s, Status = %s, message = %s' % (
message_id,
status_msg,
error_msg
))
# Nexmo expects a 200 response code
return HttpResponse('') | [
"def",
"callback",
"(",
"request",
")",
":",
"message_id",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'messageId'",
")",
"status_id",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'status'",
")",
"status_msg",
"=",
"NEXMO_STATUSES",
".",
"get",
"(",
... | Callback URL for Nexmo. | [
"Callback",
"URL",
"for",
"Nexmo",
"."
] | train | https://github.com/thibault/django-nexmo/blob/6cab80c96b85fdcbb03ddab5ad1a01440be4992d/djexmo/views.py#L12-L27 |
michaelpb/omnic | omnic/utils/security.py | get_hmac_sha1_digest | def get_hmac_sha1_digest(secret, resource_url, target_type, api_key=None):
'''
Utilize hmac module to hash a secret, a string specifying a resource URL,
and a string specifying a target type into a (string) hex digest.
'''
# Normalize and sanitize input resource URL and target type, and then
# convert to bytes
target_type_bytes = str(TypeString(target_type)).encode('utf8')
resource_url_bytes = str(ResourceURL(resource_url)).encode('utf8')
# Create new hmac digest, optionally including an optional public api key
hm = hmac.new(secret.encode('utf8'), digestmod=hashlib.sha1)
if api_key:
hm.update(api_key.encode('utf8'))
hm.update(target_type_bytes)
hm.update(resource_url_bytes)
return hm.hexdigest() | python | def get_hmac_sha1_digest(secret, resource_url, target_type, api_key=None):
'''
Utilize hmac module to hash a secret, a string specifying a resource URL,
and a string specifying a target type into a (string) hex digest.
'''
# Normalize and sanitize input resource URL and target type, and then
# convert to bytes
target_type_bytes = str(TypeString(target_type)).encode('utf8')
resource_url_bytes = str(ResourceURL(resource_url)).encode('utf8')
# Create new hmac digest, optionally including an optional public api key
hm = hmac.new(secret.encode('utf8'), digestmod=hashlib.sha1)
if api_key:
hm.update(api_key.encode('utf8'))
hm.update(target_type_bytes)
hm.update(resource_url_bytes)
return hm.hexdigest() | [
"def",
"get_hmac_sha1_digest",
"(",
"secret",
",",
"resource_url",
",",
"target_type",
",",
"api_key",
"=",
"None",
")",
":",
"# Normalize and sanitize input resource URL and target type, and then",
"# convert to bytes",
"target_type_bytes",
"=",
"str",
"(",
"TypeString",
"... | Utilize hmac module to hash a secret, a string specifying a resource URL,
and a string specifying a target type into a (string) hex digest. | [
"Utilize",
"hmac",
"module",
"to",
"hash",
"a",
"secret",
"a",
"string",
"specifying",
"a",
"resource",
"URL",
"and",
"a",
"string",
"specifying",
"a",
"target",
"type",
"into",
"a",
"(",
"string",
")",
"hex",
"digest",
"."
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/utils/security.py#L8-L24 |
natea/django-deployer | django_deployer/tasks.py | init | def init(provider=None):
"""
Runs through a questionnaire to set up your project's deploy settings
"""
if os.path.exists(DEPLOY_YAML):
_yellow("\nIt looks like you've already gone through the questionnaire.")
cont = prompt("Do you want to go through it again and overwrite the current one?", default="No")
if cont.strip().lower() == "no":
return None
_green("\nWelcome to the django-deployer!")
_green("\nWe need to ask a few questions in order to set up your project to be deployed to a PaaS provider.")
# TODO: identify the project dir based on where we find the settings.py or urls.py
django_settings = prompt(
"* What is your Django settings module?",
default="settings",
validate=_validate_django_settings
)
managepy = prompt(
"* Where is your manage.py file?",
default="./manage.py",
validate=_validate_managepy
)
requirements = prompt(
"* Where is your requirements.txt file?",
default="requirements.txt",
validate=_validate_requirements
)
# TODO: confirm that the file exists
# parse the requirements file and warn the user about best practices:
# Django==1.4.1
# psycopg2 if they selected PostgreSQL
# MySQL-python if they selected MySQL
# South for database migrations
# dj-database-url
pyversion = prompt("* What version of Python does your app need?", default="Python2.7")
# TODO: get these values by reading the settings.py file
static_url = prompt("* What is your STATIC_URL?", default="/static/")
media_url = prompt("* What is your MEDIA_URL?", default="/media/")
if not provider:
provider = prompt("* Which provider would you like to deploy to (dotcloud, appengine, stackato, openshift)?",
validate=_validate_providers)
# Where to place the provider specific questions
site = {}
additional_site = {}
if provider == "appengine":
applicationid = prompt("* What's your Google App Engine application ID (see https://appengine.google.com/)?", validate=r'.+')
instancename = prompt("* What's the full instance ID of your Cloud SQL instance\n"
"(should be in format \"projectid:instanceid\" found at https://code.google.com/apis/console/)?", validate=r'.+:.+')
databasename = prompt("* What's your database name?", validate=r'.+')
sdk_location = prompt("* Where is your Google App Engine SDK location?",
default="/usr/local/google_appengine",
validate=r'.+' # TODO: validate that this path exists
)
additional_site.update({
# quotes for the yaml issue
'application_id': applicationid,
'instancename': instancename,
'databasename': databasename,
'sdk_location': sdk_location,
})
# only option with Google App Engine is MySQL, so we'll just hardcode it
site = {
'database': 'MySQL'
}
elif provider == "openshift":
application_name = prompt("* What is your openshift application name?")
site = {
'application_name': application_name
}
else:
database = prompt("* What database does your app use?", default="PostgreSQL")
site = {
'database': database,
}
# TODO: add some validation that the admin password is valid
# TODO: let the user choose the admin username instead of hardcoding it to 'admin'
admin_password = prompt("* What do you want to set as the admin password?",
validate=_validate_admin_password
)
import random
SECRET_KEY = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
SECRET_KEY = "'" + SECRET_KEY + "'"
site.update({
'pyversion': pyversion,
'django_settings': django_settings,
'managepy': managepy,
'requirements': requirements,
'static_url': static_url,
'media_url': media_url,
'provider': provider,
'admin_password': admin_password,
'secret_key': SECRET_KEY,
})
site.update(additional_site)
_create_deploy_yaml(site)
return site | python | def init(provider=None):
"""
Runs through a questionnaire to set up your project's deploy settings
"""
if os.path.exists(DEPLOY_YAML):
_yellow("\nIt looks like you've already gone through the questionnaire.")
cont = prompt("Do you want to go through it again and overwrite the current one?", default="No")
if cont.strip().lower() == "no":
return None
_green("\nWelcome to the django-deployer!")
_green("\nWe need to ask a few questions in order to set up your project to be deployed to a PaaS provider.")
# TODO: identify the project dir based on where we find the settings.py or urls.py
django_settings = prompt(
"* What is your Django settings module?",
default="settings",
validate=_validate_django_settings
)
managepy = prompt(
"* Where is your manage.py file?",
default="./manage.py",
validate=_validate_managepy
)
requirements = prompt(
"* Where is your requirements.txt file?",
default="requirements.txt",
validate=_validate_requirements
)
# TODO: confirm that the file exists
# parse the requirements file and warn the user about best practices:
# Django==1.4.1
# psycopg2 if they selected PostgreSQL
# MySQL-python if they selected MySQL
# South for database migrations
# dj-database-url
pyversion = prompt("* What version of Python does your app need?", default="Python2.7")
# TODO: get these values by reading the settings.py file
static_url = prompt("* What is your STATIC_URL?", default="/static/")
media_url = prompt("* What is your MEDIA_URL?", default="/media/")
if not provider:
provider = prompt("* Which provider would you like to deploy to (dotcloud, appengine, stackato, openshift)?",
validate=_validate_providers)
# Where to place the provider specific questions
site = {}
additional_site = {}
if provider == "appengine":
applicationid = prompt("* What's your Google App Engine application ID (see https://appengine.google.com/)?", validate=r'.+')
instancename = prompt("* What's the full instance ID of your Cloud SQL instance\n"
"(should be in format \"projectid:instanceid\" found at https://code.google.com/apis/console/)?", validate=r'.+:.+')
databasename = prompt("* What's your database name?", validate=r'.+')
sdk_location = prompt("* Where is your Google App Engine SDK location?",
default="/usr/local/google_appengine",
validate=r'.+' # TODO: validate that this path exists
)
additional_site.update({
# quotes for the yaml issue
'application_id': applicationid,
'instancename': instancename,
'databasename': databasename,
'sdk_location': sdk_location,
})
# only option with Google App Engine is MySQL, so we'll just hardcode it
site = {
'database': 'MySQL'
}
elif provider == "openshift":
application_name = prompt("* What is your openshift application name?")
site = {
'application_name': application_name
}
else:
database = prompt("* What database does your app use?", default="PostgreSQL")
site = {
'database': database,
}
# TODO: add some validation that the admin password is valid
# TODO: let the user choose the admin username instead of hardcoding it to 'admin'
admin_password = prompt("* What do you want to set as the admin password?",
validate=_validate_admin_password
)
import random
SECRET_KEY = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
SECRET_KEY = "'" + SECRET_KEY + "'"
site.update({
'pyversion': pyversion,
'django_settings': django_settings,
'managepy': managepy,
'requirements': requirements,
'static_url': static_url,
'media_url': media_url,
'provider': provider,
'admin_password': admin_password,
'secret_key': SECRET_KEY,
})
site.update(additional_site)
_create_deploy_yaml(site)
return site | [
"def",
"init",
"(",
"provider",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"DEPLOY_YAML",
")",
":",
"_yellow",
"(",
"\"\\nIt looks like you've already gone through the questionnaire.\"",
")",
"cont",
"=",
"prompt",
"(",
"\"Do you want to g... | Runs through a questionnaire to set up your project's deploy settings | [
"Runs",
"through",
"a",
"questionnaire",
"to",
"set",
"up",
"your",
"project",
"s",
"deploy",
"settings"
] | train | https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/tasks.py#L24-L140 |
natea/django-deployer | django_deployer/tasks.py | setup | def setup(provider=None):
"""
Creates the provider config files needed to deploy your project
"""
site = init(provider)
if not site:
site = yaml.safe_load(_read_file(DEPLOY_YAML))
provider_class = PROVIDERS[site['provider']]
provider_class.init(site) | python | def setup(provider=None):
"""
Creates the provider config files needed to deploy your project
"""
site = init(provider)
if not site:
site = yaml.safe_load(_read_file(DEPLOY_YAML))
provider_class = PROVIDERS[site['provider']]
provider_class.init(site) | [
"def",
"setup",
"(",
"provider",
"=",
"None",
")",
":",
"site",
"=",
"init",
"(",
"provider",
")",
"if",
"not",
"site",
":",
"site",
"=",
"yaml",
".",
"safe_load",
"(",
"_read_file",
"(",
"DEPLOY_YAML",
")",
")",
"provider_class",
"=",
"PROVIDERS",
"["... | Creates the provider config files needed to deploy your project | [
"Creates",
"the",
"provider",
"config",
"files",
"needed",
"to",
"deploy",
"your",
"project"
] | train | https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/tasks.py#L143-L152 |
natea/django-deployer | django_deployer/tasks.py | deploy | def deploy(provider=None):
"""
Deploys your project
"""
if os.path.exists(DEPLOY_YAML):
site = yaml.safe_load(_read_file(DEPLOY_YAML))
provider_class = PROVIDERS[site['provider']]
provider_class.deploy() | python | def deploy(provider=None):
"""
Deploys your project
"""
if os.path.exists(DEPLOY_YAML):
site = yaml.safe_load(_read_file(DEPLOY_YAML))
provider_class = PROVIDERS[site['provider']]
provider_class.deploy() | [
"def",
"deploy",
"(",
"provider",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"DEPLOY_YAML",
")",
":",
"site",
"=",
"yaml",
".",
"safe_load",
"(",
"_read_file",
"(",
"DEPLOY_YAML",
")",
")",
"provider_class",
"=",
"PROVIDERS",
"... | Deploys your project | [
"Deploys",
"your",
"project"
] | train | https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/tasks.py#L155-L163 |
zardus/idalink | idalink/client.py | ida_connect | def ida_connect(host='localhost', port=18861, retry=10):
"""
Connect to an instance of IDA running our server.py.
:param host: The host to connect to
:param port: The port to connect to
:param retry: How many times to try after errors before giving up
"""
for i in range(retry):
try:
LOG.debug('Connectint to %s:%d, try %d...', host, port, i + 1)
link = rpyc_classic.connect(host, port)
link.eval('2 + 2')
except socket.error:
time.sleep(1)
continue
else:
LOG.debug('Connected to %s:%d', host, port)
return link
raise IDALinkError("Could not connect to %s:%d after %d tries" % (host, port, retry)) | python | def ida_connect(host='localhost', port=18861, retry=10):
"""
Connect to an instance of IDA running our server.py.
:param host: The host to connect to
:param port: The port to connect to
:param retry: How many times to try after errors before giving up
"""
for i in range(retry):
try:
LOG.debug('Connectint to %s:%d, try %d...', host, port, i + 1)
link = rpyc_classic.connect(host, port)
link.eval('2 + 2')
except socket.error:
time.sleep(1)
continue
else:
LOG.debug('Connected to %s:%d', host, port)
return link
raise IDALinkError("Could not connect to %s:%d after %d tries" % (host, port, retry)) | [
"def",
"ida_connect",
"(",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"18861",
",",
"retry",
"=",
"10",
")",
":",
"for",
"i",
"in",
"range",
"(",
"retry",
")",
":",
"try",
":",
"LOG",
".",
"debug",
"(",
"'Connectint to %s:%d, try %d...'",
",",
"host... | Connect to an instance of IDA running our server.py.
:param host: The host to connect to
:param port: The port to connect to
:param retry: How many times to try after errors before giving up | [
"Connect",
"to",
"an",
"instance",
"of",
"IDA",
"running",
"our",
"server",
".",
"py",
"."
] | train | https://github.com/zardus/idalink/blob/cf68144e7c72679a5429d8b8d9e9aa316d9b79ac/idalink/client.py#L43-L63 |
zardus/idalink | idalink/client.py | ida_spawn | def ida_spawn(ida_binary, filename, port=18861, mode='oneshot',
processor_type=None, logfile=None):
"""
Open IDA on the the file we want to analyse.
:param ida_binary: The binary name or path to ida
:param filename: The filename to open in IDA
:param port: The port on which to serve rpc from ida
:param mode: The server mode. "oneshot" to close ida when the connection is closed, or
"threaded" to run IDA visible to the user and allow multiple connections
:param processor_type:
Which processor IDA should analyze this binary as, e.g. "metapc". If not
provided, IDA will guess.
:param logfile: The file to log IDA's output to. Default /tmp/idalink-{port}.log
"""
ida_progname = _which(ida_binary)
if ida_progname is None:
raise IDALinkError('Could not find executable %s' % ida_binary)
if mode not in ('oneshot', 'threaded'):
raise ValueError("Bad mode %s" % mode)
if logfile is None:
logfile = LOGFILE.format(port=port)
ida_realpath = os.path.expanduser(ida_progname)
file_realpath = os.path.realpath(os.path.expanduser(filename))
server_script = os.path.join(MODULE_DIR, 'server.py')
LOG.info('Launching IDA (%s) on %s, listening on port %d, logging to %s',
ida_realpath, file_realpath, port, logfile)
env = dict(os.environ)
if mode == 'oneshot':
env['TVHEADLESS'] = '1'
if sys.platform == "darwin":
# If we are running in a virtual environment, which we should, we need
# to insert the python lib into the launched process in order for IDA
# to not default back to the Apple-installed python because of the use
# of paths in library identifiers on macOS.
if "VIRTUAL_ENV" in os.environ:
env['DYLD_INSERT_LIBRARIES'] = os.environ['VIRTUAL_ENV'] + '/.Python'
# The parameters are:
# -A Automatic mode
# -S Run a script (our server script)
# -L Log all output to our logfile
# -p Set the processor type
command = [
ida_realpath,
'-A',
'-S%s %d %s' % (server_script, port, mode),
'-L%s' % logfile,
]
if processor_type is not None:
command.append('-p%s' % processor_type)
command.append(file_realpath)
LOG.debug('IDA command is %s', ' '.join("%s" % s for s in command))
return subprocess.Popen(command, env=env) | python | def ida_spawn(ida_binary, filename, port=18861, mode='oneshot',
processor_type=None, logfile=None):
"""
Open IDA on the the file we want to analyse.
:param ida_binary: The binary name or path to ida
:param filename: The filename to open in IDA
:param port: The port on which to serve rpc from ida
:param mode: The server mode. "oneshot" to close ida when the connection is closed, or
"threaded" to run IDA visible to the user and allow multiple connections
:param processor_type:
Which processor IDA should analyze this binary as, e.g. "metapc". If not
provided, IDA will guess.
:param logfile: The file to log IDA's output to. Default /tmp/idalink-{port}.log
"""
ida_progname = _which(ida_binary)
if ida_progname is None:
raise IDALinkError('Could not find executable %s' % ida_binary)
if mode not in ('oneshot', 'threaded'):
raise ValueError("Bad mode %s" % mode)
if logfile is None:
logfile = LOGFILE.format(port=port)
ida_realpath = os.path.expanduser(ida_progname)
file_realpath = os.path.realpath(os.path.expanduser(filename))
server_script = os.path.join(MODULE_DIR, 'server.py')
LOG.info('Launching IDA (%s) on %s, listening on port %d, logging to %s',
ida_realpath, file_realpath, port, logfile)
env = dict(os.environ)
if mode == 'oneshot':
env['TVHEADLESS'] = '1'
if sys.platform == "darwin":
# If we are running in a virtual environment, which we should, we need
# to insert the python lib into the launched process in order for IDA
# to not default back to the Apple-installed python because of the use
# of paths in library identifiers on macOS.
if "VIRTUAL_ENV" in os.environ:
env['DYLD_INSERT_LIBRARIES'] = os.environ['VIRTUAL_ENV'] + '/.Python'
# The parameters are:
# -A Automatic mode
# -S Run a script (our server script)
# -L Log all output to our logfile
# -p Set the processor type
command = [
ida_realpath,
'-A',
'-S%s %d %s' % (server_script, port, mode),
'-L%s' % logfile,
]
if processor_type is not None:
command.append('-p%s' % processor_type)
command.append(file_realpath)
LOG.debug('IDA command is %s', ' '.join("%s" % s for s in command))
return subprocess.Popen(command, env=env) | [
"def",
"ida_spawn",
"(",
"ida_binary",
",",
"filename",
",",
"port",
"=",
"18861",
",",
"mode",
"=",
"'oneshot'",
",",
"processor_type",
"=",
"None",
",",
"logfile",
"=",
"None",
")",
":",
"ida_progname",
"=",
"_which",
"(",
"ida_binary",
")",
"if",
"ida... | Open IDA on the the file we want to analyse.
:param ida_binary: The binary name or path to ida
:param filename: The filename to open in IDA
:param port: The port on which to serve rpc from ida
:param mode: The server mode. "oneshot" to close ida when the connection is closed, or
"threaded" to run IDA visible to the user and allow multiple connections
:param processor_type:
Which processor IDA should analyze this binary as, e.g. "metapc". If not
provided, IDA will guess.
:param logfile: The file to log IDA's output to. Default /tmp/idalink-{port}.log | [
"Open",
"IDA",
"on",
"the",
"the",
"file",
"we",
"want",
"to",
"analyse",
"."
] | train | https://github.com/zardus/idalink/blob/cf68144e7c72679a5429d8b8d9e9aa316d9b79ac/idalink/client.py#L66-L127 |
michaelpb/omnic | omnic/cli/commands.py | _clear_cache | def _clear_cache(url, ts=None):
'''
Helper function used by precache and clearcache that clears the cache
of a given URL and type
'''
if ts is None:
# Clears an entire ForeignResource cache
res = ForeignResource(url)
if not os.path.exists(res.cache_path_base):
cli.printerr('%s is not cached (looked at %s)'
% (url, res.cache_path_base))
return
cli.print('%s: clearing ALL at %s'
% (url, res.cache_path_base))
res.cache_remove_all()
else:
# Clears an entire ForeignResource cache
res = TypedResource(url, ts)
if not res.cache_exists():
cli.printerr('%s is not cached for type %s (looked at %s)'
% (url, str(ts), res.cache_path))
return
cli.print('%s: clearing "%s" at %s'
% (url, str(ts), res.cache_path))
if os.path.isdir(res.cache_path):
res.cache_remove_as_dir()
else:
res.cache_remove() | python | def _clear_cache(url, ts=None):
'''
Helper function used by precache and clearcache that clears the cache
of a given URL and type
'''
if ts is None:
# Clears an entire ForeignResource cache
res = ForeignResource(url)
if not os.path.exists(res.cache_path_base):
cli.printerr('%s is not cached (looked at %s)'
% (url, res.cache_path_base))
return
cli.print('%s: clearing ALL at %s'
% (url, res.cache_path_base))
res.cache_remove_all()
else:
# Clears an entire ForeignResource cache
res = TypedResource(url, ts)
if not res.cache_exists():
cli.printerr('%s is not cached for type %s (looked at %s)'
% (url, str(ts), res.cache_path))
return
cli.print('%s: clearing "%s" at %s'
% (url, str(ts), res.cache_path))
if os.path.isdir(res.cache_path):
res.cache_remove_as_dir()
else:
res.cache_remove() | [
"def",
"_clear_cache",
"(",
"url",
",",
"ts",
"=",
"None",
")",
":",
"if",
"ts",
"is",
"None",
":",
"# Clears an entire ForeignResource cache",
"res",
"=",
"ForeignResource",
"(",
"url",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"res",
"."... | Helper function used by precache and clearcache that clears the cache
of a given URL and type | [
"Helper",
"function",
"used",
"by",
"precache",
"and",
"clearcache",
"that",
"clears",
"the",
"cache",
"of",
"a",
"given",
"URL",
"and",
"type"
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/cli/commands.py#L80-L107 |
michaelpb/omnic | omnic/cli/commands.py | _precache | async def _precache(url, to_type, force=False):
'''
Helper function used by precache and precache-named which does the
actual precaching
'''
if force:
cli.print('%s: force clearing' % url)
_clear_cache(url)
cli.print('%s: precaching "%s"' % (url, to_type))
with autodrain_worker():
await singletons.workers.async_enqueue_multiconvert(url, to_type)
result = TypedResource(url, TypeString(to_type))
cli.print('%s: %s precached at: %s' % (url, to_type, result.cache_path)) | python | async def _precache(url, to_type, force=False):
'''
Helper function used by precache and precache-named which does the
actual precaching
'''
if force:
cli.print('%s: force clearing' % url)
_clear_cache(url)
cli.print('%s: precaching "%s"' % (url, to_type))
with autodrain_worker():
await singletons.workers.async_enqueue_multiconvert(url, to_type)
result = TypedResource(url, TypeString(to_type))
cli.print('%s: %s precached at: %s' % (url, to_type, result.cache_path)) | [
"async",
"def",
"_precache",
"(",
"url",
",",
"to_type",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
":",
"cli",
".",
"print",
"(",
"'%s: force clearing'",
"%",
"url",
")",
"_clear_cache",
"(",
"url",
")",
"cli",
".",
"print",
"(",
"'%s: preca... | Helper function used by precache and precache-named which does the
actual precaching | [
"Helper",
"function",
"used",
"by",
"precache",
"and",
"precache",
"-",
"named",
"which",
"does",
"the",
"actual",
"precaching"
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/cli/commands.py#L110-L122 |
mokelly/wabbit_wappa | wabbit_wappa/active_learner.py | ActiveVWProcess.expect_exact | def expect_exact(self, *args, **kwargs):
"""This does not attempt to duplicate the expect_exact API,
but just sets self.before to the latest response line."""
response = self._recvline()
self.before = response.strip() | python | def expect_exact(self, *args, **kwargs):
"""This does not attempt to duplicate the expect_exact API,
but just sets self.before to the latest response line."""
response = self._recvline()
self.before = response.strip() | [
"def",
"expect_exact",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"self",
".",
"_recvline",
"(",
")",
"self",
".",
"before",
"=",
"response",
".",
"strip",
"(",
")"
] | This does not attempt to duplicate the expect_exact API,
but just sets self.before to the latest response line. | [
"This",
"does",
"not",
"attempt",
"to",
"duplicate",
"the",
"expect_exact",
"API",
"but",
"just",
"sets",
"self",
".",
"before",
"to",
"the",
"latest",
"response",
"line",
"."
] | train | https://github.com/mokelly/wabbit_wappa/blob/dfe5bf6d6036079e473c4148335cd6f339d0299b/wabbit_wappa/active_learner.py#L84-L88 |
bitlabstudio/django-document-library | document_library/south_migrations/0022_move_from_simple_trans_to_hvad.py | Migration.forwards | def forwards(self, orm):
"Write your forwards methods here."
for category in orm['document_library.DocumentCategory'].objects.all():
for trans_old in orm['document_library.DocumentCategoryTitle'].objects.filter(category=category):
orm['document_library.DocumentCategoryTranslation'].objects.create(
master=category,
language_code=trans_old.language,
title=trans_old.title,
)
for document in orm['document_library.Document'].objects.all():
for trans_old in orm['document_library.DocumentTitle'].objects.filter(document=document):
orm['document_library.DocumentTranslation'].objects.create(
master=document,
language_code=trans_old.language,
title=trans_old.title,
description=trans_old.description,
filer_file=trans_old.filer_file,
thumbnail=trans_old.thumbnail,
copyright_notice=trans_old.copyright_notice,
is_published=trans_old.is_published,
meta_description=trans_old.meta_description,
) | python | def forwards(self, orm):
"Write your forwards methods here."
for category in orm['document_library.DocumentCategory'].objects.all():
for trans_old in orm['document_library.DocumentCategoryTitle'].objects.filter(category=category):
orm['document_library.DocumentCategoryTranslation'].objects.create(
master=category,
language_code=trans_old.language,
title=trans_old.title,
)
for document in orm['document_library.Document'].objects.all():
for trans_old in orm['document_library.DocumentTitle'].objects.filter(document=document):
orm['document_library.DocumentTranslation'].objects.create(
master=document,
language_code=trans_old.language,
title=trans_old.title,
description=trans_old.description,
filer_file=trans_old.filer_file,
thumbnail=trans_old.thumbnail,
copyright_notice=trans_old.copyright_notice,
is_published=trans_old.is_published,
meta_description=trans_old.meta_description,
) | [
"def",
"forwards",
"(",
"self",
",",
"orm",
")",
":",
"for",
"category",
"in",
"orm",
"[",
"'document_library.DocumentCategory'",
"]",
".",
"objects",
".",
"all",
"(",
")",
":",
"for",
"trans_old",
"in",
"orm",
"[",
"'document_library.DocumentCategoryTitle'",
... | Write your forwards methods here. | [
"Write",
"your",
"forwards",
"methods",
"here",
"."
] | train | https://github.com/bitlabstudio/django-document-library/blob/508737277455f182e81780cfca8d8eceb989a45b/document_library/south_migrations/0022_move_from_simple_trans_to_hvad.py#L27-L49 |
dailymuse/oz | oz/aws_cdn/actions.py | cache_busting_scan | def cache_busting_scan(*prefixes):
"""
(Re-)generates the cache buster values for all files with the specified
prefixes.
"""
redis = oz.redis.create_connection()
pipe = redis.pipeline()
# Get all items that match any of the patterns. Put it in a set to
# prevent duplicates.
if oz.settings["s3_bucket"]:
bucket = oz.aws_cdn.get_bucket()
matches = set([oz.aws_cdn.S3File(key) for prefix in prefixes for key in bucket.list(prefix)])
else:
matches = set([])
static_path = oz.settings["static_path"]
for root, _, filenames in os.walk(static_path):
for filename in filenames:
path = os.path.relpath(os.path.join(root, filename), static_path)
for prefix in prefixes:
if path.startswith(prefix):
matches.add(oz.aws_cdn.LocalFile(static_path, path))
break
# Set the cache busters
for f in matches:
file_hash = f.hash()
print(file_hash, f.path())
oz.aws_cdn.set_cache_buster(pipe, f.path(), file_hash)
pipe.execute() | python | def cache_busting_scan(*prefixes):
"""
(Re-)generates the cache buster values for all files with the specified
prefixes.
"""
redis = oz.redis.create_connection()
pipe = redis.pipeline()
# Get all items that match any of the patterns. Put it in a set to
# prevent duplicates.
if oz.settings["s3_bucket"]:
bucket = oz.aws_cdn.get_bucket()
matches = set([oz.aws_cdn.S3File(key) for prefix in prefixes for key in bucket.list(prefix)])
else:
matches = set([])
static_path = oz.settings["static_path"]
for root, _, filenames in os.walk(static_path):
for filename in filenames:
path = os.path.relpath(os.path.join(root, filename), static_path)
for prefix in prefixes:
if path.startswith(prefix):
matches.add(oz.aws_cdn.LocalFile(static_path, path))
break
# Set the cache busters
for f in matches:
file_hash = f.hash()
print(file_hash, f.path())
oz.aws_cdn.set_cache_buster(pipe, f.path(), file_hash)
pipe.execute() | [
"def",
"cache_busting_scan",
"(",
"*",
"prefixes",
")",
":",
"redis",
"=",
"oz",
".",
"redis",
".",
"create_connection",
"(",
")",
"pipe",
"=",
"redis",
".",
"pipeline",
"(",
")",
"# Get all items that match any of the patterns. Put it in a set to",
"# prevent duplica... | (Re-)generates the cache buster values for all files with the specified
prefixes. | [
"(",
"Re",
"-",
")",
"generates",
"the",
"cache",
"buster",
"values",
"for",
"all",
"files",
"with",
"the",
"specified",
"prefixes",
"."
] | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/actions.py#L14-L47 |
biocore/burrito-fillings | bfillings/bwa.py | create_bwa_index_from_fasta_file | def create_bwa_index_from_fasta_file(fasta_in, params=None):
"""Create a BWA index from an input fasta file.
fasta_in: the input fasta file from which to create the index
params: dict of bwa index specific paramters
This method returns a dictionary where the keys are the various
output suffixes (.amb, .ann, .bwt, .pac, .sa) and the values
are open file objects.
The index prefix will be the same as fasta_in, unless the -p parameter
is passed in params.
"""
if params is None:
params = {}
# Instantiate the app controller
index = BWA_index(params)
# call the application, passing the fasta file in
results = index({'fasta_in': fasta_in})
return results | python | def create_bwa_index_from_fasta_file(fasta_in, params=None):
"""Create a BWA index from an input fasta file.
fasta_in: the input fasta file from which to create the index
params: dict of bwa index specific paramters
This method returns a dictionary where the keys are the various
output suffixes (.amb, .ann, .bwt, .pac, .sa) and the values
are open file objects.
The index prefix will be the same as fasta_in, unless the -p parameter
is passed in params.
"""
if params is None:
params = {}
# Instantiate the app controller
index = BWA_index(params)
# call the application, passing the fasta file in
results = index({'fasta_in': fasta_in})
return results | [
"def",
"create_bwa_index_from_fasta_file",
"(",
"fasta_in",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"# Instantiate the app controller",
"index",
"=",
"BWA_index",
"(",
"params",
")",
"# call the application... | Create a BWA index from an input fasta file.
fasta_in: the input fasta file from which to create the index
params: dict of bwa index specific paramters
This method returns a dictionary where the keys are the various
output suffixes (.amb, .ann, .bwt, .pac, .sa) and the values
are open file objects.
The index prefix will be the same as fasta_in, unless the -p parameter
is passed in params. | [
"Create",
"a",
"BWA",
"index",
"from",
"an",
"input",
"fasta",
"file",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L614-L635 |
biocore/burrito-fillings | bfillings/bwa.py | assign_reads_to_database | def assign_reads_to_database(query, database_fasta, out_path, params=None):
"""Assign a set of query sequences to a reference database
database_fasta_fp: absolute file path to the reference database
query_fasta_fp: absolute file path to query sequences
output_fp: absolute file path of the file to be output
params: dict of BWA specific parameters.
* Specify which algorithm to use (bwa-short or bwasw) using the
dict key "algorithm"
* if algorithm is bwasw, specify params for the bwa bwasw
subcommand
* if algorithm is bwa-short, specify params for the bwa samse
subcommand
* if algorithm is bwa-short, must also specify params to use with
bwa aln, which is used to get the sai file necessary to run samse.
bwa aln params should be passed in using dict key "aln_params" and
the associated value should be a dict of params for the bwa aln
subcommand
* if a temporary directory is not specified in params using dict
key "temp_dir", it will be assumed to be /tmp
This method returns an open file object (SAM format).
"""
if params is None:
params = {}
# set the output path
params['-f'] = out_path
# if the algorithm is not specified in the params dict, or the algorithm
# is not recognized, raise an exception
if 'algorithm' not in params:
raise InvalidArgumentApplicationError("Must specify which algorithm to"
" use ('bwa-short' or 'bwasw')")
elif params['algorithm'] not in ('bwa-short', 'bwasw'):
raise InvalidArgumentApplicationError("Unknown algorithm '%s' Please "
"enter either 'bwa-short' or "
"'bwasw'." % params['algorithm'])
# if the temp directory is not specified, assume /tmp
if 'temp_dir' not in params:
params['temp_dir'] = '/tmp'
# if the algorithm is bwa-short, we must build use bwa aln to get an sai
# file before calling bwa samse on that sai file, so we need to know how
# to run bwa aln. Therefore, we must ensure there's an entry containing
# those parameters
if params['algorithm'] == 'bwa-short':
if 'aln_params' not in params:
raise InvalidArgumentApplicationError("With bwa-short, need to "
"specify a key 'aln_params' "
"and its value, a dictionary"
" to pass to bwa aln, since"
" bwa aln is an intermediate"
" step when doing "
"bwa-short.")
# we have this params dict, with "algorithm" and "temp_dir", etc which are
# not for any of the subcommands, so make a new params dict that is the
# same as the original minus these addendums
subcommand_params = {}
for k, v in params.iteritems():
if k not in ('algorithm', 'temp_dir', 'aln_params'):
subcommand_params[k] = v
# build index from database_fasta
# get a temporary file name that is not in use
_, index_prefix = mkstemp(dir=params['temp_dir'], suffix='')
create_bwa_index_from_fasta_file(database_fasta, {'-p': index_prefix})
# if the algorithm is bwasw, things are pretty simple. Just instantiate
# the proper controller and set the files
if params['algorithm'] == 'bwasw':
bwa = BWA_bwasw(params=subcommand_params)
files = {'prefix': index_prefix, 'query_fasta': query}
# if the algorithm is bwa-short, it's not so simple
elif params['algorithm'] == 'bwa-short':
# we have to call bwa_aln to get the sai file needed for samse
# use the aln_params we ensured we had above
bwa_aln = BWA_aln(params=params['aln_params'])
aln_files = {'prefix': index_prefix, 'fastq_in': query}
# get the path to the sai file
sai_file_path = bwa_aln(aln_files)['output'].name
# we will use that sai file to run samse
bwa = BWA_samse(params=subcommand_params)
files = {'prefix': index_prefix, 'sai_in': sai_file_path,
'fastq_in': query}
# run which ever app controller we decided was correct on the files
# we set up
result = bwa(files)
# they both return a SAM file, so return that
return result['output'] | python | def assign_reads_to_database(query, database_fasta, out_path, params=None):
"""Assign a set of query sequences to a reference database
database_fasta_fp: absolute file path to the reference database
query_fasta_fp: absolute file path to query sequences
output_fp: absolute file path of the file to be output
params: dict of BWA specific parameters.
* Specify which algorithm to use (bwa-short or bwasw) using the
dict key "algorithm"
* if algorithm is bwasw, specify params for the bwa bwasw
subcommand
* if algorithm is bwa-short, specify params for the bwa samse
subcommand
* if algorithm is bwa-short, must also specify params to use with
bwa aln, which is used to get the sai file necessary to run samse.
bwa aln params should be passed in using dict key "aln_params" and
the associated value should be a dict of params for the bwa aln
subcommand
* if a temporary directory is not specified in params using dict
key "temp_dir", it will be assumed to be /tmp
This method returns an open file object (SAM format).
"""
if params is None:
params = {}
# set the output path
params['-f'] = out_path
# if the algorithm is not specified in the params dict, or the algorithm
# is not recognized, raise an exception
if 'algorithm' not in params:
raise InvalidArgumentApplicationError("Must specify which algorithm to"
" use ('bwa-short' or 'bwasw')")
elif params['algorithm'] not in ('bwa-short', 'bwasw'):
raise InvalidArgumentApplicationError("Unknown algorithm '%s' Please "
"enter either 'bwa-short' or "
"'bwasw'." % params['algorithm'])
# if the temp directory is not specified, assume /tmp
if 'temp_dir' not in params:
params['temp_dir'] = '/tmp'
# if the algorithm is bwa-short, we must build use bwa aln to get an sai
# file before calling bwa samse on that sai file, so we need to know how
# to run bwa aln. Therefore, we must ensure there's an entry containing
# those parameters
if params['algorithm'] == 'bwa-short':
if 'aln_params' not in params:
raise InvalidArgumentApplicationError("With bwa-short, need to "
"specify a key 'aln_params' "
"and its value, a dictionary"
" to pass to bwa aln, since"
" bwa aln is an intermediate"
" step when doing "
"bwa-short.")
# we have this params dict, with "algorithm" and "temp_dir", etc which are
# not for any of the subcommands, so make a new params dict that is the
# same as the original minus these addendums
subcommand_params = {}
for k, v in params.iteritems():
if k not in ('algorithm', 'temp_dir', 'aln_params'):
subcommand_params[k] = v
# build index from database_fasta
# get a temporary file name that is not in use
_, index_prefix = mkstemp(dir=params['temp_dir'], suffix='')
create_bwa_index_from_fasta_file(database_fasta, {'-p': index_prefix})
# if the algorithm is bwasw, things are pretty simple. Just instantiate
# the proper controller and set the files
if params['algorithm'] == 'bwasw':
bwa = BWA_bwasw(params=subcommand_params)
files = {'prefix': index_prefix, 'query_fasta': query}
# if the algorithm is bwa-short, it's not so simple
elif params['algorithm'] == 'bwa-short':
# we have to call bwa_aln to get the sai file needed for samse
# use the aln_params we ensured we had above
bwa_aln = BWA_aln(params=params['aln_params'])
aln_files = {'prefix': index_prefix, 'fastq_in': query}
# get the path to the sai file
sai_file_path = bwa_aln(aln_files)['output'].name
# we will use that sai file to run samse
bwa = BWA_samse(params=subcommand_params)
files = {'prefix': index_prefix, 'sai_in': sai_file_path,
'fastq_in': query}
# run which ever app controller we decided was correct on the files
# we set up
result = bwa(files)
# they both return a SAM file, so return that
return result['output'] | [
"def",
"assign_reads_to_database",
"(",
"query",
",",
"database_fasta",
",",
"out_path",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"# set the output path",
"params",
"[",
"'-f'",
"]",
"=",
"out_path",
... | Assign a set of query sequences to a reference database
database_fasta_fp: absolute file path to the reference database
query_fasta_fp: absolute file path to query sequences
output_fp: absolute file path of the file to be output
params: dict of BWA specific parameters.
* Specify which algorithm to use (bwa-short or bwasw) using the
dict key "algorithm"
* if algorithm is bwasw, specify params for the bwa bwasw
subcommand
* if algorithm is bwa-short, specify params for the bwa samse
subcommand
* if algorithm is bwa-short, must also specify params to use with
bwa aln, which is used to get the sai file necessary to run samse.
bwa aln params should be passed in using dict key "aln_params" and
the associated value should be a dict of params for the bwa aln
subcommand
* if a temporary directory is not specified in params using dict
key "temp_dir", it will be assumed to be /tmp
This method returns an open file object (SAM format). | [
"Assign",
"a",
"set",
"of",
"query",
"sequences",
"to",
"a",
"reference",
"database"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L638-L734 |
biocore/burrito-fillings | bfillings/bwa.py | assign_dna_reads_to_dna_database | def assign_dna_reads_to_dna_database(query_fasta_fp, database_fasta_fp, out_fp,
params={}):
"""Wraps assign_reads_to_database, setting various parameters.
The default settings are below, but may be overwritten and/or added to
using the params dict:
algorithm: bwasw
"""
my_params = {'algorithm': 'bwasw'}
my_params.update(params)
result = assign_reads_to_database(query_fasta_fp, database_fasta_fp,
out_fp, my_params)
return result | python | def assign_dna_reads_to_dna_database(query_fasta_fp, database_fasta_fp, out_fp,
params={}):
"""Wraps assign_reads_to_database, setting various parameters.
The default settings are below, but may be overwritten and/or added to
using the params dict:
algorithm: bwasw
"""
my_params = {'algorithm': 'bwasw'}
my_params.update(params)
result = assign_reads_to_database(query_fasta_fp, database_fasta_fp,
out_fp, my_params)
return result | [
"def",
"assign_dna_reads_to_dna_database",
"(",
"query_fasta_fp",
",",
"database_fasta_fp",
",",
"out_fp",
",",
"params",
"=",
"{",
"}",
")",
":",
"my_params",
"=",
"{",
"'algorithm'",
":",
"'bwasw'",
"}",
"my_params",
".",
"update",
"(",
"params",
")",
"resul... | Wraps assign_reads_to_database, setting various parameters.
The default settings are below, but may be overwritten and/or added to
using the params dict:
algorithm: bwasw | [
"Wraps",
"assign_reads_to_database",
"setting",
"various",
"parameters",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L737-L752 |
biocore/burrito-fillings | bfillings/bwa.py | BWA.check_arguments | def check_arguments(self):
"""Sanity check the arguments passed in.
Uses the boolean functions specified in the subclasses in the
_valid_arguments dictionary to determine if an argument is valid
or invalid.
"""
for k, v in self.Parameters.iteritems():
if self.Parameters[k].isOn():
if k in self._valid_arguments:
if not self._valid_arguments[k](v.Value):
error_message = 'Invalid argument (%s) ' % v.Value
error_message += 'for parameter %s\n' % k
raise InvalidArgumentApplicationError(error_message) | python | def check_arguments(self):
"""Sanity check the arguments passed in.
Uses the boolean functions specified in the subclasses in the
_valid_arguments dictionary to determine if an argument is valid
or invalid.
"""
for k, v in self.Parameters.iteritems():
if self.Parameters[k].isOn():
if k in self._valid_arguments:
if not self._valid_arguments[k](v.Value):
error_message = 'Invalid argument (%s) ' % v.Value
error_message += 'for parameter %s\n' % k
raise InvalidArgumentApplicationError(error_message) | [
"def",
"check_arguments",
"(",
"self",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"Parameters",
".",
"iteritems",
"(",
")",
":",
"if",
"self",
".",
"Parameters",
"[",
"k",
"]",
".",
"isOn",
"(",
")",
":",
"if",
"k",
"in",
"self",
".",
"... | Sanity check the arguments passed in.
Uses the boolean functions specified in the subclasses in the
_valid_arguments dictionary to determine if an argument is valid
or invalid. | [
"Sanity",
"check",
"the",
"arguments",
"passed",
"in",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L92-L105 |
biocore/burrito-fillings | bfillings/bwa.py | BWA._get_base_command | def _get_base_command(self):
""" Returns the full command string
Overridden here because there are positional arguments (specifically
the input and output files).
"""
command_parts = []
# Append a change directory to the beginning of the command to change
# to self.WorkingDir before running the command
# WorkingDir should be in quotes -- filenames might contain spaces
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
# also make sure there's a subcommand!
if self._subcommand is None:
raise ApplicationError('_subcommand has not been set.')
subcommand = self._subcommand
# sorting makes testing easier, since the options will be written out
# in alphabetical order. Could of course use option parsing scripts
# in cogent for this, but this works as well.
parameters = sorted([str(x) for x in self.Parameters.values()
if str(x)])
synonyms = self._synonyms
command_parts.append(cd_command)
command_parts.append(command)
# add in subcommand
command_parts.append(subcommand)
command_parts += parameters
# add in the positional arguments in the correct order
for k in self._input_order:
# this check is necessary to account for optional positional
# arguments, such as the mate file for bwa bwasw
# Note that the input handler will ensure that all required
# parameters have valid values
if k in self._input:
command_parts.append(self._input[k])
return self._command_delimiter.join(command_parts).strip() | python | def _get_base_command(self):
""" Returns the full command string
Overridden here because there are positional arguments (specifically
the input and output files).
"""
command_parts = []
# Append a change directory to the beginning of the command to change
# to self.WorkingDir before running the command
# WorkingDir should be in quotes -- filenames might contain spaces
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
# also make sure there's a subcommand!
if self._subcommand is None:
raise ApplicationError('_subcommand has not been set.')
subcommand = self._subcommand
# sorting makes testing easier, since the options will be written out
# in alphabetical order. Could of course use option parsing scripts
# in cogent for this, but this works as well.
parameters = sorted([str(x) for x in self.Parameters.values()
if str(x)])
synonyms = self._synonyms
command_parts.append(cd_command)
command_parts.append(command)
# add in subcommand
command_parts.append(subcommand)
command_parts += parameters
# add in the positional arguments in the correct order
for k in self._input_order:
# this check is necessary to account for optional positional
# arguments, such as the mate file for bwa bwasw
# Note that the input handler will ensure that all required
# parameters have valid values
if k in self._input:
command_parts.append(self._input[k])
return self._command_delimiter.join(command_parts).strip() | [
"def",
"_get_base_command",
"(",
"self",
")",
":",
"command_parts",
"=",
"[",
"]",
"# Append a change directory to the beginning of the command to change",
"# to self.WorkingDir before running the command",
"# WorkingDir should be in quotes -- filenames might contain spaces",
"cd_command",... | Returns the full command string
Overridden here because there are positional arguments (specifically
the input and output files). | [
"Returns",
"the",
"full",
"command",
"string"
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L107-L146 |
biocore/burrito-fillings | bfillings/bwa.py | BWA._input_as_dict | def _input_as_dict(self, data):
"""Takes dictionary that sets input and output files.
Valid keys for the dictionary are specified in the subclasses. File
paths must be absolute.
"""
# clear self._input; ready to receive new input and output files
self._input = {}
# Check that the arguments to the
# subcommand-specific parameters are valid
self.check_arguments()
# Ensure that we have all required input (file I/O)
for k in self._input_order:
# N.B.: optional positional arguments begin with underscore (_)!
# (e.g., see _mate_in for bwa bwasw)
if k[0] != '_' and k not in data:
raise MissingRequiredArgumentApplicationError("Missing "
"required "
"input %s" % k)
# Set values for input and output files
for k in data:
# check for unexpected keys in the dict
if k not in self._input_order:
error_message = "Invalid input arguments (%s)\n" % k
error_message += "Valid keys are: %s" % repr(self._input_order)
raise InvalidArgumentApplicationError(error_message + '\n')
# check for absolute paths
if not isabs(data[k][0]):
raise InvalidArgumentApplicationError("Only absolute paths "
"allowed.\n%s" %
repr(data))
self._input[k] = data[k]
# if there is a -f option to specify an output file, force the user to
# use it (otherwise things to to stdout)
if '-f' in self.Parameters and not self.Parameters['-f'].isOn():
raise InvalidArgumentApplicationError("Please specify an output "
"file with -f")
return '' | python | def _input_as_dict(self, data):
"""Takes dictionary that sets input and output files.
Valid keys for the dictionary are specified in the subclasses. File
paths must be absolute.
"""
# clear self._input; ready to receive new input and output files
self._input = {}
# Check that the arguments to the
# subcommand-specific parameters are valid
self.check_arguments()
# Ensure that we have all required input (file I/O)
for k in self._input_order:
# N.B.: optional positional arguments begin with underscore (_)!
# (e.g., see _mate_in for bwa bwasw)
if k[0] != '_' and k not in data:
raise MissingRequiredArgumentApplicationError("Missing "
"required "
"input %s" % k)
# Set values for input and output files
for k in data:
# check for unexpected keys in the dict
if k not in self._input_order:
error_message = "Invalid input arguments (%s)\n" % k
error_message += "Valid keys are: %s" % repr(self._input_order)
raise InvalidArgumentApplicationError(error_message + '\n')
# check for absolute paths
if not isabs(data[k][0]):
raise InvalidArgumentApplicationError("Only absolute paths "
"allowed.\n%s" %
repr(data))
self._input[k] = data[k]
# if there is a -f option to specify an output file, force the user to
# use it (otherwise things to to stdout)
if '-f' in self.Parameters and not self.Parameters['-f'].isOn():
raise InvalidArgumentApplicationError("Please specify an output "
"file with -f")
return '' | [
"def",
"_input_as_dict",
"(",
"self",
",",
"data",
")",
":",
"# clear self._input; ready to receive new input and output files",
"self",
".",
"_input",
"=",
"{",
"}",
"# Check that the arguments to the",
"# subcommand-specific parameters are valid",
"self",
".",
"check_argument... | Takes dictionary that sets input and output files.
Valid keys for the dictionary are specified in the subclasses. File
paths must be absolute. | [
"Takes",
"dictionary",
"that",
"sets",
"input",
"and",
"output",
"files",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L150-L192 |
biocore/burrito-fillings | bfillings/bwa.py | BWA_index._get_result_paths | def _get_result_paths(self, data):
"""Gets the results for a run of bwa index.
bwa index outputs 5 files when the index is created. The filename
prefix will be the same as the input fasta, unless overridden with
the -p option, and the 5 extensions are listed below:
.amb
.ann
.bwt
.pac
.sa
and these extentions (including the period) are the keys to the
dictionary that is returned.
"""
# determine the names of the files. The name will be the same as the
# input fasta file unless overridden with the -p option
if self.Parameters['-p'].isOn():
prefix = self.Parameters['-p'].Value
else:
prefix = data['fasta_in']
# the 5 output file suffixes
suffixes = ['.amb', '.ann', '.bwt', '.pac', '.sa']
out_files = {}
for suffix in suffixes:
out_files[suffix] = ResultPath(prefix + suffix, IsWritten=True)
return out_files | python | def _get_result_paths(self, data):
"""Gets the results for a run of bwa index.
bwa index outputs 5 files when the index is created. The filename
prefix will be the same as the input fasta, unless overridden with
the -p option, and the 5 extensions are listed below:
.amb
.ann
.bwt
.pac
.sa
and these extentions (including the period) are the keys to the
dictionary that is returned.
"""
# determine the names of the files. The name will be the same as the
# input fasta file unless overridden with the -p option
if self.Parameters['-p'].isOn():
prefix = self.Parameters['-p'].Value
else:
prefix = data['fasta_in']
# the 5 output file suffixes
suffixes = ['.amb', '.ann', '.bwt', '.pac', '.sa']
out_files = {}
for suffix in suffixes:
out_files[suffix] = ResultPath(prefix + suffix, IsWritten=True)
return out_files | [
"def",
"_get_result_paths",
"(",
"self",
",",
"data",
")",
":",
"# determine the names of the files. The name will be the same as the",
"# input fasta file unless overridden with the -p option",
"if",
"self",
".",
"Parameters",
"[",
"'-p'",
"]",
".",
"isOn",
"(",
")",
":",
... | Gets the results for a run of bwa index.
bwa index outputs 5 files when the index is created. The filename
prefix will be the same as the input fasta, unless overridden with
the -p option, and the 5 extensions are listed below:
.amb
.ann
.bwt
.pac
.sa
and these extentions (including the period) are the keys to the
dictionary that is returned. | [
"Gets",
"the",
"results",
"for",
"a",
"run",
"of",
"bwa",
"index",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/bwa.py#L243-L273 |
michaelpb/omnic | omnic/utils/graph.py | DirectedGraph.get_all_paths_from | def get_all_paths_from(self, start, seen=None):
'''
Return a list of all paths to all nodes from a given start node
'''
if seen is None:
seen = frozenset()
results = [(0, (start, ))]
if start in seen or start not in self.edges:
return results
seen = seen | frozenset((start,))
for node, edge_weight in self.edges[start].items():
for subpath_weight, subpath in self.get_all_paths_from(node, seen):
total_weight = edge_weight + subpath_weight
full_path = (start, ) + subpath
results.append((total_weight, full_path))
return tuple(results) | python | def get_all_paths_from(self, start, seen=None):
'''
Return a list of all paths to all nodes from a given start node
'''
if seen is None:
seen = frozenset()
results = [(0, (start, ))]
if start in seen or start not in self.edges:
return results
seen = seen | frozenset((start,))
for node, edge_weight in self.edges[start].items():
for subpath_weight, subpath in self.get_all_paths_from(node, seen):
total_weight = edge_weight + subpath_weight
full_path = (start, ) + subpath
results.append((total_weight, full_path))
return tuple(results) | [
"def",
"get_all_paths_from",
"(",
"self",
",",
"start",
",",
"seen",
"=",
"None",
")",
":",
"if",
"seen",
"is",
"None",
":",
"seen",
"=",
"frozenset",
"(",
")",
"results",
"=",
"[",
"(",
"0",
",",
"(",
"start",
",",
")",
")",
"]",
"if",
"start",
... | Return a list of all paths to all nodes from a given start node | [
"Return",
"a",
"list",
"of",
"all",
"paths",
"to",
"all",
"nodes",
"from",
"a",
"given",
"start",
"node"
] | train | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/utils/graph.py#L46-L61 |
halfak/deltas | deltas/apply.py | apply | def apply(operations, a_tokens, b_tokens):
"""
Applies a sequences of operations to tokens -- copies tokens from
`a_tokens` and `b_tokens` according to `operations`.
:Parameters:
operations : sequence of :~class:`deltas.Operation`
Operations to perform
a_tokens : list of `comparable`
Starting sequence of comparable tokens
b_tokens : list of `comparable`
Ending list of comparable tokens
:Returns:
A new list of tokens
"""
for operation in operations:
if isinstance(operation, Equal):
#print("Equal: {0}".format(str(a_tokens[operation.a1:operation.a2])))
for t in a_tokens[operation.a1:operation.a2]: yield t
elif isinstance(operation, Insert):
#print("Insert: {0}".format(str(b_tokens[operation.b1:operation.b2])))
for t in b_tokens[operation.b1:operation.b2]: yield t
elif isinstance(operation, Delete):
#print("Delete: {0}".format(str(a_tokens[operation.a1:operation.a2])))
pass
else:
raise TypeError("Unexpected operation type " + \
"{0}".format(type(operation))) | python | def apply(operations, a_tokens, b_tokens):
"""
Applies a sequences of operations to tokens -- copies tokens from
`a_tokens` and `b_tokens` according to `operations`.
:Parameters:
operations : sequence of :~class:`deltas.Operation`
Operations to perform
a_tokens : list of `comparable`
Starting sequence of comparable tokens
b_tokens : list of `comparable`
Ending list of comparable tokens
:Returns:
A new list of tokens
"""
for operation in operations:
if isinstance(operation, Equal):
#print("Equal: {0}".format(str(a_tokens[operation.a1:operation.a2])))
for t in a_tokens[operation.a1:operation.a2]: yield t
elif isinstance(operation, Insert):
#print("Insert: {0}".format(str(b_tokens[operation.b1:operation.b2])))
for t in b_tokens[operation.b1:operation.b2]: yield t
elif isinstance(operation, Delete):
#print("Delete: {0}".format(str(a_tokens[operation.a1:operation.a2])))
pass
else:
raise TypeError("Unexpected operation type " + \
"{0}".format(type(operation))) | [
"def",
"apply",
"(",
"operations",
",",
"a_tokens",
",",
"b_tokens",
")",
":",
"for",
"operation",
"in",
"operations",
":",
"if",
"isinstance",
"(",
"operation",
",",
"Equal",
")",
":",
"#print(\"Equal: {0}\".format(str(a_tokens[operation.a1:operation.a2])))",
"for",
... | Applies a sequences of operations to tokens -- copies tokens from
`a_tokens` and `b_tokens` according to `operations`.
:Parameters:
operations : sequence of :~class:`deltas.Operation`
Operations to perform
a_tokens : list of `comparable`
Starting sequence of comparable tokens
b_tokens : list of `comparable`
Ending list of comparable tokens
:Returns:
A new list of tokens | [
"Applies",
"a",
"sequences",
"of",
"operations",
"to",
"tokens",
"--",
"copies",
"tokens",
"from",
"a_tokens",
"and",
"b_tokens",
"according",
"to",
"operations",
"."
] | train | https://github.com/halfak/deltas/blob/4173f4215b93426a877f4bb4a7a3547834e60ac3/deltas/apply.py#L9-L41 |
salbrandi/stressypy | stressypy/cpustresser.py | create_job | def create_job(cpu_width, time_height):
"""
:param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object
"""
shell_command = stress_string.format(cpu_width, time_height)
job = JobBlock(cpu_width, time_height)
job.set_job(subprocess.call, shell_command, shell=True)
return job | python | def create_job(cpu_width, time_height):
"""
:param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object
"""
shell_command = stress_string.format(cpu_width, time_height)
job = JobBlock(cpu_width, time_height)
job.set_job(subprocess.call, shell_command, shell=True)
return job | [
"def",
"create_job",
"(",
"cpu_width",
",",
"time_height",
")",
":",
"shell_command",
"=",
"stress_string",
".",
"format",
"(",
"cpu_width",
",",
"time_height",
")",
"job",
"=",
"JobBlock",
"(",
"cpu_width",
",",
"time_height",
")",
"job",
".",
"set_job",
"(... | :param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object | [
":",
"param",
"cpu_width",
":",
"number",
"of",
"cpus",
":",
"param",
"time_height",
":",
"amount",
"of",
"time",
":",
"return",
":",
"the",
"instantiated",
"JobBlock",
"object"
] | train | https://github.com/salbrandi/stressypy/blob/7e2901e131a40f3597921358a1c8647a346bd0cc/stressypy/cpustresser.py#L52-L62 |
biocore/burrito-fillings | bfillings/infernal.py | cmbuild_from_alignment | def cmbuild_from_alignment(aln, structure_string, refine=False, \
return_alignment=False,params=None):
"""Uses cmbuild to build a CM file given an alignment and structure string.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_alignment: Return (in Stockholm format) alignment file used to
construct the CM file. This will either be the original alignment
and structure string passed in, or the refined alignment if --refine
was used. (Default=False)
- Note. This will be a string that can either be written to a file
or parsed.
"""
aln = Alignment(aln)
if len(structure_string) != aln.SeqLen:
raise ValueError, """Structure string is not same length as alignment. Structure string is %s long. Alignment is %s long."""%(len(structure_string),\
aln.SeqLen)
else:
struct_dict = {'SS_cons':structure_string}
#Make new Cmbuild app instance.
app = Cmbuild(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
#turn on refine flag if True.
if refine:
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['--refine'].on(tmp_file)
#Get alignment in Stockholm format
aln_file_string = stockholm_from_alignment(aln,GC_annotation=struct_dict)
#get path to alignment filename
aln_path = app._input_as_multiline_string(aln_file_string)
cm_path = aln_path.split('.txt')[0]+'.cm'
app.Parameters['-n'].on(cm_path)
filepaths = [cm_path,aln_path]
res = app(filepaths)
cm_file = res['CmFile'].read()
if return_alignment:
#If alignment was refined, return refined alignment and structure,
# otherwise return original alignment and structure.
if refine:
aln_file_string = res['Refined'].read()
res.cleanUp()
return cm_file, aln_file_string
#Just return cm_file
else:
res.cleanUp()
return cm_file | python | def cmbuild_from_alignment(aln, structure_string, refine=False, \
return_alignment=False,params=None):
"""Uses cmbuild to build a CM file given an alignment and structure string.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_alignment: Return (in Stockholm format) alignment file used to
construct the CM file. This will either be the original alignment
and structure string passed in, or the refined alignment if --refine
was used. (Default=False)
- Note. This will be a string that can either be written to a file
or parsed.
"""
aln = Alignment(aln)
if len(structure_string) != aln.SeqLen:
raise ValueError, """Structure string is not same length as alignment. Structure string is %s long. Alignment is %s long."""%(len(structure_string),\
aln.SeqLen)
else:
struct_dict = {'SS_cons':structure_string}
#Make new Cmbuild app instance.
app = Cmbuild(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
#turn on refine flag if True.
if refine:
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['--refine'].on(tmp_file)
#Get alignment in Stockholm format
aln_file_string = stockholm_from_alignment(aln,GC_annotation=struct_dict)
#get path to alignment filename
aln_path = app._input_as_multiline_string(aln_file_string)
cm_path = aln_path.split('.txt')[0]+'.cm'
app.Parameters['-n'].on(cm_path)
filepaths = [cm_path,aln_path]
res = app(filepaths)
cm_file = res['CmFile'].read()
if return_alignment:
#If alignment was refined, return refined alignment and structure,
# otherwise return original alignment and structure.
if refine:
aln_file_string = res['Refined'].read()
res.cleanUp()
return cm_file, aln_file_string
#Just return cm_file
else:
res.cleanUp()
return cm_file | [
"def",
"cmbuild_from_alignment",
"(",
"aln",
",",
"structure_string",
",",
"refine",
"=",
"False",
",",
"return_alignment",
"=",
"False",
",",
"params",
"=",
"None",
")",
":",
"aln",
"=",
"Alignment",
"(",
"aln",
")",
"if",
"len",
"(",
"structure_string",
... | Uses cmbuild to build a CM file given an alignment and structure string.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_alignment: Return (in Stockholm format) alignment file used to
construct the CM file. This will either be the original alignment
and structure string passed in, or the refined alignment if --refine
was used. (Default=False)
- Note. This will be a string that can either be written to a file
or parsed. | [
"Uses",
"cmbuild",
"to",
"build",
"a",
"CM",
"file",
"given",
"an",
"alignment",
"and",
"structure",
"string",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/infernal.py#L1234-L1291 |
biocore/burrito-fillings | bfillings/infernal.py | cmbuild_from_file | def cmbuild_from_file(stockholm_file_path, refine=False,return_alignment=False,\
params=None):
"""Uses cmbuild to build a CM file given a stockholm file.
- stockholm_file_path: a path to a stockholm file. This file should
contain a multiple sequence alignment formated in Stockholm format.
This must contain a sequence structure line:
#=GC SS_cons <structure string>
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_alignment: Return alignment and structure string used to
construct the CM file. This will either be the original alignment
and structure string passed in, or the refined alignment if
--refine was used. (Default=False)
"""
#get alignment and structure string from stockholm file.
info, aln, structure_string = \
list(MinimalRfamParser(open(stockholm_file_path,'U'),\
seq_constructor=ChangedSequence))[0]
#call cmbuild_from_alignment.
res = cmbuild_from_alignment(aln, structure_string, refine=refine, \
return_alignment=return_alignment,params=params)
return res | python | def cmbuild_from_file(stockholm_file_path, refine=False,return_alignment=False,\
params=None):
"""Uses cmbuild to build a CM file given a stockholm file.
- stockholm_file_path: a path to a stockholm file. This file should
contain a multiple sequence alignment formated in Stockholm format.
This must contain a sequence structure line:
#=GC SS_cons <structure string>
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_alignment: Return alignment and structure string used to
construct the CM file. This will either be the original alignment
and structure string passed in, or the refined alignment if
--refine was used. (Default=False)
"""
#get alignment and structure string from stockholm file.
info, aln, structure_string = \
list(MinimalRfamParser(open(stockholm_file_path,'U'),\
seq_constructor=ChangedSequence))[0]
#call cmbuild_from_alignment.
res = cmbuild_from_alignment(aln, structure_string, refine=refine, \
return_alignment=return_alignment,params=params)
return res | [
"def",
"cmbuild_from_file",
"(",
"stockholm_file_path",
",",
"refine",
"=",
"False",
",",
"return_alignment",
"=",
"False",
",",
"params",
"=",
"None",
")",
":",
"#get alignment and structure string from stockholm file.",
"info",
",",
"aln",
",",
"structure_string",
"... | Uses cmbuild to build a CM file given a stockholm file.
- stockholm_file_path: a path to a stockholm file. This file should
contain a multiple sequence alignment formated in Stockholm format.
This must contain a sequence structure line:
#=GC SS_cons <structure string>
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_alignment: Return alignment and structure string used to
construct the CM file. This will either be the original alignment
and structure string passed in, or the refined alignment if
--refine was used. (Default=False) | [
"Uses",
"cmbuild",
"to",
"build",
"a",
"CM",
"file",
"given",
"a",
"stockholm",
"file",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/infernal.py#L1294-L1317 |
biocore/burrito-fillings | bfillings/infernal.py | cmalign_from_alignment | def cmalign_from_alignment(aln, structure_string, seqs, moltype=DNA,\
include_aln=True,refine=False, return_stdout=False,params=None,\
cmbuild_params=None):
"""Uses cmbuild to build a CM file, then cmalign to build an alignment.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be aligned
to the aligned sequences in aln.
- moltype: Cogent moltype object. Must be RNA or DNA.
- include_aln: Boolean to include sequences in aln in final alignment.
(Default=True)
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_stdout: Boolean to return standard output from infernal. This
includes alignment and structure bit scores and average
probabilities for each sequence. (Default=False)
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
cm_file, aln_file_string = cmbuild_from_alignment(aln, structure_string,\
refine=refine,return_alignment=True,params=cmbuild_params)
if params is None:
params = {}
params.update({MOLTYPE_MAP[moltype]:True})
app = Cmalign(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
#files to remove that aren't cleaned up by ResultPath object
to_remove = []
#turn on --withali flag if True.
if include_aln:
app.Parameters['--withali'].on(\
app._tempfile_as_multiline_string(aln_file_string))
#remove this file at end
to_remove.append(app.Parameters['--withali'].Value)
seqs_path = app._input_as_multiline_string(int_map.toFasta())
cm_path = app._tempfile_as_multiline_string(cm_file)
#add cm_path to to_remove
to_remove.append(cm_path)
paths = [cm_path,seqs_path]
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['-o'].on(tmp_file)
res = app(paths)
info, aligned, struct_string = \
list(MinimalRfamParser(res['Alignment'].readlines(),\
seq_constructor=SEQ_CONSTRUCTOR_MAP[moltype]))[0]
#Make new dict mapping original IDs
new_alignment={}
for k,v in aligned.NamedSeqs.items():
new_alignment[int_keys.get(k,k)]=v
#Create an Alignment object from alignment dict
new_alignment = Alignment(new_alignment,MolType=moltype)
std_out = res['StdOut'].read()
#clean up files
res.cleanUp()
for f in to_remove: remove(f)
if return_stdout:
return new_alignment, struct_string, std_out
else:
return new_alignment, struct_string | python | def cmalign_from_alignment(aln, structure_string, seqs, moltype=DNA,\
include_aln=True,refine=False, return_stdout=False,params=None,\
cmbuild_params=None):
"""Uses cmbuild to build a CM file, then cmalign to build an alignment.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be aligned
to the aligned sequences in aln.
- moltype: Cogent moltype object. Must be RNA or DNA.
- include_aln: Boolean to include sequences in aln in final alignment.
(Default=True)
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_stdout: Boolean to return standard output from infernal. This
includes alignment and structure bit scores and average
probabilities for each sequence. (Default=False)
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
cm_file, aln_file_string = cmbuild_from_alignment(aln, structure_string,\
refine=refine,return_alignment=True,params=cmbuild_params)
if params is None:
params = {}
params.update({MOLTYPE_MAP[moltype]:True})
app = Cmalign(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
#files to remove that aren't cleaned up by ResultPath object
to_remove = []
#turn on --withali flag if True.
if include_aln:
app.Parameters['--withali'].on(\
app._tempfile_as_multiline_string(aln_file_string))
#remove this file at end
to_remove.append(app.Parameters['--withali'].Value)
seqs_path = app._input_as_multiline_string(int_map.toFasta())
cm_path = app._tempfile_as_multiline_string(cm_file)
#add cm_path to to_remove
to_remove.append(cm_path)
paths = [cm_path,seqs_path]
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['-o'].on(tmp_file)
res = app(paths)
info, aligned, struct_string = \
list(MinimalRfamParser(res['Alignment'].readlines(),\
seq_constructor=SEQ_CONSTRUCTOR_MAP[moltype]))[0]
#Make new dict mapping original IDs
new_alignment={}
for k,v in aligned.NamedSeqs.items():
new_alignment[int_keys.get(k,k)]=v
#Create an Alignment object from alignment dict
new_alignment = Alignment(new_alignment,MolType=moltype)
std_out = res['StdOut'].read()
#clean up files
res.cleanUp()
for f in to_remove: remove(f)
if return_stdout:
return new_alignment, struct_string, std_out
else:
return new_alignment, struct_string | [
"def",
"cmalign_from_alignment",
"(",
"aln",
",",
"structure_string",
",",
"seqs",
",",
"moltype",
"=",
"DNA",
",",
"include_aln",
"=",
"True",
",",
"refine",
"=",
"False",
",",
"return_stdout",
"=",
"False",
",",
"params",
"=",
"None",
",",
"cmbuild_params"... | Uses cmbuild to build a CM file, then cmalign to build an alignment.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be aligned
to the aligned sequences in aln.
- moltype: Cogent moltype object. Must be RNA or DNA.
- include_aln: Boolean to include sequences in aln in final alignment.
(Default=True)
- refine: refine the alignment and realign before building the cm.
(Default=False)
- return_stdout: Boolean to return standard output from infernal. This
includes alignment and structure bit scores and average
probabilities for each sequence. (Default=False) | [
"Uses",
"cmbuild",
"to",
"build",
"a",
"CM",
"file",
"then",
"cmalign",
"to",
"build",
"an",
"alignment",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/infernal.py#L1319-L1399 |
biocore/burrito-fillings | bfillings/infernal.py | cmalign_from_file | def cmalign_from_file(cm_file_path, seqs, moltype=DNA, alignment_file_path=None,\
include_aln=False,return_stdout=False,params=None):
"""Uses cmalign to align seqs to alignment in cm_file_path.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to align sequences in seqs.
- seqs: unaligned sequendes that are to be aligned to the sequences in
cm_file.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- alignment_file_path: path to stockholm alignment file used to create
cm_file.
__IMPORTANT__: This MUST be the same file used by cmbuild
originally. Only need to pass in this file if include_aln=True.
This helper function will NOT check if the alignment file is correct
so you must use it correctly.
- include_aln: Boolean to include sequences in aln_file in final
alignment. (Default=False)
- return_stdout: Boolean to return standard output from infernal. This
includes alignment and structure bit scores and average
probabilities for each sequence. (Default=False)
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
if params is None:
params = {}
params.update({MOLTYPE_MAP[moltype]:True})
app = Cmalign(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
#turn on --withali flag if True.
if include_aln:
if alignment_file_path is None:
raise DataError, """Must have path to alignment file used to build CM if include_aln=True."""
else:
app.Parameters['--withali'].on(alignment_file_path)
seqs_path = app._input_as_multiline_string(int_map.toFasta())
paths = [cm_file_path,seqs_path]
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['-o'].on(tmp_file)
res = app(paths)
info, aligned, struct_string = \
list(MinimalRfamParser(res['Alignment'].readlines(),\
seq_constructor=SEQ_CONSTRUCTOR_MAP[moltype]))[0]
#Make new dict mapping original IDs
new_alignment={}
for k,v in aligned.items():
new_alignment[int_keys.get(k,k)]=v
#Create an Alignment object from alignment dict
new_alignment = Alignment(new_alignment,MolType=moltype)
std_out = res['StdOut'].read()
res.cleanUp()
if return_stdout:
return new_alignment, struct_string, std_out
else:
return new_alignment, struct_string | python | def cmalign_from_file(cm_file_path, seqs, moltype=DNA, alignment_file_path=None,\
include_aln=False,return_stdout=False,params=None):
"""Uses cmalign to align seqs to alignment in cm_file_path.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to align sequences in seqs.
- seqs: unaligned sequendes that are to be aligned to the sequences in
cm_file.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- alignment_file_path: path to stockholm alignment file used to create
cm_file.
__IMPORTANT__: This MUST be the same file used by cmbuild
originally. Only need to pass in this file if include_aln=True.
This helper function will NOT check if the alignment file is correct
so you must use it correctly.
- include_aln: Boolean to include sequences in aln_file in final
alignment. (Default=False)
- return_stdout: Boolean to return standard output from infernal. This
includes alignment and structure bit scores and average
probabilities for each sequence. (Default=False)
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
if params is None:
params = {}
params.update({MOLTYPE_MAP[moltype]:True})
app = Cmalign(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
#turn on --withali flag if True.
if include_aln:
if alignment_file_path is None:
raise DataError, """Must have path to alignment file used to build CM if include_aln=True."""
else:
app.Parameters['--withali'].on(alignment_file_path)
seqs_path = app._input_as_multiline_string(int_map.toFasta())
paths = [cm_file_path,seqs_path]
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['-o'].on(tmp_file)
res = app(paths)
info, aligned, struct_string = \
list(MinimalRfamParser(res['Alignment'].readlines(),\
seq_constructor=SEQ_CONSTRUCTOR_MAP[moltype]))[0]
#Make new dict mapping original IDs
new_alignment={}
for k,v in aligned.items():
new_alignment[int_keys.get(k,k)]=v
#Create an Alignment object from alignment dict
new_alignment = Alignment(new_alignment,MolType=moltype)
std_out = res['StdOut'].read()
res.cleanUp()
if return_stdout:
return new_alignment, struct_string, std_out
else:
return new_alignment, struct_string | [
"def",
"cmalign_from_file",
"(",
"cm_file_path",
",",
"seqs",
",",
"moltype",
"=",
"DNA",
",",
"alignment_file_path",
"=",
"None",
",",
"include_aln",
"=",
"False",
",",
"return_stdout",
"=",
"False",
",",
"params",
"=",
"None",
")",
":",
"#NOTE: Must degap se... | Uses cmalign to align seqs to alignment in cm_file_path.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to align sequences in seqs.
- seqs: unaligned sequendes that are to be aligned to the sequences in
cm_file.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- alignment_file_path: path to stockholm alignment file used to create
cm_file.
__IMPORTANT__: This MUST be the same file used by cmbuild
originally. Only need to pass in this file if include_aln=True.
This helper function will NOT check if the alignment file is correct
so you must use it correctly.
- include_aln: Boolean to include sequences in aln_file in final
alignment. (Default=False)
- return_stdout: Boolean to return standard output from infernal. This
includes alignment and structure bit scores and average
probabilities for each sequence. (Default=False) | [
"Uses",
"cmalign",
"to",
"align",
"seqs",
"to",
"alignment",
"in",
"cm_file_path",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/infernal.py#L1402-L1469 |
biocore/burrito-fillings | bfillings/infernal.py | cmsearch_from_alignment | def cmsearch_from_alignment(aln, structure_string, seqs, moltype, cutoff=0.0,\
refine=False,params=None):
"""Uses cmbuild to build a CM file, then cmsearch to find homologs.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs.
- refine: refine the alignment and realign before building the cm.
(Default=False)
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
cm_file, aln_file_string = cmbuild_from_alignment(aln, structure_string,\
refine=refine,return_alignment=True)
app = Cmsearch(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
app.Parameters['-T'].on(cutoff)
to_remove = []
seqs_path = app._input_as_multiline_string(int_map.toFasta())
cm_path = app._tempfile_as_multiline_string(cm_file)
paths = [cm_path,seqs_path]
to_remove.append(cm_path)
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['--tabfile'].on(tmp_file)
res = app(paths)
search_results = list(CmsearchParser(res['SearchResults'].readlines()))
if search_results:
for i,line in enumerate(search_results):
label = line[1]
search_results[i][1]=int_keys.get(label,label)
res.cleanUp()
for f in to_remove:remove(f)
return search_results | python | def cmsearch_from_alignment(aln, structure_string, seqs, moltype, cutoff=0.0,\
refine=False,params=None):
"""Uses cmbuild to build a CM file, then cmsearch to find homologs.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs.
- refine: refine the alignment and realign before building the cm.
(Default=False)
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
cm_file, aln_file_string = cmbuild_from_alignment(aln, structure_string,\
refine=refine,return_alignment=True)
app = Cmsearch(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
app.Parameters['-T'].on(cutoff)
to_remove = []
seqs_path = app._input_as_multiline_string(int_map.toFasta())
cm_path = app._tempfile_as_multiline_string(cm_file)
paths = [cm_path,seqs_path]
to_remove.append(cm_path)
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['--tabfile'].on(tmp_file)
res = app(paths)
search_results = list(CmsearchParser(res['SearchResults'].readlines()))
if search_results:
for i,line in enumerate(search_results):
label = line[1]
search_results[i][1]=int_keys.get(label,label)
res.cleanUp()
for f in to_remove:remove(f)
return search_results | [
"def",
"cmsearch_from_alignment",
"(",
"aln",
",",
"structure_string",
",",
"seqs",
",",
"moltype",
",",
"cutoff",
"=",
"0.0",
",",
"refine",
"=",
"False",
",",
"params",
"=",
"None",
")",
":",
"#NOTE: Must degap seqs or Infernal well seg fault!",
"seqs",
"=",
"... | Uses cmbuild to build a CM file, then cmsearch to find homologs.
- aln: an Alignment object or something that can be used to construct
one. All sequences must be the same length.
- structure_string: vienna structure string representing the consensus
stucture for the sequences in aln. Must be the same length as the
alignment.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs.
- refine: refine the alignment and realign before building the cm.
(Default=False) | [
"Uses",
"cmbuild",
"to",
"build",
"a",
"CM",
"file",
"then",
"cmsearch",
"to",
"find",
"homologs",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/infernal.py#L1471-L1526 |
biocore/burrito-fillings | bfillings/infernal.py | cmsearch_from_file | def cmsearch_from_file(cm_file_path, seqs, moltype, cutoff=0.0, params=None):
"""Uses cmbuild to build a CM file, then cmsearch to find homologs.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to search sequences in seqs.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs.
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
app = Cmsearch(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
app.Parameters['-T'].on(cutoff)
seqs_path = app._input_as_multiline_string(int_map.toFasta())
paths = [cm_file_path,seqs_path]
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['--tabfile'].on(tmp_file)
res = app(paths)
search_results = list(CmsearchParser(res['SearchResults'].readlines()))
if search_results:
for i,line in enumerate(search_results):
label = line[1]
search_results[i][1]=int_keys.get(label,label)
res.cleanUp()
return search_results | python | def cmsearch_from_file(cm_file_path, seqs, moltype, cutoff=0.0, params=None):
"""Uses cmbuild to build a CM file, then cmsearch to find homologs.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to search sequences in seqs.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs.
"""
#NOTE: Must degap seqs or Infernal well seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
app = Cmsearch(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
app.Parameters['-T'].on(cutoff)
seqs_path = app._input_as_multiline_string(int_map.toFasta())
paths = [cm_file_path,seqs_path]
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['--tabfile'].on(tmp_file)
res = app(paths)
search_results = list(CmsearchParser(res['SearchResults'].readlines()))
if search_results:
for i,line in enumerate(search_results):
label = line[1]
search_results[i][1]=int_keys.get(label,label)
res.cleanUp()
return search_results | [
"def",
"cmsearch_from_file",
"(",
"cm_file_path",
",",
"seqs",
",",
"moltype",
",",
"cutoff",
"=",
"0.0",
",",
"params",
"=",
"None",
")",
":",
"#NOTE: Must degap seqs or Infernal well seg fault!",
"seqs",
"=",
"SequenceCollection",
"(",
"seqs",
",",
"MolType",
"=... | Uses cmbuild to build a CM file, then cmsearch to find homologs.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to search sequences in seqs.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs. | [
"Uses",
"cmbuild",
"to",
"build",
"a",
"CM",
"file",
"then",
"cmsearch",
"to",
"find",
"homologs",
"."
] | train | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/infernal.py#L1528-L1571 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.