id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
237,600
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_get_vqsr_training
def _get_vqsr_training(filter_type, vrn_files, gatk_type):
    """Build VQSR training resource arguments for SNP or INDEL recalibration.

    Argument spelling differs between GATK4 (`--resource`, `--max-gaussians`)
    and earlier GATK releases (`-resource`, `--maxGaussians`).
    """
    params = []
    is_gatk4 = gatk_type == "gatk4"
    for name, train_info, fname in _get_training_data(vrn_files)[filter_type]:
        if is_gatk4:
            params += ["--resource:%s,%s" % (name, train_info), fname]
            if filter_type == "INDEL":
                # Fewer gaussians for indels, which have fewer training sites.
                params += ["--max-gaussians", "4"]
        else:
            params += ["-resource:%s,VCF,%s" % (name, train_info), fname]
            if filter_type == "INDEL":
                params += ["--maxGaussians", "4"]
    return params
python
def _get_vqsr_training(filter_type, vrn_files, gatk_type): params = [] for name, train_info, fname in _get_training_data(vrn_files)[filter_type]: if gatk_type == "gatk4": params.extend(["--resource:%s,%s" % (name, train_info), fname]) if filter_type == "INDEL": params.extend(["--max-gaussians", "4"]) else: params.extend(["-resource:%s,VCF,%s" % (name, train_info), fname]) if filter_type == "INDEL": params.extend(["--maxGaussians", "4"]) return params
[ "def", "_get_vqsr_training", "(", "filter_type", ",", "vrn_files", ",", "gatk_type", ")", ":", "params", "=", "[", "]", "for", "name", ",", "train_info", ",", "fname", "in", "_get_training_data", "(", "vrn_files", ")", "[", "filter_type", "]", ":", "if", "...
Return parameters for VQSR training, handling SNPs and Indels.
[ "Return", "parameters", "for", "VQSR", "training", "handling", "SNPs", "and", "Indels", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L153-L166
237,601
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_get_vqsr_annotations
def _get_vqsr_annotations(filter_type, data):
    """Pick the annotations used for VQSR recalibration for a filter type.

    MQ and MQRankSum are deliberately excluded: issues reported with MQ and
    bwa-mem quality distribution cause intermittent VQSR failures:
    http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing
    http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
    """
    if filter_type != "SNP":
        assert filter_type == "INDEL"
    # Both SNP and INDEL use the same base annotation set.
    anns = ["QD", "FS", "ReadPosRankSum", "SOR"]
    if dd.get_coverage_interval(data) == "genome":
        anns.append("DP")
    return anns
python
def _get_vqsr_annotations(filter_type, data): if filter_type == "SNP": # MQ, MQRankSum anns = ["QD", "FS", "ReadPosRankSum", "SOR"] else: assert filter_type == "INDEL" # MQRankSum anns = ["QD", "FS", "ReadPosRankSum", "SOR"] if dd.get_coverage_interval(data) == "genome": anns += ["DP"] return anns
[ "def", "_get_vqsr_annotations", "(", "filter_type", ",", "data", ")", ":", "if", "filter_type", "==", "\"SNP\"", ":", "# MQ, MQRankSum", "anns", "=", "[", "\"QD\"", ",", "\"FS\"", ",", "\"ReadPosRankSum\"", ",", "\"SOR\"", "]", "else", ":", "assert", "filter_t...
Retrieve appropriate annotations to use for VQSR based on filter type. Issues reported with MQ and bwa-mem quality distribution, results in intermittent failures to use VQSR: http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
[ "Retrieve", "appropriate", "annotations", "to", "use", "for", "VQSR", "based", "on", "filter", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L168-L185
237,602
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_run_vqsr
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data):
    """Run variant quality score recalibration.

    Returns (recal_file, tranches_file), or (None, None) when GATK's
    VariantRecalibrator fails -- typically too few variants to train on.
    """
    cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92", "99.91",
               "99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"]
    if sensitivity_cutoff not in cutoffs:
        cutoffs.append(sensitivity_cutoff)
    # NOTE(review): these are strings, so this is a lexicographic sort; kept
    # as-is since the tranche ordering on the command line appears not to
    # matter -- confirm before changing to a numeric sort.
    cutoffs.sort()
    broad_runner = broad.runner_from_config(data["config"])
    gatk_type = broad_runner.gatk_type()
    base = utils.splitext_plus(in_file)[0]
    recal_file = ("%s-vqsrrecal.vcf.gz" % base) if gatk_type == "gatk4" else ("%s.recal" % base)
    tranches_file = "%s.tranches" % base
    plot_file = "%s-plots.R" % base
    if not utils.file_exists(recal_file):
        with file_transaction(data, recal_file, tranches_file, plot_file) as (tx_recal, tx_tranches, tx_plot_file):
            params = ["-T", "VariantRecalibrator",
                      "-R", ref_file,
                      "--mode", filter_type]
            # Output argument names differ between GATK4 and earlier releases.
            if gatk_type == "gatk4":
                params += ["--variant", in_file, "--output", tx_recal,
                           "--tranches-file", tx_tranches, "--rscript-file", tx_plot_file]
            else:
                params += ["--input", in_file, "--recal_file", tx_recal,
                           "--tranches_file", tx_tranches, "--rscript_file", tx_plot_file]
            params += _get_vqsr_training(filter_type, vrn_files, gatk_type)
            # User-supplied options replace the default tranche/annotation set.
            resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"])
            opts = resources.get("options", [])
            if not opts:
                for cutoff in cutoffs:
                    opts += ["-tranche", str(cutoff)]
                for a in _get_vqsr_annotations(filter_type, data):
                    opts += ["-an", a]
            params += opts
            cores = dd.get_cores(data)
            memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
            try:
                broad_runner.new_resources("gatk-vqsr")
                broad_runner.run_gatk(params, log_error=False, memscale=memscale, parallel_gc=True)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate. Can fail to run if not enough values are
                # present to train.
                return None, None
    if gatk_type == "gatk4":
        vcfutils.bgzip_and_index(recal_file, data["config"])
    return recal_file, tranches_file
python
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data): cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92", "99.91", "99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"] if sensitivity_cutoff not in cutoffs: cutoffs.append(sensitivity_cutoff) cutoffs.sort() broad_runner = broad.runner_from_config(data["config"]) gatk_type = broad_runner.gatk_type() base = utils.splitext_plus(in_file)[0] recal_file = ("%s-vqsrrecal.vcf.gz" % base) if gatk_type == "gatk4" else ("%s.recal" % base) tranches_file = "%s.tranches" % base plot_file = "%s-plots.R" % base if not utils.file_exists(recal_file): with file_transaction(data, recal_file, tranches_file, plot_file) as (tx_recal, tx_tranches, tx_plot_file): params = ["-T", "VariantRecalibrator", "-R", ref_file, "--mode", filter_type] if gatk_type == "gatk4": params += ["--variant", in_file, "--output", tx_recal, "--tranches-file", tx_tranches, "--rscript-file", tx_plot_file] else: params += ["--input", in_file, "--recal_file", tx_recal, "--tranches_file", tx_tranches, "--rscript_file", tx_plot_file] params += _get_vqsr_training(filter_type, vrn_files, gatk_type) resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"]) opts = resources.get("options", []) if not opts: for cutoff in cutoffs: opts += ["-tranche", str(cutoff)] for a in _get_vqsr_annotations(filter_type, data): opts += ["-an", a] params += opts cores = dd.get_cores(data) memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None try: broad_runner.new_resources("gatk-vqsr") broad_runner.run_gatk(params, log_error=False, memscale=memscale, parallel_gc=True) except: # Can fail to run if not enough values are present to train. return None, None if gatk_type == "gatk4": vcfutils.bgzip_and_index(recal_file, data["config"]) return recal_file, tranches_file
[ "def", "_run_vqsr", "(", "in_file", ",", "ref_file", ",", "vrn_files", ",", "sensitivity_cutoff", ",", "filter_type", ",", "data", ")", ":", "cutoffs", "=", "[", "\"100.0\"", ",", "\"99.99\"", ",", "\"99.98\"", ",", "\"99.97\"", ",", "\"99.96\"", ",", "\"99....
Run variant quality score recalibration.
[ "Run", "variant", "quality", "score", "recalibration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L187-L230
237,603
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_already_cutoff_filtered
def _already_cutoff_filtered(in_file, filter_type):
    """Check for a pre-existing cutoff-based filter file from a previous VQSR failure.
    """
    base = utils.splitext_plus(in_file)[0]
    return utils.file_exists("%s-filter%s.vcf.gz" % (base, filter_type))
python
def _already_cutoff_filtered(in_file, filter_type): filter_file = "%s-filter%s.vcf.gz" % (utils.splitext_plus(in_file)[0], filter_type) return utils.file_exists(filter_file)
[ "def", "_already_cutoff_filtered", "(", "in_file", ",", "filter_type", ")", ":", "filter_file", "=", "\"%s-filter%s.vcf.gz\"", "%", "(", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", ",", "filter_type", ")", "return", "utils", ".", "file_e...
Check if we have a pre-existing cutoff-based filter file from previous VQSR failure.
[ "Check", "if", "we", "have", "a", "pre", "-", "existing", "cutoff", "-", "based", "filter", "file", "from", "previous", "VQSR", "failure", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L234-L238
237,604
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_variant_filtration
def _variant_filtration(in_file, ref_file, vrn_files, data, filter_type, hard_filter_fn):
    """Filter SNP and indel variant calls using GATK best practice recommendations.

    Use cutoff-based soft filters if configuration indicates too little data or
    already finished a cutoff-based filtering step, otherwise try VQSR.
    """
    # Algorithms multiplied by number of input files to check for large enough sample sizes
    algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
    if (not config_utils.use_vqsr(algs, in_file) or
          _already_cutoff_filtered(in_file, filter_type)):
        # Fixed typo in log message: "filers" -> "filters"
        logger.info("Skipping VQSR, using cutoff-based filters: we don't have whole genome input data")
        return hard_filter_fn(in_file, data)
    elif not _have_training_data(vrn_files):
        logger.info("Skipping VQSR, using cutoff-based filters: genome build does not have sufficient training data")
        return hard_filter_fn(in_file, data)
    else:
        sensitivities = {"INDEL": "98.0", "SNP": "99.97"}
        recal_file, tranches_file = _run_vqsr(in_file, ref_file, vrn_files,
                                              sensitivities[filter_type], filter_type, data)
        if recal_file is None:  # VQSR failed
            logger.info("VQSR failed due to lack of training data. Using cutoff-based soft filtering.")
            return hard_filter_fn(in_file, data)
        else:
            return _apply_vqsr(in_file, ref_file, recal_file, tranches_file,
                               sensitivities[filter_type], filter_type, data)
python
def _variant_filtration(in_file, ref_file, vrn_files, data, filter_type, hard_filter_fn): # Algorithms multiplied by number of input files to check for large enough sample sizes algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1])) if (not config_utils.use_vqsr(algs, in_file) or _already_cutoff_filtered(in_file, filter_type)): logger.info("Skipping VQSR, using cutoff-based filers: we don't have whole genome input data") return hard_filter_fn(in_file, data) elif not _have_training_data(vrn_files): logger.info("Skipping VQSR, using cutoff-based filers: genome build does not have sufficient training data") return hard_filter_fn(in_file, data) else: sensitivities = {"INDEL": "98.0", "SNP": "99.97"} recal_file, tranches_file = _run_vqsr(in_file, ref_file, vrn_files, sensitivities[filter_type], filter_type, data) if recal_file is None: # VQSR failed logger.info("VQSR failed due to lack of training data. Using cutoff-based soft filtering.") return hard_filter_fn(in_file, data) else: return _apply_vqsr(in_file, ref_file, recal_file, tranches_file, sensitivities[filter_type], filter_type, data)
[ "def", "_variant_filtration", "(", "in_file", ",", "ref_file", ",", "vrn_files", ",", "data", ",", "filter_type", ",", "hard_filter_fn", ")", ":", "# Algorithms multiplied by number of input files to check for large enough sample sizes", "algs", "=", "[", "data", "[", "\"...
Filter SNP and indel variant calls using GATK best practice recommendations. Use cutoff-based soft filters if configuration indicates too little data or already finished a cutoff-based filtering step, otherwise try VQSR.
[ "Filter", "SNP", "and", "indel", "variant", "calls", "using", "GATK", "best", "practice", "recommendations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L240-L265
237,605
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
gatk_remove_missingalt
def gatk_remove_missingalt(in_file, data):
    """
    GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks
    downstream tools, this filters those out.
    """
    # Name the output after the input, dropping the .vcf.gz extension.
    base = in_file.split('.vcf.gz')[0]
    out_file = "%s-nomissingalt%s" % (base, '.vcf.gz')
    if utils.file_exists(out_file):
        return out_file
    # Write an uncompressed VCF first; bgzip/tabix-index it on return.
    no_gzip_out = out_file.replace(".vcf.gz", ".vcf")
    # NOTE(review): other call sites in this file pass `data` as the first
    # argument to file_transaction; confirm the one-argument form is intended.
    with file_transaction(no_gzip_out) as tx_out_file:
        with utils.open_gzipsafe(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
            for line in in_handle:
                # remove_missingalt returns a falsy value for lines to drop --
                # presumably records whose ALT is missing (TODO confirm).
                line = remove_missingalt(line)
                if line:
                    out_handle.write(line)
    return vcfutils.bgzip_and_index(no_gzip_out, data["config"])
python
def gatk_remove_missingalt(in_file, data): base = in_file.split('.vcf.gz')[0] out_file = "%s-nomissingalt%s" % (base, '.vcf.gz') if utils.file_exists(out_file): return out_file no_gzip_out = out_file.replace(".vcf.gz", ".vcf") with file_transaction(no_gzip_out) as tx_out_file: with utils.open_gzipsafe(in_file) as in_handle, open(tx_out_file, "w") as out_handle: for line in in_handle: line = remove_missingalt(line) if line: out_handle.write(line) return vcfutils.bgzip_and_index(no_gzip_out, data["config"])
[ "def", "gatk_remove_missingalt", "(", "in_file", ",", "data", ")", ":", "base", "=", "in_file", ".", "split", "(", "'.vcf.gz'", ")", "[", "0", "]", "out_file", "=", "\"%s-nomissingalt%s\"", "%", "(", "base", ",", "'.vcf.gz'", ")", "if", "utils", ".", "fi...
GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks downstream tools, this filters those out.
[ "GATK", "4", ".", "1", ".", "0", ".", "0", "outputs", "variants", "that", "have", "missing", "ALTs", "which", "breaks", "downstream", "tools", "this", "filters", "those", "out", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L276-L292
237,606
bcbio/bcbio-nextgen
bcbio/rnaseq/cufflinks.py
strand_unknown
def strand_unknown(db, transcript):
    """
    for unstranded data with novel transcripts single exon genes will have no
    strand information. single exon novel genes are also a source of noise in
    the Cufflinks assembly so this removes them
    """
    # A "." strand on the first child feature marks unknown strandedness.
    first_feature = list(db.children(transcript))[0]
    return first_feature.strand == "."
python
def strand_unknown(db, transcript): features = list(db.children(transcript)) strand = features[0].strand if strand == ".": return True else: return False
[ "def", "strand_unknown", "(", "db", ",", "transcript", ")", ":", "features", "=", "list", "(", "db", ".", "children", "(", "transcript", ")", ")", "strand", "=", "features", "[", "0", "]", ".", "strand", "if", "strand", "==", "\".\"", ":", "return", ...
for unstranded data with novel transcripts single exon genes will have no strand information. single exon novel genes are also a source of noise in the Cufflinks assembly so this removes them
[ "for", "unstranded", "data", "with", "novel", "transcripts", "single", "exon", "genes", "will", "have", "no", "strand", "information", ".", "single", "exon", "novel", "genes", "are", "also", "a", "source", "of", "noise", "in", "the", "Cufflinks", "assembly", ...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L156-L167
237,607
bcbio/bcbio-nextgen
bcbio/rnaseq/cufflinks.py
fix_cufflinks_attributes
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None):
    """
    replace the cufflinks gene_id and transcript_id with the gene_id and
    transcript_id from ref_gtf, where available
    """
    base, ext = os.path.splitext(merged_gtf)
    fixed = out_file if out_file else base + ".clean.fixed" + ext
    if file_exists(fixed):
        return fixed
    ref_db = gtf.get_gtf_db(ref_gtf)
    merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True)
    # Map reference transcript id -> reference gene id.
    ref_tid_to_gid = {}
    for gene in ref_db.features_of_type('gene'):
        for transcript in ref_db.children(gene, level=1):
            ref_tid_to_gid[transcript.id] = gene.id
    # Map cufflinks transcript id -> cufflinks gene id, and cufflinks
    # transcript id -> original reference transcript id (the "oId" tag
    # cuffmerge attaches to features it matched against the reference).
    ctid_to_cgid = {}
    ctid_to_oid = {}
    for gene in merged_db.features_of_type('gene'):
        for transcript in merged_db.children(gene, level=1):
            ctid_to_cgid[transcript.id] = gene.id
            feature = list(merged_db.children(transcript))[0]
            oid = feature.attributes.get("oId", [None])[0]
            if oid:
                ctid_to_oid[transcript.id] = oid
    # Chain the two maps: cufflinks gene id -> reference gene id via oId.
    cgid_to_gid = {}
    for ctid, oid in ctid_to_oid.items():
        cgid = ctid_to_cgid.get(ctid, None)
        # NOTE(review): `oid` is re-fetched here although the loop already
        # bound it; redundant but harmless.
        oid = ctid_to_oid.get(ctid, None)
        gid = ref_tid_to_gid.get(oid, None) if oid else None
        if cgid and gid:
            cgid_to_gid[cgid] = gid
    # Rewrite every feature, swapping in reference ids where a mapping
    # exists and dropping the cuffmerge bookkeeping attributes.
    with file_transaction(data, fixed) as tmp_fixed_file:
        with open(tmp_fixed_file, "w") as out_handle:
            for gene in merged_db.features_of_type('gene'):
                for transcript in merged_db.children(gene, level=1):
                    for feature in merged_db.children(transcript):
                        cgid = feature.attributes.get("gene_id", [None])[0]
                        gid = cgid_to_gid.get(cgid, None)
                        ctid = None
                        if gid:
                            feature.attributes["gene_id"][0] = gid
                            # Only remap the transcript id when the gene id
                            # itself was remapped.
                            ctid = feature.attributes.get("transcript_id", [None])[0]
                        tid = ctid_to_oid.get(ctid, None)
                        if tid:
                            feature.attributes["transcript_id"][0] = tid
                        if "nearest_ref" in feature.attributes:
                            del feature.attributes["nearest_ref"]
                        if "oId" in feature.attributes:
                            del feature.attributes["oId"]
                        out_handle.write(str(feature) + "\n")
    return fixed
python
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None): base, ext = os.path.splitext(merged_gtf) fixed = out_file if out_file else base + ".clean.fixed" + ext if file_exists(fixed): return fixed ref_db = gtf.get_gtf_db(ref_gtf) merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True) ref_tid_to_gid = {} for gene in ref_db.features_of_type('gene'): for transcript in ref_db.children(gene, level=1): ref_tid_to_gid[transcript.id] = gene.id ctid_to_cgid = {} ctid_to_oid = {} for gene in merged_db.features_of_type('gene'): for transcript in merged_db.children(gene, level=1): ctid_to_cgid[transcript.id] = gene.id feature = list(merged_db.children(transcript))[0] oid = feature.attributes.get("oId", [None])[0] if oid: ctid_to_oid[transcript.id] = oid cgid_to_gid = {} for ctid, oid in ctid_to_oid.items(): cgid = ctid_to_cgid.get(ctid, None) oid = ctid_to_oid.get(ctid, None) gid = ref_tid_to_gid.get(oid, None) if oid else None if cgid and gid: cgid_to_gid[cgid] = gid with file_transaction(data, fixed) as tmp_fixed_file: with open(tmp_fixed_file, "w") as out_handle: for gene in merged_db.features_of_type('gene'): for transcript in merged_db.children(gene, level=1): for feature in merged_db.children(transcript): cgid = feature.attributes.get("gene_id", [None])[0] gid = cgid_to_gid.get(cgid, None) ctid = None if gid: feature.attributes["gene_id"][0] = gid ctid = feature.attributes.get("transcript_id", [None])[0] tid = ctid_to_oid.get(ctid, None) if tid: feature.attributes["transcript_id"][0] = tid if "nearest_ref" in feature.attributes: del feature.attributes["nearest_ref"] if "oId" in feature.attributes: del feature.attributes["oId"] out_handle.write(str(feature) + "\n") return fixed
[ "def", "fix_cufflinks_attributes", "(", "ref_gtf", ",", "merged_gtf", ",", "data", ",", "out_file", "=", "None", ")", ":", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "merged_gtf", ")", "fixed", "=", "out_file", "if", "out_file", "e...
replace the cufflinks gene_id and transcript_id with the gene_id and transcript_id from ref_gtf, where available
[ "replace", "the", "cufflinks", "gene_id", "and", "transcript_id", "with", "the", "gene_id", "and", "transcript_id", "from", "ref_gtf", "where", "available" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L179-L234
237,608
bcbio/bcbio-nextgen
bcbio/rnaseq/cufflinks.py
merge
def merge(assembled_gtfs, ref_file, gtf_file, num_cores, data):
    """
    run cuffmerge on a set of assembled GTF files

    The manifest of input GTFs is now created only when cuffmerge actually
    runs and is removed afterwards; previously the NamedTemporaryFile was
    created unconditionally (even on cache hits) and never deleted.
    """
    out_dir = os.path.join("assembly", "cuffmerge")
    merged_file = os.path.join(out_dir, "merged.gtf")
    out_file = os.path.join(out_dir, "assembled.gtf")
    if file_exists(out_file):
        return out_file
    if not file_exists(merged_file):
        # cuffmerge takes a file listing one assembled GTF per line.
        assembled_file = tempfile.NamedTemporaryFile(delete=False).name
        try:
            with open(assembled_file, "w") as temp_handle:
                for assembled in assembled_gtfs:
                    temp_handle.write(assembled + "\n")
            with file_transaction(data, out_dir) as tmp_out_dir:
                cmd = ("cuffmerge -o {tmp_out_dir} --ref-gtf {gtf_file} "
                       "--num-threads {num_cores} --ref-sequence {ref_file} "
                       "{assembled_file}")
                cmd = cmd.format(**locals())
                message = ("Merging the following transcript assemblies with "
                           "Cuffmerge: %s" % ", ".join(assembled_gtfs))
                do.run(cmd, message)
        finally:
            # Clean up the manifest whether or not cuffmerge succeeded.
            if os.path.exists(assembled_file):
                os.remove(assembled_file)
    clean, _ = clean_assembly(merged_file)
    fixed = fix_cufflinks_attributes(gtf_file, clean, data)
    classified = annotate_gtf.annotate_novel_coding(fixed, gtf_file, ref_file, data)
    filtered = annotate_gtf.cleanup_transcripts(classified, gtf_file, ref_file)
    shutil.move(filtered, out_file)
    return out_file
python
def merge(assembled_gtfs, ref_file, gtf_file, num_cores, data): assembled_file = tempfile.NamedTemporaryFile(delete=False).name with open(assembled_file, "w") as temp_handle: for assembled in assembled_gtfs: temp_handle.write(assembled + "\n") out_dir = os.path.join("assembly", "cuffmerge") merged_file = os.path.join(out_dir, "merged.gtf") out_file = os.path.join(out_dir, "assembled.gtf") if file_exists(out_file): return out_file if not file_exists(merged_file): with file_transaction(data, out_dir) as tmp_out_dir: cmd = ("cuffmerge -o {tmp_out_dir} --ref-gtf {gtf_file} " "--num-threads {num_cores} --ref-sequence {ref_file} " "{assembled_file}") cmd = cmd.format(**locals()) message = ("Merging the following transcript assemblies with " "Cuffmerge: %s" % ", ".join(assembled_gtfs)) do.run(cmd, message) clean, _ = clean_assembly(merged_file) fixed = fix_cufflinks_attributes(gtf_file, clean, data) classified = annotate_gtf.annotate_novel_coding(fixed, gtf_file, ref_file, data) filtered = annotate_gtf.cleanup_transcripts(classified, gtf_file, ref_file) shutil.move(filtered, out_file) return out_file
[ "def", "merge", "(", "assembled_gtfs", ",", "ref_file", ",", "gtf_file", ",", "num_cores", ",", "data", ")", ":", "assembled_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", ".", "name", "with", "open", "(", "assembled_fil...
run cuffmerge on a set of assembled GTF files
[ "run", "cuffmerge", "on", "a", "set", "of", "assembled", "GTF", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L237-L265
237,609
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_vcf_info
def _vcf_info(start, end, mate_id, info=None): """Return breakend information line with mate and imprecise location. """ out = "SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}".format( mate=mate_id, size=end-start) if info is not None: extra_info = ";".join("{0}={1}".format(k, v) for k, v in info.iteritems()) out = "{0};{1}".format(out, extra_info) return out
python
def _vcf_info(start, end, mate_id, info=None): out = "SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}".format( mate=mate_id, size=end-start) if info is not None: extra_info = ";".join("{0}={1}".format(k, v) for k, v in info.iteritems()) out = "{0};{1}".format(out, extra_info) return out
[ "def", "_vcf_info", "(", "start", ",", "end", ",", "mate_id", ",", "info", "=", "None", ")", ":", "out", "=", "\"SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}\"", ".", "format", "(", "mate", "=", "mate_id", ",", "size", "=", "end", "-", "start", ")", "...
Return breakend information line with mate and imprecise location.
[ "Return", "breakend", "information", "line", "with", "mate", "and", "imprecise", "location", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L47-L55
237,610
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_vcf_alt
def _vcf_alt(base, other_chr, other_pos, isrc, is_first): """Create ALT allele line in VCF 4.1 format associating with other paired end. """ if is_first: pipe = "[" if isrc else "]" out_str = "{base}{pipe}{chr}:{pos}{pipe}" else: pipe = "]" if isrc else "[" out_str = "{pipe}{chr}:{pos}{pipe}{base}" return out_str.format(pipe=pipe, chr=other_chr, pos=other_pos + 1, base=base)
python
def _vcf_alt(base, other_chr, other_pos, isrc, is_first): if is_first: pipe = "[" if isrc else "]" out_str = "{base}{pipe}{chr}:{pos}{pipe}" else: pipe = "]" if isrc else "[" out_str = "{pipe}{chr}:{pos}{pipe}{base}" return out_str.format(pipe=pipe, chr=other_chr, pos=other_pos + 1, base=base)
[ "def", "_vcf_alt", "(", "base", ",", "other_chr", ",", "other_pos", ",", "isrc", ",", "is_first", ")", ":", "if", "is_first", ":", "pipe", "=", "\"[\"", "if", "isrc", "else", "\"]\"", "out_str", "=", "\"{base}{pipe}{chr}:{pos}{pipe}\"", "else", ":", "pipe", ...
Create ALT allele line in VCF 4.1 format associating with other paired end.
[ "Create", "ALT", "allele", "line", "in", "VCF", "4", ".", "1", "format", "associating", "with", "other", "paired", "end", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L57-L67
237,611
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_breakend_orientation
def _breakend_orientation(strand1, strand2): """Convert BEDPE strand representation of breakpoints into VCF. | strand1 | strand2 | VCF | +----------+----------+--------------+ | + | - | t[p[ ]p]t | | + | + | t]p] t]p] | | - | - | [p[t [p[t | | - | + | ]p]t t[p[ | """ EndOrientation = namedtuple("EndOrientation", ["is_first1", "is_rc1", "is_first2", "is_rc2"]) if strand1 == "+" and strand2 == "-": return EndOrientation(True, True, False, True) elif strand1 == "+" and strand2 == "+": return EndOrientation(True, False, True, False) elif strand1 == "-" and strand2 == "-": return EndOrientation(False, False, False, False) elif strand1 == "-" and strand2 == "+": return EndOrientation(False, True, True, True) else: raise ValueError("Unexpected strand pairing: {0} {1}".format( strand1, strand2))
python
def _breakend_orientation(strand1, strand2): EndOrientation = namedtuple("EndOrientation", ["is_first1", "is_rc1", "is_first2", "is_rc2"]) if strand1 == "+" and strand2 == "-": return EndOrientation(True, True, False, True) elif strand1 == "+" and strand2 == "+": return EndOrientation(True, False, True, False) elif strand1 == "-" and strand2 == "-": return EndOrientation(False, False, False, False) elif strand1 == "-" and strand2 == "+": return EndOrientation(False, True, True, True) else: raise ValueError("Unexpected strand pairing: {0} {1}".format( strand1, strand2))
[ "def", "_breakend_orientation", "(", "strand1", ",", "strand2", ")", ":", "EndOrientation", "=", "namedtuple", "(", "\"EndOrientation\"", ",", "[", "\"is_first1\"", ",", "\"is_rc1\"", ",", "\"is_first2\"", ",", "\"is_rc2\"", "]", ")", "if", "strand1", "==", "\"+...
Convert BEDPE strand representation of breakpoints into VCF. | strand1 | strand2 | VCF | +----------+----------+--------------+ | + | - | t[p[ ]p]t | | + | + | t]p] t]p] | | - | - | [p[t [p[t | | - | + | ]p]t t[p[ |
[ "Convert", "BEDPE", "strand", "representation", "of", "breakpoints", "into", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L69-L91
237,612
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
build_vcf_parts
def build_vcf_parts(feature, genome_2bit, info=None):
    """Convert BedPe feature information into VCF part representation.

    Each feature will have two VCF lines for each side of the breakpoint.
    """
    # Reference base at each breakpoint, pulled from the 2bit genome.
    ref1 = genome_2bit[feature.chrom1].get(
        feature.start1, feature.start1 + 1).upper()
    first_id = "hydra{0}a".format(feature.name)
    ref2 = genome_2bit[feature.chrom2].get(
        feature.start2, feature.start2 + 1).upper()
    second_id = "hydra{0}b".format(feature.name)
    orientation = _breakend_orientation(feature.strand1, feature.strand2)
    # Each side's ALT points at the other side; each side's INFO names the
    # other side's id as its mate.
    first = VcfLine(feature.chrom1, feature.start1, first_id, ref1,
                    _vcf_alt(ref1, feature.chrom2, feature.start2,
                             orientation.is_rc1, orientation.is_first1),
                    _vcf_info(feature.start1, feature.end1, second_id, info))
    second = VcfLine(feature.chrom2, feature.start2, second_id, ref2,
                     _vcf_alt(ref2, feature.chrom1, feature.start1,
                              orientation.is_rc2, orientation.is_first2),
                     _vcf_info(feature.start2, feature.end2, first_id, info))
    return first, second
python
def build_vcf_parts(feature, genome_2bit, info=None): base1 = genome_2bit[feature.chrom1].get( feature.start1, feature.start1 + 1).upper() id1 = "hydra{0}a".format(feature.name) base2 = genome_2bit[feature.chrom2].get( feature.start2, feature.start2 + 1).upper() id2 = "hydra{0}b".format(feature.name) orientation = _breakend_orientation(feature.strand1, feature.strand2) return (VcfLine(feature.chrom1, feature.start1, id1, base1, _vcf_alt(base1, feature.chrom2, feature.start2, orientation.is_rc1, orientation.is_first1), _vcf_info(feature.start1, feature.end1, id2, info)), VcfLine(feature.chrom2, feature.start2, id2, base2, _vcf_alt(base2, feature.chrom1, feature.start1, orientation.is_rc2, orientation.is_first2), _vcf_info(feature.start2, feature.end2, id1, info)))
[ "def", "build_vcf_parts", "(", "feature", ",", "genome_2bit", ",", "info", "=", "None", ")", ":", "base1", "=", "genome_2bit", "[", "feature", ".", "chrom1", "]", ".", "get", "(", "feature", ".", "start1", ",", "feature", ".", "start1", "+", "1", ")", ...
Convert BedPe feature information into VCF part representation. Each feature will have two VCF lines for each side of the breakpoint.
[ "Convert", "BedPe", "feature", "information", "into", "VCF", "part", "representation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L95-L114
237,613
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
build_vcf_deletion
def build_vcf_deletion(x, genome_2bit):
    """Provide representation of deletion from BedPE breakpoints.
    """
    ref_base = genome_2bit[x.chrom1].get(x.start1, x.start1 + 1).upper()
    variant_id = "hydra{0}".format(x.name)
    del_info = _vcf_single_end_info(x, "DEL", True)
    return VcfLine(x.chrom1, x.start1, variant_id, ref_base, "<DEL>", del_info)
python
def build_vcf_deletion(x, genome_2bit): base1 = genome_2bit[x.chrom1].get(x.start1, x.start1 + 1).upper() id1 = "hydra{0}".format(x.name) return VcfLine(x.chrom1, x.start1, id1, base1, "<DEL>", _vcf_single_end_info(x, "DEL", True))
[ "def", "build_vcf_deletion", "(", "x", ",", "genome_2bit", ")", ":", "base1", "=", "genome_2bit", "[", "x", ".", "chrom1", "]", ".", "get", "(", "x", ".", "start1", ",", "x", ".", "start1", "+", "1", ")", ".", "upper", "(", ")", "id1", "=", "\"hy...
Provide representation of deletion from BedPE breakpoints.
[ "Provide", "representation", "of", "deletion", "from", "BedPE", "breakpoints", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L136-L142
237,614
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
build_vcf_inversion
def build_vcf_inversion(x1, x2, genome_2bit): """Provide representation of inversion from BedPE breakpoints. """ id1 = "hydra{0}".format(x1.name) start_coords = sorted([x1.start1, x1.end1, x2.start1, x2.end1]) end_coords = sorted([x1.start2, x1.end2, x2.start2, x2.start2]) start_pos = (start_coords[1] + start_coords[2]) // 2 end_pos = (end_coords[1] + end_coords[2]) // 2 base1 = genome_2bit[x1.chrom1].get(start_pos, start_pos + 1).upper() info = "SVTYPE=INV;IMPRECISE;CIPOS={cip1},{cip2};CIEND={cie1},{cie2};" \ "END={end};SVLEN={length}".format(cip1=start_pos - start_coords[0], cip2=start_coords[-1] - start_pos, cie1=end_pos - end_coords[0], cie2=end_coords[-1] - end_pos, end=end_pos, length=end_pos-start_pos) return VcfLine(x1.chrom1, start_pos, id1, base1, "<INV>", info)
python
def build_vcf_inversion(x1, x2, genome_2bit): id1 = "hydra{0}".format(x1.name) start_coords = sorted([x1.start1, x1.end1, x2.start1, x2.end1]) end_coords = sorted([x1.start2, x1.end2, x2.start2, x2.start2]) start_pos = (start_coords[1] + start_coords[2]) // 2 end_pos = (end_coords[1] + end_coords[2]) // 2 base1 = genome_2bit[x1.chrom1].get(start_pos, start_pos + 1).upper() info = "SVTYPE=INV;IMPRECISE;CIPOS={cip1},{cip2};CIEND={cie1},{cie2};" \ "END={end};SVLEN={length}".format(cip1=start_pos - start_coords[0], cip2=start_coords[-1] - start_pos, cie1=end_pos - end_coords[0], cie2=end_coords[-1] - end_pos, end=end_pos, length=end_pos-start_pos) return VcfLine(x1.chrom1, start_pos, id1, base1, "<INV>", info)
[ "def", "build_vcf_inversion", "(", "x1", ",", "x2", ",", "genome_2bit", ")", ":", "id1", "=", "\"hydra{0}\"", ".", "format", "(", "x1", ".", "name", ")", "start_coords", "=", "sorted", "(", "[", "x1", ".", "start1", ",", "x1", ".", "end1", ",", "x2",...
Provide representation of inversion from BedPE breakpoints.
[ "Provide", "representation", "of", "inversion", "from", "BedPE", "breakpoints", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L166-L182
237,615
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
hydra_parser
def hydra_parser(in_file, options=None): """Parse hydra input file into namedtuple of values. """ if options is None: options = {} BedPe = namedtuple('BedPe', ["chrom1", "start1", "end1", "chrom2", "start2", "end2", "name", "strand1", "strand2", "support"]) with open(in_file) as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") for line in reader: cur = BedPe(line[0], int(line[1]), int(line[2]), line[3], int(line[4]), int(line[5]), line[6], line[8], line[9], float(line[18])) if cur.support >= options.get("min_support", 0): yield cur
python
def hydra_parser(in_file, options=None): if options is None: options = {} BedPe = namedtuple('BedPe', ["chrom1", "start1", "end1", "chrom2", "start2", "end2", "name", "strand1", "strand2", "support"]) with open(in_file) as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") for line in reader: cur = BedPe(line[0], int(line[1]), int(line[2]), line[3], int(line[4]), int(line[5]), line[6], line[8], line[9], float(line[18])) if cur.support >= options.get("min_support", 0): yield cur
[ "def", "hydra_parser", "(", "in_file", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "BedPe", "=", "namedtuple", "(", "'BedPe'", ",", "[", "\"chrom1\"", ",", "\"start1\"", ",", "\"end1\"", ",", "\"...
Parse hydra input file into namedtuple of values.
[ "Parse", "hydra", "input", "file", "into", "namedtuple", "of", "values", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L198-L214
237,616
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_cluster_by
def _cluster_by(end_iter, attr1, attr2, cluster_distance): """Cluster breakends by specified attributes. """ ClusterInfo = namedtuple("ClusterInfo", ["chroms", "clusters", "lookup"]) chr_clusters = {} chroms = [] brends_by_id = {} for brend in end_iter: if not chr_clusters.has_key(brend.chrom1): chroms.append(brend.chrom1) chr_clusters[brend.chrom1] = ClusterTree(cluster_distance, 1) brends_by_id[int(brend.name)] = brend chr_clusters[brend.chrom1].insert(getattr(brend, attr1), getattr(brend, attr2), int(brend.name)) return ClusterInfo(chroms, chr_clusters, brends_by_id)
python
def _cluster_by(end_iter, attr1, attr2, cluster_distance): ClusterInfo = namedtuple("ClusterInfo", ["chroms", "clusters", "lookup"]) chr_clusters = {} chroms = [] brends_by_id = {} for brend in end_iter: if not chr_clusters.has_key(brend.chrom1): chroms.append(brend.chrom1) chr_clusters[brend.chrom1] = ClusterTree(cluster_distance, 1) brends_by_id[int(brend.name)] = brend chr_clusters[brend.chrom1].insert(getattr(brend, attr1), getattr(brend, attr2), int(brend.name)) return ClusterInfo(chroms, chr_clusters, brends_by_id)
[ "def", "_cluster_by", "(", "end_iter", ",", "attr1", ",", "attr2", ",", "cluster_distance", ")", ":", "ClusterInfo", "=", "namedtuple", "(", "\"ClusterInfo\"", ",", "[", "\"chroms\"", ",", "\"clusters\"", ",", "\"lookup\"", "]", ")", "chr_clusters", "=", "{", ...
Cluster breakends by specified attributes.
[ "Cluster", "breakends", "by", "specified", "attributes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L216-L231
237,617
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_calculate_cluster_distance
def _calculate_cluster_distance(end_iter): """Compute allowed distance for clustering based on end confidence intervals. """ out = [] sizes = [] for x in end_iter: out.append(x) sizes.append(x.end1 - x.start1) sizes.append(x.end2 - x.start2) distance = sum(sizes) // len(sizes) return distance, out
python
def _calculate_cluster_distance(end_iter): out = [] sizes = [] for x in end_iter: out.append(x) sizes.append(x.end1 - x.start1) sizes.append(x.end2 - x.start2) distance = sum(sizes) // len(sizes) return distance, out
[ "def", "_calculate_cluster_distance", "(", "end_iter", ")", ":", "out", "=", "[", "]", "sizes", "=", "[", "]", "for", "x", "in", "end_iter", ":", "out", ".", "append", "(", "x", ")", "sizes", ".", "append", "(", "x", ".", "end1", "-", "x", ".", "...
Compute allowed distance for clustering based on end confidence intervals.
[ "Compute", "allowed", "distance", "for", "clustering", "based", "on", "end", "confidence", "intervals", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L233-L243
237,618
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
group_hydra_breakends
def group_hydra_breakends(end_iter): """Group together hydra breakends with overlapping ends. This provides a way to identify inversions, translocations and insertions present in hydra break point ends. We cluster together the endpoints and return together any items with closely oriented pairs. This helps in describing more complex rearrangement events. """ cluster_distance, all_ends = _calculate_cluster_distance(end_iter) first_cluster = _cluster_by(all_ends, "start1", "end1", cluster_distance) for chrom in first_cluster.chroms: for _, _, brends in first_cluster.clusters[chrom].getregions(): if len(brends) == 1: yield [first_cluster.lookup[brends[0]]] else: second_cluster = _cluster_by([first_cluster.lookup[x] for x in brends], "start2", "end2", cluster_distance) for chrom2 in second_cluster.chroms: for _, _, brends in second_cluster.clusters[chrom].getregions(): yield [second_cluster.lookup[x] for x in brends]
python
def group_hydra_breakends(end_iter): cluster_distance, all_ends = _calculate_cluster_distance(end_iter) first_cluster = _cluster_by(all_ends, "start1", "end1", cluster_distance) for chrom in first_cluster.chroms: for _, _, brends in first_cluster.clusters[chrom].getregions(): if len(brends) == 1: yield [first_cluster.lookup[brends[0]]] else: second_cluster = _cluster_by([first_cluster.lookup[x] for x in brends], "start2", "end2", cluster_distance) for chrom2 in second_cluster.chroms: for _, _, brends in second_cluster.clusters[chrom].getregions(): yield [second_cluster.lookup[x] for x in brends]
[ "def", "group_hydra_breakends", "(", "end_iter", ")", ":", "cluster_distance", ",", "all_ends", "=", "_calculate_cluster_distance", "(", "end_iter", ")", "first_cluster", "=", "_cluster_by", "(", "all_ends", ",", "\"start1\"", ",", "\"end1\"", ",", "cluster_distance",...
Group together hydra breakends with overlapping ends. This provides a way to identify inversions, translocations and insertions present in hydra break point ends. We cluster together the endpoints and return together any items with closely oriented pairs. This helps in describing more complex rearrangement events.
[ "Group", "together", "hydra", "breakends", "with", "overlapping", "ends", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L245-L264
237,619
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_write_vcf_header
def _write_vcf_header(out_handle): """Write VCF header information for Hydra structural variant. """ def w(line): out_handle.write("{0}\n".format(line)) w('##fileformat=VCFv4.1') w('##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">') w('##INFO=<ID=END,Number=1,Type=Integer,' 'Description="End position of the variant described in this record">') w('##INFO=<ID=CIPOS,Number=2,Type=Integer,' 'Description="Confidence interval around POS for imprecise variants">') w('##INFO=<ID=CIEND,Number=2,Type=Integer,' 'Description="Confidence interval around END for imprecise variants">') w('##INFO=<ID=SVLEN,Number=.,Type=Integer,' 'Description="Difference in length between REF and ALT alleles">') w('##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">') w('##INFO=<ID=MATEID,Number=.,Type=String,Description="ID of mate breakends">') w('##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of event associated to breakend">') w('##ALT=<ID=DEL,Description="Deletion">') w('##ALT=<ID=INV,Description="Inversion">') w('##ALT=<ID=DUP,Description="Duplication">') w('##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">') w('##source=hydra') w("#" + "\t".join(["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO"]))
python
def _write_vcf_header(out_handle): def w(line): out_handle.write("{0}\n".format(line)) w('##fileformat=VCFv4.1') w('##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">') w('##INFO=<ID=END,Number=1,Type=Integer,' 'Description="End position of the variant described in this record">') w('##INFO=<ID=CIPOS,Number=2,Type=Integer,' 'Description="Confidence interval around POS for imprecise variants">') w('##INFO=<ID=CIEND,Number=2,Type=Integer,' 'Description="Confidence interval around END for imprecise variants">') w('##INFO=<ID=SVLEN,Number=.,Type=Integer,' 'Description="Difference in length between REF and ALT alleles">') w('##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">') w('##INFO=<ID=MATEID,Number=.,Type=String,Description="ID of mate breakends">') w('##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of event associated to breakend">') w('##ALT=<ID=DEL,Description="Deletion">') w('##ALT=<ID=INV,Description="Inversion">') w('##ALT=<ID=DUP,Description="Duplication">') w('##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">') w('##source=hydra') w("#" + "\t".join(["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO"]))
[ "def", "_write_vcf_header", "(", "out_handle", ")", ":", "def", "w", "(", "line", ")", ":", "out_handle", ".", "write", "(", "\"{0}\\n\"", ".", "format", "(", "line", ")", ")", "w", "(", "'##fileformat=VCFv4.1'", ")", "w", "(", "'##INFO=<ID=IMPRECISE,Number=...
Write VCF header information for Hydra structural variant.
[ "Write", "VCF", "header", "information", "for", "Hydra", "structural", "variant", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L268-L291
237,620
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_write_vcf_breakend
def _write_vcf_breakend(brend, out_handle): """Write out a single VCF line with breakpoint information. """ out_handle.write("{0}\n".format("\t".join(str(x) for x in [brend.chrom, brend.pos + 1, brend.id, brend.ref, brend.alt, ".", "PASS", brend.info])))
python
def _write_vcf_breakend(brend, out_handle): out_handle.write("{0}\n".format("\t".join(str(x) for x in [brend.chrom, brend.pos + 1, brend.id, brend.ref, brend.alt, ".", "PASS", brend.info])))
[ "def", "_write_vcf_breakend", "(", "brend", ",", "out_handle", ")", ":", "out_handle", ".", "write", "(", "\"{0}\\n\"", ".", "format", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "[", "brend", ".", "chrom", ",", "brend", ...
Write out a single VCF line with breakpoint information.
[ "Write", "out", "a", "single", "VCF", "line", "with", "breakpoint", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L293-L298
237,621
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_get_vcf_breakends
def _get_vcf_breakends(hydra_file, genome_2bit, options=None): """Parse BEDPE input, yielding VCF ready breakends. """ if options is None: options = {} for features in group_hydra_breakends(hydra_parser(hydra_file, options)): if len(features) == 1 and is_deletion(features[0], options): yield build_vcf_deletion(features[0], genome_2bit) elif len(features) == 1 and is_tandem_dup(features[0], options): yield build_tandem_deletion(features[0], genome_2bit) elif len(features) == 2 and is_inversion(*features): yield build_vcf_inversion(features[0], features[1], genome_2bit) elif len(features) == 2 and is_translocation(*features): info = get_translocation_info(features[0], features[1]) for feature in features: for brend in build_vcf_parts(feature, genome_2bit, info): yield brend else: for feature in features: for brend in build_vcf_parts(feature, genome_2bit): yield brend
python
def _get_vcf_breakends(hydra_file, genome_2bit, options=None): if options is None: options = {} for features in group_hydra_breakends(hydra_parser(hydra_file, options)): if len(features) == 1 and is_deletion(features[0], options): yield build_vcf_deletion(features[0], genome_2bit) elif len(features) == 1 and is_tandem_dup(features[0], options): yield build_tandem_deletion(features[0], genome_2bit) elif len(features) == 2 and is_inversion(*features): yield build_vcf_inversion(features[0], features[1], genome_2bit) elif len(features) == 2 and is_translocation(*features): info = get_translocation_info(features[0], features[1]) for feature in features: for brend in build_vcf_parts(feature, genome_2bit, info): yield brend else: for feature in features: for brend in build_vcf_parts(feature, genome_2bit): yield brend
[ "def", "_get_vcf_breakends", "(", "hydra_file", ",", "genome_2bit", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "for", "features", "in", "group_hydra_breakends", "(", "hydra_parser", "(", "hydra_file", ...
Parse BEDPE input, yielding VCF ready breakends.
[ "Parse", "BEDPE", "input", "yielding", "VCF", "ready", "breakends", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L300-L319
237,622
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
hydra_to_vcf_writer
def hydra_to_vcf_writer(hydra_file, genome_2bit, options, out_handle): """Write hydra output as sorted VCF file. Requires loading the hydra file into memory to perform sorting on output VCF. Could generalize this to no sorting or by-chromosome approach if this proves too memory intensive. """ _write_vcf_header(out_handle) brends = list(_get_vcf_breakends(hydra_file, genome_2bit, options)) brends.sort(key=attrgetter("chrom", "pos")) for brend in brends: _write_vcf_breakend(brend, out_handle)
python
def hydra_to_vcf_writer(hydra_file, genome_2bit, options, out_handle): _write_vcf_header(out_handle) brends = list(_get_vcf_breakends(hydra_file, genome_2bit, options)) brends.sort(key=attrgetter("chrom", "pos")) for brend in brends: _write_vcf_breakend(brend, out_handle)
[ "def", "hydra_to_vcf_writer", "(", "hydra_file", ",", "genome_2bit", ",", "options", ",", "out_handle", ")", ":", "_write_vcf_header", "(", "out_handle", ")", "brends", "=", "list", "(", "_get_vcf_breakends", "(", "hydra_file", ",", "genome_2bit", ",", "options", ...
Write hydra output as sorted VCF file. Requires loading the hydra file into memory to perform sorting on output VCF. Could generalize this to no sorting or by-chromosome approach if this proves too memory intensive.
[ "Write", "hydra", "output", "as", "sorted", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L321-L332
237,623
bcbio/bcbio-nextgen
bcbio/rnaseq/kallisto.py
kallisto_table
def kallisto_table(kallisto_dir, index): """ convert kallisto output to a count table where the rows are equivalence classes and the columns are cells """ quant_dir = os.path.join(kallisto_dir, "quant") out_file = os.path.join(quant_dir, "matrix.csv") if file_exists(out_file): return out_file tsvfile = os.path.join(quant_dir, "matrix.tsv") ecfile = os.path.join(quant_dir, "matrix.ec") cellsfile = os.path.join(quant_dir, "matrix.cells") fastafile = os.path.splitext(index)[0] + ".fa" fasta_names = fasta.sequence_names(fastafile) ec_names = get_ec_names(ecfile, fasta_names) df = pd.read_table(tsvfile, header=None, names=["ec", "cell", "count"]) df["ec"] = [ec_names[x] for x in df["ec"]] df = df.pivot(index='ec', columns='cell', values='count') cellnames = get_cell_names(cellsfile) colnames = [cellnames[x] for x in df.columns] df.columns = colnames df.to_csv(out_file) return out_file
python
def kallisto_table(kallisto_dir, index): quant_dir = os.path.join(kallisto_dir, "quant") out_file = os.path.join(quant_dir, "matrix.csv") if file_exists(out_file): return out_file tsvfile = os.path.join(quant_dir, "matrix.tsv") ecfile = os.path.join(quant_dir, "matrix.ec") cellsfile = os.path.join(quant_dir, "matrix.cells") fastafile = os.path.splitext(index)[0] + ".fa" fasta_names = fasta.sequence_names(fastafile) ec_names = get_ec_names(ecfile, fasta_names) df = pd.read_table(tsvfile, header=None, names=["ec", "cell", "count"]) df["ec"] = [ec_names[x] for x in df["ec"]] df = df.pivot(index='ec', columns='cell', values='count') cellnames = get_cell_names(cellsfile) colnames = [cellnames[x] for x in df.columns] df.columns = colnames df.to_csv(out_file) return out_file
[ "def", "kallisto_table", "(", "kallisto_dir", ",", "index", ")", ":", "quant_dir", "=", "os", ".", "path", ".", "join", "(", "kallisto_dir", ",", "\"quant\"", ")", "out_file", "=", "os", ".", "path", ".", "join", "(", "quant_dir", ",", "\"matrix.csv\"", ...
convert kallisto output to a count table where the rows are equivalence classes and the columns are cells
[ "convert", "kallisto", "output", "to", "a", "count", "table", "where", "the", "rows", "are", "equivalence", "classes", "and", "the", "columns", "are", "cells" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/kallisto.py#L127-L149
237,624
bcbio/bcbio-nextgen
bcbio/rnaseq/kallisto.py
get_ec_names
def get_ec_names(ecfile, fasta_names): """ convert equivalence classes to their set of transcripts """ df = pd.read_table(ecfile, header=None, names=["ec", "transcripts"]) transcript_groups = [x.split(",") for x in df["transcripts"]] transcripts = [] for group in transcript_groups: transcripts.append(":".join([fasta_names[int(x)] for x in group])) return transcripts
python
def get_ec_names(ecfile, fasta_names): df = pd.read_table(ecfile, header=None, names=["ec", "transcripts"]) transcript_groups = [x.split(",") for x in df["transcripts"]] transcripts = [] for group in transcript_groups: transcripts.append(":".join([fasta_names[int(x)] for x in group])) return transcripts
[ "def", "get_ec_names", "(", "ecfile", ",", "fasta_names", ")", ":", "df", "=", "pd", ".", "read_table", "(", "ecfile", ",", "header", "=", "None", ",", "names", "=", "[", "\"ec\"", ",", "\"transcripts\"", "]", ")", "transcript_groups", "=", "[", "x", "...
convert equivalence classes to their set of transcripts
[ "convert", "equivalence", "classes", "to", "their", "set", "of", "transcripts" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/kallisto.py#L151-L160
237,625
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
parse_dirname
def parse_dirname(fc_dir): """Parse the flow cell ID and date from a flow cell directory. """ (_, fc_dir) = os.path.split(fc_dir) parts = fc_dir.split("_") name = None date = None for p in parts: if p.endswith(("XX", "xx", "XY", "X2")): name = p elif len(p) == 6: try: int(p) date = p except ValueError: pass if name is None or date is None: raise ValueError("Did not find flowcell name: %s" % fc_dir) return name, date
python
def parse_dirname(fc_dir): (_, fc_dir) = os.path.split(fc_dir) parts = fc_dir.split("_") name = None date = None for p in parts: if p.endswith(("XX", "xx", "XY", "X2")): name = p elif len(p) == 6: try: int(p) date = p except ValueError: pass if name is None or date is None: raise ValueError("Did not find flowcell name: %s" % fc_dir) return name, date
[ "def", "parse_dirname", "(", "fc_dir", ")", ":", "(", "_", ",", "fc_dir", ")", "=", "os", ".", "path", ".", "split", "(", "fc_dir", ")", "parts", "=", "fc_dir", ".", "split", "(", "\"_\"", ")", "name", "=", "None", "date", "=", "None", "for", "p"...
Parse the flow cell ID and date from a flow cell directory.
[ "Parse", "the", "flow", "cell", "ID", "and", "date", "from", "a", "flow", "cell", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L8-L26
237,626
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
get_qseq_dir
def get_qseq_dir(fc_dir): """Retrieve the qseq directory within Solexa flowcell output. """ machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return machine_bc # otherwise assume we are in the qseq directory # XXX What other cases can we end up with here? else: return fc_dir
python
def get_qseq_dir(fc_dir): machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return machine_bc # otherwise assume we are in the qseq directory # XXX What other cases can we end up with here? else: return fc_dir
[ "def", "get_qseq_dir", "(", "fc_dir", ")", ":", "machine_bc", "=", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"BaseCalls\"", ")", "if", "os", ".", "path", ".", "exists", "(", "machine_bc", ")", ":", "...
Retrieve the qseq directory within Solexa flowcell output.
[ "Retrieve", "the", "qseq", "directory", "within", "Solexa", "flowcell", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L28-L37
237,627
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
get_fastq_dir
def get_fastq_dir(fc_dir): """Retrieve the fastq directory within Solexa flowcell output. """ full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*")) bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*")) machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return os.path.join(machine_bc, "fastq") elif len(full_goat_bc) > 0: return os.path.join(full_goat_bc[0], "fastq") elif len(bustard_bc) > 0: return os.path.join(bustard_bc[0], "fastq") # otherwise assume we are in the fastq directory # XXX What other cases can we end up with here? else: return fc_dir
python
def get_fastq_dir(fc_dir): full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*")) bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*")) machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return os.path.join(machine_bc, "fastq") elif len(full_goat_bc) > 0: return os.path.join(full_goat_bc[0], "fastq") elif len(bustard_bc) > 0: return os.path.join(bustard_bc[0], "fastq") # otherwise assume we are in the fastq directory # XXX What other cases can we end up with here? else: return fc_dir
[ "def", "get_fastq_dir", "(", "fc_dir", ")", ":", "full_goat_bc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"*Firecrest*\"", ",", "\"Bustard*\"", ")", ")", "bustard_bc", "=", "glob", ".", "glob",...
Retrieve the fastq directory within Solexa flowcell output.
[ "Retrieve", "the", "fastq", "directory", "within", "Solexa", "flowcell", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L39-L54
237,628
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
GalaxySqnLimsApi.run_details
def run_details(self, run): """Retrieve sequencing run details as a dictionary. """ run_data = dict(run=run) req = urllib.request.Request("%s/nglims/api_run_details" % self._base_url, urllib.parse.urlencode(run_data)) response = urllib.request.urlopen(req) info = json.loads(response.read()) if "error" in info: raise ValueError("Problem retrieving info: %s" % info["error"]) else: return info["details"]
python
def run_details(self, run): run_data = dict(run=run) req = urllib.request.Request("%s/nglims/api_run_details" % self._base_url, urllib.parse.urlencode(run_data)) response = urllib.request.urlopen(req) info = json.loads(response.read()) if "error" in info: raise ValueError("Problem retrieving info: %s" % info["error"]) else: return info["details"]
[ "def", "run_details", "(", "self", ",", "run", ")", ":", "run_data", "=", "dict", "(", "run", "=", "run", ")", "req", "=", "urllib", ".", "request", ".", "Request", "(", "\"%s/nglims/api_run_details\"", "%", "self", ".", "_base_url", ",", "urllib", ".", ...
Retrieve sequencing run details as a dictionary.
[ "Retrieve", "sequencing", "run", "details", "as", "a", "dictionary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L70-L81
237,629
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
_mosaik_args_from_config
def _mosaik_args_from_config(config): """Configurable high level options for mosaik. """ multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-m", "all"] if multi_mappers else ["-m", "unique"] error_flags = ["-mm", "2"] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + multi_flags + error_flags
python
def _mosaik_args_from_config(config): multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-m", "all"] if multi_mappers else ["-m", "unique"] error_flags = ["-mm", "2"] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + multi_flags + error_flags
[ "def", "_mosaik_args_from_config", "(", "config", ")", ":", "multi_mappers", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"multiple_mappers\"", ",", "True", ")", "multi_flags", "=", "[", "\"-m\"", ",", "\"all\"", "]", "if", "multi_mappers", "el...
Configurable high level options for mosaik.
[ "Configurable", "high", "level", "options", "for", "mosaik", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L14-L22
237,630
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
_convert_fastq
def _convert_fastq(fastq_file, pair_file, rg_name, out_file, config): """Convert fastq inputs into internal Mosaik representation. """ out_file = "{0}-fq.mkb".format(os.path.splitext(out_file)[0]) if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: cl = [config_utils.get_program("mosaik", config, default="MosaikAligner").replace("Aligner", "Build")] cl += ["-q", fastq_file, "-out", tx_out_file, "-st", config["algorithm"].get("platform", "illumina").lower()] if pair_file: cl += ["-q2", pair_file] if rg_name: cl += ["-id", rg_name] env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && " + " ".join(cl), shell=True) return out_file
python
def _convert_fastq(fastq_file, pair_file, rg_name, out_file, config): out_file = "{0}-fq.mkb".format(os.path.splitext(out_file)[0]) if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: cl = [config_utils.get_program("mosaik", config, default="MosaikAligner").replace("Aligner", "Build")] cl += ["-q", fastq_file, "-out", tx_out_file, "-st", config["algorithm"].get("platform", "illumina").lower()] if pair_file: cl += ["-q2", pair_file] if rg_name: cl += ["-id", rg_name] env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && " + " ".join(cl), shell=True) return out_file
[ "def", "_convert_fastq", "(", "fastq_file", ",", "pair_file", ",", "rg_name", ",", "out_file", ",", "config", ")", ":", "out_file", "=", "\"{0}-fq.mkb\"", ".", "format", "(", "os", ".", "path", ".", "splitext", "(", "out_file", ")", "[", "0", "]", ")", ...
Convert fastq inputs into internal Mosaik representation.
[ "Convert", "fastq", "inputs", "into", "internal", "Mosaik", "representation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L24-L41
237,631
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
_get_mosaik_nn_args
def _get_mosaik_nn_args(out_file): """Retrieve default neural network files from GitHub to pass to Mosaik. """ base_nn_url = "https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/" out = [] for arg, fname in [("-annse", "2.1.26.se.100.005.ann"), ("-annpe", "2.1.26.pe.100.0065.ann")]: arg_fname = os.path.join(os.path.dirname(out_file), fname) if not file_exists(arg_fname): subprocess.check_call(["wget", "-O", arg_fname, base_nn_url + fname]) out += [arg, arg_fname] return out
python
def _get_mosaik_nn_args(out_file): base_nn_url = "https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/" out = [] for arg, fname in [("-annse", "2.1.26.se.100.005.ann"), ("-annpe", "2.1.26.pe.100.0065.ann")]: arg_fname = os.path.join(os.path.dirname(out_file), fname) if not file_exists(arg_fname): subprocess.check_call(["wget", "-O", arg_fname, base_nn_url + fname]) out += [arg, arg_fname] return out
[ "def", "_get_mosaik_nn_args", "(", "out_file", ")", ":", "base_nn_url", "=", "\"https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/\"", "out", "=", "[", "]", "for", "arg", ",", "fname", "in", "[", "(", "\"-annse\"", ",", "\"2.1.26.se.100.005.ann\"", ")", ...
Retrieve default neural network files from GitHub to pass to Mosaik.
[ "Retrieve", "default", "neural", "network", "files", "from", "GitHub", "to", "pass", "to", "Mosaik", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L43-L54
237,632
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
align
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Alignment with MosaikAligner. """ config = data["config"] rg_name = names.get("rg", None) if names else None out_file = os.path.join(align_dir, "%s-align.bam" % names["lane"]) if not file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: built_fastq = _convert_fastq(fastq_file, pair_file, rg_name, out_file, config) cl = [config_utils.get_program("mosaik", config, default="MosaikAligner")] cl += _mosaik_args_from_config(config) cl += extra_args if extra_args is not None else [] cl += ["-ia", ref_file, "-in", built_fastq, "-out", os.path.splitext(tx_out_file)[0]] jump_base = os.path.splitext(ref_file)[0] key_file = "{0}_keys.jmp".format(jump_base) if file_exists(key_file): cl += ["-j", jump_base] # XXX hacky way to guess key size which needs to match # Can I get hash size directly jump_size_gb = os.path.getsize(key_file) / 1073741824.0 if jump_size_gb < 1.0: cl += ["-hs", "13"] cl += _get_mosaik_nn_args(out_file) env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && "+ " ".join([str(x) for x in cl]), shell=True) os.remove(built_fastq) return out_file
python
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): config = data["config"] rg_name = names.get("rg", None) if names else None out_file = os.path.join(align_dir, "%s-align.bam" % names["lane"]) if not file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: built_fastq = _convert_fastq(fastq_file, pair_file, rg_name, out_file, config) cl = [config_utils.get_program("mosaik", config, default="MosaikAligner")] cl += _mosaik_args_from_config(config) cl += extra_args if extra_args is not None else [] cl += ["-ia", ref_file, "-in", built_fastq, "-out", os.path.splitext(tx_out_file)[0]] jump_base = os.path.splitext(ref_file)[0] key_file = "{0}_keys.jmp".format(jump_base) if file_exists(key_file): cl += ["-j", jump_base] # XXX hacky way to guess key size which needs to match # Can I get hash size directly jump_size_gb = os.path.getsize(key_file) / 1073741824.0 if jump_size_gb < 1.0: cl += ["-hs", "13"] cl += _get_mosaik_nn_args(out_file) env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && "+ " ".join([str(x) for x in cl]), shell=True) os.remove(built_fastq) return out_file
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ",", "extra_args", "=", "None", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "rg_name", "=", "names", ".", "get", "(", "\"rg\...
Alignment with MosaikAligner.
[ "Alignment", "with", "MosaikAligner", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L56-L87
237,633
bcbio/bcbio-nextgen
bcbio/graph/graph.py
get_bcbio_timings
def get_bcbio_timings(path): """Fetch timing information from a bcbio log file.""" with open(path, 'r') as file_handle: steps = {} for line in file_handle: matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line) if not matches: continue tstamp = matches.group(1) msg = matches.group(2) # XXX: new special logs do not have this #if not msg.find('Timing: ') >= 0: # continue when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace( tzinfo=pytz.timezone('UTC')) step = msg.split(":")[-1].strip() steps[when] = step return steps
python
def get_bcbio_timings(path): with open(path, 'r') as file_handle: steps = {} for line in file_handle: matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line) if not matches: continue tstamp = matches.group(1) msg = matches.group(2) # XXX: new special logs do not have this #if not msg.find('Timing: ') >= 0: # continue when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace( tzinfo=pytz.timezone('UTC')) step = msg.split(":")[-1].strip() steps[when] = step return steps
[ "def", "get_bcbio_timings", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "file_handle", ":", "steps", "=", "{", "}", "for", "line", "in", "file_handle", ":", "matches", "=", "re", ".", "search", "(", "r'^\\[([^\\]]+)\\] ([^:...
Fetch timing information from a bcbio log file.
[ "Fetch", "timing", "information", "from", "a", "bcbio", "log", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L54-L76
237,634
bcbio/bcbio-nextgen
bcbio/graph/graph.py
this_and_prev
def this_and_prev(iterable): """Walk an iterable, returning the current and previous items as a two-tuple.""" try: item = next(iterable) while True: next_item = next(iterable) yield item, next_item item = next_item except StopIteration: return
python
def this_and_prev(iterable): try: item = next(iterable) while True: next_item = next(iterable) yield item, next_item item = next_item except StopIteration: return
[ "def", "this_and_prev", "(", "iterable", ")", ":", "try", ":", "item", "=", "next", "(", "iterable", ")", "while", "True", ":", "next_item", "=", "next", "(", "iterable", ")", "yield", "item", ",", "next_item", "item", "=", "next_item", "except", "StopIt...
Walk an iterable, returning the current and previous items as a two-tuple.
[ "Walk", "an", "iterable", "returning", "the", "current", "and", "previous", "items", "as", "a", "two", "-", "tuple", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L89-L99
237,635
bcbio/bcbio-nextgen
bcbio/graph/graph.py
remove_outliers
def remove_outliers(series, stddev): """Remove the outliers from a series.""" return series[(series - series.mean()).abs() < stddev * series.std()]
python
def remove_outliers(series, stddev): return series[(series - series.mean()).abs() < stddev * series.std()]
[ "def", "remove_outliers", "(", "series", ",", "stddev", ")", ":", "return", "series", "[", "(", "series", "-", "series", ".", "mean", "(", ")", ")", ".", "abs", "(", ")", "<", "stddev", "*", "series", ".", "std", "(", ")", "]" ]
Remove the outliers from a series.
[ "Remove", "the", "outliers", "from", "a", "series", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L135-L137
237,636
bcbio/bcbio-nextgen
bcbio/graph/graph.py
prep_for_graph
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None, outlier_stddev=None): """Prepare a dataframe for graphing by calculating deltas for series that need them, resampling, and removing outliers. """ series = series or [] delta_series = delta_series or [] graph = calc_deltas(data_frame, delta_series) for s in series + delta_series: if smoothing: graph[s] = graph[s].resample(smoothing) if outlier_stddev: graph[s] = remove_outliers(graph[s], outlier_stddev) return graph[series + delta_series]
python
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None, outlier_stddev=None): series = series or [] delta_series = delta_series or [] graph = calc_deltas(data_frame, delta_series) for s in series + delta_series: if smoothing: graph[s] = graph[s].resample(smoothing) if outlier_stddev: graph[s] = remove_outliers(graph[s], outlier_stddev) return graph[series + delta_series]
[ "def", "prep_for_graph", "(", "data_frame", ",", "series", "=", "None", ",", "delta_series", "=", "None", ",", "smoothing", "=", "None", ",", "outlier_stddev", "=", "None", ")", ":", "series", "=", "series", "or", "[", "]", "delta_series", "=", "delta_seri...
Prepare a dataframe for graphing by calculating deltas for series that need them, resampling, and removing outliers.
[ "Prepare", "a", "dataframe", "for", "graphing", "by", "calculating", "deltas", "for", "series", "that", "need", "them", "resampling", "and", "removing", "outliers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L140-L155
237,637
bcbio/bcbio-nextgen
bcbio/graph/graph.py
add_common_plot_features
def add_common_plot_features(plot, steps): """Add plot features common to all plots, such as bcbio step information. """ _setup_matplotlib() plot.yaxis.set_tick_params(labelright=True) plot.set_xlabel('') ymax = plot.get_ylim()[1] ticks = {} for tstamp, step in steps.items(): if step == 'finished': continue plot.vlines(tstamp, 0, ymax, linestyles='dashed') tstamp = mpl.dates.num2epoch(mpl.dates.date2num(tstamp)) ticks[tstamp] = step tick_kvs = sorted(ticks.items()) top_axis = plot.twiny() top_axis.set_xlim(*plot.get_xlim()) top_axis.set_xticks([k for k, v in tick_kvs]) top_axis.set_xticklabels([v for k, v in tick_kvs], rotation=45, ha='left', size=pylab.rcParams['font.size']) plot.set_ylim(0) return plot
python
def add_common_plot_features(plot, steps): _setup_matplotlib() plot.yaxis.set_tick_params(labelright=True) plot.set_xlabel('') ymax = plot.get_ylim()[1] ticks = {} for tstamp, step in steps.items(): if step == 'finished': continue plot.vlines(tstamp, 0, ymax, linestyles='dashed') tstamp = mpl.dates.num2epoch(mpl.dates.date2num(tstamp)) ticks[tstamp] = step tick_kvs = sorted(ticks.items()) top_axis = plot.twiny() top_axis.set_xlim(*plot.get_xlim()) top_axis.set_xticks([k for k, v in tick_kvs]) top_axis.set_xticklabels([v for k, v in tick_kvs], rotation=45, ha='left', size=pylab.rcParams['font.size']) plot.set_ylim(0) return plot
[ "def", "add_common_plot_features", "(", "plot", ",", "steps", ")", ":", "_setup_matplotlib", "(", ")", "plot", ".", "yaxis", ".", "set_tick_params", "(", "labelright", "=", "True", ")", "plot", ".", "set_xlabel", "(", "''", ")", "ymax", "=", "plot", ".", ...
Add plot features common to all plots, such as bcbio step information.
[ "Add", "plot", "features", "common", "to", "all", "plots", "such", "as", "bcbio", "step", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L158-L183
237,638
bcbio/bcbio-nextgen
bcbio/graph/graph.py
log_time_frame
def log_time_frame(bcbio_log): """The bcbio running time frame. :return: an instance of :class collections.namedtuple: with the following fields: start and end """ output = collections.namedtuple("Time", ["start", "end", "steps"]) bcbio_timings = get_bcbio_timings(bcbio_log) return output(min(bcbio_timings), max(bcbio_timings), bcbio_timings)
python
def log_time_frame(bcbio_log): output = collections.namedtuple("Time", ["start", "end", "steps"]) bcbio_timings = get_bcbio_timings(bcbio_log) return output(min(bcbio_timings), max(bcbio_timings), bcbio_timings)
[ "def", "log_time_frame", "(", "bcbio_log", ")", ":", "output", "=", "collections", ".", "namedtuple", "(", "\"Time\"", ",", "[", "\"start\"", ",", "\"end\"", ",", "\"steps\"", "]", ")", "bcbio_timings", "=", "get_bcbio_timings", "(", "bcbio_log", ")", "return"...
The bcbio running time frame. :return: an instance of :class collections.namedtuple: with the following fields: start and end
[ "The", "bcbio", "running", "time", "frame", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L298-L306
237,639
bcbio/bcbio-nextgen
bcbio/graph/graph.py
resource_usage
def resource_usage(bcbio_log, cluster, rawdir, verbose): """Generate system statistics from bcbio runs. Parse the obtained files and put the information in a :class pandas.DataFrame:. :param bcbio_log: local path to bcbio log file written by the run :param cluster: :param rawdir: directory to put raw data files :param verbose: increase verbosity :return: a tuple with three dictionaries, the first one contains an instance of :pandas.DataFrame: for each host, the second one contains information regarding the hardware configuration and the last one contains information regarding timing. :type return: tuple """ data_frames = {} hardware_info = {} time_frame = log_time_frame(bcbio_log) for collectl_file in sorted(os.listdir(rawdir)): if not collectl_file.endswith('.raw.gz'): continue # Only load filenames within sampling timerange (gathered from bcbio_log time_frame) if rawfile_within_timeframe(collectl_file, time_frame): collectl_path = os.path.join(rawdir, collectl_file) data, hardware = load_collectl( collectl_path, time_frame.start, time_frame.end) if len(data) == 0: #raise ValueError("No data present in collectl file %s, mismatch in timestamps between raw collectl and log file?", collectl_path) continue host = re.sub(r'-\d{8}-\d{6}\.raw\.gz$', '', collectl_file) hardware_info[host] = hardware if host not in data_frames: data_frames[host] = data else: data_frames[host] = pd.concat([data_frames[host], data]) return (data_frames, hardware_info, time_frame.steps)
python
def resource_usage(bcbio_log, cluster, rawdir, verbose): data_frames = {} hardware_info = {} time_frame = log_time_frame(bcbio_log) for collectl_file in sorted(os.listdir(rawdir)): if not collectl_file.endswith('.raw.gz'): continue # Only load filenames within sampling timerange (gathered from bcbio_log time_frame) if rawfile_within_timeframe(collectl_file, time_frame): collectl_path = os.path.join(rawdir, collectl_file) data, hardware = load_collectl( collectl_path, time_frame.start, time_frame.end) if len(data) == 0: #raise ValueError("No data present in collectl file %s, mismatch in timestamps between raw collectl and log file?", collectl_path) continue host = re.sub(r'-\d{8}-\d{6}\.raw\.gz$', '', collectl_file) hardware_info[host] = hardware if host not in data_frames: data_frames[host] = data else: data_frames[host] = pd.concat([data_frames[host], data]) return (data_frames, hardware_info, time_frame.steps)
[ "def", "resource_usage", "(", "bcbio_log", ",", "cluster", ",", "rawdir", ",", "verbose", ")", ":", "data_frames", "=", "{", "}", "hardware_info", "=", "{", "}", "time_frame", "=", "log_time_frame", "(", "bcbio_log", ")", "for", "collectl_file", "in", "sorte...
Generate system statistics from bcbio runs. Parse the obtained files and put the information in a :class pandas.DataFrame:. :param bcbio_log: local path to bcbio log file written by the run :param cluster: :param rawdir: directory to put raw data files :param verbose: increase verbosity :return: a tuple with three dictionaries, the first one contains an instance of :pandas.DataFrame: for each host, the second one contains information regarding the hardware configuration and the last one contains information regarding timing. :type return: tuple
[ "Generate", "system", "statistics", "from", "bcbio", "runs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L319-L362
237,640
bcbio/bcbio-nextgen
bcbio/graph/graph.py
generate_graphs
def generate_graphs(data_frames, hardware_info, steps, outdir, verbose=False): """Generate all graphs for a bcbio run.""" _setup_matplotlib() # Hash of hosts containing (data, hardware, steps) tuple collectl_info = collections.defaultdict(dict) for host, data_frame in data_frames.items(): if verbose: print('Generating CPU graph for {}...'.format(host)) graph, data_cpu = graph_cpu(data_frame, steps, hardware_info[host]['num_cpus']) graph.get_figure().savefig( os.path.join(outdir, '{}_cpu.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() ifaces = set([series.split('_')[0] for series in data_frame.keys() if series.startswith(('eth', 'ib'))]) if verbose: print('Generating network graphs for {}...'.format(host)) graph, data_net_bytes = graph_net_bytes(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_bytes.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() graph, data_net_pkts = graph_net_pkts(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_pkts.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating memory graph for {}...'.format(host)) graph, data_mem = graph_memory(data_frame, steps, hardware_info[host]["memory"]) graph.get_figure().savefig( os.path.join(outdir, '{}_memory.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating storage I/O graph for {}...'.format(host)) drives = set([ series.split('_')[0] for series in data_frame.keys() if series.startswith(('sd', 'vd', 'hd', 'xvd')) ]) graph, data_disk = graph_disk_io(data_frame, steps, drives) graph.get_figure().savefig( os.path.join(outdir, '{}_disk_io.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() print('Serializing output to pickle object for node {}...'.format(host)) # "Clean" dataframes ready to be plotted collectl_info[host] = { "hardware": hardware_info, "steps": steps, "cpu": 
data_cpu, "mem": data_mem, "disk": data_disk, "net_bytes": data_net_bytes, "net_pkts": data_net_pkts } return collectl_info
python
def generate_graphs(data_frames, hardware_info, steps, outdir, verbose=False): _setup_matplotlib() # Hash of hosts containing (data, hardware, steps) tuple collectl_info = collections.defaultdict(dict) for host, data_frame in data_frames.items(): if verbose: print('Generating CPU graph for {}...'.format(host)) graph, data_cpu = graph_cpu(data_frame, steps, hardware_info[host]['num_cpus']) graph.get_figure().savefig( os.path.join(outdir, '{}_cpu.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() ifaces = set([series.split('_')[0] for series in data_frame.keys() if series.startswith(('eth', 'ib'))]) if verbose: print('Generating network graphs for {}...'.format(host)) graph, data_net_bytes = graph_net_bytes(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_bytes.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() graph, data_net_pkts = graph_net_pkts(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_pkts.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating memory graph for {}...'.format(host)) graph, data_mem = graph_memory(data_frame, steps, hardware_info[host]["memory"]) graph.get_figure().savefig( os.path.join(outdir, '{}_memory.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating storage I/O graph for {}...'.format(host)) drives = set([ series.split('_')[0] for series in data_frame.keys() if series.startswith(('sd', 'vd', 'hd', 'xvd')) ]) graph, data_disk = graph_disk_io(data_frame, steps, drives) graph.get_figure().savefig( os.path.join(outdir, '{}_disk_io.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() print('Serializing output to pickle object for node {}...'.format(host)) # "Clean" dataframes ready to be plotted collectl_info[host] = { "hardware": hardware_info, "steps": steps, "cpu": data_cpu, "mem": data_mem, "disk": data_disk, 
"net_bytes": data_net_bytes, "net_pkts": data_net_pkts } return collectl_info
[ "def", "generate_graphs", "(", "data_frames", ",", "hardware_info", ",", "steps", ",", "outdir", ",", "verbose", "=", "False", ")", ":", "_setup_matplotlib", "(", ")", "# Hash of hosts containing (data, hardware, steps) tuple", "collectl_info", "=", "collections", ".", ...
Generate all graphs for a bcbio run.
[ "Generate", "all", "graphs", "for", "a", "bcbio", "run", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L365-L428
237,641
bcbio/bcbio-nextgen
bcbio/variation/ploidy.py
get_ploidy
def get_ploidy(items, region=None): """Retrieve ploidy of a region, handling special cases. """ chrom = chromosome_special_cases(region[0] if isinstance(region, (list, tuple)) else None) ploidy = _configured_ploidy(items) sexes = _configured_genders(items) if chrom == "mitochondrial": # For now, do haploid calling. Could also do pooled calling # but not entirely clear what the best default would be. return ploidy.get("mitochondrial", 1) elif chrom == "X": # Do standard diploid calling if we have any females or unspecified. if "female" in sexes or "f" in sexes: return ploidy.get("female", ploidy["default"]) elif "male" in sexes or "m" in sexes: return ploidy.get("male", 1) else: return ploidy.get("female", ploidy["default"]) elif chrom == "Y": # Always call Y single. If female, filter_vcf_by_sex removes Y regions. return 1 else: return ploidy["default"]
python
def get_ploidy(items, region=None): chrom = chromosome_special_cases(region[0] if isinstance(region, (list, tuple)) else None) ploidy = _configured_ploidy(items) sexes = _configured_genders(items) if chrom == "mitochondrial": # For now, do haploid calling. Could also do pooled calling # but not entirely clear what the best default would be. return ploidy.get("mitochondrial", 1) elif chrom == "X": # Do standard diploid calling if we have any females or unspecified. if "female" in sexes or "f" in sexes: return ploidy.get("female", ploidy["default"]) elif "male" in sexes or "m" in sexes: return ploidy.get("male", 1) else: return ploidy.get("female", ploidy["default"]) elif chrom == "Y": # Always call Y single. If female, filter_vcf_by_sex removes Y regions. return 1 else: return ploidy["default"]
[ "def", "get_ploidy", "(", "items", ",", "region", "=", "None", ")", ":", "chrom", "=", "chromosome_special_cases", "(", "region", "[", "0", "]", "if", "isinstance", "(", "region", ",", "(", "list", ",", "tuple", ")", ")", "else", "None", ")", "ploidy",...
Retrieve ploidy of a region, handling special cases.
[ "Retrieve", "ploidy", "of", "a", "region", "handling", "special", "cases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ploidy.py#L43-L66
237,642
bcbio/bcbio-nextgen
bcbio/variation/ploidy.py
filter_vcf_by_sex
def filter_vcf_by_sex(vcf_file, items): """Post-filter a single sample VCF, handling sex chromosomes. Removes Y chromosomes from batches with all female samples. """ out_file = "%s-ploidyfix%s" % utils.splitext_plus(vcf_file) if not utils.file_exists(out_file): genders = list(_configured_genders(items)) is_female = len(genders) == 1 and genders[0] and genders[0] in ["female", "f"] if is_female: orig_out_file = out_file out_file = orig_out_file.replace(".vcf.gz", ".vcf") with file_transaction(items[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: with utils.open_gzipsafe(vcf_file) as in_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: chrom = chromosome_special_cases(line.split("\t")) if chrom != "Y": out_handle.write(line) if orig_out_file.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) else: out_file = vcf_file return out_file
python
def filter_vcf_by_sex(vcf_file, items): out_file = "%s-ploidyfix%s" % utils.splitext_plus(vcf_file) if not utils.file_exists(out_file): genders = list(_configured_genders(items)) is_female = len(genders) == 1 and genders[0] and genders[0] in ["female", "f"] if is_female: orig_out_file = out_file out_file = orig_out_file.replace(".vcf.gz", ".vcf") with file_transaction(items[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: with utils.open_gzipsafe(vcf_file) as in_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: chrom = chromosome_special_cases(line.split("\t")) if chrom != "Y": out_handle.write(line) if orig_out_file.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) else: out_file = vcf_file return out_file
[ "def", "filter_vcf_by_sex", "(", "vcf_file", ",", "items", ")", ":", "out_file", "=", "\"%s-ploidyfix%s\"", "%", "utils", ".", "splitext_plus", "(", "vcf_file", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "genders", "=", "list",...
Post-filter a single sample VCF, handling sex chromosomes. Removes Y chromosomes from batches with all female samples.
[ "Post", "-", "filter", "a", "single", "sample", "VCF", "handling", "sex", "chromosomes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ploidy.py#L68-L94
237,643
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
variant_filtration
def variant_filtration(call_file, ref_file, vrn_files, data, items): """Filter variant calls using Variant Quality Score Recalibration. Newer GATK with Haplotype calling has combined SNP/indel filtering. """ caller = data["config"]["algorithm"].get("variantcaller") if "gvcf" not in dd.get_tools_on(data): call_file = ploidy.filter_vcf_by_sex(call_file, items) if caller in ["freebayes"]: return vfilter.freebayes(call_file, ref_file, vrn_files, data) elif caller in ["platypus"]: return vfilter.platypus(call_file, data) elif caller in ["samtools"]: return vfilter.samtools(call_file, data) elif caller in ["gatk", "gatk-haplotype", "haplotyper"]: if dd.get_analysis(data).lower().find("rna-seq") >= 0: from bcbio.rnaseq import variation as rnaseq_variation return rnaseq_variation.gatk_filter_rnaseq(call_file, data) else: return gatkfilter.run(call_file, ref_file, vrn_files, data) # no additional filtration for callers that filter as part of call process else: return call_file
python
def variant_filtration(call_file, ref_file, vrn_files, data, items): caller = data["config"]["algorithm"].get("variantcaller") if "gvcf" not in dd.get_tools_on(data): call_file = ploidy.filter_vcf_by_sex(call_file, items) if caller in ["freebayes"]: return vfilter.freebayes(call_file, ref_file, vrn_files, data) elif caller in ["platypus"]: return vfilter.platypus(call_file, data) elif caller in ["samtools"]: return vfilter.samtools(call_file, data) elif caller in ["gatk", "gatk-haplotype", "haplotyper"]: if dd.get_analysis(data).lower().find("rna-seq") >= 0: from bcbio.rnaseq import variation as rnaseq_variation return rnaseq_variation.gatk_filter_rnaseq(call_file, data) else: return gatkfilter.run(call_file, ref_file, vrn_files, data) # no additional filtration for callers that filter as part of call process else: return call_file
[ "def", "variant_filtration", "(", "call_file", ",", "ref_file", ",", "vrn_files", ",", "data", ",", "items", ")", ":", "caller", "=", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"variantcaller\"", ")", "if", "\"gvcf\"", "n...
Filter variant calls using Variant Quality Score Recalibration. Newer GATK with Haplotype calling has combined SNP/indel filtering.
[ "Filter", "variant", "calls", "using", "Variant", "Quality", "Score", "Recalibration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L23-L45
237,644
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_split_by_ready_regions
def _split_by_ready_regions(ext, file_key, dir_ext_fn): """Organize splits based on regions generated by parallel_prep_region. Sort splits so largest regions analyzed first, avoiding potentially lagging runs at end. """ def _sort_by_size(region_w_bams): region, _ = region_w_bams _, start, end = region return end - start def _assign_bams_to_regions(data): """Ensure BAMs aligned with input regions, either global or individual. """ for i, region in enumerate(data["region"]): work_bams = [] for xs in data["region_bams"]: if len(xs) == 1: work_bams.append(xs[0]) else: work_bams.append(xs[i]) for work_bam in work_bams: assert os.path.exists(work_bam), work_bam yield region, work_bams def _do_work(data): if "region" in data: name = data["group"][0] if "group" in data else data["description"] out_dir = os.path.join(data["dirs"]["work"], dir_ext_fn(data)) out_file = os.path.join(out_dir, "%s%s" % (name, ext)) assert isinstance(data["region"], (list, tuple)) out_parts = [] for r, work_bams in sorted(_assign_bams_to_regions(data), key=_sort_by_size, reverse=True): out_region_dir = os.path.join(out_dir, r[0]) out_region_file = os.path.join(out_region_dir, "%s-%s%s" % (name, pregion.to_safestr(r), ext)) out_parts.append((r, work_bams, out_region_file)) return out_file, out_parts else: return None, [] return _do_work
python
def _split_by_ready_regions(ext, file_key, dir_ext_fn): def _sort_by_size(region_w_bams): region, _ = region_w_bams _, start, end = region return end - start def _assign_bams_to_regions(data): """Ensure BAMs aligned with input regions, either global or individual. """ for i, region in enumerate(data["region"]): work_bams = [] for xs in data["region_bams"]: if len(xs) == 1: work_bams.append(xs[0]) else: work_bams.append(xs[i]) for work_bam in work_bams: assert os.path.exists(work_bam), work_bam yield region, work_bams def _do_work(data): if "region" in data: name = data["group"][0] if "group" in data else data["description"] out_dir = os.path.join(data["dirs"]["work"], dir_ext_fn(data)) out_file = os.path.join(out_dir, "%s%s" % (name, ext)) assert isinstance(data["region"], (list, tuple)) out_parts = [] for r, work_bams in sorted(_assign_bams_to_regions(data), key=_sort_by_size, reverse=True): out_region_dir = os.path.join(out_dir, r[0]) out_region_file = os.path.join(out_region_dir, "%s-%s%s" % (name, pregion.to_safestr(r), ext)) out_parts.append((r, work_bams, out_region_file)) return out_file, out_parts else: return None, [] return _do_work
[ "def", "_split_by_ready_regions", "(", "ext", ",", "file_key", ",", "dir_ext_fn", ")", ":", "def", "_sort_by_size", "(", "region_w_bams", ")", ":", "region", ",", "_", "=", "region_w_bams", "_", ",", "start", ",", "end", "=", "region", "return", "end", "-"...
Organize splits based on regions generated by parallel_prep_region. Sort splits so largest regions analyzed first, avoiding potentially lagging runs at end.
[ "Organize", "splits", "based", "on", "regions", "generated", "by", "parallel_prep_region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L116-L154
237,645
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_collapse_by_bam_variantcaller
def _collapse_by_bam_variantcaller(samples): """Collapse regions to a single representative by BAM input, variant caller and batch. """ by_bam = collections.OrderedDict() for data in (x[0] for x in samples): work_bam = utils.get_in(data, ("combine", "work_bam", "out"), data.get("align_bam")) variantcaller = get_variantcaller(data) if isinstance(work_bam, list): work_bam = tuple(work_bam) key = (multi.get_batch_for_key(data), work_bam, variantcaller) try: by_bam[key].append(data) except KeyError: by_bam[key] = [data] out = [] for grouped_data in by_bam.values(): cur = grouped_data[0] cur.pop("region", None) region_bams = cur.pop("region_bams", None) if region_bams and len(region_bams[0]) > 1: cur.pop("work_bam", None) out.append([cur]) return out
python
def _collapse_by_bam_variantcaller(samples): by_bam = collections.OrderedDict() for data in (x[0] for x in samples): work_bam = utils.get_in(data, ("combine", "work_bam", "out"), data.get("align_bam")) variantcaller = get_variantcaller(data) if isinstance(work_bam, list): work_bam = tuple(work_bam) key = (multi.get_batch_for_key(data), work_bam, variantcaller) try: by_bam[key].append(data) except KeyError: by_bam[key] = [data] out = [] for grouped_data in by_bam.values(): cur = grouped_data[0] cur.pop("region", None) region_bams = cur.pop("region_bams", None) if region_bams and len(region_bams[0]) > 1: cur.pop("work_bam", None) out.append([cur]) return out
[ "def", "_collapse_by_bam_variantcaller", "(", "samples", ")", ":", "by_bam", "=", "collections", ".", "OrderedDict", "(", ")", "for", "data", "in", "(", "x", "[", "0", "]", "for", "x", "in", "samples", ")", ":", "work_bam", "=", "utils", ".", "get_in", ...
Collapse regions to a single representative by BAM input, variant caller and batch.
[ "Collapse", "regions", "to", "a", "single", "representative", "by", "BAM", "input", "variant", "caller", "and", "batch", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L156-L178
237,646
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_dup_samples_by_variantcaller
def _dup_samples_by_variantcaller(samples, require_bam=True): """Prepare samples by variant callers, duplicating any with multiple callers. """ samples = [utils.to_single_data(x) for x in samples] samples = germline.split_somatic(samples) to_process = [] extras = [] for data in samples: added = False for i, add in enumerate(handle_multiple_callers(data, "variantcaller", require_bam=require_bam)): added = True add = dd.set_variantcaller_order(add, i) to_process.append([add]) if not added: data = _handle_precalled(data) data = dd.set_variantcaller_order(data, 0) extras.append([data]) return to_process, extras
python
def _dup_samples_by_variantcaller(samples, require_bam=True): samples = [utils.to_single_data(x) for x in samples] samples = germline.split_somatic(samples) to_process = [] extras = [] for data in samples: added = False for i, add in enumerate(handle_multiple_callers(data, "variantcaller", require_bam=require_bam)): added = True add = dd.set_variantcaller_order(add, i) to_process.append([add]) if not added: data = _handle_precalled(data) data = dd.set_variantcaller_order(data, 0) extras.append([data]) return to_process, extras
[ "def", "_dup_samples_by_variantcaller", "(", "samples", ",", "require_bam", "=", "True", ")", ":", "samples", "=", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", "samples", "=", "germline", ".", "split_somatic", "(", ...
Prepare samples by variant callers, duplicating any with multiple callers.
[ "Prepare", "samples", "by", "variant", "callers", "duplicating", "any", "with", "multiple", "callers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L180-L197
237,647
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
parallel_variantcall_region
def parallel_variantcall_region(samples, run_parallel): """Perform variant calling and post-analysis on samples by region. """ to_process, extras = _dup_samples_by_variantcaller(samples) split_fn = _split_by_ready_regions(".vcf.gz", "work_bam", get_variantcaller) samples = _collapse_by_bam_variantcaller( grouped_parallel_split_combine(to_process, split_fn, multi.group_batches, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"])) return extras + samples
python
def parallel_variantcall_region(samples, run_parallel): to_process, extras = _dup_samples_by_variantcaller(samples) split_fn = _split_by_ready_regions(".vcf.gz", "work_bam", get_variantcaller) samples = _collapse_by_bam_variantcaller( grouped_parallel_split_combine(to_process, split_fn, multi.group_batches, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"])) return extras + samples
[ "def", "parallel_variantcall_region", "(", "samples", ",", "run_parallel", ")", ":", "to_process", ",", "extras", "=", "_dup_samples_by_variantcaller", "(", "samples", ")", "split_fn", "=", "_split_by_ready_regions", "(", "\".vcf.gz\"", ",", "\"work_bam\"", ",", "get_...
Perform variant calling and post-analysis on samples by region.
[ "Perform", "variant", "calling", "and", "post", "-", "analysis", "on", "samples", "by", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L199-L209
237,648
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
vc_output_record
def vc_output_record(samples): """Prepare output record from variant calling to feed into downstream analysis. Prep work handles reformatting so we return generated dictionaries. For any shared keys that are calculated only once for a batch, like variant calls for the batch, we assign to every sample. """ shared_keys = [["vrn_file"], ["validate", "summary"], ["validate", "tp"], ["validate", "fp"], ["validate", "fn"]] raw = cwlutils.samples_to_records([utils.to_single_data(x) for x in samples]) shared = {} for key in shared_keys: cur = list(set([x for x in [tz.get_in(key, d) for d in raw] if x])) if len(cur) > 0: assert len(cur) == 1, (key, cur) shared[tuple(key)] = cur[0] else: shared[tuple(key)] = None out = [] for d in raw: for key, val in shared.items(): d = tz.update_in(d, key, lambda x: val) out.append([d]) return out
python
def vc_output_record(samples): shared_keys = [["vrn_file"], ["validate", "summary"], ["validate", "tp"], ["validate", "fp"], ["validate", "fn"]] raw = cwlutils.samples_to_records([utils.to_single_data(x) for x in samples]) shared = {} for key in shared_keys: cur = list(set([x for x in [tz.get_in(key, d) for d in raw] if x])) if len(cur) > 0: assert len(cur) == 1, (key, cur) shared[tuple(key)] = cur[0] else: shared[tuple(key)] = None out = [] for d in raw: for key, val in shared.items(): d = tz.update_in(d, key, lambda x: val) out.append([d]) return out
[ "def", "vc_output_record", "(", "samples", ")", ":", "shared_keys", "=", "[", "[", "\"vrn_file\"", "]", ",", "[", "\"validate\"", ",", "\"summary\"", "]", ",", "[", "\"validate\"", ",", "\"tp\"", "]", ",", "[", "\"validate\"", ",", "\"fp\"", "]", ",", "[...
Prepare output record from variant calling to feed into downstream analysis. Prep work handles reformatting so we return generated dictionaries. For any shared keys that are calculated only once for a batch, like variant calls for the batch, we assign to every sample.
[ "Prepare", "output", "record", "from", "variant", "calling", "to", "feed", "into", "downstream", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L212-L236
237,649
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
batch_for_variantcall
def batch_for_variantcall(samples): """Prepare a set of samples for parallel variant calling. CWL input target that groups samples into batches and variant callers for parallel processing. If doing joint calling, with `tools_on: [gvcf]`, split the sample into individuals instead of combining into a batch. """ sample_order = [dd.get_sample_name(utils.to_single_data(x)) for x in samples] to_process, extras = _dup_samples_by_variantcaller(samples, require_bam=False) batch_groups = collections.defaultdict(list) to_process = [utils.to_single_data(x) for x in to_process] for data in cwlutils.samples_to_records(to_process): vc = get_variantcaller(data, require_bam=False) batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_groups[(b, vc)].append(utils.deepish_copy(data)) batches = [] for cur_group in batch_groups.values(): joint_calling = any([is_joint(d) for d in cur_group]) if joint_calling: for d in cur_group: batches.append([d]) else: batches.append(cur_group) def by_original_order(xs): return (min([sample_order.index(dd.get_sample_name(x)) for x in xs]), min([dd.get_variantcaller_order(x) for x in xs])) return sorted(batches + extras, key=by_original_order)
python
def batch_for_variantcall(samples): sample_order = [dd.get_sample_name(utils.to_single_data(x)) for x in samples] to_process, extras = _dup_samples_by_variantcaller(samples, require_bam=False) batch_groups = collections.defaultdict(list) to_process = [utils.to_single_data(x) for x in to_process] for data in cwlutils.samples_to_records(to_process): vc = get_variantcaller(data, require_bam=False) batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_groups[(b, vc)].append(utils.deepish_copy(data)) batches = [] for cur_group in batch_groups.values(): joint_calling = any([is_joint(d) for d in cur_group]) if joint_calling: for d in cur_group: batches.append([d]) else: batches.append(cur_group) def by_original_order(xs): return (min([sample_order.index(dd.get_sample_name(x)) for x in xs]), min([dd.get_variantcaller_order(x) for x in xs])) return sorted(batches + extras, key=by_original_order)
[ "def", "batch_for_variantcall", "(", "samples", ")", ":", "sample_order", "=", "[", "dd", ".", "get_sample_name", "(", "utils", ".", "to_single_data", "(", "x", ")", ")", "for", "x", "in", "samples", "]", "to_process", ",", "extras", "=", "_dup_samples_by_va...
Prepare a set of samples for parallel variant calling. CWL input target that groups samples into batches and variant callers for parallel processing. If doing joint calling, with `tools_on: [gvcf]`, split the sample into individuals instead of combining into a batch.
[ "Prepare", "a", "set", "of", "samples", "for", "parallel", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L241-L272
237,650
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_handle_precalled
def _handle_precalled(data): """Copy in external pre-called variants fed into analysis. Symlinks for non-CWL runs where we want to ensure VCF present in a local directory. """ if data.get("vrn_file") and not cwlutils.is_cwl_run(data): vrn_file = data["vrn_file"] if isinstance(vrn_file, (list, tuple)): assert len(vrn_file) == 1 vrn_file = vrn_file[0] precalled_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "precalled")) ext = utils.splitext_plus(vrn_file)[-1] orig_file = os.path.abspath(vrn_file) our_vrn_file = os.path.join(precalled_dir, "%s-precalled%s" % (dd.get_sample_name(data), ext)) utils.copy_plus(orig_file, our_vrn_file) data["vrn_file"] = our_vrn_file return data
python
def _handle_precalled(data): if data.get("vrn_file") and not cwlutils.is_cwl_run(data): vrn_file = data["vrn_file"] if isinstance(vrn_file, (list, tuple)): assert len(vrn_file) == 1 vrn_file = vrn_file[0] precalled_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "precalled")) ext = utils.splitext_plus(vrn_file)[-1] orig_file = os.path.abspath(vrn_file) our_vrn_file = os.path.join(precalled_dir, "%s-precalled%s" % (dd.get_sample_name(data), ext)) utils.copy_plus(orig_file, our_vrn_file) data["vrn_file"] = our_vrn_file return data
[ "def", "_handle_precalled", "(", "data", ")", ":", "if", "data", ".", "get", "(", "\"vrn_file\"", ")", "and", "not", "cwlutils", ".", "is_cwl_run", "(", "data", ")", ":", "vrn_file", "=", "data", "[", "\"vrn_file\"", "]", "if", "isinstance", "(", "vrn_fi...
Copy in external pre-called variants fed into analysis. Symlinks for non-CWL runs where we want to ensure VCF present in a local directory.
[ "Copy", "in", "external", "pre", "-", "called", "variants", "fed", "into", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L274-L291
237,651
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
handle_multiple_callers
def handle_multiple_callers(data, key, default=None, require_bam=True): """Split samples that potentially require multiple variant calling approaches. """ callers = get_variantcaller(data, key, default, require_bam=require_bam) if isinstance(callers, six.string_types): return [data] elif not callers: return [] else: out = [] for caller in callers: base = copy.deepcopy(data) if not base["config"]["algorithm"].get("orig_%s" % key): base["config"]["algorithm"]["orig_%s" % key] = \ base["config"]["algorithm"][key] base["config"]["algorithm"][key] = caller # if splitting by variant caller, also split by jointcaller if key == "variantcaller": jcallers = get_variantcaller(data, "jointcaller", []) if isinstance(jcallers, six.string_types): jcallers = [jcallers] if jcallers: base["config"]["algorithm"]["orig_jointcaller"] = jcallers jcallers = [x for x in jcallers if x.startswith(caller)] if jcallers: base["config"]["algorithm"]["jointcaller"] = jcallers[0] else: base["config"]["algorithm"]["jointcaller"] = False out.append(base) return out
python
def handle_multiple_callers(data, key, default=None, require_bam=True): callers = get_variantcaller(data, key, default, require_bam=require_bam) if isinstance(callers, six.string_types): return [data] elif not callers: return [] else: out = [] for caller in callers: base = copy.deepcopy(data) if not base["config"]["algorithm"].get("orig_%s" % key): base["config"]["algorithm"]["orig_%s" % key] = \ base["config"]["algorithm"][key] base["config"]["algorithm"][key] = caller # if splitting by variant caller, also split by jointcaller if key == "variantcaller": jcallers = get_variantcaller(data, "jointcaller", []) if isinstance(jcallers, six.string_types): jcallers = [jcallers] if jcallers: base["config"]["algorithm"]["orig_jointcaller"] = jcallers jcallers = [x for x in jcallers if x.startswith(caller)] if jcallers: base["config"]["algorithm"]["jointcaller"] = jcallers[0] else: base["config"]["algorithm"]["jointcaller"] = False out.append(base) return out
[ "def", "handle_multiple_callers", "(", "data", ",", "key", ",", "default", "=", "None", ",", "require_bam", "=", "True", ")", ":", "callers", "=", "get_variantcaller", "(", "data", ",", "key", ",", "default", ",", "require_bam", "=", "require_bam", ")", "i...
Split samples that potentially require multiple variant calling approaches.
[ "Split", "samples", "that", "potentially", "require", "multiple", "variant", "calling", "approaches", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L293-L322
237,652
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
variantcall_sample
def variantcall_sample(data, region=None, align_bams=None, out_file=None): """Parallel entry point for doing genotyping of a region of a sample. """ if out_file is None or not os.path.exists(out_file) or not os.path.lexists(out_file): utils.safe_makedir(os.path.dirname(out_file)) ref_file = dd.get_ref_file(data) config = data["config"] caller_fns = get_variantcallers() caller_fn = caller_fns[config["algorithm"].get("variantcaller")] if len(align_bams) == 1: items = [data] else: items = multi.get_orig_items(data) assert len(items) == len(align_bams) assoc_files = tz.get_in(("genome_resources", "variation"), data, {}) if not assoc_files: assoc_files = {} for bam_file in align_bams: bam.index(bam_file, data["config"], check_timestamp=False) out_file = caller_fn(align_bams, items, ref_file, assoc_files, region, out_file) if region: data["region"] = region data["vrn_file"] = out_file return [data]
python
def variantcall_sample(data, region=None, align_bams=None, out_file=None): if out_file is None or not os.path.exists(out_file) or not os.path.lexists(out_file): utils.safe_makedir(os.path.dirname(out_file)) ref_file = dd.get_ref_file(data) config = data["config"] caller_fns = get_variantcallers() caller_fn = caller_fns[config["algorithm"].get("variantcaller")] if len(align_bams) == 1: items = [data] else: items = multi.get_orig_items(data) assert len(items) == len(align_bams) assoc_files = tz.get_in(("genome_resources", "variation"), data, {}) if not assoc_files: assoc_files = {} for bam_file in align_bams: bam.index(bam_file, data["config"], check_timestamp=False) out_file = caller_fn(align_bams, items, ref_file, assoc_files, region, out_file) if region: data["region"] = region data["vrn_file"] = out_file return [data]
[ "def", "variantcall_sample", "(", "data", ",", "region", "=", "None", ",", "align_bams", "=", "None", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", "or", "not", "os", ".", "path", ".", "exists", "(", "out_file", ")", "or", "...
Parallel entry point for doing genotyping of a region of a sample.
[ "Parallel", "entry", "point", "for", "doing", "genotyping", "of", "a", "region", "of", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L359-L381
237,653
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_get_batch_name
def _get_batch_name(items, skip_jointcheck=False): """Retrieve the shared batch name for a group of items. """ batch_names = collections.defaultdict(int) has_joint = any([is_joint(d) for d in items]) for data in items: if has_joint and not skip_jointcheck: batches = dd.get_sample_name(data) else: batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_names[b] += 1 return sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0]
python
def _get_batch_name(items, skip_jointcheck=False): batch_names = collections.defaultdict(int) has_joint = any([is_joint(d) for d in items]) for data in items: if has_joint and not skip_jointcheck: batches = dd.get_sample_name(data) else: batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_names[b] += 1 return sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0]
[ "def", "_get_batch_name", "(", "items", ",", "skip_jointcheck", "=", "False", ")", ":", "batch_names", "=", "collections", ".", "defaultdict", "(", "int", ")", "has_joint", "=", "any", "(", "[", "is_joint", "(", "d", ")", "for", "d", "in", "items", "]", ...
Retrieve the shared batch name for a group of items.
[ "Retrieve", "the", "shared", "batch", "name", "for", "a", "group", "of", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L410-L424
237,654
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_run_variantcall_batch_multicore
def _run_variantcall_batch_multicore(items, regions, final_file): """Run variant calling on a batch of items using multiple cores. """ batch_name = _get_batch_name(items) variantcaller = _get_batch_variantcaller(items) work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items] def split_fn(data): out = [] for region in regions: region = _region_to_coords(region) chrom, start, end = region region_str = "_".join(str(x) for x in region) out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom, "%s-%s.vcf.gz" % (batch_name, region_str)) out.append((region, work_bams, out_file)) return final_file, out parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]), "cores_per_job": 1} run_parallel = dmulti.runner(parallel, items[0]["config"]) to_run = copy.deepcopy(items[0]) to_run["sam_ref"] = dd.get_ref_file(to_run) to_run["group_orig"] = items parallel_split_combine([[to_run]], split_fn, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"]) return final_file
python
def _run_variantcall_batch_multicore(items, regions, final_file): batch_name = _get_batch_name(items) variantcaller = _get_batch_variantcaller(items) work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items] def split_fn(data): out = [] for region in regions: region = _region_to_coords(region) chrom, start, end = region region_str = "_".join(str(x) for x in region) out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom, "%s-%s.vcf.gz" % (batch_name, region_str)) out.append((region, work_bams, out_file)) return final_file, out parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]), "cores_per_job": 1} run_parallel = dmulti.runner(parallel, items[0]["config"]) to_run = copy.deepcopy(items[0]) to_run["sam_ref"] = dd.get_ref_file(to_run) to_run["group_orig"] = items parallel_split_combine([[to_run]], split_fn, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"]) return final_file
[ "def", "_run_variantcall_batch_multicore", "(", "items", ",", "regions", ",", "final_file", ")", ":", "batch_name", "=", "_get_batch_name", "(", "items", ")", "variantcaller", "=", "_get_batch_variantcaller", "(", "items", ")", "work_bams", "=", "[", "dd", ".", ...
Run variant calling on a batch of items using multiple cores.
[ "Run", "variant", "calling", "on", "a", "batch", "of", "items", "using", "multiple", "cores", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L462-L486
237,655
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
create
def create(parallel, dirs, config): """Create a cluster based on the provided parallel arguments. Returns an IPython view on the cluster, enabling processing on jobs. Adds a mincores specification if he have machines with a larger number of cores to allow jobs to be batched together for shared memory usage. """ profile_dir = utils.safe_makedir(os.path.join(dirs["work"], get_log_dir(config), "ipython")) has_mincores = any(x.startswith("mincores=") for x in parallel["resources"]) cores = min(_get_common_cores(config["resources"]), parallel["system_cores"]) if cores > 1 and not has_mincores: adj_cores = max(1, int(math.floor(cores * float(parallel.get("mem_pct", 1.0))))) # if we have less scheduled cores than per machine, use the scheduled count if cores > parallel["cores"]: cores = parallel["cores"] # if we have less total cores required for the entire process, use that elif adj_cores > parallel["num_jobs"] * parallel["cores_per_job"]: cores = parallel["num_jobs"] * parallel["cores_per_job"] else: cores = adj_cores cores = per_machine_target_cores(cores, parallel["num_jobs"]) parallel["resources"].append("mincores=%s" % cores) return ipython_cluster.cluster_view(parallel["scheduler"].lower(), parallel["queue"], parallel["num_jobs"], parallel["cores_per_job"], profile=profile_dir, start_wait=parallel["timeout"], extra_params={"resources": parallel["resources"], "mem": parallel["mem"], "tag": parallel.get("tag"), "run_local": parallel.get("run_local"), "local_controller": parallel.get("local_controller")}, retries=parallel.get("retries"))
python
def create(parallel, dirs, config): profile_dir = utils.safe_makedir(os.path.join(dirs["work"], get_log_dir(config), "ipython")) has_mincores = any(x.startswith("mincores=") for x in parallel["resources"]) cores = min(_get_common_cores(config["resources"]), parallel["system_cores"]) if cores > 1 and not has_mincores: adj_cores = max(1, int(math.floor(cores * float(parallel.get("mem_pct", 1.0))))) # if we have less scheduled cores than per machine, use the scheduled count if cores > parallel["cores"]: cores = parallel["cores"] # if we have less total cores required for the entire process, use that elif adj_cores > parallel["num_jobs"] * parallel["cores_per_job"]: cores = parallel["num_jobs"] * parallel["cores_per_job"] else: cores = adj_cores cores = per_machine_target_cores(cores, parallel["num_jobs"]) parallel["resources"].append("mincores=%s" % cores) return ipython_cluster.cluster_view(parallel["scheduler"].lower(), parallel["queue"], parallel["num_jobs"], parallel["cores_per_job"], profile=profile_dir, start_wait=parallel["timeout"], extra_params={"resources": parallel["resources"], "mem": parallel["mem"], "tag": parallel.get("tag"), "run_local": parallel.get("run_local"), "local_controller": parallel.get("local_controller")}, retries=parallel.get("retries"))
[ "def", "create", "(", "parallel", ",", "dirs", ",", "config", ")", ":", "profile_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dirs", "[", "\"work\"", "]", ",", "get_log_dir", "(", "config", ")", ",", "\"ipython\""...
Create a cluster based on the provided parallel arguments. Returns an IPython view on the cluster, enabling processing on jobs. Adds a mincores specification if he have machines with a larger number of cores to allow jobs to be batched together for shared memory usage.
[ "Create", "a", "cluster", "based", "on", "the", "provided", "parallel", "arguments", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L33-L65
237,656
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
per_machine_target_cores
def per_machine_target_cores(cores, num_jobs): """Select target cores on larger machines to leave room for batch script and controller. On resource constrained environments, we want to pack all bcbio submissions onto a specific number of machines. This gives up some cores to enable sharing cores with the controller and batch script on larger machines. """ if cores >= 32 and num_jobs == 1: cores = cores - 2 elif cores >= 16 and num_jobs in [1, 2]: cores = cores - 1 return cores
python
def per_machine_target_cores(cores, num_jobs): if cores >= 32 and num_jobs == 1: cores = cores - 2 elif cores >= 16 and num_jobs in [1, 2]: cores = cores - 1 return cores
[ "def", "per_machine_target_cores", "(", "cores", ",", "num_jobs", ")", ":", "if", "cores", ">=", "32", "and", "num_jobs", "==", "1", ":", "cores", "=", "cores", "-", "2", "elif", "cores", ">=", "16", "and", "num_jobs", "in", "[", "1", ",", "2", "]", ...
Select target cores on larger machines to leave room for batch script and controller. On resource constrained environments, we want to pack all bcbio submissions onto a specific number of machines. This gives up some cores to enable sharing cores with the controller and batch script on larger machines.
[ "Select", "target", "cores", "on", "larger", "machines", "to", "leave", "room", "for", "batch", "script", "and", "controller", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L67-L78
237,657
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
_get_common_cores
def _get_common_cores(resources): """Retrieve the most common configured number of cores in the input file. """ all_cores = [] for vs in resources.values(): cores = vs.get("cores") if cores: all_cores.append(int(vs["cores"])) return collections.Counter(all_cores).most_common(1)[0][0]
python
def _get_common_cores(resources): all_cores = [] for vs in resources.values(): cores = vs.get("cores") if cores: all_cores.append(int(vs["cores"])) return collections.Counter(all_cores).most_common(1)[0][0]
[ "def", "_get_common_cores", "(", "resources", ")", ":", "all_cores", "=", "[", "]", "for", "vs", "in", "resources", ".", "values", "(", ")", ":", "cores", "=", "vs", ".", "get", "(", "\"cores\"", ")", "if", "cores", ":", "all_cores", ".", "append", "...
Retrieve the most common configured number of cores in the input file.
[ "Retrieve", "the", "most", "common", "configured", "number", "of", "cores", "in", "the", "input", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L80-L88
237,658
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
zip_args
def zip_args(args, config=None): """Compress arguments using msgpack. """ if msgpack: return [msgpack.packb(x, use_single_float=True, use_bin_type=True) for x in args] else: return args
python
def zip_args(args, config=None): if msgpack: return [msgpack.packb(x, use_single_float=True, use_bin_type=True) for x in args] else: return args
[ "def", "zip_args", "(", "args", ",", "config", "=", "None", ")", ":", "if", "msgpack", ":", "return", "[", "msgpack", ".", "packb", "(", "x", ",", "use_single_float", "=", "True", ",", "use_bin_type", "=", "True", ")", "for", "x", "in", "args", "]", ...
Compress arguments using msgpack.
[ "Compress", "arguments", "using", "msgpack", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L103-L109
237,659
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
runner
def runner(view, parallel, dirs, config): """Run a task on an ipython parallel cluster, allowing alternative queue types. view provides map-style access to an existing Ipython cluster. """ def run(fn_name, items): setpath.prepend_bcbiopath() out = [] fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (_get_ipython_fn(fn_name, parallel), fn_name) items = [x for x in items if x is not None] items = diagnostics.track_parallel(items, fn_name) logger.info("ipython: %s" % fn_name) if len(items) > 0: items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"], parallel) for x in items] if "wrapper" in parallel: wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources"])} items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items] items = zip_args([args for args in items]) for data in view.map_sync(fn, items, track=False): if data: out.extend(unzip_args(data)) return out return run
python
def runner(view, parallel, dirs, config): def run(fn_name, items): setpath.prepend_bcbiopath() out = [] fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (_get_ipython_fn(fn_name, parallel), fn_name) items = [x for x in items if x is not None] items = diagnostics.track_parallel(items, fn_name) logger.info("ipython: %s" % fn_name) if len(items) > 0: items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"], parallel) for x in items] if "wrapper" in parallel: wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources"])} items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items] items = zip_args([args for args in items]) for data in view.map_sync(fn, items, track=False): if data: out.extend(unzip_args(data)) return out return run
[ "def", "runner", "(", "view", ",", "parallel", ",", "dirs", ",", "config", ")", ":", "def", "run", "(", "fn_name", ",", "items", ")", ":", "setpath", ".", "prepend_bcbiopath", "(", ")", "out", "=", "[", "]", "fn", ",", "fn_name", "=", "(", "fn_name...
Run a task on an ipython parallel cluster, allowing alternative queue types. view provides map-style access to an existing Ipython cluster.
[ "Run", "a", "task", "on", "an", "ipython", "parallel", "cluster", "allowing", "alternative", "queue", "types", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L119-L141
237,660
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
peakcall_prepare
def peakcall_prepare(data, run_parallel): """Entry point for doing peak calling""" caller_fns = get_callers() to_process = [] for sample in data: mimic = copy.copy(sample[0]) callers = dd.get_peakcaller(sample[0]) if not isinstance(callers, list): callers = [callers] for caller in callers: if caller in caller_fns: mimic["peak_fn"] = caller name = dd.get_sample_name(mimic) mimic = _check(mimic, data) if mimic: to_process.append(mimic) else: logger.info("Skipping peak calling. No input sample for %s" % name) if to_process: after_process = run_parallel("peakcalling", to_process) data = _sync(data, after_process) return data
python
def peakcall_prepare(data, run_parallel): caller_fns = get_callers() to_process = [] for sample in data: mimic = copy.copy(sample[0]) callers = dd.get_peakcaller(sample[0]) if not isinstance(callers, list): callers = [callers] for caller in callers: if caller in caller_fns: mimic["peak_fn"] = caller name = dd.get_sample_name(mimic) mimic = _check(mimic, data) if mimic: to_process.append(mimic) else: logger.info("Skipping peak calling. No input sample for %s" % name) if to_process: after_process = run_parallel("peakcalling", to_process) data = _sync(data, after_process) return data
[ "def", "peakcall_prepare", "(", "data", ",", "run_parallel", ")", ":", "caller_fns", "=", "get_callers", "(", ")", "to_process", "=", "[", "]", "for", "sample", "in", "data", ":", "mimic", "=", "copy", ".", "copy", "(", "sample", "[", "0", "]", ")", ...
Entry point for doing peak calling
[ "Entry", "point", "for", "doing", "peak", "calling" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L22-L43
237,661
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
calling
def calling(data): """Main function to parallelize peak calling.""" chip_bam = data.get("work_bam") input_bam = data.get("work_bam_input", None) caller_fn = get_callers()[data["peak_fn"]] name = dd.get_sample_name(data) out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), data["peak_fn"], name)) out_files = caller_fn(name, chip_bam, input_bam, dd.get_genome_build(data), out_dir, dd.get_chip_method(data), data["resources"], data) greylistdir = greylisting(data) data.update({"peaks_files": out_files}) # data["input_bam_filter"] = input_bam if greylistdir: data["greylist"] = greylistdir return [[data]]
python
def calling(data): chip_bam = data.get("work_bam") input_bam = data.get("work_bam_input", None) caller_fn = get_callers()[data["peak_fn"]] name = dd.get_sample_name(data) out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), data["peak_fn"], name)) out_files = caller_fn(name, chip_bam, input_bam, dd.get_genome_build(data), out_dir, dd.get_chip_method(data), data["resources"], data) greylistdir = greylisting(data) data.update({"peaks_files": out_files}) # data["input_bam_filter"] = input_bam if greylistdir: data["greylist"] = greylistdir return [[data]]
[ "def", "calling", "(", "data", ")", ":", "chip_bam", "=", "data", ".", "get", "(", "\"work_bam\"", ")", "input_bam", "=", "data", ".", "get", "(", "\"work_bam_input\"", ",", "None", ")", "caller_fn", "=", "get_callers", "(", ")", "[", "data", "[", "\"p...
Main function to parallelize peak calling.
[ "Main", "function", "to", "parallelize", "peak", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L45-L59
237,662
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
_sync
def _sync(original, processed): """ Add output to data if run sucessfully. For now only macs2 is available, so no need to consider multiple callers. """ for original_sample in original: original_sample[0]["peaks_files"] = {} for process_sample in processed: if dd.get_sample_name(original_sample[0]) == dd.get_sample_name(process_sample[0]): for key in ["peaks_files"]: if process_sample[0].get(key): original_sample[0][key] = process_sample[0][key] return original
python
def _sync(original, processed): for original_sample in original: original_sample[0]["peaks_files"] = {} for process_sample in processed: if dd.get_sample_name(original_sample[0]) == dd.get_sample_name(process_sample[0]): for key in ["peaks_files"]: if process_sample[0].get(key): original_sample[0][key] = process_sample[0][key] return original
[ "def", "_sync", "(", "original", ",", "processed", ")", ":", "for", "original_sample", "in", "original", ":", "original_sample", "[", "0", "]", "[", "\"peaks_files\"", "]", "=", "{", "}", "for", "process_sample", "in", "processed", ":", "if", "dd", ".", ...
Add output to data if run sucessfully. For now only macs2 is available, so no need to consider multiple callers.
[ "Add", "output", "to", "data", "if", "run", "sucessfully", ".", "For", "now", "only", "macs2", "is", "available", "so", "no", "need", "to", "consider", "multiple", "callers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L61-L74
237,663
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
_check
def _check(sample, data): """Get input sample for each chip bam file.""" if dd.get_chip_method(sample).lower() == "atac": return [sample] if dd.get_phenotype(sample) == "input": return None for origin in data: if dd.get_batch(sample) in (dd.get_batches(origin[0]) or []) and dd.get_phenotype(origin[0]) == "input": sample["work_bam_input"] = origin[0].get("work_bam") return [sample] return [sample]
python
def _check(sample, data): if dd.get_chip_method(sample).lower() == "atac": return [sample] if dd.get_phenotype(sample) == "input": return None for origin in data: if dd.get_batch(sample) in (dd.get_batches(origin[0]) or []) and dd.get_phenotype(origin[0]) == "input": sample["work_bam_input"] = origin[0].get("work_bam") return [sample] return [sample]
[ "def", "_check", "(", "sample", ",", "data", ")", ":", "if", "dd", ".", "get_chip_method", "(", "sample", ")", ".", "lower", "(", ")", "==", "\"atac\"", ":", "return", "[", "sample", "]", "if", "dd", ".", "get_phenotype", "(", "sample", ")", "==", ...
Get input sample for each chip bam file.
[ "Get", "input", "sample", "for", "each", "chip", "bam", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L76-L86
237,664
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
_get_multiplier
def _get_multiplier(samples): """Get multiplier to get jobs only for samples that have input """ to_process = 1.0 to_skip = 0 for sample in samples: if dd.get_phenotype(sample[0]) == "chip": to_process += 1.0 elif dd.get_chip_method(sample[0]).lower() == "atac": to_process += 1.0 else: to_skip += 1.0 mult = (to_process - to_skip) / len(samples) if mult <= 0: mult = 1 / len(samples) return max(mult, 1)
python
def _get_multiplier(samples): to_process = 1.0 to_skip = 0 for sample in samples: if dd.get_phenotype(sample[0]) == "chip": to_process += 1.0 elif dd.get_chip_method(sample[0]).lower() == "atac": to_process += 1.0 else: to_skip += 1.0 mult = (to_process - to_skip) / len(samples) if mult <= 0: mult = 1 / len(samples) return max(mult, 1)
[ "def", "_get_multiplier", "(", "samples", ")", ":", "to_process", "=", "1.0", "to_skip", "=", "0", "for", "sample", "in", "samples", ":", "if", "dd", ".", "get_phenotype", "(", "sample", "[", "0", "]", ")", "==", "\"chip\"", ":", "to_process", "+=", "1...
Get multiplier to get jobs only for samples that have input
[ "Get", "multiplier", "to", "get", "jobs", "only", "for", "samples", "that", "have", "input" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L88-L104
237,665
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
greylisting
def greylisting(data): """ Run ChIP-seq greylisting """ input_bam = data.get("work_bam_input", None) if not input_bam: logger.info("No input BAM file detected, skipping greylisting.") return None try: greylister = config_utils.get_program("chipseq-greylist", data) except config_utils.CmdNotFound: logger.info("No greylister found, skipping greylisting.") return None greylistdir = os.path.join(os.path.dirname(input_bam), "greylist") if os.path.exists(greylistdir): return greylistdir cmd = "{greylister} --outdir {txgreylistdir} {input_bam}" message = "Running greylisting on %s." % input_bam with file_transaction(greylistdir) as txgreylistdir: utils.safe_makedir(txgreylistdir) try: do.run(cmd.format(**locals()), message) except subprocess.CalledProcessError as msg: if str(msg).find("Cannot take a larger sample than population when 'replace=False'") >= 0: logger.info("Skipping chipseq greylisting because of small sample size: %s" % dd.get_sample_name(data)) return None return greylistdir
python
def greylisting(data): input_bam = data.get("work_bam_input", None) if not input_bam: logger.info("No input BAM file detected, skipping greylisting.") return None try: greylister = config_utils.get_program("chipseq-greylist", data) except config_utils.CmdNotFound: logger.info("No greylister found, skipping greylisting.") return None greylistdir = os.path.join(os.path.dirname(input_bam), "greylist") if os.path.exists(greylistdir): return greylistdir cmd = "{greylister} --outdir {txgreylistdir} {input_bam}" message = "Running greylisting on %s." % input_bam with file_transaction(greylistdir) as txgreylistdir: utils.safe_makedir(txgreylistdir) try: do.run(cmd.format(**locals()), message) except subprocess.CalledProcessError as msg: if str(msg).find("Cannot take a larger sample than population when 'replace=False'") >= 0: logger.info("Skipping chipseq greylisting because of small sample size: %s" % dd.get_sample_name(data)) return None return greylistdir
[ "def", "greylisting", "(", "data", ")", ":", "input_bam", "=", "data", ".", "get", "(", "\"work_bam_input\"", ",", "None", ")", "if", "not", "input_bam", ":", "logger", ".", "info", "(", "\"No input BAM file detected, skipping greylisting.\"", ")", "return", "No...
Run ChIP-seq greylisting
[ "Run", "ChIP", "-", "seq", "greylisting" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L106-L133
237,666
bcbio/bcbio-nextgen
bcbio/distributed/clargs.py
to_parallel
def to_parallel(args, module="bcbio.distributed"): """Convert input arguments into a parallel dictionary for passing to processing. """ ptype, cores = _get_cores_and_type(args.numcores, getattr(args, "paralleltype", None), args.scheduler) local_controller = getattr(args, "local_controller", False) parallel = {"type": ptype, "cores": cores, "scheduler": args.scheduler, "queue": args.queue, "tag": args.tag, "module": module, "resources": args.resources, "timeout": args.timeout, "retries": args.retries, "run_local": args.queue == "localrun", "local_controller": local_controller} return parallel
python
def to_parallel(args, module="bcbio.distributed"): ptype, cores = _get_cores_and_type(args.numcores, getattr(args, "paralleltype", None), args.scheduler) local_controller = getattr(args, "local_controller", False) parallel = {"type": ptype, "cores": cores, "scheduler": args.scheduler, "queue": args.queue, "tag": args.tag, "module": module, "resources": args.resources, "timeout": args.timeout, "retries": args.retries, "run_local": args.queue == "localrun", "local_controller": local_controller} return parallel
[ "def", "to_parallel", "(", "args", ",", "module", "=", "\"bcbio.distributed\"", ")", ":", "ptype", ",", "cores", "=", "_get_cores_and_type", "(", "args", ".", "numcores", ",", "getattr", "(", "args", ",", "\"paralleltype\"", ",", "None", ")", ",", "args", ...
Convert input arguments into a parallel dictionary for passing to processing.
[ "Convert", "input", "arguments", "into", "a", "parallel", "dictionary", "for", "passing", "to", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/clargs.py#L4-L18
237,667
bcbio/bcbio-nextgen
bcbio/distributed/clargs.py
_get_cores_and_type
def _get_cores_and_type(numcores, paralleltype, scheduler): """Return core and parallelization approach from command line providing sane defaults. """ if scheduler is not None: paralleltype = "ipython" if paralleltype is None: paralleltype = "local" if not numcores or int(numcores) < 1: numcores = 1 return paralleltype, int(numcores)
python
def _get_cores_and_type(numcores, paralleltype, scheduler): if scheduler is not None: paralleltype = "ipython" if paralleltype is None: paralleltype = "local" if not numcores or int(numcores) < 1: numcores = 1 return paralleltype, int(numcores)
[ "def", "_get_cores_and_type", "(", "numcores", ",", "paralleltype", ",", "scheduler", ")", ":", "if", "scheduler", "is", "not", "None", ":", "paralleltype", "=", "\"ipython\"", "if", "paralleltype", "is", "None", ":", "paralleltype", "=", "\"local\"", "if", "n...
Return core and parallelization approach from command line providing sane defaults.
[ "Return", "core", "and", "parallelization", "approach", "from", "command", "line", "providing", "sane", "defaults", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/clargs.py#L20-L29
237,668
bcbio/bcbio-nextgen
bcbio/ngsalign/tophat.py
_fix_mates
def _fix_mates(orig_file, out_file, ref_file, config): """Fix problematic unmapped mate pairs in TopHat output. TopHat 2.0.9 appears to have issues with secondary reads: https://groups.google.com/forum/#!topic/tuxedo-tools-users/puLfDNbN9bo This cleans the input file to only keep properly mapped pairs, providing a general fix that will handle correctly mapped secondary reads as well. """ if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: samtools = config_utils.get_program("samtools", config) cmd = "{samtools} view -bS -h -t {ref_file}.fai -F 8 {orig_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Fix mate pairs in TopHat output", {}) return out_file
python
def _fix_mates(orig_file, out_file, ref_file, config): if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: samtools = config_utils.get_program("samtools", config) cmd = "{samtools} view -bS -h -t {ref_file}.fai -F 8 {orig_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Fix mate pairs in TopHat output", {}) return out_file
[ "def", "_fix_mates", "(", "orig_file", ",", "out_file", ",", "ref_file", ",", "config", ")", ":", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file", ":", "samtools", ...
Fix problematic unmapped mate pairs in TopHat output. TopHat 2.0.9 appears to have issues with secondary reads: https://groups.google.com/forum/#!topic/tuxedo-tools-users/puLfDNbN9bo This cleans the input file to only keep properly mapped pairs, providing a general fix that will handle correctly mapped secondary reads as well.
[ "Fix", "problematic", "unmapped", "mate", "pairs", "in", "TopHat", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/tophat.py#L173-L187
237,669
bcbio/bcbio-nextgen
bcbio/ngsalign/tophat.py
_add_rg
def _add_rg(unmapped_file, config, names): """Add the missing RG header.""" picard = broad.runner_from_path("picard", config) rg_fixed = picard.run_fn("picard_fix_rgs", unmapped_file, names) return rg_fixed
python
def _add_rg(unmapped_file, config, names): picard = broad.runner_from_path("picard", config) rg_fixed = picard.run_fn("picard_fix_rgs", unmapped_file, names) return rg_fixed
[ "def", "_add_rg", "(", "unmapped_file", ",", "config", ",", "names", ")", ":", "picard", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "config", ")", "rg_fixed", "=", "picard", ".", "run_fn", "(", "\"picard_fix_rgs\"", ",", "unmapped_file", ...
Add the missing RG header.
[ "Add", "the", "missing", "RG", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/tophat.py#L189-L193
237,670
bcbio/bcbio-nextgen
bcbio/ngsalign/tophat.py
_estimate_paired_innerdist
def _estimate_paired_innerdist(fastq_file, pair_file, ref_file, out_base, out_dir, data): """Use Bowtie to estimate the inner distance of paired reads. """ mean, stdev = _bowtie_for_innerdist("100000", fastq_file, pair_file, ref_file, out_base, out_dir, data, True) if not mean or not stdev: mean, stdev = _bowtie_for_innerdist("1", fastq_file, pair_file, ref_file, out_base, out_dir, data, True) # No reads aligning so no data to process, set some default values if not mean or not stdev: mean, stdev = 200, 50 return mean, stdev
python
def _estimate_paired_innerdist(fastq_file, pair_file, ref_file, out_base, out_dir, data): mean, stdev = _bowtie_for_innerdist("100000", fastq_file, pair_file, ref_file, out_base, out_dir, data, True) if not mean or not stdev: mean, stdev = _bowtie_for_innerdist("1", fastq_file, pair_file, ref_file, out_base, out_dir, data, True) # No reads aligning so no data to process, set some default values if not mean or not stdev: mean, stdev = 200, 50 return mean, stdev
[ "def", "_estimate_paired_innerdist", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "out_base", ",", "out_dir", ",", "data", ")", ":", "mean", ",", "stdev", "=", "_bowtie_for_innerdist", "(", "\"100000\"", ",", "fastq_file", ",", "pair_file", ",", ...
Use Bowtie to estimate the inner distance of paired reads.
[ "Use", "Bowtie", "to", "estimate", "the", "inner", "distance", "of", "paired", "reads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/tophat.py#L230-L243
237,671
bcbio/bcbio-nextgen
bcbio/ngsalign/tophat.py
fix_insert_size
def fix_insert_size(in_bam, config): """ Tophat sets PI in the RG to be the inner distance size, but the SAM spec states should be the insert size. This fixes the RG in the alignment file generated by Tophat header to match the spec """ fixed_file = os.path.splitext(in_bam)[0] + ".pi_fixed.bam" if file_exists(fixed_file): return fixed_file header_file = os.path.splitext(in_bam)[0] + ".header.sam" read_length = bam.estimate_read_length(in_bam) bam_handle= bam.open_samfile(in_bam) header = bam_handle.header.copy() rg_dict = header['RG'][0] if 'PI' not in rg_dict: return in_bam PI = int(rg_dict.get('PI')) PI = PI + 2*read_length rg_dict['PI'] = PI header['RG'][0] = rg_dict with pysam.Samfile(header_file, "wb", header=header) as out_handle: with bam.open_samfile(in_bam) as in_handle: for record in in_handle: out_handle.write(record) shutil.move(header_file, fixed_file) return fixed_file
python
def fix_insert_size(in_bam, config): fixed_file = os.path.splitext(in_bam)[0] + ".pi_fixed.bam" if file_exists(fixed_file): return fixed_file header_file = os.path.splitext(in_bam)[0] + ".header.sam" read_length = bam.estimate_read_length(in_bam) bam_handle= bam.open_samfile(in_bam) header = bam_handle.header.copy() rg_dict = header['RG'][0] if 'PI' not in rg_dict: return in_bam PI = int(rg_dict.get('PI')) PI = PI + 2*read_length rg_dict['PI'] = PI header['RG'][0] = rg_dict with pysam.Samfile(header_file, "wb", header=header) as out_handle: with bam.open_samfile(in_bam) as in_handle: for record in in_handle: out_handle.write(record) shutil.move(header_file, fixed_file) return fixed_file
[ "def", "fix_insert_size", "(", "in_bam", ",", "config", ")", ":", "fixed_file", "=", "os", ".", "path", ".", "splitext", "(", "in_bam", ")", "[", "0", "]", "+", "\".pi_fixed.bam\"", "if", "file_exists", "(", "fixed_file", ")", ":", "return", "fixed_file", ...
Tophat sets PI in the RG to be the inner distance size, but the SAM spec states should be the insert size. This fixes the RG in the alignment file generated by Tophat header to match the spec
[ "Tophat", "sets", "PI", "in", "the", "RG", "to", "be", "the", "inner", "distance", "size", "but", "the", "SAM", "spec", "states", "should", "be", "the", "insert", "size", ".", "This", "fixes", "the", "RG", "in", "the", "alignment", "file", "generated", ...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/tophat.py#L344-L369
237,672
bcbio/bcbio-nextgen
bcbio/variation/damage.py
_filter_to_info
def _filter_to_info(in_file, data): """Move DKFZ filter information into INFO field. """ header = ("""##INFO=<ID=DKFZBias,Number=.,Type=String,""" """Description="Bias estimation based on unequal read support from DKFZBiasFilterVariant Depth">\n""") out_file = "%s-ann.vcf" % utils.splitext_plus(in_file)[0] if not utils.file_uptodate(out_file, in_file) and not utils.file_uptodate(out_file + ".gz", in_file): with file_transaction(data, out_file) as tx_out_file: with utils.open_gzipsafe(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: if line.startswith("#CHROM"): out_handle.write(header + line) elif line.startswith("#"): out_handle.write(line) else: out_handle.write(_rec_filter_to_info(line)) return vcfutils.bgzip_and_index(out_file, data["config"])
python
def _filter_to_info(in_file, data): header = ("""##INFO=<ID=DKFZBias,Number=.,Type=String,""" """Description="Bias estimation based on unequal read support from DKFZBiasFilterVariant Depth">\n""") out_file = "%s-ann.vcf" % utils.splitext_plus(in_file)[0] if not utils.file_uptodate(out_file, in_file) and not utils.file_uptodate(out_file + ".gz", in_file): with file_transaction(data, out_file) as tx_out_file: with utils.open_gzipsafe(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: if line.startswith("#CHROM"): out_handle.write(header + line) elif line.startswith("#"): out_handle.write(line) else: out_handle.write(_rec_filter_to_info(line)) return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_filter_to_info", "(", "in_file", ",", "data", ")", ":", "header", "=", "(", "\"\"\"##INFO=<ID=DKFZBias,Number=.,Type=String,\"\"\"", "\"\"\"Description=\"Bias estimation based on unequal read support from DKFZBiasFilterVariant Depth\">\\n\"\"\"", ")", "out_file", "=", "\"%s-...
Move DKFZ filter information into INFO field.
[ "Move", "DKFZ", "filter", "information", "into", "INFO", "field", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/damage.py#L46-L63
237,673
bcbio/bcbio-nextgen
bcbio/variation/damage.py
_rec_filter_to_info
def _rec_filter_to_info(line): """Move a DKFZBias filter to the INFO field, for a record. """ parts = line.rstrip().split("\t") move_filters = {"bSeq": "strand", "bPcr": "damage"} new_filters = [] bias_info = [] for f in parts[6].split(";"): if f in move_filters: bias_info.append(move_filters[f]) elif f not in ["."]: new_filters.append(f) if bias_info: parts[7] += ";DKFZBias=%s" % ",".join(bias_info) parts[6] = ";".join(new_filters or ["PASS"]) return "\t".join(parts) + "\n"
python
def _rec_filter_to_info(line): parts = line.rstrip().split("\t") move_filters = {"bSeq": "strand", "bPcr": "damage"} new_filters = [] bias_info = [] for f in parts[6].split(";"): if f in move_filters: bias_info.append(move_filters[f]) elif f not in ["."]: new_filters.append(f) if bias_info: parts[7] += ";DKFZBias=%s" % ",".join(bias_info) parts[6] = ";".join(new_filters or ["PASS"]) return "\t".join(parts) + "\n"
[ "def", "_rec_filter_to_info", "(", "line", ")", ":", "parts", "=", "line", ".", "rstrip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "move_filters", "=", "{", "\"bSeq\"", ":", "\"strand\"", ",", "\"bPcr\"", ":", "\"damage\"", "}", "new_filters", "=", "...
Move a DKFZBias filter to the INFO field, for a record.
[ "Move", "a", "DKFZBias", "filter", "to", "the", "INFO", "field", "for", "a", "record", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/damage.py#L65-L80
237,674
bcbio/bcbio-nextgen
bcbio/variation/damage.py
should_filter
def should_filter(items): """Check if we should do damage filtering on somatic calling with low frequency events. """ return (vcfutils.get_paired(items) is not None and any("damage_filter" in dd.get_tools_on(d) for d in items))
python
def should_filter(items): return (vcfutils.get_paired(items) is not None and any("damage_filter" in dd.get_tools_on(d) for d in items))
[ "def", "should_filter", "(", "items", ")", ":", "return", "(", "vcfutils", ".", "get_paired", "(", "items", ")", "is", "not", "None", "and", "any", "(", "\"damage_filter\"", "in", "dd", ".", "get_tools_on", "(", "d", ")", "for", "d", "in", "items", ")"...
Check if we should do damage filtering on somatic calling with low frequency events.
[ "Check", "if", "we", "should", "do", "damage", "filtering", "on", "somatic", "calling", "with", "low", "frequency", "events", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/damage.py#L82-L86
237,675
bcbio/bcbio-nextgen
bcbio/provenance/diagnostics.py
start_cmd
def start_cmd(cmd, descr, data): """Retain details about starting a command, returning a command identifier. """ if data and "provenance" in data: entity_id = tz.get_in(["provenance", "entity"], data)
python
def start_cmd(cmd, descr, data): if data and "provenance" in data: entity_id = tz.get_in(["provenance", "entity"], data)
[ "def", "start_cmd", "(", "cmd", ",", "descr", ",", "data", ")", ":", "if", "data", "and", "\"provenance\"", "in", "data", ":", "entity_id", "=", "tz", ".", "get_in", "(", "[", "\"provenance\"", ",", "\"entity\"", "]", ",", "data", ")" ]
Retain details about starting a command, returning a command identifier.
[ "Retain", "details", "about", "starting", "a", "command", "returning", "a", "command", "identifier", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/diagnostics.py#L23-L27
237,676
bcbio/bcbio-nextgen
bcbio/provenance/diagnostics.py
initialize
def initialize(dirs): """Initialize the biolite database to load provenance information. """ if biolite and dirs.get("work"): base_dir = utils.safe_makedir(os.path.join(dirs["work"], "provenance")) p_db = os.path.join(base_dir, "biolite.db") biolite.config.resources["database"] = p_db biolite.database.connect()
python
def initialize(dirs): if biolite and dirs.get("work"): base_dir = utils.safe_makedir(os.path.join(dirs["work"], "provenance")) p_db = os.path.join(base_dir, "biolite.db") biolite.config.resources["database"] = p_db biolite.database.connect()
[ "def", "initialize", "(", "dirs", ")", ":", "if", "biolite", "and", "dirs", ".", "get", "(", "\"work\"", ")", ":", "base_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dirs", "[", "\"work\"", "]", ",", "\"provenan...
Initialize the biolite database to load provenance information.
[ "Initialize", "the", "biolite", "database", "to", "load", "provenance", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/diagnostics.py#L34-L41
237,677
bcbio/bcbio-nextgen
bcbio/provenance/diagnostics.py
track_parallel
def track_parallel(items, sub_type): """Create entity identifiers to trace the given items in sub-commands. Helps handle nesting in parallel program execution: run id => sub-section id => parallel ids """ out = [] for i, args in enumerate(items): item_i, item = _get_provitem_from_args(args) if item: sub_entity = "%s.%s.%s" % (item["provenance"]["entity"], sub_type, i) item["provenance"]["entity"] = sub_entity args = list(args) args[item_i] = item out.append(args) # TODO: store mapping of entity to sub identifiers return out
python
def track_parallel(items, sub_type): out = [] for i, args in enumerate(items): item_i, item = _get_provitem_from_args(args) if item: sub_entity = "%s.%s.%s" % (item["provenance"]["entity"], sub_type, i) item["provenance"]["entity"] = sub_entity args = list(args) args[item_i] = item out.append(args) # TODO: store mapping of entity to sub identifiers return out
[ "def", "track_parallel", "(", "items", ",", "sub_type", ")", ":", "out", "=", "[", "]", "for", "i", ",", "args", "in", "enumerate", "(", "items", ")", ":", "item_i", ",", "item", "=", "_get_provitem_from_args", "(", "args", ")", "if", "item", ":", "s...
Create entity identifiers to trace the given items in sub-commands. Helps handle nesting in parallel program execution: run id => sub-section id => parallel ids
[ "Create", "entity", "identifiers", "to", "trace", "the", "given", "items", "in", "sub", "-", "commands", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/diagnostics.py#L49-L66
237,678
bcbio/bcbio-nextgen
bcbio/provenance/diagnostics.py
_get_provitem_from_args
def _get_provitem_from_args(xs): """Retrieve processed item from list of input arguments. """ for i, x in enumerate(xs): if _has_provenance(x): return i, x return -1, None
python
def _get_provitem_from_args(xs): for i, x in enumerate(xs): if _has_provenance(x): return i, x return -1, None
[ "def", "_get_provitem_from_args", "(", "xs", ")", ":", "for", "i", ",", "x", "in", "enumerate", "(", "xs", ")", ":", "if", "_has_provenance", "(", "x", ")", ":", "return", "i", ",", "x", "return", "-", "1", ",", "None" ]
Retrieve processed item from list of input arguments.
[ "Retrieve", "processed", "item", "from", "list", "of", "input", "arguments", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/diagnostics.py#L71-L77
237,679
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
handle_vcf_calls
def handle_vcf_calls(vcf_file, data, orig_items): """Prioritize VCF calls based on external annotations supplied through GEMINI. """ if not _do_prioritize(orig_items): return vcf_file else: ann_vcf = population.run_vcfanno(vcf_file, data) if ann_vcf: priority_file = _prep_priority_filter_vcfanno(ann_vcf, data) return _apply_priority_filter(ann_vcf, priority_file, data) # No data available for filtering, return original file else: return vcf_file
python
def handle_vcf_calls(vcf_file, data, orig_items): if not _do_prioritize(orig_items): return vcf_file else: ann_vcf = population.run_vcfanno(vcf_file, data) if ann_vcf: priority_file = _prep_priority_filter_vcfanno(ann_vcf, data) return _apply_priority_filter(ann_vcf, priority_file, data) # No data available for filtering, return original file else: return vcf_file
[ "def", "handle_vcf_calls", "(", "vcf_file", ",", "data", ",", "orig_items", ")", ":", "if", "not", "_do_prioritize", "(", "orig_items", ")", ":", "return", "vcf_file", "else", ":", "ann_vcf", "=", "population", ".", "run_vcfanno", "(", "vcf_file", ",", "data...
Prioritize VCF calls based on external annotations supplied through GEMINI.
[ "Prioritize", "VCF", "calls", "based", "on", "external", "annotations", "supplied", "through", "GEMINI", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L27-L39
237,680
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_apply_priority_filter
def _apply_priority_filter(in_file, priority_file, data): """Annotate variants with priority information and use to apply filters. """ out_file = "%s-priority%s" % utils.splitext_plus(in_file) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: header = ('##INFO=<ID=EPR,Number=.,Type=String,' 'Description="Somatic prioritization based on external annotations, ' 'identify as likely germline">') header_file = "%s-repeatheader.txt" % utils.splitext_plus(tx_out_file)[0] with open(header_file, "w") as out_handle: out_handle.write(header) if "tumoronly_germline_filter" in dd.get_tools_on(data): filter_cmd = ("bcftools filter -m '+' -s 'LowPriority' " """-e "EPR[0] != 'pass'" |""") else: filter_cmd = "" cmd = ("bcftools annotate -a {priority_file} -h {header_file} " "-c CHROM,FROM,TO,REF,ALT,INFO/EPR {in_file} | " "{filter_cmd} bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Run external annotation based prioritization filtering") vcfutils.bgzip_and_index(out_file, data["config"]) return out_file
python
def _apply_priority_filter(in_file, priority_file, data): out_file = "%s-priority%s" % utils.splitext_plus(in_file) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: header = ('##INFO=<ID=EPR,Number=.,Type=String,' 'Description="Somatic prioritization based on external annotations, ' 'identify as likely germline">') header_file = "%s-repeatheader.txt" % utils.splitext_plus(tx_out_file)[0] with open(header_file, "w") as out_handle: out_handle.write(header) if "tumoronly_germline_filter" in dd.get_tools_on(data): filter_cmd = ("bcftools filter -m '+' -s 'LowPriority' " """-e "EPR[0] != 'pass'" |""") else: filter_cmd = "" cmd = ("bcftools annotate -a {priority_file} -h {header_file} " "-c CHROM,FROM,TO,REF,ALT,INFO/EPR {in_file} | " "{filter_cmd} bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Run external annotation based prioritization filtering") vcfutils.bgzip_and_index(out_file, data["config"]) return out_file
[ "def", "_apply_priority_filter", "(", "in_file", ",", "priority_file", ",", "data", ")", ":", "out_file", "=", "\"%s-priority%s\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", ...
Annotate variants with priority information and use to apply filters.
[ "Annotate", "variants", "with", "priority", "information", "and", "use", "to", "apply", "filters", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L41-L63
237,681
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_prep_priority_filter_vcfanno
def _prep_priority_filter_vcfanno(in_vcf, data): """Prepare tabix file with priority filters based on vcfanno annotations. """ pops = ['af_adj_exac_afr', 'af_adj_exac_amr', 'af_adj_exac_eas', 'af_adj_exac_fin', 'af_adj_exac_nfe', 'af_adj_exac_oth', 'af_adj_exac_sas', 'af_exac_all', 'max_aaf_all', "af_esp_ea", "af_esp_aa", "af_esp_all", "af_1kg_amr", "af_1kg_eas", "af_1kg_sas", "af_1kg_afr", "af_1kg_eur", "af_1kg_all"] known = ["cosmic_ids", "cosmic_id", "clinvar_sig"] out_file = "%s-priority.tsv" % utils.splitext_plus(in_vcf)[0] if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") header = ["#chrom", "start", "end", "ref", "alt", "filter"] writer.writerow(header) vcf_reader = cyvcf2.VCF(in_vcf) impact_info = _get_impact_info(vcf_reader) for rec in vcf_reader: row = _prepare_vcf_rec(rec, pops, known, impact_info) cur_filter = _calc_priority_filter(row, pops) writer.writerow([rec.CHROM, rec.start, rec.end, rec.REF, ",".join(rec.ALT), cur_filter]) return vcfutils.bgzip_and_index(out_file, data["config"], tabix_args="-0 -c '#' -s 1 -b 2 -e 3")
python
def _prep_priority_filter_vcfanno(in_vcf, data): pops = ['af_adj_exac_afr', 'af_adj_exac_amr', 'af_adj_exac_eas', 'af_adj_exac_fin', 'af_adj_exac_nfe', 'af_adj_exac_oth', 'af_adj_exac_sas', 'af_exac_all', 'max_aaf_all', "af_esp_ea", "af_esp_aa", "af_esp_all", "af_1kg_amr", "af_1kg_eas", "af_1kg_sas", "af_1kg_afr", "af_1kg_eur", "af_1kg_all"] known = ["cosmic_ids", "cosmic_id", "clinvar_sig"] out_file = "%s-priority.tsv" % utils.splitext_plus(in_vcf)[0] if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") header = ["#chrom", "start", "end", "ref", "alt", "filter"] writer.writerow(header) vcf_reader = cyvcf2.VCF(in_vcf) impact_info = _get_impact_info(vcf_reader) for rec in vcf_reader: row = _prepare_vcf_rec(rec, pops, known, impact_info) cur_filter = _calc_priority_filter(row, pops) writer.writerow([rec.CHROM, rec.start, rec.end, rec.REF, ",".join(rec.ALT), cur_filter]) return vcfutils.bgzip_and_index(out_file, data["config"], tabix_args="-0 -c '#' -s 1 -b 2 -e 3")
[ "def", "_prep_priority_filter_vcfanno", "(", "in_vcf", ",", "data", ")", ":", "pops", "=", "[", "'af_adj_exac_afr'", ",", "'af_adj_exac_amr'", ",", "'af_adj_exac_eas'", ",", "'af_adj_exac_fin'", ",", "'af_adj_exac_nfe'", ",", "'af_adj_exac_oth'", ",", "'af_adj_exac_sas'...
Prepare tabix file with priority filters based on vcfanno annotations.
[ "Prepare", "tabix", "file", "with", "priority", "filters", "based", "on", "vcfanno", "annotations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L65-L88
237,682
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_get_impact_info
def _get_impact_info(vcf_reader): """Retrieve impact parsing information from INFO header. """ ImpactInfo = collections.namedtuple("ImpactInfo", "header, gclass, id") KEY_2_CLASS = { 'CSQ': geneimpacts.VEP, 'ANN': geneimpacts.SnpEff, 'BCSQ': geneimpacts.BCFT} for l in (x.strip() for x in _from_bytes(vcf_reader.raw_header).split("\n")): if l.startswith("##INFO"): patt = re.compile("(\w+)=(\"[^\"]+\"|[^,]+)") stub = l.split("=<")[1].rstrip(">") d = dict(patt.findall(_from_bytes(stub))) if d["ID"] in KEY_2_CLASS: return ImpactInfo(_parse_impact_header(d), KEY_2_CLASS[d["ID"]], d["ID"])
python
def _get_impact_info(vcf_reader): ImpactInfo = collections.namedtuple("ImpactInfo", "header, gclass, id") KEY_2_CLASS = { 'CSQ': geneimpacts.VEP, 'ANN': geneimpacts.SnpEff, 'BCSQ': geneimpacts.BCFT} for l in (x.strip() for x in _from_bytes(vcf_reader.raw_header).split("\n")): if l.startswith("##INFO"): patt = re.compile("(\w+)=(\"[^\"]+\"|[^,]+)") stub = l.split("=<")[1].rstrip(">") d = dict(patt.findall(_from_bytes(stub))) if d["ID"] in KEY_2_CLASS: return ImpactInfo(_parse_impact_header(d), KEY_2_CLASS[d["ID"]], d["ID"])
[ "def", "_get_impact_info", "(", "vcf_reader", ")", ":", "ImpactInfo", "=", "collections", ".", "namedtuple", "(", "\"ImpactInfo\"", ",", "\"header, gclass, id\"", ")", "KEY_2_CLASS", "=", "{", "'CSQ'", ":", "geneimpacts", ".", "VEP", ",", "'ANN'", ":", "geneimpa...
Retrieve impact parsing information from INFO header.
[ "Retrieve", "impact", "parsing", "information", "from", "INFO", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L90-L104
237,683
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_parse_impact_header
def _parse_impact_header(hdr_dict): """Parse fields for impact, taken from vcf2db """ desc = hdr_dict["Description"] if hdr_dict["ID"] == "ANN": parts = [x.strip("\"'") for x in re.split("\s*\|\s*", desc.split(":", 1)[1].strip('" '))] elif hdr_dict["ID"] == "EFF": parts = [x.strip(" [])'(\"") for x in re.split("\||\(", desc.split(":", 1)[1].strip())] elif hdr_dict["ID"] == "CSQ": parts = [x.strip(" [])'(\"") for x in re.split("\||\(", desc.split(":", 1)[1].strip())] elif hdr_dict["ID"] == "BCSQ": parts = desc.split(']', 1)[1].split(']')[0].replace('[','').split("|") else: raise Exception("don't know how to use %s as annotation" % hdr_dict["ID"]) return parts
python
def _parse_impact_header(hdr_dict): desc = hdr_dict["Description"] if hdr_dict["ID"] == "ANN": parts = [x.strip("\"'") for x in re.split("\s*\|\s*", desc.split(":", 1)[1].strip('" '))] elif hdr_dict["ID"] == "EFF": parts = [x.strip(" [])'(\"") for x in re.split("\||\(", desc.split(":", 1)[1].strip())] elif hdr_dict["ID"] == "CSQ": parts = [x.strip(" [])'(\"") for x in re.split("\||\(", desc.split(":", 1)[1].strip())] elif hdr_dict["ID"] == "BCSQ": parts = desc.split(']', 1)[1].split(']')[0].replace('[','').split("|") else: raise Exception("don't know how to use %s as annotation" % hdr_dict["ID"]) return parts
[ "def", "_parse_impact_header", "(", "hdr_dict", ")", ":", "desc", "=", "hdr_dict", "[", "\"Description\"", "]", "if", "hdr_dict", "[", "\"ID\"", "]", "==", "\"ANN\"", ":", "parts", "=", "[", "x", ".", "strip", "(", "\"\\\"'\"", ")", "for", "x", "in", "...
Parse fields for impact, taken from vcf2db
[ "Parse", "fields", "for", "impact", "taken", "from", "vcf2db" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L116-L130
237,684
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_prepare_vcf_rec
def _prepare_vcf_rec(rec, pops, known, impact_info): """Parse a vcfanno output into a dictionary of useful attributes. """ out = {} for k in pops + known: out[k] = rec.INFO.get(k) if impact_info: cur_info = rec.INFO.get(impact_info.id) if cur_info: cur_impacts = [impact_info.gclass(e, impact_info.header) for e in _from_bytes(cur_info).split(",")] top = geneimpacts.Effect.top_severity(cur_impacts) if isinstance(top, list): top = top[0] out["impact_severity"] = top.effect_severity return out
python
def _prepare_vcf_rec(rec, pops, known, impact_info): out = {} for k in pops + known: out[k] = rec.INFO.get(k) if impact_info: cur_info = rec.INFO.get(impact_info.id) if cur_info: cur_impacts = [impact_info.gclass(e, impact_info.header) for e in _from_bytes(cur_info).split(",")] top = geneimpacts.Effect.top_severity(cur_impacts) if isinstance(top, list): top = top[0] out["impact_severity"] = top.effect_severity return out
[ "def", "_prepare_vcf_rec", "(", "rec", ",", "pops", ",", "known", ",", "impact_info", ")", ":", "out", "=", "{", "}", "for", "k", "in", "pops", "+", "known", ":", "out", "[", "k", "]", "=", "rec", ".", "INFO", ".", "get", "(", "k", ")", "if", ...
Parse a vcfanno output into a dictionary of useful attributes.
[ "Parse", "a", "vcfanno", "output", "into", "a", "dictionary", "of", "useful", "attributes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L132-L146
237,685
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_calc_priority_filter
def _calc_priority_filter(row, pops): """Calculate the priority filter based on external associated data. - Pass high/medium impact variants not found in population databases - Pass variants found in COSMIC or Clinvar provided they don't have two additional reasons to filter (found in multiple external populations) """ filters = [] passes = [] passes.extend(_find_known(row)) filters.extend(_known_populations(row, pops)) if len(filters) == 0 or (len(passes) > 0 and len(filters) < 2): passes.insert(0, "pass") return ",".join(passes + filters)
python
def _calc_priority_filter(row, pops): filters = [] passes = [] passes.extend(_find_known(row)) filters.extend(_known_populations(row, pops)) if len(filters) == 0 or (len(passes) > 0 and len(filters) < 2): passes.insert(0, "pass") return ",".join(passes + filters)
[ "def", "_calc_priority_filter", "(", "row", ",", "pops", ")", ":", "filters", "=", "[", "]", "passes", "=", "[", "]", "passes", ".", "extend", "(", "_find_known", "(", "row", ")", ")", "filters", ".", "extend", "(", "_known_populations", "(", "row", ",...
Calculate the priority filter based on external associated data. - Pass high/medium impact variants not found in population databases - Pass variants found in COSMIC or Clinvar provided they don't have two additional reasons to filter (found in multiple external populations)
[ "Calculate", "the", "priority", "filter", "based", "on", "external", "associated", "data", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L148-L161
237,686
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_known_populations
def _known_populations(row, pops): """Find variants present in substantial frequency in population databases. """ cutoff = 0.01 out = set([]) for pop, base in [("esp", "af_esp_all"), ("1000g", "af_1kg_all"), ("exac", "af_exac_all"), ("anypop", "max_aaf_all")]: for key in [x for x in pops if x.startswith(base)]: val = row[key] if val and val > cutoff: out.add(pop) return sorted(list(out))
python
def _known_populations(row, pops): cutoff = 0.01 out = set([]) for pop, base in [("esp", "af_esp_all"), ("1000g", "af_1kg_all"), ("exac", "af_exac_all"), ("anypop", "max_aaf_all")]: for key in [x for x in pops if x.startswith(base)]: val = row[key] if val and val > cutoff: out.add(pop) return sorted(list(out))
[ "def", "_known_populations", "(", "row", ",", "pops", ")", ":", "cutoff", "=", "0.01", "out", "=", "set", "(", "[", "]", ")", "for", "pop", ",", "base", "in", "[", "(", "\"esp\"", ",", "\"af_esp_all\"", ")", ",", "(", "\"1000g\"", ",", "\"af_1kg_all\...
Find variants present in substantial frequency in population databases.
[ "Find", "variants", "present", "in", "substantial", "frequency", "in", "population", "databases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L163-L174
237,687
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_find_known
def _find_known(row): """Find variant present in known pathogenic databases. """ out = [] clinvar_no = set(["unknown", "untested", "non-pathogenic", "probable-non-pathogenic", "uncertain_significance", "uncertain_significance", "not_provided", "benign", "likely_benign"]) if row["cosmic_ids"] or row["cosmic_id"]: out.append("cosmic") if row["clinvar_sig"] and not row["clinvar_sig"].lower() in clinvar_no: out.append("clinvar") return out
python
def _find_known(row): out = [] clinvar_no = set(["unknown", "untested", "non-pathogenic", "probable-non-pathogenic", "uncertain_significance", "uncertain_significance", "not_provided", "benign", "likely_benign"]) if row["cosmic_ids"] or row["cosmic_id"]: out.append("cosmic") if row["clinvar_sig"] and not row["clinvar_sig"].lower() in clinvar_no: out.append("clinvar") return out
[ "def", "_find_known", "(", "row", ")", ":", "out", "=", "[", "]", "clinvar_no", "=", "set", "(", "[", "\"unknown\"", ",", "\"untested\"", ",", "\"non-pathogenic\"", ",", "\"probable-non-pathogenic\"", ",", "\"uncertain_significance\"", ",", "\"uncertain_significance...
Find variant present in known pathogenic databases.
[ "Find", "variant", "present", "in", "known", "pathogenic", "databases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L176-L187
237,688
bcbio/bcbio-nextgen
bcbio/variation/prioritize.py
_do_prioritize
def _do_prioritize(items): """Determine if we should perform prioritization. Currently done on tumor-only input samples and feeding into PureCN which needs the germline annotations. """ if not any("tumoronly-prioritization" in dd.get_tools_off(d) for d in items): if vcfutils.get_paired_phenotype(items[0]): has_tumor = False has_normal = False for sub_data in items: if vcfutils.get_paired_phenotype(sub_data) == "tumor": has_tumor = True elif vcfutils.get_paired_phenotype(sub_data) == "normal": has_normal = True return has_tumor and not has_normal
python
def _do_prioritize(items): if not any("tumoronly-prioritization" in dd.get_tools_off(d) for d in items): if vcfutils.get_paired_phenotype(items[0]): has_tumor = False has_normal = False for sub_data in items: if vcfutils.get_paired_phenotype(sub_data) == "tumor": has_tumor = True elif vcfutils.get_paired_phenotype(sub_data) == "normal": has_normal = True return has_tumor and not has_normal
[ "def", "_do_prioritize", "(", "items", ")", ":", "if", "not", "any", "(", "\"tumoronly-prioritization\"", "in", "dd", ".", "get_tools_off", "(", "d", ")", "for", "d", "in", "items", ")", ":", "if", "vcfutils", ".", "get_paired_phenotype", "(", "items", "["...
Determine if we should perform prioritization. Currently done on tumor-only input samples and feeding into PureCN which needs the germline annotations.
[ "Determine", "if", "we", "should", "perform", "prioritization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/prioritize.py#L189-L204
237,689
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
run_cortex
def run_cortex(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Top level entry to regional de-novo based variant calling with cortex_var. """ raise NotImplementedError("Cortex currently out of date and needs reworking.") if len(align_bams) == 1: align_bam = align_bams[0] config = items[0]["config"] else: raise NotImplementedError("Need to add multisample calling for cortex_var") if out_file is None: out_file = "%s-cortex.vcf" % os.path.splitext(align_bam)[0] if region is not None: work_dir = safe_makedir(os.path.join(os.path.dirname(out_file), region.replace(".", "_"))) else: work_dir = os.path.dirname(out_file) if not file_exists(out_file): bam.index(align_bam, config) variant_regions = config["algorithm"].get("variant_regions", None) if not variant_regions: raise ValueError("Only support regional variant calling with cortex_var: set variant_regions") target_regions = subset_variant_regions(variant_regions, region, out_file) if os.path.isfile(target_regions): with open(target_regions) as in_handle: regional_vcfs = [_run_cortex_on_region(x.strip().split("\t")[:3], align_bam, ref_file, work_dir, out_file, config) for x in in_handle] combine_file = "{0}-raw{1}".format(*os.path.splitext(out_file)) _combine_variants(regional_vcfs, combine_file, ref_file, config) _select_final_variants(combine_file, out_file, config) else: vcfutils.write_empty_vcf(out_file) return out_file
python
def run_cortex(align_bams, items, ref_file, assoc_files, region=None, out_file=None): raise NotImplementedError("Cortex currently out of date and needs reworking.") if len(align_bams) == 1: align_bam = align_bams[0] config = items[0]["config"] else: raise NotImplementedError("Need to add multisample calling for cortex_var") if out_file is None: out_file = "%s-cortex.vcf" % os.path.splitext(align_bam)[0] if region is not None: work_dir = safe_makedir(os.path.join(os.path.dirname(out_file), region.replace(".", "_"))) else: work_dir = os.path.dirname(out_file) if not file_exists(out_file): bam.index(align_bam, config) variant_regions = config["algorithm"].get("variant_regions", None) if not variant_regions: raise ValueError("Only support regional variant calling with cortex_var: set variant_regions") target_regions = subset_variant_regions(variant_regions, region, out_file) if os.path.isfile(target_regions): with open(target_regions) as in_handle: regional_vcfs = [_run_cortex_on_region(x.strip().split("\t")[:3], align_bam, ref_file, work_dir, out_file, config) for x in in_handle] combine_file = "{0}-raw{1}".format(*os.path.splitext(out_file)) _combine_variants(regional_vcfs, combine_file, ref_file, config) _select_final_variants(combine_file, out_file, config) else: vcfutils.write_empty_vcf(out_file) return out_file
[ "def", "run_cortex", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "raise", "NotImplementedError", "(", "\"Cortex currently out of date and needs reworking.\"", ")", "if", ...
Top level entry to regional de-novo based variant calling with cortex_var.
[ "Top", "level", "entry", "to", "regional", "de", "-", "novo", "based", "variant", "calling", "with", "cortex_var", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L28-L62
237,690
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_passes_cortex_depth
def _passes_cortex_depth(line, min_depth): """Do any genotypes in the cortex_var VCF line passes the minimum depth requirement? """ parts = line.split("\t") cov_index = parts[8].split(":").index("COV") passes_depth = False for gt in parts[9:]: cur_cov = gt.split(":")[cov_index] cur_depth = sum(int(x) for x in cur_cov.split(",")) if cur_depth >= min_depth: passes_depth = True return passes_depth
python
def _passes_cortex_depth(line, min_depth): parts = line.split("\t") cov_index = parts[8].split(":").index("COV") passes_depth = False for gt in parts[9:]: cur_cov = gt.split(":")[cov_index] cur_depth = sum(int(x) for x in cur_cov.split(",")) if cur_depth >= min_depth: passes_depth = True return passes_depth
[ "def", "_passes_cortex_depth", "(", "line", ",", "min_depth", ")", ":", "parts", "=", "line", ".", "split", "(", "\"\\t\"", ")", "cov_index", "=", "parts", "[", "8", "]", ".", "split", "(", "\":\"", ")", ".", "index", "(", "\"COV\"", ")", "passes_depth...
Do any genotypes in the cortex_var VCF line passes the minimum depth requirement?
[ "Do", "any", "genotypes", "in", "the", "cortex_var", "VCF", "line", "passes", "the", "minimum", "depth", "requirement?" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L64-L75
237,691
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_select_final_variants
def _select_final_variants(base_vcf, out_vcf, config): """Filter input file, removing items with low depth of support. cortex_var calls are tricky to filter by depth. Count information is in the COV FORMAT field grouped by alleles, so we need to sum up values and compare. """ min_depth = int(config["algorithm"].get("min_depth", 4)) with file_transaction(out_vcf) as tx_out_file: with open(base_vcf) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): passes = True else: passes = _passes_cortex_depth(line, min_depth) if passes: out_handle.write(line) return out_vcf
python
def _select_final_variants(base_vcf, out_vcf, config): min_depth = int(config["algorithm"].get("min_depth", 4)) with file_transaction(out_vcf) as tx_out_file: with open(base_vcf) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): passes = True else: passes = _passes_cortex_depth(line, min_depth) if passes: out_handle.write(line) return out_vcf
[ "def", "_select_final_variants", "(", "base_vcf", ",", "out_vcf", ",", "config", ")", ":", "min_depth", "=", "int", "(", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"min_depth\"", ",", "4", ")", ")", "with", "file_transaction", "(", "out_vcf", ...
Filter input file, removing items with low depth of support. cortex_var calls are tricky to filter by depth. Count information is in the COV FORMAT field grouped by alleles, so we need to sum up values and compare.
[ "Filter", "input", "file", "removing", "items", "with", "low", "depth", "of", "support", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L77-L95
237,692
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_combine_variants
def _combine_variants(in_vcfs, out_file, ref_file, config): """Combine variant files, writing the header from the first non-empty input. in_vcfs is a list with each item starting with the chromosome regions, and ending with the input file. We sort by these regions to ensure the output file is in the expected order. """ in_vcfs.sort() wrote_header = False with open(out_file, "w") as out_handle: for in_vcf in (x[-1] for x in in_vcfs): with open(in_vcf) as in_handle: header = list(itertools.takewhile(lambda x: x.startswith("#"), in_handle)) if not header[0].startswith("##fileformat=VCFv4"): raise ValueError("Unexpected VCF file: %s" % in_vcf) for line in in_handle: if not wrote_header: wrote_header = True out_handle.write("".join(header)) out_handle.write(line) if not wrote_header: out_handle.write("".join(header)) return out_file
python
def _combine_variants(in_vcfs, out_file, ref_file, config): in_vcfs.sort() wrote_header = False with open(out_file, "w") as out_handle: for in_vcf in (x[-1] for x in in_vcfs): with open(in_vcf) as in_handle: header = list(itertools.takewhile(lambda x: x.startswith("#"), in_handle)) if not header[0].startswith("##fileformat=VCFv4"): raise ValueError("Unexpected VCF file: %s" % in_vcf) for line in in_handle: if not wrote_header: wrote_header = True out_handle.write("".join(header)) out_handle.write(line) if not wrote_header: out_handle.write("".join(header)) return out_file
[ "def", "_combine_variants", "(", "in_vcfs", ",", "out_file", ",", "ref_file", ",", "config", ")", ":", "in_vcfs", ".", "sort", "(", ")", "wrote_header", "=", "False", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "for", "in...
Combine variant files, writing the header from the first non-empty input. in_vcfs is a list with each item starting with the chromosome regions, and ending with the input file. We sort by these regions to ensure the output file is in the expected order.
[ "Combine", "variant", "files", "writing", "the", "header", "from", "the", "first", "non", "-", "empty", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L97-L120
237,693
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_remap_cortex_out
def _remap_cortex_out(cortex_out, region, out_file): """Remap coordinates in local cortex variant calls to the original global region. """ def _remap_vcf_line(line, contig, start): parts = line.split("\t") if parts[0] == "" or parts[1] == "": return None parts[0] = contig try: parts[1] = str(int(parts[1]) + start) except ValueError: raise ValueError("Problem in {0} with \n{1}".format( cortex_out, parts)) return "\t".join(parts) def _not_filtered(line): parts = line.split("\t") return parts[6] == "PASS" contig, start, _ = region start = int(start) with open(cortex_out) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("##fileDate"): pass elif line.startswith("#"): out_handle.write(line) elif _not_filtered(line): update_line = _remap_vcf_line(line, contig, start) if update_line: out_handle.write(update_line)
python
def _remap_cortex_out(cortex_out, region, out_file): def _remap_vcf_line(line, contig, start): parts = line.split("\t") if parts[0] == "" or parts[1] == "": return None parts[0] = contig try: parts[1] = str(int(parts[1]) + start) except ValueError: raise ValueError("Problem in {0} with \n{1}".format( cortex_out, parts)) return "\t".join(parts) def _not_filtered(line): parts = line.split("\t") return parts[6] == "PASS" contig, start, _ = region start = int(start) with open(cortex_out) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("##fileDate"): pass elif line.startswith("#"): out_handle.write(line) elif _not_filtered(line): update_line = _remap_vcf_line(line, contig, start) if update_line: out_handle.write(update_line)
[ "def", "_remap_cortex_out", "(", "cortex_out", ",", "region", ",", "out_file", ")", ":", "def", "_remap_vcf_line", "(", "line", ",", "contig", ",", "start", ")", ":", "parts", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "parts", "[", "0", "]"...
Remap coordinates in local cortex variant calls to the original global region.
[ "Remap", "coordinates", "in", "local", "cortex", "variant", "calls", "to", "the", "original", "global", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L159-L188
237,694
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_run_cortex
def _run_cortex(fastq, indexes, params, out_base, dirs, config): """Run cortex_var run_calls.pl, producing a VCF variant file. """ print(out_base) fastaq_index = "{0}.fastaq_index".format(out_base) se_fastq_index = "{0}.se_fastq".format(out_base) pe_fastq_index = "{0}.pe_fastq".format(out_base) reffasta_index = "{0}.list_ref_fasta".format(out_base) with open(se_fastq_index, "w") as out_handle: out_handle.write(fastq + "\n") with open(pe_fastq_index, "w") as out_handle: out_handle.write("") with open(fastaq_index, "w") as out_handle: out_handle.write("{0}\t{1}\t{2}\t{2}\n".format(params["sample"], se_fastq_index, pe_fastq_index)) with open(reffasta_index, "w") as out_handle: for x in indexes["fasta"]: out_handle.write(x + "\n") os.environ["PERL5LIB"] = "{0}:{1}:{2}".format( os.path.join(dirs["cortex"], "scripts/calling"), os.path.join(dirs["cortex"], "scripts/analyse_variants/bioinf-perl/lib"), os.environ.get("PERL5LIB", "")) kmers = sorted(params["kmers"]) kmer_info = ["--first_kmer", str(kmers[0])] if len(kmers) > 1: kmer_info += ["--last_kmer", str(kmers[-1]), "--kmer_step", str(kmers[1] - kmers[0])] subprocess.check_call(["perl", os.path.join(dirs["cortex"], "scripts", "calling", "run_calls.pl"), "--fastaq_index", fastaq_index, "--auto_cleaning", "yes", "--bc", "yes", "--pd", "yes", "--outdir", os.path.dirname(out_base), "--outvcf", os.path.basename(out_base), "--ploidy", str(config["algorithm"].get("ploidy", 2)), "--stampy_hash", indexes["stampy"], "--stampy_bin", os.path.join(dirs["stampy"], "stampy.py"), "--refbindir", os.path.dirname(indexes["cortex"][0]), "--list_ref_fasta", reffasta_index, "--genome_size", str(params["genome_size"]), "--max_read_len", "30000", #"--max_var_len", "4000", "--format", "FASTQ", "--qthresh", "5", "--do_union", "yes", "--mem_height", "17", "--mem_width", "100", "--ref", "CoordinatesAndInCalling", "--workflow", "independent", "--vcftools_dir", dirs["vcftools"], "--logfile", "{0}.logfile,f".format(out_base)] + kmer_info) final = 
glob.glob(os.path.join(os.path.dirname(out_base), "vcfs", "{0}*FINALcombined_BC*decomp.vcf".format(os.path.basename(out_base)))) # No calls, need to setup an empty file if len(final) != 1: print("Did not find output VCF file for {0}".format(out_base)) return None else: return final[0]
python
def _run_cortex(fastq, indexes, params, out_base, dirs, config): print(out_base) fastaq_index = "{0}.fastaq_index".format(out_base) se_fastq_index = "{0}.se_fastq".format(out_base) pe_fastq_index = "{0}.pe_fastq".format(out_base) reffasta_index = "{0}.list_ref_fasta".format(out_base) with open(se_fastq_index, "w") as out_handle: out_handle.write(fastq + "\n") with open(pe_fastq_index, "w") as out_handle: out_handle.write("") with open(fastaq_index, "w") as out_handle: out_handle.write("{0}\t{1}\t{2}\t{2}\n".format(params["sample"], se_fastq_index, pe_fastq_index)) with open(reffasta_index, "w") as out_handle: for x in indexes["fasta"]: out_handle.write(x + "\n") os.environ["PERL5LIB"] = "{0}:{1}:{2}".format( os.path.join(dirs["cortex"], "scripts/calling"), os.path.join(dirs["cortex"], "scripts/analyse_variants/bioinf-perl/lib"), os.environ.get("PERL5LIB", "")) kmers = sorted(params["kmers"]) kmer_info = ["--first_kmer", str(kmers[0])] if len(kmers) > 1: kmer_info += ["--last_kmer", str(kmers[-1]), "--kmer_step", str(kmers[1] - kmers[0])] subprocess.check_call(["perl", os.path.join(dirs["cortex"], "scripts", "calling", "run_calls.pl"), "--fastaq_index", fastaq_index, "--auto_cleaning", "yes", "--bc", "yes", "--pd", "yes", "--outdir", os.path.dirname(out_base), "--outvcf", os.path.basename(out_base), "--ploidy", str(config["algorithm"].get("ploidy", 2)), "--stampy_hash", indexes["stampy"], "--stampy_bin", os.path.join(dirs["stampy"], "stampy.py"), "--refbindir", os.path.dirname(indexes["cortex"][0]), "--list_ref_fasta", reffasta_index, "--genome_size", str(params["genome_size"]), "--max_read_len", "30000", #"--max_var_len", "4000", "--format", "FASTQ", "--qthresh", "5", "--do_union", "yes", "--mem_height", "17", "--mem_width", "100", "--ref", "CoordinatesAndInCalling", "--workflow", "independent", "--vcftools_dir", dirs["vcftools"], "--logfile", "{0}.logfile,f".format(out_base)] + kmer_info) final = glob.glob(os.path.join(os.path.dirname(out_base), "vcfs", 
"{0}*FINALcombined_BC*decomp.vcf".format(os.path.basename(out_base)))) # No calls, need to setup an empty file if len(final) != 1: print("Did not find output VCF file for {0}".format(out_base)) return None else: return final[0]
[ "def", "_run_cortex", "(", "fastq", ",", "indexes", ",", "params", ",", "out_base", ",", "dirs", ",", "config", ")", ":", "print", "(", "out_base", ")", "fastaq_index", "=", "\"{0}.fastaq_index\"", ".", "format", "(", "out_base", ")", "se_fastq_index", "=", ...
Run cortex_var run_calls.pl, producing a VCF variant file.
[ "Run", "cortex_var", "run_calls", ".", "pl", "producing", "a", "VCF", "variant", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L190-L242
237,695
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_index_local_ref
def _index_local_ref(fasta_file, cortex_dir, stampy_dir, kmers): """Pre-index a generated local reference sequence with cortex_var and stampy. """ base_out = os.path.splitext(fasta_file)[0] cindexes = [] for kmer in kmers: out_file = "{0}.k{1}.ctx".format(base_out, kmer) if not file_exists(out_file): file_list = "{0}.se_list".format(base_out) with open(file_list, "w") as out_handle: out_handle.write(fasta_file + "\n") subprocess.check_call([_get_cortex_binary(kmer, cortex_dir), "--kmer_size", str(kmer), "--mem_height", "17", "--se_list", file_list, "--format", "FASTA", "--max_read_len", "30000", "--sample_id", base_out, "--dump_binary", out_file]) cindexes.append(out_file) if not file_exists("{0}.stidx".format(base_out)): subprocess.check_call([os.path.join(stampy_dir, "stampy.py"), "-G", base_out, fasta_file]) subprocess.check_call([os.path.join(stampy_dir, "stampy.py"), "-g", base_out, "-H", base_out]) return {"stampy": base_out, "cortex": cindexes, "fasta": [fasta_file]}
python
def _index_local_ref(fasta_file, cortex_dir, stampy_dir, kmers): base_out = os.path.splitext(fasta_file)[0] cindexes = [] for kmer in kmers: out_file = "{0}.k{1}.ctx".format(base_out, kmer) if not file_exists(out_file): file_list = "{0}.se_list".format(base_out) with open(file_list, "w") as out_handle: out_handle.write(fasta_file + "\n") subprocess.check_call([_get_cortex_binary(kmer, cortex_dir), "--kmer_size", str(kmer), "--mem_height", "17", "--se_list", file_list, "--format", "FASTA", "--max_read_len", "30000", "--sample_id", base_out, "--dump_binary", out_file]) cindexes.append(out_file) if not file_exists("{0}.stidx".format(base_out)): subprocess.check_call([os.path.join(stampy_dir, "stampy.py"), "-G", base_out, fasta_file]) subprocess.check_call([os.path.join(stampy_dir, "stampy.py"), "-g", base_out, "-H", base_out]) return {"stampy": base_out, "cortex": cindexes, "fasta": [fasta_file]}
[ "def", "_index_local_ref", "(", "fasta_file", ",", "cortex_dir", ",", "stampy_dir", ",", "kmers", ")", ":", "base_out", "=", "os", ".", "path", ".", "splitext", "(", "fasta_file", ")", "[", "0", "]", "cindexes", "=", "[", "]", "for", "kmer", "in", "kme...
Pre-index a generated local reference sequence with cortex_var and stampy.
[ "Pre", "-", "index", "a", "generated", "local", "reference", "sequence", "with", "cortex_var", "and", "stampy", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L255-L280
237,696
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_get_local_ref
def _get_local_ref(region, ref_file, out_vcf_base): """Retrieve a local FASTA file corresponding to the specified region. """ out_file = "{0}.fa".format(out_vcf_base) if not file_exists(out_file): with pysam.Fastafile(ref_file) as in_pysam: contig, start, end = region seq = in_pysam.fetch(contig, int(start), int(end)) with open(out_file, "w") as out_handle: out_handle.write(">{0}-{1}-{2}\n{3}".format(contig, start, end, str(seq))) with open(out_file) as in_handle: in_handle.readline() size = len(in_handle.readline().strip()) return out_file, size
python
def _get_local_ref(region, ref_file, out_vcf_base): out_file = "{0}.fa".format(out_vcf_base) if not file_exists(out_file): with pysam.Fastafile(ref_file) as in_pysam: contig, start, end = region seq = in_pysam.fetch(contig, int(start), int(end)) with open(out_file, "w") as out_handle: out_handle.write(">{0}-{1}-{2}\n{3}".format(contig, start, end, str(seq))) with open(out_file) as in_handle: in_handle.readline() size = len(in_handle.readline().strip()) return out_file, size
[ "def", "_get_local_ref", "(", "region", ",", "ref_file", ",", "out_vcf_base", ")", ":", "out_file", "=", "\"{0}.fa\"", ".", "format", "(", "out_vcf_base", ")", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "pysam", ".", "Fastafile", "(", "...
Retrieve a local FASTA file corresponding to the specified region.
[ "Retrieve", "a", "local", "FASTA", "file", "corresponding", "to", "the", "specified", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L282-L296
237,697
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_get_fastq_in_region
def _get_fastq_in_region(region, align_bam, out_base): """Retrieve fastq files in region as single end. Paired end is more complicated since pairs can map off the region, so focus on local only assembly since we've previously used paired information for mapping. """ out_file = "{0}.fastq".format(out_base) if not file_exists(out_file): with pysam.Samfile(align_bam, "rb") as in_pysam: with file_transaction(out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: contig, start, end = region for read in in_pysam.fetch(contig, int(start), int(end)): seq = Seq.Seq(read.seq) qual = list(read.qual) if read.is_reverse: seq = seq.reverse_complement() qual.reverse() out_handle.write("@{name}\n{seq}\n+\n{qual}\n".format( name=read.qname, seq=str(seq), qual="".join(qual))) return out_file
python
def _get_fastq_in_region(region, align_bam, out_base): out_file = "{0}.fastq".format(out_base) if not file_exists(out_file): with pysam.Samfile(align_bam, "rb") as in_pysam: with file_transaction(out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: contig, start, end = region for read in in_pysam.fetch(contig, int(start), int(end)): seq = Seq.Seq(read.seq) qual = list(read.qual) if read.is_reverse: seq = seq.reverse_complement() qual.reverse() out_handle.write("@{name}\n{seq}\n+\n{qual}\n".format( name=read.qname, seq=str(seq), qual="".join(qual))) return out_file
[ "def", "_get_fastq_in_region", "(", "region", ",", "align_bam", ",", "out_base", ")", ":", "out_file", "=", "\"{0}.fastq\"", ".", "format", "(", "out_base", ")", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "pysam", ".", "Samfile", "(", "...
Retrieve fastq files in region as single end. Paired end is more complicated since pairs can map off the region, so focus on local only assembly since we've previously used paired information for mapping.
[ "Retrieve", "fastq", "files", "in", "region", "as", "single", "end", ".", "Paired", "end", "is", "more", "complicated", "since", "pairs", "can", "map", "off", "the", "region", "so", "focus", "on", "local", "only", "assembly", "since", "we", "ve", "previous...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L298-L317
237,698
bcbio/bcbio-nextgen
bcbio/variation/cortex.py
_count_fastq_reads
def _count_fastq_reads(in_fastq, min_reads): """Count the number of fastq reads in a file, stopping after reaching min_reads. """ with open(in_fastq) as in_handle: items = list(itertools.takewhile(lambda i : i <= min_reads, (i for i, _ in enumerate(FastqGeneralIterator(in_handle))))) return len(items)
python
def _count_fastq_reads(in_fastq, min_reads): with open(in_fastq) as in_handle: items = list(itertools.takewhile(lambda i : i <= min_reads, (i for i, _ in enumerate(FastqGeneralIterator(in_handle))))) return len(items)
[ "def", "_count_fastq_reads", "(", "in_fastq", ",", "min_reads", ")", ":", "with", "open", "(", "in_fastq", ")", "as", "in_handle", ":", "items", "=", "list", "(", "itertools", ".", "takewhile", "(", "lambda", "i", ":", "i", "<=", "min_reads", ",", "(", ...
Count the number of fastq reads in a file, stopping after reaching min_reads.
[ "Count", "the", "number", "of", "fastq", "reads", "in", "a", "file", "stopping", "after", "reaching", "min_reads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/cortex.py#L321-L327
237,699
bcbio/bcbio-nextgen
bcbio/distributed/transaction.py
_move_file_with_sizecheck
def _move_file_with_sizecheck(tx_file, final_file): """Move transaction file to final location, with size checks avoiding failed transfers. Creates an empty file with '.bcbiotmp' extention in the destination location, which serves as a flag. If a file like that is present, it means that transaction didn't finish successfully. """ #logger.debug("Moving %s to %s" % (tx_file, final_file)) tmp_file = final_file + ".bcbiotmp" open(tmp_file, 'wb').close() want_size = utils.get_size(tx_file) shutil.move(tx_file, final_file) transfer_size = utils.get_size(final_file) assert want_size == transfer_size, ( 'distributed.transaction.file_transaction: File copy error: ' 'file or directory on temporary storage ({}) size {} bytes ' 'does not equal size of file or directory after transfer to ' 'shared storage ({}) size {} bytes'.format( tx_file, want_size, final_file, transfer_size) ) utils.remove_safe(tmp_file)
python
def _move_file_with_sizecheck(tx_file, final_file): #logger.debug("Moving %s to %s" % (tx_file, final_file)) tmp_file = final_file + ".bcbiotmp" open(tmp_file, 'wb').close() want_size = utils.get_size(tx_file) shutil.move(tx_file, final_file) transfer_size = utils.get_size(final_file) assert want_size == transfer_size, ( 'distributed.transaction.file_transaction: File copy error: ' 'file or directory on temporary storage ({}) size {} bytes ' 'does not equal size of file or directory after transfer to ' 'shared storage ({}) size {} bytes'.format( tx_file, want_size, final_file, transfer_size) ) utils.remove_safe(tmp_file)
[ "def", "_move_file_with_sizecheck", "(", "tx_file", ",", "final_file", ")", ":", "#logger.debug(\"Moving %s to %s\" % (tx_file, final_file))", "tmp_file", "=", "final_file", "+", "\".bcbiotmp\"", "open", "(", "tmp_file", ",", "'wb'", ")", ".", "close", "(", ")", "want...
Move transaction file to final location, with size checks avoiding failed transfers. Creates an empty file with '.bcbiotmp' extention in the destination location, which serves as a flag. If a file like that is present, it means that transaction didn't finish successfully.
[ "Move", "transaction", "file", "to", "final", "location", "with", "size", "checks", "avoiding", "failed", "transfers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/transaction.py#L102-L127