Dataset schema (column name, type, observed value range):

  repo              string   7-55 chars
  path              string   4-127 chars
  func_name         string   1-88 chars
  original_string   string   75-19.8k chars
  language          string   1 distinct value ("python")
  code              string   75-19.8k chars
  code_tokens       list     20-707 tokens
  docstring         string   3-17.3k chars
  docstring_tokens  list     3-222 tokens
  sha               string   40 chars
  url               string   87-242 chars
  partition         string   1 distinct value ("train")
  idx               int64    0-252k
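The records below follow this schema, one field per line. A minimal sketch of iterating such records from a JSON Lines export; the filename and the export format are assumptions, not part of the dataset:

# Minimal sketch: iterate records of a hypothetical JSONL export of this
# dataset and print the identifying columns of the first record.
import json

with open("train.jsonl") as handle:  # hypothetical export filename
    for raw in handle:
        row = json.loads(raw)
        print(row["repo"], row["path"], row["func_name"], row["idx"])
        break  # inspect just the first record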
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_postprocess_somatic
def _postprocess_somatic(in_file, paired):
    """Post-process somatic calls to provide standard output.

    - Converts SGT and NT into standard VCF GT fields
    - Replace generic TUMOR NORMAL names in VCF with sample names.
    """
    out_file = in_file.replace(".vcf.gz", "-fixed.vcf")
    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            with utils.open_gzipsafe(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    added_gt = False
                    normal_index, tumor_index = (None, None)
                    for line in in_handle:
                        if line.startswith("##FORMAT") and not added_gt:
                            added_gt = True
                            out_handle.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
                            out_handle.write(line)
                        elif line.startswith("#CHROM"):
                            assert added_gt
                            parts = line.strip().split("\t")
                            normal_index = parts.index("NORMAL")
                            tumor_index = parts.index("TUMOR")
                            line = line.replace("NORMAL", paired.normal_name).replace("TUMOR", paired.tumor_name)
                            out_handle.write(line)
                        elif line.startswith("#"):
                            out_handle.write(line)
                        else:
                            parts = line.rstrip().split("\t")
                            tumor_gt, normal_gt = _tumor_normal_genotypes(parts[3], parts[4].split(","),
                                                                          parts[7].split(";"), in_file, parts[:2])
                            parts[8] = "GT:%s" % parts[8]
                            parts[normal_index] = "%s:%s" % (normal_gt, parts[normal_index])
                            parts[tumor_index] = "%s:%s" % (tumor_gt, parts[tumor_index])
                            out_handle.write("\t".join(parts) + "\n")
    return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"])
python
[ "def", "_postprocess_somatic", "(", "in_file", ",", "paired", ")", ":", "out_file", "=", "in_file", ".", "replace", "(", "\".vcf.gz\"", ",", "\"-fixed.vcf\"", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", "and", "not", "utils", ".", ...
Post-process somatic calls to provide standard output. - Converts SGT and NT into standard VCF GT fields - Replace generic TUMOR NORMAL names in VCF with sample names.
[ "Post", "-", "process", "somatic", "calls", "to", "provide", "standard", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L272-L307
train
217,900
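A standalone toy illustration of the FORMAT/sample-column rewrite performed by _postprocess_somatic above; the VCF line, column indexes, and genotypes are invented for the example:

# Prepend GT to the FORMAT column and matching genotype calls to the
# NORMAL/TUMOR sample columns of one tab-separated VCF body line.
line = "chr1\t100\t.\tA\tC\t.\tPASS\tSOMATIC\tDP:AU\t10:5\t20:9"
parts = line.rstrip().split("\t")
normal_index, tumor_index = 9, 10   # would be found from the #CHROM header
normal_gt, tumor_gt = "0/0", "0/1"  # would come from _tumor_normal_genotypes
parts[8] = "GT:%s" % parts[8]
parts[normal_index] = "%s:%s" % (normal_gt, parts[normal_index])
parts[tumor_index] = "%s:%s" % (tumor_gt, parts[tumor_index])
print("\t".join(parts))
# chr1  100  .  A  C  .  PASS  SOMATIC  GT:DP:AU  0/0:10:5  0/1:20:9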
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_run_workflow
def _run_workflow(data, workflow_file, work_dir):
    """Run Strelka2 analysis inside prepared workflow directory.
    """
    utils.remove_safe(os.path.join(work_dir, "workspace"))
    cmd = [utils.get_program_python("configureStrelkaGermlineWorkflow.py"),
           workflow_file, "-m", "local", "-j", dd.get_num_cores(data), "--quiet"]
    do.run(cmd, "Run Strelka2: %s" % dd.get_sample_name(data))
    utils.remove_safe(os.path.join(work_dir, "workspace"))
python
[ "def", "_run_workflow", "(", "data", ",", "workflow_file", ",", "work_dir", ")", ":", "utils", ".", "remove_safe", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"workspace\"", ")", ")", "cmd", "=", "[", "utils", ".", "get_program_python", ...
Run Strelka2 analysis inside prepared workflow directory.
[ "Run", "Strelka2", "analysis", "inside", "prepared", "workflow", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L320-L327
train
217,901
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
run_gvcfgenotyper
def run_gvcfgenotyper(data, orig_region, vrn_files, out_file):
    """Merge strelka2 and Illumina compatible gVCFs with gvcfgenotyper.

    https://github.com/Illumina/gvcfgenotyper

    Also need to explore GLnexus (https://github.com/dnanexus-rnd/GLnexus)
    """
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            regions = _find_gvcf_blocks(vrn_files[0], bamprep.region_to_gatk(orig_region),
                                        os.path.dirname(tx_out_file))
            if len(regions) == 1:
                _run_gvcfgenotyper(data, regions[0], vrn_files, tx_out_file)
            else:
                split_outs = [_run_gvcfgenotyper(data, r, vrn_files,
                                                 "%s-%s.vcf.gz" % (utils.splitext_plus(out_file)[0],
                                                                   r.replace(":", "_").replace("-", "_")))
                              for r in regions]
                vcfutils.concat_variant_files(split_outs, tx_out_file, regions,
                                              dd.get_ref_file(data), data["config"])
    return vcfutils.bgzip_and_index(out_file, data["config"])
python
[ "def", "run_gvcfgenotyper", "(", "data", ",", "orig_region", ",", "vrn_files", ",", "out_file", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file",...
Merge strelka2 and Illumina compatible gVCFs with gvcfgenotyper. https://github.com/Illumina/gvcfgenotyper Also need to explore GLnexus (https://github.com/dnanexus-rnd/GLnexus)
[ "Merge", "strelka2", "and", "Illumina", "compatible", "gVCFs", "with", "gvcfgenotyper", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L331-L351
train
217,902
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_run_gvcfgenotyper
def _run_gvcfgenotyper(data, region, vrn_files, out_file):
    """Run gvcfgenotyper on a single gVCF region in input file.
    """
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            input_file = "%s-inputs.txt" % utils.splitext_plus(tx_out_file)[0]
            with open(input_file, "w") as out_handle:
                out_handle.write("%s\n" % "\n".join(vrn_files))
            cmd = ["gvcfgenotyper", "-f", dd.get_ref_file(data), "-l", input_file,
                   "-r", region, "-O", "z", "-o", tx_out_file]
            do.run(cmd, "gvcfgenotyper: %s %s" % (dd.get_sample_name(data), region))
    return out_file
python
[ "def", "_run_gvcfgenotyper", "(", "data", ",", "region", ",", "vrn_files", ",", "out_file", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", "...
Run gvcfgenotyper on a single gVCF region in input file.
[ "Run", "gvcfgenotyper", "on", "a", "single", "gVCF", "region", "in", "input", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L353-L364
train
217,903
bcbio/bcbio-nextgen
bcbio/variation/strelka2.py
_find_gvcf_blocks
def _find_gvcf_blocks(vcf_file, region, tmp_dir):
    """Retrieve gVCF blocks within our current evaluation region.

    gvcfgenotyper does not support calling larger regions with individual
    coverage blocks, so we split our big region into potentially multiple.
    """
    region_file = os.path.join(tmp_dir, "cur_region.bed")
    with open(region_file, "w") as out_handle:
        chrom, coords = region.split(":")
        start, end = coords.split("-")
        out_handle.write("\t".join([chrom, start, end]) + "\n")
    final_file = os.path.join(tmp_dir, "split_regions.bed")
    cmd = "gvcf_regions.py {vcf_file} | bedtools intersect -a - -b {region_file} > {final_file}"
    do.run(cmd.format(**locals()))
    regions = []
    with open(final_file) as in_handle:
        for line in in_handle:
            chrom, start, end = line.strip().split("\t")
            regions.append("%s:%s-%s" % (chrom, start, end))
    return regions
python
[ "def", "_find_gvcf_blocks", "(", "vcf_file", ",", "region", ",", "tmp_dir", ")", ":", "region_file", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"cur_region.bed\"", ")", "with", "open", "(", "region_file", ",", "\"w\"", ")", "as", "out_hand...
Retrieve gVCF blocks within our current evaluation region. gvcfgenotyper does not support calling larger regions with individual coverage blocks, so we split our big region into potentially multiple.
[ "Retrieve", "gVCF", "blocks", "within", "our", "current", "evaluation", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L366-L385
train
217,904
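A toy round trip of the region handling in _find_gvcf_blocks above, converting a "chrom:start-end" string to a BED line and back:

region = "chr1:1000-5000"
chrom, coords = region.split(":")
start, end = coords.split("-")
bed_line = "\t".join([chrom, start, end])
print(bed_line)                        # chr1    1000    5000
c, s, e = bed_line.strip().split("\t")
print("%s:%s-%s" % (c, s, e))          # chr1:1000-5000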
bcbio/bcbio-nextgen
bcbio/hla/__init__.py
run
def run(samples, run_parallel):
    """Run HLA detection on the input samples.
    """
    to_process = []
    extras = []
    for data in (xs[0] for xs in samples):
        hlacaller = tz.get_in(["config", "algorithm", "hlacaller"], data)
        if hlacaller:
            to_process.append(data)
        else:
            extras.append([data])
    processed = run_parallel("call_hla", ([x] for x in to_process))
    return extras + processed
python
[ "def", "run", "(", "samples", ",", "run_parallel", ")", ":", "to_process", "=", "[", "]", "extras", "=", "[", "]", "for", "data", "in", "(", "xs", "[", "0", "]", "for", "xs", "in", "samples", ")", ":", "hlacaller", "=", "tz", ".", "get_in", "(", ...
Run HLA detection on the input samples.
[ "Run", "HLA", "detection", "on", "the", "input", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/__init__.py#L20-L32
train
217,905
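A self-contained toy version of the sample split in run above, with invented sample dictionaries standing in for real pipeline data:

# Samples with an "hlacaller" configured are queued for processing;
# the rest pass through untouched.
import toolz as tz

samples = [[{"name": "s1", "config": {"algorithm": {"hlacaller": "bwakit"}}}],
           [{"name": "s2", "config": {"algorithm": {}}}]]
to_process, extras = [], []
for data in (xs[0] for xs in samples):
    if tz.get_in(["config", "algorithm", "hlacaller"], data):
        to_process.append(data)
    else:
        extras.append([data])
print([d["name"] for d in to_process], [d[0]["name"] for d in extras])
# ['s1'] ['s2']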
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
align_bam
def align_bam(in_bam, ref_file, names, align_dir, data):
    """Perform direct alignment of an input BAM file with BWA using pipes.

    This avoids disk IO by piping between processes:
     - samtools sort of input BAM to queryname
     - bedtools conversion to interleaved FASTQ
     - bwa-mem alignment
     - samtools conversion to BAM
     - samtools sort to coordinate
    """
    config = data["config"]
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    samtools = config_utils.get_program("samtools", config)
    bedtools = config_utils.get_program("bedtools", config)
    resources = config_utils.get_resources("samtools", config)
    num_cores = config["algorithm"].get("num_cores", 1)
    # adjust memory for samtools since used for input and output
    max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
                                         3, "decrease").upper()
    if not utils.file_exists(out_file):
        with tx_tmpdir(data) as work_dir:
            with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
                bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, "-")
                tx_out_prefix = os.path.splitext(tx_out_file)[0]
                prefix1 = "%s-in1" % tx_out_prefix
                cmd = ("unset JAVA_HOME && "
                       "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
                       "| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
                       "| {bwa_cmd} | ")
                cmd = cmd.format(**locals()) + tobam_cl
                do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
                       [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
    return out_file
python
[ "def", "align_bam", "(", "in_bam", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "out_file", "=", "os", ".", "path", ".", "join", "(", "align_dir", ",", "\"{0}-sort.bam\"", ".", "...
Perform direct alignment of an input BAM file with BWA using pipes. This avoids disk IO by piping between processes: - samtools sort of input BAM to queryname - bedtools conversion to interleaved FASTQ - bwa-mem alignment - samtools conversion to BAM - samtools sort to coordinate
[ "Perform", "direct", "alignment", "of", "an", "input", "BAM", "file", "with", "BWA", "using", "pipes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L21-L53
train
217,906
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
_get_bwa_mem_cmd
def _get_bwa_mem_cmd(data, out_file, ref_file, fastq1, fastq2=""):
    """Perform piped bwa mem mapping potentially with alternative alleles in GRCh38 + HLA typing.

    Commands for HLA post-processing:
       base=TEST
       run-HLA $base.hla > $base.hla.top
       cat $base.hla.HLA*.gt | grep ^GT | cut -f2- > $base.hla.all
       rm -f $base.hla.HLA*gt
       rm -f $base.hla.HLA*gz
    """
    alt_file = ref_file + ".alt"
    if utils.file_exists(alt_file) and dd.get_hlacaller(data):
        bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem")))
        hla_base = os.path.join(utils.safe_makedir(os.path.join(os.path.dirname(out_file), "hla")),
                                os.path.basename(out_file) + ".hla")
        alt_cmd = (" | {bwakit_dir}/k8 {bwakit_dir}/bwa-postalt.js -p {hla_base} {alt_file}")
    else:
        alt_cmd = ""
    if dd.get_aligner(data) == "sentieon-bwa":
        bwa_exe = "sentieon-bwa"
        exports = sentieon.license_export(data)
    else:
        bwa_exe = "bwa"
        exports = ""
    bwa = config_utils.get_program(bwa_exe, data["config"])
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    bwa_resources = config_utils.get_resources("bwa", data["config"])
    bwa_params = (" ".join([str(x) for x in bwa_resources.get("options", [])])
                  if "options" in bwa_resources else "")
    rg_info = novoalign.get_rg_info(data["rgnames"])
    # For UMI runs, pass along consensus tags
    c_tags = "-C" if "umi_bam" in data else ""
    pairing = "-p" if not fastq2 else ""
    # Restrict seed occurances to 1/2 of default, manage memory usage for centromere repeats in hg38
    # https://sourceforge.net/p/bio-bwa/mailman/message/31514937/
    # http://ehc.ac/p/bio-bwa/mailman/message/32268544/
    mem_usage = "-c 250"
    bwa_cmd = ("{exports}{bwa} mem {pairing} {c_tags} {mem_usage} -M -t {num_cores} {bwa_params} -R '{rg_info}' "
               "-v 1 {ref_file} {fastq1} {fastq2} ")
    return (bwa_cmd + alt_cmd).format(**locals())
python
[ "def", "_get_bwa_mem_cmd", "(", "data", ",", "out_file", ",", "ref_file", ",", "fastq1", ",", "fastq2", "=", "\"\"", ")", ":", "alt_file", "=", "ref_file", "+", "\".alt\"", "if", "utils", ".", "file_exists", "(", "alt_file", ")", "and", "dd", ".", "get_h...
Perform piped bwa mem mapping potentially with alternative alleles in GRCh38 + HLA typing. Commands for HLA post-processing: base=TEST run-HLA $base.hla > $base.hla.top cat $base.hla.HLA*.gt | grep ^GT | cut -f2- > $base.hla.all rm -f $base.hla.HLA*gt rm -f $base.hla.HLA*gz
[ "Perform", "piped", "bwa", "mem", "mapping", "potentially", "with", "alternative", "alleles", "in", "GRCh38", "+", "HLA", "typing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L55-L94
train
217,907
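A toy illustration of the command templating style used in _get_bwa_mem_cmd above, where a shell command template is filled in from local variables via str.format(**locals()); all values here are invented:

bwa, num_cores, ref_file = "bwa", 8, "GRCh38.fa"
fastq1, fastq2 = "sample_1.fq.gz", "sample_2.fq.gz"
cmd = "{bwa} mem -t {num_cores} {ref_file} {fastq1} {fastq2}"
print(cmd.format(**locals()))
# bwa mem -t 8 GRCh38.fa sample_1.fq.gz sample_2.fq.gz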
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
_align_mem
def _align_mem(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform bwa-mem alignment on supported read lengths.
    """
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        cmd = ("unset JAVA_HOME && "
               "%s | %s" % (_get_bwa_mem_cmd(data, out_file, ref_file, fastq_file, pair_file),
                            tobam_cl))
        do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None,
               [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)])
    return out_file
python
[ "def", "_align_mem", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "out_file", ",", "names", ",", "rg_info", ",", "data", ")", ":", "with", "postalign", ".", "tobam_cl", "(", "data", ",", "out_file", ",", "pair_file", "!=", "\"\"", ")", "as"...
Perform bwa-mem alignment on supported read lengths.
[ "Perform", "bwa", "-", "mem", "alignment", "on", "supported", "read", "lengths", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L174-L182
train
217,908
bcbio/bcbio-nextgen
bcbio/ngsalign/bwa.py
_align_backtrack
def _align_backtrack(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform a BWA alignment using 'aln' backtrack algorithm.
    """
    bwa = config_utils.get_program("bwa", data["config"])
    config = data["config"]
    sai1_file = "%s_1.sai" % os.path.splitext(out_file)[0]
    sai2_file = "%s_2.sai" % os.path.splitext(out_file)[0] if pair_file else ""
    if not utils.file_exists(sai1_file):
        with file_transaction(data, sai1_file) as tx_sai1_file:
            _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)
    if sai2_file and not utils.file_exists(sai2_file):
        with file_transaction(data, sai2_file) as tx_sai2_file:
            _run_bwa_align(pair_file, ref_file, tx_sai2_file, config)
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        align_type = "sampe" if sai2_file else "samse"
        cmd = ("unset JAVA_HOME && {bwa} {align_type} -r '{rg_info}' {ref_file} {sai1_file} {sai2_file} "
               "{fastq_file} {pair_file} | ")
        cmd = cmd.format(**locals()) + tobam_cl
        do.run(cmd, "bwa %s" % align_type, data)
    return out_file
python
[ "def", "_align_backtrack", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "out_file", ",", "names", ",", "rg_info", ",", "data", ")", ":", "bwa", "=", "config_utils", ".", "get_program", "(", "\"bwa\"", ",", "data", "[", "\"config\"", "]", ")",...
Perform a BWA alignment using 'aln' backtrack algorithm.
[ "Perform", "a", "BWA", "alignment", "using", "aln", "backtrack", "algorithm", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bwa.py#L184-L203
train
217,909
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
run_main
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None,
             parallel=None, workflow=None):
    """Run variant analysis, handling command line options.
    """
    # Set environment to standard to use periods for decimals and avoid localization
    os.environ["LC_ALL"] = "C"
    os.environ["LC"] = "C"
    os.environ["LANG"] = "C"
    workdir = utils.safe_makedir(os.path.abspath(workdir))
    os.chdir(workdir)
    config, config_file = config_utils.load_system_config(config_file, workdir)
    if config.get("log_dir", None) is None:
        config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR)
    if parallel["type"] in ["local", "clusterk"]:
        _setup_resources()
        _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml)
    elif parallel["type"] == "ipython":
        assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
        if parallel["scheduler"] != "sge":
            assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
        elif not parallel["queue"]:
            parallel["queue"] = ""
        _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml)
    else:
        raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
python
[ "def", "run_main", "(", "workdir", ",", "config_file", "=", "None", ",", "fc_dir", "=", "None", ",", "run_info_yaml", "=", "None", ",", "parallel", "=", "None", ",", "workflow", "=", "None", ")", ":", "# Set environment to standard to use periods for decimals and ...
Run variant analysis, handling command line options.
[ "Run", "variant", "analysis", "handling", "command", "line", "options", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L29-L55
train
217,910
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
_setup_resources
def _setup_resources():
    """Attempt to increase resource limits up to hard limits.

    This allows us to avoid out of file handle limits where we can
    move beyond the soft limit up to the hard limit.
    """
    target_procs = 10240
    cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
    target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
    cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
    target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
    resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
python
[ "def", "_setup_resources", "(", ")", ":", "target_procs", "=", "10240", "cur_proc", ",", "max_proc", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NPROC", ")", "target_proc", "=", "min", "(", "max_proc", ",", "target_procs", ")", "if", "ma...
Attempt to increase resource limits up to hard limits. This allows us to avoid out of file handle limits where we can move beyond the soft limit up to the hard limit.
[ "Attempt", "to", "increase", "resource", "limits", "up", "to", "hard", "limits", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L57-L69
train
217,911
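A standalone sketch of the soft-limit bump performed by _setup_resources above, here for open file handles only (Unix-only; the target value mirrors the original):

# Raise the open file handle soft limit toward a target, capped at the
# hard limit, and never touch the hard limit itself.
import resource

target = 10240
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
new_soft = min(hard, target) if hard > 0 else target
resource.setrlimit(resource.RLIMIT_NOFILE, (max(soft, new_soft), hard))
print(resource.getrlimit(resource.RLIMIT_NOFILE))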
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
_wres
def _wres(parallel, progs, fresources=None, ensure_mem=None):
    """Add resource information to the parallel environment on required programs and files.

    Enables spinning up required machines and operating in non-shared filesystem
    environments.

    progs -- Third party tools used in processing
    fresources -- Required file-based resources needed. These will be transferred on
                  non-shared filesystems.
    ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
                  enough memory gets allocated on low-core machines.
    """
    parallel = copy.deepcopy(parallel)
    parallel["progs"] = progs
    if fresources:
        parallel["fresources"] = fresources
    if ensure_mem:
        parallel["ensure_mem"] = ensure_mem
    return parallel
python
[ "def", "_wres", "(", "parallel", ",", "progs", ",", "fresources", "=", "None", ",", "ensure_mem", "=", "None", ")", ":", "parallel", "=", "copy", ".", "deepcopy", "(", "parallel", ")", "parallel", "[", "\"progs\"", "]", "=", "progs", "if", "fresources", ...
Add resource information to the parallel environment on required programs and files. Enables spinning up required machines and operating in non-shared filesystem environments. progs -- Third party tools used in processing fresources -- Required file-based resources needed. These will be transferred on non-shared filesystems. ensure_mem -- Dictionary of required minimum memory for programs used. Ensures enough memory gets allocated on low-core machines.
[ "Add", "resource", "information", "to", "the", "parallel", "environment", "on", "required", "programs", "and", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L94-L112
train
217,912
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
rnaseq_prep_samples
def rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples):
    """
    organizes RNA-seq and small-RNAseq samples, converting from BAM if
    necessary and trimming if necessary
    """
    pipeline = dd.get_in_samples(samples, dd.get_analysis)
    trim_reads_set = any([tz.get_in(["algorithm", "trim_reads"], d)
                          for d in dd.sample_data_iterator(samples)])
    resources = ["picard"]
    needs_trimming = (_is_smallrnaseq(pipeline) or trim_reads_set)
    if needs_trimming:
        resources.append("atropos")
    with prun.start(_wres(parallel, resources), samples, config, dirs, "trimming",
                    max_multicore=1 if not needs_trimming else None) as run_parallel:
        with profile.report("organize samples", dirs):
            samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
                                                         [x[0]["description"] for x in samples]]])
        samples = run_parallel("prepare_sample", samples)
        if needs_trimming:
            with profile.report("adapter trimming", dirs):
                if _is_smallrnaseq(pipeline):
                    samples = run_parallel("trim_srna_sample", samples)
                else:
                    samples = run_parallel("trim_sample", samples)
    return samples
python
[ "def", "rnaseq_prep_samples", "(", "config", ",", "run_info_yaml", ",", "parallel", ",", "dirs", ",", "samples", ")", ":", "pipeline", "=", "dd", ".", "get_in_samples", "(", "samples", ",", "dd", ".", "get_analysis", ")", "trim_reads_set", "=", "any", "(", ...
organizes RNA-seq and small-RNAseq samples, converting from BAM if necessary and trimming if necessary
[ "organizes", "RNA", "-", "seq", "and", "small", "-", "RNAseq", "samples", "converting", "from", "BAM", "if", "necessary", "and", "trimming", "if", "necessary" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L394-L418
train
217,913
bcbio/bcbio-nextgen
bcbio/pipeline/main.py
_pair_samples_with_pipelines
def _pair_samples_with_pipelines(run_info_yaml, config):
    """Map samples defined in input file to pipelines to run.
    """
    samples = config_utils.load_config(run_info_yaml)
    if isinstance(samples, dict):
        resources = samples.pop("resources")
        samples = samples["details"]
    else:
        resources = {}
    ready_samples = []
    for sample in samples:
        if "files" in sample:
            del sample["files"]
        # add any resources to this item to recalculate global configuration
        usample = copy.deepcopy(sample)
        usample.pop("algorithm", None)
        if "resources" not in usample:
            usample["resources"] = {}
        for prog, pkvs in resources.items():
            if prog not in usample["resources"]:
                usample["resources"][prog] = {}
            if pkvs is not None:
                for key, val in pkvs.items():
                    usample["resources"][prog][key] = val
        config = config_utils.update_w_custom(config, usample)
        sample["resources"] = {}
        ready_samples.append(sample)
    paired = [(x, _get_pipeline(x)) for x in ready_samples]
    d = defaultdict(list)
    for x in paired:
        d[x[1]].append([x[0]])
    return d, config
python
[ "def", "_pair_samples_with_pipelines", "(", "run_info_yaml", ",", "config", ")", ":", "samples", "=", "config_utils", ".", "load_config", "(", "run_info_yaml", ")", "if", "isinstance", "(", "samples", ",", "dict", ")", ":", "resources", "=", "samples", ".", "p...
Map samples defined in input file to pipelines to run.
[ "Map", "samples", "defined", "in", "input", "file", "to", "pipelines", "to", "run", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/main.py#L430-L461
train
217,914
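A toy version of the final grouping step in _pair_samples_with_pipelines above, with an inline stand-in for _get_pipeline:

# Pair each sample with its pipeline, then bucket samples per pipeline.
from collections import defaultdict

samples = [{"description": "s1", "analysis": "variant2"},
           {"description": "s2", "analysis": "RNA-seq"},
           {"description": "s3", "analysis": "variant2"}]
paired = [(x, x["analysis"]) for x in samples]  # stand-in for _get_pipeline
d = defaultdict(list)
for sample, pipeline in paired:
    d[pipeline].append([sample])
print({k: len(v) for k, v in d.items()})  # {'variant2': 2, 'RNA-seq': 1}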
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
run
def run(data):
    """HLA typing with bwakit, parsing output from called genotype files.
    """
    bwakit_dir = os.path.dirname(os.path.realpath(utils.which("run-bwamem")))
    hla_fqs = tz.get_in(["hla", "fastq"], data, [])
    if len(hla_fqs) > 0:
        hla_base = os.path.commonprefix(hla_fqs)
        while hla_base.endswith("."):
            hla_base = hla_base[:-1]
        out_file = hla_base + ".top"
        if not utils.file_exists(out_file):
            cmd = "{bwakit_dir}/run-HLA {hla_base}"
            do.run(cmd.format(**locals()), "HLA typing with bwakit")
            out_file = _organize_calls(out_file, hla_base, data)
        data["hla"].update({"call_file": out_file,
                            "hlacaller": "bwakit"})
    return data
python
[ "def", "run", "(", "data", ")", ":", "bwakit_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "utils", ".", "which", "(", "\"run-bwamem\"", ")", ")", ")", "hla_fqs", "=", "tz", ".", "get_in", "(", "[", "...
HLA typing with bwakit, parsing output from called genotype files.
[ "HLA", "typing", "with", "bwakit", "parsing", "output", "from", "called", "genotype", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L18-L34
train
217,915
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
_organize_calls
def _organize_calls(out_file, hla_base, data):
    """Prepare genotype calls, reporting best call along with quality metrics.
    """
    hla_truth = get_hla_truthset(data)
    sample = dd.get_sample_name(data)
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            writer = csv.writer(out_handle)
            writer.writerow(["sample", "locus", "mismatches", "options", "alleles",
                             "p-groups", "expected", "validates"])
            for genotype_file in glob.glob("%s.HLA-*.gt" % (hla_base)):
                hla_locus = os.path.basename(genotype_file).replace(
                    "%s.HLA-" % os.path.basename(hla_base), "").replace(".gt", "")
                with open(genotype_file) as in_handle:
                    total_options = set([])
                    for i, line in enumerate(in_handle):
                        _, aone, atwo, m = line.split("\t")[:4]
                        pgroups = (hla_groups.hla_protein(aone, data),
                                   hla_groups.hla_protein(atwo, data))
                        if i == 0:
                            call_alleles = [aone, atwo]
                            call_pgroups = pgroups
                            mismatches = m
                        total_options.add(pgroups)
                    if len(total_options) > 0:
                        truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
                        writer.writerow([sample, hla_locus, mismatches, len(total_options),
                                         ";".join(call_alleles), ";".join(call_pgroups),
                                         ";".join(truth_alleles),
                                         matches_truth(call_alleles, truth_alleles, data)])
    return out_file
python
[ "def", "_organize_calls", "(", "out_file", ",", "hla_base", ",", "data", ")", ":", "hla_truth", "=", "get_hla_truthset", "(", "data", ")", "sample", "=", "dd", ".", "get_sample_name", "(", "data", ")", "with", "file_transaction", "(", "data", ",", "out_file"...
Prepare genotype calls, reporting best call along with quality metrics.
[ "Prepare", "genotype", "calls", "reporting", "best", "call", "along", "with", "quality", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L36-L64
train
217,916
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
matches_truth
def matches_truth(call_alleles, truth_alleles, data):
    """Flexibly check if truth and call alleles match, using p-groups.
    """
    if not truth_alleles:
        return ""
    else:
        def _remove_p(x):
            return x[:-1] if x.endswith("P") else x
        t_cmp = set([_remove_p(hla_groups.hla_protein(x, data)) for x in truth_alleles])
        c_cmp = set([_remove_p(hla_groups.hla_protein(x, data)) for x in call_alleles])
        return "yes" if len(t_cmp.intersection(c_cmp)) == len(t_cmp) else "no"
python
[ "def", "matches_truth", "(", "call_alleles", ",", "truth_alleles", ",", "data", ")", ":", "if", "not", "truth_alleles", ":", "return", "\"\"", "else", ":", "def", "_remove_p", "(", "x", ")", ":", "return", "x", "[", ":", "-", "1", "]", "if", "x", "."...
Flexibly check if truth and call alleles match, using p-groups.
[ "Flexibly", "check", "if", "truth", "and", "call", "alleles", "match", "using", "p", "-", "groups", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L66-L76
train
217,917
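A self-contained toy version of the p-group comparison in matches_truth above, with hla_groups.hla_protein replaced by an identity stand-in on invented alleles:

# Truth matches when every truth p-group is present among the called
# p-groups after trailing "P" markers are dropped.
def _remove_p(x):
    return x[:-1] if x.endswith("P") else x

truth_alleles = ["A*01:01P", "A*02:01"]
call_alleles = ["A*01:01", "A*02:01"]
t_cmp = set(_remove_p(x) for x in truth_alleles)
c_cmp = set(_remove_p(x) for x in call_alleles)
print("yes" if len(t_cmp.intersection(c_cmp)) == len(t_cmp) else "no")  # yes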
bcbio/bcbio-nextgen
bcbio/hla/bwakit.py
get_hla_truthset
def get_hla_truthset(data):
    """Retrieve expected truth calls for annotating HLA called output.
    """
    val_csv = tz.get_in(["config", "algorithm", "hlavalidate"], data)
    out = {}
    if val_csv and utils.file_exists(val_csv):
        with open(val_csv) as in_handle:
            reader = csv.reader(in_handle)
            next(reader)  # header
            for sample, locus, alleles in (l for l in reader if l):
                out = tz.update_in(out, [sample, locus],
                                   lambda x: [x.strip() for x in alleles.split(";")])
    return out
python
[ "def", "get_hla_truthset", "(", "data", ")", ":", "val_csv", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"hlavalidate\"", "]", ",", "data", ")", "out", "=", "{", "}", "if", "val_csv", "and", "utils", ".", "file_exists",...
Retrieve expected truth calls for annotating HLA called output.
[ "Retrieve", "expected", "truth", "calls", "for", "annotating", "HLA", "called", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/bwakit.py#L78-L89
train
217,918
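A toy version of the nested truth-set construction in get_hla_truthset above, using toolz.update_in on invented rows instead of a CSV file:

# Build the nested {sample: {locus: [alleles]}} mapping row by row;
# update_in creates the intermediate dictionaries as needed.
import toolz as tz

rows = [("NA12878", "HLA-A", "01:01;02:01"),
        ("NA12878", "HLA-B", "08:01;56:01")]
out = {}
for sample, locus, alleles in rows:
    out = tz.update_in(out, [sample, locus],
                       lambda _: [a.strip() for a in alleles.split(";")])
print(out["NA12878"]["HLA-A"])  # ['01:01', '02:01']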
bcbio/bcbio-nextgen
scripts/utils/bam_to_fastq_region.py
bam_to_fastq_pair
def bam_to_fastq_pair(in_file, target_region, pair):
    """Generator to convert BAM files into name, seq, qual in a region.
    """
    space, start, end = target_region
    bam_file = pysam.Samfile(in_file, "rb")
    for read in bam_file:
        if (not read.is_unmapped and not read.mate_is_unmapped
                and bam_file.getrname(read.tid) == space
                and bam_file.getrname(read.mrnm) == space
                and read.pos >= start and read.pos <= end
                and read.mpos >= start and read.mpos <= end
                and not read.is_secondary
                and read.is_paired and getattr(read, "is_read%s" % pair)):
            seq = Seq.Seq(read.seq)
            qual = list(read.qual)
            if read.is_reverse:
                seq = seq.reverse_complement()
                qual.reverse()
            yield read.qname, str(seq), "".join(qual)
python
[ "def", "bam_to_fastq_pair", "(", "in_file", ",", "target_region", ",", "pair", ")", ":", "space", ",", "start", ",", "end", "=", "target_region", "bam_file", "=", "pysam", ".", "Samfile", "(", "in_file", ",", "\"rb\"", ")", "for", "read", "in", "bam_file",...
Generator to convert BAM files into name, seq, qual in a region.
[ "Generator", "to", "convert", "BAM", "files", "into", "name", "seq", "qual", "in", "a", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/bam_to_fastq_region.py#L32-L50
train
217,919
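A toy version of the reverse-strand handling in bam_to_fastq_pair above, using Biopython as the original does, on an invented read:

# Reverse complement the sequence and reverse the qualities for a
# reverse-strand read.
from Bio import Seq

seq = Seq.Seq("AACG")
qual = list("IIFF")
seq = seq.reverse_complement()
qual.reverse()
print(str(seq), "".join(qual))  # CGTT FFII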
bcbio/bcbio-nextgen
bcbio/bam/callable.py
sample_callable_bed
def sample_callable_bed(bam_file, ref_file, data):
    """Retrieve callable regions for a sample subset by defined analysis regions.
    """
    from bcbio.heterogeneity import chromhacks
    CovInfo = collections.namedtuple("CovInfo", "callable, raw_callable, depth_files")
    noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data)
    def callable_chrom_filter(r):
        """Filter to callable region, potentially limiting by chromosomes.
        """
        return r.name == "CALLABLE" and (not noalt_calling or chromhacks.is_nonalt(r.chrom))
    out_file = "%s-callable_sample.bed" % os.path.splitext(bam_file)[0]
    with shared.bedtools_tmpdir(data):
        sv_bed = regions.get_sv_bed(data)
        callable_bed, depth_files = coverage.calculate(bam_file, data, sv_bed)
        input_regions_bed = dd.get_variant_regions(data)
        if not utils.file_uptodate(out_file, callable_bed):
            with file_transaction(data, out_file) as tx_out_file:
                callable_regions = pybedtools.BedTool(callable_bed)
                filter_regions = callable_regions.filter(callable_chrom_filter)
                if input_regions_bed:
                    if not utils.file_uptodate(out_file, input_regions_bed):
                        input_regions = pybedtools.BedTool(input_regions_bed)
                        filter_regions.intersect(input_regions, nonamecheck=True).saveas(tx_out_file)
                else:
                    filter_regions.saveas(tx_out_file)
    return CovInfo(out_file, callable_bed, depth_files)
python
[ "def", "sample_callable_bed", "(", "bam_file", ",", "ref_file", ",", "data", ")", ":", "from", "bcbio", ".", "heterogeneity", "import", "chromhacks", "CovInfo", "=", "collections", ".", "namedtuple", "(", "\"CovInfo\"", ",", "\"callable, raw_callable, depth_files\"", ...
Retrieve callable regions for a sample subset by defined analysis regions.
[ "Retrieve", "callable", "regions", "for", "a", "sample", "subset", "by", "defined", "analysis", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L32-L57
train
217,920
bcbio/bcbio-nextgen
bcbio/bam/callable.py
get_ref_bedtool
def get_ref_bedtool(ref_file, config, chrom=None):
    """Retrieve a pybedtool BedTool object with reference sizes from input reference.
    """
    broad_runner = broad.runner_from_path("picard", config)
    ref_dict = broad_runner.run_fn("picard_index_ref", ref_file)
    ref_lines = []
    with pysam.Samfile(ref_dict, "r") as ref_sam:
        for sq in ref_sam.header["SQ"]:
            if not chrom or sq["SN"] == chrom:
                ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"]))
    return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
python
[ "def", "get_ref_bedtool", "(", "ref_file", ",", "config", ",", "chrom", "=", "None", ")", ":", "broad_runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "config", ")", "ref_dict", "=", "broad_runner", ".", "run_fn", "(", "\"picard_index_re...
Retrieve a pybedtool BedTool object with reference sizes from input reference.
[ "Retrieve", "a", "pybedtool", "BedTool", "object", "with", "reference", "sizes", "from", "input", "reference", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L59-L69
train
217,921
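A minimal sketch of the in-memory BedTool construction used in get_ref_bedtool above, with invented contig sizes (requires pybedtools):

import pybedtools

ref_lines = ["%s\t%s\t%s" % (name, 0, length)
             for name, length in [("chr1", 248956422), ("chr2", 242193529)]]
genome_bed = pybedtools.BedTool("\n".join(ref_lines), from_string=True)
for interval in genome_bed:
    print(interval.chrom, interval.start, interval.stop)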
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_get_nblock_regions
def _get_nblock_regions(in_file, min_n_size, ref_regions):
    """Retrieve coordinates of regions in reference genome with no mapping.

    These are potential breakpoints for parallelizing analysis.
    """
    out_lines = []
    called_contigs = set([])
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            contig, start, end, ctype = line.rstrip().split()
            called_contigs.add(contig)
            if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and
                    int(end) - int(start) > min_n_size):
                out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
    for refr in ref_regions:
        if refr.chrom not in called_contigs:
            out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
    return pybedtools.BedTool("\n".join(out_lines), from_string=True)
python
[ "def", "_get_nblock_regions", "(", "in_file", ",", "min_n_size", ",", "ref_regions", ")", ":", "out_lines", "=", "[", "]", "called_contigs", "=", "set", "(", "[", "]", ")", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":",...
Retrieve coordinates of regions in reference genome with no mapping. These are potential breakpoints for parallelizing analysis.
[ "Retrieve", "coordinates", "of", "regions", "in", "reference", "genome", "with", "no", "mapping", ".", "These", "are", "potential", "breakpoints", "for", "parallelizing", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L71-L87
train
217,922
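As a quick sanity check of the filtering logic above, a minimal sketch with hypothetical GATK CallableLoci-style lines (coordinates invented):

min_n_size = 250
lines = ["chr1\t0\t10000\tREF_N",            # 10kb gap: kept as a split point
         "chr1\t10000\t10100\tNO_COVERAGE",  # 100bp: too small, dropped
         "chr1\t10100\t25000\tCALLABLE"]     # callable: never a split point
nblock_types = {"REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"}
kept = [l for l in lines
        if l.split("\t")[3] in nblock_types
        and int(l.split("\t")[2]) - int(l.split("\t")[1]) > min_n_size]
assert kept == ["chr1\t0\t10000\tREF_N"]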
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_combine_regions
def _combine_regions(all_regions, ref_regions):
    """Combine multiple BedTool region sets into a sorted final BedTool.
    """
    chrom_order = {}
    for i, x in enumerate(ref_regions):
        chrom_order[x.chrom] = i
    def wchrom_key(x):
        chrom, start, end = x
        return (chrom_order[chrom], start, end)
    all_intervals = []
    for region_group in all_regions:
        for region in region_group:
            all_intervals.append((region.chrom, int(region.start), int(region.stop)))
    all_intervals.sort(key=wchrom_key)
    bed_lines = ["%s\t%s\t%s" % (c, s, e) for (c, s, e) in all_intervals]
    return pybedtools.BedTool("\n".join(bed_lines), from_string=True)
python
[ "def", "_combine_regions", "(", "all_regions", ",", "ref_regions", ")", ":", "chrom_order", "=", "{", "}", "for", "i", ",", "x", "in", "enumerate", "(", "ref_regions", ")", ":", "chrom_order", "[", "x", ".", "chrom", "]", "=", "i", "def", "wchrom_key", ...
Combine multiple BedTool region sets into a sorted final BedTool.
[ "Combine", "multiple", "BEDtools", "regions", "of", "regions", "into", "sorted", "final", "BEDtool", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L89-L104
train
217,923
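The karyotype-style ordering relies on a precomputed chromosome index; a small worked example with invented intervals:

chrom_order = {"chr1": 0, "chr2": 1, "chrM": 2}  # hypothetical reference order
intervals = [("chr2", 5, 10), ("chr1", 100, 200), ("chr1", 0, 50)]
intervals.sort(key=lambda x: (chrom_order[x[0]], x[1], x[2]))
assert intervals == [("chr1", 0, 50), ("chr1", 100, 200), ("chr2", 5, 10)]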
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_add_config_regions
def _add_config_regions(nblock_regions, ref_regions, data):
    """Add additional nblock regions based on configured regions to call.
    Identifies user defined regions which we should not be analyzing.
    """
    input_regions_bed = dd.get_variant_regions(data)
    if input_regions_bed:
        input_regions = pybedtools.BedTool(input_regions_bed)
        # work around problem with single region not subtracted correctly.
        if len(input_regions) == 1:
            str_regions = str(input_regions[0]).strip()
            input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions),
                                               from_string=True)
        input_nblock = ref_regions.subtract(input_regions, nonamecheck=True)
        if input_nblock == ref_regions:
            raise ValueError("Input variant_region file (%s) "
                             "excludes all genomic regions. Do the chromosome names "
                             "in the BED file match your genome (chr1 vs 1)?" % input_regions_bed)
        all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions)
    else:
        all_intervals = nblock_regions
    if "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data):
        from bcbio.heterogeneity import chromhacks
        remove_intervals = ref_regions.filter(lambda r: not chromhacks.is_nonalt(r.chrom))
        all_intervals = _combine_regions([all_intervals, remove_intervals], ref_regions)
    return all_intervals.merge()
python
[ "def", "_add_config_regions", "(", "nblock_regions", ",", "ref_regions", ",", "data", ")", ":", "input_regions_bed", "=", "dd", ".", "get_variant_regions", "(", "data", ")", "if", "input_regions_bed", ":", "input_regions", "=", "pybedtools", ".", "BedTool", "(", ...
Add additional nblock regions based on configured regions to call. Identifies user defined regions which we should not be analyzing.
[ "Add", "additional", "nblock", "regions", "based", "on", "configured", "regions", "to", "call", ".", "Identifies", "user", "defined", "regions", "which", "we", "should", "not", "be", "analyzing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L106-L130
train
217,924
bcbio/bcbio-nextgen
bcbio/bam/callable.py
block_regions
def block_regions(callable_bed, in_bam, ref_file, data):
    """Find blocks of regions for analysis from mapped input BAM file.
    Identifies islands of callable regions, surrounded by regions with no
    read support, that can be analyzed independently.
    """
    min_n_size = int(data["config"]["algorithm"].get("nomap_split_size", 250))
    with shared.bedtools_tmpdir(data):
        nblock_bed = "%s-nblocks.bed" % utils.splitext_plus(callable_bed)[0]
        callblock_bed = "%s-callableblocks.bed" % utils.splitext_plus(callable_bed)[0]
        if not utils.file_uptodate(nblock_bed, callable_bed):
            ref_regions = get_ref_bedtool(ref_file, data["config"])
            nblock_regions = _get_nblock_regions(callable_bed, min_n_size, ref_regions)
            nblock_regions = _add_config_regions(nblock_regions, ref_regions, data)
            with file_transaction(data, nblock_bed, callblock_bed) as (tx_nblock_bed, tx_callblock_bed):
                nblock_regions.filter(lambda r: len(r) > min_n_size).saveas(tx_nblock_bed)
                if len(ref_regions.subtract(nblock_regions, nonamecheck=True)) > 0:
                    ref_regions.subtract(tx_nblock_bed, nonamecheck=True).merge(d=min_n_size).saveas(tx_callblock_bed)
                else:
                    raise ValueError("No callable regions found in %s from BAM file %s. Some causes:\n "
                                     " - Alignment regions do not overlap with regions found "
                                     "in your `variant_regions` BED: %s\n"
                                     " - There are no aligned reads in your BAM file that pass sanity checks "
                                     " (mapping score > 1, non-duplicates, both ends of paired reads mapped)"
                                     % (dd.get_sample_name(data), in_bam, dd.get_variant_regions(data)))
    return callblock_bed, nblock_bed
python
[ "def", "block_regions", "(", "callable_bed", ",", "in_bam", ",", "ref_file", ",", "data", ")", ":", "min_n_size", "=", "int", "(", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"nomap_split_size\"", ",", "250", ")", ")", "...
Find blocks of regions for analysis from mapped input BAM file. Identifies islands of callable regions, surrounded by regions with no read support, that can be analyzed independently.
[ "Find", "blocks", "of", "regions", "for", "analysis", "from", "mapped", "input", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L182-L207
train
217,925
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_analysis_block_stats
def _analysis_block_stats(regions, samples):
    """Provide statistics on sizes and number of analysis blocks.
    """
    prev = None
    between_sizes = []
    region_sizes = []
    for region in regions:
        if prev and prev.chrom == region.chrom:
            between_sizes.append(region.start - prev.end)
        region_sizes.append(region.end - region.start)
        prev = region
    def descriptive_stats(xs):
        if len(xs) < 2:
            return xs
        parts = ["min: %s" % min(xs),
                 "5%%: %s" % numpy.percentile(xs, 5),
                 "25%%: %s" % numpy.percentile(xs, 25),
                 "median: %s" % numpy.percentile(xs, 50),
                 "75%%: %s" % numpy.percentile(xs, 75),
                 "95%%: %s" % numpy.percentile(xs, 95),
                 "99%%: %s" % numpy.percentile(xs, 99),
                 "max: %s" % max(xs)]
        return "\n".join([" " + x for x in parts])
    logger.info("Identified %s parallel analysis blocks\n" % len(region_sizes) +
                "Block sizes:\n%s\n" % descriptive_stats(region_sizes) +
                "Between block sizes:\n%s\n" % descriptive_stats(between_sizes))
    if len(region_sizes) == 0:
        raise ValueError("No callable regions found in: %s" %
                         (", ".join([dd.get_sample_name(x) for x in samples])))
python
[ "def", "_analysis_block_stats", "(", "regions", ",", "samples", ")", ":", "prev", "=", "None", "between_sizes", "=", "[", "]", "region_sizes", "=", "[", "]", "for", "region", "in", "regions", ":", "if", "prev", "and", "prev", ".", "chrom", "==", "region"...
Provide statistics on sizes and number of analysis blocks.
[ "Provide", "statistics", "on", "sizes", "and", "number", "of", "analysis", "blocks", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L216-L244
train
217,926
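The summary relies on numpy's default linear interpolation between sorted values; a worked example with invented block sizes:

import numpy

sizes = [100, 250, 400, 800, 1600]
assert numpy.percentile(sizes, 50) == 400.0
# 95th percentile interpolates between 800 and 1600: 800 + 0.8 * (1600 - 800)
assert numpy.percentile(sizes, 95) == 1440.0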
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_needs_region_update
def _needs_region_update(out_file, samples):
    """Check if we need to update BED file of regions, supporting back compatibility.
    """
    nblock_files = [x["regions"]["nblock"] for x in samples if "regions" in x]
    # For older approaches, do not create a new set of analysis regions,
    # since the new algorithm will re-do all BAM and variant steps with
    # new regions
    for nblock_file in nblock_files:
        test_old = nblock_file.replace("-nblocks", "-analysisblocks")
        if os.path.exists(test_old):
            return False
    # Check if any of the local files have changed so we need to refresh
    for noblock_file in nblock_files:
        if not utils.file_uptodate(out_file, noblock_file):
            return True
    return False
python
[ "def", "_needs_region_update", "(", "out_file", ",", "samples", ")", ":", "nblock_files", "=", "[", "x", "[", "\"regions\"", "]", "[", "\"nblock\"", "]", "for", "x", "in", "samples", "if", "\"regions\"", "in", "x", "]", "# For older approaches and do not create ...
Check if we need to update BED file of regions, supporting back compatibility.
[ "Check", "if", "we", "need", "to", "update", "BED", "file", "of", "regions", "supporting", "back", "compatibility", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L246-L261
train
217,927
bcbio/bcbio-nextgen
bcbio/bam/callable.py
combine_sample_regions
def combine_sample_regions(*samples):
    """Create batch-level sets of callable regions for multi-sample calling.
    Intersects all non-callable (nblock) regions from all samples in a batch,
    producing a global set of callable regions.
    """
    samples = utils.unpack_worlds(samples)
    samples = cwlutils.unpack_tarballs(samples, samples[0])
    # back compatibility -- global file for entire sample set
    global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed")
    if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples):
        global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed")
    else:
        global_analysis_file = None
    out = []
    analysis_files = []
    batches = []
    with shared.bedtools_tmpdir(samples[0]):
        for batch, items in vmulti.group_by_batch(samples, require_bam=False).items():
            batches.append(items)
            if global_analysis_file:
                analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file
            else:
                analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items)
            for data in items:
                vr_file = dd.get_variant_regions(data)
                if analysis_file:
                    analysis_files.append(analysis_file)
                    data["config"]["algorithm"]["callable_regions"] = analysis_file
                    data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file
                    data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count()
                elif vr_file:
                    data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count()
                # attach a representative sample for calculating callable region
                if not data.get("work_bam"):
                    for x in items:
                        if x.get("work_bam"):
                            data["work_bam_callable"] = x["work_bam"]
                out.append([data])
    # Ensure output order matches input order, consistency for CWL-based runs
    assert len(out) == len(samples)
    sample_indexes = {dd.get_sample_name(d): i for i, d in enumerate(samples)}
    def by_input_index(xs):
        return sample_indexes[dd.get_sample_name(xs[0])]
    out.sort(key=by_input_index)
    if len(analysis_files) > 0:
        final_regions = pybedtools.BedTool(analysis_files[0])
        _analysis_block_stats(final_regions, batches[0])
    return out
python
[ "def", "combine_sample_regions", "(", "*", "samples", ")", ":", "samples", "=", "utils", ".", "unpack_worlds", "(", "samples", ")", "samples", "=", "cwlutils", ".", "unpack_tarballs", "(", "samples", ",", "samples", "[", "0", "]", ")", "# back compatibility --...
Create batch-level sets of callable regions for multi-sample calling. Intersects all non-callable (nblock) regions from all samples in a batch, producing a global set of callable regions.
[ "Create", "batch", "-", "level", "sets", "of", "callable", "regions", "for", "multi", "-", "sample", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L263-L311
train
217,928
bcbio/bcbio-nextgen
bcbio/bam/callable.py
_combine_sample_regions_batch
def _combine_sample_regions_batch(batch, items):
    """Combine sample regions within a group of batched samples.
    """
    config = items[0]["config"]
    work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "regions"))
    analysis_file = os.path.join(work_dir, "%s-analysis_blocks.bed" % batch)
    no_analysis_file = os.path.join(work_dir, "%s-noanalysis_blocks.bed" % batch)
    if not utils.file_exists(analysis_file) or _needs_region_update(analysis_file, items):
        # Combine all nblocks into a final set of intersecting regions
        # without callable bases. HT @brentp for intersection approach
        # https://groups.google.com/forum/?fromgroups#!topic/bedtools-discuss/qA9wK4zN8do
        bed_regions = [pybedtools.BedTool(x["regions"]["nblock"])
                       for x in items if "regions" in x and x["regions"]["nblock"]]
        if len(bed_regions) == 0:
            analysis_file, no_analysis_file = None, None
        else:
            with file_transaction(items[0], analysis_file, no_analysis_file) as (tx_afile, tx_noafile):
                def intersect_two(a, b):
                    return a.intersect(b, nonamecheck=True).saveas()
                nblock_regions = reduce(intersect_two, bed_regions).saveas(
                    "%s-nblock%s" % utils.splitext_plus(tx_afile))
                ref_file = tz.get_in(["reference", "fasta", "base"], items[0])
                ref_regions = get_ref_bedtool(ref_file, config)
                min_n_size = int(config["algorithm"].get("nomap_split_size", 250))
                block_filter = NBlockRegionPicker(ref_regions, config, min_n_size)
                final_nblock_regions = nblock_regions.filter(
                    block_filter.include_block).saveas().each(block_filter.expand_block).saveas(
                        "%s-nblockfinal%s" % utils.splitext_plus(tx_afile))
                final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).\
                    saveas().merge(d=min_n_size)
                _write_bed_regions(items[0], final_regions, tx_afile, tx_noafile)
    if analysis_file and utils.file_exists(analysis_file):
        return analysis_file, no_analysis_file
    else:
        return None, None
python
[ "def", "_combine_sample_regions_batch", "(", "batch", ",", "items", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "items", "[", "0", "]"...
Combine sample regions within a group of batched samples.
[ "Combine", "sample", "regions", "within", "a", "group", "of", "batched", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L313-L347
train
217,929
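The reduce-based intersection generalizes pairwise `intersect` to any number of samples; a minimal sketch with two invented BED strings (dropping the `nonamecheck`/`saveas` bookkeeping used above):

from functools import reduce
import pybedtools

beds = [pybedtools.BedTool("chr1\t0\t100\nchr1\t200\t300", from_string=True),
        pybedtools.BedTool("chr1\t50\t250", from_string=True)]
shared = reduce(lambda a, b: a.intersect(b), beds)
# shared covers chr1:50-100 and chr1:200-250, the bases present in both inputs
print(shared)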
bcbio/bcbio-nextgen
bcbio/bam/callable.py
get_split_regions
def get_split_regions(bed_file, data):
    """Retrieve a set of split regions using the input BED for callable regions.
    Provides a less inclusive hook for parallelizing over multiple regions.
    """
    out_file = "%s-analysis_blocks.bed" % utils.splitext_plus(bed_file)[0]
    with shared.bedtools_tmpdir(data):
        if not utils.file_uptodate(out_file, bed_file):
            ref_regions = get_ref_bedtool(dd.get_ref_file(data), data["config"])
            nblock_regions = ref_regions.subtract(pybedtools.BedTool(bed_file)).saveas()
            min_n_size = int(tz.get_in(["config", "algorithm", "nomap_split_size"], data, 250))
            block_filter = NBlockRegionPicker(ref_regions, data["config"], min_n_size)
            final_nblock_regions = nblock_regions.filter(
                block_filter.include_block).saveas().each(block_filter.expand_block).saveas()
            with file_transaction(data, out_file) as tx_out_file:
                final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).\
                    saveas().merge(d=min_n_size).saveas(tx_out_file)
    chroms = set([])
    with shared.bedtools_tmpdir(data):
        for r in pybedtools.BedTool(bed_file):
            chroms.add(r.chrom)
    out = []
    for r in pybedtools.BedTool(out_file):
        if r.chrom in chroms:
            out.append((r.chrom, r.start, r.stop))
    return out
python
[ "def", "get_split_regions", "(", "bed_file", ",", "data", ")", ":", "out_file", "=", "\"%s-analysis_blocks.bed\"", "%", "utils", ".", "splitext_plus", "(", "bed_file", ")", "[", "0", "]", "with", "shared", ".", "bedtools_tmpdir", "(", "data", ")", ":", "if",...
Retrieve a set of split regions using the input BED for callable regions. Provides a less inclusive hook for parallelizing over multiple regions.
[ "Retrieve", "a", "set", "of", "split", "regions", "using", "the", "input", "BED", "for", "callable", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L349-L374
train
217,930
bcbio/bcbio-nextgen
bcbio/bam/callable.py
NBlockRegionPicker.include_block
def include_block(self, x):
    """Check for inclusion of block based on distance from previous.
    """
    last_pos = self._chr_last_blocks.get(x.chrom, 0)
    # Region excludes an entire chromosome, typically decoy/haplotypes
    if last_pos <= self._end_buffer and x.stop >= self._ref_sizes.get(x.chrom, 0) - self._end_buffer:
        return True
    # Do not split on smaller decoy and haplotype chromosomes
    elif self._ref_sizes.get(x.chrom, 0) <= self._target_size:
        return False
    elif (x.start - last_pos) > self._target_size:
        self._chr_last_blocks[x.chrom] = x.stop
        return True
    else:
        return False
python
[ "def", "include_block", "(", "self", ",", "x", ")", ":", "last_pos", "=", "self", ".", "_chr_last_blocks", ".", "get", "(", "x", ".", "chrom", ",", "0", ")", "# Region excludes an entire chromosome, typically decoy/haplotypes", "if", "last_pos", "<=", "self", "....
Check for inclusion of block based on distance from previous.
[ "Check", "for", "inclusion", "of", "block", "based", "on", "distance", "from", "previous", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L155-L169
train
217,931
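Stripped of the whole-chromosome and small-contig branches, the spacing rule amounts to a greedy pass that keeps blocks at least target_size apart; a hypothetical trace:

target_size = 10000
last_blocks = {}  # chrom -> end of last included block

def include(chrom, start, stop):
    last = last_blocks.get(chrom, 0)
    if (start - last) > target_size:
        last_blocks[chrom] = stop
        return True
    return False

assert include("chr1", 15000, 15500)       # 15000 past position 0: keep
assert not include("chr1", 20000, 20400)   # only 4500 past 15500: skip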
bcbio/bcbio-nextgen
bcbio/bam/callable.py
NBlockRegionPicker.expand_block
def expand_block(self, feat):
    """Expand any blocks which are near the start or end of a contig.
    """
    chrom_end = self._ref_sizes.get(feat.chrom)
    if chrom_end:
        if feat.start < self._end_buffer:
            feat.start = 0
        if feat.stop >= chrom_end - self._end_buffer:
            feat.stop = chrom_end
    return feat
python
[ "def", "expand_block", "(", "self", ",", "feat", ")", ":", "chrom_end", "=", "self", ".", "_ref_sizes", ".", "get", "(", "feat", ".", "chrom", ")", "if", "chrom_end", ":", "if", "feat", ".", "start", "<", "self", ".", "_end_buffer", ":", "feat", ".",...
Expand any blocks which are near the start or end of a contig.
[ "Expand", "any", "blocks", "which", "are", "near", "the", "start", "or", "end", "of", "a", "contig", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L171-L180
train
217,932
bcbio/bcbio-nextgen
bcbio/chipseq/__init__.py
_keep_assembled_chrom
def _keep_assembled_chrom(bam_file, genome, config):
    """Remove unplaced contigs from the BAM file, keeping assembled chromosomes."""
    fai = "%s.fai" % genome
    chrom = []
    with open(fai) as inh:
        for line in inh:
            c = line.split("\t")[0]
            if c.find("_") < 0:
                chrom.append(c)
    chroms = " ".join(chrom)
    out_file = utils.append_stem(bam_file, '_chrom')
    samtools = config_utils.get_program("samtools", config)
    if not utils.file_exists(out_file):
        with file_transaction(out_file) as tx_out:
            cmd = "{samtools} view -b {bam_file} {chroms} > {tx_out}"
            do.run(cmd.format(**locals()), "Remove contigs from %s" % bam_file)
        bam.index(out_file, config)
    return out_file
python
[ "def", "_keep_assembled_chrom", "(", "bam_file", ",", "genome", ",", "config", ")", ":", "fai", "=", "\"%s.fai\"", "%", "genome", "chrom", "=", "[", "]", "with", "open", "(", "fai", ")", "as", "inh", ":", "for", "line", "in", "inh", ":", "c", "=", ...
Remove unplaced contigs from the BAM file, keeping assembled chromosomes.
[ "Remove", "contigs", "from", "the", "BAM", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/__init__.py#L40-L57
train
217,933
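An equivalent subset can be sketched with pysam instead of shelling out to samtools; this is an illustrative alternative, not the bcbio implementation (it assumes a coordinate-sorted, indexed input BAM):

import pysam

def subset_bam_to_chroms(in_bam, out_bam, chroms):
    with pysam.AlignmentFile(in_bam, "rb") as in_handle:
        with pysam.AlignmentFile(out_bam, "wb", template=in_handle) as out_handle:
            for chrom in chroms:
                # fetch() requires an index (.bai) next to the input BAM
                for read in in_handle.fetch(chrom):
                    out_handle.write(read)
    pysam.index(out_bam)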
bcbio/bcbio-nextgen
bcbio/chipseq/__init__.py
_prepare_bam
def _prepare_bam(bam_file, bed_file, config):
    """Remove blacklist BED regions from the BAM file."""
    if not bam_file or not bed_file:
        return bam_file
    out_file = utils.append_stem(bam_file, '_filter')
    bedtools = config_utils.get_program("bedtools", config)
    if not utils.file_exists(out_file):
        with file_transaction(out_file) as tx_out:
            cmd = "{bedtools} subtract -nonamecheck -A -a {bam_file} -b {bed_file} > {tx_out}"
            do.run(cmd.format(**locals()), "Remove blacklist regions from %s" % bam_file)
    return out_file
python
[ "def", "_prepare_bam", "(", "bam_file", ",", "bed_file", ",", "config", ")", ":", "if", "not", "bam_file", "or", "not", "bed_file", ":", "return", "bam_file", "out_file", "=", "utils", ".", "append_stem", "(", "bam_file", ",", "'_filter'", ")", "bedtools", ...
Remove blacklist BED regions from the BAM file.
[ "Remove", "regions", "from", "bed", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/__init__.py#L60-L70
train
217,934
bcbio/bcbio-nextgen
bcbio/chipseq/__init__.py
_bam_coverage
def _bam_coverage(name, bam_input, data):
    """Run bamCoverage from deeptools"""
    cmd = ("{bam_coverage} -b {bam_input} -o {bw_output} "
           "--binSize 20 --effectiveGenomeSize {size} "
           "--smoothLength 60 --extendReads 150 --centerReads -p {cores}")
    size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
    cores = dd.get_num_cores(data)
    try:
        bam_coverage = config_utils.get_program("bamCoverage", data)
    except config_utils.CmdNotFound:
        logger.info("No bamCoverage found, skipping bamCoverage.")
        return None
    resources = config_utils.get_resources("bamCoverage", data["config"])
    if resources:
        options = resources.get("options")
        if options:
            cmd += " %s" % " ".join([str(x) for x in options])
    bw_output = os.path.join(os.path.dirname(bam_input), "%s.bw" % name)
    if utils.file_exists(bw_output):
        return bw_output
    with file_transaction(bw_output) as out_tx:
        do.run(cmd.format(**locals()), "Run bamCoverage in %s" % name)
    return bw_output
python
[ "def", "_bam_coverage", "(", "name", ",", "bam_input", ",", "data", ")", ":", "cmd", "=", "(", "\"{bam_coverage} -b {bam_input} -o {bw_output} \"", "\"--binSize 20 --effectiveGenomeSize {size} \"", "\"--smoothLength 60 --extendReads 150 --centerReads -p {cores}\"", ")", "size", "...
Run bamCoverage from deeptools
[ "Run", "bamCoverage", "from", "deeptools" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/__init__.py#L72-L94
train
217,935
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_get_out_file
def _get_out_file(work_dir, paired):
    """Retrieve manta output variant file, depending on analysis.
    """
    if paired:
        if paired.normal_bam:
            base_file = "somaticSV.vcf.gz"
        else:
            base_file = "tumorSV.vcf.gz"
    else:
        base_file = "diploidSV.vcf.gz"
    return os.path.join(work_dir, "results", "variants", base_file)
python
[ "def", "_get_out_file", "(", "work_dir", ",", "paired", ")", ":", "if", "paired", ":", "if", "paired", ".", "normal_bam", ":", "base_file", "=", "\"somaticSV.vcf.gz\"", "else", ":", "base_file", "=", "\"tumorSV.vcf.gz\"", "else", ":", "base_file", "=", "\"dipl...
Retrieve manta output variant file, depending on analysis.
[ "Retrieve", "manta", "output", "variant", "file", "depending", "on", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L71-L81
train
217,936
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_get_evidence_bam
def _get_evidence_bam(work_dir, data):
    """Retrieve evidence BAM for the sample if it exists
    """
    evidence_bam = glob.glob(os.path.join(work_dir, "results", "evidence",
                                          "evidence_*.%s*.bam" % (dd.get_sample_name(data))))
    if evidence_bam:
        return evidence_bam[0]
python
[ "def", "_get_evidence_bam", "(", "work_dir", ",", "data", ")", ":", "evidence_bam", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"results\"", ",", "\"evidence\"", ",", "\"evidence_*.%s*.bam\"", "%", "(", "dd", ".",...
Retrieve evidence BAM for the sample if it exists
[ "Retrieve", "evidence", "BAM", "for", "the", "sample", "if", "it", "exists" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L83-L89
train
217,937
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_run_workflow
def _run_workflow(items, paired, workflow_file, work_dir):
    """Run manta analysis inside prepared workflow directory.
    """
    utils.remove_safe(os.path.join(work_dir, "workspace"))
    data = paired.tumor_data if paired else items[0]
    cmd = [utils.get_program_python("configManta.py"), workflow_file,
           "-m", "local", "-j", dd.get_num_cores(data)]
    do.run(cmd, "Run manta SV analysis")
    utils.remove_safe(os.path.join(work_dir, "workspace"))
python
[ "def", "_run_workflow", "(", "items", ",", "paired", ",", "workflow_file", ",", "work_dir", ")", ":", "utils", ".", "remove_safe", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"workspace\"", ")", ")", "data", "=", "paired", ".", "tumor_da...
Run manta analysis inside prepared workflow directory.
[ "Run", "manta", "analysis", "inside", "prepared", "workflow", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L91-L98
train
217,938
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_prep_config
def _prep_config(items, paired, work_dir):
    """Run initial configuration, generating a run directory for Manta.
    """
    assert utils.which("configManta.py"), "Could not find installed configManta.py"
    out_file = os.path.join(work_dir, "runWorkflow.py")
    if not utils.file_exists(out_file) or _out_of_date(out_file):
        config_script = os.path.realpath(utils.which("configManta.py"))
        cmd = [utils.get_program_python("configManta.py"), config_script]
        if paired:
            if paired.normal_bam:
                cmd += ["--normalBam=%s" % paired.normal_bam,
                        "--tumorBam=%s" % paired.tumor_bam]
            else:
                cmd += ["--tumorBam=%s" % paired.tumor_bam]
        else:
            cmd += ["--bam=%s" % dd.get_align_bam(data) for data in items]
        data = paired.tumor_data if paired else items[0]
        cmd += ["--referenceFasta=%s" % dd.get_ref_file(data), "--runDir=%s" % work_dir]
        if dd.get_coverage_interval(data) not in ["genome"]:
            cmd += ["--exome"]
        for region in _maybe_limit_chromosomes(data):
            cmd += ["--region", region]
        resources = config_utils.get_resources("manta", data["config"])
        if resources.get("options"):
            cmd += [str(x) for x in resources["options"]]
        # If we are removing polyX, avoid calling on small indels which require
        # excessively long runtimes on noisy WGS runs
        if "polyx" in dd.get_exclude_regions(data):
            cmd += ["--config", _prep_streamlined_config(config_script, work_dir)]
        do.run(cmd, "Configure manta SV analysis")
    return out_file
python
[ "def", "_prep_config", "(", "items", ",", "paired", ",", "work_dir", ")", ":", "assert", "utils", ".", "which", "(", "\"configManta.py\"", ")", ",", "\"Could not find installed configManta.py\"", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ...
Run initial configuration, generating a run directory for Manta.
[ "Run", "initial", "configuration", "generating", "a", "run", "directory", "for", "Manta", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L100-L129
train
217,939
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_prep_streamlined_config
def _prep_streamlined_config(config_script, work_dir):
    """Create manta INI file without steps that potentially increase runtimes.
    This removes calling of small indels.
    """
    new_min_size = 100
    in_file = config_script + ".ini"
    out_file = os.path.join(work_dir, os.path.basename(in_file))
    with open(in_file) as in_handle:
        with open(out_file, "w") as out_handle:
            for line in in_handle:
                if line.startswith("minCandidateVariantSize"):
                    out_handle.write("minCandidateVariantSize = %s\n" % new_min_size)
                else:
                    out_handle.write(line)
    return out_file
python
[ "def", "_prep_streamlined_config", "(", "config_script", ",", "work_dir", ")", ":", "new_min_size", "=", "100", "in_file", "=", "config_script", "+", "\".ini\"", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "os", ".", "path", ".", ...
Create manta INI file without steps that potentially increase runtimes. This removes calling of small indels.
[ "Create", "manta", "INI", "file", "without", "steps", "that", "potentially", "increase", "runtimes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L131-L146
train
217,940
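The rewrite is a plain line-level substitution, so it is easy to check in isolation; a self-contained sketch with a hypothetical two-line INI (option values invented):

import io

new_min_size = 100
src = io.StringIO(u"minCandidateVariantSize = 8\nsomeOtherManta0ption = 1\n")
out = []
for line in src:
    if line.startswith("minCandidateVariantSize"):
        out.append("minCandidateVariantSize = %s\n" % new_min_size)
    else:
        out.append(line)
assert out[0] == "minCandidateVariantSize = 100\n"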
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_maybe_limit_chromosomes
def _maybe_limit_chromosomes(data):
    """Potentially limit chromosomes to avoid problematically named HLA contigs.
    HLAs have ':' characters in them which confuse downstream processing.
    If we have no problematic chromosomes we don't limit anything.
    """
    std_chroms = []
    prob_chroms = []
    noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data)
    for contig in ref.file_contigs(dd.get_ref_file(data)):
        if contig.name.find(":") > 0 or (noalt_calling and not chromhacks.is_nonalt(contig.name)):
            prob_chroms.append(contig.name)
        else:
            std_chroms.append(contig.name)
    if len(prob_chroms) > 0:
        return std_chroms
    else:
        return []
python
[ "def", "_maybe_limit_chromosomes", "(", "data", ")", ":", "std_chroms", "=", "[", "]", "prob_chroms", "=", "[", "]", "noalt_calling", "=", "\"noalt_calling\"", "in", "dd", ".", "get_tools_on", "(", "data", ")", "or", "\"altcontigs\"", "in", "dd", ".", "get_e...
Potentially limit chromosomes to avoid problematically named HLA contigs. HLAs have ':' characters in them which confuse downstream processing. If we have no problematic chromosomes we don't limit anything.
[ "Potentially", "limit", "chromosomes", "to", "avoid", "problematically", "named", "HLA", "contigs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L148-L165
train
217,941
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_out_of_date
def _out_of_date(rw_file):
    """Check if a run workflow file points to an older version of manta and needs a refresh.
    """
    with open(rw_file) as in_handle:
        for line in in_handle:
            if line.startswith("sys.path.append"):
                file_version = line.split("/lib/python")[0].split("Cellar/manta/")[-1]
                if file_version != programs.get_version_manifest("manta"):
                    return True
    return False
python
[ "def", "_out_of_date", "(", "rw_file", ")", ":", "with", "open", "(", "rw_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"sys.path.append\"", ")", ":", "file_version", "=", "line", ".", ...
Check if a run workflow file points to an older version of manta and needs a refresh.
[ "Check", "if", "a", "run", "workflow", "file", "points", "to", "an", "older", "version", "of", "manta", "and", "needs", "a", "refresh", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L171-L180
train
217,942
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_freebayes_options_from_config
def _freebayes_options_from_config(items, config, out_file, region=None):
    """Prepare standard options from configuration input.
    Input BED target files are merged to avoid overlapping regions which
    cause FreeBayes to call multiple times.
    Checks for empty sets of target regions after filtering for high depth,
    in which case we should skip the FreeBayes run.
    """
    opts = ["--genotype-qualities", "--strict-vcf"]
    cur_ploidy = ploidy.get_ploidy(items, region)
    base_ploidy = ploidy.get_ploidy(items)
    opts += ["--ploidy", str(cur_ploidy)]
    # Adjust min fraction when trying to call more sensitively in certain
    # regions. This is primarily meant for pooled mitochondrial calling.
    if (isinstance(region, (list, tuple)) and chromhacks.is_mitochondrial(region[0])
            and cur_ploidy >= base_ploidy and "--min-alternate-fraction" not in opts
            and "-F" not in opts):
        opts += ["--min-alternate-fraction", "0.01"]
    variant_regions = bedutils.population_variant_regions(items, merged=True)
    # Produce gVCF output
    if any("gvcf" in dd.get_tools_on(d) for d in items):
        opts += ["--gvcf", "--gvcf-chunk", "50000"]
    no_target_regions = False
    target = shared.subset_variant_regions(variant_regions, region, out_file, items)
    if target:
        if isinstance(target, six.string_types) and os.path.isfile(target):
            if os.path.getsize(target) == 0:
                no_target_regions = True
            else:
                opts += ["--targets", target]
        else:
            opts += ["--region", region_to_freebayes(target)]
    resources = config_utils.get_resources("freebayes", config)
    if resources.get("options"):
        opts += resources["options"]
    return opts, no_target_regions
python
[ "def", "_freebayes_options_from_config", "(", "items", ",", "config", ",", "out_file", ",", "region", "=", "None", ")", ":", "opts", "=", "[", "\"--genotype-qualities\"", ",", "\"--strict-vcf\"", "]", "cur_ploidy", "=", "ploidy", ".", "get_ploidy", "(", "items",...
Prepare standard options from configuration input. Input BED target files are merged to avoid overlapping regions which cause FreeBayes to call multiple times. Checks for empty sets of target regions after filtering for high depth, in which case we should skip the FreeBayes run.
[ "Prepare", "standard", "options", "from", "configuration", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L29-L64
train
217,943
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_add_somatic_opts
def _add_somatic_opts(opts, paired):
    """Add somatic options to current set. See _run_freebayes_paired for references.
    """
    if "--min-alternate-fraction" not in opts and "-F" not in opts:
        # add minimum reportable allele frequency
        # FreeBayes defaults to 20%, but use 10% by default for the
        # tumor case
        min_af = float(utils.get_in(paired.tumor_config,
                                    ("algorithm", "min_allele_fraction"), 10)) / 100.0
        opts += " --min-alternate-fraction %s" % min_af
    # Recommended settings for cancer calling
    opts += (" --pooled-discrete --pooled-continuous "
             "--report-genotype-likelihood-max --allele-balance-priors-off")
    return opts
python
[ "def", "_add_somatic_opts", "(", "opts", ",", "paired", ")", ":", "if", "\"--min-alternate-fraction\"", "not", "in", "opts", "and", "\"-F\"", "not", "in", "opts", ":", "# add minimum reportable allele frequency", "# FreeBayes defaults to 20%, but use 10% by default for the", ...
Add somatic options to current set. See _run_freebayes_paired for references.
[ "Add", "somatic", "options", "to", "current", "set", ".", "See", "_run_freebayes_paired", "for", "references", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L66-L79
train
217,944
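The min_allele_fraction setting is configured as a percentage, so the unit conversion matters; a worked example with the default value:

min_allele_fraction = 10  # configured value, in percent
min_af = float(min_allele_fraction) / 100.0
opts = " --min-alternate-fraction %s" % min_af
assert opts == " --min-alternate-fraction 0.1"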
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_run_freebayes_caller
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files,
                          region=None, out_file=None, somatic=None):
    """Detect SNPs and indels with FreeBayes.
    Performs post-filtering to remove very low quality variants which
    can cause issues feeding into GATK. Breaks variants into individual
    allelic primitives for analysis and evaluation.
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            freebayes = config_utils.get_program("freebayes", config)
            input_bams = " ".join("-b %s" % x for x in align_bams)
            opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region)
            if no_target_regions:
                vcfutils.write_empty_vcf(tx_out_file, config,
                                         samples=[dd.get_sample_name(d) for d in items])
            else:
                opts = " ".join(opts)
                # Recommended options from 1000 genomes low-complexity evaluation
                # https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ
                opts += " --min-repeat-entropy 1"
                # Remove partial observations, which cause a preference for heterozygote calls
                # https://github.com/ekg/freebayes/issues/234#issuecomment-205331765
                opts += " --no-partial-observations"
                if somatic:
                    opts = _add_somatic_opts(opts, somatic)
                compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
                # For multi-sample outputs, ensure consistent order
                samples = ("-s" + ",".join([dd.get_sample_name(d) for d in items])) if len(items) > 1 else ""
                fix_ambig = vcfutils.fix_ambiguous_cl()
                py_cl = config_utils.get_program("py", config)
                cmd = ("{freebayes} -f {ref_file} {opts} {input_bams} "
                       """| bcftools filter -i 'ALT="<*>" || QUAL > 5' """
                       "| {fix_ambig} | bcftools view {samples} -a - | "
                       "{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | "
                       "vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | "
                       "vt normalize -n -r {ref_file} -q - | vcfuniqalleles | vt uniq - 2> /dev/null "
                       "{compress_cmd} > {tx_out_file}")
                do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {})
    return out_file
python
[ "def", "_run_freebayes_caller", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ",", "somatic", "=", "None", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"...
Detect SNPs and indels with FreeBayes. Performs post-filtering to remove very low quality variants which can cause issues feeding into GATK. Breaks variants into individual allelic primitives for analysis and evaluation.
[ "Detect", "SNPs", "and", "indels", "with", "FreeBayes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L102-L144
train
217,945
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_check_lods
def _check_lods(parts, tumor_thresh, normal_thresh, indexes):
    """Ensure likelihoods for tumor and normal pass thresholds.

    Skipped if no FreeBayes GL annotations available.
    """
    try:
        gl_index = parts[8].split(":").index("GL")
    except ValueError:
        return True
    try:
        tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",")
                     if x != "."]
        if tumor_gls:
            tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
        else:
            tumor_lod = -1.0  # No GL information, no tumor call (so fail it)
    except IndexError:
        tumor_lod = -1.0
    try:
        normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",")
                      if x != "."]
        if normal_gls:
            normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
        else:
            normal_lod = normal_thresh  # No GL information, no normal call (so pass it)
    except IndexError:
        normal_lod = normal_thresh
    return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
python
def _check_lods(parts, tumor_thresh, normal_thresh, indexes):
    """Ensure likelihoods for tumor and normal pass thresholds.

    Skipped if no FreeBayes GL annotations available.
    """
    try:
        gl_index = parts[8].split(":").index("GL")
    except ValueError:
        return True
    try:
        tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",")
                     if x != "."]
        if tumor_gls:
            tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
        else:
            tumor_lod = -1.0  # No GL information, no tumor call (so fail it)
    except IndexError:
        tumor_lod = -1.0
    try:
        normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",")
                      if x != "."]
        if normal_gls:
            normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
        else:
            normal_lod = normal_thresh  # No GL information, no normal call (so pass it)
    except IndexError:
        normal_lod = normal_thresh
    return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
[ "def", "_check_lods", "(", "parts", ",", "tumor_thresh", ",", "normal_thresh", ",", "indexes", ")", ":", "try", ":", "gl_index", "=", "parts", "[", "8", "]", ".", "split", "(", "\":\"", ")", ".", "index", "(", "\"GL\"", ")", "except", "ValueError", ":"...
Ensure likelihoods for tumor and normal pass thresholds. Skipped if no FreeBayes GL annotations available.
[ "Ensure", "likelihoods", "for", "tumor", "and", "normal", "pass", "thresholds", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L197-L224
train
217,946
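A standalone sketch (not from the source) of the LOD arithmetic _check_lods applies to FreeBayes GL annotations; the likelihood values below are invented. GLs are log10-scaled with the homozygous reference genotype first, so the tumor LOD is the best support for any non-reference genotype over reference, and the normal LOD is the worst-case support for reference over any alternative.

def lods_from_gls(tumor_gls, normal_gls):
    # Tumor LOD: strongest evidence for a non-reference genotype.
    tumor_lod = max(g - tumor_gls[0] for g in tumor_gls[1:])
    # Normal LOD: weakest margin by which reference beats the alternatives.
    normal_lod = min(normal_gls[0] - g for g in normal_gls[1:])
    return tumor_lod, normal_lod

tumor_lod, normal_lod = lods_from_gls([-30.0, -0.5, -20.0], [-0.1, -15.0, -25.0])
print(tumor_lod, normal_lod)  # 29.5 (strong tumor evidence) 14.9 (clean normal)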
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_check_freqs
def _check_freqs(parts, indexes):
    """Ensure frequency of tumor to normal passes a reasonable threshold.

    Avoids calling low frequency tumors also present at low frequency in normals,
    which indicates a contamination or persistent error.
    """
    thresh_ratio = 2.7
    try:  # FreeBayes
        ao_index = parts[8].split(":").index("AO")
        ro_index = parts[8].split(":").index("RO")
    except ValueError:
        ao_index, ro_index = None, None
    try:  # VarDict
        af_index = parts[8].split(":").index("AF")
    except ValueError:
        af_index = None
    if af_index is None and ao_index is None:
        # okay to skip if a gVCF record
        if parts[4].find("<*>") == -1:
            raise NotImplementedError("Unexpected format annotations: %s" % parts[8])
    def _calc_freq(item):
        try:
            if ao_index is not None and ro_index is not None:
                ao = sum([int(x) for x in item.split(":")[ao_index].split(",")])
                ro = int(item.split(":")[ro_index])
                freq = ao / float(ao + ro)
            elif af_index is not None:
                freq = float(item.split(":")[af_index])
            else:
                freq = 0.0
        except (IndexError, ValueError, ZeroDivisionError):
            freq = 0.0
        return freq
    tumor_freq, normal_freq = _calc_freq(parts[indexes["tumor"]]), _calc_freq(parts[indexes["normal"]])
    return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
python
def _check_freqs(parts, indexes):
    """Ensure frequency of tumor to normal passes a reasonable threshold.

    Avoids calling low frequency tumors also present at low frequency in normals,
    which indicates a contamination or persistent error.
    """
    thresh_ratio = 2.7
    try:  # FreeBayes
        ao_index = parts[8].split(":").index("AO")
        ro_index = parts[8].split(":").index("RO")
    except ValueError:
        ao_index, ro_index = None, None
    try:  # VarDict
        af_index = parts[8].split(":").index("AF")
    except ValueError:
        af_index = None
    if af_index is None and ao_index is None:
        # okay to skip if a gVCF record
        if parts[4].find("<*>") == -1:
            raise NotImplementedError("Unexpected format annotations: %s" % parts[8])
    def _calc_freq(item):
        try:
            if ao_index is not None and ro_index is not None:
                ao = sum([int(x) for x in item.split(":")[ao_index].split(",")])
                ro = int(item.split(":")[ro_index])
                freq = ao / float(ao + ro)
            elif af_index is not None:
                freq = float(item.split(":")[af_index])
            else:
                freq = 0.0
        except (IndexError, ValueError, ZeroDivisionError):
            freq = 0.0
        return freq
    tumor_freq, normal_freq = _calc_freq(parts[indexes["tumor"]]), _calc_freq(parts[indexes["normal"]])
    return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
[ "def", "_check_freqs", "(", "parts", ",", "indexes", ")", ":", "thresh_ratio", "=", "2.7", "try", ":", "# FreeBayes", "ao_index", "=", "parts", "[", "8", "]", ".", "split", "(", "\":\"", ")", ".", "index", "(", "\"AO\"", ")", "ro_index", "=", "parts", ...
Ensure frequency of tumor to normal passes a reasonable threshold. Avoids calling low frequency tumors also present at low frequency in normals, which indicates a contamination or persistent error.
[ "Ensure", "frequency", "of", "tumor", "to", "normal", "passes", "a", "reasonable", "threshold", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L226-L260
train
217,947
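A minimal standalone sketch (illustrative, not from the source) of the tumor/normal frequency comparison in _check_freqs. With thresh_ratio = 2.7, a call passes when the normal allele frequency is essentially zero or at least 2.7-fold lower than the tumor frequency.

def passes_freq_check(tumor_ao, tumor_ro, normal_ao, normal_ro, thresh_ratio=2.7):
    tumor_freq = tumor_ao / float(tumor_ao + tumor_ro)    # alternate / total observations
    normal_freq = normal_ao / float(normal_ao + normal_ro)
    return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio

print(passes_freq_check(20, 80, 0, 100))   # True: variant absent in the normal
print(passes_freq_check(20, 80, 10, 90))   # False: 0.10 > 0.20 / 2.7, likely artifact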
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_clean_freebayes_output
def _clean_freebayes_output(line):
    """Clean FreeBayes output to make post-processing with GATK happy.

    XXX Not applied on recent versions which fix issues to be more
    compatible with bgzip output, but retained in case of need.

    - Remove lines from FreeBayes outputs where REF/ALT are identical:
      2       22816178        .       G       G       0.0339196
      or there are multiple duplicate alleles:
      4       60594753        .       TGAAA   T,T
    - Remove Type=Int specifications which are not valid VCF and GATK chokes on.
    """
    if line.startswith("#"):
        line = line.replace("Type=Int,D", "Type=Integer,D")
        return line
    else:
        parts = line.split("\t")
        alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
        if len(alleles) == len(set(alleles)):
            return line
    return None
python
def _clean_freebayes_output(line):
    """Clean FreeBayes output to make post-processing with GATK happy.

    XXX Not applied on recent versions which fix issues to be more
    compatible with bgzip output, but retained in case of need.

    - Remove lines from FreeBayes outputs where REF/ALT are identical:
      2       22816178        .       G       G       0.0339196
      or there are multiple duplicate alleles:
      4       60594753        .       TGAAA   T,T
    - Remove Type=Int specifications which are not valid VCF and GATK chokes on.
    """
    if line.startswith("#"):
        line = line.replace("Type=Int,D", "Type=Integer,D")
        return line
    else:
        parts = line.split("\t")
        alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
        if len(alleles) == len(set(alleles)):
            return line
    return None
[ "def", "_clean_freebayes_output", "(", "line", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "line", "=", "line", ".", "replace", "(", "\"Type=Int,D\"", ",", "\"Type=Integer,D\"", ")", "return", "line", "else", ":", "parts", "=", "line...
Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on.
[ "Clean", "FreeBayes", "output", "to", "make", "post", "-", "processing", "with", "GATK", "happy", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L332-L353
train
217,948
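Illustrative calls (the sample lines are made up, and _clean_freebayes_output is assumed to be in scope from the record above) showing how duplicate-allele records are dropped and the invalid Type=Int header is repaired:

print(_clean_freebayes_output('##INFO=<ID=DP,Number=1,Type=Int,Description="Depth">\n'))
# -> same header line with Type=Integer substituted

print(_clean_freebayes_output("2\t22816178\t.\tG\tG\t0.0339196\n"))
# -> None: REF and ALT are identical, so the record is removed

print(_clean_freebayes_output("4\t60594753\t.\tTGAAA\tT,T\n"))
# -> None: duplicate ALT alleles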
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
clean_vcf_output
def clean_vcf_output(orig_file, clean_fn, config, name="clean"):
    """Provide framework to clean a file in-place, with the specified clean function.
    """
    base, ext = utils.splitext_plus(orig_file)
    out_file = "{0}-{1}{2}".format(base, name, ext)
    if not utils.file_exists(out_file):
        with open(orig_file) as in_handle:
            with file_transaction(config, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    for line in in_handle:
                        update_line = clean_fn(line)
                        if update_line:
                            out_handle.write(update_line)
        move_vcf(orig_file, "{0}.orig".format(orig_file))
        move_vcf(out_file, orig_file)
        with open(out_file, "w") as out_handle:
            out_handle.write("Moved to {0}".format(orig_file))
python
def clean_vcf_output(orig_file, clean_fn, config, name="clean"):
    """Provide framework to clean a file in-place, with the specified clean function.
    """
    base, ext = utils.splitext_plus(orig_file)
    out_file = "{0}-{1}{2}".format(base, name, ext)
    if not utils.file_exists(out_file):
        with open(orig_file) as in_handle:
            with file_transaction(config, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    for line in in_handle:
                        update_line = clean_fn(line)
                        if update_line:
                            out_handle.write(update_line)
        move_vcf(orig_file, "{0}.orig".format(orig_file))
        move_vcf(out_file, orig_file)
        with open(out_file, "w") as out_handle:
            out_handle.write("Moved to {0}".format(orig_file))
[ "def", "clean_vcf_output", "(", "orig_file", ",", "clean_fn", ",", "config", ",", "name", "=", "\"clean\"", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "orig_file", ")", "out_file", "=", "\"{0}-{1}{2}\"", ".", "format", "(", "bas...
Provide framework to clean a file in-place, with the specified clean function.
[ "Provide", "framework", "to", "clean", "a", "file", "in", "-", "place", "with", "the", "specified", "clean", "function", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L355-L372
train
217,949
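A hedged usage sketch tying this record to the previous one; the path and minimal config are assumptions, and both functions live in bcbio.variation.freebayes:

config = {}  # minimal config; only used by file_transaction here
clean_vcf_output("/final/batch1-freebayes.vcf", _clean_freebayes_output, config)
# Afterwards:
#   /final/batch1-freebayes.vcf        -> cleaned records
#   /final/batch1-freebayes.vcf.orig   -> untouched original
#   /final/batch1-freebayes-clean.vcf  -> placeholder noting the move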
bcbio/bcbio-nextgen
bcbio/variation/effects.py
get_type
def get_type(data):
    """Retrieve the type of effects calculation to do.
    """
    if data["analysis"].lower().startswith("var") or dd.get_variantcaller(data):
        return tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
python
def get_type(data):
    """Retrieve the type of effects calculation to do.
    """
    if data["analysis"].lower().startswith("var") or dd.get_variantcaller(data):
        return tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
[ "def", "get_type", "(", "data", ")", ":", "if", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", ".", "startswith", "(", "\"var\"", ")", "or", "dd", ".", "get_variantcaller", "(", "data", ")", ":", "return", "tz", ".", "get_in", "(", "(", ...
Retrieve the type of effects calculation to do.
[ "Retrieve", "the", "type", "of", "effects", "calculation", "to", "do", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L46-L50
train
217,950
bcbio/bcbio-nextgen
bcbio/variation/effects.py
prep_vep_cache
def prep_vep_cache(dbkey, ref_file, tooldir=None, config=None):
    """Ensure correct installation of VEP cache file.
    """
    if config is None:
        config = {}
    resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey)
    if os.path.exists(resource_file):
        with open(resource_file) as in_handle:
            resources = yaml.safe_load(in_handle)
        ensembl_name = tz.get_in(["aliases", "ensembl"], resources)
        symlink_dir = _special_dbkey_maps(dbkey, ref_file)
        if ensembl_name and ensembl_name.find("_vep_") == -1:
            raise ValueError("%s has an incorrect ensembl value. "
                             "It should have _vep_ in the name. "
                             "Remove the line or fix the name to avoid this error." % ensembl_name)
        if symlink_dir and ensembl_name:
            species, vepv = ensembl_name.split("_vep_")
            return symlink_dir, species
        elif ensembl_name:
            species, vepv = ensembl_name.split("_vep_")
            vep_dir = utils.safe_makedir(os.path.normpath(os.path.join(
                os.path.dirname(os.path.dirname(ref_file)), "vep")))
            out_dir = os.path.join(vep_dir, species, vepv)
            if not os.path.exists(out_dir):
                tmp_dir = utils.safe_makedir(os.path.join(vep_dir, species, "txtmp"))
                eversion = vepv.split("_")[0]
                url = "http://ftp.ensembl.org/pub/release-%s/variation/VEP/%s.tar.gz" % (eversion, ensembl_name)
                with utils.chdir(tmp_dir):
                    subprocess.check_call(["wget", "--no-check-certificate", "-c", url])
                vep_path = "%s/bin/" % tooldir if tooldir else ""
                perl_exports = utils.get_perl_exports()
                cmd = ["%svep_install" % vep_path, "-a", "c", "-s", ensembl_name,
                       "-c", vep_dir, "-u", tmp_dir, "--NO_UPDATE", "--VERSION", eversion]
                do.run("%s && %s" % (perl_exports, " ".join(cmd)),
                       "Prepare VEP directory for %s" % ensembl_name)
                cmd = ["%svep_convert_cache" % vep_path, "--species", species,
                       "--version", vepv, "--dir", vep_dir, "--force_overwrite", "--remove"]
                do.run("%s && %s" % (perl_exports, " ".join(cmd)),
                       "Convert VEP cache to tabix %s" % ensembl_name)
                for tmp_fname in os.listdir(tmp_dir):
                    os.remove(os.path.join(tmp_dir, tmp_fname))
                os.rmdir(tmp_dir)
            tmp_dir = os.path.join(vep_dir, "tmp")
            if os.path.exists(tmp_dir):
                shutil.rmtree(tmp_dir)
            return vep_dir, species
    return None, None
python
def prep_vep_cache(dbkey, ref_file, tooldir=None, config=None):
    """Ensure correct installation of VEP cache file.
    """
    if config is None:
        config = {}
    resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey)
    if os.path.exists(resource_file):
        with open(resource_file) as in_handle:
            resources = yaml.safe_load(in_handle)
        ensembl_name = tz.get_in(["aliases", "ensembl"], resources)
        symlink_dir = _special_dbkey_maps(dbkey, ref_file)
        if ensembl_name and ensembl_name.find("_vep_") == -1:
            raise ValueError("%s has an incorrect ensembl value. "
                             "It should have _vep_ in the name. "
                             "Remove the line or fix the name to avoid this error." % ensembl_name)
        if symlink_dir and ensembl_name:
            species, vepv = ensembl_name.split("_vep_")
            return symlink_dir, species
        elif ensembl_name:
            species, vepv = ensembl_name.split("_vep_")
            vep_dir = utils.safe_makedir(os.path.normpath(os.path.join(
                os.path.dirname(os.path.dirname(ref_file)), "vep")))
            out_dir = os.path.join(vep_dir, species, vepv)
            if not os.path.exists(out_dir):
                tmp_dir = utils.safe_makedir(os.path.join(vep_dir, species, "txtmp"))
                eversion = vepv.split("_")[0]
                url = "http://ftp.ensembl.org/pub/release-%s/variation/VEP/%s.tar.gz" % (eversion, ensembl_name)
                with utils.chdir(tmp_dir):
                    subprocess.check_call(["wget", "--no-check-certificate", "-c", url])
                vep_path = "%s/bin/" % tooldir if tooldir else ""
                perl_exports = utils.get_perl_exports()
                cmd = ["%svep_install" % vep_path, "-a", "c", "-s", ensembl_name,
                       "-c", vep_dir, "-u", tmp_dir, "--NO_UPDATE", "--VERSION", eversion]
                do.run("%s && %s" % (perl_exports, " ".join(cmd)),
                       "Prepare VEP directory for %s" % ensembl_name)
                cmd = ["%svep_convert_cache" % vep_path, "--species", species,
                       "--version", vepv, "--dir", vep_dir, "--force_overwrite", "--remove"]
                do.run("%s && %s" % (perl_exports, " ".join(cmd)),
                       "Convert VEP cache to tabix %s" % ensembl_name)
                for tmp_fname in os.listdir(tmp_dir):
                    os.remove(os.path.join(tmp_dir, tmp_fname))
                os.rmdir(tmp_dir)
            tmp_dir = os.path.join(vep_dir, "tmp")
            if os.path.exists(tmp_dir):
                shutil.rmtree(tmp_dir)
            return vep_dir, species
    return None, None
[ "def", "prep_vep_cache", "(", "dbkey", ",", "ref_file", ",", "tooldir", "=", "None", ",", "config", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "=", "{", "}", "resource_file", "=", "os", ".", "path", ".", "join", "(", "os", ...
Ensure correct installation of VEP cache file.
[ "Ensure", "correct", "installation", "of", "VEP", "cache", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L74-L117
train
217,951
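A small illustration of how prep_vep_cache derives the download URL from the ensembl alias; the alias value below is an invented example:

ensembl_name = "homo_sapiens_vep_94_GRCh38"
species, vepv = ensembl_name.split("_vep_")
eversion = vepv.split("_")[0]
url = "http://ftp.ensembl.org/pub/release-%s/variation/VEP/%s.tar.gz" % (eversion, ensembl_name)
print(species, eversion)  # homo_sapiens 94
print(url)  # .../release-94/variation/VEP/homo_sapiens_vep_94_GRCh38.tar.gz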
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_get_G2P
def _get_G2P(data):
    """A VEP plugin that uses G2P allelic requirements to assess variants
    in genes for potential phenotype involvement.
    """
    G2P_file = tz.get_in(("genome_resources", "variation", "genotype2phenotype"), data)
    if G2P_file:
        return ["--plugin", "G2P,file:%s" % os.path.realpath(G2P_file)]
    else:
        return []
python
def _get_G2P(data):
    """A VEP plugin that uses G2P allelic requirements to assess variants
    in genes for potential phenotype involvement.
    """
    G2P_file = tz.get_in(("genome_resources", "variation", "genotype2phenotype"), data)
    if G2P_file:
        return ["--plugin", "G2P,file:%s" % os.path.realpath(G2P_file)]
    else:
        return []
[ "def", "_get_G2P", "(", "data", ")", ":", "G2P_file", "=", "os", ".", "path", ".", "realpath", "(", "tz", ".", "get_in", "(", "(", "\"genome_resources\"", ",", "\"variation\"", ",", "\"genotype2phenotype\"", ")", ",", "data", ")", ")", "args", "=", "[", ...
A VEP plugin that uses G2P allelic requirements to assess variants in genes for potential phenotype involvement.
[ "A", "VEP", "plugin", "that", "uses", "G2P", "allelic", "requirements", "to", "assess", "variants", "in", "genes", "for", "potential", "phenotype", "involvement", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L234-L244
train
217,952
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_snpeff_args_from_config
def _snpeff_args_from_config(data):
    """Retrieve snpEff arguments supplied through input configuration.
    """
    config = data["config"]
    args = ["-hgvs"]
    # General supplied arguments
    resources = config_utils.get_resources("snpeff", config)
    if resources.get("options"):
        args += [str(x) for x in resources.get("options", [])]
    # cancer specific calling arguments
    if vcfutils.get_paired_phenotype(data):
        args += ["-cancer"]
    effects_transcripts = dd.get_effects_transcripts(data)
    if effects_transcripts in set(["canonical_cancer"]):
        _, snpeff_base_dir = get_db(data)
        canon_list_file = os.path.join(snpeff_base_dir, "transcripts", "%s.txt" % effects_transcripts)
        if not utils.file_exists(canon_list_file):
            raise ValueError("Cannot find expected file for effects_transcripts: %s" % canon_list_file)
        args += ["-canonList", canon_list_file]
    elif effects_transcripts == "canonical" or tz.get_in(("config", "algorithm", "clinical_reporting"), data):
        args += ["-canon"]
    return args
python
def _snpeff_args_from_config(data):
    """Retrieve snpEff arguments supplied through input configuration.
    """
    config = data["config"]
    args = ["-hgvs"]
    # General supplied arguments
    resources = config_utils.get_resources("snpeff", config)
    if resources.get("options"):
        args += [str(x) for x in resources.get("options", [])]
    # cancer specific calling arguments
    if vcfutils.get_paired_phenotype(data):
        args += ["-cancer"]
    effects_transcripts = dd.get_effects_transcripts(data)
    if effects_transcripts in set(["canonical_cancer"]):
        _, snpeff_base_dir = get_db(data)
        canon_list_file = os.path.join(snpeff_base_dir, "transcripts", "%s.txt" % effects_transcripts)
        if not utils.file_exists(canon_list_file):
            raise ValueError("Cannot find expected file for effects_transcripts: %s" % canon_list_file)
        args += ["-canonList", canon_list_file]
    elif effects_transcripts == "canonical" or tz.get_in(("config", "algorithm", "clinical_reporting"), data):
        args += ["-canon"]
    return args
[ "def", "_snpeff_args_from_config", "(", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "args", "=", "[", "\"-hgvs\"", "]", "# General supplied arguments", "resources", "=", "config_utils", ".", "get_resources", "(", "\"snpeff\"", ",", "config", ...
Retrieve snpEff arguments supplied through input configuration.
[ "Retrieve", "snpEff", "arguments", "supplied", "through", "input", "configuration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L266-L288
train
217,953
bcbio/bcbio-nextgen
bcbio/variation/effects.py
get_db
def get_db(data):
    """Retrieve a snpEff database name and location relative to reference file.
    """
    snpeff_db = utils.get_in(data, ("genome_resources", "aliases", "snpeff"))
    snpeff_base_dir = None
    if snpeff_db:
        snpeff_base_dir = utils.get_in(data, ("reference", "snpeff"))
        if not (isinstance(snpeff_base_dir, six.string_types) and os.path.isdir(snpeff_base_dir)):
            snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db))
            if not snpeff_base_dir:
                # We need to mask '.' characters for CWL/WDL processing, check for them here
                snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db.replace(".", "_")))
                if snpeff_base_dir:
                    snpeff_db = snpeff_db.replace("_", ".")
        if isinstance(snpeff_base_dir, dict) and snpeff_base_dir.get("base"):
            snpeff_base_dir = snpeff_base_dir["base"]
        if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types)
              and os.path.isfile(snpeff_base_dir)):
            snpeff_base_dir = os.path.dirname(snpeff_base_dir)
        if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types)
              and snpeff_base_dir.endswith("%s%s" % (os.path.sep, snpeff_db))):
            snpeff_base_dir = os.path.dirname(snpeff_base_dir)
    if not snpeff_base_dir:
        ref_file = utils.get_in(data, ("reference", "fasta", "base"))
        snpeff_base_dir = utils.safe_makedir(os.path.normpath(os.path.join(
            os.path.dirname(os.path.dirname(ref_file)), "snpeff")))
        # back compatible retrieval of genome from installation directory
        if "config" in data and not os.path.exists(os.path.join(snpeff_base_dir, snpeff_db)):
            snpeff_base_dir, snpeff_db = _installed_snpeff_genome(snpeff_db, data["config"])
        if snpeff_base_dir.endswith("/%s" % snpeff_db):
            snpeff_base_dir = os.path.dirname(snpeff_base_dir)
    return snpeff_db, snpeff_base_dir
python
def get_db(data):
    """Retrieve a snpEff database name and location relative to reference file.
    """
    snpeff_db = utils.get_in(data, ("genome_resources", "aliases", "snpeff"))
    snpeff_base_dir = None
    if snpeff_db:
        snpeff_base_dir = utils.get_in(data, ("reference", "snpeff"))
        if not (isinstance(snpeff_base_dir, six.string_types) and os.path.isdir(snpeff_base_dir)):
            snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db))
            if not snpeff_base_dir:
                # We need to mask '.' characters for CWL/WDL processing, check for them here
                snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db.replace(".", "_")))
                if snpeff_base_dir:
                    snpeff_db = snpeff_db.replace("_", ".")
        if isinstance(snpeff_base_dir, dict) and snpeff_base_dir.get("base"):
            snpeff_base_dir = snpeff_base_dir["base"]
        if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types)
              and os.path.isfile(snpeff_base_dir)):
            snpeff_base_dir = os.path.dirname(snpeff_base_dir)
        if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types)
              and snpeff_base_dir.endswith("%s%s" % (os.path.sep, snpeff_db))):
            snpeff_base_dir = os.path.dirname(snpeff_base_dir)
    if not snpeff_base_dir:
        ref_file = utils.get_in(data, ("reference", "fasta", "base"))
        snpeff_base_dir = utils.safe_makedir(os.path.normpath(os.path.join(
            os.path.dirname(os.path.dirname(ref_file)), "snpeff")))
        # back compatible retrieval of genome from installation directory
        if "config" in data and not os.path.exists(os.path.join(snpeff_base_dir, snpeff_db)):
            snpeff_base_dir, snpeff_db = _installed_snpeff_genome(snpeff_db, data["config"])
        if snpeff_base_dir.endswith("/%s" % snpeff_db):
            snpeff_base_dir = os.path.dirname(snpeff_base_dir)
    return snpeff_db, snpeff_base_dir
[ "def", "get_db", "(", "data", ")", ":", "snpeff_db", "=", "utils", ".", "get_in", "(", "data", ",", "(", "\"genome_resources\"", ",", "\"aliases\"", ",", "\"snpeff\"", ")", ")", "snpeff_base_dir", "=", "None", "if", "snpeff_db", ":", "snpeff_base_dir", "=", ...
Retrieve a snpEff database name and location relative to reference file.
[ "Retrieve", "a", "snpEff", "database", "name", "and", "location", "relative", "to", "reference", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L290-L320
train
217,954
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_get_snpeff_cmd
def _get_snpeff_cmd(cmd_name, datadir, data, out_file):
    """Retrieve snpEff base command line.
    """
    resources = config_utils.get_resources("snpeff", data["config"])
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3g"])
    # scale by cores, defaulting to 2x base usage to ensure we have enough memory
    # for single core runs to use with human genomes.
    # Sets a maximum amount of memory to avoid core dumps exceeding 32Gb
    # We shouldn't need that much memory for snpEff, so avoid issues
    # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#compressed_oops
    jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust":
                                                                 {"direction": "increase",
                                                                  "maximum": "30000M",
                                                                  "magnitude": max(2, dd.get_cores(data))}}})
    memory = " ".join(jvm_opts)
    snpeff = config_utils.get_program("snpEff", data["config"])
    java_args = "-Djava.io.tmpdir=%s" % utils.safe_makedir(os.path.join(os.path.dirname(out_file), "tmp"))
    export = "unset JAVA_HOME && export PATH=%s:\"$PATH\" && " % (utils.get_java_binpath())
    cmd = "{export} {snpeff} {memory} {java_args} {cmd_name} -dataDir {datadir}"
    return cmd.format(**locals())
python
def _get_snpeff_cmd(cmd_name, datadir, data, out_file):
    """Retrieve snpEff base command line.
    """
    resources = config_utils.get_resources("snpeff", data["config"])
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3g"])
    # scale by cores, defaulting to 2x base usage to ensure we have enough memory
    # for single core runs to use with human genomes.
    # Sets a maximum amount of memory to avoid core dumps exceeding 32Gb
    # We shouldn't need that much memory for snpEff, so avoid issues
    # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#compressed_oops
    jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust":
                                                                 {"direction": "increase",
                                                                  "maximum": "30000M",
                                                                  "magnitude": max(2, dd.get_cores(data))}}})
    memory = " ".join(jvm_opts)
    snpeff = config_utils.get_program("snpEff", data["config"])
    java_args = "-Djava.io.tmpdir=%s" % utils.safe_makedir(os.path.join(os.path.dirname(out_file), "tmp"))
    export = "unset JAVA_HOME && export PATH=%s:\"$PATH\" && " % (utils.get_java_binpath())
    cmd = "{export} {snpeff} {memory} {java_args} {cmd_name} -dataDir {datadir}"
    return cmd.format(**locals())
[ "def", "_get_snpeff_cmd", "(", "cmd_name", ",", "datadir", ",", "data", ",", "out_file", ")", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"snpeff\"", ",", "data", "[", "\"config\"", "]", ")", "jvm_opts", "=", "resources", ".", "get", ...
Retrieve snpEff base command line.
[ "Retrieve", "snpEff", "base", "command", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L339-L358
train
217,955
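A sketch of the intended memory arithmetic (the internals of config_utils.adjust_opts are not shown; numbers are illustrative): the base -Xmx is multiplied by max(2, cores) and capped at 30000M.

cores = 1
base_gb = 3                          # from the default -Xmx3g
scaled_gb = base_gb * max(2, cores)  # defaults to 2x for single-core runs
capped_gb = min(scaled_gb, 30)       # 30000M ceiling keeps compressed oops usable
print("-Xmx%dg" % capped_gb)         # -Xmx6g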
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_run_snpeff
def _run_snpeff(snp_in, out_format, data):
    """Run effects prediction with snpEff, skipping if snpEff database not present.
    """
    snpeff_db, datadir = get_db(data)
    if not snpeff_db:
        return None, None
    assert os.path.exists(os.path.join(datadir, snpeff_db)), \
        "Did not find %s snpEff genome data in %s" % (snpeff_db, datadir)
    ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv"
    out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext)
    stats_file = "%s-stats.html" % utils.splitext_plus(out_file)[0]
    csv_file = "%s-stats.csv" % utils.splitext_plus(out_file)[0]
    if not utils.file_exists(out_file):
        config_args = " ".join(_snpeff_args_from_config(data))
        if ext.endswith(".gz"):
            bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"])
        else:
            bgzip_cmd = ""
        with file_transaction(data, out_file) as tx_out_file:
            snpeff_cmd = _get_snpeff_cmd("eff", datadir, data, tx_out_file)
            cmd = ("{snpeff_cmd} {config_args} -noLog -i vcf -o {out_format} "
                   "-csvStats {csv_file} -s {stats_file} {snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}")
            do.run(cmd.format(**locals()), "snpEff effects", data)
    if ext.endswith(".gz"):
        out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file, [stats_file, csv_file]
python
def _run_snpeff(snp_in, out_format, data):
    """Run effects prediction with snpEff, skipping if snpEff database not present.
    """
    snpeff_db, datadir = get_db(data)
    if not snpeff_db:
        return None, None
    assert os.path.exists(os.path.join(datadir, snpeff_db)), \
        "Did not find %s snpEff genome data in %s" % (snpeff_db, datadir)
    ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv"
    out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext)
    stats_file = "%s-stats.html" % utils.splitext_plus(out_file)[0]
    csv_file = "%s-stats.csv" % utils.splitext_plus(out_file)[0]
    if not utils.file_exists(out_file):
        config_args = " ".join(_snpeff_args_from_config(data))
        if ext.endswith(".gz"):
            bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"])
        else:
            bgzip_cmd = ""
        with file_transaction(data, out_file) as tx_out_file:
            snpeff_cmd = _get_snpeff_cmd("eff", datadir, data, tx_out_file)
            cmd = ("{snpeff_cmd} {config_args} -noLog -i vcf -o {out_format} "
                   "-csvStats {csv_file} -s {stats_file} {snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}")
            do.run(cmd.format(**locals()), "snpEff effects", data)
    if ext.endswith(".gz"):
        out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file, [stats_file, csv_file]
[ "def", "_run_snpeff", "(", "snp_in", ",", "out_format", ",", "data", ")", ":", "snpeff_db", ",", "datadir", "=", "get_db", "(", "data", ")", "if", "not", "snpeff_db", ":", "return", "None", ",", "None", "assert", "os", ".", "path", ".", "exists", "(", ...
Run effects prediction with snpEff, skipping if snpEff database not present.
[ "Run", "effects", "prediction", "with", "snpEff", "skipping", "if", "snpEff", "database", "not", "present", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L360-L386
train
217,956
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_installed_snpeff_genome
def _installed_snpeff_genome(base_name, config):
    """Find the most recent installed genome for snpEff with the given name.
    """
    snpeff_config_file = os.path.join(config_utils.get_program("snpeff", config, "dir"),
                                      "snpEff.config")
    if os.path.exists(snpeff_config_file):
        data_dir = _find_snpeff_datadir(snpeff_config_file)
        dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True)
               if os.path.isdir(d)]
    else:
        data_dir = None
        dbs = []
    if len(dbs) == 0:
        raise ValueError("No database found in %s for %s" % (data_dir, base_name))
    else:
        return data_dir, os.path.split(dbs[0])[-1]
python
def _installed_snpeff_genome(base_name, config):
    """Find the most recent installed genome for snpEff with the given name.
    """
    snpeff_config_file = os.path.join(config_utils.get_program("snpeff", config, "dir"),
                                      "snpEff.config")
    if os.path.exists(snpeff_config_file):
        data_dir = _find_snpeff_datadir(snpeff_config_file)
        dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True)
               if os.path.isdir(d)]
    else:
        data_dir = None
        dbs = []
    if len(dbs) == 0:
        raise ValueError("No database found in %s for %s" % (data_dir, base_name))
    else:
        return data_dir, os.path.split(dbs[0])[-1]
[ "def", "_installed_snpeff_genome", "(", "base_name", ",", "config", ")", ":", "snpeff_config_file", "=", "os", ".", "path", ".", "join", "(", "config_utils", ".", "get_program", "(", "\"snpeff\"", ",", "config", ",", "\"dir\"", ")", ",", "\"snpEff.config\"", "...
Find the most recent installed genome for snpEff with the given name.
[ "Find", "the", "most", "recent", "installed", "genome", "for", "snpEff", "with", "the", "given", "name", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L400-L415
train
217,957
bcbio/bcbio-nextgen
bcbio/ngsalign/minimap2.py
remap_index_fn
def remap_index_fn(ref_file):
    """minimap2 can build indexes on the fly but will also store common ones.
    """
    index_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "minimap2")
    if os.path.exists(index_dir) and os.path.isdir(index_dir):
        return index_dir
    else:
        return os.path.dirname(ref_file)
python
def remap_index_fn(ref_file):
    """minimap2 can build indexes on the fly but will also store common ones.
    """
    index_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "minimap2")
    if os.path.exists(index_dir) and os.path.isdir(index_dir):
        return index_dir
    else:
        return os.path.dirname(ref_file)
[ "def", "remap_index_fn", "(", "ref_file", ")", ":", "index_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "ref_file", ")", ",", "os", ".", "pardir", ",", "\"minimap2\"", ")", "if", "os", ".", "path", ".", "e...
minimap2 can build indexes on the fly but will also store common ones.
[ "minimap2", "can", "build", "indexes", "on", "the", "fly", "but", "will", "also", "store", "common", "ones", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/minimap2.py#L44-L51
train
217,958
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
create_new_csv
def create_new_csv(samples, args):
    """create csv file that can be used with bcbio -w template"""
    out_fn = os.path.splitext(args.csv)[0] + "-merged.csv"
    logger.info("Preparing new csv: %s" % out_fn)
    with file_transaction(out_fn) as tx_out:
        with open(tx_out, 'w') as handle:
            handle.write(_header(args.csv))
            for s in samples:
                sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file'])
                handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
python
def create_new_csv(samples, args):
    """create csv file that can be used with bcbio -w template"""
    out_fn = os.path.splitext(args.csv)[0] + "-merged.csv"
    logger.info("Preparing new csv: %s" % out_fn)
    with file_transaction(out_fn) as tx_out:
        with open(tx_out, 'w') as handle:
            handle.write(_header(args.csv))
            for s in samples:
                sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file'])
                handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
[ "def", "create_new_csv", "(", "samples", ",", "args", ")", ":", "out_fn", "=", "os", ".", "path", ".", "splitext", "(", "args", ".", "csv", ")", "[", "0", "]", "+", "\"-merged.csv\"", "logger", ".", "info", "(", "\"Preparing new csv: %s\"", "%", "out_fn"...
create csv file that can be used with bcbio -w template
[ "create", "csv", "file", "that", "can", "be", "used", "with", "bcbio", "-", "w", "template" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L23-L32
train
217,959
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
_get_samples_to_process
def _get_samples_to_process(fn, out_dir, config, force_single, separators):
    """parse csv file with one line per file. It will merge
    all files that have the same description name"""
    out_dir = os.path.abspath(out_dir)
    samples = defaultdict(list)
    with open(fn) as handle:
        for l in handle:
            if l.find("description") > 0:
                logger.info("Skipping header.")
                continue
            cols = l.strip().split(",")
            if len(cols) > 0:
                if len(cols) < 2:
                    raise ValueError("Line needs 2 values: file and name.")
                if utils.file_exists(cols[0]) or is_gsm(cols[0]) or is_srr(cols[0]):
                    if cols[0].find(" ") > -1:
                        new_name = os.path.abspath(cols[0].replace(" ", "_"))
                        logger.warning("Spaces found in %s. Linked to %s." % (cols[0], new_name))
                        logger.warning("Please avoid names with spaces in the future.")
                        utils.symlink_plus(os.path.abspath(cols[0]), new_name)
                        cols[0] = new_name
                    samples[cols[1]].append(cols)
                else:
                    logger.info("Skipping %s; file doesn't exist." % cols[0])
    for sample, items in samples.items():
        if is_fastq(items[0][0], True):
            fn = "fq_merge"
            ext = ".fastq.gz"
        elif is_bam(items[0][0]):
            fn = "bam_merge"
            ext = ".bam"
        elif is_gsm(items[0][0]):
            fn = "query_gsm"
            ext = ".fastq.gz"
        elif is_srr(items[0][0]):
            fn = "query_srr"
            ext = ".fastq.gz"
        files = [os.path.abspath(fn_file[0]) if utils.file_exists(fn_file[0]) else fn_file[0]
                 for fn_file in items]
        samples[sample] = [{'files': _check_paired(files, force_single, separators),
                            'out_file': os.path.join(out_dir, sample + ext), 'fn': fn,
                            'anno': items[0][2:], 'config': config, 'name': sample,
                            'out_dir': out_dir}]
    return [samples[sample] for sample in samples]
python
def _get_samples_to_process(fn, out_dir, config, force_single, separators):
    """parse csv file with one line per file. It will merge
    all files that have the same description name"""
    out_dir = os.path.abspath(out_dir)
    samples = defaultdict(list)
    with open(fn) as handle:
        for l in handle:
            if l.find("description") > 0:
                logger.info("Skipping header.")
                continue
            cols = l.strip().split(",")
            if len(cols) > 0:
                if len(cols) < 2:
                    raise ValueError("Line needs 2 values: file and name.")
                if utils.file_exists(cols[0]) or is_gsm(cols[0]) or is_srr(cols[0]):
                    if cols[0].find(" ") > -1:
                        new_name = os.path.abspath(cols[0].replace(" ", "_"))
                        logger.warning("Spaces found in %s. Linked to %s." % (cols[0], new_name))
                        logger.warning("Please avoid names with spaces in the future.")
                        utils.symlink_plus(os.path.abspath(cols[0]), new_name)
                        cols[0] = new_name
                    samples[cols[1]].append(cols)
                else:
                    logger.info("Skipping %s; file doesn't exist." % cols[0])
    for sample, items in samples.items():
        if is_fastq(items[0][0], True):
            fn = "fq_merge"
            ext = ".fastq.gz"
        elif is_bam(items[0][0]):
            fn = "bam_merge"
            ext = ".bam"
        elif is_gsm(items[0][0]):
            fn = "query_gsm"
            ext = ".fastq.gz"
        elif is_srr(items[0][0]):
            fn = "query_srr"
            ext = ".fastq.gz"
        files = [os.path.abspath(fn_file[0]) if utils.file_exists(fn_file[0]) else fn_file[0]
                 for fn_file in items]
        samples[sample] = [{'files': _check_paired(files, force_single, separators),
                            'out_file': os.path.join(out_dir, sample + ext), 'fn': fn,
                            'anno': items[0][2:], 'config': config, 'name': sample,
                            'out_dir': out_dir}]
    return [samples[sample] for sample in samples]
[ "def", "_get_samples_to_process", "(", "fn", ",", "out_dir", ",", "config", ",", "force_single", ",", "separators", ")", ":", "out_dir", "=", "os", ".", "path", ".", "abspath", "(", "out_dir", ")", "samples", "=", "defaultdict", "(", "list", ")", "with", ...
parse csv file with one line per file. It will merge all files that have the same description name
[ "parse", "csv", "file", "with", "one", "line", "per", "file", ".", "It", "will", "merge", "all", "files", "that", "have", "the", "same", "description", "name" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L41-L83
train
217,960
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
_check_stems
def _check_stems(files):
    """check if stem names are duplicated and use full paths if so"""
    used = set()
    for fn in files:
        if os.path.basename(fn) in used:
            logger.warning("%s appears more than once in your file list, so we "
                           "don't know how to assign it to the sample data in "
                           "the CSV. We will use the full path to tell files "
                           "apart, which means paired files should be in the "
                           "same folder. If this is a problem, rename the files "
                           "you want to merge." % os.path.basename(fn))
            return True
        used.add(os.path.basename(fn))
    return False
python
def _check_stems(files):
    """check if stem names are duplicated and use full paths if so"""
    used = set()
    for fn in files:
        if os.path.basename(fn) in used:
            logger.warning("%s appears more than once in your file list, so we "
                           "don't know how to assign it to the sample data in "
                           "the CSV. We will use the full path to tell files "
                           "apart, which means paired files should be in the "
                           "same folder. If this is a problem, rename the files "
                           "you want to merge." % os.path.basename(fn))
            return True
        used.add(os.path.basename(fn))
    return False
[ "def", "_check_stems", "(", "files", ")", ":", "used", "=", "set", "(", ")", "for", "fn", "in", "files", ":", "if", "os", ".", "path", ".", "basename", "(", "fn", ")", "in", "used", ":", "logger", ".", "warning", "(", "\"%s stem is multiple times in yo...
check if stem names are duplicated and use full paths if so
[ "check", "if", "stem", "names", "are", "duplicated", "and", "use", "full", "paths", "if", "so" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L86-L101
train
217,961
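A self-contained sketch (paths invented) of the same duplicate-stem check, without the logging side effect:

import os

def has_duplicate_stems(files):
    # True when two inputs share a basename, forcing the full-path fallback
    stems = [os.path.basename(fn) for fn in files]
    return len(stems) != len(set(stems))

print(has_duplicate_stems(["/run1/S1_R1.fastq.gz", "/run2/S1_R1.fastq.gz"]))  # True
print(has_duplicate_stems(["/run1/S1_R1.fastq.gz", "/run1/S1_R2.fastq.gz"]))  # False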
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
get_cluster_view
def get_cluster_view(p):
    """get ipython running"""
    from cluster_helper import cluster as ipc
    return ipc.cluster_view(p['scheduler'], p['queue'],
                            p['num_jobs'], p['cores_per_job'],
                            start_wait=p['timeout'],
                            extra_params={"resources": p['resources'],
                                          "mem": p['mem'],
                                          "tag": p['tag'],
                                          "run_local": False})
python
def get_cluster_view(p):
    """get ipython running"""
    from cluster_helper import cluster as ipc
    return ipc.cluster_view(p['scheduler'], p['queue'],
                            p['num_jobs'], p['cores_per_job'],
                            start_wait=p['timeout'],
                            extra_params={"resources": p['resources'],
                                          "mem": p['mem'],
                                          "tag": p['tag'],
                                          "run_local": False})
[ "def", "get_cluster_view", "(", "p", ")", ":", "from", "cluster_helper", "import", "cluster", "as", "ipc", "return", "ipc", ".", "cluster_view", "(", "p", "[", "'scheduler'", "]", ",", "p", "[", "'queue'", "]", ",", "p", "[", "'num_jobs'", "]", ",", "p...
get ipython running
[ "get", "ipython", "running" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L114-L117
train
217,962
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
from_sample
def from_sample(sample):
    """Upload results of processing from an analysis pipeline sample.
    """
    upload_config = sample.get("upload")
    if upload_config:
        approach = _approaches[upload_config.get("method", "filesystem")]
        for finfo in _get_files(sample):
            approach.update_file(finfo, sample, upload_config)
    return [[sample]]
python
def from_sample(sample):
    """Upload results of processing from an analysis pipeline sample.
    """
    upload_config = sample.get("upload")
    if upload_config:
        approach = _approaches[upload_config.get("method", "filesystem")]
        for finfo in _get_files(sample):
            approach.update_file(finfo, sample, upload_config)
    return [[sample]]
[ "def", "from_sample", "(", "sample", ")", ":", "upload_config", "=", "sample", ".", "get", "(", "\"upload\"", ")", "if", "upload_config", ":", "approach", "=", "_approaches", "[", "upload_config", ".", "get", "(", "\"method\"", ",", "\"filesystem\"", ")", "]...
Upload results of processing from an analysis pipeline sample.
[ "Upload", "results", "of", "processing", "from", "an", "analysis", "pipeline", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L29-L37
train
217,963
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_files
def _get_files(sample):
    """Retrieve files for the sample, dispatching by analysis type.

    Each file is a dictionary containing the path plus associated
    metadata about the file and pipeline versions.
    """
    analysis = sample.get("analysis")
    if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]:
        return _get_files_variantcall(sample)
    elif analysis.lower() in ["rna-seq", "fastrna-seq"]:
        return _get_files_rnaseq(sample)
    elif analysis.lower() in ["smallrna-seq"]:
        return _get_files_srnaseq(sample)
    elif analysis.lower() in ["chip-seq"]:
        return _get_files_chipseq(sample)
    elif analysis.lower() in ["scrna-seq"]:
        return _get_files_scrnaseq(sample)
    else:
        return []
python
def _get_files(sample):
    """Retrieve files for the sample, dispatching by analysis type.

    Each file is a dictionary containing the path plus associated
    metadata about the file and pipeline versions.
    """
    analysis = sample.get("analysis")
    if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]:
        return _get_files_variantcall(sample)
    elif analysis.lower() in ["rna-seq", "fastrna-seq"]:
        return _get_files_rnaseq(sample)
    elif analysis.lower() in ["smallrna-seq"]:
        return _get_files_srnaseq(sample)
    elif analysis.lower() in ["chip-seq"]:
        return _get_files_chipseq(sample)
    elif analysis.lower() in ["scrna-seq"]:
        return _get_files_scrnaseq(sample)
    else:
        return []
[ "def", "_get_files", "(", "sample", ")", ":", "analysis", "=", "sample", ".", "get", "(", "\"analysis\"", ")", "if", "analysis", ".", "lower", "(", ")", "in", "[", "\"variant\"", ",", "\"snp calling\"", ",", "\"variant2\"", ",", "\"standard\"", "]", ":", ...
Retrieve files for the sample, dispatching by analysis type. Each file is a dictionary containing the path plus associated metadata about the file and pipeline versions.
[ "Retrieve", "files", "for", "the", "sample", "dispatching", "by", "analysis", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L56-L74
train
217,964
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_add_meta
def _add_meta(xs, sample=None, config=None):
    """Add top level information about the sample or flowcell to output.

    Sorts outputs into sample names (sample input) and project (config input).
    """
    out = []
    for x in xs:
        if not isinstance(x["path"], six.string_types) or not os.path.exists(x["path"]):
            raise ValueError("Unexpected path for upload: %s" % x)
        x["mtime"] = shared.get_file_timestamp(x["path"])
        if sample:
            sample_name = dd.get_sample_name(sample)
            if "sample" not in x:
                x["sample"] = sample_name
            elif x["sample"] != sample_name:
                x["run"] = sample_name
        if config:
            fc_name = config.get("fc_name") or "project"
            fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
            x["run"] = "%s_%s" % (fc_date, fc_name)
        out.append(x)
    return out
python
def _add_meta(xs, sample=None, config=None):
    """Add top level information about the sample or flowcell to output.

    Sorts outputs into sample names (sample input) and project (config input).
    """
    out = []
    for x in xs:
        if not isinstance(x["path"], six.string_types) or not os.path.exists(x["path"]):
            raise ValueError("Unexpected path for upload: %s" % x)
        x["mtime"] = shared.get_file_timestamp(x["path"])
        if sample:
            sample_name = dd.get_sample_name(sample)
            if "sample" not in x:
                x["sample"] = sample_name
            elif x["sample"] != sample_name:
                x["run"] = sample_name
        if config:
            fc_name = config.get("fc_name") or "project"
            fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
            x["run"] = "%s_%s" % (fc_date, fc_name)
        out.append(x)
    return out
[ "def", "_add_meta", "(", "xs", ",", "sample", "=", "None", ",", "config", "=", "None", ")", ":", "out", "=", "[", "]", "for", "x", "in", "xs", ":", "if", "not", "isinstance", "(", "x", "[", "\"path\"", "]", ",", "six", ".", "string_types", ")", ...
Add top level information about the sample or flowcell to output. Sorts outputs into sample names (sample input) and project (config input).
[ "Add", "top", "level", "information", "about", "the", "sample", "or", "flowcell", "to", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L123-L144
train
217,965
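A small sketch (inputs are invented) of how _add_meta derives the "run" grouping for project-level files, falling back to a date-stamped "project" name when fc_name is absent:

import datetime

config = {"fc_date": "2019-03-01"}  # no fc_name supplied
fc_name = config.get("fc_name") or "project"
fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
print("%s_%s" % (fc_date, fc_name))  # 2019-03-01_project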
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_files_variantcall
def _get_files_variantcall(sample):
    """Return output files for the variant calling pipeline.
    """
    out = []
    algorithm = sample["config"]["algorithm"]
    out = _maybe_add_summary(algorithm, sample, out)
    out = _maybe_add_alignment(algorithm, sample, out)
    out = _maybe_add_callable(sample, out)
    out = _maybe_add_disambiguate(algorithm, sample, out)
    out = _maybe_add_variant_file(algorithm, sample, out)
    out = _maybe_add_sv(algorithm, sample, out)
    out = _maybe_add_hla(algorithm, sample, out)
    out = _maybe_add_heterogeneity(algorithm, sample, out)
    out = _maybe_add_validate(algorithm, sample, out)
    return _add_meta(out, sample)
python
def _get_files_variantcall(sample):
    """Return output files for the variant calling pipeline.
    """
    out = []
    algorithm = sample["config"]["algorithm"]
    out = _maybe_add_summary(algorithm, sample, out)
    out = _maybe_add_alignment(algorithm, sample, out)
    out = _maybe_add_callable(sample, out)
    out = _maybe_add_disambiguate(algorithm, sample, out)
    out = _maybe_add_variant_file(algorithm, sample, out)
    out = _maybe_add_sv(algorithm, sample, out)
    out = _maybe_add_hla(algorithm, sample, out)
    out = _maybe_add_heterogeneity(algorithm, sample, out)
    out = _maybe_add_validate(algorithm, sample, out)
    return _add_meta(out, sample)
[ "def", "_get_files_variantcall", "(", "sample", ")", ":", "out", "=", "[", "]", "algorithm", "=", "sample", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "out", "=", "_maybe_add_summary", "(", "algorithm", ",", "sample", ",", "out", ")", "out", "=", ...
Return output files for the variant calling pipeline.
[ "Return", "output", "files", "for", "the", "variant", "calling", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L146-L161
train
217,966
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_maybe_add_callable
def _maybe_add_callable(data, out):
    """Add callable and depth regions to output folder.
    """
    callable_bed = dd.get_sample_callable(data)
    if callable_bed:
        out.append({"path": callable_bed, "type": "bed", "ext": "callable"})
    perbase_bed = tz.get_in(["depth", "variant_regions", "per_base"], data)
    if perbase_bed:
        out.append({"path": perbase_bed, "type": "bed.gz", "ext": "depth-per-base"})
    return out
python
def _maybe_add_callable(data, out):
    """Add callable and depth regions to output folder.
    """
    callable_bed = dd.get_sample_callable(data)
    if callable_bed:
        out.append({"path": callable_bed, "type": "bed", "ext": "callable"})
    perbase_bed = tz.get_in(["depth", "variant_regions", "per_base"], data)
    if perbase_bed:
        out.append({"path": perbase_bed, "type": "bed.gz", "ext": "depth-per-base"})
    return out
[ "def", "_maybe_add_callable", "(", "data", ",", "out", ")", ":", "callable_bed", "=", "dd", ".", "get_sample_callable", "(", "data", ")", "if", "callable_bed", ":", "out", ".", "append", "(", "{", "\"path\"", ":", "callable_bed", ",", "\"type\"", ":", "\"b...
Add callable and depth regions to output folder.
[ "Add", "callable", "and", "depth", "regions", "to", "output", "folder", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L183-L192
train
217,967
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_batch_name
def _get_batch_name(sample):
    """Retrieve batch name for use in SV calling outputs.

    Handles multiple batches split via SV calling.
    """
    batch = dd.get_batch(sample) or dd.get_sample_name(sample)
    if isinstance(batch, (list, tuple)) and len(batch) > 1:
        batch = dd.get_sample_name(sample)
    return batch
python
def _get_batch_name(sample):
    """Retrieve batch name for use in SV calling outputs.

    Handles multiple batches split via SV calling.
    """
    batch = dd.get_batch(sample) or dd.get_sample_name(sample)
    if isinstance(batch, (list, tuple)) and len(batch) > 1:
        batch = dd.get_sample_name(sample)
    return batch
[ "def", "_get_batch_name", "(", "sample", ")", ":", "batch", "=", "dd", ".", "get_batch", "(", "sample", ")", "or", "dd", ".", "get_sample_name", "(", "sample", ")", "if", "isinstance", "(", "batch", ",", "(", "list", ",", "tuple", ")", ")", "and", "l...
Retrieve batch name for use in SV calling outputs. Handles multiple batches split via SV calling.
[ "Retrieve", "batch", "name", "for", "use", "in", "SV", "calling", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L236-L244
train
217,968
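A standalone sketch (not the source function) of the same fallback rule, with the datadict lookups replaced by plain arguments:

def batch_name(batch, sample_name):
    # No batch assigned: use the sample's own name.
    batch = batch or sample_name
    # Split across multiple SV batches: also fall back to the sample name.
    if isinstance(batch, (list, tuple)) and len(batch) > 1:
        batch = sample_name
    return batch

print(batch_name("b1", "S1"))          # b1
print(batch_name(["b1", "b2"], "S2"))  # S2
print(batch_name(None, "S3"))          # S3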
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_sample_variant_file_in_population
def _sample_variant_file_in_population(x):
    """Check if a sample file is the same as the population file.

    This is true for batches where we don't extract into samples and do not
    run decomposition for gemini.
    """
    if "population" in x:
        a = _get_project_vcf(x)
        b = _get_variant_file(x, ("vrn_file",))
        decomposed = tz.get_in(("population", "decomposed"), x)
        if (a and b and not decomposed and len(a) > 0 and len(b) > 0 and
              vcfutils.get_samples(a[0]["path"]) == vcfutils.get_samples(b[0]["path"])):
            return True
    return False
python
def _sample_variant_file_in_population(x):
    """Check if a sample file is the same as the population file.

    This is true for batches where we don't extract into samples and do not
    run decomposition for gemini.
    """
    if "population" in x:
        a = _get_project_vcf(x)
        b = _get_variant_file(x, ("vrn_file",))
        decomposed = tz.get_in(("population", "decomposed"), x)
        if (a and b and not decomposed and len(a) > 0 and len(b) > 0 and
              vcfutils.get_samples(a[0]["path"]) == vcfutils.get_samples(b[0]["path"])):
            return True
    return False
[ "def", "_sample_variant_file_in_population", "(", "x", ")", ":", "if", "\"population\"", "in", "x", ":", "a", "=", "_get_project_vcf", "(", "x", ")", "b", "=", "_get_variant_file", "(", "x", ",", "(", "\"vrn_file\"", ",", ")", ")", "decomposed", "=", "tz",...
Check if a sample file is the same as the population file. This is true for batches where we don't extract into samples and do not run decomposition for gemini.
[ "Check", "if", "a", "sample", "file", "is", "the", "same", "as", "the", "population", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L316-L329
train
217,969
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_variant_file
def _get_variant_file(x, key, suffix="", sample=None, ignore_do_upload=False):
    """Retrieve VCF file with the given key if it exists, handling bgzipped.
    """
    out = []
    fname = utils.get_in(x, key)
    upload_key = list(key)
    upload_key[-1] = "do_upload"
    do_upload = tz.get_in(tuple(upload_key), x, True)
    if fname and (ignore_do_upload or do_upload):
        if fname.endswith(".vcf.gz"):
            out.append({"path": fname,
                        "type": "vcf.gz",
                        "ext": "%s%s" % (x["variantcaller"], suffix),
                        "variantcaller": x["variantcaller"]})
            if utils.file_exists(fname + ".tbi"):
                out.append({"path": fname + ".tbi",
                            "type": "vcf.gz.tbi",
                            "index": True,
                            "ext": "%s%s" % (x["variantcaller"], suffix),
                            "variantcaller": x["variantcaller"]})
        elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn",
                             ".txt", ".tsv")):
            ftype = utils.splitext_plus(fname)[-1][1:]
            if ftype == "txt":
                extended_ftype = fname.split("-")[-1]
                if "/" not in extended_ftype:
                    ftype = extended_ftype
            out.append({"path": fname,
                        "type": ftype,
                        "ext": "%s%s" % (x["variantcaller"], suffix),
                        "variantcaller": x["variantcaller"]})
    if sample:
        out_sample = []
        for x in out:
            x["sample"] = sample
            out_sample.append(x)
        return out_sample
    else:
        return out
python
def _get_variant_file(x, key, suffix="", sample=None, ignore_do_upload=False):
    """Retrieve VCF file with the given key if it exists, handling bgzipped.
    """
    out = []
    fname = utils.get_in(x, key)
    upload_key = list(key)
    upload_key[-1] = "do_upload"
    do_upload = tz.get_in(tuple(upload_key), x, True)
    if fname and (ignore_do_upload or do_upload):
        if fname.endswith(".vcf.gz"):
            out.append({"path": fname,
                        "type": "vcf.gz",
                        "ext": "%s%s" % (x["variantcaller"], suffix),
                        "variantcaller": x["variantcaller"]})
            if utils.file_exists(fname + ".tbi"):
                out.append({"path": fname + ".tbi",
                            "type": "vcf.gz.tbi",
                            "index": True,
                            "ext": "%s%s" % (x["variantcaller"], suffix),
                            "variantcaller": x["variantcaller"]})
        elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn",
                             ".txt", ".tsv")):
            ftype = utils.splitext_plus(fname)[-1][1:]
            if ftype == "txt":
                extended_ftype = fname.split("-")[-1]
                if "/" not in extended_ftype:
                    ftype = extended_ftype
            out.append({"path": fname,
                        "type": ftype,
                        "ext": "%s%s" % (x["variantcaller"], suffix),
                        "variantcaller": x["variantcaller"]})
    if sample:
        out_sample = []
        for x in out:
            x["sample"] = sample
            out_sample.append(x)
        return out_sample
    else:
        return out
[ "def", "_get_variant_file", "(", "x", ",", "key", ",", "suffix", "=", "\"\"", ",", "sample", "=", "None", ",", "ignore_do_upload", "=", "False", ")", ":", "out", "=", "[", "]", "fname", "=", "utils", ".", "get_in", "(", "x", ",", "key", ")", "uploa...
Retrieve VCF file with the given key if it exists, handling bgzipped.
[ "Retrieve", "VCF", "file", "with", "the", "given", "key", "if", "it", "exists", "handling", "bgzipped", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L331-L368
train
217,970
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_add_batch
def _add_batch(x, sample):
    """Potentially add batch name to an upload file.
    """
    added = False
    for batch in sorted(dd.get_batches(sample) or [], key=len, reverse=True):
        if batch and os.path.basename(x["path"]).startswith(("%s-" % batch, "%s.vcf" % batch)):
            x["batch"] = batch
            added = True
            break
    if not added:
        x["batch"] = dd.get_sample_name(sample)
    return x
python
Potentially add batch name to an upload file.
[ "Potentially", "add", "batch", "name", "to", "an", "upload", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L638-L649
train
217,971
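The longest-name-first sort in _add_batch above is what keeps overlapping batch names from mis-matching. A minimal standalone sketch of the same idea, with hypothetical batch and file names (pick_batch is an illustration, not part of bcbio):

import os

def pick_batch(path, batches, fallback):
    # Try longer batch names first so "proj-tumor" wins over "proj"
    # for a file named "proj-tumor-ensemble.vcf.gz".
    for batch in sorted(batches, key=len, reverse=True):
        if os.path.basename(path).startswith(("%s-" % batch, "%s.vcf" % batch)):
            return batch
    return fallback

print(pick_batch("/final/proj-tumor-ensemble.vcf.gz", ["proj", "proj-tumor"], "sample1"))
# -> proj-tumor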
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_project_vcf
def _get_project_vcf(x, suffix=""):
    """Get our project VCF, either from the population or the variant batch file.
    """
    vcfs = _get_variant_file(x, ("population", "vcf"), suffix=suffix)
    if not vcfs:
        vcfs = _get_variant_file(x, ("vrn_file_batch", ), suffix=suffix, ignore_do_upload=True)
    if not vcfs and x.get("variantcaller") == "ensemble":
        vcfs = _get_variant_file(x, ("vrn_file", ), suffix=suffix)
    return vcfs
python
Get our project VCF, either from the population or the variant batch file.
[ "Get", "our", "project", "VCF", "either", "from", "the", "population", "or", "the", "variant", "batch", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L651-L659
train
217,972
bcbio/bcbio-nextgen
scripts/utils/resort_bam_karyotype.py
_id_remapper
def _id_remapper(orig, new):
    """Provide a dictionary remapping original read indexes to new indexes.

    When re-ordering the header, the individual read identifiers need to be
    updated as well.
    """
    new_chrom_to_index = {}
    for i_n, (chr_n, _) in enumerate(new):
        new_chrom_to_index[chr_n] = i_n
    remap_indexes = {}
    for i_o, (chr_o, _) in enumerate(orig):
        if chr_o in new_chrom_to_index.keys():
            remap_indexes[i_o] = new_chrom_to_index[chr_o]
    remap_indexes[None] = None
    return remap_indexes
python
Provide a dictionary remapping original read indexes to new indexes. When re-ordering the header, the individual read identifiers need to be updated as well.
[ "Provide", "a", "dictionary", "remapping", "original", "read", "indexes", "to", "new", "indexes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/resort_bam_karyotype.py#L67-L81
train
217,973
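A small worked example of the remapping dictionary _id_remapper builds, assuming the function from the record above is importable; the contig lists are illustrative:

# Old header order: chr1 then chrM; new karyotype order: chrM then chr1.
orig = [("chr1", 249250621), ("chrM", 16571)]
new = [("chrM", 16571), ("chr1", 249250621)]
print(_id_remapper(orig, new))
# -> {0: 1, 1: 0, None: None}  (old index 0 maps to new index 1; the None
#    entry keeps the sentinel for unmapped reads unchanged)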
bcbio/bcbio-nextgen
scripts/bcbio_setup_genome.py
_clean_rec_name
def _clean_rec_name(rec):
    """Clean illegal characters in input fasta file which cause problems downstream.
    """
    out_id = []
    for char in list(rec.id):
        if char in ALLOWED_CONTIG_NAME_CHARS:
            out_id.append(char)
        else:
            out_id.append("_")
    rec.id = "".join(out_id)
    rec.description = ""
    return rec
python
Clean illegal characters in input fasta file which cause problems downstream.
[ "Clean", "illegal", "characters", "in", "input", "fasta", "file", "which", "cause", "problems", "downstream", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_setup_genome.py#L151-L162
train
217,974
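ALLOWED_CONTIG_NAME_CHARS is defined elsewhere in the script; a sketch of the same whitelist substitution using a plausible stand-in character set (the real constant may differ):

import string

# Stand-in for the script's ALLOWED_CONTIG_NAME_CHARS.
ALLOWED_CONTIG_NAME_CHARS = set(string.ascii_letters + string.digits + "-_.")

def clean_name(name):
    # Replace anything outside the whitelist with an underscore.
    return "".join(c if c in ALLOWED_CONTIG_NAME_CHARS else "_" for c in name)

print(clean_name("chr1|alt scaffold"))  # -> chr1_alt_scaffold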
bcbio/bcbio-nextgen
bcbio/qc/kraken.py
run
def run(_, data, out_dir):
    """Run kraken, generating report in specified directory and parsing metrics.

    Using only first paired reads.
    """
    # logger.info("Number of aligned reads < than 0.60 in %s: %s" % (dd.get_sample_name(data), ratio))
    logger.info("Running kraken to determine contaminant: %s" % dd.get_sample_name(data))
    # ratio = bam.get_aligned_reads(bam_file, data)
    out = out_stats = None
    db = tz.get_in(["config", "algorithm", "kraken"], data)
    if db and isinstance(db, (list, tuple)):
        db = db[0]
    kraken_cmd = config_utils.get_program("kraken", data["config"])
    if db == "minikraken":
        db = os.path.join(install._get_data_dir(), "genomes", "kraken", "minikraken")
    if not os.path.exists(db):
        logger.info("kraken: no database found %s, skipping" % db)
        return {"kraken_report": "null"}
    if not os.path.exists(os.path.join(out_dir, "kraken_out")):
        work_dir = os.path.dirname(out_dir)
        utils.safe_makedir(work_dir)
        num_cores = data["config"]["algorithm"].get("num_cores", 1)
        fn_file = data["files_orig"][0] if dd.get_save_diskspace(data) else data["files"][0]
        if fn_file.endswith("bam"):
            logger.info("kraken: need fastq files as input")
            return {"kraken_report": "null"}
        with tx_tmpdir(data) as tx_tmp_dir:
            with utils.chdir(tx_tmp_dir):
                out = os.path.join(tx_tmp_dir, "kraken_out")
                out_stats = os.path.join(tx_tmp_dir, "kraken_stats")
                cat = "zcat" if fn_file.endswith(".gz") else "cat"
                cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick "
                      "--preload --min-hits 2 "
                      "--threads {num_cores} "
                      "--output {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals())
                do.run(cl, "kraken: %s" % dd.get_sample_name(data))
                if os.path.exists(out_dir):
                    shutil.rmtree(out_dir)
                shutil.move(tx_tmp_dir, out_dir)
    metrics = _parse_kraken_output(out_dir, db, data)
    return metrics
python
Run kraken, generating report in specified directory and parsing metrics. Using only first paired reads.
[ "Run", "kraken", "generating", "report", "in", "specified", "directory", "and", "parsing", "metrics", ".", "Using", "only", "first", "paired", "reads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/kraken.py#L16-L57
train
217,975
bcbio/bcbio-nextgen
bcbio/qc/kraken.py
_parse_kraken_output
def _parse_kraken_output(out_dir, db, data):
    """Parse kraken stat info coming from stderr, generating report with kraken-report.
    """
    in_file = os.path.join(out_dir, "kraken_out")
    stat_file = os.path.join(out_dir, "kraken_stats")
    out_file = os.path.join(out_dir, "kraken_summary")
    kraken_cmd = config_utils.get_program("kraken-report", data["config"])
    classify = unclassify = None
    with open(stat_file, 'r') as handle:
        for line in handle:
            if line.find(" classified") > -1:
                classify = line[line.find("(") + 1:line.find(")")]
            if line.find(" unclassified") > -1:
                unclassify = line[line.find("(") + 1:line.find(")")]
    if os.path.getsize(in_file) > 0 and not os.path.exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals())
            do.run(cl, "kraken report: %s" % dd.get_sample_name(data))
    kraken = {"kraken_clas": classify, "kraken_unclas": unclassify}
    kraken_sum = _summarize_kraken(out_file)
    kraken.update(kraken_sum)
    return kraken
python
Parse kraken stat info coming from stderr, generating report with kraken-report
[ "Parse", "kraken", "stat", "info", "coming", "from", "stderr", "generating", "report", "with", "kraken", "-", "report" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/kraken.py#L59-L81
train
217,976
bcbio/bcbio-nextgen
bcbio/qc/kraken.py
_summarize_kraken
def _summarize_kraken(fn):
    """get the value at species level"""
    kraken = {}
    list_sp, list_value = [], []
    with open(fn) as handle:
        for line in handle:
            cols = line.strip().split("\t")
            sp = cols[5].strip()
            if len(sp.split(" ")) > 1 and not sp.startswith("cellular"):
                list_sp.append(sp)
                list_value.append(cols[0])
    kraken = {"kraken_sp": list_sp, "kraken_value": list_value}
    return kraken
python
get the value at species level
[ "get", "the", "value", "at", "species", "level" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/kraken.py#L83-L95
train
217,977
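The species filter above relies on kraken-report's tab-separated layout (percentage in column 0, taxon name in column 5). A self-contained illustration on two fabricated report lines:

report_lines = [
    "12.50\t125\t125\tU\t0\tunclassified",
    "5.30\t53\t10\tS\t562\t          Escherichia coli",
]
species, values = [], []
for line in report_lines:
    cols = line.strip().split("\t")
    name = cols[5].strip()
    # Multi-word names are treated as species-level entries.
    if len(name.split(" ")) > 1 and not name.startswith("cellular"):
        species.append(name)
        values.append(cols[0])
print(species, values)  # -> ['Escherichia coli'] ['5.30']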
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_get_main_and_json
def _get_main_and_json(directory):
    """Retrieve the main CWL and sample JSON files from a bcbio generated directory.
    """
    directory = os.path.normpath(os.path.abspath(directory))
    checker_main = os.path.normpath(os.path.join(directory, os.path.pardir,
                                                 "checker-workflow-wrapping-tool.cwl"))
    if checker_main and os.path.exists(checker_main):
        main_cwl = [checker_main]
    else:
        main_cwl = glob.glob(os.path.join(directory, "main-*.cwl"))
        main_cwl = [x for x in main_cwl if not x.find("-pack") >= 0]
    assert len(main_cwl) == 1, "Did not find main CWL in %s" % directory
    main_json = glob.glob(os.path.join(directory, "main-*-samples.json"))
    assert len(main_json) == 1, "Did not find main json in %s" % directory
    project_name = os.path.basename(directory).split("-workflow")[0]
    return main_cwl[0], main_json[0], project_name
python
Retrieve the main CWL and sample JSON files from a bcbio generated directory.
[ "Retrieve", "the", "main", "CWL", "and", "sample", "JSON", "files", "from", "a", "bcbio", "generated", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L18-L32
train
217,978
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_tool
def _run_tool(cmd, use_container=True, work_dir=None, log_file=None):
    """Run with injection of bcbio path.

    Place at end for runs without containers to avoid overriding other
    bcbio installations.
    """
    if isinstance(cmd, (list, tuple)):
        cmd = " ".join([str(x) for x in cmd])
    cmd = utils.local_path_export(at_start=use_container) + cmd
    if log_file:
        cmd += " 2>&1 | tee -a %s" % log_file
    try:
        print("Running: %s" % cmd)
        subprocess.check_call(cmd, shell=True)
    finally:
        if use_container and work_dir:
            _chown_workdir(work_dir)
python
Run with injection of bcbio path. Place at end for runs without containers to avoid overriding other bcbio installations.
[ "Run", "with", "injection", "of", "bcbio", "path", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L34-L50
train
217,979
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_pack_cwl
def _pack_cwl(unpacked_cwl):
    """Pack CWL into a single document for submission.
    """
    out_file = "%s-pack%s" % os.path.splitext(unpacked_cwl)
    cmd = "cwltool --pack {unpacked_cwl} > {out_file}"
    _run_tool(cmd.format(**locals()))
    return out_file
python
Pack CWL into a single document for submission.
[ "Pack", "CWL", "into", "a", "single", "document", "for", "submission", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L52-L58
train
217,980
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_remove_bcbiovm_path
def _remove_bcbiovm_path():
    """Avoid referencing minimal bcbio_nextgen in bcbio_vm installation.
    """
    cur_path = os.path.dirname(os.path.realpath(sys.executable))
    paths = os.environ["PATH"].split(":")
    if cur_path in paths:
        paths.remove(cur_path)
        os.environ["PATH"] = ":".join(paths)
python
Avoid referencing minimal bcbio_nextgen in bcbio_vm installation.
[ "Avoid", "referencing", "minimal", "bcbio_nextgen", "in", "bcbio_vm", "installation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L74-L81
train
217,981
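The same PATH-pruning pattern in a generic, standalone form (drop_dir_from_path is illustrative). Note that list.remove drops only the first occurrence, matching the function above:

import os

def drop_dir_from_path(directory):
    # Remove the first occurrence of a directory from PATH, leaving the
    # rest of the search order untouched.
    paths = os.environ.get("PATH", "").split(":")
    if directory in paths:
        paths.remove(directory)
        os.environ["PATH"] = ":".join(paths)

drop_dir_from_path("/usr/local/share/bcbio-vm/anaconda/bin")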
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_arvados
def _run_arvados(args):
    """Run CWL on Arvados.
    """
    assert not args.no_container, "Arvados runs require containers"
    assert "ARVADOS_API_TOKEN" in os.environ and "ARVADOS_API_HOST" in os.environ, \
        "Need to set ARVADOS_API_TOKEN and ARVADOS_API_HOST in environment to run"
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    flags = ["--enable-reuse", "--api", "containers", "--submit", "--no-wait"]
    cmd = ["arvados-cwl-runner"] + flags + args.toolargs + [main_file, json_file]
    _run_tool(cmd)
python
Run CWL on Arvados.
[ "Run", "CWL", "on", "Arvados", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L99-L108
train
217,982
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_toil
def _run_toil(args):
    """Run CWL with Toil.
    """
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "toil_work"))
    tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpdir"))
    os.environ["TMPDIR"] = tmp_dir
    log_file = os.path.join(work_dir, "%s-toil.log" % project_name)
    jobstore = os.path.join(work_dir, "cwltoil_jobstore")
    flags = ["--jobStore", jobstore, "--logFile", log_file, "--workDir", tmp_dir,
             "--linkImports"]
    if os.path.exists(jobstore):
        flags += ["--restart"]
    # caching causes issues for batch systems
    if "--batchSystem" in args.toolargs:
        flags += ["--disableCaching"]
    flags += args.toolargs
    if args.no_container:
        _remove_bcbiovm_path()
        flags += ["--no-container", "--preserve-environment", "PATH", "HOME"]
    cmd = ["cwltoil"] + flags + ["--", main_file, json_file]
    with utils.chdir(work_dir):
        _run_tool(cmd, not args.no_container, work_dir)
        for tmpdir in (glob.glob(os.path.join(work_dir, "out_tmpdir*")) +
                       glob.glob(os.path.join(work_dir, "tmp*"))):
            if os.path.isdir(tmpdir):
                shutil.rmtree(tmpdir)
python
Run CWL with Toil.
[ "Run", "CWL", "with", "Toil", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L110-L135
train
217,983
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_bunny
def _run_bunny(args):
    """Run CWL with rabix bunny.
    """
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work"))
    flags = ["-b", work_dir]
    log_file = os.path.join(work_dir, "%s-bunny.log" % project_name)
    if os.path.exists(work_dir):
        caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir)
                  if os.path.isdir(os.path.join(work_dir, d))]
        if caches:
            flags += ["--cache-dir", max(caches, key=os.path.getmtime)]
    if args.no_container:
        _remove_bcbiovm_path()
        flags += ["--no-container"]
    cmd = ["rabix"] + flags + [main_file, json_file]
    with utils.chdir(work_dir):
        _run_tool(cmd, not args.no_container, work_dir, log_file)
python
Run CWL with rabix bunny.
[ "Run", "CWL", "with", "rabix", "bunny", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L137-L154
train
217,984
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_wes_stratus
def _run_wes_stratus(args, main_file, json_file):
    """Run WES on Illumina stratus endpoint server, which wes-client doesn't support.

    https://stratus-docs.readme.io/docs/quick-start-4
    """
    import requests
    base_url = args.host
    if not base_url.startswith("http"):
        base_url = "https://%s" % base_url
    with open(main_file) as in_handle:
        r = requests.post("%s/v1/workflows" % base_url,
                          headers={"Content-Type": "application/json",
                                   "Authorization": "Bearer %s" % args.auth},
                          data=in_handle.read())
    print(r.status_code)
    print(r.text)
python
Run WES on Illumina stratus endpoint server, which wes-client doesn't support. https://stratus-docs.readme.io/docs/quick-start-4
[ "Run", "WES", "on", "Illumina", "stratus", "endpoint", "server", "which", "wes", "-", "client", "doesn", "t", "support", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L172-L187
train
217,985
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_estimate_runner_memory
def _estimate_runner_memory(json_file):
    """Estimate Java memory requirements based on number of samples.

    A rough approach to selecting correct allocated memory for Cromwell.
    """
    with open(json_file) as in_handle:
        sinfo = json.load(in_handle)
    num_parallel = 1
    for key in ["config__algorithm__variantcaller", "description"]:
        item_counts = []
        n = 0
        for val in (sinfo.get(key) or []):
            n += 1
            if val:
                if isinstance(val, (list, tuple)):
                    item_counts.append(len(val))
                else:
                    item_counts.append(1)
        print(key, n, item_counts)
        if n and item_counts:
            num_parallel = n * max(item_counts)
            break
    if num_parallel < 25:
        return "3g"
    if num_parallel < 150:
        return "6g"
    elif num_parallel < 500:
        return "12g"
    else:
        return "24g"
python
Estimate Java memory requirements based on number of samples. A rough approach to selecting correct allocated memory for Cromwell.
[ "Estimate", "Java", "memory", "requirements", "based", "on", "number", "of", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L189-L218
train
217,986
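A worked example of the sizing heuristic: with 40 samples each configured for two variant callers, num_parallel is 40 * 2 = 80, which falls in the 25-149 band. The function below restates the decision table from the record above (runner_memory is an illustrative wrapper, not part of bcbio):

def runner_memory(num_parallel):
    # Decision table from _estimate_runner_memory.
    if num_parallel < 25:
        return "3g"
    if num_parallel < 150:
        return "6g"
    elif num_parallel < 500:
        return "12g"
    else:
        return "24g"

print(runner_memory(40 * 2))  # -> 6g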
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_cromwell
def _run_cromwell(args):
    """Run CWL with Cromwell.
    """
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work"))
    final_dir = utils.safe_makedir(os.path.join(work_dir, "final"))
    if args.no_container:
        _remove_bcbiovm_path()
    log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name)
    metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name)
    option_file = os.path.join(work_dir, "%s-options.json" % project_name)
    cromwell_opts = {"final_workflow_outputs_dir": final_dir,
                     "default_runtime_attributes": {"bootDiskSizeGb": 20}}
    with open(option_file, "w") as out_handle:
        json.dump(cromwell_opts, out_handle)
    cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file),
           "run", "--type", "CWL",
           "-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)]
    cmd += hpc.args_to_cromwell_cl(args)
    cmd += ["--metadata-output", metadata_file, "--options", option_file,
            "--inputs", json_file, main_file]
    with utils.chdir(work_dir):
        _run_tool(cmd, not args.no_container, work_dir, log_file)
        if metadata_file and utils.file_exists(metadata_file):
            with open(metadata_file) as in_handle:
                metadata = json.load(in_handle)
            if metadata["status"] == "Failed":
                _cromwell_debug(metadata)
                sys.exit(1)
            else:
                _cromwell_move_outputs(metadata, final_dir)
python
Run CWL with Cromwell.
[ "Run", "CWL", "with", "Cromwell", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L220-L251
train
217,987
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_cromwell_debug
def _cromwell_debug(metadata):
    """Format Cromwell failures to make debugging easier.
    """
    def get_failed_calls(cur, key=None):
        if key is None:
            key = []
        out = []
        if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur:
            out.append((key, cur))
        elif isinstance(cur, dict):
            for k, v in cur.items():
                out.extend(get_failed_calls(v, key + [k]))
        elif isinstance(cur, (list, tuple)):
            for i, v in enumerate(cur):
                out.extend(get_failed_calls(v, key + [i]))
        return out
    print("Failed bcbio Cromwell run")
    print("-------------------------")
    for fail_k, fail_call in get_failed_calls(metadata["calls"]):
        root_dir = os.path.join("cromwell_work", os.path.relpath(fail_call["callRoot"]))
        print("Failure in step: %s" % ".".join([str(x) for x in fail_k]))
        print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log",
                                                    "bcbio-nextgen-debug.log"))
        print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log",
                                                        "bcbio-nextgen-commands.log"))
        print(" Cromwell directory : %s" % root_dir)
        print()
python
Format Cromwell failures to make debugging easier.
[ "Format", "Cromwell", "failures", "to", "make", "debugging", "easier", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L253-L277
train
217,988
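The nested get_failed_calls is a generic recursive walk over Cromwell's metadata tree: any dict carrying both "failures" and "callRoot" is collected along with its path. The same logic, extracted and run against a toy metadata fragment (structure simplified for illustration):

def failed_calls(cur, key=()):
    # Collect (path, call) pairs, recursing through dicts and lists.
    out = []
    if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur:
        out.append((key, cur))
    elif isinstance(cur, dict):
        for k, v in cur.items():
            out.extend(failed_calls(v, key + (k,)))
    elif isinstance(cur, (list, tuple)):
        for i, v in enumerate(cur):
            out.extend(failed_calls(v, key + (i,)))
    return out

calls = {"wf.align": [{"ok": True},
                      {"failures": ["boom"], "callRoot": "/work/call-align/shard-1"}]}
print(failed_calls(calls))
# -> [(('wf.align', 1), {'failures': ['boom'], 'callRoot': '/work/call-align/shard-1'})]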
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_cromwell_move_outputs
def _cromwell_move_outputs(metadata, final_dir):
    """Move Cromwell outputs to the final upload directory.
    """
    sample_key = [k for k in metadata["outputs"].keys()
                  if k.endswith(("rgnames__sample", "rgnames__sample_out"))][0]
    project_dir = utils.safe_makedir(os.path.join(final_dir, "project"))
    samples = metadata["outputs"][sample_key]
    def _copy_with_secondary(f, dirname):
        if len(f["secondaryFiles"]) > 1:
            dirname = utils.safe_makedir(os.path.join(dirname,
                                                      os.path.basename(os.path.dirname(f["location"]))))
        if not objectstore.is_remote(f["location"]):
            finalf = os.path.join(dirname, os.path.basename(f["location"]))
            if not utils.file_uptodate(finalf, f["location"]):
                shutil.copy(f["location"], dirname)
        [_copy_with_secondary(sf, dirname) for sf in f["secondaryFiles"]]
    def _write_to_dir(val, dirname):
        if isinstance(val, (list, tuple)):
            [_write_to_dir(v, dirname) for v in val]
        else:
            _copy_with_secondary(val, dirname)
    for k, vals in metadata["outputs"].items():
        if k != sample_key:
            if k.endswith(("summary__multiqc")):
                vs = [v for v in vals if v]
                assert len(vs) == 1
                _write_to_dir(vs[0], project_dir)
            elif len(vals) == len(samples):
                for s, v in zip(samples, vals):
                    if v:
                        _write_to_dir(v, utils.safe_makedir(os.path.join(final_dir, s)))
            elif len(vals) == 1:
                _write_to_dir(vals[0], project_dir)
            elif len(vals) > 0:
                raise ValueError("Unexpected sample and outputs: %s %s %s" % (k, samples, vals))
python
Move Cromwell outputs to the final upload directory.
[ "Move", "Cromwell", "outputs", "to", "the", "final", "upload", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L279-L311
train
217,989
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_sbgenomics
def _run_sbgenomics(args):
    """Run CWL on SevenBridges platform and Cancer Genomics Cloud.
    """
    assert not args.no_container, "Seven Bridges runs require containers"
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    flags = []
    cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file]
    _run_tool(cmd)
python
Run CWL on SevenBridges platform and Cancer Genomics Cloud.
[ "Run", "CWL", "on", "SevenBridges", "platform", "and", "Cancer", "Genomics", "Cloud", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L313-L320
train
217,990
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_funnel
def _run_funnel(args):
    """Run funnel TES server with rabix bunny for CWL.
    """
    host = "localhost"
    port = "8088"
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "funnel_work"))
    log_file = os.path.join(work_dir, "%s-funnel.log" % project_name)
    # Create bunny configuration directory with TES backend
    orig_config_dir = os.path.join(os.path.dirname(os.path.realpath(utils.which("rabix"))), "config")
    work_config_dir = utils.safe_makedir(os.path.join(work_dir, "rabix_config"))
    for fname in os.listdir(orig_config_dir):
        if fname == "core.properties":
            with open(os.path.join(orig_config_dir, fname)) as in_handle:
                with open(os.path.join(work_config_dir, fname), "w") as out_handle:
                    for line in in_handle:
                        if line.startswith("backend.embedded.types"):
                            line = "backend.embedded.types=TES\n"
                        out_handle.write(line)
        else:
            shutil.copy(os.path.join(orig_config_dir, fname),
                        os.path.join(work_config_dir, fname))
    flags = ["-c", work_config_dir, "-tes-url=http://%s:%s" % (host, port),
             "-tes-storage=%s" % work_dir]
    if args.no_container:
        _remove_bcbiovm_path()
        flags += ["--no-container"]
    cmd = ["rabix"] + flags + [main_file, json_file]
    funnelp = subprocess.Popen(["funnel", "server", "run",
                                "--Server.HostName", host,
                                "--Server.HTTPPort", port,
                                "--LocalStorage.AllowedDirs", work_dir,
                                "--Worker.WorkDir", os.path.join(work_dir, "funnel-work")])
    try:
        with utils.chdir(work_dir):
            _run_tool(cmd, not args.no_container, work_dir, log_file)
    finally:
        funnelp.kill()
python
Run funnel TES server with rabix bunny for CWL.
[ "Run", "funnel", "TES", "server", "with", "rabix", "bunny", "for", "CWL", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L322-L357
train
217,991
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_parse_qualimap_globals_inregion
def _parse_qualimap_globals_inregion(table):
    """Retrieve metrics from the global targeted region table.
    """
    out = {}
    for row in table.find_all("tr"):
        col, val = [x.text for x in row.find_all("td")]
        if col == "Mapped reads":
            out.update(_parse_num_pct("%s (in regions)" % col, val))
    return out
python
Retrieve metrics from the global targeted region table.
[ "Retrieve", "metrics", "from", "the", "global", "targeted", "region", "table", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L131-L139
train
217,992
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_parse_qualimap_coverage
def _parse_qualimap_coverage(table):
    """Parse summary qualimap coverage metrics.
    """
    out = {}
    for row in table.find_all("tr"):
        col, val = [x.text for x in row.find_all("td")]
        if col == "Mean":
            out["Coverage (Mean)"] = val
    return out
python
Parse summary qualimap coverage metrics.
[ "Parse", "summary", "qualimap", "coverage", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L141-L149
train
217,993
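Both qualimap parsers above walk BeautifulSoup table objects row by row. A self-contained run of the same pattern over a stub of the qualimap HTML (assuming bs4 is installed; the HTML fragment is fabricated):

from bs4 import BeautifulSoup

html = "<table><tr><td>Mean</td><td>48.6X</td></tr><tr><td>Std</td><td>12.1X</td></tr></table>"
table = BeautifulSoup(html, "html.parser").find("table")
out = {}
for row in table.find_all("tr"):
    col, val = [x.text for x in row.find_all("td")]
    if col == "Mean":
        out["Coverage (Mean)"] = val
print(out)  # -> {'Coverage (Mean)': '48.6X'}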
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_bed_to_bed6
def _bed_to_bed6(orig_file, out_dir):
    """Convert bed to required bed6 inputs.
    """
    bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file)))
    if not utils.file_exists(bed6_file):
        with open(bed6_file, "w") as out_handle:
            for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)):
                region = [x for x in list(region) if x]
                fillers = [str(i), "1.0", "+"]
                full = region + fillers[:6 - len(region)]
                out_handle.write("\t".join(full) + "\n")
    return bed6_file
python
Convert bed to required bed6 inputs.
[ "Convert", "bed", "to", "required", "bed6", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L161-L172
train
217,994
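The padding logic from _bed_to_bed6, shown without the pybedtools dependency (pad_to_bed6 is illustrative). Note the filler list is positional, so inputs that already carry a name column end up with the line index in the score slot, mirroring the original:

def pad_to_bed6(fields, idx):
    # BED6 expects chrom, start, end, name, score, strand; append only as
    # many fillers as needed to reach six columns.
    fillers = [str(idx), "1.0", "+"]
    return fields + fillers[:6 - len(fields)]

print(pad_to_bed6(["chr1", "100", "200"], 0))
# -> ['chr1', '100', '200', '0', '1.0', '+']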
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_detect_duplicates
def _detect_duplicates(bam_file, out_dir, data):
    """count duplicate percentage
    """
    out_file = os.path.join(out_dir, "dup_metrics.txt")
    if not utils.file_exists(out_file):
        dup_align_bam = postalign.dedup_bam(bam_file, data)
        logger.info("Detecting duplicates in %s." % dup_align_bam)
        dup_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=False)
        tot_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=True)
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                out_handle.write("%s\n%s\n" % (dup_count, tot_count))
    with open(out_file) as in_handle:
        dupes = float(next(in_handle).strip())
        total = float(next(in_handle).strip())
    if total == 0:
        rate = "NA"
    else:
        rate = dupes / total
    return {"Duplication Rate of Mapped": rate}
python
count duplicate percentage
[ "count", "duplicate", "percentage" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L219-L239
train
217,995
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
run_rnaseq
def run_rnaseq(bam_file, data, out_dir):
    """Run qualimap for a rnaseq bam file and parse results
    """
    strandedness = {"firststrand": "strand-specific-reverse",
                    "secondstrand": "strand-specific-forward",
                    "unstranded": "non-strand-specific"}
    # Qualimap results should be saved to a directory named after sample.
    # MultiQC (for parsing additional data) picks the sample name after the dir as follows:
    #   <sample name>/raw_data_qualimapReport/insert_size_histogram.txt
    results_dir = os.path.join(out_dir, dd.get_sample_name(data))
    results_file = os.path.join(results_dir, "rnaseq_qc_results.txt")
    report_file = os.path.join(results_dir, "qualimapReport.html")
    config = data["config"]
    gtf_file = dd.get_gtf_file(data)
    library = strandedness[dd.get_strandedness(data)]
    if not utils.file_exists(results_file):
        with file_transaction(data, results_dir) as tx_results_dir:
            utils.safe_makedir(tx_results_dir)
            bam.index(bam_file, config)
            cmd = _rnaseq_qualimap_cmd(data, bam_file, tx_results_dir, gtf_file, library)
            do.run(cmd, "Qualimap for {}".format(dd.get_sample_name(data)))
            tx_results_file = os.path.join(tx_results_dir, "rnaseq_qc_results.txt")
            cmd = "sed -i 's/bam file = .*/bam file = %s.bam/' %s" % (dd.get_sample_name(data),
                                                                      tx_results_file)
            do.run(cmd, "Fix Name Qualimap for {}".format(dd.get_sample_name(data)))
    metrics = _parse_rnaseq_qualimap_metrics(report_file)
    metrics.update(_detect_duplicates(bam_file, results_dir, data))
    metrics.update(_detect_rRNA(data, results_dir))
    metrics.update({"Average_insert_size": salmon.estimate_fragment_size(data)})
    metrics = _parse_metrics(metrics)
    # Qualimap output folder (results_dir) needs to be named after the sample (see comments
    # above). However, in order to keep its name after upload, we need to put the base QC
    # file (results_file) into the root directory (out_dir):
    base_results_file = os.path.join(out_dir, os.path.basename(results_file))
    shutil.copyfile(results_file, base_results_file)
    return {"base": base_results_file,
            "secondary": _find_qualimap_secondary_files(results_dir, base_results_file),
            "metrics": metrics}
python
Run qualimap for a rnaseq bam file and parse results
[ "Run", "qualimap", "for", "a", "rnaseq", "bam", "file", "and", "parse", "results" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L317-L354
train
217,996
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_rnaseq_qualimap_cmd
def _rnaseq_qualimap_cmd(data, bam_file, out_dir, gtf_file=None, library="non-strand-specific"):
    """Create command lines for qualimap
    """
    config = data["config"]
    qualimap = config_utils.get_program("qualimap", config)
    resources = config_utils.get_resources("qualimap", config)
    num_cores = resources.get("cores", dd.get_num_cores(data))
    max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), num_cores)
    export = "%s%s" % (utils.java_freetype_fix(), utils.local_path_export())
    export = "%s%s export JAVA_OPTS='-Xms32m -Xmx%s -Djava.io.tmpdir=%s' && " % (
        utils.java_freetype_fix(), utils.local_path_export(), max_mem, out_dir)
    paired = " --paired" if bam.is_paired(bam_file) else ""
    cmd = ("unset DISPLAY && {export} {qualimap} rnaseq -outdir {out_dir} "
           "-a proportional -bam {bam_file} -p {library}{paired} "
           "-gtf {gtf_file}").format(**locals())
    return cmd
python
def _rnaseq_qualimap_cmd(data, bam_file, out_dir, gtf_file=None, library="non-strand-specific"):
    """
    Create command lines for qualimap
    """
    config = data["config"]
    qualimap = config_utils.get_program("qualimap", config)
    resources = config_utils.get_resources("qualimap", config)
    num_cores = resources.get("cores", dd.get_num_cores(data))
    max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), num_cores)
    export = "%s%s" % (utils.java_freetype_fix(), utils.local_path_export())
    export = "%s%s export JAVA_OPTS='-Xms32m -Xmx%s -Djava.io.tmpdir=%s' && " % (
        utils.java_freetype_fix(), utils.local_path_export(), max_mem, out_dir)
    paired = " --paired" if bam.is_paired(bam_file) else ""
    cmd = ("unset DISPLAY && {export} {qualimap} rnaseq -outdir {out_dir} "
           "-a proportional -bam {bam_file} -p {library}{paired} "
           "-gtf {gtf_file}").format(**locals())
    return cmd
[ "def", "_rnaseq_qualimap_cmd", "(", "data", ",", "bam_file", ",", "out_dir", ",", "gtf_file", "=", "None", ",", "library", "=", "\"non-strand-specific\"", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "qualimap", "=", "config_utils", ".", "get_progr...
Create command lines for qualimap
[ "Create", "command", "lines", "for", "qualimap" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L356-L373
train
217,997
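A sketch of what this helper assembles, under assumed inputs. The `data` placeholder, file paths, and the memory value shown in the comment are illustrative; the environment-export prefix varies by installation.

# Hypothetical rendering of the Qualimap command; all inputs are assumptions.
data = {"config": {}}    # real runs pass the full bcbio data dict
cmd = _rnaseq_qualimap_cmd(data, "Test1-ready.bam", "qc/Test1",
                           gtf_file="ref-transcripts.gtf",
                           library="strand-specific-reverse")
# With default resources the result resembles:
#   unset DISPLAY && export JAVA_OPTS='-Xms32m -Xmx2G -Djava.io.tmpdir=qc/Test1' && \
#     qualimap rnaseq -outdir qc/Test1 -a proportional -bam Test1-ready.bam \
#     -p strand-specific-reverse --paired -gtf ref-transcripts.gtf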
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_find_qualimap_secondary_files
def _find_qualimap_secondary_files(results_dir, base_file):
    """Retrieve additional files, avoiding double uploading the base file.
    """
    def not_dup(x):
        is_dup = (os.path.basename(x) == os.path.basename(base_file) and
                  os.path.getsize(x) == os.path.getsize(base_file))
        return not is_dup
    def is_problem_file(x):
        """Problematic files with characters that make some CWL runners unhappy.
        """
        return x.find("(") >= 0 or x.find(")") >= 0 or x.find(" ") >= 0
    return list(filter(lambda x: not is_problem_file(x),
                       filter(not_dup,
                              glob.glob(os.path.join(results_dir, 'qualimapReport.html')) +
                              glob.glob(os.path.join(results_dir, '*.txt')) +
                              glob.glob(os.path.join(results_dir, "css", "*")) +
                              glob.glob(os.path.join(results_dir, "raw_data_qualimapReport", "*")) +
                              glob.glob(os.path.join(results_dir, "images_qualimapReport", "*")))))
python
def _find_qualimap_secondary_files(results_dir, base_file):
    """Retrieve additional files, avoiding double uploading the base file.
    """
    def not_dup(x):
        is_dup = (os.path.basename(x) == os.path.basename(base_file) and
                  os.path.getsize(x) == os.path.getsize(base_file))
        return not is_dup
    def is_problem_file(x):
        """Problematic files with characters that make some CWL runners unhappy.
        """
        return x.find("(") >= 0 or x.find(")") >= 0 or x.find(" ") >= 0
    return list(filter(lambda x: not is_problem_file(x),
                       filter(not_dup,
                              glob.glob(os.path.join(results_dir, 'qualimapReport.html')) +
                              glob.glob(os.path.join(results_dir, '*.txt')) +
                              glob.glob(os.path.join(results_dir, "css", "*")) +
                              glob.glob(os.path.join(results_dir, "raw_data_qualimapReport", "*")) +
                              glob.glob(os.path.join(results_dir, "images_qualimapReport", "*")))))
[ "def", "_find_qualimap_secondary_files", "(", "results_dir", ",", "base_file", ")", ":", "def", "not_dup", "(", "x", ")", ":", "is_dup", "=", "(", "os", ".", "path", ".", "basename", "(", "x", ")", "==", "os", ".", "path", ".", "basename", "(", "base_f...
Retrieve additional files, avoiding double uploading the base file.
[ "Retrieve", "additional", "files", "avoiding", "double", "uploading", "the", "base", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L375-L392
train
217,998
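A minimal usage sketch for the function above; the directory layout is an assumption based on how run_rnaseq names its per-sample results folder.

# Hypothetical example: gather the extra Qualimap report files for upload,
# skipping the already-uploaded base file and CWL-unfriendly names
# (spaces, parentheses).
secondary = _find_qualimap_secondary_files(
    "qc/Test1/qualimap_rnaseq/Test1",                    # per-sample results_dir (assumed layout)
    "qc/Test1/qualimap_rnaseq/rnaseq_qc_results.txt")    # base file already copied to out_dir
for fname in secondary:
    print(fname)   # e.g. qualimapReport.html, raw_data_qualimapReport/* entries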
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
classifyplot_from_plotfiles
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None):
    """Create a plot from individual summary csv files with classification metrics.
    """
    dfs = [pd.read_csv(x) for x in plot_files]
    samples = []
    for df in dfs:
        for sample in df["sample"].unique():
            if sample not in samples:
                samples.append(sample)
    df = pd.concat(dfs)
    df.to_csv(out_csv, index=False)
    return classifyplot_from_valfile(out_csv, outtype, title, size, samples)
python
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None):
    """Create a plot from individual summary csv files with classification metrics.
    """
    dfs = [pd.read_csv(x) for x in plot_files]
    samples = []
    for df in dfs:
        for sample in df["sample"].unique():
            if sample not in samples:
                samples.append(sample)
    df = pd.concat(dfs)
    df.to_csv(out_csv, index=False)
    return classifyplot_from_valfile(out_csv, outtype, title, size, samples)
[ "def", "classifyplot_from_plotfiles", "(", "plot_files", ",", "out_csv", ",", "outtype", "=", "\"png\"", ",", "title", "=", "None", ",", "size", "=", "None", ")", ":", "dfs", "=", "[", "pd", ".", "read_csv", "(", "x", ")", "for", "x", "in", "plot_files...
Create a plot from individual summary csv files with classification metrics.
[ "Create", "a", "plot", "from", "individual", "summary", "csv", "files", "with", "classification", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L22-L33
train
217,999
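A brief, hypothetical invocation of classifyplot_from_plotfiles; the CSV file names are illustrative, assuming each input carries a "sample" column plus classification counts as the function expects.

# Hypothetical inputs; file names are assumptions for illustration.
plot_files = ["Test1-validate-summary.csv", "Test2-validate-summary.csv"]
plot = classifyplot_from_plotfiles(plot_files, "combined-validate.csv",
                                   outtype="png", title="Validation summary")
# combined-validate.csv now holds the concatenated rows; the return value
# comes from classifyplot_from_valfile, which renders the actual plot.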