id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
236,900
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_run_workflow
def _run_workflow(items, paired, workflow_file, work_dir): """Run manta analysis inside prepared workflow directory. """ utils.remove_safe(os.path.join(work_dir, "workspace")) data = paired.tumor_data if paired else items[0] cmd = [utils.get_program_python("configManta.py"), workflow_file, "-m", "local", "-j", dd.get_num_cores(data)] do.run(cmd, "Run manta SV analysis") utils.remove_safe(os.path.join(work_dir, "workspace"))
python
def _run_workflow(items, paired, workflow_file, work_dir): utils.remove_safe(os.path.join(work_dir, "workspace")) data = paired.tumor_data if paired else items[0] cmd = [utils.get_program_python("configManta.py"), workflow_file, "-m", "local", "-j", dd.get_num_cores(data)] do.run(cmd, "Run manta SV analysis") utils.remove_safe(os.path.join(work_dir, "workspace"))
[ "def", "_run_workflow", "(", "items", ",", "paired", ",", "workflow_file", ",", "work_dir", ")", ":", "utils", ".", "remove_safe", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"workspace\"", ")", ")", "data", "=", "paired", ".", "tumor_da...
Run manta analysis inside prepared workflow directory.
[ "Run", "manta", "analysis", "inside", "prepared", "workflow", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L91-L98
236,901
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_prep_config
def _prep_config(items, paired, work_dir): """Run initial configuration, generating a run directory for Manta. """ assert utils.which("configManta.py"), "Could not find installed configManta.py" out_file = os.path.join(work_dir, "runWorkflow.py") if not utils.file_exists(out_file) or _out_of_date(out_file): config_script = os.path.realpath(utils.which("configManta.py")) cmd = [utils.get_program_python("configManta.py"), config_script] if paired: if paired.normal_bam: cmd += ["--normalBam=%s" % paired.normal_bam, "--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--bam=%s" % dd.get_align_bam(data) for data in items] data = paired.tumor_data if paired else items[0] cmd += ["--referenceFasta=%s" % dd.get_ref_file(data), "--runDir=%s" % work_dir] if dd.get_coverage_interval(data) not in ["genome"]: cmd += ["--exome"] for region in _maybe_limit_chromosomes(data): cmd += ["--region", region] resources = config_utils.get_resources("manta", data["config"]) if resources.get("options"): cmd += [str(x) for x in resources["options"]] # If we are removing polyX, avoid calling on small indels which require # excessively long runtimes on noisy WGS runs if "polyx" in dd.get_exclude_regions(data): cmd += ["--config", _prep_streamlined_config(config_script, work_dir)] do.run(cmd, "Configure manta SV analysis") return out_file
python
def _prep_config(items, paired, work_dir): assert utils.which("configManta.py"), "Could not find installed configManta.py" out_file = os.path.join(work_dir, "runWorkflow.py") if not utils.file_exists(out_file) or _out_of_date(out_file): config_script = os.path.realpath(utils.which("configManta.py")) cmd = [utils.get_program_python("configManta.py"), config_script] if paired: if paired.normal_bam: cmd += ["--normalBam=%s" % paired.normal_bam, "--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--tumorBam=%s" % paired.tumor_bam] else: cmd += ["--bam=%s" % dd.get_align_bam(data) for data in items] data = paired.tumor_data if paired else items[0] cmd += ["--referenceFasta=%s" % dd.get_ref_file(data), "--runDir=%s" % work_dir] if dd.get_coverage_interval(data) not in ["genome"]: cmd += ["--exome"] for region in _maybe_limit_chromosomes(data): cmd += ["--region", region] resources = config_utils.get_resources("manta", data["config"]) if resources.get("options"): cmd += [str(x) for x in resources["options"]] # If we are removing polyX, avoid calling on small indels which require # excessively long runtimes on noisy WGS runs if "polyx" in dd.get_exclude_regions(data): cmd += ["--config", _prep_streamlined_config(config_script, work_dir)] do.run(cmd, "Configure manta SV analysis") return out_file
[ "def", "_prep_config", "(", "items", ",", "paired", ",", "work_dir", ")", ":", "assert", "utils", ".", "which", "(", "\"configManta.py\"", ")", ",", "\"Could not find installed configManta.py\"", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ...
Run initial configuration, generating a run directory for Manta.
[ "Run", "initial", "configuration", "generating", "a", "run", "directory", "for", "Manta", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L100-L129
236,902
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_prep_streamlined_config
def _prep_streamlined_config(config_script, work_dir): """Create manta INI file without steps that potentially increase runtimes. This removes calling of small indels. """ new_min_size = 100 in_file = config_script + ".ini" out_file = os.path.join(work_dir, os.path.basename(in_file)) with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("minCandidateVariantSize"): out_handle.write("minCandidateVariantSize = %s\n" % new_min_size) else: out_handle.write(line) return out_file
python
def _prep_streamlined_config(config_script, work_dir): new_min_size = 100 in_file = config_script + ".ini" out_file = os.path.join(work_dir, os.path.basename(in_file)) with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("minCandidateVariantSize"): out_handle.write("minCandidateVariantSize = %s\n" % new_min_size) else: out_handle.write(line) return out_file
[ "def", "_prep_streamlined_config", "(", "config_script", ",", "work_dir", ")", ":", "new_min_size", "=", "100", "in_file", "=", "config_script", "+", "\".ini\"", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "os", ".", "path", ".", ...
Create manta INI file without steps that potentially increase runtimes. This removes calling of small indels.
[ "Create", "manta", "INI", "file", "without", "steps", "that", "potentially", "increase", "runtimes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L131-L146
236,903
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_maybe_limit_chromosomes
def _maybe_limit_chromosomes(data): """Potentially limit chromosomes to avoid problematically named HLA contigs. HLAs have ':' characters in them which confuse downstream processing. If we have no problematic chromosomes we don't limit anything. """ std_chroms = [] prob_chroms = [] noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data) for contig in ref.file_contigs(dd.get_ref_file(data)): if contig.name.find(":") > 0 or (noalt_calling and not chromhacks.is_nonalt(contig.name)): prob_chroms.append(contig.name) else: std_chroms.append(contig.name) if len(prob_chroms) > 0: return std_chroms else: return []
python
def _maybe_limit_chromosomes(data): std_chroms = [] prob_chroms = [] noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data) for contig in ref.file_contigs(dd.get_ref_file(data)): if contig.name.find(":") > 0 or (noalt_calling and not chromhacks.is_nonalt(contig.name)): prob_chroms.append(contig.name) else: std_chroms.append(contig.name) if len(prob_chroms) > 0: return std_chroms else: return []
[ "def", "_maybe_limit_chromosomes", "(", "data", ")", ":", "std_chroms", "=", "[", "]", "prob_chroms", "=", "[", "]", "noalt_calling", "=", "\"noalt_calling\"", "in", "dd", ".", "get_tools_on", "(", "data", ")", "or", "\"altcontigs\"", "in", "dd", ".", "get_e...
Potentially limit chromosomes to avoid problematically named HLA contigs. HLAs have ':' characters in them which confuse downstream processing. If we have no problematic chromosomes we don't limit anything.
[ "Potentially", "limit", "chromosomes", "to", "avoid", "problematically", "named", "HLA", "contigs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L148-L165
236,904
bcbio/bcbio-nextgen
bcbio/structural/manta.py
_out_of_date
def _out_of_date(rw_file): """Check if a run workflow file points to an older version of manta and needs a refresh. """ with open(rw_file) as in_handle: for line in in_handle: if line.startswith("sys.path.append"): file_version = line.split("/lib/python")[0].split("Cellar/manta/")[-1] if file_version != programs.get_version_manifest("manta"): return True return False
python
def _out_of_date(rw_file): with open(rw_file) as in_handle: for line in in_handle: if line.startswith("sys.path.append"): file_version = line.split("/lib/python")[0].split("Cellar/manta/")[-1] if file_version != programs.get_version_manifest("manta"): return True return False
[ "def", "_out_of_date", "(", "rw_file", ")", ":", "with", "open", "(", "rw_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"sys.path.append\"", ")", ":", "file_version", "=", "line", ".", ...
Check if a run workflow file points to an older version of manta and needs a refresh.
[ "Check", "if", "a", "run", "workflow", "file", "points", "to", "an", "older", "version", "of", "manta", "and", "needs", "a", "refresh", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L171-L180
236,905
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_freebayes_options_from_config
def _freebayes_options_from_config(items, config, out_file, region=None): """Prepare standard options from configuration input. Input BED target files are merged to avoid overlapping regions which cause FreeBayes to call multiple times. Checks for empty sets of target regions after filtering for high depth, in which case we should skip the FreeBayes run. """ opts = ["--genotype-qualities", "--strict-vcf"] cur_ploidy = ploidy.get_ploidy(items, region) base_ploidy = ploidy.get_ploidy(items) opts += ["--ploidy", str(cur_ploidy)] # Adjust min fraction when trying to call more sensitively in certain # regions. This is primarily meant for pooled mitochondrial calling. if (isinstance(region, (list, tuple)) and chromhacks.is_mitochondrial(region[0]) and cur_ploidy >= base_ploidy and "--min-alternate-fraction" not in opts and "-F" not in opts): opts += ["--min-alternate-fraction", "0.01"] variant_regions = bedutils.population_variant_regions(items, merged=True) # Produce gVCF output if any("gvcf" in dd.get_tools_on(d) for d in items): opts += ["--gvcf", "--gvcf-chunk", "50000"] no_target_regions = False target = shared.subset_variant_regions(variant_regions, region, out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): if os.path.getsize(target) == 0: no_target_regions = True else: opts += ["--targets", target] else: opts += ["--region", region_to_freebayes(target)] resources = config_utils.get_resources("freebayes", config) if resources.get("options"): opts += resources["options"] return opts, no_target_regions
python
def _freebayes_options_from_config(items, config, out_file, region=None): opts = ["--genotype-qualities", "--strict-vcf"] cur_ploidy = ploidy.get_ploidy(items, region) base_ploidy = ploidy.get_ploidy(items) opts += ["--ploidy", str(cur_ploidy)] # Adjust min fraction when trying to call more sensitively in certain # regions. This is primarily meant for pooled mitochondrial calling. if (isinstance(region, (list, tuple)) and chromhacks.is_mitochondrial(region[0]) and cur_ploidy >= base_ploidy and "--min-alternate-fraction" not in opts and "-F" not in opts): opts += ["--min-alternate-fraction", "0.01"] variant_regions = bedutils.population_variant_regions(items, merged=True) # Produce gVCF output if any("gvcf" in dd.get_tools_on(d) for d in items): opts += ["--gvcf", "--gvcf-chunk", "50000"] no_target_regions = False target = shared.subset_variant_regions(variant_regions, region, out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): if os.path.getsize(target) == 0: no_target_regions = True else: opts += ["--targets", target] else: opts += ["--region", region_to_freebayes(target)] resources = config_utils.get_resources("freebayes", config) if resources.get("options"): opts += resources["options"] return opts, no_target_regions
[ "def", "_freebayes_options_from_config", "(", "items", ",", "config", ",", "out_file", ",", "region", "=", "None", ")", ":", "opts", "=", "[", "\"--genotype-qualities\"", ",", "\"--strict-vcf\"", "]", "cur_ploidy", "=", "ploidy", ".", "get_ploidy", "(", "items",...
Prepare standard options from configuration input. Input BED target files are merged to avoid overlapping regions which cause FreeBayes to call multiple times. Checks for empty sets of target regions after filtering for high depth, in which case we should skip the FreeBayes run.
[ "Prepare", "standard", "options", "from", "configuration", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L29-L64
236,906
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_add_somatic_opts
def _add_somatic_opts(opts, paired): """Add somatic options to current set. See _run_freebayes_paired for references. """ if "--min-alternate-fraction" not in opts and "-F" not in opts: # add minimum reportable allele frequency # FreeBayes defaults to 20%, but use 10% by default for the # tumor case min_af = float(utils.get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 opts += " --min-alternate-fraction %s" % min_af # Recommended settings for cancer calling opts += (" --pooled-discrete --pooled-continuous " "--report-genotype-likelihood-max --allele-balance-priors-off") return opts
python
def _add_somatic_opts(opts, paired): if "--min-alternate-fraction" not in opts and "-F" not in opts: # add minimum reportable allele frequency # FreeBayes defaults to 20%, but use 10% by default for the # tumor case min_af = float(utils.get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 opts += " --min-alternate-fraction %s" % min_af # Recommended settings for cancer calling opts += (" --pooled-discrete --pooled-continuous " "--report-genotype-likelihood-max --allele-balance-priors-off") return opts
[ "def", "_add_somatic_opts", "(", "opts", ",", "paired", ")", ":", "if", "\"--min-alternate-fraction\"", "not", "in", "opts", "and", "\"-F\"", "not", "in", "opts", ":", "# add minimum reportable allele frequency", "# FreeBayes defaults to 20%, but use 10% by default for the", ...
Add somatic options to current set. See _run_freebayes_paired for references.
[ "Add", "somatic", "options", "to", "current", "set", ".", "See", "_run_freebayes_paired", "for", "references", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L66-L79
236,907
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_run_freebayes_caller
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None, somatic=None): """Detect SNPs and indels with FreeBayes. Performs post-filtering to remove very low quality variants which can cause issues feeding into GATK. Breaks variants into individual allelic primitives for analysis and evaluation. """ config = items[0]["config"] if out_file is None: out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not utils.file_exists(out_file): if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: freebayes = config_utils.get_program("freebayes", config) input_bams = " ".join("-b %s" % x for x in align_bams) opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region) if no_target_regions: vcfutils.write_empty_vcf(tx_out_file, config, samples=[dd.get_sample_name(d) for d in items]) else: opts = " ".join(opts) # Recommended options from 1000 genomes low-complexity evaluation # https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ opts += " --min-repeat-entropy 1" # Remove partial observations, which cause a preference for heterozygote calls # https://github.com/ekg/freebayes/issues/234#issuecomment-205331765 opts += " --no-partial-observations" if somatic: opts = _add_somatic_opts(opts, somatic) compress_cmd = "| bgzip -c" if out_file.endswith("gz") else "" # For multi-sample outputs, ensure consistent order samples = ("-s" + ",".join([dd.get_sample_name(d) for d in items])) if len(items) > 1 else "" fix_ambig = vcfutils.fix_ambiguous_cl() py_cl = config_utils.get_program("py", config) cmd = ("{freebayes} -f {ref_file} {opts} {input_bams} " """| bcftools filter -i 'ALT="<*>" || QUAL > 5' """ "| {fix_ambig} | bcftools view {samples} -a - | " "{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | " "vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | " "vt normalize -n -r {ref_file} -q - | vcfuniqalleles | vt uniq - 2> /dev/null " "{compress_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {}) return out_file
python
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None, somatic=None): config = items[0]["config"] if out_file is None: out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not utils.file_exists(out_file): if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: freebayes = config_utils.get_program("freebayes", config) input_bams = " ".join("-b %s" % x for x in align_bams) opts, no_target_regions = _freebayes_options_from_config(items, config, out_file, region) if no_target_regions: vcfutils.write_empty_vcf(tx_out_file, config, samples=[dd.get_sample_name(d) for d in items]) else: opts = " ".join(opts) # Recommended options from 1000 genomes low-complexity evaluation # https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ opts += " --min-repeat-entropy 1" # Remove partial observations, which cause a preference for heterozygote calls # https://github.com/ekg/freebayes/issues/234#issuecomment-205331765 opts += " --no-partial-observations" if somatic: opts = _add_somatic_opts(opts, somatic) compress_cmd = "| bgzip -c" if out_file.endswith("gz") else "" # For multi-sample outputs, ensure consistent order samples = ("-s" + ",".join([dd.get_sample_name(d) for d in items])) if len(items) > 1 else "" fix_ambig = vcfutils.fix_ambiguous_cl() py_cl = config_utils.get_program("py", config) cmd = ("{freebayes} -f {ref_file} {opts} {input_bams} " """| bcftools filter -i 'ALT="<*>" || QUAL > 5' """ "| {fix_ambig} | bcftools view {samples} -a - | " "{py_cl} -x 'bcbio.variation.freebayes.remove_missingalt(x)' | " "vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort | " "vt normalize -n -r {ref_file} -q - | vcfuniqalleles | vt uniq - 2> /dev/null " "{compress_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {}) return out_file
[ "def", "_run_freebayes_caller", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ",", "somatic", "=", "None", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"...
Detect SNPs and indels with FreeBayes. Performs post-filtering to remove very low quality variants which can cause issues feeding into GATK. Breaks variants into individual allelic primitives for analysis and evaluation.
[ "Detect", "SNPs", "and", "indels", "with", "FreeBayes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L102-L144
236,908
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_check_lods
def _check_lods(parts, tumor_thresh, normal_thresh, indexes): """Ensure likelihoods for tumor and normal pass thresholds. Skipped if no FreeBayes GL annotations available. """ try: gl_index = parts[8].split(":").index("GL") except ValueError: return True try: tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",") if x != "."] if tumor_gls: tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls))) else: tumor_lod = -1.0 # No GL information, no tumor call (so fail it) except IndexError: tumor_lod = -1.0 try: normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",") if x != "."] if normal_gls: normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls))) else: normal_lod = normal_thresh # No GL inofmration, no normal call (so pass it) except IndexError: normal_lod = normal_thresh return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
python
def _check_lods(parts, tumor_thresh, normal_thresh, indexes): try: gl_index = parts[8].split(":").index("GL") except ValueError: return True try: tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",") if x != "."] if tumor_gls: tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls))) else: tumor_lod = -1.0 # No GL information, no tumor call (so fail it) except IndexError: tumor_lod = -1.0 try: normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",") if x != "."] if normal_gls: normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls))) else: normal_lod = normal_thresh # No GL inofmration, no normal call (so pass it) except IndexError: normal_lod = normal_thresh return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
[ "def", "_check_lods", "(", "parts", ",", "tumor_thresh", ",", "normal_thresh", ",", "indexes", ")", ":", "try", ":", "gl_index", "=", "parts", "[", "8", "]", ".", "split", "(", "\":\"", ")", ".", "index", "(", "\"GL\"", ")", "except", "ValueError", ":"...
Ensure likelihoods for tumor and normal pass thresholds. Skipped if no FreeBayes GL annotations available.
[ "Ensure", "likelihoods", "for", "tumor", "and", "normal", "pass", "thresholds", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L197-L224
236,909
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_check_freqs
def _check_freqs(parts, indexes): """Ensure frequency of tumor to normal passes a reasonable threshold. Avoids calling low frequency tumors also present at low frequency in normals, which indicates a contamination or persistent error. """ thresh_ratio = 2.7 try: # FreeBayes ao_index = parts[8].split(":").index("AO") ro_index = parts[8].split(":").index("RO") except ValueError: ao_index, ro_index = None, None try: # VarDict af_index = parts[8].split(":").index("AF") except ValueError: af_index = None if af_index is None and ao_index is None: # okay to skip if a gVCF record if parts[4].find("<*>") == -1: raise NotImplementedError("Unexpected format annotations: %s" % parts[8]) def _calc_freq(item): try: if ao_index is not None and ro_index is not None: ao = sum([int(x) for x in item.split(":")[ao_index].split(",")]) ro = int(item.split(":")[ro_index]) freq = ao / float(ao + ro) elif af_index is not None: freq = float(item.split(":")[af_index]) else: freq = 0.0 except (IndexError, ValueError, ZeroDivisionError): freq = 0.0 return freq tumor_freq, normal_freq = _calc_freq(parts[indexes["tumor"]]), _calc_freq(parts[indexes["normal"]]) return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
python
def _check_freqs(parts, indexes): thresh_ratio = 2.7 try: # FreeBayes ao_index = parts[8].split(":").index("AO") ro_index = parts[8].split(":").index("RO") except ValueError: ao_index, ro_index = None, None try: # VarDict af_index = parts[8].split(":").index("AF") except ValueError: af_index = None if af_index is None and ao_index is None: # okay to skip if a gVCF record if parts[4].find("<*>") == -1: raise NotImplementedError("Unexpected format annotations: %s" % parts[8]) def _calc_freq(item): try: if ao_index is not None and ro_index is not None: ao = sum([int(x) for x in item.split(":")[ao_index].split(",")]) ro = int(item.split(":")[ro_index]) freq = ao / float(ao + ro) elif af_index is not None: freq = float(item.split(":")[af_index]) else: freq = 0.0 except (IndexError, ValueError, ZeroDivisionError): freq = 0.0 return freq tumor_freq, normal_freq = _calc_freq(parts[indexes["tumor"]]), _calc_freq(parts[indexes["normal"]]) return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
[ "def", "_check_freqs", "(", "parts", ",", "indexes", ")", ":", "thresh_ratio", "=", "2.7", "try", ":", "# FreeBayes", "ao_index", "=", "parts", "[", "8", "]", ".", "split", "(", "\":\"", ")", ".", "index", "(", "\"AO\"", ")", "ro_index", "=", "parts", ...
Ensure frequency of tumor to normal passes a reasonable threshold. Avoids calling low frequency tumors also present at low frequency in normals, which indicates a contamination or persistent error.
[ "Ensure", "frequency", "of", "tumor", "to", "normal", "passes", "a", "reasonable", "threshold", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L226-L260
236,910
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
_clean_freebayes_output
def _clean_freebayes_output(line): """Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on. """ if line.startswith("#"): line = line.replace("Type=Int,D", "Type=Integer,D") return line else: parts = line.split("\t") alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()] if len(alleles) == len(set(alleles)): return line return None
python
def _clean_freebayes_output(line): if line.startswith("#"): line = line.replace("Type=Int,D", "Type=Integer,D") return line else: parts = line.split("\t") alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()] if len(alleles) == len(set(alleles)): return line return None
[ "def", "_clean_freebayes_output", "(", "line", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "line", "=", "line", ".", "replace", "(", "\"Type=Int,D\"", ",", "\"Type=Integer,D\"", ")", "return", "line", "else", ":", "parts", "=", "line...
Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on.
[ "Clean", "FreeBayes", "output", "to", "make", "post", "-", "processing", "with", "GATK", "happy", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L332-L353
236,911
bcbio/bcbio-nextgen
bcbio/variation/freebayes.py
clean_vcf_output
def clean_vcf_output(orig_file, clean_fn, config, name="clean"): """Provide framework to clean a file in-place, with the specified clean function. """ base, ext = utils.splitext_plus(orig_file) out_file = "{0}-{1}{2}".format(base, name, ext) if not utils.file_exists(out_file): with open(orig_file) as in_handle: with file_transaction(config, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for line in in_handle: update_line = clean_fn(line) if update_line: out_handle.write(update_line) move_vcf(orig_file, "{0}.orig".format(orig_file)) move_vcf(out_file, orig_file) with open(out_file, "w") as out_handle: out_handle.write("Moved to {0}".format(orig_file))
python
def clean_vcf_output(orig_file, clean_fn, config, name="clean"): base, ext = utils.splitext_plus(orig_file) out_file = "{0}-{1}{2}".format(base, name, ext) if not utils.file_exists(out_file): with open(orig_file) as in_handle: with file_transaction(config, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for line in in_handle: update_line = clean_fn(line) if update_line: out_handle.write(update_line) move_vcf(orig_file, "{0}.orig".format(orig_file)) move_vcf(out_file, orig_file) with open(out_file, "w") as out_handle: out_handle.write("Moved to {0}".format(orig_file))
[ "def", "clean_vcf_output", "(", "orig_file", ",", "clean_fn", ",", "config", ",", "name", "=", "\"clean\"", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "orig_file", ")", "out_file", "=", "\"{0}-{1}{2}\"", ".", "format", "(", "bas...
Provide framework to clean a file in-place, with the specified clean function.
[ "Provide", "framework", "to", "clean", "a", "file", "in", "-", "place", "with", "the", "specified", "clean", "function", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/freebayes.py#L355-L372
236,912
bcbio/bcbio-nextgen
bcbio/variation/effects.py
get_type
def get_type(data): """Retrieve the type of effects calculation to do. """ if data["analysis"].lower().startswith("var") or dd.get_variantcaller(data): return tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
python
def get_type(data): if data["analysis"].lower().startswith("var") or dd.get_variantcaller(data): return tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
[ "def", "get_type", "(", "data", ")", ":", "if", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", ".", "startswith", "(", "\"var\"", ")", "or", "dd", ".", "get_variantcaller", "(", "data", ")", ":", "return", "tz", ".", "get_in", "(", "(", ...
Retrieve the type of effects calculation to do.
[ "Retrieve", "the", "type", "of", "effects", "calculation", "to", "do", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L46-L50
236,913
bcbio/bcbio-nextgen
bcbio/variation/effects.py
prep_vep_cache
def prep_vep_cache(dbkey, ref_file, tooldir=None, config=None): """Ensure correct installation of VEP cache file. """ if config is None: config = {} resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey) if os.path.exists(resource_file): with open(resource_file) as in_handle: resources = yaml.safe_load(in_handle) ensembl_name = tz.get_in(["aliases", "ensembl"], resources) symlink_dir = _special_dbkey_maps(dbkey, ref_file) if ensembl_name and ensembl_name.find("_vep_") == -1: raise ValueError("%s has ensembl an incorrect value." "It should have _vep_ in the name." "Remove line or fix the name to avoid error.") if symlink_dir and ensembl_name: species, vepv = ensembl_name.split("_vep_") return symlink_dir, species elif ensembl_name: species, vepv = ensembl_name.split("_vep_") vep_dir = utils.safe_makedir(os.path.normpath(os.path.join( os.path.dirname(os.path.dirname(ref_file)), "vep"))) out_dir = os.path.join(vep_dir, species, vepv) if not os.path.exists(out_dir): tmp_dir = utils.safe_makedir(os.path.join(vep_dir, species, "txtmp")) eversion = vepv.split("_")[0] url = "http://ftp.ensembl.org/pub/release-%s/variation/VEP/%s.tar.gz" % (eversion, ensembl_name) with utils.chdir(tmp_dir): subprocess.check_call(["wget", "--no-check-certificate", "-c", url]) vep_path = "%s/bin/" % tooldir if tooldir else "" perl_exports = utils.get_perl_exports() cmd = ["%svep_install" % vep_path, "-a", "c", "-s", ensembl_name, "-c", vep_dir, "-u", tmp_dir, "--NO_UPDATE", "--VERSION", eversion] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Prepare VEP directory for %s" % ensembl_name) cmd = ["%svep_convert_cache" % vep_path, "--species", species, "--version", vepv, "--dir", vep_dir, "--force_overwrite", "--remove"] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Convert VEP cache to tabix %s" % ensembl_name) for tmp_fname in os.listdir(tmp_dir): os.remove(os.path.join(tmp_dir, tmp_fname)) os.rmdir(tmp_dir) tmp_dir = os.path.join(vep_dir, "tmp") if 
os.path.exists(tmp_dir): shutil.rmtree(tmp_dir) return vep_dir, species return None, None
python
def prep_vep_cache(dbkey, ref_file, tooldir=None, config=None): if config is None: config = {} resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey) if os.path.exists(resource_file): with open(resource_file) as in_handle: resources = yaml.safe_load(in_handle) ensembl_name = tz.get_in(["aliases", "ensembl"], resources) symlink_dir = _special_dbkey_maps(dbkey, ref_file) if ensembl_name and ensembl_name.find("_vep_") == -1: raise ValueError("%s has ensembl an incorrect value." "It should have _vep_ in the name." "Remove line or fix the name to avoid error.") if symlink_dir and ensembl_name: species, vepv = ensembl_name.split("_vep_") return symlink_dir, species elif ensembl_name: species, vepv = ensembl_name.split("_vep_") vep_dir = utils.safe_makedir(os.path.normpath(os.path.join( os.path.dirname(os.path.dirname(ref_file)), "vep"))) out_dir = os.path.join(vep_dir, species, vepv) if not os.path.exists(out_dir): tmp_dir = utils.safe_makedir(os.path.join(vep_dir, species, "txtmp")) eversion = vepv.split("_")[0] url = "http://ftp.ensembl.org/pub/release-%s/variation/VEP/%s.tar.gz" % (eversion, ensembl_name) with utils.chdir(tmp_dir): subprocess.check_call(["wget", "--no-check-certificate", "-c", url]) vep_path = "%s/bin/" % tooldir if tooldir else "" perl_exports = utils.get_perl_exports() cmd = ["%svep_install" % vep_path, "-a", "c", "-s", ensembl_name, "-c", vep_dir, "-u", tmp_dir, "--NO_UPDATE", "--VERSION", eversion] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Prepare VEP directory for %s" % ensembl_name) cmd = ["%svep_convert_cache" % vep_path, "--species", species, "--version", vepv, "--dir", vep_dir, "--force_overwrite", "--remove"] do.run("%s && %s" % (perl_exports, " ".join(cmd)), "Convert VEP cache to tabix %s" % ensembl_name) for tmp_fname in os.listdir(tmp_dir): os.remove(os.path.join(tmp_dir, tmp_fname)) os.rmdir(tmp_dir) tmp_dir = os.path.join(vep_dir, "tmp") if os.path.exists(tmp_dir): shutil.rmtree(tmp_dir) return 
vep_dir, species return None, None
[ "def", "prep_vep_cache", "(", "dbkey", ",", "ref_file", ",", "tooldir", "=", "None", ",", "config", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "=", "{", "}", "resource_file", "=", "os", ".", "path", ".", "join", "(", "os", ...
Ensure correct installation of VEP cache file.
[ "Ensure", "correct", "installation", "of", "VEP", "cache", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L74-L117
236,914
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_get_G2P
def _get_G2P(data): """ A VEP plugin that uses G2P allelic requirements to assess variants in genes for potential phenotype involvement. """ G2P_file = os.path.realpath(tz.get_in(("genome_resources", "variation", "genotype2phenotype"), data)) args = ["--plugin", "G2P,file:%s" % (G2P_file)] if G2P_file: return args else: return []
python
def _get_G2P(data): G2P_file = os.path.realpath(tz.get_in(("genome_resources", "variation", "genotype2phenotype"), data)) args = ["--plugin", "G2P,file:%s" % (G2P_file)] if G2P_file: return args else: return []
[ "def", "_get_G2P", "(", "data", ")", ":", "G2P_file", "=", "os", ".", "path", ".", "realpath", "(", "tz", ".", "get_in", "(", "(", "\"genome_resources\"", ",", "\"variation\"", ",", "\"genotype2phenotype\"", ")", ",", "data", ")", ")", "args", "=", "[", ...
A VEP plugin that uses G2P allelic requirements to assess variants in genes for potential phenotype involvement.
[ "A", "VEP", "plugin", "that", "uses", "G2P", "allelic", "requirements", "to", "assess", "variants", "in", "genes", "for", "potential", "phenotype", "involvement", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L234-L244
236,915
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_snpeff_args_from_config
def _snpeff_args_from_config(data): """Retrieve snpEff arguments supplied through input configuration. """ config = data["config"] args = ["-hgvs"] # General supplied arguments resources = config_utils.get_resources("snpeff", config) if resources.get("options"): args += [str(x) for x in resources.get("options", [])] # cancer specific calling arguments if vcfutils.get_paired_phenotype(data): args += ["-cancer"] effects_transcripts = dd.get_effects_transcripts(data) if effects_transcripts in set(["canonical_cancer"]): _, snpeff_base_dir = get_db(data) canon_list_file = os.path.join(snpeff_base_dir, "transcripts", "%s.txt" % effects_transcripts) if not utils.file_exists(canon_list_file): raise ValueError("Cannot find expected file for effects_transcripts: %s" % canon_list_file) args += ["-canonList", canon_list_file] elif effects_transcripts == "canonical" or tz.get_in(("config", "algorithm", "clinical_reporting"), data): args += ["-canon"] return args
python
def _snpeff_args_from_config(data): config = data["config"] args = ["-hgvs"] # General supplied arguments resources = config_utils.get_resources("snpeff", config) if resources.get("options"): args += [str(x) for x in resources.get("options", [])] # cancer specific calling arguments if vcfutils.get_paired_phenotype(data): args += ["-cancer"] effects_transcripts = dd.get_effects_transcripts(data) if effects_transcripts in set(["canonical_cancer"]): _, snpeff_base_dir = get_db(data) canon_list_file = os.path.join(snpeff_base_dir, "transcripts", "%s.txt" % effects_transcripts) if not utils.file_exists(canon_list_file): raise ValueError("Cannot find expected file for effects_transcripts: %s" % canon_list_file) args += ["-canonList", canon_list_file] elif effects_transcripts == "canonical" or tz.get_in(("config", "algorithm", "clinical_reporting"), data): args += ["-canon"] return args
[ "def", "_snpeff_args_from_config", "(", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "args", "=", "[", "\"-hgvs\"", "]", "# General supplied arguments", "resources", "=", "config_utils", ".", "get_resources", "(", "\"snpeff\"", ",", "config", ...
Retrieve snpEff arguments supplied through input configuration.
[ "Retrieve", "snpEff", "arguments", "supplied", "through", "input", "configuration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L266-L288
236,916
bcbio/bcbio-nextgen
bcbio/variation/effects.py
get_db
def get_db(data): """Retrieve a snpEff database name and location relative to reference file. """ snpeff_db = utils.get_in(data, ("genome_resources", "aliases", "snpeff")) snpeff_base_dir = None if snpeff_db: snpeff_base_dir = utils.get_in(data, ("reference", "snpeff")) if not (isinstance(snpeff_base_dir, six.string_types) and os.path.isdir(snpeff_base_dir)): snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db)) if not snpeff_base_dir: # We need to mask '.' characters for CWL/WDL processing, check for them here snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db.replace(".", "_"))) if snpeff_base_dir: snpeff_db = snpeff_db.replace("_", ".") if isinstance(snpeff_base_dir, dict) and snpeff_base_dir.get("base"): snpeff_base_dir = snpeff_base_dir["base"] if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types) and os.path.isfile(snpeff_base_dir)): snpeff_base_dir = os.path.dirname(snpeff_base_dir) if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types) and snpeff_base_dir.endswith("%s%s" % (os.path.sep, snpeff_db))): snpeff_base_dir = os.path.dirname(snpeff_base_dir) if not snpeff_base_dir: ref_file = utils.get_in(data, ("reference", "fasta", "base")) snpeff_base_dir = utils.safe_makedir(os.path.normpath(os.path.join( os.path.dirname(os.path.dirname(ref_file)), "snpeff"))) # back compatible retrieval of genome from installation directory if "config" in data and not os.path.exists(os.path.join(snpeff_base_dir, snpeff_db)): snpeff_base_dir, snpeff_db = _installed_snpeff_genome(snpeff_db, data["config"]) if snpeff_base_dir.endswith("/%s" % snpeff_db): snpeff_base_dir = os.path.dirname(snpeff_base_dir) return snpeff_db, snpeff_base_dir
python
def get_db(data): snpeff_db = utils.get_in(data, ("genome_resources", "aliases", "snpeff")) snpeff_base_dir = None if snpeff_db: snpeff_base_dir = utils.get_in(data, ("reference", "snpeff")) if not (isinstance(snpeff_base_dir, six.string_types) and os.path.isdir(snpeff_base_dir)): snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db)) if not snpeff_base_dir: # We need to mask '.' characters for CWL/WDL processing, check for them here snpeff_base_dir = utils.get_in(data, ("reference", "snpeff", snpeff_db.replace(".", "_"))) if snpeff_base_dir: snpeff_db = snpeff_db.replace("_", ".") if isinstance(snpeff_base_dir, dict) and snpeff_base_dir.get("base"): snpeff_base_dir = snpeff_base_dir["base"] if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types) and os.path.isfile(snpeff_base_dir)): snpeff_base_dir = os.path.dirname(snpeff_base_dir) if (snpeff_base_dir and isinstance(snpeff_base_dir, six.string_types) and snpeff_base_dir.endswith("%s%s" % (os.path.sep, snpeff_db))): snpeff_base_dir = os.path.dirname(snpeff_base_dir) if not snpeff_base_dir: ref_file = utils.get_in(data, ("reference", "fasta", "base")) snpeff_base_dir = utils.safe_makedir(os.path.normpath(os.path.join( os.path.dirname(os.path.dirname(ref_file)), "snpeff"))) # back compatible retrieval of genome from installation directory if "config" in data and not os.path.exists(os.path.join(snpeff_base_dir, snpeff_db)): snpeff_base_dir, snpeff_db = _installed_snpeff_genome(snpeff_db, data["config"]) if snpeff_base_dir.endswith("/%s" % snpeff_db): snpeff_base_dir = os.path.dirname(snpeff_base_dir) return snpeff_db, snpeff_base_dir
[ "def", "get_db", "(", "data", ")", ":", "snpeff_db", "=", "utils", ".", "get_in", "(", "data", ",", "(", "\"genome_resources\"", ",", "\"aliases\"", ",", "\"snpeff\"", ")", ")", "snpeff_base_dir", "=", "None", "if", "snpeff_db", ":", "snpeff_base_dir", "=", ...
Retrieve a snpEff database name and location relative to reference file.
[ "Retrieve", "a", "snpEff", "database", "name", "and", "location", "relative", "to", "reference", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L290-L320
236,917
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_get_snpeff_cmd
def _get_snpeff_cmd(cmd_name, datadir, data, out_file): """Retrieve snpEff base command line. """ resources = config_utils.get_resources("snpeff", data["config"]) jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3g"]) # scale by cores, defaulting to 2x base usage to ensure we have enough memory # for single core runs to use with human genomes. # Sets a maximum amount of memory to avoid core dumps exceeding 32Gb # We shouldn't need that much memory for snpEff, so avoid issues # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#compressed_oops jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust": {"direction": "increase", "maximum": "30000M", "magnitude": max(2, dd.get_cores(data))}}}) memory = " ".join(jvm_opts) snpeff = config_utils.get_program("snpEff", data["config"]) java_args = "-Djava.io.tmpdir=%s" % utils.safe_makedir(os.path.join(os.path.dirname(out_file), "tmp")) export = "unset JAVA_HOME && export PATH=%s:\"$PATH\" && " % (utils.get_java_binpath()) cmd = "{export} {snpeff} {memory} {java_args} {cmd_name} -dataDir {datadir}" return cmd.format(**locals())
python
def _get_snpeff_cmd(cmd_name, datadir, data, out_file): resources = config_utils.get_resources("snpeff", data["config"]) jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3g"]) # scale by cores, defaulting to 2x base usage to ensure we have enough memory # for single core runs to use with human genomes. # Sets a maximum amount of memory to avoid core dumps exceeding 32Gb # We shouldn't need that much memory for snpEff, so avoid issues # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#compressed_oops jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust": {"direction": "increase", "maximum": "30000M", "magnitude": max(2, dd.get_cores(data))}}}) memory = " ".join(jvm_opts) snpeff = config_utils.get_program("snpEff", data["config"]) java_args = "-Djava.io.tmpdir=%s" % utils.safe_makedir(os.path.join(os.path.dirname(out_file), "tmp")) export = "unset JAVA_HOME && export PATH=%s:\"$PATH\" && " % (utils.get_java_binpath()) cmd = "{export} {snpeff} {memory} {java_args} {cmd_name} -dataDir {datadir}" return cmd.format(**locals())
[ "def", "_get_snpeff_cmd", "(", "cmd_name", ",", "datadir", ",", "data", ",", "out_file", ")", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"snpeff\"", ",", "data", "[", "\"config\"", "]", ")", "jvm_opts", "=", "resources", ".", "get", ...
Retrieve snpEff base command line.
[ "Retrieve", "snpEff", "base", "command", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L339-L358
236,918
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_run_snpeff
def _run_snpeff(snp_in, out_format, data): """Run effects prediction with snpEff, skipping if snpEff database not present. """ snpeff_db, datadir = get_db(data) if not snpeff_db: return None, None assert os.path.exists(os.path.join(datadir, snpeff_db)), \ "Did not find %s snpEff genome data in %s" % (snpeff_db, datadir) ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv" out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext) stats_file = "%s-stats.html" % utils.splitext_plus(out_file)[0] csv_file = "%s-stats.csv" % utils.splitext_plus(out_file)[0] if not utils.file_exists(out_file): config_args = " ".join(_snpeff_args_from_config(data)) if ext.endswith(".gz"): bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"]) else: bgzip_cmd = "" with file_transaction(data, out_file) as tx_out_file: snpeff_cmd = _get_snpeff_cmd("eff", datadir, data, tx_out_file) cmd = ("{snpeff_cmd} {config_args} -noLog -i vcf -o {out_format} " "-csvStats {csv_file} -s {stats_file} {snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "snpEff effects", data) if ext.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file, [stats_file, csv_file]
python
def _run_snpeff(snp_in, out_format, data): snpeff_db, datadir = get_db(data) if not snpeff_db: return None, None assert os.path.exists(os.path.join(datadir, snpeff_db)), \ "Did not find %s snpEff genome data in %s" % (snpeff_db, datadir) ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv" out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext) stats_file = "%s-stats.html" % utils.splitext_plus(out_file)[0] csv_file = "%s-stats.csv" % utils.splitext_plus(out_file)[0] if not utils.file_exists(out_file): config_args = " ".join(_snpeff_args_from_config(data)) if ext.endswith(".gz"): bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"]) else: bgzip_cmd = "" with file_transaction(data, out_file) as tx_out_file: snpeff_cmd = _get_snpeff_cmd("eff", datadir, data, tx_out_file) cmd = ("{snpeff_cmd} {config_args} -noLog -i vcf -o {out_format} " "-csvStats {csv_file} -s {stats_file} {snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "snpEff effects", data) if ext.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file, [stats_file, csv_file]
[ "def", "_run_snpeff", "(", "snp_in", ",", "out_format", ",", "data", ")", ":", "snpeff_db", ",", "datadir", "=", "get_db", "(", "data", ")", "if", "not", "snpeff_db", ":", "return", "None", ",", "None", "assert", "os", ".", "path", ".", "exists", "(", ...
Run effects prediction with snpEff, skipping if snpEff database not present.
[ "Run", "effects", "prediction", "with", "snpEff", "skipping", "if", "snpEff", "database", "not", "present", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L360-L386
236,919
bcbio/bcbio-nextgen
bcbio/variation/effects.py
_installed_snpeff_genome
def _installed_snpeff_genome(base_name, config): """Find the most recent installed genome for snpEff with the given name. """ snpeff_config_file = os.path.join(config_utils.get_program("snpeff", config, "dir"), "snpEff.config") if os.path.exists(snpeff_config_file): data_dir = _find_snpeff_datadir(snpeff_config_file) dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True) if os.path.isdir(d)] else: data_dir = None dbs = [] if len(dbs) == 0: raise ValueError("No database found in %s for %s" % (data_dir, base_name)) else: return data_dir, os.path.split(dbs[0])[-1]
python
def _installed_snpeff_genome(base_name, config): snpeff_config_file = os.path.join(config_utils.get_program("snpeff", config, "dir"), "snpEff.config") if os.path.exists(snpeff_config_file): data_dir = _find_snpeff_datadir(snpeff_config_file) dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True) if os.path.isdir(d)] else: data_dir = None dbs = [] if len(dbs) == 0: raise ValueError("No database found in %s for %s" % (data_dir, base_name)) else: return data_dir, os.path.split(dbs[0])[-1]
[ "def", "_installed_snpeff_genome", "(", "base_name", ",", "config", ")", ":", "snpeff_config_file", "=", "os", ".", "path", ".", "join", "(", "config_utils", ".", "get_program", "(", "\"snpeff\"", ",", "config", ",", "\"dir\"", ")", ",", "\"snpEff.config\"", "...
Find the most recent installed genome for snpEff with the given name.
[ "Find", "the", "most", "recent", "installed", "genome", "for", "snpEff", "with", "the", "given", "name", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L400-L415
236,920
bcbio/bcbio-nextgen
bcbio/ngsalign/minimap2.py
remap_index_fn
def remap_index_fn(ref_file): """minimap2 can build indexes on the fly but will also store commons ones. """ index_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "minimap2") if os.path.exists(index_dir) and os.path.isdir(index_dir): return index_dir else: return os.path.dirname(ref_file)
python
def remap_index_fn(ref_file): index_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "minimap2") if os.path.exists(index_dir) and os.path.isdir(index_dir): return index_dir else: return os.path.dirname(ref_file)
[ "def", "remap_index_fn", "(", "ref_file", ")", ":", "index_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "ref_file", ")", ",", "os", ".", "pardir", ",", "\"minimap2\"", ")", "if", "os", ".", "path", ".", "e...
minimap2 can build indexes on the fly but will also store commons ones.
[ "minimap2", "can", "build", "indexes", "on", "the", "fly", "but", "will", "also", "store", "commons", "ones", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/minimap2.py#L44-L51
236,921
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
create_new_csv
def create_new_csv(samples, args): """create csv file that can be use with bcbio -w template""" out_fn = os.path.splitext(args.csv)[0] + "-merged.csv" logger.info("Preparing new csv: %s" % out_fn) with file_transaction(out_fn) as tx_out: with open(tx_out, 'w') as handle: handle.write(_header(args.csv)) for s in samples: sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file']) handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
python
def create_new_csv(samples, args): out_fn = os.path.splitext(args.csv)[0] + "-merged.csv" logger.info("Preparing new csv: %s" % out_fn) with file_transaction(out_fn) as tx_out: with open(tx_out, 'w') as handle: handle.write(_header(args.csv)) for s in samples: sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file']) handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
[ "def", "create_new_csv", "(", "samples", ",", "args", ")", ":", "out_fn", "=", "os", ".", "path", ".", "splitext", "(", "args", ".", "csv", ")", "[", "0", "]", "+", "\"-merged.csv\"", "logger", ".", "info", "(", "\"Preparing new csv: %s\"", "%", "out_fn"...
create csv file that can be use with bcbio -w template
[ "create", "csv", "file", "that", "can", "be", "use", "with", "bcbio", "-", "w", "template" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L23-L32
236,922
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
_get_samples_to_process
def _get_samples_to_process(fn, out_dir, config, force_single, separators): """parse csv file with one line per file. It will merge all files that have the same description name""" out_dir = os.path.abspath(out_dir) samples = defaultdict(list) with open(fn) as handle: for l in handle: if l.find("description") > 0: logger.info("Skipping header.") continue cols = l.strip().split(",") if len(cols) > 0: if len(cols) < 2: raise ValueError("Line needs 2 values: file and name.") if utils.file_exists(cols[0]) or is_gsm(cols[0]) or is_srr(cols[0]): if cols[0].find(" ") > -1: new_name = os.path.abspath(cols[0].replace(" ", "_")) logger.warning("Space finds in %s. Linked to %s." % (cols[0], new_name)) logger.warning("Please, avoid names with spaces in the future.") utils.symlink_plus(os.path.abspath(cols[0]), new_name) cols[0] = new_name samples[cols[1]].append(cols) else: logger.info("skipping %s, File doesn't exist." % cols[0]) for sample, items in samples.items(): if is_fastq(items[0][0], True): fn = "fq_merge" ext = ".fastq.gz" elif is_bam(items[0][0]): fn = "bam_merge" ext = ".bam" elif is_gsm(items[0][0]): fn = "query_gsm" ext = ".fastq.gz" elif is_srr(items[0][0]): fn = "query_srr" ext = ".fastq.gz" files = [os.path.abspath(fn_file[0]) if utils.file_exists(fn_file[0]) else fn_file[0] for fn_file in items] samples[sample] = [{'files': _check_paired(files, force_single, separators), 'out_file': os.path.join(out_dir, sample + ext), 'fn': fn, 'anno': items[0][2:], 'config': config, 'name': sample, 'out_dir': out_dir}] return [samples[sample] for sample in samples]
python
def _get_samples_to_process(fn, out_dir, config, force_single, separators): out_dir = os.path.abspath(out_dir) samples = defaultdict(list) with open(fn) as handle: for l in handle: if l.find("description") > 0: logger.info("Skipping header.") continue cols = l.strip().split(",") if len(cols) > 0: if len(cols) < 2: raise ValueError("Line needs 2 values: file and name.") if utils.file_exists(cols[0]) or is_gsm(cols[0]) or is_srr(cols[0]): if cols[0].find(" ") > -1: new_name = os.path.abspath(cols[0].replace(" ", "_")) logger.warning("Space finds in %s. Linked to %s." % (cols[0], new_name)) logger.warning("Please, avoid names with spaces in the future.") utils.symlink_plus(os.path.abspath(cols[0]), new_name) cols[0] = new_name samples[cols[1]].append(cols) else: logger.info("skipping %s, File doesn't exist." % cols[0]) for sample, items in samples.items(): if is_fastq(items[0][0], True): fn = "fq_merge" ext = ".fastq.gz" elif is_bam(items[0][0]): fn = "bam_merge" ext = ".bam" elif is_gsm(items[0][0]): fn = "query_gsm" ext = ".fastq.gz" elif is_srr(items[0][0]): fn = "query_srr" ext = ".fastq.gz" files = [os.path.abspath(fn_file[0]) if utils.file_exists(fn_file[0]) else fn_file[0] for fn_file in items] samples[sample] = [{'files': _check_paired(files, force_single, separators), 'out_file': os.path.join(out_dir, sample + ext), 'fn': fn, 'anno': items[0][2:], 'config': config, 'name': sample, 'out_dir': out_dir}] return [samples[sample] for sample in samples]
[ "def", "_get_samples_to_process", "(", "fn", ",", "out_dir", ",", "config", ",", "force_single", ",", "separators", ")", ":", "out_dir", "=", "os", ".", "path", ".", "abspath", "(", "out_dir", ")", "samples", "=", "defaultdict", "(", "list", ")", "with", ...
parse csv file with one line per file. It will merge all files that have the same description name
[ "parse", "csv", "file", "with", "one", "line", "per", "file", ".", "It", "will", "merge", "all", "files", "that", "have", "the", "same", "description", "name" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L41-L83
236,923
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
_check_stems
def _check_stems(files): """check if stem names are the same and use full path then""" used = set() for fn in files: if os.path.basename(fn) in used: logger.warning("%s stem is multiple times in your file list, " "so we don't know " "how to assign it to the sample data in the CSV. " "We are gonna use full path to make a difference, " "that means paired files should be in the same folder. " "If this is a problem, you should rename the files you want " "to merge. Sorry, no possible magic here." % os.path.basename(fn) ) return True used.add(os.path.basename(fn)) return False
python
def _check_stems(files): used = set() for fn in files: if os.path.basename(fn) in used: logger.warning("%s stem is multiple times in your file list, " "so we don't know " "how to assign it to the sample data in the CSV. " "We are gonna use full path to make a difference, " "that means paired files should be in the same folder. " "If this is a problem, you should rename the files you want " "to merge. Sorry, no possible magic here." % os.path.basename(fn) ) return True used.add(os.path.basename(fn)) return False
[ "def", "_check_stems", "(", "files", ")", ":", "used", "=", "set", "(", ")", "for", "fn", "in", "files", ":", "if", "os", ".", "path", ".", "basename", "(", "fn", ")", "in", "used", ":", "logger", ".", "warning", "(", "\"%s stem is multiple times in yo...
check if stem names are the same and use full path then
[ "check", "if", "stem", "names", "are", "the", "same", "and", "use", "full", "path", "then" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L86-L101
236,924
bcbio/bcbio-nextgen
scripts/bcbio_prepare_samples.py
get_cluster_view
def get_cluster_view(p): """get ipython running""" from cluster_helper import cluster as ipc return ipc.cluster_view(p['scheduler'], p['queue'], p['num_jobs'], p['cores_per_job'], start_wait=p['timeout'], extra_params={"resources": p['resources'], "mem": p['mem'], "tag": p['tag'], "run_local": False})
python
def get_cluster_view(p): from cluster_helper import cluster as ipc return ipc.cluster_view(p['scheduler'], p['queue'], p['num_jobs'], p['cores_per_job'], start_wait=p['timeout'], extra_params={"resources": p['resources'], "mem": p['mem'], "tag": p['tag'], "run_local": False})
[ "def", "get_cluster_view", "(", "p", ")", ":", "from", "cluster_helper", "import", "cluster", "as", "ipc", "return", "ipc", ".", "cluster_view", "(", "p", "[", "'scheduler'", "]", ",", "p", "[", "'queue'", "]", ",", "p", "[", "'num_jobs'", "]", ",", "p...
get ipython running
[ "get", "ipython", "running" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_prepare_samples.py#L114-L117
236,925
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
from_sample
def from_sample(sample): """Upload results of processing from an analysis pipeline sample. """ upload_config = sample.get("upload") if upload_config: approach = _approaches[upload_config.get("method", "filesystem")] for finfo in _get_files(sample): approach.update_file(finfo, sample, upload_config) return [[sample]]
python
def from_sample(sample): upload_config = sample.get("upload") if upload_config: approach = _approaches[upload_config.get("method", "filesystem")] for finfo in _get_files(sample): approach.update_file(finfo, sample, upload_config) return [[sample]]
[ "def", "from_sample", "(", "sample", ")", ":", "upload_config", "=", "sample", ".", "get", "(", "\"upload\"", ")", "if", "upload_config", ":", "approach", "=", "_approaches", "[", "upload_config", ".", "get", "(", "\"method\"", ",", "\"filesystem\"", ")", "]...
Upload results of processing from an analysis pipeline sample.
[ "Upload", "results", "of", "processing", "from", "an", "analysis", "pipeline", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L29-L37
236,926
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_files
def _get_files(sample): """Retrieve files for the sample, dispatching by analysis type. Each file is a dictionary containing the path plus associated metadata about the file and pipeline versions. """ analysis = sample.get("analysis") if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]: return _get_files_variantcall(sample) elif analysis.lower() in ["rna-seq", "fastrna-seq"]: return _get_files_rnaseq(sample) elif analysis.lower() in ["smallrna-seq"]: return _get_files_srnaseq(sample) elif analysis.lower() in ["chip-seq"]: return _get_files_chipseq(sample) elif analysis.lower() in ["scrna-seq"]: return _get_files_scrnaseq(sample) else: return []
python
def _get_files(sample): analysis = sample.get("analysis") if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]: return _get_files_variantcall(sample) elif analysis.lower() in ["rna-seq", "fastrna-seq"]: return _get_files_rnaseq(sample) elif analysis.lower() in ["smallrna-seq"]: return _get_files_srnaseq(sample) elif analysis.lower() in ["chip-seq"]: return _get_files_chipseq(sample) elif analysis.lower() in ["scrna-seq"]: return _get_files_scrnaseq(sample) else: return []
[ "def", "_get_files", "(", "sample", ")", ":", "analysis", "=", "sample", ".", "get", "(", "\"analysis\"", ")", "if", "analysis", ".", "lower", "(", ")", "in", "[", "\"variant\"", ",", "\"snp calling\"", ",", "\"variant2\"", ",", "\"standard\"", "]", ":", ...
Retrieve files for the sample, dispatching by analysis type. Each file is a dictionary containing the path plus associated metadata about the file and pipeline versions.
[ "Retrieve", "files", "for", "the", "sample", "dispatching", "by", "analysis", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L56-L74
236,927
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_add_meta
def _add_meta(xs, sample=None, config=None): """Add top level information about the sample or flowcell to output. Sorts outputs into sample names (sample input) and project (config input). """ out = [] for x in xs: if not isinstance(x["path"], six.string_types) or not os.path.exists(x["path"]): raise ValueError("Unexpected path for upload: %s" % x) x["mtime"] = shared.get_file_timestamp(x["path"]) if sample: sample_name = dd.get_sample_name(sample) if "sample" not in x: x["sample"] = sample_name elif x["sample"] != sample_name: x["run"] = sample_name if config: fc_name = config.get("fc_name") or "project" fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d") x["run"] = "%s_%s" % (fc_date, fc_name) out.append(x) return out
python
def _add_meta(xs, sample=None, config=None): out = [] for x in xs: if not isinstance(x["path"], six.string_types) or not os.path.exists(x["path"]): raise ValueError("Unexpected path for upload: %s" % x) x["mtime"] = shared.get_file_timestamp(x["path"]) if sample: sample_name = dd.get_sample_name(sample) if "sample" not in x: x["sample"] = sample_name elif x["sample"] != sample_name: x["run"] = sample_name if config: fc_name = config.get("fc_name") or "project" fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d") x["run"] = "%s_%s" % (fc_date, fc_name) out.append(x) return out
[ "def", "_add_meta", "(", "xs", ",", "sample", "=", "None", ",", "config", "=", "None", ")", ":", "out", "=", "[", "]", "for", "x", "in", "xs", ":", "if", "not", "isinstance", "(", "x", "[", "\"path\"", "]", ",", "six", ".", "string_types", ")", ...
Add top level information about the sample or flowcell to output. Sorts outputs into sample names (sample input) and project (config input).
[ "Add", "top", "level", "information", "about", "the", "sample", "or", "flowcell", "to", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L123-L144
236,928
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_files_variantcall
def _get_files_variantcall(sample): """Return output files for the variant calling pipeline. """ out = [] algorithm = sample["config"]["algorithm"] out = _maybe_add_summary(algorithm, sample, out) out = _maybe_add_alignment(algorithm, sample, out) out = _maybe_add_callable(sample, out) out = _maybe_add_disambiguate(algorithm, sample, out) out = _maybe_add_variant_file(algorithm, sample, out) out = _maybe_add_sv(algorithm, sample, out) out = _maybe_add_hla(algorithm, sample, out) out = _maybe_add_heterogeneity(algorithm, sample, out) out = _maybe_add_validate(algorithm, sample, out) return _add_meta(out, sample)
python
def _get_files_variantcall(sample): out = [] algorithm = sample["config"]["algorithm"] out = _maybe_add_summary(algorithm, sample, out) out = _maybe_add_alignment(algorithm, sample, out) out = _maybe_add_callable(sample, out) out = _maybe_add_disambiguate(algorithm, sample, out) out = _maybe_add_variant_file(algorithm, sample, out) out = _maybe_add_sv(algorithm, sample, out) out = _maybe_add_hla(algorithm, sample, out) out = _maybe_add_heterogeneity(algorithm, sample, out) out = _maybe_add_validate(algorithm, sample, out) return _add_meta(out, sample)
[ "def", "_get_files_variantcall", "(", "sample", ")", ":", "out", "=", "[", "]", "algorithm", "=", "sample", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "out", "=", "_maybe_add_summary", "(", "algorithm", ",", "sample", ",", "out", ")", "out", "=", ...
Return output files for the variant calling pipeline.
[ "Return", "output", "files", "for", "the", "variant", "calling", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L146-L161
236,929
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_maybe_add_callable
def _maybe_add_callable(data, out): """Add callable and depth regions to output folder. """ callable_bed = dd.get_sample_callable(data) if callable_bed: out.append({"path": callable_bed, "type": "bed", "ext": "callable"}) perbase_bed = tz.get_in(["depth", "variant_regions", "per_base"], data) if perbase_bed: out.append({"path": perbase_bed, "type": "bed.gz", "ext": "depth-per-base"}) return out
python
def _maybe_add_callable(data, out): callable_bed = dd.get_sample_callable(data) if callable_bed: out.append({"path": callable_bed, "type": "bed", "ext": "callable"}) perbase_bed = tz.get_in(["depth", "variant_regions", "per_base"], data) if perbase_bed: out.append({"path": perbase_bed, "type": "bed.gz", "ext": "depth-per-base"}) return out
[ "def", "_maybe_add_callable", "(", "data", ",", "out", ")", ":", "callable_bed", "=", "dd", ".", "get_sample_callable", "(", "data", ")", "if", "callable_bed", ":", "out", ".", "append", "(", "{", "\"path\"", ":", "callable_bed", ",", "\"type\"", ":", "\"b...
Add callable and depth regions to output folder.
[ "Add", "callable", "and", "depth", "regions", "to", "output", "folder", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L183-L192
236,930
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_batch_name
def _get_batch_name(sample): """Retrieve batch name for use in SV calling outputs. Handles multiple batches split via SV calling. """ batch = dd.get_batch(sample) or dd.get_sample_name(sample) if isinstance(batch, (list, tuple)) and len(batch) > 1: batch = dd.get_sample_name(sample) return batch
python
def _get_batch_name(sample): batch = dd.get_batch(sample) or dd.get_sample_name(sample) if isinstance(batch, (list, tuple)) and len(batch) > 1: batch = dd.get_sample_name(sample) return batch
[ "def", "_get_batch_name", "(", "sample", ")", ":", "batch", "=", "dd", ".", "get_batch", "(", "sample", ")", "or", "dd", ".", "get_sample_name", "(", "sample", ")", "if", "isinstance", "(", "batch", ",", "(", "list", ",", "tuple", ")", ")", "and", "l...
Retrieve batch name for use in SV calling outputs. Handles multiple batches split via SV calling.
[ "Retrieve", "batch", "name", "for", "use", "in", "SV", "calling", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L236-L244
236,931
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_sample_variant_file_in_population
def _sample_variant_file_in_population(x): """Check if a sample file is the same as the population file. This is true for batches where we don't extract into samples and do not run decomposition for gemini. '""" if "population" in x: a = _get_project_vcf(x) b = _get_variant_file(x, ("vrn_file",)) decomposed = tz.get_in(("population", "decomposed"), x) if (a and b and not decomposed and len(a) > 0 and len(b) > 0 and vcfutils.get_samples(a[0]["path"]) == vcfutils.get_samples(b[0]["path"])): return True return False
python
def _sample_variant_file_in_population(x): """Check if a sample file is the same as the population file. This is true for batches where we don't extract into samples and do not run decomposition for gemini. '""" if "population" in x: a = _get_project_vcf(x) b = _get_variant_file(x, ("vrn_file",)) decomposed = tz.get_in(("population", "decomposed"), x) if (a and b and not decomposed and len(a) > 0 and len(b) > 0 and vcfutils.get_samples(a[0]["path"]) == vcfutils.get_samples(b[0]["path"])): return True return False
[ "def", "_sample_variant_file_in_population", "(", "x", ")", ":", "if", "\"population\"", "in", "x", ":", "a", "=", "_get_project_vcf", "(", "x", ")", "b", "=", "_get_variant_file", "(", "x", ",", "(", "\"vrn_file\"", ",", ")", ")", "decomposed", "=", "tz",...
Check if a sample file is the same as the population file. This is true for batches where we don't extract into samples and do not run decomposition for gemini.
[ "Check", "if", "a", "sample", "file", "is", "the", "same", "as", "the", "population", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L316-L329
236,932
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_variant_file
def _get_variant_file(x, key, suffix="", sample=None, ignore_do_upload=False): """Retrieve VCF file with the given key if it exists, handling bgzipped. """ out = [] fname = utils.get_in(x, key) upload_key = list(key) upload_key[-1] = "do_upload" do_upload = tz.get_in(tuple(upload_key), x, True) if fname and (ignore_do_upload or do_upload): if fname.endswith(".vcf.gz"): out.append({"path": fname, "type": "vcf.gz", "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) if utils.file_exists(fname + ".tbi"): out.append({"path": fname + ".tbi", "type": "vcf.gz.tbi", "index": True, "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn", ".txt", ".tsv")): ftype = utils.splitext_plus(fname)[-1][1:] if ftype == "txt": extended_ftype = fname.split("-")[-1] if "/" not in extended_ftype: ftype = extended_ftype out.append({"path": fname, "type": ftype, "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) if sample: out_sample = [] for x in out: x["sample"] = sample out_sample.append(x) return out_sample else: return out
python
def _get_variant_file(x, key, suffix="", sample=None, ignore_do_upload=False): out = [] fname = utils.get_in(x, key) upload_key = list(key) upload_key[-1] = "do_upload" do_upload = tz.get_in(tuple(upload_key), x, True) if fname and (ignore_do_upload or do_upload): if fname.endswith(".vcf.gz"): out.append({"path": fname, "type": "vcf.gz", "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) if utils.file_exists(fname + ".tbi"): out.append({"path": fname + ".tbi", "type": "vcf.gz.tbi", "index": True, "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn", ".txt", ".tsv")): ftype = utils.splitext_plus(fname)[-1][1:] if ftype == "txt": extended_ftype = fname.split("-")[-1] if "/" not in extended_ftype: ftype = extended_ftype out.append({"path": fname, "type": ftype, "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) if sample: out_sample = [] for x in out: x["sample"] = sample out_sample.append(x) return out_sample else: return out
[ "def", "_get_variant_file", "(", "x", ",", "key", ",", "suffix", "=", "\"\"", ",", "sample", "=", "None", ",", "ignore_do_upload", "=", "False", ")", ":", "out", "=", "[", "]", "fname", "=", "utils", ".", "get_in", "(", "x", ",", "key", ")", "uploa...
Retrieve VCF file with the given key if it exists, handling bgzipped.
[ "Retrieve", "VCF", "file", "with", "the", "given", "key", "if", "it", "exists", "handling", "bgzipped", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L331-L368
236,933
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_add_batch
def _add_batch(x, sample): """Potentially add batch name to an upload file. """ added = False for batch in sorted(dd.get_batches(sample) or [], key=len, reverse=True): if batch and os.path.basename(x["path"]).startswith(("%s-" % batch, "%s.vcf" % batch)): x["batch"] = batch added = True break if not added: x["batch"] = dd.get_sample_name(sample) return x
python
def _add_batch(x, sample): added = False for batch in sorted(dd.get_batches(sample) or [], key=len, reverse=True): if batch and os.path.basename(x["path"]).startswith(("%s-" % batch, "%s.vcf" % batch)): x["batch"] = batch added = True break if not added: x["batch"] = dd.get_sample_name(sample) return x
[ "def", "_add_batch", "(", "x", ",", "sample", ")", ":", "added", "=", "False", "for", "batch", "in", "sorted", "(", "dd", ".", "get_batches", "(", "sample", ")", "or", "[", "]", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", ":", "if",...
Potentially add batch name to an upload file.
[ "Potentially", "add", "batch", "name", "to", "an", "upload", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L638-L649
236,934
bcbio/bcbio-nextgen
bcbio/upload/__init__.py
_get_project_vcf
def _get_project_vcf(x, suffix=""): """Get our project VCF, either from the population or the variant batch file. """ vcfs = _get_variant_file(x, ("population", "vcf"), suffix=suffix) if not vcfs: vcfs = _get_variant_file(x, ("vrn_file_batch", ), suffix=suffix, ignore_do_upload=True) if not vcfs and x.get("variantcaller") == "ensemble": vcfs = _get_variant_file(x, ("vrn_file", ), suffix=suffix) return vcfs
python
def _get_project_vcf(x, suffix=""): vcfs = _get_variant_file(x, ("population", "vcf"), suffix=suffix) if not vcfs: vcfs = _get_variant_file(x, ("vrn_file_batch", ), suffix=suffix, ignore_do_upload=True) if not vcfs and x.get("variantcaller") == "ensemble": vcfs = _get_variant_file(x, ("vrn_file", ), suffix=suffix) return vcfs
[ "def", "_get_project_vcf", "(", "x", ",", "suffix", "=", "\"\"", ")", ":", "vcfs", "=", "_get_variant_file", "(", "x", ",", "(", "\"population\"", ",", "\"vcf\"", ")", ",", "suffix", "=", "suffix", ")", "if", "not", "vcfs", ":", "vcfs", "=", "_get_vari...
Get our project VCF, either from the population or the variant batch file.
[ "Get", "our", "project", "VCF", "either", "from", "the", "population", "or", "the", "variant", "batch", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/__init__.py#L651-L659
236,935
bcbio/bcbio-nextgen
scripts/utils/resort_bam_karyotype.py
_id_remapper
def _id_remapper(orig, new): """Provide a dictionary remapping original read indexes to new indexes. When re-ordering the header, the individual read identifiers need to be updated as well. """ new_chrom_to_index = {} for i_n, (chr_n, _) in enumerate(new): new_chrom_to_index[chr_n] = i_n remap_indexes = {} for i_o, (chr_o, _) in enumerate(orig): if chr_o in new_chrom_to_index.keys(): remap_indexes[i_o] = new_chrom_to_index[chr_o] remap_indexes[None] = None return remap_indexes
python
def _id_remapper(orig, new): new_chrom_to_index = {} for i_n, (chr_n, _) in enumerate(new): new_chrom_to_index[chr_n] = i_n remap_indexes = {} for i_o, (chr_o, _) in enumerate(orig): if chr_o in new_chrom_to_index.keys(): remap_indexes[i_o] = new_chrom_to_index[chr_o] remap_indexes[None] = None return remap_indexes
[ "def", "_id_remapper", "(", "orig", ",", "new", ")", ":", "new_chrom_to_index", "=", "{", "}", "for", "i_n", ",", "(", "chr_n", ",", "_", ")", "in", "enumerate", "(", "new", ")", ":", "new_chrom_to_index", "[", "chr_n", "]", "=", "i_n", "remap_indexes"...
Provide a dictionary remapping original read indexes to new indexes. When re-ordering the header, the individual read identifiers need to be updated as well.
[ "Provide", "a", "dictionary", "remapping", "original", "read", "indexes", "to", "new", "indexes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/resort_bam_karyotype.py#L67-L81
236,936
bcbio/bcbio-nextgen
scripts/bcbio_setup_genome.py
_clean_rec_name
def _clean_rec_name(rec): """Clean illegal characters in input fasta file which cause problems downstream. """ out_id = [] for char in list(rec.id): if char in ALLOWED_CONTIG_NAME_CHARS: out_id.append(char) else: out_id.append("_") rec.id = "".join(out_id) rec.description = "" return rec
python
def _clean_rec_name(rec): out_id = [] for char in list(rec.id): if char in ALLOWED_CONTIG_NAME_CHARS: out_id.append(char) else: out_id.append("_") rec.id = "".join(out_id) rec.description = "" return rec
[ "def", "_clean_rec_name", "(", "rec", ")", ":", "out_id", "=", "[", "]", "for", "char", "in", "list", "(", "rec", ".", "id", ")", ":", "if", "char", "in", "ALLOWED_CONTIG_NAME_CHARS", ":", "out_id", ".", "append", "(", "char", ")", "else", ":", "out_...
Clean illegal characters in input fasta file which cause problems downstream.
[ "Clean", "illegal", "characters", "in", "input", "fasta", "file", "which", "cause", "problems", "downstream", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_setup_genome.py#L151-L162
236,937
bcbio/bcbio-nextgen
bcbio/qc/kraken.py
run
def run(_, data, out_dir): """Run kraken, generating report in specified directory and parsing metrics. Using only first paired reads. """ # logger.info("Number of aligned reads < than 0.60 in %s: %s" % (dd.get_sample_name(data), ratio)) logger.info("Running kraken to determine contaminant: %s" % dd.get_sample_name(data)) # ratio = bam.get_aligned_reads(bam_file, data) out = out_stats = None db = tz.get_in(["config", "algorithm", "kraken"], data) if db and isinstance(db, (list, tuple)): db = db[0] kraken_cmd = config_utils.get_program("kraken", data["config"]) if db == "minikraken": db = os.path.join(install._get_data_dir(), "genomes", "kraken", "minikraken") if not os.path.exists(db): logger.info("kraken: no database found %s, skipping" % db) return {"kraken_report": "null"} if not os.path.exists(os.path.join(out_dir, "kraken_out")): work_dir = os.path.dirname(out_dir) utils.safe_makedir(work_dir) num_cores = data["config"]["algorithm"].get("num_cores", 1) fn_file = data["files_orig"][0] if dd.get_save_diskspace(data) else data["files"][0] if fn_file.endswith("bam"): logger.info("kraken: need fastq files as input") return {"kraken_report": "null"} with tx_tmpdir(data) as tx_tmp_dir: with utils.chdir(tx_tmp_dir): out = os.path.join(tx_tmp_dir, "kraken_out") out_stats = os.path.join(tx_tmp_dir, "kraken_stats") cat = "zcat" if fn_file.endswith(".gz") else "cat" cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick " "--preload --min-hits 2 " "--threads {num_cores} " "--output {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals()) do.run(cl, "kraken: %s" % dd.get_sample_name(data)) if os.path.exists(out_dir): shutil.rmtree(out_dir) shutil.move(tx_tmp_dir, out_dir) metrics = _parse_kraken_output(out_dir, db, data) return metrics
python
def run(_, data, out_dir): # logger.info("Number of aligned reads < than 0.60 in %s: %s" % (dd.get_sample_name(data), ratio)) logger.info("Running kraken to determine contaminant: %s" % dd.get_sample_name(data)) # ratio = bam.get_aligned_reads(bam_file, data) out = out_stats = None db = tz.get_in(["config", "algorithm", "kraken"], data) if db and isinstance(db, (list, tuple)): db = db[0] kraken_cmd = config_utils.get_program("kraken", data["config"]) if db == "minikraken": db = os.path.join(install._get_data_dir(), "genomes", "kraken", "minikraken") if not os.path.exists(db): logger.info("kraken: no database found %s, skipping" % db) return {"kraken_report": "null"} if not os.path.exists(os.path.join(out_dir, "kraken_out")): work_dir = os.path.dirname(out_dir) utils.safe_makedir(work_dir) num_cores = data["config"]["algorithm"].get("num_cores", 1) fn_file = data["files_orig"][0] if dd.get_save_diskspace(data) else data["files"][0] if fn_file.endswith("bam"): logger.info("kraken: need fastq files as input") return {"kraken_report": "null"} with tx_tmpdir(data) as tx_tmp_dir: with utils.chdir(tx_tmp_dir): out = os.path.join(tx_tmp_dir, "kraken_out") out_stats = os.path.join(tx_tmp_dir, "kraken_stats") cat = "zcat" if fn_file.endswith(".gz") else "cat" cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick " "--preload --min-hits 2 " "--threads {num_cores} " "--output {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals()) do.run(cl, "kraken: %s" % dd.get_sample_name(data)) if os.path.exists(out_dir): shutil.rmtree(out_dir) shutil.move(tx_tmp_dir, out_dir) metrics = _parse_kraken_output(out_dir, db, data) return metrics
[ "def", "run", "(", "_", ",", "data", ",", "out_dir", ")", ":", "# logger.info(\"Number of aligned reads < than 0.60 in %s: %s\" % (dd.get_sample_name(data), ratio))", "logger", ".", "info", "(", "\"Running kraken to determine contaminant: %s\"", "%", "dd", ".", "get_sample_name...
Run kraken, generating report in specified directory and parsing metrics. Using only first paired reads.
[ "Run", "kraken", "generating", "report", "in", "specified", "directory", "and", "parsing", "metrics", ".", "Using", "only", "first", "paired", "reads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/kraken.py#L16-L57
236,938
bcbio/bcbio-nextgen
bcbio/qc/kraken.py
_parse_kraken_output
def _parse_kraken_output(out_dir, db, data): """Parse kraken stat info comming from stderr, generating report with kraken-report """ in_file = os.path.join(out_dir, "kraken_out") stat_file = os.path.join(out_dir, "kraken_stats") out_file = os.path.join(out_dir, "kraken_summary") kraken_cmd = config_utils.get_program("kraken-report", data["config"]) classify = unclassify = None with open(stat_file, 'r') as handle: for line in handle: if line.find(" classified") > -1: classify = line[line.find("(") + 1:line.find(")")] if line.find(" unclassified") > -1: unclassify = line[line.find("(") + 1:line.find(")")] if os.path.getsize(in_file) > 0 and not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals()) do.run(cl, "kraken report: %s" % dd.get_sample_name(data)) kraken = {"kraken_clas": classify, "kraken_unclas": unclassify} kraken_sum = _summarize_kraken(out_file) kraken.update(kraken_sum) return kraken
python
def _parse_kraken_output(out_dir, db, data): in_file = os.path.join(out_dir, "kraken_out") stat_file = os.path.join(out_dir, "kraken_stats") out_file = os.path.join(out_dir, "kraken_summary") kraken_cmd = config_utils.get_program("kraken-report", data["config"]) classify = unclassify = None with open(stat_file, 'r') as handle: for line in handle: if line.find(" classified") > -1: classify = line[line.find("(") + 1:line.find(")")] if line.find(" unclassified") > -1: unclassify = line[line.find("(") + 1:line.find(")")] if os.path.getsize(in_file) > 0 and not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals()) do.run(cl, "kraken report: %s" % dd.get_sample_name(data)) kraken = {"kraken_clas": classify, "kraken_unclas": unclassify} kraken_sum = _summarize_kraken(out_file) kraken.update(kraken_sum) return kraken
[ "def", "_parse_kraken_output", "(", "out_dir", ",", "db", ",", "data", ")", ":", "in_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"kraken_out\"", ")", "stat_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"krake...
Parse kraken stat info comming from stderr, generating report with kraken-report
[ "Parse", "kraken", "stat", "info", "comming", "from", "stderr", "generating", "report", "with", "kraken", "-", "report" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/kraken.py#L59-L81
236,939
bcbio/bcbio-nextgen
bcbio/qc/kraken.py
_summarize_kraken
def _summarize_kraken(fn): """get the value at species level""" kraken = {} list_sp, list_value = [], [] with open(fn) as handle: for line in handle: cols = line.strip().split("\t") sp = cols[5].strip() if len(sp.split(" ")) > 1 and not sp.startswith("cellular"): list_sp.append(sp) list_value.append(cols[0]) kraken = {"kraken_sp": list_sp, "kraken_value": list_value} return kraken
python
def _summarize_kraken(fn): kraken = {} list_sp, list_value = [], [] with open(fn) as handle: for line in handle: cols = line.strip().split("\t") sp = cols[5].strip() if len(sp.split(" ")) > 1 and not sp.startswith("cellular"): list_sp.append(sp) list_value.append(cols[0]) kraken = {"kraken_sp": list_sp, "kraken_value": list_value} return kraken
[ "def", "_summarize_kraken", "(", "fn", ")", ":", "kraken", "=", "{", "}", "list_sp", ",", "list_value", "=", "[", "]", ",", "[", "]", "with", "open", "(", "fn", ")", "as", "handle", ":", "for", "line", "in", "handle", ":", "cols", "=", "line", "....
get the value at species level
[ "get", "the", "value", "at", "species", "level" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/kraken.py#L83-L95
236,940
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_get_main_and_json
def _get_main_and_json(directory): """Retrieve the main CWL and sample JSON files from a bcbio generated directory. """ directory = os.path.normpath(os.path.abspath(directory)) checker_main = os.path.normpath(os.path.join(directory, os.path.pardir, "checker-workflow-wrapping-tool.cwl")) if checker_main and os.path.exists(checker_main): main_cwl = [checker_main] else: main_cwl = glob.glob(os.path.join(directory, "main-*.cwl")) main_cwl = [x for x in main_cwl if not x.find("-pack") >= 0] assert len(main_cwl) == 1, "Did not find main CWL in %s" % directory main_json = glob.glob(os.path.join(directory, "main-*-samples.json")) assert len(main_json) == 1, "Did not find main json in %s" % directory project_name = os.path.basename(directory).split("-workflow")[0] return main_cwl[0], main_json[0], project_name
python
def _get_main_and_json(directory): directory = os.path.normpath(os.path.abspath(directory)) checker_main = os.path.normpath(os.path.join(directory, os.path.pardir, "checker-workflow-wrapping-tool.cwl")) if checker_main and os.path.exists(checker_main): main_cwl = [checker_main] else: main_cwl = glob.glob(os.path.join(directory, "main-*.cwl")) main_cwl = [x for x in main_cwl if not x.find("-pack") >= 0] assert len(main_cwl) == 1, "Did not find main CWL in %s" % directory main_json = glob.glob(os.path.join(directory, "main-*-samples.json")) assert len(main_json) == 1, "Did not find main json in %s" % directory project_name = os.path.basename(directory).split("-workflow")[0] return main_cwl[0], main_json[0], project_name
[ "def", "_get_main_and_json", "(", "directory", ")", ":", "directory", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "abspath", "(", "directory", ")", ")", "checker_main", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", ...
Retrieve the main CWL and sample JSON files from a bcbio generated directory.
[ "Retrieve", "the", "main", "CWL", "and", "sample", "JSON", "files", "from", "a", "bcbio", "generated", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L18-L32
236,941
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_tool
def _run_tool(cmd, use_container=True, work_dir=None, log_file=None): """Run with injection of bcbio path. Place at end for runs without containers to avoid overriding other bcbio installations. """ if isinstance(cmd, (list, tuple)): cmd = " ".join([str(x) for x in cmd]) cmd = utils.local_path_export(at_start=use_container) + cmd if log_file: cmd += " 2>&1 | tee -a %s" % log_file try: print("Running: %s" % cmd) subprocess.check_call(cmd, shell=True) finally: if use_container and work_dir: _chown_workdir(work_dir)
python
def _run_tool(cmd, use_container=True, work_dir=None, log_file=None): if isinstance(cmd, (list, tuple)): cmd = " ".join([str(x) for x in cmd]) cmd = utils.local_path_export(at_start=use_container) + cmd if log_file: cmd += " 2>&1 | tee -a %s" % log_file try: print("Running: %s" % cmd) subprocess.check_call(cmd, shell=True) finally: if use_container and work_dir: _chown_workdir(work_dir)
[ "def", "_run_tool", "(", "cmd", ",", "use_container", "=", "True", ",", "work_dir", "=", "None", ",", "log_file", "=", "None", ")", ":", "if", "isinstance", "(", "cmd", ",", "(", "list", ",", "tuple", ")", ")", ":", "cmd", "=", "\" \"", ".", "join"...
Run with injection of bcbio path. Place at end for runs without containers to avoid overriding other bcbio installations.
[ "Run", "with", "injection", "of", "bcbio", "path", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L34-L50
236,942
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_pack_cwl
def _pack_cwl(unpacked_cwl): """Pack CWL into a single document for submission. """ out_file = "%s-pack%s" % os.path.splitext(unpacked_cwl) cmd = "cwltool --pack {unpacked_cwl} > {out_file}" _run_tool(cmd.format(**locals())) return out_file
python
def _pack_cwl(unpacked_cwl): out_file = "%s-pack%s" % os.path.splitext(unpacked_cwl) cmd = "cwltool --pack {unpacked_cwl} > {out_file}" _run_tool(cmd.format(**locals())) return out_file
[ "def", "_pack_cwl", "(", "unpacked_cwl", ")", ":", "out_file", "=", "\"%s-pack%s\"", "%", "os", ".", "path", ".", "splitext", "(", "unpacked_cwl", ")", "cmd", "=", "\"cwltool --pack {unpacked_cwl} > {out_file}\"", "_run_tool", "(", "cmd", ".", "format", "(", "*"...
Pack CWL into a single document for submission.
[ "Pack", "CWL", "into", "a", "single", "document", "for", "submission", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L52-L58
236,943
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_remove_bcbiovm_path
def _remove_bcbiovm_path(): """Avoid referencing minimal bcbio_nextgen in bcbio_vm installation. """ cur_path = os.path.dirname(os.path.realpath(sys.executable)) paths = os.environ["PATH"].split(":") if cur_path in paths: paths.remove(cur_path) os.environ["PATH"] = ":".join(paths)
python
def _remove_bcbiovm_path(): cur_path = os.path.dirname(os.path.realpath(sys.executable)) paths = os.environ["PATH"].split(":") if cur_path in paths: paths.remove(cur_path) os.environ["PATH"] = ":".join(paths)
[ "def", "_remove_bcbiovm_path", "(", ")", ":", "cur_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "sys", ".", "executable", ")", ")", "paths", "=", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split"...
Avoid referencing minimal bcbio_nextgen in bcbio_vm installation.
[ "Avoid", "referencing", "minimal", "bcbio_nextgen", "in", "bcbio_vm", "installation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L74-L81
236,944
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_arvados
def _run_arvados(args): """Run CWL on Arvados. """ assert not args.no_container, "Arvados runs require containers" assert "ARVADOS_API_TOKEN" in os.environ and "ARVADOS_API_HOST" in os.environ, \ "Need to set ARVADOS_API_TOKEN and ARVADOS_API_HOST in environment to run" main_file, json_file, project_name = _get_main_and_json(args.directory) flags = ["--enable-reuse", "--api", "containers", "--submit", "--no-wait"] cmd = ["arvados-cwl-runner"] + flags + args.toolargs + [main_file, json_file] _run_tool(cmd)
python
def _run_arvados(args): assert not args.no_container, "Arvados runs require containers" assert "ARVADOS_API_TOKEN" in os.environ and "ARVADOS_API_HOST" in os.environ, \ "Need to set ARVADOS_API_TOKEN and ARVADOS_API_HOST in environment to run" main_file, json_file, project_name = _get_main_and_json(args.directory) flags = ["--enable-reuse", "--api", "containers", "--submit", "--no-wait"] cmd = ["arvados-cwl-runner"] + flags + args.toolargs + [main_file, json_file] _run_tool(cmd)
[ "def", "_run_arvados", "(", "args", ")", ":", "assert", "not", "args", ".", "no_container", ",", "\"Arvados runs require containers\"", "assert", "\"ARVADOS_API_TOKEN\"", "in", "os", ".", "environ", "and", "\"ARVADOS_API_HOST\"", "in", "os", ".", "environ", ",", "...
Run CWL on Arvados.
[ "Run", "CWL", "on", "Arvados", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L99-L108
236,945
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_toil
def _run_toil(args): """Run CWL with Toil. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "toil_work")) tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpdir")) os.environ["TMPDIR"] = tmp_dir log_file = os.path.join(work_dir, "%s-toil.log" % project_name) jobstore = os.path.join(work_dir, "cwltoil_jobstore") flags = ["--jobStore", jobstore, "--logFile", log_file, "--workDir", tmp_dir, "--linkImports"] if os.path.exists(jobstore): flags += ["--restart"] # caching causes issues for batch systems if "--batchSystem" in args.toolargs: flags += ["--disableCaching"] flags += args.toolargs if args.no_container: _remove_bcbiovm_path() flags += ["--no-container", "--preserve-environment", "PATH", "HOME"] cmd = ["cwltoil"] + flags + ["--", main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir) for tmpdir in (glob.glob(os.path.join(work_dir, "out_tmpdir*")) + glob.glob(os.path.join(work_dir, "tmp*"))): if os.path.isdir(tmpdir): shutil.rmtree(tmpdir)
python
def _run_toil(args): main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "toil_work")) tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpdir")) os.environ["TMPDIR"] = tmp_dir log_file = os.path.join(work_dir, "%s-toil.log" % project_name) jobstore = os.path.join(work_dir, "cwltoil_jobstore") flags = ["--jobStore", jobstore, "--logFile", log_file, "--workDir", tmp_dir, "--linkImports"] if os.path.exists(jobstore): flags += ["--restart"] # caching causes issues for batch systems if "--batchSystem" in args.toolargs: flags += ["--disableCaching"] flags += args.toolargs if args.no_container: _remove_bcbiovm_path() flags += ["--no-container", "--preserve-environment", "PATH", "HOME"] cmd = ["cwltoil"] + flags + ["--", main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir) for tmpdir in (glob.glob(os.path.join(work_dir, "out_tmpdir*")) + glob.glob(os.path.join(work_dir, "tmp*"))): if os.path.isdir(tmpdir): shutil.rmtree(tmpdir)
[ "def", "_run_toil", "(", "args", ")", ":", "main_file", ",", "json_file", ",", "project_name", "=", "_get_main_and_json", "(", "args", ".", "directory", ")", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "os", "...
Run CWL with Toil.
[ "Run", "CWL", "with", "Toil", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L110-L135
236,946
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_bunny
def _run_bunny(args): """Run CWL with rabix bunny. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work")) flags = ["-b", work_dir] log_file = os.path.join(work_dir, "%s-bunny.log" % project_name) if os.path.exists(work_dir): caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir) if os.path.isdir(os.path.join(work_dir, d))] if caches: flags += ["--cache-dir", max(caches, key=os.path.getmtime)] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container"] cmd = ["rabix"] + flags + [main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file)
python
def _run_bunny(args): main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work")) flags = ["-b", work_dir] log_file = os.path.join(work_dir, "%s-bunny.log" % project_name) if os.path.exists(work_dir): caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir) if os.path.isdir(os.path.join(work_dir, d))] if caches: flags += ["--cache-dir", max(caches, key=os.path.getmtime)] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container"] cmd = ["rabix"] + flags + [main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file)
[ "def", "_run_bunny", "(", "args", ")", ":", "main_file", ",", "json_file", ",", "project_name", "=", "_get_main_and_json", "(", "args", ".", "directory", ")", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "os", ...
Run CWL with rabix bunny.
[ "Run", "CWL", "with", "rabix", "bunny", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L137-L154
236,947
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_wes_stratus
def _run_wes_stratus(args, main_file, json_file): """Run WES on Illumina stratus endpoint server, which wes-client doesn't support. https://stratus-docs.readme.io/docs/quick-start-4 """ import requests base_url = args.host if not base_url.startswith("http"): base_url = "https://%s" % base_url with open(main_file) as in_handle: r = requests.post("%s/v1/workflows" % base_url, headers={"Content-Type": "application/json", "Authorization": "Bearer %s" % args.auth}, data=in_handle.read()) print(r.status_code) print(r.text)
python
def _run_wes_stratus(args, main_file, json_file): import requests base_url = args.host if not base_url.startswith("http"): base_url = "https://%s" % base_url with open(main_file) as in_handle: r = requests.post("%s/v1/workflows" % base_url, headers={"Content-Type": "application/json", "Authorization": "Bearer %s" % args.auth}, data=in_handle.read()) print(r.status_code) print(r.text)
[ "def", "_run_wes_stratus", "(", "args", ",", "main_file", ",", "json_file", ")", ":", "import", "requests", "base_url", "=", "args", ".", "host", "if", "not", "base_url", ".", "startswith", "(", "\"http\"", ")", ":", "base_url", "=", "\"https://%s\"", "%", ...
Run WES on Illumina stratus endpoint server, which wes-client doesn't support. https://stratus-docs.readme.io/docs/quick-start-4
[ "Run", "WES", "on", "Illumina", "stratus", "endpoint", "server", "which", "wes", "-", "client", "doesn", "t", "support", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L172-L187
236,948
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_estimate_runner_memory
def _estimate_runner_memory(json_file): """Estimate Java memory requirements based on number of samples. A rough approach to selecting correct allocated memory for Cromwell. """ with open(json_file) as in_handle: sinfo = json.load(in_handle) num_parallel = 1 for key in ["config__algorithm__variantcaller", "description"]: item_counts = [] n = 0 for val in (sinfo.get(key) or []): n += 1 if val: if isinstance(val, (list, tuple)): item_counts.append(len(val)) else: item_counts.append(1) print(key, n, item_counts) if n and item_counts: num_parallel = n * max(item_counts) break if num_parallel < 25: return "3g" if num_parallel < 150: return "6g" elif num_parallel < 500: return "12g" else: return "24g"
python
def _estimate_runner_memory(json_file): with open(json_file) as in_handle: sinfo = json.load(in_handle) num_parallel = 1 for key in ["config__algorithm__variantcaller", "description"]: item_counts = [] n = 0 for val in (sinfo.get(key) or []): n += 1 if val: if isinstance(val, (list, tuple)): item_counts.append(len(val)) else: item_counts.append(1) print(key, n, item_counts) if n and item_counts: num_parallel = n * max(item_counts) break if num_parallel < 25: return "3g" if num_parallel < 150: return "6g" elif num_parallel < 500: return "12g" else: return "24g"
[ "def", "_estimate_runner_memory", "(", "json_file", ")", ":", "with", "open", "(", "json_file", ")", "as", "in_handle", ":", "sinfo", "=", "json", ".", "load", "(", "in_handle", ")", "num_parallel", "=", "1", "for", "key", "in", "[", "\"config__algorithm__va...
Estimate Java memory requirements based on number of samples. A rough approach to selecting correct allocated memory for Cromwell.
[ "Estimate", "Java", "memory", "requirements", "based", "on", "number", "of", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L189-L218
236,949
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_cromwell
def _run_cromwell(args): """Run CWL with Cromwell. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work")) final_dir = utils.safe_makedir(os.path.join(work_dir, "final")) if args.no_container: _remove_bcbiovm_path() log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name) metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name) option_file = os.path.join(work_dir, "%s-options.json" % project_name) cromwell_opts = {"final_workflow_outputs_dir": final_dir, "default_runtime_attributes": {"bootDiskSizeGb": 20}} with open(option_file, "w") as out_handle: json.dump(cromwell_opts, out_handle) cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file), "run", "--type", "CWL", "-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)] cmd += hpc.args_to_cromwell_cl(args) cmd += ["--metadata-output", metadata_file, "--options", option_file, "--inputs", json_file, main_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file) if metadata_file and utils.file_exists(metadata_file): with open(metadata_file) as in_handle: metadata = json.load(in_handle) if metadata["status"] == "Failed": _cromwell_debug(metadata) sys.exit(1) else: _cromwell_move_outputs(metadata, final_dir)
python
def _run_cromwell(args): main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work")) final_dir = utils.safe_makedir(os.path.join(work_dir, "final")) if args.no_container: _remove_bcbiovm_path() log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name) metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name) option_file = os.path.join(work_dir, "%s-options.json" % project_name) cromwell_opts = {"final_workflow_outputs_dir": final_dir, "default_runtime_attributes": {"bootDiskSizeGb": 20}} with open(option_file, "w") as out_handle: json.dump(cromwell_opts, out_handle) cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file), "run", "--type", "CWL", "-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)] cmd += hpc.args_to_cromwell_cl(args) cmd += ["--metadata-output", metadata_file, "--options", option_file, "--inputs", json_file, main_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file) if metadata_file and utils.file_exists(metadata_file): with open(metadata_file) as in_handle: metadata = json.load(in_handle) if metadata["status"] == "Failed": _cromwell_debug(metadata) sys.exit(1) else: _cromwell_move_outputs(metadata, final_dir)
[ "def", "_run_cromwell", "(", "args", ")", ":", "main_file", ",", "json_file", ",", "project_name", "=", "_get_main_and_json", "(", "args", ".", "directory", ")", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "os",...
Run CWL with Cromwell.
[ "Run", "CWL", "with", "Cromwell", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L220-L251
236,950
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_cromwell_debug
def _cromwell_debug(metadata): """Format Cromwell failures to make debugging easier. """ def get_failed_calls(cur, key=None): if key is None: key = [] out = [] if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur: out.append((key, cur)) elif isinstance(cur, dict): for k, v in cur.items(): out.extend(get_failed_calls(v, key + [k])) elif isinstance(cur, (list, tuple)): for i, v in enumerate(cur): out.extend(get_failed_calls(v, key + [i])) return out print("Failed bcbio Cromwell run") print("-------------------------") for fail_k, fail_call in get_failed_calls(metadata["calls"]): root_dir = os.path.join("cromwell_work", os.path.relpath(fail_call["callRoot"])) print("Failure in step: %s" % ".".join([str(x) for x in fail_k])) print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-debug.log")) print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-commands.log")) print(" Cromwell directory : %s" % root_dir) print()
python
def _cromwell_debug(metadata): def get_failed_calls(cur, key=None): if key is None: key = [] out = [] if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur: out.append((key, cur)) elif isinstance(cur, dict): for k, v in cur.items(): out.extend(get_failed_calls(v, key + [k])) elif isinstance(cur, (list, tuple)): for i, v in enumerate(cur): out.extend(get_failed_calls(v, key + [i])) return out print("Failed bcbio Cromwell run") print("-------------------------") for fail_k, fail_call in get_failed_calls(metadata["calls"]): root_dir = os.path.join("cromwell_work", os.path.relpath(fail_call["callRoot"])) print("Failure in step: %s" % ".".join([str(x) for x in fail_k])) print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-debug.log")) print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-commands.log")) print(" Cromwell directory : %s" % root_dir) print()
[ "def", "_cromwell_debug", "(", "metadata", ")", ":", "def", "get_failed_calls", "(", "cur", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "key", "=", "[", "]", "out", "=", "[", "]", "if", "isinstance", "(", "cur", ",", "dict", ...
Format Cromwell failures to make debugging easier.
[ "Format", "Cromwell", "failures", "to", "make", "debugging", "easier", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L253-L277
236,951
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_cromwell_move_outputs
def _cromwell_move_outputs(metadata, final_dir): """Move Cromwell outputs to the final upload directory. """ sample_key = [k for k in metadata["outputs"].keys() if k.endswith(("rgnames__sample", "rgnames__sample_out"))][0] project_dir = utils.safe_makedir(os.path.join(final_dir, "project")) samples = metadata["outputs"][sample_key] def _copy_with_secondary(f, dirname): if len(f["secondaryFiles"]) > 1: dirname = utils.safe_makedir(os.path.join(dirname, os.path.basename(os.path.dirname(f["location"])))) if not objectstore.is_remote(f["location"]): finalf = os.path.join(dirname, os.path.basename(f["location"])) if not utils.file_uptodate(finalf, f["location"]): shutil.copy(f["location"], dirname) [_copy_with_secondary(sf, dirname) for sf in f["secondaryFiles"]] def _write_to_dir(val, dirname): if isinstance(val, (list, tuple)): [_write_to_dir(v, dirname) for v in val] else: _copy_with_secondary(val, dirname) for k, vals in metadata["outputs"].items(): if k != sample_key: if k.endswith(("summary__multiqc")): vs = [v for v in vals if v] assert len(vs) == 1 _write_to_dir(vs[0], project_dir) elif len(vals) == len(samples): for s, v in zip(samples, vals): if v: _write_to_dir(v, utils.safe_makedir(os.path.join(final_dir, s))) elif len(vals) == 1: _write_to_dir(vals[0], project_dir) elif len(vals) > 0: raise ValueError("Unexpected sample and outputs: %s %s %s" % (k, samples, vals))
python
def _cromwell_move_outputs(metadata, final_dir): sample_key = [k for k in metadata["outputs"].keys() if k.endswith(("rgnames__sample", "rgnames__sample_out"))][0] project_dir = utils.safe_makedir(os.path.join(final_dir, "project")) samples = metadata["outputs"][sample_key] def _copy_with_secondary(f, dirname): if len(f["secondaryFiles"]) > 1: dirname = utils.safe_makedir(os.path.join(dirname, os.path.basename(os.path.dirname(f["location"])))) if not objectstore.is_remote(f["location"]): finalf = os.path.join(dirname, os.path.basename(f["location"])) if not utils.file_uptodate(finalf, f["location"]): shutil.copy(f["location"], dirname) [_copy_with_secondary(sf, dirname) for sf in f["secondaryFiles"]] def _write_to_dir(val, dirname): if isinstance(val, (list, tuple)): [_write_to_dir(v, dirname) for v in val] else: _copy_with_secondary(val, dirname) for k, vals in metadata["outputs"].items(): if k != sample_key: if k.endswith(("summary__multiqc")): vs = [v for v in vals if v] assert len(vs) == 1 _write_to_dir(vs[0], project_dir) elif len(vals) == len(samples): for s, v in zip(samples, vals): if v: _write_to_dir(v, utils.safe_makedir(os.path.join(final_dir, s))) elif len(vals) == 1: _write_to_dir(vals[0], project_dir) elif len(vals) > 0: raise ValueError("Unexpected sample and outputs: %s %s %s" % (k, samples, vals))
[ "def", "_cromwell_move_outputs", "(", "metadata", ",", "final_dir", ")", ":", "sample_key", "=", "[", "k", "for", "k", "in", "metadata", "[", "\"outputs\"", "]", ".", "keys", "(", ")", "if", "k", ".", "endswith", "(", "(", "\"rgnames__sample\"", ",", "\"...
Move Cromwell outputs to the final upload directory.
[ "Move", "Cromwell", "outputs", "to", "the", "final", "upload", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L279-L311
236,952
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_sbgenomics
def _run_sbgenomics(args): """Run CWL on SevenBridges platform and Cancer Genomics Cloud. """ assert not args.no_container, "Seven Bridges runs require containers" main_file, json_file, project_name = _get_main_and_json(args.directory) flags = [] cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file] _run_tool(cmd)
python
def _run_sbgenomics(args): assert not args.no_container, "Seven Bridges runs require containers" main_file, json_file, project_name = _get_main_and_json(args.directory) flags = [] cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file] _run_tool(cmd)
[ "def", "_run_sbgenomics", "(", "args", ")", ":", "assert", "not", "args", ".", "no_container", ",", "\"Seven Bridges runs require containers\"", "main_file", ",", "json_file", ",", "project_name", "=", "_get_main_and_json", "(", "args", ".", "directory", ")", "flags...
Run CWL on SevenBridges platform and Cancer Genomics Cloud.
[ "Run", "CWL", "on", "SevenBridges", "platform", "and", "Cancer", "Genomics", "Cloud", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L313-L320
236,953
bcbio/bcbio-nextgen
bcbio/cwl/tool.py
_run_funnel
def _run_funnel(args): """Run funnel TES server with rabix bunny for CWL. """ host = "localhost" port = "8088" main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "funnel_work")) log_file = os.path.join(work_dir, "%s-funnel.log" % project_name) # Create bunny configuration directory with TES backend orig_config_dir = os.path.join(os.path.dirname(os.path.realpath(utils.which("rabix"))), "config") work_config_dir = utils.safe_makedir(os.path.join(work_dir, "rabix_config")) for fname in os.listdir(orig_config_dir): if fname == "core.properties": with open(os.path.join(orig_config_dir, fname)) as in_handle: with open(os.path.join(work_config_dir, fname), "w") as out_handle: for line in in_handle: if line.startswith("backend.embedded.types"): line = "backend.embedded.types=TES\n" out_handle.write(line) else: shutil.copy(os.path.join(orig_config_dir, fname), os.path.join(work_config_dir, fname)) flags = ["-c", work_config_dir, "-tes-url=http://%s:%s" % (host, port), "-tes-storage=%s" % work_dir] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container"] cmd = ["rabix"] + flags + [main_file, json_file] funnelp = subprocess.Popen(["funnel", "server", "run", "--Server.HostName", host, "--Server.HTTPPort", port, "--LocalStorage.AllowedDirs", work_dir, "--Worker.WorkDir", os.path.join(work_dir, "funnel-work")]) try: with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file) finally: funnelp.kill()
python
def _run_funnel(args): host = "localhost" port = "8088" main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "funnel_work")) log_file = os.path.join(work_dir, "%s-funnel.log" % project_name) # Create bunny configuration directory with TES backend orig_config_dir = os.path.join(os.path.dirname(os.path.realpath(utils.which("rabix"))), "config") work_config_dir = utils.safe_makedir(os.path.join(work_dir, "rabix_config")) for fname in os.listdir(orig_config_dir): if fname == "core.properties": with open(os.path.join(orig_config_dir, fname)) as in_handle: with open(os.path.join(work_config_dir, fname), "w") as out_handle: for line in in_handle: if line.startswith("backend.embedded.types"): line = "backend.embedded.types=TES\n" out_handle.write(line) else: shutil.copy(os.path.join(orig_config_dir, fname), os.path.join(work_config_dir, fname)) flags = ["-c", work_config_dir, "-tes-url=http://%s:%s" % (host, port), "-tes-storage=%s" % work_dir] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container"] cmd = ["rabix"] + flags + [main_file, json_file] funnelp = subprocess.Popen(["funnel", "server", "run", "--Server.HostName", host, "--Server.HTTPPort", port, "--LocalStorage.AllowedDirs", work_dir, "--Worker.WorkDir", os.path.join(work_dir, "funnel-work")]) try: with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file) finally: funnelp.kill()
[ "def", "_run_funnel", "(", "args", ")", ":", "host", "=", "\"localhost\"", "port", "=", "\"8088\"", "main_file", ",", "json_file", ",", "project_name", "=", "_get_main_and_json", "(", "args", ".", "directory", ")", "work_dir", "=", "utils", ".", "safe_makedir"...
Run funnel TES server with rabix bunny for CWL.
[ "Run", "funnel", "TES", "server", "with", "rabix", "bunny", "for", "CWL", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L322-L357
236,954
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_parse_qualimap_globals_inregion
def _parse_qualimap_globals_inregion(table): """Retrieve metrics from the global targeted region table. """ out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] if col == "Mapped reads": out.update(_parse_num_pct("%s (in regions)" % col, val)) return out
python
def _parse_qualimap_globals_inregion(table): out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] if col == "Mapped reads": out.update(_parse_num_pct("%s (in regions)" % col, val)) return out
[ "def", "_parse_qualimap_globals_inregion", "(", "table", ")", ":", "out", "=", "{", "}", "for", "row", "in", "table", ".", "find_all", "(", "\"tr\"", ")", ":", "col", ",", "val", "=", "[", "x", ".", "text", "for", "x", "in", "row", ".", "find_all", ...
Retrieve metrics from the global targeted region table.
[ "Retrieve", "metrics", "from", "the", "global", "targeted", "region", "table", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L131-L139
236,955
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_parse_qualimap_coverage
def _parse_qualimap_coverage(table): """Parse summary qualimap coverage metrics. """ out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] if col == "Mean": out["Coverage (Mean)"] = val return out
python
def _parse_qualimap_coverage(table): out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] if col == "Mean": out["Coverage (Mean)"] = val return out
[ "def", "_parse_qualimap_coverage", "(", "table", ")", ":", "out", "=", "{", "}", "for", "row", "in", "table", ".", "find_all", "(", "\"tr\"", ")", ":", "col", ",", "val", "=", "[", "x", ".", "text", "for", "x", "in", "row", ".", "find_all", "(", ...
Parse summary qualimap coverage metrics.
[ "Parse", "summary", "qualimap", "coverage", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L141-L149
236,956
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_bed_to_bed6
def _bed_to_bed6(orig_file, out_dir): """Convert bed to required bed6 inputs. """ bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file))) if not utils.file_exists(bed6_file): with open(bed6_file, "w") as out_handle: for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)): region = [x for x in list(region) if x] fillers = [str(i), "1.0", "+"] full = region + fillers[:6 - len(region)] out_handle.write("\t".join(full) + "\n") return bed6_file
python
def _bed_to_bed6(orig_file, out_dir): bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file))) if not utils.file_exists(bed6_file): with open(bed6_file, "w") as out_handle: for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)): region = [x for x in list(region) if x] fillers = [str(i), "1.0", "+"] full = region + fillers[:6 - len(region)] out_handle.write("\t".join(full) + "\n") return bed6_file
[ "def", "_bed_to_bed6", "(", "orig_file", ",", "out_dir", ")", ":", "bed6_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-bed6%s\"", "%", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "orig_file...
Convert bed to required bed6 inputs.
[ "Convert", "bed", "to", "required", "bed6", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L161-L172
236,957
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_detect_duplicates
def _detect_duplicates(bam_file, out_dir, data): """ count duplicate percentage """ out_file = os.path.join(out_dir, "dup_metrics.txt") if not utils.file_exists(out_file): dup_align_bam = postalign.dedup_bam(bam_file, data) logger.info("Detecting duplicates in %s." % dup_align_bam) dup_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=False) tot_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=True) with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: out_handle.write("%s\n%s\n" % (dup_count, tot_count)) with open(out_file) as in_handle: dupes = float(next(in_handle).strip()) total = float(next(in_handle).strip()) if total == 0: rate = "NA" else: rate = dupes / total return {"Duplication Rate of Mapped": rate}
python
def _detect_duplicates(bam_file, out_dir, data): out_file = os.path.join(out_dir, "dup_metrics.txt") if not utils.file_exists(out_file): dup_align_bam = postalign.dedup_bam(bam_file, data) logger.info("Detecting duplicates in %s." % dup_align_bam) dup_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=False) tot_count = readstats.number_of_mapped_reads(data, dup_align_bam, keep_dups=True) with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: out_handle.write("%s\n%s\n" % (dup_count, tot_count)) with open(out_file) as in_handle: dupes = float(next(in_handle).strip()) total = float(next(in_handle).strip()) if total == 0: rate = "NA" else: rate = dupes / total return {"Duplication Rate of Mapped": rate}
[ "def", "_detect_duplicates", "(", "bam_file", ",", "out_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"dup_metrics.txt\"", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "d...
count duplicate percentage
[ "count", "duplicate", "percentage" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L219-L239
236,958
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
run_rnaseq
def run_rnaseq(bam_file, data, out_dir): """ Run qualimap for a rnaseq bam file and parse results """ strandedness = {"firststrand": "strand-specific-reverse", "secondstrand": "strand-specific-forward", "unstranded": "non-strand-specific"} # Qualimap results should be saved to a directory named after sample. # MultiQC (for parsing additional data) picks the sample name after the dir as follows: # <sample name>/raw_data_qualimapReport/insert_size_histogram.txt results_dir = os.path.join(out_dir, dd.get_sample_name(data)) results_file = os.path.join(results_dir, "rnaseq_qc_results.txt") report_file = os.path.join(results_dir, "qualimapReport.html") config = data["config"] gtf_file = dd.get_gtf_file(data) library = strandedness[dd.get_strandedness(data)] if not utils.file_exists(results_file): with file_transaction(data, results_dir) as tx_results_dir: utils.safe_makedir(tx_results_dir) bam.index(bam_file, config) cmd = _rnaseq_qualimap_cmd(data, bam_file, tx_results_dir, gtf_file, library) do.run(cmd, "Qualimap for {}".format(dd.get_sample_name(data))) tx_results_file = os.path.join(tx_results_dir, "rnaseq_qc_results.txt") cmd = "sed -i 's/bam file = .*/bam file = %s.bam/' %s" % (dd.get_sample_name(data), tx_results_file) do.run(cmd, "Fix Name Qualimap for {}".format(dd.get_sample_name(data))) metrics = _parse_rnaseq_qualimap_metrics(report_file) metrics.update(_detect_duplicates(bam_file, results_dir, data)) metrics.update(_detect_rRNA(data, results_dir)) metrics.update({"Average_insert_size": salmon.estimate_fragment_size(data)}) metrics = _parse_metrics(metrics) # Qualimap output folder (results_dir) needs to be named after the sample (see comments above). 
However, in order # to keep its name after upload, we need to put the base QC file (results_file) into the root directory (out_dir): base_results_file = os.path.join(out_dir, os.path.basename(results_file)) shutil.copyfile(results_file, base_results_file) return {"base": base_results_file, "secondary": _find_qualimap_secondary_files(results_dir, base_results_file), "metrics": metrics}
python
def run_rnaseq(bam_file, data, out_dir): strandedness = {"firststrand": "strand-specific-reverse", "secondstrand": "strand-specific-forward", "unstranded": "non-strand-specific"} # Qualimap results should be saved to a directory named after sample. # MultiQC (for parsing additional data) picks the sample name after the dir as follows: # <sample name>/raw_data_qualimapReport/insert_size_histogram.txt results_dir = os.path.join(out_dir, dd.get_sample_name(data)) results_file = os.path.join(results_dir, "rnaseq_qc_results.txt") report_file = os.path.join(results_dir, "qualimapReport.html") config = data["config"] gtf_file = dd.get_gtf_file(data) library = strandedness[dd.get_strandedness(data)] if not utils.file_exists(results_file): with file_transaction(data, results_dir) as tx_results_dir: utils.safe_makedir(tx_results_dir) bam.index(bam_file, config) cmd = _rnaseq_qualimap_cmd(data, bam_file, tx_results_dir, gtf_file, library) do.run(cmd, "Qualimap for {}".format(dd.get_sample_name(data))) tx_results_file = os.path.join(tx_results_dir, "rnaseq_qc_results.txt") cmd = "sed -i 's/bam file = .*/bam file = %s.bam/' %s" % (dd.get_sample_name(data), tx_results_file) do.run(cmd, "Fix Name Qualimap for {}".format(dd.get_sample_name(data))) metrics = _parse_rnaseq_qualimap_metrics(report_file) metrics.update(_detect_duplicates(bam_file, results_dir, data)) metrics.update(_detect_rRNA(data, results_dir)) metrics.update({"Average_insert_size": salmon.estimate_fragment_size(data)}) metrics = _parse_metrics(metrics) # Qualimap output folder (results_dir) needs to be named after the sample (see comments above). 
However, in order # to keep its name after upload, we need to put the base QC file (results_file) into the root directory (out_dir): base_results_file = os.path.join(out_dir, os.path.basename(results_file)) shutil.copyfile(results_file, base_results_file) return {"base": base_results_file, "secondary": _find_qualimap_secondary_files(results_dir, base_results_file), "metrics": metrics}
[ "def", "run_rnaseq", "(", "bam_file", ",", "data", ",", "out_dir", ")", ":", "strandedness", "=", "{", "\"firststrand\"", ":", "\"strand-specific-reverse\"", ",", "\"secondstrand\"", ":", "\"strand-specific-forward\"", ",", "\"unstranded\"", ":", "\"non-strand-specific\...
Run qualimap for a rnaseq bam file and parse results
[ "Run", "qualimap", "for", "a", "rnaseq", "bam", "file", "and", "parse", "results" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L317-L354
236,959
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_rnaseq_qualimap_cmd
def _rnaseq_qualimap_cmd(data, bam_file, out_dir, gtf_file=None, library="non-strand-specific"): """ Create command lines for qualimap """ config = data["config"] qualimap = config_utils.get_program("qualimap", config) resources = config_utils.get_resources("qualimap", config) num_cores = resources.get("cores", dd.get_num_cores(data)) max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), num_cores) export = "%s%s" % (utils.java_freetype_fix(), utils.local_path_export()) export = "%s%s export JAVA_OPTS='-Xms32m -Xmx%s -Djava.io.tmpdir=%s' && " % ( utils.java_freetype_fix(), utils.local_path_export(), max_mem, out_dir) paired = " --paired" if bam.is_paired(bam_file) else "" cmd = ("unset DISPLAY && {export} {qualimap} rnaseq -outdir {out_dir} " "-a proportional -bam {bam_file} -p {library}{paired} " "-gtf {gtf_file}").format(**locals()) return cmd
python
def _rnaseq_qualimap_cmd(data, bam_file, out_dir, gtf_file=None, library="non-strand-specific"): config = data["config"] qualimap = config_utils.get_program("qualimap", config) resources = config_utils.get_resources("qualimap", config) num_cores = resources.get("cores", dd.get_num_cores(data)) max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), num_cores) export = "%s%s" % (utils.java_freetype_fix(), utils.local_path_export()) export = "%s%s export JAVA_OPTS='-Xms32m -Xmx%s -Djava.io.tmpdir=%s' && " % ( utils.java_freetype_fix(), utils.local_path_export(), max_mem, out_dir) paired = " --paired" if bam.is_paired(bam_file) else "" cmd = ("unset DISPLAY && {export} {qualimap} rnaseq -outdir {out_dir} " "-a proportional -bam {bam_file} -p {library}{paired} " "-gtf {gtf_file}").format(**locals()) return cmd
[ "def", "_rnaseq_qualimap_cmd", "(", "data", ",", "bam_file", ",", "out_dir", ",", "gtf_file", "=", "None", ",", "library", "=", "\"non-strand-specific\"", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "qualimap", "=", "config_utils", ".", "get_progr...
Create command lines for qualimap
[ "Create", "command", "lines", "for", "qualimap" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L356-L373
236,960
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
_find_qualimap_secondary_files
def _find_qualimap_secondary_files(results_dir, base_file): """Retrieve additional files, avoiding double uploading the base file. """ def not_dup(x): is_dup = (os.path.basename(x) == os.path.basename(base_file) and os.path.getsize(x) == os.path.getsize(base_file)) return not is_dup def is_problem_file(x): """Problematic files with characters that make some CWL runners unhappy. """ return x.find("(") >= 0 or x.find(")") >= 0 or x.find(" ") >= 0 return list(filter(lambda x: not is_problem_file(x), filter(not_dup, glob.glob(os.path.join(results_dir, 'qualimapReport.html')) + glob.glob(os.path.join(results_dir, '*.txt')) + glob.glob(os.path.join(results_dir, "css", "*")) + glob.glob(os.path.join(results_dir, "raw_data_qualimapReport", "*")) + glob.glob(os.path.join(results_dir, "images_qualimapReport", "*")))))
python
def _find_qualimap_secondary_files(results_dir, base_file): def not_dup(x): is_dup = (os.path.basename(x) == os.path.basename(base_file) and os.path.getsize(x) == os.path.getsize(base_file)) return not is_dup def is_problem_file(x): """Problematic files with characters that make some CWL runners unhappy. """ return x.find("(") >= 0 or x.find(")") >= 0 or x.find(" ") >= 0 return list(filter(lambda x: not is_problem_file(x), filter(not_dup, glob.glob(os.path.join(results_dir, 'qualimapReport.html')) + glob.glob(os.path.join(results_dir, '*.txt')) + glob.glob(os.path.join(results_dir, "css", "*")) + glob.glob(os.path.join(results_dir, "raw_data_qualimapReport", "*")) + glob.glob(os.path.join(results_dir, "images_qualimapReport", "*")))))
[ "def", "_find_qualimap_secondary_files", "(", "results_dir", ",", "base_file", ")", ":", "def", "not_dup", "(", "x", ")", ":", "is_dup", "=", "(", "os", ".", "path", ".", "basename", "(", "x", ")", "==", "os", ".", "path", ".", "basename", "(", "base_f...
Retrieve additional files, avoiding double uploading the base file.
[ "Retrieve", "additional", "files", "avoiding", "double", "uploading", "the", "base", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L375-L392
236,961
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
classifyplot_from_plotfiles
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None): """Create a plot from individual summary csv files with classification metrics. """ dfs = [pd.read_csv(x) for x in plot_files] samples = [] for df in dfs: for sample in df["sample"].unique(): if sample not in samples: samples.append(sample) df = pd.concat(dfs) df.to_csv(out_csv, index=False) return classifyplot_from_valfile(out_csv, outtype, title, size, samples)
python
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None): dfs = [pd.read_csv(x) for x in plot_files] samples = [] for df in dfs: for sample in df["sample"].unique(): if sample not in samples: samples.append(sample) df = pd.concat(dfs) df.to_csv(out_csv, index=False) return classifyplot_from_valfile(out_csv, outtype, title, size, samples)
[ "def", "classifyplot_from_plotfiles", "(", "plot_files", ",", "out_csv", ",", "outtype", "=", "\"png\"", ",", "title", "=", "None", ",", "size", "=", "None", ")", ":", "dfs", "=", "[", "pd", ".", "read_csv", "(", "x", ")", "for", "x", "in", "plot_files...
Create a plot from individual summary csv files with classification metrics.
[ "Create", "a", "plot", "from", "individual", "summary", "csv", "files", "with", "classification", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L22-L33
236,962
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
classifyplot_from_valfile
def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None, samples=None, callers=None): """Create a plot from a summarized validation file. Does new-style plotting of summarized metrics of false negative rate and false discovery rate. https://en.wikipedia.org/wiki/Sensitivity_and_specificity """ mpl.use('Agg', force=True) df = pd.read_csv(val_file) grouped = df.groupby(["sample", "caller", "vtype"]) df = grouped.apply(_calculate_fnr_fdr) df = df.reset_index() if len(df) == 0: return [] else: out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype) _do_classifyplot(df, out_file, title, size, samples, callers) return [out_file]
python
def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None, samples=None, callers=None): mpl.use('Agg', force=True) df = pd.read_csv(val_file) grouped = df.groupby(["sample", "caller", "vtype"]) df = grouped.apply(_calculate_fnr_fdr) df = df.reset_index() if len(df) == 0: return [] else: out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype) _do_classifyplot(df, out_file, title, size, samples, callers) return [out_file]
[ "def", "classifyplot_from_valfile", "(", "val_file", ",", "outtype", "=", "\"png\"", ",", "title", "=", "None", ",", "size", "=", "None", ",", "samples", "=", "None", ",", "callers", "=", "None", ")", ":", "mpl", ".", "use", "(", "'Agg'", ",", "force",...
Create a plot from a summarized validation file. Does new-style plotting of summarized metrics of false negative rate and false discovery rate. https://en.wikipedia.org/wiki/Sensitivity_and_specificity
[ "Create", "a", "plot", "from", "a", "summarized", "validation", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L35-L53
236,963
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
create
def create(plot_data, header, ploti, sample_config, out_file_base, outtype="png", title=None, size=None): """Create plots of validation results for a sample, labeling prep strategies. """ if mpl is None or plt is None or sns is None: not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None]) logger.info("No validation plot. Missing imports: %s" % not_found) return None mpl.use('Agg', force=True) if header: df = pd.DataFrame(plot_data, columns=header) else: df = plot_data df["aligner"] = [get_aligner(x, sample_config) for x in df["sample"]] df["bamprep"] = [get_bamprep(x, sample_config) for x in df["sample"]] floors = get_group_floors(df, cat_labels) df["value.floor"] = [get_floor_value(x, cat, vartype, floors) for (x, cat, vartype) in zip(df["value"], df["category"], df["variant.type"])] out = [] for i, prep in enumerate(df["bamprep"].unique()): out.append(plot_prep_methods(df, prep, i + ploti, out_file_base, outtype, title, size)) return out
python
def create(plot_data, header, ploti, sample_config, out_file_base, outtype="png", title=None, size=None): if mpl is None or plt is None or sns is None: not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None]) logger.info("No validation plot. Missing imports: %s" % not_found) return None mpl.use('Agg', force=True) if header: df = pd.DataFrame(plot_data, columns=header) else: df = plot_data df["aligner"] = [get_aligner(x, sample_config) for x in df["sample"]] df["bamprep"] = [get_bamprep(x, sample_config) for x in df["sample"]] floors = get_group_floors(df, cat_labels) df["value.floor"] = [get_floor_value(x, cat, vartype, floors) for (x, cat, vartype) in zip(df["value"], df["category"], df["variant.type"])] out = [] for i, prep in enumerate(df["bamprep"].unique()): out.append(plot_prep_methods(df, prep, i + ploti, out_file_base, outtype, title, size)) return out
[ "def", "create", "(", "plot_data", ",", "header", ",", "ploti", ",", "sample_config", ",", "out_file_base", ",", "outtype", "=", "\"png\"", ",", "title", "=", "None", ",", "size", "=", "None", ")", ":", "if", "mpl", "is", "None", "or", "plt", "is", "...
Create plots of validation results for a sample, labeling prep strategies.
[ "Create", "plots", "of", "validation", "results", "for", "a", "sample", "labeling", "prep", "strategies", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L145-L167
236,964
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
plot_prep_methods
def plot_prep_methods(df, prep, prepi, out_file_base, outtype, title=None, size=None): """Plot comparison between BAM preparation methods. """ samples = df[(df["bamprep"] == prep)]["sample"].unique() assert len(samples) >= 1, samples out_file = "%s-%s.%s" % (out_file_base, samples[0], outtype) df = df[df["category"].isin(cat_labels)] _seaborn(df, prep, prepi, out_file, title, size) return out_file
python
def plot_prep_methods(df, prep, prepi, out_file_base, outtype, title=None, size=None): samples = df[(df["bamprep"] == prep)]["sample"].unique() assert len(samples) >= 1, samples out_file = "%s-%s.%s" % (out_file_base, samples[0], outtype) df = df[df["category"].isin(cat_labels)] _seaborn(df, prep, prepi, out_file, title, size) return out_file
[ "def", "plot_prep_methods", "(", "df", ",", "prep", ",", "prepi", ",", "out_file_base", ",", "outtype", ",", "title", "=", "None", ",", "size", "=", "None", ")", ":", "samples", "=", "df", "[", "(", "df", "[", "\"bamprep\"", "]", "==", "prep", ")", ...
Plot comparison between BAM preparation methods.
[ "Plot", "comparison", "between", "BAM", "preparation", "methods", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L178-L187
236,965
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
_seaborn
def _seaborn(df, prep, prepi, out_file, title=None, size=None): """Plot using seaborn wrapper around matplotlib. """ plt.ioff() sns.set(style='dark') vtypes = df["variant.type"].unique() callers = sorted(df["caller"].unique()) cats = _check_cats(["concordant", "discordant-missing-total", "discordant-extra-total", "discordant-shared-total"], vtypes, df, prep, callers) fig, axs = plt.subplots(len(vtypes), len(cats)) width = 0.8 for i, vtype in enumerate(vtypes): ax_row = axs[i] if len(vtypes) > 1 else axs for j, cat in enumerate(cats): vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) if len(cats) == 1: assert j == 0 ax = ax_row else: ax = ax_row[j] if i == 0: ax.set_title(cat_labels[cat], size=14) ax.get_yaxis().set_ticks([]) if j == 0: ax.set_ylabel(vtype_labels[vtype], size=14) ax.bar(np.arange(len(callers)), vals, width=width) ax.set_ylim(0, maxval) if i == len(vtypes) - 1: ax.set_xticks(np.arange(len(callers)) + width / 2.0) ax.set_xticklabels([caller_labels.get(x, x).replace("__", "\n") if x else "" for x in callers], size=8, rotation=45) else: ax.get_xaxis().set_ticks([]) _annotate(ax, labels, vals, np.arange(len(callers)), width) fig.text(.5, .95, prep_labels.get(prep, "") if title is None else title, horizontalalignment='center', size=16) fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1) x, y = (10, 5) if size is None else size fig.set_size_inches(x, y) fig.savefig(out_file)
python
def _seaborn(df, prep, prepi, out_file, title=None, size=None): plt.ioff() sns.set(style='dark') vtypes = df["variant.type"].unique() callers = sorted(df["caller"].unique()) cats = _check_cats(["concordant", "discordant-missing-total", "discordant-extra-total", "discordant-shared-total"], vtypes, df, prep, callers) fig, axs = plt.subplots(len(vtypes), len(cats)) width = 0.8 for i, vtype in enumerate(vtypes): ax_row = axs[i] if len(vtypes) > 1 else axs for j, cat in enumerate(cats): vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) if len(cats) == 1: assert j == 0 ax = ax_row else: ax = ax_row[j] if i == 0: ax.set_title(cat_labels[cat], size=14) ax.get_yaxis().set_ticks([]) if j == 0: ax.set_ylabel(vtype_labels[vtype], size=14) ax.bar(np.arange(len(callers)), vals, width=width) ax.set_ylim(0, maxval) if i == len(vtypes) - 1: ax.set_xticks(np.arange(len(callers)) + width / 2.0) ax.set_xticklabels([caller_labels.get(x, x).replace("__", "\n") if x else "" for x in callers], size=8, rotation=45) else: ax.get_xaxis().set_ticks([]) _annotate(ax, labels, vals, np.arange(len(callers)), width) fig.text(.5, .95, prep_labels.get(prep, "") if title is None else title, horizontalalignment='center', size=16) fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1) x, y = (10, 5) if size is None else size fig.set_size_inches(x, y) fig.savefig(out_file)
[ "def", "_seaborn", "(", "df", ",", "prep", ",", "prepi", ",", "out_file", ",", "title", "=", "None", ",", "size", "=", "None", ")", ":", "plt", ".", "ioff", "(", ")", "sns", ".", "set", "(", "style", "=", "'dark'", ")", "vtypes", "=", "df", "["...
Plot using seaborn wrapper around matplotlib.
[ "Plot", "using", "seaborn", "wrapper", "around", "matplotlib", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L189-L228
236,966
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
_check_cats
def _check_cats(cats, vtypes, df, prep, callers): """Only include categories in the final output if they have values. """ out = [] for cat in cats: all_vals = [] for vtype in vtypes: vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) all_vals.extend(vals) if sum(all_vals) / float(len(all_vals)) > 2: out.append(cat) if len(out) == 0: return cats else: return out
python
def _check_cats(cats, vtypes, df, prep, callers): out = [] for cat in cats: all_vals = [] for vtype in vtypes: vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) all_vals.extend(vals) if sum(all_vals) / float(len(all_vals)) > 2: out.append(cat) if len(out) == 0: return cats else: return out
[ "def", "_check_cats", "(", "cats", ",", "vtypes", ",", "df", ",", "prep", ",", "callers", ")", ":", "out", "=", "[", "]", "for", "cat", "in", "cats", ":", "all_vals", "=", "[", "]", "for", "vtype", "in", "vtypes", ":", "vals", ",", "labels", ",",...
Only include categories in the final output if they have values.
[ "Only", "include", "categories", "in", "the", "final", "output", "if", "they", "have", "values", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L230-L244
236,967
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
_get_chart_info
def _get_chart_info(df, vtype, cat, prep, callers): """Retrieve values for a specific variant type, category and prep method. """ maxval_raw = max(list(df["value.floor"])) curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat) & (df["bamprep"] == prep)] vals = [] labels = [] for c in callers: row = curdf[df["caller"] == c] if len(row) > 0: vals.append(list(row["value.floor"])[0]) labels.append(list(row["value"])[0]) else: vals.append(1) labels.append("") return vals, labels, maxval_raw
python
def _get_chart_info(df, vtype, cat, prep, callers): maxval_raw = max(list(df["value.floor"])) curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat) & (df["bamprep"] == prep)] vals = [] labels = [] for c in callers: row = curdf[df["caller"] == c] if len(row) > 0: vals.append(list(row["value.floor"])[0]) labels.append(list(row["value"])[0]) else: vals.append(1) labels.append("") return vals, labels, maxval_raw
[ "def", "_get_chart_info", "(", "df", ",", "vtype", ",", "cat", ",", "prep", ",", "callers", ")", ":", "maxval_raw", "=", "max", "(", "list", "(", "df", "[", "\"value.floor\"", "]", ")", ")", "curdf", "=", "df", "[", "(", "df", "[", "\"variant.type\""...
Retrieve values for a specific variant type, category and prep method.
[ "Retrieve", "values", "for", "a", "specific", "variant", "type", "category", "and", "prep", "method", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L246-L262
236,968
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
_annotate
def _annotate(ax, annotate, height, left, width): """Annotate axis with labels. """ annotate_yrange_factor = 0.010 xticks = np.array(left) + width / 2.0 ymin, ymax = ax.get_ylim() yrange = ymax - ymin # Reset ymax and ymin so there's enough room to see the annotation of # the top-most if ymax > 0: ymax += yrange * 0.15 if ymin < 0: ymin -= yrange * 0.15 ax.set_ylim(ymin, ymax) yrange = ymax - ymin offset_ = yrange * annotate_yrange_factor if isinstance(annotate, collections.Iterable): annotations = map(str, annotate) else: annotations = ['%.3f' % h if type(h) is np.float_ else str(h) for h in height] for x, h, annotation in zip(xticks, height, annotations): # Adjust the offset to account for negative bars offset = offset_ if h >= 0 else -1 * offset_ verticalalignment = 'bottom' if h >= 0 else 'top' if len(str(annotation)) > 6: size = 7 elif len(str(annotation)) > 5: size = 8 else: size = 10 # Finally, add the text to the axes ax.annotate(annotation, (x, h + offset), verticalalignment=verticalalignment, horizontalalignment='center', size=size)
python
def _annotate(ax, annotate, height, left, width): annotate_yrange_factor = 0.010 xticks = np.array(left) + width / 2.0 ymin, ymax = ax.get_ylim() yrange = ymax - ymin # Reset ymax and ymin so there's enough room to see the annotation of # the top-most if ymax > 0: ymax += yrange * 0.15 if ymin < 0: ymin -= yrange * 0.15 ax.set_ylim(ymin, ymax) yrange = ymax - ymin offset_ = yrange * annotate_yrange_factor if isinstance(annotate, collections.Iterable): annotations = map(str, annotate) else: annotations = ['%.3f' % h if type(h) is np.float_ else str(h) for h in height] for x, h, annotation in zip(xticks, height, annotations): # Adjust the offset to account for negative bars offset = offset_ if h >= 0 else -1 * offset_ verticalalignment = 'bottom' if h >= 0 else 'top' if len(str(annotation)) > 6: size = 7 elif len(str(annotation)) > 5: size = 8 else: size = 10 # Finally, add the text to the axes ax.annotate(annotation, (x, h + offset), verticalalignment=verticalalignment, horizontalalignment='center', size=size)
[ "def", "_annotate", "(", "ax", ",", "annotate", ",", "height", ",", "left", ",", "width", ")", ":", "annotate_yrange_factor", "=", "0.010", "xticks", "=", "np", ".", "array", "(", "left", ")", "+", "width", "/", "2.0", "ymin", ",", "ymax", "=", "ax",...
Annotate axis with labels.
[ "Annotate", "axis", "with", "labels", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L264-L302
236,969
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
_ggplot
def _ggplot(df, out_file): """Plot faceted items with ggplot wrapper on top of matplotlib. XXX Not yet functional """ import ggplot as gg df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]] df["category"] = [cat_labels[x] for x in df["category"]] df["caller"] = [caller_labels.get(x, None) for x in df["caller"]] p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar() + gg.facet_wrap("variant.type", "category") + gg.theme_seaborn()) gg.ggsave(p, out_file)
python
def _ggplot(df, out_file): import ggplot as gg df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]] df["category"] = [cat_labels[x] for x in df["category"]] df["caller"] = [caller_labels.get(x, None) for x in df["caller"]] p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar() + gg.facet_wrap("variant.type", "category") + gg.theme_seaborn()) gg.ggsave(p, out_file)
[ "def", "_ggplot", "(", "df", ",", "out_file", ")", ":", "import", "ggplot", "as", "gg", "df", "[", "\"variant.type\"", "]", "=", "[", "vtype_labels", "[", "x", "]", "for", "x", "in", "df", "[", "\"variant.type\"", "]", "]", "df", "[", "\"category\"", ...
Plot faceted items with ggplot wrapper on top of matplotlib. XXX Not yet functional
[ "Plot", "faceted", "items", "with", "ggplot", "wrapper", "on", "top", "of", "matplotlib", ".", "XXX", "Not", "yet", "functional" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L304-L315
236,970
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
get_floor_value
def get_floor_value(x, cat, vartype, floors): """Modify values so all have the same relative scale for differences. Using the chosen base heights, adjusts an individual sub-plot to be consistent relative to that height. """ all_base = floors[vartype] cur_max = floors[(cat, vartype)] if cur_max > all_base: diff = cur_max - all_base x = max(1, x - diff) return x
python
def get_floor_value(x, cat, vartype, floors): all_base = floors[vartype] cur_max = floors[(cat, vartype)] if cur_max > all_base: diff = cur_max - all_base x = max(1, x - diff) return x
[ "def", "get_floor_value", "(", "x", ",", "cat", ",", "vartype", ",", "floors", ")", ":", "all_base", "=", "floors", "[", "vartype", "]", "cur_max", "=", "floors", "[", "(", "cat", ",", "vartype", ")", "]", "if", "cur_max", ">", "all_base", ":", "diff...
Modify values so all have the same relative scale for differences. Using the chosen base heights, adjusts an individual sub-plot to be consistent relative to that height.
[ "Modify", "values", "so", "all", "have", "the", "same", "relative", "scale", "for", "differences", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L317-L328
236,971
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
get_group_floors
def get_group_floors(df, cat_labels): """Retrieve the floor for a given row of comparisons, creating a normalized set of differences. We need to set non-zero floors so large numbers (like concordance) don't drown out small numbers (like discordance). This defines the height for a row of comparisons as either the minimum height of any sub-plot, or the maximum difference between higher and lower (plus 10%). """ group_maxes = collections.defaultdict(list) group_diffs = collections.defaultdict(list) diff_pad = 0.1 # 10% padding onto difference to avoid large numbers looking like zero for name, group in df.groupby(["category", "variant.type"]): label, stype = name if label in cat_labels: diff = max(group["value"]) - min(group["value"]) group_diffs[stype].append(diff + int(diff_pad * diff)) group_maxes[stype].append(max(group["value"])) group_maxes[name].append(max(group["value"])) out = {} for k, vs in group_maxes.items(): if k in group_diffs: out[k] = max(max(group_diffs[stype]), min(vs)) else: out[k] = min(vs) return out
python
def get_group_floors(df, cat_labels): group_maxes = collections.defaultdict(list) group_diffs = collections.defaultdict(list) diff_pad = 0.1 # 10% padding onto difference to avoid large numbers looking like zero for name, group in df.groupby(["category", "variant.type"]): label, stype = name if label in cat_labels: diff = max(group["value"]) - min(group["value"]) group_diffs[stype].append(diff + int(diff_pad * diff)) group_maxes[stype].append(max(group["value"])) group_maxes[name].append(max(group["value"])) out = {} for k, vs in group_maxes.items(): if k in group_diffs: out[k] = max(max(group_diffs[stype]), min(vs)) else: out[k] = min(vs) return out
[ "def", "get_group_floors", "(", "df", ",", "cat_labels", ")", ":", "group_maxes", "=", "collections", ".", "defaultdict", "(", "list", ")", "group_diffs", "=", "collections", ".", "defaultdict", "(", "list", ")", "diff_pad", "=", "0.1", "# 10% padding onto diffe...
Retrieve the floor for a given row of comparisons, creating a normalized set of differences. We need to set non-zero floors so large numbers (like concordance) don't drown out small numbers (like discordance). This defines the height for a row of comparisons as either the minimum height of any sub-plot, or the maximum difference between higher and lower (plus 10%).
[ "Retrieve", "the", "floor", "for", "a", "given", "row", "of", "comparisons", "creating", "a", "normalized", "set", "of", "differences", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L330-L354
236,972
bcbio/bcbio-nextgen
scripts/bcbio_nextgen.py
_sanity_check_args
def _sanity_check_args(args): """Ensure dependent arguments are correctly specified """ if "scheduler" in args and "queue" in args: if args.scheduler and not args.queue: if args.scheduler != "sge": return "IPython parallel scheduler (-s) specified. This also requires a queue (-q)." elif args.queue and not args.scheduler: return "IPython parallel queue (-q) supplied. This also requires a scheduler (-s)." elif args.paralleltype == "ipython" and (not args.queue or not args.scheduler): return "IPython parallel requires queue (-q) and scheduler (-s) arguments."
python
def _sanity_check_args(args): if "scheduler" in args and "queue" in args: if args.scheduler and not args.queue: if args.scheduler != "sge": return "IPython parallel scheduler (-s) specified. This also requires a queue (-q)." elif args.queue and not args.scheduler: return "IPython parallel queue (-q) supplied. This also requires a scheduler (-s)." elif args.paralleltype == "ipython" and (not args.queue or not args.scheduler): return "IPython parallel requires queue (-q) and scheduler (-s) arguments."
[ "def", "_sanity_check_args", "(", "args", ")", ":", "if", "\"scheduler\"", "in", "args", "and", "\"queue\"", "in", "args", ":", "if", "args", ".", "scheduler", "and", "not", "args", ".", "queue", ":", "if", "args", ".", "scheduler", "!=", "\"sge\"", ":",...
Ensure dependent arguments are correctly specified
[ "Ensure", "dependent", "arguments", "are", "correctly", "specified" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen.py#L143-L153
236,973
bcbio/bcbio-nextgen
scripts/bcbio_nextgen.py
_add_inputs_to_kwargs
def _add_inputs_to_kwargs(args, kwargs, parser): """Convert input system config, flow cell directory and sample yaml to kwargs. Handles back compatibility with previous commandlines while allowing flexible specification of input parameters. """ inputs = [x for x in [args.global_config, args.fc_dir] + args.run_config if x is not None] global_config = "bcbio_system.yaml" # default configuration if not specified if kwargs.get("workflow", "") == "template": if args.only_metadata: inputs.append("--only-metadata") if args.force_single: inputs.append("--force-single") if args.separators: inputs.extend(["--separators", args.separators]) kwargs["inputs"] = inputs return kwargs elif len(inputs) == 1: if os.path.isfile(inputs[0]): fc_dir = None run_info_yaml = inputs[0] else: fc_dir = inputs[0] run_info_yaml = None elif len(inputs) == 2: if os.path.isfile(inputs[0]): global_config = inputs[0] if os.path.isfile(inputs[1]): fc_dir = None run_info_yaml = inputs[1] else: fc_dir = inputs[1] run_info_yaml = None else: fc_dir, run_info_yaml = inputs elif len(inputs) == 3: global_config, fc_dir, run_info_yaml = inputs elif args.version: print(version.__version__) sys.exit() else: print("Incorrect input arguments", inputs) parser.print_help() sys.exit() if fc_dir: fc_dir = os.path.abspath(fc_dir) if run_info_yaml: run_info_yaml = os.path.abspath(run_info_yaml) if kwargs.get("workflow"): kwargs["inputs"] = inputs kwargs["config_file"] = global_config kwargs["fc_dir"] = fc_dir kwargs["run_info_yaml"] = run_info_yaml return kwargs
python
def _add_inputs_to_kwargs(args, kwargs, parser): inputs = [x for x in [args.global_config, args.fc_dir] + args.run_config if x is not None] global_config = "bcbio_system.yaml" # default configuration if not specified if kwargs.get("workflow", "") == "template": if args.only_metadata: inputs.append("--only-metadata") if args.force_single: inputs.append("--force-single") if args.separators: inputs.extend(["--separators", args.separators]) kwargs["inputs"] = inputs return kwargs elif len(inputs) == 1: if os.path.isfile(inputs[0]): fc_dir = None run_info_yaml = inputs[0] else: fc_dir = inputs[0] run_info_yaml = None elif len(inputs) == 2: if os.path.isfile(inputs[0]): global_config = inputs[0] if os.path.isfile(inputs[1]): fc_dir = None run_info_yaml = inputs[1] else: fc_dir = inputs[1] run_info_yaml = None else: fc_dir, run_info_yaml = inputs elif len(inputs) == 3: global_config, fc_dir, run_info_yaml = inputs elif args.version: print(version.__version__) sys.exit() else: print("Incorrect input arguments", inputs) parser.print_help() sys.exit() if fc_dir: fc_dir = os.path.abspath(fc_dir) if run_info_yaml: run_info_yaml = os.path.abspath(run_info_yaml) if kwargs.get("workflow"): kwargs["inputs"] = inputs kwargs["config_file"] = global_config kwargs["fc_dir"] = fc_dir kwargs["run_info_yaml"] = run_info_yaml return kwargs
[ "def", "_add_inputs_to_kwargs", "(", "args", ",", "kwargs", ",", "parser", ")", ":", "inputs", "=", "[", "x", "for", "x", "in", "[", "args", ".", "global_config", ",", "args", ".", "fc_dir", "]", "+", "args", ".", "run_config", "if", "x", "is", "not"...
Convert input system config, flow cell directory and sample yaml to kwargs. Handles back compatibility with previous commandlines while allowing flexible specification of input parameters.
[ "Convert", "input", "system", "config", "flow", "cell", "directory", "and", "sample", "yaml", "to", "kwargs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen.py#L162-L216
236,974
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
_add_commas
def _add_commas(s, sep=','): """Add commas to output counts. From: http://code.activestate.com/recipes/498181 """ if len(s) <= 3: return s return _add_commas(s[:-3], sep) + sep + s[-3:]
python
def _add_commas(s, sep=','): if len(s) <= 3: return s return _add_commas(s[:-3], sep) + sep + s[-3:]
[ "def", "_add_commas", "(", "s", ",", "sep", "=", "','", ")", ":", "if", "len", "(", "s", ")", "<=", "3", ":", "return", "s", "return", "_add_commas", "(", "s", "[", ":", "-", "3", "]", ",", "sep", ")", "+", "sep", "+", "s", "[", "-", "3", ...
Add commas to output counts. From: http://code.activestate.com/recipes/498181
[ "Add", "commas", "to", "output", "counts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L428-L436
236,975
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
bed_to_interval
def bed_to_interval(orig_bed, bam_file): """Add header and format BED bait and target files for Picard if necessary. """ with open(orig_bed) as in_handle: line = in_handle.readline() if line.startswith("@"): yield orig_bed else: with pysam.Samfile(bam_file, "rb") as bam_handle: header = bam_handle.text with tmpfile(dir=os.path.dirname(orig_bed), prefix="picardbed") as tmp_bed: with open(tmp_bed, "w") as out_handle: out_handle.write(header) with open(orig_bed) as in_handle: for i, line in enumerate(in_handle): parts = line.rstrip().split("\t") if len(parts) == 4: chrom, start, end, name = parts strand = "+" elif len(parts) >= 3: chrom, start, end = parts[:3] strand = "+" name = "r%s" % i out = [chrom, start, end, strand, name] out_handle.write("\t".join(out) + "\n") yield tmp_bed
python
def bed_to_interval(orig_bed, bam_file): with open(orig_bed) as in_handle: line = in_handle.readline() if line.startswith("@"): yield orig_bed else: with pysam.Samfile(bam_file, "rb") as bam_handle: header = bam_handle.text with tmpfile(dir=os.path.dirname(orig_bed), prefix="picardbed") as tmp_bed: with open(tmp_bed, "w") as out_handle: out_handle.write(header) with open(orig_bed) as in_handle: for i, line in enumerate(in_handle): parts = line.rstrip().split("\t") if len(parts) == 4: chrom, start, end, name = parts strand = "+" elif len(parts) >= 3: chrom, start, end = parts[:3] strand = "+" name = "r%s" % i out = [chrom, start, end, strand, name] out_handle.write("\t".join(out) + "\n") yield tmp_bed
[ "def", "bed_to_interval", "(", "orig_bed", ",", "bam_file", ")", ":", "with", "open", "(", "orig_bed", ")", "as", "in_handle", ":", "line", "=", "in_handle", ".", "readline", "(", ")", "if", "line", ".", "startswith", "(", "\"@\"", ")", ":", "yield", "...
Add header and format BED bait and target files for Picard if necessary.
[ "Add", "header", "and", "format", "BED", "bait", "and", "target", "files", "for", "Picard", "if", "necessary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L440-L465
236,976
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
PicardMetricsParser.get_summary_metrics
def get_summary_metrics(self, align_metrics, dup_metrics, insert_metrics=None, hybrid_metrics=None, vrn_vals=None, rnaseq_metrics=None): """Retrieve a high level summary of interesting metrics. """ with open(align_metrics) as in_handle: align_vals = self._parse_align_metrics(in_handle) if dup_metrics: with open(dup_metrics) as in_handle: dup_vals = self._parse_dup_metrics(in_handle) else: dup_vals = {} (insert_vals, hybrid_vals, rnaseq_vals) = (None, None, None) if insert_metrics and file_exists(insert_metrics): with open(insert_metrics) as in_handle: insert_vals = self._parse_insert_metrics(in_handle) if hybrid_metrics and file_exists(hybrid_metrics): with open(hybrid_metrics) as in_handle: hybrid_vals = self._parse_hybrid_metrics(in_handle) if rnaseq_metrics and file_exists(rnaseq_metrics): with open(rnaseq_metrics) as in_handle: rnaseq_vals = self._parse_rnaseq_metrics(in_handle) return self._tabularize_metrics(align_vals, dup_vals, insert_vals, hybrid_vals, vrn_vals, rnaseq_vals)
python
def get_summary_metrics(self, align_metrics, dup_metrics, insert_metrics=None, hybrid_metrics=None, vrn_vals=None, rnaseq_metrics=None): with open(align_metrics) as in_handle: align_vals = self._parse_align_metrics(in_handle) if dup_metrics: with open(dup_metrics) as in_handle: dup_vals = self._parse_dup_metrics(in_handle) else: dup_vals = {} (insert_vals, hybrid_vals, rnaseq_vals) = (None, None, None) if insert_metrics and file_exists(insert_metrics): with open(insert_metrics) as in_handle: insert_vals = self._parse_insert_metrics(in_handle) if hybrid_metrics and file_exists(hybrid_metrics): with open(hybrid_metrics) as in_handle: hybrid_vals = self._parse_hybrid_metrics(in_handle) if rnaseq_metrics and file_exists(rnaseq_metrics): with open(rnaseq_metrics) as in_handle: rnaseq_vals = self._parse_rnaseq_metrics(in_handle) return self._tabularize_metrics(align_vals, dup_vals, insert_vals, hybrid_vals, vrn_vals, rnaseq_vals)
[ "def", "get_summary_metrics", "(", "self", ",", "align_metrics", ",", "dup_metrics", ",", "insert_metrics", "=", "None", ",", "hybrid_metrics", "=", "None", ",", "vrn_vals", "=", "None", ",", "rnaseq_metrics", "=", "None", ")", ":", "with", "open", "(", "ali...
Retrieve a high level summary of interesting metrics.
[ "Retrieve", "a", "high", "level", "summary", "of", "interesting", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L24-L48
236,977
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
PicardMetricsParser.extract_metrics
def extract_metrics(self, metrics_files): """Return summary information for a lane of metrics files. """ extension_maps = dict( align_metrics=(self._parse_align_metrics, "AL"), dup_metrics=(self._parse_dup_metrics, "DUP"), hs_metrics=(self._parse_hybrid_metrics, "HS"), insert_metrics=(self._parse_insert_metrics, "INS"), rnaseq_metrics=(self._parse_rnaseq_metrics, "RNA")) all_metrics = dict() for fname in metrics_files: ext = os.path.splitext(fname)[-1][1:] try: parse_fn, prefix = extension_maps[ext] except KeyError: parse_fn = None if parse_fn: with open(fname) as in_handle: for key, val in parse_fn(in_handle).items(): if not key.startswith(prefix): key = "%s_%s" % (prefix, key) all_metrics[key] = val return all_metrics
python
def extract_metrics(self, metrics_files): extension_maps = dict( align_metrics=(self._parse_align_metrics, "AL"), dup_metrics=(self._parse_dup_metrics, "DUP"), hs_metrics=(self._parse_hybrid_metrics, "HS"), insert_metrics=(self._parse_insert_metrics, "INS"), rnaseq_metrics=(self._parse_rnaseq_metrics, "RNA")) all_metrics = dict() for fname in metrics_files: ext = os.path.splitext(fname)[-1][1:] try: parse_fn, prefix = extension_maps[ext] except KeyError: parse_fn = None if parse_fn: with open(fname) as in_handle: for key, val in parse_fn(in_handle).items(): if not key.startswith(prefix): key = "%s_%s" % (prefix, key) all_metrics[key] = val return all_metrics
[ "def", "extract_metrics", "(", "self", ",", "metrics_files", ")", ":", "extension_maps", "=", "dict", "(", "align_metrics", "=", "(", "self", ".", "_parse_align_metrics", ",", "\"AL\"", ")", ",", "dup_metrics", "=", "(", "self", ".", "_parse_dup_metrics", ",",...
Return summary information for a lane of metrics files.
[ "Return", "summary", "information", "for", "a", "lane", "of", "metrics", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L50-L72
236,978
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
PicardMetrics.report
def report(self, align_bam, ref_file, is_paired, bait_file, target_file, variant_region_file, config): """Produce report metrics using Picard with sorted aligned BAM file. """ dup_metrics = self._get_current_dup_metrics(align_bam) align_metrics = self._collect_align_metrics(align_bam, ref_file) # Prefer the GC metrics in FastQC instead of Picard # gc_graph, gc_metrics = self._gc_bias(align_bam, ref_file) gc_graph = None insert_graph, insert_metrics, hybrid_metrics = (None, None, None) if is_paired: insert_graph, insert_metrics = self._insert_sizes(align_bam) if bait_file and target_file: assert os.path.exists(bait_file), (bait_file, "does not exist!") assert os.path.exists(target_file), (target_file, "does not exist!") hybrid_metrics = self._hybrid_select_metrics(align_bam, bait_file, target_file) elif (variant_region_file and config["algorithm"].get("coverage_interval", "").lower() in ["exome"]): assert os.path.exists(variant_region_file), (variant_region_file, "does not exist") hybrid_metrics = self._hybrid_select_metrics( align_bam, variant_region_file, variant_region_file) vrn_vals = self._variant_eval_metrics(align_bam) summary_info = self._parser.get_summary_metrics(align_metrics, dup_metrics, insert_metrics, hybrid_metrics, vrn_vals) graphs = [] if gc_graph and os.path.exists(gc_graph): graphs.append((gc_graph, "Distribution of GC content across reads")) if insert_graph and os.path.exists(insert_graph): graphs.append((insert_graph, "Distribution of paired end insert sizes")) return summary_info, graphs
python
def report(self, align_bam, ref_file, is_paired, bait_file, target_file, variant_region_file, config): dup_metrics = self._get_current_dup_metrics(align_bam) align_metrics = self._collect_align_metrics(align_bam, ref_file) # Prefer the GC metrics in FastQC instead of Picard # gc_graph, gc_metrics = self._gc_bias(align_bam, ref_file) gc_graph = None insert_graph, insert_metrics, hybrid_metrics = (None, None, None) if is_paired: insert_graph, insert_metrics = self._insert_sizes(align_bam) if bait_file and target_file: assert os.path.exists(bait_file), (bait_file, "does not exist!") assert os.path.exists(target_file), (target_file, "does not exist!") hybrid_metrics = self._hybrid_select_metrics(align_bam, bait_file, target_file) elif (variant_region_file and config["algorithm"].get("coverage_interval", "").lower() in ["exome"]): assert os.path.exists(variant_region_file), (variant_region_file, "does not exist") hybrid_metrics = self._hybrid_select_metrics( align_bam, variant_region_file, variant_region_file) vrn_vals = self._variant_eval_metrics(align_bam) summary_info = self._parser.get_summary_metrics(align_metrics, dup_metrics, insert_metrics, hybrid_metrics, vrn_vals) graphs = [] if gc_graph and os.path.exists(gc_graph): graphs.append((gc_graph, "Distribution of GC content across reads")) if insert_graph and os.path.exists(insert_graph): graphs.append((insert_graph, "Distribution of paired end insert sizes")) return summary_info, graphs
[ "def", "report", "(", "self", ",", "align_bam", ",", "ref_file", ",", "is_paired", ",", "bait_file", ",", "target_file", ",", "variant_region_file", ",", "config", ")", ":", "dup_metrics", "=", "self", ".", "_get_current_dup_metrics", "(", "align_bam", ")", "a...
Produce report metrics using Picard with sorted aligned BAM file.
[ "Produce", "report", "metrics", "using", "Picard", "with", "sorted", "aligned", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L287-L319
236,979
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
PicardMetrics._get_current_dup_metrics
def _get_current_dup_metrics(self, align_bam): """Retrieve duplicate information from input BAM file. """ metrics_file = "%s.dup_metrics" % os.path.splitext(align_bam)[0] if not file_exists(metrics_file): dups = 0 with pysam.Samfile(align_bam, "rb") as bam_handle: for read in bam_handle: if (read.is_paired and read.is_read1) or not read.is_paired: if read.is_duplicate: dups += 1 with open(metrics_file, "w") as out_handle: out_handle.write("# custom bcbio-nextgen metrics\n") out_handle.write("READ_PAIR_DUPLICATES\t%s\n" % dups) return metrics_file
python
def _get_current_dup_metrics(self, align_bam): metrics_file = "%s.dup_metrics" % os.path.splitext(align_bam)[0] if not file_exists(metrics_file): dups = 0 with pysam.Samfile(align_bam, "rb") as bam_handle: for read in bam_handle: if (read.is_paired and read.is_read1) or not read.is_paired: if read.is_duplicate: dups += 1 with open(metrics_file, "w") as out_handle: out_handle.write("# custom bcbio-nextgen metrics\n") out_handle.write("READ_PAIR_DUPLICATES\t%s\n" % dups) return metrics_file
[ "def", "_get_current_dup_metrics", "(", "self", ",", "align_bam", ")", ":", "metrics_file", "=", "\"%s.dup_metrics\"", "%", "os", ".", "path", ".", "splitext", "(", "align_bam", ")", "[", "0", "]", "if", "not", "file_exists", "(", "metrics_file", ")", ":", ...
Retrieve duplicate information from input BAM file.
[ "Retrieve", "duplicate", "information", "from", "input", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L321-L335
236,980
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
PicardMetrics._check_metrics_file
def _check_metrics_file(self, bam_name, metrics_ext): """Check for an existing metrics file for the given BAM. """ base, _ = os.path.splitext(bam_name) try: int(base[-1]) can_glob = False except ValueError: can_glob = True check_fname = "{base}{maybe_glob}.{ext}".format( base=base, maybe_glob="*" if can_glob else "", ext=metrics_ext) glob_fnames = glob.glob(check_fname) if len(glob_fnames) > 0: return glob_fnames[0] else: return "{base}.{ext}".format(base=base, ext=metrics_ext)
python
def _check_metrics_file(self, bam_name, metrics_ext): base, _ = os.path.splitext(bam_name) try: int(base[-1]) can_glob = False except ValueError: can_glob = True check_fname = "{base}{maybe_glob}.{ext}".format( base=base, maybe_glob="*" if can_glob else "", ext=metrics_ext) glob_fnames = glob.glob(check_fname) if len(glob_fnames) > 0: return glob_fnames[0] else: return "{base}.{ext}".format(base=base, ext=metrics_ext)
[ "def", "_check_metrics_file", "(", "self", ",", "bam_name", ",", "metrics_ext", ")", ":", "base", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "bam_name", ")", "try", ":", "int", "(", "base", "[", "-", "1", "]", ")", "can_glob", "=", "Fa...
Check for an existing metrics file for the given BAM.
[ "Check", "for", "an", "existing", "metrics", "file", "for", "the", "given", "BAM", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L337-L352
236,981
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
PicardMetrics._hybrid_select_metrics
def _hybrid_select_metrics(self, dup_bam, bait_file, target_file): """Generate metrics for hybrid selection efficiency. """ metrics = self._check_metrics_file(dup_bam, "hs_metrics") if not file_exists(metrics): with bed_to_interval(bait_file, dup_bam) as ready_bait: with bed_to_interval(target_file, dup_bam) as ready_target: with file_transaction(metrics) as tx_metrics: opts = [("BAIT_INTERVALS", ready_bait), ("TARGET_INTERVALS", ready_target), ("INPUT", dup_bam), ("OUTPUT", tx_metrics)] try: self._picard.run("CollectHsMetrics", opts) # HsMetrics fails regularly with memory errors # so we catch and skip instead of aborting the # full process except subprocess.CalledProcessError: return None return metrics
python
def _hybrid_select_metrics(self, dup_bam, bait_file, target_file): metrics = self._check_metrics_file(dup_bam, "hs_metrics") if not file_exists(metrics): with bed_to_interval(bait_file, dup_bam) as ready_bait: with bed_to_interval(target_file, dup_bam) as ready_target: with file_transaction(metrics) as tx_metrics: opts = [("BAIT_INTERVALS", ready_bait), ("TARGET_INTERVALS", ready_target), ("INPUT", dup_bam), ("OUTPUT", tx_metrics)] try: self._picard.run("CollectHsMetrics", opts) # HsMetrics fails regularly with memory errors # so we catch and skip instead of aborting the # full process except subprocess.CalledProcessError: return None return metrics
[ "def", "_hybrid_select_metrics", "(", "self", ",", "dup_bam", ",", "bait_file", ",", "target_file", ")", ":", "metrics", "=", "self", ".", "_check_metrics_file", "(", "dup_bam", ",", "\"hs_metrics\"", ")", "if", "not", "file_exists", "(", "metrics", ")", ":", ...
Generate metrics for hybrid selection efficiency.
[ "Generate", "metrics", "for", "hybrid", "selection", "efficiency", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L354-L373
236,982
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
PicardMetrics._variant_eval_metrics
def _variant_eval_metrics(self, dup_bam): """Find metrics for evaluating variant effectiveness. """ base, ext = os.path.splitext(dup_bam) end_strip = "-dup" base = base[:-len(end_strip)] if base.endswith(end_strip) else base mfiles = glob.glob("%s*eval_metrics" % base) if len(mfiles) > 0: with open(mfiles[0]) as in_handle: # pull the metrics as JSON from the last line in the file for line in in_handle: pass metrics = json.loads(line) return metrics else: return None
python
def _variant_eval_metrics(self, dup_bam): base, ext = os.path.splitext(dup_bam) end_strip = "-dup" base = base[:-len(end_strip)] if base.endswith(end_strip) else base mfiles = glob.glob("%s*eval_metrics" % base) if len(mfiles) > 0: with open(mfiles[0]) as in_handle: # pull the metrics as JSON from the last line in the file for line in in_handle: pass metrics = json.loads(line) return metrics else: return None
[ "def", "_variant_eval_metrics", "(", "self", ",", "dup_bam", ")", ":", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "dup_bam", ")", "end_strip", "=", "\"-dup\"", "base", "=", "base", "[", ":", "-", "len", "(", "end_strip", ")", "...
Find metrics for evaluating variant effectiveness.
[ "Find", "metrics", "for", "evaluating", "variant", "effectiveness", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L375-L390
236,983
bcbio/bcbio-nextgen
bcbio/broad/metrics.py
RNASeqPicardMetrics.report
def report(self, align_bam, ref_file, gtf_file, is_paired=False, rrna_file="null"): """Produce report metrics for a RNASeq experiment using Picard with a sorted aligned BAM file. """ # collect duplication metrics dup_metrics = self._get_current_dup_metrics(align_bam) align_metrics = self._collect_align_metrics(align_bam, ref_file) insert_graph, insert_metrics = (None, None) if is_paired: insert_graph, insert_metrics = self._insert_sizes(align_bam) rnaseq_metrics = self._rnaseq_metrics(align_bam, gtf_file, rrna_file) summary_info = self._parser.get_summary_metrics(align_metrics, dup_metrics, insert_metrics=insert_metrics, rnaseq_metrics=rnaseq_metrics) graphs = [] if insert_graph and file_exists(insert_graph): graphs.append((insert_graph, "Distribution of paired end insert sizes")) return summary_info, graphs
python
def report(self, align_bam, ref_file, gtf_file, is_paired=False, rrna_file="null"): # collect duplication metrics dup_metrics = self._get_current_dup_metrics(align_bam) align_metrics = self._collect_align_metrics(align_bam, ref_file) insert_graph, insert_metrics = (None, None) if is_paired: insert_graph, insert_metrics = self._insert_sizes(align_bam) rnaseq_metrics = self._rnaseq_metrics(align_bam, gtf_file, rrna_file) summary_info = self._parser.get_summary_metrics(align_metrics, dup_metrics, insert_metrics=insert_metrics, rnaseq_metrics=rnaseq_metrics) graphs = [] if insert_graph and file_exists(insert_graph): graphs.append((insert_graph, "Distribution of paired end insert sizes")) return summary_info, graphs
[ "def", "report", "(", "self", ",", "align_bam", ",", "ref_file", ",", "gtf_file", ",", "is_paired", "=", "False", ",", "rrna_file", "=", "\"null\"", ")", ":", "# collect duplication metrics", "dup_metrics", "=", "self", ".", "_get_current_dup_metrics", "(", "ali...
Produce report metrics for a RNASeq experiment using Picard with a sorted aligned BAM file.
[ "Produce", "report", "metrics", "for", "a", "RNASeq", "experiment", "using", "Picard", "with", "a", "sorted", "aligned", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L470-L493
236,984
bcbio/bcbio-nextgen
bcbio/variation/gatk.py
standard_cl_params
def standard_cl_params(items): """Shared command line parameters for GATK programs. Handles no removal of duplicate reads for amplicon or non mark duplicate experiments. If we have pre-aligned inputs we ignore the value or mark duplicates (since they may already be marked in the input BAM). """ out = [] def _skip_duplicates(data): return (dd.get_coverage_interval(data) == "amplicon" or (dd.get_aligner(data) and not dd.get_mark_duplicates(data))) if any(_skip_duplicates(d) for d in items): broad_runner = broad.runner_from_config(items[0]["config"]) gatk_type = broad_runner.gatk_type() if gatk_type == "gatk4": out += ["--disable-read-filter", "NotDuplicateReadFilter"] elif LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"): out += ["-drf", "DuplicateRead"] return out
python
def standard_cl_params(items): out = [] def _skip_duplicates(data): return (dd.get_coverage_interval(data) == "amplicon" or (dd.get_aligner(data) and not dd.get_mark_duplicates(data))) if any(_skip_duplicates(d) for d in items): broad_runner = broad.runner_from_config(items[0]["config"]) gatk_type = broad_runner.gatk_type() if gatk_type == "gatk4": out += ["--disable-read-filter", "NotDuplicateReadFilter"] elif LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"): out += ["-drf", "DuplicateRead"] return out
[ "def", "standard_cl_params", "(", "items", ")", ":", "out", "=", "[", "]", "def", "_skip_duplicates", "(", "data", ")", ":", "return", "(", "dd", ".", "get_coverage_interval", "(", "data", ")", "==", "\"amplicon\"", "or", "(", "dd", ".", "get_aligner", "...
Shared command line parameters for GATK programs. Handles no removal of duplicate reads for amplicon or non mark duplicate experiments. If we have pre-aligned inputs we ignore the value or mark duplicates (since they may already be marked in the input BAM).
[ "Shared", "command", "line", "parameters", "for", "GATK", "programs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L17-L36
236,985
bcbio/bcbio-nextgen
bcbio/variation/gatk.py
_shared_gatk_call_prep
def _shared_gatk_call_prep(align_bams, items, ref_file, region, out_file, num_cores=1): """Shared preparation work for GATK variant calling. """ data = items[0] config = data["config"] broad_runner = broad.runner_from_config(config) gatk_type = broad_runner.gatk_type() for x in align_bams: bam.index(x, config) picard_runner = broad.runner_from_path("picard", config) picard_runner.run_fn("picard_index_ref", ref_file) params = ["-R", ref_file] coverage_depth_min = tz.get_in(["algorithm", "coverage_depth_min"], config) if coverage_depth_min and coverage_depth_min < 4: confidence = "4.0" params += ["--standard_min_confidence_threshold_for_calling", confidence] for a in annotation.get_gatk_annotations(config): params += ["--annotation", a] for x in align_bams: params += ["-I", x] variant_regions = bedutils.population_variant_regions(items) region = subset_variant_regions(variant_regions, region, out_file, items) if region: if gatk_type == "gatk4": params += ["-L", bamprep.region_to_gatk(region), "--interval-set-rule", "INTERSECTION"] else: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] params += standard_cl_params(items) return broad_runner, params
python
def _shared_gatk_call_prep(align_bams, items, ref_file, region, out_file, num_cores=1): data = items[0] config = data["config"] broad_runner = broad.runner_from_config(config) gatk_type = broad_runner.gatk_type() for x in align_bams: bam.index(x, config) picard_runner = broad.runner_from_path("picard", config) picard_runner.run_fn("picard_index_ref", ref_file) params = ["-R", ref_file] coverage_depth_min = tz.get_in(["algorithm", "coverage_depth_min"], config) if coverage_depth_min and coverage_depth_min < 4: confidence = "4.0" params += ["--standard_min_confidence_threshold_for_calling", confidence] for a in annotation.get_gatk_annotations(config): params += ["--annotation", a] for x in align_bams: params += ["-I", x] variant_regions = bedutils.population_variant_regions(items) region = subset_variant_regions(variant_regions, region, out_file, items) if region: if gatk_type == "gatk4": params += ["-L", bamprep.region_to_gatk(region), "--interval-set-rule", "INTERSECTION"] else: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] params += standard_cl_params(items) return broad_runner, params
[ "def", "_shared_gatk_call_prep", "(", "align_bams", ",", "items", ",", "ref_file", ",", "region", ",", "out_file", ",", "num_cores", "=", "1", ")", ":", "data", "=", "items", "[", "0", "]", "config", "=", "data", "[", "\"config\"", "]", "broad_runner", "...
Shared preparation work for GATK variant calling.
[ "Shared", "preparation", "work", "for", "GATK", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L38-L66
236,986
bcbio/bcbio-nextgen
bcbio/variation/gatk.py
unified_genotyper
def unified_genotyper(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Perform SNP genotyping on the given alignment file. """ if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): broad_runner, params = \ _shared_gatk_call_prep(align_bams, items, ref_file, region, out_file) with file_transaction(items[0], out_file) as tx_out_file: params += ["-T", "UnifiedGenotyper", "-o", tx_out_file, "-ploidy", (str(ploidy.get_ploidy(items, region)) if broad_runner.gatk_type() == "restricted" else "2"), "--genotype_likelihoods_model", "BOTH"] resources = config_utils.get_resources("gatk", items[0]["config"]) if "options" in resources: params += [str(x) for x in resources.get("options", [])] broad_runner.run_gatk(params) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
python
def unified_genotyper(align_bams, items, ref_file, assoc_files, region=None, out_file=None): if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): broad_runner, params = \ _shared_gatk_call_prep(align_bams, items, ref_file, region, out_file) with file_transaction(items[0], out_file) as tx_out_file: params += ["-T", "UnifiedGenotyper", "-o", tx_out_file, "-ploidy", (str(ploidy.get_ploidy(items, region)) if broad_runner.gatk_type() == "restricted" else "2"), "--genotype_likelihoods_model", "BOTH"] resources = config_utils.get_resources("gatk", items[0]["config"]) if "options" in resources: params += [str(x) for x in resources.get("options", [])] broad_runner.run_gatk(params) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
[ "def", "unified_genotyper", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", ":", "out_file", "=", "\"%s-variants.vcf.gz\"", "%", "utils"...
Perform SNP genotyping on the given alignment file.
[ "Perform", "SNP", "genotyping", "on", "the", "given", "alignment", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L68-L87
236,987
bcbio/bcbio-nextgen
bcbio/variation/gatk.py
_joint_calling
def _joint_calling(items): """Determine if this call feeds downstream into joint calls. """ jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0]) if jointcaller: assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples" assert tz.get_in(("metadata", "batch"), items[0]) is not None, \ "Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0]) return jointcaller
python
def _joint_calling(items): jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0]) if jointcaller: assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples" assert tz.get_in(("metadata", "batch"), items[0]) is not None, \ "Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0]) return jointcaller
[ "def", "_joint_calling", "(", "items", ")", ":", "jointcaller", "=", "tz", ".", "get_in", "(", "(", "\"config\"", ",", "\"algorithm\"", ",", "\"jointcaller\"", ")", ",", "items", "[", "0", "]", ")", "if", "jointcaller", ":", "assert", "len", "(", "items"...
Determine if this call feeds downstream into joint calls.
[ "Determine", "if", "this", "call", "feeds", "downstream", "into", "joint", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L89-L97
236,988
bcbio/bcbio-nextgen
bcbio/variation/gatk.py
_supports_avx
def _supports_avx(): """Check for support for Intel AVX acceleration.""" if os.path.exists("/proc/cpuinfo"): with open("/proc/cpuinfo") as in_handle: for line in in_handle: if line.startswith("flags") and line.find("avx") > 0: return True
python
def _supports_avx(): if os.path.exists("/proc/cpuinfo"): with open("/proc/cpuinfo") as in_handle: for line in in_handle: if line.startswith("flags") and line.find("avx") > 0: return True
[ "def", "_supports_avx", "(", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "\"/proc/cpuinfo\"", ")", ":", "with", "open", "(", "\"/proc/cpuinfo\"", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswit...
Check for support for Intel AVX acceleration.
[ "Check", "for", "support", "for", "Intel", "AVX", "acceleration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L204-L210
236,989
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
jar_versioner
def jar_versioner(program_name, jar_name): """Retrieve version information based on jar file. """ def get_version(config): try: pdir = config_utils.get_program(program_name, config, "dir") # not configured except ValueError: return "" jar = os.path.basename(config_utils.get_jar(jar_name, pdir)) for to_remove in [jar_name, ".jar", "-standalone"]: jar = jar.replace(to_remove, "") if jar.startswith(("-", ".")): jar = jar[1:] if not jar: logger.warn("Unable to determine version for program '{}' from jar file {}".format( program_name, config_utils.get_jar(jar_name, pdir))) return jar return get_version
python
def jar_versioner(program_name, jar_name): def get_version(config): try: pdir = config_utils.get_program(program_name, config, "dir") # not configured except ValueError: return "" jar = os.path.basename(config_utils.get_jar(jar_name, pdir)) for to_remove in [jar_name, ".jar", "-standalone"]: jar = jar.replace(to_remove, "") if jar.startswith(("-", ".")): jar = jar[1:] if not jar: logger.warn("Unable to determine version for program '{}' from jar file {}".format( program_name, config_utils.get_jar(jar_name, pdir))) return jar return get_version
[ "def", "jar_versioner", "(", "program_name", ",", "jar_name", ")", ":", "def", "get_version", "(", "config", ")", ":", "try", ":", "pdir", "=", "config_utils", ".", "get_program", "(", "program_name", ",", "config", ",", "\"dir\"", ")", "# not configured", "...
Retrieve version information based on jar file.
[ "Retrieve", "version", "information", "based", "on", "jar", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L66-L84
236,990
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_cl_version
def _get_cl_version(p, config): """Retrieve version of a single commandline program. """ if not p.get("has_cl_version", True): return "" try: prog = config_utils.get_program(p["cmd"], config) except config_utils.CmdNotFound: localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"]) if os.path.exists(localpy_cmd): prog = localpy_cmd else: return "" args = p.get("args", "") cmd = "{prog} {args}" subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) with contextlib.closing(subp.stdout) as stdout: if p.get("stdout_flag"): v = _parse_from_stdoutflag(stdout, p["stdout_flag"]) elif p.get("paren_flag"): v = _parse_from_parenflag(stdout, p["paren_flag"]) else: lines = [l.strip() for l in str(stdout.read()).split("\n") if l.strip()] v = lines[-1] if v.endswith("."): v = v[:-1] return v
python
def _get_cl_version(p, config): if not p.get("has_cl_version", True): return "" try: prog = config_utils.get_program(p["cmd"], config) except config_utils.CmdNotFound: localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"]) if os.path.exists(localpy_cmd): prog = localpy_cmd else: return "" args = p.get("args", "") cmd = "{prog} {args}" subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) with contextlib.closing(subp.stdout) as stdout: if p.get("stdout_flag"): v = _parse_from_stdoutflag(stdout, p["stdout_flag"]) elif p.get("paren_flag"): v = _parse_from_parenflag(stdout, p["paren_flag"]) else: lines = [l.strip() for l in str(stdout.read()).split("\n") if l.strip()] v = lines[-1] if v.endswith("."): v = v[:-1] return v
[ "def", "_get_cl_version", "(", "p", ",", "config", ")", ":", "if", "not", "p", ".", "get", "(", "\"has_cl_version\"", ",", "True", ")", ":", "return", "\"\"", "try", ":", "prog", "=", "config_utils", ".", "get_program", "(", "p", "[", "\"cmd\"", "]", ...
Retrieve version of a single commandline program.
[ "Retrieve", "version", "of", "a", "single", "commandline", "program", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L115-L145
236,991
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_brew_versions
def _get_brew_versions(): """Retrieve versions of tools installed via brew. """ from bcbio import install tooldir = install.get_defaults().get("tooldir") brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew" try: vout = subprocess.check_output([brew_cmd, "list", "--versions"]) except OSError: # brew not installed/used vout = "" out = {} for vstr in vout.split("\n"): if vstr.strip(): parts = vstr.rstrip().split() name = parts[0] v = parts[-1] out[name] = v return out
python
def _get_brew_versions(): from bcbio import install tooldir = install.get_defaults().get("tooldir") brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew" try: vout = subprocess.check_output([brew_cmd, "list", "--versions"]) except OSError: # brew not installed/used vout = "" out = {} for vstr in vout.split("\n"): if vstr.strip(): parts = vstr.rstrip().split() name = parts[0] v = parts[-1] out[name] = v return out
[ "def", "_get_brew_versions", "(", ")", ":", "from", "bcbio", "import", "install", "tooldir", "=", "install", ".", "get_defaults", "(", ")", ".", "get", "(", "\"tooldir\"", ")", "brew_cmd", "=", "os", ".", "path", ".", "join", "(", "tooldir", ",", "\"bin\...
Retrieve versions of tools installed via brew.
[ "Retrieve", "versions", "of", "tools", "installed", "via", "brew", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L147-L164
236,992
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_versions
def _get_versions(config=None): """Retrieve details on all programs available on the system. """ try: from bcbio.pipeline import version if hasattr(version, "__version__"): bcbio_version = ("%s-%s" % (version.__version__, version.__git_revision__) if version.__git_revision__ else version.__version__) else: bcbio_version = "" except ImportError: bcbio_version = "" out = [{"program": "bcbio-nextgen", "version": bcbio_version}] manifest_dir = _get_manifest_dir(config) manifest_vs = _get_versions_manifest(manifest_dir) if manifest_dir else [] if manifest_vs: out += manifest_vs else: assert config is not None, "Need configuration to retrieve from non-manifest installs" brew_vs = _get_brew_versions() for p in _cl_progs: out.append({"program": p["cmd"], "version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else _get_cl_version(p, config))}) for p in _alt_progs: out.append({"program": p["name"], "version": (brew_vs[p["name"]] if p["name"] in brew_vs else p["version_fn"](config))}) out.sort(key=lambda x: x["program"]) return out
python
def _get_versions(config=None): try: from bcbio.pipeline import version if hasattr(version, "__version__"): bcbio_version = ("%s-%s" % (version.__version__, version.__git_revision__) if version.__git_revision__ else version.__version__) else: bcbio_version = "" except ImportError: bcbio_version = "" out = [{"program": "bcbio-nextgen", "version": bcbio_version}] manifest_dir = _get_manifest_dir(config) manifest_vs = _get_versions_manifest(manifest_dir) if manifest_dir else [] if manifest_vs: out += manifest_vs else: assert config is not None, "Need configuration to retrieve from non-manifest installs" brew_vs = _get_brew_versions() for p in _cl_progs: out.append({"program": p["cmd"], "version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else _get_cl_version(p, config))}) for p in _alt_progs: out.append({"program": p["name"], "version": (brew_vs[p["name"]] if p["name"] in brew_vs else p["version_fn"](config))}) out.sort(key=lambda x: x["program"]) return out
[ "def", "_get_versions", "(", "config", "=", "None", ")", ":", "try", ":", "from", "bcbio", ".", "pipeline", "import", "version", "if", "hasattr", "(", "version", ",", "\"__version__\"", ")", ":", "bcbio_version", "=", "(", "\"%s-%s\"", "%", "(", "version",...
Retrieve details on all programs available on the system.
[ "Retrieve", "details", "on", "all", "programs", "available", "on", "the", "system", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L166-L195
236,993
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_versions_manifest
def _get_versions_manifest(manifest_dir): """Retrieve versions from a pre-existing manifest of installed software. """ all_pkgs = _manifest_progs + [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs] if os.path.exists(manifest_dir): out = [] for plist in ["toolplus", "python", "r", "debian", "custom"]: pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist) if os.path.exists(pkg_file): with open(pkg_file) as in_handle: pkg_info = yaml.safe_load(in_handle) if not pkg_info: continue added = [] for pkg in all_pkgs: if pkg in pkg_info: added.append(pkg) out.append({"program": pkg, "version": pkg_info[pkg]["version"]}) for x in added: all_pkgs.remove(x) out.sort(key=lambda x: x["program"]) for pkg in all_pkgs: out.append({"program": pkg, "version": ""}) return out
python
def _get_versions_manifest(manifest_dir): all_pkgs = _manifest_progs + [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs] if os.path.exists(manifest_dir): out = [] for plist in ["toolplus", "python", "r", "debian", "custom"]: pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist) if os.path.exists(pkg_file): with open(pkg_file) as in_handle: pkg_info = yaml.safe_load(in_handle) if not pkg_info: continue added = [] for pkg in all_pkgs: if pkg in pkg_info: added.append(pkg) out.append({"program": pkg, "version": pkg_info[pkg]["version"]}) for x in added: all_pkgs.remove(x) out.sort(key=lambda x: x["program"]) for pkg in all_pkgs: out.append({"program": pkg, "version": ""}) return out
[ "def", "_get_versions_manifest", "(", "manifest_dir", ")", ":", "all_pkgs", "=", "_manifest_progs", "+", "[", "p", ".", "get", "(", "\"name\"", ",", "p", "[", "\"cmd\"", "]", ")", "for", "p", "in", "_cl_progs", "]", "+", "[", "p", "[", "\"name\"", "]",...
Retrieve versions from a pre-existing manifest of installed software.
[ "Retrieve", "versions", "from", "a", "pre", "-", "existing", "manifest", "of", "installed", "software", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L224-L247
236,994
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
write_versions
def write_versions(dirs, config=None, is_wrapper=False): """Write CSV file with versions used in analysis pipeline. """ out_file = _get_program_file(dirs) if is_wrapper: assert utils.file_exists(out_file), "Failed to create program versions from VM" elif out_file is None: for p in _get_versions(config): print("{program},{version}".format(**p)) else: with open(out_file, "w") as out_handle: for p in _get_versions(config): out_handle.write("{program},{version}\n".format(**p)) return out_file
python
def write_versions(dirs, config=None, is_wrapper=False): out_file = _get_program_file(dirs) if is_wrapper: assert utils.file_exists(out_file), "Failed to create program versions from VM" elif out_file is None: for p in _get_versions(config): print("{program},{version}".format(**p)) else: with open(out_file, "w") as out_handle: for p in _get_versions(config): out_handle.write("{program},{version}\n".format(**p)) return out_file
[ "def", "write_versions", "(", "dirs", ",", "config", "=", "None", ",", "is_wrapper", "=", "False", ")", ":", "out_file", "=", "_get_program_file", "(", "dirs", ")", "if", "is_wrapper", ":", "assert", "utils", ".", "file_exists", "(", "out_file", ")", ",", ...
Write CSV file with versions used in analysis pipeline.
[ "Write", "CSV", "file", "with", "versions", "used", "in", "analysis", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L254-L267
236,995
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
get_version_manifest
def get_version_manifest(name, data=None, required=False): """Retrieve a version from the currently installed manifest. """ manifest_dir = _get_manifest_dir(data, name) manifest_vs = _get_versions_manifest(manifest_dir) or [] for x in manifest_vs: if x["program"] == name: v = x.get("version", "") if v: return v if required: raise ValueError("Did not find %s in install manifest. Could not check version." % name) return ""
python
def get_version_manifest(name, data=None, required=False): manifest_dir = _get_manifest_dir(data, name) manifest_vs = _get_versions_manifest(manifest_dir) or [] for x in manifest_vs: if x["program"] == name: v = x.get("version", "") if v: return v if required: raise ValueError("Did not find %s in install manifest. Could not check version." % name) return ""
[ "def", "get_version_manifest", "(", "name", ",", "data", "=", "None", ",", "required", "=", "False", ")", ":", "manifest_dir", "=", "_get_manifest_dir", "(", "data", ",", "name", ")", "manifest_vs", "=", "_get_versions_manifest", "(", "manifest_dir", ")", "or"...
Retrieve a version from the currently installed manifest.
[ "Retrieve", "a", "version", "from", "the", "currently", "installed", "manifest", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L269-L281
236,996
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
add_subparser
def add_subparser(subparsers): """Add command line option for exporting version information. """ parser = subparsers.add_parser("version", help="Export versions of used software to stdout or a file ") parser.add_argument("--workdir", help="Directory export programs to in workdir/provenance/programs.txt", default=None)
python
def add_subparser(subparsers): parser = subparsers.add_parser("version", help="Export versions of used software to stdout or a file ") parser.add_argument("--workdir", help="Directory export programs to in workdir/provenance/programs.txt", default=None)
[ "def", "add_subparser", "(", "subparsers", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "\"version\"", ",", "help", "=", "\"Export versions of used software to stdout or a file \"", ")", "parser", ".", "add_argument", "(", "\"--workdir\"", ",", "help...
Add command line option for exporting version information.
[ "Add", "command", "line", "option", "for", "exporting", "version", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L283-L289
236,997
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
get_version
def get_version(name, dirs=None, config=None): """Retrieve the current version of the given program from cached names. """ if dirs: p = _get_program_file(dirs) else: p = tz.get_in(["resources", "program_versions"], config) if p: with open(p) as in_handle: for line in in_handle: prog, version = line.rstrip().split(",") if prog == name and version: return version raise KeyError("Version information not found for %s in %s" % (name, p))
python
def get_version(name, dirs=None, config=None): if dirs: p = _get_program_file(dirs) else: p = tz.get_in(["resources", "program_versions"], config) if p: with open(p) as in_handle: for line in in_handle: prog, version = line.rstrip().split(",") if prog == name and version: return version raise KeyError("Version information not found for %s in %s" % (name, p))
[ "def", "get_version", "(", "name", ",", "dirs", "=", "None", ",", "config", "=", "None", ")", ":", "if", "dirs", ":", "p", "=", "_get_program_file", "(", "dirs", ")", "else", ":", "p", "=", "tz", ".", "get_in", "(", "[", "\"resources\"", ",", "\"pr...
Retrieve the current version of the given program from cached names.
[ "Retrieve", "the", "current", "version", "of", "the", "given", "program", "from", "cached", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L291-L304
236,998
bcbio/bcbio-nextgen
scripts/utils/hlas_to_pgroups.py
hla_choices
def hla_choices(orig_hla, min_parts=2): """Provide a range of options for HLA type, with decreasing resolution. """ yield orig_hla try: int(orig_hla[-1]) except ValueError: yield orig_hla[:-1] hla_parts = orig_hla.split(":") for sub_i in range(len(hla_parts) - min_parts + 1): yield ":".join(hla_parts[:len(hla_parts) - sub_i])
python
def hla_choices(orig_hla, min_parts=2): yield orig_hla try: int(orig_hla[-1]) except ValueError: yield orig_hla[:-1] hla_parts = orig_hla.split(":") for sub_i in range(len(hla_parts) - min_parts + 1): yield ":".join(hla_parts[:len(hla_parts) - sub_i])
[ "def", "hla_choices", "(", "orig_hla", ",", "min_parts", "=", "2", ")", ":", "yield", "orig_hla", "try", ":", "int", "(", "orig_hla", "[", "-", "1", "]", ")", "except", "ValueError", ":", "yield", "orig_hla", "[", ":", "-", "1", "]", "hla_parts", "="...
Provide a range of options for HLA type, with decreasing resolution.
[ "Provide", "a", "range", "of", "options", "for", "HLA", "type", "with", "decreasing", "resolution", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hlas_to_pgroups.py#L35-L45
236,999
bcbio/bcbio-nextgen
scripts/utils/hlas_to_pgroups.py
read_pgroups
def read_pgroups(in_file): """Read HLAs and the pgroups they fall in. """ out = {} with open(in_file) as in_handle: for line in (l for l in in_handle if not l.startswith("#")): locus, alleles, group = line.strip().split(";") for allele in alleles.split("/"): out["HLA-%s%s" % (locus, allele)] = group return out
python
def read_pgroups(in_file): out = {} with open(in_file) as in_handle: for line in (l for l in in_handle if not l.startswith("#")): locus, alleles, group = line.strip().split(";") for allele in alleles.split("/"): out["HLA-%s%s" % (locus, allele)] = group return out
[ "def", "read_pgroups", "(", "in_file", ")", ":", "out", "=", "{", "}", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "(", "l", "for", "l", "in", "in_handle", "if", "not", "l", ".", "startswith", "(", "\"#\"", ")"...
Read HLAs and the pgroups they fall in.
[ "Read", "HLAs", "and", "the", "pgroups", "they", "fall", "in", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hlas_to_pgroups.py#L47-L56