repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
get_hla
def get_hla(sample, cromwell_dir, hla_glob): """Retrieve HLA calls and input fastqs for a sample. """ hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0] fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq") calls = os.path.join(hla_dir, "%s-optitype.csv" % sample) return fastq, calls
python
def get_hla(sample, cromwell_dir, hla_glob):
    """Locate the OptiType input fastq and HLA call CSV for a sample.

    Resolves the per-sample hla directory under the Cromwell work area
    and returns (fastq_path, calls_path).
    """
    pattern = os.path.join(cromwell_dir, hla_glob, "align", sample, "hla")
    hla_dir = glob.glob(pattern)[0]
    return (os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq"),
            os.path.join(hla_dir, "%s-optitype.csv" % sample))
[ "def", "get_hla", "(", "sample", ",", "cromwell_dir", ",", "hla_glob", ")", ":", "hla_dir", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "cromwell_dir", ",", "hla_glob", ",", "\"align\"", ",", "sample", ",", "\"hla\"", ")", ")", ...
Retrieve HLA calls and input fastqs for a sample.
[ "Retrieve", "HLA", "calls", "and", "input", "fastqs", "for", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L278-L284
train
219,100
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
name_to_absolute
def name_to_absolute(x): """Convert standard hg38 HLA name into ABSOLUTE naming. """ for c in ["-", "*", ":"]: x = x.replace(c, "_") x = x.lower() return x
python
def name_to_absolute(x):
    """Translate a standard hg38 HLA identifier into ABSOLUTE naming.

    Separators (-, *, :) become underscores and the result is lowercased,
    e.g. HLA-A*02:01 -> hla_a_02_01.
    """
    for sep in ("-", "*", ":"):
        x = x.replace(sep, "_")
    return x.lower()
[ "def", "name_to_absolute", "(", "x", ")", ":", "for", "c", "in", "[", "\"-\"", ",", "\"*\"", ",", "\":\"", "]", ":", "x", "=", "x", ".", "replace", "(", "c", ",", "\"_\"", ")", "x", "=", "x", ".", "lower", "(", ")", "return", "x" ]
Convert standard hg38 HLA name into ABSOLUTE naming.
[ "Convert", "standard", "hg38", "HLA", "name", "into", "ABSOLUTE", "naming", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L286-L292
train
219,101
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
get_hla_choice
def get_hla_choice(h, hlas, normal_bam, tumor_bam): """Retrieve matching HLA with best read support in both tumor and normal """ def get_counts(bam_file): counts = {} for line in subprocess.check_output(["samtools", "idxstats", bam_file]).split("\n"): if line.startswith(h): name, _, count, _ = line.split() counts[name] = int(count) return counts tcounts = get_counts(tumor_bam) ncounts = get_counts(normal_bam) check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0] cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True) #print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0])) return cur_hlas[0]
python
def get_hla_choice(h, hlas, normal_bam, tumor_bam):
    """Retrieve matching HLA with best read support in both tumor and normal.

    h -- ABSOLUTE-style HLA prefix (e.g. hla_a_02_01)
    hlas -- candidate HLA contig names present in the BAM indexes
    Returns the candidate starting with h, with read support in both BAMs,
    ranked by highest normal read count.
    """
    def get_counts(bam_file):
        # samtools idxstats emits "name<TAB>length<TAB>mapped<TAB>unmapped" rows.
        counts = {}
        # check_output returns bytes on Python 3; decode before line splitting,
        # otherwise .split("\n") raises TypeError.
        output = subprocess.check_output(["samtools", "idxstats", bam_file]).decode()
        for line in output.split("\n"):
            if line.startswith(h):
                name, _, count, _ = line.split()
                counts[name] = int(count)
        return counts
    tcounts = get_counts(tumor_bam)
    ncounts = get_counts(normal_bam)
    # Require read support in both samples, then rank by normal coverage.
    check_hlas = [x for x in hlas
                  if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0]
    cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True)
    return cur_hlas[0]
[ "def", "get_hla_choice", "(", "h", ",", "hlas", ",", "normal_bam", ",", "tumor_bam", ")", ":", "def", "get_counts", "(", "bam_file", ")", ":", "counts", "=", "{", "}", "for", "line", "in", "subprocess", ".", "check_output", "(", "[", "\"samtools\"", ",",...
Retrieve matching HLA with best read support in both tumor and normal
[ "Retrieve", "matching", "HLA", "with", "best", "read", "support", "in", "both", "tumor", "and", "normal" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L294-L309
train
219,102
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_hla
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam): """Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move """ work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample) with open(calls) as in_handle: with open(hla_file, "w") as out_handle: next(in_handle) # header for line in in_handle: _, _, a, _, _ = line.strip().split(",") a1, a2 = a.split(";") out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n") out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n") return hla_file
python
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam):
    """Convert HLAs into ABSOLUTE format for use with LOHHLA.

    LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move.
    Writes one chosen HLA contig name per line and returns the file path.
    """
    work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
    hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample)
    with open(calls) as in_handle, open(hla_file, "w") as out_handle:
        next(in_handle)  # header
        for line in in_handle:
            # OptiType CSV: third field holds the two alleles separated by ";"
            _, _, alleles, _, _ = line.strip().split(",")
            a1, a2 = alleles.split(";")
            for allele in (a1, a2):
                choice = get_hla_choice(name_to_absolute(allele), hlas,
                                        normal_bam, tumor_bam)
                out_handle.write(choice + "\n")
    return hla_file
[ "def", "prep_hla", "(", "work_dir", ",", "sample", ",", "calls", ",", "hlas", ",", "normal_bam", ",", "tumor_bam", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "sample", ",", "\"inp...
Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move
[ "Convert", "HLAs", "into", "ABSOLUTE", "format", "for", "use", "with", "LOHHLA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L311-L326
train
219,103
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_ploidy
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob): """Create LOHHLA compatible input ploidy file from PureCN output. """ purecn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=sample, method="purecn", ext="purecn.csv")) work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) out_file = os.path.join(work_dir, "%s-solutions.txt" % sample) with open(purecn_file) as in_handle: reader = csv.reader(in_handle) purecn_stats = dict(zip(next(reader), next(reader))) with open(out_file, "w") as out_handle: out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n") lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0] out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, purecn_stats["Ploidy"], purecn_stats["Purity"], purecn_stats["Ploidy"])) return out_file
python
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob):
    """Create LOHHLA compatible input ploidy file from PureCN output.
    """
    purecn_file = _get_cromwell_file(cromwell_dir, sv_glob,
                                     dict(sample=sample, method="purecn", ext="purecn.csv"))
    work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
    out_file = os.path.join(work_dir, "%s-solutions.txt" % sample)
    # PureCN writes a two line CSV: header row, then the chosen solution row.
    with open(purecn_file) as in_handle:
        reader = csv.reader(in_handle)
        stats = dict(zip(next(reader), next(reader)))
        lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0]
        with open(out_file, "w") as out_handle:
            # Three column names but four row fields: the leading field is the
            # row name -- presumably matching R read.table row-name convention
            # expected by LOHHLA (TODO confirm against LOHHLA docs).
            out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n")
            out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, stats["Ploidy"],
                                                   stats["Purity"], stats["Ploidy"]))
    return out_file
[ "def", "prep_ploidy", "(", "work_dir", ",", "sample", ",", "bam_file", ",", "cromwell_dir", ",", "sv_glob", ")", ":", "purecn_file", "=", "_get_cromwell_file", "(", "cromwell_dir", ",", "sv_glob", ",", "dict", "(", "sample", "=", "sample", ",", "method", "="...
Create LOHHLA compatible input ploidy file from PureCN output.
[ "Create", "LOHHLA", "compatible", "input", "ploidy", "file", "from", "PureCN", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L328-L342
train
219,104
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie.py
_bowtie_args_from_config
def _bowtie_args_from_config(data): """Configurable high level options for bowtie. """ config = data['config'] qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-M", 1] if multi_mappers else ["-m", 1] multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags cores = config.get("resources", {}).get("bowtie", {}).get("cores", None) num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + qual_flags + multi_flags
python
def _bowtie_args_from_config(data): """Configurable high level options for bowtie. """ config = data['config'] qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-M", 1] if multi_mappers else ["-m", 1] multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags cores = config.get("resources", {}).get("bowtie", {}).get("cores", None) num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + qual_flags + multi_flags
[ "def", "_bowtie_args_from_config", "(", "data", ")", ":", "config", "=", "data", "[", "'config'", "]", "qual_format", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"quality_format\"", ",", "\"\"", ")", "if", "qual_format", ".", "lower", "(", ...
Configurable high level options for bowtie.
[ "Configurable", "high", "level", "options", "for", "bowtie", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie.py#L13-L28
train
219,105
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie.py
align
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Do standard or paired end alignment with bowtie. """ num_hits = 1 if data["analysis"].lower().startswith("smallrna-seq"): num_hits = 1000 config = data['config'] out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if fastq_file.endswith(".gz"): fastq_file = "<(gunzip -c %s)" % fastq_file if pair_file: pair_file = "<(gunzip -c %s)" % pair_file if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie", config)] cl += _bowtie_args_from_config(data) cl += extra_args if extra_args is not None else [] cl += ["-q", "-v", 2, "-k", num_hits, "-X", 2000, # default is too selective for most data "--best", "--strata", "--sam", ref_file] if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += [fastq_file] cl = [str(i) for i in cl] fix_rg_cmd = r"samtools addreplacerg -r '%s' -" % novoalign.get_rg_info(data["rgnames"]) if fix_rg_cmd: cmd = " ".join(cl) + " | " + fix_rg_cmd + " | " + tobam_cl else: cmd = " ".join(cl) + " | " + tobam_cl do.run(cmd, "Running Bowtie on %s and %s." % (fastq_file, pair_file), data) return out_file
python
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None):
    """Do standard or paired end alignment with bowtie.

    Returns the path to the coordinate-sorted output BAM; skips alignment
    when the output (or the final combined split output) already exists.
    """
    # Small RNA-seq reports up to 1000 hit positions per read; other
    # analyses keep a single best hit.
    num_hits = 1
    if data["analysis"].lower().startswith("smallrna-seq"):
        num_hits = 1000
    config = data['config']
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
    if data.get("align_split"):
        # Split alignment: write a per-split file merged later into final_file,
        # reading the inputs through named pipe subsets.
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
    else:
        final_file = None
        # bowtie1 does not read gzipped fastq directly; stream via bash
        # process substitution instead.
        if fastq_file.endswith(".gz"):
            fastq_file = "<(gunzip -c %s)" % fastq_file
            if pair_file:
                pair_file = "<(gunzip -c %s)" % pair_file
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file):
            cl = [config_utils.get_program("bowtie", config)]
            cl += _bowtie_args_from_config(data)
            cl += extra_args if extra_args is not None else []
            cl += ["-q", "-v", 2,
                   "-k", num_hits,
                   "-X", 2000,  # default is too selective for most data
                   "--best",
                   "--strata",
                   "--sam",
                   ref_file]
            if pair_file:
                cl += ["-1", fastq_file, "-2", pair_file]
            else:
                cl += [fastq_file]
            cl = [str(i) for i in cl]
            # bowtie1 has no read group support; attach RG post-hoc with samtools.
            fix_rg_cmd = r"samtools addreplacerg -r '%s' -" % novoalign.get_rg_info(data["rgnames"])
            if fix_rg_cmd:
                cmd = " ".join(cl) + " | " + fix_rg_cmd + " | " + tobam_cl
            else:
                cmd = " ".join(cl) + " | " + tobam_cl
            do.run(cmd, "Running Bowtie on %s and %s." % (fastq_file, pair_file), data)
    return out_file
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ",", "extra_args", "=", "None", ")", ":", "num_hits", "=", "1", "if", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", ".", "s...
Do standard or paired end alignment with bowtie.
[ "Do", "standard", "or", "paired", "end", "alignment", "with", "bowtie", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie.py#L30-L74
train
219,106
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
subset_by_supported
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)): """Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types. """ support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name)) for c in convert.SUBSET_BY_SUPPORT["cnvkit"]] support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)] if len(support_files) == 0: return input_file else: out_file = os.path.join(work_dir, "%s-havesupport%s" % utils.splitext_plus(os.path.basename(input_file))) if not utils.file_uptodate(out_file, input_file): input_bed = _input_to_bed(input_file, work_dir, get_coords, headers) pass_coords = set([]) with file_transaction(data, out_file) as tx_out_file: support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files]) tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0] cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}" do.run(cmd.format(**locals()), "Intersect CNVs with support files") for r in pybedtools.BedTool(tmp_cmp_bed): pass_coords.add((str(r.chrom), str(r.start), str(r.stop))) with open(input_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: passes = True if not line.startswith(headers): passes = get_coords(line) in pass_coords if passes: out_handle.write(line) return out_file
python
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)):
    """Limit CNVkit input to calls with support from another caller.

    get_coords is a function that returns chrom, start, end from a line of
    the input_file, allowing handling of multiple input file types.
    Returns input_file unchanged when no support callers produced variants.
    """
    # Pair each configured support caller with its VCF, keeping only callers
    # that actually produced variants.
    support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name))
                     for c in convert.SUBSET_BY_SUPPORT["cnvkit"]]
    support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)]
    if len(support_files) == 0:
        return input_file
    else:
        out_file = os.path.join(work_dir, "%s-havesupport%s" %
                                utils.splitext_plus(os.path.basename(input_file)))
        if not utils.file_uptodate(out_file, input_file):
            input_bed = _input_to_bed(input_file, work_dir, get_coords, headers)
            pass_coords = set([])
            with file_transaction(data, out_file) as tx_out_file:
                support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files])
                tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0]
                # Keep intervals with >= 50% reciprocal overlap (-f 0.5 -r)
                # against any support caller's calls.
                cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}"
                do.run(cmd.format(**locals()), "Intersect CNVs with support files")
                for r in pybedtools.BedTool(tmp_cmp_bed):
                    pass_coords.add((str(r.chrom), str(r.start), str(r.stop)))
                # Rewrite the original input, passing headers through and
                # keeping only supported coordinates.
                with open(input_file) as in_handle:
                    with open(tx_out_file, "w") as out_handle:
                        for line in in_handle:
                            passes = True
                            if not line.startswith(headers):
                                passes = get_coords(line) in pass_coords
                            if passes:
                                out_handle.write(line)
        return out_file
[ "def", "subset_by_supported", "(", "input_file", ",", "get_coords", ",", "calls_by_name", ",", "work_dir", ",", "data", ",", "headers", "=", "(", "\"#\"", ",", ")", ")", ":", "support_files", "=", "[", "(", "c", ",", "tz", ".", "get_in", "(", "[", "c",...
Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types.
[ "Limit", "CNVkit", "input", "to", "calls", "with", "support", "from", "another", "caller", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L43-L76
train
219,107
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_input_to_bed
def _input_to_bed(theta_input, work_dir, get_coords, headers): """Convert input file to a BED file for comparisons """ theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0]) with open(theta_input) as in_handle: with open(theta_bed, "w") as out_handle: for line in in_handle: if not line.startswith(headers): chrom, start, end = get_coords(line) out_handle.write("\t".join([chrom, start, end]) + "\n") return theta_bed
python
def _input_to_bed(theta_input, work_dir, get_coords, headers): """Convert input file to a BED file for comparisons """ theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0]) with open(theta_input) as in_handle: with open(theta_bed, "w") as out_handle: for line in in_handle: if not line.startswith(headers): chrom, start, end = get_coords(line) out_handle.write("\t".join([chrom, start, end]) + "\n") return theta_bed
[ "def", "_input_to_bed", "(", "theta_input", ",", "work_dir", ",", "get_coords", ",", "headers", ")", ":", "theta_bed", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.bed\"", "%", "os", ".", "path", ".", "splitext", "(", "os", ".", "pa...
Convert input file to a BED file for comparisons
[ "Convert", "input", "file", "to", "a", "BED", "file", "for", "comparisons" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L88-L98
train
219,108
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_run_theta
def _run_theta(cnv_info, data, work_dir, run_n3=True): """Run theta, calculating subpopulations and normal contamination. """ out = {"caller": "theta"} max_normal = "0.9" opts = ["-m", max_normal] n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"), ".n2.results", ["-n", "2"] + opts, data) if n2_result: out["estimate"] = n2_result if run_n3: n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0] n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results", ["-n", "3", "--RESULTS", n2_result] + opts, data) if n3_result: best_result = _select_model(n2_bounds, n2_result, n3_result, os.path.join(work_dir, "n3"), data) out["estimate"] = best_result out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data) return out
python
def _run_theta(cnv_info, data, work_dir, run_n3=True):
    """Run theta, calculating subpopulations and normal contamination.

    Tries the n=2 model first; optionally also fits n=3 and keeps the
    better model, merging CNV calls for the winner.
    """
    out = {"caller": "theta"}
    opts = ["-m", "0.9"]  # cap the allowed normal contamination
    n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"),
                                ".n2.results", ["-n", "2"] + opts, data)
    if not n2_result:
        return out
    out["estimate"] = n2_result
    if run_n3:
        n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0]
        n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results",
                                    ["-n", "3", "--RESULTS", n2_result] + opts, data)
        if n3_result:
            best_result = _select_model(n2_bounds, n2_result, n3_result,
                                        os.path.join(work_dir, "n3"), data)
            out["estimate"] = best_result
            out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data)
    return out
[ "def", "_run_theta", "(", "cnv_info", ",", "data", ",", "work_dir", ",", "run_n3", "=", "True", ")", ":", "out", "=", "{", "\"caller\"", ":", "\"theta\"", "}", "max_normal", "=", "\"0.9\"", "opts", "=", "[", "\"-m\"", ",", "max_normal", "]", "n2_result",...
Run theta, calculating subpopulations and normal contamination.
[ "Run", "theta", "calculating", "subpopulations", "and", "normal", "contamination", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L100-L120
train
219,109
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_update_with_calls
def _update_with_calls(result_file, cnv_file): """Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA. """ results = {} with open(result_file) as in_handle: in_handle.readline() # header _, _, cs, ps = in_handle.readline().strip().split() for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))): results[i] = (c, p) cnvs = {} with open(cnv_file) as in_handle: for line in in_handle: chrom, start, end, _, count = line.rstrip().split()[:5] cnvs[(chrom, start, end)] = count def update(i, line): parts = line.rstrip().split("\t") chrom, start, end = parts[1:4] parts += cnvs.get((chrom, start, end), ".") parts += list(results[i]) return "\t".join(parts) + "\n" return update
python
def _update_with_calls(result_file, cnv_file): """Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA. """ results = {} with open(result_file) as in_handle: in_handle.readline() # header _, _, cs, ps = in_handle.readline().strip().split() for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))): results[i] = (c, p) cnvs = {} with open(cnv_file) as in_handle: for line in in_handle: chrom, start, end, _, count = line.rstrip().split()[:5] cnvs[(chrom, start, end)] = count def update(i, line): parts = line.rstrip().split("\t") chrom, start, end = parts[1:4] parts += cnvs.get((chrom, start, end), ".") parts += list(results[i]) return "\t".join(parts) + "\n" return update
[ "def", "_update_with_calls", "(", "result_file", ",", "cnv_file", ")", ":", "results", "=", "{", "}", "with", "open", "(", "result_file", ")", "as", "in_handle", ":", "in_handle", ".", "readline", "(", ")", "# header", "_", ",", "_", ",", "cs", ",", "p...
Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA.
[ "Update", "bounds", "with", "calls", "from", "CNVkit", "inferred", "copy", "numbers", "and", "p", "-", "values", "from", "THetA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L122-L142
train
219,110
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_merge_theta_calls
def _merge_theta_calls(bounds_file, result_file, cnv_file, data): """Create a final output file with merged CNVkit and THetA copy and population estimates. """ out_file = "%s-merged.txt" % (result_file.replace(".BEST.results", "")) if not utils.file_uptodate(out_file, result_file): with file_transaction(data, out_file) as tx_out_file: updater = _update_with_calls(result_file, cnv_file) with open(bounds_file) as in_handle: with open(tx_out_file, "w") as out_handle: i = 0 for line in in_handle: if line.startswith("#"): parts = line.rstrip().split("\t") parts += ["cnv", "pop_cnvs", "pop_pvals"] out_handle.write("\t".join(parts) + "\n") else: out_handle.write(updater(i, line)) i += 1 return out_file
python
def _merge_theta_calls(bounds_file, result_file, cnv_file, data):
    """Create a final output file with merged CNVkit and THetA copy and population estimates.
    """
    out_file = "%s-merged.txt" % (result_file.replace(".BEST.results", ""))
    if not utils.file_uptodate(out_file, result_file):
        with file_transaction(data, out_file) as tx_out_file:
            updater = _update_with_calls(result_file, cnv_file)
            with open(bounds_file) as in_handle, open(tx_out_file, "w") as out_handle:
                # Interval index advances only on data lines so it matches
                # the THetA per-interval results.
                interval = 0
                for line in in_handle:
                    if line.startswith("#"):
                        header = line.rstrip().split("\t") + ["cnv", "pop_cnvs", "pop_pvals"]
                        out_handle.write("\t".join(header) + "\n")
                    else:
                        out_handle.write(updater(interval, line))
                        interval += 1
    return out_file
[ "def", "_merge_theta_calls", "(", "bounds_file", ",", "result_file", ",", "cnv_file", ",", "data", ")", ":", "out_file", "=", "\"%s-merged.txt\"", "%", "(", "result_file", ".", "replace", "(", "\".BEST.results\"", ",", "\"\"", ")", ")", "if", "not", "utils", ...
Create a final output file with merged CNVkit and THetA copy and population estimates.
[ "Create", "a", "final", "output", "file", "with", "merged", "CNVkit", "and", "THetA", "copy", "and", "population", "estimates", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L144-L162
train
219,111
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_select_model
def _select_model(n2_bounds, n2_result, n3_result, out_dir, data): """Run final model selection from n=2 and n=3 options. """ n2_out_file = n2_result.replace(".n2.results", ".BEST.results") n3_out_file = n3_result.replace(".n3.results", ".BEST.results") if not utils.file_exists(n2_out_file) and not utils.file_exists(n3_out_file): cmd = _get_cmd("ModelSelection.py") + [n2_bounds, n2_result, n3_result] do.run(cmd, "Select best THetA model") if utils.file_exists(n2_out_file): return n2_out_file else: assert utils.file_exists(n3_out_file) return n3_out_file
python
def _select_model(n2_bounds, n2_result, n3_result, out_dir, data):
    """Run final model selection from n=2 and n=3 options.

    ModelSelection.py writes a single .BEST.results file next to the
    winning model; return whichever one exists.
    """
    n2_out_file = n2_result.replace(".n2.results", ".BEST.results")
    n3_out_file = n3_result.replace(".n3.results", ".BEST.results")
    if not (utils.file_exists(n2_out_file) or utils.file_exists(n3_out_file)):
        do.run(_get_cmd("ModelSelection.py") + [n2_bounds, n2_result, n3_result],
               "Select best THetA model")
    if utils.file_exists(n2_out_file):
        return n2_out_file
    assert utils.file_exists(n3_out_file)
    return n3_out_file
[ "def", "_select_model", "(", "n2_bounds", ",", "n2_result", ",", "n3_result", ",", "out_dir", ",", "data", ")", ":", "n2_out_file", "=", "n2_result", ".", "replace", "(", "\".n2.results\"", ",", "\".BEST.results\"", ")", "n3_out_file", "=", "n3_result", ".", "...
Run final model selection from n=2 and n=3 options.
[ "Run", "final", "model", "selection", "from", "n", "=", "2", "and", "n", "=", "3", "options", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L164-L176
train
219,112
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_safe_run_theta
def _safe_run_theta(input_file, out_dir, output_ext, args, data): """Run THetA, catching and continuing on any errors. """ out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext) skip_file = out_file + ".skipped" if utils.file_exists(skip_file): return None if not utils.file_exists(out_file): with file_transaction(data, out_dir) as tx_out_dir: utils.safe_makedir(tx_out_dir) cmd = _get_cmd("RunTHetA.py") + args + \ [input_file, "--NUM_PROCESSES", dd.get_cores(data), "--FORCE", "-d", tx_out_dir] try: do.run(cmd, "Run THetA to calculate purity", log_error=False) except subprocess.CalledProcessError as msg: if ("Number of intervals must be greater than 1" in str(msg) or "This sample isn't a good candidate for THetA analysis" in str(msg)): with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle: out_handle.write("Expected TheTA failure, skipping") return None else: raise return out_file
python
def _safe_run_theta(input_file, out_dir, output_ext, args, data):
    """Run THetA, catching and continuing on any errors.

    Returns the results file path, or None when THetA reported an expected
    data-dependent failure. Such failures are recorded via a sentinel
    .skipped file so reruns also skip the sample without retrying.
    """
    out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext)
    skip_file = out_file + ".skipped"
    if utils.file_exists(skip_file):
        return None
    if not utils.file_exists(out_file):
        with file_transaction(data, out_dir) as tx_out_dir:
            utils.safe_makedir(tx_out_dir)
            cmd = _get_cmd("RunTHetA.py") + args + \
                  [input_file, "--NUM_PROCESSES", dd.get_cores(data),
                   "--FORCE", "-d", tx_out_dir]
            try:
                do.run(cmd, "Run THetA to calculate purity", log_error=False)
            except subprocess.CalledProcessError as msg:
                # Known data-dependent THetA failures: mark the sample as
                # skipped instead of failing the whole pipeline.
                if ("Number of intervals must be greater than 1" in str(msg) or
                        "This sample isn't a good candidate for THetA analysis" in str(msg)):
                    # Write the sentinel inside the transactional dir so it
                    # only lands on clean completion of the transaction.
                    with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle:
                        out_handle.write("Expected TheTA failure, skipping")
                    return None
                else:
                    raise
    return out_file
[ "def", "_safe_run_theta", "(", "input_file", ",", "out_dir", ",", "output_ext", ",", "args", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "_split_theta_ext", "(", "input_file", ")", "+", "output_ext", ")", ...
Run THetA, catching and continuing on any errors.
[ "Run", "THetA", "catching", "and", "continuing", "on", "any", "errors", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L178-L201
train
219,113
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_get_cmd
def _get_cmd(cmd): """Retrieve required commands for running THetA with our local bcbio python. """ check_cmd = "RunTHetA.py" try: local_cmd = subprocess.check_output(["which", check_cmd]).strip() except subprocess.CalledProcessError: return None return [sys.executable, "%s/%s" % (os.path.dirname(os.path.realpath(local_cmd)), cmd)]
python
def _get_cmd(cmd): """Retrieve required commands for running THetA with our local bcbio python. """ check_cmd = "RunTHetA.py" try: local_cmd = subprocess.check_output(["which", check_cmd]).strip() except subprocess.CalledProcessError: return None return [sys.executable, "%s/%s" % (os.path.dirname(os.path.realpath(local_cmd)), cmd)]
[ "def", "_get_cmd", "(", "cmd", ")", ":", "check_cmd", "=", "\"RunTHetA.py\"", "try", ":", "local_cmd", "=", "subprocess", ".", "check_output", "(", "[", "\"which\"", ",", "check_cmd", "]", ")", ".", "strip", "(", ")", "except", "subprocess", ".", "CalledPr...
Retrieve required commands for running THetA with our local bcbio python.
[ "Retrieve", "required", "commands", "for", "running", "THetA", "with", "our", "local", "bcbio", "python", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L213-L221
train
219,114
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
run
def run(data): """Proxy function to run the tool""" sample = data[0][0] work_dir = dd.get_work_dir(sample) out_dir = os.path.join(work_dir, "mirge") lib = _find_lib(sample) mirge = _find_mirge(sample) bowtie = _find_bowtie(sample) sps = dd.get_species(sample) species = SPS.get(sps, "") if not species: raise ValueError("species not supported (hsa, mmu, rno, dre, cel, dme): %s" % sps) if not lib: raise ValueError("-lib option is not set up in resources for mirge tool." " Read above warnings lines.") if not utils.file_exists(out_dir): with tx_tmpdir() as tmp_dir: sample_file = _create_sample_file(data, tmp_dir) do.run(_cmd().format(**locals()), "Running miRge2.0.") shutil.move(tmp_dir, out_dir) return [os.path.abspath(fn) for fn in glob.glob(os.path.join(out_dir, "*", "*"))]
python
def run(data): """Proxy function to run the tool""" sample = data[0][0] work_dir = dd.get_work_dir(sample) out_dir = os.path.join(work_dir, "mirge") lib = _find_lib(sample) mirge = _find_mirge(sample) bowtie = _find_bowtie(sample) sps = dd.get_species(sample) species = SPS.get(sps, "") if not species: raise ValueError("species not supported (hsa, mmu, rno, dre, cel, dme): %s" % sps) if not lib: raise ValueError("-lib option is not set up in resources for mirge tool." " Read above warnings lines.") if not utils.file_exists(out_dir): with tx_tmpdir() as tmp_dir: sample_file = _create_sample_file(data, tmp_dir) do.run(_cmd().format(**locals()), "Running miRge2.0.") shutil.move(tmp_dir, out_dir) return [os.path.abspath(fn) for fn in glob.glob(os.path.join(out_dir, "*", "*"))]
[ "def", "run", "(", "data", ")", ":", "sample", "=", "data", "[", "0", "]", "[", "0", "]", "work_dir", "=", "dd", ".", "get_work_dir", "(", "sample", ")", "out_dir", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"mirge\"", ")", "lib...
Proxy function to run the tool
[ "Proxy", "function", "to", "run", "the", "tool" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L20-L41
train
219,115
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
_create_sample_file
def _create_sample_file(data, out_dir): """from data list all the fastq files in a file""" sample_file = os.path.join(out_dir, "sample_file.txt") with open(sample_file, 'w') as outh: for sample in data: outh.write(sample[0]["clean_fastq"] + "\n") return sample_file
python
def _create_sample_file(data, out_dir): """from data list all the fastq files in a file""" sample_file = os.path.join(out_dir, "sample_file.txt") with open(sample_file, 'w') as outh: for sample in data: outh.write(sample[0]["clean_fastq"] + "\n") return sample_file
[ "def", "_create_sample_file", "(", "data", ",", "out_dir", ")", ":", "sample_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"sample_file.txt\"", ")", "with", "open", "(", "sample_file", ",", "'w'", ")", "as", "outh", ":", "for", "samp...
from data list all the fastq files in a file
[ "from", "data", "list", "all", "the", "fastq", "files", "in", "a", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L55-L61
train
219,116
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
_find_lib
def _find_lib(data): """Find mirge libs""" options = " ".join(data.get('resources', {}).get('mirge', {}).get("options", "")) if options.find("-lib") > -1 and utils.file_exists(options.split()[1]): return options if not options: logger.warning("miRge libraries not found. Follow these instructions to install them:") logger.warning("https://github.com/mhalushka/miRge#download-libraries") logger.warning("Then, pass -lib LIB_PATH with resourcces:mirge:options:[...]") logger.warning("More information: https://bcbio-nextgen.readthedocs.io/en/latest/contents/pipelines.html#smallrna-seq")
python
def _find_lib(data): """Find mirge libs""" options = " ".join(data.get('resources', {}).get('mirge', {}).get("options", "")) if options.find("-lib") > -1 and utils.file_exists(options.split()[1]): return options if not options: logger.warning("miRge libraries not found. Follow these instructions to install them:") logger.warning("https://github.com/mhalushka/miRge#download-libraries") logger.warning("Then, pass -lib LIB_PATH with resourcces:mirge:options:[...]") logger.warning("More information: https://bcbio-nextgen.readthedocs.io/en/latest/contents/pipelines.html#smallrna-seq")
[ "def", "_find_lib", "(", "data", ")", ":", "options", "=", "\" \"", ".", "join", "(", "data", ".", "get", "(", "'resources'", ",", "{", "}", ")", ".", "get", "(", "'mirge'", ",", "{", "}", ")", ".", "get", "(", "\"options\"", ",", "\"\"", ")", ...
Find mirge libs
[ "Find", "mirge", "libs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L71-L80
train
219,117
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_input_sequence_files
def get_input_sequence_files(data, default=None): """ returns the input sequencing files, these can be single or paired FASTQ files or BAM files """ if "files" not in data or data.get("files") is None: file1, file2 = None, None elif len(data["files"]) == 2: file1, file2 = data["files"] else: assert len(data["files"]) == 1, data["files"] file1, file2 = data["files"][0], None return file1, file2
python
def get_input_sequence_files(data, default=None): """ returns the input sequencing files, these can be single or paired FASTQ files or BAM files """ if "files" not in data or data.get("files") is None: file1, file2 = None, None elif len(data["files"]) == 2: file1, file2 = data["files"] else: assert len(data["files"]) == 1, data["files"] file1, file2 = data["files"][0], None return file1, file2
[ "def", "get_input_sequence_files", "(", "data", ",", "default", "=", "None", ")", ":", "if", "\"files\"", "not", "in", "data", "or", "data", ".", "get", "(", "\"files\"", ")", "is", "None", ":", "file1", ",", "file2", "=", "None", ",", "None", "elif", ...
returns the input sequencing files, these can be single or paired FASTQ files or BAM files
[ "returns", "the", "input", "sequencing", "files", "these", "can", "be", "single", "or", "paired", "FASTQ", "files", "or", "BAM", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L223-L235
train
219,118
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_umi_consensus
def get_umi_consensus(data): """Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.` """ consensus_choices = (["fastq_name"]) umi = tz.get_in(["config", "algorithm", "umi_type"], data) # don't run consensus UMI calling for scrna-seq if tz.get_in(["analysis"], data, "").lower() == "scrna-seq": return False if umi and (umi in consensus_choices or os.path.exists(umi)): assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \ "Using consensus UMI inputs requires marking duplicates" return umi
python
def get_umi_consensus(data): """Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.` """ consensus_choices = (["fastq_name"]) umi = tz.get_in(["config", "algorithm", "umi_type"], data) # don't run consensus UMI calling for scrna-seq if tz.get_in(["analysis"], data, "").lower() == "scrna-seq": return False if umi and (umi in consensus_choices or os.path.exists(umi)): assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \ "Using consensus UMI inputs requires marking duplicates" return umi
[ "def", "get_umi_consensus", "(", "data", ")", ":", "consensus_choices", "=", "(", "[", "\"fastq_name\"", "]", ")", "umi", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"umi_type\"", "]", ",", "data", ")", "# don't run consens...
Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.`
[ "Retrieve", "UMI", "for", "consensus", "based", "preparation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L237-L251
train
219,119
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_dexseq_gff
def get_dexseq_gff(config, default=None): """ some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one """ dexseq_gff = tz.get_in(tz.get_in(['dexseq_gff', 'keys'], LOOKUPS, {}), config, None) if not dexseq_gff: return None gtf_file = get_gtf_file(config) if gtf_file: base_dir = os.path.dirname(gtf_file) else: base_dir = os.path.dirname(dexseq_gff) base, _ = os.path.splitext(dexseq_gff) gff_file = os.path.join(base_dir, base + ".gff") if file_exists(gff_file): return gff_file gtf_file = os.path.join(base_dir, base + ".gff3") if file_exists(gtf_file): return gtf_file else: return None
python
def get_dexseq_gff(config, default=None): """ some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one """ dexseq_gff = tz.get_in(tz.get_in(['dexseq_gff', 'keys'], LOOKUPS, {}), config, None) if not dexseq_gff: return None gtf_file = get_gtf_file(config) if gtf_file: base_dir = os.path.dirname(gtf_file) else: base_dir = os.path.dirname(dexseq_gff) base, _ = os.path.splitext(dexseq_gff) gff_file = os.path.join(base_dir, base + ".gff") if file_exists(gff_file): return gff_file gtf_file = os.path.join(base_dir, base + ".gff3") if file_exists(gtf_file): return gtf_file else: return None
[ "def", "get_dexseq_gff", "(", "config", ",", "default", "=", "None", ")", ":", "dexseq_gff", "=", "tz", ".", "get_in", "(", "tz", ".", "get_in", "(", "[", "'dexseq_gff'", ",", "'keys'", "]", ",", "LOOKUPS", ",", "{", "}", ")", ",", "config", ",", "...
some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one
[ "some", "older", "versions", "of", "the", "genomes", "have", "the", "DEXseq", "gff", "file", "as", "gff", "instead", "of", "gff3", "so", "this", "handles", "that", "by", "looking", "for", "either", "one" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L253-L275
train
219,120
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_in_samples
def get_in_samples(samples, fn): """ for a list of samples, return the value of a global option """ for sample in samples: sample = to_single_data(sample) if fn(sample, None): return fn(sample) return None
python
def get_in_samples(samples, fn): """ for a list of samples, return the value of a global option """ for sample in samples: sample = to_single_data(sample) if fn(sample, None): return fn(sample) return None
[ "def", "get_in_samples", "(", "samples", ",", "fn", ")", ":", "for", "sample", "in", "samples", ":", "sample", "=", "to_single_data", "(", "sample", ")", "if", "fn", "(", "sample", ",", "None", ")", ":", "return", "fn", "(", "sample", ")", "return", ...
for a list of samples, return the value of a global option
[ "for", "a", "list", "of", "samples", "return", "the", "value", "of", "a", "global", "option" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L329-L337
train
219,121
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
update_summary_qc
def update_summary_qc(data, key, base=None, secondary=None): """ updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc """ summary = get_summary_qc(data, {}) if base and secondary: summary[key] = {"base": base, "secondary": secondary} elif base: summary[key] = {"base": base} elif secondary: summary[key] = {"secondary": secondary} data = set_summary_qc(data, summary) return data
python
def update_summary_qc(data, key, base=None, secondary=None): """ updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc """ summary = get_summary_qc(data, {}) if base and secondary: summary[key] = {"base": base, "secondary": secondary} elif base: summary[key] = {"base": base} elif secondary: summary[key] = {"secondary": secondary} data = set_summary_qc(data, summary) return data
[ "def", "update_summary_qc", "(", "data", ",", "key", ",", "base", "=", "None", ",", "secondary", "=", "None", ")", ":", "summary", "=", "get_summary_qc", "(", "data", ",", "{", "}", ")", "if", "base", "and", "secondary", ":", "summary", "[", "key", "...
updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc
[ "updates", "summary_qc", "with", "a", "new", "section", "keyed", "by", "key", ".", "stick", "files", "into", "summary_qc", "if", "you", "want", "them", "propagated", "forward", "and", "available", "for", "multiqc" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L351-L365
train
219,122
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
has_variantcalls
def has_variantcalls(data): """ returns True if the data dictionary is configured for variant calling """ analysis = get_analysis(data).lower() variant_pipeline = analysis.startswith(("standard", "variant", "variant2")) variantcaller = get_variantcaller(data) return variant_pipeline or variantcaller
python
def has_variantcalls(data): """ returns True if the data dictionary is configured for variant calling """ analysis = get_analysis(data).lower() variant_pipeline = analysis.startswith(("standard", "variant", "variant2")) variantcaller = get_variantcaller(data) return variant_pipeline or variantcaller
[ "def", "has_variantcalls", "(", "data", ")", ":", "analysis", "=", "get_analysis", "(", "data", ")", ".", "lower", "(", ")", "variant_pipeline", "=", "analysis", ".", "startswith", "(", "(", "\"standard\"", ",", "\"variant\"", ",", "\"variant2\"", ")", ")", ...
returns True if the data dictionary is configured for variant calling
[ "returns", "True", "if", "the", "data", "dictionary", "is", "configured", "for", "variant", "calling" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L367-L374
train
219,123
bcbio/bcbio-nextgen
bcbio/rnaseq/qc.py
estimate_library_complexity
def estimate_library_complexity(df, algorithm="RNA-seq"): """ estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line """ DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)} cutoffs = DEFAULT_CUTOFFS[algorithm] if len(df) < 5: return {"unique_starts_per_read": 'nan', "complexity": "NA"} model = sm.ols(formula="starts ~ reads", data=df) fitted = model.fit() slope = fitted.params["reads"] if slope <= cutoffs[0]: complexity = "LOW" elif slope <= cutoffs[1]: complexity = "MEDIUM" else: complexity = "HIGH" # for now don't return the complexity flag return {"Unique Starts Per Read": float(slope)}
python
def estimate_library_complexity(df, algorithm="RNA-seq"): """ estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line """ DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)} cutoffs = DEFAULT_CUTOFFS[algorithm] if len(df) < 5: return {"unique_starts_per_read": 'nan', "complexity": "NA"} model = sm.ols(formula="starts ~ reads", data=df) fitted = model.fit() slope = fitted.params["reads"] if slope <= cutoffs[0]: complexity = "LOW" elif slope <= cutoffs[1]: complexity = "MEDIUM" else: complexity = "HIGH" # for now don't return the complexity flag return {"Unique Starts Per Read": float(slope)}
[ "def", "estimate_library_complexity", "(", "df", ",", "algorithm", "=", "\"RNA-seq\"", ")", ":", "DEFAULT_CUTOFFS", "=", "{", "\"RNA-seq\"", ":", "(", "0.25", ",", "0.40", ")", "}", "cutoffs", "=", "DEFAULT_CUTOFFS", "[", "algorithm", "]", "if", "len", "(", ...
estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line
[ "estimate", "library", "complexity", "from", "the", "number", "of", "reads", "vs", ".", "number", "of", "unique", "start", "sites", ".", "returns", "NA", "if", "there", "are", "not", "enough", "data", "points", "to", "fit", "the", "line" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/qc.py#L42-L64
train
219,124
bcbio/bcbio-nextgen
bcbio/galaxy/api.py
GalaxyApiAccess.run_details
def run_details(self, run_bc, run_date=None): """Next Gen LIMS specific API functionality. """ try: details = self._get("/nglims/api_run_details", dict(run=run_bc)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_bc) if "error" in details and run_date is not None: try: details = self._get("/nglims/api_run_details", dict(run=run_date)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_date) return details
python
def run_details(self, run_bc, run_date=None): """Next Gen LIMS specific API functionality. """ try: details = self._get("/nglims/api_run_details", dict(run=run_bc)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_bc) if "error" in details and run_date is not None: try: details = self._get("/nglims/api_run_details", dict(run=run_date)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_date) return details
[ "def", "run_details", "(", "self", ",", "run_bc", ",", "run_date", "=", "None", ")", ":", "try", ":", "details", "=", "self", ".", "_get", "(", "\"/nglims/api_run_details\"", ",", "dict", "(", "run", "=", "run_bc", ")", ")", "except", "ValueError", ":", ...
Next Gen LIMS specific API functionality.
[ "Next", "Gen", "LIMS", "specific", "API", "functionality", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/api.py#L52-L64
train
219,125
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
fixrg
def fixrg(in_bam, names, ref_file, dirs, data): """Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data))) out_file = os.path.join(work_dir, "%s-fixrg.bam" % utils.splitext_plus(os.path.basename(in_bam))[0]) if not utils.file_exists(out_file): out_file = os.path.join(work_dir, "%s-fixrg.bam" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, in_bam): with file_transaction(data, out_file) as tx_out_file: rg_info = novoalign.get_rg_info(names) new_header = "%s-header.txt" % os.path.splitext(out_file)[0] cores = dd.get_cores(data) do.run("samtools view -H {in_bam} | grep -v ^@RG > {new_header}".format(**locals()), "Create empty RG header: %s" % dd.get_sample_name(data)) cmd = ("samtools reheader {new_header} {in_bam} | " "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} -") do.run(cmd.format(**locals()), "Fix read groups: %s" % dd.get_sample_name(data)) return out_file
python
def fixrg(in_bam, names, ref_file, dirs, data): """Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data))) out_file = os.path.join(work_dir, "%s-fixrg.bam" % utils.splitext_plus(os.path.basename(in_bam))[0]) if not utils.file_exists(out_file): out_file = os.path.join(work_dir, "%s-fixrg.bam" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, in_bam): with file_transaction(data, out_file) as tx_out_file: rg_info = novoalign.get_rg_info(names) new_header = "%s-header.txt" % os.path.splitext(out_file)[0] cores = dd.get_cores(data) do.run("samtools view -H {in_bam} | grep -v ^@RG > {new_header}".format(**locals()), "Create empty RG header: %s" % dd.get_sample_name(data)) cmd = ("samtools reheader {new_header} {in_bam} | " "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} -") do.run(cmd.format(**locals()), "Fix read groups: %s" % dd.get_sample_name(data)) return out_file
[ "def", "fixrg", "(", "in_bam", ",", "names", ",", "ref_file", ",", "dirs", ",", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"bamclean\"...
Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this
[ "Fix", "read", "group", "in", "a", "file", "using", "samtools", "addreplacerg", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L20-L40
train
219,126
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
_target_chroms_and_header
def _target_chroms_and_header(bam_file, data): """Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues. """ special_remaps = {"chrM": "MT", "MT": "chrM"} target_chroms = dict([(x.name, i) for i, x in enumerate(ref.file_contigs(dd.get_ref_file(data))) if chromhacks.is_autosomal_or_sex(x.name)]) out_chroms = [] with pysam.Samfile(bam_file, "rb") as bamfile: for bami, bam_contig in enumerate([c["SN"] for c in bamfile.header["SQ"]]): if bam_contig in target_chroms: target_chrom = bam_contig elif bam_contig in special_remaps and special_remaps[bam_contig] in target_chroms: target_chrom = special_remaps[bam_contig] elif bam_contig.startswith("chr") and bam_contig.replace("chr", "") in target_chroms: target_chrom = bam_contig.replace("chr", "") elif "chr%s" % bam_contig in target_chroms: target_chrom = "chr%s" % bam_contig else: target_chrom = None # target_chrom == bam_contig ensures we don't try chr1 -> 1 style remapping if target_chrom and target_chrom == bam_contig: # Order not required if dealing with SAM file header fixing #assert bami == target_chroms[target_chrom], \ # ("remove_extracontigs: Non-matching order of standard contig: %s %s (%s vs %s)" % # (bam_file, target_chrom, bami, target_chroms[target_chrom])) out_chroms.append(target_chrom) assert out_chroms, ("remove_extracontigs: Did not find any chromosomes in reference file: %s %s" % (bam_file, target_chroms)) return out_chroms
python
def _target_chroms_and_header(bam_file, data): """Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues. """ special_remaps = {"chrM": "MT", "MT": "chrM"} target_chroms = dict([(x.name, i) for i, x in enumerate(ref.file_contigs(dd.get_ref_file(data))) if chromhacks.is_autosomal_or_sex(x.name)]) out_chroms = [] with pysam.Samfile(bam_file, "rb") as bamfile: for bami, bam_contig in enumerate([c["SN"] for c in bamfile.header["SQ"]]): if bam_contig in target_chroms: target_chrom = bam_contig elif bam_contig in special_remaps and special_remaps[bam_contig] in target_chroms: target_chrom = special_remaps[bam_contig] elif bam_contig.startswith("chr") and bam_contig.replace("chr", "") in target_chroms: target_chrom = bam_contig.replace("chr", "") elif "chr%s" % bam_contig in target_chroms: target_chrom = "chr%s" % bam_contig else: target_chrom = None # target_chrom == bam_contig ensures we don't try chr1 -> 1 style remapping if target_chrom and target_chrom == bam_contig: # Order not required if dealing with SAM file header fixing #assert bami == target_chroms[target_chrom], \ # ("remove_extracontigs: Non-matching order of standard contig: %s %s (%s vs %s)" % # (bam_file, target_chrom, bami, target_chroms[target_chrom])) out_chroms.append(target_chrom) assert out_chroms, ("remove_extracontigs: Did not find any chromosomes in reference file: %s %s" % (bam_file, target_chroms)) return out_chroms
[ "def", "_target_chroms_and_header", "(", "bam_file", ",", "data", ")", ":", "special_remaps", "=", "{", "\"chrM\"", ":", "\"MT\"", ",", "\"MT\"", ":", "\"chrM\"", "}", "target_chroms", "=", "dict", "(", "[", "(", "x", ".", "name", ",", "i", ")", "for", ...
Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues.
[ "Get", "a", "list", "of", "chromosomes", "to", "target", "and", "new", "updated", "ref_file", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L75-L106
train
219,127
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
picard_prep
def picard_prep(in_bam, names, ref_file, dirs, data): """Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities """ runner = broad.runner_from_path("picard", data["config"]) work_dir = utils.safe_makedir(os.path.join(dirs["work"], "bamclean", names["sample"])) runner.run_fn("picard_index_ref", ref_file) reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % os.path.splitext(os.path.basename(in_bam))[0]) if not utils.file_exists(reorder_bam): reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % dd.get_sample_name(data)) reorder_bam = runner.run_fn("picard_reorder", in_bam, ref_file, reorder_bam) rg_bam = runner.run_fn("picard_fix_rgs", reorder_bam, names) return _filter_bad_reads(rg_bam, ref_file, data)
python
def picard_prep(in_bam, names, ref_file, dirs, data): """Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities """ runner = broad.runner_from_path("picard", data["config"]) work_dir = utils.safe_makedir(os.path.join(dirs["work"], "bamclean", names["sample"])) runner.run_fn("picard_index_ref", ref_file) reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % os.path.splitext(os.path.basename(in_bam))[0]) if not utils.file_exists(reorder_bam): reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % dd.get_sample_name(data)) reorder_bam = runner.run_fn("picard_reorder", in_bam, ref_file, reorder_bam) rg_bam = runner.run_fn("picard_fix_rgs", reorder_bam, names) return _filter_bad_reads(rg_bam, ref_file, data)
[ "def", "picard_prep", "(", "in_bam", ",", "names", ",", "ref_file", ",", "dirs", ",", "data", ")", ":", "runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "data", "[", "\"config\"", "]", ")", "work_dir", "=", "utils", ".", "safe_mak...
Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities
[ "Prepare", "input", "BAM", "using", "Picard", "and", "GATK", "cleaning", "tools", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L122-L139
train
219,128
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
_filter_bad_reads
def _filter_bad_reads(in_bam, ref_file, data): """Use GATK filter to remove problem reads which choke GATK and Picard. """ bam.index(in_bam, data["config"]) out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0] if not utils.file_exists(out_file): with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_file) as tx_out_file: params = [("FixMisencodedBaseQualityReads" if dd.get_quality_format(data, "").lower() == "illumina" else "PrintReads"), "-R", ref_file, "-I", in_bam, "-O", tx_out_file, "-RF", "MatchingBasesAndQualsReadFilter", "-RF", "SeqIsStoredReadFilter", "-RF", "CigarContainsNoNOperator"] jvm_opts = broad.get_gatk_opts(data["config"], tmp_dir) do.run(broad.gatk_cmd("gatk", jvm_opts, params), "Filter problem reads") bam.index(out_file, data["config"]) return out_file
python
def _filter_bad_reads(in_bam, ref_file, data): """Use GATK filter to remove problem reads which choke GATK and Picard. """ bam.index(in_bam, data["config"]) out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0] if not utils.file_exists(out_file): with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_file) as tx_out_file: params = [("FixMisencodedBaseQualityReads" if dd.get_quality_format(data, "").lower() == "illumina" else "PrintReads"), "-R", ref_file, "-I", in_bam, "-O", tx_out_file, "-RF", "MatchingBasesAndQualsReadFilter", "-RF", "SeqIsStoredReadFilter", "-RF", "CigarContainsNoNOperator"] jvm_opts = broad.get_gatk_opts(data["config"], tmp_dir) do.run(broad.gatk_cmd("gatk", jvm_opts, params), "Filter problem reads") bam.index(out_file, data["config"]) return out_file
[ "def", "_filter_bad_reads", "(", "in_bam", ",", "ref_file", ",", "data", ")", ":", "bam", ".", "index", "(", "in_bam", ",", "data", "[", "\"config\"", "]", ")", "out_file", "=", "\"%s-gatkfilter.bam\"", "%", "os", ".", "path", ".", "splitext", "(", "in_b...
Use GATK filter to remove problem reads which choke GATK and Picard.
[ "Use", "GATK", "filter", "to", "remove", "problem", "reads", "which", "choke", "GATK", "and", "Picard", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L141-L161
train
219,129
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
generate_parallel
def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ to_analyze, extras = _split_samples_by_qc(samples) qced = run_parallel("pipeline_summary", to_analyze) samples = _combine_qc_samples(qced) + extras qsign_info = run_parallel("qsignature_summary", [samples]) metadata_file = _merge_metadata([samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file data[0]["summary"]["metadata"] = metadata_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) # MultiQC must be run after all file outputs are set: return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
python
def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ to_analyze, extras = _split_samples_by_qc(samples) qced = run_parallel("pipeline_summary", to_analyze) samples = _combine_qc_samples(qced) + extras qsign_info = run_parallel("qsignature_summary", [samples]) metadata_file = _merge_metadata([samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file data[0]["summary"]["metadata"] = metadata_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) # MultiQC must be run after all file outputs are set: return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
[ "def", "generate_parallel", "(", "samples", ",", "run_parallel", ")", ":", "to_analyze", ",", "extras", "=", "_split_samples_by_qc", "(", "samples", ")", "qced", "=", "run_parallel", "(", "\"pipeline_summary\"", ",", "to_analyze", ")", "samples", "=", "_combine_qc...
Provide parallel preparation of summary information for alignment and variant calling.
[ "Provide", "parallel", "preparation", "of", "summary", "information", "for", "alignment", "and", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L38-L58
train
219,130
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
pipeline_summary
def pipeline_summary(data): """Provide summary information on processing sample. Handles standard and CWL (single QC output) cases. """ data = utils.to_single_data(data) work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) if not work_bam or not work_bam.endswith(".bam"): work_bam = None if dd.get_ref_file(data): if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data)))) work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data) data["summary"] = _run_qc_tools(work_bam, work_data) if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data): data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0]) return [[data]]
python
def pipeline_summary(data): """Provide summary information on processing sample. Handles standard and CWL (single QC output) cases. """ data = utils.to_single_data(data) work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) if not work_bam or not work_bam.endswith(".bam"): work_bam = None if dd.get_ref_file(data): if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data)))) work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data) data["summary"] = _run_qc_tools(work_bam, work_data) if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data): data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0]) return [[data]]
[ "def", "pipeline_summary", "(", "data", ")", ":", "data", "=", "utils", ".", "to_single_data", "(", "data", ")", "work_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "if", "not", "work_bam", ...
Provide summary information on processing sample. Handles standard and CWL (single QC output) cases.
[ "Provide", "summary", "information", "on", "processing", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L60-L76
train
219,131
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
get_qc_tools
def get_qc_tools(data): """Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set. """ if dd.get_algorithm_qc(data): return dd.get_algorithm_qc(data) analysis = data["analysis"].lower() to_run = [] if tz.get_in(["config", "algorithm", "kraken"], data): to_run.append("kraken") if "fastqc" not in dd.get_tools_off(data): to_run.append("fastqc") if any([tool in dd.get_tools_on(data) for tool in ["qualimap", "qualimap_full"]]): to_run.append("qualimap") if analysis.startswith("rna-seq") or analysis == "smallrna-seq": if "qualimap" not in dd.get_tools_off(data): if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append("qualimap_rnaseq") else: logger.debug("GTF not compatible with Qualimap, skipping.") if analysis.startswith("chip-seq"): to_run.append("chipqc") if analysis.startswith("smallrna-seq"): to_run.append("small-rna") to_run.append("atropos") if "coverage_qc" not in dd.get_tools_off(data): to_run.append("samtools") if dd.has_variantcalls(data): if "coverage_qc" not in dd.get_tools_off(data): to_run += ["coverage", "picard"] to_run += ["qsignature", "variants"] if vcfanno.is_human(data): to_run += ["contamination", "peddy"] if vcfutils.get_paired_phenotype(data): to_run += ["viral"] if damage.should_filter([data]): to_run += ["damage"] if dd.get_umi_consensus(data): to_run += ["umi"] if tz.get_in(["config", "algorithm", "preseq"], data): to_run.append("preseq") to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)] to_run.sort() return to_run
python
def get_qc_tools(data): """Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set. """ if dd.get_algorithm_qc(data): return dd.get_algorithm_qc(data) analysis = data["analysis"].lower() to_run = [] if tz.get_in(["config", "algorithm", "kraken"], data): to_run.append("kraken") if "fastqc" not in dd.get_tools_off(data): to_run.append("fastqc") if any([tool in dd.get_tools_on(data) for tool in ["qualimap", "qualimap_full"]]): to_run.append("qualimap") if analysis.startswith("rna-seq") or analysis == "smallrna-seq": if "qualimap" not in dd.get_tools_off(data): if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append("qualimap_rnaseq") else: logger.debug("GTF not compatible with Qualimap, skipping.") if analysis.startswith("chip-seq"): to_run.append("chipqc") if analysis.startswith("smallrna-seq"): to_run.append("small-rna") to_run.append("atropos") if "coverage_qc" not in dd.get_tools_off(data): to_run.append("samtools") if dd.has_variantcalls(data): if "coverage_qc" not in dd.get_tools_off(data): to_run += ["coverage", "picard"] to_run += ["qsignature", "variants"] if vcfanno.is_human(data): to_run += ["contamination", "peddy"] if vcfutils.get_paired_phenotype(data): to_run += ["viral"] if damage.should_filter([data]): to_run += ["damage"] if dd.get_umi_consensus(data): to_run += ["umi"] if tz.get_in(["config", "algorithm", "preseq"], data): to_run.append("preseq") to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)] to_run.sort() return to_run
[ "def", "get_qc_tools", "(", "data", ")", ":", "if", "dd", ".", "get_algorithm_qc", "(", "data", ")", ":", "return", "dd", ".", "get_algorithm_qc", "(", "data", ")", "analysis", "=", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", "to_run", "=...
Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set.
[ "Retrieve", "a", "list", "of", "QC", "tools", "to", "use", "based", "on", "configuration", "and", "analysis", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L78-L123
train
219,132
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_run_qc_tools
def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken, qsignature, qualimap, samtools, picard, srna, umi, variant, viral, preseq, chipseq) tools = {"fastqc": fastqc.run, "atropos": atropos.run, "small-rna": srna.run, "samtools": samtools.run, "qualimap": qualimap.run, "qualimap_rnaseq": qualimap.run_rnaseq, "qsignature": qsignature.run, "contamination": contamination.run, "coverage": coverage.run, "damage": damage.run, "variants": variant.run, "peddy": peddy.run_qc, "kraken": kraken.run, "picard": picard.run, "umi": umi.run, "viral": viral.run, "preseq": preseq.run, "chipqc": chipseq.run } qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} qc_out = utils.deepish_copy(dd.get_summary_qc(data)) for program_name in dd.get_algorithm_qc(data): if not bam_file and program_name != "kraken": # kraken doesn't need bam continue if dd.get_phenotype(data) == "germline" and program_name != "variants": continue qc_fn = tools[program_name] cur_qc_dir = os.path.join(qc_dir, program_name) out = qc_fn(bam_file, data, cur_qc_dir) qc_files = None if out and isinstance(out, dict): # Check for metrics output, two cases: # 1. output with {"metrics"} and files ("base") if "metrics" in out: metrics.update(out.pop("metrics")) # 2. 
a dictionary of metrics elif "base" not in out: metrics.update(out) # Check for files only output if "base" in out: qc_files = out elif out and isinstance(out, six.string_types) and os.path.exists(out): qc_files = {"base": out, "secondary": []} if not qc_files: qc_files = _organize_qc_files(program_name, cur_qc_dir) if qc_files: qc_out[program_name] = qc_files metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = dd.get_quality_format(data).lower() return {"qc": qc_out, "metrics": metrics}
python
def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken, qsignature, qualimap, samtools, picard, srna, umi, variant, viral, preseq, chipseq) tools = {"fastqc": fastqc.run, "atropos": atropos.run, "small-rna": srna.run, "samtools": samtools.run, "qualimap": qualimap.run, "qualimap_rnaseq": qualimap.run_rnaseq, "qsignature": qsignature.run, "contamination": contamination.run, "coverage": coverage.run, "damage": damage.run, "variants": variant.run, "peddy": peddy.run_qc, "kraken": kraken.run, "picard": picard.run, "umi": umi.run, "viral": viral.run, "preseq": preseq.run, "chipqc": chipseq.run } qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} qc_out = utils.deepish_copy(dd.get_summary_qc(data)) for program_name in dd.get_algorithm_qc(data): if not bam_file and program_name != "kraken": # kraken doesn't need bam continue if dd.get_phenotype(data) == "germline" and program_name != "variants": continue qc_fn = tools[program_name] cur_qc_dir = os.path.join(qc_dir, program_name) out = qc_fn(bam_file, data, cur_qc_dir) qc_files = None if out and isinstance(out, dict): # Check for metrics output, two cases: # 1. output with {"metrics"} and files ("base") if "metrics" in out: metrics.update(out.pop("metrics")) # 2. 
a dictionary of metrics elif "base" not in out: metrics.update(out) # Check for files only output if "base" in out: qc_files = out elif out and isinstance(out, six.string_types) and os.path.exists(out): qc_files = {"base": out, "secondary": []} if not qc_files: qc_files = _organize_qc_files(program_name, cur_qc_dir) if qc_files: qc_out[program_name] = qc_files metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = dd.get_quality_format(data).lower() return {"qc": qc_out, "metrics": metrics}
[ "def", "_run_qc_tools", "(", "bam_file", ",", "data", ")", ":", "from", "bcbio", ".", "qc", "import", "(", "atropos", ",", "contamination", ",", "coverage", ",", "damage", ",", "fastqc", ",", "kraken", ",", "qsignature", ",", "qualimap", ",", "samtools", ...
Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools
[ "Run", "a", "set", "of", "third", "party", "quality", "control", "tools", "returning", "QC", "directory", "and", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L125-L187
train
219,133
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_organize_qc_files
def _organize_qc_files(program, qc_dir): """Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing. """ base_files = {"fastqc": "fastqc_report.html", "qualimap_rnaseq": "qualimapReport.html", "qualimap": "qualimapReport.html"} if os.path.exists(qc_dir): out_files = [] for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]: if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"): out_files.append(fname) elif os.path.isdir(fname) and not fname.endswith("tx"): for root, dirs, files in os.walk(fname): for f in files: if not f.endswith(".bcbiotmp"): out_files.append(os.path.join(root, f)) if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]): if len(out_files) == 1: base = out_files[0] secondary = [] else: base = None if program in base_files: base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])] if len(base_choices) == 1: base = base_choices[0] if not base: base = out_files[0] secondary = [x for x in out_files if x != base] return {"base": base, "secondary": secondary}
python
def _organize_qc_files(program, qc_dir): """Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing. """ base_files = {"fastqc": "fastqc_report.html", "qualimap_rnaseq": "qualimapReport.html", "qualimap": "qualimapReport.html"} if os.path.exists(qc_dir): out_files = [] for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]: if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"): out_files.append(fname) elif os.path.isdir(fname) and not fname.endswith("tx"): for root, dirs, files in os.walk(fname): for f in files: if not f.endswith(".bcbiotmp"): out_files.append(os.path.join(root, f)) if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]): if len(out_files) == 1: base = out_files[0] secondary = [] else: base = None if program in base_files: base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])] if len(base_choices) == 1: base = base_choices[0] if not base: base = out_files[0] secondary = [x for x in out_files if x != base] return {"base": base, "secondary": secondary}
[ "def", "_organize_qc_files", "(", "program", ",", "qc_dir", ")", ":", "base_files", "=", "{", "\"fastqc\"", ":", "\"fastqc_report.html\"", ",", "\"qualimap_rnaseq\"", ":", "\"qualimapReport.html\"", ",", "\"qualimap\"", ":", "\"qualimapReport.html\"", "}", "if", "os",...
Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing.
[ "Organize", "outputs", "from", "quality", "control", "runs", "into", "a", "base", "file", "and", "secondary", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L189-L220
train
219,134
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_split_samples_by_qc
def _split_samples_by_qc(samples): """Split data into individual quality control steps for a run. """ to_process = [] extras = [] for data in [utils.to_single_data(x) for x in samples]: qcs = dd.get_algorithm_qc(data) # kraken doesn't need bam if qcs and (dd.get_align_bam(data) or dd.get_work_bam(data) or tz.get_in(["config", "algorithm", "kraken"], data)): for qc in qcs: add = copy.deepcopy(data) add["config"]["algorithm"]["qc"] = [qc] to_process.append([add]) else: extras.append([data]) return to_process, extras
python
def _split_samples_by_qc(samples): """Split data into individual quality control steps for a run. """ to_process = [] extras = [] for data in [utils.to_single_data(x) for x in samples]: qcs = dd.get_algorithm_qc(data) # kraken doesn't need bam if qcs and (dd.get_align_bam(data) or dd.get_work_bam(data) or tz.get_in(["config", "algorithm", "kraken"], data)): for qc in qcs: add = copy.deepcopy(data) add["config"]["algorithm"]["qc"] = [qc] to_process.append([add]) else: extras.append([data]) return to_process, extras
[ "def", "_split_samples_by_qc", "(", "samples", ")", ":", "to_process", "=", "[", "]", "extras", "=", "[", "]", "for", "data", "in", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", ":", "qcs", "=", "dd", ".", "g...
Split data into individual quality control steps for a run.
[ "Split", "data", "into", "individual", "quality", "control", "steps", "for", "a", "run", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L224-L240
train
219,135
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_combine_qc_samples
def _combine_qc_samples(samples): """Combine split QC analyses into single samples based on BAM files. """ by_bam = collections.defaultdict(list) for data in [utils.to_single_data(x) for x in samples]: batch = dd.get_batch(data) or dd.get_sample_name(data) if not isinstance(batch, (list, tuple)): batch = [batch] batch = tuple(batch) by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data) out = [] for data_group in by_bam.values(): data = data_group[0] alg_qc = [] qc = {} metrics = {} for d in data_group: qc.update(dd.get_summary_qc(d)) metrics.update(dd.get_summary_metrics(d)) alg_qc.extend(dd.get_algorithm_qc(d)) data["config"]["algorithm"]["qc"] = alg_qc data["summary"]["qc"] = qc data["summary"]["metrics"] = metrics out.append([data]) return out
python
def _combine_qc_samples(samples): """Combine split QC analyses into single samples based on BAM files. """ by_bam = collections.defaultdict(list) for data in [utils.to_single_data(x) for x in samples]: batch = dd.get_batch(data) or dd.get_sample_name(data) if not isinstance(batch, (list, tuple)): batch = [batch] batch = tuple(batch) by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data) out = [] for data_group in by_bam.values(): data = data_group[0] alg_qc = [] qc = {} metrics = {} for d in data_group: qc.update(dd.get_summary_qc(d)) metrics.update(dd.get_summary_metrics(d)) alg_qc.extend(dd.get_algorithm_qc(d)) data["config"]["algorithm"]["qc"] = alg_qc data["summary"]["qc"] = qc data["summary"]["metrics"] = metrics out.append([data]) return out
[ "def", "_combine_qc_samples", "(", "samples", ")", ":", "by_bam", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", ":", "batch", "=", "d...
Combine split QC analyses into single samples based on BAM files.
[ "Combine", "split", "QC", "analyses", "into", "single", "samples", "based", "on", "BAM", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L242-L266
train
219,136
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
write_project_summary
def write_project_summary(samples, qsign_info=None): """Write project summary information on the provided samples. write out dirs, genome resources, """ work_dir = samples[0][0]["dirs"]["work"] out_file = os.path.join(work_dir, "project-summary.yaml") upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"]) if "dir" in samples[0][0]["upload"] else "") date = str(datetime.now()) prev_samples = _other_pipeline_samples(out_file, samples) with open(out_file, "w") as out_handle: yaml.safe_dump({"date": date}, out_handle, default_flow_style=False, allow_unicode=False) if qsign_info: qsign_out = utils.deepish_copy(qsign_info[0]) qsign_out.pop("out_dir", None) yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"upload": upload_dir}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle, default_flow_style=False, allow_unicode=False) return out_file
python
def write_project_summary(samples, qsign_info=None): """Write project summary information on the provided samples. write out dirs, genome resources, """ work_dir = samples[0][0]["dirs"]["work"] out_file = os.path.join(work_dir, "project-summary.yaml") upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"]) if "dir" in samples[0][0]["upload"] else "") date = str(datetime.now()) prev_samples = _other_pipeline_samples(out_file, samples) with open(out_file, "w") as out_handle: yaml.safe_dump({"date": date}, out_handle, default_flow_style=False, allow_unicode=False) if qsign_info: qsign_out = utils.deepish_copy(qsign_info[0]) qsign_out.pop("out_dir", None) yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"upload": upload_dir}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle, default_flow_style=False, allow_unicode=False) return out_file
[ "def", "write_project_summary", "(", "samples", ",", "qsign_info", "=", "None", ")", ":", "work_dir", "=", "samples", "[", "0", "]", "[", "0", "]", "[", "\"dirs\"", "]", "[", "\"work\"", "]", "out_file", "=", "os", ".", "path", ".", "join", "(", "wor...
Write project summary information on the provided samples. write out dirs, genome resources,
[ "Write", "project", "summary", "information", "on", "the", "provided", "samples", ".", "write", "out", "dirs", "genome", "resources" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L270-L295
train
219,137
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_merge_metadata
def _merge_metadata(samples): """Merge all metadata into CSV file""" samples = list(utils.flatten(samples)) out_dir = dd.get_work_dir(samples[0]) logger.info("summarize metadata") out_file = os.path.join(out_dir, "metadata.csv") sample_metrics = collections.defaultdict(dict) for s in samples: m = tz.get_in(['metadata'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in list(m.keys()): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) pd.DataFrame(sample_metrics).transpose().to_csv(out_file) return out_file
python
def _merge_metadata(samples): """Merge all metadata into CSV file""" samples = list(utils.flatten(samples)) out_dir = dd.get_work_dir(samples[0]) logger.info("summarize metadata") out_file = os.path.join(out_dir, "metadata.csv") sample_metrics = collections.defaultdict(dict) for s in samples: m = tz.get_in(['metadata'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in list(m.keys()): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) pd.DataFrame(sample_metrics).transpose().to_csv(out_file) return out_file
[ "def", "_merge_metadata", "(", "samples", ")", ":", "samples", "=", "list", "(", "utils", ".", "flatten", "(", "samples", ")", ")", "out_dir", "=", "dd", ".", "get_work_dir", "(", "samples", "[", "0", "]", ")", "logger", ".", "info", "(", "\"summarize ...
Merge all metadata into CSV file
[ "Merge", "all", "metadata", "into", "CSV", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L297-L314
train
219,138
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_other_pipeline_samples
def _other_pipeline_samples(summary_file, cur_samples): """Retrieve samples produced previously by another pipeline in the summary output. """ cur_descriptions = set([s[0]["description"] for s in cur_samples]) out = [] if utils.file_exists(summary_file): with open(summary_file) as in_handle: for s in yaml.safe_load(in_handle).get("samples", []): if s["description"] not in cur_descriptions: out.append(s) return out
python
def _other_pipeline_samples(summary_file, cur_samples): """Retrieve samples produced previously by another pipeline in the summary output. """ cur_descriptions = set([s[0]["description"] for s in cur_samples]) out = [] if utils.file_exists(summary_file): with open(summary_file) as in_handle: for s in yaml.safe_load(in_handle).get("samples", []): if s["description"] not in cur_descriptions: out.append(s) return out
[ "def", "_other_pipeline_samples", "(", "summary_file", ",", "cur_samples", ")", ":", "cur_descriptions", "=", "set", "(", "[", "s", "[", "0", "]", "[", "\"description\"", "]", "for", "s", "in", "cur_samples", "]", ")", "out", "=", "[", "]", "if", "utils"...
Retrieve samples produced previously by another pipeline in the summary output.
[ "Retrieve", "samples", "produced", "previously", "by", "another", "pipeline", "in", "the", "summary", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L316-L326
train
219,139
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_add_researcher_summary
def _add_researcher_summary(samples, summary_yaml): """Generate summary files per researcher if organized via a LIMS. """ by_researcher = collections.defaultdict(list) for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: by_researcher[researcher].append(data["description"]) out_by_researcher = {} for researcher, descrs in by_researcher.items(): out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher, set(descrs), samples[0][0]) out = [] for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: data["summary"]["researcher"] = out_by_researcher[researcher] out.append([data]) return out
python
def _add_researcher_summary(samples, summary_yaml): """Generate summary files per researcher if organized via a LIMS. """ by_researcher = collections.defaultdict(list) for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: by_researcher[researcher].append(data["description"]) out_by_researcher = {} for researcher, descrs in by_researcher.items(): out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher, set(descrs), samples[0][0]) out = [] for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: data["summary"]["researcher"] = out_by_researcher[researcher] out.append([data]) return out
[ "def", "_add_researcher_summary", "(", "samples", ",", "summary_yaml", ")", ":", "by_researcher", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "(", "x", "[", "0", "]", "for", "x", "in", "samples", ")", ":", "researcher", ...
Generate summary files per researcher if organized via a LIMS.
[ "Generate", "summary", "files", "per", "researcher", "if", "organized", "via", "a", "LIMS", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L338-L356
train
219,140
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_summary_csv_by_researcher
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data): """Generate a CSV file with summary information for a researcher on this project. """ out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")), "%s-summary.tsv" % run_info.clean_name(researcher)) metrics = ["Total_reads", "Mapped_reads", "Mapped_reads_pct", "Duplicates", "Duplicates_pct"] with open(summary_yaml) as in_handle: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(["Name"] + metrics) for sample in yaml.safe_load(in_handle)["samples"]: if sample["description"] in descrs: row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "") for x in metrics] writer.writerow(row) return out_file
python
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data): """Generate a CSV file with summary information for a researcher on this project. """ out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")), "%s-summary.tsv" % run_info.clean_name(researcher)) metrics = ["Total_reads", "Mapped_reads", "Mapped_reads_pct", "Duplicates", "Duplicates_pct"] with open(summary_yaml) as in_handle: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(["Name"] + metrics) for sample in yaml.safe_load(in_handle)["samples"]: if sample["description"] in descrs: row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "") for x in metrics] writer.writerow(row) return out_file
[ "def", "_summary_csv_by_researcher", "(", "summary_yaml", ",", "researcher", ",", "descrs", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data", "[", ...
Generate a CSV file with summary information for a researcher on this project.
[ "Generate", "a", "CSV", "file", "with", "summary", "information", "for", "a", "researcher", "on", "this", "project", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L358-L373
train
219,141
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
prep_pdf
def prep_pdf(qc_dir, config): """Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS. """ html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_exists(html_file): out_file = "%s.pdf" % os.path.splitext(html_file)[0] if not utils.file_exists(out_file): cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s" % (html_file, html_fixed)) do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf") cmd = [topdf, html_fixed, out_file] do.run(cmd, "Convert QC HTML to PDF") return out_file
python
def prep_pdf(qc_dir, config): """Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS. """ html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_exists(html_file): out_file = "%s.pdf" % os.path.splitext(html_file)[0] if not utils.file_exists(out_file): cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s" % (html_file, html_fixed)) do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf") cmd = [topdf, html_fixed, out_file] do.run(cmd, "Convert QC HTML to PDF") return out_file
[ "def", "prep_pdf", "(", "qc_dir", ",", "config", ")", ":", "html_file", "=", "os", ".", "path", ".", "join", "(", "qc_dir", ",", "\"fastqc\"", ",", "\"fastqc_report.html\"", ")", "html_fixed", "=", "\"%s-fixed%s\"", "%", "os", ".", "path", ".", "splitext",...
Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS.
[ "Create", "PDF", "from", "HTML", "summary", "outputs", "in", "QC", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L377-L399
train
219,142
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_run_purecn_dx
def _run_purecn_dx(out, paired): """Extract signatures and mutational burdens from PureCN rds file. """ out_base, out, all_files = _get_purecn_dx_files(paired, out) if not utils.file_uptodate(out["mutation_burden"], out["rds"]): with file_transaction(paired.tumor_data, out_base) as tx_out_base: cmd = ["PureCN_Dx.R", "--rds", out["rds"], "--callable", dd.get_sample_callable(paired.tumor_data), "--signatures", "--out", tx_out_base] do.run(cmd, "PureCN Dx mutational burden and signatures") for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), os.path.join(os.path.dirname(out_base), f)) return out
python
def _run_purecn_dx(out, paired): """Extract signatures and mutational burdens from PureCN rds file. """ out_base, out, all_files = _get_purecn_dx_files(paired, out) if not utils.file_uptodate(out["mutation_burden"], out["rds"]): with file_transaction(paired.tumor_data, out_base) as tx_out_base: cmd = ["PureCN_Dx.R", "--rds", out["rds"], "--callable", dd.get_sample_callable(paired.tumor_data), "--signatures", "--out", tx_out_base] do.run(cmd, "PureCN Dx mutational burden and signatures") for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), os.path.join(os.path.dirname(out_base), f)) return out
[ "def", "_run_purecn_dx", "(", "out", ",", "paired", ")", ":", "out_base", ",", "out", ",", "all_files", "=", "_get_purecn_dx_files", "(", "paired", ",", "out", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out", "[", "\"mutation_burden\"", "]", ",...
Extract signatures and mutational burdens from PureCN rds file.
[ "Extract", "signatures", "and", "mutational", "burdens", "from", "PureCN", "rds", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L49-L62
train
219,143
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_get_purecn_dx_files
def _get_purecn_dx_files(paired, out): """Retrieve files generated by PureCN_Dx """ out_base = "%s-dx" % utils.splitext_plus(out["rds"])[0] all_files = [] for key, ext in [[("mutation_burden",), "_mutation_burden.csv"], [("plot", "signatures"), "_signatures.pdf"], [("signatures",), "_signatures.csv"]]: cur_file = "%s%s" % (out_base, ext) out = tz.update_in(out, key, lambda x: cur_file) all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
python
def _get_purecn_dx_files(paired, out): """Retrieve files generated by PureCN_Dx """ out_base = "%s-dx" % utils.splitext_plus(out["rds"])[0] all_files = [] for key, ext in [[("mutation_burden",), "_mutation_burden.csv"], [("plot", "signatures"), "_signatures.pdf"], [("signatures",), "_signatures.csv"]]: cur_file = "%s%s" % (out_base, ext) out = tz.update_in(out, key, lambda x: cur_file) all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
[ "def", "_get_purecn_dx_files", "(", "paired", ",", "out", ")", ":", "out_base", "=", "\"%s-dx\"", "%", "utils", ".", "splitext_plus", "(", "out", "[", "\"rds\"", "]", ")", "[", "0", "]", "all_files", "=", "[", "]", "for", "key", ",", "ext", "in", "["...
Retrieve files generated by PureCN_Dx
[ "Retrieve", "files", "generated", "by", "PureCN_Dx" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L64-L75
train
219,144
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_run_purecn
def _run_purecn(paired, work_dir): """Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs. """ segfns = {"cnvkit": _segment_normalized_cnvkit, "gatk-cnv": _segment_normalized_gatk} out_base, out, all_files = _get_purecn_files(paired, work_dir) failed_file = out_base + "-failed.log" cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data) if not utils.file_uptodate(out["rds"], cnr_file) and not utils.file_exists(failed_file): cnr_file, seg_file = segfns[cnvkit.bin_approach(paired.tumor_data)](cnr_file, work_dir, paired) from bcbio import heterogeneity vcf_file = heterogeneity.get_variants(paired.tumor_data, include_germline=False)[0]["vrn_file"] vcf_file = germline.filter_to_pass_and_reject(vcf_file, paired, out_dir=work_dir) with file_transaction(paired.tumor_data, out_base) as tx_out_base: # Use UCSC style naming for human builds to support BSgenome genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"] else dd.get_genome_build(paired.tumor_data)) cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base, "--sampleid", dd.get_sample_name(paired.tumor_data), "--genome", genome, "--vcf", vcf_file, "--tumor", cnr_file, "--segfile", seg_file, "--funsegmentation", "Hclust", "--maxnonclonal", "0.3"] if dd.get_num_cores(paired.tumor_data) > 1: cmd += ["--cores", str(dd.get_num_cores(paired.tumor_data))] try: cmd = "export R_LIBS_USER=%s && %s && %s" % (utils.R_sitelib(), utils.get_R_exports(), " ".join([str(x) for x in cmd])) do.run(cmd, "PureCN copy number calling") except subprocess.CalledProcessError as msg: if _allowed_errors(str(msg)): logger.info("PureCN failed to find solution for %s: skipping" % dd.get_sample_name(paired.tumor_data)) with open(failed_file, "w") as out_handle: out_handle.write(str(msg)) else: logger.exception() raise for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), 
os.path.join(os.path.dirname(out_base), f)) out = _get_purecn_files(paired, work_dir, require_exist=True)[1] return out if (out.get("rds") and os.path.exists(out["rds"])) else None
python
def _run_purecn(paired, work_dir): """Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs. """ segfns = {"cnvkit": _segment_normalized_cnvkit, "gatk-cnv": _segment_normalized_gatk} out_base, out, all_files = _get_purecn_files(paired, work_dir) failed_file = out_base + "-failed.log" cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data) if not utils.file_uptodate(out["rds"], cnr_file) and not utils.file_exists(failed_file): cnr_file, seg_file = segfns[cnvkit.bin_approach(paired.tumor_data)](cnr_file, work_dir, paired) from bcbio import heterogeneity vcf_file = heterogeneity.get_variants(paired.tumor_data, include_germline=False)[0]["vrn_file"] vcf_file = germline.filter_to_pass_and_reject(vcf_file, paired, out_dir=work_dir) with file_transaction(paired.tumor_data, out_base) as tx_out_base: # Use UCSC style naming for human builds to support BSgenome genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"] else dd.get_genome_build(paired.tumor_data)) cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base, "--sampleid", dd.get_sample_name(paired.tumor_data), "--genome", genome, "--vcf", vcf_file, "--tumor", cnr_file, "--segfile", seg_file, "--funsegmentation", "Hclust", "--maxnonclonal", "0.3"] if dd.get_num_cores(paired.tumor_data) > 1: cmd += ["--cores", str(dd.get_num_cores(paired.tumor_data))] try: cmd = "export R_LIBS_USER=%s && %s && %s" % (utils.R_sitelib(), utils.get_R_exports(), " ".join([str(x) for x in cmd])) do.run(cmd, "PureCN copy number calling") except subprocess.CalledProcessError as msg: if _allowed_errors(str(msg)): logger.info("PureCN failed to find solution for %s: skipping" % dd.get_sample_name(paired.tumor_data)) with open(failed_file, "w") as out_handle: out_handle.write(str(msg)) else: logger.exception() raise for f in all_files: if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)): shutil.move(os.path.join(os.path.dirname(tx_out_base), f), 
os.path.join(os.path.dirname(out_base), f)) out = _get_purecn_files(paired, work_dir, require_exist=True)[1] return out if (out.get("rds") and os.path.exists(out["rds"])) else None
[ "def", "_run_purecn", "(", "paired", ",", "work_dir", ")", ":", "segfns", "=", "{", "\"cnvkit\"", ":", "_segment_normalized_cnvkit", ",", "\"gatk-cnv\"", ":", "_segment_normalized_gatk", "}", "out_base", ",", "out", ",", "all_files", "=", "_get_purecn_files", "(",...
Run PureCN.R wrapper with pre-segmented CNVkit or GATK4 inputs.
[ "Run", "PureCN", ".", "R", "wrapper", "with", "pre", "-", "segmented", "CNVkit", "or", "GATK4", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L77-L118
train
219,145
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_segment_normalized_gatk
def _segment_normalized_gatk(cnr_file, work_dir, paired): """Segmentation of normalized inputs using GATK4, converting into standard input formats. """ work_dir = utils.safe_makedir(os.path.join(work_dir, "gatk-cnv")) seg_file = gatkcnv.model_segments(cnr_file, work_dir, paired)["seg"] std_seg_file = seg_file.replace(".cr.seg", ".seg") if not utils.file_uptodate(std_seg_file, seg_file): with file_transaction(std_seg_file) as tx_out_file: df = pd.read_csv(seg_file, sep="\t", comment="@", header=0, names=["chrom", "loc.start", "loc.end", "num.mark", "seg.mean"]) df.insert(0, "ID", [dd.get_sample_name(paired.tumor_data)] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) std_cnr_file = os.path.join(work_dir, "%s.cnr" % dd.get_sample_name(paired.tumor_data)) if not utils.file_uptodate(std_cnr_file, cnr_file): with file_transaction(std_cnr_file) as tx_out_file: logdf = pd.read_csv(cnr_file, sep="\t", comment="@", header=0, names=["chrom", "start", "end", "log2"]) covdf = pd.read_csv(tz.get_in(["depth", "bins", "antitarget"], paired.tumor_data), sep="\t", header=None, names=["chrom", "start", "end", "orig.name", "depth", "gene"]) df = pd.merge(logdf, covdf, on=["chrom", "start", "end"]) del df["orig.name"] df = df[["chrom", "start", "end", "gene", "log2", "depth"]] df.insert(6, "weight", [1.0] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) return std_cnr_file, std_seg_file
python
def _segment_normalized_gatk(cnr_file, work_dir, paired): """Segmentation of normalized inputs using GATK4, converting into standard input formats. """ work_dir = utils.safe_makedir(os.path.join(work_dir, "gatk-cnv")) seg_file = gatkcnv.model_segments(cnr_file, work_dir, paired)["seg"] std_seg_file = seg_file.replace(".cr.seg", ".seg") if not utils.file_uptodate(std_seg_file, seg_file): with file_transaction(std_seg_file) as tx_out_file: df = pd.read_csv(seg_file, sep="\t", comment="@", header=0, names=["chrom", "loc.start", "loc.end", "num.mark", "seg.mean"]) df.insert(0, "ID", [dd.get_sample_name(paired.tumor_data)] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) std_cnr_file = os.path.join(work_dir, "%s.cnr" % dd.get_sample_name(paired.tumor_data)) if not utils.file_uptodate(std_cnr_file, cnr_file): with file_transaction(std_cnr_file) as tx_out_file: logdf = pd.read_csv(cnr_file, sep="\t", comment="@", header=0, names=["chrom", "start", "end", "log2"]) covdf = pd.read_csv(tz.get_in(["depth", "bins", "antitarget"], paired.tumor_data), sep="\t", header=None, names=["chrom", "start", "end", "orig.name", "depth", "gene"]) df = pd.merge(logdf, covdf, on=["chrom", "start", "end"]) del df["orig.name"] df = df[["chrom", "start", "end", "gene", "log2", "depth"]] df.insert(6, "weight", [1.0] * len(df)) df.to_csv(tx_out_file, sep="\t", header=True, index=False) return std_cnr_file, std_seg_file
[ "def", "_segment_normalized_gatk", "(", "cnr_file", ",", "work_dir", ",", "paired", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"gatk-cnv\"", ")", ")", "seg_file", "=", "gatkcnv", "....
Segmentation of normalized inputs using GATK4, converting into standard input formats.
[ "Segmentation", "of", "normalized", "inputs", "using", "GATK4", "converting", "into", "standard", "input", "formats", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L126-L151
train
219,146
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_segment_normalized_cnvkit
def _segment_normalized_cnvkit(cnr_file, work_dir, paired): """Segmentation of normalized inputs using CNVkit. """ cnvkit_base = os.path.join(utils.safe_makedir(os.path.join(work_dir, "cnvkit")), dd.get_sample_name(paired.tumor_data)) cnr_file = chromhacks.bed_to_standardonly(cnr_file, paired.tumor_data, headers="chromosome", include_sex_chroms=True, out_dir=os.path.dirname(cnvkit_base)) cnr_file = _remove_overlaps(cnr_file, os.path.dirname(cnvkit_base), paired.tumor_data) seg_file = cnvkit.segment_from_cnr(cnr_file, paired.tumor_data, cnvkit_base) return cnr_file, seg_file
python
def _segment_normalized_cnvkit(cnr_file, work_dir, paired): """Segmentation of normalized inputs using CNVkit. """ cnvkit_base = os.path.join(utils.safe_makedir(os.path.join(work_dir, "cnvkit")), dd.get_sample_name(paired.tumor_data)) cnr_file = chromhacks.bed_to_standardonly(cnr_file, paired.tumor_data, headers="chromosome", include_sex_chroms=True, out_dir=os.path.dirname(cnvkit_base)) cnr_file = _remove_overlaps(cnr_file, os.path.dirname(cnvkit_base), paired.tumor_data) seg_file = cnvkit.segment_from_cnr(cnr_file, paired.tumor_data, cnvkit_base) return cnr_file, seg_file
[ "def", "_segment_normalized_cnvkit", "(", "cnr_file", ",", "work_dir", ",", "paired", ")", ":", "cnvkit_base", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"cnvkit\"", ...
Segmentation of normalized inputs using CNVkit.
[ "Segmentation", "of", "normalized", "inputs", "using", "CNVkit", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L153-L163
train
219,147
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_remove_overlaps
def _remove_overlaps(in_file, out_dir, data): """Remove regions that overlap with next region, these result in issues with PureCN. """ out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: prev_line = None for line in in_handle: if prev_line: pchrom, pstart, pend = prev_line.split("\t", 4)[:3] cchrom, cstart, cend = line.split("\t", 4)[:3] # Skip if chromosomes match and end overlaps start if pchrom == cchrom and int(pend) > int(cstart): pass else: out_handle.write(prev_line) prev_line = line out_handle.write(prev_line) return out_file
python
def _remove_overlaps(in_file, out_dir, data): """Remove regions that overlap with next region, these result in issues with PureCN. """ out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: prev_line = None for line in in_handle: if prev_line: pchrom, pstart, pend = prev_line.split("\t", 4)[:3] cchrom, cstart, cend = line.split("\t", 4)[:3] # Skip if chromosomes match and end overlaps start if pchrom == cchrom and int(pend) > int(cstart): pass else: out_handle.write(prev_line) prev_line = line out_handle.write(prev_line) return out_file
[ "def", "_remove_overlaps", "(", "in_file", ",", "out_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-nooverlaps%s\"", "%", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(...
Remove regions that overlap with next region, these result in issues with PureCN.
[ "Remove", "regions", "that", "overlap", "with", "next", "region", "these", "result", "in", "issues", "with", "PureCN", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L165-L185
train
219,148
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_get_purecn_files
def _get_purecn_files(paired, work_dir, require_exist=False): """Retrieve organized structure of PureCN output files. """ out_base = os.path.join(work_dir, "%s-purecn" % (dd.get_sample_name(paired.tumor_data))) out = {"plot": {}} all_files = [] for plot in ["chromosomes", "local_optima", "segmentation", "summary"]: if plot == "summary": cur_file = "%s.pdf" % out_base else: cur_file = "%s_%s.pdf" % (out_base, plot) if not require_exist or os.path.exists(cur_file): out["plot"][plot] = cur_file all_files.append(os.path.basename(cur_file)) for key, ext in [["hetsummary", ".csv"], ["dnacopy", "_dnacopy.seg"], ["genes", "_genes.csv"], ["log", ".log"], ["loh", "_loh.csv"], ["rds", ".rds"], ["variants", "_variants.csv"]]: cur_file = "%s%s" % (out_base, ext) if not require_exist or os.path.exists(cur_file): out[key] = cur_file all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
python
def _get_purecn_files(paired, work_dir, require_exist=False): """Retrieve organized structure of PureCN output files. """ out_base = os.path.join(work_dir, "%s-purecn" % (dd.get_sample_name(paired.tumor_data))) out = {"plot": {}} all_files = [] for plot in ["chromosomes", "local_optima", "segmentation", "summary"]: if plot == "summary": cur_file = "%s.pdf" % out_base else: cur_file = "%s_%s.pdf" % (out_base, plot) if not require_exist or os.path.exists(cur_file): out["plot"][plot] = cur_file all_files.append(os.path.basename(cur_file)) for key, ext in [["hetsummary", ".csv"], ["dnacopy", "_dnacopy.seg"], ["genes", "_genes.csv"], ["log", ".log"], ["loh", "_loh.csv"], ["rds", ".rds"], ["variants", "_variants.csv"]]: cur_file = "%s%s" % (out_base, ext) if not require_exist or os.path.exists(cur_file): out[key] = cur_file all_files.append(os.path.basename(cur_file)) return out_base, out, all_files
[ "def", "_get_purecn_files", "(", "paired", ",", "work_dir", ",", "require_exist", "=", "False", ")", ":", "out_base", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-purecn\"", "%", "(", "dd", ".", "get_sample_name", "(", "paired", ".", ...
Retrieve organized structure of PureCN output files.
[ "Retrieve", "organized", "structure", "of", "PureCN", "output", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L187-L208
train
219,149
bcbio/bcbio-nextgen
bcbio/structural/purecn.py
_loh_to_vcf
def _loh_to_vcf(cur): """Convert LOH output into standardized VCF. """ cn = int(float(cur["C"])) minor_cn = int(float(cur["M"])) if cur["type"].find("LOH"): svtype = "LOH" elif cn > 2: svtype = "DUP" elif cn < 1: svtype = "DEL" else: svtype = None if svtype: info = ["SVTYPE=%s" % svtype, "END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])), "CN=%s" % cn, "MajorCN=%s" % (cn - minor_cn), "MinorCN=%s" % minor_cn] return [cur["chr"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".", ";".join(info), "GT", "0/1"]
python
def _loh_to_vcf(cur): """Convert LOH output into standardized VCF. """ cn = int(float(cur["C"])) minor_cn = int(float(cur["M"])) if cur["type"].find("LOH"): svtype = "LOH" elif cn > 2: svtype = "DUP" elif cn < 1: svtype = "DEL" else: svtype = None if svtype: info = ["SVTYPE=%s" % svtype, "END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])), "CN=%s" % cn, "MajorCN=%s" % (cn - minor_cn), "MinorCN=%s" % minor_cn] return [cur["chr"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".", ";".join(info), "GT", "0/1"]
[ "def", "_loh_to_vcf", "(", "cur", ")", ":", "cn", "=", "int", "(", "float", "(", "cur", "[", "\"C\"", "]", ")", ")", "minor_cn", "=", "int", "(", "float", "(", "cur", "[", "\"M\"", "]", ")", ")", "if", "cur", "[", "\"type\"", "]", ".", "find", ...
Convert LOH output into standardized VCF.
[ "Convert", "LOH", "output", "into", "standardized", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purecn.py#L219-L237
train
219,150
bcbio/bcbio-nextgen
scripts/utils/collect_metrics_to_csv.py
_generate_metrics
def _generate_metrics(bam_fname, config_file, ref_file, bait_file, target_file): """Run Picard commands to generate metrics files when missing. """ with open(config_file) as in_handle: config = yaml.safe_load(in_handle) broad_runner = broad.runner_from_config(config) bam_fname = os.path.abspath(bam_fname) path = os.path.dirname(bam_fname) out_dir = os.path.join(path, "metrics") utils.safe_makedir(out_dir) with utils.chdir(out_dir): with tx_tmpdir() as tmp_dir: cur_bam = os.path.basename(bam_fname) if not os.path.exists(cur_bam): os.symlink(bam_fname, cur_bam) gen_metrics = PicardMetrics(broad_runner, tmp_dir) gen_metrics.report(cur_bam, ref_file, _bam_is_paired(bam_fname), bait_file, target_file) return out_dir
python
def _generate_metrics(bam_fname, config_file, ref_file, bait_file, target_file): """Run Picard commands to generate metrics files when missing. """ with open(config_file) as in_handle: config = yaml.safe_load(in_handle) broad_runner = broad.runner_from_config(config) bam_fname = os.path.abspath(bam_fname) path = os.path.dirname(bam_fname) out_dir = os.path.join(path, "metrics") utils.safe_makedir(out_dir) with utils.chdir(out_dir): with tx_tmpdir() as tmp_dir: cur_bam = os.path.basename(bam_fname) if not os.path.exists(cur_bam): os.symlink(bam_fname, cur_bam) gen_metrics = PicardMetrics(broad_runner, tmp_dir) gen_metrics.report(cur_bam, ref_file, _bam_is_paired(bam_fname), bait_file, target_file) return out_dir
[ "def", "_generate_metrics", "(", "bam_fname", ",", "config_file", ",", "ref_file", ",", "bait_file", ",", "target_file", ")", ":", "with", "open", "(", "config_file", ")", "as", "in_handle", ":", "config", "=", "yaml", ".", "safe_load", "(", "in_handle", ")"...
Run Picard commands to generate metrics files when missing.
[ "Run", "Picard", "commands", "to", "generate", "metrics", "files", "when", "missing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/collect_metrics_to_csv.py#L135-L155
train
219,151
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
run
def run(items, background=None): """Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller """ if not background: background = [] paired = vcfutils.get_paired(items + background) if paired: out = _run_paired(paired) else: out = items logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" % ", ".join([dd.get_sample_name(d) for d in items + background])) return out
python
def run(items, background=None): """Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller """ if not background: background = [] paired = vcfutils.get_paired(items + background) if paired: out = _run_paired(paired) else: out = items logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" % ", ".join([dd.get_sample_name(d) for d in items + background])) return out
[ "def", "run", "(", "items", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "paired", "=", "vcfutils", ".", "get_paired", "(", "items", "+", "background", ")", "if", "paired", ":", "out", "=", "_...
Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller
[ "Detect", "copy", "number", "variations", "from", "batched", "set", "of", "samples", "using", "GATK4", "CNV", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L19-L32
train
219,152
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
_run_paired
def _run_paired(paired): """Run somatic variant calling pipeline. """ from bcbio.structural import titancna work_dir = _sv_workdir(paired.tumor_data) seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data), work_dir, paired) call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data) out = [] if paired.normal_data: out.append(paired.normal_data) if "sv" not in paired.tumor_data: paired.tumor_data["sv"] = [] paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv", "call_file": call_file, "vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header, _seg_to_vcf, paired.tumor_data), "seg": seg_files["seg"], "plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)}) out.append(paired.tumor_data) return out
python
def _run_paired(paired): """Run somatic variant calling pipeline. """ from bcbio.structural import titancna work_dir = _sv_workdir(paired.tumor_data) seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data), work_dir, paired) call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data) out = [] if paired.normal_data: out.append(paired.normal_data) if "sv" not in paired.tumor_data: paired.tumor_data["sv"] = [] paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv", "call_file": call_file, "vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header, _seg_to_vcf, paired.tumor_data), "seg": seg_files["seg"], "plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)}) out.append(paired.tumor_data) return out
[ "def", "_run_paired", "(", "paired", ")", ":", "from", "bcbio", ".", "structural", "import", "titancna", "work_dir", "=", "_sv_workdir", "(", "paired", ".", "tumor_data", ")", "seg_files", "=", "model_segments", "(", "tz", ".", "get_in", "(", "[", "\"depth\"...
Run somatic variant calling pipeline.
[ "Run", "somatic", "variant", "calling", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L34-L54
train
219,153
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
call_copy_numbers
def call_copy_numbers(seg_file, work_dir, data): """Call copy numbers from a normalized and segmented input file. """ out_file = os.path.join(work_dir, "%s-call.seg" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CallCopyRatioSegments", "-I", seg_file, "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file
python
def call_copy_numbers(seg_file, work_dir, data): """Call copy numbers from a normalized and segmented input file. """ out_file = os.path.join(work_dir, "%s-call.seg" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CallCopyRatioSegments", "-I", seg_file, "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file
[ "def", "call_copy_numbers", "(", "seg_file", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-call.seg\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "if", "not", "utils",...
Call copy numbers from a normalized and segmented input file.
[ "Call", "copy", "numbers", "from", "a", "normalized", "and", "segmented", "input", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L56-L65
train
219,154
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
plot_model_segments
def plot_model_segments(seg_files, work_dir, data): """Diagnostic plots of segmentation and inputs. """ from bcbio.heterogeneity import chromhacks out_file = os.path.join(work_dir, "%s.modeled.png" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: dict_file = utils.splitext_plus(dd.get_ref_file(data))[0] + ".dict" plot_dict = os.path.join(os.path.dirname(tx_out_file), os.path.basename(dict_file)) with open(dict_file) as in_handle: with open(plot_dict, "w") as out_handle: for line in in_handle: if line.startswith("@SQ"): cur_chrom = [x.split(":", 1)[1].strip() for x in line.split("\t") if x.startswith("SN:")][0] if chromhacks.is_autosomal_or_sex(cur_chrom): out_handle.write(line) else: out_handle.write(line) params = ["-T", "PlotModeledSegments", "--denoised-copy-ratios", tz.get_in(["depth", "bins", "normalized"], data), "--segments", seg_files["final_seg"], "--allelic-counts", seg_files["tumor_hets"], "--sequence-dictionary", plot_dict, "--minimum-contig-length", "10", "--output-prefix", dd.get_sample_name(data), "-O", os.path.dirname(tx_out_file)] _run_with_memory_scaling(params, tx_out_file, data) return {"seg": out_file}
python
def plot_model_segments(seg_files, work_dir, data): """Diagnostic plots of segmentation and inputs. """ from bcbio.heterogeneity import chromhacks out_file = os.path.join(work_dir, "%s.modeled.png" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: dict_file = utils.splitext_plus(dd.get_ref_file(data))[0] + ".dict" plot_dict = os.path.join(os.path.dirname(tx_out_file), os.path.basename(dict_file)) with open(dict_file) as in_handle: with open(plot_dict, "w") as out_handle: for line in in_handle: if line.startswith("@SQ"): cur_chrom = [x.split(":", 1)[1].strip() for x in line.split("\t") if x.startswith("SN:")][0] if chromhacks.is_autosomal_or_sex(cur_chrom): out_handle.write(line) else: out_handle.write(line) params = ["-T", "PlotModeledSegments", "--denoised-copy-ratios", tz.get_in(["depth", "bins", "normalized"], data), "--segments", seg_files["final_seg"], "--allelic-counts", seg_files["tumor_hets"], "--sequence-dictionary", plot_dict, "--minimum-contig-length", "10", "--output-prefix", dd.get_sample_name(data), "-O", os.path.dirname(tx_out_file)] _run_with_memory_scaling(params, tx_out_file, data) return {"seg": out_file}
[ "def", "plot_model_segments", "(", "seg_files", ",", "work_dir", ",", "data", ")", ":", "from", "bcbio", ".", "heterogeneity", "import", "chromhacks", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.modeled.png\"", "%", "dd", "."...
Diagnostic plots of segmentation and inputs.
[ "Diagnostic", "plots", "of", "segmentation", "and", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L67-L95
train
219,155
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
model_segments
def model_segments(copy_file, work_dir, paired): """Perform segmentation on input copy number log2 ratio file. """ out_file = os.path.join(work_dir, "%s.cr.seg" % dd.get_sample_name(paired.tumor_data)) tumor_counts, normal_counts = heterogzygote_counts(paired) if not utils.file_exists(out_file): with file_transaction(paired.tumor_data, out_file) as tx_out_file: params = ["-T", "ModelSegments", "--denoised-copy-ratios", copy_file, "--allelic-counts", tumor_counts, "--output-prefix", dd.get_sample_name(paired.tumor_data), "-O", os.path.dirname(tx_out_file)] if normal_counts: params += ["--normal-allelic-counts", normal_counts] _run_with_memory_scaling(params, tx_out_file, paired.tumor_data) for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "%s*" % dd.get_sample_name(paired.tumor_data))): shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname))) return {"seg": out_file, "tumor_hets": out_file.replace(".cr.seg", ".hets.tsv"), "final_seg": out_file.replace(".cr.seg", ".modelFinal.seg")}
python
def model_segments(copy_file, work_dir, paired): """Perform segmentation on input copy number log2 ratio file. """ out_file = os.path.join(work_dir, "%s.cr.seg" % dd.get_sample_name(paired.tumor_data)) tumor_counts, normal_counts = heterogzygote_counts(paired) if not utils.file_exists(out_file): with file_transaction(paired.tumor_data, out_file) as tx_out_file: params = ["-T", "ModelSegments", "--denoised-copy-ratios", copy_file, "--allelic-counts", tumor_counts, "--output-prefix", dd.get_sample_name(paired.tumor_data), "-O", os.path.dirname(tx_out_file)] if normal_counts: params += ["--normal-allelic-counts", normal_counts] _run_with_memory_scaling(params, tx_out_file, paired.tumor_data) for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "%s*" % dd.get_sample_name(paired.tumor_data))): shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname))) return {"seg": out_file, "tumor_hets": out_file.replace(".cr.seg", ".hets.tsv"), "final_seg": out_file.replace(".cr.seg", ".modelFinal.seg")}
[ "def", "model_segments", "(", "copy_file", ",", "work_dir", ",", "paired", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.cr.seg\"", "%", "dd", ".", "get_sample_name", "(", "paired", ".", "tumor_data", ")", ")", "t...
Perform segmentation on input copy number log2 ratio file.
[ "Perform", "segmentation", "on", "input", "copy", "number", "log2", "ratio", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L97-L116
train
219,156
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
create_panel_of_normals
def create_panel_of_normals(items, group_id, work_dir): """Create a panel of normals from one or more background read counts. """ out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id)) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: params = ["-T", "CreateReadCountPanelOfNormals", "-O", tx_out_file, "--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])] for data in items: params += ["-I", tz.get_in(["depth", "bins", "target"], data)] _run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True) return out_file
python
def create_panel_of_normals(items, group_id, work_dir): """Create a panel of normals from one or more background read counts. """ out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id)) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: params = ["-T", "CreateReadCountPanelOfNormals", "-O", tx_out_file, "--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])] for data in items: params += ["-I", tz.get_in(["depth", "bins", "target"], data)] _run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True) return out_file
[ "def", "create_panel_of_normals", "(", "items", ",", "group_id", ",", "work_dir", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-%s-pon.hdf5\"", "%", "(", "dd", ".", "get_sample_name", "(", "items", "[", "0", "]", ...
Create a panel of normals from one or more background read counts.
[ "Create", "a", "panel", "of", "normals", "from", "one", "or", "more", "background", "read", "counts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L136-L148
train
219,157
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
pon_to_bed
def pon_to_bed(pon_file, out_dir, data): """Extract BED intervals from a GATK4 hdf5 panel of normal file. """ out_file = os.path.join(out_dir, "%s-intervals.bed" % (utils.splitext_plus(os.path.basename(pon_file))[0])) if not utils.file_uptodate(out_file, pon_file): import h5py with file_transaction(data, out_file) as tx_out_file: with h5py.File(pon_file, "r") as f: with open(tx_out_file, "w") as out_handle: intervals = f["original_data"]["intervals"] for i in range(len(intervals["transposed_index_start_end"][0])): chrom = intervals["indexed_contig_names"][intervals["transposed_index_start_end"][0][i]] start = int(intervals["transposed_index_start_end"][1][i]) - 1 end = int(intervals["transposed_index_start_end"][2][i]) out_handle.write("%s\t%s\t%s\n" % (chrom, start, end)) return out_file
python
def pon_to_bed(pon_file, out_dir, data): """Extract BED intervals from a GATK4 hdf5 panel of normal file. """ out_file = os.path.join(out_dir, "%s-intervals.bed" % (utils.splitext_plus(os.path.basename(pon_file))[0])) if not utils.file_uptodate(out_file, pon_file): import h5py with file_transaction(data, out_file) as tx_out_file: with h5py.File(pon_file, "r") as f: with open(tx_out_file, "w") as out_handle: intervals = f["original_data"]["intervals"] for i in range(len(intervals["transposed_index_start_end"][0])): chrom = intervals["indexed_contig_names"][intervals["transposed_index_start_end"][0][i]] start = int(intervals["transposed_index_start_end"][1][i]) - 1 end = int(intervals["transposed_index_start_end"][2][i]) out_handle.write("%s\t%s\t%s\n" % (chrom, start, end)) return out_file
[ "def", "pon_to_bed", "(", "pon_file", ",", "out_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-intervals.bed\"", "%", "(", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", ...
Extract BED intervals from a GATK4 hdf5 panel of normal file.
[ "Extract", "BED", "intervals", "from", "a", "GATK4", "hdf5", "panel", "of", "normal", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L150-L165
train
219,158
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
prepare_intervals
def prepare_intervals(data, region_file, work_dir): """Prepare interval regions for targeted and gene based regions. """ target_file = os.path.join(work_dir, "%s-target.interval_list" % dd.get_sample_name(data)) if not utils.file_uptodate(target_file, region_file): with file_transaction(data, target_file) as tx_out_file: params = ["-T", "PreprocessIntervals", "-R", dd.get_ref_file(data), "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file] if dd.get_coverage_interval(data) == "genome": params += ["--bin-length", "1000", "--padding", "0"] else: params += ["-L", region_file, "--bin-length", "0", "--padding", "250"] _run_with_memory_scaling(params, tx_out_file, data) return target_file
python
def prepare_intervals(data, region_file, work_dir): """Prepare interval regions for targeted and gene based regions. """ target_file = os.path.join(work_dir, "%s-target.interval_list" % dd.get_sample_name(data)) if not utils.file_uptodate(target_file, region_file): with file_transaction(data, target_file) as tx_out_file: params = ["-T", "PreprocessIntervals", "-R", dd.get_ref_file(data), "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file] if dd.get_coverage_interval(data) == "genome": params += ["--bin-length", "1000", "--padding", "0"] else: params += ["-L", region_file, "--bin-length", "0", "--padding", "250"] _run_with_memory_scaling(params, tx_out_file, data) return target_file
[ "def", "prepare_intervals", "(", "data", ",", "region_file", ",", "work_dir", ")", ":", "target_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-target.interval_list\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "if", ...
Prepare interval regions for targeted and gene based regions.
[ "Prepare", "interval", "regions", "for", "targeted", "and", "gene", "based", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L167-L181
train
219,159
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
annotate_intervals
def annotate_intervals(target_file, data): """Provide GC annotated intervals for error correction during panels and denoising. TODO: include mappability and segmentation duplication inputs """ out_file = "%s-gcannotated.tsv" % utils.splitext_plus(target_file)[0] if not utils.file_uptodate(out_file, target_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "AnnotateIntervals", "-R", dd.get_ref_file(data), "-L", target_file, "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file
python
def annotate_intervals(target_file, data): """Provide GC annotated intervals for error correction during panels and denoising. TODO: include mappability and segmentation duplication inputs """ out_file = "%s-gcannotated.tsv" % utils.splitext_plus(target_file)[0] if not utils.file_uptodate(out_file, target_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "AnnotateIntervals", "-R", dd.get_ref_file(data), "-L", target_file, "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file
[ "def", "annotate_intervals", "(", "target_file", ",", "data", ")", ":", "out_file", "=", "\"%s-gcannotated.tsv\"", "%", "utils", ".", "splitext_plus", "(", "target_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "...
Provide GC annotated intervals for error correction during panels and denoising. TODO: include mappability and segmentation duplication inputs
[ "Provide", "GC", "annotated", "intervals", "for", "error", "correction", "during", "panels", "and", "denoising", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L183-L196
train
219,160
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
collect_read_counts
def collect_read_counts(data, work_dir): """Count reads in defined bins using CollectReadCounts. """ out_file = os.path.join(work_dir, "%s-target-coverage.hdf5" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CollectReadCounts", "-I", dd.get_align_bam(data), "-L", tz.get_in(["regions", "bins", "target"], data), "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file, "--format", "HDF5"] _run_with_memory_scaling(params, tx_out_file, data) return out_file
python
def collect_read_counts(data, work_dir): """Count reads in defined bins using CollectReadCounts. """ out_file = os.path.join(work_dir, "%s-target-coverage.hdf5" % dd.get_sample_name(data)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CollectReadCounts", "-I", dd.get_align_bam(data), "-L", tz.get_in(["regions", "bins", "target"], data), "--interval-merging-rule", "OVERLAPPING_ONLY", "-O", tx_out_file, "--format", "HDF5"] _run_with_memory_scaling(params, tx_out_file, data) return out_file
[ "def", "collect_read_counts", "(", "data", ",", "work_dir", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-target-coverage.hdf5\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "if", "not", "utils", ".",...
Count reads in defined bins using CollectReadCounts.
[ "Count", "reads", "in", "defined", "bins", "using", "CollectReadCounts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L198-L209
train
219,161
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
_filter_by_normal
def _filter_by_normal(tumor_counts, normal_counts, data): """Filter count files based on normal frequency and median depth, avoiding high depth regions. For frequency, restricts normal positions to those between 0.4 and 0.65 For depth, matches approach used in AMBER to try and avoid problematic genomic regions with high count in the normal: https://github.com/hartwigmedical/hmftools/tree/master/amber#usage """ from bcbio.heterogeneity import bubbletree fparams = bubbletree.NORMAL_FILTER_PARAMS tumor_out = "%s-normfilter%s" % utils.splitext_plus(tumor_counts) normal_out = "%s-normfilter%s" % utils.splitext_plus(normal_counts) if not utils.file_uptodate(tumor_out, tumor_counts): with file_transaction(data, tumor_out, normal_out) as (tx_tumor_out, tx_normal_out): median_depth = _get_normal_median_depth(normal_counts) min_normal_depth = median_depth * fparams["min_depth_percent"] max_normal_depth = median_depth * fparams["max_depth_percent"] with open(tumor_counts) as tumor_handle: with open(normal_counts) as normal_handle: with open(tx_tumor_out, "w") as tumor_out_handle: with open(tx_normal_out, "w") as normal_out_handle: header = None for t, n in zip(tumor_handle, normal_handle): if header is None: if not n.startswith("@"): header = n.strip().split() tumor_out_handle.write(t) normal_out_handle.write(n) elif (_normal_passes_depth(header, n, min_normal_depth, max_normal_depth) and _normal_passes_freq(header, n, fparams)): tumor_out_handle.write(t) normal_out_handle.write(n) return tumor_out, normal_out
python
def _filter_by_normal(tumor_counts, normal_counts, data): """Filter count files based on normal frequency and median depth, avoiding high depth regions. For frequency, restricts normal positions to those between 0.4 and 0.65 For depth, matches approach used in AMBER to try and avoid problematic genomic regions with high count in the normal: https://github.com/hartwigmedical/hmftools/tree/master/amber#usage """ from bcbio.heterogeneity import bubbletree fparams = bubbletree.NORMAL_FILTER_PARAMS tumor_out = "%s-normfilter%s" % utils.splitext_plus(tumor_counts) normal_out = "%s-normfilter%s" % utils.splitext_plus(normal_counts) if not utils.file_uptodate(tumor_out, tumor_counts): with file_transaction(data, tumor_out, normal_out) as (tx_tumor_out, tx_normal_out): median_depth = _get_normal_median_depth(normal_counts) min_normal_depth = median_depth * fparams["min_depth_percent"] max_normal_depth = median_depth * fparams["max_depth_percent"] with open(tumor_counts) as tumor_handle: with open(normal_counts) as normal_handle: with open(tx_tumor_out, "w") as tumor_out_handle: with open(tx_normal_out, "w") as normal_out_handle: header = None for t, n in zip(tumor_handle, normal_handle): if header is None: if not n.startswith("@"): header = n.strip().split() tumor_out_handle.write(t) normal_out_handle.write(n) elif (_normal_passes_depth(header, n, min_normal_depth, max_normal_depth) and _normal_passes_freq(header, n, fparams)): tumor_out_handle.write(t) normal_out_handle.write(n) return tumor_out, normal_out
[ "def", "_filter_by_normal", "(", "tumor_counts", ",", "normal_counts", ",", "data", ")", ":", "from", "bcbio", ".", "heterogeneity", "import", "bubbletree", "fparams", "=", "bubbletree", ".", "NORMAL_FILTER_PARAMS", "tumor_out", "=", "\"%s-normfilter%s\"", "%", "uti...
Filter count files based on normal frequency and median depth, avoiding high depth regions. For frequency, restricts normal positions to those between 0.4 and 0.65 For depth, matches approach used in AMBER to try and avoid problematic genomic regions with high count in the normal: https://github.com/hartwigmedical/hmftools/tree/master/amber#usage
[ "Filter", "count", "files", "based", "on", "normal", "frequency", "and", "median", "depth", "avoiding", "high", "depth", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L226-L259
train
219,162
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
_run_collect_allelic_counts
def _run_collect_allelic_counts(pos_file, pos_name, work_dir, data): """Counts by alleles for a specific sample and set of positions. """ out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", "counts")) out_file = os.path.join(out_dir, "%s-%s-counts.tsv" % (dd.get_sample_name(data), pos_name)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CollectAllelicCounts", "-L", pos_file, "-I", dd.get_align_bam(data), "-R", dd.get_ref_file(data), "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file
python
def _run_collect_allelic_counts(pos_file, pos_name, work_dir, data): """Counts by alleles for a specific sample and set of positions. """ out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural", "counts")) out_file = os.path.join(out_dir, "%s-%s-counts.tsv" % (dd.get_sample_name(data), pos_name)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: params = ["-T", "CollectAllelicCounts", "-L", pos_file, "-I", dd.get_align_bam(data), "-R", dd.get_ref_file(data), "-O", tx_out_file] _run_with_memory_scaling(params, tx_out_file, data) return out_file
[ "def", "_run_collect_allelic_counts", "(", "pos_file", ",", "pos_name", ",", "work_dir", ",", "data", ")", ":", "out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"s...
Counts by alleles for a specific sample and set of positions.
[ "Counts", "by", "alleles", "for", "a", "specific", "sample", "and", "set", "of", "positions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L287-L297
train
219,163
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
_seg_to_vcf
def _seg_to_vcf(vals): """Convert GATK CNV calls seg output to a VCF line. """ call_to_cn = {"+": 3, "-": 1} call_to_type = {"+": "DUP", "-": "DEL"} if vals["CALL"] not in ["0"]: info = ["FOLD_CHANGE_LOG=%s" % vals["MEAN_LOG2_COPY_RATIO"], "PROBES=%s" % vals["NUM_POINTS_COPY_RATIO"], "SVTYPE=%s" % call_to_type[vals["CALL"]], "SVLEN=%s" % (int(vals["END"]) - int(vals["START"])), "END=%s" % vals["END"], "CN=%s" % call_to_cn[vals["CALL"]]] return [vals["CONTIG"], vals["START"], ".", "N", "<%s>" % call_to_type[vals["CALL"]], ".", ".", ";".join(info), "GT", "0/1"]
python
def _seg_to_vcf(vals): """Convert GATK CNV calls seg output to a VCF line. """ call_to_cn = {"+": 3, "-": 1} call_to_type = {"+": "DUP", "-": "DEL"} if vals["CALL"] not in ["0"]: info = ["FOLD_CHANGE_LOG=%s" % vals["MEAN_LOG2_COPY_RATIO"], "PROBES=%s" % vals["NUM_POINTS_COPY_RATIO"], "SVTYPE=%s" % call_to_type[vals["CALL"]], "SVLEN=%s" % (int(vals["END"]) - int(vals["START"])), "END=%s" % vals["END"], "CN=%s" % call_to_cn[vals["CALL"]]] return [vals["CONTIG"], vals["START"], ".", "N", "<%s>" % call_to_type[vals["CALL"]], ".", ".", ";".join(info), "GT", "0/1"]
[ "def", "_seg_to_vcf", "(", "vals", ")", ":", "call_to_cn", "=", "{", "\"+\"", ":", "3", ",", "\"-\"", ":", "1", "}", "call_to_type", "=", "{", "\"+\"", ":", "\"DUP\"", ",", "\"-\"", ":", "\"DEL\"", "}", "if", "vals", "[", "\"CALL\"", "]", "not", "i...
Convert GATK CNV calls seg output to a VCF line.
[ "Convert", "GATK", "CNV", "calls", "seg", "output", "to", "a", "VCF", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L313-L326
train
219,164
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
make_bcbiornaseq_object
def make_bcbiornaseq_object(data): """ load the initial bcb.rda object using bcbioRNASeq """ if "bcbiornaseq" not in dd.get_tools_on(data): return data upload_dir = tz.get_in(("upload", "dir"), data) report_dir = os.path.join(upload_dir, "bcbioRNASeq") safe_makedir(report_dir) organism = dd.get_bcbiornaseq(data).get("organism", None) groups = dd.get_bcbiornaseq(data).get("interesting_groups", None) loadstring = create_load_string(upload_dir, groups, organism) r_file = os.path.join(report_dir, "load_bcbioRNAseq.R") with file_transaction(r_file) as tmp_file: memoize_write_file(loadstring, tmp_file) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", r_file], "Loading bcbioRNASeq object.") make_quality_report(data) return data
python
def make_bcbiornaseq_object(data): """ load the initial bcb.rda object using bcbioRNASeq """ if "bcbiornaseq" not in dd.get_tools_on(data): return data upload_dir = tz.get_in(("upload", "dir"), data) report_dir = os.path.join(upload_dir, "bcbioRNASeq") safe_makedir(report_dir) organism = dd.get_bcbiornaseq(data).get("organism", None) groups = dd.get_bcbiornaseq(data).get("interesting_groups", None) loadstring = create_load_string(upload_dir, groups, organism) r_file = os.path.join(report_dir, "load_bcbioRNAseq.R") with file_transaction(r_file) as tmp_file: memoize_write_file(loadstring, tmp_file) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", r_file], "Loading bcbioRNASeq object.") make_quality_report(data) return data
[ "def", "make_bcbiornaseq_object", "(", "data", ")", ":", "if", "\"bcbiornaseq\"", "not", "in", "dd", ".", "get_tools_on", "(", "data", ")", ":", "return", "data", "upload_dir", "=", "tz", ".", "get_in", "(", "(", "\"upload\"", ",", "\"dir\"", ")", ",", "...
load the initial bcb.rda object using bcbioRNASeq
[ "load", "the", "initial", "bcb", ".", "rda", "object", "using", "bcbioRNASeq" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L12-L31
train
219,165
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
make_quality_report
def make_quality_report(data): """ create and render the bcbioRNASeq quality report """ if "bcbiornaseq" not in dd.get_tools_on(data): return data upload_dir = tz.get_in(("upload", "dir"), data) report_dir = os.path.join(upload_dir, "bcbioRNASeq") safe_makedir(report_dir) quality_rmd = os.path.join(report_dir, "quality_control.Rmd") quality_html = os.path.join(report_dir, "quality_control.html") quality_rmd = rmarkdown_draft(quality_rmd, "quality_control", "bcbioRNASeq") if not file_exists(quality_html): render_rmarkdown_file(quality_rmd) return data
python
def make_quality_report(data): """ create and render the bcbioRNASeq quality report """ if "bcbiornaseq" not in dd.get_tools_on(data): return data upload_dir = tz.get_in(("upload", "dir"), data) report_dir = os.path.join(upload_dir, "bcbioRNASeq") safe_makedir(report_dir) quality_rmd = os.path.join(report_dir, "quality_control.Rmd") quality_html = os.path.join(report_dir, "quality_control.html") quality_rmd = rmarkdown_draft(quality_rmd, "quality_control", "bcbioRNASeq") if not file_exists(quality_html): render_rmarkdown_file(quality_rmd) return data
[ "def", "make_quality_report", "(", "data", ")", ":", "if", "\"bcbiornaseq\"", "not", "in", "dd", ".", "get_tools_on", "(", "data", ")", ":", "return", "data", "upload_dir", "=", "tz", ".", "get_in", "(", "(", "\"upload\"", ",", "\"dir\"", ")", ",", "data...
create and render the bcbioRNASeq quality report
[ "create", "and", "render", "the", "bcbioRNASeq", "quality", "report" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L33-L47
train
219,166
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
rmarkdown_draft
def rmarkdown_draft(filename, template, package): """ create a draft rmarkdown file from an installed template """ if file_exists(filename): return filename draft_template = Template( 'rmarkdown::draft("$filename", template="$template", package="$package", edit=FALSE)' ) draft_string = draft_template.substitute( filename=filename, template=template, package=package) report_dir = os.path.dirname(filename) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", "-e", draft_string], "Creating bcbioRNASeq quality control template.") do.run(["sed", "-i", "s/YYYY-MM-DD\///g", filename], "Editing bcbioRNAseq quality control template.") return filename
python
def rmarkdown_draft(filename, template, package): """ create a draft rmarkdown file from an installed template """ if file_exists(filename): return filename draft_template = Template( 'rmarkdown::draft("$filename", template="$template", package="$package", edit=FALSE)' ) draft_string = draft_template.substitute( filename=filename, template=template, package=package) report_dir = os.path.dirname(filename) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", "-e", draft_string], "Creating bcbioRNASeq quality control template.") do.run(["sed", "-i", "s/YYYY-MM-DD\///g", filename], "Editing bcbioRNAseq quality control template.") return filename
[ "def", "rmarkdown_draft", "(", "filename", ",", "template", ",", "package", ")", ":", "if", "file_exists", "(", "filename", ")", ":", "return", "filename", "draft_template", "=", "Template", "(", "'rmarkdown::draft(\"$filename\", template=\"$template\", package=\"$package...
create a draft rmarkdown file from an installed template
[ "create", "a", "draft", "rmarkdown", "file", "from", "an", "installed", "template" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L49-L65
train
219,167
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
render_rmarkdown_file
def render_rmarkdown_file(filename): """ render a rmarkdown file using the rmarkdown library """ render_template = Template( 'rmarkdown::render("$filename")' ) render_string = render_template.substitute( filename=filename) report_dir = os.path.dirname(filename) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", "-e", render_string], "Rendering bcbioRNASeq quality control report.") return filename
python
def render_rmarkdown_file(filename): """ render a rmarkdown file using the rmarkdown library """ render_template = Template( 'rmarkdown::render("$filename")' ) render_string = render_template.substitute( filename=filename) report_dir = os.path.dirname(filename) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", "-e", render_string], "Rendering bcbioRNASeq quality control report.") return filename
[ "def", "render_rmarkdown_file", "(", "filename", ")", ":", "render_template", "=", "Template", "(", "'rmarkdown::render(\"$filename\")'", ")", "render_string", "=", "render_template", ".", "substitute", "(", "filename", "=", "filename", ")", "report_dir", "=", "os", ...
render a rmarkdown file using the rmarkdown library
[ "render", "a", "rmarkdown", "file", "using", "the", "rmarkdown", "library" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L67-L80
train
219,168
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
create_load_string
def create_load_string(upload_dir, groups=None, organism=None): """ create the code necessary to load the bcbioRNAseq object """ libraryline = 'library(bcbioRNASeq)' load_template = Template( ('bcb <- bcbioRNASeq(uploadDir="$upload_dir",' 'interestingGroups=$groups,' 'organism="$organism")')) load_noorganism_template = Template( ('bcb <- bcbioRNASeq(uploadDir="$upload_dir",' 'interestingGroups=$groups,' 'organism=NULL)')) flatline = 'flat <- flatFiles(bcb)' saveline = 'saveData(bcb, flat, dir="data")' if groups: groups = _list2Rlist(groups) else: groups = _quotestring("sampleName") if organism: load_bcbio = load_template.substitute( upload_dir=upload_dir, groups=groups, organism=organism) else: load_bcbio = load_noorganism_template.substitute(upload_dir=upload_dir, groups=groups) return ";\n".join([libraryline, load_bcbio, flatline, saveline])
python
def create_load_string(upload_dir, groups=None, organism=None): """ create the code necessary to load the bcbioRNAseq object """ libraryline = 'library(bcbioRNASeq)' load_template = Template( ('bcb <- bcbioRNASeq(uploadDir="$upload_dir",' 'interestingGroups=$groups,' 'organism="$organism")')) load_noorganism_template = Template( ('bcb <- bcbioRNASeq(uploadDir="$upload_dir",' 'interestingGroups=$groups,' 'organism=NULL)')) flatline = 'flat <- flatFiles(bcb)' saveline = 'saveData(bcb, flat, dir="data")' if groups: groups = _list2Rlist(groups) else: groups = _quotestring("sampleName") if organism: load_bcbio = load_template.substitute( upload_dir=upload_dir, groups=groups, organism=organism) else: load_bcbio = load_noorganism_template.substitute(upload_dir=upload_dir, groups=groups) return ";\n".join([libraryline, load_bcbio, flatline, saveline])
[ "def", "create_load_string", "(", "upload_dir", ",", "groups", "=", "None", ",", "organism", "=", "None", ")", ":", "libraryline", "=", "'library(bcbioRNASeq)'", "load_template", "=", "Template", "(", "(", "'bcb <- bcbioRNASeq(uploadDir=\"$upload_dir\",'", "'interesting...
create the code necessary to load the bcbioRNAseq object
[ "create", "the", "code", "necessary", "to", "load", "the", "bcbioRNAseq", "object" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L82-L107
train
219,169
bcbio/bcbio-nextgen
bcbio/rnaseq/bcbiornaseq.py
_list2Rlist
def _list2Rlist(xs): """ convert a python list to an R list """ if isinstance(xs, six.string_types): xs = [xs] rlist = ",".join([_quotestring(x) for x in xs]) return "c(" + rlist + ")"
python
def _list2Rlist(xs): """ convert a python list to an R list """ if isinstance(xs, six.string_types): xs = [xs] rlist = ",".join([_quotestring(x) for x in xs]) return "c(" + rlist + ")"
[ "def", "_list2Rlist", "(", "xs", ")", ":", "if", "isinstance", "(", "xs", ",", "six", ".", "string_types", ")", ":", "xs", "=", "[", "xs", "]", "rlist", "=", "\",\"", ".", "join", "(", "[", "_quotestring", "(", "x", ")", "for", "x", "in", "xs", ...
convert a python list to an R list
[ "convert", "a", "python", "list", "to", "an", "R", "list" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/bcbiornaseq.py#L124-L129
train
219,170
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
_run_qsnp_paired
def _run_qsnp_paired(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Detect somatic mutations with qSNP. This is used for paired tumor / normal samples. """ config = items[0]["config"] if out_file is None: out_file = "%s-paired-variants.vcf" % os.path.splitext(align_bams[0])[0] if not utils.file_exists(out_file): out_file = out_file.replace(".gz", "") with file_transaction(config, out_file) as tx_out_file: with tx_tmpdir(config) as tmpdir: with utils.chdir(tmpdir): paired = get_paired_bams(align_bams, items) qsnp = config_utils.get_program("qsnp", config) resources = config_utils.get_resources("qsnp", config) mem = " ".join(resources.get("jvm_opts", ["-Xms750m -Xmx4g"])) qsnp_log = os.path.join(tmpdir, "qsnp.log") qsnp_init = os.path.join(tmpdir, "qsnp.ini") if region: paired = _create_bam_region(paired, region, tmpdir) _create_input(paired, tx_out_file, ref_file, assoc_files['dbsnp'], qsnp_init) cl = ("{qsnp} {mem} -i {qsnp_init} -log {qsnp_log}") do.run(cl.format(**locals()), "Genotyping paired variants with Qsnp", {}) out_file = _filter_vcf(out_file) out_file = bgzip_and_index(out_file, config) return out_file
python
def _run_qsnp_paired(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Detect somatic mutations with qSNP. This is used for paired tumor / normal samples. """ config = items[0]["config"] if out_file is None: out_file = "%s-paired-variants.vcf" % os.path.splitext(align_bams[0])[0] if not utils.file_exists(out_file): out_file = out_file.replace(".gz", "") with file_transaction(config, out_file) as tx_out_file: with tx_tmpdir(config) as tmpdir: with utils.chdir(tmpdir): paired = get_paired_bams(align_bams, items) qsnp = config_utils.get_program("qsnp", config) resources = config_utils.get_resources("qsnp", config) mem = " ".join(resources.get("jvm_opts", ["-Xms750m -Xmx4g"])) qsnp_log = os.path.join(tmpdir, "qsnp.log") qsnp_init = os.path.join(tmpdir, "qsnp.ini") if region: paired = _create_bam_region(paired, region, tmpdir) _create_input(paired, tx_out_file, ref_file, assoc_files['dbsnp'], qsnp_init) cl = ("{qsnp} {mem} -i {qsnp_init} -log {qsnp_log}") do.run(cl.format(**locals()), "Genotyping paired variants with Qsnp", {}) out_file = _filter_vcf(out_file) out_file = bgzip_and_index(out_file, config) return out_file
[ "def", "_run_qsnp_paired", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "if", "out_file", "is", ...
Detect somatic mutations with qSNP. This is used for paired tumor / normal samples.
[ "Detect", "somatic", "mutations", "with", "qSNP", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L55-L82
train
219,171
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
_clean_regions
def _clean_regions(items, region): """Intersect region with target file if it exists""" variant_regions = bedutils.population_variant_regions(items, merged=True) with utils.tmpfile() as tx_out_file: target = subset_variant_regions(variant_regions, region, tx_out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): target = _load_regions(target) else: target = [target] return target
python
def _clean_regions(items, region): """Intersect region with target file if it exists""" variant_regions = bedutils.population_variant_regions(items, merged=True) with utils.tmpfile() as tx_out_file: target = subset_variant_regions(variant_regions, region, tx_out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): target = _load_regions(target) else: target = [target] return target
[ "def", "_clean_regions", "(", "items", ",", "region", ")", ":", "variant_regions", "=", "bedutils", ".", "population_variant_regions", "(", "items", ",", "merged", "=", "True", ")", "with", "utils", ".", "tmpfile", "(", ")", "as", "tx_out_file", ":", "target...
Intersect region with target file if it exists
[ "Intersect", "region", "with", "target", "file", "if", "it", "exists" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L84-L94
train
219,172
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
_load_regions
def _load_regions(target): """Get list of tupples from bed file""" regions = [] with open(target) as in_handle: for line in in_handle: if not line.startswith("#"): c, s, e = line.strip().split("\t") regions.append((c, s, e)) return regions
python
def _load_regions(target): """Get list of tupples from bed file""" regions = [] with open(target) as in_handle: for line in in_handle: if not line.startswith("#"): c, s, e = line.strip().split("\t") regions.append((c, s, e)) return regions
[ "def", "_load_regions", "(", "target", ")", ":", "regions", "=", "[", "]", "with", "open", "(", "target", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "not", "line", ".", "startswith", "(", "\"#\"", ")", ":", "c", ",", ...
Get list of tupples from bed file
[ "Get", "list", "of", "tupples", "from", "bed", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L96-L104
train
219,173
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
_slice_bam
def _slice_bam(in_bam, region, tmp_dir, config): """Use sambamba to slice a bam region""" name_file = os.path.splitext(os.path.basename(in_bam))[0] out_file = os.path.join(tmp_dir, os.path.join(tmp_dir, name_file + _to_str(region) + ".bam")) sambamba = config_utils.get_program("sambamba", config) region = _to_sambamba(region) with file_transaction(out_file) as tx_out_file: cmd = ("{sambamba} slice {in_bam} {region} -o {tx_out_file}") do.run(cmd.format(**locals()), "Slice region", {}) return out_file
python
def _slice_bam(in_bam, region, tmp_dir, config): """Use sambamba to slice a bam region""" name_file = os.path.splitext(os.path.basename(in_bam))[0] out_file = os.path.join(tmp_dir, os.path.join(tmp_dir, name_file + _to_str(region) + ".bam")) sambamba = config_utils.get_program("sambamba", config) region = _to_sambamba(region) with file_transaction(out_file) as tx_out_file: cmd = ("{sambamba} slice {in_bam} {region} -o {tx_out_file}") do.run(cmd.format(**locals()), "Slice region", {}) return out_file
[ "def", "_slice_bam", "(", "in_bam", ",", "region", ",", "tmp_dir", ",", "config", ")", ":", "name_file", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "in_bam", ")", ")", "[", "0", "]", "out_file", "=", "os"...
Use sambamba to slice a bam region
[ "Use", "sambamba", "to", "slice", "a", "bam", "region" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L114-L123
train
219,174
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
_create_input
def _create_input(paired, out_file, ref_file, snp_file, qsnp_file): """Create INI input for qSNP""" ini_file["[inputFiles]"]["dbSNP"] = snp_file ini_file["[inputFiles]"]["ref"] = ref_file ini_file["[inputFiles]"]["normalBam"] = paired.normal_bam ini_file["[inputFiles]"]["tumourBam"] = paired.tumor_bam ini_file["[ids]"]["normalSample"] = paired.normal_name ini_file["[ids]"]["tumourSample"] = paired.tumor_name ini_file["[ids]"]["donor"] = paired.tumor_name ini_file["[outputFiles]"]["vcf"] = out_file with open(qsnp_file, "w") as out_handle: for k, v in ini_file.items(): out_handle.write("%s\n" % k) for opt, value in v.items(): if value != "": out_handle.write("%s = %s\n" % (opt, value))
python
def _create_input(paired, out_file, ref_file, snp_file, qsnp_file): """Create INI input for qSNP""" ini_file["[inputFiles]"]["dbSNP"] = snp_file ini_file["[inputFiles]"]["ref"] = ref_file ini_file["[inputFiles]"]["normalBam"] = paired.normal_bam ini_file["[inputFiles]"]["tumourBam"] = paired.tumor_bam ini_file["[ids]"]["normalSample"] = paired.normal_name ini_file["[ids]"]["tumourSample"] = paired.tumor_name ini_file["[ids]"]["donor"] = paired.tumor_name ini_file["[outputFiles]"]["vcf"] = out_file with open(qsnp_file, "w") as out_handle: for k, v in ini_file.items(): out_handle.write("%s\n" % k) for opt, value in v.items(): if value != "": out_handle.write("%s = %s\n" % (opt, value))
[ "def", "_create_input", "(", "paired", ",", "out_file", ",", "ref_file", ",", "snp_file", ",", "qsnp_file", ")", ":", "ini_file", "[", "\"[inputFiles]\"", "]", "[", "\"dbSNP\"", "]", "=", "snp_file", "ini_file", "[", "\"[inputFiles]\"", "]", "[", "\"ref\"", ...
Create INI input for qSNP
[ "Create", "INI", "input", "for", "qSNP" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L125-L140
train
219,175
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
_filter_vcf
def _filter_vcf(out_file): """Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference. """ in_file = out_file.replace(".vcf", "-ori.vcf") FILTER_line = ('##FILTER=<ID=SBIAS,Description="Due to bias">\n' '##FILTER=<ID=5BP,Description="Due to 5BP">\n' '##FILTER=<ID=REJECT,Description="Not somatic due to qSNP filters">\n') SOMATIC_line = '##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="somatic event">\n' if not utils.file_exists(in_file): shutil.move(out_file, in_file) with file_transaction(out_file) as tx_out_file: with open(in_file) as in_handle, open(tx_out_file, "w") as out_handle: for line in in_handle: if line.startswith("##normalSample="): normal_name = line.strip().split("=")[1] if line.startswith("##patient_id="): tumor_name = line.strip().split("=")[1] if line.startswith("#CHROM"): line = line.replace("Normal", normal_name) line = line.replace("Tumour", tumor_name) if line.startswith("##INFO=<ID=FS"): line = line.replace("ID=FS", "ID=RNT") if line.find("FS=") > -1: line = line.replace("FS=", "RNT=") if "5BP" in line: line = sub("5BP[0-9]+", "5BP", line) if line.find("PASS") == -1: line = _set_reject(line) if line.find("PASS") > - 1 and line.find("SOMATIC") == -1: line = _set_reject(line) if not _has_ambiguous_ref_allele(line): out_handle.write(line) if line.startswith("##FILTER") and FILTER_line: out_handle.write("%s" % FILTER_line) FILTER_line = "" if line.startswith("##INFO") and SOMATIC_line: out_handle.write("%s" % SOMATIC_line) SOMATIC_line = "" return out_file
python
def _filter_vcf(out_file): """Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference. """ in_file = out_file.replace(".vcf", "-ori.vcf") FILTER_line = ('##FILTER=<ID=SBIAS,Description="Due to bias">\n' '##FILTER=<ID=5BP,Description="Due to 5BP">\n' '##FILTER=<ID=REJECT,Description="Not somatic due to qSNP filters">\n') SOMATIC_line = '##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="somatic event">\n' if not utils.file_exists(in_file): shutil.move(out_file, in_file) with file_transaction(out_file) as tx_out_file: with open(in_file) as in_handle, open(tx_out_file, "w") as out_handle: for line in in_handle: if line.startswith("##normalSample="): normal_name = line.strip().split("=")[1] if line.startswith("##patient_id="): tumor_name = line.strip().split("=")[1] if line.startswith("#CHROM"): line = line.replace("Normal", normal_name) line = line.replace("Tumour", tumor_name) if line.startswith("##INFO=<ID=FS"): line = line.replace("ID=FS", "ID=RNT") if line.find("FS=") > -1: line = line.replace("FS=", "RNT=") if "5BP" in line: line = sub("5BP[0-9]+", "5BP", line) if line.find("PASS") == -1: line = _set_reject(line) if line.find("PASS") > - 1 and line.find("SOMATIC") == -1: line = _set_reject(line) if not _has_ambiguous_ref_allele(line): out_handle.write(line) if line.startswith("##FILTER") and FILTER_line: out_handle.write("%s" % FILTER_line) FILTER_line = "" if line.startswith("##INFO") and SOMATIC_line: out_handle.write("%s" % SOMATIC_line) SOMATIC_line = "" return out_file
[ "def", "_filter_vcf", "(", "out_file", ")", ":", "in_file", "=", "out_file", ".", "replace", "(", "\".vcf\"", ",", "\"-ori.vcf\"", ")", "FILTER_line", "=", "(", "'##FILTER=<ID=SBIAS,Description=\"Due to bias\">\\n'", "'##FILTER=<ID=5BP,Description=\"Due to 5BP\">\\n'", "'##...
Fix sample names, FILTER and FORMAT fields. Remove lines with ambiguous reference.
[ "Fix", "sample", "names", "FILTER", "and", "FORMAT", "fields", ".", "Remove", "lines", "with", "ambiguous", "reference", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L147-L185
train
219,176
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
_set_reject
def _set_reject(line): """Set REJECT in VCF line, or add it if there is something else.""" if line.startswith("#"): return line parts = line.split("\t") if parts[6] == "PASS": parts[6] = "REJECT" else: parts[6] += ";REJECT" return "\t".join(parts)
python
def _set_reject(line): """Set REJECT in VCF line, or add it if there is something else.""" if line.startswith("#"): return line parts = line.split("\t") if parts[6] == "PASS": parts[6] = "REJECT" else: parts[6] += ";REJECT" return "\t".join(parts)
[ "def", "_set_reject", "(", "line", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "return", "line", "parts", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "parts", "[", "6", "]", "==", "\"PASS\"", ":", "parts", "[", "6",...
Set REJECT in VCF line, or add it if there is something else.
[ "Set", "REJECT", "in", "VCF", "line", "or", "add", "it", "if", "there", "is", "something", "else", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L193-L202
train
219,177
bcbio/bcbio-nextgen
scripts/utils/cg_svevents_to_vcf.py
svevent_reader
def svevent_reader(in_file): """Lazy generator of SV events, returned as dictionary of parts. """ with open(in_file) as in_handle: while 1: line = next(in_handle) if line.startswith(">"): break header = line[1:].rstrip().split("\t") reader = csv.reader(in_handle, dialect="excel-tab") for parts in reader: out = {} for h, p in zip(header, parts): out[h] = p yield out
python
def svevent_reader(in_file): """Lazy generator of SV events, returned as dictionary of parts. """ with open(in_file) as in_handle: while 1: line = next(in_handle) if line.startswith(">"): break header = line[1:].rstrip().split("\t") reader = csv.reader(in_handle, dialect="excel-tab") for parts in reader: out = {} for h, p in zip(header, parts): out[h] = p yield out
[ "def", "svevent_reader", "(", "in_file", ")", ":", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "while", "1", ":", "line", "=", "next", "(", "in_handle", ")", "if", "line", ".", "startswith", "(", "\">\"", ")", ":", "break", "header", ...
Lazy generator of SV events, returned as dictionary of parts.
[ "Lazy", "generator", "of", "SV", "events", "returned", "as", "dictionary", "of", "parts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/cg_svevents_to_vcf.py#L64-L78
train
219,178
bcbio/bcbio-nextgen
bcbio/cwl/inspect.py
initialize_watcher
def initialize_watcher(samples): """ check to see if cwl_reporting is set for any samples, and if so, initialize a WorldWatcher object from a set of samples, """ work_dir = dd.get_in_samples(samples, dd.get_work_dir) ww = WorldWatcher(work_dir, is_on=any([dd.get_cwl_reporting(d[0]) for d in samples])) ww.initialize(samples) return ww
python
def initialize_watcher(samples): """ check to see if cwl_reporting is set for any samples, and if so, initialize a WorldWatcher object from a set of samples, """ work_dir = dd.get_in_samples(samples, dd.get_work_dir) ww = WorldWatcher(work_dir, is_on=any([dd.get_cwl_reporting(d[0]) for d in samples])) ww.initialize(samples) return ww
[ "def", "initialize_watcher", "(", "samples", ")", ":", "work_dir", "=", "dd", ".", "get_in_samples", "(", "samples", ",", "dd", ".", "get_work_dir", ")", "ww", "=", "WorldWatcher", "(", "work_dir", ",", "is_on", "=", "any", "(", "[", "dd", ".", "get_cwl_...
check to see if cwl_reporting is set for any samples, and if so, initialize a WorldWatcher object from a set of samples,
[ "check", "to", "see", "if", "cwl_reporting", "is", "set", "for", "any", "samples", "and", "if", "so", "initialize", "a", "WorldWatcher", "object", "from", "a", "set", "of", "samples" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/inspect.py#L92-L101
train
219,179
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
guess_infer_extent
def guess_infer_extent(gtf_file): """ guess if we need to use the gene extent option when making a gffutils database by making a tiny database of 1000 lines from the original GTF and looking for all of the features """ _, ext = os.path.splitext(gtf_file) tmp_out = tempfile.NamedTemporaryFile(suffix=".gtf", delete=False).name with open(tmp_out, "w") as out_handle: count = 0 in_handle = utils.open_gzipsafe(gtf_file) for line in in_handle: if count > 1000: break out_handle.write(line) count += 1 in_handle.close() db = gffutils.create_db(tmp_out, dbfn=":memory:", infer_gene_extent=False) os.remove(tmp_out) features = [x for x in db.featuretypes()] if "gene" in features and "transcript" in features: return False else: return True
python
def guess_infer_extent(gtf_file): """ guess if we need to use the gene extent option when making a gffutils database by making a tiny database of 1000 lines from the original GTF and looking for all of the features """ _, ext = os.path.splitext(gtf_file) tmp_out = tempfile.NamedTemporaryFile(suffix=".gtf", delete=False).name with open(tmp_out, "w") as out_handle: count = 0 in_handle = utils.open_gzipsafe(gtf_file) for line in in_handle: if count > 1000: break out_handle.write(line) count += 1 in_handle.close() db = gffutils.create_db(tmp_out, dbfn=":memory:", infer_gene_extent=False) os.remove(tmp_out) features = [x for x in db.featuretypes()] if "gene" in features and "transcript" in features: return False else: return True
[ "def", "guess_infer_extent", "(", "gtf_file", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "gtf_file", ")", "tmp_out", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "\".gtf\"", ",", "delete", "=", "False", ")...
guess if we need to use the gene extent option when making a gffutils database by making a tiny database of 1000 lines from the original GTF and looking for all of the features
[ "guess", "if", "we", "need", "to", "use", "the", "gene", "extent", "option", "when", "making", "a", "gffutils", "database", "by", "making", "a", "tiny", "database", "of", "1000", "lines", "from", "the", "original", "GTF", "and", "looking", "for", "all", ...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L13-L36
train
219,180
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
get_gtf_db
def get_gtf_db(gtf, in_memory=False): """ create a gffutils DB, in memory if we don't have write permissions """ db_file = gtf + ".db" if file_exists(db_file): return gffutils.FeatureDB(db_file) if not os.access(os.path.dirname(db_file), os.W_OK | os.X_OK): in_memory = True db_file = ":memory:" if in_memory else db_file if in_memory or not file_exists(db_file): infer_extent = guess_infer_extent(gtf) disable_extent = not infer_extent db = gffutils.create_db(gtf, dbfn=db_file, disable_infer_genes=disable_extent, disable_infer_transcripts=disable_extent) if in_memory: return db else: return gffutils.FeatureDB(db_file)
python
def get_gtf_db(gtf, in_memory=False): """ create a gffutils DB, in memory if we don't have write permissions """ db_file = gtf + ".db" if file_exists(db_file): return gffutils.FeatureDB(db_file) if not os.access(os.path.dirname(db_file), os.W_OK | os.X_OK): in_memory = True db_file = ":memory:" if in_memory else db_file if in_memory or not file_exists(db_file): infer_extent = guess_infer_extent(gtf) disable_extent = not infer_extent db = gffutils.create_db(gtf, dbfn=db_file, disable_infer_genes=disable_extent, disable_infer_transcripts=disable_extent) if in_memory: return db else: return gffutils.FeatureDB(db_file)
[ "def", "get_gtf_db", "(", "gtf", ",", "in_memory", "=", "False", ")", ":", "db_file", "=", "gtf", "+", "\".db\"", "if", "file_exists", "(", "db_file", ")", ":", "return", "gffutils", ".", "FeatureDB", "(", "db_file", ")", "if", "not", "os", ".", "acces...
create a gffutils DB, in memory if we don't have write permissions
[ "create", "a", "gffutils", "DB", "in", "memory", "if", "we", "don", "t", "have", "write", "permissions" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L38-L57
train
219,181
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
partition_gtf
def partition_gtf(gtf, coding=False, out_file=False): """ return a GTF file of all non-coding or coding transcripts. the GTF must be annotated with gene_biotype = "protein_coding" or to have the source column set to the biotype for all coding transcripts. set coding to True to get only the coding, false to get only the non-coding """ if out_file and file_exists(out_file): return out_file if not out_file: out_file = tempfile.NamedTemporaryFile(delete=False, suffix=".gtf").name if coding: pred = lambda biotype: biotype and biotype == "protein_coding" else: pred = lambda biotype: biotype and biotype != "protein_coding" biotype_lookup = _biotype_lookup_fn(gtf) db = get_gtf_db(gtf) with file_transaction(out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for feature in db.all_features(): biotype = biotype_lookup(feature) if pred(biotype): out_handle.write(str(feature) + "\n") return out_file
python
def partition_gtf(gtf, coding=False, out_file=False): """ return a GTF file of all non-coding or coding transcripts. the GTF must be annotated with gene_biotype = "protein_coding" or to have the source column set to the biotype for all coding transcripts. set coding to True to get only the coding, false to get only the non-coding """ if out_file and file_exists(out_file): return out_file if not out_file: out_file = tempfile.NamedTemporaryFile(delete=False, suffix=".gtf").name if coding: pred = lambda biotype: biotype and biotype == "protein_coding" else: pred = lambda biotype: biotype and biotype != "protein_coding" biotype_lookup = _biotype_lookup_fn(gtf) db = get_gtf_db(gtf) with file_transaction(out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for feature in db.all_features(): biotype = biotype_lookup(feature) if pred(biotype): out_handle.write(str(feature) + "\n") return out_file
[ "def", "partition_gtf", "(", "gtf", ",", "coding", "=", "False", ",", "out_file", "=", "False", ")", ":", "if", "out_file", "and", "file_exists", "(", "out_file", ")", ":", "return", "out_file", "if", "not", "out_file", ":", "out_file", "=", "tempfile", ...
return a GTF file of all non-coding or coding transcripts. the GTF must be annotated with gene_biotype = "protein_coding" or to have the source column set to the biotype for all coding transcripts. set coding to True to get only the coding, false to get only the non-coding
[ "return", "a", "GTF", "file", "of", "all", "non", "-", "coding", "or", "coding", "transcripts", ".", "the", "GTF", "must", "be", "annotated", "with", "gene_biotype", "=", "protein_coding", "or", "to", "have", "the", "source", "column", "set", "to", "the", ...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L142-L169
train
219,182
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
split_gtf
def split_gtf(gtf, sample_size=None, out_dir=None): """ split a GTF file into two equal parts, randomly selecting genes. sample_size will select up to sample_size genes in total """ if out_dir: part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf" part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf" part1 = os.path.join(out_dir, part1_fn) part2 = os.path.join(out_dir, part2_fn) if file_exists(part1) and file_exists(part2): return part1, part2 else: part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name db = get_gtf_db(gtf) gene_ids = set([x['gene_id'][0] for x in db.all_features()]) if not sample_size or (sample_size and sample_size > len(gene_ids)): sample_size = len(gene_ids) gene_ids = set(random.sample(gene_ids, sample_size)) part1_ids = set(random.sample(gene_ids, sample_size / 2)) part2_ids = gene_ids.difference(part1_ids) with open(part1, "w") as part1_handle: for gene in part1_ids: for feature in db.children(gene): part1_handle.write(str(feature) + "\n") with open(part2, "w") as part2_handle: for gene in part2_ids: for feature in db.children(gene): part2_handle.write(str(feature) + "\n") return part1, part2
python
def split_gtf(gtf, sample_size=None, out_dir=None): """ split a GTF file into two equal parts, randomly selecting genes. sample_size will select up to sample_size genes in total """ if out_dir: part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf" part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf" part1 = os.path.join(out_dir, part1_fn) part2 = os.path.join(out_dir, part2_fn) if file_exists(part1) and file_exists(part2): return part1, part2 else: part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name db = get_gtf_db(gtf) gene_ids = set([x['gene_id'][0] for x in db.all_features()]) if not sample_size or (sample_size and sample_size > len(gene_ids)): sample_size = len(gene_ids) gene_ids = set(random.sample(gene_ids, sample_size)) part1_ids = set(random.sample(gene_ids, sample_size / 2)) part2_ids = gene_ids.difference(part1_ids) with open(part1, "w") as part1_handle: for gene in part1_ids: for feature in db.children(gene): part1_handle.write(str(feature) + "\n") with open(part2, "w") as part2_handle: for gene in part2_ids: for feature in db.children(gene): part2_handle.write(str(feature) + "\n") return part1, part2
[ "def", "split_gtf", "(", "gtf", ",", "sample_size", "=", "None", ",", "out_dir", "=", "None", ")", ":", "if", "out_dir", ":", "part1_fn", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "splitext", "(", "gtf", ")", "[", "0", ...
split a GTF file into two equal parts, randomly selecting genes. sample_size will select up to sample_size genes in total
[ "split", "a", "GTF", "file", "into", "two", "equal", "parts", "randomly", "selecting", "genes", ".", "sample_size", "will", "select", "up", "to", "sample_size", "genes", "in", "total" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L171-L202
train
219,183
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
get_coding_noncoding_transcript_ids
def get_coding_noncoding_transcript_ids(gtf): """ return a set of coding and non-coding transcript_ids from a GTF """ coding_gtf = partition_gtf(gtf, coding=True) coding_db = get_gtf_db(coding_gtf) coding_ids = set([x['transcript_id'][0] for x in coding_db.all_features() if 'transcript_id' in x.attributes]) noncoding_gtf = partition_gtf(gtf) noncoding_db = get_gtf_db(noncoding_gtf) noncoding_ids = set([x['transcript_id'][0] for x in noncoding_db.all_features() if 'transcript_id' in x.attributes]) return coding_ids, noncoding_ids
python
def get_coding_noncoding_transcript_ids(gtf): """ return a set of coding and non-coding transcript_ids from a GTF """ coding_gtf = partition_gtf(gtf, coding=True) coding_db = get_gtf_db(coding_gtf) coding_ids = set([x['transcript_id'][0] for x in coding_db.all_features() if 'transcript_id' in x.attributes]) noncoding_gtf = partition_gtf(gtf) noncoding_db = get_gtf_db(noncoding_gtf) noncoding_ids = set([x['transcript_id'][0] for x in noncoding_db.all_features() if 'transcript_id' in x.attributes]) return coding_ids, noncoding_ids
[ "def", "get_coding_noncoding_transcript_ids", "(", "gtf", ")", ":", "coding_gtf", "=", "partition_gtf", "(", "gtf", ",", "coding", "=", "True", ")", "coding_db", "=", "get_gtf_db", "(", "coding_gtf", ")", "coding_ids", "=", "set", "(", "[", "x", "[", "'trans...
return a set of coding and non-coding transcript_ids from a GTF
[ "return", "a", "set", "of", "coding", "and", "non", "-", "coding", "transcript_ids", "from", "a", "GTF" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L204-L216
train
219,184
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
get_gene_source_set
def get_gene_source_set(gtf): """ get a dictionary of the set of all sources for a gene """ gene_to_source = {} db = get_gtf_db(gtf) for feature in complete_features(db): gene_id = feature['gene_id'][0] sources = gene_to_source.get(gene_id, set([])).union(set([feature.source])) gene_to_source[gene_id] = sources return gene_to_source
python
def get_gene_source_set(gtf): """ get a dictionary of the set of all sources for a gene """ gene_to_source = {} db = get_gtf_db(gtf) for feature in complete_features(db): gene_id = feature['gene_id'][0] sources = gene_to_source.get(gene_id, set([])).union(set([feature.source])) gene_to_source[gene_id] = sources return gene_to_source
[ "def", "get_gene_source_set", "(", "gtf", ")", ":", "gene_to_source", "=", "{", "}", "db", "=", "get_gtf_db", "(", "gtf", ")", "for", "feature", "in", "complete_features", "(", "db", ")", ":", "gene_id", "=", "feature", "[", "'gene_id'", "]", "[", "0", ...
get a dictionary of the set of all sources for a gene
[ "get", "a", "dictionary", "of", "the", "set", "of", "all", "sources", "for", "a", "gene" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L218-L228
train
219,185
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
get_transcript_source_set
def get_transcript_source_set(gtf): """ get a dictionary of the set of all sources of the gene for a given transcript """ gene_to_source = get_gene_source_set(gtf) transcript_to_source = {} db = get_gtf_db(gtf) for feature in complete_features(db): gene_id = feature['gene_id'][0] transcript_to_source[feature['transcript_id'][0]] = gene_to_source[gene_id] return transcript_to_source
python
def get_transcript_source_set(gtf): """ get a dictionary of the set of all sources of the gene for a given transcript """ gene_to_source = get_gene_source_set(gtf) transcript_to_source = {} db = get_gtf_db(gtf) for feature in complete_features(db): gene_id = feature['gene_id'][0] transcript_to_source[feature['transcript_id'][0]] = gene_to_source[gene_id] return transcript_to_source
[ "def", "get_transcript_source_set", "(", "gtf", ")", ":", "gene_to_source", "=", "get_gene_source_set", "(", "gtf", ")", "transcript_to_source", "=", "{", "}", "db", "=", "get_gtf_db", "(", "gtf", ")", "for", "feature", "in", "complete_features", "(", "db", ")...
get a dictionary of the set of all sources of the gene for a given transcript
[ "get", "a", "dictionary", "of", "the", "set", "of", "all", "sources", "of", "the", "gene", "for", "a", "given", "transcript" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L230-L241
train
219,186
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
get_rRNA
def get_rRNA(gtf): """ extract rRNA genes and transcripts from a gtf file """ rRNA_biotypes = ["rRNA", "Mt_rRNA", "tRNA", "MT_tRNA"] features = set() with open_gzipsafe(gtf) as in_handle: for line in in_handle: if not "gene_id" in line or not "transcript_id" in line: continue if any(x in line for x in rRNA_biotypes): geneid = line.split("gene_id")[1].split(" ")[1] geneid = _strip_non_alphanumeric(geneid) geneid = _strip_feature_version(geneid) txid = line.split("transcript_id")[1].split(" ")[1] txid = _strip_non_alphanumeric(txid) txid = _strip_feature_version(txid) features.add((geneid, txid)) return features
python
def get_rRNA(gtf): """ extract rRNA genes and transcripts from a gtf file """ rRNA_biotypes = ["rRNA", "Mt_rRNA", "tRNA", "MT_tRNA"] features = set() with open_gzipsafe(gtf) as in_handle: for line in in_handle: if not "gene_id" in line or not "transcript_id" in line: continue if any(x in line for x in rRNA_biotypes): geneid = line.split("gene_id")[1].split(" ")[1] geneid = _strip_non_alphanumeric(geneid) geneid = _strip_feature_version(geneid) txid = line.split("transcript_id")[1].split(" ")[1] txid = _strip_non_alphanumeric(txid) txid = _strip_feature_version(txid) features.add((geneid, txid)) return features
[ "def", "get_rRNA", "(", "gtf", ")", ":", "rRNA_biotypes", "=", "[", "\"rRNA\"", ",", "\"Mt_rRNA\"", ",", "\"tRNA\"", ",", "\"MT_tRNA\"", "]", "features", "=", "set", "(", ")", "with", "open_gzipsafe", "(", "gtf", ")", "as", "in_handle", ":", "for", "line...
extract rRNA genes and transcripts from a gtf file
[ "extract", "rRNA", "genes", "and", "transcripts", "from", "a", "gtf", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L243-L261
train
219,187
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
_biotype_lookup_fn
def _biotype_lookup_fn(gtf): """ return a function that will look up the biotype of a feature this checks for either gene_biotype or biotype being set or for the source column to have biotype information """ db = get_gtf_db(gtf) sources = set([feature.source for feature in db.all_features()]) gene_biotypes = set([feature.attributes.get("gene_biotype", [None])[0] for feature in db.all_features()]) biotypes = set([feature.attributes.get("biotype", [None])[0] for feature in db.all_features()]) if "protein_coding" in sources: return lambda feature: feature.source elif "protein_coding" in biotypes: return lambda feature: feature.attributes.get("biotype", [None])[0] elif "protein_coding" in gene_biotypes: return lambda feature: feature.attributes.get("gene_biotype", [None])[0] else: return None
python
def _biotype_lookup_fn(gtf): """ return a function that will look up the biotype of a feature this checks for either gene_biotype or biotype being set or for the source column to have biotype information """ db = get_gtf_db(gtf) sources = set([feature.source for feature in db.all_features()]) gene_biotypes = set([feature.attributes.get("gene_biotype", [None])[0] for feature in db.all_features()]) biotypes = set([feature.attributes.get("biotype", [None])[0] for feature in db.all_features()]) if "protein_coding" in sources: return lambda feature: feature.source elif "protein_coding" in biotypes: return lambda feature: feature.attributes.get("biotype", [None])[0] elif "protein_coding" in gene_biotypes: return lambda feature: feature.attributes.get("gene_biotype", [None])[0] else: return None
[ "def", "_biotype_lookup_fn", "(", "gtf", ")", ":", "db", "=", "get_gtf_db", "(", "gtf", ")", "sources", "=", "set", "(", "[", "feature", ".", "source", "for", "feature", "in", "db", ".", "all_features", "(", ")", "]", ")", "gene_biotypes", "=", "set", ...
return a function that will look up the biotype of a feature this checks for either gene_biotype or biotype being set or for the source column to have biotype information
[ "return", "a", "function", "that", "will", "look", "up", "the", "biotype", "of", "a", "feature", "this", "checks", "for", "either", "gene_biotype", "or", "biotype", "being", "set", "or", "for", "the", "source", "column", "to", "have", "biotype", "information...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L263-L282
train
219,188
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
tx2genedict
def tx2genedict(gtf, keep_version=False): """ produce a tx2gene dictionary from a GTF file """ d = {} with open_gzipsafe(gtf) as in_handle: for line in in_handle: if "gene_id" not in line or "transcript_id" not in line: continue geneid = line.split("gene_id")[1].split(" ")[1] geneid = _strip_non_alphanumeric(geneid) txid = line.split("transcript_id")[1].split(" ")[1] txid = _strip_non_alphanumeric(txid) if keep_version and "transcript_version" in line: txversion = line.split("transcript_version")[1].split(" ")[1] txversion = _strip_non_alphanumeric(txversion) txid += "." + txversion if has_transcript_version(line) and not keep_version: txid = _strip_feature_version(txid) geneid = _strip_feature_version(geneid) d[txid] = geneid return d
python
def tx2genedict(gtf, keep_version=False): """ produce a tx2gene dictionary from a GTF file """ d = {} with open_gzipsafe(gtf) as in_handle: for line in in_handle: if "gene_id" not in line or "transcript_id" not in line: continue geneid = line.split("gene_id")[1].split(" ")[1] geneid = _strip_non_alphanumeric(geneid) txid = line.split("transcript_id")[1].split(" ")[1] txid = _strip_non_alphanumeric(txid) if keep_version and "transcript_version" in line: txversion = line.split("transcript_version")[1].split(" ")[1] txversion = _strip_non_alphanumeric(txversion) txid += "." + txversion if has_transcript_version(line) and not keep_version: txid = _strip_feature_version(txid) geneid = _strip_feature_version(geneid) d[txid] = geneid return d
[ "def", "tx2genedict", "(", "gtf", ",", "keep_version", "=", "False", ")", ":", "d", "=", "{", "}", "with", "open_gzipsafe", "(", "gtf", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "\"gene_id\"", "not", "in", "line", "or", ...
produce a tx2gene dictionary from a GTF file
[ "produce", "a", "tx2gene", "dictionary", "from", "a", "GTF", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L284-L305
train
219,189
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
_strip_feature_version
def _strip_feature_version(featureid): """ some feature versions are encoded as featureid.version, this strips those off, if they exist """ version_detector = re.compile(r"(?P<featureid>.*)(?P<version>\.\d+)") match = version_detector.match(featureid) if match: return match.groupdict()["featureid"] else: return featureid
python
def _strip_feature_version(featureid): """ some feature versions are encoded as featureid.version, this strips those off, if they exist """ version_detector = re.compile(r"(?P<featureid>.*)(?P<version>\.\d+)") match = version_detector.match(featureid) if match: return match.groupdict()["featureid"] else: return featureid
[ "def", "_strip_feature_version", "(", "featureid", ")", ":", "version_detector", "=", "re", ".", "compile", "(", "r\"(?P<featureid>.*)(?P<version>\\.\\d+)\"", ")", "match", "=", "version_detector", ".", "match", "(", "featureid", ")", "if", "match", ":", "return", ...
some feature versions are encoded as featureid.version, this strips those off, if they exist
[ "some", "feature", "versions", "are", "encoded", "as", "featureid", ".", "version", "this", "strips", "those", "off", "if", "they", "exist" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L307-L316
train
219,190
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
tx2genefile
def tx2genefile(gtf, out_file=None, data=None, tsv=True, keep_version=False): """ write out a file of transcript->gene mappings. """ if tsv: extension = ".tsv" sep = "\t" else: extension = ".csv" sep = "," if file_exists(out_file): return out_file with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for k, v in tx2genedict(gtf, keep_version).items(): out_handle.write(sep.join([k, v]) + "\n") logger.info("tx2gene file %s created from %s." % (out_file, gtf)) return out_file
python
def tx2genefile(gtf, out_file=None, data=None, tsv=True, keep_version=False): """ write out a file of transcript->gene mappings. """ if tsv: extension = ".tsv" sep = "\t" else: extension = ".csv" sep = "," if file_exists(out_file): return out_file with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for k, v in tx2genedict(gtf, keep_version).items(): out_handle.write(sep.join([k, v]) + "\n") logger.info("tx2gene file %s created from %s." % (out_file, gtf)) return out_file
[ "def", "tx2genefile", "(", "gtf", ",", "out_file", "=", "None", ",", "data", "=", "None", ",", "tsv", "=", "True", ",", "keep_version", "=", "False", ")", ":", "if", "tsv", ":", "extension", "=", "\".tsv\"", "sep", "=", "\"\\t\"", "else", ":", "exten...
write out a file of transcript->gene mappings.
[ "write", "out", "a", "file", "of", "transcript", "-", ">", "gene", "mappings", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L330-L347
train
219,191
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
is_qualimap_compatible
def is_qualimap_compatible(gtf): """ Qualimap needs a very specific GTF format or it fails, so skip it if the GTF is not in that format """ if not gtf: return False db = get_gtf_db(gtf) def qualimap_compatible(feature): gene_id = feature.attributes.get('gene_id', [None])[0] transcript_id = feature.attributes.get('transcript_id', [None])[0] gene_biotype = feature.attributes.get('gene_biotype', [None])[0] return gene_id and transcript_id and gene_biotype for feature in db.all_features(): if qualimap_compatible(feature): return True return False
python
def is_qualimap_compatible(gtf): """ Qualimap needs a very specific GTF format or it fails, so skip it if the GTF is not in that format """ if not gtf: return False db = get_gtf_db(gtf) def qualimap_compatible(feature): gene_id = feature.attributes.get('gene_id', [None])[0] transcript_id = feature.attributes.get('transcript_id', [None])[0] gene_biotype = feature.attributes.get('gene_biotype', [None])[0] return gene_id and transcript_id and gene_biotype for feature in db.all_features(): if qualimap_compatible(feature): return True return False
[ "def", "is_qualimap_compatible", "(", "gtf", ")", ":", "if", "not", "gtf", ":", "return", "False", "db", "=", "get_gtf_db", "(", "gtf", ")", "def", "qualimap_compatible", "(", "feature", ")", ":", "gene_id", "=", "feature", ".", "attributes", ".", "get", ...
Qualimap needs a very specific GTF format or it fails, so skip it if the GTF is not in that format
[ "Qualimap", "needs", "a", "very", "specific", "GTF", "format", "or", "it", "fails", "so", "skip", "it", "if", "the", "GTF", "is", "not", "in", "that", "format" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L349-L365
train
219,192
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
is_cpat_compatible
def is_cpat_compatible(gtf): """ CPAT needs some transcripts annotated with protein coding status to work properly """ if not gtf: return False db = get_gtf_db(gtf) pred = lambda biotype: biotype and biotype == "protein_coding" biotype_lookup = _biotype_lookup_fn(gtf) if not biotype_lookup: return False db = get_gtf_db(gtf) for feature in db.all_features(): biotype = biotype_lookup(feature) if pred(biotype): return True return False
python
def is_cpat_compatible(gtf): """ CPAT needs some transcripts annotated with protein coding status to work properly """ if not gtf: return False db = get_gtf_db(gtf) pred = lambda biotype: biotype and biotype == "protein_coding" biotype_lookup = _biotype_lookup_fn(gtf) if not biotype_lookup: return False db = get_gtf_db(gtf) for feature in db.all_features(): biotype = biotype_lookup(feature) if pred(biotype): return True return False
[ "def", "is_cpat_compatible", "(", "gtf", ")", ":", "if", "not", "gtf", ":", "return", "False", "db", "=", "get_gtf_db", "(", "gtf", ")", "pred", "=", "lambda", "biotype", ":", "biotype", "and", "biotype", "==", "\"protein_coding\"", "biotype_lookup", "=", ...
CPAT needs some transcripts annotated with protein coding status to work properly
[ "CPAT", "needs", "some", "transcripts", "annotated", "with", "protein", "coding", "status", "to", "work", "properly" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L403-L420
train
219,193
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
organize
def organize(dirs, config, run_info_yaml, sample_names=None, is_cwl=False, integrations=None): """Organize run information from a passed YAML file or the Galaxy API. Creates the high level structure used for subsequent processing. sample_names is a list of samples to include from the overall file, for cases where we are running multiple pipelines from the same configuration file. """ from bcbio.pipeline import qcsummary if integrations is None: integrations = {} logger.info("Using input YAML configuration: %s" % run_info_yaml) assert run_info_yaml and os.path.exists(run_info_yaml), \ "Did not find input sample YAML file: %s" % run_info_yaml run_details = _run_info_from_yaml(dirs, run_info_yaml, config, sample_names, is_cwl=is_cwl, integrations=integrations) remote_retriever = None for iname, retriever in integrations.items(): if iname in config: run_details = retriever.add_remotes(run_details, config[iname]) remote_retriever = retriever out = [] for item in run_details: item["dirs"] = dirs if "name" not in item: item["name"] = ["", item["description"]] elif isinstance(item["name"], six.string_types): description = "%s-%s" % (item["name"], clean_name(item["description"])) item["name"] = [item["name"], description] item["description"] = description # add algorithm details to configuration, avoid double specification item["resources"] = _add_remote_resources(item["resources"]) item["config"] = config_utils.update_w_custom(config, item) item.pop("algorithm", None) item = add_reference_resources(item, remote_retriever) item["config"]["algorithm"]["qc"] = qcsummary.get_qc_tools(item) item["config"]["algorithm"]["vcfanno"] = vcfanno.find_annotations(item, remote_retriever) # Create temporary directories and make absolute, expanding environmental variables tmp_dir = tz.get_in(["config", "resources", "tmp", "dir"], item) if tmp_dir: # if no environmental variables, make and normalize the directory # otherwise we normalize later in distributed.transaction: if 
os.path.expandvars(tmp_dir) == tmp_dir: tmp_dir = utils.safe_makedir(os.path.expandvars(tmp_dir)) tmp_dir = genome.abs_file_paths(tmp_dir, do_download=not integrations) item["config"]["resources"]["tmp"]["dir"] = tmp_dir out.append(item) out = _add_provenance(out, dirs, config, not is_cwl) return out
python
def organize(dirs, config, run_info_yaml, sample_names=None, is_cwl=False, integrations=None): """Organize run information from a passed YAML file or the Galaxy API. Creates the high level structure used for subsequent processing. sample_names is a list of samples to include from the overall file, for cases where we are running multiple pipelines from the same configuration file. """ from bcbio.pipeline import qcsummary if integrations is None: integrations = {} logger.info("Using input YAML configuration: %s" % run_info_yaml) assert run_info_yaml and os.path.exists(run_info_yaml), \ "Did not find input sample YAML file: %s" % run_info_yaml run_details = _run_info_from_yaml(dirs, run_info_yaml, config, sample_names, is_cwl=is_cwl, integrations=integrations) remote_retriever = None for iname, retriever in integrations.items(): if iname in config: run_details = retriever.add_remotes(run_details, config[iname]) remote_retriever = retriever out = [] for item in run_details: item["dirs"] = dirs if "name" not in item: item["name"] = ["", item["description"]] elif isinstance(item["name"], six.string_types): description = "%s-%s" % (item["name"], clean_name(item["description"])) item["name"] = [item["name"], description] item["description"] = description # add algorithm details to configuration, avoid double specification item["resources"] = _add_remote_resources(item["resources"]) item["config"] = config_utils.update_w_custom(config, item) item.pop("algorithm", None) item = add_reference_resources(item, remote_retriever) item["config"]["algorithm"]["qc"] = qcsummary.get_qc_tools(item) item["config"]["algorithm"]["vcfanno"] = vcfanno.find_annotations(item, remote_retriever) # Create temporary directories and make absolute, expanding environmental variables tmp_dir = tz.get_in(["config", "resources", "tmp", "dir"], item) if tmp_dir: # if no environmental variables, make and normalize the directory # otherwise we normalize later in distributed.transaction: if 
os.path.expandvars(tmp_dir) == tmp_dir: tmp_dir = utils.safe_makedir(os.path.expandvars(tmp_dir)) tmp_dir = genome.abs_file_paths(tmp_dir, do_download=not integrations) item["config"]["resources"]["tmp"]["dir"] = tmp_dir out.append(item) out = _add_provenance(out, dirs, config, not is_cwl) return out
[ "def", "organize", "(", "dirs", ",", "config", ",", "run_info_yaml", ",", "sample_names", "=", "None", ",", "is_cwl", "=", "False", ",", "integrations", "=", "None", ")", ":", "from", "bcbio", ".", "pipeline", "import", "qcsummary", "if", "integrations", "...
Organize run information from a passed YAML file or the Galaxy API. Creates the high level structure used for subsequent processing. sample_names is a list of samples to include from the overall file, for cases where we are running multiple pipelines from the same configuration file.
[ "Organize", "run", "information", "from", "a", "passed", "YAML", "file", "or", "the", "Galaxy", "API", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L46-L94
train
219,194
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_get_full_paths
def _get_full_paths(fastq_dir, config, config_file): """Retrieve full paths for directories in the case of relative locations. """ if fastq_dir: fastq_dir = utils.add_full_path(fastq_dir) config_dir = utils.add_full_path(os.path.dirname(config_file)) galaxy_config_file = utils.add_full_path(config.get("galaxy_config", "universe_wsgi.ini"), config_dir) return fastq_dir, os.path.dirname(galaxy_config_file), config_dir
python
def _get_full_paths(fastq_dir, config, config_file): """Retrieve full paths for directories in the case of relative locations. """ if fastq_dir: fastq_dir = utils.add_full_path(fastq_dir) config_dir = utils.add_full_path(os.path.dirname(config_file)) galaxy_config_file = utils.add_full_path(config.get("galaxy_config", "universe_wsgi.ini"), config_dir) return fastq_dir, os.path.dirname(galaxy_config_file), config_dir
[ "def", "_get_full_paths", "(", "fastq_dir", ",", "config", ",", "config_file", ")", ":", "if", "fastq_dir", ":", "fastq_dir", "=", "utils", ".", "add_full_path", "(", "fastq_dir", ")", "config_dir", "=", "utils", ".", "add_full_path", "(", "os", ".", "path",...
Retrieve full paths for directories in the case of relative locations.
[ "Retrieve", "full", "paths", "for", "directories", "in", "the", "case", "of", "relative", "locations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L130-L138
train
219,195
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
add_reference_resources
def add_reference_resources(data, remote_retriever=None): """Add genome reference information to the item to process. """ aligner = data["config"]["algorithm"].get("aligner", None) if remote_retriever: data["reference"] = remote_retriever.get_refs(data["genome_build"], alignment.get_aligner_with_aliases(aligner, data), data["config"]) else: data["reference"] = genome.get_refs(data["genome_build"], alignment.get_aligner_with_aliases(aligner, data), data["dirs"]["galaxy"], data) _check_ref_files(data["reference"], data) # back compatible `sam_ref` target data["sam_ref"] = utils.get_in(data, ("reference", "fasta", "base")) ref_loc = utils.get_in(data, ("config", "resources", "species", "dir"), utils.get_in(data, ("reference", "fasta", "base"))) if remote_retriever: data = remote_retriever.get_resources(data["genome_build"], ref_loc, data) else: data["genome_resources"] = genome.get_resources(data["genome_build"], ref_loc, data) data["genome_resources"] = genome.add_required_resources(data["genome_resources"]) if effects.get_type(data) == "snpeff" and "snpeff" not in data["reference"]: data["reference"]["snpeff"] = effects.get_snpeff_files(data) if "genome_context" not in data["reference"]: data["reference"]["genome_context"] = annotation.get_context_files(data) if "viral" not in data["reference"]: data["reference"]["viral"] = viral.get_files(data) if not data["reference"]["viral"]: data["reference"]["viral"] = None if "versions" not in data["reference"]: data["reference"]["versions"] = _get_data_versions(data) data = _fill_validation_targets(data) data = _fill_prioritization_targets(data) data = _fill_capture_regions(data) # Re-enable when we have ability to re-define gemini configuration directory if False: data["reference"]["gemini"] = population.get_gemini_files(data) return data
python
def add_reference_resources(data, remote_retriever=None): """Add genome reference information to the item to process. """ aligner = data["config"]["algorithm"].get("aligner", None) if remote_retriever: data["reference"] = remote_retriever.get_refs(data["genome_build"], alignment.get_aligner_with_aliases(aligner, data), data["config"]) else: data["reference"] = genome.get_refs(data["genome_build"], alignment.get_aligner_with_aliases(aligner, data), data["dirs"]["galaxy"], data) _check_ref_files(data["reference"], data) # back compatible `sam_ref` target data["sam_ref"] = utils.get_in(data, ("reference", "fasta", "base")) ref_loc = utils.get_in(data, ("config", "resources", "species", "dir"), utils.get_in(data, ("reference", "fasta", "base"))) if remote_retriever: data = remote_retriever.get_resources(data["genome_build"], ref_loc, data) else: data["genome_resources"] = genome.get_resources(data["genome_build"], ref_loc, data) data["genome_resources"] = genome.add_required_resources(data["genome_resources"]) if effects.get_type(data) == "snpeff" and "snpeff" not in data["reference"]: data["reference"]["snpeff"] = effects.get_snpeff_files(data) if "genome_context" not in data["reference"]: data["reference"]["genome_context"] = annotation.get_context_files(data) if "viral" not in data["reference"]: data["reference"]["viral"] = viral.get_files(data) if not data["reference"]["viral"]: data["reference"]["viral"] = None if "versions" not in data["reference"]: data["reference"]["versions"] = _get_data_versions(data) data = _fill_validation_targets(data) data = _fill_prioritization_targets(data) data = _fill_capture_regions(data) # Re-enable when we have ability to re-define gemini configuration directory if False: data["reference"]["gemini"] = population.get_gemini_files(data) return data
[ "def", "add_reference_resources", "(", "data", ",", "remote_retriever", "=", "None", ")", ":", "aligner", "=", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"aligner\"", ",", "None", ")", "if", "remote_retriever", ":", "data",...
Add genome reference information to the item to process.
[ "Add", "genome", "reference", "information", "to", "the", "item", "to", "process", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L166-L204
train
219,196
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_get_data_versions
def _get_data_versions(data): """Retrieve CSV file with version information for reference data. """ genome_dir = install.get_genome_dir(data["genome_build"], data["dirs"].get("galaxy"), data) if genome_dir: version_file = os.path.join(genome_dir, "versions.csv") if version_file and os.path.exists(version_file): return version_file return None
python
def _get_data_versions(data): """Retrieve CSV file with version information for reference data. """ genome_dir = install.get_genome_dir(data["genome_build"], data["dirs"].get("galaxy"), data) if genome_dir: version_file = os.path.join(genome_dir, "versions.csv") if version_file and os.path.exists(version_file): return version_file return None
[ "def", "_get_data_versions", "(", "data", ")", ":", "genome_dir", "=", "install", ".", "get_genome_dir", "(", "data", "[", "\"genome_build\"", "]", ",", "data", "[", "\"dirs\"", "]", ".", "get", "(", "\"galaxy\"", ")", ",", "data", ")", "if", "genome_dir",...
Retrieve CSV file with version information for reference data.
[ "Retrieve", "CSV", "file", "with", "version", "information", "for", "reference", "data", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L206-L214
train
219,197
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_fill_validation_targets
def _fill_validation_targets(data): """Fill validation targets pointing to globally installed truth sets. """ ref_file = dd.get_ref_file(data) sv_truth = tz.get_in(["config", "algorithm", "svvalidate"], data, {}) sv_targets = (zip(itertools.repeat("svvalidate"), sv_truth.keys()) if isinstance(sv_truth, dict) else [["svvalidate"]]) for vtarget in [list(xs) for xs in [["validate"], ["validate_regions"], ["variant_regions"]] + list(sv_targets)]: val = tz.get_in(["config", "algorithm"] + vtarget, data) if val and not os.path.exists(val) and not objectstore.is_remote(val): installed_val = os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "validation", val)) if os.path.exists(installed_val): data = tz.update_in(data, ["config", "algorithm"] + vtarget, lambda x: installed_val) else: raise ValueError("Configuration problem. Validation file not found for %s: %s" % (vtarget, val)) return data
python
def _fill_validation_targets(data): """Fill validation targets pointing to globally installed truth sets. """ ref_file = dd.get_ref_file(data) sv_truth = tz.get_in(["config", "algorithm", "svvalidate"], data, {}) sv_targets = (zip(itertools.repeat("svvalidate"), sv_truth.keys()) if isinstance(sv_truth, dict) else [["svvalidate"]]) for vtarget in [list(xs) for xs in [["validate"], ["validate_regions"], ["variant_regions"]] + list(sv_targets)]: val = tz.get_in(["config", "algorithm"] + vtarget, data) if val and not os.path.exists(val) and not objectstore.is_remote(val): installed_val = os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "validation", val)) if os.path.exists(installed_val): data = tz.update_in(data, ["config", "algorithm"] + vtarget, lambda x: installed_val) else: raise ValueError("Configuration problem. Validation file not found for %s: %s" % (vtarget, val)) return data
[ "def", "_fill_validation_targets", "(", "data", ")", ":", "ref_file", "=", "dd", ".", "get_ref_file", "(", "data", ")", "sv_truth", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"svvalidate\"", "]", ",", "data", ",", "{", ...
Fill validation targets pointing to globally installed truth sets.
[ "Fill", "validation", "targets", "pointing", "to", "globally", "installed", "truth", "sets", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L236-L252
train
219,198
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_fill_capture_regions
def _fill_capture_regions(data): """Fill short-hand specification of BED capture regions. """ special_targets = {"sv_regions": ("exons", "transcripts")} ref_file = dd.get_ref_file(data) for target in ["variant_regions", "sv_regions", "coverage"]: val = tz.get_in(["config", "algorithm", target], data) if val and not os.path.exists(val) and not objectstore.is_remote(val): installed_vals = [] # Check prioritize directory for ext in [".bed", ".bed.gz"]: installed_vals += glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "coverage", val + ext))) if len(installed_vals) == 0: if target not in special_targets or not val.startswith(special_targets[target]): raise ValueError("Configuration problem. BED file not found for %s: %s" % (target, val)) else: assert len(installed_vals) == 1, installed_vals data = tz.update_in(data, ["config", "algorithm", target], lambda x: installed_vals[0]) return data
python
def _fill_capture_regions(data): """Fill short-hand specification of BED capture regions. """ special_targets = {"sv_regions": ("exons", "transcripts")} ref_file = dd.get_ref_file(data) for target in ["variant_regions", "sv_regions", "coverage"]: val = tz.get_in(["config", "algorithm", target], data) if val and not os.path.exists(val) and not objectstore.is_remote(val): installed_vals = [] # Check prioritize directory for ext in [".bed", ".bed.gz"]: installed_vals += glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "coverage", val + ext))) if len(installed_vals) == 0: if target not in special_targets or not val.startswith(special_targets[target]): raise ValueError("Configuration problem. BED file not found for %s: %s" % (target, val)) else: assert len(installed_vals) == 1, installed_vals data = tz.update_in(data, ["config", "algorithm", target], lambda x: installed_vals[0]) return data
[ "def", "_fill_capture_regions", "(", "data", ")", ":", "special_targets", "=", "{", "\"sv_regions\"", ":", "(", "\"exons\"", ",", "\"transcripts\"", ")", "}", "ref_file", "=", "dd", ".", "get_ref_file", "(", "data", ")", "for", "target", "in", "[", "\"varian...
Fill short-hand specification of BED capture regions.
[ "Fill", "short", "-", "hand", "specification", "of", "BED", "capture", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L254-L274
train
219,199