repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
bcbio/bcbio-nextgen | bcbio/structural/regions.py | calculate_sv_coverage | def calculate_sv_coverage(data):
"""Calculate coverage within bins for downstream CNV calling.
Creates corrected cnr files with log2 ratios and depths.
"""
calcfns = {"cnvkit": _calculate_sv_coverage_cnvkit, "gatk-cnv": _calculate_sv_coverage_gatk}
from bcbio.structural import cnvkit
data = utils.to_single_data(data)
if not cnvkit.use_general_sv_bins(data):
out_target_file, out_anti_file = (None, None)
else:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
out_target_file, out_anti_file = calcfns[cnvkit.bin_approach(data)](data, work_dir)
if not os.path.exists(out_target_file):
out_target_file, out_anti_file = (None, None)
if "seq2c" in dd.get_svcaller(data):
from bcbio.structural import seq2c
seq2c_target = seq2c.precall(data)
else:
seq2c_target = None
if not tz.get_in(["depth", "bins"], data):
data = tz.update_in(data, ["depth", "bins"], lambda x: {})
data["depth"]["bins"] = {"target": out_target_file, "antitarget": out_anti_file, "seq2c": seq2c_target}
return [[data]] | python | def calculate_sv_coverage(data):
"""Calculate coverage within bins for downstream CNV calling.
Creates corrected cnr files with log2 ratios and depths.
"""
calcfns = {"cnvkit": _calculate_sv_coverage_cnvkit, "gatk-cnv": _calculate_sv_coverage_gatk}
from bcbio.structural import cnvkit
data = utils.to_single_data(data)
if not cnvkit.use_general_sv_bins(data):
out_target_file, out_anti_file = (None, None)
else:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
out_target_file, out_anti_file = calcfns[cnvkit.bin_approach(data)](data, work_dir)
if not os.path.exists(out_target_file):
out_target_file, out_anti_file = (None, None)
if "seq2c" in dd.get_svcaller(data):
from bcbio.structural import seq2c
seq2c_target = seq2c.precall(data)
else:
seq2c_target = None
if not tz.get_in(["depth", "bins"], data):
data = tz.update_in(data, ["depth", "bins"], lambda x: {})
data["depth"]["bins"] = {"target": out_target_file, "antitarget": out_anti_file, "seq2c": seq2c_target}
return [[data]] | [
"def",
"calculate_sv_coverage",
"(",
"data",
")",
":",
"calcfns",
"=",
"{",
"\"cnvkit\"",
":",
"_calculate_sv_coverage_cnvkit",
",",
"\"gatk-cnv\"",
":",
"_calculate_sv_coverage_gatk",
"}",
"from",
"bcbio",
".",
"structural",
"import",
"cnvkit",
"data",
"=",
"utils"... | Calculate coverage within bins for downstream CNV calling.
Creates corrected cnr files with log2 ratios and depths. | [
"Calculate",
"coverage",
"within",
"bins",
"for",
"downstream",
"CNV",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L168-L193 | train | 218,400 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | _calculate_sv_coverage_gatk | def _calculate_sv_coverage_gatk(data, work_dir):
"""Calculate coverage in defined regions using GATK tools
TODO: This does double calculations to get GATK4 compatible HDF read counts
and then depth and gene annotations. Both are needed for creating heterogeneity inputs.
Ideally replace with a single mosdepth coverage calculation, and creat GATK4 TSV format:
CONTIG START END COUNT
chrM 1 1000 13268
"""
from bcbio.variation import coverage
from bcbio.structural import annotate
# GATK compatible
target_file = gatkcnv.collect_read_counts(data, work_dir)
# heterogeneity compatible
target_in = bedutils.clean_file(tz.get_in(["regions", "bins", "target"], data), data, bedprep_dir=work_dir)
target_cov = coverage.run_mosdepth(data, "target-gatk", target_in)
target_cov_genes = annotate.add_genes(target_cov.regions, data, max_distance=0)
return target_file, target_cov_genes | python | def _calculate_sv_coverage_gatk(data, work_dir):
"""Calculate coverage in defined regions using GATK tools
TODO: This does double calculations to get GATK4 compatible HDF read counts
and then depth and gene annotations. Both are needed for creating heterogeneity inputs.
Ideally replace with a single mosdepth coverage calculation, and creat GATK4 TSV format:
CONTIG START END COUNT
chrM 1 1000 13268
"""
from bcbio.variation import coverage
from bcbio.structural import annotate
# GATK compatible
target_file = gatkcnv.collect_read_counts(data, work_dir)
# heterogeneity compatible
target_in = bedutils.clean_file(tz.get_in(["regions", "bins", "target"], data), data, bedprep_dir=work_dir)
target_cov = coverage.run_mosdepth(data, "target-gatk", target_in)
target_cov_genes = annotate.add_genes(target_cov.regions, data, max_distance=0)
return target_file, target_cov_genes | [
"def",
"_calculate_sv_coverage_gatk",
"(",
"data",
",",
"work_dir",
")",
":",
"from",
"bcbio",
".",
"variation",
"import",
"coverage",
"from",
"bcbio",
".",
"structural",
"import",
"annotate",
"# GATK compatible",
"target_file",
"=",
"gatkcnv",
".",
"collect_read_co... | Calculate coverage in defined regions using GATK tools
TODO: This does double calculations to get GATK4 compatible HDF read counts
and then depth and gene annotations. Both are needed for creating heterogeneity inputs.
Ideally replace with a single mosdepth coverage calculation, and creat GATK4 TSV format:
CONTIG START END COUNT
chrM 1 1000 13268 | [
"Calculate",
"coverage",
"in",
"defined",
"regions",
"using",
"GATK",
"tools"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L195-L213 | train | 218,401 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | _calculate_sv_coverage_cnvkit | def _calculate_sv_coverage_cnvkit(data, work_dir):
"""Calculate coverage in an CNVkit ready format using mosdepth.
"""
from bcbio.variation import coverage
from bcbio.structural import annotate
out_target_file = os.path.join(work_dir, "%s-target-coverage.cnn" % dd.get_sample_name(data))
out_anti_file = os.path.join(work_dir, "%s-antitarget-coverage.cnn" % dd.get_sample_name(data))
if ((not utils.file_exists(out_target_file) or not utils.file_exists(out_anti_file)) and
(dd.get_align_bam(data) or dd.get_work_bam(data))):
target_cov = coverage.run_mosdepth(data, "target", tz.get_in(["regions", "bins", "target"], data))
anti_cov = coverage.run_mosdepth(data, "antitarget", tz.get_in(["regions", "bins", "antitarget"], data))
target_cov_genes = annotate.add_genes(target_cov.regions, data, max_distance=0)
out_target_file = _add_log2_depth(target_cov_genes, out_target_file, data)
out_anti_file = _add_log2_depth(anti_cov.regions, out_anti_file, data)
return out_target_file, out_anti_file | python | def _calculate_sv_coverage_cnvkit(data, work_dir):
"""Calculate coverage in an CNVkit ready format using mosdepth.
"""
from bcbio.variation import coverage
from bcbio.structural import annotate
out_target_file = os.path.join(work_dir, "%s-target-coverage.cnn" % dd.get_sample_name(data))
out_anti_file = os.path.join(work_dir, "%s-antitarget-coverage.cnn" % dd.get_sample_name(data))
if ((not utils.file_exists(out_target_file) or not utils.file_exists(out_anti_file)) and
(dd.get_align_bam(data) or dd.get_work_bam(data))):
target_cov = coverage.run_mosdepth(data, "target", tz.get_in(["regions", "bins", "target"], data))
anti_cov = coverage.run_mosdepth(data, "antitarget", tz.get_in(["regions", "bins", "antitarget"], data))
target_cov_genes = annotate.add_genes(target_cov.regions, data, max_distance=0)
out_target_file = _add_log2_depth(target_cov_genes, out_target_file, data)
out_anti_file = _add_log2_depth(anti_cov.regions, out_anti_file, data)
return out_target_file, out_anti_file | [
"def",
"_calculate_sv_coverage_cnvkit",
"(",
"data",
",",
"work_dir",
")",
":",
"from",
"bcbio",
".",
"variation",
"import",
"coverage",
"from",
"bcbio",
".",
"structural",
"import",
"annotate",
"out_target_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"wor... | Calculate coverage in an CNVkit ready format using mosdepth. | [
"Calculate",
"coverage",
"in",
"an",
"CNVkit",
"ready",
"format",
"using",
"mosdepth",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L215-L229 | train | 218,402 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | normalize_sv_coverage | def normalize_sv_coverage(*items):
"""Normalize CNV coverage, providing flexible point for multiple methods.
"""
calcfns = {"cnvkit": _normalize_sv_coverage_cnvkit, "gatk-cnv": _normalize_sv_coverage_gatk}
from bcbio.structural import cnvkit
from bcbio.structural import shared as sshared
items = [utils.to_single_data(x) for x in cwlutils.handle_combined_input(items)]
if all(not cnvkit.use_general_sv_bins(x) for x in items):
return [[d] for d in items]
out_files = {}
back_files = {}
for group_id, gitems in itertools.groupby(items, lambda x: tz.get_in(["regions", "bins", "group"], x)):
# No CNVkit calling for this particular set of samples
if group_id is None:
continue
inputs, backgrounds = sshared.find_case_control(list(gitems))
assert inputs, "Did not find inputs for sample batch: %s" % (" ".join(dd.get_sample_name(x) for x in items))
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(inputs[0]), "structural",
dd.get_sample_name(inputs[0]), "bins"))
back_files, out_files = calcfns[cnvkit.bin_approach(inputs[0])](group_id, inputs, backgrounds, work_dir,
back_files, out_files)
out = []
for data in items:
if dd.get_sample_name(data) in out_files:
data["depth"]["bins"]["background"] = back_files[dd.get_sample_name(data)]
data["depth"]["bins"]["normalized"] = out_files[dd.get_sample_name(data)]
out.append([data])
return out | python | def normalize_sv_coverage(*items):
"""Normalize CNV coverage, providing flexible point for multiple methods.
"""
calcfns = {"cnvkit": _normalize_sv_coverage_cnvkit, "gatk-cnv": _normalize_sv_coverage_gatk}
from bcbio.structural import cnvkit
from bcbio.structural import shared as sshared
items = [utils.to_single_data(x) for x in cwlutils.handle_combined_input(items)]
if all(not cnvkit.use_general_sv_bins(x) for x in items):
return [[d] for d in items]
out_files = {}
back_files = {}
for group_id, gitems in itertools.groupby(items, lambda x: tz.get_in(["regions", "bins", "group"], x)):
# No CNVkit calling for this particular set of samples
if group_id is None:
continue
inputs, backgrounds = sshared.find_case_control(list(gitems))
assert inputs, "Did not find inputs for sample batch: %s" % (" ".join(dd.get_sample_name(x) for x in items))
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(inputs[0]), "structural",
dd.get_sample_name(inputs[0]), "bins"))
back_files, out_files = calcfns[cnvkit.bin_approach(inputs[0])](group_id, inputs, backgrounds, work_dir,
back_files, out_files)
out = []
for data in items:
if dd.get_sample_name(data) in out_files:
data["depth"]["bins"]["background"] = back_files[dd.get_sample_name(data)]
data["depth"]["bins"]["normalized"] = out_files[dd.get_sample_name(data)]
out.append([data])
return out | [
"def",
"normalize_sv_coverage",
"(",
"*",
"items",
")",
":",
"calcfns",
"=",
"{",
"\"cnvkit\"",
":",
"_normalize_sv_coverage_cnvkit",
",",
"\"gatk-cnv\"",
":",
"_normalize_sv_coverage_gatk",
"}",
"from",
"bcbio",
".",
"structural",
"import",
"cnvkit",
"from",
"bcbio... | Normalize CNV coverage, providing flexible point for multiple methods. | [
"Normalize",
"CNV",
"coverage",
"providing",
"flexible",
"point",
"for",
"multiple",
"methods",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L256-L283 | train | 218,403 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | _normalize_sv_coverage_gatk | def _normalize_sv_coverage_gatk(group_id, inputs, backgrounds, work_dir, back_files, out_files):
"""Normalize CNV coverage using panel of normals with GATK's de-noise approaches.
"""
input_backs = set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "gatk-cnv") for d in inputs]))
if input_backs:
assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
pon = list(input_backs)[0]
elif backgrounds:
pon = gatkcnv.create_panel_of_normals(backgrounds, group_id, work_dir)
else:
pon = None
for data in inputs:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
denoise_file = gatkcnv.denoise(data, pon, work_dir)
out_files[dd.get_sample_name(data)] = denoise_file
back_files[dd.get_sample_name(data)] = pon
return back_files, out_files | python | def _normalize_sv_coverage_gatk(group_id, inputs, backgrounds, work_dir, back_files, out_files):
"""Normalize CNV coverage using panel of normals with GATK's de-noise approaches.
"""
input_backs = set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "gatk-cnv") for d in inputs]))
if input_backs:
assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
pon = list(input_backs)[0]
elif backgrounds:
pon = gatkcnv.create_panel_of_normals(backgrounds, group_id, work_dir)
else:
pon = None
for data in inputs:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
denoise_file = gatkcnv.denoise(data, pon, work_dir)
out_files[dd.get_sample_name(data)] = denoise_file
back_files[dd.get_sample_name(data)] = pon
return back_files, out_files | [
"def",
"_normalize_sv_coverage_gatk",
"(",
"group_id",
",",
"inputs",
",",
"backgrounds",
",",
"work_dir",
",",
"back_files",
",",
"out_files",
")",
":",
"input_backs",
"=",
"set",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"is",
"not",
"None",
",",
"[",... | Normalize CNV coverage using panel of normals with GATK's de-noise approaches. | [
"Normalize",
"CNV",
"coverage",
"using",
"panel",
"of",
"normals",
"with",
"GATK",
"s",
"de",
"-",
"noise",
"approaches",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L285-L303 | train | 218,404 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | _normalize_sv_coverage_cnvkit | def _normalize_sv_coverage_cnvkit(group_id, inputs, backgrounds, work_dir, back_files, out_files):
"""Normalize CNV coverage depths by GC, repeats and background using CNVkit
- reference: calculates reference backgrounds from normals and pools
including GC and repeat information
- fix: Uses background to normalize coverage estimations
http://cnvkit.readthedocs.io/en/stable/pipeline.html#fix
"""
from bcbio.structural import cnvkit
cnns = reduce(operator.add, [[tz.get_in(["depth", "bins", "target"], x),
tz.get_in(["depth", "bins", "antitarget"], x)] for x in backgrounds], [])
for d in inputs:
if tz.get_in(["depth", "bins", "target"], d):
target_bed = tz.get_in(["depth", "bins", "target"], d)
antitarget_bed = tz.get_in(["depth", "bins", "antitarget"], d)
input_backs = set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "cnvkit") for d in inputs]))
if input_backs:
assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
back_file = list(input_backs)[0]
else:
back_file = cnvkit.cnvkit_background(cnns,
os.path.join(work_dir, "background-%s-cnvkit.cnn" % (group_id)),
backgrounds or inputs, target_bed, antitarget_bed)
fix_cmd_inputs = []
for data in inputs:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
if tz.get_in(["depth", "bins", "target"], data):
fix_file = os.path.join(work_dir, "%s-normalized.cnr" % (dd.get_sample_name(data)))
fix_cmd_inputs.append((tz.get_in(["depth", "bins", "target"], data),
tz.get_in(["depth", "bins", "antitarget"], data),
back_file, fix_file, data))
out_files[dd.get_sample_name(data)] = fix_file
back_files[dd.get_sample_name(data)] = back_file
parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
run_multicore(cnvkit.run_fix_parallel, fix_cmd_inputs, inputs[0]["config"], parallel)
return back_files, out_files | python | def _normalize_sv_coverage_cnvkit(group_id, inputs, backgrounds, work_dir, back_files, out_files):
"""Normalize CNV coverage depths by GC, repeats and background using CNVkit
- reference: calculates reference backgrounds from normals and pools
including GC and repeat information
- fix: Uses background to normalize coverage estimations
http://cnvkit.readthedocs.io/en/stable/pipeline.html#fix
"""
from bcbio.structural import cnvkit
cnns = reduce(operator.add, [[tz.get_in(["depth", "bins", "target"], x),
tz.get_in(["depth", "bins", "antitarget"], x)] for x in backgrounds], [])
for d in inputs:
if tz.get_in(["depth", "bins", "target"], d):
target_bed = tz.get_in(["depth", "bins", "target"], d)
antitarget_bed = tz.get_in(["depth", "bins", "antitarget"], d)
input_backs = set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "cnvkit") for d in inputs]))
if input_backs:
assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
back_file = list(input_backs)[0]
else:
back_file = cnvkit.cnvkit_background(cnns,
os.path.join(work_dir, "background-%s-cnvkit.cnn" % (group_id)),
backgrounds or inputs, target_bed, antitarget_bed)
fix_cmd_inputs = []
for data in inputs:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
if tz.get_in(["depth", "bins", "target"], data):
fix_file = os.path.join(work_dir, "%s-normalized.cnr" % (dd.get_sample_name(data)))
fix_cmd_inputs.append((tz.get_in(["depth", "bins", "target"], data),
tz.get_in(["depth", "bins", "antitarget"], data),
back_file, fix_file, data))
out_files[dd.get_sample_name(data)] = fix_file
back_files[dd.get_sample_name(data)] = back_file
parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
run_multicore(cnvkit.run_fix_parallel, fix_cmd_inputs, inputs[0]["config"], parallel)
return back_files, out_files | [
"def",
"_normalize_sv_coverage_cnvkit",
"(",
"group_id",
",",
"inputs",
",",
"backgrounds",
",",
"work_dir",
",",
"back_files",
",",
"out_files",
")",
":",
"from",
"bcbio",
".",
"structural",
"import",
"cnvkit",
"cnns",
"=",
"reduce",
"(",
"operator",
".",
"ad... | Normalize CNV coverage depths by GC, repeats and background using CNVkit
- reference: calculates reference backgrounds from normals and pools
including GC and repeat information
- fix: Uses background to normalize coverage estimations
http://cnvkit.readthedocs.io/en/stable/pipeline.html#fix | [
"Normalize",
"CNV",
"coverage",
"depths",
"by",
"GC",
"repeats",
"and",
"background",
"using",
"CNVkit"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L305-L342 | train | 218,405 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | get_base_cnv_regions | def get_base_cnv_regions(data, work_dir, genome_default="transcripts1e4", include_gene_names=True):
"""Retrieve set of target regions for CNV analysis.
Subsets to extended transcript regions for WGS experiments to avoid
long runtimes.
"""
cov_interval = dd.get_coverage_interval(data)
base_regions = get_sv_bed(data, include_gene_names=include_gene_names)
# if we don't have a configured BED or regions to use for SV caling
if not base_regions:
# For genome calls, subset to regions near genes as targets
if cov_interval == "genome":
base_regions = get_sv_bed(data, genome_default, work_dir, include_gene_names=include_gene_names)
if base_regions:
base_regions = remove_exclude_regions(base_regions, base_regions, [data])
# Finally, default to the defined variant regions
if not base_regions:
base_regions = dd.get_variant_regions(data) or dd.get_sample_callable(data)
return bedutils.clean_file(base_regions, data) | python | def get_base_cnv_regions(data, work_dir, genome_default="transcripts1e4", include_gene_names=True):
"""Retrieve set of target regions for CNV analysis.
Subsets to extended transcript regions for WGS experiments to avoid
long runtimes.
"""
cov_interval = dd.get_coverage_interval(data)
base_regions = get_sv_bed(data, include_gene_names=include_gene_names)
# if we don't have a configured BED or regions to use for SV caling
if not base_regions:
# For genome calls, subset to regions near genes as targets
if cov_interval == "genome":
base_regions = get_sv_bed(data, genome_default, work_dir, include_gene_names=include_gene_names)
if base_regions:
base_regions = remove_exclude_regions(base_regions, base_regions, [data])
# Finally, default to the defined variant regions
if not base_regions:
base_regions = dd.get_variant_regions(data) or dd.get_sample_callable(data)
return bedutils.clean_file(base_regions, data) | [
"def",
"get_base_cnv_regions",
"(",
"data",
",",
"work_dir",
",",
"genome_default",
"=",
"\"transcripts1e4\"",
",",
"include_gene_names",
"=",
"True",
")",
":",
"cov_interval",
"=",
"dd",
".",
"get_coverage_interval",
"(",
"data",
")",
"base_regions",
"=",
"get_sv... | Retrieve set of target regions for CNV analysis.
Subsets to extended transcript regions for WGS experiments to avoid
long runtimes. | [
"Retrieve",
"set",
"of",
"target",
"regions",
"for",
"CNV",
"analysis",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L346-L364 | train | 218,406 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | remove_exclude_regions | def remove_exclude_regions(orig_bed, base_file, items, remove_entire_feature=False):
"""Remove centromere and short end regions from an existing BED file of regions to target.
"""
from bcbio.structural import shared as sshared
out_bed = os.path.join("%s-noexclude.bed" % (utils.splitext_plus(base_file)[0]))
if not utils.file_uptodate(out_bed, orig_bed):
exclude_bed = sshared.prepare_exclude_file(items, base_file)
with file_transaction(items[0], out_bed) as tx_out_bed:
pybedtools.BedTool(orig_bed).subtract(pybedtools.BedTool(exclude_bed),
A=remove_entire_feature, nonamecheck=True).saveas(tx_out_bed)
if utils.file_exists(out_bed):
return out_bed
else:
return orig_bed | python | def remove_exclude_regions(orig_bed, base_file, items, remove_entire_feature=False):
"""Remove centromere and short end regions from an existing BED file of regions to target.
"""
from bcbio.structural import shared as sshared
out_bed = os.path.join("%s-noexclude.bed" % (utils.splitext_plus(base_file)[0]))
if not utils.file_uptodate(out_bed, orig_bed):
exclude_bed = sshared.prepare_exclude_file(items, base_file)
with file_transaction(items[0], out_bed) as tx_out_bed:
pybedtools.BedTool(orig_bed).subtract(pybedtools.BedTool(exclude_bed),
A=remove_entire_feature, nonamecheck=True).saveas(tx_out_bed)
if utils.file_exists(out_bed):
return out_bed
else:
return orig_bed | [
"def",
"remove_exclude_regions",
"(",
"orig_bed",
",",
"base_file",
",",
"items",
",",
"remove_entire_feature",
"=",
"False",
")",
":",
"from",
"bcbio",
".",
"structural",
"import",
"shared",
"as",
"sshared",
"out_bed",
"=",
"os",
".",
"path",
".",
"join",
"... | Remove centromere and short end regions from an existing BED file of regions to target. | [
"Remove",
"centromere",
"and",
"short",
"end",
"regions",
"from",
"an",
"existing",
"BED",
"file",
"of",
"regions",
"to",
"target",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L366-L379 | train | 218,407 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | get_sv_bed | def get_sv_bed(data, method=None, out_dir=None, include_gene_names=True):
"""Retrieve a BED file of regions for SV and heterogeneity calling using the provided method.
method choices:
- exons: Raw BED file of exon regions
- transcripts: Full collapsed regions with the min and max of each transcript.
- transcriptsXXXX: Collapsed regions around transcripts with a window size of
XXXX.
- A custom BED file of regions
"""
if method is None:
method = (tz.get_in(["config", "algorithm", "sv_regions"], data) or dd.get_variant_regions(data)
or dd.get_sample_callable(data))
gene_file = dd.get_gene_bed(data)
if method and os.path.isfile(method):
return method
elif not gene_file or not method:
return None
elif method == "exons":
return gene_file
elif method.startswith("transcripts"):
window = method.split("transcripts")[-1]
window = int(float(window)) if window else 0
return _collapse_transcripts(gene_file, window, data, out_dir, include_gene_names=include_gene_names)
else:
raise ValueError("Unexpected transcript retrieval method: %s" % method) | python | def get_sv_bed(data, method=None, out_dir=None, include_gene_names=True):
"""Retrieve a BED file of regions for SV and heterogeneity calling using the provided method.
method choices:
- exons: Raw BED file of exon regions
- transcripts: Full collapsed regions with the min and max of each transcript.
- transcriptsXXXX: Collapsed regions around transcripts with a window size of
XXXX.
- A custom BED file of regions
"""
if method is None:
method = (tz.get_in(["config", "algorithm", "sv_regions"], data) or dd.get_variant_regions(data)
or dd.get_sample_callable(data))
gene_file = dd.get_gene_bed(data)
if method and os.path.isfile(method):
return method
elif not gene_file or not method:
return None
elif method == "exons":
return gene_file
elif method.startswith("transcripts"):
window = method.split("transcripts")[-1]
window = int(float(window)) if window else 0
return _collapse_transcripts(gene_file, window, data, out_dir, include_gene_names=include_gene_names)
else:
raise ValueError("Unexpected transcript retrieval method: %s" % method) | [
"def",
"get_sv_bed",
"(",
"data",
",",
"method",
"=",
"None",
",",
"out_dir",
"=",
"None",
",",
"include_gene_names",
"=",
"True",
")",
":",
"if",
"method",
"is",
"None",
":",
"method",
"=",
"(",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"a... | Retrieve a BED file of regions for SV and heterogeneity calling using the provided method.
method choices:
- exons: Raw BED file of exon regions
- transcripts: Full collapsed regions with the min and max of each transcript.
- transcriptsXXXX: Collapsed regions around transcripts with a window size of
XXXX.
- A custom BED file of regions | [
"Retrieve",
"a",
"BED",
"file",
"of",
"regions",
"for",
"SV",
"and",
"heterogeneity",
"calling",
"using",
"the",
"provided",
"method",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L381-L406 | train | 218,408 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | _group_coords | def _group_coords(rs):
"""Organize coordinate regions into groups for each transcript.
Avoids collapsing very large introns or repetitive genes spread across
the chromosome by limiting the intron size to 100kb for creating a single transcript
"""
max_intron_size = 1e5
coords = []
for r in rs:
coords.append(r.start)
coords.append(r.end)
coord_groups = []
cur_group = []
for coord in sorted(coords):
if not cur_group or coord - cur_group[-1] < max_intron_size:
cur_group.append(coord)
else:
coord_groups.append(cur_group)
cur_group = [coord]
if cur_group:
coord_groups.append(cur_group)
return coord_groups | python | def _group_coords(rs):
"""Organize coordinate regions into groups for each transcript.
Avoids collapsing very large introns or repetitive genes spread across
the chromosome by limiting the intron size to 100kb for creating a single transcript
"""
max_intron_size = 1e5
coords = []
for r in rs:
coords.append(r.start)
coords.append(r.end)
coord_groups = []
cur_group = []
for coord in sorted(coords):
if not cur_group or coord - cur_group[-1] < max_intron_size:
cur_group.append(coord)
else:
coord_groups.append(cur_group)
cur_group = [coord]
if cur_group:
coord_groups.append(cur_group)
return coord_groups | [
"def",
"_group_coords",
"(",
"rs",
")",
":",
"max_intron_size",
"=",
"1e5",
"coords",
"=",
"[",
"]",
"for",
"r",
"in",
"rs",
":",
"coords",
".",
"append",
"(",
"r",
".",
"start",
")",
"coords",
".",
"append",
"(",
"r",
".",
"end",
")",
"coord_group... | Organize coordinate regions into groups for each transcript.
Avoids collapsing very large introns or repetitive genes spread across
the chromosome by limiting the intron size to 100kb for creating a single transcript | [
"Organize",
"coordinate",
"regions",
"into",
"groups",
"for",
"each",
"transcript",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L445-L466 | train | 218,409 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | MemoizedSizes._calc_sizes | def _calc_sizes(self, cnv_file, items):
"""Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget).
"""
bp_per_bin = 100000 # same target as CNVkit
range_map = {"target": (100, 250), "antitarget": (10000, 1000000)}
target_bps = []
anti_bps = []
checked_beds = set([])
for data in items:
region_bed = tz.get_in(["depth", "variant_regions", "regions"], data)
if region_bed and region_bed not in checked_beds:
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file):
if r.stop - r.start > range_map["target"][0]:
target_bps.append(float(r.name))
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file, v=True):
if r.stop - r.start > range_map["target"][1]:
anti_bps.append(float(r.name))
checked_beds.add(region_bed)
def scale_in_boundary(raw, round_interval, range_targets):
min_val, max_val = range_targets
out = int(math.ceil(raw / float(round_interval)) * round_interval)
if out > max_val:
return max_val
elif out < min_val:
return min_val
else:
return out
if target_bps and np.median(target_bps) > 0:
raw_target_bin = bp_per_bin / float(np.median(target_bps))
target_bin = scale_in_boundary(raw_target_bin, 50, range_map["target"])
else:
target_bin = range_map["target"][1]
if anti_bps and np.median(anti_bps) > 0:
raw_anti_bin = bp_per_bin / float(np.median(anti_bps))
anti_bin = scale_in_boundary(raw_anti_bin, 10000, range_map["antitarget"])
else:
anti_bin = range_map["antitarget"][1]
return target_bin, anti_bin | python | def _calc_sizes(self, cnv_file, items):
"""Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget).
"""
bp_per_bin = 100000 # same target as CNVkit
range_map = {"target": (100, 250), "antitarget": (10000, 1000000)}
target_bps = []
anti_bps = []
checked_beds = set([])
for data in items:
region_bed = tz.get_in(["depth", "variant_regions", "regions"], data)
if region_bed and region_bed not in checked_beds:
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file):
if r.stop - r.start > range_map["target"][0]:
target_bps.append(float(r.name))
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file, v=True):
if r.stop - r.start > range_map["target"][1]:
anti_bps.append(float(r.name))
checked_beds.add(region_bed)
def scale_in_boundary(raw, round_interval, range_targets):
min_val, max_val = range_targets
out = int(math.ceil(raw / float(round_interval)) * round_interval)
if out > max_val:
return max_val
elif out < min_val:
return min_val
else:
return out
if target_bps and np.median(target_bps) > 0:
raw_target_bin = bp_per_bin / float(np.median(target_bps))
target_bin = scale_in_boundary(raw_target_bin, 50, range_map["target"])
else:
target_bin = range_map["target"][1]
if anti_bps and np.median(anti_bps) > 0:
raw_anti_bin = bp_per_bin / float(np.median(anti_bps))
anti_bin = scale_in_boundary(raw_anti_bin, 10000, range_map["antitarget"])
else:
anti_bin = range_map["antitarget"][1]
return target_bin, anti_bin | [
"def",
"_calc_sizes",
"(",
"self",
",",
"cnv_file",
",",
"items",
")",
":",
"bp_per_bin",
"=",
"100000",
"# same target as CNVkit",
"range_map",
"=",
"{",
"\"target\"",
":",
"(",
"100",
",",
"250",
")",
",",
"\"antitarget\"",
":",
"(",
"10000",
",",
"10000... | Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget). | [
"Retrieve",
"target",
"and",
"antitarget",
"bin",
"sizes",
"based",
"on",
"depth",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L98-L141 | train | 218,410 |
bcbio/bcbio-nextgen | bcbio/hmmer/search.py | phmmer | def phmmer(**kwargs):
"""Search a protein sequence against a HMMER sequence database.
Arguments:
seq - The sequence to search -- a Fasta string.
seqdb -- Sequence database to search against.
range -- A string range of results to return (ie. 1,10 for the first ten)
output -- The output format (defaults to JSON).
"""
logging.debug(kwargs)
args = {'seq' : kwargs.get('seq'),
'seqdb' : kwargs.get('seqdb')}
args2 = {'output' : kwargs.get('output', 'json'),
'range' : kwargs.get('range')}
return _hmmer("http://hmmer.janelia.org/search/phmmer", args, args2) | python | def phmmer(**kwargs):
"""Search a protein sequence against a HMMER sequence database.
Arguments:
seq - The sequence to search -- a Fasta string.
seqdb -- Sequence database to search against.
range -- A string range of results to return (ie. 1,10 for the first ten)
output -- The output format (defaults to JSON).
"""
logging.debug(kwargs)
args = {'seq' : kwargs.get('seq'),
'seqdb' : kwargs.get('seqdb')}
args2 = {'output' : kwargs.get('output', 'json'),
'range' : kwargs.get('range')}
return _hmmer("http://hmmer.janelia.org/search/phmmer", args, args2) | [
"def",
"phmmer",
"(",
"*",
"*",
"kwargs",
")",
":",
"logging",
".",
"debug",
"(",
"kwargs",
")",
"args",
"=",
"{",
"'seq'",
":",
"kwargs",
".",
"get",
"(",
"'seq'",
")",
",",
"'seqdb'",
":",
"kwargs",
".",
"get",
"(",
"'seqdb'",
")",
"}",
"args2"... | Search a protein sequence against a HMMER sequence database.
Arguments:
seq - The sequence to search -- a Fasta string.
seqdb -- Sequence database to search against.
range -- A string range of results to return (ie. 1,10 for the first ten)
output -- The output format (defaults to JSON). | [
"Search",
"a",
"protein",
"sequence",
"against",
"a",
"HMMER",
"sequence",
"database",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hmmer/search.py#L40-L54 | train | 218,411 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | scrnaseq_concatenate_metadata | def scrnaseq_concatenate_metadata(samples):
"""
Create file same dimension than mtx.colnames
with metadata and sample name to help in the
creation of the SC object.
"""
barcodes = {}
counts = ""
metadata = {}
has_sample_barcodes = False
for sample in dd.sample_data_iterator(samples):
if dd.get_sample_barcodes(sample):
has_sample_barcodes = True
with open(dd.get_sample_barcodes(sample)) as inh:
for line in inh:
cols = line.strip().split(",")
if len(cols) == 1:
# Assign sample name in case of missing in barcodes
cols.append("NaN")
barcodes[(dd.get_sample_name(sample), cols[0])] = cols[1:]
else:
barcodes[(dd.get_sample_name(sample), "NaN")] = [dd.get_sample_name(sample), "NaN"]
counts = dd.get_combined_counts(sample)
meta = map(str, list(sample["metadata"].values()))
meta_cols = list(sample["metadata"].keys())
meta = ["NaN" if not v else v for v in meta]
metadata[dd.get_sample_name(sample)] = meta
metadata_fn = counts + ".metadata"
if file_exists(metadata_fn):
return samples
with file_transaction(metadata_fn) as tx_metadata_fn:
with open(tx_metadata_fn, 'w') as outh:
outh.write(",".join(["sample"] + meta_cols) + '\n')
with open(counts + ".colnames") as inh:
for line in inh:
sample = line.split(":")[0]
if has_sample_barcodes:
barcode = sample.split("-")[1]
else:
barcode = "NaN"
outh.write(",".join(barcodes[(sample, barcode)] + metadata[sample]) + '\n')
return samples | python | def scrnaseq_concatenate_metadata(samples):
"""
Create file same dimension than mtx.colnames
with metadata and sample name to help in the
creation of the SC object.
"""
barcodes = {}
counts = ""
metadata = {}
has_sample_barcodes = False
for sample in dd.sample_data_iterator(samples):
if dd.get_sample_barcodes(sample):
has_sample_barcodes = True
with open(dd.get_sample_barcodes(sample)) as inh:
for line in inh:
cols = line.strip().split(",")
if len(cols) == 1:
# Assign sample name in case of missing in barcodes
cols.append("NaN")
barcodes[(dd.get_sample_name(sample), cols[0])] = cols[1:]
else:
barcodes[(dd.get_sample_name(sample), "NaN")] = [dd.get_sample_name(sample), "NaN"]
counts = dd.get_combined_counts(sample)
meta = map(str, list(sample["metadata"].values()))
meta_cols = list(sample["metadata"].keys())
meta = ["NaN" if not v else v for v in meta]
metadata[dd.get_sample_name(sample)] = meta
metadata_fn = counts + ".metadata"
if file_exists(metadata_fn):
return samples
with file_transaction(metadata_fn) as tx_metadata_fn:
with open(tx_metadata_fn, 'w') as outh:
outh.write(",".join(["sample"] + meta_cols) + '\n')
with open(counts + ".colnames") as inh:
for line in inh:
sample = line.split(":")[0]
if has_sample_barcodes:
barcode = sample.split("-")[1]
else:
barcode = "NaN"
outh.write(",".join(barcodes[(sample, barcode)] + metadata[sample]) + '\n')
return samples | [
"def",
"scrnaseq_concatenate_metadata",
"(",
"samples",
")",
":",
"barcodes",
"=",
"{",
"}",
"counts",
"=",
"\"\"",
"metadata",
"=",
"{",
"}",
"has_sample_barcodes",
"=",
"False",
"for",
"sample",
"in",
"dd",
".",
"sample_data_iterator",
"(",
"samples",
")",
... | Create file same dimension than mtx.colnames
with metadata and sample name to help in the
creation of the SC object. | [
"Create",
"file",
"same",
"dimension",
"than",
"mtx",
".",
"colnames",
"with",
"metadata",
"and",
"sample",
"name",
"to",
"help",
"in",
"the",
"creation",
"of",
"the",
"SC",
"object",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L47-L90 | train | 218,412 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | rnaseq_variant_calling | def rnaseq_variant_calling(samples, run_parallel):
"""
run RNA-seq variant calling using GATK
"""
samples = run_parallel("run_rnaseq_variant_calling", samples)
variantcaller = dd.get_variantcaller(to_single_data(samples[0]))
if variantcaller and ("gatk-haplotype" in variantcaller):
out = []
for d in joint.square_off(samples, run_parallel):
out.extend([[to_single_data(xs)] for xs in multi.split_variants_by_sample(to_single_data(d))])
samples = out
if variantcaller:
samples = run_parallel("run_rnaseq_ann_filter", samples)
if variantcaller and ("gatk-haplotype" in variantcaller):
out = []
for data in (to_single_data(xs) for xs in samples):
if "variants" not in data:
data["variants"] = []
data["variants"].append({"variantcaller": "gatk-haplotype", "vcf": data["vrn_file_orig"],
"population": {"vcf": data["vrn_file"]}})
data["vrn_file"] = data.pop("vrn_file_orig")
out.append([data])
samples = out
return samples | python | def rnaseq_variant_calling(samples, run_parallel):
"""
run RNA-seq variant calling using GATK
"""
samples = run_parallel("run_rnaseq_variant_calling", samples)
variantcaller = dd.get_variantcaller(to_single_data(samples[0]))
if variantcaller and ("gatk-haplotype" in variantcaller):
out = []
for d in joint.square_off(samples, run_parallel):
out.extend([[to_single_data(xs)] for xs in multi.split_variants_by_sample(to_single_data(d))])
samples = out
if variantcaller:
samples = run_parallel("run_rnaseq_ann_filter", samples)
if variantcaller and ("gatk-haplotype" in variantcaller):
out = []
for data in (to_single_data(xs) for xs in samples):
if "variants" not in data:
data["variants"] = []
data["variants"].append({"variantcaller": "gatk-haplotype", "vcf": data["vrn_file_orig"],
"population": {"vcf": data["vrn_file"]}})
data["vrn_file"] = data.pop("vrn_file_orig")
out.append([data])
samples = out
return samples | [
"def",
"rnaseq_variant_calling",
"(",
"samples",
",",
"run_parallel",
")",
":",
"samples",
"=",
"run_parallel",
"(",
"\"run_rnaseq_variant_calling\"",
",",
"samples",
")",
"variantcaller",
"=",
"dd",
".",
"get_variantcaller",
"(",
"to_single_data",
"(",
"samples",
"... | run RNA-seq variant calling using GATK | [
"run",
"RNA",
"-",
"seq",
"variant",
"calling",
"using",
"GATK"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L92-L115 | train | 218,413 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | run_rnaseq_variant_calling | def run_rnaseq_variant_calling(data):
"""
run RNA-seq variant calling, variation file is stored in `vrn_file`
in the datadict
"""
variantcaller = dd.get_variantcaller(data)
if isinstance(variantcaller, list) and len(variantcaller) > 1:
logger.error("Only one variantcaller can be run for RNA-seq at "
"this time. Post an issue here "
"(https://github.com/bcbio/bcbio-nextgen/issues) "
"if this is something you need to do.")
sys.exit(1)
if variantcaller:
if "gatk-haplotype" in variantcaller:
data = variation.rnaseq_gatk_variant_calling(data)
if vardict.get_vardict_command(data):
data = variation.rnaseq_vardict_variant_calling(data)
vrn_file = dd.get_vrn_file(data)
return [[data]] | python | def run_rnaseq_variant_calling(data):
"""
run RNA-seq variant calling, variation file is stored in `vrn_file`
in the datadict
"""
variantcaller = dd.get_variantcaller(data)
if isinstance(variantcaller, list) and len(variantcaller) > 1:
logger.error("Only one variantcaller can be run for RNA-seq at "
"this time. Post an issue here "
"(https://github.com/bcbio/bcbio-nextgen/issues) "
"if this is something you need to do.")
sys.exit(1)
if variantcaller:
if "gatk-haplotype" in variantcaller:
data = variation.rnaseq_gatk_variant_calling(data)
if vardict.get_vardict_command(data):
data = variation.rnaseq_vardict_variant_calling(data)
vrn_file = dd.get_vrn_file(data)
return [[data]] | [
"def",
"run_rnaseq_variant_calling",
"(",
"data",
")",
":",
"variantcaller",
"=",
"dd",
".",
"get_variantcaller",
"(",
"data",
")",
"if",
"isinstance",
"(",
"variantcaller",
",",
"list",
")",
"and",
"len",
"(",
"variantcaller",
")",
">",
"1",
":",
"logger",
... | run RNA-seq variant calling, variation file is stored in `vrn_file`
in the datadict | [
"run",
"RNA",
"-",
"seq",
"variant",
"calling",
"variation",
"file",
"is",
"stored",
"in",
"vrn_file",
"in",
"the",
"datadict"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L117-L136 | train | 218,414 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | run_rnaseq_ann_filter | def run_rnaseq_ann_filter(data):
"""Run RNA-seq annotation and filtering.
"""
data = to_single_data(data)
if dd.get_vrn_file(data):
eff_file = effects.add_to_vcf(dd.get_vrn_file(data), data)[0]
if eff_file:
data = dd.set_vrn_file(data, eff_file)
ann_file = population.run_vcfanno(dd.get_vrn_file(data), data)
if ann_file:
data = dd.set_vrn_file(data, ann_file)
variantcaller = dd.get_variantcaller(data)
if variantcaller and ("gatk-haplotype" in variantcaller):
filter_file = variation.gatk_filter_rnaseq(dd.get_vrn_file(data), data)
data = dd.set_vrn_file(data, filter_file)
# remove variants close to splice junctions
vrn_file = dd.get_vrn_file(data)
vrn_file = variation.filter_junction_variants(vrn_file, data)
data = dd.set_vrn_file(data, vrn_file)
return [[data]] | python | def run_rnaseq_ann_filter(data):
"""Run RNA-seq annotation and filtering.
"""
data = to_single_data(data)
if dd.get_vrn_file(data):
eff_file = effects.add_to_vcf(dd.get_vrn_file(data), data)[0]
if eff_file:
data = dd.set_vrn_file(data, eff_file)
ann_file = population.run_vcfanno(dd.get_vrn_file(data), data)
if ann_file:
data = dd.set_vrn_file(data, ann_file)
variantcaller = dd.get_variantcaller(data)
if variantcaller and ("gatk-haplotype" in variantcaller):
filter_file = variation.gatk_filter_rnaseq(dd.get_vrn_file(data), data)
data = dd.set_vrn_file(data, filter_file)
# remove variants close to splice junctions
vrn_file = dd.get_vrn_file(data)
vrn_file = variation.filter_junction_variants(vrn_file, data)
data = dd.set_vrn_file(data, vrn_file)
return [[data]] | [
"def",
"run_rnaseq_ann_filter",
"(",
"data",
")",
":",
"data",
"=",
"to_single_data",
"(",
"data",
")",
"if",
"dd",
".",
"get_vrn_file",
"(",
"data",
")",
":",
"eff_file",
"=",
"effects",
".",
"add_to_vcf",
"(",
"dd",
".",
"get_vrn_file",
"(",
"data",
")... | Run RNA-seq annotation and filtering. | [
"Run",
"RNA",
"-",
"seq",
"annotation",
"and",
"filtering",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L138-L157 | train | 218,415 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | quantitate | def quantitate(data):
"""CWL target for quantitation.
XXX Needs to be split and parallelized by expression caller, with merging
of multiple calls.
"""
data = to_single_data(to_single_data(data))
data = generate_transcript_counts(data)[0][0]
data["quant"] = {}
if "sailfish" in dd.get_expression_caller(data):
data = to_single_data(sailfish.run_sailfish(data)[0])
data["quant"]["tsv"] = data["sailfish"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["sailfish"]), "abundance.h5")
if ("kallisto" in dd.get_expression_caller(data) or "pizzly" in dd.get_fusion_caller(data, [])):
data = to_single_data(kallisto.run_kallisto_rnaseq(data)[0])
data["quant"]["tsv"] = os.path.join(data["kallisto_quant"], "abundance.tsv")
data["quant"]["hdf5"] = os.path.join(data["kallisto_quant"], "abundance.h5")
if (os.path.exists(os.path.join(data["kallisto_quant"], "fusion.txt"))):
data["quant"]["fusion"] = os.path.join(data["kallisto_quant"], "fusion.txt")
else:
data["quant"]["fusion"] = None
if "salmon" in dd.get_expression_caller(data):
data = to_single_data(salmon.run_salmon_reads(data)[0])
data["quant"]["tsv"] = data["salmon"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["salmon"]), "abundance.h5")
return [[data]] | python | def quantitate(data):
"""CWL target for quantitation.
XXX Needs to be split and parallelized by expression caller, with merging
of multiple calls.
"""
data = to_single_data(to_single_data(data))
data = generate_transcript_counts(data)[0][0]
data["quant"] = {}
if "sailfish" in dd.get_expression_caller(data):
data = to_single_data(sailfish.run_sailfish(data)[0])
data["quant"]["tsv"] = data["sailfish"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["sailfish"]), "abundance.h5")
if ("kallisto" in dd.get_expression_caller(data) or "pizzly" in dd.get_fusion_caller(data, [])):
data = to_single_data(kallisto.run_kallisto_rnaseq(data)[0])
data["quant"]["tsv"] = os.path.join(data["kallisto_quant"], "abundance.tsv")
data["quant"]["hdf5"] = os.path.join(data["kallisto_quant"], "abundance.h5")
if (os.path.exists(os.path.join(data["kallisto_quant"], "fusion.txt"))):
data["quant"]["fusion"] = os.path.join(data["kallisto_quant"], "fusion.txt")
else:
data["quant"]["fusion"] = None
if "salmon" in dd.get_expression_caller(data):
data = to_single_data(salmon.run_salmon_reads(data)[0])
data["quant"]["tsv"] = data["salmon"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["salmon"]), "abundance.h5")
return [[data]] | [
"def",
"quantitate",
"(",
"data",
")",
":",
"data",
"=",
"to_single_data",
"(",
"to_single_data",
"(",
"data",
")",
")",
"data",
"=",
"generate_transcript_counts",
"(",
"data",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"data",
"[",
"\"quant\"",
"]",
"=",
"{",... | CWL target for quantitation.
XXX Needs to be split and parallelized by expression caller, with merging
of multiple calls. | [
"CWL",
"target",
"for",
"quantitation",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L159-L184 | train | 218,416 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | quantitate_expression_parallel | def quantitate_expression_parallel(samples, run_parallel):
"""
quantitate expression, all programs run here should be multithreaded to
take advantage of the threaded run_parallel environment
"""
data = samples[0][0]
samples = run_parallel("generate_transcript_counts", samples)
if "cufflinks" in dd.get_expression_caller(data):
samples = run_parallel("run_cufflinks", samples)
if "stringtie" in dd.get_expression_caller(data):
samples = run_parallel("run_stringtie_expression", samples)
if ("kallisto" in dd.get_expression_caller(data) or
dd.get_fusion_mode(data) or
"pizzly" in dd.get_fusion_caller(data, [])):
samples = run_parallel("run_kallisto_index", [samples])
samples = run_parallel("run_kallisto_rnaseq", samples)
if "sailfish" in dd.get_expression_caller(data):
samples = run_parallel("run_sailfish_index", [samples])
samples = run_parallel("run_sailfish", samples)
# always run salmon
samples = run_parallel("run_salmon_index", [samples])
samples = run_parallel("run_salmon_reads", samples)
samples = run_parallel("detect_fusions", samples)
return samples | python | def quantitate_expression_parallel(samples, run_parallel):
"""
quantitate expression, all programs run here should be multithreaded to
take advantage of the threaded run_parallel environment
"""
data = samples[0][0]
samples = run_parallel("generate_transcript_counts", samples)
if "cufflinks" in dd.get_expression_caller(data):
samples = run_parallel("run_cufflinks", samples)
if "stringtie" in dd.get_expression_caller(data):
samples = run_parallel("run_stringtie_expression", samples)
if ("kallisto" in dd.get_expression_caller(data) or
dd.get_fusion_mode(data) or
"pizzly" in dd.get_fusion_caller(data, [])):
samples = run_parallel("run_kallisto_index", [samples])
samples = run_parallel("run_kallisto_rnaseq", samples)
if "sailfish" in dd.get_expression_caller(data):
samples = run_parallel("run_sailfish_index", [samples])
samples = run_parallel("run_sailfish", samples)
# always run salmon
samples = run_parallel("run_salmon_index", [samples])
samples = run_parallel("run_salmon_reads", samples)
samples = run_parallel("detect_fusions", samples)
return samples | [
"def",
"quantitate_expression_parallel",
"(",
"samples",
",",
"run_parallel",
")",
":",
"data",
"=",
"samples",
"[",
"0",
"]",
"[",
"0",
"]",
"samples",
"=",
"run_parallel",
"(",
"\"generate_transcript_counts\"",
",",
"samples",
")",
"if",
"\"cufflinks\"",
"in",... | quantitate expression, all programs run here should be multithreaded to
take advantage of the threaded run_parallel environment | [
"quantitate",
"expression",
"all",
"programs",
"run",
"here",
"should",
"be",
"multithreaded",
"to",
"take",
"advantage",
"of",
"the",
"threaded",
"run_parallel",
"environment"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L186-L210 | train | 218,417 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | quantitate_expression_noparallel | def quantitate_expression_noparallel(samples, run_parallel):
"""
run transcript quantitation for algorithms that don't run in parallel
"""
data = samples[0][0]
if "express" in dd.get_expression_caller(data):
samples = run_parallel("run_express", samples)
if "dexseq" in dd.get_expression_caller(data):
samples = run_parallel("run_dexseq", samples)
return samples | python | def quantitate_expression_noparallel(samples, run_parallel):
"""
run transcript quantitation for algorithms that don't run in parallel
"""
data = samples[0][0]
if "express" in dd.get_expression_caller(data):
samples = run_parallel("run_express", samples)
if "dexseq" in dd.get_expression_caller(data):
samples = run_parallel("run_dexseq", samples)
return samples | [
"def",
"quantitate_expression_noparallel",
"(",
"samples",
",",
"run_parallel",
")",
":",
"data",
"=",
"samples",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"\"express\"",
"in",
"dd",
".",
"get_expression_caller",
"(",
"data",
")",
":",
"samples",
"=",
"run_paralle... | run transcript quantitation for algorithms that don't run in parallel | [
"run",
"transcript",
"quantitation",
"for",
"algorithms",
"that",
"don",
"t",
"run",
"in",
"parallel"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L236-L245 | train | 218,418 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | generate_transcript_counts | def generate_transcript_counts(data):
"""Generate counts per transcript and per exon from an alignment"""
data["count_file"] = featureCounts.count(data)
if dd.get_fusion_mode(data, False) and not dd.get_fusion_caller(data):
oncofuse_file = oncofuse.run(data)
if oncofuse_file:
data = dd.set_oncofuse_file(data, oncofuse_file)
if dd.get_transcriptome_align(data):
# to create a disambiguated transcriptome file realign with bowtie2
if dd.get_disambiguate(data):
logger.info("Aligning to the transcriptome with bowtie2 using the "
"disambiguated reads.")
bam_path = data["work_bam"]
fastq_paths = alignprep._bgzip_from_bam(bam_path, data["dirs"], data, is_retry=False, output_infix='-transcriptome')
if len(fastq_paths) == 2:
file1, file2 = fastq_paths
else:
file1, file2 = fastq_paths[0], None
ref_file = dd.get_ref_file(data)
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
else:
file1, file2 = dd.get_input_sequence_files(data)
if not dd.get_transcriptome_bam(data):
ref_file = dd.get_ref_file(data)
logger.info("Transcriptome alignment was flagged to run, but the "
"transcriptome BAM file was not found. Aligning to the "
"transcriptome with bowtie2.")
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
data = spikein.counts_spikein(data)
return [[data]] | python | def generate_transcript_counts(data):
"""Generate counts per transcript and per exon from an alignment"""
data["count_file"] = featureCounts.count(data)
if dd.get_fusion_mode(data, False) and not dd.get_fusion_caller(data):
oncofuse_file = oncofuse.run(data)
if oncofuse_file:
data = dd.set_oncofuse_file(data, oncofuse_file)
if dd.get_transcriptome_align(data):
# to create a disambiguated transcriptome file realign with bowtie2
if dd.get_disambiguate(data):
logger.info("Aligning to the transcriptome with bowtie2 using the "
"disambiguated reads.")
bam_path = data["work_bam"]
fastq_paths = alignprep._bgzip_from_bam(bam_path, data["dirs"], data, is_retry=False, output_infix='-transcriptome')
if len(fastq_paths) == 2:
file1, file2 = fastq_paths
else:
file1, file2 = fastq_paths[0], None
ref_file = dd.get_ref_file(data)
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
else:
file1, file2 = dd.get_input_sequence_files(data)
if not dd.get_transcriptome_bam(data):
ref_file = dd.get_ref_file(data)
logger.info("Transcriptome alignment was flagged to run, but the "
"transcriptome BAM file was not found. Aligning to the "
"transcriptome with bowtie2.")
data = bowtie2.align_transcriptome(file1, file2, ref_file, data)
data = spikein.counts_spikein(data)
return [[data]] | [
"def",
"generate_transcript_counts",
"(",
"data",
")",
":",
"data",
"[",
"\"count_file\"",
"]",
"=",
"featureCounts",
".",
"count",
"(",
"data",
")",
"if",
"dd",
".",
"get_fusion_mode",
"(",
"data",
",",
"False",
")",
"and",
"not",
"dd",
".",
"get_fusion_c... | Generate counts per transcript and per exon from an alignment | [
"Generate",
"counts",
"per",
"transcript",
"and",
"per",
"exon",
"from",
"an",
"alignment"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L247-L278 | train | 218,419 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | run_dexseq | def run_dexseq(data):
"""Quantitate exon-level counts with DEXSeq"""
if dd.get_dexseq_gff(data, None):
data = dexseq.bcbio_run(data)
return [[data]] | python | def run_dexseq(data):
"""Quantitate exon-level counts with DEXSeq"""
if dd.get_dexseq_gff(data, None):
data = dexseq.bcbio_run(data)
return [[data]] | [
"def",
"run_dexseq",
"(",
"data",
")",
":",
"if",
"dd",
".",
"get_dexseq_gff",
"(",
"data",
",",
"None",
")",
":",
"data",
"=",
"dexseq",
".",
"bcbio_run",
"(",
"data",
")",
"return",
"[",
"[",
"data",
"]",
"]"
] | Quantitate exon-level counts with DEXSeq | [
"Quantitate",
"exon",
"-",
"level",
"counts",
"with",
"DEXSeq"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L285-L289 | train | 218,420 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | combine_express | def combine_express(samples, combined):
"""Combine tpm, effective counts and fpkm from express results"""
if not combined:
return None
to_combine = [dd.get_express_counts(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_counts(x)]
gtf_file = dd.get_gtf_file(samples[0][0])
isoform_to_gene_file = os.path.join(os.path.dirname(combined), "isoform_to_gene.txt")
isoform_to_gene_file = express.isoform_to_gene_name(
gtf_file, isoform_to_gene_file, next(dd.sample_data_iterator(samples)))
if len(to_combine) > 0:
eff_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_counts"
eff_counts_combined = count.combine_count_files(to_combine, eff_counts_combined_file, ext=".counts")
to_combine = [dd.get_express_tpm(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_tpm(x)]
tpm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_tpm"
tpm_counts_combined = count.combine_count_files(to_combine, tpm_counts_combined_file)
to_combine = [dd.get_express_fpkm(x) for x in dd.sample_data_iterator(samples)
if dd.get_express_fpkm(x)]
fpkm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_fpkm"
fpkm_counts_combined = count.combine_count_files(to_combine, fpkm_counts_combined_file, ext=".fpkm")
return {'counts': eff_counts_combined, 'tpm': tpm_counts_combined,
'fpkm': fpkm_counts_combined, 'isoform_to_gene': isoform_to_gene_file}
return {} | python | def combine_express(samples, combined):
"""Combine tpm, effective counts and fpkm from express results"""
if not combined:
return None
to_combine = [dd.get_express_counts(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_counts(x)]
gtf_file = dd.get_gtf_file(samples[0][0])
isoform_to_gene_file = os.path.join(os.path.dirname(combined), "isoform_to_gene.txt")
isoform_to_gene_file = express.isoform_to_gene_name(
gtf_file, isoform_to_gene_file, next(dd.sample_data_iterator(samples)))
if len(to_combine) > 0:
eff_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_counts"
eff_counts_combined = count.combine_count_files(to_combine, eff_counts_combined_file, ext=".counts")
to_combine = [dd.get_express_tpm(x) for x in
dd.sample_data_iterator(samples) if dd.get_express_tpm(x)]
tpm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_tpm"
tpm_counts_combined = count.combine_count_files(to_combine, tpm_counts_combined_file)
to_combine = [dd.get_express_fpkm(x) for x in dd.sample_data_iterator(samples)
if dd.get_express_fpkm(x)]
fpkm_counts_combined_file = os.path.splitext(combined)[0] + ".isoform.express_fpkm"
fpkm_counts_combined = count.combine_count_files(to_combine, fpkm_counts_combined_file, ext=".fpkm")
return {'counts': eff_counts_combined, 'tpm': tpm_counts_combined,
'fpkm': fpkm_counts_combined, 'isoform_to_gene': isoform_to_gene_file}
return {} | [
"def",
"combine_express",
"(",
"samples",
",",
"combined",
")",
":",
"if",
"not",
"combined",
":",
"return",
"None",
"to_combine",
"=",
"[",
"dd",
".",
"get_express_counts",
"(",
"x",
")",
"for",
"x",
"in",
"dd",
".",
"sample_data_iterator",
"(",
"samples"... | Combine tpm, effective counts and fpkm from express results | [
"Combine",
"tpm",
"effective",
"counts",
"and",
"fpkm",
"from",
"express",
"results"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L296-L319 | train | 218,421 |
bcbio/bcbio-nextgen | bcbio/pipeline/rnaseq.py | run_cufflinks | def run_cufflinks(data):
"""Quantitate transcript expression with Cufflinks"""
if "cufflinks" in dd.get_tools_off(data):
return [[data]]
work_bam = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir, fpkm_file, fpkm_isoform_file = cufflinks.run(work_bam, ref_file, data)
data = dd.set_cufflinks_dir(data, out_dir)
data = dd.set_fpkm(data, fpkm_file)
data = dd.set_fpkm_isoform(data, fpkm_isoform_file)
return [[data]] | python | def run_cufflinks(data):
"""Quantitate transcript expression with Cufflinks"""
if "cufflinks" in dd.get_tools_off(data):
return [[data]]
work_bam = dd.get_work_bam(data)
ref_file = dd.get_sam_ref(data)
out_dir, fpkm_file, fpkm_isoform_file = cufflinks.run(work_bam, ref_file, data)
data = dd.set_cufflinks_dir(data, out_dir)
data = dd.set_fpkm(data, fpkm_file)
data = dd.set_fpkm_isoform(data, fpkm_isoform_file)
return [[data]] | [
"def",
"run_cufflinks",
"(",
"data",
")",
":",
"if",
"\"cufflinks\"",
"in",
"dd",
".",
"get_tools_off",
"(",
"data",
")",
":",
"return",
"[",
"[",
"data",
"]",
"]",
"work_bam",
"=",
"dd",
".",
"get_work_bam",
"(",
"data",
")",
"ref_file",
"=",
"dd",
... | Quantitate transcript expression with Cufflinks | [
"Quantitate",
"transcript",
"expression",
"with",
"Cufflinks"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L321-L331 | train | 218,422 |
bcbio/bcbio-nextgen | bcbio/rnaseq/express.py | run | def run(data):
"""Quantitaive isoforms expression by eXpress"""
name = dd.get_sample_name(data)
in_bam = dd.get_transcriptome_bam(data)
config = data['config']
if not in_bam:
logger.info("Transcriptome-mapped BAM file not found, skipping eXpress.")
return data
out_dir = os.path.join(dd.get_work_dir(data), "express", name)
out_file = os.path.join(out_dir, name + ".xprs")
express = config_utils.get_program("express", data['config'])
strand = _set_stranded_flag(in_bam, data)
if not file_exists(out_file):
gtf_fasta = gtf.gtf_to_fasta(dd.get_gtf_file(data), dd.get_ref_file(data))
with tx_tmpdir(data) as tmp_dir:
with file_transaction(data, out_dir) as tx_out_dir:
bam_file = _prepare_bam_file(in_bam, tmp_dir, config)
cmd = ("{express} --no-update-check -o {tx_out_dir} {strand} {gtf_fasta} {bam_file}")
do.run(cmd.format(**locals()), "Run express on %s." % in_bam, {})
shutil.move(os.path.join(out_dir, "results.xprs"), out_file)
eff_count_file = _get_column(out_file, out_file.replace(".xprs", "_eff.counts"), 7, data=data)
tpm_file = _get_column(out_file, out_file.replace("xprs", "tpm"), 14, data=data)
fpkm_file = _get_column(out_file, out_file.replace("xprs", "fpkm"), 10, data=data)
data = dd.set_express_counts(data, eff_count_file)
data = dd.set_express_tpm(data, tpm_file)
data = dd.set_express_fpkm(data, fpkm_file)
return data | python | def run(data):
"""Quantitaive isoforms expression by eXpress"""
name = dd.get_sample_name(data)
in_bam = dd.get_transcriptome_bam(data)
config = data['config']
if not in_bam:
logger.info("Transcriptome-mapped BAM file not found, skipping eXpress.")
return data
out_dir = os.path.join(dd.get_work_dir(data), "express", name)
out_file = os.path.join(out_dir, name + ".xprs")
express = config_utils.get_program("express", data['config'])
strand = _set_stranded_flag(in_bam, data)
if not file_exists(out_file):
gtf_fasta = gtf.gtf_to_fasta(dd.get_gtf_file(data), dd.get_ref_file(data))
with tx_tmpdir(data) as tmp_dir:
with file_transaction(data, out_dir) as tx_out_dir:
bam_file = _prepare_bam_file(in_bam, tmp_dir, config)
cmd = ("{express} --no-update-check -o {tx_out_dir} {strand} {gtf_fasta} {bam_file}")
do.run(cmd.format(**locals()), "Run express on %s." % in_bam, {})
shutil.move(os.path.join(out_dir, "results.xprs"), out_file)
eff_count_file = _get_column(out_file, out_file.replace(".xprs", "_eff.counts"), 7, data=data)
tpm_file = _get_column(out_file, out_file.replace("xprs", "tpm"), 14, data=data)
fpkm_file = _get_column(out_file, out_file.replace("xprs", "fpkm"), 10, data=data)
data = dd.set_express_counts(data, eff_count_file)
data = dd.set_express_tpm(data, tpm_file)
data = dd.set_express_fpkm(data, fpkm_file)
return data | [
"def",
"run",
"(",
"data",
")",
":",
"name",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"in_bam",
"=",
"dd",
".",
"get_transcriptome_bam",
"(",
"data",
")",
"config",
"=",
"data",
"[",
"'config'",
"]",
"if",
"not",
"in_bam",
":",
"logger",
... | Quantitaive isoforms expression by eXpress | [
"Quantitaive",
"isoforms",
"expression",
"by",
"eXpress"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/express.py#L13-L39 | train | 218,423 |
bcbio/bcbio-nextgen | bcbio/rnaseq/express.py | _get_column | def _get_column(in_file, out_file, column, data=None):
"""Subset one column from a file
"""
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, 'w') as out_handle:
for line in in_handle:
cols = line.strip().split("\t")
if line.find("eff_count") > 0:
continue
number = cols[column]
if column == 7:
number = int(round(float(number), 0))
out_handle.write("%s\t%s\n" % (cols[1], number))
return out_file | python | def _get_column(in_file, out_file, column, data=None):
"""Subset one column from a file
"""
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, 'w') as out_handle:
for line in in_handle:
cols = line.strip().split("\t")
if line.find("eff_count") > 0:
continue
number = cols[column]
if column == 7:
number = int(round(float(number), 0))
out_handle.write("%s\t%s\n" % (cols[1], number))
return out_file | [
"def",
"_get_column",
"(",
"in_file",
",",
"out_file",
",",
"column",
",",
"data",
"=",
"None",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"with",
"open",
"(",
"in_file",
")",
"as",
"in_handle",
":"... | Subset one column from a file | [
"Subset",
"one",
"column",
"from",
"a",
"file"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/express.py#L41-L55 | train | 218,424 |
bcbio/bcbio-nextgen | bcbio/rnaseq/express.py | _prepare_bam_file | def _prepare_bam_file(bam_file, tmp_dir, config):
"""
Pipe sort by name cmd in case sort by coordinates
"""
sort_mode = _get_sort_order(bam_file, config)
if sort_mode != "queryname":
bam_file = sort(bam_file, config, "queryname")
return bam_file | python | def _prepare_bam_file(bam_file, tmp_dir, config):
"""
Pipe sort by name cmd in case sort by coordinates
"""
sort_mode = _get_sort_order(bam_file, config)
if sort_mode != "queryname":
bam_file = sort(bam_file, config, "queryname")
return bam_file | [
"def",
"_prepare_bam_file",
"(",
"bam_file",
",",
"tmp_dir",
",",
"config",
")",
":",
"sort_mode",
"=",
"_get_sort_order",
"(",
"bam_file",
",",
"config",
")",
"if",
"sort_mode",
"!=",
"\"queryname\"",
":",
"bam_file",
"=",
"sort",
"(",
"bam_file",
",",
"con... | Pipe sort by name cmd in case sort by coordinates | [
"Pipe",
"sort",
"by",
"name",
"cmd",
"in",
"case",
"sort",
"by",
"coordinates"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/express.py#L72-L79 | train | 218,425 |
bcbio/bcbio-nextgen | bcbio/rnaseq/express.py | isoform_to_gene_name | def isoform_to_gene_name(gtf_file, out_file, data):
"""
produce a table of isoform -> gene mappings for loading into EBSeq
"""
if not out_file:
out_file = tempfile.NamedTemporaryFile(delete=False).name
if file_exists(out_file):
return out_file
db = gtf.get_gtf_db(gtf_file)
line_format = "{transcript}\t{gene}\n"
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in db.features_of_type('transcript'):
transcript = feature['transcript_id'][0]
gene = feature['gene_id'][0]
out_handle.write(line_format.format(**locals()))
return out_file | python | def isoform_to_gene_name(gtf_file, out_file, data):
"""
produce a table of isoform -> gene mappings for loading into EBSeq
"""
if not out_file:
out_file = tempfile.NamedTemporaryFile(delete=False).name
if file_exists(out_file):
return out_file
db = gtf.get_gtf_db(gtf_file)
line_format = "{transcript}\t{gene}\n"
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in db.features_of_type('transcript'):
transcript = feature['transcript_id'][0]
gene = feature['gene_id'][0]
out_handle.write(line_format.format(**locals()))
return out_file | [
"def",
"isoform_to_gene_name",
"(",
"gtf_file",
",",
"out_file",
",",
"data",
")",
":",
"if",
"not",
"out_file",
":",
"out_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
".",
"name",
"if",
"file_exists",
"(",
"out_file",... | produce a table of isoform -> gene mappings for loading into EBSeq | [
"produce",
"a",
"table",
"of",
"isoform",
"-",
">",
"gene",
"mappings",
"for",
"loading",
"into",
"EBSeq"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/express.py#L81-L97 | train | 218,426 |
bcbio/bcbio-nextgen | bcbio/variation/samtools.py | shared_variantcall | def shared_variantcall(call_fn, name, align_bams, ref_file, items,
assoc_files, region=None, out_file=None):
"""Provide base functionality for prepping and indexing for variant calling.
"""
config = items[0]["config"]
if out_file is None:
if vcfutils.is_paired_analysis(align_bams, items):
out_file = "%s-paired-variants.vcf.gz" % config["metdata"]["batch"]
else:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not file_exists(out_file):
logger.debug("Genotyping with {name}: {region} {fname}".format(
name=name, region=region, fname=os.path.basename(align_bams[0])))
variant_regions = bedutils.population_variant_regions(items, merged=True)
target_regions = subset_variant_regions(variant_regions, region, out_file, items=items)
if (variant_regions is not None and isinstance(target_regions, six.string_types)
and not os.path.isfile(target_regions)):
vcfutils.write_empty_vcf(out_file, config)
else:
with file_transaction(config, out_file) as tx_out_file:
call_fn(align_bams, ref_file, items, target_regions,
tx_out_file)
if out_file.endswith(".gz"):
out_file = vcfutils.bgzip_and_index(out_file, config)
return out_file | python | def shared_variantcall(call_fn, name, align_bams, ref_file, items,
assoc_files, region=None, out_file=None):
"""Provide base functionality for prepping and indexing for variant calling.
"""
config = items[0]["config"]
if out_file is None:
if vcfutils.is_paired_analysis(align_bams, items):
out_file = "%s-paired-variants.vcf.gz" % config["metdata"]["batch"]
else:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not file_exists(out_file):
logger.debug("Genotyping with {name}: {region} {fname}".format(
name=name, region=region, fname=os.path.basename(align_bams[0])))
variant_regions = bedutils.population_variant_regions(items, merged=True)
target_regions = subset_variant_regions(variant_regions, region, out_file, items=items)
if (variant_regions is not None and isinstance(target_regions, six.string_types)
and not os.path.isfile(target_regions)):
vcfutils.write_empty_vcf(out_file, config)
else:
with file_transaction(config, out_file) as tx_out_file:
call_fn(align_bams, ref_file, items, target_regions,
tx_out_file)
if out_file.endswith(".gz"):
out_file = vcfutils.bgzip_and_index(out_file, config)
return out_file | [
"def",
"shared_variantcall",
"(",
"call_fn",
",",
"name",
",",
"align_bams",
",",
"ref_file",
",",
"items",
",",
"assoc_files",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"config",
"=",
"items",
"[",
"0",
"]",
"[",
"\"config\"",
... | Provide base functionality for prepping and indexing for variant calling. | [
"Provide",
"base",
"functionality",
"for",
"prepping",
"and",
"indexing",
"for",
"variant",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/samtools.py#L19-L43 | train | 218,427 |
bcbio/bcbio-nextgen | bcbio/variation/samtools.py | run_samtools | def run_samtools(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Detect SNPs and indels with samtools mpileup and bcftools.
"""
return shared_variantcall(_call_variants_samtools, "samtools", align_bams, ref_file,
items, assoc_files, region, out_file) | python | def run_samtools(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Detect SNPs and indels with samtools mpileup and bcftools.
"""
return shared_variantcall(_call_variants_samtools, "samtools", align_bams, ref_file,
items, assoc_files, region, out_file) | [
"def",
"run_samtools",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"return",
"shared_variantcall",
"(",
"_call_variants_samtools",
",",
"\"samtools\"",
",",
"align_bams... | Detect SNPs and indels with samtools mpileup and bcftools. | [
"Detect",
"SNPs",
"and",
"indels",
"with",
"samtools",
"mpileup",
"and",
"bcftools",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/samtools.py#L45-L50 | train | 218,428 |
bcbio/bcbio-nextgen | bcbio/variation/samtools.py | _call_variants_samtools | def _call_variants_samtools(align_bams, ref_file, items, target_regions, tx_out_file):
"""Call variants with samtools in target_regions.
Works around a GATK VCF 4.2 compatibility issue in samtools 1.0
by removing addition 4.2-only isms from VCF header lines.
"""
config = items[0]["config"]
mpileup = prep_mpileup(align_bams, ref_file, config,
target_regions=target_regions, want_bcf=True)
bcftools = config_utils.get_program("bcftools", config)
samtools_version = programs.get_version("samtools", config=config)
if samtools_version and LooseVersion(samtools_version) <= LooseVersion("0.1.19"):
raise ValueError("samtools calling not supported with pre-1.0 samtools")
bcftools_opts = "call -v -m"
compress_cmd = "| bgzip -c" if tx_out_file.endswith(".gz") else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
cmd = ("{mpileup} "
"| {bcftools} {bcftools_opts} - "
"| {fix_ambig_ref} | {fix_ambig_alt} "
"| vt normalize -n -q -r {ref_file} - "
"| sed 's/VCFv4.2/VCFv4.1/' "
"| sed 's/,Version=3>/>/' "
"| sed 's/,Version=\"3\">/>/' "
"| sed 's/Number=R/Number=./' "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Variant calling with samtools", items[0]) | python | def _call_variants_samtools(align_bams, ref_file, items, target_regions, tx_out_file):
"""Call variants with samtools in target_regions.
Works around a GATK VCF 4.2 compatibility issue in samtools 1.0
by removing addition 4.2-only isms from VCF header lines.
"""
config = items[0]["config"]
mpileup = prep_mpileup(align_bams, ref_file, config,
target_regions=target_regions, want_bcf=True)
bcftools = config_utils.get_program("bcftools", config)
samtools_version = programs.get_version("samtools", config=config)
if samtools_version and LooseVersion(samtools_version) <= LooseVersion("0.1.19"):
raise ValueError("samtools calling not supported with pre-1.0 samtools")
bcftools_opts = "call -v -m"
compress_cmd = "| bgzip -c" if tx_out_file.endswith(".gz") else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
cmd = ("{mpileup} "
"| {bcftools} {bcftools_opts} - "
"| {fix_ambig_ref} | {fix_ambig_alt} "
"| vt normalize -n -q -r {ref_file} - "
"| sed 's/VCFv4.2/VCFv4.1/' "
"| sed 's/,Version=3>/>/' "
"| sed 's/,Version=\"3\">/>/' "
"| sed 's/Number=R/Number=./' "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Variant calling with samtools", items[0]) | [
"def",
"_call_variants_samtools",
"(",
"align_bams",
",",
"ref_file",
",",
"items",
",",
"target_regions",
",",
"tx_out_file",
")",
":",
"config",
"=",
"items",
"[",
"0",
"]",
"[",
"\"config\"",
"]",
"mpileup",
"=",
"prep_mpileup",
"(",
"align_bams",
",",
"r... | Call variants with samtools in target_regions.
Works around a GATK VCF 4.2 compatibility issue in samtools 1.0
by removing addition 4.2-only isms from VCF header lines. | [
"Call",
"variants",
"with",
"samtools",
"in",
"target_regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/samtools.py#L68-L94 | train | 218,429 |
bcbio/bcbio-nextgen | bcbio/pipeline/sra.py | _convert_fastq | def _convert_fastq(srafn, outdir, single=False):
"convert sra to fastq"
cmd = "fastq-dump --split-files --gzip {srafn}"
cmd = "%s %s" % (utils.local_path_export(), cmd)
sraid = os.path.basename(utils.splitext_plus(srafn)[0])
if not srafn:
return None
if not single:
out_file = [os.path.join(outdir, "%s_1.fastq.gz" % sraid),
os.path.join(outdir, "%s_2.fastq.gz" % sraid)]
if not utils.file_exists(out_file[0]):
with utils.chdir(outdir):
do.run(cmd.format(**locals()), "Covert to fastq %s" % sraid)
if not utils.file_exists(out_file[0]):
raise IOError("SRA %s didn't convert, something happened." % srafn)
return [out for out in out_file if utils.file_exists(out)]
else:
raise ValueError("Not supported single-end sra samples for now.") | python | def _convert_fastq(srafn, outdir, single=False):
"convert sra to fastq"
cmd = "fastq-dump --split-files --gzip {srafn}"
cmd = "%s %s" % (utils.local_path_export(), cmd)
sraid = os.path.basename(utils.splitext_plus(srafn)[0])
if not srafn:
return None
if not single:
out_file = [os.path.join(outdir, "%s_1.fastq.gz" % sraid),
os.path.join(outdir, "%s_2.fastq.gz" % sraid)]
if not utils.file_exists(out_file[0]):
with utils.chdir(outdir):
do.run(cmd.format(**locals()), "Covert to fastq %s" % sraid)
if not utils.file_exists(out_file[0]):
raise IOError("SRA %s didn't convert, something happened." % srafn)
return [out for out in out_file if utils.file_exists(out)]
else:
raise ValueError("Not supported single-end sra samples for now.") | [
"def",
"_convert_fastq",
"(",
"srafn",
",",
"outdir",
",",
"single",
"=",
"False",
")",
":",
"cmd",
"=",
"\"fastq-dump --split-files --gzip {srafn}\"",
"cmd",
"=",
"\"%s %s\"",
"%",
"(",
"utils",
".",
"local_path_export",
"(",
")",
",",
"cmd",
")",
"sraid",
... | convert sra to fastq | [
"convert",
"sra",
"to",
"fastq"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sra.py#L119-L136 | train | 218,430 |
bcbio/bcbio-nextgen | bcbio/illumina/machine.py | check_and_postprocess | def check_and_postprocess(args):
"""Check for newly dumped sequencer output, post-processing and transferring.
"""
with open(args.process_config) as in_handle:
config = yaml.safe_load(in_handle)
setup_local_logging(config)
for dname in _find_unprocessed(config):
lane_details = nglims.get_runinfo(config["galaxy_url"], config["galaxy_apikey"], dname,
utils.get_in(config, ("process", "storedir")))
if isinstance(lane_details, dict) and "error" in lane_details:
print("Flowcell not found in Galaxy: %s" % lane_details)
else:
lane_details = _tweak_lane(lane_details, dname)
fcid_ss = samplesheet.from_flowcell(dname, lane_details)
_update_reported(config["msg_db"], dname)
fastq_dir = demultiplex.run_bcl2fastq(dname, fcid_ss, config)
bcbio_config, ready_fastq_dir = nglims.prep_samples_and_config(dname, lane_details, fastq_dir, config)
transfer.copy_flowcell(dname, ready_fastq_dir, bcbio_config, config)
_start_processing(dname, bcbio_config, config) | python | def check_and_postprocess(args):
"""Check for newly dumped sequencer output, post-processing and transferring.
"""
with open(args.process_config) as in_handle:
config = yaml.safe_load(in_handle)
setup_local_logging(config)
for dname in _find_unprocessed(config):
lane_details = nglims.get_runinfo(config["galaxy_url"], config["galaxy_apikey"], dname,
utils.get_in(config, ("process", "storedir")))
if isinstance(lane_details, dict) and "error" in lane_details:
print("Flowcell not found in Galaxy: %s" % lane_details)
else:
lane_details = _tweak_lane(lane_details, dname)
fcid_ss = samplesheet.from_flowcell(dname, lane_details)
_update_reported(config["msg_db"], dname)
fastq_dir = demultiplex.run_bcl2fastq(dname, fcid_ss, config)
bcbio_config, ready_fastq_dir = nglims.prep_samples_and_config(dname, lane_details, fastq_dir, config)
transfer.copy_flowcell(dname, ready_fastq_dir, bcbio_config, config)
_start_processing(dname, bcbio_config, config) | [
"def",
"check_and_postprocess",
"(",
"args",
")",
":",
"with",
"open",
"(",
"args",
".",
"process_config",
")",
"as",
"in_handle",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"in_handle",
")",
"setup_local_logging",
"(",
"config",
")",
"for",
"dname",... | Check for newly dumped sequencer output, post-processing and transferring. | [
"Check",
"for",
"newly",
"dumped",
"sequencer",
"output",
"post",
"-",
"processing",
"and",
"transferring",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/machine.py#L22-L40 | train | 218,431 |
bcbio/bcbio-nextgen | bcbio/illumina/machine.py | _tweak_lane | def _tweak_lane(lane_details, dname):
"""Potentially tweak lane information to handle custom processing, reading a lane_config.yaml file.
"""
tweak_config_file = os.path.join(dname, "lane_config.yaml")
if os.path.exists(tweak_config_file):
with open(tweak_config_file) as in_handle:
tweak_config = yaml.safe_load(in_handle)
if tweak_config.get("uniquify_lanes"):
out = []
for ld in lane_details:
ld["name"] = "%s-%s" % (ld["name"], ld["lane"])
out.append(ld)
return out
return lane_details | python | def _tweak_lane(lane_details, dname):
"""Potentially tweak lane information to handle custom processing, reading a lane_config.yaml file.
"""
tweak_config_file = os.path.join(dname, "lane_config.yaml")
if os.path.exists(tweak_config_file):
with open(tweak_config_file) as in_handle:
tweak_config = yaml.safe_load(in_handle)
if tweak_config.get("uniquify_lanes"):
out = []
for ld in lane_details:
ld["name"] = "%s-%s" % (ld["name"], ld["lane"])
out.append(ld)
return out
return lane_details | [
"def",
"_tweak_lane",
"(",
"lane_details",
",",
"dname",
")",
":",
"tweak_config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dname",
",",
"\"lane_config.yaml\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"tweak_config_file",
")",
":",
"with",... | Potentially tweak lane information to handle custom processing, reading a lane_config.yaml file. | [
"Potentially",
"tweak",
"lane",
"information",
"to",
"handle",
"custom",
"processing",
"reading",
"a",
"lane_config",
".",
"yaml",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/machine.py#L42-L55 | train | 218,432 |
bcbio/bcbio-nextgen | bcbio/illumina/machine.py | _remap_dirname | def _remap_dirname(local, remote):
"""Remap directory names from local to remote.
"""
def do(x):
return x.replace(local, remote, 1)
return do | python | def _remap_dirname(local, remote):
"""Remap directory names from local to remote.
"""
def do(x):
return x.replace(local, remote, 1)
return do | [
"def",
"_remap_dirname",
"(",
"local",
",",
"remote",
")",
":",
"def",
"do",
"(",
"x",
")",
":",
"return",
"x",
".",
"replace",
"(",
"local",
",",
"remote",
",",
"1",
")",
"return",
"do"
] | Remap directory names from local to remote. | [
"Remap",
"directory",
"names",
"from",
"local",
"to",
"remote",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/machine.py#L57-L62 | train | 218,433 |
bcbio/bcbio-nextgen | bcbio/illumina/machine.py | _find_unprocessed | def _find_unprocessed(config):
"""Find any finished directories that have not been processed.
"""
reported = _read_reported(config["msg_db"])
for dname in _get_directories(config):
if os.path.isdir(dname) and dname not in reported:
if _is_finished_dumping(dname):
yield dname | python | def _find_unprocessed(config):
"""Find any finished directories that have not been processed.
"""
reported = _read_reported(config["msg_db"])
for dname in _get_directories(config):
if os.path.isdir(dname) and dname not in reported:
if _is_finished_dumping(dname):
yield dname | [
"def",
"_find_unprocessed",
"(",
"config",
")",
":",
"reported",
"=",
"_read_reported",
"(",
"config",
"[",
"\"msg_db\"",
"]",
")",
"for",
"dname",
"in",
"_get_directories",
"(",
"config",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dname",
")... | Find any finished directories that have not been processed. | [
"Find",
"any",
"finished",
"directories",
"that",
"have",
"not",
"been",
"processed",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/machine.py#L98-L105 | train | 218,434 |
bcbio/bcbio-nextgen | bcbio/illumina/machine.py | _is_finished_dumping | def _is_finished_dumping(directory):
"""Determine if the sequencing directory has all files.
The final checkpoint file will differ depending if we are a
single or paired end run.
"""
#if _is_finished_dumping_checkpoint(directory):
# return True
# Check final output files; handles both HiSeq and GAII
run_info = os.path.join(directory, "RunInfo.xml")
hi_seq_checkpoint = "Basecalling_Netcopy_complete_Read%s.txt" % \
_expected_reads(run_info)
to_check = ["Basecalling_Netcopy_complete_SINGLEREAD.txt",
"Basecalling_Netcopy_complete_READ2.txt",
hi_seq_checkpoint]
return reduce(operator.or_,
[os.path.exists(os.path.join(directory, f)) for f in to_check]) | python | def _is_finished_dumping(directory):
"""Determine if the sequencing directory has all files.
The final checkpoint file will differ depending if we are a
single or paired end run.
"""
#if _is_finished_dumping_checkpoint(directory):
# return True
# Check final output files; handles both HiSeq and GAII
run_info = os.path.join(directory, "RunInfo.xml")
hi_seq_checkpoint = "Basecalling_Netcopy_complete_Read%s.txt" % \
_expected_reads(run_info)
to_check = ["Basecalling_Netcopy_complete_SINGLEREAD.txt",
"Basecalling_Netcopy_complete_READ2.txt",
hi_seq_checkpoint]
return reduce(operator.or_,
[os.path.exists(os.path.join(directory, f)) for f in to_check]) | [
"def",
"_is_finished_dumping",
"(",
"directory",
")",
":",
"#if _is_finished_dumping_checkpoint(directory):",
"# return True",
"# Check final output files; handles both HiSeq and GAII",
"run_info",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"\"RunInfo.xml\""... | Determine if the sequencing directory has all files.
The final checkpoint file will differ depending if we are a
single or paired end run. | [
"Determine",
"if",
"the",
"sequencing",
"directory",
"has",
"all",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/machine.py#L113-L129 | train | 218,435 |
bcbio/bcbio-nextgen | bcbio/illumina/machine.py | _expected_reads | def _expected_reads(run_info_file):
"""Parse the number of expected reads from the RunInfo.xml file.
"""
reads = []
if os.path.exists(run_info_file):
tree = ElementTree()
tree.parse(run_info_file)
read_elem = tree.find("Run/Reads")
reads = read_elem.findall("Read")
return len(reads) | python | def _expected_reads(run_info_file):
"""Parse the number of expected reads from the RunInfo.xml file.
"""
reads = []
if os.path.exists(run_info_file):
tree = ElementTree()
tree.parse(run_info_file)
read_elem = tree.find("Run/Reads")
reads = read_elem.findall("Read")
return len(reads) | [
"def",
"_expected_reads",
"(",
"run_info_file",
")",
":",
"reads",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"run_info_file",
")",
":",
"tree",
"=",
"ElementTree",
"(",
")",
"tree",
".",
"parse",
"(",
"run_info_file",
")",
"read_elem",
... | Parse the number of expected reads from the RunInfo.xml file. | [
"Parse",
"the",
"number",
"of",
"expected",
"reads",
"from",
"the",
"RunInfo",
".",
"xml",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/machine.py#L149-L158 | train | 218,436 |
bcbio/bcbio-nextgen | bcbio/illumina/machine.py | _read_reported | def _read_reported(msg_db):
"""Retrieve a list of directories previous reported.
"""
reported = []
if os.path.exists(msg_db):
with open(msg_db) as in_handle:
for line in in_handle:
reported.append(line.strip())
return reported | python | def _read_reported(msg_db):
"""Retrieve a list of directories previous reported.
"""
reported = []
if os.path.exists(msg_db):
with open(msg_db) as in_handle:
for line in in_handle:
reported.append(line.strip())
return reported | [
"def",
"_read_reported",
"(",
"msg_db",
")",
":",
"reported",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"msg_db",
")",
":",
"with",
"open",
"(",
"msg_db",
")",
"as",
"in_handle",
":",
"for",
"line",
"in",
"in_handle",
":",
"reported... | Retrieve a list of directories previous reported. | [
"Retrieve",
"a",
"list",
"of",
"directories",
"previous",
"reported",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/machine.py#L162-L170 | train | 218,437 |
bcbio/bcbio-nextgen | bcbio/qc/viral.py | get_files | def get_files(data):
"""Retrieve pre-installed viral reference files.
"""
all_files = glob.glob(os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)),
os.pardir, "viral", "*")))
return sorted(all_files) | python | def get_files(data):
"""Retrieve pre-installed viral reference files.
"""
all_files = glob.glob(os.path.normpath(os.path.join(os.path.dirname(dd.get_ref_file(data)),
os.pardir, "viral", "*")))
return sorted(all_files) | [
"def",
"get_files",
"(",
"data",
")",
":",
"all_files",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dd",
".",
"get_ref_file",
"(",
"data",
"... | Retrieve pre-installed viral reference files. | [
"Retrieve",
"pre",
"-",
"installed",
"viral",
"reference",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/viral.py#L59-L64 | train | 218,438 |
bcbio/bcbio-nextgen | bcbio/rnaseq/variation.py | gatk_splitreads | def gatk_splitreads(data):
"""
use GATK to split reads with Ns in the CIGAR string, hard clipping regions
that end up in introns
"""
broad_runner = broad.runner_from_config(dd.get_config(data))
ref_file = dd.get_ref_file(data)
deduped_bam = dd.get_deduped_bam(data)
base, ext = os.path.splitext(deduped_bam)
split_bam = base + ".splitN" + ext
if file_exists(split_bam):
data = dd.set_split_bam(data, split_bam)
return data
gatk_type = broad_runner.gatk_type()
with file_transaction(data, split_bam) as tx_split_bam:
params = ["-T", "SplitNCigarReads",
"-R", ref_file,
"-I", deduped_bam]
if gatk_type == "gatk4":
params += ["--output", tx_split_bam]
else:
params += ["-rf", "ReassignOneMappingQuality",
"-RMQF", "255",
"-RMQT", "60",
"-rf", "UnmappedRead",
"-U", "ALLOW_N_CIGAR_READS",
"-o", tx_split_bam]
if dd.get_quality_format(data) == "illumina":
params += ["--fix_misencoded_quality_scores", "-fixMisencodedQuals"]
broad_runner.run_gatk(params)
bam.index(split_bam, dd.get_config(data))
data = dd.set_split_bam(data, split_bam)
return data | python | def gatk_splitreads(data):
"""
use GATK to split reads with Ns in the CIGAR string, hard clipping regions
that end up in introns
"""
broad_runner = broad.runner_from_config(dd.get_config(data))
ref_file = dd.get_ref_file(data)
deduped_bam = dd.get_deduped_bam(data)
base, ext = os.path.splitext(deduped_bam)
split_bam = base + ".splitN" + ext
if file_exists(split_bam):
data = dd.set_split_bam(data, split_bam)
return data
gatk_type = broad_runner.gatk_type()
with file_transaction(data, split_bam) as tx_split_bam:
params = ["-T", "SplitNCigarReads",
"-R", ref_file,
"-I", deduped_bam]
if gatk_type == "gatk4":
params += ["--output", tx_split_bam]
else:
params += ["-rf", "ReassignOneMappingQuality",
"-RMQF", "255",
"-RMQT", "60",
"-rf", "UnmappedRead",
"-U", "ALLOW_N_CIGAR_READS",
"-o", tx_split_bam]
if dd.get_quality_format(data) == "illumina":
params += ["--fix_misencoded_quality_scores", "-fixMisencodedQuals"]
broad_runner.run_gatk(params)
bam.index(split_bam, dd.get_config(data))
data = dd.set_split_bam(data, split_bam)
return data | [
"def",
"gatk_splitreads",
"(",
"data",
")",
":",
"broad_runner",
"=",
"broad",
".",
"runner_from_config",
"(",
"dd",
".",
"get_config",
"(",
"data",
")",
")",
"ref_file",
"=",
"dd",
".",
"get_ref_file",
"(",
"data",
")",
"deduped_bam",
"=",
"dd",
".",
"g... | use GATK to split reads with Ns in the CIGAR string, hard clipping regions
that end up in introns | [
"use",
"GATK",
"to",
"split",
"reads",
"with",
"Ns",
"in",
"the",
"CIGAR",
"string",
"hard",
"clipping",
"regions",
"that",
"end",
"up",
"in",
"introns"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/variation.py#L25-L57 | train | 218,439 |
bcbio/bcbio-nextgen | bcbio/rnaseq/variation.py | _setup_variant_regions | def _setup_variant_regions(data, out_dir):
"""Ensure we have variant regions for calling, using transcript if not present.
Respects noalt_calling by removing additional contigs to improve
speeds.
"""
vr_file = dd.get_variant_regions(data)
if not vr_file:
vr_file = regions.get_sv_bed(data, "transcripts", out_dir=out_dir)
contigs = set([c.name for c in ref.file_contigs(dd.get_ref_file(data))])
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bedprep")),
"%s-rnaseq_clean.bed" % utils.splitext_plus(os.path.basename(vr_file))[0])
if not utils.file_uptodate(out_file, vr_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with shared.bedtools_tmpdir(data):
for r in pybedtools.BedTool(vr_file):
if r.chrom in contigs:
if chromhacks.is_nonalt(r.chrom):
out_handle.write(str(r))
data = dd.set_variant_regions(data, out_file)
return data | python | def _setup_variant_regions(data, out_dir):
"""Ensure we have variant regions for calling, using transcript if not present.
Respects noalt_calling by removing additional contigs to improve
speeds.
"""
vr_file = dd.get_variant_regions(data)
if not vr_file:
vr_file = regions.get_sv_bed(data, "transcripts", out_dir=out_dir)
contigs = set([c.name for c in ref.file_contigs(dd.get_ref_file(data))])
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bedprep")),
"%s-rnaseq_clean.bed" % utils.splitext_plus(os.path.basename(vr_file))[0])
if not utils.file_uptodate(out_file, vr_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with shared.bedtools_tmpdir(data):
for r in pybedtools.BedTool(vr_file):
if r.chrom in contigs:
if chromhacks.is_nonalt(r.chrom):
out_handle.write(str(r))
data = dd.set_variant_regions(data, out_file)
return data | [
"def",
"_setup_variant_regions",
"(",
"data",
",",
"out_dir",
")",
":",
"vr_file",
"=",
"dd",
".",
"get_variant_regions",
"(",
"data",
")",
"if",
"not",
"vr_file",
":",
"vr_file",
"=",
"regions",
".",
"get_sv_bed",
"(",
"data",
",",
"\"transcripts\"",
",",
... | Ensure we have variant regions for calling, using transcript if not present.
Respects noalt_calling by removing additional contigs to improve
speeds. | [
"Ensure",
"we",
"have",
"variant",
"regions",
"for",
"calling",
"using",
"transcript",
"if",
"not",
"present",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/variation.py#L59-L80 | train | 218,440 |
bcbio/bcbio-nextgen | bcbio/rnaseq/variation.py | gatk_rnaseq_calling | def gatk_rnaseq_calling(data):
"""Use GATK to perform gVCF variant calling on RNA-seq data
"""
from bcbio.bam import callable
data = utils.deepish_copy(data)
tools_on = dd.get_tools_on(data)
if not tools_on:
tools_on = []
tools_on.append("gvcf")
data = dd.set_tools_on(data, tools_on)
data = dd.set_jointcaller(data, ["%s-joint" % v for v in dd.get_variantcaller(data)])
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data),
"variation", "rnaseq", "gatk-haplotype"))
data = _setup_variant_regions(data, out_dir)
out_file = os.path.join(out_dir, "%s-gatk-haplotype.vcf.gz" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
region_files = []
regions = []
for cur_region in callable.get_split_regions(dd.get_variant_regions(data), data):
str_region = "_".join([str(x) for x in cur_region])
region_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data),
"variation", "rnaseq", "gatk-haplotype",
"regions")),
"%s-%s-gatk-haplotype.vcf.gz" % (dd.get_sample_name(data), str_region))
region_file = gatk.haplotype_caller([dd.get_split_bam(data)], [data], dd.get_ref_file(data), {},
region=cur_region, out_file=region_file)
region_files.append(region_file)
regions.append(cur_region)
out_file = vcfutils.concat_variant_files(region_files, out_file, regions,
dd.get_ref_file(data), data["config"])
return dd.set_vrn_file(data, out_file) | python | def gatk_rnaseq_calling(data):
"""Use GATK to perform gVCF variant calling on RNA-seq data
"""
from bcbio.bam import callable
data = utils.deepish_copy(data)
tools_on = dd.get_tools_on(data)
if not tools_on:
tools_on = []
tools_on.append("gvcf")
data = dd.set_tools_on(data, tools_on)
data = dd.set_jointcaller(data, ["%s-joint" % v for v in dd.get_variantcaller(data)])
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data),
"variation", "rnaseq", "gatk-haplotype"))
data = _setup_variant_regions(data, out_dir)
out_file = os.path.join(out_dir, "%s-gatk-haplotype.vcf.gz" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
region_files = []
regions = []
for cur_region in callable.get_split_regions(dd.get_variant_regions(data), data):
str_region = "_".join([str(x) for x in cur_region])
region_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data),
"variation", "rnaseq", "gatk-haplotype",
"regions")),
"%s-%s-gatk-haplotype.vcf.gz" % (dd.get_sample_name(data), str_region))
region_file = gatk.haplotype_caller([dd.get_split_bam(data)], [data], dd.get_ref_file(data), {},
region=cur_region, out_file=region_file)
region_files.append(region_file)
regions.append(cur_region)
out_file = vcfutils.concat_variant_files(region_files, out_file, regions,
dd.get_ref_file(data), data["config"])
return dd.set_vrn_file(data, out_file) | [
"def",
"gatk_rnaseq_calling",
"(",
"data",
")",
":",
"from",
"bcbio",
".",
"bam",
"import",
"callable",
"data",
"=",
"utils",
".",
"deepish_copy",
"(",
"data",
")",
"tools_on",
"=",
"dd",
".",
"get_tools_on",
"(",
"data",
")",
"if",
"not",
"tools_on",
":... | Use GATK to perform gVCF variant calling on RNA-seq data | [
"Use",
"GATK",
"to",
"perform",
"gVCF",
"variant",
"calling",
"on",
"RNA",
"-",
"seq",
"data"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/variation.py#L82-L112 | train | 218,441 |
bcbio/bcbio-nextgen | bcbio/rnaseq/variation.py | filter_junction_variants | def filter_junction_variants(vrn_file, data):
"""
filter out variants within 10 basepairs of a splice junction, these are
very prone to being false positives with RNA-seq data
"""
SJ_BP_MASK = 10
vrn_dir = os.path.dirname(vrn_file)
splicebed = dd.get_junction_bed(data)
if not file_exists(splicebed):
logger.info("Splice junction BED file not found, skipping filtering of "
"variants closed to splice junctions.")
return vrn_file
spliceslop = get_padded_bed_file(vrn_dir, splicebed, SJ_BP_MASK, data)
out_file = os.path.splitext(vrn_file)[0] + "-junctionfiltered.vcf.gz"
if file_exists(out_file):
return out_file
with file_transaction(data, out_file) as tx_out_file:
out_base = os.path.splitext(tx_out_file)[0]
logger.info("Removing variants within %d bases of splice junctions listed in %s from %s. " % (SJ_BP_MASK, spliceslop, vrn_file))
pybedtools.BedTool(vrn_file).intersect(spliceslop, wa=True, header=True, v=True).saveas(out_base)
tx_out_file = vcfutils.bgzip_and_index(out_base, dd.get_config(data))
return out_file | python | def filter_junction_variants(vrn_file, data):
"""
filter out variants within 10 basepairs of a splice junction, these are
very prone to being false positives with RNA-seq data
"""
SJ_BP_MASK = 10
vrn_dir = os.path.dirname(vrn_file)
splicebed = dd.get_junction_bed(data)
if not file_exists(splicebed):
logger.info("Splice junction BED file not found, skipping filtering of "
"variants closed to splice junctions.")
return vrn_file
spliceslop = get_padded_bed_file(vrn_dir, splicebed, SJ_BP_MASK, data)
out_file = os.path.splitext(vrn_file)[0] + "-junctionfiltered.vcf.gz"
if file_exists(out_file):
return out_file
with file_transaction(data, out_file) as tx_out_file:
out_base = os.path.splitext(tx_out_file)[0]
logger.info("Removing variants within %d bases of splice junctions listed in %s from %s. " % (SJ_BP_MASK, spliceslop, vrn_file))
pybedtools.BedTool(vrn_file).intersect(spliceslop, wa=True, header=True, v=True).saveas(out_base)
tx_out_file = vcfutils.bgzip_and_index(out_base, dd.get_config(data))
return out_file | [
"def",
"filter_junction_variants",
"(",
"vrn_file",
",",
"data",
")",
":",
"SJ_BP_MASK",
"=",
"10",
"vrn_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"vrn_file",
")",
"splicebed",
"=",
"dd",
".",
"get_junction_bed",
"(",
"data",
")",
"if",
"not",
"... | filter out variants within 10 basepairs of a splice junction, these are
very prone to being false positives with RNA-seq data | [
"filter",
"out",
"variants",
"within",
"10",
"basepairs",
"of",
"a",
"splice",
"junction",
"these",
"are",
"very",
"prone",
"to",
"being",
"false",
"positives",
"with",
"RNA",
"-",
"seq",
"data"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/variation.py#L186-L207 | train | 218,442 |
bcbio/bcbio-nextgen | scripts/bcbio_nextgen_install.py | _clean_args | def _clean_args(sys_argv, args):
"""Remove data directory from arguments to pass to upgrade function.
"""
base = [x for x in sys_argv if
x.startswith("-") or not args.datadir == os.path.abspath(os.path.expanduser(x))]
# Remove installer only options we don't pass on
base = [x for x in base if x not in set(["--minimize-disk"])]
if "--nodata" in base:
base.remove("--nodata")
else:
base.append("--data")
return base | python | def _clean_args(sys_argv, args):
"""Remove data directory from arguments to pass to upgrade function.
"""
base = [x for x in sys_argv if
x.startswith("-") or not args.datadir == os.path.abspath(os.path.expanduser(x))]
# Remove installer only options we don't pass on
base = [x for x in base if x not in set(["--minimize-disk"])]
if "--nodata" in base:
base.remove("--nodata")
else:
base.append("--data")
return base | [
"def",
"_clean_args",
"(",
"sys_argv",
",",
"args",
")",
":",
"base",
"=",
"[",
"x",
"for",
"x",
"in",
"sys_argv",
"if",
"x",
".",
"startswith",
"(",
"\"-\"",
")",
"or",
"not",
"args",
".",
"datadir",
"==",
"os",
".",
"path",
".",
"abspath",
"(",
... | Remove data directory from arguments to pass to upgrade function. | [
"Remove",
"data",
"directory",
"from",
"arguments",
"to",
"pass",
"to",
"upgrade",
"function",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen_install.py#L53-L64 | train | 218,443 |
bcbio/bcbio-nextgen | scripts/bcbio_nextgen_install.py | setup_manifest | def setup_manifest(datadir):
"""Create barebones manifest to be filled in during update
"""
manifest_dir = os.path.join(datadir, "manifest")
if not os.path.exists(manifest_dir):
os.makedirs(manifest_dir) | python | def setup_manifest(datadir):
"""Create barebones manifest to be filled in during update
"""
manifest_dir = os.path.join(datadir, "manifest")
if not os.path.exists(manifest_dir):
os.makedirs(manifest_dir) | [
"def",
"setup_manifest",
"(",
"datadir",
")",
":",
"manifest_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"datadir",
",",
"\"manifest\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"manifest_dir",
")",
":",
"os",
".",
"makedirs",
"(",
... | Create barebones manifest to be filled in during update | [
"Create",
"barebones",
"manifest",
"to",
"be",
"filled",
"in",
"during",
"update"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen_install.py#L139-L144 | train | 218,444 |
bcbio/bcbio-nextgen | scripts/bcbio_nextgen_install.py | write_system_config | def write_system_config(base_url, datadir, tooldir):
"""Write a bcbio_system.yaml configuration file with tool information.
"""
out_file = os.path.join(datadir, "galaxy", os.path.basename(base_url))
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
if os.path.exists(out_file):
# if no tool directory and exists, do not overwrite
if tooldir is None:
return out_file
else:
bak_file = out_file + ".bak%s" % (datetime.datetime.now().strftime("%Y%M%d_%H%M"))
shutil.copy(out_file, bak_file)
if tooldir:
java_basedir = os.path.join(tooldir, "share", "java")
rewrite_ignore = ("log",)
with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
with open(out_file, "w") as out_handle:
in_resources = False
in_prog = None
for line in (l.decode("utf-8") for l in in_handle):
if line[0] != " ":
in_resources = line.startswith("resources")
in_prog = None
elif (in_resources and line[:2] == " " and line[2] != " "
and not line.strip().startswith(rewrite_ignore)):
in_prog = line.split(":")[0].strip()
# Update java directories to point to install directory, avoid special cases
elif line.strip().startswith("dir:") and in_prog and in_prog not in ["log", "tmp"]:
final_dir = os.path.basename(line.split()[-1])
if tooldir:
line = "%s: %s\n" % (line.split(":")[0],
os.path.join(java_basedir, final_dir))
in_prog = None
elif line.startswith("galaxy"):
line = "# %s" % line
out_handle.write(line)
return out_file | python | def write_system_config(base_url, datadir, tooldir):
"""Write a bcbio_system.yaml configuration file with tool information.
"""
out_file = os.path.join(datadir, "galaxy", os.path.basename(base_url))
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
if os.path.exists(out_file):
# if no tool directory and exists, do not overwrite
if tooldir is None:
return out_file
else:
bak_file = out_file + ".bak%s" % (datetime.datetime.now().strftime("%Y%M%d_%H%M"))
shutil.copy(out_file, bak_file)
if tooldir:
java_basedir = os.path.join(tooldir, "share", "java")
rewrite_ignore = ("log",)
with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
with open(out_file, "w") as out_handle:
in_resources = False
in_prog = None
for line in (l.decode("utf-8") for l in in_handle):
if line[0] != " ":
in_resources = line.startswith("resources")
in_prog = None
elif (in_resources and line[:2] == " " and line[2] != " "
and not line.strip().startswith(rewrite_ignore)):
in_prog = line.split(":")[0].strip()
# Update java directories to point to install directory, avoid special cases
elif line.strip().startswith("dir:") and in_prog and in_prog not in ["log", "tmp"]:
final_dir = os.path.basename(line.split()[-1])
if tooldir:
line = "%s: %s\n" % (line.split(":")[0],
os.path.join(java_basedir, final_dir))
in_prog = None
elif line.startswith("galaxy"):
line = "# %s" % line
out_handle.write(line)
return out_file | [
"def",
"write_system_config",
"(",
"base_url",
",",
"datadir",
",",
"tooldir",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"datadir",
",",
"\"galaxy\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"base_url",
")",
")",
"if",
"not"... | Write a bcbio_system.yaml configuration file with tool information. | [
"Write",
"a",
"bcbio_system",
".",
"yaml",
"configuration",
"file",
"with",
"tool",
"information",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen_install.py#L146-L183 | train | 218,445 |
bcbio/bcbio-nextgen | scripts/bcbio_nextgen_install.py | check_dependencies | def check_dependencies():
"""Ensure required tools for installation are present.
"""
print("Checking required dependencies")
for dep, msg in [(["git", "--version"], "Git (http://git-scm.com/)"),
(["wget", "--version"], "wget"),
(["bzip2", "-h"], "bzip2")]:
try:
p = subprocess.Popen(dep, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
out, code = p.communicate()
except OSError:
out = "Executable not found"
code = 127
if code == 127:
raise OSError("bcbio-nextgen installer requires %s\n%s" % (msg, out)) | python | def check_dependencies():
"""Ensure required tools for installation are present.
"""
print("Checking required dependencies")
for dep, msg in [(["git", "--version"], "Git (http://git-scm.com/)"),
(["wget", "--version"], "wget"),
(["bzip2", "-h"], "bzip2")]:
try:
p = subprocess.Popen(dep, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
out, code = p.communicate()
except OSError:
out = "Executable not found"
code = 127
if code == 127:
raise OSError("bcbio-nextgen installer requires %s\n%s" % (msg, out)) | [
"def",
"check_dependencies",
"(",
")",
":",
"print",
"(",
"\"Checking required dependencies\"",
")",
"for",
"dep",
",",
"msg",
"in",
"[",
"(",
"[",
"\"git\"",
",",
"\"--version\"",
"]",
",",
"\"Git (http://git-scm.com/)\"",
")",
",",
"(",
"[",
"\"wget\"",
",",... | Ensure required tools for installation are present. | [
"Ensure",
"required",
"tools",
"for",
"installation",
"are",
"present",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen_install.py#L207-L221 | train | 218,446 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | process | def process(args):
"""Run the function in args.name given arguments in args.argfile.
"""
# Set environment to standard to use periods for decimals and avoid localization
os.environ["LC_ALL"] = "C"
os.environ["LC"] = "C"
os.environ["LANG"] = "C"
setpath.prepend_bcbiopath()
try:
fn = getattr(multitasks, args.name)
except AttributeError:
raise AttributeError("Did not find exposed function in bcbio.distributed.multitasks named '%s'" % args.name)
if args.moreargs or args.raw:
fnargs = [args.argfile] + args.moreargs
work_dir = None
argfile = None
else:
with open(args.argfile) as in_handle:
fnargs = yaml.safe_load(in_handle)
work_dir = os.path.dirname(args.argfile)
fnargs = config_utils.merge_resources(fnargs)
argfile = args.outfile if args.outfile else "%s-out%s" % os.path.splitext(args.argfile)
if not work_dir:
work_dir = os.getcwd()
if len(fnargs) > 0 and fnargs[0] == "cwl":
fnargs, parallel, out_keys, input_files = _world_from_cwl(args.name, fnargs[1:], work_dir)
# Can remove this awkward Docker merge when we do not need custom GATK3 installs
fnargs = config_utils.merge_resources(fnargs)
argfile = os.path.join(work_dir, "cwl.output.json")
else:
parallel, out_keys, input_files = None, {}, []
with utils.chdir(work_dir):
with contextlib.closing(log.setup_local_logging(parallel={"wrapper": "runfn"})):
try:
out = fn(*fnargs)
except:
logger.exception()
raise
finally:
# Clean up any copied and unpacked workflow inputs, avoiding extra disk usage
wf_input_dir = os.path.join(work_dir, "wf-inputs")
if os.path.exists(wf_input_dir) and os.path.isdir(wf_input_dir):
shutil.rmtree(wf_input_dir)
if argfile:
try:
_write_out_argfile(argfile, out, fnargs, parallel, out_keys, input_files, work_dir)
except:
logger.exception()
raise | python | def process(args):
"""Run the function in args.name given arguments in args.argfile.
"""
# Set environment to standard to use periods for decimals and avoid localization
os.environ["LC_ALL"] = "C"
os.environ["LC"] = "C"
os.environ["LANG"] = "C"
setpath.prepend_bcbiopath()
try:
fn = getattr(multitasks, args.name)
except AttributeError:
raise AttributeError("Did not find exposed function in bcbio.distributed.multitasks named '%s'" % args.name)
if args.moreargs or args.raw:
fnargs = [args.argfile] + args.moreargs
work_dir = None
argfile = None
else:
with open(args.argfile) as in_handle:
fnargs = yaml.safe_load(in_handle)
work_dir = os.path.dirname(args.argfile)
fnargs = config_utils.merge_resources(fnargs)
argfile = args.outfile if args.outfile else "%s-out%s" % os.path.splitext(args.argfile)
if not work_dir:
work_dir = os.getcwd()
if len(fnargs) > 0 and fnargs[0] == "cwl":
fnargs, parallel, out_keys, input_files = _world_from_cwl(args.name, fnargs[1:], work_dir)
# Can remove this awkward Docker merge when we do not need custom GATK3 installs
fnargs = config_utils.merge_resources(fnargs)
argfile = os.path.join(work_dir, "cwl.output.json")
else:
parallel, out_keys, input_files = None, {}, []
with utils.chdir(work_dir):
with contextlib.closing(log.setup_local_logging(parallel={"wrapper": "runfn"})):
try:
out = fn(*fnargs)
except:
logger.exception()
raise
finally:
# Clean up any copied and unpacked workflow inputs, avoiding extra disk usage
wf_input_dir = os.path.join(work_dir, "wf-inputs")
if os.path.exists(wf_input_dir) and os.path.isdir(wf_input_dir):
shutil.rmtree(wf_input_dir)
if argfile:
try:
_write_out_argfile(argfile, out, fnargs, parallel, out_keys, input_files, work_dir)
except:
logger.exception()
raise | [
"def",
"process",
"(",
"args",
")",
":",
"# Set environment to standard to use periods for decimals and avoid localization",
"os",
".",
"environ",
"[",
"\"LC_ALL\"",
"]",
"=",
"\"C\"",
"os",
".",
"environ",
"[",
"\"LC\"",
"]",
"=",
"\"C\"",
"os",
".",
"environ",
"... | Run the function in args.name given arguments in args.argfile. | [
"Run",
"the",
"function",
"in",
"args",
".",
"name",
"given",
"arguments",
"in",
"args",
".",
"argfile",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L23-L71 | train | 218,447 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _cwlvar_to_wdl | def _cwlvar_to_wdl(var):
"""Convert a CWL output object into a WDL output.
This flattens files and other special CWL outputs that are
plain strings in WDL.
"""
if isinstance(var, (list, tuple)):
return [_cwlvar_to_wdl(x) for x in var]
elif isinstance(var, dict):
assert var.get("class") == "File", var
# XXX handle secondary files
return var.get("path") or var["value"]
else:
return var | python | def _cwlvar_to_wdl(var):
"""Convert a CWL output object into a WDL output.
This flattens files and other special CWL outputs that are
plain strings in WDL.
"""
if isinstance(var, (list, tuple)):
return [_cwlvar_to_wdl(x) for x in var]
elif isinstance(var, dict):
assert var.get("class") == "File", var
# XXX handle secondary files
return var.get("path") or var["value"]
else:
return var | [
"def",
"_cwlvar_to_wdl",
"(",
"var",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"_cwlvar_to_wdl",
"(",
"x",
")",
"for",
"x",
"in",
"var",
"]",
"elif",
"isinstance",
"(",
"var",
",",
"dict... | Convert a CWL output object into a WDL output.
This flattens files and other special CWL outputs that are
plain strings in WDL. | [
"Convert",
"a",
"CWL",
"output",
"object",
"into",
"a",
"WDL",
"output",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L73-L86 | train | 218,448 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _write_out_argfile | def _write_out_argfile(argfile, out, fnargs, parallel, out_keys, input_files, work_dir):
"""Write output argfile, preparing a CWL ready JSON or YAML representation of the world.
"""
with open(argfile, "w") as out_handle:
if argfile.endswith(".json"):
record_name, record_attrs = _get_record_attrs(out_keys)
if record_name:
if parallel in ["multi-batch"]:
recs = _nested_cwl_record(out, record_attrs, input_files)
elif parallel in ["single-split", "multi-combined", "multi-parallel", "batch-single",
"single-single"]:
recs = [_collapse_to_cwl_record_single(utils.to_single_data(xs), record_attrs, input_files)
for xs in out]
else:
samples = [utils.to_single_data(xs) for xs in out]
recs = [_collapse_to_cwl_record(samples, record_attrs, input_files)]
json.dump(_combine_cwl_records(recs, record_name, parallel),
out_handle, sort_keys=True, indent=4, separators=(', ', ': '))
elif parallel in ["single-split", "multi-combined", "batch-split"]:
json.dump(_convert_to_cwl_json([utils.to_single_data(xs) for xs in out], fnargs, input_files),
out_handle, sort_keys=True, indent=4, separators=(', ', ': '))
else:
json.dump(_convert_to_cwl_json(utils.to_single_data(utils.to_single_data(out)), fnargs, input_files),
out_handle, sort_keys=True, indent=4, separators=(', ', ': '))
else:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) | python | def _write_out_argfile(argfile, out, fnargs, parallel, out_keys, input_files, work_dir):
"""Write output argfile, preparing a CWL ready JSON or YAML representation of the world.
"""
with open(argfile, "w") as out_handle:
if argfile.endswith(".json"):
record_name, record_attrs = _get_record_attrs(out_keys)
if record_name:
if parallel in ["multi-batch"]:
recs = _nested_cwl_record(out, record_attrs, input_files)
elif parallel in ["single-split", "multi-combined", "multi-parallel", "batch-single",
"single-single"]:
recs = [_collapse_to_cwl_record_single(utils.to_single_data(xs), record_attrs, input_files)
for xs in out]
else:
samples = [utils.to_single_data(xs) for xs in out]
recs = [_collapse_to_cwl_record(samples, record_attrs, input_files)]
json.dump(_combine_cwl_records(recs, record_name, parallel),
out_handle, sort_keys=True, indent=4, separators=(', ', ': '))
elif parallel in ["single-split", "multi-combined", "batch-split"]:
json.dump(_convert_to_cwl_json([utils.to_single_data(xs) for xs in out], fnargs, input_files),
out_handle, sort_keys=True, indent=4, separators=(', ', ': '))
else:
json.dump(_convert_to_cwl_json(utils.to_single_data(utils.to_single_data(out)), fnargs, input_files),
out_handle, sort_keys=True, indent=4, separators=(', ', ': '))
else:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) | [
"def",
"_write_out_argfile",
"(",
"argfile",
",",
"out",
",",
"fnargs",
",",
"parallel",
",",
"out_keys",
",",
"input_files",
",",
"work_dir",
")",
":",
"with",
"open",
"(",
"argfile",
",",
"\"w\"",
")",
"as",
"out_handle",
":",
"if",
"argfile",
".",
"en... | Write output argfile, preparing a CWL ready JSON or YAML representation of the world. | [
"Write",
"output",
"argfile",
"preparing",
"a",
"CWL",
"ready",
"JSON",
"or",
"YAML",
"representation",
"of",
"the",
"world",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L88-L113 | train | 218,449 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _get_record_attrs | def _get_record_attrs(out_keys):
"""Check for records, a single key plus output attributes.
"""
if len(out_keys) == 1:
attr = list(out_keys.keys())[0]
if out_keys[attr]:
return attr, out_keys[attr]
return None, None | python | def _get_record_attrs(out_keys):
"""Check for records, a single key plus output attributes.
"""
if len(out_keys) == 1:
attr = list(out_keys.keys())[0]
if out_keys[attr]:
return attr, out_keys[attr]
return None, None | [
"def",
"_get_record_attrs",
"(",
"out_keys",
")",
":",
"if",
"len",
"(",
"out_keys",
")",
"==",
"1",
":",
"attr",
"=",
"list",
"(",
"out_keys",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"if",
"out_keys",
"[",
"attr",
"]",
":",
"return",
"attr",
... | Check for records, a single key plus output attributes. | [
"Check",
"for",
"records",
"a",
"single",
"key",
"plus",
"output",
"attributes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L115-L122 | train | 218,450 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _add_resources | def _add_resources(data, runtime):
"""Merge input resources with current CWL runtime parameters.
"""
if "config" not in data:
data["config"] = {}
# Convert input resources, which may be a JSON string
resources = data.get("resources", {}) or {}
if isinstance(resources, six.string_types) and resources.startswith(("{", "[")):
resources = json.loads(resources)
data["resources"] = resources
assert isinstance(resources, dict), (resources, data)
data["config"]["resources"] = resources
# Add in memory and core usage from CWL
memory = int(float(runtime["ram"]) / float(runtime["cores"]))
data["config"]["resources"].update({"default": {"cores": int(runtime["cores"]),
"memory": "%sM" % memory,
"jvm_opts": ["-Xms%sm" % min(1000, memory // 2),
"-Xmx%sm" % memory]}})
data["config"]["algorithm"]["num_cores"] = int(runtime["cores"])
return data | python | def _add_resources(data, runtime):
"""Merge input resources with current CWL runtime parameters.
"""
if "config" not in data:
data["config"] = {}
# Convert input resources, which may be a JSON string
resources = data.get("resources", {}) or {}
if isinstance(resources, six.string_types) and resources.startswith(("{", "[")):
resources = json.loads(resources)
data["resources"] = resources
assert isinstance(resources, dict), (resources, data)
data["config"]["resources"] = resources
# Add in memory and core usage from CWL
memory = int(float(runtime["ram"]) / float(runtime["cores"]))
data["config"]["resources"].update({"default": {"cores": int(runtime["cores"]),
"memory": "%sM" % memory,
"jvm_opts": ["-Xms%sm" % min(1000, memory // 2),
"-Xmx%sm" % memory]}})
data["config"]["algorithm"]["num_cores"] = int(runtime["cores"])
return data | [
"def",
"_add_resources",
"(",
"data",
",",
"runtime",
")",
":",
"if",
"\"config\"",
"not",
"in",
"data",
":",
"data",
"[",
"\"config\"",
"]",
"=",
"{",
"}",
"# Convert input resources, which may be a JSON string",
"resources",
"=",
"data",
".",
"get",
"(",
"\"... | Merge input resources with current CWL runtime parameters. | [
"Merge",
"input",
"resources",
"with",
"current",
"CWL",
"runtime",
"parameters",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L124-L143 | train | 218,451 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _world_from_cwl | def _world_from_cwl(fn_name, fnargs, work_dir):
"""Reconstitute a bcbio world data object from flattened CWL-compatible inputs.
Converts the flat CWL representation into a nested bcbio world dictionary.
Handles single sample inputs (returning a single world object) and multi-sample
runs (returning a list of individual samples to get processed together).
"""
parallel = None
output_cwl_keys = None
runtime = {}
out = []
data = {}
passed_keys = []
for fnarg in fnargs:
key, val = fnarg.split("=")
# extra values pulling in nested indexes
if key == "ignore":
continue
if key == "sentinel_parallel":
parallel = val
continue
if key == "sentinel_runtime":
runtime = dict(tz.partition(2, val.split(",")))
continue
if key == "sentinel_outputs":
output_cwl_keys = _parse_output_keys(val)
continue
if key == "sentinel_inputs":
input_order = collections.OrderedDict([x.split(":") for x in val.split(",")])
continue
else:
assert key not in passed_keys, "Multiple keys should be handled via JSON records"
passed_keys.append(key)
key = key.split("__")
data = _update_nested(key, _convert_value(val), data)
if data:
out.append(_finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime))
# Read inputs from standard files instead of command line
assert os.path.exists(os.path.join(work_dir, "cwl.inputs.json"))
out, input_files = _read_from_cwlinput(os.path.join(work_dir, "cwl.inputs.json"), work_dir, runtime, parallel,
input_order, output_cwl_keys)
if parallel in ["single-parallel", "single-merge", "multi-parallel", "multi-combined", "multi-batch",
"batch-split", "batch-parallel", "batch-merge", "batch-single"]:
out = [out]
else:
assert len(out) == 1, "%s\n%s" % (pprint.pformat(out), pprint.pformat(fnargs))
return out, parallel, output_cwl_keys, input_files | python | def _world_from_cwl(fn_name, fnargs, work_dir):
"""Reconstitute a bcbio world data object from flattened CWL-compatible inputs.
Converts the flat CWL representation into a nested bcbio world dictionary.
Handles single sample inputs (returning a single world object) and multi-sample
runs (returning a list of individual samples to get processed together).
"""
parallel = None
output_cwl_keys = None
runtime = {}
out = []
data = {}
passed_keys = []
for fnarg in fnargs:
key, val = fnarg.split("=")
# extra values pulling in nested indexes
if key == "ignore":
continue
if key == "sentinel_parallel":
parallel = val
continue
if key == "sentinel_runtime":
runtime = dict(tz.partition(2, val.split(",")))
continue
if key == "sentinel_outputs":
output_cwl_keys = _parse_output_keys(val)
continue
if key == "sentinel_inputs":
input_order = collections.OrderedDict([x.split(":") for x in val.split(",")])
continue
else:
assert key not in passed_keys, "Multiple keys should be handled via JSON records"
passed_keys.append(key)
key = key.split("__")
data = _update_nested(key, _convert_value(val), data)
if data:
out.append(_finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime))
# Read inputs from standard files instead of command line
assert os.path.exists(os.path.join(work_dir, "cwl.inputs.json"))
out, input_files = _read_from_cwlinput(os.path.join(work_dir, "cwl.inputs.json"), work_dir, runtime, parallel,
input_order, output_cwl_keys)
if parallel in ["single-parallel", "single-merge", "multi-parallel", "multi-combined", "multi-batch",
"batch-split", "batch-parallel", "batch-merge", "batch-single"]:
out = [out]
else:
assert len(out) == 1, "%s\n%s" % (pprint.pformat(out), pprint.pformat(fnargs))
return out, parallel, output_cwl_keys, input_files | [
"def",
"_world_from_cwl",
"(",
"fn_name",
",",
"fnargs",
",",
"work_dir",
")",
":",
"parallel",
"=",
"None",
"output_cwl_keys",
"=",
"None",
"runtime",
"=",
"{",
"}",
"out",
"=",
"[",
"]",
"data",
"=",
"{",
"}",
"passed_keys",
"=",
"[",
"]",
"for",
"... | Reconstitute a bcbio world data object from flattened CWL-compatible inputs.
Converts the flat CWL representation into a nested bcbio world dictionary.
Handles single sample inputs (returning a single world object) and multi-sample
runs (returning a list of individual samples to get processed together). | [
"Reconstitute",
"a",
"bcbio",
"world",
"data",
"object",
"from",
"flattened",
"CWL",
"-",
"compatible",
"inputs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L145-L194 | train | 218,452 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _parse_output_keys | def _parse_output_keys(val):
"""Parse expected output keys from string, handling records.
"""
out = {}
for k in val.split(","):
# record output
if ":" in k:
name, attrs = k.split(":")
out[name] = attrs.split(";")
else:
out[k] = None
return out | python | def _parse_output_keys(val):
"""Parse expected output keys from string, handling records.
"""
out = {}
for k in val.split(","):
# record output
if ":" in k:
name, attrs = k.split(":")
out[name] = attrs.split(";")
else:
out[k] = None
return out | [
"def",
"_parse_output_keys",
"(",
"val",
")",
":",
"out",
"=",
"{",
"}",
"for",
"k",
"in",
"val",
".",
"split",
"(",
"\",\"",
")",
":",
"# record output",
"if",
"\":\"",
"in",
"k",
":",
"name",
",",
"attrs",
"=",
"k",
".",
"split",
"(",
"\":\"",
... | Parse expected output keys from string, handling records. | [
"Parse",
"expected",
"output",
"keys",
"from",
"string",
"handling",
"records",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L196-L207 | train | 218,453 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _find_input_files | def _find_input_files(var, out):
"""Find input files within the given CWL object.
"""
if isinstance(var, (list, tuple)):
for x in var:
out = _find_input_files(x, out)
elif isinstance(var, dict):
if var.get("class") == "File":
out.append(var["path"])
out = _find_input_files(var.get("secondaryFiles", []), out)
for key, val in var.items():
out = _find_input_files(val, out)
return out | python | def _find_input_files(var, out):
"""Find input files within the given CWL object.
"""
if isinstance(var, (list, tuple)):
for x in var:
out = _find_input_files(x, out)
elif isinstance(var, dict):
if var.get("class") == "File":
out.append(var["path"])
out = _find_input_files(var.get("secondaryFiles", []), out)
for key, val in var.items():
out = _find_input_files(val, out)
return out | [
"def",
"_find_input_files",
"(",
"var",
",",
"out",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"x",
"in",
"var",
":",
"out",
"=",
"_find_input_files",
"(",
"x",
",",
"out",
")",
"elif",
"isinstan... | Find input files within the given CWL object. | [
"Find",
"input",
"files",
"within",
"the",
"given",
"CWL",
"object",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L209-L221 | train | 218,454 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _read_from_cwlinput | def _read_from_cwlinput(in_file, work_dir, runtime, parallel, input_order, output_cwl_keys):
"""Read data records from a JSON dump of inputs. Avoids command line flattening of records.
"""
with open(in_file) as in_handle:
inputs = json.load(in_handle)
items_by_key = {}
input_files = []
passed_keys = set([])
for key, input_val in ((k, v) for (k, v) in inputs.items() if not k.startswith(("sentinel", "ignore"))):
if key.endswith("_toolinput"):
key = key.replace("_toolinput", "")
if input_order[key] == "record":
cur_keys, items = _read_cwl_record(input_val)
passed_keys |= cur_keys
items_by_key[key] = items
else:
items_by_key[tuple(key.split("__"))] = _cwlvar_to_wdl(input_val)
input_files = _find_input_files(input_val, input_files)
prepped = _merge_cwlinputs(items_by_key, input_order, parallel)
out = []
for data in prepped:
if isinstance(data, (list, tuple)):
out.append([_finalize_cwl_in(utils.to_single_data(x), work_dir, list(passed_keys),
output_cwl_keys, runtime) for x in data])
else:
out.append(_finalize_cwl_in(data, work_dir, list(passed_keys), output_cwl_keys, runtime))
return out, input_files | python | def _read_from_cwlinput(in_file, work_dir, runtime, parallel, input_order, output_cwl_keys):
"""Read data records from a JSON dump of inputs. Avoids command line flattening of records.
"""
with open(in_file) as in_handle:
inputs = json.load(in_handle)
items_by_key = {}
input_files = []
passed_keys = set([])
for key, input_val in ((k, v) for (k, v) in inputs.items() if not k.startswith(("sentinel", "ignore"))):
if key.endswith("_toolinput"):
key = key.replace("_toolinput", "")
if input_order[key] == "record":
cur_keys, items = _read_cwl_record(input_val)
passed_keys |= cur_keys
items_by_key[key] = items
else:
items_by_key[tuple(key.split("__"))] = _cwlvar_to_wdl(input_val)
input_files = _find_input_files(input_val, input_files)
prepped = _merge_cwlinputs(items_by_key, input_order, parallel)
out = []
for data in prepped:
if isinstance(data, (list, tuple)):
out.append([_finalize_cwl_in(utils.to_single_data(x), work_dir, list(passed_keys),
output_cwl_keys, runtime) for x in data])
else:
out.append(_finalize_cwl_in(data, work_dir, list(passed_keys), output_cwl_keys, runtime))
return out, input_files | [
"def",
"_read_from_cwlinput",
"(",
"in_file",
",",
"work_dir",
",",
"runtime",
",",
"parallel",
",",
"input_order",
",",
"output_cwl_keys",
")",
":",
"with",
"open",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"inputs",
"=",
"json",
".",
"load",
"(",
"in_... | Read data records from a JSON dump of inputs. Avoids command line flattening of records. | [
"Read",
"data",
"records",
"from",
"a",
"JSON",
"dump",
"of",
"inputs",
".",
"Avoids",
"command",
"line",
"flattening",
"of",
"records",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L223-L249 | train | 218,455 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _maybe_nest_bare_single | def _maybe_nest_bare_single(items_by_key, parallel):
"""Nest single inputs to avoid confusing single items and lists like files.
"""
if (parallel == "multi-parallel" and
(sum([1 for x in items_by_key.values() if not _is_nested_item(x)]) >=
sum([1 for x in items_by_key.values() if _is_nested_item(x)]))):
out = {}
for k, v in items_by_key.items():
out[k] = [v]
return out
else:
return items_by_key | python | def _maybe_nest_bare_single(items_by_key, parallel):
"""Nest single inputs to avoid confusing single items and lists like files.
"""
if (parallel == "multi-parallel" and
(sum([1 for x in items_by_key.values() if not _is_nested_item(x)]) >=
sum([1 for x in items_by_key.values() if _is_nested_item(x)]))):
out = {}
for k, v in items_by_key.items():
out[k] = [v]
return out
else:
return items_by_key | [
"def",
"_maybe_nest_bare_single",
"(",
"items_by_key",
",",
"parallel",
")",
":",
"if",
"(",
"parallel",
"==",
"\"multi-parallel\"",
"and",
"(",
"sum",
"(",
"[",
"1",
"for",
"x",
"in",
"items_by_key",
".",
"values",
"(",
")",
"if",
"not",
"_is_nested_item",
... | Nest single inputs to avoid confusing single items and lists like files. | [
"Nest",
"single",
"inputs",
"to",
"avoid",
"confusing",
"single",
"items",
"and",
"lists",
"like",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L254-L265 | train | 218,456 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _check_for_single_nested | def _check_for_single_nested(target, items_by_key, input_order):
"""Check for single nested inputs that match our target count and unnest.
Handles complex var inputs where some have an extra layer of nesting.
"""
out = utils.deepish_copy(items_by_key)
for (k, t) in input_order.items():
if t == "var":
v = items_by_key[tuple(k.split("__"))]
if _is_nested_single(v, target):
out[tuple(k.split("__"))] = v[0]
return out | python | def _check_for_single_nested(target, items_by_key, input_order):
"""Check for single nested inputs that match our target count and unnest.
Handles complex var inputs where some have an extra layer of nesting.
"""
out = utils.deepish_copy(items_by_key)
for (k, t) in input_order.items():
if t == "var":
v = items_by_key[tuple(k.split("__"))]
if _is_nested_single(v, target):
out[tuple(k.split("__"))] = v[0]
return out | [
"def",
"_check_for_single_nested",
"(",
"target",
",",
"items_by_key",
",",
"input_order",
")",
":",
"out",
"=",
"utils",
".",
"deepish_copy",
"(",
"items_by_key",
")",
"for",
"(",
"k",
",",
"t",
")",
"in",
"input_order",
".",
"items",
"(",
")",
":",
"if... | Check for single nested inputs that match our target count and unnest.
Handles complex var inputs where some have an extra layer of nesting. | [
"Check",
"for",
"single",
"nested",
"inputs",
"that",
"match",
"our",
"target",
"count",
"and",
"unnest",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L273-L284 | train | 218,457 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _concat_records | def _concat_records(items_by_key, input_order):
"""Concatenate records into a single key to avoid merging.
Handles heterogeneous records that will then be sorted out in
the processing fuction.
"""
all_records = []
for (k, t) in input_order.items():
if t == "record":
all_records.append(k)
out_items_by_key = utils.deepish_copy(items_by_key)
out_input_order = utils.deepish_copy(input_order)
if len(all_records) > 1:
final_k = all_records[0]
final_v = items_by_key[final_k]
for k in all_records[1:]:
final_v += items_by_key[k]
del out_items_by_key[k]
del out_input_order[k]
out_items_by_key[final_k] = final_v
return out_items_by_key, out_input_order | python | def _concat_records(items_by_key, input_order):
"""Concatenate records into a single key to avoid merging.
Handles heterogeneous records that will then be sorted out in
the processing fuction.
"""
all_records = []
for (k, t) in input_order.items():
if t == "record":
all_records.append(k)
out_items_by_key = utils.deepish_copy(items_by_key)
out_input_order = utils.deepish_copy(input_order)
if len(all_records) > 1:
final_k = all_records[0]
final_v = items_by_key[final_k]
for k in all_records[1:]:
final_v += items_by_key[k]
del out_items_by_key[k]
del out_input_order[k]
out_items_by_key[final_k] = final_v
return out_items_by_key, out_input_order | [
"def",
"_concat_records",
"(",
"items_by_key",
",",
"input_order",
")",
":",
"all_records",
"=",
"[",
"]",
"for",
"(",
"k",
",",
"t",
")",
"in",
"input_order",
".",
"items",
"(",
")",
":",
"if",
"t",
"==",
"\"record\"",
":",
"all_records",
".",
"append... | Concatenate records into a single key to avoid merging.
Handles heterogeneous records that will then be sorted out in
the processing fuction. | [
"Concatenate",
"records",
"into",
"a",
"single",
"key",
"to",
"avoid",
"merging",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L286-L306 | train | 218,458 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _nest_vars_in_rec | def _nest_vars_in_rec(var_items, rec_items, input_order, items_by_key, parallel):
"""Nest multiple variable inputs into a single record or list of batch records.
Custom CWL implementations extract and merge these.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
rec_items = list(rec_items)[0]
if ((rec_items == 1 and var_items > 1) or parallel.startswith("batch")):
num_items = set([rec_items])
for var_key in (k for (k, t) in input_order.items() if t != "record"):
var_key = tuple(var_key.split("__"))
items_by_key[var_key] = [items_by_key[var_key]] * rec_items
else:
assert var_items == rec_items, (var_items, rec_items)
return items_by_key, num_items | python | def _nest_vars_in_rec(var_items, rec_items, input_order, items_by_key, parallel):
"""Nest multiple variable inputs into a single record or list of batch records.
Custom CWL implementations extract and merge these.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
rec_items = list(rec_items)[0]
if ((rec_items == 1 and var_items > 1) or parallel.startswith("batch")):
num_items = set([rec_items])
for var_key in (k for (k, t) in input_order.items() if t != "record"):
var_key = tuple(var_key.split("__"))
items_by_key[var_key] = [items_by_key[var_key]] * rec_items
else:
assert var_items == rec_items, (var_items, rec_items)
return items_by_key, num_items | [
"def",
"_nest_vars_in_rec",
"(",
"var_items",
",",
"rec_items",
",",
"input_order",
",",
"items_by_key",
",",
"parallel",
")",
":",
"num_items",
"=",
"var_items",
"var_items",
"=",
"list",
"(",
"var_items",
")",
"[",
"0",
"]",
"if",
"rec_items",
":",
"rec_it... | Nest multiple variable inputs into a single record or list of batch records.
Custom CWL implementations extract and merge these. | [
"Nest",
"multiple",
"variable",
"inputs",
"into",
"a",
"single",
"record",
"or",
"list",
"of",
"batch",
"records",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L364-L380 | train | 218,459 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _expand_rec_to_vars | def _expand_rec_to_vars(var_items, rec_items, input_order, items_by_key, parallel):
"""Expand record to apply to number of variants.
Alternative approach to _nest_vars_in_rec
to combining a single record with multiple variants.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
for rec_key in (k for (k, t) in input_order.items() if t == "record"):
rec_vals = items_by_key[rec_key]
if len(rec_vals) == 1 and var_items > 1:
items_by_key[rec_key] = rec_vals * var_items
else:
assert var_items == len(rec_vals), (var_items, rec_vals)
return items_by_key, num_items | python | def _expand_rec_to_vars(var_items, rec_items, input_order, items_by_key, parallel):
"""Expand record to apply to number of variants.
Alternative approach to _nest_vars_in_rec
to combining a single record with multiple variants.
"""
num_items = var_items
var_items = list(var_items)[0]
if rec_items:
for rec_key in (k for (k, t) in input_order.items() if t == "record"):
rec_vals = items_by_key[rec_key]
if len(rec_vals) == 1 and var_items > 1:
items_by_key[rec_key] = rec_vals * var_items
else:
assert var_items == len(rec_vals), (var_items, rec_vals)
return items_by_key, num_items | [
"def",
"_expand_rec_to_vars",
"(",
"var_items",
",",
"rec_items",
",",
"input_order",
",",
"items_by_key",
",",
"parallel",
")",
":",
"num_items",
"=",
"var_items",
"var_items",
"=",
"list",
"(",
"var_items",
")",
"[",
"0",
"]",
"if",
"rec_items",
":",
"for"... | Expand record to apply to number of variants.
Alternative approach to _nest_vars_in_rec
to combining a single record with multiple variants. | [
"Expand",
"record",
"to",
"apply",
"to",
"number",
"of",
"variants",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L382-L397 | train | 218,460 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _read_cwl_record | def _read_cwl_record(rec):
"""Read CWL records, handling multiple nesting and batching cases.
"""
keys = set([])
out = []
if isinstance(rec, dict):
is_batched = all([isinstance(v, (list, tuple)) for v in rec.values()])
cur = [{} for _ in range(len(rec.values()[0]) if is_batched else 1)]
for k in rec.keys():
keys.add(k)
val = rec[k]
val = val if is_batched else [val]
for i, v in enumerate(val):
v = _cwlvar_to_wdl(v)
cur[i] = _update_nested(k.split("__"), v, cur[i])
if is_batched:
out.append(cur)
else:
assert len(cur) == 1
out.append(cur[0])
else:
assert isinstance(rec, (list, tuple))
for sub_rec in rec:
sub_keys, sub_out = _read_cwl_record(sub_rec)
keys |= sub_keys
out.append(sub_out)
return keys, out | python | def _read_cwl_record(rec):
"""Read CWL records, handling multiple nesting and batching cases.
"""
keys = set([])
out = []
if isinstance(rec, dict):
is_batched = all([isinstance(v, (list, tuple)) for v in rec.values()])
cur = [{} for _ in range(len(rec.values()[0]) if is_batched else 1)]
for k in rec.keys():
keys.add(k)
val = rec[k]
val = val if is_batched else [val]
for i, v in enumerate(val):
v = _cwlvar_to_wdl(v)
cur[i] = _update_nested(k.split("__"), v, cur[i])
if is_batched:
out.append(cur)
else:
assert len(cur) == 1
out.append(cur[0])
else:
assert isinstance(rec, (list, tuple))
for sub_rec in rec:
sub_keys, sub_out = _read_cwl_record(sub_rec)
keys |= sub_keys
out.append(sub_out)
return keys, out | [
"def",
"_read_cwl_record",
"(",
"rec",
")",
":",
"keys",
"=",
"set",
"(",
"[",
"]",
")",
"out",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"rec",
",",
"dict",
")",
":",
"is_batched",
"=",
"all",
"(",
"[",
"isinstance",
"(",
"v",
",",
"(",
"list",
... | Read CWL records, handling multiple nesting and batching cases. | [
"Read",
"CWL",
"records",
"handling",
"multiple",
"nesting",
"and",
"batching",
"cases",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L399-L425 | train | 218,461 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _finalize_cwl_in | def _finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime):
"""Finalize data object with inputs from CWL.
"""
data["dirs"] = {"work": work_dir}
if not tz.get_in(["config", "algorithm"], data):
if "config" not in data:
data["config"] = {}
data["config"]["algorithm"] = {}
if "rgnames" not in data and "description" in data:
data["rgnames"] = {"sample": data["description"]}
data["cwl_keys"] = passed_keys
data["output_cwl_keys"] = output_cwl_keys
data = _add_resources(data, runtime)
data = cwlutils.normalize_missing(data)
data = run_info.normalize_world(data)
return data | python | def _finalize_cwl_in(data, work_dir, passed_keys, output_cwl_keys, runtime):
"""Finalize data object with inputs from CWL.
"""
data["dirs"] = {"work": work_dir}
if not tz.get_in(["config", "algorithm"], data):
if "config" not in data:
data["config"] = {}
data["config"]["algorithm"] = {}
if "rgnames" not in data and "description" in data:
data["rgnames"] = {"sample": data["description"]}
data["cwl_keys"] = passed_keys
data["output_cwl_keys"] = output_cwl_keys
data = _add_resources(data, runtime)
data = cwlutils.normalize_missing(data)
data = run_info.normalize_world(data)
return data | [
"def",
"_finalize_cwl_in",
"(",
"data",
",",
"work_dir",
",",
"passed_keys",
",",
"output_cwl_keys",
",",
"runtime",
")",
":",
"data",
"[",
"\"dirs\"",
"]",
"=",
"{",
"\"work\"",
":",
"work_dir",
"}",
"if",
"not",
"tz",
".",
"get_in",
"(",
"[",
"\"config... | Finalize data object with inputs from CWL. | [
"Finalize",
"data",
"object",
"with",
"inputs",
"from",
"CWL",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L427-L442 | train | 218,462 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _convert_value | def _convert_value(val):
"""Handle multiple input type values.
"""
def _is_number(x, op):
try:
op(x)
return True
except ValueError:
return False
if isinstance(val, (list, tuple)):
return [_convert_value(x) for x in val]
elif val is None:
return val
elif _is_number(val, int):
return int(val)
elif _is_number(val, float):
return float(val)
elif val.find(";;") >= 0:
return [_convert_value(v) for v in val.split(";;")]
elif val.startswith(("{", "[")):
# Can get ugly JSON output from CWL with unicode and ' instead of "
# This tries to fix it so parsed correctly by json loader
return json.loads(val.replace("u'", "'").replace("'", '"'))
elif val.lower() == "true":
return True
elif val.lower() == "false":
return False
else:
return val | python | def _convert_value(val):
"""Handle multiple input type values.
"""
def _is_number(x, op):
try:
op(x)
return True
except ValueError:
return False
if isinstance(val, (list, tuple)):
return [_convert_value(x) for x in val]
elif val is None:
return val
elif _is_number(val, int):
return int(val)
elif _is_number(val, float):
return float(val)
elif val.find(";;") >= 0:
return [_convert_value(v) for v in val.split(";;")]
elif val.startswith(("{", "[")):
# Can get ugly JSON output from CWL with unicode and ' instead of "
# This tries to fix it so parsed correctly by json loader
return json.loads(val.replace("u'", "'").replace("'", '"'))
elif val.lower() == "true":
return True
elif val.lower() == "false":
return False
else:
return val | [
"def",
"_convert_value",
"(",
"val",
")",
":",
"def",
"_is_number",
"(",
"x",
",",
"op",
")",
":",
"try",
":",
"op",
"(",
"x",
")",
"return",
"True",
"except",
"ValueError",
":",
"return",
"False",
"if",
"isinstance",
"(",
"val",
",",
"(",
"list",
... | Handle multiple input type values. | [
"Handle",
"multiple",
"input",
"type",
"values",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L444-L472 | train | 218,463 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _get_output_cwl_keys | def _get_output_cwl_keys(fnargs):
"""Retrieve output_cwl_keys from potentially nested input arguments.
"""
for d in utils.flatten(fnargs):
if isinstance(d, dict) and d.get("output_cwl_keys"):
return d["output_cwl_keys"]
raise ValueError("Did not find output_cwl_keys in %s" % (pprint.pformat(fnargs))) | python | def _get_output_cwl_keys(fnargs):
"""Retrieve output_cwl_keys from potentially nested input arguments.
"""
for d in utils.flatten(fnargs):
if isinstance(d, dict) and d.get("output_cwl_keys"):
return d["output_cwl_keys"]
raise ValueError("Did not find output_cwl_keys in %s" % (pprint.pformat(fnargs))) | [
"def",
"_get_output_cwl_keys",
"(",
"fnargs",
")",
":",
"for",
"d",
"in",
"utils",
".",
"flatten",
"(",
"fnargs",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
"and",
"d",
".",
"get",
"(",
"\"output_cwl_keys\"",
")",
":",
"return",
"d",
"... | Retrieve output_cwl_keys from potentially nested input arguments. | [
"Retrieve",
"output_cwl_keys",
"from",
"potentially",
"nested",
"input",
"arguments",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L492-L498 | train | 218,464 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _combine_cwl_records | def _combine_cwl_records(recs, record_name, parallel):
"""Provide a list of nexted CWL records keyed by output key.
Handles batches, where we return a list of records, and single items
where we return one record.
"""
if parallel not in ["multi-batch", "single-split", "multi-combined", "batch-single"]:
assert len(recs) == 1, pprint.pformat(recs)
return {record_name: recs[0]}
else:
return {record_name: recs} | python | def _combine_cwl_records(recs, record_name, parallel):
"""Provide a list of nexted CWL records keyed by output key.
Handles batches, where we return a list of records, and single items
where we return one record.
"""
if parallel not in ["multi-batch", "single-split", "multi-combined", "batch-single"]:
assert len(recs) == 1, pprint.pformat(recs)
return {record_name: recs[0]}
else:
return {record_name: recs} | [
"def",
"_combine_cwl_records",
"(",
"recs",
",",
"record_name",
",",
"parallel",
")",
":",
"if",
"parallel",
"not",
"in",
"[",
"\"multi-batch\"",
",",
"\"single-split\"",
",",
"\"multi-combined\"",
",",
"\"batch-single\"",
"]",
":",
"assert",
"len",
"(",
"recs",... | Provide a list of nexted CWL records keyed by output key.
Handles batches, where we return a list of records, and single items
where we return one record. | [
"Provide",
"a",
"list",
"of",
"nexted",
"CWL",
"records",
"keyed",
"by",
"output",
"key",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L500-L510 | train | 218,465 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _collapse_to_cwl_record_single | def _collapse_to_cwl_record_single(data, want_attrs, input_files):
"""Convert a single sample into a CWL record.
"""
out = {}
for key in want_attrs:
key_parts = key.split("__")
out[key] = _to_cwl(tz.get_in(key_parts, data), input_files)
return out | python | def _collapse_to_cwl_record_single(data, want_attrs, input_files):
"""Convert a single sample into a CWL record.
"""
out = {}
for key in want_attrs:
key_parts = key.split("__")
out[key] = _to_cwl(tz.get_in(key_parts, data), input_files)
return out | [
"def",
"_collapse_to_cwl_record_single",
"(",
"data",
",",
"want_attrs",
",",
"input_files",
")",
":",
"out",
"=",
"{",
"}",
"for",
"key",
"in",
"want_attrs",
":",
"key_parts",
"=",
"key",
".",
"split",
"(",
"\"__\"",
")",
"out",
"[",
"key",
"]",
"=",
... | Convert a single sample into a CWL record. | [
"Convert",
"a",
"single",
"sample",
"into",
"a",
"CWL",
"record",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L512-L519 | train | 218,466 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _nested_cwl_record | def _nested_cwl_record(xs, want_attrs, input_files):
"""Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch.
"""
if isinstance(xs, (list, tuple)):
return [_nested_cwl_record(x, want_attrs, input_files) for x in xs]
else:
assert isinstance(xs, dict), pprint.pformat(xs)
return _collapse_to_cwl_record_single(xs, want_attrs, input_files) | python | def _nested_cwl_record(xs, want_attrs, input_files):
"""Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch.
"""
if isinstance(xs, (list, tuple)):
return [_nested_cwl_record(x, want_attrs, input_files) for x in xs]
else:
assert isinstance(xs, dict), pprint.pformat(xs)
return _collapse_to_cwl_record_single(xs, want_attrs, input_files) | [
"def",
"_nested_cwl_record",
"(",
"xs",
",",
"want_attrs",
",",
"input_files",
")",
":",
"if",
"isinstance",
"(",
"xs",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"_nested_cwl_record",
"(",
"x",
",",
"want_attrs",
",",
"input_files",
"... | Convert arbitrarily nested samples into a nested list of dictionaries.
nests only at the record level, rather than within records. For batching
a top level list is all of the batches and sub-lists are samples within the
batch. | [
"Convert",
"arbitrarily",
"nested",
"samples",
"into",
"a",
"nested",
"list",
"of",
"dictionaries",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L521-L532 | train | 218,467 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _collapse_to_cwl_record | def _collapse_to_cwl_record(samples, want_attrs, input_files):
"""Convert nested samples from batches into a CWL record, based on input keys.
"""
input_keys = sorted(list(set().union(*[d["cwl_keys"] for d in samples])), key=lambda x: (-len(x), tuple(x)))
out = {}
for key in input_keys:
if key in want_attrs:
key_parts = key.split("__")
vals = []
cur = []
for d in samples:
vals.append(_to_cwl(tz.get_in(key_parts, d), input_files))
# Remove nested keys to avoid specifying multiple times
cur.append(_dissoc_in(d, key_parts) if len(key_parts) > 1 else d)
samples = cur
out[key] = vals
return out | python | def _collapse_to_cwl_record(samples, want_attrs, input_files):
"""Convert nested samples from batches into a CWL record, based on input keys.
"""
input_keys = sorted(list(set().union(*[d["cwl_keys"] for d in samples])), key=lambda x: (-len(x), tuple(x)))
out = {}
for key in input_keys:
if key in want_attrs:
key_parts = key.split("__")
vals = []
cur = []
for d in samples:
vals.append(_to_cwl(tz.get_in(key_parts, d), input_files))
# Remove nested keys to avoid specifying multiple times
cur.append(_dissoc_in(d, key_parts) if len(key_parts) > 1 else d)
samples = cur
out[key] = vals
return out | [
"def",
"_collapse_to_cwl_record",
"(",
"samples",
",",
"want_attrs",
",",
"input_files",
")",
":",
"input_keys",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
")",
".",
"union",
"(",
"*",
"[",
"d",
"[",
"\"cwl_keys\"",
"]",
"for",
"d",
"in",
"samples",
... | Convert nested samples from batches into a CWL record, based on input keys. | [
"Convert",
"nested",
"samples",
"from",
"batches",
"into",
"a",
"CWL",
"record",
"based",
"on",
"input",
"keys",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L534-L550 | train | 218,468 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _file_and_exists | def _file_and_exists(val, input_files):
"""Check if an input is a file and exists.
Checks both locally (staged) and from input files (re-passed but never localized).
"""
return ((os.path.exists(val) and os.path.isfile(val)) or
val in input_files) | python | def _file_and_exists(val, input_files):
"""Check if an input is a file and exists.
Checks both locally (staged) and from input files (re-passed but never localized).
"""
return ((os.path.exists(val) and os.path.isfile(val)) or
val in input_files) | [
"def",
"_file_and_exists",
"(",
"val",
",",
"input_files",
")",
":",
"return",
"(",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"val",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"val",
")",
")",
"or",
"val",
"in",
"input_files",
")"
] | Check if an input is a file and exists.
Checks both locally (staged) and from input files (re-passed but never localized). | [
"Check",
"if",
"an",
"input",
"is",
"a",
"file",
"and",
"exists",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L552-L558 | train | 218,469 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _to_cwl | def _to_cwl(val, input_files):
"""Convert a value into CWL formatted JSON, handling files and complex things.
"""
if isinstance(val, six.string_types):
if _file_and_exists(val, input_files):
val = {"class": "File", "path": val}
secondary = []
for idx in [".bai", ".tbi", ".gbi", ".fai", ".crai", ".db"]:
idx_file = val["path"] + idx
if _file_and_exists(idx_file, input_files):
secondary.append({"class": "File", "path": idx_file})
for idx in [".dict"]:
idx_file = os.path.splitext(val["path"])[0] + idx
if _file_and_exists(idx_file, input_files):
secondary.append({"class": "File", "path": idx_file})
cur_dir, cur_file = os.path.split(val["path"])
# Handle relative paths
if not cur_dir:
cur_dir = os.getcwd()
if cur_file.endswith(cwlutils.DIR_TARGETS):
if os.path.exists(cur_dir):
for fname in os.listdir(cur_dir):
if fname != cur_file and not os.path.isdir(os.path.join(cur_dir, fname))\
and fname != 'sbg.worker.log':
secondary.append({"class": "File", "path": os.path.join(cur_dir, fname)})
else:
for f in input_files:
if f.startswith(cur_dir) and f != cur_file and not os.path.isdir(f):
secondary.append({"class": "File", "path": f})
if secondary:
val["secondaryFiles"] = _remove_duplicate_files(secondary)
elif isinstance(val, (list, tuple)):
val = [_to_cwl(x, input_files) for x in val]
elif isinstance(val, dict):
# File representation with secondary files
if "base" in val and "secondary" in val:
out = {"class": "File", "path": val["base"]}
secondary = [{"class": "File", "path": x} for x in val["secondary"] if not os.path.isdir(x)]
if secondary:
out["secondaryFiles"] = _remove_duplicate_files(secondary)
val = out
else:
val = json.dumps(val, sort_keys=True, separators=(',', ':'))
    return val | python | def _to_cwl(val, input_files):
    """Convert a value into CWL formatted JSON, handling files and complex things.

    Strings naming existing files become CWL File records with index files
    attached as secondaryFiles; lists/tuples recurse element-wise; dicts are
    either a base+secondary file description or are serialized to a canonical
    JSON string for pass-through.
    """
    if isinstance(val, six.string_types):
        if _file_and_exists(val, input_files):
            val = {"class": "File", "path": val}
            secondary = []
            # Indices that live alongside the file with an extra suffix
            # (e.g. .bam -> .bam.bai, .vcf.gz -> .vcf.gz.tbi).
            for idx in [".bai", ".tbi", ".gbi", ".fai", ".crai", ".db"]:
                idx_file = val["path"] + idx
                if _file_and_exists(idx_file, input_files):
                    secondary.append({"class": "File", "path": idx_file})
            # Indices that replace the extension (e.g. reference .dict files).
            for idx in [".dict"]:
                idx_file = os.path.splitext(val["path"])[0] + idx
                if _file_and_exists(idx_file, input_files):
                    secondary.append({"class": "File", "path": idx_file})
            cur_dir, cur_file = os.path.split(val["path"])
            # Handle relative paths
            if not cur_dir:
                cur_dir = os.getcwd()
            # Directory-style targets bundle every sibling file as secondary.
            if cur_file.endswith(cwlutils.DIR_TARGETS):
                if os.path.exists(cur_dir):
                    for fname in os.listdir(cur_dir):
                        if fname != cur_file and not os.path.isdir(os.path.join(cur_dir, fname))\
                           and fname != 'sbg.worker.log':
                            secondary.append({"class": "File", "path": os.path.join(cur_dir, fname)})
                else:
                    # Directory not present locally; fall back to declared inputs.
                    for f in input_files:
                        if f.startswith(cur_dir) and f != cur_file and not os.path.isdir(f):
                            secondary.append({"class": "File", "path": f})
            if secondary:
                val["secondaryFiles"] = _remove_duplicate_files(secondary)
    elif isinstance(val, (list, tuple)):
        val = [_to_cwl(x, input_files) for x in val]
    elif isinstance(val, dict):
        # File representation with secondary files
        if "base" in val and "secondary" in val:
            out = {"class": "File", "path": val["base"]}
            secondary = [{"class": "File", "path": x} for x in val["secondary"] if not os.path.isdir(x)]
            if secondary:
                out["secondaryFiles"] = _remove_duplicate_files(secondary)
            val = out
        else:
            # Arbitrary dicts travel through CWL as deterministic JSON strings.
            val = json.dumps(val, sort_keys=True, separators=(',', ':'))
    return val | [
"def",
"_to_cwl",
"(",
"val",
",",
"input_files",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
":",
"if",
"_file_and_exists",
"(",
"val",
",",
"input_files",
")",
":",
"val",
"=",
"{",
"\"class\"",
":",
"\"File\"",
",... | Convert a value into CWL formatted JSON, handling files and complex things. | [
"Convert",
"a",
"value",
"into",
"CWL",
"formatted",
"JSON",
"handling",
"files",
"and",
"complex",
"things",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L560-L603 | train | 218,470 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _remove_duplicate_files | def _remove_duplicate_files(xs):
"""Remove files specified multiple times in a list.
"""
seen = set([])
out = []
for x in xs:
if x["path"] not in seen:
out.append(x)
seen.add(x["path"])
return out | python | def _remove_duplicate_files(xs):
"""Remove files specified multiple times in a list.
"""
seen = set([])
out = []
for x in xs:
if x["path"] not in seen:
out.append(x)
seen.add(x["path"])
return out | [
"def",
"_remove_duplicate_files",
"(",
"xs",
")",
":",
"seen",
"=",
"set",
"(",
"[",
"]",
")",
"out",
"=",
"[",
"]",
"for",
"x",
"in",
"xs",
":",
"if",
"x",
"[",
"\"path\"",
"]",
"not",
"in",
"seen",
":",
"out",
".",
"append",
"(",
"x",
")",
... | Remove files specified multiple times in a list. | [
"Remove",
"files",
"specified",
"multiple",
"times",
"in",
"a",
"list",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L605-L614 | train | 218,471 |
bcbio/bcbio-nextgen | bcbio/distributed/runfn.py | _update_nested | def _update_nested(key, val, data, allow_overwriting=False):
"""Update the data object, avoiding over-writing with nested dictionaries.
"""
if isinstance(val, dict):
for sub_key, sub_val in val.items():
data = _update_nested(key + [sub_key], sub_val, data, allow_overwriting=allow_overwriting)
else:
already_there = tz.get_in(key, data) is not None
if already_there and val:
if not allow_overwriting:
raise ValueError("Duplicated key %s: %s and %s" % (key, val, tz.get_in(key, data)))
else:
already_there = False
if val or not already_there:
data = tz.update_in(data, key, lambda x: val)
def _update_nested(key, val, data, allow_overwriting=False):
    """Update the data object, avoiding over-writing with nested dictionaries.

    Recurses into dict values, extending the key path; raises ValueError on a
    duplicated non-empty key unless allow_overwriting is set.
    """
    if isinstance(val, dict):
        # Descend one level per sub-key, threading the updated data through.
        for subkey, subval in val.items():
            data = _update_nested(key + [subkey], subval, data,
                                  allow_overwriting=allow_overwriting)
        return data
    existing = tz.get_in(key, data) is not None
    if existing and val:
        if not allow_overwriting:
            raise ValueError("Duplicated key %s: %s and %s" % (key, val, tz.get_in(key, data)))
        existing = False
    # Write when there is a value, or when the slot is still unset.
    if val or not existing:
        data = tz.update_in(data, key, lambda x: val)
    return data
"def",
"_update_nested",
"(",
"key",
",",
"val",
",",
"data",
",",
"allow_overwriting",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"for",
"sub_key",
",",
"sub_val",
"in",
"val",
".",
"items",
"(",
")",
":",
"data",... | Update the data object, avoiding over-writing with nested dictionaries. | [
"Update",
"the",
"data",
"object",
"avoiding",
"over",
"-",
"writing",
"with",
"nested",
"dictionaries",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L623-L638 | train | 218,472 |
bcbio/bcbio-nextgen | bcbio/server/main.py | start | def start(args):
"""Run server with provided command line arguments.
"""
application = tornado.web.Application([(r"/run", run.get_handler(args)),
(r"/status", run.StatusHandler)])
application.runmonitor = RunMonitor()
application.listen(args.port)
    tornado.ioloop.IOLoop.instance().start() | python | def start(args):
    """Run server with provided command line arguments.

    Blocks until the tornado IOLoop is stopped.
    """
    # Route job submission and status queries to their handlers.
    application = tornado.web.Application([(r"/run", run.get_handler(args)),
                                           (r"/status", run.StatusHandler)])
    # Shared monitor so handlers can track in-flight runs.
    application.runmonitor = RunMonitor()
    application.listen(args.port)
    tornado.ioloop.IOLoop.instance().start() | [
"def",
"start",
"(",
"args",
")",
":",
"application",
"=",
"tornado",
".",
"web",
".",
"Application",
"(",
"[",
"(",
"r\"/run\"",
",",
"run",
".",
"get_handler",
"(",
"args",
")",
")",
",",
"(",
"r\"/status\"",
",",
"run",
".",
"StatusHandler",
")",
... | Run server with provided command line arguments. | [
"Run",
"server",
"with",
"provided",
"command",
"line",
"arguments",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/server/main.py#L8-L15 | train | 218,473 |
bcbio/bcbio-nextgen | bcbio/server/main.py | add_subparser | def add_subparser(subparsers):
"""Add command line arguments as server subparser.
"""
parser = subparsers.add_parser("server", help="Run a bcbio-nextgen server allowing remote job execution.")
parser.add_argument("-c", "--config", help=("Global YAML configuration file specifying system details."
"Defaults to installed bcbio_system.yaml"))
parser.add_argument("-p", "--port", help="Port to listen on (default 8080)",
default=8080, type=int)
parser.add_argument("-n", "--cores", help="Cores to use when processing locally when not requested (default 1)",
default=1, type=int)
parser.add_argument("-d", "--biodata_dir", help="Directory with biological data",
default="/mnt/biodata", type=str)
def add_subparser(subparsers):
    """Add command line arguments as server subparser.

    Registers the ``server`` sub-command and its options; returns the parser.
    """
    parser = subparsers.add_parser(
        "server", help="Run a bcbio-nextgen server allowing remote job execution.")
    parser.add_argument("-c", "--config",
                        help=("Global YAML configuration file specifying system details."
                              "Defaults to installed bcbio_system.yaml"))
    # Remaining options share the same shape: flags, help text, default, type.
    option_specs = [
        (("-p", "--port"), "Port to listen on (default 8080)", 8080, int),
        (("-n", "--cores"), "Cores to use when processing locally when not requested (default 1)", 1, int),
        (("-d", "--biodata_dir"), "Directory with biological data", "/mnt/biodata", str),
    ]
    for flags, help_msg, default, arg_type in option_specs:
        parser.add_argument(*flags, help=help_msg, default=default, type=arg_type)
    return parser
"def",
"add_subparser",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"server\"",
",",
"help",
"=",
"\"Run a bcbio-nextgen server allowing remote job execution.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--conf... | Add command line arguments as server subparser. | [
"Add",
"command",
"line",
"arguments",
"as",
"server",
"subparser",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/server/main.py#L29-L41 | train | 218,474 |
bcbio/bcbio-nextgen | bcbio/provenance/data.py | write_versions | def write_versions(dirs, items):
"""Write data versioning for genomes present in the configuration.
"""
genomes = {}
for d in items:
genomes[d["genome_build"]] = d.get("reference", {}).get("versions")
out_file = _get_out_file(dirs)
found_versions = False
if genomes and out_file:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["genome", "resource", "version"])
for genome, version_file in genomes.items():
if not version_file:
genome_dir = install.get_genome_dir(genome, dirs.get("galaxy"), items[0])
if genome_dir:
version_file = os.path.join(genome_dir, "versions.csv")
if version_file and os.path.exists(version_file):
found_versions = True
with open(version_file) as in_handle:
reader = csv.reader(in_handle)
for parts in reader:
if len(parts) >= 2:
resource, version = parts[:2]
writer.writerow([genome, resource, version])
if found_versions:
        return out_file | python | def write_versions(dirs, items):
    """Write data versioning for genomes present in the configuration.

    Aggregates each genome's versions.csv into a single (genome, resource,
    version) CSV. Returns the output file path only when at least one genome
    had version information; otherwise returns None implicitly.
    """
    genomes = {}
    for d in items:
        genomes[d["genome_build"]] = d.get("reference", {}).get("versions")
    out_file = _get_out_file(dirs)
    found_versions = False
    if genomes and out_file:
        with open(out_file, "w") as out_handle:
            writer = csv.writer(out_handle)
            writer.writerow(["genome", "resource", "version"])
            for genome, version_file in genomes.items():
                if not version_file:
                    # No explicit versions file: look for versions.csv inside
                    # the installed genome directory.
                    genome_dir = install.get_genome_dir(genome, dirs.get("galaxy"), items[0])
                    if genome_dir:
                        version_file = os.path.join(genome_dir, "versions.csv")
                if version_file and os.path.exists(version_file):
                    found_versions = True
                    with open(version_file) as in_handle:
                        reader = csv.reader(in_handle)
                        for parts in reader:
                            # Tolerate ragged rows; only the first two columns matter.
                            if len(parts) >= 2:
                                resource, version = parts[:2]
                                writer.writerow([genome, resource, version])
    if found_versions:
        return out_file | [
"def",
"write_versions",
"(",
"dirs",
",",
"items",
")",
":",
"genomes",
"=",
"{",
"}",
"for",
"d",
"in",
"items",
":",
"genomes",
"[",
"d",
"[",
"\"genome_build\"",
"]",
"]",
"=",
"d",
".",
"get",
"(",
"\"reference\"",
",",
"{",
"}",
")",
".",
"... | Write data versioning for genomes present in the configuration. | [
"Write",
"data",
"versioning",
"for",
"genomes",
"present",
"in",
"the",
"configuration",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/data.py#L8-L34 | train | 218,475 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | combine_calls | def combine_calls(*args):
"""Combine multiple callsets into a final set of merged calls.
"""
if len(args) == 3:
is_cwl = False
batch_id, samples, data = args
caller_names, vrn_files = _organize_variants(samples, batch_id)
else:
is_cwl = True
samples = [utils.to_single_data(x) for x in args]
samples = [cwlutils.unpack_tarballs(x, x) for x in samples]
data = samples[0]
batch_id = data["batch_id"]
caller_names = data["variants"]["variantcallers"]
vrn_files = data["variants"]["calls"]
logger.info("Ensemble consensus calls for {0}: {1}".format(
batch_id, ",".join(caller_names)))
edata = copy.deepcopy(data)
base_dir = utils.safe_makedir(os.path.join(edata["dirs"]["work"], "ensemble", batch_id))
if any([vcfutils.vcf_has_variants(f) for f in vrn_files]):
# Decompose multiallelic variants and normalize
passonly = not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata, False)
vrn_files = [normalize.normalize(f, data, passonly=passonly, rerun_effects=False, remove_oldeffects=True,
nonrefonly=True,
work_dir=utils.safe_makedir(os.path.join(base_dir, c)))
for c, f in zip(caller_names, vrn_files)]
if "classifiers" not in (dd.get_ensemble(edata) or {}):
callinfo = _run_ensemble_intersection(batch_id, vrn_files, caller_names, base_dir, edata)
else:
config_file = _write_config_file(batch_id, caller_names, base_dir, edata)
callinfo = _run_ensemble(batch_id, vrn_files, config_file, base_dir,
dd.get_ref_file(edata), edata)
callinfo["vrn_file"] = vcfutils.bgzip_and_index(callinfo["vrn_file"], data["config"])
# After decomposing multiallelic variants and normalizing, re-evaluate effects
ann_ma_file, _ = effects.add_to_vcf(callinfo["vrn_file"], data)
if ann_ma_file:
callinfo["vrn_file"] = ann_ma_file
edata["config"]["algorithm"]["variantcaller"] = "ensemble"
edata["vrn_file"] = callinfo["vrn_file"]
edata["ensemble_bed"] = callinfo["bed_file"]
callinfo["validate"] = validate.compare_to_rm(edata)[0][0].get("validate")
else:
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
vcfutils.write_empty_vcf(out_vcf_file, samples=[dd.get_sample_name(d) for d in samples])
callinfo = {"variantcaller": "ensemble",
"vrn_file": vcfutils.bgzip_and_index(out_vcf_file, data["config"]),
"bed_file": None}
if is_cwl:
callinfo["batch_samples"] = data["batch_samples"]
callinfo["batch_id"] = batch_id
return [{"ensemble": callinfo}]
else:
        return [[batch_id, callinfo]] | python | def combine_calls(*args):
    """Combine multiple callsets into a final set of merged calls.
    """
    # Two invocation styles: (batch_id, samples, data) from the standard
    # pipeline, or a flat list of CWL sample records.
    if len(args) == 3:
        is_cwl = False
        batch_id, samples, data = args
        caller_names, vrn_files = _organize_variants(samples, batch_id)
    else:
        is_cwl = True
        samples = [utils.to_single_data(x) for x in args]
        samples = [cwlutils.unpack_tarballs(x, x) for x in samples]
        data = samples[0]
        batch_id = data["batch_id"]
        caller_names = data["variants"]["variantcallers"]
        vrn_files = data["variants"]["calls"]
    logger.info("Ensemble consensus calls for {0}: {1}".format(
        batch_id, ",".join(caller_names)))
    edata = copy.deepcopy(data)
    base_dir = utils.safe_makedir(os.path.join(edata["dirs"]["work"], "ensemble", batch_id))
    if any([vcfutils.vcf_has_variants(f) for f in vrn_files]):
        # Decompose multiallelic variants and normalize
        passonly = not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata, False)
        vrn_files = [normalize.normalize(f, data, passonly=passonly, rerun_effects=False, remove_oldeffects=True,
                                         nonrefonly=True,
                                         work_dir=utils.safe_makedir(os.path.join(base_dir, c)))
                     for c, f in zip(caller_names, vrn_files)]
        # Intersection-based ensemble unless SVM classifiers are configured.
        if "classifiers" not in (dd.get_ensemble(edata) or {}):
            callinfo = _run_ensemble_intersection(batch_id, vrn_files, caller_names, base_dir, edata)
        else:
            config_file = _write_config_file(batch_id, caller_names, base_dir, edata)
            callinfo = _run_ensemble(batch_id, vrn_files, config_file, base_dir,
                                     dd.get_ref_file(edata), edata)
            callinfo["vrn_file"] = vcfutils.bgzip_and_index(callinfo["vrn_file"], data["config"])
        # After decomposing multiallelic variants and normalizing, re-evaluate effects
        ann_ma_file, _ = effects.add_to_vcf(callinfo["vrn_file"], data)
        if ann_ma_file:
            callinfo["vrn_file"] = ann_ma_file
        edata["config"]["algorithm"]["variantcaller"] = "ensemble"
        edata["vrn_file"] = callinfo["vrn_file"]
        edata["ensemble_bed"] = callinfo["bed_file"]
        callinfo["validate"] = validate.compare_to_rm(edata)[0][0].get("validate")
    else:
        # No input variants: emit an empty, indexed VCF so downstream steps succeed.
        out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
        vcfutils.write_empty_vcf(out_vcf_file, samples=[dd.get_sample_name(d) for d in samples])
        callinfo = {"variantcaller": "ensemble",
                    "vrn_file": vcfutils.bgzip_and_index(out_vcf_file, data["config"]),
                    "bed_file": None}
    if is_cwl:
        callinfo["batch_samples"] = data["batch_samples"]
        callinfo["batch_id"] = batch_id
        return [{"ensemble": callinfo}]
    else:
        return [[batch_id, callinfo]] | [
"def",
"combine_calls",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"3",
":",
"is_cwl",
"=",
"False",
"batch_id",
",",
"samples",
",",
"data",
"=",
"args",
"caller_names",
",",
"vrn_files",
"=",
"_organize_variants",
"(",
"samples",
... | Combine multiple callsets into a final set of merged calls. | [
"Combine",
"multiple",
"callsets",
"into",
"a",
"final",
"set",
"of",
"merged",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L57-L110 | train | 218,476 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | combine_calls_parallel | def combine_calls_parallel(samples, run_parallel):
"""Combine calls using batched Ensemble approach.
"""
batch_groups, extras = _group_by_batches(samples, _has_ensemble)
out = []
if batch_groups:
processed = run_parallel("combine_calls", ((b, xs, xs[0]) for b, xs in batch_groups.items()))
for batch_id, callinfo in processed:
for data in batch_groups[batch_id]:
data["variants"].insert(0, callinfo)
out.append([data])
def combine_calls_parallel(samples, run_parallel):
    """Combine calls using batched Ensemble approach.

    Runs ensemble combination per batch, prepends the result to each sample's
    variants, and returns combined plus non-ensemble samples.
    """
    batch_groups, extras = _group_by_batches(samples, _has_ensemble)
    combined = []
    if batch_groups:
        job_args = ((batch, ds, ds[0]) for batch, ds in batch_groups.items())
        for batch_id, callinfo in run_parallel("combine_calls", job_args):
            for data in batch_groups[batch_id]:
                data["variants"].insert(0, callinfo)
                combined.append([data])
    return combined + extras
"def",
"combine_calls_parallel",
"(",
"samples",
",",
"run_parallel",
")",
":",
"batch_groups",
",",
"extras",
"=",
"_group_by_batches",
"(",
"samples",
",",
"_has_ensemble",
")",
"out",
"=",
"[",
"]",
"if",
"batch_groups",
":",
"processed",
"=",
"run_parallel",... | Combine calls using batched Ensemble approach. | [
"Combine",
"calls",
"using",
"batched",
"Ensemble",
"approach",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L112-L123 | train | 218,477 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _group_by_batches | def _group_by_batches(samples, check_fn):
"""Group calls by batches, processing families together during ensemble calling.
"""
batch_groups = collections.defaultdict(list)
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch_groups[multi.get_batch_for_key(data)].append(data)
else:
extras.append([data])
return batch_groups, extras | python | def _group_by_batches(samples, check_fn):
"""Group calls by batches, processing families together during ensemble calling.
"""
batch_groups = collections.defaultdict(list)
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch_groups[multi.get_batch_for_key(data)].append(data)
else:
extras.append([data])
return batch_groups, extras | [
"def",
"_group_by_batches",
"(",
"samples",
",",
"check_fn",
")",
":",
"batch_groups",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"extras",
"=",
"[",
"]",
"for",
"data",
"in",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"samples",
"]",
... | Group calls by batches, processing families together during ensemble calling. | [
"Group",
"calls",
"by",
"batches",
"processing",
"families",
"together",
"during",
"ensemble",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L133-L143 | train | 218,478 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _organize_variants | def _organize_variants(samples, batch_id):
"""Retrieve variant calls for all samples, merging batched samples into single VCF.
"""
caller_names = [x["variantcaller"] for x in samples[0]["variants"]]
calls = collections.defaultdict(list)
for data in samples:
for vrn in data["variants"]:
calls[vrn["variantcaller"]].append(vrn["vrn_file"])
data = samples[0]
vrn_files = []
for caller in caller_names:
fnames = calls[caller]
if len(fnames) == 1:
vrn_files.append(fnames[0])
else:
vrn_files.append(population.get_multisample_vcf(fnames, batch_id, caller, data))
return caller_names, vrn_files | python | def _organize_variants(samples, batch_id):
"""Retrieve variant calls for all samples, merging batched samples into single VCF.
"""
caller_names = [x["variantcaller"] for x in samples[0]["variants"]]
calls = collections.defaultdict(list)
for data in samples:
for vrn in data["variants"]:
calls[vrn["variantcaller"]].append(vrn["vrn_file"])
data = samples[0]
vrn_files = []
for caller in caller_names:
fnames = calls[caller]
if len(fnames) == 1:
vrn_files.append(fnames[0])
else:
vrn_files.append(population.get_multisample_vcf(fnames, batch_id, caller, data))
return caller_names, vrn_files | [
"def",
"_organize_variants",
"(",
"samples",
",",
"batch_id",
")",
":",
"caller_names",
"=",
"[",
"x",
"[",
"\"variantcaller\"",
"]",
"for",
"x",
"in",
"samples",
"[",
"0",
"]",
"[",
"\"variants\"",
"]",
"]",
"calls",
"=",
"collections",
".",
"defaultdict"... | Retrieve variant calls for all samples, merging batched samples into single VCF. | [
"Retrieve",
"variant",
"calls",
"for",
"all",
"samples",
"merging",
"batched",
"samples",
"into",
"single",
"VCF",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L145-L161 | train | 218,479 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _handle_somatic_ensemble | def _handle_somatic_ensemble(vrn_file, data):
"""For somatic ensemble, discard normal samples and filtered variants from vcfs.
Only needed for bcbio.variation based ensemble calling.
"""
if tz.get_in(["metadata", "phenotype"], data, "").lower().startswith("tumor"):
vrn_file_temp = vrn_file.replace(".vcf", "_tumorOnly_noFilteredCalls.vcf")
# Select tumor sample and keep only PASS and . calls
vrn_file = vcfutils.select_sample(in_file=vrn_file, sample=data["name"][1],
out_file=vrn_file_temp,
config=data["config"], filters="PASS,.")
def _handle_somatic_ensemble(vrn_file, data):
    """For somatic ensemble, discard normal samples and filtered variants from vcfs.
    Only needed for bcbio.variation based ensemble calling.
    """
    phenotype = tz.get_in(["metadata", "phenotype"], data, "")
    if phenotype.lower().startswith("tumor"):
        out_file = vrn_file.replace(".vcf", "_tumorOnly_noFilteredCalls.vcf")
        # Select tumor sample and keep only PASS and . calls
        vrn_file = vcfutils.select_sample(in_file=vrn_file, sample=data["name"][1],
                                          out_file=out_file,
                                          config=data["config"], filters="PASS,.")
    return vrn_file
"def",
"_handle_somatic_ensemble",
"(",
"vrn_file",
",",
"data",
")",
":",
"if",
"tz",
".",
"get_in",
"(",
"[",
"\"metadata\"",
",",
"\"phenotype\"",
"]",
",",
"data",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"tumor\"",
")",
... | For somatic ensemble, discard normal samples and filtered variants from vcfs.
Only needed for bcbio.variation based ensemble calling. | [
"For",
"somatic",
"ensemble",
"discard",
"normal",
"samples",
"and",
"filtered",
"variants",
"from",
"vcfs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L163-L174 | train | 218,480 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _run_ensemble | def _run_ensemble(batch_id, vrn_files, config_file, base_dir, ref_file, data):
"""Run an ensemble call using merging and SVM-based approach in bcbio.variation
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
out_bed_file = os.path.join(base_dir, "{0}-callregions.bed".format(batch_id))
work_dir = "%s-work" % os.path.splitext(out_vcf_file)[0]
if not utils.file_exists(out_vcf_file):
_bcbio_variation_ensemble(vrn_files, out_vcf_file, ref_file, config_file,
base_dir, data)
if not utils.file_exists(out_vcf_file):
base_vcf = glob.glob(os.path.join(work_dir, "prep", "*-cfilter.vcf"))[0]
utils.symlink_plus(base_vcf, out_vcf_file)
if not utils.file_exists(out_bed_file):
multi_beds = glob.glob(os.path.join(work_dir, "prep", "*-multicombine.bed"))
if len(multi_beds) > 0:
utils.symlink_plus(multi_beds[0], out_bed_file)
return {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
            "bed_file": out_bed_file if os.path.exists(out_bed_file) else None} | python | def _run_ensemble(batch_id, vrn_files, config_file, base_dir, ref_file, data):
    """Run an ensemble call using merging and SVM-based approach in bcbio.variation
    """
    out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
    out_bed_file = os.path.join(base_dir, "{0}-callregions.bed".format(batch_id))
    work_dir = "%s-work" % os.path.splitext(out_vcf_file)[0]
    if not utils.file_exists(out_vcf_file):
        _bcbio_variation_ensemble(vrn_files, out_vcf_file, ref_file, config_file,
                                  base_dir, data)
    if not utils.file_exists(out_vcf_file):
        # bcbio.variation did not write the final VCF; link the consensus
        # filter output from its work directory instead.
        base_vcf = glob.glob(os.path.join(work_dir, "prep", "*-cfilter.vcf"))[0]
        utils.symlink_plus(base_vcf, out_vcf_file)
    if not utils.file_exists(out_bed_file):
        # The combined callable-regions BED is optional output.
        multi_beds = glob.glob(os.path.join(work_dir, "prep", "*-multicombine.bed"))
        if len(multi_beds) > 0:
            utils.symlink_plus(multi_beds[0], out_bed_file)
    return {"variantcaller": "ensemble",
            "vrn_file": out_vcf_file,
            "bed_file": out_bed_file if os.path.exists(out_bed_file) else None} | [
"def",
"_run_ensemble",
"(",
"batch_id",
",",
"vrn_files",
",",
"config_file",
",",
"base_dir",
",",
"ref_file",
",",
"data",
")",
":",
"out_vcf_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"\"{0}-ensemble.vcf\"",
".",
"format",
"(",
"... | Run an ensemble call using merging and SVM-based approach in bcbio.variation | [
"Run",
"an",
"ensemble",
"call",
"using",
"merging",
"and",
"SVM",
"-",
"based",
"approach",
"in",
"bcbio",
".",
"variation"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L190-L208 | train | 218,481 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _write_config_file | def _write_config_file(batch_id, caller_names, base_dir, data):
"""Write YAML configuration to generate an ensemble set of combined calls.
"""
config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
config_file = os.path.join(config_dir, "{0}-ensemble.yaml".format(batch_id))
algorithm = data["config"]["algorithm"]
econfig = {"ensemble": algorithm["ensemble"],
"names": caller_names,
"prep-inputs": False}
intervals = validate.get_analysis_intervals(data, None, base_dir)
if intervals:
econfig["intervals"] = os.path.abspath(intervals)
with open(config_file, "w") as out_handle:
yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False)
    return config_file | python | def _write_config_file(batch_id, caller_names, base_dir, data):
    """Write YAML configuration to generate an ensemble set of combined calls.
    """
    config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
    config_file = os.path.join(config_dir, "{0}-ensemble.yaml".format(batch_id))
    algorithm = data["config"]["algorithm"]
    # Inputs are already prepared upstream, so disable bcbio.variation's prep.
    econfig = {"ensemble": algorithm["ensemble"],
               "names": caller_names,
               "prep-inputs": False}
    intervals = validate.get_analysis_intervals(data, None, base_dir)
    if intervals:
        econfig["intervals"] = os.path.abspath(intervals)
    with open(config_file, "w") as out_handle:
        yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False)
    return config_file | [
"def",
"_write_config_file",
"(",
"batch_id",
",",
"caller_names",
",",
"base_dir",
",",
"data",
")",
":",
"config_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"\"config\"",
")",
")",
"config_file",
"=... | Write YAML configuration to generate an ensemble set of combined calls. | [
"Write",
"YAML",
"configuration",
"to",
"generate",
"an",
"ensemble",
"set",
"of",
"combined",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L210-L224 | train | 218,482 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _get_num_pass | def _get_num_pass(data, n):
"""Calculate the number of samples needed to pass ensemble calling.
"""
numpass = tz.get_in(["config", "algorithm", "ensemble", "numpass"], data)
if numpass:
return int(numpass)
trusted_pct = tz.get_in(["config", "algorithm", "ensemble", "trusted_pct"], data)
if trusted_pct:
return int(math.ceil(float(trusted_pct) * n))
def _get_num_pass(data, n):
    """Calculate the number of samples needed to pass ensemble calling.

    Precedence: explicit numpass, then trusted_pct of n callers, then 2.
    """
    explicit = tz.get_in(["config", "algorithm", "ensemble", "numpass"], data)
    if explicit:
        return int(explicit)
    pct = tz.get_in(["config", "algorithm", "ensemble", "trusted_pct"], data)
    if pct:
        return int(math.ceil(float(pct) * n))
    # Default: require agreement from at least two callers.
    return 2
"def",
"_get_num_pass",
"(",
"data",
",",
"n",
")",
":",
"numpass",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"ensemble\"",
",",
"\"numpass\"",
"]",
",",
"data",
")",
"if",
"numpass",
":",
"return",
"int",
"(",
"num... | Calculate the number of samples needed to pass ensemble calling. | [
"Calculate",
"the",
"number",
"of",
"samples",
"needed",
"to",
"pass",
"ensemble",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L226-L235 | train | 218,483 |
bcbio/bcbio-nextgen | bcbio/variation/ensemble.py | _run_ensemble_intersection | def _run_ensemble_intersection(batch_id, vrn_files, callers, base_dir, edata):
"""Run intersection n out of x based ensemble method using bcbio.variation.recall.
"""
out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf.gz".format(batch_id))
if not utils.file_exists(out_vcf_file):
num_pass = _get_num_pass(edata, len(vrn_files))
cmd = [
config_utils.get_program(
"bcbio-variation-recall", edata["config"]),
"ensemble",
"--cores=%s" % edata["config"]["algorithm"].get("num_cores", 1),
"--numpass", str(num_pass),
"--names", ",".join(callers)
]
# Remove filtered calls, do not try to rescue, unless configured
if not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata):
cmd += ["--nofiltered"]
with file_transaction(edata, out_vcf_file) as tx_out_file:
cmd += [tx_out_file, dd.get_ref_file(edata)] + vrn_files
cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(str(x) for x in cmd))
do.run(cmd, "Ensemble intersection calling: %s" % (batch_id))
in_data = utils.deepish_copy(edata)
in_data["vrn_file"] = out_vcf_file
return {"variantcaller": "ensemble",
"vrn_file": out_vcf_file,
            "bed_file": None} | python | def _run_ensemble_intersection(batch_id, vrn_files, callers, base_dir, edata):
    """Run intersection n out of x based ensemble method using bcbio.variation.recall.
    """
    out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf.gz".format(batch_id))
    if not utils.file_exists(out_vcf_file):
        # Number of callers that must agree for a variant to pass.
        num_pass = _get_num_pass(edata, len(vrn_files))
        cmd = [
            config_utils.get_program(
                "bcbio-variation-recall", edata["config"]),
            "ensemble",
            "--cores=%s" % edata["config"]["algorithm"].get("num_cores", 1),
            "--numpass", str(num_pass),
            "--names", ",".join(callers)
        ]
        # Remove filtered calls, do not try to rescue, unless configured
        if not tz.get_in(["config", "algorithm", "ensemble", "use_filtered"], edata):
            cmd += ["--nofiltered"]
        # Write atomically so a failed run does not leave a partial VCF.
        with file_transaction(edata, out_vcf_file) as tx_out_file:
            cmd += [tx_out_file, dd.get_ref_file(edata)] + vrn_files
            cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(str(x) for x in cmd))
            do.run(cmd, "Ensemble intersection calling: %s" % (batch_id))
    in_data = utils.deepish_copy(edata)
    in_data["vrn_file"] = out_vcf_file
    return {"variantcaller": "ensemble",
            "vrn_file": out_vcf_file,
            "bed_file": None} | [
"def",
"_run_ensemble_intersection",
"(",
"batch_id",
",",
"vrn_files",
",",
"callers",
",",
"base_dir",
",",
"edata",
")",
":",
"out_vcf_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"\"{0}-ensemble.vcf.gz\"",
".",
"format",
"(",
"batch_id... | Run intersection n out of x based ensemble method using bcbio.variation.recall. | [
"Run",
"intersection",
"n",
"out",
"of",
"x",
"based",
"ensemble",
"method",
"using",
"bcbio",
".",
"variation",
".",
"recall",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L237-L263 | train | 218,484 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | _get_callable_regions | def _get_callable_regions(data):
"""Retrieve regions to parallelize by from callable regions or chromosomes.
"""
import pybedtools
callable_files = data.get("callable_regions")
if callable_files:
assert len(callable_files) == 1
regions = [(r.chrom, int(r.start), int(r.stop)) for r in pybedtools.BedTool(callable_files[0])]
else:
work_bam = list(tz.take(1, filter(lambda x: x and x.endswith(".bam"), data["work_bams"])))
if work_bam:
with pysam.Samfile(work_bam[0], "rb") as pysam_bam:
regions = [(chrom, 0, length) for (chrom, length) in zip(pysam_bam.references,
pysam_bam.lengths)]
else:
regions = [(r.name, 0, r.size) for r in
ref.file_contigs(dd.get_ref_file(data), data["config"])]
return regions | python | def _get_callable_regions(data):
"""Retrieve regions to parallelize by from callable regions or chromosomes.
"""
import pybedtools
callable_files = data.get("callable_regions")
if callable_files:
assert len(callable_files) == 1
regions = [(r.chrom, int(r.start), int(r.stop)) for r in pybedtools.BedTool(callable_files[0])]
else:
work_bam = list(tz.take(1, filter(lambda x: x and x.endswith(".bam"), data["work_bams"])))
if work_bam:
with pysam.Samfile(work_bam[0], "rb") as pysam_bam:
regions = [(chrom, 0, length) for (chrom, length) in zip(pysam_bam.references,
pysam_bam.lengths)]
else:
regions = [(r.name, 0, r.size) for r in
ref.file_contigs(dd.get_ref_file(data), data["config"])]
return regions | [
"def",
"_get_callable_regions",
"(",
"data",
")",
":",
"import",
"pybedtools",
"callable_files",
"=",
"data",
".",
"get",
"(",
"\"callable_regions\"",
")",
"if",
"callable_files",
":",
"assert",
"len",
"(",
"callable_files",
")",
"==",
"1",
"regions",
"=",
"["... | Retrieve regions to parallelize by from callable regions or chromosomes. | [
"Retrieve",
"regions",
"to",
"parallelize",
"by",
"from",
"callable",
"regions",
"or",
"chromosomes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L75-L92 | train | 218,485 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | _split_by_callable_region | def _split_by_callable_region(data):
"""Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions.
"""
batch = tz.get_in(("metadata", "batch"), data)
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
name = batch if batch else tz.get_in(("rgnames", "sample"), data)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "joint", jointcaller, name))
utils.safe_makedir(os.path.join(out_dir, "inprep"))
parts = []
for feat in _get_callable_regions(data):
region_dir = utils.safe_makedir(os.path.join(out_dir, feat[0]))
region_prep_dir = os.path.join(region_dir, "inprep")
if not os.path.exists(region_prep_dir):
os.symlink(os.path.join(os.pardir, "inprep"), region_prep_dir)
region_outfile = os.path.join(region_dir, "%s-%s.vcf.gz" % (batch, region.to_safestr(feat)))
parts.append((feat, data["work_bams"], data["vrn_files"], region_outfile))
out_file = os.path.join(out_dir, "%s-joint.vcf.gz" % name)
return out_file, parts | python | def _split_by_callable_region(data):
"""Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions.
"""
batch = tz.get_in(("metadata", "batch"), data)
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
name = batch if batch else tz.get_in(("rgnames", "sample"), data)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "joint", jointcaller, name))
utils.safe_makedir(os.path.join(out_dir, "inprep"))
parts = []
for feat in _get_callable_regions(data):
region_dir = utils.safe_makedir(os.path.join(out_dir, feat[0]))
region_prep_dir = os.path.join(region_dir, "inprep")
if not os.path.exists(region_prep_dir):
os.symlink(os.path.join(os.pardir, "inprep"), region_prep_dir)
region_outfile = os.path.join(region_dir, "%s-%s.vcf.gz" % (batch, region.to_safestr(feat)))
parts.append((feat, data["work_bams"], data["vrn_files"], region_outfile))
out_file = os.path.join(out_dir, "%s-joint.vcf.gz" % name)
return out_file, parts | [
"def",
"_split_by_callable_region",
"(",
"data",
")",
":",
"batch",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"metadata\"",
",",
"\"batch\"",
")",
",",
"data",
")",
"jointcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"... | Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions. | [
"Split",
"by",
"callable",
"or",
"variant",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L94-L114 | train | 218,486 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | _is_jointcaller_compatible | def _is_jointcaller_compatible(data):
"""Match variant caller inputs to compatible joint callers.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if isinstance(variantcaller, (list, tuple)) and len(variantcaller) == 1:
variantcaller = variantcaller[0]
return jointcaller == "%s-joint" % variantcaller or not variantcaller | python | def _is_jointcaller_compatible(data):
"""Match variant caller inputs to compatible joint callers.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if isinstance(variantcaller, (list, tuple)) and len(variantcaller) == 1:
variantcaller = variantcaller[0]
return jointcaller == "%s-joint" % variantcaller or not variantcaller | [
"def",
"_is_jointcaller_compatible",
"(",
"data",
")",
":",
"jointcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"jointcaller\"",
")",
",",
"data",
")",
"variantcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"... | Match variant caller inputs to compatible joint callers. | [
"Match",
"variant",
"caller",
"inputs",
"to",
"compatible",
"joint",
"callers",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L116-L123 | train | 218,487 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | square_off | def square_off(samples, run_parallel):
"""Perform joint calling at all variants within a batch.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
added = False
if tz.get_in(("metadata", "batch"), data):
for add in genotype.handle_multiple_callers(data, "jointcaller", require_bam=False):
if _is_jointcaller_compatible(add):
added = True
to_process.append([add])
if not added:
extras.append([data])
processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
multi.group_batches_joint, run_parallel,
"square_batch_region", "concat_variant_files",
"vrn_file", ["region", "sam_ref", "config"])
return _combine_to_jointcaller(processed) + extras | python | def square_off(samples, run_parallel):
"""Perform joint calling at all variants within a batch.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
added = False
if tz.get_in(("metadata", "batch"), data):
for add in genotype.handle_multiple_callers(data, "jointcaller", require_bam=False):
if _is_jointcaller_compatible(add):
added = True
to_process.append([add])
if not added:
extras.append([data])
processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
multi.group_batches_joint, run_parallel,
"square_batch_region", "concat_variant_files",
"vrn_file", ["region", "sam_ref", "config"])
return _combine_to_jointcaller(processed) + extras | [
"def",
"square_off",
"(",
"samples",
",",
"run_parallel",
")",
":",
"to_process",
"=",
"[",
"]",
"extras",
"=",
"[",
"]",
"for",
"data",
"in",
"[",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"samples",
"]",
":",
"added",
"=",
... | Perform joint calling at all variants within a batch. | [
"Perform",
"joint",
"calling",
"at",
"all",
"variants",
"within",
"a",
"batch",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L125-L143 | train | 218,488 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | _combine_to_jointcaller | def _combine_to_jointcaller(processed):
"""Add joint calling information to variants, while collapsing independent regions.
"""
by_vrn_file = collections.OrderedDict()
for data in (x[0] for x in processed):
key = (tz.get_in(("config", "algorithm", "jointcaller"), data), data["vrn_file"])
if key not in by_vrn_file:
by_vrn_file[key] = []
by_vrn_file[key].append(data)
out = []
for grouped_data in by_vrn_file.values():
cur = grouped_data[0]
out.append([cur])
return out | python | def _combine_to_jointcaller(processed):
"""Add joint calling information to variants, while collapsing independent regions.
"""
by_vrn_file = collections.OrderedDict()
for data in (x[0] for x in processed):
key = (tz.get_in(("config", "algorithm", "jointcaller"), data), data["vrn_file"])
if key not in by_vrn_file:
by_vrn_file[key] = []
by_vrn_file[key].append(data)
out = []
for grouped_data in by_vrn_file.values():
cur = grouped_data[0]
out.append([cur])
return out | [
"def",
"_combine_to_jointcaller",
"(",
"processed",
")",
":",
"by_vrn_file",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"data",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"processed",
")",
":",
"key",
"=",
"(",
"tz",
".",
"get_in",
... | Add joint calling information to variants, while collapsing independent regions. | [
"Add",
"joint",
"calling",
"information",
"to",
"variants",
"while",
"collapsing",
"independent",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L145-L158 | train | 218,489 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | square_batch_region | def square_batch_region(data, region, bam_files, vrn_files, out_file):
"""Perform squaring of a batch in a supplied region, with input BAMs
"""
from bcbio.variation import sentieon, strelka2
if not utils.file_exists(out_file):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
gatkjoint.run_region(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]:
strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]:
sentieon.run_gvcftyper(vrn_files, out_file, region, data)
else:
raise ValueError("Unexpected joint calling approach: %s." % jointcaller)
if region:
data["region"] = region
data = _fix_orig_vcf_refs(data)
data["vrn_file"] = out_file
return [data] | python | def square_batch_region(data, region, bam_files, vrn_files, out_file):
"""Perform squaring of a batch in a supplied region, with input BAMs
"""
from bcbio.variation import sentieon, strelka2
if not utils.file_exists(out_file):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
gatkjoint.run_region(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]:
strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]:
sentieon.run_gvcftyper(vrn_files, out_file, region, data)
else:
raise ValueError("Unexpected joint calling approach: %s." % jointcaller)
if region:
data["region"] = region
data = _fix_orig_vcf_refs(data)
data["vrn_file"] = out_file
return [data] | [
"def",
"square_batch_region",
"(",
"data",
",",
"region",
",",
"bam_files",
",",
"vrn_files",
",",
"out_file",
")",
":",
"from",
"bcbio",
".",
"variation",
"import",
"sentieon",
",",
"strelka2",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",... | Perform squaring of a batch in a supplied region, with input BAMs | [
"Perform",
"squaring",
"of",
"a",
"batch",
"in",
"a",
"supplied",
"region",
"with",
"input",
"BAMs"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L172-L194 | train | 218,490 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | _fix_orig_vcf_refs | def _fix_orig_vcf_refs(data):
"""Supply references to initial variantcalls if run in addition to batching.
"""
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if variantcaller:
data["vrn_file_orig"] = data["vrn_file"]
for i, sub in enumerate(data.get("group_orig", [])):
sub_vrn = sub.pop("vrn_file", None)
if sub_vrn:
sub["vrn_file_orig"] = sub_vrn
data["group_orig"][i] = sub
return data | python | def _fix_orig_vcf_refs(data):
"""Supply references to initial variantcalls if run in addition to batching.
"""
variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
if variantcaller:
data["vrn_file_orig"] = data["vrn_file"]
for i, sub in enumerate(data.get("group_orig", [])):
sub_vrn = sub.pop("vrn_file", None)
if sub_vrn:
sub["vrn_file_orig"] = sub_vrn
data["group_orig"][i] = sub
return data | [
"def",
"_fix_orig_vcf_refs",
"(",
"data",
")",
":",
"variantcaller",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"variantcaller\"",
")",
",",
"data",
")",
"if",
"variantcaller",
":",
"data",
"[",
"\"vrn_file_orig\"",
"]",
"... | Supply references to initial variantcalls if run in addition to batching. | [
"Supply",
"references",
"to",
"initial",
"variantcalls",
"if",
"run",
"in",
"addition",
"to",
"batching",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L196-L207 | train | 218,491 |
bcbio/bcbio-nextgen | bcbio/variation/joint.py | _square_batch_bcbio_variation | def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
todo="square"):
"""Run squaring or merging analysis using bcbio.variation.recall.
"""
ref_file = tz.get_in(("reference", "fasta", "base"), data)
cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
# adjust memory by cores but leave room for run program memory
memcores = int(math.ceil(float(cores) / 5.0))
jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
{"algorithm": {"memory_adjust": {"direction": "increase",
"magnitude": memcores}}})
# Write unique VCFs and BAMs to input file
input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
with open(input_file, "w") as out_handle:
out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
if todo == "square":
out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
["-c", cores, "-r", bamprep.region_to_gatk(region)]
if todo == "square":
cmd += ["--caller", variantcaller]
cmd += [out_file, ref_file, input_file]
bcbio_env = utils.get_bcbio_env()
cmd = " ".join(str(x) for x in cmd)
do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
return out_file | python | def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
todo="square"):
"""Run squaring or merging analysis using bcbio.variation.recall.
"""
ref_file = tz.get_in(("reference", "fasta", "base"), data)
cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
# adjust memory by cores but leave room for run program memory
memcores = int(math.ceil(float(cores) / 5.0))
jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
{"algorithm": {"memory_adjust": {"direction": "increase",
"magnitude": memcores}}})
# Write unique VCFs and BAMs to input file
input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
with open(input_file, "w") as out_handle:
out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
if todo == "square":
out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
["-c", cores, "-r", bamprep.region_to_gatk(region)]
if todo == "square":
cmd += ["--caller", variantcaller]
cmd += [out_file, ref_file, input_file]
bcbio_env = utils.get_bcbio_env()
cmd = " ".join(str(x) for x in cmd)
do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
return out_file | [
"def",
"_square_batch_bcbio_variation",
"(",
"data",
",",
"region",
",",
"bam_files",
",",
"vrn_files",
",",
"out_file",
",",
"todo",
"=",
"\"square\"",
")",
":",
"ref_file",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"reference\"",
",",
"\"fasta\"",
",",
"\"base... | Run squaring or merging analysis using bcbio.variation.recall. | [
"Run",
"squaring",
"or",
"merging",
"analysis",
"using",
"bcbio",
".",
"variation",
".",
"recall",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/joint.py#L209-L236 | train | 218,492 |
bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | run | def run(items):
"""Normalization and log2 ratio calculation plus CNV calling for full cohort.
- Combine coverage of each region for each sample
- Prepare read counts for each sample
- Normalize coverages in cohort by gene and sample, and calculate log2 ratios
- Call amplifications and deletions
"""
items = [utils.to_single_data(x) for x in items]
work_dir = _sv_workdir(items[0])
input_backs = list(set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "seq2c") for d in items])))
coverage_file = _combine_coverages(items, work_dir, input_backs)
read_mapping_file = _calculate_mapping_reads(items, work_dir, input_backs)
normal_names = []
if input_backs:
with open(input_backs[0]) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
normal_names.append(line.split()[0])
normal_names += [dd.get_sample_name(x) for x in items if population.get_affected_status(x) == 1]
seq2c_calls_file = _call_cnv(items, work_dir, read_mapping_file, coverage_file, normal_names)
items = _split_cnv(items, seq2c_calls_file, read_mapping_file, coverage_file)
return items | python | def run(items):
"""Normalization and log2 ratio calculation plus CNV calling for full cohort.
- Combine coverage of each region for each sample
- Prepare read counts for each sample
- Normalize coverages in cohort by gene and sample, and calculate log2 ratios
- Call amplifications and deletions
"""
items = [utils.to_single_data(x) for x in items]
work_dir = _sv_workdir(items[0])
input_backs = list(set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "seq2c") for d in items])))
coverage_file = _combine_coverages(items, work_dir, input_backs)
read_mapping_file = _calculate_mapping_reads(items, work_dir, input_backs)
normal_names = []
if input_backs:
with open(input_backs[0]) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
normal_names.append(line.split()[0])
normal_names += [dd.get_sample_name(x) for x in items if population.get_affected_status(x) == 1]
seq2c_calls_file = _call_cnv(items, work_dir, read_mapping_file, coverage_file, normal_names)
items = _split_cnv(items, seq2c_calls_file, read_mapping_file, coverage_file)
return items | [
"def",
"run",
"(",
"items",
")",
":",
"items",
"=",
"[",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"items",
"]",
"work_dir",
"=",
"_sv_workdir",
"(",
"items",
"[",
"0",
"]",
")",
"input_backs",
"=",
"list",
"(",
"set",
"(",
... | Normalization and log2 ratio calculation plus CNV calling for full cohort.
- Combine coverage of each region for each sample
- Prepare read counts for each sample
- Normalize coverages in cohort by gene and sample, and calculate log2 ratios
- Call amplifications and deletions | [
"Normalization",
"and",
"log2",
"ratio",
"calculation",
"plus",
"CNV",
"calling",
"for",
"full",
"cohort",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L41-L65 | train | 218,493 |
bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | prep_seq2c_bed | def prep_seq2c_bed(data):
"""Selecting the bed file, cleaning, and properly annotating for Seq2C
"""
if dd.get_background_cnv_reference(data, "seq2c"):
bed_file = _background_to_bed(dd.get_background_cnv_reference(data, "seq2c"), data)
else:
bed_file = regions.get_sv_bed(data)
if bed_file:
bed_file = bedutils.clean_file(bed_file, data, prefix="svregions-")
else:
bed_file = bedutils.clean_file(dd.get_variant_regions(data), data)
if not bed_file:
return None
col_num = bt.BedTool(bed_file).field_count()
if col_num < 4:
annotated_file = annotate.add_genes(bed_file, data, max_distance=0)
if annotated_file == bed_file:
raise ValueError("BED file for Seq2C must be annotated with gene names, "
"however the input BED is 3-columns and we have no transcript "
"data to annotate with " + bed_file)
annotated_file = annotate.gene_one_per_line(annotated_file, data)
else:
annotated_file = bed_file
ready_file = "%s-seq2cclean.bed" % (utils.splitext_plus(annotated_file)[0])
if not utils.file_uptodate(ready_file, annotated_file):
bed = bt.BedTool(annotated_file)
if col_num > 4 and col_num != 8:
bed = bed.cut(range(4))
bed = bed.filter(lambda x: x.name not in ["", ".", "-"])
with file_transaction(data, ready_file) as tx_out_file:
bed.saveas(tx_out_file)
logger.debug("Saved Seq2C clean annotated ready input BED into " + ready_file)
return ready_file | python | def prep_seq2c_bed(data):
"""Selecting the bed file, cleaning, and properly annotating for Seq2C
"""
if dd.get_background_cnv_reference(data, "seq2c"):
bed_file = _background_to_bed(dd.get_background_cnv_reference(data, "seq2c"), data)
else:
bed_file = regions.get_sv_bed(data)
if bed_file:
bed_file = bedutils.clean_file(bed_file, data, prefix="svregions-")
else:
bed_file = bedutils.clean_file(dd.get_variant_regions(data), data)
if not bed_file:
return None
col_num = bt.BedTool(bed_file).field_count()
if col_num < 4:
annotated_file = annotate.add_genes(bed_file, data, max_distance=0)
if annotated_file == bed_file:
raise ValueError("BED file for Seq2C must be annotated with gene names, "
"however the input BED is 3-columns and we have no transcript "
"data to annotate with " + bed_file)
annotated_file = annotate.gene_one_per_line(annotated_file, data)
else:
annotated_file = bed_file
ready_file = "%s-seq2cclean.bed" % (utils.splitext_plus(annotated_file)[0])
if not utils.file_uptodate(ready_file, annotated_file):
bed = bt.BedTool(annotated_file)
if col_num > 4 and col_num != 8:
bed = bed.cut(range(4))
bed = bed.filter(lambda x: x.name not in ["", ".", "-"])
with file_transaction(data, ready_file) as tx_out_file:
bed.saveas(tx_out_file)
logger.debug("Saved Seq2C clean annotated ready input BED into " + ready_file)
return ready_file | [
"def",
"prep_seq2c_bed",
"(",
"data",
")",
":",
"if",
"dd",
".",
"get_background_cnv_reference",
"(",
"data",
",",
"\"seq2c\"",
")",
":",
"bed_file",
"=",
"_background_to_bed",
"(",
"dd",
".",
"get_background_cnv_reference",
"(",
"data",
",",
"\"seq2c\"",
")",
... | Selecting the bed file, cleaning, and properly annotating for Seq2C | [
"Selecting",
"the",
"bed",
"file",
"cleaning",
"and",
"properly",
"annotating",
"for",
"Seq2C"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L67-L102 | train | 218,494 |
bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _background_to_bed | def _background_to_bed(back_file, data):
"""Convert a seq2c background file with calls into BED regions for coverage.
seq2c background files are a concatenation of mapping and sample_coverages from
potentially multiple samples. We use the coverage information from the first
sample to translate into BED.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bedprep")),
"%s-regions.bed" % utils.splitext_plus(os.path.basename(back_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(back_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
sample = in_handle.readline().split("\t")[0]
for line in in_handle:
if line.startswith(sample) and len(line.split()) >= 5:
_, gene, chrom, start, end = line.split()[:5]
out_handle.write("%s\n" % ("\t".join([chrom, str(int(start) - 1), end, gene])))
return out_file | python | def _background_to_bed(back_file, data):
"""Convert a seq2c background file with calls into BED regions for coverage.
seq2c background files are a concatenation of mapping and sample_coverages from
potentially multiple samples. We use the coverage information from the first
sample to translate into BED.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bedprep")),
"%s-regions.bed" % utils.splitext_plus(os.path.basename(back_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(back_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
sample = in_handle.readline().split("\t")[0]
for line in in_handle:
if line.startswith(sample) and len(line.split()) >= 5:
_, gene, chrom, start, end = line.split()[:5]
out_handle.write("%s\n" % ("\t".join([chrom, str(int(start) - 1), end, gene])))
return out_file | [
"def",
"_background_to_bed",
"(",
"back_file",
",",
"data",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dd",
".",
"get_work_dir",
"(",
"data",
")",
",",
"\"b... | Convert a seq2c background file with calls into BED regions for coverage.
seq2c background files are a concatenation of mapping and sample_coverages from
potentially multiple samples. We use the coverage information from the first
sample to translate into BED. | [
"Convert",
"a",
"seq2c",
"background",
"file",
"with",
"calls",
"into",
"BED",
"regions",
"for",
"coverage",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L104-L122 | train | 218,495 |
bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _get_seq2c_options | def _get_seq2c_options(data):
"""Get adjustable, through resources, or default options for seq2c.
"""
cov2lr_possible_opts = ["-F"]
defaults = {}
ropts = config_utils.get_resources("seq2c", data["config"]).get("options", [])
assert len(ropts) % 2 == 0, "Expect even number of options for seq2c" % ropts
defaults.update(dict(tz.partition(2, ropts)))
cov2lr_out, lr2gene_out = [], []
for k, v in defaults.items():
if k in cov2lr_possible_opts:
cov2lr_out += [str(k), str(v)]
else:
lr2gene_out += [str(k), str(v)]
return cov2lr_out, lr2gene_out | python | def _get_seq2c_options(data):
"""Get adjustable, through resources, or default options for seq2c.
"""
cov2lr_possible_opts = ["-F"]
defaults = {}
ropts = config_utils.get_resources("seq2c", data["config"]).get("options", [])
assert len(ropts) % 2 == 0, "Expect even number of options for seq2c" % ropts
defaults.update(dict(tz.partition(2, ropts)))
cov2lr_out, lr2gene_out = [], []
for k, v in defaults.items():
if k in cov2lr_possible_opts:
cov2lr_out += [str(k), str(v)]
else:
lr2gene_out += [str(k), str(v)]
return cov2lr_out, lr2gene_out | [
"def",
"_get_seq2c_options",
"(",
"data",
")",
":",
"cov2lr_possible_opts",
"=",
"[",
"\"-F\"",
"]",
"defaults",
"=",
"{",
"}",
"ropts",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"seq2c\"",
",",
"data",
"[",
"\"config\"",
"]",
")",
".",
"get",
"(",... | Get adjustable, through resources, or default options for seq2c. | [
"Get",
"adjustable",
"through",
"resources",
"or",
"default",
"options",
"for",
"seq2c",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L124-L138 | train | 218,496 |
bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | to_vcf | def to_vcf(in_tsv, data):
"""Convert seq2c output file into BED output.
"""
call_convert = {"Amp": "DUP", "Del": "DEL"}
out_file = "%s.vcf" % utils.splitext_plus(in_tsv)[0]
if not utils.file_uptodate(out_file, in_tsv):
with file_transaction(data, out_file) as tx_out_file:
with open(in_tsv) as in_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(VCF_HEADER + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n"
% (dd.get_sample_name(data)))
header = in_handle.readline().split("\t")
for cur in (dict(zip(header, l.split("\t"))) for l in in_handle):
if cur["Amp_Del"] in call_convert:
svtype = call_convert[cur["Amp_Del"]]
info = "SVTYPE=%s;END=%s;SVLEN=%s;FOLD_CHANGE_LOG=%s;PROBES=%s;GENE=%s" % (
svtype, cur["End"], int(cur["End"]) - int(cur["Start"]),
cur["Log2ratio"], cur["Ab_Seg"], cur["Gene"])
out_handle.write("\t".join([cur["Chr"], cur["Start"], ".", "N", "<%s>" % (svtype),
".", ".", info, "GT", "1/1"]) + "\n")
return vcfutils.sort_by_ref(out_file, data) | python | def to_vcf(in_tsv, data):
"""Convert seq2c output file into BED output.
"""
call_convert = {"Amp": "DUP", "Del": "DEL"}
out_file = "%s.vcf" % utils.splitext_plus(in_tsv)[0]
if not utils.file_uptodate(out_file, in_tsv):
with file_transaction(data, out_file) as tx_out_file:
with open(in_tsv) as in_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(VCF_HEADER + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n"
% (dd.get_sample_name(data)))
header = in_handle.readline().split("\t")
for cur in (dict(zip(header, l.split("\t"))) for l in in_handle):
if cur["Amp_Del"] in call_convert:
svtype = call_convert[cur["Amp_Del"]]
info = "SVTYPE=%s;END=%s;SVLEN=%s;FOLD_CHANGE_LOG=%s;PROBES=%s;GENE=%s" % (
svtype, cur["End"], int(cur["End"]) - int(cur["Start"]),
cur["Log2ratio"], cur["Ab_Seg"], cur["Gene"])
out_handle.write("\t".join([cur["Chr"], cur["Start"], ".", "N", "<%s>" % (svtype),
".", ".", info, "GT", "1/1"]) + "\n")
return vcfutils.sort_by_ref(out_file, data) | [
"def",
"to_vcf",
"(",
"in_tsv",
",",
"data",
")",
":",
"call_convert",
"=",
"{",
"\"Amp\"",
":",
"\"DUP\"",
",",
"\"Del\"",
":",
"\"DEL\"",
"}",
"out_file",
"=",
"\"%s.vcf\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_tsv",
")",
"[",
"0",
"]",
"if",... | Convert seq2c output file into BED output. | [
"Convert",
"seq2c",
"output",
"file",
"into",
"BED",
"output",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L198-L218 | train | 218,497 |
bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _combine_coverages | def _combine_coverages(items, work_dir, input_backs=None):
"""Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file.
"""
out_file = os.path.join(work_dir, "sample_coverages.txt")
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_f:
for data in items:
cov_file = tz.get_in(["depth", "bins", "seq2c"], data)
with open(cov_file) as cov_f:
out_f.write(cov_f.read())
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) >= 4:
out_f.write(line)
return out_file | python | def _combine_coverages(items, work_dir, input_backs=None):
"""Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file.
"""
out_file = os.path.join(work_dir, "sample_coverages.txt")
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_f:
for data in items:
cov_file = tz.get_in(["depth", "bins", "seq2c"], data)
with open(cov_file) as cov_f:
out_f.write(cov_f.read())
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) >= 4:
out_f.write(line)
return out_file | [
"def",
"_combine_coverages",
"(",
"items",
",",
"work_dir",
",",
"input_backs",
"=",
"None",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"sample_coverages.txt\"",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"o... | Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file. | [
"Combine",
"coverage",
"cnns",
"calculated",
"for",
"individual",
"inputs",
"into",
"single",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L290-L309 | train | 218,498 |
bcbio/bcbio-nextgen | bcbio/structural/seq2c.py | _calculate_mapping_reads | def _calculate_mapping_reads(items, work_dir, input_backs=None):
"""Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file.
"""
out_file = os.path.join(work_dir, "mapping_reads.txt")
if not utils.file_exists(out_file):
lines = []
for data in items:
count = 0
for line in subprocess.check_output([
"samtools", "idxstats", dd.get_align_bam(data)]).decode().split("\n"):
if line.strip():
count += int(line.split("\t")[2])
lines.append("%s\t%s" % (dd.get_sample_name(data), count))
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("\n".join(lines) + "\n")
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
out_handle.write(line)
return out_file | python | def _calculate_mapping_reads(items, work_dir, input_backs=None):
"""Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file.
"""
out_file = os.path.join(work_dir, "mapping_reads.txt")
if not utils.file_exists(out_file):
lines = []
for data in items:
count = 0
for line in subprocess.check_output([
"samtools", "idxstats", dd.get_align_bam(data)]).decode().split("\n"):
if line.strip():
count += int(line.split("\t")[2])
lines.append("%s\t%s" % (dd.get_sample_name(data), count))
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("\n".join(lines) + "\n")
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) == 2:
out_handle.write(line)
return out_file | [
"def",
"_calculate_mapping_reads",
"(",
"items",
",",
"work_dir",
",",
"input_backs",
"=",
"None",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"mapping_reads.txt\"",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
... | Calculate read counts from samtools idxstats for each sample.
Optionally moves over pre-calculated mapping counts from a background file. | [
"Calculate",
"read",
"counts",
"from",
"samtools",
"idxstats",
"for",
"each",
"sample",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/seq2c.py#L311-L335 | train | 218,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.