repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
bcbio/bcbio-nextgen | bcbio/structural/shared.py | calc_paired_insert_stats_save | def calc_paired_insert_stats_save(in_bam, stat_file, nsample=1000000):
"""Calculate paired stats, saving to a file for re-runs.
"""
if utils.file_exists(stat_file):
with open(stat_file) as in_handle:
return yaml.safe_load(in_handle)
else:
stats = calc_paired_insert_stats(in_bam, nsample)
with open(stat_file, "w") as out_handle:
yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
return stats | python | def calc_paired_insert_stats_save(in_bam, stat_file, nsample=1000000):
"""Calculate paired stats, saving to a file for re-runs.
"""
if utils.file_exists(stat_file):
with open(stat_file) as in_handle:
return yaml.safe_load(in_handle)
else:
stats = calc_paired_insert_stats(in_bam, nsample)
with open(stat_file, "w") as out_handle:
yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
return stats | [
"def",
"calc_paired_insert_stats_save",
"(",
"in_bam",
",",
"stat_file",
",",
"nsample",
"=",
"1000000",
")",
":",
"if",
"utils",
".",
"file_exists",
"(",
"stat_file",
")",
":",
"with",
"open",
"(",
"stat_file",
")",
"as",
"in_handle",
":",
"return",
"yaml",... | Calculate paired stats, saving to a file for re-runs. | [
"Calculate",
"paired",
"stats",
"saving",
"to",
"a",
"file",
"for",
"re",
"-",
"runs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/shared.py#L321-L331 | train | 219,000 |
bcbio/bcbio-nextgen | scripts/utils/upload_to_synapse.py | _accumulate_remotes | def _accumulate_remotes(synapse_parent_id, syn):
"""Retrieve references to all remote directories and files.
"""
remotes = {}
s_base_folder = syn.get(synapse_parent_id)
for (s_dirpath, s_dirpath_id), _, s_filenames in synapseutils.walk(syn, synapse_parent_id):
remotes[s_dirpath] = s_dirpath_id
if s_filenames:
for s_filename, s_filename_id in s_filenames:
remotes[os.path.join(s_dirpath, s_filename)] = s_filename_id
return s_base_folder, remotes | python | def _accumulate_remotes(synapse_parent_id, syn):
"""Retrieve references to all remote directories and files.
"""
remotes = {}
s_base_folder = syn.get(synapse_parent_id)
for (s_dirpath, s_dirpath_id), _, s_filenames in synapseutils.walk(syn, synapse_parent_id):
remotes[s_dirpath] = s_dirpath_id
if s_filenames:
for s_filename, s_filename_id in s_filenames:
remotes[os.path.join(s_dirpath, s_filename)] = s_filename_id
return s_base_folder, remotes | [
"def",
"_accumulate_remotes",
"(",
"synapse_parent_id",
",",
"syn",
")",
":",
"remotes",
"=",
"{",
"}",
"s_base_folder",
"=",
"syn",
".",
"get",
"(",
"synapse_parent_id",
")",
"for",
"(",
"s_dirpath",
",",
"s_dirpath_id",
")",
",",
"_",
",",
"s_filenames",
... | Retrieve references to all remote directories and files. | [
"Retrieve",
"references",
"to",
"all",
"remote",
"directories",
"and",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/upload_to_synapse.py#L38-L48 | train | 219,001 |
bcbio/bcbio-nextgen | scripts/utils/upload_to_synapse.py | _remote_folder | def _remote_folder(dirpath, remotes, syn):
"""Retrieve the remote folder for files, creating if necessary.
"""
if dirpath in remotes:
return remotes[dirpath], remotes
else:
parent_dir, cur_dir = os.path.split(dirpath)
parent_folder, remotes = _remote_folder(parent_dir, remotes, syn)
s_cur_dir = syn.store(synapseclient.Folder(cur_dir, parent=parent_folder))
remotes[dirpath] = s_cur_dir.id
return s_cur_dir.id, remotes | python | def _remote_folder(dirpath, remotes, syn):
"""Retrieve the remote folder for files, creating if necessary.
"""
if dirpath in remotes:
return remotes[dirpath], remotes
else:
parent_dir, cur_dir = os.path.split(dirpath)
parent_folder, remotes = _remote_folder(parent_dir, remotes, syn)
s_cur_dir = syn.store(synapseclient.Folder(cur_dir, parent=parent_folder))
remotes[dirpath] = s_cur_dir.id
return s_cur_dir.id, remotes | [
"def",
"_remote_folder",
"(",
"dirpath",
",",
"remotes",
",",
"syn",
")",
":",
"if",
"dirpath",
"in",
"remotes",
":",
"return",
"remotes",
"[",
"dirpath",
"]",
",",
"remotes",
"else",
":",
"parent_dir",
",",
"cur_dir",
"=",
"os",
".",
"path",
".",
"spl... | Retrieve the remote folder for files, creating if necessary. | [
"Retrieve",
"the",
"remote",
"folder",
"for",
"files",
"creating",
"if",
"necessary",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/upload_to_synapse.py#L50-L60 | train | 219,002 |
bcbio/bcbio-nextgen | bcbio/structural/wham.py | run | def run(items, background=None):
"""Detect copy number variations from batched set of samples using WHAM.
"""
if not background: background = []
background_bams = []
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
if paired:
inputs = [paired.tumor_data]
if paired.normal_bam:
background = [paired.normal_data]
background_bams = [paired.normal_bam]
else:
assert not background
inputs, background = shared.find_case_control(items)
background_bams = [x["align_bam"] for x in background]
orig_vcf = _run_wham(inputs, background_bams)
out = []
for data in inputs:
if "sv" not in data:
data["sv"] = []
final_vcf = shared.finalize_sv(orig_vcf, data, items)
data["sv"].append({"variantcaller": "wham", "vrn_file": final_vcf})
out.append(data)
return out | python | def run(items, background=None):
"""Detect copy number variations from batched set of samples using WHAM.
"""
if not background: background = []
background_bams = []
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
if paired:
inputs = [paired.tumor_data]
if paired.normal_bam:
background = [paired.normal_data]
background_bams = [paired.normal_bam]
else:
assert not background
inputs, background = shared.find_case_control(items)
background_bams = [x["align_bam"] for x in background]
orig_vcf = _run_wham(inputs, background_bams)
out = []
for data in inputs:
if "sv" not in data:
data["sv"] = []
final_vcf = shared.finalize_sv(orig_vcf, data, items)
data["sv"].append({"variantcaller": "wham", "vrn_file": final_vcf})
out.append(data)
return out | [
"def",
"run",
"(",
"items",
",",
"background",
"=",
"None",
")",
":",
"if",
"not",
"background",
":",
"background",
"=",
"[",
"]",
"background_bams",
"=",
"[",
"]",
"paired",
"=",
"vcfutils",
".",
"get_paired_bams",
"(",
"[",
"x",
"[",
"\"align_bam\"",
... | Detect copy number variations from batched set of samples using WHAM. | [
"Detect",
"copy",
"number",
"variations",
"from",
"batched",
"set",
"of",
"samples",
"using",
"WHAM",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/wham.py#L19-L42 | train | 219,003 |
bcbio/bcbio-nextgen | bcbio/structural/wham.py | _run_wham | def _run_wham(inputs, background_bams):
"""Run WHAM on a defined set of inputs and targets.
"""
out_file = os.path.join(_sv_workdir(inputs[0]), "%s-wham.vcf.gz" % dd.get_sample_name(inputs[0]))
if not utils.file_exists(out_file):
with file_transaction(inputs[0], out_file) as tx_out_file:
cores = dd.get_cores(inputs[0])
ref_file = dd.get_ref_file(inputs[0])
include_chroms = ",".join([c.name for c in ref.file_contigs(ref_file)
if chromhacks.is_autosomal_or_x(c.name)])
all_bams = ",".join([x["align_bam"] for x in inputs] + background_bams)
cmd = ("whamg -x {cores} -a {ref_file} -f {all_bams} -c {include_chroms} "
"| bgzip -c > {tx_out_file}")
do.run(cmd.format(**locals()), "WHAM SV caller: %s" % ", ".join(dd.get_sample_name(d) for d in inputs))
return vcfutils.bgzip_and_index(out_file, inputs[0]["config"]) | python | def _run_wham(inputs, background_bams):
"""Run WHAM on a defined set of inputs and targets.
"""
out_file = os.path.join(_sv_workdir(inputs[0]), "%s-wham.vcf.gz" % dd.get_sample_name(inputs[0]))
if not utils.file_exists(out_file):
with file_transaction(inputs[0], out_file) as tx_out_file:
cores = dd.get_cores(inputs[0])
ref_file = dd.get_ref_file(inputs[0])
include_chroms = ",".join([c.name for c in ref.file_contigs(ref_file)
if chromhacks.is_autosomal_or_x(c.name)])
all_bams = ",".join([x["align_bam"] for x in inputs] + background_bams)
cmd = ("whamg -x {cores} -a {ref_file} -f {all_bams} -c {include_chroms} "
"| bgzip -c > {tx_out_file}")
do.run(cmd.format(**locals()), "WHAM SV caller: %s" % ", ".join(dd.get_sample_name(d) for d in inputs))
return vcfutils.bgzip_and_index(out_file, inputs[0]["config"]) | [
"def",
"_run_wham",
"(",
"inputs",
",",
"background_bams",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_sv_workdir",
"(",
"inputs",
"[",
"0",
"]",
")",
",",
"\"%s-wham.vcf.gz\"",
"%",
"dd",
".",
"get_sample_name",
"(",
"inputs",
"[",... | Run WHAM on a defined set of inputs and targets. | [
"Run",
"WHAM",
"on",
"a",
"defined",
"set",
"of",
"inputs",
"and",
"targets",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/wham.py#L48-L62 | train | 219,004 |
bcbio/bcbio-nextgen | bcbio/structural/wham.py | filter_by_background | def filter_by_background(in_vcf, full_vcf, background, data):
"""Filter SV calls also present in background samples.
Skips filtering of inversions, which are not characterized differently
between cases and controls in test datasets.
"""
Filter = collections.namedtuple('Filter', ['id', 'desc'])
back_filter = Filter(id='InBackground',
desc='Rejected due to presence in background sample')
out_file = "%s-filter.vcf" % utils.splitext_plus(in_vcf)[0]
if not utils.file_uptodate(out_file, in_vcf) and not utils.file_uptodate(out_file + ".vcf.gz", in_vcf):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
reader = vcf.VCFReader(filename=in_vcf)
reader.filters["InBackground"] = back_filter
full_reader = vcf.VCFReader(filename=full_vcf)
writer = vcf.VCFWriter(out_handle, template=reader)
for out_rec, rec in zip(reader, full_reader):
rec_type = rec.genotype(dd.get_sample_name(data)).gt_type
if rec_type == 0 or any(rec_type == rec.genotype(dd.get_sample_name(x)).gt_type
for x in background):
out_rec.add_filter("InBackground")
writer.write_record(out_rec)
return vcfutils.bgzip_and_index(out_file, data["config"]) | python | def filter_by_background(in_vcf, full_vcf, background, data):
"""Filter SV calls also present in background samples.
Skips filtering of inversions, which are not characterized differently
between cases and controls in test datasets.
"""
Filter = collections.namedtuple('Filter', ['id', 'desc'])
back_filter = Filter(id='InBackground',
desc='Rejected due to presence in background sample')
out_file = "%s-filter.vcf" % utils.splitext_plus(in_vcf)[0]
if not utils.file_uptodate(out_file, in_vcf) and not utils.file_uptodate(out_file + ".vcf.gz", in_vcf):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
reader = vcf.VCFReader(filename=in_vcf)
reader.filters["InBackground"] = back_filter
full_reader = vcf.VCFReader(filename=full_vcf)
writer = vcf.VCFWriter(out_handle, template=reader)
for out_rec, rec in zip(reader, full_reader):
rec_type = rec.genotype(dd.get_sample_name(data)).gt_type
if rec_type == 0 or any(rec_type == rec.genotype(dd.get_sample_name(x)).gt_type
for x in background):
out_rec.add_filter("InBackground")
writer.write_record(out_rec)
return vcfutils.bgzip_and_index(out_file, data["config"]) | [
"def",
"filter_by_background",
"(",
"in_vcf",
",",
"full_vcf",
",",
"background",
",",
"data",
")",
":",
"Filter",
"=",
"collections",
".",
"namedtuple",
"(",
"'Filter'",
",",
"[",
"'id'",
",",
"'desc'",
"]",
")",
"back_filter",
"=",
"Filter",
"(",
"id",
... | Filter SV calls also present in background samples.
Skips filtering of inversions, which are not characterized differently
between cases and controls in test datasets. | [
"Filter",
"SV",
"calls",
"also",
"present",
"in",
"background",
"samples",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/wham.py#L64-L87 | train | 219,005 |
bcbio/bcbio-nextgen | bcbio/variation/gatkjoint.py | run_region | def run_region(data, region, vrn_files, out_file):
"""Perform variant calling on gVCF inputs in a specific genomic region.
"""
broad_runner = broad.runner_from_config(data["config"])
if broad_runner.gatk_type() == "gatk4":
genomics_db = _run_genomicsdb_import(vrn_files, region, out_file, data)
return _run_genotype_gvcfs_genomicsdb(genomics_db, region, out_file, data)
else:
vrn_files = _batch_gvcfs(data, region, vrn_files, dd.get_ref_file(data), out_file)
return _run_genotype_gvcfs_gatk3(data, region, vrn_files, dd.get_ref_file(data), out_file) | python | def run_region(data, region, vrn_files, out_file):
"""Perform variant calling on gVCF inputs in a specific genomic region.
"""
broad_runner = broad.runner_from_config(data["config"])
if broad_runner.gatk_type() == "gatk4":
genomics_db = _run_genomicsdb_import(vrn_files, region, out_file, data)
return _run_genotype_gvcfs_genomicsdb(genomics_db, region, out_file, data)
else:
vrn_files = _batch_gvcfs(data, region, vrn_files, dd.get_ref_file(data), out_file)
return _run_genotype_gvcfs_gatk3(data, region, vrn_files, dd.get_ref_file(data), out_file) | [
"def",
"run_region",
"(",
"data",
",",
"region",
",",
"vrn_files",
",",
"out_file",
")",
":",
"broad_runner",
"=",
"broad",
".",
"runner_from_config",
"(",
"data",
"[",
"\"config\"",
"]",
")",
"if",
"broad_runner",
".",
"gatk_type",
"(",
")",
"==",
"\"gatk... | Perform variant calling on gVCF inputs in a specific genomic region. | [
"Perform",
"variant",
"calling",
"on",
"gVCF",
"inputs",
"in",
"a",
"specific",
"genomic",
"region",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkjoint.py#L18-L27 | train | 219,006 |
bcbio/bcbio-nextgen | bcbio/variation/gatkjoint.py | _incomplete_genomicsdb | def _incomplete_genomicsdb(dbdir):
"""Check if a GenomicsDB output is incomplete and we should regenerate.
Works around current inability to move GenomicsDB outputs and support
transactional directories.
"""
for test_file in ["callset.json", "vidmap.json", "genomicsdb_array/genomicsdb_meta.json"]:
if not os.path.exists(os.path.join(dbdir, test_file)):
return True
return False | python | def _incomplete_genomicsdb(dbdir):
"""Check if a GenomicsDB output is incomplete and we should regenerate.
Works around current inability to move GenomicsDB outputs and support
transactional directories.
"""
for test_file in ["callset.json", "vidmap.json", "genomicsdb_array/genomicsdb_meta.json"]:
if not os.path.exists(os.path.join(dbdir, test_file)):
return True
return False | [
"def",
"_incomplete_genomicsdb",
"(",
"dbdir",
")",
":",
"for",
"test_file",
"in",
"[",
"\"callset.json\"",
",",
"\"vidmap.json\"",
",",
"\"genomicsdb_array/genomicsdb_meta.json\"",
"]",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",... | Check if a GenomicsDB output is incomplete and we should regenerate.
Works around current inability to move GenomicsDB outputs and support
transactional directories. | [
"Check",
"if",
"a",
"GenomicsDB",
"output",
"is",
"incomplete",
"and",
"we",
"should",
"regenerate",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkjoint.py#L64-L73 | train | 219,007 |
bcbio/bcbio-nextgen | bcbio/variation/gatkjoint.py | _run_genotype_gvcfs_gatk3 | def _run_genotype_gvcfs_gatk3(data, region, vrn_files, ref_file, out_file):
"""Performs genotyping of gVCFs into final VCF files.
"""
if not utils.file_exists(out_file):
broad_runner = broad.runner_from_config(data["config"])
with file_transaction(data, out_file) as tx_out_file:
assoc_files = tz.get_in(("genome_resources", "variation"), data, {})
if not assoc_files: assoc_files = {}
params = ["-T", "GenotypeGVCFs",
"-R", ref_file, "-o", tx_out_file,
"-L", bamprep.region_to_gatk(region),
"--max_alternate_alleles", "4"]
for vrn_file in vrn_files:
params += ["--variant", vrn_file]
if assoc_files.get("dbsnp"):
params += ["--dbsnp", assoc_files["dbsnp"]]
broad_runner.new_resources("gatk-haplotype")
cores = dd.get_cores(data)
if cores > 1:
# GATK performs poorly with memory usage when parallelizing
# with a large number of cores but makes use of extra memory,
# so we cap at 6 cores.
# See issue #1565 for discussion
# Recent GATK 3.x versions also have race conditions with multiple
# threads, so limit to 1 and keep memory available
# https://gatkforums.broadinstitute.org/wdl/discussion/8718/concurrentmodificationexception-in-gatk-3-7-genotypegvcfs
# params += ["-nt", str(min(6, cores))]
memscale = {"magnitude": 0.9 * cores, "direction": "increase"}
else:
memscale = None
broad_runner.run_gatk(params, memscale=memscale, parallel_gc=True)
return vcfutils.bgzip_and_index(out_file, data["config"]) | python | def _run_genotype_gvcfs_gatk3(data, region, vrn_files, ref_file, out_file):
"""Performs genotyping of gVCFs into final VCF files.
"""
if not utils.file_exists(out_file):
broad_runner = broad.runner_from_config(data["config"])
with file_transaction(data, out_file) as tx_out_file:
assoc_files = tz.get_in(("genome_resources", "variation"), data, {})
if not assoc_files: assoc_files = {}
params = ["-T", "GenotypeGVCFs",
"-R", ref_file, "-o", tx_out_file,
"-L", bamprep.region_to_gatk(region),
"--max_alternate_alleles", "4"]
for vrn_file in vrn_files:
params += ["--variant", vrn_file]
if assoc_files.get("dbsnp"):
params += ["--dbsnp", assoc_files["dbsnp"]]
broad_runner.new_resources("gatk-haplotype")
cores = dd.get_cores(data)
if cores > 1:
# GATK performs poorly with memory usage when parallelizing
# with a large number of cores but makes use of extra memory,
# so we cap at 6 cores.
# See issue #1565 for discussion
# Recent GATK 3.x versions also have race conditions with multiple
# threads, so limit to 1 and keep memory available
# https://gatkforums.broadinstitute.org/wdl/discussion/8718/concurrentmodificationexception-in-gatk-3-7-genotypegvcfs
# params += ["-nt", str(min(6, cores))]
memscale = {"magnitude": 0.9 * cores, "direction": "increase"}
else:
memscale = None
broad_runner.run_gatk(params, memscale=memscale, parallel_gc=True)
return vcfutils.bgzip_and_index(out_file, data["config"]) | [
"def",
"_run_genotype_gvcfs_gatk3",
"(",
"data",
",",
"region",
",",
"vrn_files",
",",
"ref_file",
",",
"out_file",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"broad_runner",
"=",
"broad",
".",
"runner_from_config",
"(",
"... | Performs genotyping of gVCFs into final VCF files. | [
"Performs",
"genotyping",
"of",
"gVCFs",
"into",
"final",
"VCF",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkjoint.py#L102-L133 | train | 219,008 |
bcbio/bcbio-nextgen | bcbio/variation/gatkjoint.py | _batch_gvcfs | def _batch_gvcfs(data, region, vrn_files, ref_file, out_file=None):
"""Perform batching of gVCF files if above recommended input count.
"""
if out_file is None:
out_file = vrn_files[0]
# group to get below the maximum batch size, using 200 as the baseline
max_batch = int(dd.get_joint_group_size(data))
if len(vrn_files) > max_batch:
out = []
num_batches = int(math.ceil(float(len(vrn_files)) / max_batch))
for i, batch_vrn_files in enumerate(tz.partition_all(num_batches, vrn_files)):
base, ext = utils.splitext_plus(out_file)
batch_out_file = "%s-b%s%s" % (base, i, ext)
out.append(run_combine_gvcfs(batch_vrn_files, region, ref_file, batch_out_file, data))
return _batch_gvcfs(data, region, out, ref_file)
else:
return vrn_files | python | def _batch_gvcfs(data, region, vrn_files, ref_file, out_file=None):
"""Perform batching of gVCF files if above recommended input count.
"""
if out_file is None:
out_file = vrn_files[0]
# group to get below the maximum batch size, using 200 as the baseline
max_batch = int(dd.get_joint_group_size(data))
if len(vrn_files) > max_batch:
out = []
num_batches = int(math.ceil(float(len(vrn_files)) / max_batch))
for i, batch_vrn_files in enumerate(tz.partition_all(num_batches, vrn_files)):
base, ext = utils.splitext_plus(out_file)
batch_out_file = "%s-b%s%s" % (base, i, ext)
out.append(run_combine_gvcfs(batch_vrn_files, region, ref_file, batch_out_file, data))
return _batch_gvcfs(data, region, out, ref_file)
else:
return vrn_files | [
"def",
"_batch_gvcfs",
"(",
"data",
",",
"region",
",",
"vrn_files",
",",
"ref_file",
",",
"out_file",
"=",
"None",
")",
":",
"if",
"out_file",
"is",
"None",
":",
"out_file",
"=",
"vrn_files",
"[",
"0",
"]",
"# group to get below the maximum batch size, using 20... | Perform batching of gVCF files if above recommended input count. | [
"Perform",
"batching",
"of",
"gVCF",
"files",
"if",
"above",
"recommended",
"input",
"count",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkjoint.py#L137-L153 | train | 219,009 |
bcbio/bcbio-nextgen | bcbio/qc/preseq.py | _get_preseq_params | def _get_preseq_params(data, preseq_cmd, read_count):
""" Get parameters through resources.
If "step" or "extrap" limit are not provided, then calculate optimal values based on read count.
"""
defaults = {
'seg_len': 100000, # maximum segment length when merging paired end bam reads
'steps': 300, # number of points on the plot
'extrap_fraction': 3, # extrapolate up to X times read_count
'extrap': None, # extrapolate up to X reads
'step': None, # step size (number of reads between points on the plot)
'options': '',
}
params = {}
main_opts = [("-e", "-extrap"), ("-l", "-seg_len"), ("-s", "-step")]
other_opts = config_utils.get_resources("preseq", data["config"]).get("options", [])
if isinstance(other_opts, str):
other_opts = [other_opts]
for sht, lng in main_opts:
if sht in other_opts:
i = other_opts.index(sht)
elif lng in other_opts:
i = other_opts.index(lng)
else:
i = None
if i is not None:
params[lng[1:]] = other_opts[i + 1]
other_opts = other_opts[:i] + other_opts[i + 2:]
params['options'] = ' '.join(other_opts)
for k, v in config_utils.get_resources("preseq", data["config"]).items():
if k != 'options':
params[k] = v
params['steps'] = params.get('steps', defaults['steps'])
if preseq_cmd == 'c_curve':
params['extrap_fraction'] = 1
else:
if params.get('step') is None:
if params.get('extrap') is None:
unrounded__extrap = read_count * params.get('extrap_fraction', defaults['extrap_fraction'])
unrounded__step = unrounded__extrap // params['steps']
if params.get('extrap_fraction') is not None: # extrap_fraction explicitly provided
params['extrap'] = unrounded__extrap
params['step'] = unrounded__step
else:
power_of_10 = 10 ** math.floor(math.log(unrounded__step, 10))
rounded__step = int(math.floor(unrounded__step // power_of_10) * power_of_10)
rounded__extrap = int(rounded__step) * params['steps']
params['step'] = rounded__step
params['extrap'] = rounded__extrap
else:
params['step'] = params['extrap'] // params['steps']
elif params.get('extrap') is None:
params['extrap'] = params['step'] * params['steps']
params['step'] = params.get('step', defaults['step'])
params['extrap'] = params.get('extrap', defaults['extrap'])
params['seg_len'] = params.get('seg_len', defaults['seg_len'])
logger.info("Preseq: running {steps} steps of size {step}, extap limit {extrap}".format(**params))
return params | python | def _get_preseq_params(data, preseq_cmd, read_count):
""" Get parameters through resources.
If "step" or "extrap" limit are not provided, then calculate optimal values based on read count.
"""
defaults = {
'seg_len': 100000, # maximum segment length when merging paired end bam reads
'steps': 300, # number of points on the plot
'extrap_fraction': 3, # extrapolate up to X times read_count
'extrap': None, # extrapolate up to X reads
'step': None, # step size (number of reads between points on the plot)
'options': '',
}
params = {}
main_opts = [("-e", "-extrap"), ("-l", "-seg_len"), ("-s", "-step")]
other_opts = config_utils.get_resources("preseq", data["config"]).get("options", [])
if isinstance(other_opts, str):
other_opts = [other_opts]
for sht, lng in main_opts:
if sht in other_opts:
i = other_opts.index(sht)
elif lng in other_opts:
i = other_opts.index(lng)
else:
i = None
if i is not None:
params[lng[1:]] = other_opts[i + 1]
other_opts = other_opts[:i] + other_opts[i + 2:]
params['options'] = ' '.join(other_opts)
for k, v in config_utils.get_resources("preseq", data["config"]).items():
if k != 'options':
params[k] = v
params['steps'] = params.get('steps', defaults['steps'])
if preseq_cmd == 'c_curve':
params['extrap_fraction'] = 1
else:
if params.get('step') is None:
if params.get('extrap') is None:
unrounded__extrap = read_count * params.get('extrap_fraction', defaults['extrap_fraction'])
unrounded__step = unrounded__extrap // params['steps']
if params.get('extrap_fraction') is not None: # extrap_fraction explicitly provided
params['extrap'] = unrounded__extrap
params['step'] = unrounded__step
else:
power_of_10 = 10 ** math.floor(math.log(unrounded__step, 10))
rounded__step = int(math.floor(unrounded__step // power_of_10) * power_of_10)
rounded__extrap = int(rounded__step) * params['steps']
params['step'] = rounded__step
params['extrap'] = rounded__extrap
else:
params['step'] = params['extrap'] // params['steps']
elif params.get('extrap') is None:
params['extrap'] = params['step'] * params['steps']
params['step'] = params.get('step', defaults['step'])
params['extrap'] = params.get('extrap', defaults['extrap'])
params['seg_len'] = params.get('seg_len', defaults['seg_len'])
logger.info("Preseq: running {steps} steps of size {step}, extap limit {extrap}".format(**params))
return params | [
"def",
"_get_preseq_params",
"(",
"data",
",",
"preseq_cmd",
",",
"read_count",
")",
":",
"defaults",
"=",
"{",
"'seg_len'",
":",
"100000",
",",
"# maximum segment length when merging paired end bam reads",
"'steps'",
":",
"300",
",",
"# number of points on the plot",
"... | Get parameters through resources.
If "step" or "extrap" limit are not provided, then calculate optimal values based on read count. | [
"Get",
"parameters",
"through",
"resources",
".",
"If",
"step",
"or",
"extrap",
"limit",
"are",
"not",
"provided",
"then",
"calculate",
"optimal",
"values",
"based",
"on",
"read",
"count",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/preseq.py#L51-L114 | train | 219,010 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | split_somatic | def split_somatic(items):
"""Split somatic batches, adding a germline target.
Enables separate germline calling of samples using shared alignments.
"""
items = [_clean_flat_variantcaller(x) for x in items]
somatic_groups, somatic, non_somatic = vcfutils.somatic_batches(items)
# extract germline samples to run from normals in tumor/normal pairs
germline_added = set([])
germline = []
for somatic_group in somatic_groups:
paired = vcfutils.get_paired(somatic_group)
if paired and paired.normal_data:
cur = utils.deepish_copy(paired.normal_data)
vc = dd.get_variantcaller(cur)
if isinstance(vc, dict) and "germline" in vc:
if cur["description"] not in germline_added:
germline_added.add(cur["description"])
cur["rgnames"]["sample"] = cur["description"]
cur["metadata"]["batch"] = "%s-germline" % cur["description"]
cur["metadata"]["phenotype"] = "germline"
cur = remove_align_qc_tools(cur)
cur["config"]["algorithm"]["variantcaller"] = vc["germline"]
germline.append(cur)
# Fix variantcalling specification for only somatic targets
somatic_out = []
for data in somatic:
vc = dd.get_variantcaller(data)
if isinstance(vc, dict) and "somatic" in vc:
data["config"]["algorithm"]["variantcaller"] = vc["somatic"]
somatic_out.append(data)
return non_somatic + somatic_out + germline | python | def split_somatic(items):
"""Split somatic batches, adding a germline target.
Enables separate germline calling of samples using shared alignments.
"""
items = [_clean_flat_variantcaller(x) for x in items]
somatic_groups, somatic, non_somatic = vcfutils.somatic_batches(items)
# extract germline samples to run from normals in tumor/normal pairs
germline_added = set([])
germline = []
for somatic_group in somatic_groups:
paired = vcfutils.get_paired(somatic_group)
if paired and paired.normal_data:
cur = utils.deepish_copy(paired.normal_data)
vc = dd.get_variantcaller(cur)
if isinstance(vc, dict) and "germline" in vc:
if cur["description"] not in germline_added:
germline_added.add(cur["description"])
cur["rgnames"]["sample"] = cur["description"]
cur["metadata"]["batch"] = "%s-germline" % cur["description"]
cur["metadata"]["phenotype"] = "germline"
cur = remove_align_qc_tools(cur)
cur["config"]["algorithm"]["variantcaller"] = vc["germline"]
germline.append(cur)
# Fix variantcalling specification for only somatic targets
somatic_out = []
for data in somatic:
vc = dd.get_variantcaller(data)
if isinstance(vc, dict) and "somatic" in vc:
data["config"]["algorithm"]["variantcaller"] = vc["somatic"]
somatic_out.append(data)
return non_somatic + somatic_out + germline | [
"def",
"split_somatic",
"(",
"items",
")",
":",
"items",
"=",
"[",
"_clean_flat_variantcaller",
"(",
"x",
")",
"for",
"x",
"in",
"items",
"]",
"somatic_groups",
",",
"somatic",
",",
"non_somatic",
"=",
"vcfutils",
".",
"somatic_batches",
"(",
"items",
")",
... | Split somatic batches, adding a germline target.
Enables separate germline calling of samples using shared alignments. | [
"Split",
"somatic",
"batches",
"adding",
"a",
"germline",
"target",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L19-L50 | train | 219,011 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | _clean_flat_variantcaller | def _clean_flat_variantcaller(data):
"""Convert flattened dictionary from CWL representation into dictionary.
CWL flattens somatic/germline tags into a set of strings, which we
reconstitute as a dictionary.
"""
vc = dd.get_variantcaller(data)
if isinstance(vc, (list, tuple)) and all([x.count(":") == 1 for x in vc]):
out = {}
for v in vc:
k, v = v.split(":")
if k in out:
out[k].append(v)
else:
out[k] = [v]
data = dd.set_variantcaller(data, out)
return data | python | def _clean_flat_variantcaller(data):
"""Convert flattened dictionary from CWL representation into dictionary.
CWL flattens somatic/germline tags into a set of strings, which we
reconstitute as a dictionary.
"""
vc = dd.get_variantcaller(data)
if isinstance(vc, (list, tuple)) and all([x.count(":") == 1 for x in vc]):
out = {}
for v in vc:
k, v = v.split(":")
if k in out:
out[k].append(v)
else:
out[k] = [v]
data = dd.set_variantcaller(data, out)
return data | [
"def",
"_clean_flat_variantcaller",
"(",
"data",
")",
":",
"vc",
"=",
"dd",
".",
"get_variantcaller",
"(",
"data",
")",
"if",
"isinstance",
"(",
"vc",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"all",
"(",
"[",
"x",
".",
"count",
"(",
"\":\"",
... | Convert flattened dictionary from CWL representation into dictionary.
CWL flattens somatic/germline tags into a set of strings, which we
reconstitute as a dictionary. | [
"Convert",
"flattened",
"dictionary",
"from",
"CWL",
"representation",
"into",
"dictionary",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L52-L68 | train | 219,012 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | remove_align_qc_tools | def remove_align_qc_tools(data):
"""Remove alignment based QC tools we don't need for data replicates.
When we do multiple variant calling on a sample file (somatic/germline),
avoid re-running QC.
"""
align_qc = set(["qsignature", "coverage", "picard", "samtools", "fastqc"])
data["config"]["algorithm"]["qc"] = [t for t in dd.get_algorithm_qc(data)
if t not in align_qc]
return data | python | def remove_align_qc_tools(data):
"""Remove alignment based QC tools we don't need for data replicates.
When we do multiple variant calling on a sample file (somatic/germline),
avoid re-running QC.
"""
align_qc = set(["qsignature", "coverage", "picard", "samtools", "fastqc"])
data["config"]["algorithm"]["qc"] = [t for t in dd.get_algorithm_qc(data)
if t not in align_qc]
return data | [
"def",
"remove_align_qc_tools",
"(",
"data",
")",
":",
"align_qc",
"=",
"set",
"(",
"[",
"\"qsignature\"",
",",
"\"coverage\"",
",",
"\"picard\"",
",",
"\"samtools\"",
",",
"\"fastqc\"",
"]",
")",
"data",
"[",
"\"config\"",
"]",
"[",
"\"algorithm\"",
"]",
"[... | Remove alignment based QC tools we don't need for data replicates.
When we do multiple variant calling on a sample file (somatic/germline),
avoid re-running QC. | [
"Remove",
"alignment",
"based",
"QC",
"tools",
"we",
"don",
"t",
"need",
"for",
"data",
"replicates",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L70-L79 | train | 219,013 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | extract | def extract(data, items, out_dir=None):
"""Extract germline calls for the given sample, if tumor only.
"""
if vcfutils.get_paired_phenotype(data):
if len(items) == 1:
germline_vcf = _remove_prioritization(data["vrn_file"], data, out_dir)
germline_vcf = vcfutils.bgzip_and_index(germline_vcf, data["config"])
data["vrn_file_plus"] = {"germline": germline_vcf}
return data | python | def extract(data, items, out_dir=None):
"""Extract germline calls for the given sample, if tumor only.
"""
if vcfutils.get_paired_phenotype(data):
if len(items) == 1:
germline_vcf = _remove_prioritization(data["vrn_file"], data, out_dir)
germline_vcf = vcfutils.bgzip_and_index(germline_vcf, data["config"])
data["vrn_file_plus"] = {"germline": germline_vcf}
return data | [
"def",
"extract",
"(",
"data",
",",
"items",
",",
"out_dir",
"=",
"None",
")",
":",
"if",
"vcfutils",
".",
"get_paired_phenotype",
"(",
"data",
")",
":",
"if",
"len",
"(",
"items",
")",
"==",
"1",
":",
"germline_vcf",
"=",
"_remove_prioritization",
"(",
... | Extract germline calls for the given sample, if tumor only. | [
"Extract",
"germline",
"calls",
"for",
"the",
"given",
"sample",
"if",
"tumor",
"only",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L81-L89 | train | 219,014 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | fix_germline_samplename | def fix_germline_samplename(in_file, sample_name, data):
"""Replace germline sample names, originally from normal BAM file.
"""
out_file = "%s-fixnames%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
sample_file = "%s-samples.txt" % utils.splitext_plus(tx_out_file)[0]
with open(sample_file, "w") as out_handle:
out_handle.write("%s\n" % sample_name)
cmd = ("bcftools reheader -s {sample_file} {in_file} -o {tx_out_file}")
do.run(cmd.format(**locals()), "Fix germline samplename: %s" % sample_name)
return vcfutils.bgzip_and_index(out_file, data["config"]) | python | def fix_germline_samplename(in_file, sample_name, data):
"""Replace germline sample names, originally from normal BAM file.
"""
out_file = "%s-fixnames%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
sample_file = "%s-samples.txt" % utils.splitext_plus(tx_out_file)[0]
with open(sample_file, "w") as out_handle:
out_handle.write("%s\n" % sample_name)
cmd = ("bcftools reheader -s {sample_file} {in_file} -o {tx_out_file}")
do.run(cmd.format(**locals()), "Fix germline samplename: %s" % sample_name)
return vcfutils.bgzip_and_index(out_file, data["config"]) | [
"def",
"fix_germline_samplename",
"(",
"in_file",
",",
"sample_name",
",",
"data",
")",
":",
"out_file",
"=",
"\"%s-fixnames%s\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_file",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"... | Replace germline sample names, originally from normal BAM file. | [
"Replace",
"germline",
"sample",
"names",
"originally",
"from",
"normal",
"BAM",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L133-L144 | train | 219,015 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | _remove_prioritization | def _remove_prioritization(in_file, data, out_dir=None):
"""Remove tumor-only prioritization and return non-filtered calls.
"""
out_file = "%s-germline.vcf" % utils.splitext_plus(in_file)[0]
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file) and not utils.file_uptodate(out_file + ".gz", in_file):
with file_transaction(data, out_file) as tx_out_file:
reader = cyvcf2.VCF(str(in_file))
reader.add_filter_to_header({'ID': 'Somatic', 'Description': 'Variant called as Somatic'})
# with open(tx_out_file, "w") as out_handle:
# out_handle.write(reader.raw_header)
with contextlib.closing(cyvcf2.Writer(tx_out_file, reader)) as writer:
for rec in reader:
rec = _update_prioritization_filters(rec)
# out_handle.write(str(rec))
writer.write_record(rec)
return out_file | python | def _remove_prioritization(in_file, data, out_dir=None):
"""Remove tumor-only prioritization and return non-filtered calls.
"""
out_file = "%s-germline.vcf" % utils.splitext_plus(in_file)[0]
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file) and not utils.file_uptodate(out_file + ".gz", in_file):
with file_transaction(data, out_file) as tx_out_file:
reader = cyvcf2.VCF(str(in_file))
reader.add_filter_to_header({'ID': 'Somatic', 'Description': 'Variant called as Somatic'})
# with open(tx_out_file, "w") as out_handle:
# out_handle.write(reader.raw_header)
with contextlib.closing(cyvcf2.Writer(tx_out_file, reader)) as writer:
for rec in reader:
rec = _update_prioritization_filters(rec)
# out_handle.write(str(rec))
writer.write_record(rec)
return out_file | [
"def",
"_remove_prioritization",
"(",
"in_file",
",",
"data",
",",
"out_dir",
"=",
"None",
")",
":",
"out_file",
"=",
"\"%s-germline.vcf\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_file",
")",
"[",
"0",
"]",
"if",
"out_dir",
":",
"out_file",
"=",
"os"... | Remove tumor-only prioritization and return non-filtered calls. | [
"Remove",
"tumor",
"-",
"only",
"prioritization",
"and",
"return",
"non",
"-",
"filtered",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L146-L163 | train | 219,016 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | _extract_germline | def _extract_germline(in_file, data):
"""Extract germline calls non-somatic, non-filtered calls.
"""
out_file = "%s-germline.vcf" % utils.splitext_plus(in_file)[0]
if not utils.file_uptodate(out_file, in_file) and not utils.file_uptodate(out_file + ".gz", in_file):
with file_transaction(data, out_file) as tx_out_file:
reader = cyvcf2.VCF(str(in_file))
reader.add_filter_to_header({'ID': 'Somatic', 'Description': 'Variant called as Somatic'})
#with contextlib.closing(cyvcf2.Writer(tx_out_file, reader)) as writer:
with open(tx_out_file, "w") as out_handle:
out_handle.write(reader.raw_header)
for rec in reader:
rec = _update_germline_filters(rec)
out_handle.write(str(rec))
#writer.write_record(rec)
return out_file | python | def _extract_germline(in_file, data):
"""Extract germline calls non-somatic, non-filtered calls.
"""
out_file = "%s-germline.vcf" % utils.splitext_plus(in_file)[0]
if not utils.file_uptodate(out_file, in_file) and not utils.file_uptodate(out_file + ".gz", in_file):
with file_transaction(data, out_file) as tx_out_file:
reader = cyvcf2.VCF(str(in_file))
reader.add_filter_to_header({'ID': 'Somatic', 'Description': 'Variant called as Somatic'})
#with contextlib.closing(cyvcf2.Writer(tx_out_file, reader)) as writer:
with open(tx_out_file, "w") as out_handle:
out_handle.write(reader.raw_header)
for rec in reader:
rec = _update_germline_filters(rec)
out_handle.write(str(rec))
#writer.write_record(rec)
return out_file | [
"def",
"_extract_germline",
"(",
"in_file",
",",
"data",
")",
":",
"out_file",
"=",
"\"%s-germline.vcf\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_file",
")",
"[",
"0",
"]",
"if",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"in_file",
... | Extract germline calls non-somatic, non-filtered calls. | [
"Extract",
"germline",
"calls",
"non",
"-",
"somatic",
"non",
"-",
"filtered",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L169-L184 | train | 219,017 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | _is_somatic | def _is_somatic(rec):
"""Handle somatic classifications from MuTect, MuTect2, VarDict and VarScan
"""
if _has_somatic_flag(rec):
return True
if _is_mutect2_somatic(rec):
return True
ss_flag = rec.INFO.get("SS")
if ss_flag is not None:
if str(ss_flag) == "2":
return True
status_flag = rec.INFO.get("STATUS")
if status_flag is not None:
if str(status_flag).lower() in ["somatic", "likelysomatic", "strongsomatic", "samplespecific"]:
return True
epr = rec.INFO.get("EPR", "").split(",")
if epr and all([p == "pass" for p in epr]):
return True
return False | python | def _is_somatic(rec):
"""Handle somatic classifications from MuTect, MuTect2, VarDict and VarScan
"""
if _has_somatic_flag(rec):
return True
if _is_mutect2_somatic(rec):
return True
ss_flag = rec.INFO.get("SS")
if ss_flag is not None:
if str(ss_flag) == "2":
return True
status_flag = rec.INFO.get("STATUS")
if status_flag is not None:
if str(status_flag).lower() in ["somatic", "likelysomatic", "strongsomatic", "samplespecific"]:
return True
epr = rec.INFO.get("EPR", "").split(",")
if epr and all([p == "pass" for p in epr]):
return True
return False | [
"def",
"_is_somatic",
"(",
"rec",
")",
":",
"if",
"_has_somatic_flag",
"(",
"rec",
")",
":",
"return",
"True",
"if",
"_is_mutect2_somatic",
"(",
"rec",
")",
":",
"return",
"True",
"ss_flag",
"=",
"rec",
".",
"INFO",
".",
"get",
"(",
"\"SS\"",
")",
"if"... | Handle somatic classifications from MuTect, MuTect2, VarDict and VarScan | [
"Handle",
"somatic",
"classifications",
"from",
"MuTect",
"MuTect2",
"VarDict",
"and",
"VarScan"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L210-L228 | train | 219,018 |
bcbio/bcbio-nextgen | bcbio/variation/germline.py | _is_germline | def _is_germline(rec):
"""Handle somatic INFO classifications from MuTect, MuTect2, VarDict, VarScan and Octopus.
"""
if _has_somatic_flag(rec):
return False
if _is_mutect2_somatic(rec):
return False
ss_flag = rec.INFO.get("SS")
if ss_flag is not None:
if str(ss_flag) == "1":
return True
# Octopus, assessed for potentially being Germline and not flagged SOMATIC
# https://github.com/luntergroup/octopus/wiki/Calling-models:-Cancer#qual-vs-pp
pp = rec.INFO.get("PP")
if pp and float(pp) / float(rec.QUAL) >= 0.5:
return True
status_flag = rec.INFO.get("STATUS")
if status_flag is not None:
if str(status_flag).lower() in ["germline", "likelyloh", "strongloh", "afdiff", "deletion"]:
return True
return False | python | def _is_germline(rec):
"""Handle somatic INFO classifications from MuTect, MuTect2, VarDict, VarScan and Octopus.
"""
if _has_somatic_flag(rec):
return False
if _is_mutect2_somatic(rec):
return False
ss_flag = rec.INFO.get("SS")
if ss_flag is not None:
if str(ss_flag) == "1":
return True
# Octopus, assessed for potentially being Germline and not flagged SOMATIC
# https://github.com/luntergroup/octopus/wiki/Calling-models:-Cancer#qual-vs-pp
pp = rec.INFO.get("PP")
if pp and float(pp) / float(rec.QUAL) >= 0.5:
return True
status_flag = rec.INFO.get("STATUS")
if status_flag is not None:
if str(status_flag).lower() in ["germline", "likelyloh", "strongloh", "afdiff", "deletion"]:
return True
return False | [
"def",
"_is_germline",
"(",
"rec",
")",
":",
"if",
"_has_somatic_flag",
"(",
"rec",
")",
":",
"return",
"False",
"if",
"_is_mutect2_somatic",
"(",
"rec",
")",
":",
"return",
"False",
"ss_flag",
"=",
"rec",
".",
"INFO",
".",
"get",
"(",
"\"SS\"",
")",
"... | Handle somatic INFO classifications from MuTect, MuTect2, VarDict, VarScan and Octopus. | [
"Handle",
"somatic",
"INFO",
"classifications",
"from",
"MuTect",
"MuTect2",
"VarDict",
"VarScan",
"and",
"Octopus",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L242-L262 | train | 219,019 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | get_sort_cmd | def get_sort_cmd(tmp_dir=None):
"""Retrieve GNU coreutils sort command, using version-sort if available.
Recent versions of sort have alpha-numeric sorting, which provides
more natural sorting of chromosomes (chr1, chr2) instead of (chr1, chr10).
This also fixes versions of sort, like 8.22 in CentOS 7.1, that have broken
sorting without version sorting specified.
https://github.com/bcbio/bcbio-nextgen/issues/624
https://github.com/bcbio/bcbio-nextgen/issues/1017
"""
has_versionsort = subprocess.check_output("sort --help | grep version-sort; exit 0", shell=True).strip()
if has_versionsort:
cmd = "sort -V"
else:
cmd = "sort"
if tmp_dir and os.path.exists(tmp_dir) and os.path.isdir(tmp_dir):
cmd += " -T %s" % tmp_dir
return cmd | python | def get_sort_cmd(tmp_dir=None):
"""Retrieve GNU coreutils sort command, using version-sort if available.
Recent versions of sort have alpha-numeric sorting, which provides
more natural sorting of chromosomes (chr1, chr2) instead of (chr1, chr10).
This also fixes versions of sort, like 8.22 in CentOS 7.1, that have broken
sorting without version sorting specified.
https://github.com/bcbio/bcbio-nextgen/issues/624
https://github.com/bcbio/bcbio-nextgen/issues/1017
"""
has_versionsort = subprocess.check_output("sort --help | grep version-sort; exit 0", shell=True).strip()
if has_versionsort:
cmd = "sort -V"
else:
cmd = "sort"
if tmp_dir and os.path.exists(tmp_dir) and os.path.isdir(tmp_dir):
cmd += " -T %s" % tmp_dir
return cmd | [
"def",
"get_sort_cmd",
"(",
"tmp_dir",
"=",
"None",
")",
":",
"has_versionsort",
"=",
"subprocess",
".",
"check_output",
"(",
"\"sort --help | grep version-sort; exit 0\"",
",",
"shell",
"=",
"True",
")",
".",
"strip",
"(",
")",
"if",
"has_versionsort",
":",
"cm... | Retrieve GNU coreutils sort command, using version-sort if available.
Recent versions of sort have alpha-numeric sorting, which provides
more natural sorting of chromosomes (chr1, chr2) instead of (chr1, chr10).
This also fixes versions of sort, like 8.22 in CentOS 7.1, that have broken
sorting without version sorting specified.
https://github.com/bcbio/bcbio-nextgen/issues/624
https://github.com/bcbio/bcbio-nextgen/issues/1017 | [
"Retrieve",
"GNU",
"coreutils",
"sort",
"command",
"using",
"version",
"-",
"sort",
"if",
"available",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L18-L36 | train | 219,020 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | check_bed_contigs | def check_bed_contigs(in_file, data):
"""Ensure BED file contigs match the reference genome.
"""
if not dd.get_ref_file(data):
return
contigs = set([])
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith(("#", "track", "browser", "@")) and line.strip():
contigs.add(line.split()[0])
ref_contigs = set([x.name for x in ref.file_contigs(dd.get_ref_file(data))])
if contigs and len(contigs - ref_contigs) / float(len(contigs)) > 0.25:
raise ValueError("Contigs in BED file %s not in reference genome:\n %s\n"
% (in_file, list(contigs - ref_contigs)) +
"This is typically due to chr1 versus 1 differences in BED file and reference.") | python | def check_bed_contigs(in_file, data):
"""Ensure BED file contigs match the reference genome.
"""
if not dd.get_ref_file(data):
return
contigs = set([])
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith(("#", "track", "browser", "@")) and line.strip():
contigs.add(line.split()[0])
ref_contigs = set([x.name for x in ref.file_contigs(dd.get_ref_file(data))])
if contigs and len(contigs - ref_contigs) / float(len(contigs)) > 0.25:
raise ValueError("Contigs in BED file %s not in reference genome:\n %s\n"
% (in_file, list(contigs - ref_contigs)) +
"This is typically due to chr1 versus 1 differences in BED file and reference.") | [
"def",
"check_bed_contigs",
"(",
"in_file",
",",
"data",
")",
":",
"if",
"not",
"dd",
".",
"get_ref_file",
"(",
"data",
")",
":",
"return",
"contigs",
"=",
"set",
"(",
"[",
"]",
")",
"with",
"utils",
".",
"open_gzipsafe",
"(",
"in_file",
")",
"as",
"... | Ensure BED file contigs match the reference genome. | [
"Ensure",
"BED",
"file",
"contigs",
"match",
"the",
"reference",
"genome",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L38-L52 | train | 219,021 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | check_bed_coords | def check_bed_coords(in_file, data):
"""Ensure BED file coordinates match reference genome.
Catches errors like using a hg38 BED file for an hg19 genome run.
"""
if dd.get_ref_file(data):
contig_sizes = {}
for contig in ref.file_contigs(dd.get_ref_file(data)):
contig_sizes[contig.name] = contig.size
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith(("#", "track", "browser", "@")) and line.strip():
parts = line.split()
if len(parts) > 3:
try:
end = int(parts[2])
except ValueError:
continue
contig = parts[0]
check_size = contig_sizes.get(contig)
if check_size and end > check_size:
raise ValueError("Found BED coordinate off the end of the chromosome:\n%s%s\n"
"Is the input BED from the right genome build?" %
(line, in_file)) | python | def check_bed_coords(in_file, data):
"""Ensure BED file coordinates match reference genome.
Catches errors like using a hg38 BED file for an hg19 genome run.
"""
if dd.get_ref_file(data):
contig_sizes = {}
for contig in ref.file_contigs(dd.get_ref_file(data)):
contig_sizes[contig.name] = contig.size
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith(("#", "track", "browser", "@")) and line.strip():
parts = line.split()
if len(parts) > 3:
try:
end = int(parts[2])
except ValueError:
continue
contig = parts[0]
check_size = contig_sizes.get(contig)
if check_size and end > check_size:
raise ValueError("Found BED coordinate off the end of the chromosome:\n%s%s\n"
"Is the input BED from the right genome build?" %
(line, in_file)) | [
"def",
"check_bed_coords",
"(",
"in_file",
",",
"data",
")",
":",
"if",
"dd",
".",
"get_ref_file",
"(",
"data",
")",
":",
"contig_sizes",
"=",
"{",
"}",
"for",
"contig",
"in",
"ref",
".",
"file_contigs",
"(",
"dd",
".",
"get_ref_file",
"(",
"data",
")"... | Ensure BED file coordinates match reference genome.
Catches errors like using a hg38 BED file for an hg19 genome run. | [
"Ensure",
"BED",
"file",
"coordinates",
"match",
"reference",
"genome",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L54-L77 | train | 219,022 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | clean_file | def clean_file(in_file, data, prefix="", bedprep_dir=None, simple=None):
"""Prepare a clean sorted input BED file without headers
"""
# Remove non-ascii characters. Used in coverage analysis, to support JSON code in one column
# and be happy with sambamba:
simple = "iconv -c -f utf-8 -t ascii | sed 's/ //g' |" if simple else ""
if in_file:
if not bedprep_dir:
bedprep_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "bedprep"))
# Avoid running multiple times with same prefix
if prefix and os.path.basename(in_file).startswith(prefix):
return in_file
out_file = os.path.join(bedprep_dir, "%s%s" % (prefix, os.path.basename(in_file)))
out_file = out_file.replace(".interval_list", ".bed")
if out_file.endswith(".gz"):
out_file = out_file[:-3]
if not utils.file_uptodate(out_file, in_file):
check_bed_contigs(in_file, data)
check_bed_coords(in_file, data)
with file_transaction(data, out_file) as tx_out_file:
bcbio_py = sys.executable
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
sort_cmd = get_sort_cmd(os.path.dirname(tx_out_file))
cmd = ("{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^@ | "
"grep -v ^# | {simple} "
"{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | "
"{sort_cmd} -k1,1 -k2,2n > {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare cleaned BED file", data)
vcfutils.bgzip_and_index(out_file, data.get("config", {}), remove_orig=False)
return out_file | python | def clean_file(in_file, data, prefix="", bedprep_dir=None, simple=None):
"""Prepare a clean sorted input BED file without headers
"""
# Remove non-ascii characters. Used in coverage analysis, to support JSON code in one column
# and be happy with sambamba:
simple = "iconv -c -f utf-8 -t ascii | sed 's/ //g' |" if simple else ""
if in_file:
if not bedprep_dir:
bedprep_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "bedprep"))
# Avoid running multiple times with same prefix
if prefix and os.path.basename(in_file).startswith(prefix):
return in_file
out_file = os.path.join(bedprep_dir, "%s%s" % (prefix, os.path.basename(in_file)))
out_file = out_file.replace(".interval_list", ".bed")
if out_file.endswith(".gz"):
out_file = out_file[:-3]
if not utils.file_uptodate(out_file, in_file):
check_bed_contigs(in_file, data)
check_bed_coords(in_file, data)
with file_transaction(data, out_file) as tx_out_file:
bcbio_py = sys.executable
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
sort_cmd = get_sort_cmd(os.path.dirname(tx_out_file))
cmd = ("{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^@ | "
"grep -v ^# | {simple} "
"{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | "
"{sort_cmd} -k1,1 -k2,2n > {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare cleaned BED file", data)
vcfutils.bgzip_and_index(out_file, data.get("config", {}), remove_orig=False)
return out_file | [
"def",
"clean_file",
"(",
"in_file",
",",
"data",
",",
"prefix",
"=",
"\"\"",
",",
"bedprep_dir",
"=",
"None",
",",
"simple",
"=",
"None",
")",
":",
"# Remove non-ascii characters. Used in coverage analysis, to support JSON code in one column",
"# and be happy with sambam... | Prepare a clean sorted input BED file without headers | [
"Prepare",
"a",
"clean",
"sorted",
"input",
"BED",
"file",
"without",
"headers"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L79-L108 | train | 219,023 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | remove_bad | def remove_bad():
"""Remove non-increasing BED lines which will cause variant callers to choke.
Also fixes space separated BED inputs.
"""
for line in sys.stdin:
parts = line.strip().split("\t")
if len(parts) == 1 and len(line.strip().split()) > 1:
parts = line.strip().split()
if line.strip() and len(parts) > 2 and int(parts[2]) > int(parts[1]):
sys.stdout.write("\t".join(parts) + "\n") | python | def remove_bad():
"""Remove non-increasing BED lines which will cause variant callers to choke.
Also fixes space separated BED inputs.
"""
for line in sys.stdin:
parts = line.strip().split("\t")
if len(parts) == 1 and len(line.strip().split()) > 1:
parts = line.strip().split()
if line.strip() and len(parts) > 2 and int(parts[2]) > int(parts[1]):
sys.stdout.write("\t".join(parts) + "\n") | [
"def",
"remove_bad",
"(",
")",
":",
"for",
"line",
"in",
"sys",
".",
"stdin",
":",
"parts",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
"and",
"len",
"(",
"line",
".",
"stri... | Remove non-increasing BED lines which will cause variant callers to choke.
Also fixes space separated BED inputs. | [
"Remove",
"non",
"-",
"increasing",
"BED",
"lines",
"which",
"will",
"cause",
"variant",
"callers",
"to",
"choke",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L134-L144 | train | 219,024 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | merge_overlaps | def merge_overlaps(in_file, data, distance=None, out_dir=None):
"""Merge bed file intervals to avoid overlapping regions. Output is always a 3 column file.
Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes
that don't collapse BEDs prior to using them.
"""
config = data["config"]
if in_file:
bedtools = config_utils.get_program("bedtools", config,
default="bedtools")
work_dir = tz.get_in(["dirs", "work"], data)
if out_dir:
bedprep_dir = out_dir
elif work_dir:
bedprep_dir = utils.safe_makedir(os.path.join(work_dir, "bedprep"))
else:
bedprep_dir = os.path.dirname(in_file)
out_file = os.path.join(bedprep_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(in_file))[0]))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
distance = "-d %s" % distance if distance else ""
cmd = "{bedtools} merge {distance} -i {in_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Prepare merged BED file", data)
vcfutils.bgzip_and_index(out_file, data["config"], remove_orig=False)
return out_file | python | def merge_overlaps(in_file, data, distance=None, out_dir=None):
"""Merge bed file intervals to avoid overlapping regions. Output is always a 3 column file.
Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes
that don't collapse BEDs prior to using them.
"""
config = data["config"]
if in_file:
bedtools = config_utils.get_program("bedtools", config,
default="bedtools")
work_dir = tz.get_in(["dirs", "work"], data)
if out_dir:
bedprep_dir = out_dir
elif work_dir:
bedprep_dir = utils.safe_makedir(os.path.join(work_dir, "bedprep"))
else:
bedprep_dir = os.path.dirname(in_file)
out_file = os.path.join(bedprep_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(in_file))[0]))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
distance = "-d %s" % distance if distance else ""
cmd = "{bedtools} merge {distance} -i {in_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Prepare merged BED file", data)
vcfutils.bgzip_and_index(out_file, data["config"], remove_orig=False)
return out_file | [
"def",
"merge_overlaps",
"(",
"in_file",
",",
"data",
",",
"distance",
"=",
"None",
",",
"out_dir",
"=",
"None",
")",
":",
"config",
"=",
"data",
"[",
"\"config\"",
"]",
"if",
"in_file",
":",
"bedtools",
"=",
"config_utils",
".",
"get_program",
"(",
"\"b... | Merge bed file intervals to avoid overlapping regions. Output is always a 3 column file.
Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes
that don't collapse BEDs prior to using them. | [
"Merge",
"bed",
"file",
"intervals",
"to",
"avoid",
"overlapping",
"regions",
".",
"Output",
"is",
"always",
"a",
"3",
"column",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L146-L170 | train | 219,025 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | population_variant_regions | def population_variant_regions(items, merged=False):
"""Retrieve the variant region BED file from a population of items.
If tumor/normal, return the tumor BED file. If a population, return
the BED file covering the most bases.
"""
def _get_variant_regions(data):
out = dd.get_variant_regions(data) or dd.get_sample_callable(data)
# Only need to merge for variant region inputs, not callable BED regions which don't overlap
if merged and dd.get_variant_regions(data):
merged_out = dd.get_variant_regions_merged(data)
if merged_out:
out = merged_out
else:
out = merge_overlaps(out, data)
return out
import pybedtools
if len(items) == 1:
return _get_variant_regions(items[0])
else:
paired = vcfutils.get_paired(items)
if paired:
return _get_variant_regions(paired.tumor_data)
else:
vrs = []
for data in items:
vr_bed = _get_variant_regions(data)
if vr_bed:
vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed))
vrs.sort(reverse=True)
if vrs:
return vrs[0][1] | python | def population_variant_regions(items, merged=False):
"""Retrieve the variant region BED file from a population of items.
If tumor/normal, return the tumor BED file. If a population, return
the BED file covering the most bases.
"""
def _get_variant_regions(data):
out = dd.get_variant_regions(data) or dd.get_sample_callable(data)
# Only need to merge for variant region inputs, not callable BED regions which don't overlap
if merged and dd.get_variant_regions(data):
merged_out = dd.get_variant_regions_merged(data)
if merged_out:
out = merged_out
else:
out = merge_overlaps(out, data)
return out
import pybedtools
if len(items) == 1:
return _get_variant_regions(items[0])
else:
paired = vcfutils.get_paired(items)
if paired:
return _get_variant_regions(paired.tumor_data)
else:
vrs = []
for data in items:
vr_bed = _get_variant_regions(data)
if vr_bed:
vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed))
vrs.sort(reverse=True)
if vrs:
return vrs[0][1] | [
"def",
"population_variant_regions",
"(",
"items",
",",
"merged",
"=",
"False",
")",
":",
"def",
"_get_variant_regions",
"(",
"data",
")",
":",
"out",
"=",
"dd",
".",
"get_variant_regions",
"(",
"data",
")",
"or",
"dd",
".",
"get_sample_callable",
"(",
"data... | Retrieve the variant region BED file from a population of items.
If tumor/normal, return the tumor BED file. If a population, return
the BED file covering the most bases. | [
"Retrieve",
"the",
"variant",
"region",
"BED",
"file",
"from",
"a",
"population",
"of",
"items",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L172-L203 | train | 219,026 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | combine | def combine(in_files, out_file, config):
"""Combine multiple BED files into a single output.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for in_file in in_files:
with open(in_file) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
return out_file | python | def combine(in_files, out_file, config):
"""Combine multiple BED files into a single output.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for in_file in in_files:
with open(in_file) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
return out_file | [
"def",
"combine",
"(",
"in_files",
",",
"out_file",
",",
"config",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"config",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"with",
"open",
"... | Combine multiple BED files into a single output. | [
"Combine",
"multiple",
"BED",
"files",
"into",
"a",
"single",
"output",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L205-L214 | train | 219,027 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | intersect_two | def intersect_two(f1, f2, work_dir, data):
"""Intersect two regions, handling cases where either file is not present.
"""
bedtools = config_utils.get_program("bedtools", data, default="bedtools")
f1_exists = f1 and utils.file_exists(f1)
f2_exists = f2 and utils.file_exists(f2)
if not f1_exists and not f2_exists:
return None
elif f1_exists and not f2_exists:
return f1
elif f2_exists and not f1_exists:
return f2
else:
out_file = os.path.join(work_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(f1))[0]))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "{bedtools} intersect -a {f1} -b {f2} > {tx_out_file}"
do.run(cmd.format(**locals()), "Intersect BED files", data)
return out_file | python | def intersect_two(f1, f2, work_dir, data):
"""Intersect two regions, handling cases where either file is not present.
"""
bedtools = config_utils.get_program("bedtools", data, default="bedtools")
f1_exists = f1 and utils.file_exists(f1)
f2_exists = f2 and utils.file_exists(f2)
if not f1_exists and not f2_exists:
return None
elif f1_exists and not f2_exists:
return f1
elif f2_exists and not f1_exists:
return f2
else:
out_file = os.path.join(work_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(f1))[0]))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "{bedtools} intersect -a {f1} -b {f2} > {tx_out_file}"
do.run(cmd.format(**locals()), "Intersect BED files", data)
return out_file | [
"def",
"intersect_two",
"(",
"f1",
",",
"f2",
",",
"work_dir",
",",
"data",
")",
":",
"bedtools",
"=",
"config_utils",
".",
"get_program",
"(",
"\"bedtools\"",
",",
"data",
",",
"default",
"=",
"\"bedtools\"",
")",
"f1_exists",
"=",
"f1",
"and",
"utils",
... | Intersect two regions, handling cases where either file is not present. | [
"Intersect",
"two",
"regions",
"handling",
"cases",
"where",
"either",
"file",
"is",
"not",
"present",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L216-L234 | train | 219,028 |
bcbio/bcbio-nextgen | bcbio/variation/bedutils.py | subset_to_genome | def subset_to_genome(in_file, out_file, data):
"""Subset a BED file to only contain contigs present in the reference genome.
"""
if not utils.file_uptodate(out_file, in_file):
contigs = set([x.name for x in ref.file_contigs(dd.get_ref_file(data))])
with utils.open_gzipsafe(in_file) as in_handle:
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
parts = line.split()
if parts and parts[0] in contigs:
out_handle.write(line)
return out_file | python | def subset_to_genome(in_file, out_file, data):
"""Subset a BED file to only contain contigs present in the reference genome.
"""
if not utils.file_uptodate(out_file, in_file):
contigs = set([x.name for x in ref.file_contigs(dd.get_ref_file(data))])
with utils.open_gzipsafe(in_file) as in_handle:
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
parts = line.split()
if parts and parts[0] in contigs:
out_handle.write(line)
return out_file | [
"def",
"subset_to_genome",
"(",
"in_file",
",",
"out_file",
",",
"data",
")",
":",
"if",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"in_file",
")",
":",
"contigs",
"=",
"set",
"(",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"ref",
".",... | Subset a BED file to only contain contigs present in the reference genome. | [
"Subset",
"a",
"BED",
"file",
"to",
"only",
"contain",
"contigs",
"present",
"in",
"the",
"reference",
"genome",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bedutils.py#L247-L259 | train | 219,029 |
bcbio/bcbio-nextgen | bcbio/variation/octopus.py | run | def run(align_bams, items, ref_file, assoc_files, region, out_file):
"""Run octopus variant calling, handling both somatic and germline calling.
"""
if not utils.file_exists(out_file):
paired = vcfutils.get_paired_bams(align_bams, items)
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
if paired:
return _run_somatic(paired, ref_file, target, out_file)
else:
return _run_germline(align_bams, items, ref_file, target, out_file)
return out_file | python | def run(align_bams, items, ref_file, assoc_files, region, out_file):
"""Run octopus variant calling, handling both somatic and germline calling.
"""
if not utils.file_exists(out_file):
paired = vcfutils.get_paired_bams(align_bams, items)
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
if paired:
return _run_somatic(paired, ref_file, target, out_file)
else:
return _run_germline(align_bams, items, ref_file, target, out_file)
return out_file | [
"def",
"run",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
",",
"out_file",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"paired",
"=",
"vcfutils",
".",
"get_paired_bams",
"(",
"al... | Run octopus variant calling, handling both somatic and germline calling. | [
"Run",
"octopus",
"variant",
"calling",
"handling",
"both",
"somatic",
"and",
"germline",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/octopus.py#L17-L29 | train | 219,030 |
bcbio/bcbio-nextgen | bcbio/variation/octopus.py | _produce_compatible_vcf | def _produce_compatible_vcf(out_file, data, is_somatic):
"""Create a compatible VCF that downstream tools can deal with.
- htsjdk and thus GATK and Picard do not support VCF4.3:
https://github.com/broadinstitute/gatk/issues/2092
- Use octopus legacy format to avoid incompatibilities.
https://github.com/luntergroup/octopus#output-format
- Fixes `##contig` lines since octopus only writes contigs
used in the BED file region, causing incompatibilies with
GatherVcfs when merging
- Fixes alleles prefixed with '*' like 'C,*T' which cause
downstream failures when reading with GATK.
"""
base, ext = utils.splitext_plus(out_file)
legacy_file = "%s.legacy%s" % (base, ext)
if is_somatic:
legacy_file = _covert_to_diploid(legacy_file, data)
final_file = "%s.vcf.gz" % base
cat_cmd = "zcat" if legacy_file.endswith(".gz") else "cat"
contig_cl = vcfutils.add_contig_to_header_cl(dd.get_ref_file(data), out_file)
remove_problem_alleles = r"sed 's/,\*\([A-Z]\)/,\1/'"
cmd = ("{cat_cmd} {legacy_file} | sed 's/fileformat=VCFv4.3/fileformat=VCFv4.2/' | "
"{remove_problem_alleles} | {contig_cl} | bgzip -c > {final_file}")
do.run(cmd.format(**locals()), "Produce compatible VCF output file from octopus")
return vcfutils.bgzip_and_index(out_file, data["config"]) | python | def _produce_compatible_vcf(out_file, data, is_somatic):
"""Create a compatible VCF that downstream tools can deal with.
- htsjdk and thus GATK and Picard do not support VCF4.3:
https://github.com/broadinstitute/gatk/issues/2092
- Use octopus legacy format to avoid incompatibilities.
https://github.com/luntergroup/octopus#output-format
- Fixes `##contig` lines since octopus only writes contigs
used in the BED file region, causing incompatibilies with
GatherVcfs when merging
- Fixes alleles prefixed with '*' like 'C,*T' which cause
downstream failures when reading with GATK.
"""
base, ext = utils.splitext_plus(out_file)
legacy_file = "%s.legacy%s" % (base, ext)
if is_somatic:
legacy_file = _covert_to_diploid(legacy_file, data)
final_file = "%s.vcf.gz" % base
cat_cmd = "zcat" if legacy_file.endswith(".gz") else "cat"
contig_cl = vcfutils.add_contig_to_header_cl(dd.get_ref_file(data), out_file)
remove_problem_alleles = r"sed 's/,\*\([A-Z]\)/,\1/'"
cmd = ("{cat_cmd} {legacy_file} | sed 's/fileformat=VCFv4.3/fileformat=VCFv4.2/' | "
"{remove_problem_alleles} | {contig_cl} | bgzip -c > {final_file}")
do.run(cmd.format(**locals()), "Produce compatible VCF output file from octopus")
return vcfutils.bgzip_and_index(out_file, data["config"]) | [
"def",
"_produce_compatible_vcf",
"(",
"out_file",
",",
"data",
",",
"is_somatic",
")",
":",
"base",
",",
"ext",
"=",
"utils",
".",
"splitext_plus",
"(",
"out_file",
")",
"legacy_file",
"=",
"\"%s.legacy%s\"",
"%",
"(",
"base",
",",
"ext",
")",
"if",
"is_s... | Create a compatible VCF that downstream tools can deal with.
- htsjdk and thus GATK and Picard do not support VCF4.3:
https://github.com/broadinstitute/gatk/issues/2092
- Use octopus legacy format to avoid incompatibilities.
https://github.com/luntergroup/octopus#output-format
- Fixes `##contig` lines since octopus only writes contigs
used in the BED file region, causing incompatibilies with
GatherVcfs when merging
- Fixes alleles prefixed with '*' like 'C,*T' which cause
downstream failures when reading with GATK. | [
"Create",
"a",
"compatible",
"VCF",
"that",
"downstream",
"tools",
"can",
"deal",
"with",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/octopus.py#L31-L55 | train | 219,031 |
bcbio/bcbio-nextgen | bcbio/variation/octopus.py | _covert_to_diploid | def _covert_to_diploid(in_file, data):
"""Converts non-diploid somatic outputs into diploid.
https://github.com/luntergroup/octopus/wiki/Case-study:-Tumour-only-UMI#evaluate-variant-calls
"""
sample = dd.get_sample_name(data)
out_file = "%s-diploid.vcf" % utils.splitext_plus(in_file)[0]
in_vcf = pysam.VariantFile(in_file)
out_vcf = pysam.VariantFile(out_file, 'w', header=in_vcf.header)
for record in in_vcf:
gt = list(record.samples[sample]['GT'])
if 'SOMATIC' in record.info:
for allele in set(gt):
if allele != gt[0]:
record.samples[sample]['GT'] = gt[0], allele
out_vcf.write(record)
else:
if len(gt) == 1:
record.samples[sample]['GT'] = gt
else:
record.samples[sample]['GT'] = gt[0], gt[1]
out_vcf.write(record)
in_vcf.close()
out_vcf.close()
return vcfutils.bgzip_and_index(out_file, data["config"]) | python | def _covert_to_diploid(in_file, data):
"""Converts non-diploid somatic outputs into diploid.
https://github.com/luntergroup/octopus/wiki/Case-study:-Tumour-only-UMI#evaluate-variant-calls
"""
sample = dd.get_sample_name(data)
out_file = "%s-diploid.vcf" % utils.splitext_plus(in_file)[0]
in_vcf = pysam.VariantFile(in_file)
out_vcf = pysam.VariantFile(out_file, 'w', header=in_vcf.header)
for record in in_vcf:
gt = list(record.samples[sample]['GT'])
if 'SOMATIC' in record.info:
for allele in set(gt):
if allele != gt[0]:
record.samples[sample]['GT'] = gt[0], allele
out_vcf.write(record)
else:
if len(gt) == 1:
record.samples[sample]['GT'] = gt
else:
record.samples[sample]['GT'] = gt[0], gt[1]
out_vcf.write(record)
in_vcf.close()
out_vcf.close()
return vcfutils.bgzip_and_index(out_file, data["config"]) | [
"def",
"_covert_to_diploid",
"(",
"in_file",
",",
"data",
")",
":",
"sample",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"out_file",
"=",
"\"%s-diploid.vcf\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_file",
")",
"[",
"0",
"]",
"in_vcf",
"=",... | Converts non-diploid somatic outputs into diploid.
https://github.com/luntergroup/octopus/wiki/Case-study:-Tumour-only-UMI#evaluate-variant-calls | [
"Converts",
"non",
"-",
"diploid",
"somatic",
"outputs",
"into",
"diploid",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/octopus.py#L57-L81 | train | 219,032 |
bcbio/bcbio-nextgen | bcbio/variation/octopus.py | _run_germline | def _run_germline(align_bams, items, ref_file, target, out_file):
"""Run germline calling, handling populations.
TODO:
- We could better handle trio calling with ped inputs as octopus
has special support.
"""
align_bams = " ".join(align_bams)
cores = dd.get_num_cores(items[0])
cmd = ("octopus --threads {cores} --reference {ref_file} --reads {align_bams} "
"--regions-file {target} "
"--working-directory {tmp_dir} "
"-o {tx_out_file} --legacy")
with file_transaction(items[0], out_file) as tx_out_file:
tmp_dir = os.path.dirname(tx_out_file)
do.run(cmd.format(**locals()), "Octopus germline calling")
_produce_compatible_vcf(tx_out_file, items[0])
return out_file | python | def _run_germline(align_bams, items, ref_file, target, out_file):
"""Run germline calling, handling populations.
TODO:
- We could better handle trio calling with ped inputs as octopus
has special support.
"""
align_bams = " ".join(align_bams)
cores = dd.get_num_cores(items[0])
cmd = ("octopus --threads {cores} --reference {ref_file} --reads {align_bams} "
"--regions-file {target} "
"--working-directory {tmp_dir} "
"-o {tx_out_file} --legacy")
with file_transaction(items[0], out_file) as tx_out_file:
tmp_dir = os.path.dirname(tx_out_file)
do.run(cmd.format(**locals()), "Octopus germline calling")
_produce_compatible_vcf(tx_out_file, items[0])
return out_file | [
"def",
"_run_germline",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"target",
",",
"out_file",
")",
":",
"align_bams",
"=",
"\" \"",
".",
"join",
"(",
"align_bams",
")",
"cores",
"=",
"dd",
".",
"get_num_cores",
"(",
"items",
"[",
"0",
"]",
... | Run germline calling, handling populations.
TODO:
- We could better handle trio calling with ped inputs as octopus
has special support. | [
"Run",
"germline",
"calling",
"handling",
"populations",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/octopus.py#L83-L100 | train | 219,033 |
bcbio/bcbio-nextgen | bcbio/variation/octopus.py | _run_somatic | def _run_somatic(paired, ref_file, target, out_file):
"""Run somatic calling with octopus, handling both paired and tumor-only cases.
Tweaks for low frequency, tumor only and UMI calling documented in:
https://github.com/luntergroup/octopus/blob/develop/configs/UMI.config
"""
align_bams = paired.tumor_bam
if paired.normal_bam:
align_bams += " %s --normal-sample %s" % (paired.normal_bam, paired.normal_name)
cores = dd.get_num_cores(paired.tumor_data)
# Do not try to search below 0.4% currently as leads to long runtimes
# https://github.com/luntergroup/octopus/issues/29#issuecomment-428167979
min_af = max([float(dd.get_min_allele_fraction(paired.tumor_data)) / 100.0, 0.004])
min_af_floor = min_af / 4.0
cmd = ("octopus --threads {cores} --reference {ref_file} --reads {align_bams} "
"--regions-file {target} "
"--min-credible-somatic-frequency {min_af_floor} --min-expected-somatic-frequency {min_af} "
"--downsample-above 4000 --downsample-target 4000 --min-kmer-prune 5 --min-bubble-score 20 "
"--max-haplotypes 200 --somatic-snv-mutation-rate '5e-4' --somatic-indel-mutation-rate '1e-05' "
"--target-working-memory 5G --target-read-buffer-footprint 5G --max-somatic-haplotypes 3 "
"--caller cancer "
"--working-directory {tmp_dir} "
"-o {tx_out_file} --legacy")
if not paired.normal_bam:
cmd += (" --tumour-germline-concentration 5")
if dd.get_umi_type(paired.tumor_data) or _is_umi_consensus_bam(paired.tumor_bam):
cmd += (" --allow-octopus-duplicates --overlap-masking 0 "
"--somatic-filter-expression 'GQ < 200 | MQ < 30 | SB > 0.2 | SD[.25] > 0.1 "
"| BQ < 40 | DP < 100 | MF > 0.1 | AD < 5 | CC > 1.1 | GQD > 2'")
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
tmp_dir = os.path.dirname(tx_out_file)
do.run(cmd.format(**locals()), "Octopus somatic calling")
_produce_compatible_vcf(tx_out_file, paired.tumor_data, is_somatic=True)
return out_file | python | def _run_somatic(paired, ref_file, target, out_file):
"""Run somatic calling with octopus, handling both paired and tumor-only cases.
Tweaks for low frequency, tumor only and UMI calling documented in:
https://github.com/luntergroup/octopus/blob/develop/configs/UMI.config
"""
align_bams = paired.tumor_bam
if paired.normal_bam:
align_bams += " %s --normal-sample %s" % (paired.normal_bam, paired.normal_name)
cores = dd.get_num_cores(paired.tumor_data)
# Do not try to search below 0.4% currently as leads to long runtimes
# https://github.com/luntergroup/octopus/issues/29#issuecomment-428167979
min_af = max([float(dd.get_min_allele_fraction(paired.tumor_data)) / 100.0, 0.004])
min_af_floor = min_af / 4.0
cmd = ("octopus --threads {cores} --reference {ref_file} --reads {align_bams} "
"--regions-file {target} "
"--min-credible-somatic-frequency {min_af_floor} --min-expected-somatic-frequency {min_af} "
"--downsample-above 4000 --downsample-target 4000 --min-kmer-prune 5 --min-bubble-score 20 "
"--max-haplotypes 200 --somatic-snv-mutation-rate '5e-4' --somatic-indel-mutation-rate '1e-05' "
"--target-working-memory 5G --target-read-buffer-footprint 5G --max-somatic-haplotypes 3 "
"--caller cancer "
"--working-directory {tmp_dir} "
"-o {tx_out_file} --legacy")
if not paired.normal_bam:
cmd += (" --tumour-germline-concentration 5")
if dd.get_umi_type(paired.tumor_data) or _is_umi_consensus_bam(paired.tumor_bam):
cmd += (" --allow-octopus-duplicates --overlap-masking 0 "
"--somatic-filter-expression 'GQ < 200 | MQ < 30 | SB > 0.2 | SD[.25] > 0.1 "
"| BQ < 40 | DP < 100 | MF > 0.1 | AD < 5 | CC > 1.1 | GQD > 2'")
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
tmp_dir = os.path.dirname(tx_out_file)
do.run(cmd.format(**locals()), "Octopus somatic calling")
_produce_compatible_vcf(tx_out_file, paired.tumor_data, is_somatic=True)
return out_file | [
"def",
"_run_somatic",
"(",
"paired",
",",
"ref_file",
",",
"target",
",",
"out_file",
")",
":",
"align_bams",
"=",
"paired",
".",
"tumor_bam",
"if",
"paired",
".",
"normal_bam",
":",
"align_bams",
"+=",
"\" %s --normal-sample %s\"",
"%",
"(",
"paired",
".",
... | Run somatic calling with octopus, handling both paired and tumor-only cases.
Tweaks for low frequency, tumor only and UMI calling documented in:
https://github.com/luntergroup/octopus/blob/develop/configs/UMI.config | [
"Run",
"somatic",
"calling",
"with",
"octopus",
"handling",
"both",
"paired",
"and",
"tumor",
"-",
"only",
"cases",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/octopus.py#L102-L135 | train | 219,034 |
bcbio/bcbio-nextgen | bcbio/variation/octopus.py | _is_umi_consensus_bam | def _is_umi_consensus_bam(in_file):
"""Check if input BAM file generated by fgbio consensus calls on UMIs.
Identify these by lack of duplicated reads.
This is useful for pre-aligned consensus BAMs feeding into octopus.
"""
cmd = "samtools view -h %s | head -500000 | samtools view -c -f 1024"
count = subprocess.check_output(cmd % in_file, shell=True)
return int(count) == 0 | python | def _is_umi_consensus_bam(in_file):
"""Check if input BAM file generated by fgbio consensus calls on UMIs.
Identify these by lack of duplicated reads.
This is useful for pre-aligned consensus BAMs feeding into octopus.
"""
cmd = "samtools view -h %s | head -500000 | samtools view -c -f 1024"
count = subprocess.check_output(cmd % in_file, shell=True)
return int(count) == 0 | [
"def",
"_is_umi_consensus_bam",
"(",
"in_file",
")",
":",
"cmd",
"=",
"\"samtools view -h %s | head -500000 | samtools view -c -f 1024\"",
"count",
"=",
"subprocess",
".",
"check_output",
"(",
"cmd",
"%",
"in_file",
",",
"shell",
"=",
"True",
")",
"return",
"int",
"... | Check if input BAM file generated by fgbio consensus calls on UMIs.
Identify these by lack of duplicated reads.
This is useful for pre-aligned consensus BAMs feeding into octopus. | [
"Check",
"if",
"input",
"BAM",
"file",
"generated",
"by",
"fgbio",
"consensus",
"calls",
"on",
"UMIs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/octopus.py#L137-L145 | train | 219,035 |
bcbio/bcbio-nextgen | bcbio/upload/s3.py | update_file | def update_file(finfo, sample_info, config):
"""Update the file to an Amazon S3 bucket, using server side encryption.
"""
ffinal = filesystem.update_file(finfo, sample_info, config, pass_uptodate=True)
if os.path.isdir(ffinal):
to_transfer = []
for path, dirs, files in os.walk(ffinal):
for f in files:
full_f = os.path.join(path, f)
k = full_f.replace(os.path.abspath(config["dir"]) + "/", "")
to_transfer.append((full_f, k))
else:
k = ffinal.replace(os.path.abspath(config["dir"]) + "/", "")
to_transfer = [(ffinal, k)]
region = "@%s" % config["region"] if config.get("region") else ""
fname = "s3://%s%s/%s" % (config["bucket"], region, to_transfer[0][1])
conn = objectstore.connect(fname)
bucket = conn.lookup(config["bucket"])
if not bucket:
bucket = conn.create_bucket(config["bucket"], location=config.get("region", "us-east-1"))
for fname, orig_keyname in to_transfer:
keyname = os.path.join(config.get("folder", ""), orig_keyname)
key = bucket.get_key(keyname) if bucket else None
modified = datetime.datetime.fromtimestamp(email.utils.mktime_tz(
email.utils.parsedate_tz(key.last_modified))) if key else None
no_upload = key and modified >= finfo["mtime"]
if not no_upload:
_upload_file_aws_cli(fname, config["bucket"], keyname, config, finfo) | python | def update_file(finfo, sample_info, config):
"""Update the file to an Amazon S3 bucket, using server side encryption.
"""
ffinal = filesystem.update_file(finfo, sample_info, config, pass_uptodate=True)
if os.path.isdir(ffinal):
to_transfer = []
for path, dirs, files in os.walk(ffinal):
for f in files:
full_f = os.path.join(path, f)
k = full_f.replace(os.path.abspath(config["dir"]) + "/", "")
to_transfer.append((full_f, k))
else:
k = ffinal.replace(os.path.abspath(config["dir"]) + "/", "")
to_transfer = [(ffinal, k)]
region = "@%s" % config["region"] if config.get("region") else ""
fname = "s3://%s%s/%s" % (config["bucket"], region, to_transfer[0][1])
conn = objectstore.connect(fname)
bucket = conn.lookup(config["bucket"])
if not bucket:
bucket = conn.create_bucket(config["bucket"], location=config.get("region", "us-east-1"))
for fname, orig_keyname in to_transfer:
keyname = os.path.join(config.get("folder", ""), orig_keyname)
key = bucket.get_key(keyname) if bucket else None
modified = datetime.datetime.fromtimestamp(email.utils.mktime_tz(
email.utils.parsedate_tz(key.last_modified))) if key else None
no_upload = key and modified >= finfo["mtime"]
if not no_upload:
_upload_file_aws_cli(fname, config["bucket"], keyname, config, finfo) | [
"def",
"update_file",
"(",
"finfo",
",",
"sample_info",
",",
"config",
")",
":",
"ffinal",
"=",
"filesystem",
".",
"update_file",
"(",
"finfo",
",",
"sample_info",
",",
"config",
",",
"pass_uptodate",
"=",
"True",
")",
"if",
"os",
".",
"path",
".",
"isdi... | Update the file to an Amazon S3 bucket, using server side encryption. | [
"Update",
"the",
"file",
"to",
"an",
"Amazon",
"S3",
"bucket",
"using",
"server",
"side",
"encryption",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/s3.py#L20-L49 | train | 219,036 |
bcbio/bcbio-nextgen | bcbio/upload/s3.py | _upload_file_aws_cli | def _upload_file_aws_cli(local_fname, bucket, keyname, config=None, mditems=None):
"""Streaming upload via the standard AWS command line interface.
"""
s3_fname = "s3://%s/%s" % (bucket, keyname)
args = ["--sse", "--expected-size", str(os.path.getsize(local_fname))]
if config:
if config.get("region"):
args += ["--region", config.get("region")]
if config.get("reduced_redundancy"):
args += ["--storage-class", "REDUCED_REDUNDANCY"]
cmd = [os.path.join(os.path.dirname(sys.executable), "aws"), "s3", "cp"] + args + \
[local_fname, s3_fname]
do.run(cmd, "Upload to s3: %s %s" % (bucket, keyname)) | python | def _upload_file_aws_cli(local_fname, bucket, keyname, config=None, mditems=None):
"""Streaming upload via the standard AWS command line interface.
"""
s3_fname = "s3://%s/%s" % (bucket, keyname)
args = ["--sse", "--expected-size", str(os.path.getsize(local_fname))]
if config:
if config.get("region"):
args += ["--region", config.get("region")]
if config.get("reduced_redundancy"):
args += ["--storage-class", "REDUCED_REDUNDANCY"]
cmd = [os.path.join(os.path.dirname(sys.executable), "aws"), "s3", "cp"] + args + \
[local_fname, s3_fname]
do.run(cmd, "Upload to s3: %s %s" % (bucket, keyname)) | [
"def",
"_upload_file_aws_cli",
"(",
"local_fname",
",",
"bucket",
",",
"keyname",
",",
"config",
"=",
"None",
",",
"mditems",
"=",
"None",
")",
":",
"s3_fname",
"=",
"\"s3://%s/%s\"",
"%",
"(",
"bucket",
",",
"keyname",
")",
"args",
"=",
"[",
"\"--sse\"",
... | Streaming upload via the standard AWS command line interface. | [
"Streaming",
"upload",
"via",
"the",
"standard",
"AWS",
"command",
"line",
"interface",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/s3.py#L69-L81 | train | 219,037 |
bcbio/bcbio-nextgen | bcbio/upload/s3.py | upload_file_boto | def upload_file_boto(fname, remote_fname, mditems=None):
"""Upload a file using boto instead of external tools.
"""
r_fname = objectstore.parse_remote(remote_fname)
conn = objectstore.connect(remote_fname)
bucket = conn.lookup(r_fname.bucket)
if not bucket:
bucket = conn.create_bucket(r_fname.bucket, location=objectstore.get_region(remote_fname))
key = bucket.get_key(r_fname.key, validate=False)
if mditems is None:
mditems = {}
if "x-amz-server-side-encryption" not in mditems:
mditems["x-amz-server-side-encryption"] = "AES256"
for name, val in mditems.items():
key.set_metadata(name, val)
key.set_contents_from_filename(fname, encrypt_key=True) | python | def upload_file_boto(fname, remote_fname, mditems=None):
"""Upload a file using boto instead of external tools.
"""
r_fname = objectstore.parse_remote(remote_fname)
conn = objectstore.connect(remote_fname)
bucket = conn.lookup(r_fname.bucket)
if not bucket:
bucket = conn.create_bucket(r_fname.bucket, location=objectstore.get_region(remote_fname))
key = bucket.get_key(r_fname.key, validate=False)
if mditems is None:
mditems = {}
if "x-amz-server-side-encryption" not in mditems:
mditems["x-amz-server-side-encryption"] = "AES256"
for name, val in mditems.items():
key.set_metadata(name, val)
key.set_contents_from_filename(fname, encrypt_key=True) | [
"def",
"upload_file_boto",
"(",
"fname",
",",
"remote_fname",
",",
"mditems",
"=",
"None",
")",
":",
"r_fname",
"=",
"objectstore",
".",
"parse_remote",
"(",
"remote_fname",
")",
"conn",
"=",
"objectstore",
".",
"connect",
"(",
"remote_fname",
")",
"bucket",
... | Upload a file using boto instead of external tools. | [
"Upload",
"a",
"file",
"using",
"boto",
"instead",
"of",
"external",
"tools",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/s3.py#L83-L98 | train | 219,038 |
bcbio/bcbio-nextgen | bcbio/qc/chipseq.py | run | def run(bam_file, sample, out_dir):
"""Standard QC metrics for chipseq"""
out = {}
# if "rchipqc" in dd.get_tools_on(sample):
# out = chipqc(bam_file, sample, out_dir)
peaks = sample.get("peaks_files", {}).get("main")
if peaks:
out.update(_reads_in_peaks(bam_file, peaks, sample))
return out | python | def run(bam_file, sample, out_dir):
"""Standard QC metrics for chipseq"""
out = {}
# if "rchipqc" in dd.get_tools_on(sample):
# out = chipqc(bam_file, sample, out_dir)
peaks = sample.get("peaks_files", {}).get("main")
if peaks:
out.update(_reads_in_peaks(bam_file, peaks, sample))
return out | [
"def",
"run",
"(",
"bam_file",
",",
"sample",
",",
"out_dir",
")",
":",
"out",
"=",
"{",
"}",
"# if \"rchipqc\" in dd.get_tools_on(sample):",
"# out = chipqc(bam_file, sample, out_dir)",
"peaks",
"=",
"sample",
".",
"get",
"(",
"\"peaks_files\"",
",",
"{",
"}",
... | Standard QC metrics for chipseq | [
"Standard",
"QC",
"metrics",
"for",
"chipseq"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L16-L25 | train | 219,039 |
bcbio/bcbio-nextgen | bcbio/qc/chipseq.py | _reads_in_peaks | def _reads_in_peaks(bam_file, peaks_file, sample):
"""Calculate number of reads in peaks"""
if not peaks_file:
return {}
rip = number_of_mapped_reads(sample, bam_file, bed_file = peaks_file)
return {"metrics": {"RiP": rip}} | python | def _reads_in_peaks(bam_file, peaks_file, sample):
"""Calculate number of reads in peaks"""
if not peaks_file:
return {}
rip = number_of_mapped_reads(sample, bam_file, bed_file = peaks_file)
return {"metrics": {"RiP": rip}} | [
"def",
"_reads_in_peaks",
"(",
"bam_file",
",",
"peaks_file",
",",
"sample",
")",
":",
"if",
"not",
"peaks_file",
":",
"return",
"{",
"}",
"rip",
"=",
"number_of_mapped_reads",
"(",
"sample",
",",
"bam_file",
",",
"bed_file",
"=",
"peaks_file",
")",
"return"... | Calculate number of reads in peaks | [
"Calculate",
"number",
"of",
"reads",
"in",
"peaks"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L27-L32 | train | 219,040 |
bcbio/bcbio-nextgen | bcbio/qc/chipseq.py | chipqc | def chipqc(bam_file, sample, out_dir):
"""Attempt code to run ChIPQC bioconductor packate in one sample"""
sample_name = dd.get_sample_name(sample)
logger.warning("ChIPQC is unstable right now, if it breaks, turn off the tool.")
if utils.file_exists(out_dir):
return _get_output(out_dir)
with tx_tmpdir() as tmp_dir:
rcode = _sample_template(sample, tmp_dir)
if rcode:
# local_sitelib = utils.R_sitelib()
rscript = utils.Rscript_cmd()
do.run([rscript, "--no-environ", rcode], "ChIPQC in %s" % sample_name, log_error=False)
shutil.move(tmp_dir, out_dir)
return _get_output(out_dir) | python | def chipqc(bam_file, sample, out_dir):
"""Attempt code to run ChIPQC bioconductor packate in one sample"""
sample_name = dd.get_sample_name(sample)
logger.warning("ChIPQC is unstable right now, if it breaks, turn off the tool.")
if utils.file_exists(out_dir):
return _get_output(out_dir)
with tx_tmpdir() as tmp_dir:
rcode = _sample_template(sample, tmp_dir)
if rcode:
# local_sitelib = utils.R_sitelib()
rscript = utils.Rscript_cmd()
do.run([rscript, "--no-environ", rcode], "ChIPQC in %s" % sample_name, log_error=False)
shutil.move(tmp_dir, out_dir)
return _get_output(out_dir) | [
"def",
"chipqc",
"(",
"bam_file",
",",
"sample",
",",
"out_dir",
")",
":",
"sample_name",
"=",
"dd",
".",
"get_sample_name",
"(",
"sample",
")",
"logger",
".",
"warning",
"(",
"\"ChIPQC is unstable right now, if it breaks, turn off the tool.\"",
")",
"if",
"utils",
... | Attempt code to run ChIPQC bioconductor packate in one sample | [
"Attempt",
"code",
"to",
"run",
"ChIPQC",
"bioconductor",
"packate",
"in",
"one",
"sample"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L34-L47 | train | 219,041 |
bcbio/bcbio-nextgen | bcbio/qc/chipseq.py | _sample_template | def _sample_template(sample, out_dir):
"""R code to get QC for one sample"""
bam_fn = dd.get_work_bam(sample)
genome = dd.get_genome_build(sample)
if genome in supported:
peaks = sample.get("peaks_files", []).get("main")
if peaks:
r_code = ("library(ChIPQC);\n"
"sample = ChIPQCsample(\"{bam_fn}\","
"\"{peaks}\", "
"annotation = \"{genome}\","
");\n"
"ChIPQCreport(sample);\n")
r_code_fn = os.path.join(out_dir, "chipqc.r")
with open(r_code_fn, 'w') as inh:
inh.write(r_code.format(**locals()))
return r_code_fn | python | def _sample_template(sample, out_dir):
"""R code to get QC for one sample"""
bam_fn = dd.get_work_bam(sample)
genome = dd.get_genome_build(sample)
if genome in supported:
peaks = sample.get("peaks_files", []).get("main")
if peaks:
r_code = ("library(ChIPQC);\n"
"sample = ChIPQCsample(\"{bam_fn}\","
"\"{peaks}\", "
"annotation = \"{genome}\","
");\n"
"ChIPQCreport(sample);\n")
r_code_fn = os.path.join(out_dir, "chipqc.r")
with open(r_code_fn, 'w') as inh:
inh.write(r_code.format(**locals()))
return r_code_fn | [
"def",
"_sample_template",
"(",
"sample",
",",
"out_dir",
")",
":",
"bam_fn",
"=",
"dd",
".",
"get_work_bam",
"(",
"sample",
")",
"genome",
"=",
"dd",
".",
"get_genome_build",
"(",
"sample",
")",
"if",
"genome",
"in",
"supported",
":",
"peaks",
"=",
"sam... | R code to get QC for one sample | [
"R",
"code",
"to",
"get",
"QC",
"for",
"one",
"sample"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L56-L72 | train | 219,042 |
bcbio/bcbio-nextgen | bcbio/rnaseq/featureCounts.py | _change_sample_name | def _change_sample_name(in_file, sample_name, data=None):
"""Fix name in feature counts log file to get the same
name in multiqc report.
"""
out_file = append_stem(in_file, "_fixed")
with file_transaction(data, out_file) as tx_out:
with open(tx_out, "w") as out_handle:
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("Status"):
line = "Status\t%s.bam" % sample_name
out_handle.write("%s\n" % line.strip())
return out_file | python | def _change_sample_name(in_file, sample_name, data=None):
"""Fix name in feature counts log file to get the same
name in multiqc report.
"""
out_file = append_stem(in_file, "_fixed")
with file_transaction(data, out_file) as tx_out:
with open(tx_out, "w") as out_handle:
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("Status"):
line = "Status\t%s.bam" % sample_name
out_handle.write("%s\n" % line.strip())
return out_file | [
"def",
"_change_sample_name",
"(",
"in_file",
",",
"sample_name",
",",
"data",
"=",
"None",
")",
":",
"out_file",
"=",
"append_stem",
"(",
"in_file",
",",
"\"_fixed\"",
")",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out",
":",
... | Fix name in feature counts log file to get the same
name in multiqc report. | [
"Fix",
"name",
"in",
"feature",
"counts",
"log",
"file",
"to",
"get",
"the",
"same",
"name",
"in",
"multiqc",
"report",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/featureCounts.py#L57-L69 | train | 219,043 |
bcbio/bcbio-nextgen | bcbio/rnaseq/featureCounts.py | _format_count_file | def _format_count_file(count_file, data):
"""
this cuts the count file produced from featureCounts down to
a two column file of gene ids and number of reads mapping to
each gene
"""
COUNT_COLUMN = 5
out_file = os.path.splitext(count_file)[0] + ".fixed.counts"
if file_exists(out_file):
return out_file
df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=1)
df_sub = df.ix[:, COUNT_COLUMN]
with file_transaction(data, out_file) as tx_out_file:
df_sub.to_csv(tx_out_file, sep="\t", index_label="id", header=False)
return out_file | python | def _format_count_file(count_file, data):
"""
this cuts the count file produced from featureCounts down to
a two column file of gene ids and number of reads mapping to
each gene
"""
COUNT_COLUMN = 5
out_file = os.path.splitext(count_file)[0] + ".fixed.counts"
if file_exists(out_file):
return out_file
df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=1)
df_sub = df.ix[:, COUNT_COLUMN]
with file_transaction(data, out_file) as tx_out_file:
df_sub.to_csv(tx_out_file, sep="\t", index_label="id", header=False)
return out_file | [
"def",
"_format_count_file",
"(",
"count_file",
",",
"data",
")",
":",
"COUNT_COLUMN",
"=",
"5",
"out_file",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"count_file",
")",
"[",
"0",
"]",
"+",
"\".fixed.counts\"",
"if",
"file_exists",
"(",
"out_file",
")"... | this cuts the count file produced from featureCounts down to
a two column file of gene ids and number of reads mapping to
each gene | [
"this",
"cuts",
"the",
"count",
"file",
"produced",
"from",
"featureCounts",
"down",
"to",
"a",
"two",
"column",
"file",
"of",
"gene",
"ids",
"and",
"number",
"of",
"reads",
"mapping",
"to",
"each",
"gene"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/featureCounts.py#L71-L86 | train | 219,044 |
bcbio/bcbio-nextgen | bcbio/variation/peddy.py | run_qc | def run_qc(_, data, out_dir):
"""Run quality control in QC environment on a single sample.
Enables peddy integration with CWL runs.
"""
if cwlutils.is_cwl_run(data):
qc_data = run_peddy([data], out_dir)
if tz.get_in(["summary", "qc", "peddy"], qc_data):
return tz.get_in(["summary", "qc", "peddy"], qc_data) | python | def run_qc(_, data, out_dir):
"""Run quality control in QC environment on a single sample.
Enables peddy integration with CWL runs.
"""
if cwlutils.is_cwl_run(data):
qc_data = run_peddy([data], out_dir)
if tz.get_in(["summary", "qc", "peddy"], qc_data):
return tz.get_in(["summary", "qc", "peddy"], qc_data) | [
"def",
"run_qc",
"(",
"_",
",",
"data",
",",
"out_dir",
")",
":",
"if",
"cwlutils",
".",
"is_cwl_run",
"(",
"data",
")",
":",
"qc_data",
"=",
"run_peddy",
"(",
"[",
"data",
"]",
",",
"out_dir",
")",
"if",
"tz",
".",
"get_in",
"(",
"[",
"\"summary\"... | Run quality control in QC environment on a single sample.
Enables peddy integration with CWL runs. | [
"Run",
"quality",
"control",
"in",
"QC",
"environment",
"on",
"a",
"single",
"sample",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/peddy.py#L35-L43 | train | 219,045 |
bcbio/bcbio-nextgen | bcbio/chipseq/macs2.py | run | def run(name, chip_bam, input_bam, genome_build, out_dir, method, resources, data):
"""
Run macs2 for chip and input samples avoiding
errors due to samples.
"""
# output file name need to have the caller name
config = dd.get_config(data)
out_file = os.path.join(out_dir, name + "_peaks_macs2.xls")
macs2_file = os.path.join(out_dir, name + "_peaks.xls")
if utils.file_exists(out_file):
_compres_bdg_files(out_dir)
return _get_output_files(out_dir)
macs2 = config_utils.get_program("macs2", config)
options = " ".join(resources.get("macs2", {}).get("options", ""))
genome_size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
genome_size = "" if options.find("-g") > -1 else "-g %s" % genome_size
paired = "-f BAMPE" if bam.is_paired(chip_bam) else ""
with utils.chdir(out_dir):
cmd = _macs2_cmd(method)
try:
do.run(cmd.format(**locals()), "macs2 for %s" % name)
utils.move_safe(macs2_file, out_file)
except subprocess.CalledProcessError:
raise RuntimeWarning("macs2 terminated with an error.\n"
"Please, check the message and report "
"error if it is related to bcbio.\n"
"You can add specific options for the sample "
"setting resources as explained in docs: "
"https://bcbio-nextgen.readthedocs.org/en/latest/contents/configuration.html#sample-specific-resources")
_compres_bdg_files(out_dir)
return _get_output_files(out_dir) | python | def run(name, chip_bam, input_bam, genome_build, out_dir, method, resources, data):
"""
Run macs2 for chip and input samples avoiding
errors due to samples.
"""
# output file name need to have the caller name
config = dd.get_config(data)
out_file = os.path.join(out_dir, name + "_peaks_macs2.xls")
macs2_file = os.path.join(out_dir, name + "_peaks.xls")
if utils.file_exists(out_file):
_compres_bdg_files(out_dir)
return _get_output_files(out_dir)
macs2 = config_utils.get_program("macs2", config)
options = " ".join(resources.get("macs2", {}).get("options", ""))
genome_size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
genome_size = "" if options.find("-g") > -1 else "-g %s" % genome_size
paired = "-f BAMPE" if bam.is_paired(chip_bam) else ""
with utils.chdir(out_dir):
cmd = _macs2_cmd(method)
try:
do.run(cmd.format(**locals()), "macs2 for %s" % name)
utils.move_safe(macs2_file, out_file)
except subprocess.CalledProcessError:
raise RuntimeWarning("macs2 terminated with an error.\n"
"Please, check the message and report "
"error if it is related to bcbio.\n"
"You can add specific options for the sample "
"setting resources as explained in docs: "
"https://bcbio-nextgen.readthedocs.org/en/latest/contents/configuration.html#sample-specific-resources")
_compres_bdg_files(out_dir)
return _get_output_files(out_dir) | [
"def",
"run",
"(",
"name",
",",
"chip_bam",
",",
"input_bam",
",",
"genome_build",
",",
"out_dir",
",",
"method",
",",
"resources",
",",
"data",
")",
":",
"# output file name need to have the caller name",
"config",
"=",
"dd",
".",
"get_config",
"(",
"data",
"... | Run macs2 for chip and input samples avoiding
errors due to samples. | [
"Run",
"macs2",
"for",
"chip",
"and",
"input",
"samples",
"avoiding",
"errors",
"due",
"to",
"samples",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/macs2.py#L11-L41 | train | 219,046 |
bcbio/bcbio-nextgen | bcbio/chipseq/macs2.py | _macs2_cmd | def _macs2_cmd(method="chip"):
"""Main command for macs2 tool."""
if method.lower() == "chip":
cmd = ("{macs2} callpeak -t {chip_bam} -c {input_bam} {paired} "
" {genome_size} -n {name} -B {options}")
elif method.lower() == "atac":
cmd = ("{macs2} callpeak -t {chip_bam} --nomodel "
" {paired} {genome_size} -n {name} -B {options}"
" --nolambda --keep-dup all")
else:
raise ValueError("chip_method should be chip or atac.")
return cmd | python | def _macs2_cmd(method="chip"):
"""Main command for macs2 tool."""
if method.lower() == "chip":
cmd = ("{macs2} callpeak -t {chip_bam} -c {input_bam} {paired} "
" {genome_size} -n {name} -B {options}")
elif method.lower() == "atac":
cmd = ("{macs2} callpeak -t {chip_bam} --nomodel "
" {paired} {genome_size} -n {name} -B {options}"
" --nolambda --keep-dup all")
else:
raise ValueError("chip_method should be chip or atac.")
return cmd | [
"def",
"_macs2_cmd",
"(",
"method",
"=",
"\"chip\"",
")",
":",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"\"chip\"",
":",
"cmd",
"=",
"(",
"\"{macs2} callpeak -t {chip_bam} -c {input_bam} {paired} \"",
"\" {genome_size} -n {name} -B {options}\"",
")",
"elif",
"met... | Main command for macs2 tool. | [
"Main",
"command",
"for",
"macs2",
"tool",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/macs2.py#L60-L71 | train | 219,047 |
bcbio/bcbio-nextgen | bcbio/pipeline/archive.py | to_cram | def to_cram(data):
"""Convert BAM archive files into indexed CRAM.
"""
data = utils.to_single_data(data)
cram_file = cram.compress(dd.get_work_bam(data) or dd.get_align_bam(data), data)
out_key = "archive_bam" if cwlutils.is_cwl_run(data) else "work_bam"
data[out_key] = cram_file
return [[data]] | python | def to_cram(data):
"""Convert BAM archive files into indexed CRAM.
"""
data = utils.to_single_data(data)
cram_file = cram.compress(dd.get_work_bam(data) or dd.get_align_bam(data), data)
out_key = "archive_bam" if cwlutils.is_cwl_run(data) else "work_bam"
data[out_key] = cram_file
return [[data]] | [
"def",
"to_cram",
"(",
"data",
")",
":",
"data",
"=",
"utils",
".",
"to_single_data",
"(",
"data",
")",
"cram_file",
"=",
"cram",
".",
"compress",
"(",
"dd",
".",
"get_work_bam",
"(",
"data",
")",
"or",
"dd",
".",
"get_align_bam",
"(",
"data",
")",
"... | Convert BAM archive files into indexed CRAM. | [
"Convert",
"BAM",
"archive",
"files",
"into",
"indexed",
"CRAM",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/archive.py#L11-L18 | train | 219,048 |
bcbio/bcbio-nextgen | bcbio/pipeline/archive.py | compress | def compress(samples, run_parallel):
"""Perform compression of output files for long term storage.
"""
to_cram = []
finished = []
for data in [x[0] for x in samples]:
if "cram" in dd.get_archive(data) or "cram-lossless" in dd.get_archive(data):
to_cram.append([data])
else:
finished.append([data])
crammed = run_parallel("archive_to_cram", to_cram)
return finished + crammed | python | def compress(samples, run_parallel):
"""Perform compression of output files for long term storage.
"""
to_cram = []
finished = []
for data in [x[0] for x in samples]:
if "cram" in dd.get_archive(data) or "cram-lossless" in dd.get_archive(data):
to_cram.append([data])
else:
finished.append([data])
crammed = run_parallel("archive_to_cram", to_cram)
return finished + crammed | [
"def",
"compress",
"(",
"samples",
",",
"run_parallel",
")",
":",
"to_cram",
"=",
"[",
"]",
"finished",
"=",
"[",
"]",
"for",
"data",
"in",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"samples",
"]",
":",
"if",
"\"cram\"",
"in",
"dd",
".",
"get_ar... | Perform compression of output files for long term storage. | [
"Perform",
"compression",
"of",
"output",
"files",
"for",
"long",
"term",
"storage",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/archive.py#L20-L31 | train | 219,049 |
bcbio/bcbio-nextgen | bcbio/qc/samtools.py | run | def run(_, data, out_dir=None):
"""Run samtools stats with reports on mapped reads, duplicates and insert sizes.
"""
stats_file, idxstats_file = _get_stats_files(data, out_dir)
samtools = config_utils.get_program("samtools", data["config"])
bam_file = dd.get_align_bam(data) or dd.get_work_bam(data)
if not utils.file_exists(stats_file):
utils.safe_makedir(out_dir)
with file_transaction(data, stats_file) as tx_out_file:
cores = dd.get_num_cores(data)
cmd = "{samtools} stats -@ {cores} {bam_file}"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "samtools stats", data)
if not utils.file_exists(idxstats_file):
utils.safe_makedir(out_dir)
with file_transaction(data, idxstats_file) as tx_out_file:
cmd = "{samtools} idxstats {bam_file}"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "samtools index stats", data)
out = {"base": idxstats_file, "secondary": [stats_file]}
out["metrics"] = _parse_samtools_stats(stats_file)
return out | python | def run(_, data, out_dir=None):
"""Run samtools stats with reports on mapped reads, duplicates and insert sizes.
"""
stats_file, idxstats_file = _get_stats_files(data, out_dir)
samtools = config_utils.get_program("samtools", data["config"])
bam_file = dd.get_align_bam(data) or dd.get_work_bam(data)
if not utils.file_exists(stats_file):
utils.safe_makedir(out_dir)
with file_transaction(data, stats_file) as tx_out_file:
cores = dd.get_num_cores(data)
cmd = "{samtools} stats -@ {cores} {bam_file}"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "samtools stats", data)
if not utils.file_exists(idxstats_file):
utils.safe_makedir(out_dir)
with file_transaction(data, idxstats_file) as tx_out_file:
cmd = "{samtools} idxstats {bam_file}"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "samtools index stats", data)
out = {"base": idxstats_file, "secondary": [stats_file]}
out["metrics"] = _parse_samtools_stats(stats_file)
return out | [
"def",
"run",
"(",
"_",
",",
"data",
",",
"out_dir",
"=",
"None",
")",
":",
"stats_file",
",",
"idxstats_file",
"=",
"_get_stats_files",
"(",
"data",
",",
"out_dir",
")",
"samtools",
"=",
"config_utils",
".",
"get_program",
"(",
"\"samtools\"",
",",
"data"... | Run samtools stats with reports on mapped reads, duplicates and insert sizes. | [
"Run",
"samtools",
"stats",
"with",
"reports",
"on",
"mapped",
"reads",
"duplicates",
"and",
"insert",
"sizes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/samtools.py#L13-L34 | train | 219,050 |
bcbio/bcbio-nextgen | bcbio/qc/samtools.py | run_and_save | def run_and_save(data):
"""Run QC, saving file outputs in data dictionary.
"""
run(None, data)
stats_file, idxstats_file = _get_stats_files(data)
data = tz.update_in(data, ["depth", "samtools", "stats"], lambda x: stats_file)
data = tz.update_in(data, ["depth", "samtools", "idxstats"], lambda x: idxstats_file)
return data | python | def run_and_save(data):
"""Run QC, saving file outputs in data dictionary.
"""
run(None, data)
stats_file, idxstats_file = _get_stats_files(data)
data = tz.update_in(data, ["depth", "samtools", "stats"], lambda x: stats_file)
data = tz.update_in(data, ["depth", "samtools", "idxstats"], lambda x: idxstats_file)
return data | [
"def",
"run_and_save",
"(",
"data",
")",
":",
"run",
"(",
"None",
",",
"data",
")",
"stats_file",
",",
"idxstats_file",
"=",
"_get_stats_files",
"(",
"data",
")",
"data",
"=",
"tz",
".",
"update_in",
"(",
"data",
",",
"[",
"\"depth\"",
",",
"\"samtools\"... | Run QC, saving file outputs in data dictionary. | [
"Run",
"QC",
"saving",
"file",
"outputs",
"in",
"data",
"dictionary",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/samtools.py#L36-L43 | train | 219,051 |
bcbio/bcbio-nextgen | bcbio/qc/samtools.py | _get_stats_files | def _get_stats_files(data, out_dir=None):
"""Retrieve stats files from pre-existing dictionary or filesystem.
"""
if not out_dir:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data),
"qc", dd.get_sample_name(data), "samtools"))
stats_file = tz.get_in(["depth", "samtools", "stats"], data)
idxstats_file = tz.get_in(["depth", "samtools", "idxstats"], data)
if not stats_file:
stats_file = os.path.join(out_dir, "%s.txt" % dd.get_sample_name(data))
if not idxstats_file:
idxstats_file = os.path.join(out_dir, "%s-idxstats.txt" % dd.get_sample_name(data))
return stats_file, idxstats_file | python | def _get_stats_files(data, out_dir=None):
"""Retrieve stats files from pre-existing dictionary or filesystem.
"""
if not out_dir:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data),
"qc", dd.get_sample_name(data), "samtools"))
stats_file = tz.get_in(["depth", "samtools", "stats"], data)
idxstats_file = tz.get_in(["depth", "samtools", "idxstats"], data)
if not stats_file:
stats_file = os.path.join(out_dir, "%s.txt" % dd.get_sample_name(data))
if not idxstats_file:
idxstats_file = os.path.join(out_dir, "%s-idxstats.txt" % dd.get_sample_name(data))
return stats_file, idxstats_file | [
"def",
"_get_stats_files",
"(",
"data",
",",
"out_dir",
"=",
"None",
")",
":",
"if",
"not",
"out_dir",
":",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dd",
".",
"get_work_dir",
"(",
"data",
")",
",",
"\"q... | Retrieve stats files from pre-existing dictionary or filesystem. | [
"Retrieve",
"stats",
"files",
"from",
"pre",
"-",
"existing",
"dictionary",
"or",
"filesystem",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/samtools.py#L45-L57 | train | 219,052 |
bcbio/bcbio-nextgen | bcbio/provenance/do.py | _descr_str | def _descr_str(descr, data, region):
"""Add additional useful information from data to description string.
"""
if data:
name = dd.get_sample_name(data)
if name:
descr = "{0} : {1}".format(descr, name)
elif "work_bam" in data:
descr = "{0} : {1}".format(descr, os.path.basename(data["work_bam"]))
if region:
descr = "{0} : {1}".format(descr, region)
return descr | python | def _descr_str(descr, data, region):
"""Add additional useful information from data to description string.
"""
if data:
name = dd.get_sample_name(data)
if name:
descr = "{0} : {1}".format(descr, name)
elif "work_bam" in data:
descr = "{0} : {1}".format(descr, os.path.basename(data["work_bam"]))
if region:
descr = "{0} : {1}".format(descr, region)
return descr | [
"def",
"_descr_str",
"(",
"descr",
",",
"data",
",",
"region",
")",
":",
"if",
"data",
":",
"name",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"if",
"name",
":",
"descr",
"=",
"\"{0} : {1}\"",
".",
"format",
"(",
"descr",
",",
"name",
")",
... | Add additional useful information from data to description string. | [
"Add",
"additional",
"useful",
"information",
"from",
"data",
"to",
"description",
"string",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/do.py#L35-L46 | train | 219,053 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | get_indelcaller | def get_indelcaller(d_or_c):
"""Retrieve string for indelcaller to use, or empty string if not specified.
"""
config = d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c
indelcaller = config["algorithm"].get("indelcaller", "")
if not indelcaller:
indelcaller = ""
if isinstance(indelcaller, (list, tuple)):
indelcaller = indelcaller[0] if (len(indelcaller) > 0) else ""
return indelcaller | python | def get_indelcaller(d_or_c):
"""Retrieve string for indelcaller to use, or empty string if not specified.
"""
config = d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c
indelcaller = config["algorithm"].get("indelcaller", "")
if not indelcaller:
indelcaller = ""
if isinstance(indelcaller, (list, tuple)):
indelcaller = indelcaller[0] if (len(indelcaller) > 0) else ""
return indelcaller | [
"def",
"get_indelcaller",
"(",
"d_or_c",
")",
":",
"config",
"=",
"d_or_c",
"if",
"isinstance",
"(",
"d_or_c",
",",
"dict",
")",
"and",
"\"config\"",
"in",
"d_or_c",
"else",
"d_or_c",
"indelcaller",
"=",
"config",
"[",
"\"algorithm\"",
"]",
".",
"get",
"("... | Retrieve string for indelcaller to use, or empty string if not specified. | [
"Retrieve",
"string",
"for",
"indelcaller",
"to",
"use",
"or",
"empty",
"string",
"if",
"not",
"specified",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L140-L149 | train | 219,054 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | split_snps_indels | def split_snps_indels(orig_file, ref_file, config):
"""Split a variant call file into SNPs and INDELs for processing.
"""
base, ext = utils.splitext_plus(orig_file)
snp_file = "{base}-snp{ext}".format(base=base, ext=ext)
indel_file = "{base}-indel{ext}".format(base=base, ext=ext)
for out_file, select_arg in [(snp_file, "--types snps"),
(indel_file, "--exclude-types snps")]:
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
cmd = "{bcftools} view -O {output_type} {orig_file} {select_arg} > {tx_out_file}"
do.run(cmd.format(**locals()), "Subset to SNPs and indels")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return snp_file, indel_file | python | def split_snps_indels(orig_file, ref_file, config):
"""Split a variant call file into SNPs and INDELs for processing.
"""
base, ext = utils.splitext_plus(orig_file)
snp_file = "{base}-snp{ext}".format(base=base, ext=ext)
indel_file = "{base}-indel{ext}".format(base=base, ext=ext)
for out_file, select_arg in [(snp_file, "--types snps"),
(indel_file, "--exclude-types snps")]:
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
cmd = "{bcftools} view -O {output_type} {orig_file} {select_arg} > {tx_out_file}"
do.run(cmd.format(**locals()), "Subset to SNPs and indels")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return snp_file, indel_file | [
"def",
"split_snps_indels",
"(",
"orig_file",
",",
"ref_file",
",",
"config",
")",
":",
"base",
",",
"ext",
"=",
"utils",
".",
"splitext_plus",
"(",
"orig_file",
")",
"snp_file",
"=",
"\"{base}-snp{ext}\"",
".",
"format",
"(",
"base",
"=",
"base",
",",
"ex... | Split a variant call file into SNPs and INDELs for processing. | [
"Split",
"a",
"variant",
"call",
"file",
"into",
"SNPs",
"and",
"INDELs",
"for",
"processing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L183-L199 | train | 219,055 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | get_samples | def get_samples(in_file):
"""Retrieve samples present in a VCF file
"""
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if line.startswith("#CHROM"):
parts = line.strip().split("\t")
return parts[9:]
raise ValueError("Did not find sample header in VCF file %s" % in_file) | python | def get_samples(in_file):
"""Retrieve samples present in a VCF file
"""
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if line.startswith("#CHROM"):
parts = line.strip().split("\t")
return parts[9:]
raise ValueError("Did not find sample header in VCF file %s" % in_file) | [
"def",
"get_samples",
"(",
"in_file",
")",
":",
"with",
"utils",
".",
"open_gzipsafe",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"for",
"line",
"in",
"in_handle",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#CHROM\"",
")",
":",
"parts",
"=",
"line",... | Retrieve samples present in a VCF file | [
"Retrieve",
"samples",
"present",
"in",
"a",
"VCF",
"file"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L210-L218 | train | 219,056 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _get_exclude_samples | def _get_exclude_samples(in_file, to_exclude):
"""Identify samples in the exclusion list which are actually in the VCF.
"""
include, exclude = [], []
to_exclude = set(to_exclude)
for s in get_samples(in_file):
if s in to_exclude:
exclude.append(s)
else:
include.append(s)
return include, exclude | python | def _get_exclude_samples(in_file, to_exclude):
"""Identify samples in the exclusion list which are actually in the VCF.
"""
include, exclude = [], []
to_exclude = set(to_exclude)
for s in get_samples(in_file):
if s in to_exclude:
exclude.append(s)
else:
include.append(s)
return include, exclude | [
"def",
"_get_exclude_samples",
"(",
"in_file",
",",
"to_exclude",
")",
":",
"include",
",",
"exclude",
"=",
"[",
"]",
",",
"[",
"]",
"to_exclude",
"=",
"set",
"(",
"to_exclude",
")",
"for",
"s",
"in",
"get_samples",
"(",
"in_file",
")",
":",
"if",
"s",... | Identify samples in the exclusion list which are actually in the VCF. | [
"Identify",
"samples",
"in",
"the",
"exclusion",
"list",
"which",
"are",
"actually",
"in",
"the",
"VCF",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L220-L230 | train | 219,057 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | exclude_samples | def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None):
"""Exclude specific samples from an input VCF file.
"""
include, exclude = _get_exclude_samples(in_file, to_exclude)
# can use the input sample, all exclusions already gone
if len(exclude) == 0:
out_file = in_file
elif not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
include_str = ",".join(include)
filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.'
cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude))
return out_file | python | def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None):
"""Exclude specific samples from an input VCF file.
"""
include, exclude = _get_exclude_samples(in_file, to_exclude)
# can use the input sample, all exclusions already gone
if len(exclude) == 0:
out_file = in_file
elif not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
include_str = ",".join(include)
filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.'
cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude))
return out_file | [
"def",
"exclude_samples",
"(",
"in_file",
",",
"out_file",
",",
"to_exclude",
",",
"ref_file",
",",
"config",
",",
"filters",
"=",
"None",
")",
":",
"include",
",",
"exclude",
"=",
"_get_exclude_samples",
"(",
"in_file",
",",
"to_exclude",
")",
"# can use the ... | Exclude specific samples from an input VCF file. | [
"Exclude",
"specific",
"samples",
"from",
"an",
"input",
"VCF",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L232-L247 | train | 219,058 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | select_sample | def select_sample(in_file, sample, out_file, config, filters=None):
"""Select a single sample from the supplied multisample VCF file.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
if len(get_samples(in_file)) == 1:
shutil.copy(in_file, tx_out_file)
else:
if in_file.endswith(".gz"):
bgzip_and_index(in_file, config)
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.'
cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}"
do.run(cmd.format(**locals()), "Select sample: %s" % sample)
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | python | def select_sample(in_file, sample, out_file, config, filters=None):
"""Select a single sample from the supplied multisample VCF file.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
if len(get_samples(in_file)) == 1:
shutil.copy(in_file, tx_out_file)
else:
if in_file.endswith(".gz"):
bgzip_and_index(in_file, config)
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.'
cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}"
do.run(cmd.format(**locals()), "Select sample: %s" % sample)
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | [
"def",
"select_sample",
"(",
"in_file",
",",
"sample",
",",
"out_file",
",",
"config",
",",
"filters",
"=",
"None",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"config",
",",
"out_file",
... | Select a single sample from the supplied multisample VCF file. | [
"Select",
"a",
"single",
"sample",
"from",
"the",
"supplied",
"multisample",
"VCF",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L249-L266 | train | 219,059 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | merge_variant_files | def merge_variant_files(orig_files, out_file, ref_file, config, region=None):
"""Combine multiple VCF files with different samples into a single output file.
Uses bcftools merge on bgzipped input files, handling both tricky merge and
concatenation of files. Does not correctly handle files with the same
sample (use combine_variant_files instead).
"""
in_pipeline = False
if isinstance(orig_files, dict):
file_key = config["file_key"]
in_pipeline = True
orig_files = orig_files[file_key]
out_file = _do_merge(orig_files, out_file, config, region)
if in_pipeline:
return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}]
else:
return out_file | python | def merge_variant_files(orig_files, out_file, ref_file, config, region=None):
"""Combine multiple VCF files with different samples into a single output file.
Uses bcftools merge on bgzipped input files, handling both tricky merge and
concatenation of files. Does not correctly handle files with the same
sample (use combine_variant_files instead).
"""
in_pipeline = False
if isinstance(orig_files, dict):
file_key = config["file_key"]
in_pipeline = True
orig_files = orig_files[file_key]
out_file = _do_merge(orig_files, out_file, config, region)
if in_pipeline:
return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}]
else:
return out_file | [
"def",
"merge_variant_files",
"(",
"orig_files",
",",
"out_file",
",",
"ref_file",
",",
"config",
",",
"region",
"=",
"None",
")",
":",
"in_pipeline",
"=",
"False",
"if",
"isinstance",
"(",
"orig_files",
",",
"dict",
")",
":",
"file_key",
"=",
"config",
"[... | Combine multiple VCF files with different samples into a single output file.
Uses bcftools merge on bgzipped input files, handling both tricky merge and
concatenation of files. Does not correctly handle files with the same
sample (use combine_variant_files instead). | [
"Combine",
"multiple",
"VCF",
"files",
"with",
"different",
"samples",
"into",
"a",
"single",
"output",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L288-L304 | train | 219,060 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _do_merge | def _do_merge(orig_files, out_file, config, region):
"""Do the actual work of merging with bcftools merge.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
_check_samples_nodups(orig_files)
prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config)
input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0]
with open(input_vcf_file, "w") as out_handle:
for fname in prep_files:
out_handle.write(fname + "\n")
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
region_str = "-r {}".format(region) if region else ""
cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}"
do.run(cmd.format(**locals()), "Merge variants")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | python | def _do_merge(orig_files, out_file, config, region):
"""Do the actual work of merging with bcftools merge.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
_check_samples_nodups(orig_files)
prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config)
input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0]
with open(input_vcf_file, "w") as out_handle:
for fname in prep_files:
out_handle.write(fname + "\n")
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
region_str = "-r {}".format(region) if region else ""
cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}"
do.run(cmd.format(**locals()), "Merge variants")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | [
"def",
"_do_merge",
"(",
"orig_files",
",",
"out_file",
",",
"config",
",",
"region",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"config",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
... | Do the actual work of merging with bcftools merge. | [
"Do",
"the",
"actual",
"work",
"of",
"merging",
"with",
"bcftools",
"merge",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L306-L324 | train | 219,061 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _check_samples_nodups | def _check_samples_nodups(fnames):
"""Ensure a set of input VCFs do not have duplicate samples.
"""
counts = defaultdict(int)
for f in fnames:
for s in get_samples(f):
counts[s] += 1
duplicates = [s for s, c in counts.items() if c > 1]
if duplicates:
raise ValueError("Duplicate samples found in inputs %s: %s" % (duplicates, fnames)) | python | def _check_samples_nodups(fnames):
"""Ensure a set of input VCFs do not have duplicate samples.
"""
counts = defaultdict(int)
for f in fnames:
for s in get_samples(f):
counts[s] += 1
duplicates = [s for s, c in counts.items() if c > 1]
if duplicates:
raise ValueError("Duplicate samples found in inputs %s: %s" % (duplicates, fnames)) | [
"def",
"_check_samples_nodups",
"(",
"fnames",
")",
":",
"counts",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"f",
"in",
"fnames",
":",
"for",
"s",
"in",
"get_samples",
"(",
"f",
")",
":",
"counts",
"[",
"s",
"]",
"+=",
"1",
"duplicates",
"=",
"[",
... | Ensure a set of input VCFs do not have duplicate samples. | [
"Ensure",
"a",
"set",
"of",
"input",
"VCFs",
"do",
"not",
"have",
"duplicate",
"samples",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L326-L335 | train | 219,062 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _sort_by_region | def _sort_by_region(fnames, regions, ref_file, config):
"""Sort a set of regionally split files by region for ordered output.
"""
contig_order = {}
for i, sq in enumerate(ref.file_contigs(ref_file, config)):
contig_order[sq.name] = i
sitems = []
assert len(regions) == len(fnames), (regions, fnames)
added_fnames = set([])
for region, fname in zip(regions, fnames):
if fname not in added_fnames:
if isinstance(region, (list, tuple)):
c, s, e = region
elif isinstance(region, six.string_types) and region.find(":") >= 0:
c, coords = region.split(":")
s, e = [int(x) for x in coords.split("-")]
else:
c = region
s, e = 0, 0
sitems.append(((contig_order[c], s, e), c, fname))
added_fnames.add(fname)
sitems.sort()
return [(x[1], x[2]) for x in sitems] | python | def _sort_by_region(fnames, regions, ref_file, config):
"""Sort a set of regionally split files by region for ordered output.
"""
contig_order = {}
for i, sq in enumerate(ref.file_contigs(ref_file, config)):
contig_order[sq.name] = i
sitems = []
assert len(regions) == len(fnames), (regions, fnames)
added_fnames = set([])
for region, fname in zip(regions, fnames):
if fname not in added_fnames:
if isinstance(region, (list, tuple)):
c, s, e = region
elif isinstance(region, six.string_types) and region.find(":") >= 0:
c, coords = region.split(":")
s, e = [int(x) for x in coords.split("-")]
else:
c = region
s, e = 0, 0
sitems.append(((contig_order[c], s, e), c, fname))
added_fnames.add(fname)
sitems.sort()
return [(x[1], x[2]) for x in sitems] | [
"def",
"_sort_by_region",
"(",
"fnames",
",",
"regions",
",",
"ref_file",
",",
"config",
")",
":",
"contig_order",
"=",
"{",
"}",
"for",
"i",
",",
"sq",
"in",
"enumerate",
"(",
"ref",
".",
"file_contigs",
"(",
"ref_file",
",",
"config",
")",
")",
":",
... | Sort a set of regionally split files by region for ordered output. | [
"Sort",
"a",
"set",
"of",
"regionally",
"split",
"files",
"by",
"region",
"for",
"ordered",
"output",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L337-L359 | train | 219,063 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | concat_variant_files | def concat_variant_files(orig_files, out_file, regions, ref_file, config):
"""Concatenate multiple variant files from regions into a single output file.
Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails.
These both only combine samples and avoid parsing, allowing scaling to large
file sizes.
"""
if not utils.file_exists(out_file):
input_file_list = _get_file_list(orig_files, out_file, regions, ref_file, config)
try:
out_file = _run_concat_variant_files_gatk4(input_file_list, out_file, config)
except subprocess.CalledProcessError as msg:
if ("We require all VCFs to have complete VCF headers" in str(msg) or
"Features added out of order" in str(msg) or
"The reference allele cannot be missing" in str(msg)):
out_file = _run_concat_variant_files_bcftools(input_file_list, out_file, config, naive=True)
else:
raise
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | python | def concat_variant_files(orig_files, out_file, regions, ref_file, config):
"""Concatenate multiple variant files from regions into a single output file.
Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails.
These both only combine samples and avoid parsing, allowing scaling to large
file sizes.
"""
if not utils.file_exists(out_file):
input_file_list = _get_file_list(orig_files, out_file, regions, ref_file, config)
try:
out_file = _run_concat_variant_files_gatk4(input_file_list, out_file, config)
except subprocess.CalledProcessError as msg:
if ("We require all VCFs to have complete VCF headers" in str(msg) or
"Features added out of order" in str(msg) or
"The reference allele cannot be missing" in str(msg)):
out_file = _run_concat_variant_files_bcftools(input_file_list, out_file, config, naive=True)
else:
raise
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | [
"def",
"concat_variant_files",
"(",
"orig_files",
",",
"out_file",
",",
"regions",
",",
"ref_file",
",",
"config",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"input_file_list",
"=",
"_get_file_list",
"(",
"orig_files",
",",
... | Concatenate multiple variant files from regions into a single output file.
Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails.
These both only combine samples and avoid parsing, allowing scaling to large
file sizes. | [
"Concatenate",
"multiple",
"variant",
"files",
"from",
"regions",
"into",
"a",
"single",
"output",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L361-L381 | train | 219,064 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _run_concat_variant_files_gatk4 | def _run_concat_variant_files_gatk4(input_file_list, out_file, config):
"""Use GATK4 GatherVcfs for concatenation of scattered VCFs.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file]
# Use GATK4 for merging, tools_off: [gatk4] applies to variant calling
config = utils.deepish_copy(config)
if "gatk4" in dd.get_tools_off({"config": config}):
config["algorithm"]["tools_off"].remove("gatk4")
# Allow specification of verbosity in the unique style this tool uses
resources = config_utils.get_resources("gatk", config)
opts = [str(x) for x in resources.get("options", [])]
if "--verbosity" in opts:
params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]]
broad_runner = broad.runner_from_config(config)
broad_runner.run_gatk(params)
return out_file | python | def _run_concat_variant_files_gatk4(input_file_list, out_file, config):
"""Use GATK4 GatherVcfs for concatenation of scattered VCFs.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file]
# Use GATK4 for merging, tools_off: [gatk4] applies to variant calling
config = utils.deepish_copy(config)
if "gatk4" in dd.get_tools_off({"config": config}):
config["algorithm"]["tools_off"].remove("gatk4")
# Allow specification of verbosity in the unique style this tool uses
resources = config_utils.get_resources("gatk", config)
opts = [str(x) for x in resources.get("options", [])]
if "--verbosity" in opts:
params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]]
broad_runner = broad.runner_from_config(config)
broad_runner.run_gatk(params)
return out_file | [
"def",
"_run_concat_variant_files_gatk4",
"(",
"input_file_list",
",",
"out_file",
",",
"config",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"config",
",",
"out_file",
")",
"as",
"tx_out_file",... | Use GATK4 GatherVcfs for concatenation of scattered VCFs. | [
"Use",
"GATK4",
"GatherVcfs",
"for",
"concatenation",
"of",
"scattered",
"VCFs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L383-L400 | train | 219,065 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _get_file_list | def _get_file_list(orig_files, out_file, regions, ref_file, config):
"""Create file with region sorted list of non-empty VCFs for concatenating.
"""
sorted_files = _sort_by_region(orig_files, regions, ref_file, config)
exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)]
if len(exist_files) == 0: # no non-empty inputs, merge the empty ones
exist_files = [x for c, x in sorted_files if os.path.exists(x)]
elif len(exist_files) > 1:
exist_files = _fix_gatk_header(exist_files, out_file, config)
else:
exist_files = [x for c, x in exist_files]
ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config)
input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0]
with open(input_file_list, "w") as out_handle:
for fname in ready_files:
out_handle.write(fname + "\n")
return input_file_list | python | def _get_file_list(orig_files, out_file, regions, ref_file, config):
"""Create file with region sorted list of non-empty VCFs for concatenating.
"""
sorted_files = _sort_by_region(orig_files, regions, ref_file, config)
exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)]
if len(exist_files) == 0: # no non-empty inputs, merge the empty ones
exist_files = [x for c, x in sorted_files if os.path.exists(x)]
elif len(exist_files) > 1:
exist_files = _fix_gatk_header(exist_files, out_file, config)
else:
exist_files = [x for c, x in exist_files]
ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config)
input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0]
with open(input_file_list, "w") as out_handle:
for fname in ready_files:
out_handle.write(fname + "\n")
return input_file_list | [
"def",
"_get_file_list",
"(",
"orig_files",
",",
"out_file",
",",
"regions",
",",
"ref_file",
",",
"config",
")",
":",
"sorted_files",
"=",
"_sort_by_region",
"(",
"orig_files",
",",
"regions",
",",
"ref_file",
",",
"config",
")",
"exist_files",
"=",
"[",
"(... | Create file with region sorted list of non-empty VCFs for concatenating. | [
"Create",
"file",
"with",
"region",
"sorted",
"list",
"of",
"non",
"-",
"empty",
"VCFs",
"for",
"concatenating",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L402-L418 | train | 219,066 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _fix_gatk_header | def _fix_gatk_header(exist_files, out_file, config):
"""Ensure consistent headers for VCF concatenation.
Fixes problems for genomes that start with chrM by reheadering the first file.
These files do haploid variant calling which lack the PID phasing key/value
pair in FORMAT, so initial chrM samples cause errors during concatenation
due to the lack of header merging. This fixes this by updating the first header.
"""
from bcbio.variation import ploidy
c, base_file = exist_files[0]
replace_file = base_file
items = [{"config": config}]
if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1:
for c, x in exist_files[1:]:
if ploidy.get_ploidy(items, (c, 1, 2)) > 1:
replace_file = x
break
base_fix_file = os.path.join(os.path.dirname(out_file),
"%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file)))
with file_transaction(config, base_fix_file) as tx_out_file:
header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0]
do.run("zgrep ^# %s > %s"
% (replace_file, header_file), "Prepare header file for merging")
resources = config_utils.get_resources("picard", config)
ropts = []
if "options" in resources:
ropts += [str(x) for x in resources.get("options", [])]
do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" %
(utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)),
"Reheader initial VCF file in merge")
bgzip_and_index(base_fix_file, config)
return [base_fix_file] + [x for (c, x) in exist_files[1:]] | python | def _fix_gatk_header(exist_files, out_file, config):
"""Ensure consistent headers for VCF concatenation.
Fixes problems for genomes that start with chrM by reheadering the first file.
These files do haploid variant calling which lack the PID phasing key/value
pair in FORMAT, so initial chrM samples cause errors during concatenation
due to the lack of header merging. This fixes this by updating the first header.
"""
from bcbio.variation import ploidy
c, base_file = exist_files[0]
replace_file = base_file
items = [{"config": config}]
if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1:
for c, x in exist_files[1:]:
if ploidy.get_ploidy(items, (c, 1, 2)) > 1:
replace_file = x
break
base_fix_file = os.path.join(os.path.dirname(out_file),
"%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file)))
with file_transaction(config, base_fix_file) as tx_out_file:
header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0]
do.run("zgrep ^# %s > %s"
% (replace_file, header_file), "Prepare header file for merging")
resources = config_utils.get_resources("picard", config)
ropts = []
if "options" in resources:
ropts += [str(x) for x in resources.get("options", [])]
do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" %
(utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)),
"Reheader initial VCF file in merge")
bgzip_and_index(base_fix_file, config)
return [base_fix_file] + [x for (c, x) in exist_files[1:]] | [
"def",
"_fix_gatk_header",
"(",
"exist_files",
",",
"out_file",
",",
"config",
")",
":",
"from",
"bcbio",
".",
"variation",
"import",
"ploidy",
"c",
",",
"base_file",
"=",
"exist_files",
"[",
"0",
"]",
"replace_file",
"=",
"base_file",
"items",
"=",
"[",
"... | Ensure consistent headers for VCF concatenation.
Fixes problems for genomes that start with chrM by reheadering the first file.
These files do haploid variant calling which lack the PID phasing key/value
pair in FORMAT, so initial chrM samples cause errors during concatenation
due to the lack of header merging. This fixes this by updating the first header. | [
"Ensure",
"consistent",
"headers",
"for",
"VCF",
"concatenation",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L420-L451 | train | 219,067 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | _run_concat_variant_files_bcftools | def _run_concat_variant_files_bcftools(in_list, out_file, config, naive=False):
"""Concatenate variant files using bcftools concat, potentially using the fast naive option.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
if naive:
args = "--naive"
else:
args = "--allow-overlaps"
cmd = "{bcftools} concat {args} -O {output_type} --file-list {in_list} -o {tx_out_file}"
do.run(cmd.format(**locals()), "bcftools concat variants")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | python | def _run_concat_variant_files_bcftools(in_list, out_file, config, naive=False):
"""Concatenate variant files using bcftools concat, potentially using the fast naive option.
"""
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bcftools = config_utils.get_program("bcftools", config)
output_type = "z" if out_file.endswith(".gz") else "v"
if naive:
args = "--naive"
else:
args = "--allow-overlaps"
cmd = "{bcftools} concat {args} -O {output_type} --file-list {in_list} -o {tx_out_file}"
do.run(cmd.format(**locals()), "bcftools concat variants")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
return out_file | [
"def",
"_run_concat_variant_files_bcftools",
"(",
"in_list",
",",
"out_file",
",",
"config",
",",
"naive",
"=",
"False",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"config",
",",
"out_file",
... | Concatenate variant files using bcftools concat, potentially using the fast naive option. | [
"Concatenate",
"variant",
"files",
"using",
"bcftools",
"concat",
"potentially",
"using",
"the",
"fast",
"naive",
"option",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L465-L480 | train | 219,068 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | combine_variant_files | def combine_variant_files(orig_files, out_file, ref_file, config,
quiet_out=True, region=None):
"""Combine VCF files from the same sample into a single output file.
Handles cases where we split files into SNPs/Indels for processing then
need to merge back into a final file.
"""
in_pipeline = False
if isinstance(orig_files, dict):
file_key = config["file_key"]
in_pipeline = True
orig_files = orig_files[file_key]
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
exist_files = [x for x in orig_files if os.path.exists(x)]
ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config)
dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0]
cores = dd.get_num_cores({"config": config})
memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
cmd = ["picard"] + broad.get_picard_opts(config, memscale) + \
["MergeVcfs", "D=%s" % dict_file, "O=%s" % tx_out_file] + \
["I=%s" % f for f in ready_files]
cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(cmd))
do.run(cmd, "Combine variant files")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
if in_pipeline:
return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}]
else:
return out_file | python | def combine_variant_files(orig_files, out_file, ref_file, config,
quiet_out=True, region=None):
"""Combine VCF files from the same sample into a single output file.
Handles cases where we split files into SNPs/Indels for processing then
need to merge back into a final file.
"""
in_pipeline = False
if isinstance(orig_files, dict):
file_key = config["file_key"]
in_pipeline = True
orig_files = orig_files[file_key]
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
exist_files = [x for x in orig_files if os.path.exists(x)]
ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config)
dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0]
cores = dd.get_num_cores({"config": config})
memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
cmd = ["picard"] + broad.get_picard_opts(config, memscale) + \
["MergeVcfs", "D=%s" % dict_file, "O=%s" % tx_out_file] + \
["I=%s" % f for f in ready_files]
cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(cmd))
do.run(cmd, "Combine variant files")
if out_file.endswith(".gz"):
bgzip_and_index(out_file, config)
if in_pipeline:
return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}]
else:
return out_file | [
"def",
"combine_variant_files",
"(",
"orig_files",
",",
"out_file",
",",
"ref_file",
",",
"config",
",",
"quiet_out",
"=",
"True",
",",
"region",
"=",
"None",
")",
":",
"in_pipeline",
"=",
"False",
"if",
"isinstance",
"(",
"orig_files",
",",
"dict",
")",
"... | Combine VCF files from the same sample into a single output file.
Handles cases where we split files into SNPs/Indels for processing then
need to merge back into a final file. | [
"Combine",
"VCF",
"files",
"from",
"the",
"same",
"sample",
"into",
"a",
"single",
"output",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L482-L511 | train | 219,069 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | sort_by_ref | def sort_by_ref(vcf_file, data):
"""Sort a VCF file by genome reference and position, adding contig information.
"""
out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0]
if not utils.file_uptodate(out_file, vcf_file):
with file_transaction(data, out_file) as tx_out_file:
header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0]
with open(header_file, "w") as out_handle:
for region in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size))
cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat"
cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | "
"vt sort -m full -o {tx_out_file} -")
with utils.chdir(os.path.dirname(tx_out_file)):
do.run(cmd.format(**locals()), "Sort VCF by reference")
return bgzip_and_index(out_file, data["config"]) | python | def sort_by_ref(vcf_file, data):
"""Sort a VCF file by genome reference and position, adding contig information.
"""
out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0]
if not utils.file_uptodate(out_file, vcf_file):
with file_transaction(data, out_file) as tx_out_file:
header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0]
with open(header_file, "w") as out_handle:
for region in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size))
cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat"
cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | "
"vt sort -m full -o {tx_out_file} -")
with utils.chdir(os.path.dirname(tx_out_file)):
do.run(cmd.format(**locals()), "Sort VCF by reference")
return bgzip_and_index(out_file, data["config"]) | [
"def",
"sort_by_ref",
"(",
"vcf_file",
",",
"data",
")",
":",
"out_file",
"=",
"\"%s-prep.vcf.gz\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"vcf_file",
")",
"[",
"0",
"]",
"if",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"vcf_file",
")",... | Sort a VCF file by genome reference and position, adding contig information. | [
"Sort",
"a",
"VCF",
"file",
"by",
"genome",
"reference",
"and",
"position",
"adding",
"contig",
"information",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L513-L528 | train | 219,070 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | add_contig_to_header | def add_contig_to_header(line, ref_file):
"""Streaming target to add contigs to a VCF file header.
"""
if line.startswith("##fileformat=VCF"):
out = [line]
for region in ref.file_contigs(ref_file):
out.append("##contig=<ID=%s,length=%s>" % (region.name, region.size))
return "\n".join(out)
else:
return line | python | def add_contig_to_header(line, ref_file):
"""Streaming target to add contigs to a VCF file header.
"""
if line.startswith("##fileformat=VCF"):
out = [line]
for region in ref.file_contigs(ref_file):
out.append("##contig=<ID=%s,length=%s>" % (region.name, region.size))
return "\n".join(out)
else:
return line | [
"def",
"add_contig_to_header",
"(",
"line",
",",
"ref_file",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"##fileformat=VCF\"",
")",
":",
"out",
"=",
"[",
"line",
"]",
"for",
"region",
"in",
"ref",
".",
"file_contigs",
"(",
"ref_file",
")",
":",
"ou... | Streaming target to add contigs to a VCF file header. | [
"Streaming",
"target",
"to",
"add",
"contigs",
"to",
"a",
"VCF",
"file",
"header",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L539-L548 | train | 219,071 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | parallel_combine_variants | def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel):
"""Combine variants in parallel by chromosome, concatenating final outputs.
"""
file_key = "vcf_files"
def split_by_region(data):
base, ext = utils.splitext_plus(os.path.basename(out_file))
args = []
for region in [x.name for x in ref.file_contigs(ref_file, config)]:
region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base,
"%s-%s%s" % (base, region, ext))
utils.safe_makedir(os.path.dirname(region_out))
args.append((region_out, ref_file, config, region))
return out_file, args
config = copy.deepcopy(config)
config["file_key"] = file_key
prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config)
items = [[{file_key: prep_files}]]
parallel_split_combine(items, split_by_region, run_parallel,
"merge_variant_files", "concat_variant_files",
file_key, ["region", "sam_ref", "config"], split_outfile_i=0)
return out_file | python | def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel):
"""Combine variants in parallel by chromosome, concatenating final outputs.
"""
file_key = "vcf_files"
def split_by_region(data):
base, ext = utils.splitext_plus(os.path.basename(out_file))
args = []
for region in [x.name for x in ref.file_contigs(ref_file, config)]:
region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base,
"%s-%s%s" % (base, region, ext))
utils.safe_makedir(os.path.dirname(region_out))
args.append((region_out, ref_file, config, region))
return out_file, args
config = copy.deepcopy(config)
config["file_key"] = file_key
prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config)
items = [[{file_key: prep_files}]]
parallel_split_combine(items, split_by_region, run_parallel,
"merge_variant_files", "concat_variant_files",
file_key, ["region", "sam_ref", "config"], split_outfile_i=0)
return out_file | [
"def",
"parallel_combine_variants",
"(",
"orig_files",
",",
"out_file",
",",
"ref_file",
",",
"config",
",",
"run_parallel",
")",
":",
"file_key",
"=",
"\"vcf_files\"",
"def",
"split_by_region",
"(",
"data",
")",
":",
"base",
",",
"ext",
"=",
"utils",
".",
"... | Combine variants in parallel by chromosome, concatenating final outputs. | [
"Combine",
"variants",
"in",
"parallel",
"by",
"chromosome",
"concatenating",
"final",
"outputs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L552-L572 | train | 219,072 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | move_vcf | def move_vcf(orig_file, new_file):
"""Move a VCF file with associated index.
"""
for ext in ["", ".idx", ".tbi"]:
to_move = orig_file + ext
if os.path.exists(to_move):
shutil.move(to_move, new_file + ext) | python | def move_vcf(orig_file, new_file):
"""Move a VCF file with associated index.
"""
for ext in ["", ".idx", ".tbi"]:
to_move = orig_file + ext
if os.path.exists(to_move):
shutil.move(to_move, new_file + ext) | [
"def",
"move_vcf",
"(",
"orig_file",
",",
"new_file",
")",
":",
"for",
"ext",
"in",
"[",
"\"\"",
",",
"\".idx\"",
",",
"\".tbi\"",
"]",
":",
"to_move",
"=",
"orig_file",
"+",
"ext",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"to_move",
")",
":",
... | Move a VCF file with associated index. | [
"Move",
"a",
"VCF",
"file",
"with",
"associated",
"index",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L576-L582 | train | 219,073 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | bgzip_and_index | def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None):
"""bgzip and tabix index an input file, handling VCF and BED.
"""
if config is None:
config = {}
out_file = in_file if in_file.endswith(".gz") else in_file + ".gz"
if out_dir:
remove_orig = False
out_file = os.path.join(out_dir, os.path.basename(out_file))
if (not utils.file_exists(out_file) or not os.path.lexists(out_file)
or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))):
assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file
assert os.path.exists(in_file), "Input file %s not found" % in_file
if not utils.file_uptodate(out_file, in_file):
with file_transaction(config, out_file) as tx_out_file:
bgzip = tools.get_bgzip_cmd(config)
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
if prep_cmd:
prep_cmd = "| %s " % prep_cmd
cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}"
try:
do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file))
except subprocess.CalledProcessError:
# Race conditions: ignore errors where file has been deleted by another
if os.path.exists(in_file) and not os.path.exists(out_file):
raise
if remove_orig:
try:
os.remove(in_file)
except OSError: # Handle cases where run in parallel and file has been deleted
pass
tabix_index(out_file, config, tabix_args=tabix_args)
return out_file | python | def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None):
"""bgzip and tabix index an input file, handling VCF and BED.
"""
if config is None:
config = {}
out_file = in_file if in_file.endswith(".gz") else in_file + ".gz"
if out_dir:
remove_orig = False
out_file = os.path.join(out_dir, os.path.basename(out_file))
if (not utils.file_exists(out_file) or not os.path.lexists(out_file)
or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))):
assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file
assert os.path.exists(in_file), "Input file %s not found" % in_file
if not utils.file_uptodate(out_file, in_file):
with file_transaction(config, out_file) as tx_out_file:
bgzip = tools.get_bgzip_cmd(config)
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
if prep_cmd:
prep_cmd = "| %s " % prep_cmd
cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}"
try:
do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file))
except subprocess.CalledProcessError:
# Race conditions: ignore errors where file has been deleted by another
if os.path.exists(in_file) and not os.path.exists(out_file):
raise
if remove_orig:
try:
os.remove(in_file)
except OSError: # Handle cases where run in parallel and file has been deleted
pass
tabix_index(out_file, config, tabix_args=tabix_args)
return out_file | [
"def",
"bgzip_and_index",
"(",
"in_file",
",",
"config",
"=",
"None",
",",
"remove_orig",
"=",
"True",
",",
"prep_cmd",
"=",
"\"\"",
",",
"tabix_args",
"=",
"None",
",",
"out_dir",
"=",
"None",
")",
":",
"if",
"config",
"is",
"None",
":",
"config",
"="... | bgzip and tabix index an input file, handling VCF and BED. | [
"bgzip",
"and",
"tabix",
"index",
"an",
"input",
"file",
"handling",
"VCF",
"and",
"BED",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L584-L616 | train | 219,074 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | tabix_index | def tabix_index(in_file, config, preset=None, tabix_args=None):
"""Index a file using tabix.
"""
in_file = os.path.abspath(in_file)
out_file = in_file + ".tbi"
if not utils.file_exists(out_file) or not utils.file_uptodate(out_file, in_file):
# Remove old index files to prevent linking into tx directory
utils.remove_safe(out_file)
with file_transaction(config, out_file) as tx_out_file:
tabix = tools.get_tabix_cmd(config)
tx_in_file = os.path.splitext(tx_out_file)[0]
utils.symlink_plus(in_file, tx_in_file)
if tabix_args:
cmd = "{tabix} -f {tabix_args} {tx_in_file}"
else:
preset = _guess_preset(in_file) if preset is None else preset
cmd = "{tabix} -f -p {preset} {tx_in_file}"
do.run(cmd.format(**locals()), "tabix index %s" % os.path.basename(in_file))
return out_file | python | def tabix_index(in_file, config, preset=None, tabix_args=None):
"""Index a file using tabix.
"""
in_file = os.path.abspath(in_file)
out_file = in_file + ".tbi"
if not utils.file_exists(out_file) or not utils.file_uptodate(out_file, in_file):
# Remove old index files to prevent linking into tx directory
utils.remove_safe(out_file)
with file_transaction(config, out_file) as tx_out_file:
tabix = tools.get_tabix_cmd(config)
tx_in_file = os.path.splitext(tx_out_file)[0]
utils.symlink_plus(in_file, tx_in_file)
if tabix_args:
cmd = "{tabix} -f {tabix_args} {tx_in_file}"
else:
preset = _guess_preset(in_file) if preset is None else preset
cmd = "{tabix} -f -p {preset} {tx_in_file}"
do.run(cmd.format(**locals()), "tabix index %s" % os.path.basename(in_file))
return out_file | [
"def",
"tabix_index",
"(",
"in_file",
",",
"config",
",",
"preset",
"=",
"None",
",",
"tabix_args",
"=",
"None",
")",
":",
"in_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"in_file",
")",
"out_file",
"=",
"in_file",
"+",
"\".tbi\"",
"if",
"not",... | Index a file using tabix. | [
"Index",
"a",
"file",
"using",
"tabix",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L635-L653 | train | 219,075 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | is_gvcf_file | def is_gvcf_file(in_file):
"""Check if an input file is raw gVCF
"""
to_check = 100
n = 0
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("##"):
if n > to_check:
break
n += 1
parts = line.split("\t")
# GATK
if parts[4] == "<NON_REF>":
return True
# strelka2
if parts[4] == "." and parts[7].startswith("BLOCKAVG"):
return True
# freebayes
if parts[4] == "<*>":
return True
# platypue
if parts[4] == "N" and parts[6] == "REFCALL":
return True | python | def is_gvcf_file(in_file):
"""Check if an input file is raw gVCF
"""
to_check = 100
n = 0
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("##"):
if n > to_check:
break
n += 1
parts = line.split("\t")
# GATK
if parts[4] == "<NON_REF>":
return True
# strelka2
if parts[4] == "." and parts[7].startswith("BLOCKAVG"):
return True
# freebayes
if parts[4] == "<*>":
return True
# platypue
if parts[4] == "N" and parts[6] == "REFCALL":
return True | [
"def",
"is_gvcf_file",
"(",
"in_file",
")",
":",
"to_check",
"=",
"100",
"n",
"=",
"0",
"with",
"utils",
".",
"open_gzipsafe",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"for",
"line",
"in",
"in_handle",
":",
"if",
"not",
"line",
".",
"startswith",
"... | Check if an input file is raw gVCF | [
"Check",
"if",
"an",
"input",
"file",
"is",
"raw",
"gVCF"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L655-L678 | train | 219,076 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | cyvcf_add_filter | def cyvcf_add_filter(rec, name):
"""Add a FILTER value to a cyvcf2 record
"""
if rec.FILTER:
filters = rec.FILTER.split(";")
else:
filters = []
if name not in filters:
filters.append(name)
rec.FILTER = filters
return rec | python | def cyvcf_add_filter(rec, name):
"""Add a FILTER value to a cyvcf2 record
"""
if rec.FILTER:
filters = rec.FILTER.split(";")
else:
filters = []
if name not in filters:
filters.append(name)
rec.FILTER = filters
return rec | [
"def",
"cyvcf_add_filter",
"(",
"rec",
",",
"name",
")",
":",
"if",
"rec",
".",
"FILTER",
":",
"filters",
"=",
"rec",
".",
"FILTER",
".",
"split",
"(",
"\";\"",
")",
"else",
":",
"filters",
"=",
"[",
"]",
"if",
"name",
"not",
"in",
"filters",
":",
... | Add a FILTER value to a cyvcf2 record | [
"Add",
"a",
"FILTER",
"value",
"to",
"a",
"cyvcf2",
"record"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L680-L690 | train | 219,077 |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | cyvcf_remove_filter | def cyvcf_remove_filter(rec, name):
"""Remove filter with the given name from a cyvcf2 record
"""
if rec.FILTER:
filters = rec.FILTER.split(";")
else:
filters = []
new_filters = [x for x in filters if not str(x) == name]
if len(new_filters) == 0:
new_filters = ["PASS"]
rec.FILTER = new_filters
return rec | python | def cyvcf_remove_filter(rec, name):
"""Remove filter with the given name from a cyvcf2 record
"""
if rec.FILTER:
filters = rec.FILTER.split(";")
else:
filters = []
new_filters = [x for x in filters if not str(x) == name]
if len(new_filters) == 0:
new_filters = ["PASS"]
rec.FILTER = new_filters
return rec | [
"def",
"cyvcf_remove_filter",
"(",
"rec",
",",
"name",
")",
":",
"if",
"rec",
".",
"FILTER",
":",
"filters",
"=",
"rec",
".",
"FILTER",
".",
"split",
"(",
"\";\"",
")",
"else",
":",
"filters",
"=",
"[",
"]",
"new_filters",
"=",
"[",
"x",
"for",
"x"... | Remove filter with the given name from a cyvcf2 record | [
"Remove",
"filter",
"with",
"the",
"given",
"name",
"from",
"a",
"cyvcf2",
"record"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L692-L703 | train | 219,078 |
bcbio/bcbio-nextgen | bcbio/pipeline/alignment.py | organize_noalign | def organize_noalign(data):
"""CWL target to skip alignment and organize input data.
"""
data = utils.to_single_data(data[0])
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data)))
work_bam = os.path.join(work_dir, "%s-input.bam" % dd.get_sample_name(data))
if data.get("files"):
if data["files"][0].endswith(".cram"):
work_bam = cram.to_bam(data["files"][0], work_bam, data)
else:
assert data["files"][0].endswith(".bam"), data["files"][0]
utils.copy_plus(data["files"][0], work_bam)
bam.index(work_bam, data["config"])
else:
work_bam = None
data["align_bam"] = work_bam
return data | python | def organize_noalign(data):
"""CWL target to skip alignment and organize input data.
"""
data = utils.to_single_data(data[0])
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data)))
work_bam = os.path.join(work_dir, "%s-input.bam" % dd.get_sample_name(data))
if data.get("files"):
if data["files"][0].endswith(".cram"):
work_bam = cram.to_bam(data["files"][0], work_bam, data)
else:
assert data["files"][0].endswith(".bam"), data["files"][0]
utils.copy_plus(data["files"][0], work_bam)
bam.index(work_bam, data["config"])
else:
work_bam = None
data["align_bam"] = work_bam
return data | [
"def",
"organize_noalign",
"(",
"data",
")",
":",
"data",
"=",
"utils",
".",
"to_single_data",
"(",
"data",
"[",
"0",
"]",
")",
"work_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dd",
".",
"get_work_dir",
"(",
"... | CWL target to skip alignment and organize input data. | [
"CWL",
"target",
"to",
"skip",
"alignment",
"and",
"organize",
"input",
"data",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L53-L69 | train | 219,079 |
bcbio/bcbio-nextgen | bcbio/pipeline/alignment.py | align_to_sort_bam | def align_to_sort_bam(fastq1, fastq2, aligner, data):
"""Align to the named genome build, returning a sorted BAM file.
"""
names = data["rgnames"]
align_dir_parts = [data["dirs"]["work"], "align", names["sample"]]
if data.get("disambiguate"):
align_dir_parts.append(data["disambiguate"]["genome_build"])
aligner_index = _get_aligner_index(aligner, data)
align_dir = utils.safe_makedir(os.path.join(*align_dir_parts))
ref_file = tz.get_in(("reference", "fasta", "base"), data)
if fastq1.endswith(".bam"):
data = _align_from_bam(fastq1, aligner, aligner_index, ref_file,
names, align_dir, data)
else:
data = _align_from_fastq(fastq1, fastq2, aligner, aligner_index, ref_file,
names, align_dir, data)
if data["work_bam"] and utils.file_exists(data["work_bam"]):
if data.get("align_split") and dd.get_mark_duplicates(data):
# If merging later with with bamsormadup need query sorted inputs
# but CWL requires a bai file. Create a fake one to make it happy.
bam.fake_index(data["work_bam"], data)
else:
bam.index(data["work_bam"], data["config"])
for extra in ["-sr", "-disc"]:
extra_bam = utils.append_stem(data['work_bam'], extra)
if utils.file_exists(extra_bam):
bam.index(extra_bam, data["config"])
return data | python | def align_to_sort_bam(fastq1, fastq2, aligner, data):
"""Align to the named genome build, returning a sorted BAM file.
"""
names = data["rgnames"]
align_dir_parts = [data["dirs"]["work"], "align", names["sample"]]
if data.get("disambiguate"):
align_dir_parts.append(data["disambiguate"]["genome_build"])
aligner_index = _get_aligner_index(aligner, data)
align_dir = utils.safe_makedir(os.path.join(*align_dir_parts))
ref_file = tz.get_in(("reference", "fasta", "base"), data)
if fastq1.endswith(".bam"):
data = _align_from_bam(fastq1, aligner, aligner_index, ref_file,
names, align_dir, data)
else:
data = _align_from_fastq(fastq1, fastq2, aligner, aligner_index, ref_file,
names, align_dir, data)
if data["work_bam"] and utils.file_exists(data["work_bam"]):
if data.get("align_split") and dd.get_mark_duplicates(data):
# If merging later with with bamsormadup need query sorted inputs
# but CWL requires a bai file. Create a fake one to make it happy.
bam.fake_index(data["work_bam"], data)
else:
bam.index(data["work_bam"], data["config"])
for extra in ["-sr", "-disc"]:
extra_bam = utils.append_stem(data['work_bam'], extra)
if utils.file_exists(extra_bam):
bam.index(extra_bam, data["config"])
return data | [
"def",
"align_to_sort_bam",
"(",
"fastq1",
",",
"fastq2",
",",
"aligner",
",",
"data",
")",
":",
"names",
"=",
"data",
"[",
"\"rgnames\"",
"]",
"align_dir_parts",
"=",
"[",
"data",
"[",
"\"dirs\"",
"]",
"[",
"\"work\"",
"]",
",",
"\"align\"",
",",
"names... | Align to the named genome build, returning a sorted BAM file. | [
"Align",
"to",
"the",
"named",
"genome",
"build",
"returning",
"a",
"sorted",
"BAM",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L71-L98 | train | 219,080 |
bcbio/bcbio-nextgen | bcbio/pipeline/alignment.py | get_aligner_with_aliases | def get_aligner_with_aliases(aligner, data):
"""Retrieve aligner index retriever, including aliases for shared.
Handles tricky cases like gridss where we need bwa indices even with
no aligner specified since they're used internally within GRIDSS.
"""
aligner_aliases = {"sentieon-bwa": "bwa"}
from bcbio import structural
if not aligner and "gridss" in structural.get_svcallers(data):
aligner = "bwa"
return aligner_aliases.get(aligner) or aligner | python | def get_aligner_with_aliases(aligner, data):
"""Retrieve aligner index retriever, including aliases for shared.
Handles tricky cases like gridss where we need bwa indices even with
no aligner specified since they're used internally within GRIDSS.
"""
aligner_aliases = {"sentieon-bwa": "bwa"}
from bcbio import structural
if not aligner and "gridss" in structural.get_svcallers(data):
aligner = "bwa"
return aligner_aliases.get(aligner) or aligner | [
"def",
"get_aligner_with_aliases",
"(",
"aligner",
",",
"data",
")",
":",
"aligner_aliases",
"=",
"{",
"\"sentieon-bwa\"",
":",
"\"bwa\"",
"}",
"from",
"bcbio",
"import",
"structural",
"if",
"not",
"aligner",
"and",
"\"gridss\"",
"in",
"structural",
".",
"get_sv... | Retrieve aligner index retriever, including aliases for shared.
Handles tricky cases like gridss where we need bwa indices even with
no aligner specified since they're used internally within GRIDSS. | [
"Retrieve",
"aligner",
"index",
"retriever",
"including",
"aliases",
"for",
"shared",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L100-L110 | train | 219,081 |
bcbio/bcbio-nextgen | bcbio/pipeline/alignment.py | _get_aligner_index | def _get_aligner_index(aligner, data):
"""Handle multiple specifications of aligner indexes, returning value to pass to aligner.
Original bcbio case -- a list of indices.
CWL case: a single file with secondaryFiles staged in the same directory.
"""
aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data)
# standard bcbio case
if aligner_indexes and isinstance(aligner_indexes, (list, tuple)):
aligner_index = os.path.commonprefix(aligner_indexes)
if aligner_index.endswith("."):
aligner_index = aligner_index[:-1]
return aligner_index
# single file -- check for standard naming or directory
elif aligner_indexes and os.path.exists(aligner_indexes):
aligner_dir = os.path.dirname(aligner_indexes)
aligner_prefix = os.path.splitext(aligner_indexes)[0]
if len(glob.glob("%s.*" % aligner_prefix)) > 0:
return aligner_prefix
else:
return aligner_dir
if aligner not in allow_noindices():
raise ValueError("Did not find reference indices for aligner %s in genome: %s" %
(aligner, data["reference"])) | python | def _get_aligner_index(aligner, data):
"""Handle multiple specifications of aligner indexes, returning value to pass to aligner.
Original bcbio case -- a list of indices.
CWL case: a single file with secondaryFiles staged in the same directory.
"""
aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data)
# standard bcbio case
if aligner_indexes and isinstance(aligner_indexes, (list, tuple)):
aligner_index = os.path.commonprefix(aligner_indexes)
if aligner_index.endswith("."):
aligner_index = aligner_index[:-1]
return aligner_index
# single file -- check for standard naming or directory
elif aligner_indexes and os.path.exists(aligner_indexes):
aligner_dir = os.path.dirname(aligner_indexes)
aligner_prefix = os.path.splitext(aligner_indexes)[0]
if len(glob.glob("%s.*" % aligner_prefix)) > 0:
return aligner_prefix
else:
return aligner_dir
if aligner not in allow_noindices():
raise ValueError("Did not find reference indices for aligner %s in genome: %s" %
(aligner, data["reference"])) | [
"def",
"_get_aligner_index",
"(",
"aligner",
",",
"data",
")",
":",
"aligner_indexes",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"reference\"",
",",
"get_aligner_with_aliases",
"(",
"aligner",
",",
"data",
")",
",",
"\"indexes\"",
")",
",",
"data",
")",
"# stand... | Handle multiple specifications of aligner indexes, returning value to pass to aligner.
Original bcbio case -- a list of indices.
CWL case: a single file with secondaryFiles staged in the same directory. | [
"Handle",
"multiple",
"specifications",
"of",
"aligner",
"indexes",
"returning",
"value",
"to",
"pass",
"to",
"aligner",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L115-L139 | train | 219,082 |
bcbio/bcbio-nextgen | bcbio/pipeline/alignment.py | _align_from_fastq | def _align_from_fastq(fastq1, fastq2, aligner, align_ref, sam_ref, names,
align_dir, data):
"""Align from fastq inputs, producing sorted BAM output.
"""
config = data["config"]
align_fn = TOOLS[aligner].align_fn
out = align_fn(fastq1, fastq2, align_ref, names, align_dir, data)
# handle align functions that update the main data dictionary in place
if isinstance(out, dict):
assert out.get("work_bam"), (dd.get_sample_name(data), out.get("work_bam"))
return out
# handle output of raw SAM files that need to be converted to BAM
else:
work_bam = bam.sam_to_bam(out, config)
data["work_bam"] = bam.sort(work_bam, config)
return data | python | def _align_from_fastq(fastq1, fastq2, aligner, align_ref, sam_ref, names,
align_dir, data):
"""Align from fastq inputs, producing sorted BAM output.
"""
config = data["config"]
align_fn = TOOLS[aligner].align_fn
out = align_fn(fastq1, fastq2, align_ref, names, align_dir, data)
# handle align functions that update the main data dictionary in place
if isinstance(out, dict):
assert out.get("work_bam"), (dd.get_sample_name(data), out.get("work_bam"))
return out
# handle output of raw SAM files that need to be converted to BAM
else:
work_bam = bam.sam_to_bam(out, config)
data["work_bam"] = bam.sort(work_bam, config)
return data | [
"def",
"_align_from_fastq",
"(",
"fastq1",
",",
"fastq2",
",",
"aligner",
",",
"align_ref",
",",
"sam_ref",
",",
"names",
",",
"align_dir",
",",
"data",
")",
":",
"config",
"=",
"data",
"[",
"\"config\"",
"]",
"align_fn",
"=",
"TOOLS",
"[",
"aligner",
"]... | Align from fastq inputs, producing sorted BAM output. | [
"Align",
"from",
"fastq",
"inputs",
"producing",
"sorted",
"BAM",
"output",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L155-L170 | train | 219,083 |
bcbio/bcbio-nextgen | bcbio/structural/gridss.py | _finalize_memory | def _finalize_memory(jvm_opts):
"""GRIDSS does not recommend setting memory between 32 and 48Gb.
https://github.com/PapenfussLab/gridss#memory-usage
"""
avoid_min = 32
avoid_max = 48
out_opts = []
for opt in jvm_opts:
if opt.startswith("-Xmx"):
spec = opt[4:]
val = int(spec[:-1])
mod = spec[-1]
if mod.upper() == "M":
adjust = 1024
min_val = avoid_min * 1024
max_val = avoid_max * 1024
else:
adjust = 1
min_val, max_val = avoid_min, avoid_max
if val >= min_val and val < max_val:
val = min_val - adjust
opt = "%s%s%s" % (opt[:4], val, mod)
out_opts.append(opt)
return out_opts | python | def _finalize_memory(jvm_opts):
"""GRIDSS does not recommend setting memory between 32 and 48Gb.
https://github.com/PapenfussLab/gridss#memory-usage
"""
avoid_min = 32
avoid_max = 48
out_opts = []
for opt in jvm_opts:
if opt.startswith("-Xmx"):
spec = opt[4:]
val = int(spec[:-1])
mod = spec[-1]
if mod.upper() == "M":
adjust = 1024
min_val = avoid_min * 1024
max_val = avoid_max * 1024
else:
adjust = 1
min_val, max_val = avoid_min, avoid_max
if val >= min_val and val < max_val:
val = min_val - adjust
opt = "%s%s%s" % (opt[:4], val, mod)
out_opts.append(opt)
return out_opts | [
"def",
"_finalize_memory",
"(",
"jvm_opts",
")",
":",
"avoid_min",
"=",
"32",
"avoid_max",
"=",
"48",
"out_opts",
"=",
"[",
"]",
"for",
"opt",
"in",
"jvm_opts",
":",
"if",
"opt",
".",
"startswith",
"(",
"\"-Xmx\"",
")",
":",
"spec",
"=",
"opt",
"[",
... | GRIDSS does not recommend setting memory between 32 and 48Gb.
https://github.com/PapenfussLab/gridss#memory-usage | [
"GRIDSS",
"does",
"not",
"recommend",
"setting",
"memory",
"between",
"32",
"and",
"48Gb",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gridss.py#L70-L94 | train | 219,084 |
bcbio/bcbio-nextgen | bcbio/structural/gridss.py | _setup_reference_files | def _setup_reference_files(data, tx_out_dir):
"""Create a reference directory with fasta and bwa indices.
GRIDSS requires all files in a single directory, so setup with symlinks.
This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases`
during YAML sample setup.
"""
aligner = dd.get_aligner(data) or "bwa"
out_dir = utils.safe_makedir(os.path.join(tx_out_dir, aligner))
ref_fasta = dd.get_ref_file(data)
ref_files = ["%s%s" % (utils.splitext_plus(ref_fasta)[0], ext) for ext in [".fa", ".fa.fai", ".dict"]]
for orig_file in ref_files + tz.get_in(("reference", aligner, "indexes"), data):
utils.symlink_plus(orig_file, os.path.join(out_dir, os.path.basename(orig_file)))
return os.path.join(out_dir, os.path.basename(ref_fasta)) | python | def _setup_reference_files(data, tx_out_dir):
"""Create a reference directory with fasta and bwa indices.
GRIDSS requires all files in a single directory, so setup with symlinks.
This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases`
during YAML sample setup.
"""
aligner = dd.get_aligner(data) or "bwa"
out_dir = utils.safe_makedir(os.path.join(tx_out_dir, aligner))
ref_fasta = dd.get_ref_file(data)
ref_files = ["%s%s" % (utils.splitext_plus(ref_fasta)[0], ext) for ext in [".fa", ".fa.fai", ".dict"]]
for orig_file in ref_files + tz.get_in(("reference", aligner, "indexes"), data):
utils.symlink_plus(orig_file, os.path.join(out_dir, os.path.basename(orig_file)))
return os.path.join(out_dir, os.path.basename(ref_fasta)) | [
"def",
"_setup_reference_files",
"(",
"data",
",",
"tx_out_dir",
")",
":",
"aligner",
"=",
"dd",
".",
"get_aligner",
"(",
"data",
")",
"or",
"\"bwa\"",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tx_out_dir",
... | Create a reference directory with fasta and bwa indices.
GRIDSS requires all files in a single directory, so setup with symlinks.
This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases`
during YAML sample setup. | [
"Create",
"a",
"reference",
"directory",
"with",
"fasta",
"and",
"bwa",
"indices",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gridss.py#L96-L109 | train | 219,085 |
bcbio/bcbio-nextgen | bcbio/qc/multiqc.py | _add_versions | def _add_versions(samples):
"""Add tool and data versions to the summary.
"""
samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]),
"data": provenancedata.write_versions(samples[0]["dirs"], samples)}
return samples | python | def _add_versions(samples):
"""Add tool and data versions to the summary.
"""
samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]),
"data": provenancedata.write_versions(samples[0]["dirs"], samples)}
return samples | [
"def",
"_add_versions",
"(",
"samples",
")",
":",
"samples",
"[",
"0",
"]",
"[",
"\"versions\"",
"]",
"=",
"{",
"\"tools\"",
":",
"programs",
".",
"write_versions",
"(",
"samples",
"[",
"0",
"]",
"[",
"\"dirs\"",
"]",
",",
"samples",
"[",
"0",
"]",
"... | Add tool and data versions to the summary. | [
"Add",
"tool",
"and",
"data",
"versions",
"to",
"the",
"summary",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L109-L114 | train | 219,086 |
bcbio/bcbio-nextgen | bcbio/qc/multiqc.py | _summarize_inputs | def _summarize_inputs(samples, out_dir):
"""Summarize inputs for MultiQC reporting in display.
"""
logger.info("summarize target information")
if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
samples = _merge_target_information(samples, metrics_dir)
logger.info("summarize fastqc")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc"))
with utils.chdir(out_dir):
_merge_fastqc(samples)
preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
if preseq_samples:
logger.info("summarize preseq")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq"))
with utils.chdir(out_dir):
_merge_preseq(preseq_samples)
return samples | python | def _summarize_inputs(samples, out_dir):
"""Summarize inputs for MultiQC reporting in display.
"""
logger.info("summarize target information")
if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
samples = _merge_target_information(samples, metrics_dir)
logger.info("summarize fastqc")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc"))
with utils.chdir(out_dir):
_merge_fastqc(samples)
preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
if preseq_samples:
logger.info("summarize preseq")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq"))
with utils.chdir(out_dir):
_merge_preseq(preseq_samples)
return samples | [
"def",
"_summarize_inputs",
"(",
"samples",
",",
"out_dir",
")",
":",
"logger",
".",
"info",
"(",
"\"summarize target information\"",
")",
"if",
"samples",
"[",
"0",
"]",
".",
"get",
"(",
"\"analysis\"",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
"in",
"[... | Summarize inputs for MultiQC reporting in display. | [
"Summarize",
"inputs",
"for",
"MultiQC",
"reporting",
"in",
"display",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L116-L135 | train | 219,087 |
bcbio/bcbio-nextgen | bcbio/qc/multiqc.py | _work_path_to_rel_final_path | def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir):
""" Check if `path` is a work-rooted path, and convert to a relative final-rooted path
"""
if not path or not isinstance(path, str):
return path
upload_path = None
# First, check in the mapping: if it's there is a direct reference and
# it's a file, we immediately return it (saves lots of iterations)
if upload_path_mapping.get(path) is not None and os.path.isfile(path):
upload_path = upload_path_mapping[path]
else:
# Not a file: check for elements in the mapping that contain
# it
paths_to_check = [key for key in upload_path_mapping
if path.startswith(key)]
if paths_to_check:
for work_path in paths_to_check:
if os.path.isdir(work_path):
final_path = upload_path_mapping[work_path]
upload_path = path.replace(work_path, final_path)
break
if upload_path is not None:
return os.path.relpath(upload_path, upload_base_dir)
else:
return None | python | def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir):
""" Check if `path` is a work-rooted path, and convert to a relative final-rooted path
"""
if not path or not isinstance(path, str):
return path
upload_path = None
# First, check in the mapping: if it's there is a direct reference and
# it's a file, we immediately return it (saves lots of iterations)
if upload_path_mapping.get(path) is not None and os.path.isfile(path):
upload_path = upload_path_mapping[path]
else:
# Not a file: check for elements in the mapping that contain
# it
paths_to_check = [key for key in upload_path_mapping
if path.startswith(key)]
if paths_to_check:
for work_path in paths_to_check:
if os.path.isdir(work_path):
final_path = upload_path_mapping[work_path]
upload_path = path.replace(work_path, final_path)
break
if upload_path is not None:
return os.path.relpath(upload_path, upload_base_dir)
else:
return None | [
"def",
"_work_path_to_rel_final_path",
"(",
"path",
",",
"upload_path_mapping",
",",
"upload_base_dir",
")",
":",
"if",
"not",
"path",
"or",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"return",
"path",
"upload_path",
"=",
"None",
"# First, check in ... | Check if `path` is a work-rooted path, and convert to a relative final-rooted path | [
"Check",
"if",
"path",
"is",
"a",
"work",
"-",
"rooted",
"path",
"and",
"convert",
"to",
"a",
"relative",
"final",
"-",
"rooted",
"path"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L195-L222 | train | 219,088 |
bcbio/bcbio-nextgen | bcbio/qc/multiqc.py | _one_exists | def _one_exists(input_files):
"""
at least one file must exist for multiqc to run properly
"""
for f in input_files:
if os.path.exists(f):
return True
return False | python | def _one_exists(input_files):
"""
at least one file must exist for multiqc to run properly
"""
for f in input_files:
if os.path.exists(f):
return True
return False | [
"def",
"_one_exists",
"(",
"input_files",
")",
":",
"for",
"f",
"in",
"input_files",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"f",
")",
":",
"return",
"True",
"return",
"False"
] | at least one file must exist for multiqc to run properly | [
"at",
"least",
"one",
"file",
"must",
"exist",
"for",
"multiqc",
"to",
"run",
"properly"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L224-L231 | train | 219,089 |
def _get_input_files(samples, base_dir, tx_out_dir):
    """Retrieve input files, keyed by sample and QC method name.
    Stages files into the work directory to ensure correct names for
    MultiQC sample assessment when running with CWL.
    """
    # Same predicate is needed twice below; compute it once.
    is_cwl = any(cwlutils.is_cwl_run(d) for d in samples)
    in_files = collections.defaultdict(list)
    for data in samples:
        sum_qc = tz.get_in(["summary", "qc"], data, {})
        if sum_qc in [None, "None"]:
            sum_qc = {}
        elif isinstance(sum_qc, six.string_types):
            sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}
        elif not isinstance(sum_qc, dict):
            raise ValueError("Unexpected summary qc: %s" % sum_qc)
        for program, pfiles in sum_qc.items():
            if isinstance(pfiles, dict):
                # CWL: presents output files as single file plus associated secondary files
                pfiles = [pfiles["base"]] + pfiles.get("secondary", [])
            elif isinstance(pfiles, six.string_types):
                if os.path.exists(pfiles):
                    # Pull in everything under the output file's directory.
                    pfiles = [os.path.join(root, fname)
                              for root, _subdirs, fnames in os.walk(os.path.dirname(pfiles))
                              for fname in fnames]
                else:
                    pfiles = []
            in_files[(dd.get_sample_name(data), program)].extend(pfiles)
    staged_files = []
    for (sample, program), files in in_files.items():
        cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program))
        for fname in files:
            if not (_check_multiqc_input(fname) and _is_good_file_for_multiqc(fname)):
                continue
            if _in_temp_directory(fname) or is_cwl:
                # Stage into the work directory so MultiQC sees stable names.
                staged = os.path.join(cur_dir, os.path.basename(fname))
                shutil.copy(fname, staged)
                staged_files.append(staged)
            else:
                staged_files.append(fname)
    staged_files.extend(get_qsig_multiqc_files(samples))
    # Back compatible -- to migrate to explicit specifications in input YAML
    if not is_cwl:
        staged_files += ["trimmed", "htseq-count/*summary"]
    # Add in created target_info file
    target_info = os.path.join(base_dir, "report", "metrics", "target_info.yaml")
    if os.path.isfile(target_info):
        staged_files.append(target_info)
    return sorted(set(staged_files))
"""Retrieve input files, keyed by sample and QC method name.
Stages files into the work directory to ensure correct names for
MultiQC sample assessment when running with CWL.
"""
in_files = collections.defaultdict(list)
for data in samples:
sum_qc = tz.get_in(["summary", "qc"], data, {})
if sum_qc in [None, "None"]:
sum_qc = {}
elif isinstance(sum_qc, six.string_types):
sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}
elif not isinstance(sum_qc, dict):
raise ValueError("Unexpected summary qc: %s" % sum_qc)
for program, pfiles in sum_qc.items():
if isinstance(pfiles, dict):
pfiles = [pfiles["base"]] + pfiles.get("secondary", [])
# CWL: presents output files as single file plus associated secondary files
elif isinstance(pfiles, six.string_types):
if os.path.exists(pfiles):
pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames]
else:
pfiles = []
in_files[(dd.get_sample_name(data), program)].extend(pfiles)
staged_files = []
for (sample, program), files in in_files.items():
cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program))
for f in files:
if _check_multiqc_input(f) and _is_good_file_for_multiqc(f):
if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]):
staged_f = os.path.join(cur_dir, os.path.basename(f))
shutil.copy(f, staged_f)
staged_files.append(staged_f)
else:
staged_files.append(f)
staged_files.extend(get_qsig_multiqc_files(samples))
# Back compatible -- to migrate to explicit specifications in input YAML
if not any([cwlutils.is_cwl_run(d) for d in samples]):
staged_files += ["trimmed", "htseq-count/*summary"]
# Add in created target_info file
if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")):
staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")]
return sorted(list(set(staged_files))) | [
"def",
"_get_input_files",
"(",
"samples",
",",
"base_dir",
",",
"tx_out_dir",
")",
":",
"in_files",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"data",
"in",
"samples",
":",
"sum_qc",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"summary\"",... | Retrieve input files, keyed by sample and QC method name.
Stages files into the work directory to ensure correct names for
MultiQC sample assessment when running with CWL. | [
"Retrieve",
"input",
"files",
"keyed",
"by",
"sample",
"and",
"QC",
"method",
"name",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L233-L276 | train | 219,090 |
def _group_by_sample_and_batch(samples):
    """Group samples split by QC method back one per sample-batch.
    """
    # Keep only the first sample seen for each (name, bam, batches) key;
    # equivalent to collecting lists and taking element 0 of each.
    first_per_key = {}
    for data in samples:
        key = (dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))
        if key not in first_per_key:
            first_per_key[key] = data
    return list(first_per_key.values())
"""Group samples split by QC method back one per sample-batch.
"""
out = collections.defaultdict(list)
for data in samples:
out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data)
return [xs[0] for xs in out.values()] | [
"def",
"_group_by_sample_and_batch",
"(",
"samples",
")",
":",
"out",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"data",
"in",
"samples",
":",
"out",
"[",
"(",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
",",
"dd",
".",
"get_ali... | Group samples split by QC method back one per sample-batch. | [
"Group",
"samples",
"split",
"by",
"QC",
"method",
"back",
"one",
"per",
"sample",
"-",
"batch",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L287-L293 | train | 219,091 |
def _has_bcftools_germline_stats(data):
    """Check for the presence of a germline stats file, CWL compatible.

    Returns True when the sample's QC summary references a
    bcftools_stats_germline output file.
    """
    stats_file = tz.get_in(["summary", "qc"], data)
    if isinstance(stats_file, dict):
        stats_file = tz.get_in(["variants", "base"], stats_file)
    if not stats_file:
        stats_file = ""
    # Bug fix: the previous `find(...) > 0` returned False for a match at
    # index 0; a substring test counts any occurrence.
    return "bcftools_stats_germline" in stats_file
"""Check for the presence of a germline stats file, CWL compatible.
"""
stats_file = tz.get_in(["summary", "qc"], data)
if isinstance(stats_file, dict):
stats_file = tz.get_in(["variants", "base"], stats_file)
if not stats_file:
stats_file = ""
return stats_file.find("bcftools_stats_germline") > 0 | [
"def",
"_has_bcftools_germline_stats",
"(",
"data",
")",
":",
"stats_file",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"summary\"",
",",
"\"qc\"",
"]",
",",
"data",
")",
"if",
"isinstance",
"(",
"stats_file",
",",
"dict",
")",
":",
"stats_file",
"=",
"tz",
".... | Check for the presence of a germline stats file, CWL compatible. | [
"Check",
"for",
"the",
"presence",
"of",
"a",
"germline",
"stats",
"file",
"CWL",
"compatible",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L397-L405 | train | 219,092 |
bcbio/bcbio-nextgen | bcbio/qc/multiqc.py | _is_good_file_for_multiqc | def _is_good_file_for_multiqc(fpath):
"""Returns False if the file is binary or image."""
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(fpath)
if encoding is not None:
return False
if ftype is not None and ftype.startswith('image'):
return False
return True | python | def _is_good_file_for_multiqc(fpath):
"""Returns False if the file is binary or image."""
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(fpath)
if encoding is not None:
return False
if ftype is not None and ftype.startswith('image'):
return False
return True | [
"def",
"_is_good_file_for_multiqc",
"(",
"fpath",
")",
":",
"# Use mimetypes to exclude binary files where possible",
"(",
"ftype",
",",
"encoding",
")",
"=",
"mimetypes",
".",
"guess_type",
"(",
"fpath",
")",
"if",
"encoding",
"is",
"not",
"None",
":",
"return",
... | Returns False if the file is binary or image. | [
"Returns",
"False",
"if",
"the",
"file",
"is",
"binary",
"or",
"image",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L414-L422 | train | 219,093 |
bcbio/bcbio-nextgen | bcbio/qc/multiqc.py | _parse_disambiguate | def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats | python | def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats | [
"def",
"_parse_disambiguate",
"(",
"disambiguatestatsfilename",
")",
":",
"disambig_stats",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"with",
"open",
"(",
"disambiguatestatsfilename",
",",
"\"r\"",
")",
"as",
"in_handle",
":",
"for",
"i",
",",
"line",
"in",
... | Parse disambiguation stats from given file. | [
"Parse",
"disambiguation",
"stats",
"from",
"given",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L424-L435 | train | 219,094 |
def _merge_metrics(samples, out_dir):
    """Merge metrics from multiple QC steps into one flat file per sample.

    Writes a `<sample>_bcbio.txt` table for each sample under
    `out_dir/report/metrics` and returns the list of files written.
    """
    logger.info("summarize metrics")
    out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
    sample_metrics = collections.defaultdict(dict)
    for s in samples:
        s = _add_disambiguate(s)
        m = tz.get_in(['summary', 'metrics'], s)
        if isinstance(m, six.string_types):
            m = json.loads(m)
        if m:
            # Drop non-scalar metric values. Bug fix: iterate over a snapshot
            # of the keys -- popping while iterating the live keys() view
            # raises RuntimeError on Python 3.
            for me in list(m.keys()):
                if isinstance(m[me], (list, dict, tuple)):
                    m.pop(me, None)
            sample_metrics[dd.get_sample_name(s)].update(m)
    out = []
    for sample_name, m in sample_metrics.items():
        sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name)
        with file_transaction(samples[0], sample_file) as tx_out_file:
            dt = pd.DataFrame(m, index=['1'])
            # Normalize column names for downstream consumers.
            dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
            dt['sample'] = sample_name
            dt['rRNA_rate'] = m.get('rRNA_rate', "NA")
            # Reads-in-peaks percentage; defaults guard against division by zero.
            dt['RiP_pct'] = "%.3f" % (int(m.get("RiP", 0)) / float(m.get("Total_reads", 1)) * 100)
            dt = _fix_duplicated_rate(dt)
            dt.transpose().to_csv(tx_out_file, sep="\t", header=False)
        out.append(sample_file)
    return out
"""Merge metrics from multiple QC steps
"""
logger.info("summarize metrics")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
sample_metrics = collections.defaultdict(dict)
for s in samples:
s = _add_disambiguate(s)
m = tz.get_in(['summary', 'metrics'], s)
if isinstance(m, six.string_types):
m = json.loads(m)
if m:
for me in m.keys():
if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
m.pop(me, None)
sample_metrics[dd.get_sample_name(s)].update(m)
out = []
for sample_name, m in sample_metrics.items():
sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name)
with file_transaction(samples[0], sample_file) as tx_out_file:
dt = pd.DataFrame(m, index=['1'])
dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
dt['sample'] = sample_name
dt['rRNA_rate'] = m.get('rRNA_rate', "NA")
dt['RiP_pct'] = "%.3f" % (int(m.get("RiP", 0)) / float(m.get("Total_reads", 1)) * 100)
dt = _fix_duplicated_rate(dt)
dt.transpose().to_csv(tx_out_file, sep="\t", header=False)
out.append(sample_file)
return out | [
"def",
"_merge_metrics",
"(",
"samples",
",",
"out_dir",
")",
":",
"logger",
".",
"info",
"(",
"\"summarize metrics\"",
")",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"report\"",
",",
"\"metri... | Merge metrics from multiple QC steps | [
"Merge",
"metrics",
"from",
"multiple",
"QC",
"steps"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L456-L484 | train | 219,095 |
def _merge_fastqc(samples):
    """Merge per-sample FastQC tsv outputs into one combined table per module."""
    by_metric = collections.defaultdict(list)
    processed = set()
    for data in samples:
        sample_name = dd.get_sample_name(data)
        # Each sample may appear multiple times (one per QC method); merge once.
        if sample_name in processed:
            continue
        processed.add(sample_name)
        fastqc_dir = os.path.join(dd.get_work_dir(data), "qc", sample_name, "fastqc")
        for fn in glob.glob(fastqc_dir + "/*"):
            if fn.endswith("tsv"):
                by_metric[os.path.basename(fn)].append([sample_name, fn])
    for metric, entries in by_metric.items():
        per_sample = []
        for sample_name, fn in entries:
            dt = pd.read_csv(fn, sep="\t")
            dt['sample'] = sample_name
            per_sample.append(dt)
        merged = utils.rbind(per_sample)
        # Written relative to the current directory (caller chdirs first).
        merged.to_csv(metric, sep="\t", index=False, mode='w')
    return samples
"""
merge all fastqc samples into one by module
"""
fastqc_list = collections.defaultdict(list)
seen = set()
for data in samples:
name = dd.get_sample_name(data)
if name in seen:
continue
seen.add(name)
fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*")
for fn in fns:
if fn.endswith("tsv"):
metric = os.path.basename(fn)
fastqc_list[metric].append([name, fn])
for metric in fastqc_list:
dt_by_sample = []
for fn in fastqc_list[metric]:
dt = pd.read_csv(fn[1], sep="\t")
dt['sample'] = fn[0]
dt_by_sample.append(dt)
dt = utils.rbind(dt_by_sample)
dt.to_csv(metric, sep="\t", index=False, mode ='w')
return samples | [
"def",
"_merge_fastqc",
"(",
"samples",
")",
":",
"fastqc_list",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"seen",
"=",
"set",
"(",
")",
"for",
"data",
"in",
"samples",
":",
"name",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"... | merge all fastqc samples into one by module | [
"merge",
"all",
"fastqc",
"samples",
"into",
"one",
"by",
"module"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L486-L511 | train | 219,096 |
def _create_plot(tumor, in_glob, out_ext, page=1):
    """Create an output plot for the given PDF in the images directory."""
    out_dir = utils.safe_makedir("images")
    out_base = os.path.join(out_dir, "%s-%s" % (tumor, out_ext))
    pdf_file = glob.glob(in_glob)[0]
    png_file = out_base + ".png"
    if not os.path.exists(png_file):
        # pdftoppm appends .png itself, so pass the extension-less base name.
        cmd = ["pdftoppm", pdf_file, out_base, "-png", "-f", str(page), "-singlefile"]
        subprocess.check_call(cmd)
    return png_file
"""Create an output plot for the given PDF in the images directory.
"""
out_dir = utils.safe_makedir("images")
out_name = os.path.join(out_dir, "%s-%s" % (tumor, out_ext))
in_file = glob.glob(in_glob)[0]
cmd = ["pdftoppm", in_file, out_name, "-png", "-f", page, "-singlefile"]
if not os.path.exists(out_name + ".png"):
subprocess.check_call([str(x) for x in cmd])
return out_name + ".png" | [
"def",
"_create_plot",
"(",
"tumor",
",",
"in_glob",
",",
"out_ext",
",",
"page",
"=",
"1",
")",
":",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"\"images\"",
")",
"out_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"%s-%s\"... | Create an output plot for the given PDF in the images directory. | [
"Create",
"an",
"output",
"plot",
"for",
"the",
"given",
"PDF",
"in",
"the",
"images",
"directory",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L64-L73 | train | 219,097 |
bcbio/bcbio-nextgen | scripts/utils/hla_loh_comparison.py | _get_cromwell_execution_dir | def _get_cromwell_execution_dir(base_dir, target_glob):
"""Retrieve the baseline directory with cromwell output files.
Handles Cromwell restarts where there are multiple work directories and
we traverse symlinks back to the original.
"""
cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0]
if os.path.exists(os.path.join(cur_dir, "cwl.output.json")):
return base_dir
else:
symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script")))
ref_base = os.path.dirname(base_dir)
new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0]
return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob) | python | def _get_cromwell_execution_dir(base_dir, target_glob):
"""Retrieve the baseline directory with cromwell output files.
Handles Cromwell restarts where there are multiple work directories and
we traverse symlinks back to the original.
"""
cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0]
if os.path.exists(os.path.join(cur_dir, "cwl.output.json")):
return base_dir
else:
symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script")))
ref_base = os.path.dirname(base_dir)
new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0]
return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob) | [
"def",
"_get_cromwell_execution_dir",
"(",
"base_dir",
",",
"target_glob",
")",
":",
"cur_dir",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"target_glob",
")",
")",
"[",
"0",
"]",
"if",
"os",
".",
"path",
".",
... | Retrieve the baseline directory with cromwell output files.
Handles Cromwell restarts where there are multiple work directories and
we traverse symlinks back to the original. | [
"Retrieve",
"the",
"baseline",
"directory",
"with",
"cromwell",
"output",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L232-L245 | train | 219,098 |
def prep_bam_inputs(out_dir, sample, call_file, bam_file):
    """Prepare expected input BAM files from pre-aligned.
    """
    base = utils.splitext_plus(os.path.basename(bam_file))[0]
    with open(call_file) as in_handle:
        hla_calls = [line.strip() for line in in_handle]
    for cur_hla in hla_calls:
        hla_dir = utils.safe_makedir(os.path.join(out_dir, base))
        out_file = os.path.join(hla_dir, "%s.type.%s.filtered.bam" % (base, cur_hla))
        if not os.path.exists(out_file):
            # Extract only the reads aligned to this HLA contig.
            subprocess.check_call(["samtools", "view", "-b", "-o", out_file, bam_file, cur_hla])
"""Prepare expected input BAM files from pre-aligned.
"""
base = utils.splitext_plus(os.path.basename(bam_file))[0]
with open(call_file) as in_handle:
for cur_hla in (x.strip() for x in in_handle):
out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)),
"%s.type.%s.filtered.bam" % (base, cur_hla))
if not os.path.exists(out_file):
cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla]
subprocess.check_call(cmd) | [
"def",
"prep_bam_inputs",
"(",
"out_dir",
",",
"sample",
",",
"call_file",
",",
"bam_file",
")",
":",
"base",
"=",
"utils",
".",
"splitext_plus",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"bam_file",
")",
")",
"[",
"0",
"]",
"with",
"open",
"(",
... | Prepare expected input BAM files from pre-aligned. | [
"Prepare",
"expected",
"input",
"BAM",
"files",
"from",
"pre",
"-",
"aligned",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L247-L257 | train | 219,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.