repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | sample_alt_and_depth | def sample_alt_and_depth(rec, sample):
"""Flexibly get ALT allele and depth counts, handling FreeBayes, MuTect and other cases.
"""
if sample and "AD" in sample:
all_counts = [int(x) for x in sample["AD"]]
alt_counts = sum(all_counts[1:])
depth = sum(all_counts)
elif sample and "AO" in sample and sample.get("RO") is not None:
alts = sample["AO"]
if not isinstance(alts, (list, tuple)):
alts = [alts]
alt_counts = sum([int(x) for x in alts])
depth = alt_counts + int(sample["RO"])
elif "DP" in rec.info and "AF" in rec.info:
af = rec.info["AF"][0] if isinstance(rec.info["AF"], (tuple, list)) else rec.info["AF"]
return None, rec.info["DP"], af
else:
alt_counts = None
if alt_counts is None or depth is None or depth == 0:
return None, None, None
else:
freq = float(alt_counts) / float(depth)
return alt_counts, depth, freq | python | def sample_alt_and_depth(rec, sample):
"""Flexibly get ALT allele and depth counts, handling FreeBayes, MuTect and other cases.
"""
if sample and "AD" in sample:
all_counts = [int(x) for x in sample["AD"]]
alt_counts = sum(all_counts[1:])
depth = sum(all_counts)
elif sample and "AO" in sample and sample.get("RO") is not None:
alts = sample["AO"]
if not isinstance(alts, (list, tuple)):
alts = [alts]
alt_counts = sum([int(x) for x in alts])
depth = alt_counts + int(sample["RO"])
elif "DP" in rec.info and "AF" in rec.info:
af = rec.info["AF"][0] if isinstance(rec.info["AF"], (tuple, list)) else rec.info["AF"]
return None, rec.info["DP"], af
else:
alt_counts = None
if alt_counts is None or depth is None or depth == 0:
return None, None, None
else:
freq = float(alt_counts) / float(depth)
return alt_counts, depth, freq | [
"def",
"sample_alt_and_depth",
"(",
"rec",
",",
"sample",
")",
":",
"if",
"sample",
"and",
"\"AD\"",
"in",
"sample",
":",
"all_counts",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"sample",
"[",
"\"AD\"",
"]",
"]",
"alt_counts",
"=",
"sum",
"("... | Flexibly get ALT allele and depth counts, handling FreeBayes, MuTect and other cases. | [
"Flexibly",
"get",
"ALT",
"allele",
"and",
"depth",
"counts",
"handling",
"FreeBayes",
"MuTect",
"and",
"other",
"cases",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L403-L425 | train | 218,800 |
bcbio/bcbio-nextgen | bcbio/bam/ref.py | fasta_idx | def fasta_idx(in_file, config=None):
"""Retrieve samtools style fasta index.
"""
fasta_index = in_file + ".fai"
if not utils.file_exists(fasta_index):
samtools = config_utils.get_program("samtools", config) if config else "samtools"
cmd = "{samtools} faidx {in_file}"
do.run(cmd.format(**locals()), "samtools faidx")
return fasta_index | python | def fasta_idx(in_file, config=None):
"""Retrieve samtools style fasta index.
"""
fasta_index = in_file + ".fai"
if not utils.file_exists(fasta_index):
samtools = config_utils.get_program("samtools", config) if config else "samtools"
cmd = "{samtools} faidx {in_file}"
do.run(cmd.format(**locals()), "samtools faidx")
return fasta_index | [
"def",
"fasta_idx",
"(",
"in_file",
",",
"config",
"=",
"None",
")",
":",
"fasta_index",
"=",
"in_file",
"+",
"\".fai\"",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"fasta_index",
")",
":",
"samtools",
"=",
"config_utils",
".",
"get_program",
"(",
"\"s... | Retrieve samtools style fasta index. | [
"Retrieve",
"samtools",
"style",
"fasta",
"index",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/ref.py#L9-L17 | train | 218,801 |
bcbio/bcbio-nextgen | bcbio/bam/ref.py | file_contigs | def file_contigs(ref_file, config=None):
"""Iterator of reference contigs and lengths from a reference file.
"""
ContigInfo = collections.namedtuple("ContigInfo", "name size")
with open(fasta_idx(ref_file, config)) as in_handle:
for line in (l for l in in_handle if l.strip()):
name, size = line.split()[:2]
yield ContigInfo(name, int(size)) | python | def file_contigs(ref_file, config=None):
"""Iterator of reference contigs and lengths from a reference file.
"""
ContigInfo = collections.namedtuple("ContigInfo", "name size")
with open(fasta_idx(ref_file, config)) as in_handle:
for line in (l for l in in_handle if l.strip()):
name, size = line.split()[:2]
yield ContigInfo(name, int(size)) | [
"def",
"file_contigs",
"(",
"ref_file",
",",
"config",
"=",
"None",
")",
":",
"ContigInfo",
"=",
"collections",
".",
"namedtuple",
"(",
"\"ContigInfo\"",
",",
"\"name size\"",
")",
"with",
"open",
"(",
"fasta_idx",
"(",
"ref_file",
",",
"config",
")",
")",
... | Iterator of reference contigs and lengths from a reference file. | [
"Iterator",
"of",
"reference",
"contigs",
"and",
"lengths",
"from",
"a",
"reference",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/ref.py#L19-L26 | train | 218,802 |
bcbio/bcbio-nextgen | bcbio/variation/smcounter2.py | run | def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
"""Run tumor only smCounter2 calling.
"""
paired = vcfutils.get_paired_bams(align_bams, items)
assert paired and not paired.normal_bam, ("smCounter2 supports tumor-only variant calling: %s" %
(",".join([dd.get_sample_name(d) for d in items])))
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
out_file = out_file.replace(".vcf.gz", ".vcf")
out_prefix = utils.splitext_plus(os.path.basename(out_file))[0]
if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
cmd = ["smCounter2", "--runPath", os.path.dirname(tx_out_file),
"--outPrefix", out_prefix,
"--bedTarget", target, "--refGenome", ref_file,
"--bamFile", paired.tumor_bam, "--bamType", "consensus",
"--nCPU", dd.get_num_cores(paired.tumor_data)]
do.run(cmd, "smcounter2 variant calling")
for fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "*.smCounter*")):
shutil.move(fname, os.path.join(os.path.dirname(out_file), os.path.basename(fname)))
utils.symlink_plus(os.path.join(os.path.dirname(out_file),
"%s.smCounter.cut.vcf" % out_prefix),
out_file)
return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"], remove_orig=False,
prep_cmd="sed 's#FORMAT\t%s#FORMAT\t%s#' | %s" %
(out_prefix, dd.get_sample_name(paired.tumor_data),
vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file))) | python | def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
"""Run tumor only smCounter2 calling.
"""
paired = vcfutils.get_paired_bams(align_bams, items)
assert paired and not paired.normal_bam, ("smCounter2 supports tumor-only variant calling: %s" %
(",".join([dd.get_sample_name(d) for d in items])))
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
out_file = out_file.replace(".vcf.gz", ".vcf")
out_prefix = utils.splitext_plus(os.path.basename(out_file))[0]
if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
cmd = ["smCounter2", "--runPath", os.path.dirname(tx_out_file),
"--outPrefix", out_prefix,
"--bedTarget", target, "--refGenome", ref_file,
"--bamFile", paired.tumor_bam, "--bamType", "consensus",
"--nCPU", dd.get_num_cores(paired.tumor_data)]
do.run(cmd, "smcounter2 variant calling")
for fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "*.smCounter*")):
shutil.move(fname, os.path.join(os.path.dirname(out_file), os.path.basename(fname)))
utils.symlink_plus(os.path.join(os.path.dirname(out_file),
"%s.smCounter.cut.vcf" % out_prefix),
out_file)
return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"], remove_orig=False,
prep_cmd="sed 's#FORMAT\t%s#FORMAT\t%s#' | %s" %
(out_prefix, dd.get_sample_name(paired.tumor_data),
vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file))) | [
"def",
"run",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"paired",
"=",
"vcfutils",
".",
"get_paired_bams",
"(",
"align_bams",
",",
"items",
")",
"assert",
"pa... | Run tumor only smCounter2 calling. | [
"Run",
"tumor",
"only",
"smCounter2",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/smcounter2.py#L17-L44 | train | 218,803 |
bcbio/bcbio-nextgen | bcbio/bam/readstats.py | number_of_mapped_reads | def number_of_mapped_reads(data, bam_file, keep_dups=True, bed_file=None, target_name=None):
"""Count mapped reads, allow adjustment for duplicates and BED regions.
Since samtools view does not use indexes for BED files
(https://github.com/samtools/samtools/issues/88)
we loop over regions in a BED file and add the counts together.
Uses a global cache file to store counts, making it possible to pass this single
file for CWL runs. For parallel processes it can have concurrent append writes,
so we have a simple file locking mechanism to avoid this.
"""
# Flag explainer https://broadinstitute.github.io/picard/explain-flags.html
callable_flags = ["not unmapped", "not mate_is_unmapped", "not secondary_alignment",
"not failed_quality_control"]
if keep_dups:
query_flags = callable_flags
flag = 780 # not (read unmapped or mate unmapped or fails QC or secondary alignment)
else:
query_flags = callable_flags + ["not duplicate"]
flag = 1804 # as above plus not duplicate
# Back compatible cache
oldcache_file = _backcompatible_cache_file(query_flags, bed_file, target_name, data)
if oldcache_file:
with open(oldcache_file) as f:
return int(f.read().strip())
# New cache
key = json.dumps({"flags": sorted(query_flags),
"region": os.path.basename(bed_file) if bed_file else "",
"sample": dd.get_sample_name(data)},
separators=(",", ":"), sort_keys=True)
cache_file = get_cache_file(data)
if utils.file_exists(cache_file):
with open(cache_file) as in_handle:
for cur_key, cur_val in (l.strip().split("\t") for l in in_handle):
if cur_key == key:
return int(cur_val)
# Calculate stats
count_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage",
dd.get_sample_name(data), "counts"))
if not bed_file:
bed_file = os.path.join(count_dir, "fullgenome.bed")
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for c in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size))
count_file = os.path.join(count_dir,
"%s-%s-counts.txt" % (os.path.splitext(os.path.basename(bed_file))[0], flag))
if not utils.file_exists(count_file):
bam.index(bam_file, data["config"], check_timestamp=False)
num_cores = dd.get_num_cores(data)
with file_transaction(data, count_file) as tx_out_file:
cmd = ("hts_nim_tools count-reads -t {num_cores} -F {flag} {bed_file} {bam_file} > {tx_out_file}")
do.run(cmd.format(**locals()), "Count mapped reads: %s" % (dd.get_sample_name(data)))
count = 0
with open(count_file) as in_handle:
for line in in_handle:
count += int(line.rstrip().split()[-1])
with _simple_lock(cache_file):
with open(cache_file, "a") as out_handle:
out_handle.write("%s\t%s\n" % (key, count))
return count | python | def number_of_mapped_reads(data, bam_file, keep_dups=True, bed_file=None, target_name=None):
"""Count mapped reads, allow adjustment for duplicates and BED regions.
Since samtools view does not use indexes for BED files
(https://github.com/samtools/samtools/issues/88)
we loop over regions in a BED file and add the counts together.
Uses a global cache file to store counts, making it possible to pass this single
file for CWL runs. For parallel processes it can have concurrent append writes,
so we have a simple file locking mechanism to avoid this.
"""
# Flag explainer https://broadinstitute.github.io/picard/explain-flags.html
callable_flags = ["not unmapped", "not mate_is_unmapped", "not secondary_alignment",
"not failed_quality_control"]
if keep_dups:
query_flags = callable_flags
flag = 780 # not (read unmapped or mate unmapped or fails QC or secondary alignment)
else:
query_flags = callable_flags + ["not duplicate"]
flag = 1804 # as above plus not duplicate
# Back compatible cache
oldcache_file = _backcompatible_cache_file(query_flags, bed_file, target_name, data)
if oldcache_file:
with open(oldcache_file) as f:
return int(f.read().strip())
# New cache
key = json.dumps({"flags": sorted(query_flags),
"region": os.path.basename(bed_file) if bed_file else "",
"sample": dd.get_sample_name(data)},
separators=(",", ":"), sort_keys=True)
cache_file = get_cache_file(data)
if utils.file_exists(cache_file):
with open(cache_file) as in_handle:
for cur_key, cur_val in (l.strip().split("\t") for l in in_handle):
if cur_key == key:
return int(cur_val)
# Calculate stats
count_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage",
dd.get_sample_name(data), "counts"))
if not bed_file:
bed_file = os.path.join(count_dir, "fullgenome.bed")
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for c in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size))
count_file = os.path.join(count_dir,
"%s-%s-counts.txt" % (os.path.splitext(os.path.basename(bed_file))[0], flag))
if not utils.file_exists(count_file):
bam.index(bam_file, data["config"], check_timestamp=False)
num_cores = dd.get_num_cores(data)
with file_transaction(data, count_file) as tx_out_file:
cmd = ("hts_nim_tools count-reads -t {num_cores} -F {flag} {bed_file} {bam_file} > {tx_out_file}")
do.run(cmd.format(**locals()), "Count mapped reads: %s" % (dd.get_sample_name(data)))
count = 0
with open(count_file) as in_handle:
for line in in_handle:
count += int(line.rstrip().split()[-1])
with _simple_lock(cache_file):
with open(cache_file, "a") as out_handle:
out_handle.write("%s\t%s\n" % (key, count))
return count | [
"def",
"number_of_mapped_reads",
"(",
"data",
",",
"bam_file",
",",
"keep_dups",
"=",
"True",
",",
"bed_file",
"=",
"None",
",",
"target_name",
"=",
"None",
")",
":",
"# Flag explainer https://broadinstitute.github.io/picard/explain-flags.html",
"callable_flags",
"=",
"... | Count mapped reads, allow adjustment for duplicates and BED regions.
Since samtools view does not use indexes for BED files
(https://github.com/samtools/samtools/issues/88)
we loop over regions in a BED file and add the counts together.
Uses a global cache file to store counts, making it possible to pass this single
file for CWL runs. For parallel processes it can have concurrent append writes,
so we have a simple file locking mechanism to avoid this. | [
"Count",
"mapped",
"reads",
"allow",
"adjustment",
"for",
"duplicates",
"and",
"BED",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/readstats.py#L37-L102 | train | 218,804 |
bcbio/bcbio-nextgen | bcbio/bam/readstats.py | _simple_lock | def _simple_lock(f):
"""Simple file lock, times out after 20 second assuming lock is stale
"""
lock_file = f + ".lock"
timeout = 20
curtime = 0
interval = 2
while os.path.exists(lock_file):
time.sleep(interval)
curtime += interval
if curtime > timeout:
os.remove(lock_file)
with open(lock_file, "w") as out_handle:
out_handle.write("locked")
yield
if os.path.exists(lock_file):
os.remove(lock_file) | python | def _simple_lock(f):
"""Simple file lock, times out after 20 second assuming lock is stale
"""
lock_file = f + ".lock"
timeout = 20
curtime = 0
interval = 2
while os.path.exists(lock_file):
time.sleep(interval)
curtime += interval
if curtime > timeout:
os.remove(lock_file)
with open(lock_file, "w") as out_handle:
out_handle.write("locked")
yield
if os.path.exists(lock_file):
os.remove(lock_file) | [
"def",
"_simple_lock",
"(",
"f",
")",
":",
"lock_file",
"=",
"f",
"+",
"\".lock\"",
"timeout",
"=",
"20",
"curtime",
"=",
"0",
"interval",
"=",
"2",
"while",
"os",
".",
"path",
".",
"exists",
"(",
"lock_file",
")",
":",
"time",
".",
"sleep",
"(",
"... | Simple file lock, times out after 20 second assuming lock is stale | [
"Simple",
"file",
"lock",
"times",
"out",
"after",
"20",
"second",
"assuming",
"lock",
"is",
"stale"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/readstats.py#L105-L121 | train | 218,805 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | get_max_counts | def get_max_counts(samples):
"""Retrieve number of regions that can be processed in parallel from current samples.
"""
counts = []
for data in (x[0] for x in samples):
count = tz.get_in(["config", "algorithm", "callable_count"], data, 1)
vcs = tz.get_in(["config", "algorithm", "variantcaller"], data, [])
if isinstance(vcs, six.string_types):
vcs = [vcs]
if vcs:
count *= len(vcs)
counts.append(count)
return max(counts) | python | def get_max_counts(samples):
"""Retrieve number of regions that can be processed in parallel from current samples.
"""
counts = []
for data in (x[0] for x in samples):
count = tz.get_in(["config", "algorithm", "callable_count"], data, 1)
vcs = tz.get_in(["config", "algorithm", "variantcaller"], data, [])
if isinstance(vcs, six.string_types):
vcs = [vcs]
if vcs:
count *= len(vcs)
counts.append(count)
return max(counts) | [
"def",
"get_max_counts",
"(",
"samples",
")",
":",
"counts",
"=",
"[",
"]",
"for",
"data",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"samples",
")",
":",
"count",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
... | Retrieve number of regions that can be processed in parallel from current samples. | [
"Retrieve",
"number",
"of",
"regions",
"that",
"can",
"be",
"processed",
"in",
"parallel",
"from",
"current",
"samples",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L16-L28 | train | 218,806 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | _split_by_regions | def _split_by_regions(dirname, out_ext, in_key):
"""Split a BAM file data analysis into chromosomal regions.
"""
def _do_work(data):
# XXX Need to move retrieval of regions into preparation to avoid
# need for files when running in non-shared filesystems
regions = _get_parallel_regions(data)
def _sort_by_size(region):
_, start, end = region
return end - start
regions.sort(key=_sort_by_size, reverse=True)
bam_file = data[in_key]
if bam_file is None:
return None, []
part_info = []
base_out = os.path.splitext(os.path.basename(bam_file))[0]
nowork = [["nochrom"], ["noanalysis", data["config"]["algorithm"]["non_callable_regions"]]]
for region in regions + nowork:
out_dir = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], region[0])
region_outfile = os.path.join(out_dir, "%s-%s%s" %
(base_out, to_safestr(region), out_ext))
part_info.append((region, region_outfile))
out_file = os.path.join(data["dirs"]["work"], dirname, data["name"][-1],
"%s%s" % (base_out, out_ext))
return out_file, part_info
return _do_work | python | def _split_by_regions(dirname, out_ext, in_key):
"""Split a BAM file data analysis into chromosomal regions.
"""
def _do_work(data):
# XXX Need to move retrieval of regions into preparation to avoid
# need for files when running in non-shared filesystems
regions = _get_parallel_regions(data)
def _sort_by_size(region):
_, start, end = region
return end - start
regions.sort(key=_sort_by_size, reverse=True)
bam_file = data[in_key]
if bam_file is None:
return None, []
part_info = []
base_out = os.path.splitext(os.path.basename(bam_file))[0]
nowork = [["nochrom"], ["noanalysis", data["config"]["algorithm"]["non_callable_regions"]]]
for region in regions + nowork:
out_dir = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], region[0])
region_outfile = os.path.join(out_dir, "%s-%s%s" %
(base_out, to_safestr(region), out_ext))
part_info.append((region, region_outfile))
out_file = os.path.join(data["dirs"]["work"], dirname, data["name"][-1],
"%s%s" % (base_out, out_ext))
return out_file, part_info
return _do_work | [
"def",
"_split_by_regions",
"(",
"dirname",
",",
"out_ext",
",",
"in_key",
")",
":",
"def",
"_do_work",
"(",
"data",
")",
":",
"# XXX Need to move retrieval of regions into preparation to avoid",
"# need for files when running in non-shared filesystems",
"regions",
"=",
"_get... | Split a BAM file data analysis into chromosomal regions. | [
"Split",
"a",
"BAM",
"file",
"data",
"analysis",
"into",
"chromosomal",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L40-L65 | train | 218,807 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | _get_parallel_regions | def _get_parallel_regions(data):
"""Retrieve regions to run in parallel, putting longest intervals first.
"""
callable_regions = tz.get_in(["config", "algorithm", "callable_regions"], data)
if not callable_regions:
raise ValueError("Did not find any callable regions for sample: %s\n"
"Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions"
% (dd.get_sample_name(data), dd.get_sample_name(data)))
with open(callable_regions) as in_handle:
regions = [(xs[0], int(xs[1]), int(xs[2])) for xs in
(l.rstrip().split("\t") for l in in_handle) if (len(xs) >= 3 and
not xs[0].startswith(("track", "browser",)))]
return regions | python | def _get_parallel_regions(data):
"""Retrieve regions to run in parallel, putting longest intervals first.
"""
callable_regions = tz.get_in(["config", "algorithm", "callable_regions"], data)
if not callable_regions:
raise ValueError("Did not find any callable regions for sample: %s\n"
"Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions"
% (dd.get_sample_name(data), dd.get_sample_name(data)))
with open(callable_regions) as in_handle:
regions = [(xs[0], int(xs[1]), int(xs[2])) for xs in
(l.rstrip().split("\t") for l in in_handle) if (len(xs) >= 3 and
not xs[0].startswith(("track", "browser",)))]
return regions | [
"def",
"_get_parallel_regions",
"(",
"data",
")",
":",
"callable_regions",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"callable_regions\"",
"]",
",",
"data",
")",
"if",
"not",
"callable_regions",
":",
"raise",
"ValueError",
... | Retrieve regions to run in parallel, putting longest intervals first. | [
"Retrieve",
"regions",
"to",
"run",
"in",
"parallel",
"putting",
"longest",
"intervals",
"first",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L67-L79 | train | 218,808 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | get_parallel_regions | def get_parallel_regions(batch):
"""CWL target to retrieve a list of callable regions for parallelization.
"""
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
return [{"region": "%s:%s-%s" % (c, s, e)} for c, s, e in regions] | python | def get_parallel_regions(batch):
"""CWL target to retrieve a list of callable regions for parallelization.
"""
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
return [{"region": "%s:%s-%s" % (c, s, e)} for c, s, e in regions] | [
"def",
"get_parallel_regions",
"(",
"batch",
")",
":",
"samples",
"=",
"[",
"utils",
".",
"to_single_data",
"(",
"d",
")",
"for",
"d",
"in",
"batch",
"]",
"regions",
"=",
"_get_parallel_regions",
"(",
"samples",
"[",
"0",
"]",
")",
"return",
"[",
"{",
... | CWL target to retrieve a list of callable regions for parallelization. | [
"CWL",
"target",
"to",
"retrieve",
"a",
"list",
"of",
"callable",
"regions",
"for",
"parallelization",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L81-L86 | train | 218,809 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | get_parallel_regions_block | def get_parallel_regions_block(batch):
"""CWL target to retrieve block group of callable regions for parallelization.
Uses blocking to handle multicore runs.
"""
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
out = []
# Currently don't have core information here so aim for about 10 items per partition
n = 10
for region_block in tz.partition_all(n, regions):
out.append({"region_block": ["%s:%s-%s" % (c, s, e) for c, s, e in region_block]})
return out | python | def get_parallel_regions_block(batch):
"""CWL target to retrieve block group of callable regions for parallelization.
Uses blocking to handle multicore runs.
"""
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
out = []
# Currently don't have core information here so aim for about 10 items per partition
n = 10
for region_block in tz.partition_all(n, regions):
out.append({"region_block": ["%s:%s-%s" % (c, s, e) for c, s, e in region_block]})
return out | [
"def",
"get_parallel_regions_block",
"(",
"batch",
")",
":",
"samples",
"=",
"[",
"utils",
".",
"to_single_data",
"(",
"d",
")",
"for",
"d",
"in",
"batch",
"]",
"regions",
"=",
"_get_parallel_regions",
"(",
"samples",
"[",
"0",
"]",
")",
"out",
"=",
"[",... | CWL target to retrieve block group of callable regions for parallelization.
Uses blocking to handle multicore runs. | [
"CWL",
"target",
"to",
"retrieve",
"block",
"group",
"of",
"callable",
"regions",
"for",
"parallelization",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L88-L100 | train | 218,810 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | _add_combine_info | def _add_combine_info(output, combine_map, file_key):
"""Do not actually combine, but add details for later combining work.
Each sample will contain information on the out file and additional files
to merge, enabling other splits and recombines without losing information.
"""
files_per_output = collections.defaultdict(list)
for part_file, out_file in combine_map.items():
files_per_output[out_file].append(part_file)
out_by_file = collections.defaultdict(list)
out = []
for data in output:
# Do not pass along nochrom, noanalysis regions
if data["region"][0] not in ["nochrom", "noanalysis"]:
cur_file = data[file_key]
# If we didn't process, no need to add combine information
if cur_file in combine_map:
out_file = combine_map[cur_file]
if "combine" not in data:
data["combine"] = {}
data["combine"][file_key] = {"out": out_file,
"extras": files_per_output.get(out_file, [])}
out_by_file[out_file].append(data)
elif cur_file:
out_by_file[cur_file].append(data)
else:
out.append([data])
for samples in out_by_file.values():
regions = [x["region"] for x in samples]
region_bams = [x["work_bam"] for x in samples]
assert len(regions) == len(region_bams)
if len(set(region_bams)) == 1:
region_bams = [region_bams[0]]
data = samples[0]
data["region_bams"] = region_bams
data["region"] = regions
data = dd.set_mark_duplicates(data, data["config"]["algorithm"]["orig_markduplicates"])
del data["config"]["algorithm"]["orig_markduplicates"]
out.append([data])
return out | python | def _add_combine_info(output, combine_map, file_key):
"""Do not actually combine, but add details for later combining work.
Each sample will contain information on the out file and additional files
to merge, enabling other splits and recombines without losing information.
"""
files_per_output = collections.defaultdict(list)
for part_file, out_file in combine_map.items():
files_per_output[out_file].append(part_file)
out_by_file = collections.defaultdict(list)
out = []
for data in output:
# Do not pass along nochrom, noanalysis regions
if data["region"][0] not in ["nochrom", "noanalysis"]:
cur_file = data[file_key]
# If we didn't process, no need to add combine information
if cur_file in combine_map:
out_file = combine_map[cur_file]
if "combine" not in data:
data["combine"] = {}
data["combine"][file_key] = {"out": out_file,
"extras": files_per_output.get(out_file, [])}
out_by_file[out_file].append(data)
elif cur_file:
out_by_file[cur_file].append(data)
else:
out.append([data])
for samples in out_by_file.values():
regions = [x["region"] for x in samples]
region_bams = [x["work_bam"] for x in samples]
assert len(regions) == len(region_bams)
if len(set(region_bams)) == 1:
region_bams = [region_bams[0]]
data = samples[0]
data["region_bams"] = region_bams
data["region"] = regions
data = dd.set_mark_duplicates(data, data["config"]["algorithm"]["orig_markduplicates"])
del data["config"]["algorithm"]["orig_markduplicates"]
out.append([data])
return out | [
"def",
"_add_combine_info",
"(",
"output",
",",
"combine_map",
",",
"file_key",
")",
":",
"files_per_output",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"part_file",
",",
"out_file",
"in",
"combine_map",
".",
"items",
"(",
")",
":",
"fi... | Do not actually combine, but add details for later combining work.
Each sample will contain information on the out file and additional files
to merge, enabling other splits and recombines without losing information. | [
"Do",
"not",
"actually",
"combine",
"but",
"add",
"details",
"for",
"later",
"combining",
"work",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L102-L141 | train | 218,811 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | parallel_prep_region | def parallel_prep_region(samples, run_parallel):
"""Perform full pre-variant calling BAM prep work on regions.
"""
file_key = "work_bam"
split_fn = _split_by_regions("bamprep", "-prep.bam", file_key)
# identify samples that do not need preparation -- no recalibration or realignment
extras = []
torun = []
for data in [x[0] for x in samples]:
if data.get("work_bam"):
data["align_bam"] = data["work_bam"]
if (not dd.get_realign(data) and not dd.get_variantcaller(data)):
extras.append([data])
elif not data.get(file_key):
extras.append([data])
else:
# Do not want to re-run duplicate marking after realignment
data["config"]["algorithm"]["orig_markduplicates"] = dd.get_mark_duplicates(data)
data = dd.set_mark_duplicates(data, False)
torun.append([data])
return extras + parallel_split_combine(torun, split_fn, run_parallel,
"piped_bamprep", _add_combine_info, file_key, ["config"]) | python | def parallel_prep_region(samples, run_parallel):
"""Perform full pre-variant calling BAM prep work on regions.
"""
file_key = "work_bam"
split_fn = _split_by_regions("bamprep", "-prep.bam", file_key)
# identify samples that do not need preparation -- no recalibration or realignment
extras = []
torun = []
for data in [x[0] for x in samples]:
if data.get("work_bam"):
data["align_bam"] = data["work_bam"]
if (not dd.get_realign(data) and not dd.get_variantcaller(data)):
extras.append([data])
elif not data.get(file_key):
extras.append([data])
else:
# Do not want to re-run duplicate marking after realignment
data["config"]["algorithm"]["orig_markduplicates"] = dd.get_mark_duplicates(data)
data = dd.set_mark_duplicates(data, False)
torun.append([data])
return extras + parallel_split_combine(torun, split_fn, run_parallel,
"piped_bamprep", _add_combine_info, file_key, ["config"]) | [
"def",
"parallel_prep_region",
"(",
"samples",
",",
"run_parallel",
")",
":",
"file_key",
"=",
"\"work_bam\"",
"split_fn",
"=",
"_split_by_regions",
"(",
"\"bamprep\"",
",",
"\"-prep.bam\"",
",",
"file_key",
")",
"# identify samples that do not need preparation -- no recali... | Perform full pre-variant calling BAM prep work on regions. | [
"Perform",
"full",
"pre",
"-",
"variant",
"calling",
"BAM",
"prep",
"work",
"on",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L143-L164 | train | 218,812 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | delayed_bamprep_merge | def delayed_bamprep_merge(samples, run_parallel):
"""Perform a delayed merge on regional prepared BAM files.
"""
if any("combine" in data[0] for data in samples):
return run_parallel("delayed_bam_merge", samples)
else:
return samples | python | def delayed_bamprep_merge(samples, run_parallel):
"""Perform a delayed merge on regional prepared BAM files.
"""
if any("combine" in data[0] for data in samples):
return run_parallel("delayed_bam_merge", samples)
else:
return samples | [
"def",
"delayed_bamprep_merge",
"(",
"samples",
",",
"run_parallel",
")",
":",
"if",
"any",
"(",
"\"combine\"",
"in",
"data",
"[",
"0",
"]",
"for",
"data",
"in",
"samples",
")",
":",
"return",
"run_parallel",
"(",
"\"delayed_bam_merge\"",
",",
"samples",
")"... | Perform a delayed merge on regional prepared BAM files. | [
"Perform",
"a",
"delayed",
"merge",
"on",
"regional",
"prepared",
"BAM",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L166-L172 | train | 218,813 |
bcbio/bcbio-nextgen | bcbio/pipeline/region.py | clean_sample_data | def clean_sample_data(samples):
"""Clean unnecessary information from sample data, reducing size for message passing.
"""
out = []
for data in (utils.to_single_data(x) for x in samples):
if "dirs" in data:
data["dirs"] = {"work": data["dirs"]["work"], "galaxy": data["dirs"]["galaxy"],
"fastq": data["dirs"].get("fastq")}
data["config"] = {"algorithm": data["config"]["algorithm"],
"resources": data["config"]["resources"]}
for remove_attr in ["config_file", "algorithm"]:
data.pop(remove_attr, None)
out.append([data])
return out | python | def clean_sample_data(samples):
"""Clean unnecessary information from sample data, reducing size for message passing.
"""
out = []
for data in (utils.to_single_data(x) for x in samples):
if "dirs" in data:
data["dirs"] = {"work": data["dirs"]["work"], "galaxy": data["dirs"]["galaxy"],
"fastq": data["dirs"].get("fastq")}
data["config"] = {"algorithm": data["config"]["algorithm"],
"resources": data["config"]["resources"]}
for remove_attr in ["config_file", "algorithm"]:
data.pop(remove_attr, None)
out.append([data])
return out | [
"def",
"clean_sample_data",
"(",
"samples",
")",
":",
"out",
"=",
"[",
"]",
"for",
"data",
"in",
"(",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"samples",
")",
":",
"if",
"\"dirs\"",
"in",
"data",
":",
"data",
"[",
"\"dirs\"",
... | Clean unnecessary information from sample data, reducing size for message passing. | [
"Clean",
"unnecessary",
"information",
"from",
"sample",
"data",
"reducing",
"size",
"for",
"message",
"passing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L176-L189 | train | 218,814 |
bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | _add_sj_index_commands | def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
newer versions of STAR can generate splice junction databases on thephfly
this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd | python | def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
newer versions of STAR can generate splice junction databases on thephfly
this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd | [
"def",
"_add_sj_index_commands",
"(",
"fq1",
",",
"ref_file",
",",
"gtf_file",
")",
":",
"if",
"_has_sj_index",
"(",
"ref_file",
")",
":",
"return",
"\"\"",
"else",
":",
"rlength",
"=",
"fastq",
".",
"estimate_maximum_read_length",
"(",
"fq1",
")",
"cmd",
"=... | newer versions of STAR can generate splice junction databases on thephfly
this is preferable since we can tailor it to the read lengths | [
"newer",
"versions",
"of",
"STAR",
"can",
"generate",
"splice",
"junction",
"databases",
"on",
"thephfly",
"this",
"is",
"preferable",
"since",
"we",
"can",
"tailor",
"it",
"to",
"the",
"read",
"lengths"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L119-L130 | train | 218,815 |
bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | _has_sj_index | def _has_sj_index(ref_file):
"""this file won't exist if we can do on the fly splice junction indexing"""
return (file_exists(os.path.join(ref_file, "sjdbInfo.txt")) and
(file_exists(os.path.join(ref_file, "transcriptInfo.tab")))) | python | def _has_sj_index(ref_file):
"""this file won't exist if we can do on the fly splice junction indexing"""
return (file_exists(os.path.join(ref_file, "sjdbInfo.txt")) and
(file_exists(os.path.join(ref_file, "transcriptInfo.tab")))) | [
"def",
"_has_sj_index",
"(",
"ref_file",
")",
":",
"return",
"(",
"file_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ref_file",
",",
"\"sjdbInfo.txt\"",
")",
")",
"and",
"(",
"file_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ref_file",
"... | this file won't exist if we can do on the fly splice junction indexing | [
"this",
"file",
"won",
"t",
"exist",
"if",
"we",
"can",
"do",
"on",
"the",
"fly",
"splice",
"junction",
"indexing"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L132-L135 | train | 218,816 |
bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | remap_index_fn | def remap_index_fn(ref_file):
"""Map sequence references to equivalent star indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star") | python | def remap_index_fn(ref_file):
"""Map sequence references to equivalent star indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star") | [
"def",
"remap_index_fn",
"(",
"ref_file",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"ref_file",
")",
")",
",",
"\"star\"",
")"
] | Map sequence references to equivalent star indexes | [
"Map",
"sequence",
"references",
"to",
"equivalent",
"star",
"indexes"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L171-L174 | train | 218,817 |
bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | index | def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir | python | def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir | [
"def",
"index",
"(",
"ref_file",
",",
"out_dir",
",",
"data",
")",
":",
"(",
"ref_dir",
",",
"local_file",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"ref_file",
")",
"gtf_file",
"=",
"dd",
".",
"get_gtf_file",
"(",
"data",
")",
"if",
"not",
"... | Create a STAR index in the defined reference directory. | [
"Create",
"a",
"STAR",
"index",
"in",
"the",
"defined",
"reference",
"directory",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L176-L193 | train | 218,818 |
bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | get_splicejunction_file | def get_splicejunction_file(out_dir, data):
"""
locate the splicejunction file starting from the alignment directory
"""
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, "{0}SJ.out.tab").format(samplename)
if file_exists(sjfile):
return sjfile
else:
return None | python | def get_splicejunction_file(out_dir, data):
"""
locate the splicejunction file starting from the alignment directory
"""
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, "{0}SJ.out.tab").format(samplename)
if file_exists(sjfile):
return sjfile
else:
return None | [
"def",
"get_splicejunction_file",
"(",
"out_dir",
",",
"data",
")",
":",
"samplename",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"sjfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"os",
".",
"pardir",
",",
"\"{0}SJ.out.tab\"",
... | locate the splicejunction file starting from the alignment directory | [
"locate",
"the",
"splicejunction",
"file",
"starting",
"from",
"the",
"alignment",
"directory"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L207-L216 | train | 218,819 |
bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | junction2bed | def junction2bed(junction_file):
"""
reformat the STAR junction file to BED3 format, one end of the splice junction per line
"""
base, _ = os.path.splitext(junction_file)
out_file = base + "-minimized.bed"
if file_exists(out_file):
return out_file
if not file_exists(junction_file):
return None
with file_transaction(out_file) as tx_out_file:
with open(junction_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
tokens = line.split()
chrom, sj1, sj2 = tokens[0:3]
if int(sj1) > int(sj2):
tmp = sj1
sj1 = sj2
sj2 = tmp
out_handle.write("\t".join([chrom, sj1, sj1]) + "\n")
out_handle.write("\t".join([chrom, sj2, sj2]) + "\n")
minimize = bed.minimize(tx_out_file)
minimize.saveas(tx_out_file)
return out_file | python | def junction2bed(junction_file):
"""
reformat the STAR junction file to BED3 format, one end of the splice junction per line
"""
base, _ = os.path.splitext(junction_file)
out_file = base + "-minimized.bed"
if file_exists(out_file):
return out_file
if not file_exists(junction_file):
return None
with file_transaction(out_file) as tx_out_file:
with open(junction_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
tokens = line.split()
chrom, sj1, sj2 = tokens[0:3]
if int(sj1) > int(sj2):
tmp = sj1
sj1 = sj2
sj2 = tmp
out_handle.write("\t".join([chrom, sj1, sj1]) + "\n")
out_handle.write("\t".join([chrom, sj2, sj2]) + "\n")
minimize = bed.minimize(tx_out_file)
minimize.saveas(tx_out_file)
return out_file | [
"def",
"junction2bed",
"(",
"junction_file",
")",
":",
"base",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"junction_file",
")",
"out_file",
"=",
"base",
"+",
"\"-minimized.bed\"",
"if",
"file_exists",
"(",
"out_file",
")",
":",
"return",
"out_... | reformat the STAR junction file to BED3 format, one end of the splice junction per line | [
"reformat",
"the",
"STAR",
"junction",
"file",
"to",
"BED3",
"format",
"one",
"end",
"of",
"the",
"splice",
"junction",
"per",
"line"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L218-L242 | train | 218,820 |
bcbio/bcbio-nextgen | bcbio/hla/optitype.py | run | def run(data):
"""HLA typing with OptiType, parsing output from called genotype files.
"""
hlas = []
for hla_fq in tz.get_in(["hla", "fastq"], data, []):
hla_type = re.search("[.-](?P<hlatype>HLA-[\w-]+).fq", hla_fq).group("hlatype")
if hla_type in SUPPORTED_HLAS:
if utils.file_exists(hla_fq):
hlas.append((hla_type, hla_fq))
if len(hlas) > 0:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla",
"OptiType-HLA-A_B_C"))
# When running UMIs and hla typing we want to pick the original fastqs
if len(hlas) > len(SUPPORTED_HLAS):
hlas = [x for x in hlas if os.path.basename(x[1]).find("-cumi") == -1]
if len(hlas) == len(SUPPORTED_HLAS):
hla_fq = combine_hla_fqs(hlas, out_dir + "-input.fq", data)
if utils.file_exists(hla_fq):
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
if len(out_file) > 0:
out_file = out_file[0]
else:
out_file = _call_hla(hla_fq, out_dir, data)
out_file = _prepare_calls(out_file, os.path.dirname(out_dir), data)
data["hla"].update({"call_file": out_file,
"hlacaller": "optitype"})
return data | python | def run(data):
"""HLA typing with OptiType, parsing output from called genotype files.
"""
hlas = []
for hla_fq in tz.get_in(["hla", "fastq"], data, []):
hla_type = re.search("[.-](?P<hlatype>HLA-[\w-]+).fq", hla_fq).group("hlatype")
if hla_type in SUPPORTED_HLAS:
if utils.file_exists(hla_fq):
hlas.append((hla_type, hla_fq))
if len(hlas) > 0:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla",
"OptiType-HLA-A_B_C"))
# When running UMIs and hla typing we want to pick the original fastqs
if len(hlas) > len(SUPPORTED_HLAS):
hlas = [x for x in hlas if os.path.basename(x[1]).find("-cumi") == -1]
if len(hlas) == len(SUPPORTED_HLAS):
hla_fq = combine_hla_fqs(hlas, out_dir + "-input.fq", data)
if utils.file_exists(hla_fq):
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
if len(out_file) > 0:
out_file = out_file[0]
else:
out_file = _call_hla(hla_fq, out_dir, data)
out_file = _prepare_calls(out_file, os.path.dirname(out_dir), data)
data["hla"].update({"call_file": out_file,
"hlacaller": "optitype"})
return data | [
"def",
"run",
"(",
"data",
")",
":",
"hlas",
"=",
"[",
"]",
"for",
"hla_fq",
"in",
"tz",
".",
"get_in",
"(",
"[",
"\"hla\"",
",",
"\"fastq\"",
"]",
",",
"data",
",",
"[",
"]",
")",
":",
"hla_type",
"=",
"re",
".",
"search",
"(",
"\"[.-](?P<hlatyp... | HLA typing with OptiType, parsing output from called genotype files. | [
"HLA",
"typing",
"with",
"OptiType",
"parsing",
"output",
"from",
"called",
"genotype",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L23-L50 | train | 218,821 |
bcbio/bcbio-nextgen | bcbio/hla/optitype.py | combine_hla_fqs | def combine_hla_fqs(hlas, out_file, data):
"""OptiType performs best on a combination of all extracted HLAs.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for hla_type, hla_fq in hlas:
if utils.file_exists(hla_fq):
with open(hla_fq) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
return out_file | python | def combine_hla_fqs(hlas, out_file, data):
"""OptiType performs best on a combination of all extracted HLAs.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for hla_type, hla_fq in hlas:
if utils.file_exists(hla_fq):
with open(hla_fq) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
return out_file | [
"def",
"combine_hla_fqs",
"(",
"hlas",
",",
"out_file",
",",
"data",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"with",
"open",
"... | OptiType performs best on a combination of all extracted HLAs. | [
"OptiType",
"performs",
"best",
"on",
"a",
"combination",
"of",
"all",
"extracted",
"HLAs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L52-L62 | train | 218,822 |
bcbio/bcbio-nextgen | bcbio/hla/optitype.py | _prepare_calls | def _prepare_calls(result_file, out_dir, data):
"""Write summary file of results of HLA typing by allele.
"""
sample = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s-optitype.csv" % (sample))
if not utils.file_uptodate(out_file, result_file):
hla_truth = bwakit.get_hla_truthset(data)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
allele_info = _parse_result_file(result_file)
if len(allele_info) == 1:
writer.writerow(["sample", "locus", "alleles", "expected", "validates"])
else:
writer.writerow(["sample", "local", "index", "alleles", "score"])
for j, (alleles, score) in enumerate(allele_info):
for hla_locus, call_alleles in alleles:
truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
if len(allele_info) == 1:
writer.writerow([sample, hla_locus,
";".join(call_alleles), ";".join(truth_alleles),
bwakit.matches_truth(call_alleles, truth_alleles, data)])
else:
writer.writerow([sample, hla_locus, j, ";".join(call_alleles), score])
return out_file | python | def _prepare_calls(result_file, out_dir, data):
"""Write summary file of results of HLA typing by allele.
"""
sample = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s-optitype.csv" % (sample))
if not utils.file_uptodate(out_file, result_file):
hla_truth = bwakit.get_hla_truthset(data)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
allele_info = _parse_result_file(result_file)
if len(allele_info) == 1:
writer.writerow(["sample", "locus", "alleles", "expected", "validates"])
else:
writer.writerow(["sample", "local", "index", "alleles", "score"])
for j, (alleles, score) in enumerate(allele_info):
for hla_locus, call_alleles in alleles:
truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
if len(allele_info) == 1:
writer.writerow([sample, hla_locus,
";".join(call_alleles), ";".join(truth_alleles),
bwakit.matches_truth(call_alleles, truth_alleles, data)])
else:
writer.writerow([sample, hla_locus, j, ";".join(call_alleles), score])
return out_file | [
"def",
"_prepare_calls",
"(",
"result_file",
",",
"out_dir",
",",
"data",
")",
":",
"sample",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"%s-optitype.csv\"",
"%",
"(",
"sam... | Write summary file of results of HLA typing by allele. | [
"Write",
"summary",
"file",
"of",
"results",
"of",
"HLA",
"typing",
"by",
"allele",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L64-L88 | train | 218,823 |
bcbio/bcbio-nextgen | bcbio/hla/optitype.py | _call_hla | def _call_hla(hla_fq, out_dir, data):
"""Run OptiType HLA calling for a specific fastq input.
"""
bin_dir = os.path.dirname(os.path.realpath(sys.executable))
out_dir = utils.safe_makedir(out_dir)
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
config_file = os.path.join(tx_out_dir, "config.ini")
with open(config_file, "w") as out_handle:
razers3 = os.path.join(bin_dir, "razers3")
if not os.path.exists(razers3):
raise ValueError("Could not find razers3 executable at %s" % (razers3))
out_handle.write(CONFIG_TMPL.format(razers3=razers3, cores=dd.get_cores(data)))
resources = config_utils.get_resources("optitype", data["config"])
if resources.get("options"):
opts = " ".join([str(x) for x in resources["options"]])
else:
opts = ""
cmd = ("OptiTypePipeline.py -v --dna {opts} -o {tx_out_dir} "
"-i {hla_fq} -c {config_file}")
do.run(cmd.format(**locals()), "HLA typing with OptiType")
for outf in os.listdir(tx_out_dir):
shutil.move(os.path.join(tx_out_dir, outf), os.path.join(out_dir, outf))
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
assert len(out_file) == 1, "Expected one result file for OptiType, found %s" % out_file
return out_file[0] | python | def _call_hla(hla_fq, out_dir, data):
"""Run OptiType HLA calling for a specific fastq input.
"""
bin_dir = os.path.dirname(os.path.realpath(sys.executable))
out_dir = utils.safe_makedir(out_dir)
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
config_file = os.path.join(tx_out_dir, "config.ini")
with open(config_file, "w") as out_handle:
razers3 = os.path.join(bin_dir, "razers3")
if not os.path.exists(razers3):
raise ValueError("Could not find razers3 executable at %s" % (razers3))
out_handle.write(CONFIG_TMPL.format(razers3=razers3, cores=dd.get_cores(data)))
resources = config_utils.get_resources("optitype", data["config"])
if resources.get("options"):
opts = " ".join([str(x) for x in resources["options"]])
else:
opts = ""
cmd = ("OptiTypePipeline.py -v --dna {opts} -o {tx_out_dir} "
"-i {hla_fq} -c {config_file}")
do.run(cmd.format(**locals()), "HLA typing with OptiType")
for outf in os.listdir(tx_out_dir):
shutil.move(os.path.join(tx_out_dir, outf), os.path.join(out_dir, outf))
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
assert len(out_file) == 1, "Expected one result file for OptiType, found %s" % out_file
return out_file[0] | [
"def",
"_call_hla",
"(",
"hla_fq",
",",
"out_dir",
",",
"data",
")",
":",
"bin_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"sys",
".",
"executable",
")",
")",
"out_dir",
"=",
"utils",
".",
"safe_makedir... | Run OptiType HLA calling for a specific fastq input. | [
"Run",
"OptiType",
"HLA",
"calling",
"for",
"a",
"specific",
"fastq",
"input",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L108-L132 | train | 218,824 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/chromhacks.py | is_autosomal | def is_autosomal(chrom):
"""Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22
"""
try:
int(chrom)
return True
except ValueError:
try:
int(str(chrom.lower().replace("chr", "").replace("_", "").replace("-", "")))
return True
except ValueError:
return False | python | def is_autosomal(chrom):
"""Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22
"""
try:
int(chrom)
return True
except ValueError:
try:
int(str(chrom.lower().replace("chr", "").replace("_", "").replace("-", "")))
return True
except ValueError:
return False | [
"def",
"is_autosomal",
"(",
"chrom",
")",
":",
"try",
":",
"int",
"(",
"chrom",
")",
"return",
"True",
"except",
"ValueError",
":",
"try",
":",
"int",
"(",
"str",
"(",
"chrom",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\"chr\"",
",",
"\"\"",
")... | Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22 | [
"Keep",
"chromosomes",
"that",
"are",
"a",
"digit",
"1",
"-",
"22",
"or",
"chr",
"prefixed",
"digit",
"chr1",
"-",
"chr22"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/chromhacks.py#L11-L22 | train | 218,825 |
bcbio/bcbio-nextgen | bcbio/qc/variant.py | _bcftools_stats | def _bcftools_stats(data, out_dir, vcf_file_key=None, germline=False):
"""Run bcftools stats.
"""
vcinfo = get_active_vcinfo(data)
if vcinfo:
out_dir = utils.safe_makedir(out_dir)
vcf_file = vcinfo[vcf_file_key or "vrn_file"]
if dd.get_jointcaller(data) or "gvcf" in dd.get_tools_on(data):
opts = ""
else:
opts = "-f PASS,."
name = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s_bcftools_stats%s.txt" % (name, ("_germline" if germline else "")))
bcftools = config_utils.get_program("bcftools", data["config"])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
orig_out_file = os.path.join(os.path.dirname(tx_out_file), "orig_%s" % os.path.basename(tx_out_file))
cmd = ("{bcftools} stats -s {name} {opts} {vcf_file} > {orig_out_file}")
do.run(cmd.format(**locals()), "bcftools stats %s" % name)
with open(orig_out_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("ID\t"):
parts = line.split("\t")
parts[-1] = "%s\n" % name
line = "\t".join(parts)
out_handle.write(line)
return out_file | python | def _bcftools_stats(data, out_dir, vcf_file_key=None, germline=False):
"""Run bcftools stats.
"""
vcinfo = get_active_vcinfo(data)
if vcinfo:
out_dir = utils.safe_makedir(out_dir)
vcf_file = vcinfo[vcf_file_key or "vrn_file"]
if dd.get_jointcaller(data) or "gvcf" in dd.get_tools_on(data):
opts = ""
else:
opts = "-f PASS,."
name = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s_bcftools_stats%s.txt" % (name, ("_germline" if germline else "")))
bcftools = config_utils.get_program("bcftools", data["config"])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
orig_out_file = os.path.join(os.path.dirname(tx_out_file), "orig_%s" % os.path.basename(tx_out_file))
cmd = ("{bcftools} stats -s {name} {opts} {vcf_file} > {orig_out_file}")
do.run(cmd.format(**locals()), "bcftools stats %s" % name)
with open(orig_out_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("ID\t"):
parts = line.split("\t")
parts[-1] = "%s\n" % name
line = "\t".join(parts)
out_handle.write(line)
return out_file | [
"def",
"_bcftools_stats",
"(",
"data",
",",
"out_dir",
",",
"vcf_file_key",
"=",
"None",
",",
"germline",
"=",
"False",
")",
":",
"vcinfo",
"=",
"get_active_vcinfo",
"(",
"data",
")",
"if",
"vcinfo",
":",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
... | Run bcftools stats. | [
"Run",
"bcftools",
"stats",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L50-L77 | train | 218,826 |
bcbio/bcbio-nextgen | bcbio/qc/variant.py | _add_filename_details | def _add_filename_details(full_f):
"""Add variant callers and germline information standard CWL filenames.
This is an ugly way of working around not having metadata with calls.
"""
out = {"vrn_file": full_f}
f = os.path.basename(full_f)
for vc in list(genotype.get_variantcallers().keys()) + ["ensemble"]:
if f.find("-%s.vcf" % vc) > 0:
out["variantcaller"] = vc
if f.find("-germline-") >= 0:
out["germline"] = full_f
return out | python | def _add_filename_details(full_f):
"""Add variant callers and germline information standard CWL filenames.
This is an ugly way of working around not having metadata with calls.
"""
out = {"vrn_file": full_f}
f = os.path.basename(full_f)
for vc in list(genotype.get_variantcallers().keys()) + ["ensemble"]:
if f.find("-%s.vcf" % vc) > 0:
out["variantcaller"] = vc
if f.find("-germline-") >= 0:
out["germline"] = full_f
return out | [
"def",
"_add_filename_details",
"(",
"full_f",
")",
":",
"out",
"=",
"{",
"\"vrn_file\"",
":",
"full_f",
"}",
"f",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"full_f",
")",
"for",
"vc",
"in",
"list",
"(",
"genotype",
".",
"get_variantcallers",
"(",
... | Add variant callers and germline information standard CWL filenames.
This is an ugly way of working around not having metadata with calls. | [
"Add",
"variant",
"callers",
"and",
"germline",
"information",
"standard",
"CWL",
"filenames",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L79-L91 | train | 218,827 |
bcbio/bcbio-nextgen | bcbio/qc/variant.py | _get_variants | def _get_variants(data):
"""Retrieve variants from CWL and standard inputs for organizing variants.
"""
active_vs = []
if "variants" in data:
variants = data["variants"]
# CWL based list of variants
if isinstance(variants, dict) and "samples" in variants:
variants = variants["samples"]
for v in variants:
# CWL -- a single variant file
if isinstance(v, six.string_types) and os.path.exists(v):
active_vs.append(_add_filename_details(v))
elif (isinstance(v, (list, tuple)) and len(v) > 0 and
isinstance(v[0], six.string_types) and os.path.exists(v[0])):
for subv in v:
active_vs.append(_add_filename_details(subv))
elif isinstance(v, dict):
if v.get("vrn_file"):
active_vs.append(v)
elif v.get("population"):
vrnfile = v.get("population").get("vcf")
active_vs.append(_add_filename_details(vrnfile))
elif v.get("vcf"):
active_vs.append(_add_filename_details(v.get("vcf")))
return active_vs | python | def _get_variants(data):
"""Retrieve variants from CWL and standard inputs for organizing variants.
"""
active_vs = []
if "variants" in data:
variants = data["variants"]
# CWL based list of variants
if isinstance(variants, dict) and "samples" in variants:
variants = variants["samples"]
for v in variants:
# CWL -- a single variant file
if isinstance(v, six.string_types) and os.path.exists(v):
active_vs.append(_add_filename_details(v))
elif (isinstance(v, (list, tuple)) and len(v) > 0 and
isinstance(v[0], six.string_types) and os.path.exists(v[0])):
for subv in v:
active_vs.append(_add_filename_details(subv))
elif isinstance(v, dict):
if v.get("vrn_file"):
active_vs.append(v)
elif v.get("population"):
vrnfile = v.get("population").get("vcf")
active_vs.append(_add_filename_details(vrnfile))
elif v.get("vcf"):
active_vs.append(_add_filename_details(v.get("vcf")))
return active_vs | [
"def",
"_get_variants",
"(",
"data",
")",
":",
"active_vs",
"=",
"[",
"]",
"if",
"\"variants\"",
"in",
"data",
":",
"variants",
"=",
"data",
"[",
"\"variants\"",
"]",
"# CWL based list of variants",
"if",
"isinstance",
"(",
"variants",
",",
"dict",
")",
"and... | Retrieve variants from CWL and standard inputs for organizing variants. | [
"Retrieve",
"variants",
"from",
"CWL",
"and",
"standard",
"inputs",
"for",
"organizing",
"variants",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L93-L118 | train | 218,828 |
bcbio/bcbio-nextgen | bcbio/qc/variant.py | get_active_vcinfo | def get_active_vcinfo(data, use_ensemble=True):
"""Use first caller if ensemble is not active
"""
active_vs = _get_variants(data)
if len(active_vs) > 0:
e_active_vs = []
if use_ensemble:
e_active_vs = [v for v in active_vs if v.get("variantcaller") == "ensemble"]
if len(e_active_vs) == 0:
e_active_vs = [v for v in active_vs if v.get("variantcaller") != "ensemble"]
if len(e_active_vs) > 0:
return e_active_vs[0] | python | def get_active_vcinfo(data, use_ensemble=True):
"""Use first caller if ensemble is not active
"""
active_vs = _get_variants(data)
if len(active_vs) > 0:
e_active_vs = []
if use_ensemble:
e_active_vs = [v for v in active_vs if v.get("variantcaller") == "ensemble"]
if len(e_active_vs) == 0:
e_active_vs = [v for v in active_vs if v.get("variantcaller") != "ensemble"]
if len(e_active_vs) > 0:
return e_active_vs[0] | [
"def",
"get_active_vcinfo",
"(",
"data",
",",
"use_ensemble",
"=",
"True",
")",
":",
"active_vs",
"=",
"_get_variants",
"(",
"data",
")",
"if",
"len",
"(",
"active_vs",
")",
">",
"0",
":",
"e_active_vs",
"=",
"[",
"]",
"if",
"use_ensemble",
":",
"e_activ... | Use first caller if ensemble is not active | [
"Use",
"first",
"caller",
"if",
"ensemble",
"is",
"not",
"active"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L120-L131 | train | 218,829 |
bcbio/bcbio-nextgen | bcbio/qc/variant.py | extract_germline_vcinfo | def extract_germline_vcinfo(data, out_dir):
"""Extract germline VCFs from existing tumor inputs.
"""
supported_germline = set(["vardict", "octopus", "freebayes"])
if dd.get_phenotype(data) in ["tumor"]:
for v in _get_variants(data):
if v.get("variantcaller") in supported_germline:
if v.get("germline"):
return v
else:
d = utils.deepish_copy(data)
d["vrn_file"] = v["vrn_file"]
gd = germline.extract(d, [d], out_dir)
v["germline"] = gd["vrn_file_plus"]["germline"]
return v | python | def extract_germline_vcinfo(data, out_dir):
"""Extract germline VCFs from existing tumor inputs.
"""
supported_germline = set(["vardict", "octopus", "freebayes"])
if dd.get_phenotype(data) in ["tumor"]:
for v in _get_variants(data):
if v.get("variantcaller") in supported_germline:
if v.get("germline"):
return v
else:
d = utils.deepish_copy(data)
d["vrn_file"] = v["vrn_file"]
gd = germline.extract(d, [d], out_dir)
v["germline"] = gd["vrn_file_plus"]["germline"]
return v | [
"def",
"extract_germline_vcinfo",
"(",
"data",
",",
"out_dir",
")",
":",
"supported_germline",
"=",
"set",
"(",
"[",
"\"vardict\"",
",",
"\"octopus\"",
",",
"\"freebayes\"",
"]",
")",
"if",
"dd",
".",
"get_phenotype",
"(",
"data",
")",
"in",
"[",
"\"tumor\""... | Extract germline VCFs from existing tumor inputs. | [
"Extract",
"germline",
"VCFs",
"from",
"existing",
"tumor",
"inputs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L133-L147 | train | 218,830 |
bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | merge_bam_files | def merge_bam_files(bam_files, work_dir, data, out_file=None, batch=None):
"""Merge multiple BAM files from a sample into a single BAM for processing.
Checks system open file limit and merges in batches if necessary to avoid
file handle limits.
"""
out_file = _merge_outfile_fname(out_file, bam_files, work_dir, batch)
if not utils.file_exists(out_file):
if len(bam_files) == 1 and bam.bam_already_sorted(bam_files[0], data["config"], "coordinate"):
with file_transaction(data, out_file) as tx_out_file:
_create_merge_filelist(bam_files, tx_out_file, data["config"])
out_file = bam_files[0]
samtools = config_utils.get_program("samtools", data["config"])
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
else:
with tx_tmpdir(data) as tmpdir:
with utils.chdir(tmpdir):
with file_transaction(data, out_file) as tx_out_file:
tx_bam_file_list = _create_merge_filelist(bam_files, tx_out_file, data["config"])
samtools = config_utils.get_program("samtools", data["config"])
resources = config_utils.get_resources("samtools", data["config"])
num_cores = dd.get_num_cores(data)
# Aim for 3.5Gb/core memory for BAM merging
num_cores = config_utils.adjust_cores_to_mb_target(
3500, resources.get("memory", "2G"), num_cores)
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
2, "decrease").upper()
if dd.get_mark_duplicates(data):
cmd = _biobambam_merge_dedup_maxcov(data)
else:
cmd = _biobambam_merge_maxcov(data)
do.run(cmd.format(**locals()), "Merge bam files to %s" % os.path.basename(out_file),
None)
do.run('{} quickcheck -v {}'.format(samtools, tx_out_file),
"Check for valid merged BAM")
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
_finalize_merge(out_file, bam_files, data["config"])
bam.index(out_file, data["config"])
return out_file | python | def merge_bam_files(bam_files, work_dir, data, out_file=None, batch=None):
"""Merge multiple BAM files from a sample into a single BAM for processing.
Checks system open file limit and merges in batches if necessary to avoid
file handle limits.
"""
out_file = _merge_outfile_fname(out_file, bam_files, work_dir, batch)
if not utils.file_exists(out_file):
if len(bam_files) == 1 and bam.bam_already_sorted(bam_files[0], data["config"], "coordinate"):
with file_transaction(data, out_file) as tx_out_file:
_create_merge_filelist(bam_files, tx_out_file, data["config"])
out_file = bam_files[0]
samtools = config_utils.get_program("samtools", data["config"])
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
else:
with tx_tmpdir(data) as tmpdir:
with utils.chdir(tmpdir):
with file_transaction(data, out_file) as tx_out_file:
tx_bam_file_list = _create_merge_filelist(bam_files, tx_out_file, data["config"])
samtools = config_utils.get_program("samtools", data["config"])
resources = config_utils.get_resources("samtools", data["config"])
num_cores = dd.get_num_cores(data)
# Aim for 3.5Gb/core memory for BAM merging
num_cores = config_utils.adjust_cores_to_mb_target(
3500, resources.get("memory", "2G"), num_cores)
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
2, "decrease").upper()
if dd.get_mark_duplicates(data):
cmd = _biobambam_merge_dedup_maxcov(data)
else:
cmd = _biobambam_merge_maxcov(data)
do.run(cmd.format(**locals()), "Merge bam files to %s" % os.path.basename(out_file),
None)
do.run('{} quickcheck -v {}'.format(samtools, tx_out_file),
"Check for valid merged BAM")
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
_finalize_merge(out_file, bam_files, data["config"])
bam.index(out_file, data["config"])
return out_file | [
"def",
"merge_bam_files",
"(",
"bam_files",
",",
"work_dir",
",",
"data",
",",
"out_file",
"=",
"None",
",",
"batch",
"=",
"None",
")",
":",
"out_file",
"=",
"_merge_outfile_fname",
"(",
"out_file",
",",
"bam_files",
",",
"work_dir",
",",
"batch",
")",
"if... | Merge multiple BAM files from a sample into a single BAM for processing.
Checks system open file limit and merges in batches if necessary to avoid
file handle limits. | [
"Merge",
"multiple",
"BAM",
"files",
"from",
"a",
"sample",
"into",
"a",
"single",
"BAM",
"for",
"processing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L39-L79 | train | 218,831 |
bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | _create_merge_filelist | def _create_merge_filelist(bam_files, base_file, config):
"""Create list of input files for merge, ensuring all files are valid.
"""
bam_file_list = "%s.list" % os.path.splitext(base_file)[0]
samtools = config_utils.get_program("samtools", config)
with open(bam_file_list, "w") as out_handle:
for f in sorted(bam_files):
do.run('{} quickcheck -v {}'.format(samtools, f),
"Ensure integrity of input merge BAM files")
out_handle.write("%s\n" % f)
return bam_file_list | python | def _create_merge_filelist(bam_files, base_file, config):
"""Create list of input files for merge, ensuring all files are valid.
"""
bam_file_list = "%s.list" % os.path.splitext(base_file)[0]
samtools = config_utils.get_program("samtools", config)
with open(bam_file_list, "w") as out_handle:
for f in sorted(bam_files):
do.run('{} quickcheck -v {}'.format(samtools, f),
"Ensure integrity of input merge BAM files")
out_handle.write("%s\n" % f)
return bam_file_list | [
"def",
"_create_merge_filelist",
"(",
"bam_files",
",",
"base_file",
",",
"config",
")",
":",
"bam_file_list",
"=",
"\"%s.list\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"base_file",
")",
"[",
"0",
"]",
"samtools",
"=",
"config_utils",
".",
"get_progr... | Create list of input files for merge, ensuring all files are valid. | [
"Create",
"list",
"of",
"input",
"files",
"for",
"merge",
"ensuring",
"all",
"files",
"are",
"valid",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L81-L91 | train | 218,832 |
bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | _merge_outfile_fname | def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
"""Derive correct name of BAM file based on batching.
"""
if out_file is None:
out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
if batch is not None:
base, ext = os.path.splitext(out_file)
out_file = "%s-b%s%s" % (base, batch, ext)
return out_file | python | def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
"""Derive correct name of BAM file based on batching.
"""
if out_file is None:
out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
if batch is not None:
base, ext = os.path.splitext(out_file)
out_file = "%s-b%s%s" % (base, batch, ext)
return out_file | [
"def",
"_merge_outfile_fname",
"(",
"out_file",
",",
"bam_files",
",",
"work_dir",
",",
"batch",
")",
":",
"if",
"out_file",
"is",
"None",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"os",
".",
"path",
".",
"basename",
"... | Derive correct name of BAM file based on batching. | [
"Derive",
"correct",
"name",
"of",
"BAM",
"file",
"based",
"on",
"batching",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L93-L101 | train | 218,833 |
bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | _finalize_merge | def _finalize_merge(out_file, bam_files, config):
"""Handle indexes and cleanups of merged BAM and input files.
"""
# Ensure timestamps are up to date on output file and index
# Works around issues on systems with inconsistent times
for ext in ["", ".bai"]:
if os.path.exists(out_file + ext):
subprocess.check_call(["touch", out_file + ext])
for b in bam_files:
utils.save_diskspace(b, "BAM merged to %s" % out_file, config) | python | def _finalize_merge(out_file, bam_files, config):
"""Handle indexes and cleanups of merged BAM and input files.
"""
# Ensure timestamps are up to date on output file and index
# Works around issues on systems with inconsistent times
for ext in ["", ".bai"]:
if os.path.exists(out_file + ext):
subprocess.check_call(["touch", out_file + ext])
for b in bam_files:
utils.save_diskspace(b, "BAM merged to %s" % out_file, config) | [
"def",
"_finalize_merge",
"(",
"out_file",
",",
"bam_files",
",",
"config",
")",
":",
"# Ensure timestamps are up to date on output file and index",
"# Works around issues on systems with inconsistent times",
"for",
"ext",
"in",
"[",
"\"\"",
",",
"\".bai\"",
"]",
":",
"if",... | Handle indexes and cleanups of merged BAM and input files. | [
"Handle",
"indexes",
"and",
"cleanups",
"of",
"merged",
"BAM",
"and",
"input",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L103-L112 | train | 218,834 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _cwl_workflow_template | def _cwl_workflow_template(inputs, top_level=False):
"""Retrieve CWL inputs shared amongst different workflows.
"""
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []} | python | def _cwl_workflow_template(inputs, top_level=False):
"""Retrieve CWL inputs shared amongst different workflows.
"""
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []} | [
"def",
"_cwl_workflow_template",
"(",
"inputs",
",",
"top_level",
"=",
"False",
")",
":",
"ready_inputs",
"=",
"[",
"]",
"for",
"inp",
"in",
"inputs",
":",
"cur_inp",
"=",
"copy",
".",
"deepcopy",
"(",
"inp",
")",
"for",
"attr",
"in",
"[",
"\"source\"",
... | Retrieve CWL inputs shared amongst different workflows. | [
"Retrieve",
"CWL",
"inputs",
"shared",
"amongst",
"different",
"workflows",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L42-L63 | train | 218,835 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_disk_estimates | def _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
cur_remotes, no_files):
"""Retrieve disk usage estimates as CWL ResourceRequirement and hint.
Disk specification for temporary files and outputs.
Also optionally includes disk input estimates as a custom hint for
platforms which need to stage these and don't pre-estimate these when
allocating machine sizes.
"""
tmp_disk, out_disk, in_disk = 0, 0, 0
if file_estimates:
if disk:
for key, multiplier in disk.items():
if key in file_estimates:
out_disk += int(multiplier * file_estimates[key])
for inp in inputs:
scale = 2.0 if inp.get("type") == "array" else 1.0
# Allocating all samples, could remove for `to_rec` when we ensure we
# don't have to stage. Currently dnanexus stages everything so need to consider
if parallel in ["multi-combined", "multi-batch"] and "dnanexus" in cur_remotes:
scale *= (len(samples))
if workflow.is_cwl_record(inp):
for f in _get_record_fields(inp):
if f["name"] in file_estimates:
in_disk += file_estimates[f["name"]] * scale
elif inp["id"] in file_estimates:
in_disk += file_estimates[inp["id"]] * scale
# Round total estimates to integer, assign extra half to temp space
# It's not entirely clear how different runners interpret this
tmp_disk = int(math.ceil(out_disk * 0.5))
out_disk = int(math.ceil(out_disk))
bcbio_docker_disk = (10 if cur_remotes else 1) * 1024 # Minimum requirements for bcbio Docker image
disk_hint = {"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk}
# Skip input disk for steps which require only transformation (and thus no staging)
if no_files:
in_disk = 0
# Avoid accidentally flagging as no staging if we don't know sizes of expected inputs
elif in_disk == 0:
in_disk = 1
input_hint = {"class": "dx:InputResourceRequirement", "indirMin": int(math.ceil(in_disk))}
return disk_hint, input_hint | python | def _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
cur_remotes, no_files):
"""Retrieve disk usage estimates as CWL ResourceRequirement and hint.
Disk specification for temporary files and outputs.
Also optionally includes disk input estimates as a custom hint for
platforms which need to stage these and don't pre-estimate these when
allocating machine sizes.
"""
tmp_disk, out_disk, in_disk = 0, 0, 0
if file_estimates:
if disk:
for key, multiplier in disk.items():
if key in file_estimates:
out_disk += int(multiplier * file_estimates[key])
for inp in inputs:
scale = 2.0 if inp.get("type") == "array" else 1.0
# Allocating all samples, could remove for `to_rec` when we ensure we
# don't have to stage. Currently dnanexus stages everything so need to consider
if parallel in ["multi-combined", "multi-batch"] and "dnanexus" in cur_remotes:
scale *= (len(samples))
if workflow.is_cwl_record(inp):
for f in _get_record_fields(inp):
if f["name"] in file_estimates:
in_disk += file_estimates[f["name"]] * scale
elif inp["id"] in file_estimates:
in_disk += file_estimates[inp["id"]] * scale
# Round total estimates to integer, assign extra half to temp space
# It's not entirely clear how different runners interpret this
tmp_disk = int(math.ceil(out_disk * 0.5))
out_disk = int(math.ceil(out_disk))
bcbio_docker_disk = (10 if cur_remotes else 1) * 1024 # Minimum requirements for bcbio Docker image
disk_hint = {"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk}
# Skip input disk for steps which require only transformation (and thus no staging)
if no_files:
in_disk = 0
# Avoid accidentally flagging as no staging if we don't know sizes of expected inputs
elif in_disk == 0:
in_disk = 1
input_hint = {"class": "dx:InputResourceRequirement", "indirMin": int(math.ceil(in_disk))}
return disk_hint, input_hint | [
"def",
"_get_disk_estimates",
"(",
"name",
",",
"parallel",
",",
"inputs",
",",
"file_estimates",
",",
"samples",
",",
"disk",
",",
"cur_remotes",
",",
"no_files",
")",
":",
"tmp_disk",
",",
"out_disk",
",",
"in_disk",
"=",
"0",
",",
"0",
",",
"0",
"if",... | Retrieve disk usage estimates as CWL ResourceRequirement and hint.
Disk specification for temporary files and outputs.
Also optionally includes disk input estimates as a custom hint for
platforms which need to stage these and don't pre-estimate these when
allocating machine sizes. | [
"Retrieve",
"disk",
"usage",
"estimates",
"as",
"CWL",
"ResourceRequirement",
"and",
"hint",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L65-L107 | train | 218,836 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _add_current_quay_tag | def _add_current_quay_tag(repo, container_tags):
"""Lookup the current quay tag for the repository, adding to repo string.
Enables generation of CWL explicitly tied to revisions.
"""
if ':' in repo:
return repo, container_tags
try:
latest_tag = container_tags[repo]
except KeyError:
repo_id = repo[repo.find('/') + 1:]
tags = requests.request("GET", "https://quay.io/api/v1/repository/" + repo_id).json()["tags"]
latest_tag = None
latest_modified = None
for tag, info in tags.items():
if latest_tag:
if (dateutil.parser.parse(info['last_modified']) > dateutil.parser.parse(latest_modified)
and tag != 'latest'):
latest_modified = info['last_modified']
latest_tag = tag
else:
latest_modified = info['last_modified']
latest_tag = tag
container_tags[repo] = str(latest_tag)
latest_pull = repo + ':' + str(latest_tag)
return latest_pull, container_tags | python | def _add_current_quay_tag(repo, container_tags):
"""Lookup the current quay tag for the repository, adding to repo string.
Enables generation of CWL explicitly tied to revisions.
"""
if ':' in repo:
return repo, container_tags
try:
latest_tag = container_tags[repo]
except KeyError:
repo_id = repo[repo.find('/') + 1:]
tags = requests.request("GET", "https://quay.io/api/v1/repository/" + repo_id).json()["tags"]
latest_tag = None
latest_modified = None
for tag, info in tags.items():
if latest_tag:
if (dateutil.parser.parse(info['last_modified']) > dateutil.parser.parse(latest_modified)
and tag != 'latest'):
latest_modified = info['last_modified']
latest_tag = tag
else:
latest_modified = info['last_modified']
latest_tag = tag
container_tags[repo] = str(latest_tag)
latest_pull = repo + ':' + str(latest_tag)
return latest_pull, container_tags | [
"def",
"_add_current_quay_tag",
"(",
"repo",
",",
"container_tags",
")",
":",
"if",
"':'",
"in",
"repo",
":",
"return",
"repo",
",",
"container_tags",
"try",
":",
"latest_tag",
"=",
"container_tags",
"[",
"repo",
"]",
"except",
"KeyError",
":",
"repo_id",
"=... | Lookup the current quay tag for the repository, adding to repo string.
Enables generation of CWL explicitly tied to revisions. | [
"Lookup",
"the",
"current",
"quay",
"tag",
"for",
"the",
"repository",
"adding",
"to",
"repo",
"string",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L109-L134 | train | 218,837 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _write_expressiontool | def _write_expressiontool(step_dir, name, inputs, outputs, expression, parallel):
"""Create an ExpressionTool output for the given inputs
"""
out_file = os.path.join(step_dir, "%s.cwl" % name)
out = {"class": "ExpressionTool",
"cwlVersion": "v1.0",
"requirements": [{"class": "InlineJavascriptRequirement"}],
"inputs": [],
"outputs": [],
"expression": expression}
out = _add_inputs_to_tool(inputs, out, parallel)
out = _add_outputs_to_tool(outputs, out)
_tool_to_file(out, out_file)
return os.path.join("steps", os.path.basename(out_file)) | python | def _write_expressiontool(step_dir, name, inputs, outputs, expression, parallel):
"""Create an ExpressionTool output for the given inputs
"""
out_file = os.path.join(step_dir, "%s.cwl" % name)
out = {"class": "ExpressionTool",
"cwlVersion": "v1.0",
"requirements": [{"class": "InlineJavascriptRequirement"}],
"inputs": [],
"outputs": [],
"expression": expression}
out = _add_inputs_to_tool(inputs, out, parallel)
out = _add_outputs_to_tool(outputs, out)
_tool_to_file(out, out_file)
return os.path.join("steps", os.path.basename(out_file)) | [
"def",
"_write_expressiontool",
"(",
"step_dir",
",",
"name",
",",
"inputs",
",",
"outputs",
",",
"expression",
",",
"parallel",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir",
",",
"\"%s.cwl\"",
"%",
"name",
")",
"out",
"=",
... | Create an ExpressionTool output for the given inputs | [
"Create",
"an",
"ExpressionTool",
"output",
"for",
"the",
"given",
"inputs"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L208-L221 | train | 218,838 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _clean_record | def _clean_record(rec):
"""Remove secondary files from record fields, which are currently not supported.
To be removed later when secondaryFiles added to records.
"""
if workflow.is_cwl_record(rec):
def _clean_fields(d):
if isinstance(d, dict):
if "fields" in d:
out = []
for f in d["fields"]:
f = utils.deepish_copy(f)
f.pop("secondaryFiles", None)
out.append(f)
d["fields"] = out
return d
else:
out = {}
for k, v in d.items():
out[k] = _clean_fields(v)
return out
else:
return d
return _clean_fields(rec)
else:
return rec | python | def _clean_record(rec):
"""Remove secondary files from record fields, which are currently not supported.
To be removed later when secondaryFiles added to records.
"""
if workflow.is_cwl_record(rec):
def _clean_fields(d):
if isinstance(d, dict):
if "fields" in d:
out = []
for f in d["fields"]:
f = utils.deepish_copy(f)
f.pop("secondaryFiles", None)
out.append(f)
d["fields"] = out
return d
else:
out = {}
for k, v in d.items():
out[k] = _clean_fields(v)
return out
else:
return d
return _clean_fields(rec)
else:
return rec | [
"def",
"_clean_record",
"(",
"rec",
")",
":",
"if",
"workflow",
".",
"is_cwl_record",
"(",
"rec",
")",
":",
"def",
"_clean_fields",
"(",
"d",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
":",
"if",
"\"fields\"",
"in",
"d",
":",
"out",
... | Remove secondary files from record fields, which are currently not supported.
To be removed later when secondaryFiles added to records. | [
"Remove",
"secondary",
"files",
"from",
"record",
"fields",
"which",
"are",
"currently",
"not",
"supported",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L265-L290 | train | 218,839 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_record_fields | def _get_record_fields(d):
"""Get field names from a potentially nested record.
"""
if isinstance(d, dict):
if "fields" in d:
return d["fields"]
else:
for v in d.values():
fields = _get_record_fields(v)
if fields:
return fields | python | def _get_record_fields(d):
"""Get field names from a potentially nested record.
"""
if isinstance(d, dict):
if "fields" in d:
return d["fields"]
else:
for v in d.values():
fields = _get_record_fields(v)
if fields:
return fields | [
"def",
"_get_record_fields",
"(",
"d",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
":",
"if",
"\"fields\"",
"in",
"d",
":",
"return",
"d",
"[",
"\"fields\"",
"]",
"else",
":",
"for",
"v",
"in",
"d",
".",
"values",
"(",
")",
":",
"fi... | Get field names from a potentially nested record. | [
"Get",
"field",
"names",
"from",
"a",
"potentially",
"nested",
"record",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L292-L302 | train | 218,840 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_sentinel_val | def _get_sentinel_val(v):
"""Retrieve expected sentinel value for an output, expanding records.
"""
out = workflow.get_base_id(v["id"])
if workflow.is_cwl_record(v):
out += ":%s" % ";".join([x["name"] for x in _get_record_fields(v)])
return out | python | def _get_sentinel_val(v):
"""Retrieve expected sentinel value for an output, expanding records.
"""
out = workflow.get_base_id(v["id"])
if workflow.is_cwl_record(v):
out += ":%s" % ";".join([x["name"] for x in _get_record_fields(v)])
return out | [
"def",
"_get_sentinel_val",
"(",
"v",
")",
":",
"out",
"=",
"workflow",
".",
"get_base_id",
"(",
"v",
"[",
"\"id\"",
"]",
")",
"if",
"workflow",
".",
"is_cwl_record",
"(",
"v",
")",
":",
"out",
"+=",
"\":%s\"",
"%",
"\";\"",
".",
"join",
"(",
"[",
... | Retrieve expected sentinel value for an output, expanding records. | [
"Retrieve",
"expected",
"sentinel",
"value",
"for",
"an",
"output",
"expanding",
"records",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L304-L310 | train | 218,841 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _place_input_binding | def _place_input_binding(inp_tool, inp_binding, parallel):
"""Check nesting of variables to determine where to place the input binding.
We want to allow having multiple files together (like fasta_indices), combined
with the itemSeparator, but also support having multiple samples where we pass
things independently.
"""
if (parallel in ["multi-combined", "multi-batch", "batch-split", "batch-parallel",
"batch-merge", "batch-single"] and
tz.get_in(["type", "type"], inp_tool) == "array"):
inp_tool["type"]["inputBinding"] = inp_binding
else:
inp_tool["inputBinding"] = inp_binding
return inp_tool | python | def _place_input_binding(inp_tool, inp_binding, parallel):
"""Check nesting of variables to determine where to place the input binding.
We want to allow having multiple files together (like fasta_indices), combined
with the itemSeparator, but also support having multiple samples where we pass
things independently.
"""
if (parallel in ["multi-combined", "multi-batch", "batch-split", "batch-parallel",
"batch-merge", "batch-single"] and
tz.get_in(["type", "type"], inp_tool) == "array"):
inp_tool["type"]["inputBinding"] = inp_binding
else:
inp_tool["inputBinding"] = inp_binding
return inp_tool | [
"def",
"_place_input_binding",
"(",
"inp_tool",
",",
"inp_binding",
",",
"parallel",
")",
":",
"if",
"(",
"parallel",
"in",
"[",
"\"multi-combined\"",
",",
"\"multi-batch\"",
",",
"\"batch-split\"",
",",
"\"batch-parallel\"",
",",
"\"batch-merge\"",
",",
"\"batch-si... | Check nesting of variables to determine where to place the input binding.
We want to allow having multiple files together (like fasta_indices), combined
with the itemSeparator, but also support having multiple samples where we pass
things independently. | [
"Check",
"nesting",
"of",
"variables",
"to",
"determine",
"where",
"to",
"place",
"the",
"input",
"binding",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L312-L325 | train | 218,842 |
def _place_secondary_files(inp_tool, inp_binding=None):
    """Put secondaryFiles at the level of the File item to ensure indexes get passed.

    Walks nested ``type``/``items`` definitions to find the level holding a
    File and attaches the secondary file extensions there; when no typed File
    level is found but an input binding is supplied, nests them inside an
    ignored binding instead.
    """
    def _is_file(val):
        # True when the type definition holds a File: the literal "File", a
        # list containing "File", or a list with a nested dict that holds one.
        # Fixes the original expression, which wrapped the whole check in a
        # generator expression (always truthy, so every list/tuple counted as
        # a File) and recursed on `val` rather than the element `x`.
        return (val == "File" or
                (isinstance(val, (list, tuple)) and
                 ("File" in val or
                  any(isinstance(x, dict) and _is_file(x) for x in val))))
    secondary_files = inp_tool.pop("secondaryFiles", None)
    if secondary_files:
        key = []
        while (not _is_file(tz.get_in(key + ["type"], inp_tool))
               and not _is_file(tz.get_in(key + ["items"], inp_tool))
               and not _is_file(tz.get_in(key + ["items", "items"], inp_tool))):
            # Defensive stop: with the stricter File check above, bail out
            # when there is no deeper type definition to descend into rather
            # than appending "type" forever.
            if (tz.get_in(key + ["type"], inp_tool) is None
                    and tz.get_in(key + ["items"], inp_tool) is None):
                break
            key.append("type")
        if tz.get_in(key, inp_tool):
            inp_tool["secondaryFiles"] = secondary_files
        elif inp_binding:
            # No type present at this level: attach via an ignored nested binding.
            nested_inp_binding = copy.deepcopy(inp_binding)
            nested_inp_binding["prefix"] = "ignore="
            nested_inp_binding["secondaryFiles"] = secondary_files
            inp_tool = tz.update_in(inp_tool, key, lambda x: nested_inp_binding)
    return inp_tool
"def",
"_place_secondary_files",
"(",
"inp_tool",
",",
"inp_binding",
"=",
"None",
")",
":",
"def",
"_is_file",
"(",
"val",
")",
":",
"return",
"(",
"val",
"==",
"\"File\"",
"or",
"(",
"isinstance",
"(",
"val",
",",
"(",
"list",
",",
"tuple",
")",
")",... | Put secondaryFiles at the level of the File item to ensure indexes get passed. | [
"Put",
"secondaryFiles",
"at",
"the",
"level",
"of",
"the",
"File",
"item",
"to",
"ensure",
"indexes",
"get",
"passed",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L327-L347 | train | 218,843 |
def _do_scatter_var(v, parallel):
    """Logic for scattering a variable.

    Batch-level records only scatter when double nested (arrays of arrays);
    everything else scatters on any array type.
    """
    is_nested_array = tz.get_in(["type", "type"], v) == "array"
    if parallel.startswith("batch") and workflow.is_cwl_record(v):
        # For batches, scatter records only at the top level (double nested)
        return is_nested_array and tz.get_in(["type", "type", "type"], v) == "array"
    # Otherwise, scatter arrays
    return is_nested_array
"def",
"_do_scatter_var",
"(",
"v",
",",
"parallel",
")",
":",
"# For batches, scatter records only at the top level (double nested)",
"if",
"parallel",
".",
"startswith",
"(",
"\"batch\"",
")",
"and",
"workflow",
".",
"is_cwl_record",
"(",
"v",
")",
":",
"return",
... | Logic for scattering a variable. | [
"Logic",
"for",
"scattering",
"a",
"variable",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L352-L361 | train | 218,844 |
def _step_template(name, run_file, inputs, outputs, parallel, step_parallelism, scatter=None):
    """Templating function for writing a step to avoid repeating namespaces.

    Builds the CWL step dictionary (run/id/in/out) and, for scatter-style
    parallelization, records which step inputs to scatter over.
    """
    step_inputs = []
    to_scatter = []
    for inp in inputs:
        entry = {"id": workflow.get_base_id(inp["id"]), "source": inp["id"]}
        if inp.get("wf_duplicate"):
            entry["id"] += "_toolinput"
        for attr in ["source", "valueFrom"]:
            if attr in inp:
                entry[attr] = inp[attr]
        step_inputs.append(entry)
        # An initial parallel scatter and multiple chained parallel sample scatters
        if parallel == "multi-parallel":
            prev = (step_parallelism.get(workflow.get_step_prefix(inp["id"]))
                    if step_parallelism else None)
            if not step_parallelism or prev == "multi-parallel":
                to_scatter.append(entry["id"])
                continue
        # scatter on inputs from previous processes that have been arrayed
        if _is_scatter_parallel(parallel) and (_do_scatter_var(inp, parallel) or
                                               (scatter and inp["id"] in scatter)):
            to_scatter.append(entry["id"])
    step = {"run": run_file,
            "id": name,
            "in": step_inputs,
            "out": [{"id": workflow.get_base_id(o["id"])} for o in outputs]}
    if _is_scatter_parallel(parallel):
        assert to_scatter, "Did not find items to scatter on: %s" % name
        step["scatterMethod"] = "dotproduct"
        step["scatter"] = to_scatter
    return step
"def",
"_step_template",
"(",
"name",
",",
"run_file",
",",
"inputs",
",",
"outputs",
",",
"parallel",
",",
"step_parallelism",
",",
"scatter",
"=",
"None",
")",
":",
"scatter_inputs",
"=",
"[",
"]",
"sinputs",
"=",
"[",
"]",
"for",
"inp",
"in",
"inputs"... | Templating function for writing a step to avoid repeating namespaces. | [
"Templating",
"function",
"for",
"writing",
"a",
"step",
"to",
"avoid",
"repeating",
"namespaces",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L363-L393 | train | 218,845 |
def _get_cur_remotes(path):
    """Retrieve remote references defined in the CWL.

    Recursively walks lists and dicts, collecting the integration name for
    any string that starts with a known remote prefix (from INTEGRATION_MAP).
    """
    found = set()
    if isinstance(path, dict):
        for item in path.values():
            found |= _get_cur_remotes(item)
    elif isinstance(path, (list, tuple)):
        for item in path:
            found |= _get_cur_remotes(item)
    elif path and isinstance(path, six.string_types):
        if path.startswith(tuple(INTEGRATION_MAP.keys())):
            # Map the "scheme:" prefix back to its integration name.
            found.add(INTEGRATION_MAP.get(path.split(":")[0] + ":"))
    return found
"def",
"_get_cur_remotes",
"(",
"path",
")",
":",
"cur_remotes",
"=",
"set",
"(",
"[",
"]",
")",
"if",
"isinstance",
"(",
"path",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"v",
"in",
"path",
":",
"cur_remotes",
"|=",
"_get_cur_remotes",
"... | Retrieve remote references defined in the CWL. | [
"Retrieve",
"remote",
"references",
"defined",
"in",
"the",
"CWL",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L395-L408 | train | 218,846 |
def _flatten_samples(samples, base_file, get_retriever):
    """Create a flattened JSON representation of data from the bcbio world map.

    Returns (CWL inputs with types, flattened key -> per-sample value lists).
    """
    key_paths = [["analysis"], ["description"], ["rgnames"], ["config", "algorithm"],
                 ["metadata"], ["genome_build"], ["resources"],
                 ["files"], ["reference"], ["genome_resources"], ["vrn_file"]]
    flat_data = []
    for data in samples:
        data["reference"] = _indexes_to_secondary_files(data["reference"], data["genome_build"])
        cur_flat = {}
        for key_path in key_paths:
            joined_key = "__".join(key_path)
            for fkey, fval in _to_cwldata(joined_key, tz.get_in(key_path, data), get_retriever):
                cur_flat[fkey] = fval
        flat_data.append(cur_flat)
    all_keys = set()
    for d in flat_data:
        all_keys.update(d.keys())
    out = {}
    for key in sorted(all_keys):
        # Periods in keys cause issues with WDL and some CWL implementations
        out[key.replace(".", "_")] = [d.get(key) for d in flat_data]
    # special case for back-compatibility with fasta specifications -- yuck
    if "reference__fasta__base" not in out and "reference__fasta" in out:
        out["reference__fasta__base"] = out.pop("reference__fasta")
    return _samplejson_to_inputs(out), out
"def",
"_flatten_samples",
"(",
"samples",
",",
"base_file",
",",
"get_retriever",
")",
":",
"flat_data",
"=",
"[",
"]",
"for",
"data",
"in",
"samples",
":",
"data",
"[",
"\"reference\"",
"]",
"=",
"_indexes_to_secondary_files",
"(",
"data",
"[",
"\"reference\... | Create a flattened JSON representation of data from the bcbio world map. | [
"Create",
"a",
"flattened",
"JSON",
"representation",
"of",
"data",
"from",
"the",
"bcbio",
"world",
"map",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L478-L503 | train | 218,847 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _indexes_to_secondary_files | def _indexes_to_secondary_files(gresources, genome_build):
"""Convert a list of genome indexes into a single file plus secondary files.
This ensures that all indices are staged together in a single directory.
"""
out = {}
for refname, val in gresources.items():
if isinstance(val, dict) and "indexes" in val:
# list of indexes -- aligners
if len(val.keys()) == 1:
indexes = sorted(val["indexes"])
if len(indexes) == 0:
if refname not in alignment.allow_noindices():
raise ValueError("Did not find indexes for %s: %s" % (refname, val))
elif len(indexes) == 1:
val = {"indexes": indexes[0]}
else:
val = {"indexes": {"base": indexes[0], "indexes": indexes[1:]}}
# directory plus indexes -- snpEff
elif "base" in val and os.path.isdir(val["base"]) and len(val["indexes"]) > 0:
indexes = val["indexes"]
val = {"base": indexes[0], "indexes": indexes[1:]}
elif isinstance(val, dict) and genome_build in val:
val = _indexes_to_secondary_files(val, genome_build)
out[refname] = val
return out | python | def _indexes_to_secondary_files(gresources, genome_build):
"""Convert a list of genome indexes into a single file plus secondary files.
This ensures that all indices are staged together in a single directory.
"""
out = {}
for refname, val in gresources.items():
if isinstance(val, dict) and "indexes" in val:
# list of indexes -- aligners
if len(val.keys()) == 1:
indexes = sorted(val["indexes"])
if len(indexes) == 0:
if refname not in alignment.allow_noindices():
raise ValueError("Did not find indexes for %s: %s" % (refname, val))
elif len(indexes) == 1:
val = {"indexes": indexes[0]}
else:
val = {"indexes": {"base": indexes[0], "indexes": indexes[1:]}}
# directory plus indexes -- snpEff
elif "base" in val and os.path.isdir(val["base"]) and len(val["indexes"]) > 0:
indexes = val["indexes"]
val = {"base": indexes[0], "indexes": indexes[1:]}
elif isinstance(val, dict) and genome_build in val:
val = _indexes_to_secondary_files(val, genome_build)
out[refname] = val
return out | [
"def",
"_indexes_to_secondary_files",
"(",
"gresources",
",",
"genome_build",
")",
":",
"out",
"=",
"{",
"}",
"for",
"refname",
",",
"val",
"in",
"gresources",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
"and",
"\"index... | Convert a list of genome indexes into a single file plus secondary files.
This ensures that all indices are staged together in a single directory. | [
"Convert",
"a",
"list",
"of",
"genome",
"indexes",
"into",
"a",
"single",
"file",
"plus",
"secondary",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L505-L530 | train | 218,848 |
def _add_suppl_info(inp, val):
    """Add supplementary information to inputs from file values.

    Fills in the avro type and, when present, the secondary file extensions.
    """
    secondary = _get_secondary_files(val)
    inp["type"] = _get_avro_type(val)
    if secondary:
        inp["secondaryFiles"] = secondary
    return inp
"def",
"_add_suppl_info",
"(",
"inp",
",",
"val",
")",
":",
"inp",
"[",
"\"type\"",
"]",
"=",
"_get_avro_type",
"(",
"val",
")",
"secondary",
"=",
"_get_secondary_files",
"(",
"val",
")",
"if",
"secondary",
":",
"inp",
"[",
"\"secondaryFiles\"",
"]",
"=",
... | Add supplementary information to inputs from file values. | [
"Add",
"supplementary",
"information",
"to",
"inputs",
"from",
"file",
"values",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L532-L539 | train | 218,849 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_secondary_files | def _get_secondary_files(val):
"""Retrieve associated secondary files.
Normalizes input values into definitions of available secondary files.
Requires indices to be present in all files, since declared CWL secondary
files are not optional. So if we have a mix of BAM (bai) and fastq (gbi) we
ignore the existing indices and will have to regenerate during processing.
"""
out = []
if isinstance(val, (tuple, list)):
s_counts = collections.defaultdict(int)
for x in val:
for s in _get_secondary_files(x):
s_counts[s] += 1
for s, count in s_counts.items():
if s and s not in out and count == len([x for x in val if x]):
out.append(s)
elif isinstance(val, dict) and (val.get("class") == "File" or "File" in val.get("class")):
if "secondaryFiles" in val:
for sf in [x["path"] for x in val["secondaryFiles"]]:
rext = _get_relative_ext(val["path"], sf)
if rext and rext not in out:
out.append(rext)
return out | python | def _get_secondary_files(val):
"""Retrieve associated secondary files.
Normalizes input values into definitions of available secondary files.
Requires indices to be present in all files, since declared CWL secondary
files are not optional. So if we have a mix of BAM (bai) and fastq (gbi) we
ignore the existing indices and will have to regenerate during processing.
"""
out = []
if isinstance(val, (tuple, list)):
s_counts = collections.defaultdict(int)
for x in val:
for s in _get_secondary_files(x):
s_counts[s] += 1
for s, count in s_counts.items():
if s and s not in out and count == len([x for x in val if x]):
out.append(s)
elif isinstance(val, dict) and (val.get("class") == "File" or "File" in val.get("class")):
if "secondaryFiles" in val:
for sf in [x["path"] for x in val["secondaryFiles"]]:
rext = _get_relative_ext(val["path"], sf)
if rext and rext not in out:
out.append(rext)
return out | [
"def",
"_get_secondary_files",
"(",
"val",
")",
":",
"out",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"val",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"s_counts",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"x",
"in",
"val",
... | Retrieve associated secondary files.
Normalizes input values into definitions of available secondary files.
Requires indices to be present in all files, since declared CWL secondary
files are not optional. So if we have a mix of BAM (bai) and fastq (gbi) we
ignore the existing indices and will have to regenerate during processing. | [
"Retrieve",
"associated",
"secondary",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L541-L564 | train | 218,850 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_relative_ext | def _get_relative_ext(of, sf):
"""Retrieve relative extension given the original and secondary files.
"""
def half_finished_trim(orig, prefix):
return (os.path.basename(prefix).count(".") > 0 and
os.path.basename(orig).count(".") == os.path.basename(prefix).count("."))
# Handle remote files
if of.find(":") > 0:
of = os.path.basename(of.split(":")[-1])
if sf.find(":") > 0:
sf = os.path.basename(sf.split(":")[-1])
prefix = os.path.commonprefix([sf, of])
while prefix.endswith(".") or (half_finished_trim(sf, prefix) and half_finished_trim(of, prefix)):
prefix = prefix[:-1]
exts_to_remove = of.replace(prefix, "")
ext_to_add = sf.replace(prefix, "")
# Return extensions relative to original
if not exts_to_remove or exts_to_remove.startswith("."):
return str("^" * exts_to_remove.count(".") + ext_to_add)
else:
raise ValueError("No cross platform way to reference complex extension: %s %s" % (sf, of)) | python | def _get_relative_ext(of, sf):
"""Retrieve relative extension given the original and secondary files.
"""
def half_finished_trim(orig, prefix):
return (os.path.basename(prefix).count(".") > 0 and
os.path.basename(orig).count(".") == os.path.basename(prefix).count("."))
# Handle remote files
if of.find(":") > 0:
of = os.path.basename(of.split(":")[-1])
if sf.find(":") > 0:
sf = os.path.basename(sf.split(":")[-1])
prefix = os.path.commonprefix([sf, of])
while prefix.endswith(".") or (half_finished_trim(sf, prefix) and half_finished_trim(of, prefix)):
prefix = prefix[:-1]
exts_to_remove = of.replace(prefix, "")
ext_to_add = sf.replace(prefix, "")
# Return extensions relative to original
if not exts_to_remove or exts_to_remove.startswith("."):
return str("^" * exts_to_remove.count(".") + ext_to_add)
else:
raise ValueError("No cross platform way to reference complex extension: %s %s" % (sf, of)) | [
"def",
"_get_relative_ext",
"(",
"of",
",",
"sf",
")",
":",
"def",
"half_finished_trim",
"(",
"orig",
",",
"prefix",
")",
":",
"return",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"prefix",
")",
".",
"count",
"(",
"\".\"",
")",
">",
"0",
"and",
... | Retrieve relative extension given the original and secondary files. | [
"Retrieve",
"relative",
"extension",
"given",
"the",
"original",
"and",
"secondary",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L566-L586 | train | 218,851 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_avro_type | def _get_avro_type(val):
"""Infer avro type for the current input.
"""
if isinstance(val, dict):
assert val.get("class") == "File" or "File" in val.get("class")
return "File"
elif isinstance(val, (tuple, list)):
types = []
for ctype in [_get_avro_type(v) for v in val]:
if isinstance(ctype, dict):
nested_types = [x["items"] for x in types if isinstance(x, dict)]
if ctype["items"] not in nested_types:
if isinstance(ctype["items"], (list, tuple)):
for t in ctype["items"]:
if t not in types:
types.append(t)
else:
if ctype not in types:
types.append(ctype)
elif isinstance(ctype, (list, tuple)):
for x in ctype:
if x not in types:
types.append(x)
elif ctype not in types:
types.append(ctype)
# handle empty types, allow null
if len(types) == 0:
types = ["null"]
# empty lists
if isinstance(val, (list, tuple)) and len(val) == 0:
types.append({"type": "array", "items": ["null"]})
types = _avoid_duplicate_arrays(types)
# Avoid empty null only arrays which confuse some runners
if len(types) == 1 and types[0] == "null":
types.append("string")
return {"type": "array", "items": (types[0] if len(types) == 1 else types)}
elif val is None:
return ["null"]
# encode booleans as string True/False and unencode on other side
elif isinstance(val, bool) or isinstance(val, six.string_types) and val.lower() in ["true", "false", "none"]:
return ["string", "null", "boolean"]
elif isinstance(val, int):
return "long"
elif isinstance(val, float):
return "double"
else:
return "string" | python | def _get_avro_type(val):
"""Infer avro type for the current input.
"""
if isinstance(val, dict):
assert val.get("class") == "File" or "File" in val.get("class")
return "File"
elif isinstance(val, (tuple, list)):
types = []
for ctype in [_get_avro_type(v) for v in val]:
if isinstance(ctype, dict):
nested_types = [x["items"] for x in types if isinstance(x, dict)]
if ctype["items"] not in nested_types:
if isinstance(ctype["items"], (list, tuple)):
for t in ctype["items"]:
if t not in types:
types.append(t)
else:
if ctype not in types:
types.append(ctype)
elif isinstance(ctype, (list, tuple)):
for x in ctype:
if x not in types:
types.append(x)
elif ctype not in types:
types.append(ctype)
# handle empty types, allow null
if len(types) == 0:
types = ["null"]
# empty lists
if isinstance(val, (list, tuple)) and len(val) == 0:
types.append({"type": "array", "items": ["null"]})
types = _avoid_duplicate_arrays(types)
# Avoid empty null only arrays which confuse some runners
if len(types) == 1 and types[0] == "null":
types.append("string")
return {"type": "array", "items": (types[0] if len(types) == 1 else types)}
elif val is None:
return ["null"]
# encode booleans as string True/False and unencode on other side
elif isinstance(val, bool) or isinstance(val, six.string_types) and val.lower() in ["true", "false", "none"]:
return ["string", "null", "boolean"]
elif isinstance(val, int):
return "long"
elif isinstance(val, float):
return "double"
else:
return "string" | [
"def",
"_get_avro_type",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"assert",
"val",
".",
"get",
"(",
"\"class\"",
")",
"==",
"\"File\"",
"or",
"\"File\"",
"in",
"val",
".",
"get",
"(",
"\"class\"",
")",
"return",
"\... | Infer avro type for the current input. | [
"Infer",
"avro",
"type",
"for",
"the",
"current",
"input",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L588-L634 | train | 218,852 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _avoid_duplicate_arrays | def _avoid_duplicate_arrays(types):
"""Collapse arrays when we have multiple types.
"""
arrays = [t for t in types if isinstance(t, dict) and t["type"] == "array"]
others = [t for t in types if not (isinstance(t, dict) and t["type"] == "array")]
if arrays:
items = set([])
for t in arrays:
if isinstance(t["items"], (list, tuple)):
items |= set(t["items"])
else:
items.add(t["items"])
if len(items) == 1:
items = items.pop()
else:
items = sorted(list(items))
arrays = [{"type": "array", "items": items}]
return others + arrays | python | def _avoid_duplicate_arrays(types):
"""Collapse arrays when we have multiple types.
"""
arrays = [t for t in types if isinstance(t, dict) and t["type"] == "array"]
others = [t for t in types if not (isinstance(t, dict) and t["type"] == "array")]
if arrays:
items = set([])
for t in arrays:
if isinstance(t["items"], (list, tuple)):
items |= set(t["items"])
else:
items.add(t["items"])
if len(items) == 1:
items = items.pop()
else:
items = sorted(list(items))
arrays = [{"type": "array", "items": items}]
return others + arrays | [
"def",
"_avoid_duplicate_arrays",
"(",
"types",
")",
":",
"arrays",
"=",
"[",
"t",
"for",
"t",
"in",
"types",
"if",
"isinstance",
"(",
"t",
",",
"dict",
")",
"and",
"t",
"[",
"\"type\"",
"]",
"==",
"\"array\"",
"]",
"others",
"=",
"[",
"t",
"for",
... | Collapse arrays when we have multiple types. | [
"Collapse",
"arrays",
"when",
"we",
"have",
"multiple",
"types",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L636-L653 | train | 218,853 |
def _samplejson_to_inputs(svals):
    """Convert sample output into inputs for CWL configuration files, with types."""
    return [_add_suppl_info({"id": "%s" % key, }, val) for key, val in svals.items()]
"def",
"_samplejson_to_inputs",
"(",
"svals",
")",
":",
"out",
"=",
"[",
"]",
"for",
"key",
",",
"val",
"in",
"svals",
".",
"items",
"(",
")",
":",
"out",
".",
"append",
"(",
"_add_suppl_info",
"(",
"{",
"\"id\"",
":",
"\"%s\"",
"%",
"key",
"}",
",... | Convert sample output into inputs for CWL configuration files, with types. | [
"Convert",
"sample",
"output",
"into",
"inputs",
"for",
"CWL",
"configuration",
"files",
"with",
"types",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L655-L661 | train | 218,854 |
def _to_cwldata(key, val, get_retriever):
    """Convert nested dictionary into CWL data, flatening and marking up files.
    Moves file objects to the top level, enabling insertion in CWL inputs/outputs.
    """
    if not isinstance(val, dict):
        return [(key, _item_to_cwldata(val, get_retriever))]
    out = []
    if len(val) == 2 and "base" in val and "indexes" in val:
        # File plus pre-built indexes
        if len(val["indexes"]) > 0 and val["base"] == val["indexes"][0]:
            out.append(("%s__indexes" % key, _item_to_cwldata(val["base"], get_retriever)))
        else:
            out.append((key, _to_cwlfile_with_indexes(val, get_retriever)))
    elif key in workflow.ALWAYS_AVAILABLE or key in workflow.STRING_DICT:
        # Dump shared nested keys like resources as a JSON string
        out.append((key, _item_to_cwldata(json.dumps(val), get_retriever)))
    elif key in workflow.FLAT_DICT:
        # Flatten to "key:value" strings, one per value.
        flat = ["%s:%s" % (k, v)
                for k, vs in val.items()
                for v in (vs if isinstance(vs, (list, tuple)) else [vs])]
        out.append((key, _item_to_cwldata(flat, get_retriever)))
    else:
        remain_val = {}
        for nkey, nval in val.items():
            cur_nkey = "%s__%s" % (key, nkey)
            cwl_nval = _item_to_cwldata(nval, get_retriever)
            if isinstance(cwl_nval, dict):
                out.extend(_to_cwldata(cur_nkey, nval, get_retriever))
            elif key in workflow.ALWAYS_AVAILABLE:
                remain_val[nkey] = nval
            else:
                out.append((cur_nkey, cwl_nval))
        if remain_val:
            out.append((key, json.dumps(remain_val, sort_keys=True, separators=(',', ':'))))
    return out
"def",
"_to_cwldata",
"(",
"key",
",",
"val",
",",
"get_retriever",
")",
":",
"out",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"if",
"len",
"(",
"val",
")",
"==",
"2",
"and",
"\"base\"",
"in",
"val",
"and",
"\"indexes\""... | Convert nested dictionary into CWL data, flatening and marking up files.
Moves file objects to the top level, enabling insertion in CWL inputs/outputs. | [
"Convert",
"nested",
"dictionary",
"into",
"CWL",
"data",
"flatening",
"and",
"marking",
"up",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L663-L701 | train | 218,855 |
def _to_cwlfile_with_indexes(val, get_retriever):
    """Convert reads with ready to go indexes into the right CWL object.
    Identifies the top level directory and creates a tarball, avoiding
    trying to handle complex secondary setups which are not cross platform.
    Skips doing this for reference files and standard setups like bwa, which
    take up too much time and space to unpack multiple times.
    """
    val["indexes"] = _index_blacklist(val["indexes"])
    base = _remove_remote_prefix(val["base"])
    indexes = [_remove_remote_prefix(f) for f in val["indexes"]]
    cp_dir, cp_base = os.path.split(os.path.commonprefix([base] + indexes))
    # Standard named set of indices, like bwa
    # Do not include snpEff, which we need to isolate inside a nested directory
    # hisat2 indices do also not localize cleanly due to compilicated naming
    is_standard_layout = (cp_base and cp_dir == os.path.dirname(base) and
                          "/snpeff/" not in cp_dir and "/hisat2" not in cp_dir)
    if is_standard_layout:
        return _item_to_cwldata(val["base"], get_retriever, val["indexes"])
    dirname = os.path.dirname(base)
    assert all([x.startswith(dirname) for x in indexes])
    return {"class": "File", "path": directory_tarball(dirname)}
"def",
"_to_cwlfile_with_indexes",
"(",
"val",
",",
"get_retriever",
")",
":",
"val",
"[",
"\"indexes\"",
"]",
"=",
"_index_blacklist",
"(",
"val",
"[",
"\"indexes\"",
"]",
")",
"tval",
"=",
"{",
"\"base\"",
":",
"_remove_remote_prefix",
"(",
"val",
"[",
"\"... | Convert reads with ready to go indexes into the right CWL object.
Identifies the top level directory and creates a tarball, avoiding
trying to handle complex secondary setups which are not cross platform.
Skips doing this for reference files and standard setups like bwa, which
take up too much time and space to unpack multiple times. | [
"Convert",
"reads",
"with",
"ready",
"to",
"go",
"indexes",
"into",
"the",
"right",
"CWL",
"object",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L712-L734 | train | 218,856 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _add_secondary_if_exists | def _add_secondary_if_exists(secondary, out, get_retriever):
"""Add secondary files only if present locally or remotely.
"""
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out | python | def _add_secondary_if_exists(secondary, out, get_retriever):
"""Add secondary files only if present locally or remotely.
"""
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out | [
"def",
"_add_secondary_if_exists",
"(",
"secondary",
",",
"out",
",",
"get_retriever",
")",
":",
"secondary",
"=",
"[",
"_file_local_or_remote",
"(",
"y",
",",
"get_retriever",
")",
"for",
"y",
"in",
"secondary",
"]",
"secondary",
"=",
"[",
"z",
"for",
"z",
... | Add secondary files only if present locally or remotely. | [
"Add",
"secondary",
"files",
"only",
"if",
"present",
"locally",
"or",
"remotely",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L736-L743 | train | 218,857 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _item_to_cwldata | def _item_to_cwldata(x, get_retriever, indexes=None):
""""Markup an item with CWL specific metadata.
"""
if isinstance(x, (list, tuple)):
return [_item_to_cwldata(subx, get_retriever) for subx in x]
elif (x and isinstance(x, six.string_types) and
(((os.path.isfile(x) or os.path.isdir(x)) and os.path.exists(x)) or
objectstore.is_remote(x))):
if _file_local_or_remote(x, get_retriever):
out = {"class": "File", "path": x}
if indexes:
out = _add_secondary_if_exists(indexes, out, get_retriever)
elif x.endswith(".bam"):
out = _add_secondary_if_exists([x + ".bai"], out, get_retriever)
elif x.endswith(".cram"):
out = _add_secondary_if_exists([x + ".crai"], out, get_retriever)
elif x.endswith((".vcf.gz", ".bed.gz")):
out = _add_secondary_if_exists([x + ".tbi"], out, get_retriever)
elif x.endswith(".fa"):
out = _add_secondary_if_exists([x + ".fai", os.path.splitext(x)[0] + ".dict"], out, get_retriever)
elif x.endswith(".fa.gz"):
out = _add_secondary_if_exists([x + ".fai", x + ".gzi", x.replace(".fa.gz", "") + ".dict"],
out, get_retriever)
elif x.endswith(".fq.gz") or x.endswith(".fastq.gz"):
out = _add_secondary_if_exists([x + ".gbi"], out, get_retriever)
elif x.endswith(".gtf"):
out = _add_secondary_if_exists([x + ".db"], out, get_retriever)
else:
out = {"class": "File", "path": directory_tarball(x)}
return out
elif isinstance(x, bool):
return str(x)
else:
return x | python | def _item_to_cwldata(x, get_retriever, indexes=None):
""""Markup an item with CWL specific metadata.
"""
if isinstance(x, (list, tuple)):
return [_item_to_cwldata(subx, get_retriever) for subx in x]
elif (x and isinstance(x, six.string_types) and
(((os.path.isfile(x) or os.path.isdir(x)) and os.path.exists(x)) or
objectstore.is_remote(x))):
if _file_local_or_remote(x, get_retriever):
out = {"class": "File", "path": x}
if indexes:
out = _add_secondary_if_exists(indexes, out, get_retriever)
elif x.endswith(".bam"):
out = _add_secondary_if_exists([x + ".bai"], out, get_retriever)
elif x.endswith(".cram"):
out = _add_secondary_if_exists([x + ".crai"], out, get_retriever)
elif x.endswith((".vcf.gz", ".bed.gz")):
out = _add_secondary_if_exists([x + ".tbi"], out, get_retriever)
elif x.endswith(".fa"):
out = _add_secondary_if_exists([x + ".fai", os.path.splitext(x)[0] + ".dict"], out, get_retriever)
elif x.endswith(".fa.gz"):
out = _add_secondary_if_exists([x + ".fai", x + ".gzi", x.replace(".fa.gz", "") + ".dict"],
out, get_retriever)
elif x.endswith(".fq.gz") or x.endswith(".fastq.gz"):
out = _add_secondary_if_exists([x + ".gbi"], out, get_retriever)
elif x.endswith(".gtf"):
out = _add_secondary_if_exists([x + ".db"], out, get_retriever)
else:
out = {"class": "File", "path": directory_tarball(x)}
return out
elif isinstance(x, bool):
return str(x)
else:
return x | [
"def",
"_item_to_cwldata",
"(",
"x",
",",
"get_retriever",
",",
"indexes",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"_item_to_cwldata",
"(",
"subx",
",",
"get_retriever",
")",
"f... | Markup an item with CWL specific metadata. | [
"Markup",
"an",
"item",
"with",
"CWL",
"specific",
"metadata",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L745-L778 | train | 218,858 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _file_local_or_remote | def _file_local_or_remote(f, get_retriever):
"""Check for presence of a local or remote file.
"""
if os.path.exists(f):
return f
integration, config = get_retriever.integration_and_config(f)
if integration:
return integration.file_exists(f, config) | python | def _file_local_or_remote(f, get_retriever):
"""Check for presence of a local or remote file.
"""
if os.path.exists(f):
return f
integration, config = get_retriever.integration_and_config(f)
if integration:
return integration.file_exists(f, config) | [
"def",
"_file_local_or_remote",
"(",
"f",
",",
"get_retriever",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"f",
")",
":",
"return",
"f",
"integration",
",",
"config",
"=",
"get_retriever",
".",
"integration_and_config",
"(",
"f",
")",
"if",
"... | Check for presence of a local or remote file. | [
"Check",
"for",
"presence",
"of",
"a",
"local",
"or",
"remote",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L780-L787 | train | 218,859 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | directory_tarball | def directory_tarball(dirname):
"""Create a tarball of a complex directory, avoiding complex secondaryFiles.
Complex secondary files do not work on multiple platforms and are not portable
to WDL, so for now we create a tarball that workers will unpack.
"""
assert os.path.isdir(dirname), dirname
base_dir, tarball_dir = os.path.split(dirname)
while not os.path.exists(os.path.join(base_dir, "seq")) and base_dir and base_dir != "/":
base_dir, extra_tarball = os.path.split(base_dir)
tarball_dir = os.path.join(extra_tarball, tarball_dir)
if base_dir == "/" and not os.path.exists(os.path.join(base_dir, "seq")):
raise ValueError("Did not find relative directory to create tarball for %s" % dirname)
tarball = os.path.join(base_dir, "%s-wf.tar.gz" % (tarball_dir.replace(os.path.sep, "--")))
if not utils.file_exists(tarball):
print("Preparing CWL input tarball: %s" % tarball)
with file_transaction({}, tarball) as tx_tarball:
with utils.chdir(base_dir):
with tarfile.open(tx_tarball, "w:gz") as tar:
tar.add(tarball_dir)
return tarball | python | def directory_tarball(dirname):
"""Create a tarball of a complex directory, avoiding complex secondaryFiles.
Complex secondary files do not work on multiple platforms and are not portable
to WDL, so for now we create a tarball that workers will unpack.
"""
assert os.path.isdir(dirname), dirname
base_dir, tarball_dir = os.path.split(dirname)
while not os.path.exists(os.path.join(base_dir, "seq")) and base_dir and base_dir != "/":
base_dir, extra_tarball = os.path.split(base_dir)
tarball_dir = os.path.join(extra_tarball, tarball_dir)
if base_dir == "/" and not os.path.exists(os.path.join(base_dir, "seq")):
raise ValueError("Did not find relative directory to create tarball for %s" % dirname)
tarball = os.path.join(base_dir, "%s-wf.tar.gz" % (tarball_dir.replace(os.path.sep, "--")))
if not utils.file_exists(tarball):
print("Preparing CWL input tarball: %s" % tarball)
with file_transaction({}, tarball) as tx_tarball:
with utils.chdir(base_dir):
with tarfile.open(tx_tarball, "w:gz") as tar:
tar.add(tarball_dir)
return tarball | [
"def",
"directory_tarball",
"(",
"dirname",
")",
":",
"assert",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
",",
"dirname",
"base_dir",
",",
"tarball_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"dirname",
")",
"while",
"not",
"os",
".",
... | Create a tarball of a complex directory, avoiding complex secondaryFiles.
Complex secondary files do not work on multiple platforms and are not portable
to WDL, so for now we create a tarball that workers will unpack. | [
"Create",
"a",
"tarball",
"of",
"a",
"complex",
"directory",
"avoiding",
"complex",
"secondaryFiles",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L789-L809 | train | 218,860 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _calc_input_estimates | def _calc_input_estimates(keyvals, get_retriever):
"""Calculate estimations of input file sizes for disk usage approximation.
These are current dominated by fastq/BAM sizes, so estimate based on that.
"""
out = {}
for key, val in keyvals.items():
size = _calc_file_size(val, 0, get_retriever)
if size:
out[key] = size
return out | python | def _calc_input_estimates(keyvals, get_retriever):
"""Calculate estimations of input file sizes for disk usage approximation.
These are current dominated by fastq/BAM sizes, so estimate based on that.
"""
out = {}
for key, val in keyvals.items():
size = _calc_file_size(val, 0, get_retriever)
if size:
out[key] = size
return out | [
"def",
"_calc_input_estimates",
"(",
"keyvals",
",",
"get_retriever",
")",
":",
"out",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"keyvals",
".",
"items",
"(",
")",
":",
"size",
"=",
"_calc_file_size",
"(",
"val",
",",
"0",
",",
"get_retriever",
")... | Calculate estimations of input file sizes for disk usage approximation.
These are current dominated by fastq/BAM sizes, so estimate based on that. | [
"Calculate",
"estimations",
"of",
"input",
"file",
"sizes",
"for",
"disk",
"usage",
"approximation",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L853-L863 | train | 218,861 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_file_size | def _get_file_size(path, get_retriever):
"""Return file size in megabytes, including querying remote integrations
"""
integration, config = get_retriever.integration_and_config(path)
if integration:
return integration.file_size(path, config)
elif os.path.exists(path):
return os.path.getsize(path) / (1024.0 * 1024.0) | python | def _get_file_size(path, get_retriever):
"""Return file size in megabytes, including querying remote integrations
"""
integration, config = get_retriever.integration_and_config(path)
if integration:
return integration.file_size(path, config)
elif os.path.exists(path):
return os.path.getsize(path) / (1024.0 * 1024.0) | [
"def",
"_get_file_size",
"(",
"path",
",",
"get_retriever",
")",
":",
"integration",
",",
"config",
"=",
"get_retriever",
".",
"integration_and_config",
"(",
"path",
")",
"if",
"integration",
":",
"return",
"integration",
".",
"file_size",
"(",
"path",
",",
"c... | Return file size in megabytes, including querying remote integrations | [
"Return",
"file",
"size",
"in",
"megabytes",
"including",
"querying",
"remote",
"integrations"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L896-L903 | train | 218,862 |
bcbio/bcbio-nextgen | bcbio/cwl/create.py | GetRetriever.integration_and_config | def integration_and_config(self, path):
"""Get a retriever and configuration for the given file path.
"""
if path.startswith(tuple(INTEGRATION_MAP.keys())):
key = INTEGRATION_MAP[path.split(":")[0] + ":"]
integration = self._integrations.get(key)
config = {}
for sample in self._samples:
config = tz.get_in(["config", key], sample)
if config:
break
return integration, config
return None, None | python | def integration_and_config(self, path):
"""Get a retriever and configuration for the given file path.
"""
if path.startswith(tuple(INTEGRATION_MAP.keys())):
key = INTEGRATION_MAP[path.split(":")[0] + ":"]
integration = self._integrations.get(key)
config = {}
for sample in self._samples:
config = tz.get_in(["config", key], sample)
if config:
break
return integration, config
return None, None | [
"def",
"integration_and_config",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"tuple",
"(",
"INTEGRATION_MAP",
".",
"keys",
"(",
")",
")",
")",
":",
"key",
"=",
"INTEGRATION_MAP",
"[",
"path",
".",
"split",
"(",
"\":\"",
")... | Get a retriever and configuration for the given file path. | [
"Get",
"a",
"retriever",
"and",
"configuration",
"for",
"the",
"given",
"file",
"path",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L881-L894 | train | 218,863 |
bcbio/bcbio-nextgen | bcbio/rnaseq/singlecellexperiment.py | make_scrnaseq_object | def make_scrnaseq_object(samples):
"""
load the initial se.rda object using sinclecell-experiment
"""
local_sitelib = R_sitelib()
counts_dir = os.path.dirname(dd.get_in_samples(samples, dd.get_combined_counts))
gtf_file = dd.get_in_samples(samples, dd.get_transcriptome_gtf)
if not gtf_file:
gtf_file = dd.get_in_samples(samples, dd.get_gtf_file)
rda_file = os.path.join(counts_dir, "se.rda")
if not file_exists(rda_file):
with file_transaction(rda_file) as tx_out_file:
rcode = "%s-run.R" % os.path.splitext(rda_file)[0]
rrna_file = "%s-rrna.txt" % os.path.splitext(rda_file)[0]
rrna_file = _find_rRNA_genes(gtf_file, rrna_file)
with open(rcode, "w") as out_handle:
out_handle.write(_script.format(**locals()))
rscript = Rscript_cmd()
try:
# do.run([rscript, "--no-environ", rcode],
# "SingleCellExperiment",
# log_error=False)
rda_file = rcode
except subprocess.CalledProcessError as msg:
logger.exception() | python | def make_scrnaseq_object(samples):
"""
load the initial se.rda object using sinclecell-experiment
"""
local_sitelib = R_sitelib()
counts_dir = os.path.dirname(dd.get_in_samples(samples, dd.get_combined_counts))
gtf_file = dd.get_in_samples(samples, dd.get_transcriptome_gtf)
if not gtf_file:
gtf_file = dd.get_in_samples(samples, dd.get_gtf_file)
rda_file = os.path.join(counts_dir, "se.rda")
if not file_exists(rda_file):
with file_transaction(rda_file) as tx_out_file:
rcode = "%s-run.R" % os.path.splitext(rda_file)[0]
rrna_file = "%s-rrna.txt" % os.path.splitext(rda_file)[0]
rrna_file = _find_rRNA_genes(gtf_file, rrna_file)
with open(rcode, "w") as out_handle:
out_handle.write(_script.format(**locals()))
rscript = Rscript_cmd()
try:
# do.run([rscript, "--no-environ", rcode],
# "SingleCellExperiment",
# log_error=False)
rda_file = rcode
except subprocess.CalledProcessError as msg:
logger.exception() | [
"def",
"make_scrnaseq_object",
"(",
"samples",
")",
":",
"local_sitelib",
"=",
"R_sitelib",
"(",
")",
"counts_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dd",
".",
"get_in_samples",
"(",
"samples",
",",
"dd",
".",
"get_combined_counts",
")",
")",
"... | load the initial se.rda object using sinclecell-experiment | [
"load",
"the",
"initial",
"se",
".",
"rda",
"object",
"using",
"sinclecell",
"-",
"experiment"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/singlecellexperiment.py#L13-L37 | train | 218,864 |
bcbio/bcbio-nextgen | bcbio/distributed/multi.py | runner | def runner(parallel, config):
"""Run functions, provided by string name, on multiple cores on the current machine.
"""
def run_parallel(fn_name, items):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name)
logger.info("multiprocessing: %s" % fn_name)
if "wrapper" in parallel:
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])}
items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items]
return run_multicore(fn, items, config, parallel=parallel)
return run_parallel | python | def runner(parallel, config):
"""Run functions, provided by string name, on multiple cores on the current machine.
"""
def run_parallel(fn_name, items):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name)
logger.info("multiprocessing: %s" % fn_name)
if "wrapper" in parallel:
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])}
items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items]
return run_multicore(fn, items, config, parallel=parallel)
return run_parallel | [
"def",
"runner",
"(",
"parallel",
",",
"config",
")",
":",
"def",
"run_parallel",
"(",
"fn_name",
",",
"items",
")",
":",
"items",
"=",
"[",
"x",
"for",
"x",
"in",
"items",
"if",
"x",
"is",
"not",
"None",
"]",
"if",
"len",
"(",
"items",
")",
"=="... | Run functions, provided by string name, on multiple cores on the current machine. | [
"Run",
"functions",
"provided",
"by",
"string",
"name",
"on",
"multiple",
"cores",
"on",
"the",
"current",
"machine",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/multi.py#L15-L29 | train | 218,865 |
bcbio/bcbio-nextgen | bcbio/distributed/multi.py | zeromq_aware_logging | def zeromq_aware_logging(f):
"""Ensure multiprocessing logging uses ZeroMQ queues.
ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This
ensures the local logging uses existing ZeroMQ logging queues.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
config = None
for arg in args:
if config_utils.is_std_config_arg(arg):
config = arg
break
elif config_utils.is_nested_config_arg(arg):
config = arg["config"]
elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(arg[0]):
config = arg[0]["config"]
break
assert config, "Could not find config dictionary in function arguments."
if config.get("parallel", {}).get("log_queue") and not config.get("parallel", {}).get("wrapper"):
handler = setup_local_logging(config, config["parallel"])
else:
handler = None
try:
out = f(*args, **kwargs)
finally:
if handler and hasattr(handler, "close"):
handler.close()
return out
return wrapper | python | def zeromq_aware_logging(f):
"""Ensure multiprocessing logging uses ZeroMQ queues.
ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This
ensures the local logging uses existing ZeroMQ logging queues.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
config = None
for arg in args:
if config_utils.is_std_config_arg(arg):
config = arg
break
elif config_utils.is_nested_config_arg(arg):
config = arg["config"]
elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(arg[0]):
config = arg[0]["config"]
break
assert config, "Could not find config dictionary in function arguments."
if config.get("parallel", {}).get("log_queue") and not config.get("parallel", {}).get("wrapper"):
handler = setup_local_logging(config, config["parallel"])
else:
handler = None
try:
out = f(*args, **kwargs)
finally:
if handler and hasattr(handler, "close"):
handler.close()
return out
return wrapper | [
"def",
"zeromq_aware_logging",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"None",
"for",
"arg",
"in",
"args",
":",
"if",
"config_utils",
"... | Ensure multiprocessing logging uses ZeroMQ queues.
ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This
ensures the local logging uses existing ZeroMQ logging queues. | [
"Ensure",
"multiprocessing",
"logging",
"uses",
"ZeroMQ",
"queues",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/multi.py#L39-L68 | train | 218,866 |
bcbio/bcbio-nextgen | bcbio/distributed/multi.py | run_multicore | def run_multicore(fn, items, config, parallel=None):
"""Run the function using multiple cores on the given items to process.
"""
if len(items) == 0:
return []
if parallel is None or "num_jobs" not in parallel:
if parallel is None:
parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1)}
sysinfo = system.get_info({}, parallel)
parallel = resources.calculate(parallel, items, sysinfo, config,
parallel.get("multiplier", 1),
max_multicore=int(parallel.get("max_multicore", sysinfo["cores"])))
items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"]) for x in items]
if joblib is None:
raise ImportError("Need joblib for multiprocessing parallelization")
out = []
for data in joblib.Parallel(parallel["num_jobs"], batch_size=1, backend="multiprocessing")(joblib.delayed(fn)(*x) for x in items):
if data:
out.extend(data)
return out | python | def run_multicore(fn, items, config, parallel=None):
"""Run the function using multiple cores on the given items to process.
"""
if len(items) == 0:
return []
if parallel is None or "num_jobs" not in parallel:
if parallel is None:
parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1)}
sysinfo = system.get_info({}, parallel)
parallel = resources.calculate(parallel, items, sysinfo, config,
parallel.get("multiplier", 1),
max_multicore=int(parallel.get("max_multicore", sysinfo["cores"])))
items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"]) for x in items]
if joblib is None:
raise ImportError("Need joblib for multiprocessing parallelization")
out = []
for data in joblib.Parallel(parallel["num_jobs"], batch_size=1, backend="multiprocessing")(joblib.delayed(fn)(*x) for x in items):
if data:
out.extend(data)
return out | [
"def",
"run_multicore",
"(",
"fn",
",",
"items",
",",
"config",
",",
"parallel",
"=",
"None",
")",
":",
"if",
"len",
"(",
"items",
")",
"==",
"0",
":",
"return",
"[",
"]",
"if",
"parallel",
"is",
"None",
"or",
"\"num_jobs\"",
"not",
"in",
"parallel",... | Run the function using multiple cores on the given items to process. | [
"Run",
"the",
"function",
"using",
"multiple",
"cores",
"on",
"the",
"given",
"items",
"to",
"process",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/multi.py#L70-L89 | train | 218,867 |
bcbio/bcbio-nextgen | scripts/bcbio_fastq_umi_prep.py | _add_umis_with_fastp | def _add_umis_with_fastp(read_fq, umi_fq, out_fq, cores):
"""Add UMIs to reads from separate UMI file using fastp.
"""
with utils.open_gzipsafe(umi_fq) as in_handle:
in_handle.readline() # name
umi_size = len(in_handle.readline().strip())
cmd = ("fastp -Q -A -L -G -w 1 --in1 {read_fq} --in2 {umi_fq} "
"--umi --umi_prefix UMI --umi_loc read2 --umi_len {umi_size} "
"--out1 >(bgzip --threads {cores} -c > {out_fq}) --out2 /dev/null "
"-j /dev/null -h /dev/null")
do.run(cmd.format(**locals()), "Add UMIs to fastq file with fastp") | python | def _add_umis_with_fastp(read_fq, umi_fq, out_fq, cores):
"""Add UMIs to reads from separate UMI file using fastp.
"""
with utils.open_gzipsafe(umi_fq) as in_handle:
in_handle.readline() # name
umi_size = len(in_handle.readline().strip())
cmd = ("fastp -Q -A -L -G -w 1 --in1 {read_fq} --in2 {umi_fq} "
"--umi --umi_prefix UMI --umi_loc read2 --umi_len {umi_size} "
"--out1 >(bgzip --threads {cores} -c > {out_fq}) --out2 /dev/null "
"-j /dev/null -h /dev/null")
do.run(cmd.format(**locals()), "Add UMIs to fastq file with fastp") | [
"def",
"_add_umis_with_fastp",
"(",
"read_fq",
",",
"umi_fq",
",",
"out_fq",
",",
"cores",
")",
":",
"with",
"utils",
".",
"open_gzipsafe",
"(",
"umi_fq",
")",
"as",
"in_handle",
":",
"in_handle",
".",
"readline",
"(",
")",
"# name",
"umi_size",
"=",
"len"... | Add UMIs to reads from separate UMI file using fastp. | [
"Add",
"UMIs",
"to",
"reads",
"from",
"separate",
"UMI",
"file",
"using",
"fastp",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_fastq_umi_prep.py#L117-L127 | train | 218,868 |
bcbio/bcbio-nextgen | scripts/bcbio_fastq_umi_prep.py | _find_umi | def _find_umi(files):
"""Find UMI file using different naming schemes.
R1/R2/R3 => R1/R3 with R2 UMI
R1/R2/I1 => R1/R2 with I1 UMI
"""
base = os.path.basename(_commonprefix(files))
def _file_ext(f):
exts = utils.splitext_plus(os.path.basename(f).replace(base, ""))[0].split("_")
exts = [x for x in exts if x]
return exts[0]
exts = dict([(_file_ext(f), f) for f in files])
if "I1" in exts:
return exts["R1"], exts["R2"], exts["I1"]
else:
assert "R3" in exts, exts
return exts["R1"], exts["R3"], exts["R2"] | python | def _find_umi(files):
"""Find UMI file using different naming schemes.
R1/R2/R3 => R1/R3 with R2 UMI
R1/R2/I1 => R1/R2 with I1 UMI
"""
base = os.path.basename(_commonprefix(files))
def _file_ext(f):
exts = utils.splitext_plus(os.path.basename(f).replace(base, ""))[0].split("_")
exts = [x for x in exts if x]
return exts[0]
exts = dict([(_file_ext(f), f) for f in files])
if "I1" in exts:
return exts["R1"], exts["R2"], exts["I1"]
else:
assert "R3" in exts, exts
return exts["R1"], exts["R3"], exts["R2"] | [
"def",
"_find_umi",
"(",
"files",
")",
":",
"base",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"_commonprefix",
"(",
"files",
")",
")",
"def",
"_file_ext",
"(",
"f",
")",
":",
"exts",
"=",
"utils",
".",
"splitext_plus",
"(",
"os",
".",
"path",
"... | Find UMI file using different naming schemes.
R1/R2/R3 => R1/R3 with R2 UMI
R1/R2/I1 => R1/R2 with I1 UMI | [
"Find",
"UMI",
"file",
"using",
"different",
"naming",
"schemes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_fastq_umi_prep.py#L176-L194 | train | 218,869 |
bcbio/bcbio-nextgen | scripts/bcbio_fastq_umi_prep.py | _commonprefix | def _commonprefix(files):
"""Retrieve a common prefix for files without extra _R1 _I1 extensions.
Allows alternative naming schemes (R1/R2/R3) (R1/R2/I1).
"""
out = os.path.commonprefix(files)
out = out.rstrip("_R")
out = out.rstrip("_I")
out = out.rstrip("_")
return out | python | def _commonprefix(files):
"""Retrieve a common prefix for files without extra _R1 _I1 extensions.
Allows alternative naming schemes (R1/R2/R3) (R1/R2/I1).
"""
out = os.path.commonprefix(files)
out = out.rstrip("_R")
out = out.rstrip("_I")
out = out.rstrip("_")
return out | [
"def",
"_commonprefix",
"(",
"files",
")",
":",
"out",
"=",
"os",
".",
"path",
".",
"commonprefix",
"(",
"files",
")",
"out",
"=",
"out",
".",
"rstrip",
"(",
"\"_R\"",
")",
"out",
"=",
"out",
".",
"rstrip",
"(",
"\"_I\"",
")",
"out",
"=",
"out",
... | Retrieve a common prefix for files without extra _R1 _I1 extensions.
Allows alternative naming schemes (R1/R2/R3) (R1/R2/I1). | [
"Retrieve",
"a",
"common",
"prefix",
"for",
"files",
"without",
"extra",
"_R1",
"_I1",
"extensions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_fastq_umi_prep.py#L196-L205 | train | 218,870 |
bcbio/bcbio-nextgen | bcbio/variation/vfilter.py | cutoff_w_expression | def cutoff_w_expression(vcf_file, expression, data, name="+", filterext="",
extra_cmd="", limit_regions="variant_regions"):
"""Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4.
"""
base, ext = utils.splitext_plus(vcf_file)
out_file = "{base}-filter{filterext}{ext}".format(**locals())
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
if vcfutils.vcf_has_variants(vcf_file):
bcftools = config_utils.get_program("bcftools", data["config"])
bgzip_cmd = "| bgzip -c" if out_file.endswith(".gz") else ""
intervals = ""
if limit_regions == "variant_regions":
variant_regions = dd.get_variant_regions(data)
if variant_regions:
intervals = "-T %s" % vcfutils.bgzip_and_index(variant_regions, data["config"])
cmd = ("{bcftools} filter -O v {intervals} --soft-filter '{name}' "
"-e '{expression}' -m '+' {vcf_file} {extra_cmd} {bgzip_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()),
"Cutoff-based soft filtering %s with %s" % (vcf_file, expression), data)
else:
shutil.copy(vcf_file, out_file)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file | python | def cutoff_w_expression(vcf_file, expression, data, name="+", filterext="",
extra_cmd="", limit_regions="variant_regions"):
"""Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4.
"""
base, ext = utils.splitext_plus(vcf_file)
out_file = "{base}-filter{filterext}{ext}".format(**locals())
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
if vcfutils.vcf_has_variants(vcf_file):
bcftools = config_utils.get_program("bcftools", data["config"])
bgzip_cmd = "| bgzip -c" if out_file.endswith(".gz") else ""
intervals = ""
if limit_regions == "variant_regions":
variant_regions = dd.get_variant_regions(data)
if variant_regions:
intervals = "-T %s" % vcfutils.bgzip_and_index(variant_regions, data["config"])
cmd = ("{bcftools} filter -O v {intervals} --soft-filter '{name}' "
"-e '{expression}' -m '+' {vcf_file} {extra_cmd} {bgzip_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()),
"Cutoff-based soft filtering %s with %s" % (vcf_file, expression), data)
else:
shutil.copy(vcf_file, out_file)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file | [
"def",
"cutoff_w_expression",
"(",
"vcf_file",
",",
"expression",
",",
"data",
",",
"name",
"=",
"\"+\"",
",",
"filterext",
"=",
"\"\"",
",",
"extra_cmd",
"=",
"\"\"",
",",
"limit_regions",
"=",
"\"variant_regions\"",
")",
":",
"base",
",",
"ext",
"=",
"ut... | Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4. | [
"Perform",
"cutoff",
"-",
"based",
"soft",
"filtering",
"using",
"bcftools",
"expressions",
"like",
"%QUAL",
"<",
"20",
"||",
"DP",
"<",
"4",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L21-L45 | train | 218,871 |
bcbio/bcbio-nextgen | bcbio/variation/vfilter.py | _freebayes_custom | def _freebayes_custom(in_file, ref_file, data):
"""Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results.
Experimental: for testing new methods.
"""
if vcfutils.get_paired_phenotype(data):
return None
config = data["config"]
bv_ver = programs.get_version("bcbio_variation", config=config)
if LooseVersion(bv_ver) < LooseVersion("0.1.1"):
return None
out_file = "%s-filter%s" % os.path.splitext(in_file)
if not utils.file_exists(out_file):
tmp_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp"))
resources = config_utils.get_resources("bcbio_variation", config)
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
java_args = ["-Djava.io.tmpdir=%s" % tmp_dir]
cmd = ["bcbio-variation"] + jvm_opts + java_args + \
["variant-filter", "freebayes", in_file, ref_file]
do.run(cmd, "Custom FreeBayes filtering using bcbio.variation")
return out_file | python | def _freebayes_custom(in_file, ref_file, data):
"""Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results.
Experimental: for testing new methods.
"""
if vcfutils.get_paired_phenotype(data):
return None
config = data["config"]
bv_ver = programs.get_version("bcbio_variation", config=config)
if LooseVersion(bv_ver) < LooseVersion("0.1.1"):
return None
out_file = "%s-filter%s" % os.path.splitext(in_file)
if not utils.file_exists(out_file):
tmp_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp"))
resources = config_utils.get_resources("bcbio_variation", config)
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
java_args = ["-Djava.io.tmpdir=%s" % tmp_dir]
cmd = ["bcbio-variation"] + jvm_opts + java_args + \
["variant-filter", "freebayes", in_file, ref_file]
do.run(cmd, "Custom FreeBayes filtering using bcbio.variation")
return out_file | [
"def",
"_freebayes_custom",
"(",
"in_file",
",",
"ref_file",
",",
"data",
")",
":",
"if",
"vcfutils",
".",
"get_paired_phenotype",
"(",
"data",
")",
":",
"return",
"None",
"config",
"=",
"data",
"[",
"\"config\"",
"]",
"bv_ver",
"=",
"programs",
".",
"get_... | Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results.
Experimental: for testing new methods. | [
"Custom",
"FreeBayes",
"filtering",
"using",
"bcbio",
".",
"variation",
"tuned",
"to",
"human",
"NA12878",
"results",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L56-L76 | train | 218,872 |
bcbio/bcbio-nextgen | bcbio/variation/vfilter.py | _freebayes_cutoff | def _freebayes_cutoff(in_file, data):
"""Perform filtering of FreeBayes results, flagging low confidence calls.
Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity
of homozygote and heterozygote calling on depth:
http://www.ncbi.nlm.nih.gov/pubmed/23773188
and high depth heterozygote SNP filtering based on Heng Li's work
evaluating variant calling artifacts:
http://arxiv.org/abs/1404.0929
Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome.
"""
if not vcfutils.vcf_has_variants(in_file):
base, ext = utils.splitext_plus(in_file)
out_file = "{base}-filter{ext}".format(**locals())
if not utils.file_exists(out_file):
shutil.copy(in_file, out_file)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
depth_thresh, qual_thresh = None, None
if _do_high_depth_filter(data):
stats = _calc_vcf_stats(in_file)
if stats["avg_depth"] > 0:
depth_thresh = int(math.ceil(stats["avg_depth"] + 3 * math.pow(stats["avg_depth"], 0.5)))
qual_thresh = depth_thresh * 2.0 # Multiplier from default GATK QD cutoff filter
filters = ('(AF[0] <= 0.5 && (max(FORMAT/DP) < 4 || (max(FORMAT/DP) < 13 && %QUAL < 10))) || '
'(AF[0] > 0.5 && (max(FORMAT/DP) < 4 && %QUAL < 50))')
if depth_thresh:
filters += ' || (%QUAL < {qual_thresh} && max(FORMAT/DP) > {depth_thresh} && AF[0] <= 0.5)'.format(**locals())
return cutoff_w_expression(in_file, filters, data, name="FBQualDepth") | python | def _freebayes_cutoff(in_file, data):
"""Perform filtering of FreeBayes results, flagging low confidence calls.
Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity
of homozygote and heterozygote calling on depth:
http://www.ncbi.nlm.nih.gov/pubmed/23773188
and high depth heterozygote SNP filtering based on Heng Li's work
evaluating variant calling artifacts:
http://arxiv.org/abs/1404.0929
Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome.
"""
if not vcfutils.vcf_has_variants(in_file):
base, ext = utils.splitext_plus(in_file)
out_file = "{base}-filter{ext}".format(**locals())
if not utils.file_exists(out_file):
shutil.copy(in_file, out_file)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
depth_thresh, qual_thresh = None, None
if _do_high_depth_filter(data):
stats = _calc_vcf_stats(in_file)
if stats["avg_depth"] > 0:
depth_thresh = int(math.ceil(stats["avg_depth"] + 3 * math.pow(stats["avg_depth"], 0.5)))
qual_thresh = depth_thresh * 2.0 # Multiplier from default GATK QD cutoff filter
filters = ('(AF[0] <= 0.5 && (max(FORMAT/DP) < 4 || (max(FORMAT/DP) < 13 && %QUAL < 10))) || '
'(AF[0] > 0.5 && (max(FORMAT/DP) < 4 && %QUAL < 50))')
if depth_thresh:
filters += ' || (%QUAL < {qual_thresh} && max(FORMAT/DP) > {depth_thresh} && AF[0] <= 0.5)'.format(**locals())
return cutoff_w_expression(in_file, filters, data, name="FBQualDepth") | [
"def",
"_freebayes_cutoff",
"(",
"in_file",
",",
"data",
")",
":",
"if",
"not",
"vcfutils",
".",
"vcf_has_variants",
"(",
"in_file",
")",
":",
"base",
",",
"ext",
"=",
"utils",
".",
"splitext_plus",
"(",
"in_file",
")",
"out_file",
"=",
"\"{base}-filter{ext}... | Perform filtering of FreeBayes results, flagging low confidence calls.
Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity
of homozygote and heterozygote calling on depth:
http://www.ncbi.nlm.nih.gov/pubmed/23773188
and high depth heterozygote SNP filtering based on Heng Li's work
evaluating variant calling artifacts:
http://arxiv.org/abs/1404.0929
Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome. | [
"Perform",
"filtering",
"of",
"FreeBayes",
"results",
"flagging",
"low",
"confidence",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L78-L112 | train | 218,873 |
bcbio/bcbio-nextgen | bcbio/variation/vfilter.py | _calc_vcf_stats | def _calc_vcf_stats(in_file):
"""Calculate statistics on VCF for filtering, saving to a file for quick re-runs.
"""
out_file = "%s-stats.yaml" % utils.splitext_plus(in_file)[0]
if not utils.file_exists(out_file):
stats = {"avg_depth": _average_called_depth(in_file)}
with open(out_file, "w") as out_handle:
yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
return stats
else:
with open(out_file) as in_handle:
stats = yaml.safe_load(in_handle)
return stats | python | def _calc_vcf_stats(in_file):
"""Calculate statistics on VCF for filtering, saving to a file for quick re-runs.
"""
out_file = "%s-stats.yaml" % utils.splitext_plus(in_file)[0]
if not utils.file_exists(out_file):
stats = {"avg_depth": _average_called_depth(in_file)}
with open(out_file, "w") as out_handle:
yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
return stats
else:
with open(out_file) as in_handle:
stats = yaml.safe_load(in_handle)
return stats | [
"def",
"_calc_vcf_stats",
"(",
"in_file",
")",
":",
"out_file",
"=",
"\"%s-stats.yaml\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"in_file",
")",
"[",
"0",
"]",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"stats",
"=",
"{",
"\"a... | Calculate statistics on VCF for filtering, saving to a file for quick re-runs. | [
"Calculate",
"statistics",
"on",
"VCF",
"for",
"filtering",
"saving",
"to",
"a",
"file",
"for",
"quick",
"re",
"-",
"runs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L121-L133 | train | 218,874 |
bcbio/bcbio-nextgen | bcbio/variation/vfilter.py | _average_called_depth | def _average_called_depth(in_file):
"""Retrieve the average depth of called reads in the provided VCF.
"""
import cyvcf2
depths = []
for rec in cyvcf2.VCF(str(in_file)):
d = rec.INFO.get("DP")
if d is not None:
depths.append(int(d))
if len(depths) > 0:
return int(math.ceil(numpy.mean(depths)))
else:
return 0 | python | def _average_called_depth(in_file):
"""Retrieve the average depth of called reads in the provided VCF.
"""
import cyvcf2
depths = []
for rec in cyvcf2.VCF(str(in_file)):
d = rec.INFO.get("DP")
if d is not None:
depths.append(int(d))
if len(depths) > 0:
return int(math.ceil(numpy.mean(depths)))
else:
return 0 | [
"def",
"_average_called_depth",
"(",
"in_file",
")",
":",
"import",
"cyvcf2",
"depths",
"=",
"[",
"]",
"for",
"rec",
"in",
"cyvcf2",
".",
"VCF",
"(",
"str",
"(",
"in_file",
")",
")",
":",
"d",
"=",
"rec",
".",
"INFO",
".",
"get",
"(",
"\"DP\"",
")"... | Retrieve the average depth of called reads in the provided VCF. | [
"Retrieve",
"the",
"average",
"depth",
"of",
"called",
"reads",
"in",
"the",
"provided",
"VCF",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L135-L147 | train | 218,875 |
bcbio/bcbio-nextgen | bcbio/variation/vfilter.py | platypus | def platypus(in_file, data):
"""Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter.
Platypus uses its own VCF nomenclature: TC == DP, FR == AF
Platypus gVCF output appears to have an 0/1 index problem so the reference block
regions are 1 base outside regions of interest. We avoid limiting regions during
filtering when using it.
"""
filters = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '
'(TC < 13 && %QUAL < 10) || '
'(FR[0] > 0.5 && TC < 4 && %QUAL < 50)')
limit_regions = "variant_regions" if not vcfutils.is_gvcf_file(in_file) else None
return cutoff_w_expression(in_file, filters, data, name="PlatQualDepth",
extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'", limit_regions=limit_regions) | python | def platypus(in_file, data):
"""Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter.
Platypus uses its own VCF nomenclature: TC == DP, FR == AF
Platypus gVCF output appears to have an 0/1 index problem so the reference block
regions are 1 base outside regions of interest. We avoid limiting regions during
filtering when using it.
"""
filters = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '
'(TC < 13 && %QUAL < 10) || '
'(FR[0] > 0.5 && TC < 4 && %QUAL < 50)')
limit_regions = "variant_regions" if not vcfutils.is_gvcf_file(in_file) else None
return cutoff_w_expression(in_file, filters, data, name="PlatQualDepth",
extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'", limit_regions=limit_regions) | [
"def",
"platypus",
"(",
"in_file",
",",
"data",
")",
":",
"filters",
"=",
"(",
"'(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '",
"'(TC < 13 && %QUAL < 10) || '",
"'(FR[0] > 0.5 && TC < 4 && %QUAL < 50)'",
")",
"limit_regions",
"=",
"\"variant_regions\"",
"if",
"not",
"vcfutils"... | Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter.
Platypus uses its own VCF nomenclature: TC == DP, FR == AF
Platypus gVCF output appears to have an 0/1 index problem so the reference block
regions are 1 base outside regions of interest. We avoid limiting regions during
filtering when using it. | [
"Filter",
"Platypus",
"calls",
"removing",
"Q20",
"filter",
"and",
"replacing",
"with",
"depth",
"and",
"quality",
"based",
"filter",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L149-L163 | train | 218,876 |
bcbio/bcbio-nextgen | bcbio/variation/vfilter.py | gatk_snp_cutoff | def gatk_snp_cutoff(in_file, data):
"""Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations.
We have a more lenient mapping quality (MQ) filter compared to GATK defaults.
The recommended filter (MQ < 40) is too stringent, so we adjust to 30:
http://imgur.com/a/oHRVB
QD and FS are not calculated when generating gVCF output:
https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300
The extra command removes escaped quotes in the VCF output which
pyVCF fails on.
Does not use the GATK best practice recommend SOR filter (SOR > 3.0) as it
has a negative impact on sensitivity relative to precision:
https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38
"""
filters = ["MQRankSum < -12.5", "ReadPosRankSum < -8.0"]
# GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores
# resulting in excessive filtering, so avoid this metric
variantcaller = utils.get_in(data, ("config", "algorithm", "variantcaller"))
if variantcaller not in ["gatk-haplotype", "haplotyper"]:
filters.append("HaplotypeScore > 13.0")
# Additional filter metrics, unless using raw GATK HaplotypeCaller or Sentieon gVCFs
if not (vcfutils.is_gvcf_file(in_file) and variantcaller in ["gatk-haplotype", "haplotyper"]):
filters += ["QD < 2.0"]
filters += ["FS > 60.0"]
filters += _gatk_general()
filters += ["MQ < 30.0"]
return cutoff_w_expression(in_file, 'TYPE="snp" && (%s)' % " || ".join(filters), data, "GATKCutoffSNP", "SNP",
extra_cmd=r"""| sed 's/\\"//g'""") | python | def gatk_snp_cutoff(in_file, data):
"""Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations.
We have a more lenient mapping quality (MQ) filter compared to GATK defaults.
The recommended filter (MQ < 40) is too stringent, so we adjust to 30:
http://imgur.com/a/oHRVB
QD and FS are not calculated when generating gVCF output:
https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300
The extra command removes escaped quotes in the VCF output which
pyVCF fails on.
Does not use the GATK best practice recommend SOR filter (SOR > 3.0) as it
has a negative impact on sensitivity relative to precision:
https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38
"""
filters = ["MQRankSum < -12.5", "ReadPosRankSum < -8.0"]
# GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores
# resulting in excessive filtering, so avoid this metric
variantcaller = utils.get_in(data, ("config", "algorithm", "variantcaller"))
if variantcaller not in ["gatk-haplotype", "haplotyper"]:
filters.append("HaplotypeScore > 13.0")
# Additional filter metrics, unless using raw GATK HaplotypeCaller or Sentieon gVCFs
if not (vcfutils.is_gvcf_file(in_file) and variantcaller in ["gatk-haplotype", "haplotyper"]):
filters += ["QD < 2.0"]
filters += ["FS > 60.0"]
filters += _gatk_general()
filters += ["MQ < 30.0"]
return cutoff_w_expression(in_file, 'TYPE="snp" && (%s)' % " || ".join(filters), data, "GATKCutoffSNP", "SNP",
extra_cmd=r"""| sed 's/\\"//g'""") | [
"def",
"gatk_snp_cutoff",
"(",
"in_file",
",",
"data",
")",
":",
"filters",
"=",
"[",
"\"MQRankSum < -12.5\"",
",",
"\"ReadPosRankSum < -8.0\"",
"]",
"# GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores",
"# resulting in excessive filtering, so avoid this metri... | Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations.
We have a more lenient mapping quality (MQ) filter compared to GATK defaults.
The recommended filter (MQ < 40) is too stringent, so we adjust to 30:
http://imgur.com/a/oHRVB
QD and FS are not calculated when generating gVCF output:
https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300
The extra command removes escaped quotes in the VCF output which
pyVCF fails on.
Does not use the GATK best practice recommend SOR filter (SOR > 3.0) as it
has a negative impact on sensitivity relative to precision:
https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38 | [
"Perform",
"cutoff",
"-",
"based",
"soft",
"filtering",
"on",
"GATK",
"SNPs",
"using",
"best",
"-",
"practice",
"recommendations",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L183-L214 | train | 218,877 |
bcbio/bcbio-nextgen | bcbio/bam/counts.py | random_regions | def random_regions(base, n, size):
"""Generate n random regions of 'size' in the provided base spread.
"""
spread = size // 2
base_info = collections.defaultdict(list)
for space, start, end in base:
base_info[space].append(start + spread)
base_info[space].append(end - spread)
regions = []
for _ in range(n):
space = random.choice(base_info.keys())
pos = random.randint(min(base_info[space]), max(base_info[space]))
regions.append([space, pos-spread, pos+spread])
return regions | python | def random_regions(base, n, size):
"""Generate n random regions of 'size' in the provided base spread.
"""
spread = size // 2
base_info = collections.defaultdict(list)
for space, start, end in base:
base_info[space].append(start + spread)
base_info[space].append(end - spread)
regions = []
for _ in range(n):
space = random.choice(base_info.keys())
pos = random.randint(min(base_info[space]), max(base_info[space]))
regions.append([space, pos-spread, pos+spread])
return regions | [
"def",
"random_regions",
"(",
"base",
",",
"n",
",",
"size",
")",
":",
"spread",
"=",
"size",
"//",
"2",
"base_info",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"space",
",",
"start",
",",
"end",
"in",
"base",
":",
"base_info",
... | Generate n random regions of 'size' in the provided base spread. | [
"Generate",
"n",
"random",
"regions",
"of",
"size",
"in",
"the",
"provided",
"base",
"spread",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L52-L65 | train | 218,878 |
bcbio/bcbio-nextgen | bcbio/bam/counts.py | NormalizedBam.all_regions | def all_regions(self):
"""Get a tuple of all chromosome, start and end regions.
"""
regions = []
for sq in self._bam.header["SQ"]:
regions.append((sq["SN"], 1, int(sq["LN"])))
return regions | python | def all_regions(self):
"""Get a tuple of all chromosome, start and end regions.
"""
regions = []
for sq in self._bam.header["SQ"]:
regions.append((sq["SN"], 1, int(sq["LN"])))
return regions | [
"def",
"all_regions",
"(",
"self",
")",
":",
"regions",
"=",
"[",
"]",
"for",
"sq",
"in",
"self",
".",
"_bam",
".",
"header",
"[",
"\"SQ\"",
"]",
":",
"regions",
".",
"append",
"(",
"(",
"sq",
"[",
"\"SN\"",
"]",
",",
"1",
",",
"int",
"(",
"sq"... | Get a tuple of all chromosome, start and end regions. | [
"Get",
"a",
"tuple",
"of",
"all",
"chromosome",
"start",
"and",
"end",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L25-L31 | train | 218,879 |
bcbio/bcbio-nextgen | bcbio/bam/counts.py | NormalizedBam.read_count | def read_count(self, space, start, end):
"""Retrieve the normalized read count in the provided region.
"""
read_counts = 0
for read in self._bam.fetch(space, start, end):
read_counts += 1
return self._normalize(read_counts, self._total) | python | def read_count(self, space, start, end):
"""Retrieve the normalized read count in the provided region.
"""
read_counts = 0
for read in self._bam.fetch(space, start, end):
read_counts += 1
return self._normalize(read_counts, self._total) | [
"def",
"read_count",
"(",
"self",
",",
"space",
",",
"start",
",",
"end",
")",
":",
"read_counts",
"=",
"0",
"for",
"read",
"in",
"self",
".",
"_bam",
".",
"fetch",
"(",
"space",
",",
"start",
",",
"end",
")",
":",
"read_counts",
"+=",
"1",
"return... | Retrieve the normalized read count in the provided region. | [
"Retrieve",
"the",
"normalized",
"read",
"count",
"in",
"the",
"provided",
"region",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L33-L39 | train | 218,880 |
bcbio/bcbio-nextgen | bcbio/bam/counts.py | NormalizedBam.coverage_pileup | def coverage_pileup(self, space, start, end):
"""Retrieve pileup coverage across a specified region.
"""
return ((col.pos, self._normalize(col.n, self._total))
for col in self._bam.pileup(space, start, end)) | python | def coverage_pileup(self, space, start, end):
"""Retrieve pileup coverage across a specified region.
"""
return ((col.pos, self._normalize(col.n, self._total))
for col in self._bam.pileup(space, start, end)) | [
"def",
"coverage_pileup",
"(",
"self",
",",
"space",
",",
"start",
",",
"end",
")",
":",
"return",
"(",
"(",
"col",
".",
"pos",
",",
"self",
".",
"_normalize",
"(",
"col",
".",
"n",
",",
"self",
".",
"_total",
")",
")",
"for",
"col",
"in",
"self"... | Retrieve pileup coverage across a specified region. | [
"Retrieve",
"pileup",
"coverage",
"across",
"a",
"specified",
"region",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L41-L45 | train | 218,881 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/phylowgs.py | _prepare_summary | def _prepare_summary(evolve_file, ssm_file, cnv_file, work_dir, somatic_info):
"""Prepare a summary with gene-labelled heterogeneity from PhyloWGS predictions.
"""
out_file = os.path.join(work_dir, "%s-phylowgs.txt" % somatic_info.tumor_name)
if not utils.file_uptodate(out_file, evolve_file):
with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
ssm_locs = _read_ssm_locs(ssm_file)
cnv_ssms = _read_cnv_ssms(cnv_file)
for i, (ids, tree) in enumerate(_evolve_reader(evolve_file)):
out_handle.write("* Tree %s\n" % (i + 1))
out_handle.write("\n" + "\n".join(tree) + "\n\n")
for nid, freq, gids in ids:
genes = _gids_to_genes(gids, ssm_locs, cnv_ssms, somatic_info.tumor_data)
out_handle.write("%s\t%s\t%s\n" % (nid, freq, ",".join(genes)))
out_handle.write("\n")
return out_file | python | def _prepare_summary(evolve_file, ssm_file, cnv_file, work_dir, somatic_info):
"""Prepare a summary with gene-labelled heterogeneity from PhyloWGS predictions.
"""
out_file = os.path.join(work_dir, "%s-phylowgs.txt" % somatic_info.tumor_name)
if not utils.file_uptodate(out_file, evolve_file):
with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
ssm_locs = _read_ssm_locs(ssm_file)
cnv_ssms = _read_cnv_ssms(cnv_file)
for i, (ids, tree) in enumerate(_evolve_reader(evolve_file)):
out_handle.write("* Tree %s\n" % (i + 1))
out_handle.write("\n" + "\n".join(tree) + "\n\n")
for nid, freq, gids in ids:
genes = _gids_to_genes(gids, ssm_locs, cnv_ssms, somatic_info.tumor_data)
out_handle.write("%s\t%s\t%s\n" % (nid, freq, ",".join(genes)))
out_handle.write("\n")
return out_file | [
"def",
"_prepare_summary",
"(",
"evolve_file",
",",
"ssm_file",
",",
"cnv_file",
",",
"work_dir",
",",
"somatic_info",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-phylowgs.txt\"",
"%",
"somatic_info",
".",
"tumor_name... | Prepare a summary with gene-labelled heterogeneity from PhyloWGS predictions. | [
"Prepare",
"a",
"summary",
"with",
"gene",
"-",
"labelled",
"heterogeneity",
"from",
"PhyloWGS",
"predictions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L39-L55 | train | 218,882 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/phylowgs.py | _gids_to_genes | def _gids_to_genes(gids, ssm_locs, cnv_ssms, data):
"""Convert support ids for SNPs and SSMs into associated genes.
"""
locs = collections.defaultdict(set)
for gid in gids:
cur_locs = []
try:
cur_locs.append(ssm_locs[gid])
except KeyError:
for ssm_loc in cnv_ssms.get(gid, []):
cur_locs.append(ssm_locs[ssm_loc])
for chrom, pos in cur_locs:
locs[chrom].add(pos)
genes = set([])
with tx_tmpdir(data) as tmpdir:
chrom_prefix = "chr" if next(ref.file_contigs(dd.get_ref_file(data))).name.startswith("chr") else ""
loc_file = os.path.join(tmpdir, "battenberg_find_genes.bed")
with open(loc_file, "w") as out_handle:
for chrom in sorted(locs.keys()):
for loc in sorted(list(locs[chrom])):
out_handle.write("%s%s\t%s\t%s\n" % (chrom_prefix, chrom, loc - 1, loc))
ann_file = annotate.add_genes(loc_file, data, max_distance=10000)
for r in pybedtools.BedTool(ann_file):
for gene in r.name.split(","):
if gene != ".":
genes.add(gene)
return sorted(list(genes)) | python | def _gids_to_genes(gids, ssm_locs, cnv_ssms, data):
"""Convert support ids for SNPs and SSMs into associated genes.
"""
locs = collections.defaultdict(set)
for gid in gids:
cur_locs = []
try:
cur_locs.append(ssm_locs[gid])
except KeyError:
for ssm_loc in cnv_ssms.get(gid, []):
cur_locs.append(ssm_locs[ssm_loc])
for chrom, pos in cur_locs:
locs[chrom].add(pos)
genes = set([])
with tx_tmpdir(data) as tmpdir:
chrom_prefix = "chr" if next(ref.file_contigs(dd.get_ref_file(data))).name.startswith("chr") else ""
loc_file = os.path.join(tmpdir, "battenberg_find_genes.bed")
with open(loc_file, "w") as out_handle:
for chrom in sorted(locs.keys()):
for loc in sorted(list(locs[chrom])):
out_handle.write("%s%s\t%s\t%s\n" % (chrom_prefix, chrom, loc - 1, loc))
ann_file = annotate.add_genes(loc_file, data, max_distance=10000)
for r in pybedtools.BedTool(ann_file):
for gene in r.name.split(","):
if gene != ".":
genes.add(gene)
return sorted(list(genes)) | [
"def",
"_gids_to_genes",
"(",
"gids",
",",
"ssm_locs",
",",
"cnv_ssms",
",",
"data",
")",
":",
"locs",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"for",
"gid",
"in",
"gids",
":",
"cur_locs",
"=",
"[",
"]",
"try",
":",
"cur_locs",
".",
"... | Convert support ids for SNPs and SSMs into associated genes. | [
"Convert",
"support",
"ids",
"for",
"SNPs",
"and",
"SSMs",
"into",
"associated",
"genes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L57-L83 | train | 218,883 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/phylowgs.py | _evolve_reader | def _evolve_reader(in_file):
"""Generate a list of region IDs and trees from a top_k_trees evolve.py file.
"""
cur_id_list = None
cur_tree = None
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("id,"):
if cur_id_list:
yield cur_id_list, cur_tree
cur_id_list = []
cur_tree = None
elif cur_tree is not None:
if line.strip() and not line.startswith("Number of non-empty"):
cur_tree.append(line.rstrip())
elif not line.strip() and cur_id_list and len(cur_id_list) > 0:
cur_tree = []
elif line.strip():
parts = []
for part in line.strip().split("\t"):
if part.endswith(","):
part = part[:-1]
parts.append(part)
if len(parts) > 4:
nid, freq, _, _, support = parts
cur_id_list.append((nid, freq, support.split("; ")))
if cur_id_list:
yield cur_id_list, cur_tree | python | def _evolve_reader(in_file):
"""Generate a list of region IDs and trees from a top_k_trees evolve.py file.
"""
cur_id_list = None
cur_tree = None
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("id,"):
if cur_id_list:
yield cur_id_list, cur_tree
cur_id_list = []
cur_tree = None
elif cur_tree is not None:
if line.strip() and not line.startswith("Number of non-empty"):
cur_tree.append(line.rstrip())
elif not line.strip() and cur_id_list and len(cur_id_list) > 0:
cur_tree = []
elif line.strip():
parts = []
for part in line.strip().split("\t"):
if part.endswith(","):
part = part[:-1]
parts.append(part)
if len(parts) > 4:
nid, freq, _, _, support = parts
cur_id_list.append((nid, freq, support.split("; ")))
if cur_id_list:
yield cur_id_list, cur_tree | [
"def",
"_evolve_reader",
"(",
"in_file",
")",
":",
"cur_id_list",
"=",
"None",
"cur_tree",
"=",
"None",
"with",
"open",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"for",
"line",
"in",
"in_handle",
":",
"if",
"line",
".",
"startswith",
"(",
"\"id,\"",
"... | Generate a list of region IDs and trees from a top_k_trees evolve.py file. | [
"Generate",
"a",
"list",
"of",
"region",
"IDs",
"and",
"trees",
"from",
"a",
"top_k_trees",
"evolve",
".",
"py",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L85-L112 | train | 218,884 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/phylowgs.py | _read_cnv_ssms | def _read_cnv_ssms(in_file):
"""Map CNVs to associated SSMs
"""
out = {}
with open(in_file) as in_handle:
in_handle.readline() # header
for line in in_handle:
parts = line.strip().split()
if len(parts) > 3:
cnvid, _, _, ssms = parts
out[cnvid] = [x.split(",")[0] for x in ssms.split(";")]
return out | python | def _read_cnv_ssms(in_file):
"""Map CNVs to associated SSMs
"""
out = {}
with open(in_file) as in_handle:
in_handle.readline() # header
for line in in_handle:
parts = line.strip().split()
if len(parts) > 3:
cnvid, _, _, ssms = parts
out[cnvid] = [x.split(",")[0] for x in ssms.split(";")]
return out | [
"def",
"_read_cnv_ssms",
"(",
"in_file",
")",
":",
"out",
"=",
"{",
"}",
"with",
"open",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"in_handle",
".",
"readline",
"(",
")",
"# header",
"for",
"line",
"in",
"in_handle",
":",
"parts",
"=",
"line",
".",
... | Map CNVs to associated SSMs | [
"Map",
"CNVs",
"to",
"associated",
"SSMs"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L114-L125 | train | 218,885 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/phylowgs.py | _read_ssm_locs | def _read_ssm_locs(in_file):
"""Map SSMs to chromosomal locations.
"""
out = {}
with open(in_file) as in_handle:
in_handle.readline() # header
for line in in_handle:
sid, loc = line.split()[:2]
chrom, pos = loc.split("_")
out[sid] = (chrom, int(pos))
return out | python | def _read_ssm_locs(in_file):
"""Map SSMs to chromosomal locations.
"""
out = {}
with open(in_file) as in_handle:
in_handle.readline() # header
for line in in_handle:
sid, loc = line.split()[:2]
chrom, pos = loc.split("_")
out[sid] = (chrom, int(pos))
return out | [
"def",
"_read_ssm_locs",
"(",
"in_file",
")",
":",
"out",
"=",
"{",
"}",
"with",
"open",
"(",
"in_file",
")",
"as",
"in_handle",
":",
"in_handle",
".",
"readline",
"(",
")",
"# header",
"for",
"line",
"in",
"in_handle",
":",
"sid",
",",
"loc",
"=",
"... | Map SSMs to chromosomal locations. | [
"Map",
"SSMs",
"to",
"chromosomal",
"locations",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L127-L137 | train | 218,886 |
def _run_evolve(ssm_file, cnv_file, work_dir, data):
    """Infer subclonal composition with the PhyloWGS evolve.py script.

    Runs with a fixed random seed for reproducibility, skipping the run when
    the output is already newer than the CNV input. Returns the path to the
    top_k_trees output file.
    """
    evolve_script = os.path.join(os.path.dirname(sys.executable), "evolve.py")
    assert os.path.exists(evolve_script), "Could not find evolve script for PhyloWGS runs."
    out_dir = os.path.join(work_dir, "evolve")
    out_file = os.path.join(out_dir, "top_k_trees")
    if utils.file_uptodate(out_file, cnv_file):
        return out_file
    with file_transaction(data, out_dir) as tx_out_dir:
        # evolve.py writes into the current directory, so run inside the
        # transactional output directory.
        with utils.chdir(tx_out_dir):
            do.run([sys.executable, evolve_script, "-r", "42", ssm_file, cnv_file],
                   "Run PhyloWGS evolution")
    return out_file
"""Run evolve.py to infer subclonal composition.
"""
exe = os.path.join(os.path.dirname(sys.executable), "evolve.py")
assert os.path.exists(exe), "Could not find evolve script for PhyloWGS runs."
out_dir = os.path.join(work_dir, "evolve")
out_file = os.path.join(out_dir, "top_k_trees")
if not utils.file_uptodate(out_file, cnv_file):
with file_transaction(data, out_dir) as tx_out_dir:
with utils.chdir(tx_out_dir):
cmd = [sys.executable, exe, "-r", "42", ssm_file, cnv_file]
do.run(cmd, "Run PhyloWGS evolution")
return out_file | [
"def",
"_run_evolve",
"(",
"ssm_file",
",",
"cnv_file",
",",
"work_dir",
",",
"data",
")",
":",
"exe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"executable",
")",
",",
"\"evolve.py\"",
")",
"assert... | Run evolve.py to infer subclonal composition. | [
"Run",
"evolve",
".",
"py",
"to",
"infer",
"subclonal",
"composition",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L139-L151 | train | 218,887 |
def _prep_inputs(vrn_info, cnv_info, somatic_info, work_dir, config):
    """Prepare inputs for running PhyloWGS from variant and CNV calls.

    Converts variant (VCF) and Battenberg CNV calls into the ssm_data.txt and
    cnv_data.txt files PhyloWGS expects, via the create_phylowgs_inputs.py
    script installed alongside the current interpreter. Existing output files
    are reused without re-running.

    Returns:
        tuple: (ssm_file, cnv_file) paths inside work_dir.
    """
    # The prep script is installed next to the interpreter by the PhyloWGS package.
    exe = os.path.join(os.path.dirname(sys.executable), "create_phylowgs_inputs.py")
    assert os.path.exists(exe), "Could not find input prep script for PhyloWGS runs."
    ssm_file = os.path.join(work_dir, "ssm_data.txt")
    cnv_file = os.path.join(work_dir, "cnv_data.txt")
    if not utils.file_exists(ssm_file) or not utils.file_exists(cnv_file):
        with file_transaction(somatic_info.tumor_data, ssm_file, cnv_file) as (tx_ssm_file, tx_cnv_file):
            # Normalize the VCF and CNV inputs (filtering, chromosome naming)
            # before handing them to the PhyloWGS prep script.
            variant_type, input_vcf_file = _prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"],
                                                          work_dir, somatic_info, cnv_info["ignore"], config)
            input_cnv_file = _prep_cnv_file(cnv_info["subclones"], work_dir, somatic_info)
            cmd = [sys.executable, exe,
                   "--sample-size", str(config["sample_size"]), "--tumor-sample", somatic_info.tumor_name,
                   "--battenberg", input_cnv_file, "--cellularity", _read_contam(cnv_info["contamination"]),
                   "--output-cnvs", tx_cnv_file, "--output-variants", tx_ssm_file,
                   "--variant-type", variant_type, input_vcf_file]
            do.run(cmd, "Prepare PhyloWGS inputs.")
    return ssm_file, cnv_file
"""Prepare inputs for running PhyloWGS from variant and CNV calls.
"""
exe = os.path.join(os.path.dirname(sys.executable), "create_phylowgs_inputs.py")
assert os.path.exists(exe), "Could not find input prep script for PhyloWGS runs."
ssm_file = os.path.join(work_dir, "ssm_data.txt")
cnv_file = os.path.join(work_dir, "cnv_data.txt")
if not utils.file_exists(ssm_file) or not utils.file_exists(cnv_file):
with file_transaction(somatic_info.tumor_data, ssm_file, cnv_file) as (tx_ssm_file, tx_cnv_file):
variant_type, input_vcf_file = _prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"],
work_dir, somatic_info, cnv_info["ignore"], config)
input_cnv_file = _prep_cnv_file(cnv_info["subclones"], work_dir, somatic_info)
cmd = [sys.executable, exe,
"--sample-size", str(config["sample_size"]), "--tumor-sample", somatic_info.tumor_name,
"--battenberg", input_cnv_file, "--cellularity", _read_contam(cnv_info["contamination"]),
"--output-cnvs", tx_cnv_file, "--output-variants", tx_ssm_file,
"--variant-type", variant_type, input_vcf_file]
do.run(cmd, "Prepare PhyloWGS inputs.")
return ssm_file, cnv_file | [
"def",
"_prep_inputs",
"(",
"vrn_info",
",",
"cnv_info",
",",
"somatic_info",
",",
"work_dir",
",",
"config",
")",
":",
"exe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"executable",
")",
",",
"\"cr... | Prepare inputs for running PhyloWGS from variant and CNV calls. | [
"Prepare",
"inputs",
"for",
"running",
"PhyloWGS",
"from",
"variant",
"and",
"CNV",
"calls",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L153-L171 | train | 218,888 |
def _prep_cnv_file(in_file, work_dir, somatic_info):
    """Rewrite a Battenberg CNV file so PhyloWGS can ingest it.

    The PhyloWGS preparation script does not handle 'chr' prefixed (hg19 style)
    chromosome names correctly, so the chromosome column is converted to
    GRCh37 (no 'chr') style, matching the conversion done in _prep_vrn_file.
    """
    base, ext = utils.splitext_plus(os.path.basename(in_file))
    out_file = os.path.join(work_dir, "%s-prep%s" % (base, ext))
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
            with open(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
                out_handle.write(in_handle.readline())  # copy header unchanged
                for line in in_handle:
                    fields = line.split("\t")
                    # Column 2 holds the chromosome name.
                    fields[1] = _phylowgs_compatible_chroms(fields[1])
                    out_handle.write("\t".join(fields))
    return out_file
"""Prepare Battenberg CNV file for ingest by PhyloWGS.
The PhyloWGS preparation script does not handle 'chr' prefixed chromosomes (hg19 style)
correctly. This converts them over to GRCh37 (no 'chr') style to match preparation
work in _prep_vrn_file.
"""
out_file = os.path.join(work_dir, "%s-prep%s" % utils.splitext_plus(os.path.basename(in_file)))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(in_handle.readline()) # header
for line in in_handle:
parts = line.split("\t")
parts[1] = _phylowgs_compatible_chroms(parts[1])
out_handle.write("\t".join(parts))
return out_file | [
"def",
"_prep_cnv_file",
"(",
"in_file",
",",
"work_dir",
",",
"somatic_info",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-prep%s\"",
"%",
"utils",
".",
"splitext_plus",
"(",
"os",
".",
"path",
".",
"basename",
... | Prepare Battenberg CNV file for ingest by PhyloWGS.
The PhyloWGS preparation script does not handle 'chr' prefixed chromosomes (hg19 style)
correctly. This converts them over to GRCh37 (no 'chr') style to match preparation
work in _prep_vrn_file. | [
"Prepare",
"Battenberg",
"CNV",
"file",
"for",
"ingest",
"by",
"PhyloWGS",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L173-L190 | train | 218,889 |
def _prep_vrn_file(in_file, vcaller, work_dir, somatic_info, ignore_file, config):
    """Create a variant file to feed into the PhyloWGS prep script, limiting records.

    Sorts by depth, adding top covered samples up to the sample_size supported
    by PhyloWGS. The logic is that the higher depth samples will have better
    resolution for frequency differences. More complex implementations could try
    to subset based on a distribution of frequencies to best sample the potential
    heterogeneity.
    Handles MuTect and VarDict as inputs to PhyloWGS.
    Fixes chromosome naming to use non chr-prefixed contigs, to match _prep_cnv_file.

    Returns:
        tuple: (variant_type label for the prep script, path to the prepared VCF).

    Raises:
        ValueError: if vcaller is not a supported variant caller.
    """
    if vcaller.startswith("vardict"):
        variant_type = "vardict"
    elif vcaller == "mutect":
        variant_type = "mutect-smchet"
    else:
        raise ValueError("Unexpected variant caller for PhyloWGS prep: %s" % vcaller)
    out_file = os.path.join(work_dir, "%s-%s-prep.vcf" % (utils.splitext_plus(os.path.basename(in_file))[0],
                                                          vcaller))
    if not utils.file_uptodate(out_file, in_file):
        check_fn = _min_sample_pass(ignore_file)
        with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
            tx_out_file_raw = "%s-raw%s" % utils.splitext_plus(tx_out_file)
            # Filter inputs
            # First pass: collect depths of passing records to pick a cutoff
            # keeping only the sample_size deepest records.
            with VariantFile(in_file) as bcf_in:
                depths = [_sample_depth(rec, somatic_info.tumor_name) for rec in
                          filter(check_fn, bcf_in)]
                depths.sort(reverse=True)
                # depth_thresh is the depth of the shallowest record kept.
                depth_thresh = depths[:config["sample_size"]][-1] if depths else 0
            # Second pass: write records passing the filter and the cutoff.
            # Skip the depth cutoff entirely when overall coverage is very low (<5).
            with VariantFile(in_file) as bcf_in:
                with VariantFile(tx_out_file_raw, "w", header=bcf_in.header) as bcf_out:
                    for rec in bcf_in:
                        if (check_fn(rec) and
                              (depth_thresh < 5 or _sample_depth(rec, somatic_info.tumor_name) >= depth_thresh)):
                            bcf_out.write(rec)
            # Fix potential chromosome issues
            # Rewrite the first (chromosome) column of non-header lines only.
            with open(tx_out_file_raw) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    for line in in_handle:
                        if not line.startswith("#"):
                            parts = line.split("\t")
                            parts[0] = _phylowgs_compatible_chroms(parts[0])
                            line = "\t".join(parts)
                        out_handle.write(line)
    return variant_type, out_file
"""Create a variant file to feed into the PhyloWGS prep script, limiting records.
Sorts by depth, adding top covered samples up to the sample_size supported
by PhyloWGS. The logic is that the higher depth samples will have better
resolution for frequency differences. More complex implementations could try
to subset based on a distribution of frequencies to best sample the potential
heterogeneity.
Handles MuTect and VarDict as inputs to PhyloWGS.
Fixes chromosome naming to use non chr-prefixed contigs, to match _prep_cnv_file.
"""
if vcaller.startswith("vardict"):
variant_type = "vardict"
elif vcaller == "mutect":
variant_type = "mutect-smchet"
else:
raise ValueError("Unexpected variant caller for PhyloWGS prep: %s" % vcaller)
out_file = os.path.join(work_dir, "%s-%s-prep.vcf" % (utils.splitext_plus(os.path.basename(in_file))[0],
vcaller))
if not utils.file_uptodate(out_file, in_file):
check_fn = _min_sample_pass(ignore_file)
with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
tx_out_file_raw = "%s-raw%s" % utils.splitext_plus(tx_out_file)
# Filter inputs
with VariantFile(in_file) as bcf_in:
depths = [_sample_depth(rec, somatic_info.tumor_name) for rec in
filter(check_fn, bcf_in)]
depths.sort(reverse=True)
depth_thresh = depths[:config["sample_size"]][-1] if depths else 0
with VariantFile(in_file) as bcf_in:
with VariantFile(tx_out_file_raw, "w", header=bcf_in.header) as bcf_out:
for rec in bcf_in:
if (check_fn(rec) and
(depth_thresh < 5 or _sample_depth(rec, somatic_info.tumor_name) >= depth_thresh)):
bcf_out.write(rec)
# Fix potential chromosome issues
with open(tx_out_file_raw) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if not line.startswith("#"):
parts = line.split("\t")
parts[0] = _phylowgs_compatible_chroms(parts[0])
line = "\t".join(parts)
out_handle.write(line)
return variant_type, out_file | [
"def",
"_prep_vrn_file",
"(",
"in_file",
",",
"vcaller",
",",
"work_dir",
",",
"somatic_info",
",",
"ignore_file",
",",
"config",
")",
":",
"if",
"vcaller",
".",
"startswith",
"(",
"\"vardict\"",
")",
":",
"variant_type",
"=",
"\"vardict\"",
"elif",
"vcaller",... | Create a variant file to feed into the PhyloWGS prep script, limiting records.
Sorts by depth, adding top covered samples up to the sample_size supported
by PhyloWGS. The logic is that the higher depth samples will have better
resolution for frequency differences. More complex implementations could try
to subset based on a distribution of frequencies to best sample the potential
heterogeneity.
Handles MuTect and VarDict as inputs to PhyloWGS.
Fixes chromosome naming to use non chr-prefixed contigs, to match _prep_cnv_file. | [
"Create",
"a",
"variant",
"file",
"to",
"feed",
"into",
"the",
"PhyloWGS",
"prep",
"script",
"limiting",
"records",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/phylowgs.py#L197-L243 | train | 218,890 |
def run_prepare(*data):
    """Run seqcluster prepare to merge all samples in one file.

    Collapsed reads from every sample are combined into a shared seqs.ma
    matrix and seqs.fastq file under <work>/seqcluster/prepare; the output
    paths are recorded on each sample.
    """
    out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
    out_dir = os.path.abspath(safe_makedir(out_dir))
    # NOTE(review): prepare_dir is never used below — confirm before removing.
    prepare_dir = os.path.join(out_dir, "prepare")
    tools = dd.get_expression_caller(data[0][0])
    if len(tools) == 0:
        # NOTE(review): implicit string concatenation here loses spaces between
        # sentences in the logged message.
        logger.info("You didn't specify any other expression caller tool."
                    "You can add to the YAML file:"
                    "expression_caller:[trna, seqcluster, mirdeep2]")
    # One "<collapsed fastq>\t<sample name>" line per sample, as expected by
    # seqcluster's prepare helpers.
    fn = []
    for sample in data:
        name = sample[0]["rgnames"]['sample']
        fn.append("%s\t%s" % (sample[0]['collapse'], name))
    # Mimic the seqcluster prepare CLI arguments: min count 2, read length 17-40.
    args = namedtuple('args', 'debug print_debug minc minl maxl out')
    args = args(False, False, 2, 17, 40, out_dir)
    ma_out = op.join(out_dir, "seqs.ma")
    seq_out = op.join(out_dir, "seqs.fastq")
    # Require a sequence to be seen in at least 10% of samples (minimum 1).
    min_shared = max(int(len(fn) / 10.0), 1)
    if not file_exists(ma_out):
        seq_l, sample_l = prepare._read_fastq_files(fn, args)
        with file_transaction(ma_out) as ma_tx:
            with open(ma_tx, 'w') as ma_handle:
                # NOTE(review): seq_out is written outside the ma_out
                # transaction, so a failed run can leave a partial seqs.fastq.
                with open(seq_out, 'w') as seq_handle:
                    logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1")
                    prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared)
    # Record the shared outputs on every sample for downstream steps.
    for sample in data:
        sample[0]["seqcluster_prepare_ma"] = ma_out
        sample[0]["seqcluster_prepare_fastq"] = seq_out
    return data
"""
Run seqcluster prepare to merge all samples in one file
"""
out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
out_dir = os.path.abspath(safe_makedir(out_dir))
prepare_dir = os.path.join(out_dir, "prepare")
tools = dd.get_expression_caller(data[0][0])
if len(tools) == 0:
logger.info("You didn't specify any other expression caller tool."
"You can add to the YAML file:"
"expression_caller:[trna, seqcluster, mirdeep2]")
fn = []
for sample in data:
name = sample[0]["rgnames"]['sample']
fn.append("%s\t%s" % (sample[0]['collapse'], name))
args = namedtuple('args', 'debug print_debug minc minl maxl out')
args = args(False, False, 2, 17, 40, out_dir)
ma_out = op.join(out_dir, "seqs.ma")
seq_out = op.join(out_dir, "seqs.fastq")
min_shared = max(int(len(fn) / 10.0), 1)
if not file_exists(ma_out):
seq_l, sample_l = prepare._read_fastq_files(fn, args)
with file_transaction(ma_out) as ma_tx:
with open(ma_tx, 'w') as ma_handle:
with open(seq_out, 'w') as seq_handle:
logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1")
prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared)
for sample in data:
sample[0]["seqcluster_prepare_ma"] = ma_out
sample[0]["seqcluster_prepare_fastq"] = seq_out
return data | [
"def",
"run_prepare",
"(",
"*",
"data",
")",
":",
"out_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dd",
".",
"get_work_dir",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"\"seqcluster\"",
",",
"\"prepare\"",
")",
"out_dir",
"=",
"os",
... | Run seqcluster prepare to merge all samples in one file | [
"Run",
"seqcluster",
"prepare",
"to",
"merge",
"all",
"samples",
"in",
"one",
"file"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L24-L56 | train | 218,891 |
def run_align(*data):
    """Align the pooled collapsed reads once per project.

    The merged seqs.fastq produced by run_prepare is aligned a single time,
    the resulting BAM moved to a shared location, and that path recorded on
    every sample as "cluster_bam".
    """
    first = data[0][0]
    work_dir = dd.get_work_dir(first)
    collapsed_fq = op.join(work_dir, "seqcluster", "prepare", "seqs.fastq")
    align_dir = op.join(work_dir, "align")
    merged_bam = op.join(align_dir, "seqs.bam")
    callers = dd.get_expression_caller(first)
    if not file_exists(merged_bam):
        aligned = process_alignment(first, [collapsed_fq, None])
        produced_bam = dd.get_work_bam(aligned[0][0])
        shutil.move(produced_bam, merged_bam)
        shutil.move(produced_bam + ".bai", merged_bam + ".bai")
        # Remove the per-sample alignment directory; only the merged BAM is kept.
        shutil.rmtree(op.join(align_dir, aligned[0][0]["rgnames"]["sample"]))
    for sample in data:
        sample[0]["cluster_bam"] = merged_bam
    if "mirdeep2" in callers:
        mirdeep.run(data)
    return data
"""
Prepare data to run alignment step, only once for each project
"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, "seqcluster", "prepare")
seq_out = op.join(out_dir, "seqs.fastq")
bam_dir = op.join(work_dir, "align")
new_bam_file = op.join(bam_dir, "seqs.bam")
tools = dd.get_expression_caller(data[0][0])
if not file_exists(new_bam_file):
sample = process_alignment(data[0][0], [seq_out, None])
bam_file = dd.get_work_bam(sample[0][0])
shutil.move(bam_file, new_bam_file)
shutil.move(bam_file + ".bai", new_bam_file + ".bai")
shutil.rmtree(op.join(bam_dir, sample[0][0]["rgnames"]['sample']))
for sample in data:
# sample[0]["align_bam"] = sample[0]["clean_fastq"]
sample[0]["cluster_bam"] = new_bam_file
if "mirdeep2" in tools:
novel_db = mirdeep.run(data)
return data | [
"def",
"run_align",
"(",
"*",
"data",
")",
":",
"work_dir",
"=",
"dd",
".",
"get_work_dir",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"out_dir",
"=",
"op",
".",
"join",
"(",
"work_dir",
",",
"\"seqcluster\"",
",",
"\"prepare\"",
")",
"seq_out",... | Prepare data to run alignment step, only once for each project | [
"Prepare",
"data",
"to",
"run",
"alignment",
"step",
"only",
"once",
"for",
"each",
"project"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L58-L80 | train | 218,892 |
def run_cluster(*data):
    """Run seqcluster cluster to detect smallRNA clusters.

    Also assembles mirge and miRNA/isomiR count outputs, recording results on
    the first sample (which is mutated in place and written back to data).
    """
    sample = data[0][0]
    tools = dd.get_expression_caller(data[0][0])
    work_dir = dd.get_work_dir(sample)
    out_dir = op.join(work_dir, "seqcluster", "cluster")
    out_dir = op.abspath(safe_makedir(out_dir))
    # NOTE(review): prepare_dir is never used below — confirm before removing.
    prepare_dir = op.join(work_dir, "seqcluster", "prepare")
    bam_file = data[0][0]["cluster_bam"]
    if "seqcluster" in tools:
        # Prefer an explicit transcriptome GTF; fall back to the small RNA GTF.
        gtf_file = dd.get_transcriptome_gtf(sample) if dd.get_transcriptome_gtf(sample) else dd.get_srna_gtf_file(sample)
        sample["seqcluster"] = _cluster(bam_file, data[0][0]["seqcluster_prepare_ma"],
                                        out_dir, dd.get_ref_file(sample),
                                        gtf_file)
        sample["report"] = _report(sample, dd.get_ref_file(sample))
    if "mirge" in tools:
        sample["mirge"] = mirge.run(data)
    # Known miRNA counts from the mirbase annotations.
    out_mirna = _make_isomir_counts(data, out_dir=op.join(work_dir, "mirbase"))
    if out_mirna:
        sample = dd.set_mirna_counts(sample, out_mirna[0])
        sample = dd.set_isomir_counts(sample, out_mirna[1])
    # Novel miRNA counts from the mirdeep2 predictions.
    out_novel = _make_isomir_counts(data, "seqbuster_novel", op.join(work_dir, "mirdeep2"), "_novel")
    if out_novel:
        sample = dd.set_novel_mirna_counts(sample, out_novel[0])
        sample = dd.set_novel_isomir_counts(sample, out_novel[1])
    data[0][0] = sample
    data = spikein.combine_spikein(data)
    return data
"""
Run seqcluster cluster to detect smallRNA clusters
"""
sample = data[0][0]
tools = dd.get_expression_caller(data[0][0])
work_dir = dd.get_work_dir(sample)
out_dir = op.join(work_dir, "seqcluster", "cluster")
out_dir = op.abspath(safe_makedir(out_dir))
prepare_dir = op.join(work_dir, "seqcluster", "prepare")
bam_file = data[0][0]["cluster_bam"]
if "seqcluster" in tools:
gtf_file = dd.get_transcriptome_gtf(sample) if dd.get_transcriptome_gtf(sample) else dd.get_srna_gtf_file(sample)
sample["seqcluster"] = _cluster(bam_file, data[0][0]["seqcluster_prepare_ma"],
out_dir, dd.get_ref_file(sample),
gtf_file)
sample["report"] = _report(sample, dd.get_ref_file(sample))
if "mirge" in tools:
sample["mirge"] = mirge.run(data)
out_mirna = _make_isomir_counts(data, out_dir=op.join(work_dir, "mirbase"))
if out_mirna:
sample = dd.set_mirna_counts(sample, out_mirna[0])
sample = dd.set_isomir_counts(sample, out_mirna[1])
out_novel = _make_isomir_counts(data, "seqbuster_novel", op.join(work_dir, "mirdeep2"), "_novel")
if out_novel:
sample = dd.set_novel_mirna_counts(sample, out_novel[0])
sample = dd.set_novel_isomir_counts(sample, out_novel[1])
data[0][0] = sample
data = spikein.combine_spikein(data)
return data | [
"def",
"run_cluster",
"(",
"*",
"data",
")",
":",
"sample",
"=",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
"tools",
"=",
"dd",
".",
"get_expression_caller",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"work_dir",
"=",
"dd",
".",
"get_work_dir",
"... | Run seqcluster cluster to detect smallRNA clusters | [
"Run",
"seqcluster",
"cluster",
"to",
"detect",
"smallRNA",
"clusters"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L82-L114 | train | 218,893 |
def _cluster(bam_file, ma_file, out_dir, reference, annotation_file=None):
    """Detect small RNA clusters by invoking the seqcluster command line tool.

    Returns a dict with the output directory and the paths of the counts,
    read stats and JSON cluster description files.
    """
    seqcluster = op.join(get_bcbio_bin(), "seqcluster")
    # Reuse the parameter name so cmd.format(**locals()) picks it up.
    annotation_file = "-g " + annotation_file if annotation_file else ""
    counts = op.join(out_dir, "counts.tsv")
    if not file_exists(counts):
        cmd = ("{seqcluster} cluster -o {out_dir} -m {ma_file} -a {bam_file} -r {reference} {annotation_file}")
        do.run(cmd.format(**locals()), "Running seqcluster.")
    stats = op.join(out_dir, "read_stats.tsv")
    json = op.join(out_dir, "seqcluster.json")
    return {'out_dir': out_dir, 'count_file': counts, 'stat_file': stats, 'json': json}
"""
Connect to seqcluster to run cluster with python directly
"""
seqcluster = op.join(get_bcbio_bin(), "seqcluster")
# cl = ["cluster", "-o", out_dir, "-m", ma_file, "-a", bam_file, "-r", reference]
if annotation_file:
annotation_file = "-g " + annotation_file
else:
annotation_file = ""
if not file_exists(op.join(out_dir, "counts.tsv")):
cmd = ("{seqcluster} cluster -o {out_dir} -m {ma_file} -a {bam_file} -r {reference} {annotation_file}")
do.run(cmd.format(**locals()), "Running seqcluster.")
counts = op.join(out_dir, "counts.tsv")
stats = op.join(out_dir, "read_stats.tsv")
json = op.join(out_dir, "seqcluster.json")
return {'out_dir': out_dir, 'count_file': counts, 'stat_file': stats, 'json': json} | [
"def",
"_cluster",
"(",
"bam_file",
",",
"ma_file",
",",
"out_dir",
",",
"reference",
",",
"annotation_file",
"=",
"None",
")",
":",
"seqcluster",
"=",
"op",
".",
"join",
"(",
"get_bcbio_bin",
"(",
")",
",",
"\"seqcluster\"",
")",
"# cl = [\"cluster\", \"-o\",... | Connect to seqcluster to run cluster with python directly | [
"Connect",
"to",
"seqcluster",
"to",
"run",
"cluster",
"with",
"python",
"directly"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L116-L133 | train | 218,894 |
def _report(data, reference):
    """Generate the seqcluster report database used for browsing results.

    Runs "seqcluster report" against the cluster JSON output, reusing an
    existing database when present, and returns its path.
    """
    seqcluster = op.join(get_bcbio_bin(), "seqcluster")
    work_dir = dd.get_work_dir(data)
    out_dir = safe_makedir(os.path.join(work_dir, "seqcluster", "report"))
    out_file = op.join(out_dir, "seqcluster.db")
    # The JSON input comes from the earlier cluster step.
    json = op.join(work_dir, "seqcluster", "cluster", "seqcluster.json")
    if not file_exists(out_file):
        cmd = ("{seqcluster} report -o {out_dir} -r {reference} -j {json}")
        do.run(cmd.format(**locals()), "Run report on clusters")
    return out_file
"""
Run report of seqcluster to get browser options for results
"""
seqcluster = op.join(get_bcbio_bin(), "seqcluster")
work_dir = dd.get_work_dir(data)
out_dir = safe_makedir(os.path.join(work_dir, "seqcluster", "report"))
out_file = op.join(out_dir, "seqcluster.db")
json = op.join(work_dir, "seqcluster", "cluster", "seqcluster.json")
cmd = ("{seqcluster} report -o {out_dir} -r {reference} -j {json}")
if not file_exists(out_file):
do.run(cmd.format(**locals()), "Run report on clusters")
return out_file | [
"def",
"_report",
"(",
"data",
",",
"reference",
")",
":",
"seqcluster",
"=",
"op",
".",
"join",
"(",
"get_bcbio_bin",
"(",
")",
",",
"\"seqcluster\"",
")",
"work_dir",
"=",
"dd",
".",
"get_work_dir",
"(",
"data",
")",
"out_dir",
"=",
"safe_makedir",
"("... | Run report of seqcluster to get browser options for results | [
"Run",
"report",
"of",
"seqcluster",
"to",
"get",
"browser",
"options",
"for",
"results"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L135-L147 | train | 218,895 |
def report(data):
    """Create an Rmd report for small RNAseq analysis.

    Writes a summary.csv of sample names and group assignments under the
    project report directory and instantiates the Rmd template next to it.

    Returns:
        str: path to the written summary.csv file.
    """
    work_dir = dd.get_work_dir(data[0][0])
    out_dir = op.join(work_dir, "report")
    safe_makedir(out_dir)
    summary_file = op.join(out_dir, "summary.csv")
    with file_transaction(summary_file) as out_tx:
        with open(out_tx, 'w') as out_handle:
            out_handle.write("sample_id,%s\n" % _guess_header(data[0][0]))
            for sample in data:
                info = sample[0]
                group = _guess_group(info)
                # Dead local `files` (seqbuster lookup) removed: it was
                # assigned but never written to the summary.
                out_handle.write(",".join([dd.get_sample_name(info),
                                           group]) + "\n")
    _modify_report(work_dir, out_dir)
    return summary_file
"""Create a Rmd report for small RNAseq analysis"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, "report")
safe_makedir(out_dir)
summary_file = op.join(out_dir, "summary.csv")
with file_transaction(summary_file) as out_tx:
with open(out_tx, 'w') as out_handle:
out_handle.write("sample_id,%s\n" % _guess_header(data[0][0]))
for sample in data:
info = sample[0]
group = _guess_group(info)
files = info["seqbuster"] if "seqbuster" in info else "None"
out_handle.write(",".join([dd.get_sample_name(info),
group]) + "\n")
_modify_report(work_dir, out_dir)
return summary_file | [
"def",
"report",
"(",
"data",
")",
":",
"work_dir",
"=",
"dd",
".",
"get_work_dir",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"out_dir",
"=",
"op",
".",
"join",
"(",
"work_dir",
",",
"\"report\"",
")",
"safe_makedir",
"(",
"out_dir",
")",
"su... | Create a Rmd report for small RNAseq analysis | [
"Create",
"a",
"Rmd",
"report",
"for",
"small",
"RNAseq",
"analysis"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L149-L165 | train | 218,896 |
def _modify_report(summary_path, out_dir):
    """Instantiate the bundled Rmd report template with the project summary path.

    Substitutes the absolute path of summary_path into the ``path_abs``
    placeholder of the packaged report.rmd and writes the result as
    srna_report.rmd inside out_dir.

    Returns:
        str: path to the written srna_report.rmd file.
    """
    summary_path = op.abspath(summary_path)
    template = op.normpath(op.join(op.dirname(op.realpath(template_seqcluster.__file__)), "report.rmd"))
    # Use a context manager so the template handle is closed promptly
    # (the previous bare open().read() leaked the file handle).
    with open(template) as in_handle:
        content = in_handle.read()
    out_content = string.Template(content).safe_substitute({'path_abs': summary_path})
    out_file = op.join(out_dir, "srna_report.rmd")
    with open(out_file, 'w') as out_handle:
        out_handle.write(out_content)
    return out_file
"""Read Rmd template and dump with project path."""
summary_path = op.abspath(summary_path)
template = op.normpath(op.join(op.dirname(op.realpath(template_seqcluster.__file__)), "report.rmd"))
content = open(template).read()
out_content = string.Template(content).safe_substitute({'path_abs': summary_path})
out_file = op.join(out_dir, "srna_report.rmd")
with open(out_file, 'w') as out_handle:
out_handle.write(out_content)
return out_file | [
"def",
"_modify_report",
"(",
"summary_path",
",",
"out_dir",
")",
":",
"summary_path",
"=",
"op",
".",
"abspath",
"(",
"summary_path",
")",
"template",
"=",
"op",
".",
"normpath",
"(",
"op",
".",
"join",
"(",
"op",
".",
"dirname",
"(",
"op",
".",
"rea... | Read Rmd template and dump with project path. | [
"Read",
"Rmd",
"template",
"and",
"dump",
"with",
"project",
"path",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L183-L192 | train | 218,897 |
bcbio/bcbio-nextgen | bcbio/srna/group.py | _make_isomir_counts | def _make_isomir_counts(data, srna_type="seqbuster", out_dir=None, stem=""):
"""
Parse miraligner files to create count matrix.
"""
work_dir = dd.get_work_dir(data[0][0])
if not out_dir:
out_dir = op.join(work_dir, "mirbase")
out_novel_isomir = append_stem(op.join(out_dir, "counts.tsv"), stem)
out_novel_mirna = append_stem(op.join(out_dir, "counts_mirna.tsv"), stem)
logger.debug("Create %s count data at %s." % (srna_type, out_dir))
if file_exists(out_novel_mirna):
return [out_novel_mirna, out_novel_isomir]
out_dts = []
for sample in data:
if sample[0].get(srna_type):
miraligner_fn = sample[0][srna_type]
reads = _read_miraligner(miraligner_fn)
if reads:
out_file, dt, dt_pre = _tab_output(reads, miraligner_fn + ".back", dd.get_sample_name(sample[0]))
out_dts.append(dt)
else:
logger.debug("WARNING::%s has NOT miRNA annotated for %s. Check if fasta files is small or species value." % (dd.get_sample_name(sample[0]), srna_type))
if out_dts:
out_files = _create_counts(out_dts, out_dir)
out_files = [move_safe(out_files[0], out_novel_isomir), move_safe(out_files[1], out_novel_mirna)]
return out_files
else:
logger.debug("WARNING::any samples have miRNA annotated for %s. Check if fasta files is small or species value." % srna_type) | python | def _make_isomir_counts(data, srna_type="seqbuster", out_dir=None, stem=""):
"""
Parse miraligner files to create count matrix.
"""
work_dir = dd.get_work_dir(data[0][0])
if not out_dir:
out_dir = op.join(work_dir, "mirbase")
out_novel_isomir = append_stem(op.join(out_dir, "counts.tsv"), stem)
out_novel_mirna = append_stem(op.join(out_dir, "counts_mirna.tsv"), stem)
logger.debug("Create %s count data at %s." % (srna_type, out_dir))
if file_exists(out_novel_mirna):
return [out_novel_mirna, out_novel_isomir]
out_dts = []
for sample in data:
if sample[0].get(srna_type):
miraligner_fn = sample[0][srna_type]
reads = _read_miraligner(miraligner_fn)
if reads:
out_file, dt, dt_pre = _tab_output(reads, miraligner_fn + ".back", dd.get_sample_name(sample[0]))
out_dts.append(dt)
else:
logger.debug("WARNING::%s has NOT miRNA annotated for %s. Check if fasta files is small or species value." % (dd.get_sample_name(sample[0]), srna_type))
if out_dts:
out_files = _create_counts(out_dts, out_dir)
out_files = [move_safe(out_files[0], out_novel_isomir), move_safe(out_files[1], out_novel_mirna)]
return out_files
else:
logger.debug("WARNING::any samples have miRNA annotated for %s. Check if fasta files is small or species value." % srna_type) | [
"def",
"_make_isomir_counts",
"(",
"data",
",",
"srna_type",
"=",
"\"seqbuster\"",
",",
"out_dir",
"=",
"None",
",",
"stem",
"=",
"\"\"",
")",
":",
"work_dir",
"=",
"dd",
".",
"get_work_dir",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"if",
"not... | Parse miraligner files to create count matrix. | [
"Parse",
"miraligner",
"files",
"to",
"create",
"count",
"matrix",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L194-L221 | train | 218,898 |
bcbio/bcbio-nextgen | bcbio/bam/coverage.py | _split_regions | def _split_regions(chrom, start, end):
"""Split regions longer than 100kb into smaller sections.
"""
window_size = 1e5
if end - start < window_size * 5:
return [(chrom, start, end)]
else:
out = []
for r in pybedtools.BedTool().window_maker(w=window_size,
b=pybedtools.BedTool("%s\t%s\t%s" % (chrom, start, end),
from_string=True)):
out.append((r.chrom, r.start, r.end))
return out | python | def _split_regions(chrom, start, end):
"""Split regions longer than 100kb into smaller sections.
"""
window_size = 1e5
if end - start < window_size * 5:
return [(chrom, start, end)]
else:
out = []
for r in pybedtools.BedTool().window_maker(w=window_size,
b=pybedtools.BedTool("%s\t%s\t%s" % (chrom, start, end),
from_string=True)):
out.append((r.chrom, r.start, r.end))
return out | [
"def",
"_split_regions",
"(",
"chrom",
",",
"start",
",",
"end",
")",
":",
"window_size",
"=",
"1e5",
"if",
"end",
"-",
"start",
"<",
"window_size",
"*",
"5",
":",
"return",
"[",
"(",
"chrom",
",",
"start",
",",
"end",
")",
"]",
"else",
":",
"out",... | Split regions longer than 100kb into smaller sections. | [
"Split",
"regions",
"longer",
"than",
"100kb",
"into",
"smaller",
"sections",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/coverage.py#L96-L108 | train | 218,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.