Dataset schema (column, type, observed size range):

    repo              string   length 7 to 55
    path              string   length 4 to 127
    func_name         string   length 1 to 88
    original_string   string   length 75 to 19.8k
    language          string   1 distinct value
    code              string   length 75 to 19.8k
    code_tokens       list     length 20 to 707
    docstring         string   length 3 to 17.3k
    docstring_tokens  list     length 3 to 222
    sha               string   length 40 (fixed)
    url               string   length 87 to 242
    partition         string   1 distinct value
    idx               int64    0 to 252k
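The schema above pairs each function's code with its docstring and provenance, in the style of a CodeSearchNet corpus. A minimal sketch of iterating records with the Hugging Face datasets library; the dataset identifier is a hypothetical placeholder, since the actual id is not given above:

    from datasets import load_dataset

    # "org/python-code-docstrings" is a hypothetical placeholder id
    ds = load_dataset("org/python-code-docstrings", split="train")
    for rec in ds.select(range(3)):
        # each record pairs a function's code with its docstring and provenance
        print(rec["func_name"], "--", rec["path"])
        print(rec["docstring"].splitlines()[0])
        print(rec["url"])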
repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _bgzip_from_cram
language: python
code:

    def _bgzip_from_cram(cram_file, dirs, data):
        """Create bgzipped fastq files from an input CRAM file in regions of interest.

        Returns a list with a single file, for single end CRAM files, or two
        files for paired end input.
        """
        import pybedtools
        region_file = (tz.get_in(["config", "algorithm", "variant_regions"], data)
                       if tz.get_in(["config", "algorithm", "coverage_interval"], data)
                       in ["regional", "exome", "amplicon"]
                       else None)
        if region_file:
            regions = ["%s:%s-%s" % tuple(r[:3]) for r in pybedtools.BedTool(region_file)]
        else:
            regions = [None]
        work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
        out_s, out_p1, out_p2 = [os.path.join(work_dir, "%s-%s.fq.gz" %
                                              (utils.splitext_plus(os.path.basename(cram_file))[0], fext))
                                 for fext in ["s1", "p1", "p2"]]
        if (not utils.file_exists(out_s) and
              (not utils.file_exists(out_p1) or not utils.file_exists(out_p2))):
            cram.index(cram_file, data["config"])
            fastqs, part_dir = _cram_to_fastq_regions(regions, cram_file, dirs, data)
            if len(fastqs[0]) == 1:
                with file_transaction(data, out_s) as tx_out_file:
                    _merge_and_bgzip([xs[0] for xs in fastqs], tx_out_file, out_s)
            else:
                for i, out_file in enumerate([out_p1, out_p2]):
                    if not utils.file_exists(out_file):
                        ext = "/%s" % (i + 1)
                        with file_transaction(data, out_file) as tx_out_file:
                            _merge_and_bgzip([xs[i] for xs in fastqs], tx_out_file,
                                             out_file, ext)
            shutil.rmtree(part_dir)
        if utils.file_exists(out_p1):
            return [out_p1, out_p2]
        else:
            assert utils.file_exists(out_s)
            return [out_s]
docstring: Create bgzipped fastq files from an input CRAM file in regions of interest. Returns a list with a single file, for single end CRAM files, or two files for paired end input.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L402-L439
partition: train
idx: 218,600
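The region-string construction above turns each BED interval into a samtools-style "chrom:start-end" query via pybedtools interval slicing. A small self-contained sketch of the same expression without pybedtools (the function passes the BED coordinates through unchanged):

    # stand-in for two BED file lines (chrom, start, end)
    bed_lines = ["chr1\t100\t200", "chr2\t50\t75"]
    regions = ["%s:%s-%s" % tuple(line.split("\t")[:3]) for line in bed_lines]
    print(regions)  # ['chr1:100-200', 'chr2:50-75']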

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _bgzip_from_cram_sambamba
language: python
code:

    def _bgzip_from_cram_sambamba(cram_file, dirs, data):
        """Use sambamba to extract from CRAM via regions.
        """
        raise NotImplementedError("sambamba doesn't yet support retrieval from CRAM by BED file")
        # The code below is unreachable until sambamba supports CRAM retrieval by BED file.
        region_file = (tz.get_in(["config", "algorithm", "variant_regions"], data)
                       if tz.get_in(["config", "algorithm", "coverage_interval"], data)
                       in ["regional", "exome"]
                       else None)
        base_name = utils.splitext_plus(os.path.basename(cram_file))[0]
        work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep",
                                                   "%s-parts" % base_name))
        f1, f2, o1, o2, si = [os.path.join(work_dir, "%s.fq" % x)
                              for x in ["match1", "match2", "unmatch1", "unmatch2", "single"]]
        ref_file = dd.get_ref_file(data)
        region = "-L %s" % region_file if region_file else ""
        cmd = ("sambamba view -f bam -l 0 -C {cram_file} -T {ref_file} {region} | "
               "bamtofastq F={f1} F2={f2} S={si} O={o1} O2={o2}")
        do.run(cmd.format(**locals()), "Convert CRAM to fastq in regions")
docstring: Use sambamba to extract from CRAM via regions.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L441-L457
partition: train
idx: 218,601

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _cram_to_fastq_regions
language: python
code:

    def _cram_to_fastq_regions(regions, cram_file, dirs, data):
        """Convert CRAM files to fastq, potentially within sub regions.

        Returns multiple fastq files that can be merged back together.
        """
        base_name = utils.splitext_plus(os.path.basename(cram_file))[0]
        work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep",
                                                   "%s-parts" % base_name))
        fnames = run_multicore(_cram_to_fastq_region,
                               [(cram_file, work_dir, base_name, region, data)
                                for region in regions],
                               data["config"])
        # check if we have paired or single end data
        if any(not _is_gzip_empty(p1) for p1, p2, s in fnames):
            out = [[p1, p2] for p1, p2, s in fnames]
        else:
            out = [[s] for p1, p2, s in fnames]
        return out, work_dir
docstring: Convert CRAM files to fastq, potentially within sub regions. Returns multiple fastq files that can be merged back together.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L482-L498
partition: train
idx: 218,602

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _cram_to_fastq_region
language: python
code:

    def _cram_to_fastq_region(cram_file, work_dir, base_name, region, data):
        """Convert CRAM to fastq in a specified region.
        """
        ref_file = tz.get_in(["reference", "fasta", "base"], data)
        resources = config_utils.get_resources("bamtofastq", data["config"])
        cores = tz.get_in(["config", "algorithm", "num_cores"], data, 1)
        max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
        rext = "-%s" % region.replace(":", "_").replace("-", "_") if region else "full"
        out_s, out_p1, out_p2, out_o1, out_o2 = [
            os.path.join(work_dir, "%s%s-%s.fq.gz" % (base_name, rext, fext))
            for fext in ["s1", "p1", "p2", "o1", "o2"]]
        if not utils.file_exists(out_p1):
            with file_transaction(data, out_s, out_p1, out_p2, out_o1, out_o2) as \
                    (tx_out_s, tx_out_p1, tx_out_p2, tx_out_o1, tx_out_o2):
                cram_file = objectstore.cl_input(cram_file)
                sortprefix = "%s-sort" % utils.splitext_plus(tx_out_s)[0]
                cmd = ("bamtofastq filename={cram_file} inputformat=cram T={sortprefix} "
                       "gz=1 collate=1 colsbs={max_mem} exclude=SECONDARY,SUPPLEMENTARY "
                       "F={tx_out_p1} F2={tx_out_p2} S={tx_out_s} O={tx_out_o1} O2={tx_out_o2} "
                       "reference={ref_file}")
                if region:
                    cmd += " ranges='{region}'"
                do.run(cmd.format(**locals()), "CRAM to fastq %s" % region if region else "")
        return [[out_p1, out_p2, out_s]]
docstring: Convert CRAM to fastq in a specified region.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L502-L525
partition: train
idx: 218,603

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _bgzip_from_bam
language: python
code:

    def _bgzip_from_bam(bam_file, dirs, data, is_retry=False, output_infix=''):
        """Create bgzipped fastq files from an input BAM file.
        """
        # tools
        config = data["config"]
        bamtofastq = config_utils.get_program("bamtofastq", config)
        resources = config_utils.get_resources("bamtofastq", config)
        cores = config["algorithm"].get("num_cores", 1)
        max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
        bgzip = tools.get_bgzip_cmd(config, is_retry)
        # files
        work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
        out_file_1 = os.path.join(work_dir, "%s%s-1.fq.gz" %
                                  (os.path.splitext(os.path.basename(bam_file))[0], output_infix))
        out_file_2 = out_file_1.replace("-1.fq.gz", "-2.fq.gz")
        needs_retry = False
        if is_retry or not utils.file_exists(out_file_1):
            if not bam.is_paired(bam_file):
                out_file_2 = None
            with file_transaction(config, out_file_1) as tx_out_file:
                for f in [tx_out_file, out_file_1, out_file_2]:
                    if f and os.path.exists(f):
                        os.remove(f)
                fq1_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, tx_out_file)
                prep_cmd = _seqtk_fastq_prep_cl(data, read_num=0)
                if prep_cmd:
                    fq1_bgzip_cmd = prep_cmd + " | " + fq1_bgzip_cmd
                sortprefix = "%s-sort" % os.path.splitext(tx_out_file)[0]
                if bam.is_paired(bam_file):
                    prep_cmd = _seqtk_fastq_prep_cl(data, read_num=1)
                    fq2_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, out_file_2)
                    if prep_cmd:
                        fq2_bgzip_cmd = prep_cmd + " | " + fq2_bgzip_cmd
                    out_str = ("F=>({fq1_bgzip_cmd}) F2=>({fq2_bgzip_cmd}) S=/dev/null O=/dev/null "
                               "O2=/dev/null collate=1 colsbs={max_mem}")
                else:
                    out_str = "S=>({fq1_bgzip_cmd})"
                bam_file = objectstore.cl_input(bam_file)
                extra_opts = " ".join([str(x) for x in resources.get("options", [])])
                cmd = "{bamtofastq} filename={bam_file} T={sortprefix} {extra_opts} " + out_str
                try:
                    do.run(cmd.format(**locals()), "BAM to bgzipped fastq",
                           checks=[do.file_reasonable_size(tx_out_file, bam_file)],
                           log_error=False)
                except subprocess.CalledProcessError as msg:
                    if not is_retry and "deflate failed" in str(msg):
                        logger.info("bamtofastq deflate IO failure preparing %s. Retrying with single core."
                                    % (bam_file))
                        needs_retry = True
                    else:
                        # logger.exception requires a message argument
                        logger.exception(str(msg))
                        raise
        if needs_retry:
            return _bgzip_from_bam(bam_file, dirs, data, is_retry=True)
        else:
            return [x for x in [out_file_1, out_file_2]
                    if x is not None and utils.file_exists(x)]
docstring: Create bgzipped fastq files from an input BAM file.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L532-L586
partition: train
idx: 218,604

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _grabix_index
language: python
code:

    def _grabix_index(data):
        """Create grabix index of bgzip input file.

        grabix does not allow specification of output file, so symlink
        the original file into a transactional directory.
        """
        in_file = data["bgzip_file"]
        config = data["config"]
        grabix = config_utils.get_program("grabix", config)
        gbi_file = _get_grabix_index(in_file)
        # We always build grabix input so we can use it for counting reads and doing downsampling
        if not gbi_file or _is_partial_index(gbi_file):
            if gbi_file:
                utils.remove_safe(gbi_file)
            else:
                gbi_file = in_file + ".gbi"
            with file_transaction(data, gbi_file) as tx_gbi_file:
                tx_in_file = os.path.splitext(tx_gbi_file)[0]
                utils.symlink_plus(in_file, tx_in_file)
                do.run([grabix, "index", tx_in_file],
                       "Index input with grabix: %s" % os.path.basename(in_file))
        assert utils.file_exists(gbi_file)
        return [gbi_file]
docstring: Create grabix index of bgzip input file. grabix does not allow specification of output file, so symlink the original file into a transactional directory.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L590-L611
partition: train
idx: 218,605

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _is_partial_index
language: python
code:

    def _is_partial_index(gbi_file):
        """Check for truncated output since grabix doesn't write to a transactional directory.
        """
        with open(gbi_file) as in_handle:
            for i, _ in enumerate(in_handle):
                if i > 2:
                    return False
        return True
docstring: Check for truncated output since grabix doesn't write to a transactional directory.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L613-L620
partition: train
idx: 218,606
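A quick self-contained check of the truncation heuristic above: an index with three or fewer lines is treated as partial. This demo copies the function locally so it runs standalone:

    import os
    import tempfile

    def _is_partial_index(gbi_file):  # local copy of the function above
        with open(gbi_file) as in_handle:
            for i, _ in enumerate(in_handle):
                if i > 2:
                    return False
        return True

    with tempfile.NamedTemporaryFile("w", suffix=".gbi", delete=False) as fh:
        fh.write("header\n42\n")  # only two lines: looks truncated
        path = fh.name
    print(_is_partial_index(path))  # True
    os.unlink(path)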

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _bgzip_file
language: python
code:

    def _bgzip_file(finput, config, work_dir, needs_bgzip, needs_gunzip, needs_convert, data):
        """Handle bgzip of input file, potentially gunzipping an existing file.

        Handles cases where finput might be multiple files and need to be concatenated.
        """
        if isinstance(finput, six.string_types):
            in_file = finput
        else:
            assert not needs_convert, "Do not yet handle quality conversion with multiple inputs"
            return _bgzip_multiple_files(finput, work_dir, data)
        out_file = os.path.join(work_dir, os.path.basename(in_file).replace(".bz2", "") +
                                (".gz" if not in_file.endswith(".gz") else ""))
        if not utils.file_exists(out_file):
            with file_transaction(config, out_file) as tx_out_file:
                bgzip = tools.get_bgzip_cmd(config)
                is_remote = objectstore.is_remote(in_file)
                in_file = objectstore.cl_input(in_file, unpack=needs_gunzip or needs_convert
                                               or needs_bgzip or dd.get_trim_ends(data))
                if needs_convert or dd.get_trim_ends(data):
                    in_file = fastq_convert_pipe_cl(in_file, data)
                if needs_gunzip and not (needs_convert or dd.get_trim_ends(data)):
                    if in_file.endswith(".bz2"):
                        gunzip_cmd = "bunzip2 -c {in_file} |".format(**locals())
                    else:
                        gunzip_cmd = "gunzip -c {in_file} |".format(**locals())
                    bgzip_in = "/dev/stdin"
                else:
                    gunzip_cmd = ""
                    bgzip_in = in_file
                if needs_bgzip:
                    do.run("{gunzip_cmd} {bgzip} -c {bgzip_in} > {tx_out_file}".format(**locals()),
                           "bgzip input file")
                elif is_remote:
                    bgzip = "| bgzip -c" if (needs_convert or dd.get_trim_ends(data)) else ""
                    do.run("cat {in_file} {bgzip} > {tx_out_file}".format(**locals()),
                           "Get remote input")
                else:
                    raise ValueError("Unexpected inputs: %s %s %s %s" %
                                     (in_file, needs_bgzip, needs_gunzip, needs_convert))
        return out_file
docstring: Handle bgzip of input file, potentially gunzipping an existing file. Handles cases where finput might be multiple files and need to be concatenated.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L659-L697
partition: train
idx: 218,607

repo: bcbio/bcbio-nextgen
path: bcbio/ngsalign/alignprep.py
func_name: _check_gzipped_input
language: python
code:

    def _check_gzipped_input(in_file, data):
        """Determine if a gzipped input file is blocked gzip or standard.
        """
        grabix = config_utils.get_program("grabix", data["config"])
        # check_output returns bytes on Python 3; decode before comparing
        is_bgzip = subprocess.check_output([grabix, "check", in_file]).decode()
        if is_bgzip.strip() == "yes":
            return False, False
        else:
            return True, True
docstring: Determine if a gzipped input file is blocked gzip or standard.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L715-L723
partition: train
idx: 218,608
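Here `grabix check` is what tells blocked gzip (BGZF) apart from standard gzip. For illustration, a standalone heuristic for the same distinction, based on the BGZF header layout in the SAM specification (gzip magic bytes, the FEXTRA flag, and a 'BC' extra subfield); a sketch, not a replacement for grabix:

    def looks_like_bgzf(path):
        """Heuristic: gzip magic bytes plus a BGZF 'BC' extra subfield."""
        with open(path, "rb") as fh:
            header = fh.read(18)
        if len(header) < 18 or header[:2] != b"\x1f\x8b":
            return False  # not gzip at all
        if not header[3] & 4:
            return False  # FEXTRA flag unset: plain gzip
        return header[12:14] == b"BC"  # BGZF extra subfield identifier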

repo: bcbio/bcbio-nextgen
path: bcbio/qc/fastqc.py
func_name: run
language: python
code:

    def run(bam_file, data, fastqc_out):
        """Run fastqc, generating report in specified directory and parsing metrics.

        Downsamples to 10 million reads to avoid excessive processing times with
        large files, unless we're running a Standard/smallRNA-seq/QC pipeline.

        Handles fastqc 0.11+, which uses a single HTML file, and older versions
        that use a directory of files + images. The goal is to eventually move
        to only 0.11+.
        """
        sentry_file = os.path.join(fastqc_out, "fastqc_report.html")
        if not os.path.exists(sentry_file):
            work_dir = os.path.dirname(fastqc_out)
            utils.safe_makedir(work_dir)
            ds_file = (bam.downsample(bam_file, data, 1e7, work_dir=work_dir)
                       if data.get("analysis", "").lower() not in ["standard", "smallrna-seq"]
                       else None)
            if ds_file is not None:
                bam_file = ds_file
            frmt = "bam" if bam_file.endswith("bam") else "fastq"
            fastqc_name = utils.splitext_plus(os.path.basename(bam_file))[0]
            fastqc_clean_name = dd.get_sample_name(data)
            num_cores = data["config"]["algorithm"].get("num_cores", 1)
            with tx_tmpdir(data, work_dir) as tx_tmp_dir:
                with utils.chdir(tx_tmp_dir):
                    cl = [config_utils.get_program("fastqc", data["config"]),
                          "-d", tx_tmp_dir, "-t", str(num_cores), "--extract",
                          "-o", tx_tmp_dir, "-f", frmt, bam_file]
                    cl = "%s %s %s" % (utils.java_freetype_fix(),
                                       utils.local_path_export(),
                                       " ".join([str(x) for x in cl]))
                    do.run(cl, "FastQC: %s" % dd.get_sample_name(data))
                    tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name)
                    tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name)
                    if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file):
                        utils.safe_makedir(fastqc_out)
                        # Use sample name for reports instead of bam file name
                        with open(os.path.join(tx_fastqc_out, "fastqc_data.txt"), 'r') as fastqc_bam_name, \
                                open(os.path.join(tx_fastqc_out, "_fastqc_data.txt"), 'w') as fastqc_sample_name:
                            for line in fastqc_bam_name:
                                fastqc_sample_name.write(line.replace(os.path.basename(bam_file),
                                                                      fastqc_clean_name))
                        shutil.move(os.path.join(tx_fastqc_out, "_fastqc_data.txt"),
                                    os.path.join(fastqc_out, 'fastqc_data.txt'))
                        shutil.move(tx_combo_file, sentry_file)
                        if os.path.exists("%s.zip" % tx_fastqc_out):
                            shutil.move("%s.zip" % tx_fastqc_out,
                                        os.path.join(fastqc_out, "%s.zip" % fastqc_clean_name))
                    elif not os.path.exists(sentry_file):
                        raise ValueError("FastQC failed to produce output HTML file: %s"
                                         % os.listdir(tx_tmp_dir))
        logger.info("Produced HTML report %s" % sentry_file)
        parser = FastQCParser(fastqc_out, dd.get_sample_name(data))
        stats = parser.get_fastqc_summary()
        parser.save_sections_into_file()
        return stats
docstring: Run fastqc, generating report in specified directory and parsing metrics. Downsamples to 10 million reads to avoid excessive processing times with large files, unless we're running a Standard/smallRNA-seq/QC pipeline. Handles fastqc 0.11+, which uses a single HTML file, and older versions that use a directory of files + images. The goal is to eventually move to only 0.11+.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/fastqc.py#L21-L70
partition: train
idx: 218,609

repo: bcbio/bcbio-nextgen
path: bcbio/qc/fastqc.py
func_name: FastQCParser._get_module
language: python
code:

    def _get_module(self, parser, module):
        """Get module using fadapa package.
        """
        dt = []
        lines = parser.clean_data(module)
        header = lines[0]
        for data in lines[1:]:
            if data[0].startswith("#"):  # some modules have two headers
                header = data
                continue
            if data[0].find("-") > -1:  # expand positions 1-3 to 1, 2, 3
                f, s = map(int, data[0].split("-"))
                for pos in range(f, s + 1):  # inclusive of the end position
                    dt.append([str(pos)] + data[1:])
            else:
                dt.append(data)
        dt = pd.DataFrame(dt)
        dt.columns = [h.replace(" ", "_") for h in header]
        dt['sample'] = self.sample
        return dt
docstring: Get module using fadapa package
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/fastqc.py#L113-L133
partition: train
idx: 218,610
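A worked example of the range expansion above: FastQC groups per-base rows into range keys like "1-3", and the loop re-expands them into one row per position, end inclusive:

    data = ["1-3", "0.5", "30"]  # position range, then the module's values
    f, s = map(int, data[0].split("-"))
    expanded = [[str(pos)] + data[1:] for pos in range(f, s + 1)]
    print(expanded)  # [['1', '0.5', '30'], ['2', '0.5', '30'], ['3', '0.5', '30']]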

repo: bcbio/bcbio-nextgen
path: bcbio/server/background.py
func_name: GenericSubprocess.start
language: python
code:

    def start(self):
        """Spawn the task.

        Throws RuntimeError if the task was already started.
        """
        if self.pipe is not None:
            raise RuntimeError("Cannot start task twice")
        self.ioloop = tornado.ioloop.IOLoop.instance()
        if self.timeout > 0:
            self.expiration = self.ioloop.add_timeout(time.time() + self.timeout,
                                                      self.on_timeout)
        self.pipe = subprocess.Popen(**self.args)
        self.streams = [(self.pipe.stdout.fileno(), []),
                        (self.pipe.stderr.fileno(), [])]
        for fd, d in self.streams:
            # switch the child's output descriptors to non-blocking reads
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
            self.ioloop.add_handler(fd,
                                    self.stat,
                                    self.ioloop.READ | self.ioloop.ERROR)
docstring: Spawn the task. Throws RuntimeError if the task was already started.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/server/background.py#L32-L51
partition: train
idx: 218,611

repo: bcbio/bcbio-nextgen
path: bcbio/workflow/xprize.py
func_name: get_fc_date
language: python
code:

    def get_fc_date(out_config_file):
        """Retrieve flowcell date, reusing older dates if refreshing a present workflow.
        """
        if os.path.exists(out_config_file):
            with open(out_config_file) as in_handle:
                old_config = yaml.safe_load(in_handle)
                fc_date = old_config["fc_date"]
        else:
            fc_date = datetime.datetime.now().strftime("%y%m%d")
        return fc_date
docstring: Retrieve flowcell date, reusing older dates if refreshing a present workflow.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/xprize.py#L32-L41
partition: train
idx: 218,612

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: draw_quality_plot
language: python
code:

    def draw_quality_plot(db_file, plot_file, position_select, title):
        """Draw a plot of remapped qualities using ggplot2.

        Remapping information is pulled from the sqlite3 database using sqldf
        according to the position select attribute, which is a selection phrase
        like '> 50' or '=28'.

        plyr is used to summarize data by the original and remapped score for
        all selected positions.

        ggplot2 plots a heatmap of remapped counts at each (original, remap)
        coordinate, with an x=y line added for reference.
        """
        robjects.r.assign('db.file', db_file)
        robjects.r.assign('plot.file', plot_file)
        robjects.r.assign('position.select', position_select)
        robjects.r.assign('title', title)
        robjects.r('''
          library(sqldf)
          library(plyr)
          library(ggplot2)
          sql <- paste("select * from data WHERE position", position.select, sep=" ")
          exp.data <- sqldf(sql, dbname=db.file)
          remap.data <- ddply(exp.data, c("orig", "remap"), transform, count=sum(count))
          p <- ggplot(remap.data, aes(orig, remap)) +
               geom_tile(aes(fill = count)) +
               scale_fill_gradient(low = "white", high = "steelblue", trans="log") +
               opts(panel.background = theme_rect(fill = "white"), title=title) +
               geom_abline(intercept=0, slope=1)
          ggsave(plot.file, p, width=6, height=6)
        ''')
docstring: Draw a plot of remapped qualities using ggplot2. Remapping information is pulled from the sqlite3 database using sqldf according to the position select attribute, which is a selection phrase like '> 50' or '=28'. plyr is used to summarize data by the original and remapped score for all selected positions. ggplot2 plots a heatmap of remapped counts at each (original, remap) coordinate, with an x=y line added for reference.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L112-L143
partition: train
idx: 218,613

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: _positions_to_examine
language: python
code:

    def _positions_to_examine(db_file):
        """Determine how to sub-divide recalibration analysis based on read length.
        """
        conn = sqlite3.connect(db_file)
        cursor = conn.cursor()
        cursor.execute("""SELECT MAX(position) FROM data""")
        position = cursor.fetchone()[0]
        if position is not None:
            position = int(position)
        cursor.close()
        split_at = 50
        if position is None:
            return []
        elif position < split_at:
            return [("<= %s" % position, "lt%s" % position)]
        else:
            return [("< %s" % split_at, "lt%s" % split_at),
                    (">= %s" % split_at, "gt%s" % split_at)]
docstring: Determine how to sub-divide recalibration analysis based on read length.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L145-L162
partition: train
idx: 218,614

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: _organize_by_position
language: python
code:

    def _organize_by_position(orig_file, cmp_file, chunk_size):
        """Read two CSV files of qualities, organizing values by position.
        """
        with open(orig_file) as in_handle:
            reader1 = csv.reader(in_handle)
            num_positions = len(next(reader1)) - 1  # first column holds the read name
            for positions in _chunks(range(num_positions), chunk_size):
                with open(orig_file) as orig_handle:
                    with open(cmp_file) as cmp_handle:
                        orig_reader = csv.reader(orig_handle)
                        cmp_reader = csv.reader(cmp_handle)
                        for item in _counts_at_position(positions, orig_reader, cmp_reader):
                            yield item
docstring: Read two CSV files of qualities, organizing values by position.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L183-L196
partition: train
idx: 218,615

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: _counts_at_position
language: python
code:

    def _counts_at_position(positions, orig_reader, cmp_reader):
        """Combine original and new qualities at each position, generating counts.
        """
        pos_counts = collections.defaultdict(
            lambda: collections.defaultdict(lambda: collections.defaultdict(int)))
        for orig_parts in orig_reader:
            cmp_parts = next(cmp_reader)
            for pos in positions:
                try:
                    pos_counts[pos][int(orig_parts[pos + 1])][int(cmp_parts[pos + 1])] += 1
                except IndexError:
                    pass
        for pos, count_dict in pos_counts.items():  # iteritems is Python 2 only
            for orig_val, cmp_dict in count_dict.items():
                for cmp_val, count in cmp_dict.items():
                    yield pos + 1, orig_val, cmp_val, count
docstring: Combine original and new qualities at each position, generating counts.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L206-L222
partition: train
idx: 218,616
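A tiny worked run of the nested counting scheme above, with two reads and two positions (column 0 is the read name, so position p lives at index p + 1):

    import collections
    orig_rows = [["read1", "30", "20"], ["read2", "30", "25"]]
    cmp_rows = [["read1", "31", "20"], ["read2", "31", "24"]]
    pos_counts = collections.defaultdict(
        lambda: collections.defaultdict(lambda: collections.defaultdict(int)))
    for orig_parts, cmp_parts in zip(orig_rows, cmp_rows):
        for pos in [0, 1]:
            pos_counts[pos][int(orig_parts[pos + 1])][int(cmp_parts[pos + 1])] += 1
    for pos, count_dict in sorted(pos_counts.items()):
        for orig_val, cmp_dict in count_dict.items():
            for cmp_val, count in cmp_dict.items():
                print(pos + 1, orig_val, cmp_val, count)
    # 1 30 31 2   -> quality 30 remapped to 31 for both reads at position 1
    # 2 20 20 1
    # 2 25 24 1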

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: sort_csv
language: python
code:

    def sort_csv(in_file):
        """Sort a CSV file by read name, allowing direct comparison.
        """
        out_file = "%s.sort" % in_file
        if not (os.path.exists(out_file) and os.path.getsize(out_file) > 0):
            cl = ["sort", "-k", "1,1", in_file]
            with open(out_file, "w") as out_handle:
                child = subprocess.Popen(cl, stdout=out_handle)
                child.wait()
        return out_file
docstring: Sort a CSV file by read name, allowing direct comparison.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L224-L233
partition: train
idx: 218,617

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: fastq_to_csv
language: python
code:

    def fastq_to_csv(in_file, fastq_format, work_dir):
        """Convert a fastq file into a CSV of phred quality scores.
        """
        out_file = "%s.csv" % (os.path.splitext(os.path.basename(in_file))[0])
        out_file = os.path.join(work_dir, out_file)
        if not (os.path.exists(out_file) and os.path.getsize(out_file) > 0):
            with open(in_file) as in_handle:
                with open(out_file, "w") as out_handle:
                    writer = csv.writer(out_handle)
                    for rec in SeqIO.parse(in_handle, fastq_format):
                        writer.writerow([rec.id] + rec.letter_annotations["phred_quality"])
        return out_file
docstring: Convert a fastq file into a CSV of phred quality scores.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L235-L246
partition: train
idx: 218,618

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: bam_to_fastq
language: python
code:

    def bam_to_fastq(bam_file, is_paired):
        """Convert a BAM file to fastq files.
        """
        out_files, out_handles = _get_fastq_handles(bam_file, is_paired)
        if len(out_handles) > 0:
            in_bam = pysam.Samfile(bam_file, mode='rb')
            for read in in_bam:
                num = 1 if (not read.is_paired or read.is_read1) else 2
                # reverse the sequence and quality if mapped to opposite strand
                if read.is_reverse:
                    seq = str(Seq.reverse_complement(Seq.Seq(read.seq)))
                    qual = "".join(reversed(read.qual))
                else:
                    seq = read.seq
                    qual = read.qual
                out_handles[num].write("@%s\n%s\n+\n%s\n" % (read.qname, seq, qual))
            for h in out_handles.values():
                h.close()
        return out_files
docstring: Convert a BAM file to fastq files.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L248-L267
partition: train
idx: 218,619

repo: bcbio/bcbio-nextgen
path: scripts/utils/analyze_quality_recal.py
func_name: run_latex_report
language: python
code:

    def run_latex_report(base, report_dir, section_info):
        """Generate a pdf report with plots using latex.
        """
        out_name = "%s_recal_plots.tex" % base
        out = os.path.join(report_dir, out_name)
        with open(out, "w") as out_handle:
            out_tmpl = Template(out_template)
            out_handle.write(out_tmpl.render(sections=section_info))
        start_dir = os.getcwd()
        try:
            os.chdir(report_dir)
            cl = ["pdflatex", out_name]
            child = subprocess.Popen(cl)
            child.wait()
        finally:
            os.chdir(start_dir)
docstring: Generate a pdf report with plots using latex.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/analyze_quality_recal.py#L301-L316
partition: train
idx: 218,620

repo: bcbio/bcbio-nextgen
path: bcbio/variation/multi.py
func_name: bam_needs_processing
language: python
code:

    def bam_needs_processing(data):
        """Check if a work input needs processing for parallelization.
        """
        return ((data.get("work_bam") or data.get("align_bam"))
                and (any(tz.get_in(["config", "algorithm", x], data)
                         for x in ["variantcaller", "mark_duplicates", "recalibrate",
                                   "realign", "svcaller", "jointcaller", "variant_regions"])
                     or any(k in data for k in ["cwl_keys", "output_cwl_keys"])))
docstring: Check if a work input needs processing for parallelization.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L30-L37
partition: train
idx: 218,621

repo: bcbio/bcbio-nextgen
path: bcbio/variation/multi.py
func_name: get_batch_for_key
language: python
code:

    def get_batch_for_key(data):
        """Retrieve batch information useful as a unique key for the sample.
        """
        batches = _get_batches(data, require_bam=False)
        if len(batches) == 1:
            return batches[0]
        else:
            return tuple(batches)
docstring: Retrieve batch information useful as a unique key for the sample.
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L39-L46
partition: train
idx: 218,622

repo: bcbio/bcbio-nextgen
path: bcbio/variation/multi.py
func_name: _find_all_groups
language: python
code:

    def _find_all_groups(items, require_bam=True):
        """Find all groups
        """
        all_groups = []
        for data in items:
            batches = _get_batches(data, require_bam)
            all_groups.append(batches)
        return all_groups
docstring: Find all groups
sha: 6a9348c0054ccd5baffd22f1bb7d0422f6978b20
url: https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L57-L64
partition: train
idx: 218,623

repo: bcbio/bcbio-nextgen
path: bcbio/variation/multi.py
func_name: _get_representative_batch
language: python
code:

    def _get_representative_batch(merged):
        """Prepare dictionary matching batch items to a representative within a group.
        """
        out = {}
        for mgroup in merged:
            mgroup = sorted(list(mgroup))
            for x in mgroup:
                out[x] = mgroup[0]
        return out
Prepare dictionary matching batch items to a representative within a group.
[ "Prepare", "dictionary", "matching", "batch", "items", "to", "a", "representative", "within", "a", "group", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L90-L98
train
218,624
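A worked example of the representative mapping: every batch in a merged group points at the lexicographically first member of its group, giving a stable lookup key.

merged = [{"b2", "b1"}, {"b3"}]
out = {}
for mgroup in merged:
    mgroup = sorted(list(mgroup))   # sort so the representative is deterministic
    for x in mgroup:
        out[x] = mgroup[0]          # first member represents the whole group
assert out == {"b1": "b1", "b2": "b1", "b3": "b3"}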
bcbio/bcbio-nextgen
bcbio/variation/multi.py
_group_batches_shared
def _group_batches_shared(xs, caller_batch_fn, prep_data_fn):
    """Shared functionality for grouping by batches for variant calling and joint calling.
    """
    singles = []
    batch_groups = collections.defaultdict(list)
    for args in xs:
        data = utils.to_single_data(args)
        caller, batch = caller_batch_fn(data)
        region = _list_to_tuple(data["region"]) if "region" in data else ()
        if batch is not None:
            batches = batch if isinstance(batch, (list, tuple)) else [batch]
            for b in batches:
                batch_groups[(b, region, caller)].append(utils.deepish_copy(data))
        else:
            data = prep_data_fn(data, [data])
            singles.append(data)
    batches = []
    for batch, items in batch_groups.items():
        batch_data = utils.deepish_copy(_pick_lead_item(items))
        # For nested primary batches, split permanently by batch
        if tz.get_in(["metadata", "batch"], batch_data):
            batch_name = batch[0]
            batch_data["metadata"]["batch"] = batch_name
        batch_data = prep_data_fn(batch_data, items)
        batch_data["group_orig"] = _collapse_subitems(batch_data, items)
        batch_data["group"] = batch
        batches.append(batch_data)
    return singles + batches
python
def _group_batches_shared(xs, caller_batch_fn, prep_data_fn):
    """Shared functionality for grouping by batches for variant calling and joint calling.
    """
    singles = []
    batch_groups = collections.defaultdict(list)
    for args in xs:
        data = utils.to_single_data(args)
        caller, batch = caller_batch_fn(data)
        region = _list_to_tuple(data["region"]) if "region" in data else ()
        if batch is not None:
            batches = batch if isinstance(batch, (list, tuple)) else [batch]
            for b in batches:
                batch_groups[(b, region, caller)].append(utils.deepish_copy(data))
        else:
            data = prep_data_fn(data, [data])
            singles.append(data)
    batches = []
    for batch, items in batch_groups.items():
        batch_data = utils.deepish_copy(_pick_lead_item(items))
        # For nested primary batches, split permanently by batch
        if tz.get_in(["metadata", "batch"], batch_data):
            batch_name = batch[0]
            batch_data["metadata"]["batch"] = batch_name
        batch_data = prep_data_fn(batch_data, items)
        batch_data["group_orig"] = _collapse_subitems(batch_data, items)
        batch_data["group"] = batch
        batches.append(batch_data)
    return singles + batches
[ "def", "_group_batches_shared", "(", "xs", ",", "caller_batch_fn", ",", "prep_data_fn", ")", ":", "singles", "=", "[", "]", "batch_groups", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "args", "in", "xs", ":", "data", "=", "utils", ".",...
Shared functionality for grouping by batches for variant calling and joint calling.
[ "Shared", "functionality", "for", "grouping", "by", "batches", "for", "variant", "calling", "and", "joint", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L106-L133
train
218,625
bcbio/bcbio-nextgen
bcbio/variation/multi.py
group_batches
def group_batches(xs):
    """Group samples into batches for simultaneous variant calling.

    Identify all samples to call together: those in the same batch
    and variant caller. Pull together all BAM files from this batch
    and process together. Provide details to pull these finalized
    files back into individual expected files.
    Only batches files if joint calling is not specified.
    """
    def _caller_batches(data):
        caller = tz.get_in(("config", "algorithm", "variantcaller"), data)
        jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
        batch = tz.get_in(("metadata", "batch"), data) if not jointcaller else None
        return caller, batch
    def _prep_data(data, items):
        data["region_bams"] = [x["region_bams"] for x in items]
        return data
    return _group_batches_shared(xs, _caller_batches, _prep_data)
python
def group_batches(xs):
    """Group samples into batches for simultaneous variant calling.

    Identify all samples to call together: those in the same batch
    and variant caller. Pull together all BAM files from this batch
    and process together. Provide details to pull these finalized
    files back into individual expected files.
    Only batches files if joint calling is not specified.
    """
    def _caller_batches(data):
        caller = tz.get_in(("config", "algorithm", "variantcaller"), data)
        jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
        batch = tz.get_in(("metadata", "batch"), data) if not jointcaller else None
        return caller, batch
    def _prep_data(data, items):
        data["region_bams"] = [x["region_bams"] for x in items]
        return data
    return _group_batches_shared(xs, _caller_batches, _prep_data)
[ "def", "group_batches", "(", "xs", ")", ":", "def", "_caller_batches", "(", "data", ")", ":", "caller", "=", "tz", ".", "get_in", "(", "(", "\"config\"", ",", "\"algorithm\"", ",", "\"variantcaller\"", ")", ",", "data", ")", "jointcaller", "=", "tz", "."...
Group samples into batches for simultaneous variant calling. Identify all samples to call together: those in the same batch and variant caller. Pull together all BAM files from this batch and process together. Provide details to pull these finalized files back into individual expected files. Only batches files if joint calling is not specified.
[ "Group", "samples", "into", "batches", "for", "simultaneous", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L135-L153
train
218,626
bcbio/bcbio-nextgen
bcbio/variation/multi.py
_collapse_subitems
def _collapse_subitems(base, items):
    """Collapse full data representations relative to a standard base.
    """
    out = []
    for d in items:
        newd = _diff_dict(base, d)
        out.append(newd)
    return out
python
def _collapse_subitems(base, items):
    """Collapse full data representations relative to a standard base.
    """
    out = []
    for d in items:
        newd = _diff_dict(base, d)
        out.append(newd)
    return out
[ "def", "_collapse_subitems", "(", "base", ",", "items", ")", ":", "out", "=", "[", "]", "for", "d", "in", "items", ":", "newd", "=", "_diff_dict", "(", "base", ",", "d", ")", "out", ".", "append", "(", "newd", ")", "return", "out" ]
Collapse full data representations relative to a standard base.
[ "Collapse", "full", "data", "representations", "relative", "to", "a", "standard", "base", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L173-L180
train
218,627
bcbio/bcbio-nextgen
bcbio/variation/multi.py
_pick_lead_item
def _pick_lead_item(items):
    """Pick single representative sample for batch calling to attach calls to.

    For cancer samples, attach to tumor.
    """
    if vcfutils.is_paired_analysis([dd.get_align_bam(x) for x in items], items):
        for data in items:
            if vcfutils.get_paired_phenotype(data) == "tumor":
                return data
        raise ValueError("Did not find tumor sample in paired tumor/normal calling")
    else:
        return items[0]
python
def _pick_lead_item(items):
    """Pick single representative sample for batch calling to attach calls to.

    For cancer samples, attach to tumor.
    """
    if vcfutils.is_paired_analysis([dd.get_align_bam(x) for x in items], items):
        for data in items:
            if vcfutils.get_paired_phenotype(data) == "tumor":
                return data
        raise ValueError("Did not find tumor sample in paired tumor/normal calling")
    else:
        return items[0]
[ "def", "_pick_lead_item", "(", "items", ")", ":", "if", "vcfutils", ".", "is_paired_analysis", "(", "[", "dd", ".", "get_align_bam", "(", "x", ")", "for", "x", "in", "items", "]", ",", "items", ")", ":", "for", "data", "in", "items", ":", "if", "vcfu...
Pick single representative sample for batch calling to attach calls to. For cancer samples, attach to tumor.
[ "Pick", "single", "representative", "sample", "for", "batch", "calling", "to", "attach", "calls", "to", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L198-L209
train
218,628
bcbio/bcbio-nextgen
bcbio/variation/multi.py
get_orig_items
def get_orig_items(base):
    """Retrieve original items from a diffed set of nested samples.
    """
    assert "group_orig" in base
    out = []
    for data_diff in base["group_orig"]:
        new = utils.deepish_copy(base)
        new.pop("group_orig")
        out.append(_patch_dict(data_diff, new))
    return out
python
def get_orig_items(base):
    """Retrieve original items from a diffed set of nested samples.
    """
    assert "group_orig" in base
    out = []
    for data_diff in base["group_orig"]:
        new = utils.deepish_copy(base)
        new.pop("group_orig")
        out.append(_patch_dict(data_diff, new))
    return out
[ "def", "get_orig_items", "(", "base", ")", ":", "assert", "\"group_orig\"", "in", "base", "out", "=", "[", "]", "for", "data_diff", "in", "base", "[", "\"group_orig\"", "]", ":", "new", "=", "utils", ".", "deepish_copy", "(", "base", ")", "new", ".", "...
Retrieve original items from a diffed set of nested samples.
[ "Retrieve", "original", "items", "from", "a", "diffed", "set", "of", "nested", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L211-L220
train
218,629
bcbio/bcbio-nextgen
bcbio/variation/multi.py
_patch_dict
def _patch_dict(diff, base):
    """Patch a dictionary, substituting in changed items from the nested diff.
    """
    for k, v in diff.items():
        if isinstance(v, dict):
            base[k] = _patch_dict(v, base.get(k, {}))
        elif not v:
            base.pop(k, None)
        else:
            base[k] = v
    return base
python
def _patch_dict(diff, base):
    """Patch a dictionary, substituting in changed items from the nested diff.
    """
    for k, v in diff.items():
        if isinstance(v, dict):
            base[k] = _patch_dict(v, base.get(k, {}))
        elif not v:
            base.pop(k, None)
        else:
            base[k] = v
    return base
[ "def", "_patch_dict", "(", "diff", ",", "base", ")", ":", "for", "k", ",", "v", "in", "diff", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "base", "[", "k", "]", "=", "_patch_dict", "(", "v", ",", "base", ...
Patch a dictionary, substituting in changed items from the nested diff.
[ "Patch", "a", "dictionary", "substituting", "in", "changed", "items", "from", "the", "nested", "diff", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L222-L232
train
218,630
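_patch_dict replays a nested diff (as produced by _diff_dict, defined elsewhere in multi.py) onto a base dictionary; note that falsy diff values remove keys rather than storing the falsy value. A self-contained sketch with hypothetical sample fields:

import copy

def patch(diff, base):  # same logic as _patch_dict, inlined for illustration
    for k, v in diff.items():
        if isinstance(v, dict):
            base[k] = patch(v, base.get(k, {}))
        elif not v:
            base.pop(k, None)
        else:
            base[k] = v
    return base

base = {"description": "s1", "config": {"algorithm": {"coverage": "high"}}}
diff = {"description": "s2", "config": {"algorithm": {"coverage": ""}}}
patched = patch(diff, copy.deepcopy(base))
assert patched["description"] == "s2"
assert "coverage" not in patched["config"]["algorithm"]  # falsy values drop keys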
bcbio/bcbio-nextgen
bcbio/variation/multi.py
split_variants_by_sample
def split_variants_by_sample(data):
    """Split a multi-sample call file into inputs for individual samples.

    For tumor/normal paired analyses, do not split the final file and attach
    it to the tumor input.
    """
    # not split, do nothing
    if "group_orig" not in data:
        return [[data]]
    # cancer tumor/normal
    elif (vcfutils.get_paired_phenotype(data)
          and "tumor" in [vcfutils.get_paired_phenotype(d) for d in get_orig_items(data)]):
        out = []
        for i, sub_data in enumerate(get_orig_items(data)):
            if vcfutils.get_paired_phenotype(sub_data) == "tumor":
                cur_batch = tz.get_in(["metadata", "batch"], data)
                if cur_batch:
                    sub_data["metadata"]["batch"] = cur_batch
                sub_data["vrn_file"] = data["vrn_file"]
            else:
                sub_data.pop("vrn_file", None)
            out.append([sub_data])
        return out
    # joint calling or population runs, do not split back up and keep in batches
    else:
        out = []
        for sub_data in get_orig_items(data):
            cur_batch = tz.get_in(["metadata", "batch"], data)
            if cur_batch:
                sub_data["metadata"]["batch"] = cur_batch
            sub_data["vrn_file_batch"] = data["vrn_file"]
            sub_data["vrn_file"] = data["vrn_file"]
            out.append([sub_data])
        return out
python
def split_variants_by_sample(data):
    """Split a multi-sample call file into inputs for individual samples.

    For tumor/normal paired analyses, do not split the final file and attach
    it to the tumor input.
    """
    # not split, do nothing
    if "group_orig" not in data:
        return [[data]]
    # cancer tumor/normal
    elif (vcfutils.get_paired_phenotype(data)
          and "tumor" in [vcfutils.get_paired_phenotype(d) for d in get_orig_items(data)]):
        out = []
        for i, sub_data in enumerate(get_orig_items(data)):
            if vcfutils.get_paired_phenotype(sub_data) == "tumor":
                cur_batch = tz.get_in(["metadata", "batch"], data)
                if cur_batch:
                    sub_data["metadata"]["batch"] = cur_batch
                sub_data["vrn_file"] = data["vrn_file"]
            else:
                sub_data.pop("vrn_file", None)
            out.append([sub_data])
        return out
    # joint calling or population runs, do not split back up and keep in batches
    else:
        out = []
        for sub_data in get_orig_items(data):
            cur_batch = tz.get_in(["metadata", "batch"], data)
            if cur_batch:
                sub_data["metadata"]["batch"] = cur_batch
            sub_data["vrn_file_batch"] = data["vrn_file"]
            sub_data["vrn_file"] = data["vrn_file"]
            out.append([sub_data])
        return out
[ "def", "split_variants_by_sample", "(", "data", ")", ":", "# not split, do nothing", "if", "\"group_orig\"", "not", "in", "data", ":", "return", "[", "[", "data", "]", "]", "# cancer tumor/normal", "elif", "(", "vcfutils", ".", "get_paired_phenotype", "(", "data",...
Split a multi-sample call file into inputs for individual samples. For tumor/normal paired analyses, do not split the final file and attach it to the tumor input.
[ "Split", "a", "multi", "-", "sample", "call", "file", "into", "inputs", "for", "individual", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L236-L269
train
218,631
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
run
def run(call_file, ref_file, vrn_files, data):
    """Run filtering on the input call file, handling SNPs and indels separately.
    """
    algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
    if includes_missingalt(data):
        logger.info("Removing variants with missing alts from %s." % call_file)
        call_file = gatk_remove_missingalt(call_file, data)

    if "gatkcnn" in dd.get_tools_on(data):
        return _cnn_filter(call_file, vrn_files, data)
    elif config_utils.use_vqsr(algs, call_file):
        if vcfutils.is_gvcf_file(call_file):
            raise ValueError("Cannot force gVCF output with joint calling using tools_on: [gvcf] and use VQSR. "
                             "Try using cutoff-based soft filtering with tools_off: [vqsr]")
        snp_file, indel_file = vcfutils.split_snps_indels(call_file, ref_file, data["config"])
        snp_filter_file = _variant_filtration(snp_file, ref_file, vrn_files, data, "SNP",
                                              vfilter.gatk_snp_cutoff)
        indel_filter_file = _variant_filtration(indel_file, ref_file, vrn_files, data, "INDEL",
                                                vfilter.gatk_indel_cutoff)
        orig_files = [snp_filter_file, indel_filter_file]
        out_file = "%scombined.vcf.gz" % os.path.commonprefix(orig_files)
        combined_file = vcfutils.combine_variant_files(orig_files, out_file, ref_file, data["config"])
        return combined_file
    else:
        snp_filter = vfilter.gatk_snp_cutoff(call_file, data)
        indel_filter = vfilter.gatk_indel_cutoff(snp_filter, data)
        return indel_filter
python
def run(call_file, ref_file, vrn_files, data):
    """Run filtering on the input call file, handling SNPs and indels separately.
    """
    algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
    if includes_missingalt(data):
        logger.info("Removing variants with missing alts from %s." % call_file)
        call_file = gatk_remove_missingalt(call_file, data)

    if "gatkcnn" in dd.get_tools_on(data):
        return _cnn_filter(call_file, vrn_files, data)
    elif config_utils.use_vqsr(algs, call_file):
        if vcfutils.is_gvcf_file(call_file):
            raise ValueError("Cannot force gVCF output with joint calling using tools_on: [gvcf] and use VQSR. "
                             "Try using cutoff-based soft filtering with tools_off: [vqsr]")
        snp_file, indel_file = vcfutils.split_snps_indels(call_file, ref_file, data["config"])
        snp_filter_file = _variant_filtration(snp_file, ref_file, vrn_files, data, "SNP",
                                              vfilter.gatk_snp_cutoff)
        indel_filter_file = _variant_filtration(indel_file, ref_file, vrn_files, data, "INDEL",
                                                vfilter.gatk_indel_cutoff)
        orig_files = [snp_filter_file, indel_filter_file]
        out_file = "%scombined.vcf.gz" % os.path.commonprefix(orig_files)
        combined_file = vcfutils.combine_variant_files(orig_files, out_file, ref_file, data["config"])
        return combined_file
    else:
        snp_filter = vfilter.gatk_snp_cutoff(call_file, data)
        indel_filter = vfilter.gatk_indel_cutoff(snp_filter, data)
        return indel_filter
[ "def", "run", "(", "call_file", ",", "ref_file", ",", "vrn_files", ",", "data", ")", ":", "algs", "=", "[", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "]", "*", "len", "(", "data", ".", "get", "(", "\"vrn_files\"", ",", "[", "1", "]...
Run filtering on the input call file, handling SNPs and indels separately.
[ "Run", "filtering", "on", "the", "input", "call", "file", "handling", "SNPs", "and", "indels", "separately", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L15-L41
train
218,632
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_cnn_filter
def _cnn_filter(in_file, vrn_files, data):
    """Perform CNN filtering on input VCF using pre-trained models.
    """
    #tensor_type = "reference"  # 1D, reference sequence
    tensor_type = "read_tensor"  # 2D, reads, flags, mapping quality
    score_file = _cnn_score_variants(in_file, tensor_type, data)
    return _cnn_tranch_filtering(score_file, vrn_files, tensor_type, data)
python
def _cnn_filter(in_file, vrn_files, data):
    """Perform CNN filtering on input VCF using pre-trained models.
    """
    #tensor_type = "reference"  # 1D, reference sequence
    tensor_type = "read_tensor"  # 2D, reads, flags, mapping quality
    score_file = _cnn_score_variants(in_file, tensor_type, data)
    return _cnn_tranch_filtering(score_file, vrn_files, tensor_type, data)
[ "def", "_cnn_filter", "(", "in_file", ",", "vrn_files", ",", "data", ")", ":", "#tensor_type = \"reference\" # 1D, reference sequence", "tensor_type", "=", "\"read_tensor\"", "# 2D, reads, flags, mapping quality", "score_file", "=", "_cnn_score_variants", "(", "in_file", ","...
Perform CNN filtering on input VCF using pre-trained models.
[ "Perform", "CNN", "filtering", "on", "input", "VCF", "using", "pre", "-", "trained", "models", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L45-L51
train
218,633
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_cnn_tranch_filtering
def _cnn_tranch_filtering(in_file, vrn_files, tensor_type, data):
    """Filter CNN scored VCFs in tranches using standard SNP and Indel truth sets.
    """
    out_file = "%s-filter.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_file, in_file):
        runner = broad.runner_from_config(data["config"])
        gatk_type = runner.gatk_type()
        assert gatk_type == "gatk4", "CNN filtering requires GATK4"
        if "train_hapmap" not in vrn_files:
            raise ValueError("CNN filtering requires HapMap training inputs: %s" % vrn_files)
        with file_transaction(data, out_file) as tx_out_file:
            params = ["-T", "FilterVariantTranches", "--variant", in_file,
                      "--output", tx_out_file,
                      "--snp-truth-vcf", vrn_files["train_hapmap"],
                      "--indel-truth-vcf", vrn_files["train_indels"]]
            if tensor_type == "reference":
                params += ["--info-key", "CNN_1D", "--tranche", "99"]
            else:
                assert tensor_type == "read_tensor"
                params += ["--info-key", "CNN_2D", "--tranche", "99"]
            runner.run_gatk(params)
    return vcfutils.bgzip_and_index(out_file, data["config"])
python
def _cnn_tranch_filtering(in_file, vrn_files, tensor_type, data):
    """Filter CNN scored VCFs in tranches using standard SNP and Indel truth sets.
    """
    out_file = "%s-filter.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_file, in_file):
        runner = broad.runner_from_config(data["config"])
        gatk_type = runner.gatk_type()
        assert gatk_type == "gatk4", "CNN filtering requires GATK4"
        if "train_hapmap" not in vrn_files:
            raise ValueError("CNN filtering requires HapMap training inputs: %s" % vrn_files)
        with file_transaction(data, out_file) as tx_out_file:
            params = ["-T", "FilterVariantTranches", "--variant", in_file,
                      "--output", tx_out_file,
                      "--snp-truth-vcf", vrn_files["train_hapmap"],
                      "--indel-truth-vcf", vrn_files["train_indels"]]
            if tensor_type == "reference":
                params += ["--info-key", "CNN_1D", "--tranche", "99"]
            else:
                assert tensor_type == "read_tensor"
                params += ["--info-key", "CNN_2D", "--tranche", "99"]
            runner.run_gatk(params)
    return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_cnn_tranch_filtering", "(", "in_file", ",", "vrn_files", ",", "tensor_type", ",", "data", ")", ":", "out_file", "=", "\"%s-filter.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_upt...
Filter CNN scored VCFs in tranches using standard SNP and Indel truth sets.
[ "Filter", "CNN", "scored", "VCFs", "in", "tranches", "using", "standard", "SNP", "and", "Indel", "truth", "sets", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L53-L74
train
218,634
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_cnn_score_variants
def _cnn_score_variants(in_file, tensor_type, data):
    """Score variants with pre-trained CNN models.
    """
    out_file = "%s-cnnscore.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_file, in_file):
        runner = broad.runner_from_config(data["config"])
        gatk_type = runner.gatk_type()
        assert gatk_type == "gatk4", "CNN filtering requires GATK4"
        with file_transaction(data, out_file) as tx_out_file:
            params = ["-T", "CNNScoreVariants", "--variant", in_file,
                      "--reference", dd.get_ref_file(data),
                      "--output", tx_out_file, "--input", dd.get_align_bam(data)]
            params += ["--tensor-type", tensor_type]
            runner.run_gatk(params)
    return vcfutils.bgzip_and_index(out_file, data["config"])
python
def _cnn_score_variants(in_file, tensor_type, data):
    """Score variants with pre-trained CNN models.
    """
    out_file = "%s-cnnscore.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_file, in_file):
        runner = broad.runner_from_config(data["config"])
        gatk_type = runner.gatk_type()
        assert gatk_type == "gatk4", "CNN filtering requires GATK4"
        with file_transaction(data, out_file) as tx_out_file:
            params = ["-T", "CNNScoreVariants", "--variant", in_file,
                      "--reference", dd.get_ref_file(data),
                      "--output", tx_out_file, "--input", dd.get_align_bam(data)]
            params += ["--tensor-type", tensor_type]
            runner.run_gatk(params)
    return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_cnn_score_variants", "(", "in_file", ",", "tensor_type", ",", "data", ")", ":", "out_file", "=", "\"%s-cnnscore.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptodate", "(", "out_...
Score variants with pre-trained CNN models.
[ "Score", "variants", "with", "pre", "-", "trained", "CNN", "models", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L76-L89
train
218,635
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_apply_vqsr
def _apply_vqsr(in_file, ref_file, recal_file, tranch_file,
                sensitivity_cutoff, filter_type, data):
    """Apply VQSR based on the specified tranche, returning a filtered VCF file.
    """
    base, ext = utils.splitext_plus(in_file)
    out_file = "{base}-{filter}filter{ext}".format(base=base, ext=ext,
                                                   filter=filter_type)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            broad_runner = broad.runner_from_config(data["config"])
            gatk_type = broad_runner.gatk_type()
            if gatk_type == "gatk4":
                params = ["-T", "ApplyVQSR",
                          "--variant", in_file,
                          "--output", tx_out_file,
                          "--recal-file", recal_file,
                          "--tranches-file", tranch_file]
            else:
                params = ["-T", "ApplyRecalibration",
                          "--input", in_file,
                          "--out", tx_out_file,
                          "--recal_file", recal_file,
                          "--tranches_file", tranch_file]
            params += ["-R", ref_file,
                       "--mode", filter_type]
            resources = config_utils.get_resources("gatk_apply_recalibration", data["config"])
            opts = resources.get("options", [])
            if not opts:
                if gatk_type == "gatk4":
                    opts += ["--truth-sensitivity-filter-level", sensitivity_cutoff]
                else:
                    opts += ["--ts_filter_level", sensitivity_cutoff]
            params += opts
            broad_runner.run_gatk(params)
    return out_file
python
def _apply_vqsr(in_file, ref_file, recal_file, tranch_file,
                sensitivity_cutoff, filter_type, data):
    """Apply VQSR based on the specified tranche, returning a filtered VCF file.
    """
    base, ext = utils.splitext_plus(in_file)
    out_file = "{base}-{filter}filter{ext}".format(base=base, ext=ext,
                                                   filter=filter_type)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            broad_runner = broad.runner_from_config(data["config"])
            gatk_type = broad_runner.gatk_type()
            if gatk_type == "gatk4":
                params = ["-T", "ApplyVQSR",
                          "--variant", in_file,
                          "--output", tx_out_file,
                          "--recal-file", recal_file,
                          "--tranches-file", tranch_file]
            else:
                params = ["-T", "ApplyRecalibration",
                          "--input", in_file,
                          "--out", tx_out_file,
                          "--recal_file", recal_file,
                          "--tranches_file", tranch_file]
            params += ["-R", ref_file,
                       "--mode", filter_type]
            resources = config_utils.get_resources("gatk_apply_recalibration", data["config"])
            opts = resources.get("options", [])
            if not opts:
                if gatk_type == "gatk4":
                    opts += ["--truth-sensitivity-filter-level", sensitivity_cutoff]
                else:
                    opts += ["--ts_filter_level", sensitivity_cutoff]
            params += opts
            broad_runner.run_gatk(params)
    return out_file
[ "def", "_apply_vqsr", "(", "in_file", ",", "ref_file", ",", "recal_file", ",", "tranch_file", ",", "sensitivity_cutoff", ",", "filter_type", ",", "data", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "in_file", ")", "out_file", "=", ...
Apply VQSR based on the specified tranche, returning a filtered VCF file.
[ "Apply", "VQSR", "based", "on", "the", "specified", "tranche", "returning", "a", "filtered", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L93-L127
train
218,636
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_get_training_data
def _get_training_data(vrn_files):
    """Retrieve training data, returning an empty set of information if not available.
    """
    out = {"SNP": [], "INDEL": []}
    # SNPs
    for name, train_info in [("train_hapmap", "known=false,training=true,truth=true,prior=15.0"),
                             ("train_omni", "known=false,training=true,truth=true,prior=12.0"),
                             ("train_1000g", "known=false,training=true,truth=false,prior=10.0"),
                             ("dbsnp", "known=true,training=false,truth=false,prior=2.0")]:
        if name not in vrn_files:
            return {}
        else:
            out["SNP"].append((name.replace("train_", ""), train_info, vrn_files[name]))
    # Indels
    if "train_indels" in vrn_files:
        out["INDEL"].append(("mills", "known=true,training=true,truth=true,prior=12.0",
                             vrn_files["train_indels"]))
    else:
        return {}
    return out
python
def _get_training_data(vrn_files):
    """Retrieve training data, returning an empty set of information if not available.
    """
    out = {"SNP": [], "INDEL": []}
    # SNPs
    for name, train_info in [("train_hapmap", "known=false,training=true,truth=true,prior=15.0"),
                             ("train_omni", "known=false,training=true,truth=true,prior=12.0"),
                             ("train_1000g", "known=false,training=true,truth=false,prior=10.0"),
                             ("dbsnp", "known=true,training=false,truth=false,prior=2.0")]:
        if name not in vrn_files:
            return {}
        else:
            out["SNP"].append((name.replace("train_", ""), train_info, vrn_files[name]))
    # Indels
    if "train_indels" in vrn_files:
        out["INDEL"].append(("mills", "known=true,training=true,truth=true,prior=12.0",
                             vrn_files["train_indels"]))
    else:
        return {}
    return out
[ "def", "_get_training_data", "(", "vrn_files", ")", ":", "out", "=", "{", "\"SNP\"", ":", "[", "]", ",", "\"INDEL\"", ":", "[", "]", "}", "# SNPs", "for", "name", ",", "train_info", "in", "[", "(", "\"train_hapmap\"", ",", "\"known=false,training=true,truth=...
Retrieve training data, returning an empty set of information if not available.
[ "Retrieve", "training", "data", "returning", "an", "empty", "set", "of", "information", "if", "not", "available", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L129-L148
train
218,637
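The returned structure pairs each resource name with its GATK known/training/truth/prior string and VCF path; if any required resource is absent, the whole result collapses to {} and VQSR is skipped. An illustrative shape with hypothetical file paths:

vrn_files = {"train_hapmap": "hapmap.vcf.gz", "train_omni": "omni.vcf.gz",
             "train_1000g": "1000g.vcf.gz", "dbsnp": "dbsnp.vcf.gz",
             "train_indels": "mills.vcf.gz"}
# _get_training_data(vrn_files)["SNP"][0] would then be:
# ("hapmap", "known=false,training=true,truth=true,prior=15.0", "hapmap.vcf.gz")
# while removing any one key, e.g. "dbsnp", yields {}.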
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_get_vqsr_training
def _get_vqsr_training(filter_type, vrn_files, gatk_type):
    """Return parameters for VQSR training, handling SNPs and Indels.
    """
    params = []
    for name, train_info, fname in _get_training_data(vrn_files)[filter_type]:
        if gatk_type == "gatk4":
            params.extend(["--resource:%s,%s" % (name, train_info), fname])
            if filter_type == "INDEL":
                params.extend(["--max-gaussians", "4"])
        else:
            params.extend(["-resource:%s,VCF,%s" % (name, train_info), fname])
            if filter_type == "INDEL":
                params.extend(["--maxGaussians", "4"])
    return params
python
def _get_vqsr_training(filter_type, vrn_files, gatk_type):
    """Return parameters for VQSR training, handling SNPs and Indels.
    """
    params = []
    for name, train_info, fname in _get_training_data(vrn_files)[filter_type]:
        if gatk_type == "gatk4":
            params.extend(["--resource:%s,%s" % (name, train_info), fname])
            if filter_type == "INDEL":
                params.extend(["--max-gaussians", "4"])
        else:
            params.extend(["-resource:%s,VCF,%s" % (name, train_info), fname])
            if filter_type == "INDEL":
                params.extend(["--maxGaussians", "4"])
    return params
[ "def", "_get_vqsr_training", "(", "filter_type", ",", "vrn_files", ",", "gatk_type", ")", ":", "params", "=", "[", "]", "for", "name", ",", "train_info", ",", "fname", "in", "_get_training_data", "(", "vrn_files", ")", "[", "filter_type", "]", ":", "if", "...
Return parameters for VQSR training, handling SNPs and Indels.
[ "Return", "parameters", "for", "VQSR", "training", "handling", "SNPs", "and", "Indels", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L153-L166
train
218,638
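For GATK4 indel training with the mills resource from _get_training_data, the assembled arguments would look roughly like the following (path hypothetical): resource flags interleave with file paths, and indels additionally cap the Gaussian mixture at 4.

params = ["--resource:mills,known=true,training=true,truth=true,prior=12.0",
          "mills.vcf.gz",
          "--max-gaussians", "4"]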
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_get_vqsr_annotations
def _get_vqsr_annotations(filter_type, data):
    """Retrieve appropriate annotations to use for VQSR based on filter type.

    Issues reported with MQ and the bwa-mem quality distribution result in
    intermittent failures when using VQSR:
    http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing
    http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
    """
    if filter_type == "SNP":
        # MQ, MQRankSum
        anns = ["QD", "FS", "ReadPosRankSum", "SOR"]
    else:
        assert filter_type == "INDEL"
        # MQRankSum
        anns = ["QD", "FS", "ReadPosRankSum", "SOR"]
    if dd.get_coverage_interval(data) == "genome":
        anns += ["DP"]
    return anns
python
def _get_vqsr_annotations(filter_type, data):
    """Retrieve appropriate annotations to use for VQSR based on filter type.

    Issues reported with MQ and the bwa-mem quality distribution result in
    intermittent failures when using VQSR:
    http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing
    http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
    """
    if filter_type == "SNP":
        # MQ, MQRankSum
        anns = ["QD", "FS", "ReadPosRankSum", "SOR"]
    else:
        assert filter_type == "INDEL"
        # MQRankSum
        anns = ["QD", "FS", "ReadPosRankSum", "SOR"]
    if dd.get_coverage_interval(data) == "genome":
        anns += ["DP"]
    return anns
[ "def", "_get_vqsr_annotations", "(", "filter_type", ",", "data", ")", ":", "if", "filter_type", "==", "\"SNP\"", ":", "# MQ, MQRankSum", "anns", "=", "[", "\"QD\"", ",", "\"FS\"", ",", "\"ReadPosRankSum\"", ",", "\"SOR\"", "]", "else", ":", "assert", "filter_t...
Retrieve appropriate annotations to use for VQSR based on filter type. Issues reported with MQ and the bwa-mem quality distribution result in intermittent failures when using VQSR: http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
[ "Retrieve", "appropriate", "annotations", "to", "use", "for", "VQSR", "based", "on", "filter", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L168-L185
train
218,639
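Concretely, the annotation set varies only with coverage interval, not filter type (the MQ-based annotations stay disabled for both branches); a whole genome SNP run would request:

# _get_vqsr_annotations("SNP", data) with a "genome" coverage interval:
anns = ["QD", "FS", "ReadPosRankSum", "SOR", "DP"]  # DP added for genome runs only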
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_run_vqsr
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data):
    """Run variant quality score recalibration.
    """
    cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92",
               "99.91", "99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"]
    if sensitivity_cutoff not in cutoffs:
        cutoffs.append(sensitivity_cutoff)
        cutoffs.sort()
    broad_runner = broad.runner_from_config(data["config"])
    gatk_type = broad_runner.gatk_type()
    base = utils.splitext_plus(in_file)[0]
    recal_file = ("%s-vqsrrecal.vcf.gz" % base) if gatk_type == "gatk4" else ("%s.recal" % base)
    tranches_file = "%s.tranches" % base
    plot_file = "%s-plots.R" % base
    if not utils.file_exists(recal_file):
        with file_transaction(data, recal_file, tranches_file, plot_file) as (tx_recal, tx_tranches, tx_plot_file):
            params = ["-T", "VariantRecalibrator",
                      "-R", ref_file,
                      "--mode", filter_type]
            if gatk_type == "gatk4":
                params += ["--variant", in_file, "--output", tx_recal,
                           "--tranches-file", tx_tranches, "--rscript-file", tx_plot_file]
            else:
                params += ["--input", in_file, "--recal_file", tx_recal,
                           "--tranches_file", tx_tranches, "--rscript_file", tx_plot_file]
            params += _get_vqsr_training(filter_type, vrn_files, gatk_type)
            resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"])
            opts = resources.get("options", [])
            if not opts:
                for cutoff in cutoffs:
                    opts += ["-tranche", str(cutoff)]
                for a in _get_vqsr_annotations(filter_type, data):
                    opts += ["-an", a]
            params += opts
            cores = dd.get_cores(data)
            memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
            try:
                broad_runner.new_resources("gatk-vqsr")
                broad_runner.run_gatk(params, log_error=False, memscale=memscale, parallel_gc=True)
            except:  # Can fail to run if not enough values are present to train.
                return None, None
    if gatk_type == "gatk4":
        vcfutils.bgzip_and_index(recal_file, data["config"])
    return recal_file, tranches_file
python
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data):
    """Run variant quality score recalibration.
    """
    cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92",
               "99.91", "99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"]
    if sensitivity_cutoff not in cutoffs:
        cutoffs.append(sensitivity_cutoff)
        cutoffs.sort()
    broad_runner = broad.runner_from_config(data["config"])
    gatk_type = broad_runner.gatk_type()
    base = utils.splitext_plus(in_file)[0]
    recal_file = ("%s-vqsrrecal.vcf.gz" % base) if gatk_type == "gatk4" else ("%s.recal" % base)
    tranches_file = "%s.tranches" % base
    plot_file = "%s-plots.R" % base
    if not utils.file_exists(recal_file):
        with file_transaction(data, recal_file, tranches_file, plot_file) as (tx_recal, tx_tranches, tx_plot_file):
            params = ["-T", "VariantRecalibrator",
                      "-R", ref_file,
                      "--mode", filter_type]
            if gatk_type == "gatk4":
                params += ["--variant", in_file, "--output", tx_recal,
                           "--tranches-file", tx_tranches, "--rscript-file", tx_plot_file]
            else:
                params += ["--input", in_file, "--recal_file", tx_recal,
                           "--tranches_file", tx_tranches, "--rscript_file", tx_plot_file]
            params += _get_vqsr_training(filter_type, vrn_files, gatk_type)
            resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"])
            opts = resources.get("options", [])
            if not opts:
                for cutoff in cutoffs:
                    opts += ["-tranche", str(cutoff)]
                for a in _get_vqsr_annotations(filter_type, data):
                    opts += ["-an", a]
            params += opts
            cores = dd.get_cores(data)
            memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
            try:
                broad_runner.new_resources("gatk-vqsr")
                broad_runner.run_gatk(params, log_error=False, memscale=memscale, parallel_gc=True)
            except:  # Can fail to run if not enough values are present to train.
                return None, None
    if gatk_type == "gatk4":
        vcfutils.bgzip_and_index(recal_file, data["config"])
    return recal_file, tranches_file
[ "def", "_run_vqsr", "(", "in_file", ",", "ref_file", ",", "vrn_files", ",", "sensitivity_cutoff", ",", "filter_type", ",", "data", ")", ":", "cutoffs", "=", "[", "\"100.0\"", ",", "\"99.99\"", ",", "\"99.98\"", ",", "\"99.97\"", ",", "\"99.96\"", ",", "\"99....
Run variant quality score recalibration.
[ "Run", "variant", "quality", "score", "recalibration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L187-L230
train
218,640
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_already_cutoff_filtered
def _already_cutoff_filtered(in_file, filter_type):
    """Check if we have a pre-existing cutoff-based filter file from previous VQSR failure.
    """
    filter_file = "%s-filter%s.vcf.gz" % (utils.splitext_plus(in_file)[0], filter_type)
    return utils.file_exists(filter_file)
python
def _already_cutoff_filtered(in_file, filter_type):
    """Check if we have a pre-existing cutoff-based filter file from previous VQSR failure.
    """
    filter_file = "%s-filter%s.vcf.gz" % (utils.splitext_plus(in_file)[0], filter_type)
    return utils.file_exists(filter_file)
[ "def", "_already_cutoff_filtered", "(", "in_file", ",", "filter_type", ")", ":", "filter_file", "=", "\"%s-filter%s.vcf.gz\"", "%", "(", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", ",", "filter_type", ")", "return", "utils", ".", "file_e...
Check if we have a pre-existing cutoff-based filter file from previous VQSR failure.
[ "Check", "if", "we", "have", "a", "pre", "-", "existing", "cutoff", "-", "based", "filter", "file", "from", "previous", "VQSR", "failure", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L234-L238
train
218,641
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
_variant_filtration
def _variant_filtration(in_file, ref_file, vrn_files, data, filter_type, hard_filter_fn):
    """Filter SNP and indel variant calls using GATK best practice recommendations.

    Use cutoff-based soft filters if configuration indicates too little data or
    already finished a cutoff-based filtering step, otherwise try VQSR.
    """
    # Algorithms multiplied by number of input files to check for large enough sample sizes
    algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
    if (not config_utils.use_vqsr(algs, in_file) or
          _already_cutoff_filtered(in_file, filter_type)):
        logger.info("Skipping VQSR, using cutoff-based filters: we don't have whole genome input data")
        return hard_filter_fn(in_file, data)
    elif not _have_training_data(vrn_files):
        logger.info("Skipping VQSR, using cutoff-based filters: genome build does not have sufficient training data")
        return hard_filter_fn(in_file, data)
    else:
        sensitivities = {"INDEL": "98.0", "SNP": "99.97"}
        recal_file, tranches_file = _run_vqsr(in_file, ref_file, vrn_files,
                                              sensitivities[filter_type], filter_type, data)
        if recal_file is None:  # VQSR failed
            logger.info("VQSR failed due to lack of training data. Using cutoff-based soft filtering.")
            return hard_filter_fn(in_file, data)
        else:
            return _apply_vqsr(in_file, ref_file, recal_file, tranches_file,
                               sensitivities[filter_type], filter_type, data)
python
def _variant_filtration(in_file, ref_file, vrn_files, data, filter_type, hard_filter_fn):
    """Filter SNP and indel variant calls using GATK best practice recommendations.

    Use cutoff-based soft filters if configuration indicates too little data or
    already finished a cutoff-based filtering step, otherwise try VQSR.
    """
    # Algorithms multiplied by number of input files to check for large enough sample sizes
    algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
    if (not config_utils.use_vqsr(algs, in_file) or
          _already_cutoff_filtered(in_file, filter_type)):
        logger.info("Skipping VQSR, using cutoff-based filters: we don't have whole genome input data")
        return hard_filter_fn(in_file, data)
    elif not _have_training_data(vrn_files):
        logger.info("Skipping VQSR, using cutoff-based filters: genome build does not have sufficient training data")
        return hard_filter_fn(in_file, data)
    else:
        sensitivities = {"INDEL": "98.0", "SNP": "99.97"}
        recal_file, tranches_file = _run_vqsr(in_file, ref_file, vrn_files,
                                              sensitivities[filter_type], filter_type, data)
        if recal_file is None:  # VQSR failed
            logger.info("VQSR failed due to lack of training data. Using cutoff-based soft filtering.")
            return hard_filter_fn(in_file, data)
        else:
            return _apply_vqsr(in_file, ref_file, recal_file, tranches_file,
                               sensitivities[filter_type], filter_type, data)
[ "def", "_variant_filtration", "(", "in_file", ",", "ref_file", ",", "vrn_files", ",", "data", ",", "filter_type", ",", "hard_filter_fn", ")", ":", "# Algorithms multiplied by number of input files to check for large enough sample sizes", "algs", "=", "[", "data", "[", "\"...
Filter SNP and indel variant calls using GATK best practice recommendations. Use cutoff-based soft filters if configuration indicates too little data or already finished a cutoff-based filtering step, otherwise try VQSR.
[ "Filter", "SNP", "and", "indel", "variant", "calls", "using", "GATK", "best", "practice", "recommendations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L240-L265
train
218,642
bcbio/bcbio-nextgen
bcbio/variation/gatkfilter.py
gatk_remove_missingalt
def gatk_remove_missingalt(in_file, data):
    """GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks
    downstream tools; this filters those out.
    """
    base = in_file.split('.vcf.gz')[0]
    out_file = "%s-nomissingalt%s" % (base, '.vcf.gz')
    if utils.file_exists(out_file):
        return out_file
    no_gzip_out = out_file.replace(".vcf.gz", ".vcf")
    with file_transaction(no_gzip_out) as tx_out_file:
        with utils.open_gzipsafe(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
            for line in in_handle:
                line = remove_missingalt(line)
                if line:
                    out_handle.write(line)
    return vcfutils.bgzip_and_index(no_gzip_out, data["config"])
python
def gatk_remove_missingalt(in_file, data):
    """GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks
    downstream tools; this filters those out.
    """
    base = in_file.split('.vcf.gz')[0]
    out_file = "%s-nomissingalt%s" % (base, '.vcf.gz')
    if utils.file_exists(out_file):
        return out_file
    no_gzip_out = out_file.replace(".vcf.gz", ".vcf")
    with file_transaction(no_gzip_out) as tx_out_file:
        with utils.open_gzipsafe(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
            for line in in_handle:
                line = remove_missingalt(line)
                if line:
                    out_handle.write(line)
    return vcfutils.bgzip_and_index(no_gzip_out, data["config"])
[ "def", "gatk_remove_missingalt", "(", "in_file", ",", "data", ")", ":", "base", "=", "in_file", ".", "split", "(", "'.vcf.gz'", ")", "[", "0", "]", "out_file", "=", "\"%s-nomissingalt%s\"", "%", "(", "base", ",", "'.vcf.gz'", ")", "if", "utils", ".", "fi...
GATK 4.1.0.0 outputs variants that have missing ALTs, which breaks downstream tools; this filters those out.
[ "GATK", "4", ".", "1", ".", "0", ".", "0", "outputs", "variants", "that", "have", "missing", "ALTs", "which", "breaks", "downstream", "tools", "this", "filters", "those", "out", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatkfilter.py#L276-L292
train
218,643
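remove_missingalt is imported from elsewhere in bcbio; a hypothetical sketch of the per-line check it performs, dropping records whose ALT column is the missing marker ".":

def drop_missing_alt(line):  # illustrative only, not bcbio's implementation
    if line.startswith("#"):
        return line  # header lines pass through untouched
    parts = line.split("\t")
    return None if len(parts) > 4 and parts[4] == "." else line

assert drop_missing_alt("chr1\t100\t.\tA\t.\t50\tPASS\t.\n") is None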
bcbio/bcbio-nextgen
bcbio/rnaseq/cufflinks.py
strand_unknown
def strand_unknown(db, transcript):
    """
    For unstranded data with novel transcripts, single exon genes will have no
    strand information. Single exon novel genes are also a source of noise in
    the Cufflinks assembly, so this removes them.
    """
    features = list(db.children(transcript))
    strand = features[0].strand
    if strand == ".":
        return True
    else:
        return False
python
def strand_unknown(db, transcript):
    """
    For unstranded data with novel transcripts, single exon genes will have no
    strand information. Single exon novel genes are also a source of noise in
    the Cufflinks assembly, so this removes them.
    """
    features = list(db.children(transcript))
    strand = features[0].strand
    if strand == ".":
        return True
    else:
        return False
[ "def", "strand_unknown", "(", "db", ",", "transcript", ")", ":", "features", "=", "list", "(", "db", ".", "children", "(", "transcript", ")", ")", "strand", "=", "features", "[", "0", "]", ".", "strand", "if", "strand", "==", "\".\"", ":", "return", ...
For unstranded data with novel transcripts, single exon genes will have no strand information. Single exon novel genes are also a source of noise in the Cufflinks assembly, so this removes them.
[ "for", "unstranded", "data", "with", "novel", "transcripts", "single", "exon", "genes", "will", "have", "no", "strand", "information", ".", "single", "exon", "novel", "genes", "are", "also", "a", "source", "of", "noise", "in", "the", "Cufflinks", "assembly", ...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L156-L167
train
218,644
bcbio/bcbio-nextgen
bcbio/rnaseq/cufflinks.py
fix_cufflinks_attributes
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None):
    """
    replace the cufflinks gene_id and transcript_id with the gene_id and
    transcript_id from ref_gtf, where available
    """
    base, ext = os.path.splitext(merged_gtf)
    fixed = out_file if out_file else base + ".clean.fixed" + ext
    if file_exists(fixed):
        return fixed

    ref_db = gtf.get_gtf_db(ref_gtf)
    merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True)

    ref_tid_to_gid = {}
    for gene in ref_db.features_of_type('gene'):
        for transcript in ref_db.children(gene, level=1):
            ref_tid_to_gid[transcript.id] = gene.id

    ctid_to_cgid = {}
    ctid_to_oid = {}
    for gene in merged_db.features_of_type('gene'):
        for transcript in merged_db.children(gene, level=1):
            ctid_to_cgid[transcript.id] = gene.id
            feature = list(merged_db.children(transcript))[0]
            oid = feature.attributes.get("oId", [None])[0]
            if oid:
                ctid_to_oid[transcript.id] = oid

    cgid_to_gid = {}
    for ctid, oid in ctid_to_oid.items():
        cgid = ctid_to_cgid.get(ctid, None)
        oid = ctid_to_oid.get(ctid, None)
        gid = ref_tid_to_gid.get(oid, None) if oid else None
        if cgid and gid:
            cgid_to_gid[cgid] = gid

    with file_transaction(data, fixed) as tmp_fixed_file:
        with open(tmp_fixed_file, "w") as out_handle:
            for gene in merged_db.features_of_type('gene'):
                for transcript in merged_db.children(gene, level=1):
                    for feature in merged_db.children(transcript):
                        cgid = feature.attributes.get("gene_id", [None])[0]
                        gid = cgid_to_gid.get(cgid, None)
                        ctid = None
                        if gid:
                            feature.attributes["gene_id"][0] = gid
                            ctid = feature.attributes.get("transcript_id", [None])[0]
                        tid = ctid_to_oid.get(ctid, None)
                        if tid:
                            feature.attributes["transcript_id"][0] = tid
                        if "nearest_ref" in feature.attributes:
                            del feature.attributes["nearest_ref"]
                        if "oId" in feature.attributes:
                            del feature.attributes["oId"]
                        out_handle.write(str(feature) + "\n")
    return fixed
python
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None):
    """
    replace the cufflinks gene_id and transcript_id with the gene_id and
    transcript_id from ref_gtf, where available
    """
    base, ext = os.path.splitext(merged_gtf)
    fixed = out_file if out_file else base + ".clean.fixed" + ext
    if file_exists(fixed):
        return fixed

    ref_db = gtf.get_gtf_db(ref_gtf)
    merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True)

    ref_tid_to_gid = {}
    for gene in ref_db.features_of_type('gene'):
        for transcript in ref_db.children(gene, level=1):
            ref_tid_to_gid[transcript.id] = gene.id

    ctid_to_cgid = {}
    ctid_to_oid = {}
    for gene in merged_db.features_of_type('gene'):
        for transcript in merged_db.children(gene, level=1):
            ctid_to_cgid[transcript.id] = gene.id
            feature = list(merged_db.children(transcript))[0]
            oid = feature.attributes.get("oId", [None])[0]
            if oid:
                ctid_to_oid[transcript.id] = oid

    cgid_to_gid = {}
    for ctid, oid in ctid_to_oid.items():
        cgid = ctid_to_cgid.get(ctid, None)
        oid = ctid_to_oid.get(ctid, None)
        gid = ref_tid_to_gid.get(oid, None) if oid else None
        if cgid and gid:
            cgid_to_gid[cgid] = gid

    with file_transaction(data, fixed) as tmp_fixed_file:
        with open(tmp_fixed_file, "w") as out_handle:
            for gene in merged_db.features_of_type('gene'):
                for transcript in merged_db.children(gene, level=1):
                    for feature in merged_db.children(transcript):
                        cgid = feature.attributes.get("gene_id", [None])[0]
                        gid = cgid_to_gid.get(cgid, None)
                        ctid = None
                        if gid:
                            feature.attributes["gene_id"][0] = gid
                            ctid = feature.attributes.get("transcript_id", [None])[0]
                        tid = ctid_to_oid.get(ctid, None)
                        if tid:
                            feature.attributes["transcript_id"][0] = tid
                        if "nearest_ref" in feature.attributes:
                            del feature.attributes["nearest_ref"]
                        if "oId" in feature.attributes:
                            del feature.attributes["oId"]
                        out_handle.write(str(feature) + "\n")
    return fixed
[ "def", "fix_cufflinks_attributes", "(", "ref_gtf", ",", "merged_gtf", ",", "data", ",", "out_file", "=", "None", ")", ":", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "merged_gtf", ")", "fixed", "=", "out_file", "if", "out_file", "e...
replace the cufflinks gene_id and transcript_id with the gene_id and transcript_id from ref_gtf, where available
[ "replace", "the", "cufflinks", "gene_id", "and", "transcript_id", "with", "the", "gene_id", "and", "transcript_id", "from", "ref_gtf", "where", "available" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L179-L234
train
218,645
bcbio/bcbio-nextgen
bcbio/rnaseq/cufflinks.py
merge
def merge(assembled_gtfs, ref_file, gtf_file, num_cores, data):
    """
    run cuffmerge on a set of assembled GTF files
    """
    assembled_file = tempfile.NamedTemporaryFile(delete=False).name
    with open(assembled_file, "w") as temp_handle:
        for assembled in assembled_gtfs:
            temp_handle.write(assembled + "\n")
    out_dir = os.path.join("assembly", "cuffmerge")
    merged_file = os.path.join(out_dir, "merged.gtf")
    out_file = os.path.join(out_dir, "assembled.gtf")
    if file_exists(out_file):
        return out_file
    if not file_exists(merged_file):
        with file_transaction(data, out_dir) as tmp_out_dir:
            cmd = ("cuffmerge -o {tmp_out_dir} --ref-gtf {gtf_file} "
                   "--num-threads {num_cores} --ref-sequence {ref_file} "
                   "{assembled_file}")
            cmd = cmd.format(**locals())
            message = ("Merging the following transcript assemblies with "
                       "Cuffmerge: %s" % ", ".join(assembled_gtfs))
            do.run(cmd, message)
    clean, _ = clean_assembly(merged_file)
    fixed = fix_cufflinks_attributes(gtf_file, clean, data)
    classified = annotate_gtf.annotate_novel_coding(fixed, gtf_file, ref_file, data)
    filtered = annotate_gtf.cleanup_transcripts(classified, gtf_file, ref_file)
    shutil.move(filtered, out_file)
    return out_file
python
def merge(assembled_gtfs, ref_file, gtf_file, num_cores, data):
    """
    run cuffmerge on a set of assembled GTF files
    """
    assembled_file = tempfile.NamedTemporaryFile(delete=False).name
    with open(assembled_file, "w") as temp_handle:
        for assembled in assembled_gtfs:
            temp_handle.write(assembled + "\n")
    out_dir = os.path.join("assembly", "cuffmerge")
    merged_file = os.path.join(out_dir, "merged.gtf")
    out_file = os.path.join(out_dir, "assembled.gtf")
    if file_exists(out_file):
        return out_file
    if not file_exists(merged_file):
        with file_transaction(data, out_dir) as tmp_out_dir:
            cmd = ("cuffmerge -o {tmp_out_dir} --ref-gtf {gtf_file} "
                   "--num-threads {num_cores} --ref-sequence {ref_file} "
                   "{assembled_file}")
            cmd = cmd.format(**locals())
            message = ("Merging the following transcript assemblies with "
                       "Cuffmerge: %s" % ", ".join(assembled_gtfs))
            do.run(cmd, message)
    clean, _ = clean_assembly(merged_file)
    fixed = fix_cufflinks_attributes(gtf_file, clean, data)
    classified = annotate_gtf.annotate_novel_coding(fixed, gtf_file, ref_file, data)
    filtered = annotate_gtf.cleanup_transcripts(classified, gtf_file, ref_file)
    shutil.move(filtered, out_file)
    return out_file
[ "def", "merge", "(", "assembled_gtfs", ",", "ref_file", ",", "gtf_file", ",", "num_cores", ",", "data", ")", ":", "assembled_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", ".", "name", "with", "open", "(", "assembled_fil...
run cuffmerge on a set of assembled GTF files
[ "run", "cuffmerge", "on", "a", "set", "of", "assembled", "GTF", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/cufflinks.py#L237-L265
train
218,646
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_vcf_info
def _vcf_info(start, end, mate_id, info=None):
    """Return breakend information line with mate and imprecise location.
    """
    out = "SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}".format(
        mate=mate_id, size=end - start)
    if info is not None:
        extra_info = ";".join("{0}={1}".format(k, v) for k, v in info.iteritems())
        out = "{0};{1}".format(out, extra_info)
    return out
python
def _vcf_info(start, end, mate_id, info=None):
    """Return breakend information line with mate and imprecise location.
    """
    out = "SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}".format(
        mate=mate_id, size=end - start)
    if info is not None:
        extra_info = ";".join("{0}={1}".format(k, v) for k, v in info.iteritems())
        out = "{0};{1}".format(out, extra_info)
    return out
[ "def", "_vcf_info", "(", "start", ",", "end", ",", "mate_id", ",", "info", "=", "None", ")", ":", "out", "=", "\"SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}\"", ".", "format", "(", "mate", "=", "mate_id", ",", "size", "=", "end", "-", "start", ")", "...
Return breakend information line with mate and imprecise location.
[ "Return", "breakend", "information", "line", "with", "mate", "and", "imprecise", "location", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L47-L55
train
218,647
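A worked example: a breakend spanning positions 100-150 paired with mate hydra1b produces the INFO string below, with CIPOS expressing the imprecision as the interval width (end - start).

# _vcf_info(100, 150, "hydra1b")
# -> "SVTYPE=BND;MATEID=hydra1b;IMPRECISE;CIPOS=0,50"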
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_vcf_alt
def _vcf_alt(base, other_chr, other_pos, isrc, is_first):
    """Create ALT allele line in VCF 4.1 format associating with other paired end.
    """
    if is_first:
        pipe = "[" if isrc else "]"
        out_str = "{base}{pipe}{chr}:{pos}{pipe}"
    else:
        pipe = "]" if isrc else "["
        out_str = "{pipe}{chr}:{pos}{pipe}{base}"
    return out_str.format(pipe=pipe, chr=other_chr, pos=other_pos + 1,
                          base=base)
python
def _vcf_alt(base, other_chr, other_pos, isrc, is_first):
    """Create ALT allele line in VCF 4.1 format associating with other paired end.
    """
    if is_first:
        pipe = "[" if isrc else "]"
        out_str = "{base}{pipe}{chr}:{pos}{pipe}"
    else:
        pipe = "]" if isrc else "["
        out_str = "{pipe}{chr}:{pos}{pipe}{base}"
    return out_str.format(pipe=pipe, chr=other_chr, pos=other_pos + 1,
                          base=base)
[ "def", "_vcf_alt", "(", "base", ",", "other_chr", ",", "other_pos", ",", "isrc", ",", "is_first", ")", ":", "if", "is_first", ":", "pipe", "=", "\"[\"", "if", "isrc", "else", "\"]\"", "out_str", "=", "\"{base}{pipe}{chr}:{pos}{pipe}\"", "else", ":", "pipe", ...
Create ALT allele line in VCF 4.1 format associating with other paired end.
[ "Create", "ALT", "allele", "line", "in", "VCF", "4", ".", "1", "format", "associating", "with", "other", "paired", "end", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L57-L67
train
218,648
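Worked examples of the four breakend ALT shapes; note other_pos shifts from 0-based BEDPE to 1-based VCF coordinates via the +1.

# _vcf_alt("A", "chr2", 99, isrc=False, is_first=True)   -> "A]chr2:100]"
# _vcf_alt("A", "chr2", 99, isrc=True,  is_first=True)   -> "A[chr2:100["
# _vcf_alt("A", "chr2", 99, isrc=False, is_first=False)  -> "[chr2:100[A"
# _vcf_alt("A", "chr2", 99, isrc=True,  is_first=False)  -> "]chr2:100]A"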
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_breakend_orientation
def _breakend_orientation(strand1, strand2):
    """Convert BEDPE strand representation of breakpoints into VCF.

    | strand1  | strand2  | VCF          |
    +----------+----------+--------------+
    | +        | -        | t[p[  ]p]t   |
    | +        | +        | t]p]  t]p]   |
    | -        | -        | [p[t  [p[t   |
    | -        | +        | ]p]t  t[p[   |
    """
    EndOrientation = namedtuple("EndOrientation",
                                ["is_first1", "is_rc1", "is_first2", "is_rc2"])
    if strand1 == "+" and strand2 == "-":
        return EndOrientation(True, True, False, True)
    elif strand1 == "+" and strand2 == "+":
        return EndOrientation(True, False, True, False)
    elif strand1 == "-" and strand2 == "-":
        return EndOrientation(False, False, False, False)
    elif strand1 == "-" and strand2 == "+":
        return EndOrientation(False, True, True, True)
    else:
        raise ValueError("Unexpected strand pairing: {0} {1}".format(
            strand1, strand2))
python
def _breakend_orientation(strand1, strand2):
    """Convert BEDPE strand representation of breakpoints into VCF.

    | strand1  | strand2  | VCF          |
    +----------+----------+--------------+
    | +        | -        | t[p[  ]p]t   |
    | +        | +        | t]p]  t]p]   |
    | -        | -        | [p[t  [p[t   |
    | -        | +        | ]p]t  t[p[   |
    """
    EndOrientation = namedtuple("EndOrientation",
                                ["is_first1", "is_rc1", "is_first2", "is_rc2"])
    if strand1 == "+" and strand2 == "-":
        return EndOrientation(True, True, False, True)
    elif strand1 == "+" and strand2 == "+":
        return EndOrientation(True, False, True, False)
    elif strand1 == "-" and strand2 == "-":
        return EndOrientation(False, False, False, False)
    elif strand1 == "-" and strand2 == "+":
        return EndOrientation(False, True, True, True)
    else:
        raise ValueError("Unexpected strand pairing: {0} {1}".format(
            strand1, strand2))
[ "def", "_breakend_orientation", "(", "strand1", ",", "strand2", ")", ":", "EndOrientation", "=", "namedtuple", "(", "\"EndOrientation\"", ",", "[", "\"is_first1\"", ",", "\"is_rc1\"", ",", "\"is_first2\"", ",", "\"is_rc2\"", "]", ")", "if", "strand1", "==", "\"+...
Convert BEDPE strand representation of breakpoints into VCF. | strand1 | strand2 | VCF | +----------+----------+--------------+ | + | - | t[p[ ]p]t | | + | + | t]p] t]p] | | - | - | [p[t [p[t | | - | + | ]p]t t[p[ |
[ "Convert", "BEDPE", "strand", "representation", "of", "breakpoints", "into", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L69-L91
train
218,649
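A quick check of the first row of the docstring table; feeding the returned orientation into _vcf_alt (defined above) reproduces the t[p[ / ]p]t pattern. Chromosomes and positions are made up:

o = _breakend_orientation("+", "-")
print(o)  # EndOrientation(is_first1=True, is_rc1=True, is_first2=False, is_rc2=True)
print(_vcf_alt("t", "chr2", 99, o.is_rc1, o.is_first1))  # t[chr2:100[
print(_vcf_alt("t", "chr1", 49, o.is_rc2, o.is_first2))  # ]chr1:50]t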
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
build_vcf_parts
def build_vcf_parts(feature, genome_2bit, info=None): """Convert BedPe feature information into VCF part representation. Each feature produces two VCF lines, one for each side of the breakpoint. """ base1 = genome_2bit[feature.chrom1].get( feature.start1, feature.start1 + 1).upper() id1 = "hydra{0}a".format(feature.name) base2 = genome_2bit[feature.chrom2].get( feature.start2, feature.start2 + 1).upper() id2 = "hydra{0}b".format(feature.name) orientation = _breakend_orientation(feature.strand1, feature.strand2) return (VcfLine(feature.chrom1, feature.start1, id1, base1, _vcf_alt(base1, feature.chrom2, feature.start2, orientation.is_rc1, orientation.is_first1), _vcf_info(feature.start1, feature.end1, id2, info)), VcfLine(feature.chrom2, feature.start2, id2, base2, _vcf_alt(base2, feature.chrom1, feature.start1, orientation.is_rc2, orientation.is_first2), _vcf_info(feature.start2, feature.end2, id1, info)))
python
def build_vcf_parts(feature, genome_2bit, info=None): """Convert BedPe feature information into VCF part representation. Each feature produces two VCF lines, one for each side of the breakpoint. """ base1 = genome_2bit[feature.chrom1].get( feature.start1, feature.start1 + 1).upper() id1 = "hydra{0}a".format(feature.name) base2 = genome_2bit[feature.chrom2].get( feature.start2, feature.start2 + 1).upper() id2 = "hydra{0}b".format(feature.name) orientation = _breakend_orientation(feature.strand1, feature.strand2) return (VcfLine(feature.chrom1, feature.start1, id1, base1, _vcf_alt(base1, feature.chrom2, feature.start2, orientation.is_rc1, orientation.is_first1), _vcf_info(feature.start1, feature.end1, id2, info)), VcfLine(feature.chrom2, feature.start2, id2, base2, _vcf_alt(base2, feature.chrom1, feature.start1, orientation.is_rc2, orientation.is_first2), _vcf_info(feature.start2, feature.end2, id1, info)))
[ "def", "build_vcf_parts", "(", "feature", ",", "genome_2bit", ",", "info", "=", "None", ")", ":", "base1", "=", "genome_2bit", "[", "feature", ".", "chrom1", "]", ".", "get", "(", "feature", ".", "start1", ",", "feature", ".", "start1", "+", "1", ")", ...
Convert BedPe feature information into VCF part representation. Each feature produces two VCF lines, one for each side of the breakpoint.
[ "Convert", "BedPe", "feature", "information", "into", "VCF", "part", "representation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L95-L114
train
218,650
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
build_vcf_deletion
def build_vcf_deletion(x, genome_2bit): """Provide representation of deletion from BedPE breakpoints. """ base1 = genome_2bit[x.chrom1].get(x.start1, x.start1 + 1).upper() id1 = "hydra{0}".format(x.name) return VcfLine(x.chrom1, x.start1, id1, base1, "<DEL>", _vcf_single_end_info(x, "DEL", True))
python
def build_vcf_deletion(x, genome_2bit): """Provide representation of deletion from BedPE breakpoints. """ base1 = genome_2bit[x.chrom1].get(x.start1, x.start1 + 1).upper() id1 = "hydra{0}".format(x.name) return VcfLine(x.chrom1, x.start1, id1, base1, "<DEL>", _vcf_single_end_info(x, "DEL", True))
[ "def", "build_vcf_deletion", "(", "x", ",", "genome_2bit", ")", ":", "base1", "=", "genome_2bit", "[", "x", ".", "chrom1", "]", ".", "get", "(", "x", ".", "start1", ",", "x", ".", "start1", "+", "1", ")", ".", "upper", "(", ")", "id1", "=", "\"hy...
Provide representation of deletion from BedPE breakpoints.
[ "Provide", "representation", "of", "deletion", "from", "BedPE", "breakpoints", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L136-L142
train
218,651
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
build_vcf_inversion
def build_vcf_inversion(x1, x2, genome_2bit): """Provide representation of inversion from BedPE breakpoints. """ id1 = "hydra{0}".format(x1.name) start_coords = sorted([x1.start1, x1.end1, x2.start1, x2.end1]) end_coords = sorted([x1.start2, x1.end2, x2.start2, x2.end2]) start_pos = (start_coords[1] + start_coords[2]) // 2 end_pos = (end_coords[1] + end_coords[2]) // 2 base1 = genome_2bit[x1.chrom1].get(start_pos, start_pos + 1).upper() info = "SVTYPE=INV;IMPRECISE;CIPOS={cip1},{cip2};CIEND={cie1},{cie2};" \ "END={end};SVLEN={length}".format(cip1=start_pos - start_coords[0], cip2=start_coords[-1] - start_pos, cie1=end_pos - end_coords[0], cie2=end_coords[-1] - end_pos, end=end_pos, length=end_pos-start_pos) return VcfLine(x1.chrom1, start_pos, id1, base1, "<INV>", info)
python
def build_vcf_inversion(x1, x2, genome_2bit): """Provide representation of inversion from BedPE breakpoints. """ id1 = "hydra{0}".format(x1.name) start_coords = sorted([x1.start1, x1.end1, x2.start1, x2.end1]) end_coords = sorted([x1.start2, x1.end2, x2.start2, x2.end2]) start_pos = (start_coords[1] + start_coords[2]) // 2 end_pos = (end_coords[1] + end_coords[2]) // 2 base1 = genome_2bit[x1.chrom1].get(start_pos, start_pos + 1).upper() info = "SVTYPE=INV;IMPRECISE;CIPOS={cip1},{cip2};CIEND={cie1},{cie2};" \ "END={end};SVLEN={length}".format(cip1=start_pos - start_coords[0], cip2=start_coords[-1] - start_pos, cie1=end_pos - end_coords[0], cie2=end_coords[-1] - end_pos, end=end_pos, length=end_pos-start_pos) return VcfLine(x1.chrom1, start_pos, id1, base1, "<INV>", info)
[ "def", "build_vcf_inversion", "(", "x1", ",", "x2", ",", "genome_2bit", ")", ":", "id1", "=", "\"hydra{0}\"", ".", "format", "(", "x1", ".", "name", ")", "start_coords", "=", "sorted", "(", "[", "x1", ".", "start1", ",", "x1", ".", "end1", ",", "x2",...
Provide representation of inversion from BedPE breakpoints.
[ "Provide", "representation", "of", "inversion", "from", "BedPE", "breakpoints", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L166-L182
train
218,652
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
hydra_parser
def hydra_parser(in_file, options=None): """Parse hydra input file into namedtuple of values. """ if options is None: options = {} BedPe = namedtuple('BedPe', ["chrom1", "start1", "end1", "chrom2", "start2", "end2", "name", "strand1", "strand2", "support"]) with open(in_file) as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") for line in reader: cur = BedPe(line[0], int(line[1]), int(line[2]), line[3], int(line[4]), int(line[5]), line[6], line[8], line[9], float(line[18])) if cur.support >= options.get("min_support", 0): yield cur
python
def hydra_parser(in_file, options=None): """Parse hydra input file into namedtuple of values. """ if options is None: options = {} BedPe = namedtuple('BedPe', ["chrom1", "start1", "end1", "chrom2", "start2", "end2", "name", "strand1", "strand2", "support"]) with open(in_file) as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") for line in reader: cur = BedPe(line[0], int(line[1]), int(line[2]), line[3], int(line[4]), int(line[5]), line[6], line[8], line[9], float(line[18])) if cur.support >= options.get("min_support", 0): yield cur
[ "def", "hydra_parser", "(", "in_file", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "BedPe", "=", "namedtuple", "(", "'BedPe'", ",", "[", "\"chrom1\"", ",", "\"start1\"", ",", "\"end1\"", ",", "\"...
Parse hydra input file into namedtuple of values.
[ "Parse", "hydra", "input", "file", "into", "namedtuple", "of", "values", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L198-L214
train
218,653
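A sketch of the expected input; the file name and field values are hypothetical, with padding columns so that the support value lands at index 18 where the parser reads it:

fields = ["chr1", "1000", "1100", "chr5", "2000", "2100", "7", ".", "+", "-"] + ["0"] * 8 + ["6.0"]
with open("example.bedpe", "w") as out:      # hypothetical input file
    out.write("\t".join(fields) + "\n")
for rec in hydra_parser("example.bedpe", {"min_support": 5}):
    print(rec.chrom1, rec.start1, rec.strand1, rec.support)  # chr1 1000 + 6.0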
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_cluster_by
def _cluster_by(end_iter, attr1, attr2, cluster_distance): """Cluster breakends by specified attributes. """ ClusterInfo = namedtuple("ClusterInfo", ["chroms", "clusters", "lookup"]) chr_clusters = {} chroms = [] brends_by_id = {} for brend in end_iter: if brend.chrom1 not in chr_clusters: chroms.append(brend.chrom1) chr_clusters[brend.chrom1] = ClusterTree(cluster_distance, 1) brends_by_id[int(brend.name)] = brend chr_clusters[brend.chrom1].insert(getattr(brend, attr1), getattr(brend, attr2), int(brend.name)) return ClusterInfo(chroms, chr_clusters, brends_by_id)
python
def _cluster_by(end_iter, attr1, attr2, cluster_distance): """Cluster breakends by specified attributes. """ ClusterInfo = namedtuple("ClusterInfo", ["chroms", "clusters", "lookup"]) chr_clusters = {} chroms = [] brends_by_id = {} for brend in end_iter: if brend.chrom1 not in chr_clusters: chroms.append(brend.chrom1) chr_clusters[brend.chrom1] = ClusterTree(cluster_distance, 1) brends_by_id[int(brend.name)] = brend chr_clusters[brend.chrom1].insert(getattr(brend, attr1), getattr(brend, attr2), int(brend.name)) return ClusterInfo(chroms, chr_clusters, brends_by_id)
[ "def", "_cluster_by", "(", "end_iter", ",", "attr1", ",", "attr2", ",", "cluster_distance", ")", ":", "ClusterInfo", "=", "namedtuple", "(", "\"ClusterInfo\"", ",", "[", "\"chroms\"", ",", "\"clusters\"", ",", "\"lookup\"", "]", ")", "chr_clusters", "=", "{", ...
Cluster breakends by specified attributes.
[ "Cluster", "breakends", "by", "specified", "attributes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L216-L231
train
218,654
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_calculate_cluster_distance
def _calculate_cluster_distance(end_iter): """Compute allowed distance for clustering based on end confidence intervals. """ out = [] sizes = [] for x in end_iter: out.append(x) sizes.append(x.end1 - x.start1) sizes.append(x.end2 - x.start2) distance = sum(sizes) // len(sizes) return distance, out
python
def _calculate_cluster_distance(end_iter): """Compute allowed distance for clustering based on end confidence intervals. """ out = [] sizes = [] for x in end_iter: out.append(x) sizes.append(x.end1 - x.start1) sizes.append(x.end2 - x.start2) distance = sum(sizes) // len(sizes) return distance, out
[ "def", "_calculate_cluster_distance", "(", "end_iter", ")", ":", "out", "=", "[", "]", "sizes", "=", "[", "]", "for", "x", "in", "end_iter", ":", "out", ".", "append", "(", "x", ")", "sizes", ".", "append", "(", "x", ".", "end1", "-", "x", ".", "...
Compute allowed distance for clustering based on end confidence intervals.
[ "Compute", "allowed", "distance", "for", "clustering", "based", "on", "end", "confidence", "intervals", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L233-L243
train
218,655
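A self-contained sketch with two hypothetical breakends whose per-side confidence intervals are 20 bp and 40 bp; the allowed clustering distance is their mean:

from collections import namedtuple
BedPe = namedtuple("BedPe", ["chrom1", "start1", "end1", "chrom2", "start2", "end2", "name", "strand1", "strand2", "support"])
ends = [BedPe("chr1", 100, 120, "chr1", 500, 520, "1", "+", "-", 5.0),
        BedPe("chr1", 130, 170, "chr1", 540, 580, "2", "+", "-", 5.0)]
distance, kept = _calculate_cluster_distance(ends)
print(distance)  # (20 + 20 + 40 + 40) // 4 == 30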
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
group_hydra_breakends
def group_hydra_breakends(end_iter): """Group together hydra breakends with overlapping ends. This provides a way to identify inversions, translocations and insertions present in hydra break point ends. We cluster together the endpoints and return together any items with closely oriented pairs. This helps in describing more complex rearrangement events. """ cluster_distance, all_ends = _calculate_cluster_distance(end_iter) first_cluster = _cluster_by(all_ends, "start1", "end1", cluster_distance) for chrom in first_cluster.chroms: for _, _, brends in first_cluster.clusters[chrom].getregions(): if len(brends) == 1: yield [first_cluster.lookup[brends[0]]] else: second_cluster = _cluster_by([first_cluster.lookup[x] for x in brends], "start2", "end2", cluster_distance) for chrom2 in second_cluster.chroms: for _, _, brends2 in second_cluster.clusters[chrom2].getregions(): yield [second_cluster.lookup[x] for x in brends2]
python
def group_hydra_breakends(end_iter): """Group together hydra breakends with overlapping ends. This provides a way to identify inversions, translocations and insertions present in hydra break point ends. We cluster together the endpoints and return together any items with closely oriented pairs. This helps in describing more complex rearrangement events. """ cluster_distance, all_ends = _calculate_cluster_distance(end_iter) first_cluster = _cluster_by(all_ends, "start1", "end1", cluster_distance) for chrom in first_cluster.chroms: for _, _, brends in first_cluster.clusters[chrom].getregions(): if len(brends) == 1: yield [first_cluster.lookup[brends[0]]] else: second_cluster = _cluster_by([first_cluster.lookup[x] for x in brends], "start2", "end2", cluster_distance) for chrom2 in second_cluster.chroms: for _, _, brends2 in second_cluster.clusters[chrom2].getregions(): yield [second_cluster.lookup[x] for x in brends2]
[ "def", "group_hydra_breakends", "(", "end_iter", ")", ":", "cluster_distance", ",", "all_ends", "=", "_calculate_cluster_distance", "(", "end_iter", ")", "first_cluster", "=", "_cluster_by", "(", "all_ends", ",", "\"start1\"", ",", "\"end1\"", ",", "cluster_distance",...
Group together hydra breakends with overlapping ends. This provides a way to identify inversions, translocations and insertions present in hydra break point ends. We cluster together the endpoints and return together any items with closely oriented pairs. This helps in describing more complex rearrangement events.
[ "Group", "together", "hydra", "breakends", "with", "overlapping", "ends", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L245-L264
train
218,656
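Both clustering passes lean on bx-python's ClusterTree; a minimal sketch of that primitive with made-up intervals, matching how _cluster_by uses it:

from bx.intervals.cluster import ClusterTree
tree = ClusterTree(30, 1)  # merge intervals within 30 bp, keep clusters of >= 1 item
tree.insert(100, 120, 1)
tree.insert(125, 170, 2)   # within 30 bp of the first interval, so it merges
tree.insert(900, 950, 3)
for start, end, ids in tree.getregions():
    print(start, end, ids)  # (100, 170, [1, 2]) then (900, 950, [3])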
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_write_vcf_header
def _write_vcf_header(out_handle): """Write VCF header information for Hydra structural variant. """ def w(line): out_handle.write("{0}\n".format(line)) w('##fileformat=VCFv4.1') w('##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">') w('##INFO=<ID=END,Number=1,Type=Integer,' 'Description="End position of the variant described in this record">') w('##INFO=<ID=CIPOS,Number=2,Type=Integer,' 'Description="Confidence interval around POS for imprecise variants">') w('##INFO=<ID=CIEND,Number=2,Type=Integer,' 'Description="Confidence interval around END for imprecise variants">') w('##INFO=<ID=SVLEN,Number=.,Type=Integer,' 'Description="Difference in length between REF and ALT alleles">') w('##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">') w('##INFO=<ID=MATEID,Number=.,Type=String,Description="ID of mate breakends">') w('##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of event associated to breakend">') w('##ALT=<ID=DEL,Description="Deletion">') w('##ALT=<ID=INV,Description="Inversion">') w('##ALT=<ID=DUP,Description="Duplication">') w('##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">') w('##source=hydra') w("#" + "\t".join(["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO"]))
python
def _write_vcf_header(out_handle): """Write VCF header information for Hydra structural variant. """ def w(line): out_handle.write("{0}\n".format(line)) w('##fileformat=VCFv4.1') w('##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">') w('##INFO=<ID=END,Number=1,Type=Integer,' 'Description="End position of the variant described in this record">') w('##INFO=<ID=CIPOS,Number=2,Type=Integer,' 'Description="Confidence interval around POS for imprecise variants">') w('##INFO=<ID=CIEND,Number=2,Type=Integer,' 'Description="Confidence interval around END for imprecise variants">') w('##INFO=<ID=SVLEN,Number=.,Type=Integer,' 'Description="Difference in length between REF and ALT alleles">') w('##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">') w('##INFO=<ID=MATEID,Number=.,Type=String,Description="ID of mate breakends">') w('##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of event associated to breakend">') w('##ALT=<ID=DEL,Description="Deletion">') w('##ALT=<ID=INV,Description="Inversion">') w('##ALT=<ID=DUP,Description="Duplication">') w('##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">') w('##source=hydra') w("#" + "\t".join(["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO"]))
[ "def", "_write_vcf_header", "(", "out_handle", ")", ":", "def", "w", "(", "line", ")", ":", "out_handle", ".", "write", "(", "\"{0}\\n\"", ".", "format", "(", "line", ")", ")", "w", "(", "'##fileformat=VCFv4.1'", ")", "w", "(", "'##INFO=<ID=IMPRECISE,Number=...
Write VCF header information for Hydra structural variant.
[ "Write", "VCF", "header", "information", "for", "Hydra", "structural", "variant", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L268-L291
train
218,657
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_write_vcf_breakend
def _write_vcf_breakend(brend, out_handle): """Write out a single VCF line with breakpoint information. """ out_handle.write("{0}\n".format("\t".join(str(x) for x in [brend.chrom, brend.pos + 1, brend.id, brend.ref, brend.alt, ".", "PASS", brend.info])))
python
def _write_vcf_breakend(brend, out_handle): """Write out a single VCF line with breakpoint information. """ out_handle.write("{0}\n".format("\t".join(str(x) for x in [brend.chrom, brend.pos + 1, brend.id, brend.ref, brend.alt, ".", "PASS", brend.info])))
[ "def", "_write_vcf_breakend", "(", "brend", ",", "out_handle", ")", ":", "out_handle", ".", "write", "(", "\"{0}\\n\"", ".", "format", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "[", "brend", ".", "chrom", ",", "brend", ...
Write out a single VCF line with breakpoint information.
[ "Write", "out", "a", "single", "VCF", "line", "with", "breakpoint", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L293-L298
train
218,658
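VcfLine is a namedtuple defined elsewhere in hydra_to_vcf.py; a stand-in with the same field names is enough to exercise the writer (values illustrative). Note the 0-based pos 999 is printed 1-based as 1000:

import sys
from collections import namedtuple
VcfLine = namedtuple("VcfLine", ["chrom", "pos", "id", "ref", "alt", "info"])  # stand-in
brend = VcfLine("chr1", 999, "hydra7a", "A", "A[chr5:2001[", "SVTYPE=BND;MATEID=hydra7b;IMPRECISE;CIPOS=0,100")
_write_vcf_breakend(brend, sys.stdout)
# chr1  1000  hydra7a  A  A[chr5:2001[  .  PASS  SVTYPE=BND;MATEID=hydra7b;IMPRECISE;CIPOS=0,100  (tab-separated)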
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
_get_vcf_breakends
def _get_vcf_breakends(hydra_file, genome_2bit, options=None): """Parse BEDPE input, yielding VCF ready breakends. """ if options is None: options = {} for features in group_hydra_breakends(hydra_parser(hydra_file, options)): if len(features) == 1 and is_deletion(features[0], options): yield build_vcf_deletion(features[0], genome_2bit) elif len(features) == 1 and is_tandem_dup(features[0], options): yield build_tandem_deletion(features[0], genome_2bit) elif len(features) == 2 and is_inversion(*features): yield build_vcf_inversion(features[0], features[1], genome_2bit) elif len(features) == 2 and is_translocation(*features): info = get_translocation_info(features[0], features[1]) for feature in features: for brend in build_vcf_parts(feature, genome_2bit, info): yield brend else: for feature in features: for brend in build_vcf_parts(feature, genome_2bit): yield brend
python
def _get_vcf_breakends(hydra_file, genome_2bit, options=None): """Parse BEDPE input, yielding VCF ready breakends. """ if options is None: options = {} for features in group_hydra_breakends(hydra_parser(hydra_file, options)): if len(features) == 1 and is_deletion(features[0], options): yield build_vcf_deletion(features[0], genome_2bit) elif len(features) == 1 and is_tandem_dup(features[0], options): yield build_tandem_deletion(features[0], genome_2bit) elif len(features) == 2 and is_inversion(*features): yield build_vcf_inversion(features[0], features[1], genome_2bit) elif len(features) == 2 and is_translocation(*features): info = get_translocation_info(features[0], features[1]) for feature in features: for brend in build_vcf_parts(feature, genome_2bit, info): yield brend else: for feature in features: for brend in build_vcf_parts(feature, genome_2bit): yield brend
[ "def", "_get_vcf_breakends", "(", "hydra_file", ",", "genome_2bit", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "for", "features", "in", "group_hydra_breakends", "(", "hydra_parser", "(", "hydra_file", ...
Parse BEDPE input, yielding VCF ready breakends.
[ "Parse", "BEDPE", "input", "yielding", "VCF", "ready", "breakends", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L300-L319
train
218,659
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
hydra_to_vcf_writer
def hydra_to_vcf_writer(hydra_file, genome_2bit, options, out_handle): """Write hydra output as sorted VCF file. Requires loading the hydra file into memory to perform sorting on output VCF. Could generalize this to no sorting or by-chromosome approach if this proves too memory intensive. """ _write_vcf_header(out_handle) brends = list(_get_vcf_breakends(hydra_file, genome_2bit, options)) brends.sort(key=attrgetter("chrom", "pos")) for brend in brends: _write_vcf_breakend(brend, out_handle)
python
def hydra_to_vcf_writer(hydra_file, genome_2bit, options, out_handle): """Write hydra output as sorted VCF file. Requires loading the hydra file into memory to perform sorting on output VCF. Could generalize this to no sorting or by-chromosome approach if this proves too memory intensive. """ _write_vcf_header(out_handle) brends = list(_get_vcf_breakends(hydra_file, genome_2bit, options)) brends.sort(key=attrgetter("chrom", "pos")) for brend in brends: _write_vcf_breakend(brend, out_handle)
[ "def", "hydra_to_vcf_writer", "(", "hydra_file", ",", "genome_2bit", ",", "options", ",", "out_handle", ")", ":", "_write_vcf_header", "(", "out_handle", ")", "brends", "=", "list", "(", "_get_vcf_breakends", "(", "hydra_file", ",", "genome_2bit", ",", "options", ...
Write hydra output as sorted VCF file. Requires loading the hydra file into memory to perform sorting on output VCF. Could generalize this to no sorting or by-chromosome approach if this proves too memory intensive.
[ "Write", "hydra", "output", "as", "sorted", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L321-L332
train
218,660
bcbio/bcbio-nextgen
bcbio/rnaseq/kallisto.py
kallisto_table
def kallisto_table(kallisto_dir, index): """ convert kallisto output to a count table where the rows are equivalence classes and the columns are cells """ quant_dir = os.path.join(kallisto_dir, "quant") out_file = os.path.join(quant_dir, "matrix.csv") if file_exists(out_file): return out_file tsvfile = os.path.join(quant_dir, "matrix.tsv") ecfile = os.path.join(quant_dir, "matrix.ec") cellsfile = os.path.join(quant_dir, "matrix.cells") fastafile = os.path.splitext(index)[0] + ".fa" fasta_names = fasta.sequence_names(fastafile) ec_names = get_ec_names(ecfile, fasta_names) df = pd.read_table(tsvfile, header=None, names=["ec", "cell", "count"]) df["ec"] = [ec_names[x] for x in df["ec"]] df = df.pivot(index='ec', columns='cell', values='count') cellnames = get_cell_names(cellsfile) colnames = [cellnames[x] for x in df.columns] df.columns = colnames df.to_csv(out_file) return out_file
python
def kallisto_table(kallisto_dir, index): """ convert kallisto output to a count table where the rows are equivalence classes and the columns are cells """ quant_dir = os.path.join(kallisto_dir, "quant") out_file = os.path.join(quant_dir, "matrix.csv") if file_exists(out_file): return out_file tsvfile = os.path.join(quant_dir, "matrix.tsv") ecfile = os.path.join(quant_dir, "matrix.ec") cellsfile = os.path.join(quant_dir, "matrix.cells") fastafile = os.path.splitext(index)[0] + ".fa" fasta_names = fasta.sequence_names(fastafile) ec_names = get_ec_names(ecfile, fasta_names) df = pd.read_table(tsvfile, header=None, names=["ec", "cell", "count"]) df["ec"] = [ec_names[x] for x in df["ec"]] df = df.pivot(index='ec', columns='cell', values='count') cellnames = get_cell_names(cellsfile) colnames = [cellnames[x] for x in df.columns] df.columns = colnames df.to_csv(out_file) return out_file
[ "def", "kallisto_table", "(", "kallisto_dir", ",", "index", ")", ":", "quant_dir", "=", "os", ".", "path", ".", "join", "(", "kallisto_dir", ",", "\"quant\"", ")", "out_file", "=", "os", ".", "path", ".", "join", "(", "quant_dir", ",", "\"matrix.csv\"", ...
convert kallisto output to a count table where the rows are equivalence classes and the columns are cells
[ "convert", "kallisto", "output", "to", "a", "count", "table", "where", "the", "rows", "are", "equivalence", "classes", "and", "the", "columns", "are", "cells" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/kallisto.py#L127-L149
train
218,661
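The heart of the table build is the long-to-wide pivot; a toy sketch of that step with made-up equivalence classes and cells:

import pandas as pd
df = pd.DataFrame({"ec": ["tA", "tA", "tA:tB"], "cell": ["c1", "c2", "c1"], "count": [5, 3, 2]})
wide = df.pivot(index="ec", columns="cell", values="count")
print(wide)
# cell    c1   c2
# tA     5.0  3.0
# tA:tB  2.0  NaN   (missing ec/cell pairs become NaN)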
bcbio/bcbio-nextgen
bcbio/rnaseq/kallisto.py
get_ec_names
def get_ec_names(ecfile, fasta_names): """ convert equivalence classes to their set of transcripts """ df = pd.read_table(ecfile, header=None, names=["ec", "transcripts"]) transcript_groups = [x.split(",") for x in df["transcripts"]] transcripts = [] for group in transcript_groups: transcripts.append(":".join([fasta_names[int(x)] for x in group])) return transcripts
python
def get_ec_names(ecfile, fasta_names): """ convert equivalence classes to their set of transcripts """ df = pd.read_table(ecfile, header=None, names=["ec", "transcripts"]) transcript_groups = [x.split(",") for x in df["transcripts"]] transcripts = [] for group in transcript_groups: transcripts.append(":".join([fasta_names[int(x)] for x in group])) return transcripts
[ "def", "get_ec_names", "(", "ecfile", ",", "fasta_names", ")", ":", "df", "=", "pd", ".", "read_table", "(", "ecfile", ",", "header", "=", "None", ",", "names", "=", "[", "\"ec\"", ",", "\"transcripts\"", "]", ")", "transcript_groups", "=", "[", "x", "...
convert equivalence classes to their set of transcripts
[ "convert", "equivalence", "classes", "to", "their", "set", "of", "transcripts" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/kallisto.py#L151-L160
train
218,662
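The index-to-name expansion in isolation, with hypothetical transcript names; an equivalence class listing transcripts "0,2" becomes a colon-joined label:

fasta_names = ["ENST01", "ENST02", "ENST03"]  # hypothetical names
group = "0,2".split(",")
print(":".join(fasta_names[int(x)] for x in group))  # ENST01:ENST03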
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
parse_dirname
def parse_dirname(fc_dir): """Parse the flow cell ID and date from a flow cell directory. """ (_, fc_dir) = os.path.split(fc_dir) parts = fc_dir.split("_") name = None date = None for p in parts: if p.endswith(("XX", "xx", "XY", "X2")): name = p elif len(p) == 6: try: int(p) date = p except ValueError: pass if name is None or date is None: raise ValueError("Did not find flowcell name: %s" % fc_dir) return name, date
python
def parse_dirname(fc_dir): """Parse the flow cell ID and date from a flow cell directory. """ (_, fc_dir) = os.path.split(fc_dir) parts = fc_dir.split("_") name = None date = None for p in parts: if p.endswith(("XX", "xx", "XY", "X2")): name = p elif len(p) == 6: try: int(p) date = p except ValueError: pass if name is None or date is None: raise ValueError("Did not find flowcell name: %s" % fc_dir) return name, date
[ "def", "parse_dirname", "(", "fc_dir", ")", ":", "(", "_", ",", "fc_dir", ")", "=", "os", ".", "path", ".", "split", "(", "fc_dir", ")", "parts", "=", "fc_dir", ".", "split", "(", "\"_\"", ")", "name", "=", "None", "date", "=", "None", "for", "p"...
Parse the flow cell ID and date from a flow cell directory.
[ "Parse", "the", "flow", "cell", "ID", "and", "date", "from", "a", "flow", "cell", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L8-L26
train
218,663
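Example with a made-up Illumina run directory name; the six-digit token parses as the date and the token ending in XX as the flowcell ID:

name, date = parse_dirname("/runs/110106_SN001_0021_AB0JKSACXX")
print(name, date)  # AB0JKSACXX 110106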
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
get_qseq_dir
def get_qseq_dir(fc_dir): """Retrieve the qseq directory within Solexa flowcell output. """ machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return machine_bc # otherwise assume we are in the qseq directory # XXX What other cases can we end up with here? else: return fc_dir
python
def get_qseq_dir(fc_dir): """Retrieve the qseq directory within Solexa flowcell output. """ machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return machine_bc # otherwise assume we are in the qseq directory # XXX What other cases can we end up with here? else: return fc_dir
[ "def", "get_qseq_dir", "(", "fc_dir", ")", ":", "machine_bc", "=", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"BaseCalls\"", ")", "if", "os", ".", "path", ".", "exists", "(", "machine_bc", ")", ":", "...
Retrieve the qseq directory within Solexa flowcell output.
[ "Retrieve", "the", "qseq", "directory", "within", "Solexa", "flowcell", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L28-L37
train
218,664
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
get_fastq_dir
def get_fastq_dir(fc_dir): """Retrieve the fastq directory within Solexa flowcell output. """ full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*")) bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*")) machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return os.path.join(machine_bc, "fastq") elif len(full_goat_bc) > 0: return os.path.join(full_goat_bc[0], "fastq") elif len(bustard_bc) > 0: return os.path.join(bustard_bc[0], "fastq") # otherwise assume we are in the fastq directory # XXX What other cases can we end up with here? else: return fc_dir
python
def get_fastq_dir(fc_dir): """Retrieve the fastq directory within Solexa flowcell output. """ full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*")) bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*")) machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls") if os.path.exists(machine_bc): return os.path.join(machine_bc, "fastq") elif len(full_goat_bc) > 0: return os.path.join(full_goat_bc[0], "fastq") elif len(bustard_bc) > 0: return os.path.join(bustard_bc[0], "fastq") # otherwise assume we are in the fastq directory # XXX What other cases can we end up with here? else: return fc_dir
[ "def", "get_fastq_dir", "(", "fc_dir", ")", ":", "full_goat_bc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"*Firecrest*\"", ",", "\"Bustard*\"", ")", ")", "bustard_bc", "=", "glob", ".", "glob",...
Retrieve the fastq directory within Solexa flowcell output.
[ "Retrieve", "the", "fastq", "directory", "within", "Solexa", "flowcell", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L39-L54
train
218,665
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
GalaxySqnLimsApi.run_details
def run_details(self, run): """Retrieve sequencing run details as a dictionary. """ run_data = dict(run=run) req = urllib.request.Request("%s/nglims/api_run_details" % self._base_url, urllib.parse.urlencode(run_data).encode()) response = urllib.request.urlopen(req) info = json.loads(response.read()) if "error" in info: raise ValueError("Problem retrieving info: %s" % info["error"]) else: return info["details"]
python
def run_details(self, run): """Retrieve sequencing run details as a dictionary. """ run_data = dict(run=run) req = urllib.request.Request("%s/nglims/api_run_details" % self._base_url, urllib.parse.urlencode(run_data).encode()) response = urllib.request.urlopen(req) info = json.loads(response.read()) if "error" in info: raise ValueError("Problem retrieving info: %s" % info["error"]) else: return info["details"]
[ "def", "run_details", "(", "self", ",", "run", ")", ":", "run_data", "=", "dict", "(", "run", "=", "run", ")", "req", "=", "urllib", ".", "request", ".", "Request", "(", "\"%s/nglims/api_run_details\"", "%", "self", ".", "_base_url", ",", "urllib", ".", ...
Retrieve sequencing run details as a dictionary.
[ "Retrieve", "sequencing", "run", "details", "as", "a", "dictionary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L70-L81
train
218,666
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
_mosaik_args_from_config
def _mosaik_args_from_config(config): """Configurable high level options for mosaik. """ multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-m", "all"] if multi_mappers else ["-m", "unique"] error_flags = ["-mm", "2"] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + multi_flags + error_flags
python
def _mosaik_args_from_config(config): """Configurable high level options for mosaik. """ multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-m", "all"] if multi_mappers else ["-m", "unique"] error_flags = ["-mm", "2"] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + multi_flags + error_flags
[ "def", "_mosaik_args_from_config", "(", "config", ")", ":", "multi_mappers", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"multiple_mappers\"", ",", "True", ")", "multi_flags", "=", "[", "\"-m\"", ",", "\"all\"", "]", "if", "multi_mappers", "el...
Configurable high level options for mosaik.
[ "Configurable", "high", "level", "options", "for", "mosaik", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L14-L22
train
218,667
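The flags produced for a hypothetical four-core, unique-mapping configuration:

config = {"algorithm": {"multiple_mappers": False, "num_cores": 4}}
print(_mosaik_args_from_config(config))
# ['-p', '4', '-m', 'unique', '-mm', '2']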
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
_convert_fastq
def _convert_fastq(fastq_file, pair_file, rg_name, out_file, config): """Convert fastq inputs into internal Mosaik representation. """ out_file = "{0}-fq.mkb".format(os.path.splitext(out_file)[0]) if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: cl = [config_utils.get_program("mosaik", config, default="MosaikAligner").replace("Aligner", "Build")] cl += ["-q", fastq_file, "-out", tx_out_file, "-st", config["algorithm"].get("platform", "illumina").lower()] if pair_file: cl += ["-q2", pair_file] if rg_name: cl += ["-id", rg_name] env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && " + " ".join(cl), shell=True) return out_file
python
def _convert_fastq(fastq_file, pair_file, rg_name, out_file, config): """Convert fastq inputs into internal Mosaik representation. """ out_file = "{0}-fq.mkb".format(os.path.splitext(out_file)[0]) if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: cl = [config_utils.get_program("mosaik", config, default="MosaikAligner").replace("Aligner", "Build")] cl += ["-q", fastq_file, "-out", tx_out_file, "-st", config["algorithm"].get("platform", "illumina").lower()] if pair_file: cl += ["-q2", pair_file] if rg_name: cl += ["-id", rg_name] env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && " + " ".join(cl), shell=True) return out_file
[ "def", "_convert_fastq", "(", "fastq_file", ",", "pair_file", ",", "rg_name", ",", "out_file", ",", "config", ")", ":", "out_file", "=", "\"{0}-fq.mkb\"", ".", "format", "(", "os", ".", "path", ".", "splitext", "(", "out_file", ")", "[", "0", "]", ")", ...
Convert fastq inputs into internal Mosaik representation.
[ "Convert", "fastq", "inputs", "into", "internal", "Mosaik", "representation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L24-L41
train
218,668
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
_get_mosaik_nn_args
def _get_mosaik_nn_args(out_file): """Retrieve default neural network files from GitHub to pass to Mosaik. """ base_nn_url = "https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/" out = [] for arg, fname in [("-annse", "2.1.26.se.100.005.ann"), ("-annpe", "2.1.26.pe.100.0065.ann")]: arg_fname = os.path.join(os.path.dirname(out_file), fname) if not file_exists(arg_fname): subprocess.check_call(["wget", "-O", arg_fname, base_nn_url + fname]) out += [arg, arg_fname] return out
python
def _get_mosaik_nn_args(out_file): """Retrieve default neural network files from GitHub to pass to Mosaik. """ base_nn_url = "https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/" out = [] for arg, fname in [("-annse", "2.1.26.se.100.005.ann"), ("-annpe", "2.1.26.pe.100.0065.ann")]: arg_fname = os.path.join(os.path.dirname(out_file), fname) if not file_exists(arg_fname): subprocess.check_call(["wget", "-O", arg_fname, base_nn_url + fname]) out += [arg, arg_fname] return out
[ "def", "_get_mosaik_nn_args", "(", "out_file", ")", ":", "base_nn_url", "=", "\"https://raw.github.com/wanpinglee/MOSAIK/master/src/networkFile/\"", "out", "=", "[", "]", "for", "arg", ",", "fname", "in", "[", "(", "\"-annse\"", ",", "\"2.1.26.se.100.005.ann\"", ")", ...
Retrieve default neural network files from GitHub to pass to Mosaik.
[ "Retrieve", "default", "neural", "network", "files", "from", "GitHub", "to", "pass", "to", "Mosaik", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L43-L54
train
218,669
bcbio/bcbio-nextgen
bcbio/ngsalign/mosaik.py
align
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Alignment with MosaikAligner. """ config = data["config"] rg_name = names.get("rg", None) if names else None out_file = os.path.join(align_dir, "%s-align.bam" % names["lane"]) if not file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: built_fastq = _convert_fastq(fastq_file, pair_file, rg_name, out_file, config) cl = [config_utils.get_program("mosaik", config, default="MosaikAligner")] cl += _mosaik_args_from_config(config) cl += extra_args if extra_args is not None else [] cl += ["-ia", ref_file, "-in", built_fastq, "-out", os.path.splitext(tx_out_file)[0]] jump_base = os.path.splitext(ref_file)[0] key_file = "{0}_keys.jmp".format(jump_base) if file_exists(key_file): cl += ["-j", jump_base] # XXX hacky way to guess key size which needs to match # Can I get hash size directly jump_size_gb = os.path.getsize(key_file) / 1073741824.0 if jump_size_gb < 1.0: cl += ["-hs", "13"] cl += _get_mosaik_nn_args(out_file) env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && "+ " ".join([str(x) for x in cl]), shell=True) os.remove(built_fastq) return out_file
python
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Alignment with MosaikAligner. """ config = data["config"] rg_name = names.get("rg", None) if names else None out_file = os.path.join(align_dir, "%s-align.bam" % names["lane"]) if not file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: built_fastq = _convert_fastq(fastq_file, pair_file, rg_name, out_file, config) cl = [config_utils.get_program("mosaik", config, default="MosaikAligner")] cl += _mosaik_args_from_config(config) cl += extra_args if extra_args is not None else [] cl += ["-ia", ref_file, "-in", built_fastq, "-out", os.path.splitext(tx_out_file)[0]] jump_base = os.path.splitext(ref_file)[0] key_file = "{0}_keys.jmp".format(jump_base) if file_exists(key_file): cl += ["-j", jump_base] # XXX hacky way to guess key size which needs to match # Can I get hash size directly jump_size_gb = os.path.getsize(key_file) / 1073741824.0 if jump_size_gb < 1.0: cl += ["-hs", "13"] cl += _get_mosaik_nn_args(out_file) env_set = "export MOSAIK_TMP={0}".format(os.path.dirname(tx_out_file)) subprocess.check_call(env_set + " && "+ " ".join([str(x) for x in cl]), shell=True) os.remove(built_fastq) return out_file
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ",", "extra_args", "=", "None", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "rg_name", "=", "names", ".", "get", "(", "\"rg\...
Alignment with MosaikAligner.
[ "Alignment", "with", "MosaikAligner", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/mosaik.py#L56-L87
train
218,670
bcbio/bcbio-nextgen
bcbio/graph/graph.py
get_bcbio_timings
def get_bcbio_timings(path): """Fetch timing information from a bcbio log file.""" with open(path, 'r') as file_handle: steps = {} for line in file_handle: matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line) if not matches: continue tstamp = matches.group(1) msg = matches.group(2) # XXX: new special logs do not have this #if not msg.find('Timing: ') >= 0: # continue when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace( tzinfo=pytz.timezone('UTC')) step = msg.split(":")[-1].strip() steps[when] = step return steps
python
def get_bcbio_timings(path): """Fetch timing information from a bcbio log file.""" with open(path, 'r') as file_handle: steps = {} for line in file_handle: matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line) if not matches: continue tstamp = matches.group(1) msg = matches.group(2) # XXX: new special logs do not have this #if not msg.find('Timing: ') >= 0: # continue when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace( tzinfo=pytz.timezone('UTC')) step = msg.split(":")[-1].strip() steps[when] = step return steps
[ "def", "get_bcbio_timings", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "file_handle", ":", "steps", "=", "{", "}", "for", "line", "in", "file_handle", ":", "matches", "=", "re", ".", "search", "(", "r'^\\[([^\\]]+)\\] ([^:...
Fetch timing information from a bcbio log file.
[ "Fetch", "timing", "information", "from", "a", "bcbio", "log", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L54-L76
train
218,671
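The regex and timestamp handling on a single made-up log line (the pytz timezone attachment is omitted here for brevity):

import re
from datetime import datetime
line = "[2016-01-01T08:00Z] Timing: alignment"
m = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line)
when = datetime.strptime(m.group(1), '%Y-%m-%dT%H:%MZ')
step = m.group(2).split(":")[-1].strip()
print(when, "->", step)  # 2016-01-01 08:00:00 -> alignment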
bcbio/bcbio-nextgen
bcbio/graph/graph.py
this_and_prev
def this_and_prev(iterable): """Walk an iterable, returning the current and previous items as a two-tuple.""" try: item = next(iterable) while True: next_item = next(iterable) yield item, next_item item = next_item except StopIteration: return
python
def this_and_prev(iterable): """Walk an iterable, returning the current and previous items as a two-tuple.""" try: item = next(iterable) while True: next_item = next(iterable) yield item, next_item item = next_item except StopIteration: return
[ "def", "this_and_prev", "(", "iterable", ")", ":", "try", ":", "item", "=", "next", "(", "iterable", ")", "while", "True", ":", "next_item", "=", "next", "(", "iterable", ")", "yield", "item", ",", "next_item", "item", "=", "next_item", "except", "StopIt...
Walk an iterable, returning the current and previous items as a two-tuple.
[ "Walk", "an", "iterable", "returning", "the", "current", "and", "previous", "items", "as", "a", "two", "-", "tuple", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L89-L99
train
218,672
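Despite the name, each yielded tuple pairs an item with the one after it; the input must be an iterator, since the function calls next() on it directly:

pairs = list(this_and_prev(iter([1, 2, 3, 4])))
print(pairs)  # [(1, 2), (2, 3), (3, 4)]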
bcbio/bcbio-nextgen
bcbio/graph/graph.py
remove_outliers
def remove_outliers(series, stddev): """Remove the outliers from a series.""" return series[(series - series.mean()).abs() < stddev * series.std()]
python
def remove_outliers(series, stddev): """Remove the outliers from a series.""" return series[(series - series.mean()).abs() < stddev * series.std()]
[ "def", "remove_outliers", "(", "series", ",", "stddev", ")", ":", "return", "series", "[", "(", "series", "-", "series", ".", "mean", "(", ")", ")", ".", "abs", "(", ")", "<", "stddev", "*", "series", ".", "std", "(", ")", "]" ]
Remove the outliers from a series.
[ "Remove", "the", "outliers", "from", "a", "series", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L135-L137
train
218,673
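A toy series where one reading sits more than 1.5 sample standard deviations from the mean and is dropped:

import pandas as pd
s = pd.Series([10, 11, 9, 10, 200])
print(remove_outliers(s, 1.5).tolist())  # [10, 11, 9, 10], the 200 reading is removed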
bcbio/bcbio-nextgen
bcbio/graph/graph.py
prep_for_graph
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None, outlier_stddev=None): """Prepare a dataframe for graphing by calculating deltas for series that need them, resampling, and removing outliers. """ series = series or [] delta_series = delta_series or [] graph = calc_deltas(data_frame, delta_series) for s in series + delta_series: if smoothing: graph[s] = graph[s].resample(smoothing) if outlier_stddev: graph[s] = remove_outliers(graph[s], outlier_stddev) return graph[series + delta_series]
python
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None, outlier_stddev=None): """Prepare a dataframe for graphing by calculating deltas for series that need them, resampling, and removing outliers. """ series = series or [] delta_series = delta_series or [] graph = calc_deltas(data_frame, delta_series) for s in series + delta_series: if smoothing: graph[s] = graph[s].resample(smoothing) if outlier_stddev: graph[s] = remove_outliers(graph[s], outlier_stddev) return graph[series + delta_series]
[ "def", "prep_for_graph", "(", "data_frame", ",", "series", "=", "None", ",", "delta_series", "=", "None", ",", "smoothing", "=", "None", ",", "outlier_stddev", "=", "None", ")", ":", "series", "=", "series", "or", "[", "]", "delta_series", "=", "delta_seri...
Prepare a dataframe for graphing by calculating deltas for series that need them, resampling, and removing outliers.
[ "Prepare", "a", "dataframe", "for", "graphing", "by", "calculating", "deltas", "for", "series", "that", "need", "them", "resampling", "and", "removing", "outliers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L140-L155
train
218,674
bcbio/bcbio-nextgen
bcbio/graph/graph.py
add_common_plot_features
def add_common_plot_features(plot, steps): """Add plot features common to all plots, such as bcbio step information. """ _setup_matplotlib() plot.yaxis.set_tick_params(labelright=True) plot.set_xlabel('') ymax = plot.get_ylim()[1] ticks = {} for tstamp, step in steps.items(): if step == 'finished': continue plot.vlines(tstamp, 0, ymax, linestyles='dashed') tstamp = mpl.dates.num2epoch(mpl.dates.date2num(tstamp)) ticks[tstamp] = step tick_kvs = sorted(ticks.items()) top_axis = plot.twiny() top_axis.set_xlim(*plot.get_xlim()) top_axis.set_xticks([k for k, v in tick_kvs]) top_axis.set_xticklabels([v for k, v in tick_kvs], rotation=45, ha='left', size=pylab.rcParams['font.size']) plot.set_ylim(0) return plot
python
def add_common_plot_features(plot, steps): """Add plot features common to all plots, such as bcbio step information. """ _setup_matplotlib() plot.yaxis.set_tick_params(labelright=True) plot.set_xlabel('') ymax = plot.get_ylim()[1] ticks = {} for tstamp, step in steps.items(): if step == 'finished': continue plot.vlines(tstamp, 0, ymax, linestyles='dashed') tstamp = mpl.dates.num2epoch(mpl.dates.date2num(tstamp)) ticks[tstamp] = step tick_kvs = sorted(ticks.items()) top_axis = plot.twiny() top_axis.set_xlim(*plot.get_xlim()) top_axis.set_xticks([k for k, v in tick_kvs]) top_axis.set_xticklabels([v for k, v in tick_kvs], rotation=45, ha='left', size=pylab.rcParams['font.size']) plot.set_ylim(0) return plot
[ "def", "add_common_plot_features", "(", "plot", ",", "steps", ")", ":", "_setup_matplotlib", "(", ")", "plot", ".", "yaxis", ".", "set_tick_params", "(", "labelright", "=", "True", ")", "plot", ".", "set_xlabel", "(", "''", ")", "ymax", "=", "plot", ".", ...
Add plot features common to all plots, such as bcbio step information.
[ "Add", "plot", "features", "common", "to", "all", "plots", "such", "as", "bcbio", "step", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L158-L183
train
218,675
bcbio/bcbio-nextgen
bcbio/graph/graph.py
log_time_frame
def log_time_frame(bcbio_log): """The bcbio running time frame. :return: an instance of :class collections.namedtuple: with the following fields: start, end and steps """ output = collections.namedtuple("Time", ["start", "end", "steps"]) bcbio_timings = get_bcbio_timings(bcbio_log) return output(min(bcbio_timings), max(bcbio_timings), bcbio_timings)
python
def log_time_frame(bcbio_log): """The bcbio running time frame. :return: an instance of :class collections.namedtuple: with the following fields: start, end and steps """ output = collections.namedtuple("Time", ["start", "end", "steps"]) bcbio_timings = get_bcbio_timings(bcbio_log) return output(min(bcbio_timings), max(bcbio_timings), bcbio_timings)
[ "def", "log_time_frame", "(", "bcbio_log", ")", ":", "output", "=", "collections", ".", "namedtuple", "(", "\"Time\"", ",", "[", "\"start\"", ",", "\"end\"", ",", "\"steps\"", "]", ")", "bcbio_timings", "=", "get_bcbio_timings", "(", "bcbio_log", ")", "return"...
The bcbio running time frame. :return: an instance of :class collections.namedtuple: with the following fields: start, end and steps
[ "The", "bcbio", "running", "time", "frame", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L298-L306
train
218,676
bcbio/bcbio-nextgen
bcbio/graph/graph.py
resource_usage
def resource_usage(bcbio_log, cluster, rawdir, verbose): """Generate system statistics from bcbio runs. Parse the obtained files and put the information in a :class pandas.DataFrame:. :param bcbio_log: local path to bcbio log file written by the run :param cluster: :param rawdir: directory to put raw data files :param verbose: increase verbosity :return: a tuple with three dictionaries, the first one contains an instance of :pandas.DataFrame: for each host, the second one contains information regarding the hardware configuration and the last one contains information regarding timing. :type return: tuple """ data_frames = {} hardware_info = {} time_frame = log_time_frame(bcbio_log) for collectl_file in sorted(os.listdir(rawdir)): if not collectl_file.endswith('.raw.gz'): continue # Only load filenames within sampling timerange (gathered from bcbio_log time_frame) if rawfile_within_timeframe(collectl_file, time_frame): collectl_path = os.path.join(rawdir, collectl_file) data, hardware = load_collectl( collectl_path, time_frame.start, time_frame.end) if len(data) == 0: #raise ValueError("No data present in collectl file %s, mismatch in timestamps between raw collectl and log file?", collectl_path) continue host = re.sub(r'-\d{8}-\d{6}\.raw\.gz$', '', collectl_file) hardware_info[host] = hardware if host not in data_frames: data_frames[host] = data else: data_frames[host] = pd.concat([data_frames[host], data]) return (data_frames, hardware_info, time_frame.steps)
python
def resource_usage(bcbio_log, cluster, rawdir, verbose): """Generate system statistics from bcbio runs. Parse the obtained files and put the information in a :class pandas.DataFrame:. :param bcbio_log: local path to bcbio log file written by the run :param cluster: :param rawdir: directory to put raw data files :param verbose: increase verbosity :return: a tuple with three dictionaries, the first one contains an instance of :pandas.DataFrame: for each host, the second one contains information regarding the hardware configuration and the last one contains information regarding timing. :type return: tuple """ data_frames = {} hardware_info = {} time_frame = log_time_frame(bcbio_log) for collectl_file in sorted(os.listdir(rawdir)): if not collectl_file.endswith('.raw.gz'): continue # Only load filenames within sampling timerange (gathered from bcbio_log time_frame) if rawfile_within_timeframe(collectl_file, time_frame): collectl_path = os.path.join(rawdir, collectl_file) data, hardware = load_collectl( collectl_path, time_frame.start, time_frame.end) if len(data) == 0: #raise ValueError("No data present in collectl file %s, mismatch in timestamps between raw collectl and log file?", collectl_path) continue host = re.sub(r'-\d{8}-\d{6}\.raw\.gz$', '', collectl_file) hardware_info[host] = hardware if host not in data_frames: data_frames[host] = data else: data_frames[host] = pd.concat([data_frames[host], data]) return (data_frames, hardware_info, time_frame.steps)
[ "def", "resource_usage", "(", "bcbio_log", ",", "cluster", ",", "rawdir", ",", "verbose", ")", ":", "data_frames", "=", "{", "}", "hardware_info", "=", "{", "}", "time_frame", "=", "log_time_frame", "(", "bcbio_log", ")", "for", "collectl_file", "in", "sorte...
Generate system statistics from bcbio runs. Parse the obtained files and put the information in a :class pandas.DataFrame:. :param bcbio_log: local path to bcbio log file written by the run :param cluster: :param rawdir: directory to put raw data files :param verbose: increase verbosity :return: a tuple with three dictionaries, the first one contains an instance of :pandas.DataFrame: for each host, the second one contains information regarding the hardware configuration and the last one contains information regarding timing. :type return: tuple
[ "Generate", "system", "statistics", "from", "bcbio", "runs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L319-L362
train
218,677
bcbio/bcbio-nextgen
bcbio/graph/graph.py
generate_graphs
def generate_graphs(data_frames, hardware_info, steps, outdir, verbose=False): """Generate all graphs for a bcbio run.""" _setup_matplotlib() # Hash of hosts containing (data, hardware, steps) tuple collectl_info = collections.defaultdict(dict) for host, data_frame in data_frames.items(): if verbose: print('Generating CPU graph for {}...'.format(host)) graph, data_cpu = graph_cpu(data_frame, steps, hardware_info[host]['num_cpus']) graph.get_figure().savefig( os.path.join(outdir, '{}_cpu.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() ifaces = set([series.split('_')[0] for series in data_frame.keys() if series.startswith(('eth', 'ib'))]) if verbose: print('Generating network graphs for {}...'.format(host)) graph, data_net_bytes = graph_net_bytes(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_bytes.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() graph, data_net_pkts = graph_net_pkts(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_pkts.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating memory graph for {}...'.format(host)) graph, data_mem = graph_memory(data_frame, steps, hardware_info[host]["memory"]) graph.get_figure().savefig( os.path.join(outdir, '{}_memory.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating storage I/O graph for {}...'.format(host)) drives = set([ series.split('_')[0] for series in data_frame.keys() if series.startswith(('sd', 'vd', 'hd', 'xvd')) ]) graph, data_disk = graph_disk_io(data_frame, steps, drives) graph.get_figure().savefig( os.path.join(outdir, '{}_disk_io.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() print('Serializing output to pickle object for node {}...'.format(host)) # "Clean" dataframes ready to be plotted collectl_info[host] = { "hardware": hardware_info, "steps": steps, "cpu": data_cpu, "mem": data_mem, "disk": data_disk, "net_bytes": data_net_bytes, "net_pkts": data_net_pkts } return collectl_info
python
def generate_graphs(data_frames, hardware_info, steps, outdir, verbose=False): """Generate all graphs for a bcbio run.""" _setup_matplotlib() # Hash of hosts containing (data, hardware, steps) tuple collectl_info = collections.defaultdict(dict) for host, data_frame in data_frames.items(): if verbose: print('Generating CPU graph for {}...'.format(host)) graph, data_cpu = graph_cpu(data_frame, steps, hardware_info[host]['num_cpus']) graph.get_figure().savefig( os.path.join(outdir, '{}_cpu.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() ifaces = set([series.split('_')[0] for series in data_frame.keys() if series.startswith(('eth', 'ib'))]) if verbose: print('Generating network graphs for {}...'.format(host)) graph, data_net_bytes = graph_net_bytes(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_bytes.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() graph, data_net_pkts = graph_net_pkts(data_frame, steps, ifaces) graph.get_figure().savefig( os.path.join(outdir, '{}_net_pkts.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating memory graph for {}...'.format(host)) graph, data_mem = graph_memory(data_frame, steps, hardware_info[host]["memory"]) graph.get_figure().savefig( os.path.join(outdir, '{}_memory.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() if verbose: print('Generating storage I/O graph for {}...'.format(host)) drives = set([ series.split('_')[0] for series in data_frame.keys() if series.startswith(('sd', 'vd', 'hd', 'xvd')) ]) graph, data_disk = graph_disk_io(data_frame, steps, drives) graph.get_figure().savefig( os.path.join(outdir, '{}_disk_io.png'.format(host)), bbox_inches='tight', pad_inches=0.25) pylab.close() print('Serializing output to pickle object for node {}...'.format(host)) # "Clean" dataframes ready to be plotted collectl_info[host] = { "hardware": hardware_info, "steps": steps, "cpu": data_cpu, "mem": data_mem, "disk": data_disk, "net_bytes": data_net_bytes, "net_pkts": data_net_pkts } return collectl_info
[ "def", "generate_graphs", "(", "data_frames", ",", "hardware_info", ",", "steps", ",", "outdir", ",", "verbose", "=", "False", ")", ":", "_setup_matplotlib", "(", ")", "# Hash of hosts containing (data, hardware, steps) tuple", "collectl_info", "=", "collections", ".", ...
Generate all graphs for a bcbio run.
[ "Generate", "all", "graphs", "for", "a", "bcbio", "run", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L365-L428
train
218,678
bcbio/bcbio-nextgen
bcbio/variation/ploidy.py
get_ploidy
def get_ploidy(items, region=None): """Retrieve ploidy of a region, handling special cases. """ chrom = chromosome_special_cases(region[0] if isinstance(region, (list, tuple)) else None) ploidy = _configured_ploidy(items) sexes = _configured_genders(items) if chrom == "mitochondrial": # For now, do haploid calling. Could also do pooled calling # but not entirely clear what the best default would be. return ploidy.get("mitochondrial", 1) elif chrom == "X": # Do standard diploid calling if we have any females or unspecified. if "female" in sexes or "f" in sexes: return ploidy.get("female", ploidy["default"]) elif "male" in sexes or "m" in sexes: return ploidy.get("male", 1) else: return ploidy.get("female", ploidy["default"]) elif chrom == "Y": # Always call Y single. If female, filter_vcf_by_sex removes Y regions. return 1 else: return ploidy["default"]
python
def get_ploidy(items, region=None): """Retrieve ploidy of a region, handling special cases. """ chrom = chromosome_special_cases(region[0] if isinstance(region, (list, tuple)) else None) ploidy = _configured_ploidy(items) sexes = _configured_genders(items) if chrom == "mitochondrial": # For now, do haploid calling. Could also do pooled calling # but not entirely clear what the best default would be. return ploidy.get("mitochondrial", 1) elif chrom == "X": # Do standard diploid calling if we have any females or unspecified. if "female" in sexes or "f" in sexes: return ploidy.get("female", ploidy["default"]) elif "male" in sexes or "m" in sexes: return ploidy.get("male", 1) else: return ploidy.get("female", ploidy["default"]) elif chrom == "Y": # Always call Y single. If female, filter_vcf_by_sex removes Y regions. return 1 else: return ploidy["default"]
[ "def", "get_ploidy", "(", "items", ",", "region", "=", "None", ")", ":", "chrom", "=", "chromosome_special_cases", "(", "region", "[", "0", "]", "if", "isinstance", "(", "region", ",", "(", "list", ",", "tuple", ")", ")", "else", "None", ")", "ploidy",...
Retrieve ploidy of a region, handling special cases.
[ "Retrieve", "ploidy", "of", "a", "region", "handling", "special", "cases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ploidy.py#L43-L66
train
218,679
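Since the X/Y/mitochondrial decision table in get_ploidy is self-contained, it can be sketched without the bcbio helpers. This toy version hard-codes the configured ploidy and sexes that _configured_ploidy and _configured_genders would normally pull out of the sample configuration.

def toy_get_ploidy(chrom, ploidy, sexes):
    # chrom plays the role of chromosome_special_cases() output: "X", "Y",
    # "mitochondrial", or any other name for autosomes.
    if chrom == "mitochondrial":
        return ploidy.get("mitochondrial", 1)
    elif chrom == "X":
        # Diploid X when any female or unspecified samples are present.
        if sexes & {"female", "f"}:
            return ploidy.get("female", ploidy["default"])
        elif sexes & {"male", "m"}:
            return ploidy.get("male", 1)
        return ploidy.get("female", ploidy["default"])
    elif chrom == "Y":
        return 1  # always haploid; female Y calls are filtered downstream
    return ploidy["default"]

ploidy = {"default": 2}
print(toy_get_ploidy("X", ploidy, {"male"}))    # 1
print(toy_get_ploidy("X", ploidy, {"female"}))  # 2
print(toy_get_ploidy("Y", ploidy, {"female"}))  # 1
print(toy_get_ploidy("chr1", ploidy, set()))    # 2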
bcbio/bcbio-nextgen
bcbio/variation/ploidy.py
filter_vcf_by_sex
def filter_vcf_by_sex(vcf_file, items): """Post-filter a single sample VCF, handling sex chromosomes. Removes Y chromosomes from batches with all female samples. """ out_file = "%s-ploidyfix%s" % utils.splitext_plus(vcf_file) if not utils.file_exists(out_file): genders = list(_configured_genders(items)) is_female = len(genders) == 1 and genders[0] and genders[0] in ["female", "f"] if is_female: orig_out_file = out_file out_file = orig_out_file.replace(".vcf.gz", ".vcf") with file_transaction(items[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: with utils.open_gzipsafe(vcf_file) as in_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: chrom = chromosome_special_cases(line.split("\t")[0]) if chrom != "Y": out_handle.write(line) if orig_out_file.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) else: out_file = vcf_file return out_file
python
def filter_vcf_by_sex(vcf_file, items): """Post-filter a single sample VCF, handling sex chromosomes. Removes Y chromosomes from batches with all female samples. """ out_file = "%s-ploidyfix%s" % utils.splitext_plus(vcf_file) if not utils.file_exists(out_file): genders = list(_configured_genders(items)) is_female = len(genders) == 1 and genders[0] and genders[0] in ["female", "f"] if is_female: orig_out_file = out_file out_file = orig_out_file.replace(".vcf.gz", ".vcf") with file_transaction(items[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: with utils.open_gzipsafe(vcf_file) as in_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: chrom = chromosome_special_cases(line.split("\t")[0]) if chrom != "Y": out_handle.write(line) if orig_out_file.endswith(".gz"): out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) else: out_file = vcf_file return out_file
[ "def", "filter_vcf_by_sex", "(", "vcf_file", ",", "items", ")", ":", "out_file", "=", "\"%s-ploidyfix%s\"", "%", "utils", ".", "splitext_plus", "(", "vcf_file", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "genders", "=", "list",...
Post-filter a single sample VCF, handling sex chromosomes. Removes Y chromosomes from batches with all female samples.
[ "Post", "-", "filter", "a", "single", "sample", "VCF", "handling", "sex", "chromosomes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ploidy.py#L68-L94
train
218,680
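The Y-removal loop boils down to streaming a VCF and dropping body lines whose first column maps to Y. A stripped-down sketch, reading from an in-memory string instead of a bgzipped file and approximating chromosome_special_cases with a simple prefix check:

import io

def special_case(chrom):
    # Rough stand-in for chromosome_special_cases(): normalize "chrY" -> "Y".
    return chrom[3:] if chrom.startswith("chr") else chrom

vcf_text = ("##fileformat=VCFv4.2\n"
            "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n"
            "chr1\t100\t.\tA\tG\t50\tPASS\t.\n"
            "chrY\t200\t.\tC\tT\t50\tPASS\t.\n")

kept = []
for line in io.StringIO(vcf_text):
    if line.startswith("#"):
        kept.append(line)  # header lines always pass through
    elif special_case(line.split("\t")[0]) != "Y":
        kept.append(line)  # body lines kept unless they sit on Y

print("".join(kept), end="")  # the chr1 record survives, the chrY record is gone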
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
variant_filtration
def variant_filtration(call_file, ref_file, vrn_files, data, items): """Filter variant calls using Variant Quality Score Recalibration. Newer GATK with Haplotype calling has combined SNP/indel filtering. """ caller = data["config"]["algorithm"].get("variantcaller") if "gvcf" not in dd.get_tools_on(data): call_file = ploidy.filter_vcf_by_sex(call_file, items) if caller in ["freebayes"]: return vfilter.freebayes(call_file, ref_file, vrn_files, data) elif caller in ["platypus"]: return vfilter.platypus(call_file, data) elif caller in ["samtools"]: return vfilter.samtools(call_file, data) elif caller in ["gatk", "gatk-haplotype", "haplotyper"]: if dd.get_analysis(data).lower().find("rna-seq") >= 0: from bcbio.rnaseq import variation as rnaseq_variation return rnaseq_variation.gatk_filter_rnaseq(call_file, data) else: return gatkfilter.run(call_file, ref_file, vrn_files, data) # no additional filtration for callers that filter as part of call process else: return call_file
python
def variant_filtration(call_file, ref_file, vrn_files, data, items): """Filter variant calls using Variant Quality Score Recalibration. Newer GATK with Haplotype calling has combined SNP/indel filtering. """ caller = data["config"]["algorithm"].get("variantcaller") if "gvcf" not in dd.get_tools_on(data): call_file = ploidy.filter_vcf_by_sex(call_file, items) if caller in ["freebayes"]: return vfilter.freebayes(call_file, ref_file, vrn_files, data) elif caller in ["platypus"]: return vfilter.platypus(call_file, data) elif caller in ["samtools"]: return vfilter.samtools(call_file, data) elif caller in ["gatk", "gatk-haplotype", "haplotyper"]: if dd.get_analysis(data).lower().find("rna-seq") >= 0: from bcbio.rnaseq import variation as rnaseq_variation return rnaseq_variation.gatk_filter_rnaseq(call_file, data) else: return gatkfilter.run(call_file, ref_file, vrn_files, data) # no additional filtration for callers that filter as part of call process else: return call_file
[ "def", "variant_filtration", "(", "call_file", ",", "ref_file", ",", "vrn_files", ",", "data", ",", "items", ")", ":", "caller", "=", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"variantcaller\"", ")", "if", "\"gvcf\"", "n...
Filter variant calls using Variant Quality Score Recalibration. Newer GATK with Haplotype calling has combined SNP/indel filtering.
[ "Filter", "variant", "calls", "using", "Variant", "Quality", "Score", "Recalibration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L23-L45
train
218,681
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_split_by_ready_regions
def _split_by_ready_regions(ext, file_key, dir_ext_fn): """Organize splits based on regions generated by parallel_prep_region. Sort splits so largest regions analyzed first, avoiding potentially lagging runs at end. """ def _sort_by_size(region_w_bams): region, _ = region_w_bams _, start, end = region return end - start def _assign_bams_to_regions(data): """Ensure BAMs aligned with input regions, either global or individual. """ for i, region in enumerate(data["region"]): work_bams = [] for xs in data["region_bams"]: if len(xs) == 1: work_bams.append(xs[0]) else: work_bams.append(xs[i]) for work_bam in work_bams: assert os.path.exists(work_bam), work_bam yield region, work_bams def _do_work(data): if "region" in data: name = data["group"][0] if "group" in data else data["description"] out_dir = os.path.join(data["dirs"]["work"], dir_ext_fn(data)) out_file = os.path.join(out_dir, "%s%s" % (name, ext)) assert isinstance(data["region"], (list, tuple)) out_parts = [] for r, work_bams in sorted(_assign_bams_to_regions(data), key=_sort_by_size, reverse=True): out_region_dir = os.path.join(out_dir, r[0]) out_region_file = os.path.join(out_region_dir, "%s-%s%s" % (name, pregion.to_safestr(r), ext)) out_parts.append((r, work_bams, out_region_file)) return out_file, out_parts else: return None, [] return _do_work
python
def _split_by_ready_regions(ext, file_key, dir_ext_fn): """Organize splits based on regions generated by parallel_prep_region. Sort splits so largest regions analyzed first, avoiding potentially lagging runs at end. """ def _sort_by_size(region_w_bams): region, _ = region_w_bams _, start, end = region return end - start def _assign_bams_to_regions(data): """Ensure BAMs aligned with input regions, either global or individual. """ for i, region in enumerate(data["region"]): work_bams = [] for xs in data["region_bams"]: if len(xs) == 1: work_bams.append(xs[0]) else: work_bams.append(xs[i]) for work_bam in work_bams: assert os.path.exists(work_bam), work_bam yield region, work_bams def _do_work(data): if "region" in data: name = data["group"][0] if "group" in data else data["description"] out_dir = os.path.join(data["dirs"]["work"], dir_ext_fn(data)) out_file = os.path.join(out_dir, "%s%s" % (name, ext)) assert isinstance(data["region"], (list, tuple)) out_parts = [] for r, work_bams in sorted(_assign_bams_to_regions(data), key=_sort_by_size, reverse=True): out_region_dir = os.path.join(out_dir, r[0]) out_region_file = os.path.join(out_region_dir, "%s-%s%s" % (name, pregion.to_safestr(r), ext)) out_parts.append((r, work_bams, out_region_file)) return out_file, out_parts else: return None, [] return _do_work
[ "def", "_split_by_ready_regions", "(", "ext", ",", "file_key", ",", "dir_ext_fn", ")", ":", "def", "_sort_by_size", "(", "region_w_bams", ")", ":", "region", ",", "_", "=", "region_w_bams", "_", ",", "start", ",", "end", "=", "region", "return", "end", "-"...
Organize splits based on regions generated by parallel_prep_region. Sort splits so largest regions analyzed first, avoiding potentially lagging runs at end.
[ "Organize", "splits", "based", "on", "regions", "generated", "by", "parallel_prep_region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L116-L154
train
218,682
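The largest-first scheduling in _sort_by_size is worth seeing on concrete numbers; a small sketch with made-up regions:

regions_with_bams = [(("chr1", 0, 1000), ["a.bam"]),
                     (("chr2", 0, 5000), ["a.bam"]),
                     (("chr3", 100, 300), ["a.bam"])]

def sort_by_size(region_w_bams):
    (_, start, end), _ = region_w_bams
    return end - start

# Largest regions first, so the slowest chunks start (and finish) earliest.
for (chrom, start, end), bams in sorted(regions_with_bams, key=sort_by_size, reverse=True):
    print(chrom, end - start)
# chr2 5000
# chr1 1000
# chr3 200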
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_collapse_by_bam_variantcaller
def _collapse_by_bam_variantcaller(samples): """Collapse regions to a single representative by BAM input, variant caller and batch. """ by_bam = collections.OrderedDict() for data in (x[0] for x in samples): work_bam = utils.get_in(data, ("combine", "work_bam", "out"), data.get("align_bam")) variantcaller = get_variantcaller(data) if isinstance(work_bam, list): work_bam = tuple(work_bam) key = (multi.get_batch_for_key(data), work_bam, variantcaller) try: by_bam[key].append(data) except KeyError: by_bam[key] = [data] out = [] for grouped_data in by_bam.values(): cur = grouped_data[0] cur.pop("region", None) region_bams = cur.pop("region_bams", None) if region_bams and len(region_bams[0]) > 1: cur.pop("work_bam", None) out.append([cur]) return out
python
def _collapse_by_bam_variantcaller(samples): """Collapse regions to a single representative by BAM input, variant caller and batch. """ by_bam = collections.OrderedDict() for data in (x[0] for x in samples): work_bam = utils.get_in(data, ("combine", "work_bam", "out"), data.get("align_bam")) variantcaller = get_variantcaller(data) if isinstance(work_bam, list): work_bam = tuple(work_bam) key = (multi.get_batch_for_key(data), work_bam, variantcaller) try: by_bam[key].append(data) except KeyError: by_bam[key] = [data] out = [] for grouped_data in by_bam.values(): cur = grouped_data[0] cur.pop("region", None) region_bams = cur.pop("region_bams", None) if region_bams and len(region_bams[0]) > 1: cur.pop("work_bam", None) out.append([cur]) return out
[ "def", "_collapse_by_bam_variantcaller", "(", "samples", ")", ":", "by_bam", "=", "collections", ".", "OrderedDict", "(", ")", "for", "data", "in", "(", "x", "[", "0", "]", "for", "x", "in", "samples", ")", ":", "work_bam", "=", "utils", ".", "get_in", ...
Collapse regions to a single representative by BAM input, variant caller and batch.
[ "Collapse", "regions", "to", "a", "single", "representative", "by", "BAM", "input", "variant", "caller", "and", "batch", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L156-L178
train
218,683
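The grouping idiom above, reduced to plain dictionaries with hypothetical sample records; dict.setdefault replaces the try/except KeyError in the original but behaves the same way:

import collections

samples = [{"batch": "b1", "bam": "s1.bam", "caller": "gatk", "region": "chr1"},
           {"batch": "b1", "bam": "s1.bam", "caller": "gatk", "region": "chr2"},
           {"batch": "b1", "bam": "s1.bam", "caller": "freebayes", "region": "chr1"}]

by_key = collections.OrderedDict()  # preserves first-seen order of groups
for data in samples:
    key = (data["batch"], data["bam"], data["caller"])
    by_key.setdefault(key, []).append(data)

out = []
for grouped in by_key.values():
    rep = dict(grouped[0])      # one representative per (batch, BAM, caller)
    rep.pop("region", None)     # region-level keys no longer apply
    out.append(rep)

print(len(out))                    # 2: gatk and freebayes collapse separately
print([d["caller"] for d in out])  # ['gatk', 'freebayes']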
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_dup_samples_by_variantcaller
def _dup_samples_by_variantcaller(samples, require_bam=True): """Prepare samples by variant callers, duplicating any with multiple callers. """ samples = [utils.to_single_data(x) for x in samples] samples = germline.split_somatic(samples) to_process = [] extras = [] for data in samples: added = False for i, add in enumerate(handle_multiple_callers(data, "variantcaller", require_bam=require_bam)): added = True add = dd.set_variantcaller_order(add, i) to_process.append([add]) if not added: data = _handle_precalled(data) data = dd.set_variantcaller_order(data, 0) extras.append([data]) return to_process, extras
python
def _dup_samples_by_variantcaller(samples, require_bam=True): """Prepare samples by variant callers, duplicating any with multiple callers. """ samples = [utils.to_single_data(x) for x in samples] samples = germline.split_somatic(samples) to_process = [] extras = [] for data in samples: added = False for i, add in enumerate(handle_multiple_callers(data, "variantcaller", require_bam=require_bam)): added = True add = dd.set_variantcaller_order(add, i) to_process.append([add]) if not added: data = _handle_precalled(data) data = dd.set_variantcaller_order(data, 0) extras.append([data]) return to_process, extras
[ "def", "_dup_samples_by_variantcaller", "(", "samples", ",", "require_bam", "=", "True", ")", ":", "samples", "=", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", "samples", "=", "germline", ".", "split_somatic", "(", ...
Prepare samples by variant callers, duplicating any with multiple callers.
[ "Prepare", "samples", "by", "variant", "callers", "duplicating", "any", "with", "multiple", "callers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L180-L197
train
218,684
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
parallel_variantcall_region
def parallel_variantcall_region(samples, run_parallel): """Perform variant calling and post-analysis on samples by region. """ to_process, extras = _dup_samples_by_variantcaller(samples) split_fn = _split_by_ready_regions(".vcf.gz", "work_bam", get_variantcaller) samples = _collapse_by_bam_variantcaller( grouped_parallel_split_combine(to_process, split_fn, multi.group_batches, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"])) return extras + samples
python
def parallel_variantcall_region(samples, run_parallel): """Perform variant calling and post-analysis on samples by region. """ to_process, extras = _dup_samples_by_variantcaller(samples) split_fn = _split_by_ready_regions(".vcf.gz", "work_bam", get_variantcaller) samples = _collapse_by_bam_variantcaller( grouped_parallel_split_combine(to_process, split_fn, multi.group_batches, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"])) return extras + samples
[ "def", "parallel_variantcall_region", "(", "samples", ",", "run_parallel", ")", ":", "to_process", ",", "extras", "=", "_dup_samples_by_variantcaller", "(", "samples", ")", "split_fn", "=", "_split_by_ready_regions", "(", "\".vcf.gz\"", ",", "\"work_bam\"", ",", "get_...
Perform variant calling and post-analysis on samples by region.
[ "Perform", "variant", "calling", "and", "post", "-", "analysis", "on", "samples", "by", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L199-L209
train
218,685
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
vc_output_record
def vc_output_record(samples): """Prepare output record from variant calling to feed into downstream analysis. Prep work handles reformatting so we return generated dictionaries. For any shared keys that are calculated only once for a batch, like variant calls for the batch, we assign to every sample. """ shared_keys = [["vrn_file"], ["validate", "summary"], ["validate", "tp"], ["validate", "fp"], ["validate", "fn"]] raw = cwlutils.samples_to_records([utils.to_single_data(x) for x in samples]) shared = {} for key in shared_keys: cur = list(set([x for x in [tz.get_in(key, d) for d in raw] if x])) if len(cur) > 0: assert len(cur) == 1, (key, cur) shared[tuple(key)] = cur[0] else: shared[tuple(key)] = None out = [] for d in raw: for key, val in shared.items(): d = tz.update_in(d, key, lambda x: val) out.append([d]) return out
python
def vc_output_record(samples): """Prepare output record from variant calling to feed into downstream analysis. Prep work handles reformatting so we return generated dictionaries. For any shared keys that are calculated only once for a batch, like variant calls for the batch, we assign to every sample. """ shared_keys = [["vrn_file"], ["validate", "summary"], ["validate", "tp"], ["validate", "fp"], ["validate", "fn"]] raw = cwlutils.samples_to_records([utils.to_single_data(x) for x in samples]) shared = {} for key in shared_keys: cur = list(set([x for x in [tz.get_in(key, d) for d in raw] if x])) if len(cur) > 0: assert len(cur) == 1, (key, cur) shared[tuple(key)] = cur[0] else: shared[tuple(key)] = None out = [] for d in raw: for key, val in shared.items(): d = tz.update_in(d, key, lambda x: val) out.append([d]) return out
[ "def", "vc_output_record", "(", "samples", ")", ":", "shared_keys", "=", "[", "[", "\"vrn_file\"", "]", ",", "[", "\"validate\"", ",", "\"summary\"", "]", ",", "[", "\"validate\"", ",", "\"tp\"", "]", ",", "[", "\"validate\"", ",", "\"fp\"", "]", ",", "[...
Prepare output record from variant calling to feed into downstream analysis. Prep work handles reformatting so we return generated dictionaries. For any shared keys that are calculated only once for a batch, like variant calls for the batch, we assign to every sample.
[ "Prepare", "output", "record", "from", "variant", "calling", "to", "feed", "into", "downstream", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L212-L236
train
218,686
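The shared-key broadcast relies on toolz's get_in/update_in; assuming toolz is installed, the pattern looks like this with two hypothetical records where only one carries the batch-level value:

import toolz as tz

records = [{"sample": "s1", "vrn_file": "batch.vcf.gz"},
           {"sample": "s2"}]  # missing the batch-level key

shared_keys = [["vrn_file"]]
shared = {}
for key in shared_keys:
    cur = list(set(x for x in (tz.get_in(key, d) for d in records) if x))
    assert len(cur) <= 1, (key, cur)  # a batch must agree on its shared values
    shared[tuple(key)] = cur[0] if cur else None

out = []
for d in records:
    for key, val in shared.items():
        d = tz.update_in(d, key, lambda _: val)  # broadcast to every sample
    out.append(d)

print([d["vrn_file"] for d in out])  # ['batch.vcf.gz', 'batch.vcf.gz']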
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
batch_for_variantcall
def batch_for_variantcall(samples): """Prepare a set of samples for parallel variant calling. CWL input target that groups samples into batches and variant callers for parallel processing. If doing joint calling, with `tools_on: [gvcf]`, split the sample into individuals instead of combining into a batch. """ sample_order = [dd.get_sample_name(utils.to_single_data(x)) for x in samples] to_process, extras = _dup_samples_by_variantcaller(samples, require_bam=False) batch_groups = collections.defaultdict(list) to_process = [utils.to_single_data(x) for x in to_process] for data in cwlutils.samples_to_records(to_process): vc = get_variantcaller(data, require_bam=False) batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_groups[(b, vc)].append(utils.deepish_copy(data)) batches = [] for cur_group in batch_groups.values(): joint_calling = any([is_joint(d) for d in cur_group]) if joint_calling: for d in cur_group: batches.append([d]) else: batches.append(cur_group) def by_original_order(xs): return (min([sample_order.index(dd.get_sample_name(x)) for x in xs]), min([dd.get_variantcaller_order(x) for x in xs])) return sorted(batches + extras, key=by_original_order)
python
def batch_for_variantcall(samples): """Prepare a set of samples for parallel variant calling. CWL input target that groups samples into batches and variant callers for parallel processing. If doing joint calling, with `tools_on: [gvcf]`, split the sample into individuals instead of combining into a batch. """ sample_order = [dd.get_sample_name(utils.to_single_data(x)) for x in samples] to_process, extras = _dup_samples_by_variantcaller(samples, require_bam=False) batch_groups = collections.defaultdict(list) to_process = [utils.to_single_data(x) for x in to_process] for data in cwlutils.samples_to_records(to_process): vc = get_variantcaller(data, require_bam=False) batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_groups[(b, vc)].append(utils.deepish_copy(data)) batches = [] for cur_group in batch_groups.values(): joint_calling = any([is_joint(d) for d in cur_group]) if joint_calling: for d in cur_group: batches.append([d]) else: batches.append(cur_group) def by_original_order(xs): return (min([sample_order.index(dd.get_sample_name(x)) for x in xs]), min([dd.get_variantcaller_order(x) for x in xs])) return sorted(batches + extras, key=by_original_order)
[ "def", "batch_for_variantcall", "(", "samples", ")", ":", "sample_order", "=", "[", "dd", ".", "get_sample_name", "(", "utils", ".", "to_single_data", "(", "x", ")", ")", "for", "x", "in", "samples", "]", "to_process", ",", "extras", "=", "_dup_samples_by_va...
Prepare a set of samples for parallel variant calling. CWL input target that groups samples into batches and variant callers for parallel processing. If doing joint calling, with `tools_on: [gvcf]`, split the sample into individuals instead of combining into a batch.
[ "Prepare", "a", "set", "of", "samples", "for", "parallel", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L241-L272
train
218,687
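The (batch, caller) grouping can be demonstrated with plain dictionaries; dd.get_batches and dd.get_sample_name are replaced here by direct key lookups on hypothetical samples:

import collections

samples = [{"name": "s1", "batch": "b1", "caller": "gatk"},
           {"name": "s2", "batch": "b1", "caller": "gatk"},
           {"name": "s3", "batch": None, "caller": "gatk"}]

batch_groups = collections.defaultdict(list)
for data in samples:
    batches = data["batch"] or data["name"]  # unbatched samples form their own batch
    if not isinstance(batches, (list, tuple)):
        batches = [batches]
    for b in batches:
        batch_groups[(b, data["caller"])].append(data)

print(sorted(batch_groups))  # [('b1', 'gatk'), ('s3', 'gatk')]
print([d["name"] for d in batch_groups[("b1", "gatk")]])  # ['s1', 's2']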
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_handle_precalled
def _handle_precalled(data): """Copy in external pre-called variants fed into analysis. Symlinks for non-CWL runs where we want to ensure VCF present in a local directory. """ if data.get("vrn_file") and not cwlutils.is_cwl_run(data): vrn_file = data["vrn_file"] if isinstance(vrn_file, (list, tuple)): assert len(vrn_file) == 1 vrn_file = vrn_file[0] precalled_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "precalled")) ext = utils.splitext_plus(vrn_file)[-1] orig_file = os.path.abspath(vrn_file) our_vrn_file = os.path.join(precalled_dir, "%s-precalled%s" % (dd.get_sample_name(data), ext)) utils.copy_plus(orig_file, our_vrn_file) data["vrn_file"] = our_vrn_file return data
python
def _handle_precalled(data): """Copy in external pre-called variants fed into analysis. Symlinks for non-CWL runs where we want to ensure VCF present in a local directory. """ if data.get("vrn_file") and not cwlutils.is_cwl_run(data): vrn_file = data["vrn_file"] if isinstance(vrn_file, (list, tuple)): assert len(vrn_file) == 1 vrn_file = vrn_file[0] precalled_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "precalled")) ext = utils.splitext_plus(vrn_file)[-1] orig_file = os.path.abspath(vrn_file) our_vrn_file = os.path.join(precalled_dir, "%s-precalled%s" % (dd.get_sample_name(data), ext)) utils.copy_plus(orig_file, our_vrn_file) data["vrn_file"] = our_vrn_file return data
[ "def", "_handle_precalled", "(", "data", ")", ":", "if", "data", ".", "get", "(", "\"vrn_file\"", ")", "and", "not", "cwlutils", ".", "is_cwl_run", "(", "data", ")", ":", "vrn_file", "=", "data", "[", "\"vrn_file\"", "]", "if", "isinstance", "(", "vrn_fi...
Copy in external pre-called variants fed into analysis. Symlinks for non-CWL runs where we want to ensure VCF present in a local directory.
[ "Copy", "in", "external", "pre", "-", "called", "variants", "fed", "into", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L274-L291
train
218,688
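The output name hinges on utils.splitext_plus keeping compound extensions such as ".vcf.gz" together; a minimal re-implementation (an assumption about its behavior, not the bcbio code itself) shows why the -precalled file keeps the right suffix:

import os

def splitext_plus(f):
    # Like os.path.splitext, but keeps ".vcf.gz"-style double extensions intact.
    base, ext = os.path.splitext(f)
    if ext == ".gz":
        base, ext2 = os.path.splitext(base)
        ext = ext2 + ext
    return base, ext

vrn_file = "/data/calls/batch1.vcf.gz"
ext = splitext_plus(vrn_file)[-1]
print("%s-precalled%s" % ("sampleA", ext))  # 'sampleA-precalled.vcf.gz'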
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
handle_multiple_callers
def handle_multiple_callers(data, key, default=None, require_bam=True): """Split samples that potentially require multiple variant calling approaches. """ callers = get_variantcaller(data, key, default, require_bam=require_bam) if isinstance(callers, six.string_types): return [data] elif not callers: return [] else: out = [] for caller in callers: base = copy.deepcopy(data) if not base["config"]["algorithm"].get("orig_%s" % key): base["config"]["algorithm"]["orig_%s" % key] = \ base["config"]["algorithm"][key] base["config"]["algorithm"][key] = caller # if splitting by variant caller, also split by jointcaller if key == "variantcaller": jcallers = get_variantcaller(data, "jointcaller", []) if isinstance(jcallers, six.string_types): jcallers = [jcallers] if jcallers: base["config"]["algorithm"]["orig_jointcaller"] = jcallers jcallers = [x for x in jcallers if x.startswith(caller)] if jcallers: base["config"]["algorithm"]["jointcaller"] = jcallers[0] else: base["config"]["algorithm"]["jointcaller"] = False out.append(base) return out
python
def handle_multiple_callers(data, key, default=None, require_bam=True): """Split samples that potentially require multiple variant calling approaches. """ callers = get_variantcaller(data, key, default, require_bam=require_bam) if isinstance(callers, six.string_types): return [data] elif not callers: return [] else: out = [] for caller in callers: base = copy.deepcopy(data) if not base["config"]["algorithm"].get("orig_%s" % key): base["config"]["algorithm"]["orig_%s" % key] = \ base["config"]["algorithm"][key] base["config"]["algorithm"][key] = caller # if splitting by variant caller, also split by jointcaller if key == "variantcaller": jcallers = get_variantcaller(data, "jointcaller", []) if isinstance(jcallers, six.string_types): jcallers = [jcallers] if jcallers: base["config"]["algorithm"]["orig_jointcaller"] = jcallers jcallers = [x for x in jcallers if x.startswith(caller)] if jcallers: base["config"]["algorithm"]["jointcaller"] = jcallers[0] else: base["config"]["algorithm"]["jointcaller"] = False out.append(base) return out
[ "def", "handle_multiple_callers", "(", "data", ",", "key", ",", "default", "=", "None", ",", "require_bam", "=", "True", ")", ":", "callers", "=", "get_variantcaller", "(", "data", ",", "key", ",", "default", ",", "require_bam", "=", "require_bam", ")", "i...
Split samples that potentially require multiple variant calling approaches.
[ "Split", "samples", "that", "potentially", "require", "multiple", "variant", "calling", "approaches", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L293-L322
train
218,689
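The caller fan-out depends on copy.deepcopy so each duplicated sample can change its configuration independently; a condensed sketch with a two-caller configuration:

import copy

data = {"config": {"algorithm": {"variantcaller": ["gatk", "freebayes"]}}}

out = []
for caller in data["config"]["algorithm"]["variantcaller"]:
    base = copy.deepcopy(data)  # isolate each caller's configuration copy
    algo = base["config"]["algorithm"]
    algo.setdefault("orig_variantcaller", algo["variantcaller"])
    algo["variantcaller"] = caller
    out.append(base)

print([d["config"]["algorithm"]["variantcaller"] for d in out])
# ['gatk', 'freebayes']
print(out[0]["config"]["algorithm"]["orig_variantcaller"])
# ['gatk', 'freebayes'] -- the original list survives for provenance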
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
variantcall_sample
def variantcall_sample(data, region=None, align_bams=None, out_file=None): """Parallel entry point for doing genotyping of a region of a sample. """ if out_file is None or not os.path.exists(out_file) or not os.path.lexists(out_file): utils.safe_makedir(os.path.dirname(out_file)) ref_file = dd.get_ref_file(data) config = data["config"] caller_fns = get_variantcallers() caller_fn = caller_fns[config["algorithm"].get("variantcaller")] if len(align_bams) == 1: items = [data] else: items = multi.get_orig_items(data) assert len(items) == len(align_bams) assoc_files = tz.get_in(("genome_resources", "variation"), data, {}) if not assoc_files: assoc_files = {} for bam_file in align_bams: bam.index(bam_file, data["config"], check_timestamp=False) out_file = caller_fn(align_bams, items, ref_file, assoc_files, region, out_file) if region: data["region"] = region data["vrn_file"] = out_file return [data]
python
def variantcall_sample(data, region=None, align_bams=None, out_file=None): """Parallel entry point for doing genotyping of a region of a sample. """ if out_file is None or not os.path.exists(out_file) or not os.path.lexists(out_file): utils.safe_makedir(os.path.dirname(out_file)) ref_file = dd.get_ref_file(data) config = data["config"] caller_fns = get_variantcallers() caller_fn = caller_fns[config["algorithm"].get("variantcaller")] if len(align_bams) == 1: items = [data] else: items = multi.get_orig_items(data) assert len(items) == len(align_bams) assoc_files = tz.get_in(("genome_resources", "variation"), data, {}) if not assoc_files: assoc_files = {} for bam_file in align_bams: bam.index(bam_file, data["config"], check_timestamp=False) out_file = caller_fn(align_bams, items, ref_file, assoc_files, region, out_file) if region: data["region"] = region data["vrn_file"] = out_file return [data]
[ "def", "variantcall_sample", "(", "data", ",", "region", "=", "None", ",", "align_bams", "=", "None", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", "or", "not", "os", ".", "path", ".", "exists", "(", "out_file", ")", "or", "...
Parallel entry point for doing genotyping of a region of a sample.
[ "Parallel", "entry", "point", "for", "doing", "genotyping", "of", "a", "region", "of", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L359-L381
train
218,690
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_get_batch_name
def _get_batch_name(items, skip_jointcheck=False): """Retrieve the shared batch name for a group of items. """ batch_names = collections.defaultdict(int) has_joint = any([is_joint(d) for d in items]) for data in items: if has_joint and not skip_jointcheck: batches = dd.get_sample_name(data) else: batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_names[b] += 1 return sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0]
python
def _get_batch_name(items, skip_jointcheck=False): """Retrieve the shared batch name for a group of items. """ batch_names = collections.defaultdict(int) has_joint = any([is_joint(d) for d in items]) for data in items: if has_joint and not skip_jointcheck: batches = dd.get_sample_name(data) else: batches = dd.get_batches(data) or dd.get_sample_name(data) if not isinstance(batches, (list, tuple)): batches = [batches] for b in batches: batch_names[b] += 1 return sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0]
[ "def", "_get_batch_name", "(", "items", ",", "skip_jointcheck", "=", "False", ")", ":", "batch_names", "=", "collections", ".", "defaultdict", "(", "int", ")", "has_joint", "=", "any", "(", "[", "is_joint", "(", "d", ")", "for", "d", "in", "items", "]", ...
Retrieve the shared batch name for a group of items.
[ "Retrieve", "the", "shared", "batch", "name", "for", "a", "group", "of", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L410-L424
train
218,691
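The majority vote over batch names is a small counting exercise; the same result could come from collections.Counter(...).most_common(1), shown here alongside the explicit sort used above:

import collections

items = [{"batches": ["b1", "b2"]}, {"batches": ["b1"]}, {"batches": ["b1"]}]

batch_names = collections.defaultdict(int)
for data in items:
    for b in data["batches"]:
        batch_names[b] += 1

# Most frequently shared batch wins, as in _get_batch_name().
print(sorted(batch_names.items(), key=lambda x: x[-1], reverse=True)[0][0])  # 'b1'
# Equivalent one-liner:
print(collections.Counter(batch_names).most_common(1)[0][0])                 # 'b1'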
bcbio/bcbio-nextgen
bcbio/variation/genotype.py
_run_variantcall_batch_multicore
def _run_variantcall_batch_multicore(items, regions, final_file): """Run variant calling on a batch of items using multiple cores. """ batch_name = _get_batch_name(items) variantcaller = _get_batch_variantcaller(items) work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items] def split_fn(data): out = [] for region in regions: region = _region_to_coords(region) chrom, start, end = region region_str = "_".join(str(x) for x in region) out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom, "%s-%s.vcf.gz" % (batch_name, region_str)) out.append((region, work_bams, out_file)) return final_file, out parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]), "cores_per_job": 1} run_parallel = dmulti.runner(parallel, items[0]["config"]) to_run = copy.deepcopy(items[0]) to_run["sam_ref"] = dd.get_ref_file(to_run) to_run["group_orig"] = items parallel_split_combine([[to_run]], split_fn, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"]) return final_file
python
def _run_variantcall_batch_multicore(items, regions, final_file): """Run variant calling on a batch of items using multiple cores. """ batch_name = _get_batch_name(items) variantcaller = _get_batch_variantcaller(items) work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items] def split_fn(data): out = [] for region in regions: region = _region_to_coords(region) chrom, start, end = region region_str = "_".join(str(x) for x in region) out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom, "%s-%s.vcf.gz" % (batch_name, region_str)) out.append((region, work_bams, out_file)) return final_file, out parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]), "cores_per_job": 1} run_parallel = dmulti.runner(parallel, items[0]["config"]) to_run = copy.deepcopy(items[0]) to_run["sam_ref"] = dd.get_ref_file(to_run) to_run["group_orig"] = items parallel_split_combine([[to_run]], split_fn, run_parallel, "variantcall_sample", "concat_variant_files", "vrn_file", ["region", "sam_ref", "config"]) return final_file
[ "def", "_run_variantcall_batch_multicore", "(", "items", ",", "regions", ",", "final_file", ")", ":", "batch_name", "=", "_get_batch_name", "(", "items", ")", "variantcaller", "=", "_get_batch_variantcaller", "(", "items", ")", "work_bams", "=", "[", "dd", ".", ...
Run variant calling on a batch of items using multiple cores.
[ "Run", "variant", "calling", "on", "a", "batch", "of", "items", "using", "multiple", "cores", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/genotype.py#L462-L486
train
218,692
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
create
def create(parallel, dirs, config): """Create a cluster based on the provided parallel arguments. Returns an IPython view on the cluster, enabling processing on jobs. Adds a mincores specification if we have machines with a larger number of cores to allow jobs to be batched together for shared memory usage. """ profile_dir = utils.safe_makedir(os.path.join(dirs["work"], get_log_dir(config), "ipython")) has_mincores = any(x.startswith("mincores=") for x in parallel["resources"]) cores = min(_get_common_cores(config["resources"]), parallel["system_cores"]) if cores > 1 and not has_mincores: adj_cores = max(1, int(math.floor(cores * float(parallel.get("mem_pct", 1.0))))) # if we have fewer scheduled cores than per machine, use the scheduled count if cores > parallel["cores"]: cores = parallel["cores"] # if we have fewer total cores required for the entire process, use that elif adj_cores > parallel["num_jobs"] * parallel["cores_per_job"]: cores = parallel["num_jobs"] * parallel["cores_per_job"] else: cores = adj_cores cores = per_machine_target_cores(cores, parallel["num_jobs"]) parallel["resources"].append("mincores=%s" % cores) return ipython_cluster.cluster_view(parallel["scheduler"].lower(), parallel["queue"], parallel["num_jobs"], parallel["cores_per_job"], profile=profile_dir, start_wait=parallel["timeout"], extra_params={"resources": parallel["resources"], "mem": parallel["mem"], "tag": parallel.get("tag"), "run_local": parallel.get("run_local"), "local_controller": parallel.get("local_controller")}, retries=parallel.get("retries"))
python
def create(parallel, dirs, config): """Create a cluster based on the provided parallel arguments. Returns an IPython view on the cluster, enabling processing on jobs. Adds a mincores specification if we have machines with a larger number of cores to allow jobs to be batched together for shared memory usage. """ profile_dir = utils.safe_makedir(os.path.join(dirs["work"], get_log_dir(config), "ipython")) has_mincores = any(x.startswith("mincores=") for x in parallel["resources"]) cores = min(_get_common_cores(config["resources"]), parallel["system_cores"]) if cores > 1 and not has_mincores: adj_cores = max(1, int(math.floor(cores * float(parallel.get("mem_pct", 1.0))))) # if we have fewer scheduled cores than per machine, use the scheduled count if cores > parallel["cores"]: cores = parallel["cores"] # if we have fewer total cores required for the entire process, use that elif adj_cores > parallel["num_jobs"] * parallel["cores_per_job"]: cores = parallel["num_jobs"] * parallel["cores_per_job"] else: cores = adj_cores cores = per_machine_target_cores(cores, parallel["num_jobs"]) parallel["resources"].append("mincores=%s" % cores) return ipython_cluster.cluster_view(parallel["scheduler"].lower(), parallel["queue"], parallel["num_jobs"], parallel["cores_per_job"], profile=profile_dir, start_wait=parallel["timeout"], extra_params={"resources": parallel["resources"], "mem": parallel["mem"], "tag": parallel.get("tag"), "run_local": parallel.get("run_local"), "local_controller": parallel.get("local_controller")}, retries=parallel.get("retries"))
[ "def", "create", "(", "parallel", ",", "dirs", ",", "config", ")", ":", "profile_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dirs", "[", "\"work\"", "]", ",", "get_log_dir", "(", "config", ")", ",", "\"ipython\""...
Create a cluster based on the provided parallel arguments. Returns an IPython view on the cluster, enabling processing on jobs. Adds a mincores specification if we have machines with a larger number of cores to allow jobs to be batched together for shared memory usage.
[ "Create", "a", "cluster", "based", "on", "the", "provided", "parallel", "arguments", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L33-L65
train
218,693
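The mincores arithmetic is the subtle part of create; a condensed, pure-function version (inputs passed explicitly instead of read from config and parallel) makes the three branches easy to probe:

import math

def choose_mincores(common_cores, system_cores, scheduled_cores, num_jobs,
                    cores_per_job, mem_pct=1.0):
    # Condensed version of the core-batching branches in create().
    cores = min(common_cores, system_cores)
    adj_cores = max(1, int(math.floor(cores * float(mem_pct))))
    if cores > scheduled_cores:                 # fewer scheduled cores than a machine has
        cores = scheduled_cores
    elif adj_cores > num_jobs * cores_per_job:  # whole process needs fewer cores
        cores = num_jobs * cores_per_job
    else:
        cores = adj_cores
    return cores

# 16-core configuration, one 4-core job scheduled: batch at most 4 cores.
print(choose_mincores(16, 32, 4, num_jobs=1, cores_per_job=4))   # 4
# Plenty of scheduled cores, but only 2 jobs x 3 cores needed: cap at 6.
print(choose_mincores(16, 32, 24, num_jobs=2, cores_per_job=3))  # 6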
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
per_machine_target_cores
def per_machine_target_cores(cores, num_jobs): """Select target cores on larger machines to leave room for batch script and controller. In resource-constrained environments, we want to pack all bcbio submissions onto a specific number of machines. This gives up some cores to enable sharing cores with the controller and batch script on larger machines. """ if cores >= 32 and num_jobs == 1: cores = cores - 2 elif cores >= 16 and num_jobs in [1, 2]: cores = cores - 1 return cores
python
def per_machine_target_cores(cores, num_jobs): """Select target cores on larger machines to leave room for batch script and controller. In resource-constrained environments, we want to pack all bcbio submissions onto a specific number of machines. This gives up some cores to enable sharing cores with the controller and batch script on larger machines. """ if cores >= 32 and num_jobs == 1: cores = cores - 2 elif cores >= 16 and num_jobs in [1, 2]: cores = cores - 1 return cores
[ "def", "per_machine_target_cores", "(", "cores", ",", "num_jobs", ")", ":", "if", "cores", ">=", "32", "and", "num_jobs", "==", "1", ":", "cores", "=", "cores", "-", "2", "elif", "cores", ">=", "16", "and", "num_jobs", "in", "[", "1", ",", "2", "]", ...
Select target cores on larger machines to leave room for batch script and controller. On resource constrained environments, we want to pack all bcbio submissions onto a specific number of machines. This gives up some cores to enable sharing cores with the controller and batch script on larger machines.
[ "Select", "target", "cores", "on", "larger", "machines", "to", "leave", "room", "for", "batch", "script", "and", "controller", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L67-L78
train
218,694
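Because the function is pure, its reservation policy is easiest to read off a few worked calls:

def per_machine_target_cores(cores, num_jobs):
    # Copied from above: reserve headroom for the controller and batch script.
    if cores >= 32 and num_jobs == 1:
        cores = cores - 2
    elif cores >= 16 and num_jobs in [1, 2]:
        cores = cores - 1
    return cores

for cores, jobs in [(36, 1), (16, 2), (16, 4), (8, 1)]:
    print(cores, jobs, "->", per_machine_target_cores(cores, jobs))
# 36 1 -> 34  (two cores reserved on a large single-job machine)
# 16 2 -> 15  (one core reserved)
# 16 4 -> 16  (enough jobs that no reservation applies)
# 8 1 -> 8    (small machine, nothing reserved)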
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
_get_common_cores
def _get_common_cores(resources): """Retrieve the most common configured number of cores in the input file. """ all_cores = [] for vs in resources.values(): cores = vs.get("cores") if cores: all_cores.append(int(vs["cores"])) return collections.Counter(all_cores).most_common(1)[0][0]
python
def _get_common_cores(resources): """Retrieve the most common configured number of cores in the input file. """ all_cores = [] for vs in resources.values(): cores = vs.get("cores") if cores: all_cores.append(int(vs["cores"])) return collections.Counter(all_cores).most_common(1)[0][0]
[ "def", "_get_common_cores", "(", "resources", ")", ":", "all_cores", "=", "[", "]", "for", "vs", "in", "resources", ".", "values", "(", ")", ":", "cores", "=", "vs", ".", "get", "(", "\"cores\"", ")", "if", "cores", ":", "all_cores", ".", "append", "...
Retrieve the most common configured number of cores in the input file.
[ "Retrieve", "the", "most", "common", "configured", "number", "of", "cores", "in", "the", "input", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L80-L88
train
218,695
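With a hypothetical resources mapping, the modal core count falls out of collections.Counter directly:

import collections

resources = {"bwa": {"cores": 16}, "gatk": {"cores": 16, "memory": "3g"},
             "samtools": {"cores": 8}, "multiqc": {}}

all_cores = [int(vs["cores"]) for vs in resources.values() if vs.get("cores")]
print(collections.Counter(all_cores).most_common(1)[0][0])  # 16, the modal core count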
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
zip_args
def zip_args(args, config=None): """Compress arguments using msgpack. """ if msgpack: return [msgpack.packb(x, use_single_float=True, use_bin_type=True) for x in args] else: return args
python
def zip_args(args, config=None): """Compress arguments using msgpack. """ if msgpack: return [msgpack.packb(x, use_single_float=True, use_bin_type=True) for x in args] else: return args
[ "def", "zip_args", "(", "args", ",", "config", "=", "None", ")", ":", "if", "msgpack", ":", "return", "[", "msgpack", ".", "packb", "(", "x", ",", "use_single_float", "=", "True", ",", "use_bin_type", "=", "True", ")", "for", "x", "in", "args", "]", ...
Compress arguments using msgpack.
[ "Compress", "arguments", "using", "msgpack", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L103-L109
train
218,696
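A round trip makes the compression concrete; the unzip_args counterpart below is a plausible sketch (the real one lives alongside zip_args), and msgpack is the optional dependency guarded by the "if msgpack" check above:

import msgpack

args = [{"sample": "s1", "depth": 30.0}, {"sample": "s2", "depth": 25.5}]

packed = [msgpack.packb(x, use_single_float=True, use_bin_type=True) for x in args]
print(all(isinstance(p, bytes) for p in packed))  # True: compact bytes per argument

# Plausible unzip_args counterpart: unpack each argument back to Python objects.
unpacked = [msgpack.unpackb(p, raw=False) for p in packed]
print(unpacked[0]["sample"])  # 's1'
print(unpacked[1]["depth"])   # 25.5 (exactly representable even as a single float)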
bcbio/bcbio-nextgen
bcbio/distributed/ipython.py
runner
def runner(view, parallel, dirs, config): """Run a task on an IPython parallel cluster, allowing alternative queue types. view provides map-style access to an existing IPython cluster. """ def run(fn_name, items): setpath.prepend_bcbiopath() out = [] fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (_get_ipython_fn(fn_name, parallel), fn_name) items = [x for x in items if x is not None] items = diagnostics.track_parallel(items, fn_name) logger.info("ipython: %s" % fn_name) if len(items) > 0: items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"], parallel) for x in items] if "wrapper" in parallel: wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources"])} items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items] items = zip_args([args for args in items]) for data in view.map_sync(fn, items, track=False): if data: out.extend(unzip_args(data)) return out return run
python
def runner(view, parallel, dirs, config): """Run a task on an IPython parallel cluster, allowing alternative queue types. view provides map-style access to an existing IPython cluster. """ def run(fn_name, items): setpath.prepend_bcbiopath() out = [] fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (_get_ipython_fn(fn_name, parallel), fn_name) items = [x for x in items if x is not None] items = diagnostics.track_parallel(items, fn_name) logger.info("ipython: %s" % fn_name) if len(items) > 0: items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"], parallel) for x in items] if "wrapper" in parallel: wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources"])} items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items] items = zip_args([args for args in items]) for data in view.map_sync(fn, items, track=False): if data: out.extend(unzip_args(data)) return out return run
[ "def", "runner", "(", "view", ",", "parallel", ",", "dirs", ",", "config", ")", ":", "def", "run", "(", "fn_name", ",", "items", ")", ":", "setpath", ".", "prepend_bcbiopath", "(", ")", "out", "=", "[", "]", "fn", ",", "fn_name", "=", "(", "fn_name...
Run a task on an IPython parallel cluster, allowing alternative queue types. view provides map-style access to an existing IPython cluster.
[ "Run", "a", "task", "on", "an", "ipython", "parallel", "cluster", "allowing", "alternative", "queue", "types", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L119-L141
train
218,697
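The callable-or-name unpacking at the top of run is a compact idiom; a standalone sketch with a hypothetical lookup table in place of _get_ipython_fn:

def _lookup_fn(name):
    # Hypothetical registry standing in for _get_ipython_fn().
    return {"echo": lambda xs: xs}[name]

def resolve(fn_name):
    # Accept either a callable or a registered function name, as runner() does.
    return (fn_name, fn_name.__name__) if callable(fn_name) else (_lookup_fn(fn_name), fn_name)

fn, label = resolve("echo")
print(label, fn([1, 2]))  # echo [1, 2]
fn, label = resolve(len)
print(label, fn([1, 2]))  # len 2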
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
peakcall_prepare
def peakcall_prepare(data, run_parallel): """Entry point for doing peak calling""" caller_fns = get_callers() to_process = [] for sample in data: mimic = copy.copy(sample[0]) callers = dd.get_peakcaller(sample[0]) if not isinstance(callers, list): callers = [callers] for caller in callers: if caller in caller_fns: mimic["peak_fn"] = caller name = dd.get_sample_name(mimic) mimic = _check(mimic, data) if mimic: to_process.append(mimic) else: logger.info("Skipping peak calling. No input sample for %s" % name) if to_process: after_process = run_parallel("peakcalling", to_process) data = _sync(data, after_process) return data
python
def peakcall_prepare(data, run_parallel): """Entry point for doing peak calling""" caller_fns = get_callers() to_process = [] for sample in data: mimic = copy.copy(sample[0]) callers = dd.get_peakcaller(sample[0]) if not isinstance(callers, list): callers = [callers] for caller in callers: if caller in caller_fns: mimic["peak_fn"] = caller name = dd.get_sample_name(mimic) mimic = _check(mimic, data) if mimic: to_process.append(mimic) else: logger.info("Skipping peak calling. No input sample for %s" % name) if to_process: after_process = run_parallel("peakcalling", to_process) data = _sync(data, after_process) return data
[ "def", "peakcall_prepare", "(", "data", ",", "run_parallel", ")", ":", "caller_fns", "=", "get_callers", "(", ")", "to_process", "=", "[", "]", "for", "sample", "in", "data", ":", "mimic", "=", "copy", ".", "copy", "(", "sample", "[", "0", "]", ")", ...
Entry point for doing peak calling
[ "Entry", "point", "for", "doing", "peak", "calling" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L22-L43
train
218,698
bcbio/bcbio-nextgen
bcbio/chipseq/peaks.py
calling
def calling(data): """Main function to parallelize peak calling.""" chip_bam = data.get("work_bam") input_bam = data.get("work_bam_input", None) caller_fn = get_callers()[data["peak_fn"]] name = dd.get_sample_name(data) out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), data["peak_fn"], name)) out_files = caller_fn(name, chip_bam, input_bam, dd.get_genome_build(data), out_dir, dd.get_chip_method(data), data["resources"], data) greylistdir = greylisting(data) data.update({"peaks_files": out_files}) # data["input_bam_filter"] = input_bam if greylistdir: data["greylist"] = greylistdir return [[data]]
python
def calling(data): """Main function to parallelize peak calling.""" chip_bam = data.get("work_bam") input_bam = data.get("work_bam_input", None) caller_fn = get_callers()[data["peak_fn"]] name = dd.get_sample_name(data) out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), data["peak_fn"], name)) out_files = caller_fn(name, chip_bam, input_bam, dd.get_genome_build(data), out_dir, dd.get_chip_method(data), data["resources"], data) greylistdir = greylisting(data) data.update({"peaks_files": out_files}) # data["input_bam_filter"] = input_bam if greylistdir: data["greylist"] = greylistdir return [[data]]
[ "def", "calling", "(", "data", ")", ":", "chip_bam", "=", "data", ".", "get", "(", "\"work_bam\"", ")", "input_bam", "=", "data", ".", "get", "(", "\"work_bam_input\"", ",", "None", ")", "caller_fn", "=", "get_callers", "(", ")", "[", "data", "[", "\"p...
Main function to parallelize peak calling.
[ "Main", "function", "to", "parallelize", "peak", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/peaks.py#L45-L59
train
218,699
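Both peak-calling functions route through a get_callers() dispatch table keyed by the configured caller name; a toy dispatch with a hypothetical caller shows the shape of the contract:

def toy_macs2(name, chip_bam, input_bam, genome, out_dir, method, resources, data):
    # Hypothetical caller: return the peak files a real implementation would write.
    return ["%s/%s_peaks.narrowPeak" % (out_dir, name)]

CALLERS = {"macs2": toy_macs2}  # stand-in for get_callers()

data = {"work_bam": "chip.bam", "peak_fn": "macs2"}
caller_fn = CALLERS[data["peak_fn"]]
out_files = caller_fn("sampleA", data["work_bam"], None,
                      "hg38", "peaks/sampleA", "chip", {}, data)
data.update({"peaks_files": out_files})
print(data["peaks_files"])  # ['peaks/sampleA/sampleA_peaks.narrowPeak']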