id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
238,000
bcbio/bcbio-nextgen
bcbio/upload/s3.py
upload_file_boto
def upload_file_boto(fname, remote_fname, mditems=None): """Upload a file using boto instead of external tools. """ r_fname = objectstore.parse_remote(remote_fname) conn = objectstore.connect(remote_fname) bucket = conn.lookup(r_fname.bucket) if not bucket: bucket = conn.create_bucket(r_fname.bucket, location=objectstore.get_region(remote_fname)) key = bucket.get_key(r_fname.key, validate=False) if mditems is None: mditems = {} if "x-amz-server-side-encryption" not in mditems: mditems["x-amz-server-side-encryption"] = "AES256" for name, val in mditems.items(): key.set_metadata(name, val) key.set_contents_from_filename(fname, encrypt_key=True)
python
def upload_file_boto(fname, remote_fname, mditems=None): r_fname = objectstore.parse_remote(remote_fname) conn = objectstore.connect(remote_fname) bucket = conn.lookup(r_fname.bucket) if not bucket: bucket = conn.create_bucket(r_fname.bucket, location=objectstore.get_region(remote_fname)) key = bucket.get_key(r_fname.key, validate=False) if mditems is None: mditems = {} if "x-amz-server-side-encryption" not in mditems: mditems["x-amz-server-side-encryption"] = "AES256" for name, val in mditems.items(): key.set_metadata(name, val) key.set_contents_from_filename(fname, encrypt_key=True)
[ "def", "upload_file_boto", "(", "fname", ",", "remote_fname", ",", "mditems", "=", "None", ")", ":", "r_fname", "=", "objectstore", ".", "parse_remote", "(", "remote_fname", ")", "conn", "=", "objectstore", ".", "connect", "(", "remote_fname", ")", "bucket", ...
Upload a file using boto instead of external tools.
[ "Upload", "a", "file", "using", "boto", "instead", "of", "external", "tools", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/s3.py#L83-L98
238,001
bcbio/bcbio-nextgen
bcbio/qc/chipseq.py
run
def run(bam_file, sample, out_dir): """Standard QC metrics for chipseq""" out = {} # if "rchipqc" in dd.get_tools_on(sample): # out = chipqc(bam_file, sample, out_dir) peaks = sample.get("peaks_files", {}).get("main") if peaks: out.update(_reads_in_peaks(bam_file, peaks, sample)) return out
python
def run(bam_file, sample, out_dir): out = {} # if "rchipqc" in dd.get_tools_on(sample): # out = chipqc(bam_file, sample, out_dir) peaks = sample.get("peaks_files", {}).get("main") if peaks: out.update(_reads_in_peaks(bam_file, peaks, sample)) return out
[ "def", "run", "(", "bam_file", ",", "sample", ",", "out_dir", ")", ":", "out", "=", "{", "}", "# if \"rchipqc\" in dd.get_tools_on(sample):", "# out = chipqc(bam_file, sample, out_dir)", "peaks", "=", "sample", ".", "get", "(", "\"peaks_files\"", ",", "{", "}", ...
Standard QC metrics for chipseq
[ "Standard", "QC", "metrics", "for", "chipseq" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L16-L25
238,002
bcbio/bcbio-nextgen
bcbio/qc/chipseq.py
_reads_in_peaks
def _reads_in_peaks(bam_file, peaks_file, sample): """Calculate number of reads in peaks""" if not peaks_file: return {} rip = number_of_mapped_reads(sample, bam_file, bed_file = peaks_file) return {"metrics": {"RiP": rip}}
python
def _reads_in_peaks(bam_file, peaks_file, sample): if not peaks_file: return {} rip = number_of_mapped_reads(sample, bam_file, bed_file = peaks_file) return {"metrics": {"RiP": rip}}
[ "def", "_reads_in_peaks", "(", "bam_file", ",", "peaks_file", ",", "sample", ")", ":", "if", "not", "peaks_file", ":", "return", "{", "}", "rip", "=", "number_of_mapped_reads", "(", "sample", ",", "bam_file", ",", "bed_file", "=", "peaks_file", ")", "return"...
Calculate number of reads in peaks
[ "Calculate", "number", "of", "reads", "in", "peaks" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L27-L32
238,003
bcbio/bcbio-nextgen
bcbio/qc/chipseq.py
chipqc
def chipqc(bam_file, sample, out_dir): """Attempt code to run ChIPQC bioconductor packate in one sample""" sample_name = dd.get_sample_name(sample) logger.warning("ChIPQC is unstable right now, if it breaks, turn off the tool.") if utils.file_exists(out_dir): return _get_output(out_dir) with tx_tmpdir() as tmp_dir: rcode = _sample_template(sample, tmp_dir) if rcode: # local_sitelib = utils.R_sitelib() rscript = utils.Rscript_cmd() do.run([rscript, "--no-environ", rcode], "ChIPQC in %s" % sample_name, log_error=False) shutil.move(tmp_dir, out_dir) return _get_output(out_dir)
python
def chipqc(bam_file, sample, out_dir): sample_name = dd.get_sample_name(sample) logger.warning("ChIPQC is unstable right now, if it breaks, turn off the tool.") if utils.file_exists(out_dir): return _get_output(out_dir) with tx_tmpdir() as tmp_dir: rcode = _sample_template(sample, tmp_dir) if rcode: # local_sitelib = utils.R_sitelib() rscript = utils.Rscript_cmd() do.run([rscript, "--no-environ", rcode], "ChIPQC in %s" % sample_name, log_error=False) shutil.move(tmp_dir, out_dir) return _get_output(out_dir)
[ "def", "chipqc", "(", "bam_file", ",", "sample", ",", "out_dir", ")", ":", "sample_name", "=", "dd", ".", "get_sample_name", "(", "sample", ")", "logger", ".", "warning", "(", "\"ChIPQC is unstable right now, if it breaks, turn off the tool.\"", ")", "if", "utils", ...
Attempt code to run ChIPQC bioconductor packate in one sample
[ "Attempt", "code", "to", "run", "ChIPQC", "bioconductor", "packate", "in", "one", "sample" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L34-L47
238,004
bcbio/bcbio-nextgen
bcbio/qc/chipseq.py
_sample_template
def _sample_template(sample, out_dir): """R code to get QC for one sample""" bam_fn = dd.get_work_bam(sample) genome = dd.get_genome_build(sample) if genome in supported: peaks = sample.get("peaks_files", []).get("main") if peaks: r_code = ("library(ChIPQC);\n" "sample = ChIPQCsample(\"{bam_fn}\"," "\"{peaks}\", " "annotation = \"{genome}\"," ");\n" "ChIPQCreport(sample);\n") r_code_fn = os.path.join(out_dir, "chipqc.r") with open(r_code_fn, 'w') as inh: inh.write(r_code.format(**locals())) return r_code_fn
python
def _sample_template(sample, out_dir): bam_fn = dd.get_work_bam(sample) genome = dd.get_genome_build(sample) if genome in supported: peaks = sample.get("peaks_files", []).get("main") if peaks: r_code = ("library(ChIPQC);\n" "sample = ChIPQCsample(\"{bam_fn}\"," "\"{peaks}\", " "annotation = \"{genome}\"," ");\n" "ChIPQCreport(sample);\n") r_code_fn = os.path.join(out_dir, "chipqc.r") with open(r_code_fn, 'w') as inh: inh.write(r_code.format(**locals())) return r_code_fn
[ "def", "_sample_template", "(", "sample", ",", "out_dir", ")", ":", "bam_fn", "=", "dd", ".", "get_work_bam", "(", "sample", ")", "genome", "=", "dd", ".", "get_genome_build", "(", "sample", ")", "if", "genome", "in", "supported", ":", "peaks", "=", "sam...
R code to get QC for one sample
[ "R", "code", "to", "get", "QC", "for", "one", "sample" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/chipseq.py#L56-L72
238,005
bcbio/bcbio-nextgen
bcbio/rnaseq/featureCounts.py
_change_sample_name
def _change_sample_name(in_file, sample_name, data=None): """Fix name in feature counts log file to get the same name in multiqc report. """ out_file = append_stem(in_file, "_fixed") with file_transaction(data, out_file) as tx_out: with open(tx_out, "w") as out_handle: with open(in_file) as in_handle: for line in in_handle: if line.startswith("Status"): line = "Status\t%s.bam" % sample_name out_handle.write("%s\n" % line.strip()) return out_file
python
def _change_sample_name(in_file, sample_name, data=None): out_file = append_stem(in_file, "_fixed") with file_transaction(data, out_file) as tx_out: with open(tx_out, "w") as out_handle: with open(in_file) as in_handle: for line in in_handle: if line.startswith("Status"): line = "Status\t%s.bam" % sample_name out_handle.write("%s\n" % line.strip()) return out_file
[ "def", "_change_sample_name", "(", "in_file", ",", "sample_name", ",", "data", "=", "None", ")", ":", "out_file", "=", "append_stem", "(", "in_file", ",", "\"_fixed\"", ")", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out", ":", ...
Fix name in feature counts log file to get the same name in multiqc report.
[ "Fix", "name", "in", "feature", "counts", "log", "file", "to", "get", "the", "same", "name", "in", "multiqc", "report", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/featureCounts.py#L57-L69
238,006
bcbio/bcbio-nextgen
bcbio/rnaseq/featureCounts.py
_format_count_file
def _format_count_file(count_file, data): """ this cuts the count file produced from featureCounts down to a two column file of gene ids and number of reads mapping to each gene """ COUNT_COLUMN = 5 out_file = os.path.splitext(count_file)[0] + ".fixed.counts" if file_exists(out_file): return out_file df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=1) df_sub = df.ix[:, COUNT_COLUMN] with file_transaction(data, out_file) as tx_out_file: df_sub.to_csv(tx_out_file, sep="\t", index_label="id", header=False) return out_file
python
def _format_count_file(count_file, data): COUNT_COLUMN = 5 out_file = os.path.splitext(count_file)[0] + ".fixed.counts" if file_exists(out_file): return out_file df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=1) df_sub = df.ix[:, COUNT_COLUMN] with file_transaction(data, out_file) as tx_out_file: df_sub.to_csv(tx_out_file, sep="\t", index_label="id", header=False) return out_file
[ "def", "_format_count_file", "(", "count_file", ",", "data", ")", ":", "COUNT_COLUMN", "=", "5", "out_file", "=", "os", ".", "path", ".", "splitext", "(", "count_file", ")", "[", "0", "]", "+", "\".fixed.counts\"", "if", "file_exists", "(", "out_file", ")"...
this cuts the count file produced from featureCounts down to a two column file of gene ids and number of reads mapping to each gene
[ "this", "cuts", "the", "count", "file", "produced", "from", "featureCounts", "down", "to", "a", "two", "column", "file", "of", "gene", "ids", "and", "number", "of", "reads", "mapping", "to", "each", "gene" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/featureCounts.py#L71-L86
238,007
bcbio/bcbio-nextgen
bcbio/variation/peddy.py
run_qc
def run_qc(_, data, out_dir): """Run quality control in QC environment on a single sample. Enables peddy integration with CWL runs. """ if cwlutils.is_cwl_run(data): qc_data = run_peddy([data], out_dir) if tz.get_in(["summary", "qc", "peddy"], qc_data): return tz.get_in(["summary", "qc", "peddy"], qc_data)
python
def run_qc(_, data, out_dir): if cwlutils.is_cwl_run(data): qc_data = run_peddy([data], out_dir) if tz.get_in(["summary", "qc", "peddy"], qc_data): return tz.get_in(["summary", "qc", "peddy"], qc_data)
[ "def", "run_qc", "(", "_", ",", "data", ",", "out_dir", ")", ":", "if", "cwlutils", ".", "is_cwl_run", "(", "data", ")", ":", "qc_data", "=", "run_peddy", "(", "[", "data", "]", ",", "out_dir", ")", "if", "tz", ".", "get_in", "(", "[", "\"summary\"...
Run quality control in QC environment on a single sample. Enables peddy integration with CWL runs.
[ "Run", "quality", "control", "in", "QC", "environment", "on", "a", "single", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/peddy.py#L35-L43
238,008
bcbio/bcbio-nextgen
bcbio/chipseq/macs2.py
run
def run(name, chip_bam, input_bam, genome_build, out_dir, method, resources, data): """ Run macs2 for chip and input samples avoiding errors due to samples. """ # output file name need to have the caller name config = dd.get_config(data) out_file = os.path.join(out_dir, name + "_peaks_macs2.xls") macs2_file = os.path.join(out_dir, name + "_peaks.xls") if utils.file_exists(out_file): _compres_bdg_files(out_dir) return _get_output_files(out_dir) macs2 = config_utils.get_program("macs2", config) options = " ".join(resources.get("macs2", {}).get("options", "")) genome_size = bam.fasta.total_sequence_length(dd.get_ref_file(data)) genome_size = "" if options.find("-g") > -1 else "-g %s" % genome_size paired = "-f BAMPE" if bam.is_paired(chip_bam) else "" with utils.chdir(out_dir): cmd = _macs2_cmd(method) try: do.run(cmd.format(**locals()), "macs2 for %s" % name) utils.move_safe(macs2_file, out_file) except subprocess.CalledProcessError: raise RuntimeWarning("macs2 terminated with an error.\n" "Please, check the message and report " "error if it is related to bcbio.\n" "You can add specific options for the sample " "setting resources as explained in docs: " "https://bcbio-nextgen.readthedocs.org/en/latest/contents/configuration.html#sample-specific-resources") _compres_bdg_files(out_dir) return _get_output_files(out_dir)
python
def run(name, chip_bam, input_bam, genome_build, out_dir, method, resources, data): # output file name need to have the caller name config = dd.get_config(data) out_file = os.path.join(out_dir, name + "_peaks_macs2.xls") macs2_file = os.path.join(out_dir, name + "_peaks.xls") if utils.file_exists(out_file): _compres_bdg_files(out_dir) return _get_output_files(out_dir) macs2 = config_utils.get_program("macs2", config) options = " ".join(resources.get("macs2", {}).get("options", "")) genome_size = bam.fasta.total_sequence_length(dd.get_ref_file(data)) genome_size = "" if options.find("-g") > -1 else "-g %s" % genome_size paired = "-f BAMPE" if bam.is_paired(chip_bam) else "" with utils.chdir(out_dir): cmd = _macs2_cmd(method) try: do.run(cmd.format(**locals()), "macs2 for %s" % name) utils.move_safe(macs2_file, out_file) except subprocess.CalledProcessError: raise RuntimeWarning("macs2 terminated with an error.\n" "Please, check the message and report " "error if it is related to bcbio.\n" "You can add specific options for the sample " "setting resources as explained in docs: " "https://bcbio-nextgen.readthedocs.org/en/latest/contents/configuration.html#sample-specific-resources") _compres_bdg_files(out_dir) return _get_output_files(out_dir)
[ "def", "run", "(", "name", ",", "chip_bam", ",", "input_bam", ",", "genome_build", ",", "out_dir", ",", "method", ",", "resources", ",", "data", ")", ":", "# output file name need to have the caller name", "config", "=", "dd", ".", "get_config", "(", "data", "...
Run macs2 for chip and input samples avoiding errors due to samples.
[ "Run", "macs2", "for", "chip", "and", "input", "samples", "avoiding", "errors", "due", "to", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/macs2.py#L11-L41
238,009
bcbio/bcbio-nextgen
bcbio/chipseq/macs2.py
_macs2_cmd
def _macs2_cmd(method="chip"): """Main command for macs2 tool.""" if method.lower() == "chip": cmd = ("{macs2} callpeak -t {chip_bam} -c {input_bam} {paired} " " {genome_size} -n {name} -B {options}") elif method.lower() == "atac": cmd = ("{macs2} callpeak -t {chip_bam} --nomodel " " {paired} {genome_size} -n {name} -B {options}" " --nolambda --keep-dup all") else: raise ValueError("chip_method should be chip or atac.") return cmd
python
def _macs2_cmd(method="chip"): if method.lower() == "chip": cmd = ("{macs2} callpeak -t {chip_bam} -c {input_bam} {paired} " " {genome_size} -n {name} -B {options}") elif method.lower() == "atac": cmd = ("{macs2} callpeak -t {chip_bam} --nomodel " " {paired} {genome_size} -n {name} -B {options}" " --nolambda --keep-dup all") else: raise ValueError("chip_method should be chip or atac.") return cmd
[ "def", "_macs2_cmd", "(", "method", "=", "\"chip\"", ")", ":", "if", "method", ".", "lower", "(", ")", "==", "\"chip\"", ":", "cmd", "=", "(", "\"{macs2} callpeak -t {chip_bam} -c {input_bam} {paired} \"", "\" {genome_size} -n {name} -B {options}\"", ")", "elif", "met...
Main command for macs2 tool.
[ "Main", "command", "for", "macs2", "tool", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/chipseq/macs2.py#L60-L71
238,010
bcbio/bcbio-nextgen
bcbio/pipeline/archive.py
to_cram
def to_cram(data): """Convert BAM archive files into indexed CRAM. """ data = utils.to_single_data(data) cram_file = cram.compress(dd.get_work_bam(data) or dd.get_align_bam(data), data) out_key = "archive_bam" if cwlutils.is_cwl_run(data) else "work_bam" data[out_key] = cram_file return [[data]]
python
def to_cram(data): data = utils.to_single_data(data) cram_file = cram.compress(dd.get_work_bam(data) or dd.get_align_bam(data), data) out_key = "archive_bam" if cwlutils.is_cwl_run(data) else "work_bam" data[out_key] = cram_file return [[data]]
[ "def", "to_cram", "(", "data", ")", ":", "data", "=", "utils", ".", "to_single_data", "(", "data", ")", "cram_file", "=", "cram", ".", "compress", "(", "dd", ".", "get_work_bam", "(", "data", ")", "or", "dd", ".", "get_align_bam", "(", "data", ")", "...
Convert BAM archive files into indexed CRAM.
[ "Convert", "BAM", "archive", "files", "into", "indexed", "CRAM", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/archive.py#L11-L18
238,011
bcbio/bcbio-nextgen
bcbio/pipeline/archive.py
compress
def compress(samples, run_parallel): """Perform compression of output files for long term storage. """ to_cram = [] finished = [] for data in [x[0] for x in samples]: if "cram" in dd.get_archive(data) or "cram-lossless" in dd.get_archive(data): to_cram.append([data]) else: finished.append([data]) crammed = run_parallel("archive_to_cram", to_cram) return finished + crammed
python
def compress(samples, run_parallel): to_cram = [] finished = [] for data in [x[0] for x in samples]: if "cram" in dd.get_archive(data) or "cram-lossless" in dd.get_archive(data): to_cram.append([data]) else: finished.append([data]) crammed = run_parallel("archive_to_cram", to_cram) return finished + crammed
[ "def", "compress", "(", "samples", ",", "run_parallel", ")", ":", "to_cram", "=", "[", "]", "finished", "=", "[", "]", "for", "data", "in", "[", "x", "[", "0", "]", "for", "x", "in", "samples", "]", ":", "if", "\"cram\"", "in", "dd", ".", "get_ar...
Perform compression of output files for long term storage.
[ "Perform", "compression", "of", "output", "files", "for", "long", "term", "storage", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/archive.py#L20-L31
238,012
bcbio/bcbio-nextgen
bcbio/qc/samtools.py
run
def run(_, data, out_dir=None): """Run samtools stats with reports on mapped reads, duplicates and insert sizes. """ stats_file, idxstats_file = _get_stats_files(data, out_dir) samtools = config_utils.get_program("samtools", data["config"]) bam_file = dd.get_align_bam(data) or dd.get_work_bam(data) if not utils.file_exists(stats_file): utils.safe_makedir(out_dir) with file_transaction(data, stats_file) as tx_out_file: cores = dd.get_num_cores(data) cmd = "{samtools} stats -@ {cores} {bam_file}" cmd += " > {tx_out_file}" do.run(cmd.format(**locals()), "samtools stats", data) if not utils.file_exists(idxstats_file): utils.safe_makedir(out_dir) with file_transaction(data, idxstats_file) as tx_out_file: cmd = "{samtools} idxstats {bam_file}" cmd += " > {tx_out_file}" do.run(cmd.format(**locals()), "samtools index stats", data) out = {"base": idxstats_file, "secondary": [stats_file]} out["metrics"] = _parse_samtools_stats(stats_file) return out
python
def run(_, data, out_dir=None): stats_file, idxstats_file = _get_stats_files(data, out_dir) samtools = config_utils.get_program("samtools", data["config"]) bam_file = dd.get_align_bam(data) or dd.get_work_bam(data) if not utils.file_exists(stats_file): utils.safe_makedir(out_dir) with file_transaction(data, stats_file) as tx_out_file: cores = dd.get_num_cores(data) cmd = "{samtools} stats -@ {cores} {bam_file}" cmd += " > {tx_out_file}" do.run(cmd.format(**locals()), "samtools stats", data) if not utils.file_exists(idxstats_file): utils.safe_makedir(out_dir) with file_transaction(data, idxstats_file) as tx_out_file: cmd = "{samtools} idxstats {bam_file}" cmd += " > {tx_out_file}" do.run(cmd.format(**locals()), "samtools index stats", data) out = {"base": idxstats_file, "secondary": [stats_file]} out["metrics"] = _parse_samtools_stats(stats_file) return out
[ "def", "run", "(", "_", ",", "data", ",", "out_dir", "=", "None", ")", ":", "stats_file", ",", "idxstats_file", "=", "_get_stats_files", "(", "data", ",", "out_dir", ")", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "data"...
Run samtools stats with reports on mapped reads, duplicates and insert sizes.
[ "Run", "samtools", "stats", "with", "reports", "on", "mapped", "reads", "duplicates", "and", "insert", "sizes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/samtools.py#L13-L34
238,013
bcbio/bcbio-nextgen
bcbio/qc/samtools.py
run_and_save
def run_and_save(data): """Run QC, saving file outputs in data dictionary. """ run(None, data) stats_file, idxstats_file = _get_stats_files(data) data = tz.update_in(data, ["depth", "samtools", "stats"], lambda x: stats_file) data = tz.update_in(data, ["depth", "samtools", "idxstats"], lambda x: idxstats_file) return data
python
def run_and_save(data): run(None, data) stats_file, idxstats_file = _get_stats_files(data) data = tz.update_in(data, ["depth", "samtools", "stats"], lambda x: stats_file) data = tz.update_in(data, ["depth", "samtools", "idxstats"], lambda x: idxstats_file) return data
[ "def", "run_and_save", "(", "data", ")", ":", "run", "(", "None", ",", "data", ")", "stats_file", ",", "idxstats_file", "=", "_get_stats_files", "(", "data", ")", "data", "=", "tz", ".", "update_in", "(", "data", ",", "[", "\"depth\"", ",", "\"samtools\"...
Run QC, saving file outputs in data dictionary.
[ "Run", "QC", "saving", "file", "outputs", "in", "data", "dictionary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/samtools.py#L36-L43
238,014
bcbio/bcbio-nextgen
bcbio/qc/samtools.py
_get_stats_files
def _get_stats_files(data, out_dir=None): """Retrieve stats files from pre-existing dictionary or filesystem. """ if not out_dir: out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "samtools")) stats_file = tz.get_in(["depth", "samtools", "stats"], data) idxstats_file = tz.get_in(["depth", "samtools", "idxstats"], data) if not stats_file: stats_file = os.path.join(out_dir, "%s.txt" % dd.get_sample_name(data)) if not idxstats_file: idxstats_file = os.path.join(out_dir, "%s-idxstats.txt" % dd.get_sample_name(data)) return stats_file, idxstats_file
python
def _get_stats_files(data, out_dir=None): if not out_dir: out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "samtools")) stats_file = tz.get_in(["depth", "samtools", "stats"], data) idxstats_file = tz.get_in(["depth", "samtools", "idxstats"], data) if not stats_file: stats_file = os.path.join(out_dir, "%s.txt" % dd.get_sample_name(data)) if not idxstats_file: idxstats_file = os.path.join(out_dir, "%s-idxstats.txt" % dd.get_sample_name(data)) return stats_file, idxstats_file
[ "def", "_get_stats_files", "(", "data", ",", "out_dir", "=", "None", ")", ":", "if", "not", "out_dir", ":", "out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"q...
Retrieve stats files from pre-existing dictionary or filesystem.
[ "Retrieve", "stats", "files", "from", "pre", "-", "existing", "dictionary", "or", "filesystem", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/samtools.py#L45-L57
238,015
bcbio/bcbio-nextgen
bcbio/provenance/do.py
_descr_str
def _descr_str(descr, data, region): """Add additional useful information from data to description string. """ if data: name = dd.get_sample_name(data) if name: descr = "{0} : {1}".format(descr, name) elif "work_bam" in data: descr = "{0} : {1}".format(descr, os.path.basename(data["work_bam"])) if region: descr = "{0} : {1}".format(descr, region) return descr
python
def _descr_str(descr, data, region): if data: name = dd.get_sample_name(data) if name: descr = "{0} : {1}".format(descr, name) elif "work_bam" in data: descr = "{0} : {1}".format(descr, os.path.basename(data["work_bam"])) if region: descr = "{0} : {1}".format(descr, region) return descr
[ "def", "_descr_str", "(", "descr", ",", "data", ",", "region", ")", ":", "if", "data", ":", "name", "=", "dd", ".", "get_sample_name", "(", "data", ")", "if", "name", ":", "descr", "=", "\"{0} : {1}\"", ".", "format", "(", "descr", ",", "name", ")", ...
Add additional useful information from data to description string.
[ "Add", "additional", "useful", "information", "from", "data", "to", "description", "string", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/do.py#L35-L46
238,016
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
get_indelcaller
def get_indelcaller(d_or_c): """Retrieve string for indelcaller to use, or empty string if not specified. """ config = d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c indelcaller = config["algorithm"].get("indelcaller", "") if not indelcaller: indelcaller = "" if isinstance(indelcaller, (list, tuple)): indelcaller = indelcaller[0] if (len(indelcaller) > 0) else "" return indelcaller
python
def get_indelcaller(d_or_c): config = d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c indelcaller = config["algorithm"].get("indelcaller", "") if not indelcaller: indelcaller = "" if isinstance(indelcaller, (list, tuple)): indelcaller = indelcaller[0] if (len(indelcaller) > 0) else "" return indelcaller
[ "def", "get_indelcaller", "(", "d_or_c", ")", ":", "config", "=", "d_or_c", "if", "isinstance", "(", "d_or_c", ",", "dict", ")", "and", "\"config\"", "in", "d_or_c", "else", "d_or_c", "indelcaller", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "("...
Retrieve string for indelcaller to use, or empty string if not specified.
[ "Retrieve", "string", "for", "indelcaller", "to", "use", "or", "empty", "string", "if", "not", "specified", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L140-L149
238,017
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
split_snps_indels
def split_snps_indels(orig_file, ref_file, config): """Split a variant call file into SNPs and INDELs for processing. """ base, ext = utils.splitext_plus(orig_file) snp_file = "{base}-snp{ext}".format(base=base, ext=ext) indel_file = "{base}-indel{ext}".format(base=base, ext=ext) for out_file, select_arg in [(snp_file, "--types snps"), (indel_file, "--exclude-types snps")]: if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" cmd = "{bcftools} view -O {output_type} {orig_file} {select_arg} > {tx_out_file}" do.run(cmd.format(**locals()), "Subset to SNPs and indels") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return snp_file, indel_file
python
def split_snps_indels(orig_file, ref_file, config): base, ext = utils.splitext_plus(orig_file) snp_file = "{base}-snp{ext}".format(base=base, ext=ext) indel_file = "{base}-indel{ext}".format(base=base, ext=ext) for out_file, select_arg in [(snp_file, "--types snps"), (indel_file, "--exclude-types snps")]: if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" cmd = "{bcftools} view -O {output_type} {orig_file} {select_arg} > {tx_out_file}" do.run(cmd.format(**locals()), "Subset to SNPs and indels") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return snp_file, indel_file
[ "def", "split_snps_indels", "(", "orig_file", ",", "ref_file", ",", "config", ")", ":", "base", ",", "ext", "=", "utils", ".", "splitext_plus", "(", "orig_file", ")", "snp_file", "=", "\"{base}-snp{ext}\"", ".", "format", "(", "base", "=", "base", ",", "ex...
Split a variant call file into SNPs and INDELs for processing.
[ "Split", "a", "variant", "call", "file", "into", "SNPs", "and", "INDELs", "for", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L183-L199
238,018
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
get_samples
def get_samples(in_file): """Retrieve samples present in a VCF file """ with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if line.startswith("#CHROM"): parts = line.strip().split("\t") return parts[9:] raise ValueError("Did not find sample header in VCF file %s" % in_file)
python
def get_samples(in_file): with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if line.startswith("#CHROM"): parts = line.strip().split("\t") return parts[9:] raise ValueError("Did not find sample header in VCF file %s" % in_file)
[ "def", "get_samples", "(", "in_file", ")", ":", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"#CHROM\"", ")", ":", "parts", "=", "line",...
Retrieve samples present in a VCF file
[ "Retrieve", "samples", "present", "in", "a", "VCF", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L210-L218
238,019
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_get_exclude_samples
def _get_exclude_samples(in_file, to_exclude): """Identify samples in the exclusion list which are actually in the VCF. """ include, exclude = [], [] to_exclude = set(to_exclude) for s in get_samples(in_file): if s in to_exclude: exclude.append(s) else: include.append(s) return include, exclude
python
def _get_exclude_samples(in_file, to_exclude): include, exclude = [], [] to_exclude = set(to_exclude) for s in get_samples(in_file): if s in to_exclude: exclude.append(s) else: include.append(s) return include, exclude
[ "def", "_get_exclude_samples", "(", "in_file", ",", "to_exclude", ")", ":", "include", ",", "exclude", "=", "[", "]", ",", "[", "]", "to_exclude", "=", "set", "(", "to_exclude", ")", "for", "s", "in", "get_samples", "(", "in_file", ")", ":", "if", "s",...
Identify samples in the exclusion list which are actually in the VCF.
[ "Identify", "samples", "in", "the", "exclusion", "list", "which", "are", "actually", "in", "the", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L220-L230
238,020
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
exclude_samples
def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None): """Exclude specific samples from an input VCF file. """ include, exclude = _get_exclude_samples(in_file, to_exclude) # can use the input sample, all exclusions already gone if len(exclude) == 0: out_file = in_file elif not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" include_str = ",".join(include) filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude)) return out_file
python
def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None): include, exclude = _get_exclude_samples(in_file, to_exclude) # can use the input sample, all exclusions already gone if len(exclude) == 0: out_file = in_file elif not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" include_str = ",".join(include) filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude)) return out_file
[ "def", "exclude_samples", "(", "in_file", ",", "out_file", ",", "to_exclude", ",", "ref_file", ",", "config", ",", "filters", "=", "None", ")", ":", "include", ",", "exclude", "=", "_get_exclude_samples", "(", "in_file", ",", "to_exclude", ")", "# can use the ...
Exclude specific samples from an input VCF file.
[ "Exclude", "specific", "samples", "from", "an", "input", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L232-L247
238,021
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
select_sample
def select_sample(in_file, sample, out_file, config, filters=None): """Select a single sample from the supplied multisample VCF file. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: if len(get_samples(in_file)) == 1: shutil.copy(in_file, tx_out_file) else: if in_file.endswith(".gz"): bgzip_and_index(in_file, config) bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}" do.run(cmd.format(**locals()), "Select sample: %s" % sample) if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def select_sample(in_file, sample, out_file, config, filters=None): if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: if len(get_samples(in_file)) == 1: shutil.copy(in_file, tx_out_file) else: if in_file.endswith(".gz"): bgzip_and_index(in_file, config) bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.' cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}" do.run(cmd.format(**locals()), "Select sample: %s" % sample) if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "select_sample", "(", "in_file", ",", "sample", ",", "out_file", ",", "config", ",", "filters", "=", "None", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ...
Select a single sample from the supplied multisample VCF file.
[ "Select", "a", "single", "sample", "from", "the", "supplied", "multisample", "VCF", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L249-L266
238,022
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
merge_variant_files
def merge_variant_files(orig_files, out_file, ref_file, config, region=None): """Combine multiple VCF files with different samples into a single output file. Uses bcftools merge on bgzipped input files, handling both tricky merge and concatenation of files. Does not correctly handle files with the same sample (use combine_variant_files instead). """ in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] out_file = _do_merge(orig_files, out_file, config, region) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
python
def merge_variant_files(orig_files, out_file, ref_file, config, region=None): in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] out_file = _do_merge(orig_files, out_file, config, region) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
[ "def", "merge_variant_files", "(", "orig_files", ",", "out_file", ",", "ref_file", ",", "config", ",", "region", "=", "None", ")", ":", "in_pipeline", "=", "False", "if", "isinstance", "(", "orig_files", ",", "dict", ")", ":", "file_key", "=", "config", "[...
Combine multiple VCF files with different samples into a single output file. Uses bcftools merge on bgzipped input files, handling both tricky merge and concatenation of files. Does not correctly handle files with the same sample (use combine_variant_files instead).
[ "Combine", "multiple", "VCF", "files", "with", "different", "samples", "into", "a", "single", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L288-L304
238,023
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_do_merge
def _do_merge(orig_files, out_file, config, region): """Do the actual work of merging with bcftools merge. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: _check_samples_nodups(orig_files) prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0] with open(input_vcf_file, "w") as out_handle: for fname in prep_files: out_handle.write(fname + "\n") bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" region_str = "-r {}".format(region) if region else "" cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}" do.run(cmd.format(**locals()), "Merge variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def _do_merge(orig_files, out_file, config, region): if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: _check_samples_nodups(orig_files) prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0] with open(input_vcf_file, "w") as out_handle: for fname in prep_files: out_handle.write(fname + "\n") bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" region_str = "-r {}".format(region) if region else "" cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}" do.run(cmd.format(**locals()), "Merge variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "_do_merge", "(", "orig_files", ",", "out_file", ",", "config", ",", "region", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file", ":", ...
Do the actual work of merging with bcftools merge.
[ "Do", "the", "actual", "work", "of", "merging", "with", "bcftools", "merge", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L306-L324
238,024
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_check_samples_nodups
def _check_samples_nodups(fnames): """Ensure a set of input VCFs do not have duplicate samples. """ counts = defaultdict(int) for f in fnames: for s in get_samples(f): counts[s] += 1 duplicates = [s for s, c in counts.items() if c > 1] if duplicates: raise ValueError("Duplicate samples found in inputs %s: %s" % (duplicates, fnames))
python
def _check_samples_nodups(fnames): counts = defaultdict(int) for f in fnames: for s in get_samples(f): counts[s] += 1 duplicates = [s for s, c in counts.items() if c > 1] if duplicates: raise ValueError("Duplicate samples found in inputs %s: %s" % (duplicates, fnames))
[ "def", "_check_samples_nodups", "(", "fnames", ")", ":", "counts", "=", "defaultdict", "(", "int", ")", "for", "f", "in", "fnames", ":", "for", "s", "in", "get_samples", "(", "f", ")", ":", "counts", "[", "s", "]", "+=", "1", "duplicates", "=", "[", ...
Ensure a set of input VCFs do not have duplicate samples.
[ "Ensure", "a", "set", "of", "input", "VCFs", "do", "not", "have", "duplicate", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L326-L335
238,025
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_sort_by_region
def _sort_by_region(fnames, regions, ref_file, config): """Sort a set of regionally split files by region for ordered output. """ contig_order = {} for i, sq in enumerate(ref.file_contigs(ref_file, config)): contig_order[sq.name] = i sitems = [] assert len(regions) == len(fnames), (regions, fnames) added_fnames = set([]) for region, fname in zip(regions, fnames): if fname not in added_fnames: if isinstance(region, (list, tuple)): c, s, e = region elif isinstance(region, six.string_types) and region.find(":") >= 0: c, coords = region.split(":") s, e = [int(x) for x in coords.split("-")] else: c = region s, e = 0, 0 sitems.append(((contig_order[c], s, e), c, fname)) added_fnames.add(fname) sitems.sort() return [(x[1], x[2]) for x in sitems]
python
def _sort_by_region(fnames, regions, ref_file, config): contig_order = {} for i, sq in enumerate(ref.file_contigs(ref_file, config)): contig_order[sq.name] = i sitems = [] assert len(regions) == len(fnames), (regions, fnames) added_fnames = set([]) for region, fname in zip(regions, fnames): if fname not in added_fnames: if isinstance(region, (list, tuple)): c, s, e = region elif isinstance(region, six.string_types) and region.find(":") >= 0: c, coords = region.split(":") s, e = [int(x) for x in coords.split("-")] else: c = region s, e = 0, 0 sitems.append(((contig_order[c], s, e), c, fname)) added_fnames.add(fname) sitems.sort() return [(x[1], x[2]) for x in sitems]
[ "def", "_sort_by_region", "(", "fnames", ",", "regions", ",", "ref_file", ",", "config", ")", ":", "contig_order", "=", "{", "}", "for", "i", ",", "sq", "in", "enumerate", "(", "ref", ".", "file_contigs", "(", "ref_file", ",", "config", ")", ")", ":", ...
Sort a set of regionally split files by region for ordered output.
[ "Sort", "a", "set", "of", "regionally", "split", "files", "by", "region", "for", "ordered", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L337-L359
238,026
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
concat_variant_files
def concat_variant_files(orig_files, out_file, regions, ref_file, config): """Concatenate multiple variant files from regions into a single output file. Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails. These both only combine samples and avoid parsing, allowing scaling to large file sizes. """ if not utils.file_exists(out_file): input_file_list = _get_file_list(orig_files, out_file, regions, ref_file, config) try: out_file = _run_concat_variant_files_gatk4(input_file_list, out_file, config) except subprocess.CalledProcessError as msg: if ("We require all VCFs to have complete VCF headers" in str(msg) or "Features added out of order" in str(msg) or "The reference allele cannot be missing" in str(msg)): out_file = _run_concat_variant_files_bcftools(input_file_list, out_file, config, naive=True) else: raise if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def concat_variant_files(orig_files, out_file, regions, ref_file, config): if not utils.file_exists(out_file): input_file_list = _get_file_list(orig_files, out_file, regions, ref_file, config) try: out_file = _run_concat_variant_files_gatk4(input_file_list, out_file, config) except subprocess.CalledProcessError as msg: if ("We require all VCFs to have complete VCF headers" in str(msg) or "Features added out of order" in str(msg) or "The reference allele cannot be missing" in str(msg)): out_file = _run_concat_variant_files_bcftools(input_file_list, out_file, config, naive=True) else: raise if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "concat_variant_files", "(", "orig_files", ",", "out_file", ",", "regions", ",", "ref_file", ",", "config", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "input_file_list", "=", "_get_file_list", "(", "orig_files", ",", ...
Concatenate multiple variant files from regions into a single output file. Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails. These both only combine samples and avoid parsing, allowing scaling to large file sizes.
[ "Concatenate", "multiple", "variant", "files", "from", "regions", "into", "a", "single", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L361-L381
238,027
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_run_concat_variant_files_gatk4
def _run_concat_variant_files_gatk4(input_file_list, out_file, config): """Use GATK4 GatherVcfs for concatenation of scattered VCFs. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file] # Use GATK4 for merging, tools_off: [gatk4] applies to variant calling config = utils.deepish_copy(config) if "gatk4" in dd.get_tools_off({"config": config}): config["algorithm"]["tools_off"].remove("gatk4") # Allow specification of verbosity in the unique style this tool uses resources = config_utils.get_resources("gatk", config) opts = [str(x) for x in resources.get("options", [])] if "--verbosity" in opts: params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]] broad_runner = broad.runner_from_config(config) broad_runner.run_gatk(params) return out_file
python
def _run_concat_variant_files_gatk4(input_file_list, out_file, config): if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file] # Use GATK4 for merging, tools_off: [gatk4] applies to variant calling config = utils.deepish_copy(config) if "gatk4" in dd.get_tools_off({"config": config}): config["algorithm"]["tools_off"].remove("gatk4") # Allow specification of verbosity in the unique style this tool uses resources = config_utils.get_resources("gatk", config) opts = [str(x) for x in resources.get("options", [])] if "--verbosity" in opts: params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]] broad_runner = broad.runner_from_config(config) broad_runner.run_gatk(params) return out_file
[ "def", "_run_concat_variant_files_gatk4", "(", "input_file_list", ",", "out_file", ",", "config", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file",...
Use GATK4 GatherVcfs for concatenation of scattered VCFs.
[ "Use", "GATK4", "GatherVcfs", "for", "concatenation", "of", "scattered", "VCFs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L383-L400
238,028
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_get_file_list
def _get_file_list(orig_files, out_file, regions, ref_file, config): """Create file with region sorted list of non-empty VCFs for concatenating. """ sorted_files = _sort_by_region(orig_files, regions, ref_file, config) exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)] if len(exist_files) == 0: # no non-empty inputs, merge the empty ones exist_files = [x for c, x in sorted_files if os.path.exists(x)] elif len(exist_files) > 1: exist_files = _fix_gatk_header(exist_files, out_file, config) else: exist_files = [x for c, x in exist_files] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0] with open(input_file_list, "w") as out_handle: for fname in ready_files: out_handle.write(fname + "\n") return input_file_list
python
def _get_file_list(orig_files, out_file, regions, ref_file, config): sorted_files = _sort_by_region(orig_files, regions, ref_file, config) exist_files = [(c, x) for c, x in sorted_files if os.path.exists(x) and vcf_has_variants(x)] if len(exist_files) == 0: # no non-empty inputs, merge the empty ones exist_files = [x for c, x in sorted_files if os.path.exists(x)] elif len(exist_files) > 1: exist_files = _fix_gatk_header(exist_files, out_file, config) else: exist_files = [x for c, x in exist_files] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) input_file_list = "%s-files.list" % utils.splitext_plus(out_file)[0] with open(input_file_list, "w") as out_handle: for fname in ready_files: out_handle.write(fname + "\n") return input_file_list
[ "def", "_get_file_list", "(", "orig_files", ",", "out_file", ",", "regions", ",", "ref_file", ",", "config", ")", ":", "sorted_files", "=", "_sort_by_region", "(", "orig_files", ",", "regions", ",", "ref_file", ",", "config", ")", "exist_files", "=", "[", "(...
Create file with region sorted list of non-empty VCFs for concatenating.
[ "Create", "file", "with", "region", "sorted", "list", "of", "non", "-", "empty", "VCFs", "for", "concatenating", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L402-L418
238,029
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_fix_gatk_header
def _fix_gatk_header(exist_files, out_file, config): """Ensure consistent headers for VCF concatenation. Fixes problems for genomes that start with chrM by reheadering the first file. These files do haploid variant calling which lack the PID phasing key/value pair in FORMAT, so initial chrM samples cause errors during concatenation due to the lack of header merging. This fixes this by updating the first header. """ from bcbio.variation import ploidy c, base_file = exist_files[0] replace_file = base_file items = [{"config": config}] if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1: for c, x in exist_files[1:]: if ploidy.get_ploidy(items, (c, 1, 2)) > 1: replace_file = x break base_fix_file = os.path.join(os.path.dirname(out_file), "%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file))) with file_transaction(config, base_fix_file) as tx_out_file: header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0] do.run("zgrep ^# %s > %s" % (replace_file, header_file), "Prepare header file for merging") resources = config_utils.get_resources("picard", config) ropts = [] if "options" in resources: ropts += [str(x) for x in resources.get("options", [])] do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" % (utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)), "Reheader initial VCF file in merge") bgzip_and_index(base_fix_file, config) return [base_fix_file] + [x for (c, x) in exist_files[1:]]
python
def _fix_gatk_header(exist_files, out_file, config): from bcbio.variation import ploidy c, base_file = exist_files[0] replace_file = base_file items = [{"config": config}] if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1: for c, x in exist_files[1:]: if ploidy.get_ploidy(items, (c, 1, 2)) > 1: replace_file = x break base_fix_file = os.path.join(os.path.dirname(out_file), "%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file))) with file_transaction(config, base_fix_file) as tx_out_file: header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0] do.run("zgrep ^# %s > %s" % (replace_file, header_file), "Prepare header file for merging") resources = config_utils.get_resources("picard", config) ropts = [] if "options" in resources: ropts += [str(x) for x in resources.get("options", [])] do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" % (utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)), "Reheader initial VCF file in merge") bgzip_and_index(base_fix_file, config) return [base_fix_file] + [x for (c, x) in exist_files[1:]]
[ "def", "_fix_gatk_header", "(", "exist_files", ",", "out_file", ",", "config", ")", ":", "from", "bcbio", ".", "variation", "import", "ploidy", "c", ",", "base_file", "=", "exist_files", "[", "0", "]", "replace_file", "=", "base_file", "items", "=", "[", "...
Ensure consistent headers for VCF concatenation. Fixes problems for genomes that start with chrM by reheadering the first file. These files do haploid variant calling which lack the PID phasing key/value pair in FORMAT, so initial chrM samples cause errors during concatenation due to the lack of header merging. This fixes this by updating the first header.
[ "Ensure", "consistent", "headers", "for", "VCF", "concatenation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L420-L451
238,030
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
_run_concat_variant_files_bcftools
def _run_concat_variant_files_bcftools(in_list, out_file, config, naive=False): """Concatenate variant files using bcftools concat, potentially using the fast naive option. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" if naive: args = "--naive" else: args = "--allow-overlaps" cmd = "{bcftools} concat {args} -O {output_type} --file-list {in_list} -o {tx_out_file}" do.run(cmd.format(**locals()), "bcftools concat variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
python
def _run_concat_variant_files_bcftools(in_list, out_file, config, naive=False): if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) output_type = "z" if out_file.endswith(".gz") else "v" if naive: args = "--naive" else: args = "--allow-overlaps" cmd = "{bcftools} concat {args} -O {output_type} --file-list {in_list} -o {tx_out_file}" do.run(cmd.format(**locals()), "bcftools concat variants") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) return out_file
[ "def", "_run_concat_variant_files_bcftools", "(", "in_list", ",", "out_file", ",", "config", ",", "naive", "=", "False", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ...
Concatenate variant files using bcftools concat, potentially using the fast naive option.
[ "Concatenate", "variant", "files", "using", "bcftools", "concat", "potentially", "using", "the", "fast", "naive", "option", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L465-L480
238,031
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
combine_variant_files
def combine_variant_files(orig_files, out_file, ref_file, config, quiet_out=True, region=None): """Combine VCF files from the same sample into a single output file. Handles cases where we split files into SNPs/Indels for processing then need to merge back into a final file. """ in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: exist_files = [x for x in orig_files if os.path.exists(x)] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0] cores = dd.get_num_cores({"config": config}) memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None cmd = ["picard"] + broad.get_picard_opts(config, memscale) + \ ["MergeVcfs", "D=%s" % dict_file, "O=%s" % tx_out_file] + \ ["I=%s" % f for f in ready_files] cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(cmd)) do.run(cmd, "Combine variant files") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
python
def combine_variant_files(orig_files, out_file, ref_file, config, quiet_out=True, region=None): in_pipeline = False if isinstance(orig_files, dict): file_key = config["file_key"] in_pipeline = True orig_files = orig_files[file_key] if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: exist_files = [x for x in orig_files if os.path.exists(x)] ready_files = run_multicore(p_bgzip_and_index, [[x, config] for x in exist_files], config) dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0] cores = dd.get_num_cores({"config": config}) memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None cmd = ["picard"] + broad.get_picard_opts(config, memscale) + \ ["MergeVcfs", "D=%s" % dict_file, "O=%s" % tx_out_file] + \ ["I=%s" % f for f in ready_files] cmd = "%s && %s" % (utils.get_java_clprep(), " ".join(cmd)) do.run(cmd, "Combine variant files") if out_file.endswith(".gz"): bgzip_and_index(out_file, config) if in_pipeline: return [{file_key: out_file, "region": region, "sam_ref": ref_file, "config": config}] else: return out_file
[ "def", "combine_variant_files", "(", "orig_files", ",", "out_file", ",", "ref_file", ",", "config", ",", "quiet_out", "=", "True", ",", "region", "=", "None", ")", ":", "in_pipeline", "=", "False", "if", "isinstance", "(", "orig_files", ",", "dict", ")", "...
Combine VCF files from the same sample into a single output file. Handles cases where we split files into SNPs/Indels for processing then need to merge back into a final file.
[ "Combine", "VCF", "files", "from", "the", "same", "sample", "into", "a", "single", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L482-L511
238,032
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
sort_by_ref
def sort_by_ref(vcf_file, data): """Sort a VCF file by genome reference and position, adding contig information. """ out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0] if not utils.file_uptodate(out_file, vcf_file): with file_transaction(data, out_file) as tx_out_file: header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0] with open(header_file, "w") as out_handle: for region in ref.file_contigs(dd.get_ref_file(data), data["config"]): out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size)) cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat" cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | " "vt sort -m full -o {tx_out_file} -") with utils.chdir(os.path.dirname(tx_out_file)): do.run(cmd.format(**locals()), "Sort VCF by reference") return bgzip_and_index(out_file, data["config"])
python
def sort_by_ref(vcf_file, data): out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0] if not utils.file_uptodate(out_file, vcf_file): with file_transaction(data, out_file) as tx_out_file: header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0] with open(header_file, "w") as out_handle: for region in ref.file_contigs(dd.get_ref_file(data), data["config"]): out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size)) cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat" cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | " "vt sort -m full -o {tx_out_file} -") with utils.chdir(os.path.dirname(tx_out_file)): do.run(cmd.format(**locals()), "Sort VCF by reference") return bgzip_and_index(out_file, data["config"])
[ "def", "sort_by_ref", "(", "vcf_file", ",", "data", ")", ":", "out_file", "=", "\"%s-prep.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "vcf_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "vcf_file", ")",...
Sort a VCF file by genome reference and position, adding contig information.
[ "Sort", "a", "VCF", "file", "by", "genome", "reference", "and", "position", "adding", "contig", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L513-L528
238,033
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
add_contig_to_header
def add_contig_to_header(line, ref_file): """Streaming target to add contigs to a VCF file header. """ if line.startswith("##fileformat=VCF"): out = [line] for region in ref.file_contigs(ref_file): out.append("##contig=<ID=%s,length=%s>" % (region.name, region.size)) return "\n".join(out) else: return line
python
def add_contig_to_header(line, ref_file): if line.startswith("##fileformat=VCF"): out = [line] for region in ref.file_contigs(ref_file): out.append("##contig=<ID=%s,length=%s>" % (region.name, region.size)) return "\n".join(out) else: return line
[ "def", "add_contig_to_header", "(", "line", ",", "ref_file", ")", ":", "if", "line", ".", "startswith", "(", "\"##fileformat=VCF\"", ")", ":", "out", "=", "[", "line", "]", "for", "region", "in", "ref", ".", "file_contigs", "(", "ref_file", ")", ":", "ou...
Streaming target to add contigs to a VCF file header.
[ "Streaming", "target", "to", "add", "contigs", "to", "a", "VCF", "file", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L539-L548
238,034
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
parallel_combine_variants
def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel): """Combine variants in parallel by chromosome, concatenating final outputs. """ file_key = "vcf_files" def split_by_region(data): base, ext = utils.splitext_plus(os.path.basename(out_file)) args = [] for region in [x.name for x in ref.file_contigs(ref_file, config)]: region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base, "%s-%s%s" % (base, region, ext)) utils.safe_makedir(os.path.dirname(region_out)) args.append((region_out, ref_file, config, region)) return out_file, args config = copy.deepcopy(config) config["file_key"] = file_key prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) items = [[{file_key: prep_files}]] parallel_split_combine(items, split_by_region, run_parallel, "merge_variant_files", "concat_variant_files", file_key, ["region", "sam_ref", "config"], split_outfile_i=0) return out_file
python
def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel): file_key = "vcf_files" def split_by_region(data): base, ext = utils.splitext_plus(os.path.basename(out_file)) args = [] for region in [x.name for x in ref.file_contigs(ref_file, config)]: region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base, "%s-%s%s" % (base, region, ext)) utils.safe_makedir(os.path.dirname(region_out)) args.append((region_out, ref_file, config, region)) return out_file, args config = copy.deepcopy(config) config["file_key"] = file_key prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config) items = [[{file_key: prep_files}]] parallel_split_combine(items, split_by_region, run_parallel, "merge_variant_files", "concat_variant_files", file_key, ["region", "sam_ref", "config"], split_outfile_i=0) return out_file
[ "def", "parallel_combine_variants", "(", "orig_files", ",", "out_file", ",", "ref_file", ",", "config", ",", "run_parallel", ")", ":", "file_key", "=", "\"vcf_files\"", "def", "split_by_region", "(", "data", ")", ":", "base", ",", "ext", "=", "utils", ".", "...
Combine variants in parallel by chromosome, concatenating final outputs.
[ "Combine", "variants", "in", "parallel", "by", "chromosome", "concatenating", "final", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L552-L572
238,035
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
move_vcf
def move_vcf(orig_file, new_file): """Move a VCF file with associated index. """ for ext in ["", ".idx", ".tbi"]: to_move = orig_file + ext if os.path.exists(to_move): shutil.move(to_move, new_file + ext)
python
def move_vcf(orig_file, new_file): for ext in ["", ".idx", ".tbi"]: to_move = orig_file + ext if os.path.exists(to_move): shutil.move(to_move, new_file + ext)
[ "def", "move_vcf", "(", "orig_file", ",", "new_file", ")", ":", "for", "ext", "in", "[", "\"\"", ",", "\".idx\"", ",", "\".tbi\"", "]", ":", "to_move", "=", "orig_file", "+", "ext", "if", "os", ".", "path", ".", "exists", "(", "to_move", ")", ":", ...
Move a VCF file with associated index.
[ "Move", "a", "VCF", "file", "with", "associated", "index", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L576-L582
238,036
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
bgzip_and_index
def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None): """bgzip and tabix index an input file, handling VCF and BED. """ if config is None: config = {} out_file = in_file if in_file.endswith(".gz") else in_file + ".gz" if out_dir: remove_orig = False out_file = os.path.join(out_dir, os.path.basename(out_file)) if (not utils.file_exists(out_file) or not os.path.lexists(out_file) or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))): assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file assert os.path.exists(in_file), "Input file %s not found" % in_file if not utils.file_uptodate(out_file, in_file): with file_transaction(config, out_file) as tx_out_file: bgzip = tools.get_bgzip_cmd(config) cat_cmd = "zcat" if in_file.endswith(".gz") else "cat" if prep_cmd: prep_cmd = "| %s " % prep_cmd cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}" try: do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file)) except subprocess.CalledProcessError: # Race conditions: ignore errors where file has been deleted by another if os.path.exists(in_file) and not os.path.exists(out_file): raise if remove_orig: try: os.remove(in_file) except OSError: # Handle cases where run in parallel and file has been deleted pass tabix_index(out_file, config, tabix_args=tabix_args) return out_file
python
def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None): if config is None: config = {} out_file = in_file if in_file.endswith(".gz") else in_file + ".gz" if out_dir: remove_orig = False out_file = os.path.join(out_dir, os.path.basename(out_file)) if (not utils.file_exists(out_file) or not os.path.lexists(out_file) or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))): assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file assert os.path.exists(in_file), "Input file %s not found" % in_file if not utils.file_uptodate(out_file, in_file): with file_transaction(config, out_file) as tx_out_file: bgzip = tools.get_bgzip_cmd(config) cat_cmd = "zcat" if in_file.endswith(".gz") else "cat" if prep_cmd: prep_cmd = "| %s " % prep_cmd cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}" try: do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file)) except subprocess.CalledProcessError: # Race conditions: ignore errors where file has been deleted by another if os.path.exists(in_file) and not os.path.exists(out_file): raise if remove_orig: try: os.remove(in_file) except OSError: # Handle cases where run in parallel and file has been deleted pass tabix_index(out_file, config, tabix_args=tabix_args) return out_file
[ "def", "bgzip_and_index", "(", "in_file", ",", "config", "=", "None", ",", "remove_orig", "=", "True", ",", "prep_cmd", "=", "\"\"", ",", "tabix_args", "=", "None", ",", "out_dir", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "="...
bgzip and tabix index an input file, handling VCF and BED.
[ "bgzip", "and", "tabix", "index", "an", "input", "file", "handling", "VCF", "and", "BED", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L584-L616
238,037
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
tabix_index
def tabix_index(in_file, config, preset=None, tabix_args=None): """Index a file using tabix. """ in_file = os.path.abspath(in_file) out_file = in_file + ".tbi" if not utils.file_exists(out_file) or not utils.file_uptodate(out_file, in_file): # Remove old index files to prevent linking into tx directory utils.remove_safe(out_file) with file_transaction(config, out_file) as tx_out_file: tabix = tools.get_tabix_cmd(config) tx_in_file = os.path.splitext(tx_out_file)[0] utils.symlink_plus(in_file, tx_in_file) if tabix_args: cmd = "{tabix} -f {tabix_args} {tx_in_file}" else: preset = _guess_preset(in_file) if preset is None else preset cmd = "{tabix} -f -p {preset} {tx_in_file}" do.run(cmd.format(**locals()), "tabix index %s" % os.path.basename(in_file)) return out_file
python
def tabix_index(in_file, config, preset=None, tabix_args=None): in_file = os.path.abspath(in_file) out_file = in_file + ".tbi" if not utils.file_exists(out_file) or not utils.file_uptodate(out_file, in_file): # Remove old index files to prevent linking into tx directory utils.remove_safe(out_file) with file_transaction(config, out_file) as tx_out_file: tabix = tools.get_tabix_cmd(config) tx_in_file = os.path.splitext(tx_out_file)[0] utils.symlink_plus(in_file, tx_in_file) if tabix_args: cmd = "{tabix} -f {tabix_args} {tx_in_file}" else: preset = _guess_preset(in_file) if preset is None else preset cmd = "{tabix} -f -p {preset} {tx_in_file}" do.run(cmd.format(**locals()), "tabix index %s" % os.path.basename(in_file)) return out_file
[ "def", "tabix_index", "(", "in_file", ",", "config", ",", "preset", "=", "None", ",", "tabix_args", "=", "None", ")", ":", "in_file", "=", "os", ".", "path", ".", "abspath", "(", "in_file", ")", "out_file", "=", "in_file", "+", "\".tbi\"", "if", "not",...
Index a file using tabix.
[ "Index", "a", "file", "using", "tabix", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L635-L653
238,038
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
is_gvcf_file
def is_gvcf_file(in_file): """Check if an input file is raw gVCF """ to_check = 100 n = 0 with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith("##"): if n > to_check: break n += 1 parts = line.split("\t") # GATK if parts[4] == "<NON_REF>": return True # strelka2 if parts[4] == "." and parts[7].startswith("BLOCKAVG"): return True # freebayes if parts[4] == "<*>": return True # platypue if parts[4] == "N" and parts[6] == "REFCALL": return True
python
def is_gvcf_file(in_file): to_check = 100 n = 0 with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith("##"): if n > to_check: break n += 1 parts = line.split("\t") # GATK if parts[4] == "<NON_REF>": return True # strelka2 if parts[4] == "." and parts[7].startswith("BLOCKAVG"): return True # freebayes if parts[4] == "<*>": return True # platypue if parts[4] == "N" and parts[6] == "REFCALL": return True
[ "def", "is_gvcf_file", "(", "in_file", ")", ":", "to_check", "=", "100", "n", "=", "0", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "not", "line", ".", "startswith", "...
Check if an input file is raw gVCF
[ "Check", "if", "an", "input", "file", "is", "raw", "gVCF" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L655-L678
238,039
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
cyvcf_add_filter
def cyvcf_add_filter(rec, name): """Add a FILTER value to a cyvcf2 record """ if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] if name not in filters: filters.append(name) rec.FILTER = filters return rec
python
def cyvcf_add_filter(rec, name): if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] if name not in filters: filters.append(name) rec.FILTER = filters return rec
[ "def", "cyvcf_add_filter", "(", "rec", ",", "name", ")", ":", "if", "rec", ".", "FILTER", ":", "filters", "=", "rec", ".", "FILTER", ".", "split", "(", "\";\"", ")", "else", ":", "filters", "=", "[", "]", "if", "name", "not", "in", "filters", ":", ...
Add a FILTER value to a cyvcf2 record
[ "Add", "a", "FILTER", "value", "to", "a", "cyvcf2", "record" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L680-L690
238,040
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
cyvcf_remove_filter
def cyvcf_remove_filter(rec, name): """Remove filter with the given name from a cyvcf2 record """ if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] new_filters = [x for x in filters if not str(x) == name] if len(new_filters) == 0: new_filters = ["PASS"] rec.FILTER = new_filters return rec
python
def cyvcf_remove_filter(rec, name): if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] new_filters = [x for x in filters if not str(x) == name] if len(new_filters) == 0: new_filters = ["PASS"] rec.FILTER = new_filters return rec
[ "def", "cyvcf_remove_filter", "(", "rec", ",", "name", ")", ":", "if", "rec", ".", "FILTER", ":", "filters", "=", "rec", ".", "FILTER", ".", "split", "(", "\";\"", ")", "else", ":", "filters", "=", "[", "]", "new_filters", "=", "[", "x", "for", "x"...
Remove filter with the given name from a cyvcf2 record
[ "Remove", "filter", "with", "the", "given", "name", "from", "a", "cyvcf2", "record" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L692-L703
238,041
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
organize_noalign
def organize_noalign(data): """CWL target to skip alignment and organize input data. """ data = utils.to_single_data(data[0]) work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))) work_bam = os.path.join(work_dir, "%s-input.bam" % dd.get_sample_name(data)) if data.get("files"): if data["files"][0].endswith(".cram"): work_bam = cram.to_bam(data["files"][0], work_bam, data) else: assert data["files"][0].endswith(".bam"), data["files"][0] utils.copy_plus(data["files"][0], work_bam) bam.index(work_bam, data["config"]) else: work_bam = None data["align_bam"] = work_bam return data
python
def organize_noalign(data): data = utils.to_single_data(data[0]) work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))) work_bam = os.path.join(work_dir, "%s-input.bam" % dd.get_sample_name(data)) if data.get("files"): if data["files"][0].endswith(".cram"): work_bam = cram.to_bam(data["files"][0], work_bam, data) else: assert data["files"][0].endswith(".bam"), data["files"][0] utils.copy_plus(data["files"][0], work_bam) bam.index(work_bam, data["config"]) else: work_bam = None data["align_bam"] = work_bam return data
[ "def", "organize_noalign", "(", "data", ")", ":", "data", "=", "utils", ".", "to_single_data", "(", "data", "[", "0", "]", ")", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "...
CWL target to skip alignment and organize input data.
[ "CWL", "target", "to", "skip", "alignment", "and", "organize", "input", "data", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L53-L69
238,042
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
align_to_sort_bam
def align_to_sort_bam(fastq1, fastq2, aligner, data): """Align to the named genome build, returning a sorted BAM file. """ names = data["rgnames"] align_dir_parts = [data["dirs"]["work"], "align", names["sample"]] if data.get("disambiguate"): align_dir_parts.append(data["disambiguate"]["genome_build"]) aligner_index = _get_aligner_index(aligner, data) align_dir = utils.safe_makedir(os.path.join(*align_dir_parts)) ref_file = tz.get_in(("reference", "fasta", "base"), data) if fastq1.endswith(".bam"): data = _align_from_bam(fastq1, aligner, aligner_index, ref_file, names, align_dir, data) else: data = _align_from_fastq(fastq1, fastq2, aligner, aligner_index, ref_file, names, align_dir, data) if data["work_bam"] and utils.file_exists(data["work_bam"]): if data.get("align_split") and dd.get_mark_duplicates(data): # If merging later with with bamsormadup need query sorted inputs # but CWL requires a bai file. Create a fake one to make it happy. bam.fake_index(data["work_bam"], data) else: bam.index(data["work_bam"], data["config"]) for extra in ["-sr", "-disc"]: extra_bam = utils.append_stem(data['work_bam'], extra) if utils.file_exists(extra_bam): bam.index(extra_bam, data["config"]) return data
python
def align_to_sort_bam(fastq1, fastq2, aligner, data): names = data["rgnames"] align_dir_parts = [data["dirs"]["work"], "align", names["sample"]] if data.get("disambiguate"): align_dir_parts.append(data["disambiguate"]["genome_build"]) aligner_index = _get_aligner_index(aligner, data) align_dir = utils.safe_makedir(os.path.join(*align_dir_parts)) ref_file = tz.get_in(("reference", "fasta", "base"), data) if fastq1.endswith(".bam"): data = _align_from_bam(fastq1, aligner, aligner_index, ref_file, names, align_dir, data) else: data = _align_from_fastq(fastq1, fastq2, aligner, aligner_index, ref_file, names, align_dir, data) if data["work_bam"] and utils.file_exists(data["work_bam"]): if data.get("align_split") and dd.get_mark_duplicates(data): # If merging later with with bamsormadup need query sorted inputs # but CWL requires a bai file. Create a fake one to make it happy. bam.fake_index(data["work_bam"], data) else: bam.index(data["work_bam"], data["config"]) for extra in ["-sr", "-disc"]: extra_bam = utils.append_stem(data['work_bam'], extra) if utils.file_exists(extra_bam): bam.index(extra_bam, data["config"]) return data
[ "def", "align_to_sort_bam", "(", "fastq1", ",", "fastq2", ",", "aligner", ",", "data", ")", ":", "names", "=", "data", "[", "\"rgnames\"", "]", "align_dir_parts", "=", "[", "data", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"align\"", ",", "names...
Align to the named genome build, returning a sorted BAM file.
[ "Align", "to", "the", "named", "genome", "build", "returning", "a", "sorted", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L71-L98
238,043
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
get_aligner_with_aliases
def get_aligner_with_aliases(aligner, data): """Retrieve aligner index retriever, including aliases for shared. Handles tricky cases like gridss where we need bwa indices even with no aligner specified since they're used internally within GRIDSS. """ aligner_aliases = {"sentieon-bwa": "bwa"} from bcbio import structural if not aligner and "gridss" in structural.get_svcallers(data): aligner = "bwa" return aligner_aliases.get(aligner) or aligner
python
def get_aligner_with_aliases(aligner, data): aligner_aliases = {"sentieon-bwa": "bwa"} from bcbio import structural if not aligner and "gridss" in structural.get_svcallers(data): aligner = "bwa" return aligner_aliases.get(aligner) or aligner
[ "def", "get_aligner_with_aliases", "(", "aligner", ",", "data", ")", ":", "aligner_aliases", "=", "{", "\"sentieon-bwa\"", ":", "\"bwa\"", "}", "from", "bcbio", "import", "structural", "if", "not", "aligner", "and", "\"gridss\"", "in", "structural", ".", "get_sv...
Retrieve aligner index retriever, including aliases for shared. Handles tricky cases like gridss where we need bwa indices even with no aligner specified since they're used internally within GRIDSS.
[ "Retrieve", "aligner", "index", "retriever", "including", "aliases", "for", "shared", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L100-L110
238,044
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
_get_aligner_index
def _get_aligner_index(aligner, data): """Handle multiple specifications of aligner indexes, returning value to pass to aligner. Original bcbio case -- a list of indices. CWL case: a single file with secondaryFiles staged in the same directory. """ aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data) # standard bcbio case if aligner_indexes and isinstance(aligner_indexes, (list, tuple)): aligner_index = os.path.commonprefix(aligner_indexes) if aligner_index.endswith("."): aligner_index = aligner_index[:-1] return aligner_index # single file -- check for standard naming or directory elif aligner_indexes and os.path.exists(aligner_indexes): aligner_dir = os.path.dirname(aligner_indexes) aligner_prefix = os.path.splitext(aligner_indexes)[0] if len(glob.glob("%s.*" % aligner_prefix)) > 0: return aligner_prefix else: return aligner_dir if aligner not in allow_noindices(): raise ValueError("Did not find reference indices for aligner %s in genome: %s" % (aligner, data["reference"]))
python
def _get_aligner_index(aligner, data): aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data) # standard bcbio case if aligner_indexes and isinstance(aligner_indexes, (list, tuple)): aligner_index = os.path.commonprefix(aligner_indexes) if aligner_index.endswith("."): aligner_index = aligner_index[:-1] return aligner_index # single file -- check for standard naming or directory elif aligner_indexes and os.path.exists(aligner_indexes): aligner_dir = os.path.dirname(aligner_indexes) aligner_prefix = os.path.splitext(aligner_indexes)[0] if len(glob.glob("%s.*" % aligner_prefix)) > 0: return aligner_prefix else: return aligner_dir if aligner not in allow_noindices(): raise ValueError("Did not find reference indices for aligner %s in genome: %s" % (aligner, data["reference"]))
[ "def", "_get_aligner_index", "(", "aligner", ",", "data", ")", ":", "aligner_indexes", "=", "tz", ".", "get_in", "(", "(", "\"reference\"", ",", "get_aligner_with_aliases", "(", "aligner", ",", "data", ")", ",", "\"indexes\"", ")", ",", "data", ")", "# stand...
Handle multiple specifications of aligner indexes, returning value to pass to aligner. Original bcbio case -- a list of indices. CWL case: a single file with secondaryFiles staged in the same directory.
[ "Handle", "multiple", "specifications", "of", "aligner", "indexes", "returning", "value", "to", "pass", "to", "aligner", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L115-L139
238,045
bcbio/bcbio-nextgen
bcbio/pipeline/alignment.py
_align_from_fastq
def _align_from_fastq(fastq1, fastq2, aligner, align_ref, sam_ref, names, align_dir, data): """Align from fastq inputs, producing sorted BAM output. """ config = data["config"] align_fn = TOOLS[aligner].align_fn out = align_fn(fastq1, fastq2, align_ref, names, align_dir, data) # handle align functions that update the main data dictionary in place if isinstance(out, dict): assert out.get("work_bam"), (dd.get_sample_name(data), out.get("work_bam")) return out # handle output of raw SAM files that need to be converted to BAM else: work_bam = bam.sam_to_bam(out, config) data["work_bam"] = bam.sort(work_bam, config) return data
python
def _align_from_fastq(fastq1, fastq2, aligner, align_ref, sam_ref, names, align_dir, data): config = data["config"] align_fn = TOOLS[aligner].align_fn out = align_fn(fastq1, fastq2, align_ref, names, align_dir, data) # handle align functions that update the main data dictionary in place if isinstance(out, dict): assert out.get("work_bam"), (dd.get_sample_name(data), out.get("work_bam")) return out # handle output of raw SAM files that need to be converted to BAM else: work_bam = bam.sam_to_bam(out, config) data["work_bam"] = bam.sort(work_bam, config) return data
[ "def", "_align_from_fastq", "(", "fastq1", ",", "fastq2", ",", "aligner", ",", "align_ref", ",", "sam_ref", ",", "names", ",", "align_dir", ",", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "align_fn", "=", "TOOLS", "[", "aligner", "]...
Align from fastq inputs, producing sorted BAM output.
[ "Align", "from", "fastq", "inputs", "producing", "sorted", "BAM", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L155-L170
238,046
bcbio/bcbio-nextgen
bcbio/structural/gridss.py
_finalize_memory
def _finalize_memory(jvm_opts): """GRIDSS does not recommend setting memory between 32 and 48Gb. https://github.com/PapenfussLab/gridss#memory-usage """ avoid_min = 32 avoid_max = 48 out_opts = [] for opt in jvm_opts: if opt.startswith("-Xmx"): spec = opt[4:] val = int(spec[:-1]) mod = spec[-1] if mod.upper() == "M": adjust = 1024 min_val = avoid_min * 1024 max_val = avoid_max * 1024 else: adjust = 1 min_val, max_val = avoid_min, avoid_max if val >= min_val and val < max_val: val = min_val - adjust opt = "%s%s%s" % (opt[:4], val, mod) out_opts.append(opt) return out_opts
python
def _finalize_memory(jvm_opts): avoid_min = 32 avoid_max = 48 out_opts = [] for opt in jvm_opts: if opt.startswith("-Xmx"): spec = opt[4:] val = int(spec[:-1]) mod = spec[-1] if mod.upper() == "M": adjust = 1024 min_val = avoid_min * 1024 max_val = avoid_max * 1024 else: adjust = 1 min_val, max_val = avoid_min, avoid_max if val >= min_val and val < max_val: val = min_val - adjust opt = "%s%s%s" % (opt[:4], val, mod) out_opts.append(opt) return out_opts
[ "def", "_finalize_memory", "(", "jvm_opts", ")", ":", "avoid_min", "=", "32", "avoid_max", "=", "48", "out_opts", "=", "[", "]", "for", "opt", "in", "jvm_opts", ":", "if", "opt", ".", "startswith", "(", "\"-Xmx\"", ")", ":", "spec", "=", "opt", "[", ...
GRIDSS does not recommend setting memory between 32 and 48Gb. https://github.com/PapenfussLab/gridss#memory-usage
[ "GRIDSS", "does", "not", "recommend", "setting", "memory", "between", "32", "and", "48Gb", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gridss.py#L70-L94
238,047
bcbio/bcbio-nextgen
bcbio/structural/gridss.py
_setup_reference_files
def _setup_reference_files(data, tx_out_dir): """Create a reference directory with fasta and bwa indices. GRIDSS requires all files in a single directory, so setup with symlinks. This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases` during YAML sample setup. """ aligner = dd.get_aligner(data) or "bwa" out_dir = utils.safe_makedir(os.path.join(tx_out_dir, aligner)) ref_fasta = dd.get_ref_file(data) ref_files = ["%s%s" % (utils.splitext_plus(ref_fasta)[0], ext) for ext in [".fa", ".fa.fai", ".dict"]] for orig_file in ref_files + tz.get_in(("reference", aligner, "indexes"), data): utils.symlink_plus(orig_file, os.path.join(out_dir, os.path.basename(orig_file))) return os.path.join(out_dir, os.path.basename(ref_fasta))
python
def _setup_reference_files(data, tx_out_dir): aligner = dd.get_aligner(data) or "bwa" out_dir = utils.safe_makedir(os.path.join(tx_out_dir, aligner)) ref_fasta = dd.get_ref_file(data) ref_files = ["%s%s" % (utils.splitext_plus(ref_fasta)[0], ext) for ext in [".fa", ".fa.fai", ".dict"]] for orig_file in ref_files + tz.get_in(("reference", aligner, "indexes"), data): utils.symlink_plus(orig_file, os.path.join(out_dir, os.path.basename(orig_file))) return os.path.join(out_dir, os.path.basename(ref_fasta))
[ "def", "_setup_reference_files", "(", "data", ",", "tx_out_dir", ")", ":", "aligner", "=", "dd", ".", "get_aligner", "(", "data", ")", "or", "\"bwa\"", "out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "tx_out_dir", ...
Create a reference directory with fasta and bwa indices. GRIDSS requires all files in a single directory, so setup with symlinks. This needs bwa aligner indices available, which we ensure with `get_aligner_with_aliases` during YAML sample setup.
[ "Create", "a", "reference", "directory", "with", "fasta", "and", "bwa", "indices", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gridss.py#L96-L109
238,048
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_add_versions
def _add_versions(samples): """Add tool and data versions to the summary. """ samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]), "data": provenancedata.write_versions(samples[0]["dirs"], samples)} return samples
python
def _add_versions(samples): samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]), "data": provenancedata.write_versions(samples[0]["dirs"], samples)} return samples
[ "def", "_add_versions", "(", "samples", ")", ":", "samples", "[", "0", "]", "[", "\"versions\"", "]", "=", "{", "\"tools\"", ":", "programs", ".", "write_versions", "(", "samples", "[", "0", "]", "[", "\"dirs\"", "]", ",", "samples", "[", "0", "]", "...
Add tool and data versions to the summary.
[ "Add", "tool", "and", "data", "versions", "to", "the", "summary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L109-L114
238,049
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_summarize_inputs
def _summarize_inputs(samples, out_dir): """Summarize inputs for MultiQC reporting in display. """ logger.info("summarize target information") if samples[0].get("analysis", "").lower() in ["variant", "variant2"]: metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) samples = _merge_target_information(samples, metrics_dir) logger.info("summarize fastqc") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc")) with utils.chdir(out_dir): _merge_fastqc(samples) preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)] if preseq_samples: logger.info("summarize preseq") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq")) with utils.chdir(out_dir): _merge_preseq(preseq_samples) return samples
python
def _summarize_inputs(samples, out_dir): logger.info("summarize target information") if samples[0].get("analysis", "").lower() in ["variant", "variant2"]: metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) samples = _merge_target_information(samples, metrics_dir) logger.info("summarize fastqc") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc")) with utils.chdir(out_dir): _merge_fastqc(samples) preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)] if preseq_samples: logger.info("summarize preseq") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq")) with utils.chdir(out_dir): _merge_preseq(preseq_samples) return samples
[ "def", "_summarize_inputs", "(", "samples", ",", "out_dir", ")", ":", "logger", ".", "info", "(", "\"summarize target information\"", ")", "if", "samples", "[", "0", "]", ".", "get", "(", "\"analysis\"", ",", "\"\"", ")", ".", "lower", "(", ")", "in", "[...
Summarize inputs for MultiQC reporting in display.
[ "Summarize", "inputs", "for", "MultiQC", "reporting", "in", "display", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L116-L135
238,050
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_work_path_to_rel_final_path
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir): """ Check if `path` is a work-rooted path, and convert to a relative final-rooted path """ if not path or not isinstance(path, str): return path upload_path = None # First, check in the mapping: if it's there is a direct reference and # it's a file, we immediately return it (saves lots of iterations) if upload_path_mapping.get(path) is not None and os.path.isfile(path): upload_path = upload_path_mapping[path] else: # Not a file: check for elements in the mapping that contain # it paths_to_check = [key for key in upload_path_mapping if path.startswith(key)] if paths_to_check: for work_path in paths_to_check: if os.path.isdir(work_path): final_path = upload_path_mapping[work_path] upload_path = path.replace(work_path, final_path) break if upload_path is not None: return os.path.relpath(upload_path, upload_base_dir) else: return None
python
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir): if not path or not isinstance(path, str): return path upload_path = None # First, check in the mapping: if it's there is a direct reference and # it's a file, we immediately return it (saves lots of iterations) if upload_path_mapping.get(path) is not None and os.path.isfile(path): upload_path = upload_path_mapping[path] else: # Not a file: check for elements in the mapping that contain # it paths_to_check = [key for key in upload_path_mapping if path.startswith(key)] if paths_to_check: for work_path in paths_to_check: if os.path.isdir(work_path): final_path = upload_path_mapping[work_path] upload_path = path.replace(work_path, final_path) break if upload_path is not None: return os.path.relpath(upload_path, upload_base_dir) else: return None
[ "def", "_work_path_to_rel_final_path", "(", "path", ",", "upload_path_mapping", ",", "upload_base_dir", ")", ":", "if", "not", "path", "or", "not", "isinstance", "(", "path", ",", "str", ")", ":", "return", "path", "upload_path", "=", "None", "# First, check in ...
Check if `path` is a work-rooted path, and convert to a relative final-rooted path
[ "Check", "if", "path", "is", "a", "work", "-", "rooted", "path", "and", "convert", "to", "a", "relative", "final", "-", "rooted", "path" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L195-L222
238,051
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_one_exists
def _one_exists(input_files): """ at least one file must exist for multiqc to run properly """ for f in input_files: if os.path.exists(f): return True return False
python
def _one_exists(input_files): for f in input_files: if os.path.exists(f): return True return False
[ "def", "_one_exists", "(", "input_files", ")", ":", "for", "f", "in", "input_files", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "return", "True", "return", "False" ]
at least one file must exist for multiqc to run properly
[ "at", "least", "one", "file", "must", "exist", "for", "multiqc", "to", "run", "properly" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L224-L231
238,052
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_get_input_files
def _get_input_files(samples, base_dir, tx_out_dir): """Retrieve input files, keyed by sample and QC method name. Stages files into the work directory to ensure correct names for MultiQC sample assessment when running with CWL. """ in_files = collections.defaultdict(list) for data in samples: sum_qc = tz.get_in(["summary", "qc"], data, {}) if sum_qc in [None, "None"]: sum_qc = {} elif isinstance(sum_qc, six.string_types): sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc} elif not isinstance(sum_qc, dict): raise ValueError("Unexpected summary qc: %s" % sum_qc) for program, pfiles in sum_qc.items(): if isinstance(pfiles, dict): pfiles = [pfiles["base"]] + pfiles.get("secondary", []) # CWL: presents output files as single file plus associated secondary files elif isinstance(pfiles, six.string_types): if os.path.exists(pfiles): pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames] else: pfiles = [] in_files[(dd.get_sample_name(data), program)].extend(pfiles) staged_files = [] for (sample, program), files in in_files.items(): cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program)) for f in files: if _check_multiqc_input(f) and _is_good_file_for_multiqc(f): if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]): staged_f = os.path.join(cur_dir, os.path.basename(f)) shutil.copy(f, staged_f) staged_files.append(staged_f) else: staged_files.append(f) staged_files.extend(get_qsig_multiqc_files(samples)) # Back compatible -- to migrate to explicit specifications in input YAML if not any([cwlutils.is_cwl_run(d) for d in samples]): staged_files += ["trimmed", "htseq-count/*summary"] # Add in created target_info file if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")): staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")] return sorted(list(set(staged_files)))
python
def _get_input_files(samples, base_dir, tx_out_dir): in_files = collections.defaultdict(list) for data in samples: sum_qc = tz.get_in(["summary", "qc"], data, {}) if sum_qc in [None, "None"]: sum_qc = {} elif isinstance(sum_qc, six.string_types): sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc} elif not isinstance(sum_qc, dict): raise ValueError("Unexpected summary qc: %s" % sum_qc) for program, pfiles in sum_qc.items(): if isinstance(pfiles, dict): pfiles = [pfiles["base"]] + pfiles.get("secondary", []) # CWL: presents output files as single file plus associated secondary files elif isinstance(pfiles, six.string_types): if os.path.exists(pfiles): pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames] else: pfiles = [] in_files[(dd.get_sample_name(data), program)].extend(pfiles) staged_files = [] for (sample, program), files in in_files.items(): cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program)) for f in files: if _check_multiqc_input(f) and _is_good_file_for_multiqc(f): if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]): staged_f = os.path.join(cur_dir, os.path.basename(f)) shutil.copy(f, staged_f) staged_files.append(staged_f) else: staged_files.append(f) staged_files.extend(get_qsig_multiqc_files(samples)) # Back compatible -- to migrate to explicit specifications in input YAML if not any([cwlutils.is_cwl_run(d) for d in samples]): staged_files += ["trimmed", "htseq-count/*summary"] # Add in created target_info file if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")): staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")] return sorted(list(set(staged_files)))
[ "def", "_get_input_files", "(", "samples", ",", "base_dir", ",", "tx_out_dir", ")", ":", "in_files", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "samples", ":", "sum_qc", "=", "tz", ".", "get_in", "(", "[", "\"summary\"",...
Retrieve input files, keyed by sample and QC method name. Stages files into the work directory to ensure correct names for MultiQC sample assessment when running with CWL.
[ "Retrieve", "input", "files", "keyed", "by", "sample", "and", "QC", "method", "name", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L233-L276
238,053
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_group_by_sample_and_batch
def _group_by_sample_and_batch(samples): """Group samples split by QC method back one per sample-batch. """ out = collections.defaultdict(list) for data in samples: out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data) return [xs[0] for xs in out.values()]
python
def _group_by_sample_and_batch(samples): out = collections.defaultdict(list) for data in samples: out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data) return [xs[0] for xs in out.values()]
[ "def", "_group_by_sample_and_batch", "(", "samples", ")", ":", "out", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "samples", ":", "out", "[", "(", "dd", ".", "get_sample_name", "(", "data", ")", ",", "dd", ".", "get_ali...
Group samples split by QC method back one per sample-batch.
[ "Group", "samples", "split", "by", "QC", "method", "back", "one", "per", "sample", "-", "batch", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L287-L293
238,054
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_has_bcftools_germline_stats
def _has_bcftools_germline_stats(data): """Check for the presence of a germline stats file, CWL compatible. """ stats_file = tz.get_in(["summary", "qc"], data) if isinstance(stats_file, dict): stats_file = tz.get_in(["variants", "base"], stats_file) if not stats_file: stats_file = "" return stats_file.find("bcftools_stats_germline") > 0
python
def _has_bcftools_germline_stats(data): stats_file = tz.get_in(["summary", "qc"], data) if isinstance(stats_file, dict): stats_file = tz.get_in(["variants", "base"], stats_file) if not stats_file: stats_file = "" return stats_file.find("bcftools_stats_germline") > 0
[ "def", "_has_bcftools_germline_stats", "(", "data", ")", ":", "stats_file", "=", "tz", ".", "get_in", "(", "[", "\"summary\"", ",", "\"qc\"", "]", ",", "data", ")", "if", "isinstance", "(", "stats_file", ",", "dict", ")", ":", "stats_file", "=", "tz", "....
Check for the presence of a germline stats file, CWL compatible.
[ "Check", "for", "the", "presence", "of", "a", "germline", "stats", "file", "CWL", "compatible", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L397-L405
238,055
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_is_good_file_for_multiqc
def _is_good_file_for_multiqc(fpath): """Returns False if the file is binary or image.""" # Use mimetypes to exclude binary files where possible (ftype, encoding) = mimetypes.guess_type(fpath) if encoding is not None: return False if ftype is not None and ftype.startswith('image'): return False return True
python
def _is_good_file_for_multiqc(fpath): # Use mimetypes to exclude binary files where possible (ftype, encoding) = mimetypes.guess_type(fpath) if encoding is not None: return False if ftype is not None and ftype.startswith('image'): return False return True
[ "def", "_is_good_file_for_multiqc", "(", "fpath", ")", ":", "# Use mimetypes to exclude binary files where possible", "(", "ftype", ",", "encoding", ")", "=", "mimetypes", ".", "guess_type", "(", "fpath", ")", "if", "encoding", "is", "not", "None", ":", "return", ...
Returns False if the file is binary or image.
[ "Returns", "False", "if", "the", "file", "is", "binary", "or", "image", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L414-L422
238,056
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_parse_disambiguate
def _parse_disambiguate(disambiguatestatsfilename): """Parse disambiguation stats from given file. """ disambig_stats = [0, 0, 0] with open(disambiguatestatsfilename, "r") as in_handle: for i, line in enumerate(in_handle): fields = line.strip().split("\t") if i == 0: assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs'] else: disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])] return disambig_stats
python
def _parse_disambiguate(disambiguatestatsfilename): disambig_stats = [0, 0, 0] with open(disambiguatestatsfilename, "r") as in_handle: for i, line in enumerate(in_handle): fields = line.strip().split("\t") if i == 0: assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs'] else: disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])] return disambig_stats
[ "def", "_parse_disambiguate", "(", "disambiguatestatsfilename", ")", ":", "disambig_stats", "=", "[", "0", ",", "0", ",", "0", "]", "with", "open", "(", "disambiguatestatsfilename", ",", "\"r\"", ")", "as", "in_handle", ":", "for", "i", ",", "line", "in", ...
Parse disambiguation stats from given file.
[ "Parse", "disambiguation", "stats", "from", "given", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L424-L435
238,057
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_merge_metrics
def _merge_metrics(samples, out_dir): """Merge metrics from multiple QC steps """ logger.info("summarize metrics") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) sample_metrics = collections.defaultdict(dict) for s in samples: s = _add_disambiguate(s) m = tz.get_in(['summary', 'metrics'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in m.keys(): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) out = [] for sample_name, m in sample_metrics.items(): sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name) with file_transaction(samples[0], sample_file) as tx_out_file: dt = pd.DataFrame(m, index=['1']) dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns] dt['sample'] = sample_name dt['rRNA_rate'] = m.get('rRNA_rate', "NA") dt['RiP_pct'] = "%.3f" % (int(m.get("RiP", 0)) / float(m.get("Total_reads", 1)) * 100) dt = _fix_duplicated_rate(dt) dt.transpose().to_csv(tx_out_file, sep="\t", header=False) out.append(sample_file) return out
python
def _merge_metrics(samples, out_dir): logger.info("summarize metrics") out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics")) sample_metrics = collections.defaultdict(dict) for s in samples: s = _add_disambiguate(s) m = tz.get_in(['summary', 'metrics'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in m.keys(): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) out = [] for sample_name, m in sample_metrics.items(): sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name) with file_transaction(samples[0], sample_file) as tx_out_file: dt = pd.DataFrame(m, index=['1']) dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns] dt['sample'] = sample_name dt['rRNA_rate'] = m.get('rRNA_rate', "NA") dt['RiP_pct'] = "%.3f" % (int(m.get("RiP", 0)) / float(m.get("Total_reads", 1)) * 100) dt = _fix_duplicated_rate(dt) dt.transpose().to_csv(tx_out_file, sep="\t", header=False) out.append(sample_file) return out
[ "def", "_merge_metrics", "(", "samples", ",", "out_dir", ")", ":", "logger", ".", "info", "(", "\"summarize metrics\"", ")", "out_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"report\"", ",", "\"metri...
Merge metrics from multiple QC steps
[ "Merge", "metrics", "from", "multiple", "QC", "steps" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L456-L484
238,058
bcbio/bcbio-nextgen
bcbio/qc/multiqc.py
_merge_fastqc
def _merge_fastqc(samples): """ merge all fastqc samples into one by module """ fastqc_list = collections.defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
python
def _merge_fastqc(samples): fastqc_list = collections.defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
[ "def", "_merge_fastqc", "(", "samples", ")", ":", "fastqc_list", "=", "collections", ".", "defaultdict", "(", "list", ")", "seen", "=", "set", "(", ")", "for", "data", "in", "samples", ":", "name", "=", "dd", ".", "get_sample_name", "(", "data", ")", "...
merge all fastqc samples into one by module
[ "merge", "all", "fastqc", "samples", "into", "one", "by", "module" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/multiqc.py#L486-L511
238,059
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
_create_plot
def _create_plot(tumor, in_glob, out_ext, page=1): """Create an output plot for the given PDF in the images directory. """ out_dir = utils.safe_makedir("images") out_name = os.path.join(out_dir, "%s-%s" % (tumor, out_ext)) in_file = glob.glob(in_glob)[0] cmd = ["pdftoppm", in_file, out_name, "-png", "-f", page, "-singlefile"] if not os.path.exists(out_name + ".png"): subprocess.check_call([str(x) for x in cmd]) return out_name + ".png"
python
def _create_plot(tumor, in_glob, out_ext, page=1): out_dir = utils.safe_makedir("images") out_name = os.path.join(out_dir, "%s-%s" % (tumor, out_ext)) in_file = glob.glob(in_glob)[0] cmd = ["pdftoppm", in_file, out_name, "-png", "-f", page, "-singlefile"] if not os.path.exists(out_name + ".png"): subprocess.check_call([str(x) for x in cmd]) return out_name + ".png"
[ "def", "_create_plot", "(", "tumor", ",", "in_glob", ",", "out_ext", ",", "page", "=", "1", ")", ":", "out_dir", "=", "utils", ".", "safe_makedir", "(", "\"images\"", ")", "out_name", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s-%s\"...
Create an output plot for the given PDF in the images directory.
[ "Create", "an", "output", "plot", "for", "the", "given", "PDF", "in", "the", "images", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L64-L73
238,060
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
_get_cromwell_execution_dir
def _get_cromwell_execution_dir(base_dir, target_glob): """Retrieve the baseline directory with cromwell output files. Handles Cromwell restarts where there are multiple work directories and we traverse symlinks back to the original. """ cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0] if os.path.exists(os.path.join(cur_dir, "cwl.output.json")): return base_dir else: symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script"))) ref_base = os.path.dirname(base_dir) new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0] return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob)
python
def _get_cromwell_execution_dir(base_dir, target_glob): cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0] if os.path.exists(os.path.join(cur_dir, "cwl.output.json")): return base_dir else: symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script"))) ref_base = os.path.dirname(base_dir) new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0] return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob)
[ "def", "_get_cromwell_execution_dir", "(", "base_dir", ",", "target_glob", ")", ":", "cur_dir", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "base_dir", ",", "target_glob", ")", ")", "[", "0", "]", "if", "os", ".", "path", ".", ...
Retrieve the baseline directory with cromwell output files. Handles Cromwell restarts where there are multiple work directories and we traverse symlinks back to the original.
[ "Retrieve", "the", "baseline", "directory", "with", "cromwell", "output", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L232-L245
238,061
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_bam_inputs
def prep_bam_inputs(out_dir, sample, call_file, bam_file): """Prepare expected input BAM files from pre-aligned. """ base = utils.splitext_plus(os.path.basename(bam_file))[0] with open(call_file) as in_handle: for cur_hla in (x.strip() for x in in_handle): out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)), "%s.type.%s.filtered.bam" % (base, cur_hla)) if not os.path.exists(out_file): cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla] subprocess.check_call(cmd)
python
def prep_bam_inputs(out_dir, sample, call_file, bam_file): base = utils.splitext_plus(os.path.basename(bam_file))[0] with open(call_file) as in_handle: for cur_hla in (x.strip() for x in in_handle): out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)), "%s.type.%s.filtered.bam" % (base, cur_hla)) if not os.path.exists(out_file): cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla] subprocess.check_call(cmd)
[ "def", "prep_bam_inputs", "(", "out_dir", ",", "sample", ",", "call_file", ",", "bam_file", ")", ":", "base", "=", "utils", ".", "splitext_plus", "(", "os", ".", "path", ".", "basename", "(", "bam_file", ")", ")", "[", "0", "]", "with", "open", "(", ...
Prepare expected input BAM files from pre-aligned.
[ "Prepare", "expected", "input", "BAM", "files", "from", "pre", "-", "aligned", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L247-L257
238,062
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
get_hla
def get_hla(sample, cromwell_dir, hla_glob): """Retrieve HLA calls and input fastqs for a sample. """ hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0] fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq") calls = os.path.join(hla_dir, "%s-optitype.csv" % sample) return fastq, calls
python
def get_hla(sample, cromwell_dir, hla_glob): hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0] fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq") calls = os.path.join(hla_dir, "%s-optitype.csv" % sample) return fastq, calls
[ "def", "get_hla", "(", "sample", ",", "cromwell_dir", ",", "hla_glob", ")", ":", "hla_dir", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "cromwell_dir", ",", "hla_glob", ",", "\"align\"", ",", "sample", ",", "\"hla\"", ")", ")", ...
Retrieve HLA calls and input fastqs for a sample.
[ "Retrieve", "HLA", "calls", "and", "input", "fastqs", "for", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L278-L284
238,063
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
name_to_absolute
def name_to_absolute(x): """Convert standard hg38 HLA name into ABSOLUTE naming. """ for c in ["-", "*", ":"]: x = x.replace(c, "_") x = x.lower() return x
python
def name_to_absolute(x): for c in ["-", "*", ":"]: x = x.replace(c, "_") x = x.lower() return x
[ "def", "name_to_absolute", "(", "x", ")", ":", "for", "c", "in", "[", "\"-\"", ",", "\"*\"", ",", "\":\"", "]", ":", "x", "=", "x", ".", "replace", "(", "c", ",", "\"_\"", ")", "x", "=", "x", ".", "lower", "(", ")", "return", "x" ]
Convert standard hg38 HLA name into ABSOLUTE naming.
[ "Convert", "standard", "hg38", "HLA", "name", "into", "ABSOLUTE", "naming", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L286-L292
238,064
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
get_hla_choice
def get_hla_choice(h, hlas, normal_bam, tumor_bam): """Retrieve matching HLA with best read support in both tumor and normal """ def get_counts(bam_file): counts = {} for line in subprocess.check_output(["samtools", "idxstats", bam_file]).split("\n"): if line.startswith(h): name, _, count, _ = line.split() counts[name] = int(count) return counts tcounts = get_counts(tumor_bam) ncounts = get_counts(normal_bam) check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0] cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True) #print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0])) return cur_hlas[0]
python
def get_hla_choice(h, hlas, normal_bam, tumor_bam): def get_counts(bam_file): counts = {} for line in subprocess.check_output(["samtools", "idxstats", bam_file]).split("\n"): if line.startswith(h): name, _, count, _ = line.split() counts[name] = int(count) return counts tcounts = get_counts(tumor_bam) ncounts = get_counts(normal_bam) check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0] cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True) #print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0])) return cur_hlas[0]
[ "def", "get_hla_choice", "(", "h", ",", "hlas", ",", "normal_bam", ",", "tumor_bam", ")", ":", "def", "get_counts", "(", "bam_file", ")", ":", "counts", "=", "{", "}", "for", "line", "in", "subprocess", ".", "check_output", "(", "[", "\"samtools\"", ",",...
Retrieve matching HLA with best read support in both tumor and normal
[ "Retrieve", "matching", "HLA", "with", "best", "read", "support", "in", "both", "tumor", "and", "normal" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L294-L309
238,065
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_hla
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam): """Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move """ work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample) with open(calls) as in_handle: with open(hla_file, "w") as out_handle: next(in_handle) # header for line in in_handle: _, _, a, _, _ = line.strip().split(",") a1, a2 = a.split(";") out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n") out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n") return hla_file
python
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam): work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample) with open(calls) as in_handle: with open(hla_file, "w") as out_handle: next(in_handle) # header for line in in_handle: _, _, a, _, _ = line.strip().split(",") a1, a2 = a.split(";") out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n") out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n") return hla_file
[ "def", "prep_hla", "(", "work_dir", ",", "sample", ",", "calls", ",", "hlas", ",", "normal_bam", ",", "tumor_bam", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "sample", ",", "\"inp...
Convert HLAs into ABSOLUTE format for use with LOHHLA. LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move
[ "Convert", "HLAs", "into", "ABSOLUTE", "format", "for", "use", "with", "LOHHLA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L311-L326
238,066
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
prep_ploidy
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob): """Create LOHHLA compatible input ploidy file from PureCN output. """ purecn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=sample, method="purecn", ext="purecn.csv")) work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) out_file = os.path.join(work_dir, "%s-solutions.txt" % sample) with open(purecn_file) as in_handle: reader = csv.reader(in_handle) purecn_stats = dict(zip(next(reader), next(reader))) with open(out_file, "w") as out_handle: out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n") lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0] out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, purecn_stats["Ploidy"], purecn_stats["Purity"], purecn_stats["Ploidy"])) return out_file
python
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob): purecn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=sample, method="purecn", ext="purecn.csv")) work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs")) out_file = os.path.join(work_dir, "%s-solutions.txt" % sample) with open(purecn_file) as in_handle: reader = csv.reader(in_handle) purecn_stats = dict(zip(next(reader), next(reader))) with open(out_file, "w") as out_handle: out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n") lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0] out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, purecn_stats["Ploidy"], purecn_stats["Purity"], purecn_stats["Ploidy"])) return out_file
[ "def", "prep_ploidy", "(", "work_dir", ",", "sample", ",", "bam_file", ",", "cromwell_dir", ",", "sv_glob", ")", ":", "purecn_file", "=", "_get_cromwell_file", "(", "cromwell_dir", ",", "sv_glob", ",", "dict", "(", "sample", "=", "sample", ",", "method", "="...
Create LOHHLA compatible input ploidy file from PureCN output.
[ "Create", "LOHHLA", "compatible", "input", "ploidy", "file", "from", "PureCN", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L328-L342
238,067
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie.py
_bowtie_args_from_config
def _bowtie_args_from_config(data): """Configurable high level options for bowtie. """ config = data['config'] qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-M", 1] if multi_mappers else ["-m", 1] multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags cores = config.get("resources", {}).get("bowtie", {}).get("cores", None) num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + qual_flags + multi_flags
python
def _bowtie_args_from_config(data): config = data['config'] qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers", True) multi_flags = ["-M", 1] if multi_mappers else ["-m", 1] multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags cores = config.get("resources", {}).get("bowtie", {}).get("cores", None) num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] return core_flags + qual_flags + multi_flags
[ "def", "_bowtie_args_from_config", "(", "data", ")", ":", "config", "=", "data", "[", "'config'", "]", "qual_format", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"quality_format\"", ",", "\"\"", ")", "if", "qual_format", ".", "lower", "(", ...
Configurable high level options for bowtie.
[ "Configurable", "high", "level", "options", "for", "bowtie", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie.py#L13-L28
238,068
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie.py
align
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Do standard or paired end alignment with bowtie. """ num_hits = 1 if data["analysis"].lower().startswith("smallrna-seq"): num_hits = 1000 config = data['config'] out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if fastq_file.endswith(".gz"): fastq_file = "<(gunzip -c %s)" % fastq_file if pair_file: pair_file = "<(gunzip -c %s)" % pair_file if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie", config)] cl += _bowtie_args_from_config(data) cl += extra_args if extra_args is not None else [] cl += ["-q", "-v", 2, "-k", num_hits, "-X", 2000, # default is too selective for most data "--best", "--strata", "--sam", ref_file] if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += [fastq_file] cl = [str(i) for i in cl] fix_rg_cmd = r"samtools addreplacerg -r '%s' -" % novoalign.get_rg_info(data["rgnames"]) if fix_rg_cmd: cmd = " ".join(cl) + " | " + fix_rg_cmd + " | " + tobam_cl else: cmd = " ".join(cl) + " | " + tobam_cl do.run(cmd, "Running Bowtie on %s and %s." % (fastq_file, pair_file), data) return out_file
python
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): num_hits = 1 if data["analysis"].lower().startswith("smallrna-seq"): num_hits = 1000 config = data['config'] out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if fastq_file.endswith(".gz"): fastq_file = "<(gunzip -c %s)" % fastq_file if pair_file: pair_file = "<(gunzip -c %s)" % pair_file if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie", config)] cl += _bowtie_args_from_config(data) cl += extra_args if extra_args is not None else [] cl += ["-q", "-v", 2, "-k", num_hits, "-X", 2000, # default is too selective for most data "--best", "--strata", "--sam", ref_file] if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += [fastq_file] cl = [str(i) for i in cl] fix_rg_cmd = r"samtools addreplacerg -r '%s' -" % novoalign.get_rg_info(data["rgnames"]) if fix_rg_cmd: cmd = " ".join(cl) + " | " + fix_rg_cmd + " | " + tobam_cl else: cmd = " ".join(cl) + " | " + tobam_cl do.run(cmd, "Running Bowtie on %s and %s." % (fastq_file, pair_file), data) return out_file
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ",", "extra_args", "=", "None", ")", ":", "num_hits", "=", "1", "if", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", ".", "s...
Do standard or paired end alignment with bowtie.
[ "Do", "standard", "or", "paired", "end", "alignment", "with", "bowtie", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie.py#L30-L74
238,069
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
subset_by_supported
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)): """Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types. """ support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name)) for c in convert.SUBSET_BY_SUPPORT["cnvkit"]] support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)] if len(support_files) == 0: return input_file else: out_file = os.path.join(work_dir, "%s-havesupport%s" % utils.splitext_plus(os.path.basename(input_file))) if not utils.file_uptodate(out_file, input_file): input_bed = _input_to_bed(input_file, work_dir, get_coords, headers) pass_coords = set([]) with file_transaction(data, out_file) as tx_out_file: support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files]) tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0] cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}" do.run(cmd.format(**locals()), "Intersect CNVs with support files") for r in pybedtools.BedTool(tmp_cmp_bed): pass_coords.add((str(r.chrom), str(r.start), str(r.stop))) with open(input_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: passes = True if not line.startswith(headers): passes = get_coords(line) in pass_coords if passes: out_handle.write(line) return out_file
python
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)): support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name)) for c in convert.SUBSET_BY_SUPPORT["cnvkit"]] support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)] if len(support_files) == 0: return input_file else: out_file = os.path.join(work_dir, "%s-havesupport%s" % utils.splitext_plus(os.path.basename(input_file))) if not utils.file_uptodate(out_file, input_file): input_bed = _input_to_bed(input_file, work_dir, get_coords, headers) pass_coords = set([]) with file_transaction(data, out_file) as tx_out_file: support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files]) tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0] cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}" do.run(cmd.format(**locals()), "Intersect CNVs with support files") for r in pybedtools.BedTool(tmp_cmp_bed): pass_coords.add((str(r.chrom), str(r.start), str(r.stop))) with open(input_file) as in_handle: with open(tx_out_file, "w") as out_handle: for line in in_handle: passes = True if not line.startswith(headers): passes = get_coords(line) in pass_coords if passes: out_handle.write(line) return out_file
[ "def", "subset_by_supported", "(", "input_file", ",", "get_coords", ",", "calls_by_name", ",", "work_dir", ",", "data", ",", "headers", "=", "(", "\"#\"", ",", ")", ")", ":", "support_files", "=", "[", "(", "c", ",", "tz", ".", "get_in", "(", "[", "c",...
Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types.
[ "Limit", "CNVkit", "input", "to", "calls", "with", "support", "from", "another", "caller", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L43-L76
238,070
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_input_to_bed
def _input_to_bed(theta_input, work_dir, get_coords, headers): """Convert input file to a BED file for comparisons """ theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0]) with open(theta_input) as in_handle: with open(theta_bed, "w") as out_handle: for line in in_handle: if not line.startswith(headers): chrom, start, end = get_coords(line) out_handle.write("\t".join([chrom, start, end]) + "\n") return theta_bed
python
def _input_to_bed(theta_input, work_dir, get_coords, headers): theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0]) with open(theta_input) as in_handle: with open(theta_bed, "w") as out_handle: for line in in_handle: if not line.startswith(headers): chrom, start, end = get_coords(line) out_handle.write("\t".join([chrom, start, end]) + "\n") return theta_bed
[ "def", "_input_to_bed", "(", "theta_input", ",", "work_dir", ",", "get_coords", ",", "headers", ")", ":", "theta_bed", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.bed\"", "%", "os", ".", "path", ".", "splitext", "(", "os", ".", "pa...
Convert input file to a BED file for comparisons
[ "Convert", "input", "file", "to", "a", "BED", "file", "for", "comparisons" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L88-L98
238,071
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_run_theta
def _run_theta(cnv_info, data, work_dir, run_n3=True): """Run theta, calculating subpopulations and normal contamination. """ out = {"caller": "theta"} max_normal = "0.9" opts = ["-m", max_normal] n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"), ".n2.results", ["-n", "2"] + opts, data) if n2_result: out["estimate"] = n2_result if run_n3: n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0] n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results", ["-n", "3", "--RESULTS", n2_result] + opts, data) if n3_result: best_result = _select_model(n2_bounds, n2_result, n3_result, os.path.join(work_dir, "n3"), data) out["estimate"] = best_result out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data) return out
python
def _run_theta(cnv_info, data, work_dir, run_n3=True): out = {"caller": "theta"} max_normal = "0.9" opts = ["-m", max_normal] n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"), ".n2.results", ["-n", "2"] + opts, data) if n2_result: out["estimate"] = n2_result if run_n3: n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0] n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results", ["-n", "3", "--RESULTS", n2_result] + opts, data) if n3_result: best_result = _select_model(n2_bounds, n2_result, n3_result, os.path.join(work_dir, "n3"), data) out["estimate"] = best_result out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data) return out
[ "def", "_run_theta", "(", "cnv_info", ",", "data", ",", "work_dir", ",", "run_n3", "=", "True", ")", ":", "out", "=", "{", "\"caller\"", ":", "\"theta\"", "}", "max_normal", "=", "\"0.9\"", "opts", "=", "[", "\"-m\"", ",", "max_normal", "]", "n2_result",...
Run theta, calculating subpopulations and normal contamination.
[ "Run", "theta", "calculating", "subpopulations", "and", "normal", "contamination", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L100-L120
238,072
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_update_with_calls
def _update_with_calls(result_file, cnv_file): """Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA. """ results = {} with open(result_file) as in_handle: in_handle.readline() # header _, _, cs, ps = in_handle.readline().strip().split() for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))): results[i] = (c, p) cnvs = {} with open(cnv_file) as in_handle: for line in in_handle: chrom, start, end, _, count = line.rstrip().split()[:5] cnvs[(chrom, start, end)] = count def update(i, line): parts = line.rstrip().split("\t") chrom, start, end = parts[1:4] parts += cnvs.get((chrom, start, end), ".") parts += list(results[i]) return "\t".join(parts) + "\n" return update
python
def _update_with_calls(result_file, cnv_file): results = {} with open(result_file) as in_handle: in_handle.readline() # header _, _, cs, ps = in_handle.readline().strip().split() for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))): results[i] = (c, p) cnvs = {} with open(cnv_file) as in_handle: for line in in_handle: chrom, start, end, _, count = line.rstrip().split()[:5] cnvs[(chrom, start, end)] = count def update(i, line): parts = line.rstrip().split("\t") chrom, start, end = parts[1:4] parts += cnvs.get((chrom, start, end), ".") parts += list(results[i]) return "\t".join(parts) + "\n" return update
[ "def", "_update_with_calls", "(", "result_file", ",", "cnv_file", ")", ":", "results", "=", "{", "}", "with", "open", "(", "result_file", ")", "as", "in_handle", ":", "in_handle", ".", "readline", "(", ")", "# header", "_", ",", "_", ",", "cs", ",", "p...
Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA.
[ "Update", "bounds", "with", "calls", "from", "CNVkit", "inferred", "copy", "numbers", "and", "p", "-", "values", "from", "THetA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L122-L142
238,073
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_merge_theta_calls
def _merge_theta_calls(bounds_file, result_file, cnv_file, data): """Create a final output file with merged CNVkit and THetA copy and population estimates. """ out_file = "%s-merged.txt" % (result_file.replace(".BEST.results", "")) if not utils.file_uptodate(out_file, result_file): with file_transaction(data, out_file) as tx_out_file: updater = _update_with_calls(result_file, cnv_file) with open(bounds_file) as in_handle: with open(tx_out_file, "w") as out_handle: i = 0 for line in in_handle: if line.startswith("#"): parts = line.rstrip().split("\t") parts += ["cnv", "pop_cnvs", "pop_pvals"] out_handle.write("\t".join(parts) + "\n") else: out_handle.write(updater(i, line)) i += 1 return out_file
python
def _merge_theta_calls(bounds_file, result_file, cnv_file, data): out_file = "%s-merged.txt" % (result_file.replace(".BEST.results", "")) if not utils.file_uptodate(out_file, result_file): with file_transaction(data, out_file) as tx_out_file: updater = _update_with_calls(result_file, cnv_file) with open(bounds_file) as in_handle: with open(tx_out_file, "w") as out_handle: i = 0 for line in in_handle: if line.startswith("#"): parts = line.rstrip().split("\t") parts += ["cnv", "pop_cnvs", "pop_pvals"] out_handle.write("\t".join(parts) + "\n") else: out_handle.write(updater(i, line)) i += 1 return out_file
[ "def", "_merge_theta_calls", "(", "bounds_file", ",", "result_file", ",", "cnv_file", ",", "data", ")", ":", "out_file", "=", "\"%s-merged.txt\"", "%", "(", "result_file", ".", "replace", "(", "\".BEST.results\"", ",", "\"\"", ")", ")", "if", "not", "utils", ...
Create a final output file with merged CNVkit and THetA copy and population estimates.
[ "Create", "a", "final", "output", "file", "with", "merged", "CNVkit", "and", "THetA", "copy", "and", "population", "estimates", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L144-L162
238,074
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_select_model
def _select_model(n2_bounds, n2_result, n3_result, out_dir, data): """Run final model selection from n=2 and n=3 options. """ n2_out_file = n2_result.replace(".n2.results", ".BEST.results") n3_out_file = n3_result.replace(".n3.results", ".BEST.results") if not utils.file_exists(n2_out_file) and not utils.file_exists(n3_out_file): cmd = _get_cmd("ModelSelection.py") + [n2_bounds, n2_result, n3_result] do.run(cmd, "Select best THetA model") if utils.file_exists(n2_out_file): return n2_out_file else: assert utils.file_exists(n3_out_file) return n3_out_file
python
def _select_model(n2_bounds, n2_result, n3_result, out_dir, data): n2_out_file = n2_result.replace(".n2.results", ".BEST.results") n3_out_file = n3_result.replace(".n3.results", ".BEST.results") if not utils.file_exists(n2_out_file) and not utils.file_exists(n3_out_file): cmd = _get_cmd("ModelSelection.py") + [n2_bounds, n2_result, n3_result] do.run(cmd, "Select best THetA model") if utils.file_exists(n2_out_file): return n2_out_file else: assert utils.file_exists(n3_out_file) return n3_out_file
[ "def", "_select_model", "(", "n2_bounds", ",", "n2_result", ",", "n3_result", ",", "out_dir", ",", "data", ")", ":", "n2_out_file", "=", "n2_result", ".", "replace", "(", "\".n2.results\"", ",", "\".BEST.results\"", ")", "n3_out_file", "=", "n3_result", ".", "...
Run final model selection from n=2 and n=3 options.
[ "Run", "final", "model", "selection", "from", "n", "=", "2", "and", "n", "=", "3", "options", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L164-L176
238,075
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_safe_run_theta
def _safe_run_theta(input_file, out_dir, output_ext, args, data): """Run THetA, catching and continuing on any errors. """ out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext) skip_file = out_file + ".skipped" if utils.file_exists(skip_file): return None if not utils.file_exists(out_file): with file_transaction(data, out_dir) as tx_out_dir: utils.safe_makedir(tx_out_dir) cmd = _get_cmd("RunTHetA.py") + args + \ [input_file, "--NUM_PROCESSES", dd.get_cores(data), "--FORCE", "-d", tx_out_dir] try: do.run(cmd, "Run THetA to calculate purity", log_error=False) except subprocess.CalledProcessError as msg: if ("Number of intervals must be greater than 1" in str(msg) or "This sample isn't a good candidate for THetA analysis" in str(msg)): with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle: out_handle.write("Expected TheTA failure, skipping") return None else: raise return out_file
python
def _safe_run_theta(input_file, out_dir, output_ext, args, data): out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext) skip_file = out_file + ".skipped" if utils.file_exists(skip_file): return None if not utils.file_exists(out_file): with file_transaction(data, out_dir) as tx_out_dir: utils.safe_makedir(tx_out_dir) cmd = _get_cmd("RunTHetA.py") + args + \ [input_file, "--NUM_PROCESSES", dd.get_cores(data), "--FORCE", "-d", tx_out_dir] try: do.run(cmd, "Run THetA to calculate purity", log_error=False) except subprocess.CalledProcessError as msg: if ("Number of intervals must be greater than 1" in str(msg) or "This sample isn't a good candidate for THetA analysis" in str(msg)): with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle: out_handle.write("Expected TheTA failure, skipping") return None else: raise return out_file
[ "def", "_safe_run_theta", "(", "input_file", ",", "out_dir", ",", "output_ext", ",", "args", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "_split_theta_ext", "(", "input_file", ")", "+", "output_ext", ")", ...
Run THetA, catching and continuing on any errors.
[ "Run", "THetA", "catching", "and", "continuing", "on", "any", "errors", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L178-L201
238,076
bcbio/bcbio-nextgen
bcbio/heterogeneity/theta.py
_get_cmd
def _get_cmd(cmd): """Retrieve required commands for running THetA with our local bcbio python. """ check_cmd = "RunTHetA.py" try: local_cmd = subprocess.check_output(["which", check_cmd]).strip() except subprocess.CalledProcessError: return None return [sys.executable, "%s/%s" % (os.path.dirname(os.path.realpath(local_cmd)), cmd)]
python
def _get_cmd(cmd): check_cmd = "RunTHetA.py" try: local_cmd = subprocess.check_output(["which", check_cmd]).strip() except subprocess.CalledProcessError: return None return [sys.executable, "%s/%s" % (os.path.dirname(os.path.realpath(local_cmd)), cmd)]
[ "def", "_get_cmd", "(", "cmd", ")", ":", "check_cmd", "=", "\"RunTHetA.py\"", "try", ":", "local_cmd", "=", "subprocess", ".", "check_output", "(", "[", "\"which\"", ",", "check_cmd", "]", ")", ".", "strip", "(", ")", "except", "subprocess", ".", "CalledPr...
Retrieve required commands for running THetA with our local bcbio python.
[ "Retrieve", "required", "commands", "for", "running", "THetA", "with", "our", "local", "bcbio", "python", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/theta.py#L213-L221
238,077
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
run
def run(data): """Proxy function to run the tool""" sample = data[0][0] work_dir = dd.get_work_dir(sample) out_dir = os.path.join(work_dir, "mirge") lib = _find_lib(sample) mirge = _find_mirge(sample) bowtie = _find_bowtie(sample) sps = dd.get_species(sample) species = SPS.get(sps, "") if not species: raise ValueError("species not supported (hsa, mmu, rno, dre, cel, dme): %s" % sps) if not lib: raise ValueError("-lib option is not set up in resources for mirge tool." " Read above warnings lines.") if not utils.file_exists(out_dir): with tx_tmpdir() as tmp_dir: sample_file = _create_sample_file(data, tmp_dir) do.run(_cmd().format(**locals()), "Running miRge2.0.") shutil.move(tmp_dir, out_dir) return [os.path.abspath(fn) for fn in glob.glob(os.path.join(out_dir, "*", "*"))]
python
def run(data): sample = data[0][0] work_dir = dd.get_work_dir(sample) out_dir = os.path.join(work_dir, "mirge") lib = _find_lib(sample) mirge = _find_mirge(sample) bowtie = _find_bowtie(sample) sps = dd.get_species(sample) species = SPS.get(sps, "") if not species: raise ValueError("species not supported (hsa, mmu, rno, dre, cel, dme): %s" % sps) if not lib: raise ValueError("-lib option is not set up in resources for mirge tool." " Read above warnings lines.") if not utils.file_exists(out_dir): with tx_tmpdir() as tmp_dir: sample_file = _create_sample_file(data, tmp_dir) do.run(_cmd().format(**locals()), "Running miRge2.0.") shutil.move(tmp_dir, out_dir) return [os.path.abspath(fn) for fn in glob.glob(os.path.join(out_dir, "*", "*"))]
[ "def", "run", "(", "data", ")", ":", "sample", "=", "data", "[", "0", "]", "[", "0", "]", "work_dir", "=", "dd", ".", "get_work_dir", "(", "sample", ")", "out_dir", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"mirge\"", ")", "lib...
Proxy function to run the tool
[ "Proxy", "function", "to", "run", "the", "tool" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L20-L41
238,078
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
_create_sample_file
def _create_sample_file(data, out_dir): """from data list all the fastq files in a file""" sample_file = os.path.join(out_dir, "sample_file.txt") with open(sample_file, 'w') as outh: for sample in data: outh.write(sample[0]["clean_fastq"] + "\n") return sample_file
python
def _create_sample_file(data, out_dir): sample_file = os.path.join(out_dir, "sample_file.txt") with open(sample_file, 'w') as outh: for sample in data: outh.write(sample[0]["clean_fastq"] + "\n") return sample_file
[ "def", "_create_sample_file", "(", "data", ",", "out_dir", ")", ":", "sample_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"sample_file.txt\"", ")", "with", "open", "(", "sample_file", ",", "'w'", ")", "as", "outh", ":", "for", "samp...
from data list all the fastq files in a file
[ "from", "data", "list", "all", "the", "fastq", "files", "in", "a", "file" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L55-L61
238,079
bcbio/bcbio-nextgen
bcbio/srna/mirge.py
_find_lib
def _find_lib(data): """Find mirge libs""" options = " ".join(data.get('resources', {}).get('mirge', {}).get("options", "")) if options.find("-lib") > -1 and utils.file_exists(options.split()[1]): return options if not options: logger.warning("miRge libraries not found. Follow these instructions to install them:") logger.warning("https://github.com/mhalushka/miRge#download-libraries") logger.warning("Then, pass -lib LIB_PATH with resourcces:mirge:options:[...]") logger.warning("More information: https://bcbio-nextgen.readthedocs.io/en/latest/contents/pipelines.html#smallrna-seq")
python
def _find_lib(data): options = " ".join(data.get('resources', {}).get('mirge', {}).get("options", "")) if options.find("-lib") > -1 and utils.file_exists(options.split()[1]): return options if not options: logger.warning("miRge libraries not found. Follow these instructions to install them:") logger.warning("https://github.com/mhalushka/miRge#download-libraries") logger.warning("Then, pass -lib LIB_PATH with resourcces:mirge:options:[...]") logger.warning("More information: https://bcbio-nextgen.readthedocs.io/en/latest/contents/pipelines.html#smallrna-seq")
[ "def", "_find_lib", "(", "data", ")", ":", "options", "=", "\" \"", ".", "join", "(", "data", ".", "get", "(", "'resources'", ",", "{", "}", ")", ".", "get", "(", "'mirge'", ",", "{", "}", ")", ".", "get", "(", "\"options\"", ",", "\"\"", ")", ...
Find mirge libs
[ "Find", "mirge", "libs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/mirge.py#L71-L80
238,080
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_input_sequence_files
def get_input_sequence_files(data, default=None): """ returns the input sequencing files, these can be single or paired FASTQ files or BAM files """ if "files" not in data or data.get("files") is None: file1, file2 = None, None elif len(data["files"]) == 2: file1, file2 = data["files"] else: assert len(data["files"]) == 1, data["files"] file1, file2 = data["files"][0], None return file1, file2
python
def get_input_sequence_files(data, default=None): if "files" not in data or data.get("files") is None: file1, file2 = None, None elif len(data["files"]) == 2: file1, file2 = data["files"] else: assert len(data["files"]) == 1, data["files"] file1, file2 = data["files"][0], None return file1, file2
[ "def", "get_input_sequence_files", "(", "data", ",", "default", "=", "None", ")", ":", "if", "\"files\"", "not", "in", "data", "or", "data", ".", "get", "(", "\"files\"", ")", "is", "None", ":", "file1", ",", "file2", "=", "None", ",", "None", "elif", ...
returns the input sequencing files, these can be single or paired FASTQ files or BAM files
[ "returns", "the", "input", "sequencing", "files", "these", "can", "be", "single", "or", "paired", "FASTQ", "files", "or", "BAM", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L223-L235
238,081
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_umi_consensus
def get_umi_consensus(data): """Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.` """ consensus_choices = (["fastq_name"]) umi = tz.get_in(["config", "algorithm", "umi_type"], data) # don't run consensus UMI calling for scrna-seq if tz.get_in(["analysis"], data, "").lower() == "scrna-seq": return False if umi and (umi in consensus_choices or os.path.exists(umi)): assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \ "Using consensus UMI inputs requires marking duplicates" return umi
python
def get_umi_consensus(data): consensus_choices = (["fastq_name"]) umi = tz.get_in(["config", "algorithm", "umi_type"], data) # don't run consensus UMI calling for scrna-seq if tz.get_in(["analysis"], data, "").lower() == "scrna-seq": return False if umi and (umi in consensus_choices or os.path.exists(umi)): assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \ "Using consensus UMI inputs requires marking duplicates" return umi
[ "def", "get_umi_consensus", "(", "data", ")", ":", "consensus_choices", "=", "(", "[", "\"fastq_name\"", "]", ")", "umi", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"umi_type\"", "]", ",", "data", ")", "# don't run consens...
Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.`
[ "Retrieve", "UMI", "for", "consensus", "based", "preparation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L237-L251
238,082
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_dexseq_gff
def get_dexseq_gff(config, default=None): """ some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one """ dexseq_gff = tz.get_in(tz.get_in(['dexseq_gff', 'keys'], LOOKUPS, {}), config, None) if not dexseq_gff: return None gtf_file = get_gtf_file(config) if gtf_file: base_dir = os.path.dirname(gtf_file) else: base_dir = os.path.dirname(dexseq_gff) base, _ = os.path.splitext(dexseq_gff) gff_file = os.path.join(base_dir, base + ".gff") if file_exists(gff_file): return gff_file gtf_file = os.path.join(base_dir, base + ".gff3") if file_exists(gtf_file): return gtf_file else: return None
python
def get_dexseq_gff(config, default=None): dexseq_gff = tz.get_in(tz.get_in(['dexseq_gff', 'keys'], LOOKUPS, {}), config, None) if not dexseq_gff: return None gtf_file = get_gtf_file(config) if gtf_file: base_dir = os.path.dirname(gtf_file) else: base_dir = os.path.dirname(dexseq_gff) base, _ = os.path.splitext(dexseq_gff) gff_file = os.path.join(base_dir, base + ".gff") if file_exists(gff_file): return gff_file gtf_file = os.path.join(base_dir, base + ".gff3") if file_exists(gtf_file): return gtf_file else: return None
[ "def", "get_dexseq_gff", "(", "config", ",", "default", "=", "None", ")", ":", "dexseq_gff", "=", "tz", ".", "get_in", "(", "tz", ".", "get_in", "(", "[", "'dexseq_gff'", ",", "'keys'", "]", ",", "LOOKUPS", ",", "{", "}", ")", ",", "config", ",", "...
some older versions of the genomes have the DEXseq gff file as gff instead of gff3, so this handles that by looking for either one
[ "some", "older", "versions", "of", "the", "genomes", "have", "the", "DEXseq", "gff", "file", "as", "gff", "instead", "of", "gff3", "so", "this", "handles", "that", "by", "looking", "for", "either", "one" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L253-L275
238,083
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
get_in_samples
def get_in_samples(samples, fn): """ for a list of samples, return the value of a global option """ for sample in samples: sample = to_single_data(sample) if fn(sample, None): return fn(sample) return None
python
def get_in_samples(samples, fn): for sample in samples: sample = to_single_data(sample) if fn(sample, None): return fn(sample) return None
[ "def", "get_in_samples", "(", "samples", ",", "fn", ")", ":", "for", "sample", "in", "samples", ":", "sample", "=", "to_single_data", "(", "sample", ")", "if", "fn", "(", "sample", ",", "None", ")", ":", "return", "fn", "(", "sample", ")", "return", ...
for a list of samples, return the value of a global option
[ "for", "a", "list", "of", "samples", "return", "the", "value", "of", "a", "global", "option" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L329-L337
238,084
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
update_summary_qc
def update_summary_qc(data, key, base=None, secondary=None): """ updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc """ summary = get_summary_qc(data, {}) if base and secondary: summary[key] = {"base": base, "secondary": secondary} elif base: summary[key] = {"base": base} elif secondary: summary[key] = {"secondary": secondary} data = set_summary_qc(data, summary) return data
python
def update_summary_qc(data, key, base=None, secondary=None): summary = get_summary_qc(data, {}) if base and secondary: summary[key] = {"base": base, "secondary": secondary} elif base: summary[key] = {"base": base} elif secondary: summary[key] = {"secondary": secondary} data = set_summary_qc(data, summary) return data
[ "def", "update_summary_qc", "(", "data", ",", "key", ",", "base", "=", "None", ",", "secondary", "=", "None", ")", ":", "summary", "=", "get_summary_qc", "(", "data", ",", "{", "}", ")", "if", "base", "and", "secondary", ":", "summary", "[", "key", "...
updates summary_qc with a new section, keyed by key. stick files into summary_qc if you want them propagated forward and available for multiqc
[ "updates", "summary_qc", "with", "a", "new", "section", "keyed", "by", "key", ".", "stick", "files", "into", "summary_qc", "if", "you", "want", "them", "propagated", "forward", "and", "available", "for", "multiqc" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L351-L365
238,085
bcbio/bcbio-nextgen
bcbio/pipeline/datadict.py
has_variantcalls
def has_variantcalls(data): """ returns True if the data dictionary is configured for variant calling """ analysis = get_analysis(data).lower() variant_pipeline = analysis.startswith(("standard", "variant", "variant2")) variantcaller = get_variantcaller(data) return variant_pipeline or variantcaller
python
def has_variantcalls(data): analysis = get_analysis(data).lower() variant_pipeline = analysis.startswith(("standard", "variant", "variant2")) variantcaller = get_variantcaller(data) return variant_pipeline or variantcaller
[ "def", "has_variantcalls", "(", "data", ")", ":", "analysis", "=", "get_analysis", "(", "data", ")", ".", "lower", "(", ")", "variant_pipeline", "=", "analysis", ".", "startswith", "(", "(", "\"standard\"", ",", "\"variant\"", ",", "\"variant2\"", ")", ")", ...
returns True if the data dictionary is configured for variant calling
[ "returns", "True", "if", "the", "data", "dictionary", "is", "configured", "for", "variant", "calling" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/datadict.py#L367-L374
238,086
bcbio/bcbio-nextgen
bcbio/rnaseq/qc.py
estimate_library_complexity
def estimate_library_complexity(df, algorithm="RNA-seq"): """ estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line """ DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)} cutoffs = DEFAULT_CUTOFFS[algorithm] if len(df) < 5: return {"unique_starts_per_read": 'nan', "complexity": "NA"} model = sm.ols(formula="starts ~ reads", data=df) fitted = model.fit() slope = fitted.params["reads"] if slope <= cutoffs[0]: complexity = "LOW" elif slope <= cutoffs[1]: complexity = "MEDIUM" else: complexity = "HIGH" # for now don't return the complexity flag return {"Unique Starts Per Read": float(slope)}
python
def estimate_library_complexity(df, algorithm="RNA-seq"): DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)} cutoffs = DEFAULT_CUTOFFS[algorithm] if len(df) < 5: return {"unique_starts_per_read": 'nan', "complexity": "NA"} model = sm.ols(formula="starts ~ reads", data=df) fitted = model.fit() slope = fitted.params["reads"] if slope <= cutoffs[0]: complexity = "LOW" elif slope <= cutoffs[1]: complexity = "MEDIUM" else: complexity = "HIGH" # for now don't return the complexity flag return {"Unique Starts Per Read": float(slope)}
[ "def", "estimate_library_complexity", "(", "df", ",", "algorithm", "=", "\"RNA-seq\"", ")", ":", "DEFAULT_CUTOFFS", "=", "{", "\"RNA-seq\"", ":", "(", "0.25", ",", "0.40", ")", "}", "cutoffs", "=", "DEFAULT_CUTOFFS", "[", "algorithm", "]", "if", "len", "(", ...
estimate library complexity from the number of reads vs. number of unique start sites. returns "NA" if there are not enough data points to fit the line
[ "estimate", "library", "complexity", "from", "the", "number", "of", "reads", "vs", ".", "number", "of", "unique", "start", "sites", ".", "returns", "NA", "if", "there", "are", "not", "enough", "data", "points", "to", "fit", "the", "line" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/qc.py#L42-L64
238,087
bcbio/bcbio-nextgen
bcbio/galaxy/api.py
GalaxyApiAccess.run_details
def run_details(self, run_bc, run_date=None): """Next Gen LIMS specific API functionality. """ try: details = self._get("/nglims/api_run_details", dict(run=run_bc)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_bc) if "error" in details and run_date is not None: try: details = self._get("/nglims/api_run_details", dict(run=run_date)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_date) return details
python
def run_details(self, run_bc, run_date=None): try: details = self._get("/nglims/api_run_details", dict(run=run_bc)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_bc) if "error" in details and run_date is not None: try: details = self._get("/nglims/api_run_details", dict(run=run_date)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_date) return details
[ "def", "run_details", "(", "self", ",", "run_bc", ",", "run_date", "=", "None", ")", ":", "try", ":", "details", "=", "self", ".", "_get", "(", "\"/nglims/api_run_details\"", ",", "dict", "(", "run", "=", "run_bc", ")", ")", "except", "ValueError", ":", ...
Next Gen LIMS specific API functionality.
[ "Next", "Gen", "LIMS", "specific", "API", "functionality", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/api.py#L52-L64
238,088
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
fixrg
def fixrg(in_bam, names, ref_file, dirs, data): """Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data))) out_file = os.path.join(work_dir, "%s-fixrg.bam" % utils.splitext_plus(os.path.basename(in_bam))[0]) if not utils.file_exists(out_file): out_file = os.path.join(work_dir, "%s-fixrg.bam" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, in_bam): with file_transaction(data, out_file) as tx_out_file: rg_info = novoalign.get_rg_info(names) new_header = "%s-header.txt" % os.path.splitext(out_file)[0] cores = dd.get_cores(data) do.run("samtools view -H {in_bam} | grep -v ^@RG > {new_header}".format(**locals()), "Create empty RG header: %s" % dd.get_sample_name(data)) cmd = ("samtools reheader {new_header} {in_bam} | " "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} -") do.run(cmd.format(**locals()), "Fix read groups: %s" % dd.get_sample_name(data)) return out_file
python
def fixrg(in_bam, names, ref_file, dirs, data): work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data))) out_file = os.path.join(work_dir, "%s-fixrg.bam" % utils.splitext_plus(os.path.basename(in_bam))[0]) if not utils.file_exists(out_file): out_file = os.path.join(work_dir, "%s-fixrg.bam" % dd.get_sample_name(data)) if not utils.file_uptodate(out_file, in_bam): with file_transaction(data, out_file) as tx_out_file: rg_info = novoalign.get_rg_info(names) new_header = "%s-header.txt" % os.path.splitext(out_file)[0] cores = dd.get_cores(data) do.run("samtools view -H {in_bam} | grep -v ^@RG > {new_header}".format(**locals()), "Create empty RG header: %s" % dd.get_sample_name(data)) cmd = ("samtools reheader {new_header} {in_bam} | " "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} -") do.run(cmd.format(**locals()), "Fix read groups: %s" % dd.get_sample_name(data)) return out_file
[ "def", "fixrg", "(", "in_bam", ",", "names", ",", "ref_file", ",", "dirs", ",", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"bamclean\"...
Fix read group in a file, using samtools addreplacerg. addreplacerg does not remove the old read group, causing confusion when checking. We use reheader to work around this
[ "Fix", "read", "group", "in", "a", "file", "using", "samtools", "addreplacerg", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L20-L40
238,089
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
_target_chroms_and_header
def _target_chroms_and_header(bam_file, data): """Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues. """ special_remaps = {"chrM": "MT", "MT": "chrM"} target_chroms = dict([(x.name, i) for i, x in enumerate(ref.file_contigs(dd.get_ref_file(data))) if chromhacks.is_autosomal_or_sex(x.name)]) out_chroms = [] with pysam.Samfile(bam_file, "rb") as bamfile: for bami, bam_contig in enumerate([c["SN"] for c in bamfile.header["SQ"]]): if bam_contig in target_chroms: target_chrom = bam_contig elif bam_contig in special_remaps and special_remaps[bam_contig] in target_chroms: target_chrom = special_remaps[bam_contig] elif bam_contig.startswith("chr") and bam_contig.replace("chr", "") in target_chroms: target_chrom = bam_contig.replace("chr", "") elif "chr%s" % bam_contig in target_chroms: target_chrom = "chr%s" % bam_contig else: target_chrom = None # target_chrom == bam_contig ensures we don't try chr1 -> 1 style remapping if target_chrom and target_chrom == bam_contig: # Order not required if dealing with SAM file header fixing #assert bami == target_chroms[target_chrom], \ # ("remove_extracontigs: Non-matching order of standard contig: %s %s (%s vs %s)" % # (bam_file, target_chrom, bami, target_chroms[target_chrom])) out_chroms.append(target_chrom) assert out_chroms, ("remove_extracontigs: Did not find any chromosomes in reference file: %s %s" % (bam_file, target_chroms)) return out_chroms
python
def _target_chroms_and_header(bam_file, data): special_remaps = {"chrM": "MT", "MT": "chrM"} target_chroms = dict([(x.name, i) for i, x in enumerate(ref.file_contigs(dd.get_ref_file(data))) if chromhacks.is_autosomal_or_sex(x.name)]) out_chroms = [] with pysam.Samfile(bam_file, "rb") as bamfile: for bami, bam_contig in enumerate([c["SN"] for c in bamfile.header["SQ"]]): if bam_contig in target_chroms: target_chrom = bam_contig elif bam_contig in special_remaps and special_remaps[bam_contig] in target_chroms: target_chrom = special_remaps[bam_contig] elif bam_contig.startswith("chr") and bam_contig.replace("chr", "") in target_chroms: target_chrom = bam_contig.replace("chr", "") elif "chr%s" % bam_contig in target_chroms: target_chrom = "chr%s" % bam_contig else: target_chrom = None # target_chrom == bam_contig ensures we don't try chr1 -> 1 style remapping if target_chrom and target_chrom == bam_contig: # Order not required if dealing with SAM file header fixing #assert bami == target_chroms[target_chrom], \ # ("remove_extracontigs: Non-matching order of standard contig: %s %s (%s vs %s)" % # (bam_file, target_chrom, bami, target_chroms[target_chrom])) out_chroms.append(target_chrom) assert out_chroms, ("remove_extracontigs: Did not find any chromosomes in reference file: %s %s" % (bam_file, target_chroms)) return out_chroms
[ "def", "_target_chroms_and_header", "(", "bam_file", ",", "data", ")", ":", "special_remaps", "=", "{", "\"chrM\"", ":", "\"MT\"", ",", "\"MT\"", ":", "\"chrM\"", "}", "target_chroms", "=", "dict", "(", "[", "(", "x", ".", "name", ",", "i", ")", "for", ...
Get a list of chromosomes to target and new updated ref_file header. Could potentially handle remapping from chr1 -> 1 but currently disabled due to speed issues.
[ "Get", "a", "list", "of", "chromosomes", "to", "target", "and", "new", "updated", "ref_file", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L75-L106
238,090
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
picard_prep
def picard_prep(in_bam, names, ref_file, dirs, data): """Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities """ runner = broad.runner_from_path("picard", data["config"]) work_dir = utils.safe_makedir(os.path.join(dirs["work"], "bamclean", names["sample"])) runner.run_fn("picard_index_ref", ref_file) reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % os.path.splitext(os.path.basename(in_bam))[0]) if not utils.file_exists(reorder_bam): reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % dd.get_sample_name(data)) reorder_bam = runner.run_fn("picard_reorder", in_bam, ref_file, reorder_bam) rg_bam = runner.run_fn("picard_fix_rgs", reorder_bam, names) return _filter_bad_reads(rg_bam, ref_file, data)
python
def picard_prep(in_bam, names, ref_file, dirs, data): runner = broad.runner_from_path("picard", data["config"]) work_dir = utils.safe_makedir(os.path.join(dirs["work"], "bamclean", names["sample"])) runner.run_fn("picard_index_ref", ref_file) reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % os.path.splitext(os.path.basename(in_bam))[0]) if not utils.file_exists(reorder_bam): reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % dd.get_sample_name(data)) reorder_bam = runner.run_fn("picard_reorder", in_bam, ref_file, reorder_bam) rg_bam = runner.run_fn("picard_fix_rgs", reorder_bam, names) return _filter_bad_reads(rg_bam, ref_file, data)
[ "def", "picard_prep", "(", "in_bam", ",", "names", ",", "ref_file", ",", "dirs", ",", "data", ")", ":", "runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "data", "[", "\"config\"", "]", ")", "work_dir", "=", "utils", ".", "safe_mak...
Prepare input BAM using Picard and GATK cleaning tools. - ReorderSam to reorder file to reference - AddOrReplaceReadGroups to add read group information and coordinate sort - PrintReads to filters to remove problem records: - filterMBQ to remove reads with mismatching bases and base qualities
[ "Prepare", "input", "BAM", "using", "Picard", "and", "GATK", "cleaning", "tools", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L122-L139
238,091
bcbio/bcbio-nextgen
bcbio/pipeline/cleanbam.py
_filter_bad_reads
def _filter_bad_reads(in_bam, ref_file, data): """Use GATK filter to remove problem reads which choke GATK and Picard. """ bam.index(in_bam, data["config"]) out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0] if not utils.file_exists(out_file): with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_file) as tx_out_file: params = [("FixMisencodedBaseQualityReads" if dd.get_quality_format(data, "").lower() == "illumina" else "PrintReads"), "-R", ref_file, "-I", in_bam, "-O", tx_out_file, "-RF", "MatchingBasesAndQualsReadFilter", "-RF", "SeqIsStoredReadFilter", "-RF", "CigarContainsNoNOperator"] jvm_opts = broad.get_gatk_opts(data["config"], tmp_dir) do.run(broad.gatk_cmd("gatk", jvm_opts, params), "Filter problem reads") bam.index(out_file, data["config"]) return out_file
python
def _filter_bad_reads(in_bam, ref_file, data): bam.index(in_bam, data["config"]) out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0] if not utils.file_exists(out_file): with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_file) as tx_out_file: params = [("FixMisencodedBaseQualityReads" if dd.get_quality_format(data, "").lower() == "illumina" else "PrintReads"), "-R", ref_file, "-I", in_bam, "-O", tx_out_file, "-RF", "MatchingBasesAndQualsReadFilter", "-RF", "SeqIsStoredReadFilter", "-RF", "CigarContainsNoNOperator"] jvm_opts = broad.get_gatk_opts(data["config"], tmp_dir) do.run(broad.gatk_cmd("gatk", jvm_opts, params), "Filter problem reads") bam.index(out_file, data["config"]) return out_file
[ "def", "_filter_bad_reads", "(", "in_bam", ",", "ref_file", ",", "data", ")", ":", "bam", ".", "index", "(", "in_bam", ",", "data", "[", "\"config\"", "]", ")", "out_file", "=", "\"%s-gatkfilter.bam\"", "%", "os", ".", "path", ".", "splitext", "(", "in_b...
Use GATK filter to remove problem reads which choke GATK and Picard.
[ "Use", "GATK", "filter", "to", "remove", "problem", "reads", "which", "choke", "GATK", "and", "Picard", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/cleanbam.py#L141-L161
238,092
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
generate_parallel
def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ to_analyze, extras = _split_samples_by_qc(samples) qced = run_parallel("pipeline_summary", to_analyze) samples = _combine_qc_samples(qced) + extras qsign_info = run_parallel("qsignature_summary", [samples]) metadata_file = _merge_metadata([samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file data[0]["summary"]["metadata"] = metadata_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) # MultiQC must be run after all file outputs are set: return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
python
def generate_parallel(samples, run_parallel): to_analyze, extras = _split_samples_by_qc(samples) qced = run_parallel("pipeline_summary", to_analyze) samples = _combine_qc_samples(qced) + extras qsign_info = run_parallel("qsignature_summary", [samples]) metadata_file = _merge_metadata([samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file data[0]["summary"]["metadata"] = metadata_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) # MultiQC must be run after all file outputs are set: return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
[ "def", "generate_parallel", "(", "samples", ",", "run_parallel", ")", ":", "to_analyze", ",", "extras", "=", "_split_samples_by_qc", "(", "samples", ")", "qced", "=", "run_parallel", "(", "\"pipeline_summary\"", ",", "to_analyze", ")", "samples", "=", "_combine_qc...
Provide parallel preparation of summary information for alignment and variant calling.
[ "Provide", "parallel", "preparation", "of", "summary", "information", "for", "alignment", "and", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L38-L58
238,093
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
pipeline_summary
def pipeline_summary(data): """Provide summary information on processing sample. Handles standard and CWL (single QC output) cases. """ data = utils.to_single_data(data) work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) if not work_bam or not work_bam.endswith(".bam"): work_bam = None if dd.get_ref_file(data): if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data)))) work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data) data["summary"] = _run_qc_tools(work_bam, work_data) if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data): data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0]) return [[data]]
python
def pipeline_summary(data): data = utils.to_single_data(data) work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) if not work_bam or not work_bam.endswith(".bam"): work_bam = None if dd.get_ref_file(data): if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data)))) work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data) data["summary"] = _run_qc_tools(work_bam, work_data) if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data): data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0]) return [[data]]
[ "def", "pipeline_summary", "(", "data", ")", ":", "data", "=", "utils", ".", "to_single_data", "(", "data", ")", "work_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "if", "not", "work_bam", ...
Provide summary information on processing sample. Handles standard and CWL (single QC output) cases.
[ "Provide", "summary", "information", "on", "processing", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L60-L76
238,094
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
get_qc_tools
def get_qc_tools(data): """Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set. """ if dd.get_algorithm_qc(data): return dd.get_algorithm_qc(data) analysis = data["analysis"].lower() to_run = [] if tz.get_in(["config", "algorithm", "kraken"], data): to_run.append("kraken") if "fastqc" not in dd.get_tools_off(data): to_run.append("fastqc") if any([tool in dd.get_tools_on(data) for tool in ["qualimap", "qualimap_full"]]): to_run.append("qualimap") if analysis.startswith("rna-seq") or analysis == "smallrna-seq": if "qualimap" not in dd.get_tools_off(data): if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append("qualimap_rnaseq") else: logger.debug("GTF not compatible with Qualimap, skipping.") if analysis.startswith("chip-seq"): to_run.append("chipqc") if analysis.startswith("smallrna-seq"): to_run.append("small-rna") to_run.append("atropos") if "coverage_qc" not in dd.get_tools_off(data): to_run.append("samtools") if dd.has_variantcalls(data): if "coverage_qc" not in dd.get_tools_off(data): to_run += ["coverage", "picard"] to_run += ["qsignature", "variants"] if vcfanno.is_human(data): to_run += ["contamination", "peddy"] if vcfutils.get_paired_phenotype(data): to_run += ["viral"] if damage.should_filter([data]): to_run += ["damage"] if dd.get_umi_consensus(data): to_run += ["umi"] if tz.get_in(["config", "algorithm", "preseq"], data): to_run.append("preseq") to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)] to_run.sort() return to_run
python
def get_qc_tools(data): if dd.get_algorithm_qc(data): return dd.get_algorithm_qc(data) analysis = data["analysis"].lower() to_run = [] if tz.get_in(["config", "algorithm", "kraken"], data): to_run.append("kraken") if "fastqc" not in dd.get_tools_off(data): to_run.append("fastqc") if any([tool in dd.get_tools_on(data) for tool in ["qualimap", "qualimap_full"]]): to_run.append("qualimap") if analysis.startswith("rna-seq") or analysis == "smallrna-seq": if "qualimap" not in dd.get_tools_off(data): if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append("qualimap_rnaseq") else: logger.debug("GTF not compatible with Qualimap, skipping.") if analysis.startswith("chip-seq"): to_run.append("chipqc") if analysis.startswith("smallrna-seq"): to_run.append("small-rna") to_run.append("atropos") if "coverage_qc" not in dd.get_tools_off(data): to_run.append("samtools") if dd.has_variantcalls(data): if "coverage_qc" not in dd.get_tools_off(data): to_run += ["coverage", "picard"] to_run += ["qsignature", "variants"] if vcfanno.is_human(data): to_run += ["contamination", "peddy"] if vcfutils.get_paired_phenotype(data): to_run += ["viral"] if damage.should_filter([data]): to_run += ["damage"] if dd.get_umi_consensus(data): to_run += ["umi"] if tz.get_in(["config", "algorithm", "preseq"], data): to_run.append("preseq") to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)] to_run.sort() return to_run
[ "def", "get_qc_tools", "(", "data", ")", ":", "if", "dd", ".", "get_algorithm_qc", "(", "data", ")", ":", "return", "dd", ".", "get_algorithm_qc", "(", "data", ")", "analysis", "=", "data", "[", "\"analysis\"", "]", ".", "lower", "(", ")", "to_run", "=...
Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set.
[ "Retrieve", "a", "list", "of", "QC", "tools", "to", "use", "based", "on", "configuration", "and", "analysis", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L78-L123
238,095
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_run_qc_tools
def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken, qsignature, qualimap, samtools, picard, srna, umi, variant, viral, preseq, chipseq) tools = {"fastqc": fastqc.run, "atropos": atropos.run, "small-rna": srna.run, "samtools": samtools.run, "qualimap": qualimap.run, "qualimap_rnaseq": qualimap.run_rnaseq, "qsignature": qsignature.run, "contamination": contamination.run, "coverage": coverage.run, "damage": damage.run, "variants": variant.run, "peddy": peddy.run_qc, "kraken": kraken.run, "picard": picard.run, "umi": umi.run, "viral": viral.run, "preseq": preseq.run, "chipqc": chipseq.run } qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} qc_out = utils.deepish_copy(dd.get_summary_qc(data)) for program_name in dd.get_algorithm_qc(data): if not bam_file and program_name != "kraken": # kraken doesn't need bam continue if dd.get_phenotype(data) == "germline" and program_name != "variants": continue qc_fn = tools[program_name] cur_qc_dir = os.path.join(qc_dir, program_name) out = qc_fn(bam_file, data, cur_qc_dir) qc_files = None if out and isinstance(out, dict): # Check for metrics output, two cases: # 1. output with {"metrics"} and files ("base") if "metrics" in out: metrics.update(out.pop("metrics")) # 2. 
a dictionary of metrics elif "base" not in out: metrics.update(out) # Check for files only output if "base" in out: qc_files = out elif out and isinstance(out, six.string_types) and os.path.exists(out): qc_files = {"base": out, "secondary": []} if not qc_files: qc_files = _organize_qc_files(program_name, cur_qc_dir) if qc_files: qc_out[program_name] = qc_files metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = dd.get_quality_format(data).lower() return {"qc": qc_out, "metrics": metrics}
python
def _run_qc_tools(bam_file, data): from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken, qsignature, qualimap, samtools, picard, srna, umi, variant, viral, preseq, chipseq) tools = {"fastqc": fastqc.run, "atropos": atropos.run, "small-rna": srna.run, "samtools": samtools.run, "qualimap": qualimap.run, "qualimap_rnaseq": qualimap.run_rnaseq, "qsignature": qsignature.run, "contamination": contamination.run, "coverage": coverage.run, "damage": damage.run, "variants": variant.run, "peddy": peddy.run_qc, "kraken": kraken.run, "picard": picard.run, "umi": umi.run, "viral": viral.run, "preseq": preseq.run, "chipqc": chipseq.run } qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} qc_out = utils.deepish_copy(dd.get_summary_qc(data)) for program_name in dd.get_algorithm_qc(data): if not bam_file and program_name != "kraken": # kraken doesn't need bam continue if dd.get_phenotype(data) == "germline" and program_name != "variants": continue qc_fn = tools[program_name] cur_qc_dir = os.path.join(qc_dir, program_name) out = qc_fn(bam_file, data, cur_qc_dir) qc_files = None if out and isinstance(out, dict): # Check for metrics output, two cases: # 1. output with {"metrics"} and files ("base") if "metrics" in out: metrics.update(out.pop("metrics")) # 2. a dictionary of metrics elif "base" not in out: metrics.update(out) # Check for files only output if "base" in out: qc_files = out elif out and isinstance(out, six.string_types) and os.path.exists(out): qc_files = {"base": out, "secondary": []} if not qc_files: qc_files = _organize_qc_files(program_name, cur_qc_dir) if qc_files: qc_out[program_name] = qc_files metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = dd.get_quality_format(data).lower() return {"qc": qc_out, "metrics": metrics}
[ "def", "_run_qc_tools", "(", "bam_file", ",", "data", ")", ":", "from", "bcbio", ".", "qc", "import", "(", "atropos", ",", "contamination", ",", "coverage", ",", "damage", ",", "fastqc", ",", "kraken", ",", "qsignature", ",", "qualimap", ",", "samtools", ...
Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools
[ "Run", "a", "set", "of", "third", "party", "quality", "control", "tools", "returning", "QC", "directory", "and", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L125-L187
238,096
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_organize_qc_files
def _organize_qc_files(program, qc_dir): """Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing. """ base_files = {"fastqc": "fastqc_report.html", "qualimap_rnaseq": "qualimapReport.html", "qualimap": "qualimapReport.html"} if os.path.exists(qc_dir): out_files = [] for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]: if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"): out_files.append(fname) elif os.path.isdir(fname) and not fname.endswith("tx"): for root, dirs, files in os.walk(fname): for f in files: if not f.endswith(".bcbiotmp"): out_files.append(os.path.join(root, f)) if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]): if len(out_files) == 1: base = out_files[0] secondary = [] else: base = None if program in base_files: base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])] if len(base_choices) == 1: base = base_choices[0] if not base: base = out_files[0] secondary = [x for x in out_files if x != base] return {"base": base, "secondary": secondary}
python
def _organize_qc_files(program, qc_dir): base_files = {"fastqc": "fastqc_report.html", "qualimap_rnaseq": "qualimapReport.html", "qualimap": "qualimapReport.html"} if os.path.exists(qc_dir): out_files = [] for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]: if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"): out_files.append(fname) elif os.path.isdir(fname) and not fname.endswith("tx"): for root, dirs, files in os.walk(fname): for f in files: if not f.endswith(".bcbiotmp"): out_files.append(os.path.join(root, f)) if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]): if len(out_files) == 1: base = out_files[0] secondary = [] else: base = None if program in base_files: base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])] if len(base_choices) == 1: base = base_choices[0] if not base: base = out_files[0] secondary = [x for x in out_files if x != base] return {"base": base, "secondary": secondary}
[ "def", "_organize_qc_files", "(", "program", ",", "qc_dir", ")", ":", "base_files", "=", "{", "\"fastqc\"", ":", "\"fastqc_report.html\"", ",", "\"qualimap_rnaseq\"", ":", "\"qualimapReport.html\"", ",", "\"qualimap\"", ":", "\"qualimapReport.html\"", "}", "if", "os",...
Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing.
[ "Organize", "outputs", "from", "quality", "control", "runs", "into", "a", "base", "file", "and", "secondary", "outputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L189-L220
238,097
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_split_samples_by_qc
def _split_samples_by_qc(samples):
    """Fan each sample out into one work item per configured QC step.

    Samples with no QC configured, or with nothing to run QC against, are
    passed through unchanged in the second return value.
    """
    runnable = []
    passthrough = []
    for data in (utils.to_single_data(x) for x in samples):
        qcs = dd.get_algorithm_qc(data)
        # kraken doesn't need bam
        has_input = bool(dd.get_align_bam(data) or dd.get_work_bam(data) or
                         tz.get_in(["config", "algorithm", "kraken"], data))
        if qcs and has_input:
            for qc in qcs:
                split = copy.deepcopy(data)
                split["config"]["algorithm"]["qc"] = [qc]
                runnable.append([split])
        else:
            passthrough.append([data])
    return runnable, passthrough
python
def _split_samples_by_qc(samples):
    """Expand samples so each configured QC step runs independently.

    Returns (to_process, extras): per-QC copies of each runnable sample,
    and untouched samples that have no QC work to do.
    """
    to_process, extras = [], []
    for data in [utils.to_single_data(x) for x in samples]:
        qc_steps = dd.get_algorithm_qc(data)
        # kraken doesn't need bam
        if not qc_steps or not (dd.get_align_bam(data) or dd.get_work_bam(data) or
                                tz.get_in(["config", "algorithm", "kraken"], data)):
            extras.append([data])
            continue
        for step in qc_steps:
            clone = copy.deepcopy(data)
            clone["config"]["algorithm"]["qc"] = [step]
            to_process.append([clone])
    return to_process, extras
[ "def", "_split_samples_by_qc", "(", "samples", ")", ":", "to_process", "=", "[", "]", "extras", "=", "[", "]", "for", "data", "in", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", ":", "qcs", "=", "dd", ".", "g...
Split data into individual quality control steps for a run.
[ "Split", "data", "into", "individual", "quality", "control", "steps", "for", "a", "run", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L224-L240
238,098
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
_combine_qc_samples
def _combine_qc_samples(samples):
    """Merge per-QC-step split samples back into one sample per BAM/batch.

    The first sample in each group is mutated to carry the union of the
    group's QC outputs, summary metrics and configured QC algorithms.
    """
    grouped = collections.defaultdict(list)
    for data in (utils.to_single_data(x) for x in samples):
        batch = dd.get_batch(data) or dd.get_sample_name(data)
        if not isinstance(batch, (list, tuple)):
            batch = [batch]
        key = (dd.get_align_bam(data) or dd.get_work_bam(data), tuple(batch))
        grouped[key].append(data)
    out = []
    for group in grouped.values():
        merged = group[0]
        qc_algs, qc, metrics = [], {}, {}
        for d in group:
            qc.update(dd.get_summary_qc(d))
            metrics.update(dd.get_summary_metrics(d))
            qc_algs.extend(dd.get_algorithm_qc(d))
        merged["config"]["algorithm"]["qc"] = qc_algs
        merged["summary"]["qc"] = qc
        merged["summary"]["metrics"] = metrics
        out.append([merged])
    return out
python
def _combine_qc_samples(samples):
    """Recombine split QC analyses into single samples keyed by BAM file."""
    groups = collections.defaultdict(list)
    for data in [utils.to_single_data(x) for x in samples]:
        batch = dd.get_batch(data) or dd.get_sample_name(data)
        batch = tuple(batch if isinstance(batch, (list, tuple)) else [batch])
        bam = dd.get_align_bam(data) or dd.get_work_bam(data)
        groups[(bam, batch)].append(data)
    combined = []
    for members in groups.values():
        # fold every member's results into the group's first sample
        rep = members[0]
        all_qc_algs = []
        merged_qc = {}
        merged_metrics = {}
        for member in members:
            merged_qc.update(dd.get_summary_qc(member))
            merged_metrics.update(dd.get_summary_metrics(member))
            all_qc_algs.extend(dd.get_algorithm_qc(member))
        rep["config"]["algorithm"]["qc"] = all_qc_algs
        rep["summary"]["qc"] = merged_qc
        rep["summary"]["metrics"] = merged_metrics
        combined.append([rep])
    return combined
[ "def", "_combine_qc_samples", "(", "samples", ")", ":", "by_bam", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "samples", "]", ":", "batch", "=", "d...
Combine split QC analyses into single samples based on BAM files.
[ "Combine", "split", "QC", "analyses", "into", "single", "samples", "based", "on", "BAM", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L242-L266
238,099
bcbio/bcbio-nextgen
bcbio/pipeline/qcsummary.py
write_project_summary
def write_project_summary(samples, qsign_info=None):
    """Write project-summary.yaml describing the run's samples.

    Records run date, optional qsignature results, upload and bcbio_system
    locations, plus per-sample summary fields merged with samples from any
    previous pipeline run sharing the same summary file.
    """
    work_dir = samples[0][0]["dirs"]["work"]
    out_file = os.path.join(work_dir, "project-summary.yaml")
    upload = samples[0][0]["upload"]
    upload_dir = os.path.join(work_dir, upload["dir"]) if "dir" in upload else ""
    date = str(datetime.now())
    prev_samples = _other_pipeline_samples(out_file, samples)
    with open(out_file, "w") as out_handle:
        def _dump(obj):
            # keep one dump call signature for every section of the file
            yaml.safe_dump(obj, out_handle, default_flow_style=False,
                           allow_unicode=False)
        _dump({"date": date})
        if qsign_info:
            qsign_out = utils.deepish_copy(qsign_info[0])
            qsign_out.pop("out_dir", None)
            _dump({"qsignature": qsign_out})
        _dump({"upload": upload_dir})
        _dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")})
        _dump({"samples": prev_samples + [_save_fields(s[0]) for s in samples]})
    return out_file
python
def write_project_summary(samples, qsign_info=None):
    """Write the project-summary.yaml file for the provided samples."""
    work_dir = samples[0][0]["dirs"]["work"]
    out_file = os.path.join(work_dir, "project-summary.yaml")
    if "dir" in samples[0][0]["upload"]:
        upload_dir = os.path.join(work_dir, samples[0][0]["upload"]["dir"])
    else:
        upload_dir = ""
    date = str(datetime.now())
    prev_samples = _other_pipeline_samples(out_file, samples)
    dump_args = {"default_flow_style": False, "allow_unicode": False}
    with open(out_file, "w") as out_handle:
        yaml.safe_dump({"date": date}, out_handle, **dump_args)
        if qsign_info:
            qsign_out = utils.deepish_copy(qsign_info[0])
            # out_dir is a local path with no value in the summary
            qsign_out.pop("out_dir", None)
            yaml.safe_dump({"qsignature": qsign_out}, out_handle, **dump_args)
        yaml.safe_dump({"upload": upload_dir}, out_handle, **dump_args)
        yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")},
                       out_handle, **dump_args)
        yaml.safe_dump({"samples": prev_samples + [_save_fields(s[0]) for s in samples]},
                       out_handle, **dump_args)
    return out_file
[ "def", "write_project_summary", "(", "samples", ",", "qsign_info", "=", "None", ")", ":", "work_dir", "=", "samples", "[", "0", "]", "[", "0", "]", "[", "\"dirs\"", "]", "[", "\"work\"", "]", "out_file", "=", "os", ".", "path", ".", "join", "(", "wor...
Write project summary information on the provided samples, including output directories and genome resources.
[ "Write", "project", "summary", "information", "on", "the", "provided", "samples", ".", "write", "out", "dirs", "genome", "resources" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/qcsummary.py#L270-L295