id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 value) | code (string, 51-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
237,700 | bcbio/bcbio-nextgen | bcbio/variation/bamprep.py | _gatk_extract_reads_cl | def _gatk_extract_reads_cl(data, region, prep_params, tmp_dir):
"""Use GATK to extract reads from full BAM file.
"""
args = ["PrintReads",
"-L", region_to_gatk(region),
"-R", dd.get_ref_file(data),
"-I", data["work_bam"]]
# GATK3 back compatibility, need to specify analysis type
if "gatk4" in dd.get_tools_off(data):
args = ["--analysis_type"] + args
runner = broad.runner_from_config(data["config"])
return runner.cl_gatk(args, tmp_dir) | python | def _gatk_extract_reads_cl(data, region, prep_params, tmp_dir):
args = ["PrintReads",
"-L", region_to_gatk(region),
"-R", dd.get_ref_file(data),
"-I", data["work_bam"]]
# GATK3 back compatibility, need to specify analysis type
if "gatk4" in dd.get_tools_off(data):
args = ["--analysis_type"] + args
runner = broad.runner_from_config(data["config"])
return runner.cl_gatk(args, tmp_dir) | [
"def",
"_gatk_extract_reads_cl",
"(",
"data",
",",
"region",
",",
"prep_params",
",",
"tmp_dir",
")",
":",
"args",
"=",
"[",
"\"PrintReads\"",
",",
"\"-L\"",
",",
"region_to_gatk",
"(",
"region",
")",
",",
"\"-R\"",
",",
"dd",
".",
"get_ref_file",
"(",
"da... | Use GATK to extract reads from full BAM file. | [
"Use",
"GATK",
"to",
"extract",
"reads",
"from",
"full",
"BAM",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bamprep.py#L23-L34 |
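The `_gatk_extract_reads_cl` row above builds its command as a plain list and prepends `--analysis_type` only when falling back to a GATK3-style invocation. A minimal standalone sketch of that argument-building pattern, with literal placeholder values standing in for the bcbio `dd` accessors and the `region_to_gatk` helper:

```python
def build_printreads_args(region, ref_file, bam_file, gatk4_off=False):
    """Build a GATK PrintReads argument list; prepend the analysis-type
    flag when falling back to a GATK3-style invocation."""
    args = ["PrintReads",
            "-L", region,
            "-R", ref_file,
            "-I", bam_file]
    if gatk4_off:  # stands in for the `"gatk4" in dd.get_tools_off(data)` check
        args = ["--analysis_type"] + args
    return args

print(build_printreads_args("chr1:1-1000", "ref.fa", "sample.bam", gatk4_off=True))
```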
237,701 | bcbio/bcbio-nextgen | bcbio/variation/bamprep.py | _piped_input_cl | def _piped_input_cl(data, region, tmp_dir, out_base_file, prep_params):
"""Retrieve the commandline for streaming input into preparation step.
"""
return data["work_bam"], _gatk_extract_reads_cl(data, region, prep_params, tmp_dir) | python | def _piped_input_cl(data, region, tmp_dir, out_base_file, prep_params):
return data["work_bam"], _gatk_extract_reads_cl(data, region, prep_params, tmp_dir) | [
"def",
"_piped_input_cl",
"(",
"data",
",",
"region",
",",
"tmp_dir",
",",
"out_base_file",
",",
"prep_params",
")",
":",
"return",
"data",
"[",
"\"work_bam\"",
"]",
",",
"_gatk_extract_reads_cl",
"(",
"data",
",",
"region",
",",
"prep_params",
",",
"tmp_dir",... | Retrieve the commandline for streaming input into preparation step. | [
"Retrieve",
"the",
"commandline",
"for",
"streaming",
"input",
"into",
"preparation",
"step",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bamprep.py#L36-L39 |
237,702 | bcbio/bcbio-nextgen | bcbio/variation/bamprep.py | _piped_realign_gatk | def _piped_realign_gatk(data, region, cl, out_base_file, tmp_dir, prep_params):
"""Perform realignment with GATK, using input commandline.
GATK requires writing to disk and indexing before realignment.
"""
broad_runner = broad.runner_from_config(data["config"])
pa_bam = "%s-prealign%s" % os.path.splitext(out_base_file)
if not utils.file_exists(pa_bam):
with file_transaction(data, pa_bam) as tx_out_file:
cmd = "{cl} -o {tx_out_file}".format(**locals())
do.run(cmd, "GATK re-alignment {0}".format(region), data)
bam.index(pa_bam, data["config"])
realn_file = realign.gatk_realigner_targets(broad_runner, pa_bam, dd.get_ref_file(data), data["config"],
region=region_to_gatk(region),
known_vrns=dd.get_variation_resources(data))
realn_cl = realign.gatk_indel_realignment_cl(broad_runner, pa_bam, dd.get_ref_file(data),
realn_file, tmp_dir, region=region_to_gatk(region),
known_vrns=dd.get_variation_resources(data))
return pa_bam, realn_cl | python | def _piped_realign_gatk(data, region, cl, out_base_file, tmp_dir, prep_params):
broad_runner = broad.runner_from_config(data["config"])
pa_bam = "%s-prealign%s" % os.path.splitext(out_base_file)
if not utils.file_exists(pa_bam):
with file_transaction(data, pa_bam) as tx_out_file:
cmd = "{cl} -o {tx_out_file}".format(**locals())
do.run(cmd, "GATK re-alignment {0}".format(region), data)
bam.index(pa_bam, data["config"])
realn_file = realign.gatk_realigner_targets(broad_runner, pa_bam, dd.get_ref_file(data), data["config"],
region=region_to_gatk(region),
known_vrns=dd.get_variation_resources(data))
realn_cl = realign.gatk_indel_realignment_cl(broad_runner, pa_bam, dd.get_ref_file(data),
realn_file, tmp_dir, region=region_to_gatk(region),
known_vrns=dd.get_variation_resources(data))
return pa_bam, realn_cl | [
"def",
"_piped_realign_gatk",
"(",
"data",
",",
"region",
",",
"cl",
",",
"out_base_file",
",",
"tmp_dir",
",",
"prep_params",
")",
":",
"broad_runner",
"=",
"broad",
".",
"runner_from_config",
"(",
"data",
"[",
"\"config\"",
"]",
")",
"pa_bam",
"=",
"\"%s-p... | Perform realignment with GATK, using input commandline.
GATK requires writing to disk and indexing before realignment. | [
"Perform",
"realignment",
"with",
"GATK",
"using",
"input",
"commandline",
".",
"GATK",
"requires",
"writing",
"to",
"disk",
"and",
"indexing",
"before",
"realignment",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bamprep.py#L41-L58 |
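The `pa_bam = "%s-prealign%s" % os.path.splitext(out_base_file)` line above works because `os.path.splitext` returns a `(base, ext)` tuple that fills both format placeholders. The same suffix-insertion trick as a small helper (names here are illustrative):

```python
import os

def add_suffix(path, suffix):
    """Insert a suffix before the extension: sample.bam -> sample-prealign.bam."""
    base, ext = os.path.splitext(path)
    return "%s%s%s" % (base, suffix, ext)

assert add_suffix("sample.bam", "-prealign") == "sample-prealign.bam"
```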
237,703 | bcbio/bcbio-nextgen | bcbio/variation/bamprep.py | _get_prep_params | def _get_prep_params(data):
"""Retrieve configuration parameters with defaults for preparing BAM files.
"""
realign_param = dd.get_realign(data)
realign_param = "gatk" if realign_param is True else realign_param
return {"realign": realign_param} | python | def _get_prep_params(data):
realign_param = dd.get_realign(data)
realign_param = "gatk" if realign_param is True else realign_param
return {"realign": realign_param} | [
"def",
"_get_prep_params",
"(",
"data",
")",
":",
"realign_param",
"=",
"dd",
".",
"get_realign",
"(",
"data",
")",
"realign_param",
"=",
"\"gatk\"",
"if",
"realign_param",
"is",
"True",
"else",
"realign_param",
"return",
"{",
"\"realign\"",
":",
"realign_param"... | Retrieve configuration parameters with defaults for preparing BAM files. | [
"Retrieve",
"configuration",
"parameters",
"with",
"defaults",
"for",
"preparing",
"BAM",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bamprep.py#L90-L95 |
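A self-contained sketch of the normalization in `_get_prep_params` above: a bare `True` in the realign config maps to the default `"gatk"` method, while any other value passes through unchanged (`"abra"` is just an arbitrary method name for the test):

```python
def normalize_realign(realign_param):
    # A bare True means "use the default realigner"; strings already name one.
    return "gatk" if realign_param is True else realign_param

assert normalize_realign(True) == "gatk"
assert normalize_realign("abra") == "abra"
assert normalize_realign(False) is False
```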
237,704 | bcbio/bcbio-nextgen | bcbio/variation/bamprep.py | _piped_bamprep_region | def _piped_bamprep_region(data, region, out_file, tmp_dir):
"""Do work of preparing BAM input file on the selected region.
"""
if _need_prep(data):
prep_params = _get_prep_params(data)
_piped_bamprep_region_gatk(data, region, prep_params, out_file, tmp_dir)
else:
raise ValueError("No realignment specified") | python | def _piped_bamprep_region(data, region, out_file, tmp_dir):
if _need_prep(data):
prep_params = _get_prep_params(data)
_piped_bamprep_region_gatk(data, region, prep_params, out_file, tmp_dir)
else:
raise ValueError("No realignment specified") | [
"def",
"_piped_bamprep_region",
"(",
"data",
",",
"region",
",",
"out_file",
",",
"tmp_dir",
")",
":",
"if",
"_need_prep",
"(",
"data",
")",
":",
"prep_params",
"=",
"_get_prep_params",
"(",
"data",
")",
"_piped_bamprep_region_gatk",
"(",
"data",
",",
"region"... | Do work of preparing BAM input file on the selected region. | [
"Do",
"work",
"of",
"preparing",
"BAM",
"input",
"file",
"on",
"the",
"selected",
"region",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bamprep.py#L101-L108 |
237,705 | bcbio/bcbio-nextgen | bcbio/variation/bamprep.py | piped_bamprep | def piped_bamprep(data, region=None, out_file=None):
"""Perform full BAM preparation using pipes to avoid intermediate disk IO.
Handles realignment of original BAMs.
"""
data["region"] = region
if not _need_prep(data):
return [data]
else:
utils.safe_makedir(os.path.dirname(out_file))
if region[0] == "nochrom":
prep_bam = shared.write_nochr_reads(data["work_bam"], out_file, data["config"])
elif region[0] == "noanalysis":
prep_bam = shared.write_noanalysis_reads(data["work_bam"], region[1], out_file,
data["config"])
else:
if not utils.file_exists(out_file):
with tx_tmpdir(data) as tmp_dir:
_piped_bamprep_region(data, region, out_file, tmp_dir)
prep_bam = out_file
bam.index(prep_bam, data["config"])
data["work_bam"] = prep_bam
return [data] | python | def piped_bamprep(data, region=None, out_file=None):
data["region"] = region
if not _need_prep(data):
return [data]
else:
utils.safe_makedir(os.path.dirname(out_file))
if region[0] == "nochrom":
prep_bam = shared.write_nochr_reads(data["work_bam"], out_file, data["config"])
elif region[0] == "noanalysis":
prep_bam = shared.write_noanalysis_reads(data["work_bam"], region[1], out_file,
data["config"])
else:
if not utils.file_exists(out_file):
with tx_tmpdir(data) as tmp_dir:
_piped_bamprep_region(data, region, out_file, tmp_dir)
prep_bam = out_file
bam.index(prep_bam, data["config"])
data["work_bam"] = prep_bam
return [data] | [
"def",
"piped_bamprep",
"(",
"data",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"data",
"[",
"\"region\"",
"]",
"=",
"region",
"if",
"not",
"_need_prep",
"(",
"data",
")",
":",
"return",
"[",
"data",
"]",
"else",
":",
"utils"... | Perform full BAM preparation using pipes to avoid intermediate disk IO.
Handles realignment of original BAMs. | [
"Perform",
"full",
"BAM",
"preparation",
"using",
"pipes",
"to",
"avoid",
"intermediate",
"disk",
"IO",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/bamprep.py#L110-L132 |
237,706 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | update_file | def update_file(finfo, sample_info, config):
"""Update file in Galaxy data libraries.
"""
if GalaxyInstance is None:
raise ImportError("Could not import bioblend.galaxy")
if "dir" not in config:
raise ValueError("Galaxy upload requires `dir` parameter in config specifying the "
"shared filesystem path to move files to.")
if "outputs" in config:
_galaxy_tool_copy(finfo, config["outputs"])
else:
_galaxy_library_upload(finfo, sample_info, config) | python | def update_file(finfo, sample_info, config):
if GalaxyInstance is None:
raise ImportError("Could not import bioblend.galaxy")
if "dir" not in config:
raise ValueError("Galaxy upload requires `dir` parameter in config specifying the "
"shared filesystem path to move files to.")
if "outputs" in config:
_galaxy_tool_copy(finfo, config["outputs"])
else:
_galaxy_library_upload(finfo, sample_info, config) | [
"def",
"update_file",
"(",
"finfo",
",",
"sample_info",
",",
"config",
")",
":",
"if",
"GalaxyInstance",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"\"Could not import bioblend.galaxy\"",
")",
"if",
"\"dir\"",
"not",
"in",
"config",
":",
"raise",
"ValueErro... | Update file in Galaxy data libraries. | [
"Update",
"file",
"in",
"Galaxy",
"data",
"libraries",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L28-L39 |
237,707 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _galaxy_tool_copy | def _galaxy_tool_copy(finfo, outputs):
"""Copy information directly to pre-defined outputs from a Galaxy tool.
XXX Needs generalization
"""
tool_map = {"align": "bam", "variants": "vcf.gz"}
for galaxy_key, finfo_type in tool_map.items():
if galaxy_key in outputs and finfo.get("type") == finfo_type:
shutil.copy(finfo["path"], outputs[galaxy_key]) | python | def _galaxy_tool_copy(finfo, outputs):
tool_map = {"align": "bam", "variants": "vcf.gz"}
for galaxy_key, finfo_type in tool_map.items():
if galaxy_key in outputs and finfo.get("type") == finfo_type:
shutil.copy(finfo["path"], outputs[galaxy_key]) | [
"def",
"_galaxy_tool_copy",
"(",
"finfo",
",",
"outputs",
")",
":",
"tool_map",
"=",
"{",
"\"align\"",
":",
"\"bam\"",
",",
"\"variants\"",
":",
"\"vcf.gz\"",
"}",
"for",
"galaxy_key",
",",
"finfo_type",
"in",
"tool_map",
".",
"items",
"(",
")",
":",
"if",... | Copy information directly to pre-defined outputs from a Galaxy tool.
XXX Needs generalization | [
"Copy",
"information",
"directly",
"to",
"pre",
"-",
"defined",
"outputs",
"from",
"a",
"Galaxy",
"tool",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L41-L49 |
237,708 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _galaxy_library_upload | def _galaxy_library_upload(finfo, sample_info, config):
"""Upload results to galaxy library.
"""
folder_name = "%s_%s" % (config["fc_date"], config["fc_name"])
storage_dir = utils.safe_makedir(os.path.join(config["dir"], folder_name))
if finfo.get("type") == "directory":
storage_file = None
if finfo.get("ext") == "qc":
pdf_file = qcsummary.prep_pdf(finfo["path"], config)
if pdf_file:
finfo["path"] = pdf_file
finfo["type"] = "pdf"
storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
else:
storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
if "galaxy_url" in config and "galaxy_api_key" in config:
galaxy_url = config["galaxy_url"]
if not galaxy_url.endswith("/"):
galaxy_url += "/"
gi = GalaxyInstance(galaxy_url, config["galaxy_api_key"])
else:
raise ValueError("Galaxy upload requires `galaxy_url` and `galaxy_api_key` in config")
if storage_file and sample_info and not finfo.get("index", False) and not finfo.get("plus", False):
_to_datalibrary_safe(storage_file, gi, folder_name, sample_info, config) | python | def _galaxy_library_upload(finfo, sample_info, config):
folder_name = "%s_%s" % (config["fc_date"], config["fc_name"])
storage_dir = utils.safe_makedir(os.path.join(config["dir"], folder_name))
if finfo.get("type") == "directory":
storage_file = None
if finfo.get("ext") == "qc":
pdf_file = qcsummary.prep_pdf(finfo["path"], config)
if pdf_file:
finfo["path"] = pdf_file
finfo["type"] = "pdf"
storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
else:
storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
if "galaxy_url" in config and "galaxy_api_key" in config:
galaxy_url = config["galaxy_url"]
if not galaxy_url.endswith("/"):
galaxy_url += "/"
gi = GalaxyInstance(galaxy_url, config["galaxy_api_key"])
else:
raise ValueError("Galaxy upload requires `galaxy_url` and `galaxy_api_key` in config")
if storage_file and sample_info and not finfo.get("index", False) and not finfo.get("plus", False):
_to_datalibrary_safe(storage_file, gi, folder_name, sample_info, config) | [
"def",
"_galaxy_library_upload",
"(",
"finfo",
",",
"sample_info",
",",
"config",
")",
":",
"folder_name",
"=",
"\"%s_%s\"",
"%",
"(",
"config",
"[",
"\"fc_date\"",
"]",
",",
"config",
"[",
"\"fc_name\"",
"]",
")",
"storage_dir",
"=",
"utils",
".",
"safe_mak... | Upload results to galaxy library. | [
"Upload",
"results",
"to",
"galaxy",
"library",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L51-L74 |
237,709 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _to_datalibrary_safe | def _to_datalibrary_safe(fname, gi, folder_name, sample_info, config):
"""Upload with retries for intermittent JSON failures.
"""
num_tries = 0
max_tries = 5
while 1:
try:
_to_datalibrary(fname, gi, folder_name, sample_info, config)
break
except (simplejson.scanner.JSONDecodeError, bioblend.galaxy.client.ConnectionError) as e:
num_tries += 1
if num_tries > max_tries:
raise
print("Retrying upload, failed with:", str(e))
time.sleep(5) | python | def _to_datalibrary_safe(fname, gi, folder_name, sample_info, config):
num_tries = 0
max_tries = 5
while 1:
try:
_to_datalibrary(fname, gi, folder_name, sample_info, config)
break
except (simplejson.scanner.JSONDecodeError, bioblend.galaxy.client.ConnectionError) as e:
num_tries += 1
if num_tries > max_tries:
raise
print("Retrying upload, failed with:", str(e))
time.sleep(5) | [
"def",
"_to_datalibrary_safe",
"(",
"fname",
",",
"gi",
",",
"folder_name",
",",
"sample_info",
",",
"config",
")",
":",
"num_tries",
"=",
"0",
"max_tries",
"=",
"5",
"while",
"1",
":",
"try",
":",
"_to_datalibrary",
"(",
"fname",
",",
"gi",
",",
"folder... | Upload with retries for intermittent JSON failures. | [
"Upload",
"with",
"retries",
"for",
"intermittent",
"JSON",
"failures",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L76-L90 |
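The `_to_datalibrary_safe` wrapper above bounds its retries at five attempts with a five-second pause. The same bounded-retry pattern as a generic, standard-library-only helper; the exception tuple and callable are stand-ins for the bioblend specifics:

```python
import time

def with_retries(fn, max_tries=5, pause=5, retry_on=(ValueError,)):
    """Call fn(), retrying up to max_tries extra times on the given exceptions."""
    num_tries = 0
    while True:
        try:
            return fn()
        except retry_on as e:
            num_tries += 1
            if num_tries > max_tries:
                raise
            print("Retrying, failed with:", e)
            time.sleep(pause)
```

It would be called as, for example, `with_retries(lambda: upload(fname), retry_on=(ConnectionError,))`.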
237,710 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _to_datalibrary | def _to_datalibrary(fname, gi, folder_name, sample_info, config):
"""Upload a file to a Galaxy data library in a project specific folder.
"""
library = _get_library(gi, sample_info, config)
libitems = gi.libraries.show_library(library.id, contents=True)
folder = _get_folder(gi, folder_name, library, libitems)
_file_to_folder(gi, fname, sample_info, libitems, library, folder) | python | def _to_datalibrary(fname, gi, folder_name, sample_info, config):
library = _get_library(gi, sample_info, config)
libitems = gi.libraries.show_library(library.id, contents=True)
folder = _get_folder(gi, folder_name, library, libitems)
_file_to_folder(gi, fname, sample_info, libitems, library, folder) | [
"def",
"_to_datalibrary",
"(",
"fname",
",",
"gi",
",",
"folder_name",
",",
"sample_info",
",",
"config",
")",
":",
"library",
"=",
"_get_library",
"(",
"gi",
",",
"sample_info",
",",
"config",
")",
"libitems",
"=",
"gi",
".",
"libraries",
".",
"show_libra... | Upload a file to a Galaxy data library in a project specific folder. | [
"Upload",
"a",
"file",
"to",
"a",
"Galaxy",
"data",
"library",
"in",
"a",
"project",
"specific",
"folder",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L92-L98 |
237,711 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _file_to_folder | def _file_to_folder(gi, fname, sample_info, libitems, library, folder):
"""Check if file exists on Galaxy, if not upload to specified folder.
"""
full_name = os.path.join(folder["name"], os.path.basename(fname))
# Handle VCF: Galaxy reports VCF files without the gzip extension
file_type = "vcf_bgzip" if full_name.endswith(".vcf.gz") else "auto"
if full_name.endswith(".vcf.gz"):
full_name = full_name.replace(".vcf.gz", ".vcf")
for item in libitems:
if item["name"] == full_name:
return item
logger.info("Uploading to Galaxy library '%s': %s" % (library.name, full_name))
return gi.libraries.upload_from_galaxy_filesystem(str(library.id), fname, folder_id=str(folder["id"]),
link_data_only="link_to_files",
dbkey=sample_info["genome_build"],
file_type=file_type,
roles=str(library.roles) if library.roles else None) | python | def _file_to_folder(gi, fname, sample_info, libitems, library, folder):
full_name = os.path.join(folder["name"], os.path.basename(fname))
# Handle VCF: Galaxy reports VCF files without the gzip extension
file_type = "vcf_bgzip" if full_name.endswith(".vcf.gz") else "auto"
if full_name.endswith(".vcf.gz"):
full_name = full_name.replace(".vcf.gz", ".vcf")
for item in libitems:
if item["name"] == full_name:
return item
logger.info("Uploading to Galaxy library '%s': %s" % (library.name, full_name))
return gi.libraries.upload_from_galaxy_filesystem(str(library.id), fname, folder_id=str(folder["id"]),
link_data_only="link_to_files",
dbkey=sample_info["genome_build"],
file_type=file_type,
roles=str(library.roles) if library.roles else None) | [
"def",
"_file_to_folder",
"(",
"gi",
",",
"fname",
",",
"sample_info",
",",
"libitems",
",",
"library",
",",
"folder",
")",
":",
"full_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
"[",
"\"name\"",
"]",
",",
"os",
".",
"path",
".",
"base... | Check if file exists on Galaxy, if not upload to specified folder. | [
"Check",
"if",
"file",
"exists",
"on",
"Galaxy",
"if",
"not",
"upload",
"to",
"specified",
"folder",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L100-L118 |
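Galaxy lists bgzipped VCFs without their `.gz` suffix, so `_file_to_folder` above normalizes the item name and selects the `vcf_bgzip` datatype. Just that mapping, extracted into a runnable sketch:

```python
import os

def galaxy_name_and_type(folder_name, fname):
    """Return the library item name Galaxy will report plus its datatype."""
    full_name = os.path.join(folder_name, os.path.basename(fname))
    if full_name.endswith(".vcf.gz"):
        # Galaxy lists bgzipped VCFs without the .gz suffix
        return full_name[:-len(".gz")], "vcf_bgzip"
    return full_name, "auto"

assert galaxy_name_and_type("/run1", "/tmp/calls.vcf.gz") == ("/run1/calls.vcf", "vcf_bgzip")
```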
237,712 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _get_folder | def _get_folder(gi, folder_name, library, libitems):
"""Retrieve or create a folder inside the library with the specified name.
"""
for item in libitems:
if item["type"] == "folder" and item["name"] == "/%s" % folder_name:
return item
return gi.libraries.create_folder(library.id, folder_name)[0] | python | def _get_folder(gi, folder_name, library, libitems):
for item in libitems:
if item["type"] == "folder" and item["name"] == "/%s" % folder_name:
return item
return gi.libraries.create_folder(library.id, folder_name)[0] | [
"def",
"_get_folder",
"(",
"gi",
",",
"folder_name",
",",
"library",
",",
"libitems",
")",
":",
"for",
"item",
"in",
"libitems",
":",
"if",
"item",
"[",
"\"type\"",
"]",
"==",
"\"folder\"",
"and",
"item",
"[",
"\"name\"",
"]",
"==",
"\"/%s\"",
"%",
"fo... | Retrieve or create a folder inside the library with the specified name. | [
"Retrieve",
"or",
"create",
"a",
"folder",
"inside",
"the",
"library",
"with",
"the",
"specified",
"name",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L120-L126 |
237,713 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _get_library | def _get_library(gi, sample_info, config):
"""Retrieve the appropriate data library for the current user.
"""
galaxy_lib = sample_info.get("galaxy_library",
config.get("galaxy_library"))
role = sample_info.get("galaxy_role",
config.get("galaxy_role"))
if galaxy_lib:
return _get_library_from_name(gi, galaxy_lib, role, sample_info, create=True)
elif config.get("private_libs") or config.get("lab_association") or config.get("researcher"):
return _library_from_nglims(gi, sample_info, config)
else:
raise ValueError("No Galaxy library specified for sample: %s" %
sample_info["description"]) | python | def _get_library(gi, sample_info, config):
galaxy_lib = sample_info.get("galaxy_library",
config.get("galaxy_library"))
role = sample_info.get("galaxy_role",
config.get("galaxy_role"))
if galaxy_lib:
return _get_library_from_name(gi, galaxy_lib, role, sample_info, create=True)
elif config.get("private_libs") or config.get("lab_association") or config.get("researcher"):
return _library_from_nglims(gi, sample_info, config)
else:
raise ValueError("No Galaxy library specified for sample: %s" %
sample_info["description"]) | [
"def",
"_get_library",
"(",
"gi",
",",
"sample_info",
",",
"config",
")",
":",
"galaxy_lib",
"=",
"sample_info",
".",
"get",
"(",
"\"galaxy_library\"",
",",
"config",
".",
"get",
"(",
"\"galaxy_library\"",
")",
")",
"role",
"=",
"sample_info",
".",
"get",
... | Retrieve the appropriate data library for the current user. | [
"Retrieve",
"the",
"appropriate",
"data",
"library",
"for",
"the",
"current",
"user",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L130-L143 |
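`_get_library` above resolves each setting from the per-sample info first, then falls back to the run-level config. A minimal sketch of that two-level fallback (keys and values are illustrative):

```python
def layered_get(key, sample_info, config, default=None):
    """Prefer the per-sample value, then the run-level config, then a default."""
    return sample_info.get(key, config.get(key, default))

sample = {"galaxy_library": "lab-A"}
run_cfg = {"galaxy_library": "default-lib", "galaxy_role": "viewer"}
assert layered_get("galaxy_library", sample, run_cfg) == "lab-A"
assert layered_get("galaxy_role", sample, run_cfg) == "viewer"
```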
237,714 | bcbio/bcbio-nextgen | bcbio/upload/galaxy.py | _library_from_nglims | def _library_from_nglims(gi, sample_info, config):
"""Retrieve upload library from nglims specified user libraries.
"""
names = [config.get(x, "").strip() for x in ["lab_association", "researcher"]
if config.get(x)]
for name in names:
for ext in ["sequencing", "lab"]:
check_name = "%s %s" % (name.split()[0], ext)
try:
return _get_library_from_name(gi, check_name, None, sample_info)
except ValueError:
pass
check_names = set([x.lower() for x in names])
for libname, role in config["private_libs"]:
        # Try to find library for lab or researcher
if libname.lower() in check_names:
return _get_library_from_name(gi, libname, role, sample_info)
# default to first private library if available
if len(config.get("private_libs", [])) > 0:
libname, role = config["private_libs"][0]
return _get_library_from_name(gi, libname, role, sample_info)
# otherwise use the lab association or researcher name
elif len(names) > 0:
return _get_library_from_name(gi, names[0], None, sample_info, create=True)
else:
raise ValueError("Could not find Galaxy library for sample %s" % sample_info["description"]) | python | def _library_from_nglims(gi, sample_info, config):
names = [config.get(x, "").strip() for x in ["lab_association", "researcher"]
if config.get(x)]
for name in names:
for ext in ["sequencing", "lab"]:
check_name = "%s %s" % (name.split()[0], ext)
try:
return _get_library_from_name(gi, check_name, None, sample_info)
except ValueError:
pass
check_names = set([x.lower() for x in names])
for libname, role in config["private_libs"]:
        # Try to find library for lab or researcher
if libname.lower() in check_names:
return _get_library_from_name(gi, libname, role, sample_info)
# default to first private library if available
if len(config.get("private_libs", [])) > 0:
libname, role = config["private_libs"][0]
return _get_library_from_name(gi, libname, role, sample_info)
# otherwise use the lab association or researcher name
elif len(names) > 0:
return _get_library_from_name(gi, names[0], None, sample_info, create=True)
else:
raise ValueError("Could not find Galaxy library for sample %s" % sample_info["description"]) | [
"def",
"_library_from_nglims",
"(",
"gi",
",",
"sample_info",
",",
"config",
")",
":",
"names",
"=",
"[",
"config",
".",
"get",
"(",
"x",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"[",
"\"lab_association\"",
",",
"\"researcher\"",
"]"... | Retrieve upload library from nglims specified user libraries. | [
"Retrieve",
"upload",
"library",
"from",
"nglims",
"specified",
"user",
"libraries",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/galaxy.py#L163-L188 |
237,715 | bcbio/bcbio-nextgen | bcbio/rnaseq/ericscript.py | prepare_input_data | def prepare_input_data(config):
""" In case of disambiguation, we want to run fusion calling on
the disambiguated reads, which are in the work_bam file.
As EricScript accepts 2 fastq files as input, we need to convert
the .bam to 2 .fq files.
"""
if not dd.get_disambiguate(config):
return dd.get_input_sequence_files(config)
work_bam = dd.get_work_bam(config)
logger.info("Converting disambiguated reads to fastq...")
fq_files = convert_bam_to_fastq(
work_bam, dd.get_work_dir(config), None, None, config
)
return fq_files | python | def prepare_input_data(config):
if not dd.get_disambiguate(config):
return dd.get_input_sequence_files(config)
work_bam = dd.get_work_bam(config)
logger.info("Converting disambiguated reads to fastq...")
fq_files = convert_bam_to_fastq(
work_bam, dd.get_work_dir(config), None, None, config
)
return fq_files | [
"def",
"prepare_input_data",
"(",
"config",
")",
":",
"if",
"not",
"dd",
".",
"get_disambiguate",
"(",
"config",
")",
":",
"return",
"dd",
".",
"get_input_sequence_files",
"(",
"config",
")",
"work_bam",
"=",
"dd",
".",
"get_work_bam",
"(",
"config",
")",
... | In case of disambiguation, we want to run fusion calling on
the disambiguated reads, which are in the work_bam file.
As EricScript accepts 2 fastq files as input, we need to convert
the .bam to 2 .fq files. | [
"In",
"case",
"of",
"disambiguation",
"we",
"want",
"to",
"run",
"fusion",
"calling",
"on",
"the",
"disambiguated",
"reads",
"which",
"are",
"in",
"the",
"work_bam",
"file",
".",
"As",
"EricScript",
"accepts",
"2",
"fastq",
"files",
"as",
"input",
"we",
"n... | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L32-L47 |
237,716 | bcbio/bcbio-nextgen | bcbio/rnaseq/ericscript.py | EricScriptConfig.get_run_command | def get_run_command(self, tx_output_dir, input_files):
"""Constructs a command to run EricScript via do.run function.
:param tx_output_dir: A location where all EricScript output will be
written during execution.
:param input_files: an iterable with paths to 2 fastq files
with input data.
:return: list
"""
logger.debug("Input data: %s" % ', '.join(input_files))
cmd = [
self.EXECUTABLE,
'-db', self._db_location,
'-name', self._sample_name,
'-o', tx_output_dir,
] + list(input_files)
return "export PATH=%s:%s:\"$PATH\"; %s;" % (self._get_samtools0_path(), self._get_ericscript_path(), " ".join(cmd)) | python | def get_run_command(self, tx_output_dir, input_files):
logger.debug("Input data: %s" % ', '.join(input_files))
cmd = [
self.EXECUTABLE,
'-db', self._db_location,
'-name', self._sample_name,
'-o', tx_output_dir,
] + list(input_files)
return "export PATH=%s:%s:\"$PATH\"; %s;" % (self._get_samtools0_path(), self._get_ericscript_path(), " ".join(cmd)) | [
"def",
"get_run_command",
"(",
"self",
",",
"tx_output_dir",
",",
"input_files",
")",
":",
"logger",
".",
"debug",
"(",
"\"Input data: %s\"",
"%",
"', '",
".",
"join",
"(",
"input_files",
")",
")",
"cmd",
"=",
"[",
"self",
".",
"EXECUTABLE",
",",
"'-db'",
... | Constructs a command to run EricScript via do.run function.
:param tx_output_dir: A location where all EricScript output will be
written during execution.
:param input_files: an iterable with paths to 2 fastq files
with input data.
:return: list | [
"Constructs",
"a",
"command",
"to",
"run",
"EricScript",
"via",
"do",
".",
"run",
"function",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L107-L123 |
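The command string above prepends EricScript's own samtools and tool directories to `PATH` so they shadow any system versions for the duration of the call. A hedged sketch of composing such a shell prefix (the directories are placeholders):

```python
def path_prefixed_command(extra_dirs, cmd_args):
    """Build `export PATH=...; cmd;` so extra_dirs shadow the system PATH."""
    prefix = ":".join(extra_dirs)
    return 'export PATH=%s:"$PATH"; %s;' % (prefix, " ".join(cmd_args))

print(path_prefixed_command(["/opt/samtools0/bin", "/opt/ericscript/bin"],
                            ["ericscript.pl", "-name", "sample1"]))
```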
237,717 | bcbio/bcbio-nextgen | bcbio/rnaseq/ericscript.py | EricScriptConfig._get_ericscript_path | def _get_ericscript_path(self):
"""Retrieve PATH to the isolated eriscript anaconda environment.
"""
es = utils.which(os.path.join(utils.get_bcbio_bin(), self.EXECUTABLE))
return os.path.dirname(os.path.realpath(es)) | python | def _get_ericscript_path(self):
es = utils.which(os.path.join(utils.get_bcbio_bin(), self.EXECUTABLE))
return os.path.dirname(os.path.realpath(es)) | [
"def",
"_get_ericscript_path",
"(",
"self",
")",
":",
"es",
"=",
"utils",
".",
"which",
"(",
"os",
".",
"path",
".",
"join",
"(",
"utils",
".",
"get_bcbio_bin",
"(",
")",
",",
"self",
".",
"EXECUTABLE",
")",
")",
"return",
"os",
".",
"path",
".",
"... | Retrieve PATH to the isolated eriscript anaconda environment. | [
"Retrieve",
"PATH",
"to",
"the",
"isolated",
"eriscript",
"anaconda",
"environment",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L125-L129 |
237,718 | bcbio/bcbio-nextgen | bcbio/rnaseq/ericscript.py | EricScriptConfig._get_samtools0_path | def _get_samtools0_path(self):
"""Retrieve PATH to the samtools version specific for eriscript.
"""
samtools_path = os.path.realpath(os.path.join(self._get_ericscript_path(),"..", "..", "bin"))
return samtools_path | python | def _get_samtools0_path(self):
samtools_path = os.path.realpath(os.path.join(self._get_ericscript_path(),"..", "..", "bin"))
return samtools_path | [
"def",
"_get_samtools0_path",
"(",
"self",
")",
":",
"samtools_path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_get_ericscript_path",
"(",
")",
",",
"\"..\"",
",",
"\"..\"",
",",
"\"bin\"",
")",
"... | Retrieve PATH to the samtools version specific for eriscript. | [
"Retrieve",
"PATH",
"to",
"the",
"samtools",
"version",
"specific",
"for",
"eriscript",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L130-L134 |
237,719 | bcbio/bcbio-nextgen | bcbio/rnaseq/ericscript.py | EricScriptConfig.output_dir | def output_dir(self):
"""Absolute path to permanent location in working directory
where EricScript output will be stored.
"""
if self._output_dir is None:
self._output_dir = self._get_output_dir()
return self._output_dir | python | def output_dir(self):
if self._output_dir is None:
self._output_dir = self._get_output_dir()
return self._output_dir | [
"def",
"output_dir",
"(",
"self",
")",
":",
"if",
"self",
".",
"_output_dir",
"is",
"None",
":",
"self",
".",
"_output_dir",
"=",
"self",
".",
"_get_output_dir",
"(",
")",
"return",
"self",
".",
"_output_dir"
] | Absolute path to permanent location in working directory
where EricScript output will be stored. | [
"Absolute",
"path",
"to",
"permanent",
"location",
"in",
"working",
"directory",
"where",
"EricScript",
"output",
"will",
"be",
"stored",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L137-L143 |
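The `output_dir` property above computes its value once and caches it on the instance. The same lazy-caching pattern in a self-contained class (the directory value is a placeholder for the real lookup):

```python
class LazyOutputDir:
    """Sketch of the cached-property pattern behind output_dir above."""
    def __init__(self):
        self._output_dir = None

    @property
    def output_dir(self):
        if self._output_dir is None:          # compute on first access only
            self._output_dir = self._get_output_dir()
        return self._output_dir

    def _get_output_dir(self):
        return "/work/ericscript"             # placeholder for the real lookup

print(LazyOutputDir().output_dir)
```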
237,720 | bcbio/bcbio-nextgen | bcbio/rnaseq/ericscript.py | EricScriptConfig.reference_index | def reference_index(self):
"""Absolute path to the BWA index for EricScript reference data."""
if self._db_location:
ref_indices = glob.glob(os.path.join(self._db_location, "*", self._REF_INDEX))
if ref_indices:
return ref_indices[0] | python | def reference_index(self):
if self._db_location:
ref_indices = glob.glob(os.path.join(self._db_location, "*", self._REF_INDEX))
if ref_indices:
return ref_indices[0] | [
"def",
"reference_index",
"(",
"self",
")",
":",
"if",
"self",
".",
"_db_location",
":",
"ref_indices",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_db_location",
",",
"\"*\"",
",",
"self",
".",
"_REF_INDEX",
")",
... | Absolute path to the BWA index for EricScript reference data. | [
"Absolute",
"path",
"to",
"the",
"BWA",
"index",
"for",
"EricScript",
"reference",
"data",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L158-L163 |
237,721 | bcbio/bcbio-nextgen | bcbio/rnaseq/ericscript.py | EricScriptConfig.reference_fasta | def reference_fasta(self):
"""Absolute path to the fasta file with EricScript reference data."""
if self._db_location:
ref_files = glob.glob(os.path.join(self._db_location, "*", self._REF_FASTA))
if ref_files:
return ref_files[0] | python | def reference_fasta(self):
if self._db_location:
ref_files = glob.glob(os.path.join(self._db_location, "*", self._REF_FASTA))
if ref_files:
return ref_files[0] | [
"def",
"reference_fasta",
"(",
"self",
")",
":",
"if",
"self",
".",
"_db_location",
":",
"ref_files",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_db_location",
",",
"\"*\"",
",",
"self",
".",
"_REF_FASTA",
")",
... | Absolute path to the fasta file with EricScript reference data. | [
"Absolute",
"path",
"to",
"the",
"fasta",
"file",
"with",
"EricScript",
"reference",
"data",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L166-L171 |
237,722 | bcbio/bcbio-nextgen | bcbio/qc/contamination.py | _get_input_args | def _get_input_args(bam_file, data, out_base, background):
"""Retrieve input args, depending on genome build.
VerifyBamID2 only handles GRCh37 (1, 2, 3) not hg19, so need to generate
a pileup for hg19 and fix chromosome naming.
"""
if dd.get_genome_build(data) in ["hg19"]:
return ["--PileupFile", _create_pileup(bam_file, data, out_base, background)]
else:
return ["--BamFile", bam_file] | python | def _get_input_args(bam_file, data, out_base, background):
if dd.get_genome_build(data) in ["hg19"]:
return ["--PileupFile", _create_pileup(bam_file, data, out_base, background)]
else:
return ["--BamFile", bam_file] | [
"def",
"_get_input_args",
"(",
"bam_file",
",",
"data",
",",
"out_base",
",",
"background",
")",
":",
"if",
"dd",
".",
"get_genome_build",
"(",
"data",
")",
"in",
"[",
"\"hg19\"",
"]",
":",
"return",
"[",
"\"--PileupFile\"",
",",
"_create_pileup",
"(",
"ba... | Retrieve input args, depending on genome build.
VerifyBamID2 only handles GRCh37 (1, 2, 3) not hg19, so need to generate
a pileup for hg19 and fix chromosome naming. | [
"Retrieve",
"input",
"args",
"depending",
"on",
"genome",
"build",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/contamination.py#L78-L87 |
237,723 | bcbio/bcbio-nextgen | bcbio/qc/contamination.py | _create_pileup | def _create_pileup(bam_file, data, out_base, background):
"""Create pileup calls in the regions of interest for hg19 -> GRCh37 chromosome mapping.
"""
out_file = "%s-mpileup.txt" % out_base
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
background_bed = os.path.normpath(os.path.join(
os.path.dirname(os.path.realpath(utils.which("verifybamid2"))),
"resource", "%s.%s.%s.vcf.gz.dat.bed" % (background["dataset"],
background["nvars"], background["build"])))
local_bed = os.path.join(os.path.dirname(out_base),
"%s.%s-hg19.bed" % (background["dataset"], background["nvars"]))
if not utils.file_exists(local_bed):
with file_transaction(data, local_bed) as tx_local_bed:
with open(background_bed) as in_handle:
with open(tx_local_bed, "w") as out_handle:
for line in in_handle:
out_handle.write("chr%s" % line)
mpileup_cl = samtools.prep_mpileup([bam_file], dd.get_ref_file(data), data["config"], want_bcf=False,
target_regions=local_bed)
cl = ("{mpileup_cl} | sed 's/^chr//' > {tx_out_file}")
do.run(cl.format(**locals()), "Create pileup from BAM input")
return out_file | python | def _create_pileup(bam_file, data, out_base, background):
out_file = "%s-mpileup.txt" % out_base
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
background_bed = os.path.normpath(os.path.join(
os.path.dirname(os.path.realpath(utils.which("verifybamid2"))),
"resource", "%s.%s.%s.vcf.gz.dat.bed" % (background["dataset"],
background["nvars"], background["build"])))
local_bed = os.path.join(os.path.dirname(out_base),
"%s.%s-hg19.bed" % (background["dataset"], background["nvars"]))
if not utils.file_exists(local_bed):
with file_transaction(data, local_bed) as tx_local_bed:
with open(background_bed) as in_handle:
with open(tx_local_bed, "w") as out_handle:
for line in in_handle:
out_handle.write("chr%s" % line)
mpileup_cl = samtools.prep_mpileup([bam_file], dd.get_ref_file(data), data["config"], want_bcf=False,
target_regions=local_bed)
cl = ("{mpileup_cl} | sed 's/^chr//' > {tx_out_file}")
do.run(cl.format(**locals()), "Create pileup from BAM input")
return out_file | [
"def",
"_create_pileup",
"(",
"bam_file",
",",
"data",
",",
"out_base",
",",
"background",
")",
":",
"out_file",
"=",
"\"%s-mpileup.txt\"",
"%",
"out_base",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
... | Create pileup calls in the regions of interest for hg19 -> GRCh37 chromosome mapping. | [
"Create",
"pileup",
"calls",
"in",
"the",
"regions",
"of",
"interest",
"for",
"hg19",
"-",
">",
"GRCh37",
"chromosome",
"mapping",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/contamination.py#L89-L111 |
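`_create_pileup` above bridges hg19 (`chr1`) and GRCh37 (`1`) contig naming: it writes `chr`-prefixed copies of the GRCh37 BED lines, then strips the prefix from the pileup via `sed 's/^chr//'`. The same round-trip in plain Python:

```python
def to_hg19(line):
    """GRCh37 -> hg19: add the chr prefix, like writing "chr%s" % line."""
    return "chr" + line

def to_grch37(line):
    """hg19 -> GRCh37: drop a leading chr, like sed 's/^chr//'."""
    return line[3:] if line.startswith("chr") else line

assert to_hg19("1\t100\t200") == "chr1\t100\t200"
assert to_grch37("chr1\t100\t200") == "1\t100\t200"
```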
237,724 | bcbio/bcbio-nextgen | bcbio/structural/convert.py | _cnvbed_to_bed | def _cnvbed_to_bed(in_file, caller, out_file):
"""Convert cn_mops CNV based bed files into flattened BED
"""
with open(out_file, "w") as out_handle:
for feat in pybedtools.BedTool(in_file):
out_handle.write("\t".join([feat.chrom, str(feat.start), str(feat.end),
"cnv%s_%s" % (feat.score, caller)])
+ "\n") | python | def _cnvbed_to_bed(in_file, caller, out_file):
with open(out_file, "w") as out_handle:
for feat in pybedtools.BedTool(in_file):
out_handle.write("\t".join([feat.chrom, str(feat.start), str(feat.end),
"cnv%s_%s" % (feat.score, caller)])
+ "\n") | [
"def",
"_cnvbed_to_bed",
"(",
"in_file",
",",
"caller",
",",
"out_file",
")",
":",
"with",
"open",
"(",
"out_file",
",",
"\"w\"",
")",
"as",
"out_handle",
":",
"for",
"feat",
"in",
"pybedtools",
".",
"BedTool",
"(",
"in_file",
")",
":",
"out_handle",
"."... | Convert cn_mops CNV based bed files into flattened BED | [
"Convert",
"cn_mops",
"CNV",
"based",
"bed",
"files",
"into",
"flattened",
"BED"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/convert.py#L44-L51 |
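A dependency-free version of the flattening in `_cnvbed_to_bed` above, with plain tuples standing in for `pybedtools` intervals:

```python
def flatten_cnv_features(features, caller):
    """Yield 4-column BED lines; the name field encodes copy number + caller."""
    for chrom, start, end, score in features:
        yield "\t".join([chrom, str(start), str(end),
                         "cnv%s_%s" % (score, caller)])

for line in flatten_cnv_features([("chr1", 100, 500, 3)], "cn_mops"):
    print(line)
```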
237,725 | bcbio/bcbio-nextgen | bcbio/structural/convert.py | to_bed | def to_bed(call, sample, work_dir, calls, data):
"""Create a simplified BED file from caller specific input.
"""
out_file = os.path.join(work_dir, "%s-%s-flat.bed" % (sample, call["variantcaller"]))
if call.get("vrn_file") and not utils.file_uptodate(out_file, call["vrn_file"]):
with file_transaction(data, out_file) as tx_out_file:
convert_fn = CALLER_TO_BED.get(call["variantcaller"])
if convert_fn:
vrn_file = call["vrn_file"]
if call["variantcaller"] in SUBSET_BY_SUPPORT:
ecalls = [x for x in calls if x["variantcaller"] in SUBSET_BY_SUPPORT[call["variantcaller"]]]
if len(ecalls) > 0:
vrn_file = _subset_by_support(call["vrn_file"], ecalls, data)
convert_fn(vrn_file, call["variantcaller"], tx_out_file)
if utils.file_exists(out_file):
return out_file | python | def to_bed(call, sample, work_dir, calls, data):
out_file = os.path.join(work_dir, "%s-%s-flat.bed" % (sample, call["variantcaller"]))
if call.get("vrn_file") and not utils.file_uptodate(out_file, call["vrn_file"]):
with file_transaction(data, out_file) as tx_out_file:
convert_fn = CALLER_TO_BED.get(call["variantcaller"])
if convert_fn:
vrn_file = call["vrn_file"]
if call["variantcaller"] in SUBSET_BY_SUPPORT:
ecalls = [x for x in calls if x["variantcaller"] in SUBSET_BY_SUPPORT[call["variantcaller"]]]
if len(ecalls) > 0:
vrn_file = _subset_by_support(call["vrn_file"], ecalls, data)
convert_fn(vrn_file, call["variantcaller"], tx_out_file)
if utils.file_exists(out_file):
return out_file | [
"def",
"to_bed",
"(",
"call",
",",
"sample",
",",
"work_dir",
",",
"calls",
",",
"data",
")",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-%s-flat.bed\"",
"%",
"(",
"sample",
",",
"call",
"[",
"\"variantcaller\"",
"... | Create a simplified BED file from caller specific input. | [
"Create",
"a",
"simplified",
"BED",
"file",
"from",
"caller",
"specific",
"input",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/convert.py#L65-L80 |
237,726 | bcbio/bcbio-nextgen | bcbio/structural/convert.py | _subset_by_support | def _subset_by_support(orig_vcf, cmp_calls, data):
"""Subset orig_vcf to calls also present in any of the comparison callers.
"""
cmp_vcfs = [x["vrn_file"] for x in cmp_calls]
out_file = "%s-inensemble.vcf.gz" % utils.splitext_plus(orig_vcf)[0]
if not utils.file_uptodate(out_file, orig_vcf):
with file_transaction(data, out_file) as tx_out_file:
cmd = "bedtools intersect -header -wa -f 0.5 -r -a {orig_vcf} -b "
for cmp_vcf in cmp_vcfs:
cmd += "<(bcftools view -f 'PASS,.' %s) " % cmp_vcf
cmd += "| bgzip -c > {tx_out_file}"
do.run(cmd.format(**locals()), "Subset calls by those present in Ensemble output")
return vcfutils.bgzip_and_index(out_file, data["config"]) | python | def _subset_by_support(orig_vcf, cmp_calls, data):
cmp_vcfs = [x["vrn_file"] for x in cmp_calls]
out_file = "%s-inensemble.vcf.gz" % utils.splitext_plus(orig_vcf)[0]
if not utils.file_uptodate(out_file, orig_vcf):
with file_transaction(data, out_file) as tx_out_file:
cmd = "bedtools intersect -header -wa -f 0.5 -r -a {orig_vcf} -b "
for cmp_vcf in cmp_vcfs:
cmd += "<(bcftools view -f 'PASS,.' %s) " % cmp_vcf
cmd += "| bgzip -c > {tx_out_file}"
do.run(cmd.format(**locals()), "Subset calls by those present in Ensemble output")
return vcfutils.bgzip_and_index(out_file, data["config"]) | [
"def",
"_subset_by_support",
"(",
"orig_vcf",
",",
"cmp_calls",
",",
"data",
")",
":",
"cmp_vcfs",
"=",
"[",
"x",
"[",
"\"vrn_file\"",
"]",
"for",
"x",
"in",
"cmp_calls",
"]",
"out_file",
"=",
"\"%s-inensemble.vcf.gz\"",
"%",
"utils",
".",
"splitext_plus",
"... | Subset orig_vcf to calls also present in any of the comparison callers. | [
"Subset",
"orig_vcf",
"to",
"calls",
"also",
"present",
"in",
"any",
"of",
"the",
"comparison",
"callers",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/convert.py#L82-L94 |
237,727 | bcbio/bcbio-nextgen | bcbio/qc/coverage.py | run | def run(bam_file, data, out_dir):
"""Run coverage QC analysis
"""
out = dict()
out_dir = utils.safe_makedir(out_dir)
if dd.get_coverage(data) and dd.get_coverage(data) not in ["None"]:
merged_bed_file = bedutils.clean_file(dd.get_coverage_merged(data), data, prefix="cov-", simple=True)
target_name = "coverage"
elif dd.get_coverage_interval(data) != "genome":
merged_bed_file = dd.get_variant_regions_merged(data) or dd.get_sample_callable(data)
target_name = "variant_regions"
else:
merged_bed_file = None
target_name = "genome"
avg_depth = cov.get_average_coverage(target_name, merged_bed_file, data)
if target_name == "coverage":
out_files = cov.coverage_region_detailed_stats(target_name, merged_bed_file, data, out_dir)
else:
out_files = []
out['Avg_coverage'] = avg_depth
samtools_stats_dir = os.path.join(out_dir, os.path.pardir, 'samtools')
from bcbio.qc import samtools
samtools_stats = samtools.run(bam_file, data, samtools_stats_dir)["metrics"]
out["Total_reads"] = total_reads = int(samtools_stats["Total_reads"])
out["Mapped_reads"] = mapped = int(samtools_stats["Mapped_reads"])
out["Mapped_paired_reads"] = int(samtools_stats["Mapped_paired_reads"])
out['Duplicates'] = dups = int(samtools_stats["Duplicates"])
if total_reads:
out["Mapped_reads_pct"] = 100.0 * mapped / total_reads
if mapped:
out['Duplicates_pct'] = 100.0 * dups / mapped
if dd.get_coverage_interval(data) == "genome":
mapped_unique = mapped - dups
else:
mapped_unique = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False)
out['Mapped_unique_reads'] = mapped_unique
if merged_bed_file:
ontarget = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=merged_bed_file, target_name=target_name)
out["Ontarget_unique_reads"] = ontarget
if mapped_unique:
out["Ontarget_pct"] = 100.0 * ontarget / mapped_unique
out['Offtarget_pct'] = 100.0 * (mapped_unique - ontarget) / mapped_unique
if dd.get_coverage_interval(data) != "genome":
# Skip padded calculation for WGS even if the "coverage" file is specified
            # the padded statistic only makes sense for exomes and panels
padded_bed_file = bedutils.get_padded_bed_file(out_dir, merged_bed_file, 200, data)
ontarget_padded = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=padded_bed_file, target_name=target_name + "_padded")
out["Ontarget_padded_pct"] = 100.0 * ontarget_padded / mapped_unique
if total_reads:
out['Usable_pct'] = 100.0 * ontarget / total_reads
indexcov_files = _goleft_indexcov(bam_file, data, out_dir)
out_files += [x for x in indexcov_files if x and utils.file_exists(x)]
out = {"metrics": out}
if len(out_files) > 0:
out["base"] = out_files[0]
out["secondary"] = out_files[1:]
return out | python | def run(bam_file, data, out_dir):
out = dict()
out_dir = utils.safe_makedir(out_dir)
if dd.get_coverage(data) and dd.get_coverage(data) not in ["None"]:
merged_bed_file = bedutils.clean_file(dd.get_coverage_merged(data), data, prefix="cov-", simple=True)
target_name = "coverage"
elif dd.get_coverage_interval(data) != "genome":
merged_bed_file = dd.get_variant_regions_merged(data) or dd.get_sample_callable(data)
target_name = "variant_regions"
else:
merged_bed_file = None
target_name = "genome"
avg_depth = cov.get_average_coverage(target_name, merged_bed_file, data)
if target_name == "coverage":
out_files = cov.coverage_region_detailed_stats(target_name, merged_bed_file, data, out_dir)
else:
out_files = []
out['Avg_coverage'] = avg_depth
samtools_stats_dir = os.path.join(out_dir, os.path.pardir, 'samtools')
from bcbio.qc import samtools
samtools_stats = samtools.run(bam_file, data, samtools_stats_dir)["metrics"]
out["Total_reads"] = total_reads = int(samtools_stats["Total_reads"])
out["Mapped_reads"] = mapped = int(samtools_stats["Mapped_reads"])
out["Mapped_paired_reads"] = int(samtools_stats["Mapped_paired_reads"])
out['Duplicates'] = dups = int(samtools_stats["Duplicates"])
if total_reads:
out["Mapped_reads_pct"] = 100.0 * mapped / total_reads
if mapped:
out['Duplicates_pct'] = 100.0 * dups / mapped
if dd.get_coverage_interval(data) == "genome":
mapped_unique = mapped - dups
else:
mapped_unique = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False)
out['Mapped_unique_reads'] = mapped_unique
if merged_bed_file:
ontarget = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=merged_bed_file, target_name=target_name)
out["Ontarget_unique_reads"] = ontarget
if mapped_unique:
out["Ontarget_pct"] = 100.0 * ontarget / mapped_unique
out['Offtarget_pct'] = 100.0 * (mapped_unique - ontarget) / mapped_unique
if dd.get_coverage_interval(data) != "genome":
# Skip padded calculation for WGS even if the "coverage" file is specified
            # the padded statistic only makes sense for exomes and panels
padded_bed_file = bedutils.get_padded_bed_file(out_dir, merged_bed_file, 200, data)
ontarget_padded = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=padded_bed_file, target_name=target_name + "_padded")
out["Ontarget_padded_pct"] = 100.0 * ontarget_padded / mapped_unique
if total_reads:
out['Usable_pct'] = 100.0 * ontarget / total_reads
indexcov_files = _goleft_indexcov(bam_file, data, out_dir)
out_files += [x for x in indexcov_files if x and utils.file_exists(x)]
out = {"metrics": out}
if len(out_files) > 0:
out["base"] = out_files[0]
out["secondary"] = out_files[1:]
return out | [
"def",
"run",
"(",
"bam_file",
",",
"data",
",",
"out_dir",
")",
":",
"out",
"=",
"dict",
"(",
")",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"out_dir",
")",
"if",
"dd",
".",
"get_coverage",
"(",
"data",
")",
"and",
"dd",
".",
"get_coverage"... | Run coverage QC analysis | [
"Run",
"coverage",
"QC",
"analysis"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/coverage.py#L15-L82 |
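The coverage `run` metrics above guard every percentage behind a truthiness check on the denominator. The same defensive division factored into a helper (metric names are illustrative):

```python
def pct(numerator, denominator):
    """Return a percentage, or None when the denominator is zero or missing."""
    return 100.0 * numerator / denominator if denominator else None

metrics = {"Total_reads": 1000, "Mapped_reads": 900, "Duplicates": 90}
metrics["Mapped_reads_pct"] = pct(metrics["Mapped_reads"], metrics["Total_reads"])
metrics["Duplicates_pct"] = pct(metrics["Duplicates"], metrics["Mapped_reads"])
print(metrics["Mapped_reads_pct"], metrics["Duplicates_pct"])
```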
237,728 | bcbio/bcbio-nextgen | bcbio/qc/coverage.py | _goleft_indexcov | def _goleft_indexcov(bam_file, data, out_dir):
"""Use goleft indexcov to estimate coverage distributions using BAM index.
Only used for whole genome runs as captures typically don't have enough data
to be useful for index-only summaries.
"""
if not dd.get_coverage_interval(data) == "genome":
return []
out_dir = utils.safe_makedir(os.path.join(out_dir, "indexcov"))
out_files = [os.path.join(out_dir, "%s-indexcov.%s" % (dd.get_sample_name(data), ext))
for ext in ["roc", "ped", "bed.gz"]]
if not utils.file_uptodate(out_files[-1], bam_file):
with transaction.tx_tmpdir(data) as tmp_dir:
tmp_dir = utils.safe_makedir(os.path.join(tmp_dir, dd.get_sample_name(data)))
gender_chroms = [x.name for x in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_sex(x.name)]
gender_args = "--sex %s" % (",".join(gender_chroms)) if gender_chroms else ""
cmd = "goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}"
try:
do.run(cmd.format(**locals()), "QC: goleft indexcov")
except subprocess.CalledProcessError as msg:
if not ("indexcov: no usable" in str(msg) or
("indexcov: expected" in str(msg) and "sex chromosomes, found:" in str(msg))):
raise
for out_file in out_files:
orig_file = os.path.join(tmp_dir, os.path.basename(out_file))
if utils.file_exists(orig_file):
utils.copy_plus(orig_file, out_file)
# MultiQC needs non-gzipped/BED inputs so unpack the file
out_bed = out_files[-1].replace(".bed.gz", ".tsv")
if utils.file_exists(out_files[-1]) and not utils.file_exists(out_bed):
with transaction.file_transaction(data, out_bed) as tx_out_bed:
cmd = "gunzip -c %s > %s" % (out_files[-1], tx_out_bed)
do.run(cmd, "Unpack indexcov BED file")
out_files[-1] = out_bed
return [x for x in out_files if utils.file_exists(x)] | python | def _goleft_indexcov(bam_file, data, out_dir):
if not dd.get_coverage_interval(data) == "genome":
return []
out_dir = utils.safe_makedir(os.path.join(out_dir, "indexcov"))
out_files = [os.path.join(out_dir, "%s-indexcov.%s" % (dd.get_sample_name(data), ext))
for ext in ["roc", "ped", "bed.gz"]]
if not utils.file_uptodate(out_files[-1], bam_file):
with transaction.tx_tmpdir(data) as tmp_dir:
tmp_dir = utils.safe_makedir(os.path.join(tmp_dir, dd.get_sample_name(data)))
gender_chroms = [x.name for x in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_sex(x.name)]
gender_args = "--sex %s" % (",".join(gender_chroms)) if gender_chroms else ""
cmd = "goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}"
try:
do.run(cmd.format(**locals()), "QC: goleft indexcov")
except subprocess.CalledProcessError as msg:
if not ("indexcov: no usable" in str(msg) or
("indexcov: expected" in str(msg) and "sex chromosomes, found:" in str(msg))):
raise
for out_file in out_files:
orig_file = os.path.join(tmp_dir, os.path.basename(out_file))
if utils.file_exists(orig_file):
utils.copy_plus(orig_file, out_file)
# MultiQC needs non-gzipped/BED inputs so unpack the file
out_bed = out_files[-1].replace(".bed.gz", ".tsv")
if utils.file_exists(out_files[-1]) and not utils.file_exists(out_bed):
with transaction.file_transaction(data, out_bed) as tx_out_bed:
cmd = "gunzip -c %s > %s" % (out_files[-1], tx_out_bed)
do.run(cmd, "Unpack indexcov BED file")
out_files[-1] = out_bed
return [x for x in out_files if utils.file_exists(x)] | [
"def",
"_goleft_indexcov",
"(",
"bam_file",
",",
"data",
",",
"out_dir",
")",
":",
"if",
"not",
"dd",
".",
"get_coverage_interval",
"(",
"data",
")",
"==",
"\"genome\"",
":",
"return",
"[",
"]",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"os",
".... | Use goleft indexcov to estimate coverage distributions using BAM index.
Only used for whole genome runs as captures typically don't have enough data
to be useful for index-only summaries. | [
"Use",
"goleft",
"indexcov",
"to",
"estimate",
"coverage",
"distributions",
"using",
"BAM",
"index",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/coverage.py#L84-L118 |
237,729 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_sort | def picard_sort(picard, align_bam, sort_order="coordinate",
out_file=None, compression_level=None, pipe=False):
"""Sort a BAM file by coordinates.
"""
base, ext = os.path.splitext(align_bam)
if out_file is None:
out_file = "%s-sort%s" % (base, ext)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", out_file if pipe else tx_out_file),
("TMP_DIR", tmp_dir),
("SORT_ORDER", sort_order)]
if compression_level:
opts.append(("COMPRESSION_LEVEL", compression_level))
picard.run("SortSam", opts, pipe=pipe)
return out_file | python | def picard_sort(picard, align_bam, sort_order="coordinate",
out_file=None, compression_level=None, pipe=False):
base, ext = os.path.splitext(align_bam)
if out_file is None:
out_file = "%s-sort%s" % (base, ext)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", out_file if pipe else tx_out_file),
("TMP_DIR", tmp_dir),
("SORT_ORDER", sort_order)]
if compression_level:
opts.append(("COMPRESSION_LEVEL", compression_level))
picard.run("SortSam", opts, pipe=pipe)
return out_file | [
"def",
"picard_sort",
"(",
"picard",
",",
"align_bam",
",",
"sort_order",
"=",
"\"coordinate\"",
",",
"out_file",
"=",
"None",
",",
"compression_level",
"=",
"None",
",",
"pipe",
"=",
"False",
")",
":",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
... | Sort a BAM file by coordinates. | [
"Sort",
"a",
"BAM",
"file",
"by",
"coordinates",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L46-L63 |
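The Picard wrappers above collect options as `(KEY, value)` tuples. A sketch of rendering such a list into the legacy `KEY=value` argument form those tuples suggest (newer Picard releases also accept `--KEY value`):

```python
def picard_opts_to_args(opts):
    """Render [("INPUT", "a.bam"), ...] as ["INPUT=a.bam", ...]."""
    return ["%s=%s" % (key, value) for key, value in opts]

opts = [("INPUT", "align.bam"), ("OUTPUT", "sorted.bam"),
        ("SORT_ORDER", "coordinate"), ("TMP_DIR", "/tmp")]
print(picard_opts_to_args(opts))
```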
237,730 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_merge | def picard_merge(picard, in_files, out_file=None,
merge_seq_dicts=False):
"""Merge multiple BAM files together with Picard.
"""
if out_file is None:
out_file = "%smerge.bam" % os.path.commonprefix(in_files)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("OUTPUT", tx_out_file),
("SORT_ORDER", "coordinate"),
("MERGE_SEQUENCE_DICTIONARIES",
"true" if merge_seq_dicts else "false"),
("USE_THREADING", "true"),
("TMP_DIR", tmp_dir)]
for in_file in in_files:
opts.append(("INPUT", in_file))
picard.run("MergeSamFiles", opts)
return out_file | python | def picard_merge(picard, in_files, out_file=None,
merge_seq_dicts=False):
if out_file is None:
out_file = "%smerge.bam" % os.path.commonprefix(in_files)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("OUTPUT", tx_out_file),
("SORT_ORDER", "coordinate"),
("MERGE_SEQUENCE_DICTIONARIES",
"true" if merge_seq_dicts else "false"),
("USE_THREADING", "true"),
("TMP_DIR", tmp_dir)]
for in_file in in_files:
opts.append(("INPUT", in_file))
picard.run("MergeSamFiles", opts)
return out_file | [
"def",
"picard_merge",
"(",
"picard",
",",
"in_files",
",",
"out_file",
"=",
"None",
",",
"merge_seq_dicts",
"=",
"False",
")",
":",
"if",
"out_file",
"is",
"None",
":",
"out_file",
"=",
"\"%smerge.bam\"",
"%",
"os",
".",
"path",
".",
"commonprefix",
"(",
... | Merge multiple BAM files together with Picard. | [
"Merge",
"multiple",
"BAM",
"files",
"together",
"with",
"Picard",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L65-L83 |
237,731 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_reorder | def picard_reorder(picard, in_bam, ref_file, out_file):
"""Reorder BAM file to match reference file ordering.
"""
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_out_file),
("REFERENCE", ref_file),
("ALLOW_INCOMPLETE_DICT_CONCORDANCE", "true"),
("TMP_DIR", tmp_dir)]
picard.run("ReorderSam", opts)
return out_file | python | def picard_reorder(picard, in_bam, ref_file, out_file):
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_out_file),
("REFERENCE", ref_file),
("ALLOW_INCOMPLETE_DICT_CONCORDANCE", "true"),
("TMP_DIR", tmp_dir)]
picard.run("ReorderSam", opts)
return out_file | [
"def",
"picard_reorder",
"(",
"picard",
",",
"in_bam",
",",
"ref_file",
",",
"out_file",
")",
":",
"if",
"not",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"tx_tmpdir",
"(",
"picard",
".",
"_config",
")",
"as",
"tmp_dir",
":",
"with",
"file_transacti... | Reorder BAM file to match reference file ordering. | [
"Reorder",
"BAM",
"file",
"to",
"match",
"reference",
"file",
"ordering",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L95-L107 |
237,732 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_fix_rgs | def picard_fix_rgs(picard, in_bam, names):
"""Add read group information to BAM files and coordinate sort.
"""
out_file = "%s-fixrgs.bam" % os.path.splitext(in_bam)[0]
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_out_file),
("SORT_ORDER", "coordinate"),
("RGID", names["rg"]),
("RGLB", names.get("lb", "unknown")),
("RGPL", names["pl"]),
("RGPU", names["pu"]),
("RGSM", names["sample"]),
("TMP_DIR", tmp_dir)]
picard.run("AddOrReplaceReadGroups", opts)
return out_file | python | def picard_fix_rgs(picard, in_bam, names):
out_file = "%s-fixrgs.bam" % os.path.splitext(in_bam)[0]
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", in_bam),
("OUTPUT", tx_out_file),
("SORT_ORDER", "coordinate"),
("RGID", names["rg"]),
("RGLB", names.get("lb", "unknown")),
("RGPL", names["pl"]),
("RGPU", names["pu"]),
("RGSM", names["sample"]),
("TMP_DIR", tmp_dir)]
picard.run("AddOrReplaceReadGroups", opts)
return out_file | [
"def",
"picard_fix_rgs",
"(",
"picard",
",",
"in_bam",
",",
"names",
")",
":",
"out_file",
"=",
"\"%s-fixrgs.bam\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"in_bam",
")",
"[",
"0",
"]",
"if",
"not",
"file_exists",
"(",
"out_file",
")",
":",
"wit... | Add read group information to BAM files and coordinate sort. | [
"Add",
"read",
"group",
"information",
"to",
"BAM",
"files",
"and",
"coordinate",
"sort",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L109-L126 |
237,733 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_index_ref | def picard_index_ref(picard, ref_file):
"""Provide a Picard style dict index file for a reference genome.
"""
dict_file = "%s.dict" % os.path.splitext(ref_file)[0]
if not file_exists(dict_file):
with file_transaction(picard._config, dict_file) as tx_dict_file:
opts = [("REFERENCE", ref_file),
("OUTPUT", tx_dict_file)]
picard.run("CreateSequenceDictionary", opts)
return dict_file | python | def picard_index_ref(picard, ref_file):
dict_file = "%s.dict" % os.path.splitext(ref_file)[0]
if not file_exists(dict_file):
with file_transaction(picard._config, dict_file) as tx_dict_file:
opts = [("REFERENCE", ref_file),
("OUTPUT", tx_dict_file)]
picard.run("CreateSequenceDictionary", opts)
return dict_file | [
"def",
"picard_index_ref",
"(",
"picard",
",",
"ref_file",
")",
":",
"dict_file",
"=",
"\"%s.dict\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"ref_file",
")",
"[",
"0",
"]",
"if",
"not",
"file_exists",
"(",
"dict_file",
")",
":",
"with",
"file_tran... | Provide a Picard style dict index file for a reference genome. | [
"Provide",
"a",
"Picard",
"style",
"dict",
"index",
"file",
"for",
"a",
"reference",
"genome",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L142-L151 |
237,734 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_bam_to_fastq | def picard_bam_to_fastq(picard, in_bam, fastq_one, fastq_two=None):
"""Convert BAM file to fastq.
"""
if not file_exists(fastq_one):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, fastq_one) as tx_out1:
opts = [("INPUT", in_bam),
("FASTQ", tx_out1),
("TMP_DIR", tmp_dir)]
if fastq_two is not None:
opts += [("SECOND_END_FASTQ", fastq_two)]
picard.run("SamToFastq", opts)
return (fastq_one, fastq_two) | python | def picard_bam_to_fastq(picard, in_bam, fastq_one, fastq_two=None):
if not file_exists(fastq_one):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, fastq_one) as tx_out1:
opts = [("INPUT", in_bam),
("FASTQ", tx_out1),
("TMP_DIR", tmp_dir)]
if fastq_two is not None:
opts += [("SECOND_END_FASTQ", fastq_two)]
picard.run("SamToFastq", opts)
return (fastq_one, fastq_two) | [
"def",
"picard_bam_to_fastq",
"(",
"picard",
",",
"in_bam",
",",
"fastq_one",
",",
"fastq_two",
"=",
"None",
")",
":",
"if",
"not",
"file_exists",
"(",
"fastq_one",
")",
":",
"with",
"tx_tmpdir",
"(",
"picard",
".",
"_config",
")",
"as",
"tmp_dir",
":",
... | Convert BAM file to fastq. | [
"Convert",
"BAM",
"file",
"to",
"fastq",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L174-L186 |
237,735 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_sam_to_bam | def picard_sam_to_bam(picard, align_sam, fastq_bam, ref_file,
is_paired=False):
"""Convert SAM to BAM, including unmapped reads from fastq BAM file.
"""
to_retain = ["XS", "XG", "XM", "XN", "XO", "YT"]
if align_sam.endswith(".sam"):
out_bam = "%s.bam" % os.path.splitext(align_sam)[0]
elif align_sam.endswith("-align.bam"):
out_bam = "%s.bam" % align_sam.replace("-align.bam", "")
else:
raise NotImplementedError("Input format not recognized")
if not file_exists(out_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_bam) as tx_out_bam:
opts = [("UNMAPPED", fastq_bam),
("ALIGNED", align_sam),
("OUTPUT", tx_out_bam),
("REFERENCE_SEQUENCE", ref_file),
("TMP_DIR", tmp_dir),
("PAIRED_RUN", ("true" if is_paired else "false")),
]
opts += [("ATTRIBUTES_TO_RETAIN", x) for x in to_retain]
picard.run("MergeBamAlignment", opts)
return out_bam | python | def picard_sam_to_bam(picard, align_sam, fastq_bam, ref_file,
is_paired=False):
to_retain = ["XS", "XG", "XM", "XN", "XO", "YT"]
if align_sam.endswith(".sam"):
out_bam = "%s.bam" % os.path.splitext(align_sam)[0]
elif align_sam.endswith("-align.bam"):
out_bam = "%s.bam" % align_sam.replace("-align.bam", "")
else:
raise NotImplementedError("Input format not recognized")
if not file_exists(out_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_bam) as tx_out_bam:
opts = [("UNMAPPED", fastq_bam),
("ALIGNED", align_sam),
("OUTPUT", tx_out_bam),
("REFERENCE_SEQUENCE", ref_file),
("TMP_DIR", tmp_dir),
("PAIRED_RUN", ("true" if is_paired else "false")),
]
opts += [("ATTRIBUTES_TO_RETAIN", x) for x in to_retain]
picard.run("MergeBamAlignment", opts)
return out_bam | [
"def",
"picard_sam_to_bam",
"(",
"picard",
",",
"align_sam",
",",
"fastq_bam",
",",
"ref_file",
",",
"is_paired",
"=",
"False",
")",
":",
"to_retain",
"=",
"[",
"\"XS\"",
",",
"\"XG\"",
",",
"\"XM\"",
",",
"\"XN\"",
",",
"\"XO\"",
",",
"\"YT\"",
"]",
"if... | Convert SAM to BAM, including unmapped reads from fastq BAM file. | [
"Convert",
"SAM",
"to",
"BAM",
"including",
"unmapped",
"reads",
"from",
"fastq",
"BAM",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L188-L211 |
237,736 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_formatconverter | def picard_formatconverter(picard, align_sam):
"""Convert aligned SAM file to BAM format.
"""
out_bam = "%s.bam" % os.path.splitext(align_sam)[0]
if not file_exists(out_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_bam) as tx_out_bam:
opts = [("INPUT", align_sam),
("OUTPUT", tx_out_bam),
("TMP_DIR", tmp_dir)]
picard.run("SamFormatConverter", opts)
return out_bam | python | def picard_formatconverter(picard, align_sam):
out_bam = "%s.bam" % os.path.splitext(align_sam)[0]
if not file_exists(out_bam):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_bam) as tx_out_bam:
opts = [("INPUT", align_sam),
("OUTPUT", tx_out_bam),
("TMP_DIR", tmp_dir)]
picard.run("SamFormatConverter", opts)
return out_bam | [
"def",
"picard_formatconverter",
"(",
"picard",
",",
"align_sam",
")",
":",
"out_bam",
"=",
"\"%s.bam\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"align_sam",
")",
"[",
"0",
"]",
"if",
"not",
"file_exists",
"(",
"out_bam",
")",
":",
"with",
"tx_tmp... | Convert aligned SAM file to BAM format. | [
"Convert",
"aligned",
"SAM",
"file",
"to",
"BAM",
"format",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L213-L224 |
237,737 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_fixmate | def picard_fixmate(picard, align_bam):
"""Run Picard's FixMateInformation generating an aligned output file.
"""
base, ext = os.path.splitext(align_bam)
out_file = "%s-sort%s" % (base, ext)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", tx_out_file),
("TMP_DIR", tmp_dir),
("SORT_ORDER", "coordinate")]
picard.run("FixMateInformation", opts)
return out_file | python | def picard_fixmate(picard, align_bam):
base, ext = os.path.splitext(align_bam)
out_file = "%s-sort%s" % (base, ext)
if not file_exists(out_file):
with tx_tmpdir(picard._config) as tmp_dir:
with file_transaction(picard._config, out_file) as tx_out_file:
opts = [("INPUT", align_bam),
("OUTPUT", tx_out_file),
("TMP_DIR", tmp_dir),
("SORT_ORDER", "coordinate")]
picard.run("FixMateInformation", opts)
return out_file | [
"def",
"picard_fixmate",
"(",
"picard",
",",
"align_bam",
")",
":",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"align_bam",
")",
"out_file",
"=",
"\"%s-sort%s\"",
"%",
"(",
"base",
",",
"ext",
")",
"if",
"not",
"file_exists",
"("... | Run Picard's FixMateInformation generating an aligned output file. | [
"Run",
"Picard",
"s",
"FixMateInformation",
"generating",
"an",
"aligned",
"output",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L244-L257 |
237,738 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | picard_idxstats | def picard_idxstats(picard, align_bam):
"""Retrieve alignment stats from picard using BamIndexStats.
"""
opts = [("INPUT", align_bam)]
stdout = picard.run("BamIndexStats", opts, get_stdout=True)
out = []
AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
for line in stdout.split("\n"):
if line:
parts = line.split()
if len(parts) == 2:
_, unaligned = parts
out.append(AlignInfo("nocontig", 0, 0, int(unaligned)))
elif len(parts) == 7:
contig, _, length, _, aligned, _, unaligned = parts
out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
else:
raise ValueError("Unexpected output from BamIndexStats: %s" % line)
return out | python | def picard_idxstats(picard, align_bam):
opts = [("INPUT", align_bam)]
stdout = picard.run("BamIndexStats", opts, get_stdout=True)
out = []
AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
for line in stdout.split("\n"):
if line:
parts = line.split()
if len(parts) == 2:
_, unaligned = parts
out.append(AlignInfo("nocontig", 0, 0, int(unaligned)))
elif len(parts) == 7:
contig, _, length, _, aligned, _, unaligned = parts
out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
else:
raise ValueError("Unexpected output from BamIndexStats: %s" % line)
return out | [
"def",
"picard_idxstats",
"(",
"picard",
",",
"align_bam",
")",
":",
"opts",
"=",
"[",
"(",
"\"INPUT\"",
",",
"align_bam",
")",
"]",
"stdout",
"=",
"picard",
".",
"run",
"(",
"\"BamIndexStats\"",
",",
"opts",
",",
"get_stdout",
"=",
"True",
")",
"out",
... | Retrieve alignment stats from picard using BamIndexStats. | [
"Retrieve",
"alignment",
"stats",
"from",
"picard",
"using",
"BamIndexStats",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L259-L277 |
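
The parsing logic above dispatches purely on field count. A self-contained re-statement, runnable without a Picard runner, is sketched below; the example text is fabricated to exercise both branches (7-field per-contig lines and the 2-field no-coordinate line).

import collections

AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])

def parse_index_stats(text):
    out = []
    for line in text.splitlines():
        parts = line.split()
        if not parts:
            continue
        if len(parts) == 2:  # e.g. "NoCoordinateCount= 42"
            out.append(AlignInfo("nocontig", 0, 0, int(parts[1])))
        elif len(parts) == 7:  # contig, length=, N, Aligned=, N, Unaligned=, N
            contig, _, length, _, aligned, _, unaligned = parts
            out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
        else:
            raise ValueError("Unexpected line: %s" % line)
    return out

example = "chr1 length= 248956422 Aligned= 1000 Unaligned= 5\nNoCoordinateCount= 42"
print(parse_index_stats(example))
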
237,739 | bcbio/bcbio-nextgen | bcbio/broad/picardrun.py | bed2interval | def bed2interval(align_file, bed, out_file=None):
"""Converts a bed file to an interval file for use with some of the
    Picard tools by grabbing the header from the alignment file, reordering
the bed file columns and gluing them together.
align_file can be in BAM or SAM format.
bed needs to be in bed12 format:
http://genome.ucsc.edu/FAQ/FAQformat.html#format1.5
"""
import pysam
base, ext = os.path.splitext(align_file)
if out_file is None:
out_file = base + ".interval"
with pysam.Samfile(align_file, "r" if ext.endswith(".sam") else "rb") as in_bam:
header = in_bam.text
def reorder_line(line):
splitline = line.strip().split("\t")
reordered = "\t".join([splitline[0], str(int(splitline[1]) + 1), splitline[2],
splitline[5], splitline[3]])
return reordered + "\n"
with file_transaction(out_file) as tx_out_file:
with open(bed) as bed_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(header)
for line in bed_handle:
out_handle.write(reorder_line(line))
return out_file | python | def bed2interval(align_file, bed, out_file=None):
import pysam
base, ext = os.path.splitext(align_file)
if out_file is None:
out_file = base + ".interval"
with pysam.Samfile(align_file, "r" if ext.endswith(".sam") else "rb") as in_bam:
header = in_bam.text
def reorder_line(line):
splitline = line.strip().split("\t")
reordered = "\t".join([splitline[0], str(int(splitline[1]) + 1), splitline[2],
splitline[5], splitline[3]])
return reordered + "\n"
with file_transaction(out_file) as tx_out_file:
with open(bed) as bed_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(header)
for line in bed_handle:
out_handle.write(reorder_line(line))
return out_file | [
"def",
"bed2interval",
"(",
"align_file",
",",
"bed",
",",
"out_file",
"=",
"None",
")",
":",
"import",
"pysam",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"align_file",
")",
"if",
"out_file",
"is",
"None",
":",
"out_file",
"=",
... | Converts a bed file to an interval file for use with some of the
Picard tools by grabbing the header from the alignment file, reordering
the bed file columns and gluing them together.
align_file can be in BAM or SAM format.
bed needs to be in bed12 format:
http://genome.ucsc.edu/FAQ/FAQformat.html#format1.5 | [
"Converts",
"a",
"bed",
"file",
"to",
"an",
"interval",
"file",
"for",
"use",
"with",
"some",
"of",
"the",
"Picard",
"tools",
"by",
"grabbing",
"the",
"header",
"from",
"the",
"alignment",
"file",
"reording",
"the",
"bed",
"file",
"columns",
"and",
"gluing... | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L279-L309 |
237,740 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | _enforce_max_region_size | def _enforce_max_region_size(in_file, data):
"""Ensure we don't have any chunks in the region greater than 20kb.
VarDict memory usage depends on size of individual windows in the input
file. This breaks regions into 20kb chunks with 250bp overlaps. 20kb gives
~1Gb/core memory usage and the overlaps avoid missing indels spanning a
gap. Downstream VarDict merging sorts out any variants across windows.
https://github.com/AstraZeneca-NGS/VarDictJava/issues/64
"""
max_size = 20000
overlap_size = 250
def _has_larger_regions(f):
return any(r.stop - r.start > max_size for r in pybedtools.BedTool(f))
out_file = "%s-regionlimit%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
if _has_larger_regions(in_file):
with file_transaction(data, out_file) as tx_out_file:
pybedtools.BedTool().window_maker(w=max_size,
s=max_size - overlap_size,
b=pybedtools.BedTool(in_file)).saveas(tx_out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file | python | def _enforce_max_region_size(in_file, data):
max_size = 20000
overlap_size = 250
def _has_larger_regions(f):
return any(r.stop - r.start > max_size for r in pybedtools.BedTool(f))
out_file = "%s-regionlimit%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
if _has_larger_regions(in_file):
with file_transaction(data, out_file) as tx_out_file:
pybedtools.BedTool().window_maker(w=max_size,
s=max_size - overlap_size,
b=pybedtools.BedTool(in_file)).saveas(tx_out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file | [
"def",
"_enforce_max_region_size",
"(",
"in_file",
",",
"data",
")",
":",
"max_size",
"=",
"20000",
"overlap_size",
"=",
"250",
"def",
"_has_larger_regions",
"(",
"f",
")",
":",
"return",
"any",
"(",
"r",
".",
"stop",
"-",
"r",
".",
"start",
">",
"max_si... | Ensure we don't have any chunks in the region greater than 20kb.
VarDict memory usage depends on size of individual windows in the input
file. This breaks regions into 20kb chunks with 250bp overlaps. 20kb gives
~1Gb/core memory usage and the overlaps avoid missing indels spanning a
gap. Downstream VarDict merging sorts out any variants across windows.
https://github.com/AstraZeneca-NGS/VarDictJava/issues/64 | [
"Ensure",
"we",
"don",
"t",
"have",
"any",
"chunks",
"in",
"the",
"region",
"greater",
"than",
"20kb",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L90-L113 |
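
The same chunking policy can be stated without pybedtools: windows of at most max_size, stepped by max_size minus the overlap, so adjacent windows share 250bp and an indel spanning a boundary still falls wholly inside one window. A pure-Python sketch with the parameter values from the record above:

def make_windows(start, stop, max_size=20000, overlap=250):
    # Mirrors bedtools makewindows -w max_size -s (max_size - overlap).
    step = max_size - overlap
    pos = start
    while pos < stop:
        yield pos, min(pos + max_size, stop)
        pos += step

print(list(make_windows(0, 50000)))
# [(0, 20000), (19750, 39750), (39500, 50000)]
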
237,741 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | run_vardict | def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run VarDict variant calling.
"""
items = shared.add_highdepth_genome_exclusion(items)
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file | python | def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
items = shared.add_highdepth_genome_exclusion(items)
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file | [
"def",
"run_vardict",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"items",
"=",
"shared",
".",
"add_highdepth_genome_exclusion",
"(",
"items",
")",
"if",
"vcfutils"... | Run VarDict variant calling. | [
"Run",
"VarDict",
"variant",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L115-L127 |
237,742 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | _get_jvm_opts | def _get_jvm_opts(data, out_file):
"""Retrieve JVM options when running the Java version of VarDict.
"""
if get_vardict_command(data) == "vardict-java":
resources = config_utils.get_resources("vardict", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
else:
return "" | python | def _get_jvm_opts(data, out_file):
if get_vardict_command(data) == "vardict-java":
resources = config_utils.get_resources("vardict", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
else:
return "" | [
"def",
"_get_jvm_opts",
"(",
"data",
",",
"out_file",
")",
":",
"if",
"get_vardict_command",
"(",
"data",
")",
"==",
"\"vardict-java\"",
":",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"vardict\"",
",",
"data",
"[",
"\"config\"",
"]",
")",
... | Retrieve JVM options when running the Java version of VarDict. | [
"Retrieve",
"JVM",
"options",
"when",
"running",
"the",
"Java",
"version",
"of",
"VarDict",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L129-L138 |
237,743 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | _run_vardict_caller | def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with VarDict.
var2vcf_valid uses -A flag which reports all alleles and improves sensitivity:
https://github.com/AstraZeneca-NGS/VarDict/issues/35#issuecomment-276738191
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(
vrs, region, out_file, items=items, do_merge=False)
num_bams = len(align_bams)
sample_vcf_names = [] # for individual sample names, given batch calling may be required
for bamfile, item in zip(align_bams, items):
# prepare commands
sample = dd.get_sample_name(item)
vardict = get_vardict_command(items[0])
opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if tx_out_file.endswith("gz") else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
py_cl = os.path.join(utils.get_bcbio_bin(), "py")
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
lowfreq_filter = _lowfreq_linear_filter(0, False)
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
"-N {sample} -b {bamfile} {opts} "
"| teststrandbias.R "
"| var2vcf_valid.pl -A -N {sample} -E {var2vcf_opts} "
"| {contig_cl} | bcftools filter -i 'QUAL >= 0' | {lowfreq_filter} "
"| {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
if num_bams > 1:
temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
tmp_out = temp_file_prefix + ".temp.vcf"
tmp_out += ".gz" if out_file.endswith("gz") else ""
sample_vcf_names.append(tmp_out)
with file_transaction(item, tmp_out) as tx_tmp_file:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
else:
cmd += " > {tx_tmp_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
else:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
else:
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if num_bams > 1:
# N.B. merge_variant_files wants region in 1-based end-inclusive
# coordinates. Thus use bamprep.region_to_gatk
vcfutils.merge_variant_files(orig_files=sample_vcf_names,
out_file=tx_out_file, ref_file=ref_file,
config=config, region=bamprep.region_to_gatk(region))
return out_file | python | def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(
vrs, region, out_file, items=items, do_merge=False)
num_bams = len(align_bams)
sample_vcf_names = [] # for individual sample names, given batch calling may be required
for bamfile, item in zip(align_bams, items):
# prepare commands
sample = dd.get_sample_name(item)
vardict = get_vardict_command(items[0])
opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if tx_out_file.endswith("gz") else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
py_cl = os.path.join(utils.get_bcbio_bin(), "py")
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
lowfreq_filter = _lowfreq_linear_filter(0, False)
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
"-N {sample} -b {bamfile} {opts} "
"| teststrandbias.R "
"| var2vcf_valid.pl -A -N {sample} -E {var2vcf_opts} "
"| {contig_cl} | bcftools filter -i 'QUAL >= 0' | {lowfreq_filter} "
"| {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
if num_bams > 1:
temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
tmp_out = temp_file_prefix + ".temp.vcf"
tmp_out += ".gz" if out_file.endswith("gz") else ""
sample_vcf_names.append(tmp_out)
with file_transaction(item, tmp_out) as tx_tmp_file:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
else:
cmd += " > {tx_tmp_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
else:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
else:
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if num_bams > 1:
# N.B. merge_variant_files wants region in 1-based end-inclusive
# coordinates. Thus use bamprep.region_to_gatk
vcfutils.merge_variant_files(orig_files=sample_vcf_names,
out_file=tx_out_file, ref_file=ref_file,
config=config, region=bamprep.region_to_gatk(region))
return out_file | [
"def",
"_run_vardict_caller",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"config",
"=",
"items",
"[",
"0",
"]",
"[",
"\"config\"",
"]",
"if",
"out_file",
"is",... | Detect SNPs and indels with VarDict.
var2vcf_valid uses -A flag which reports all alleles and improves sensitivity:
https://github.com/AstraZeneca-NGS/VarDict/issues/35#issuecomment-276738191 | [
"Detect",
"SNPs",
"and",
"indels",
"with",
"VarDict",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L140-L201 |
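
On the coordinate note near the end of that function: merge_variant_files expects regions in 1-based, end-inclusive form, while Python-side region tuples are 0-based. A hypothetical standalone converter showing the arithmetic (bcbio's own helper is bamprep.region_to_gatk; this is not its implementation, just the idea):

def region_to_one_based(region):
    # ("chr7", 140453135, 140453137) -> "chr7:140453136-140453137"
    if isinstance(region, (list, tuple)):
        chrom, start, end = region
        return "%s:%s-%s" % (chrom, int(start) + 1, int(end))
    return region  # whole-chromosome names pass through unchanged

print(region_to_one_based(("chr7", 140453135, 140453137)))
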
237,744 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | _lowfreq_linear_filter | def _lowfreq_linear_filter(tumor_index, is_paired):
"""Linear classifier for removing low frequency false positives.
Uses a logistic classifier based on 0.5% tumor only variants from the smcounter2 paper:
https://github.com/bcbio/bcbio_validations/tree/master/somatic-lowfreq
The classifier uses strand bias (SBF) and read mismatches (NM) and
applies only for low frequency (<2%) and low depth (<30) variants.
"""
if is_paired:
sbf = "FORMAT/SBF[%s]" % tumor_index
nm = "FORMAT/NM[%s]" % tumor_index
else:
sbf = "INFO/SBF"
nm = "INFO/NM"
cmd = ("""bcftools filter --soft-filter 'LowFreqBias' --mode '+' """
"""-e 'FORMAT/AF[{tumor_index}] < 0.02 && FORMAT/VD[{tumor_index}] < 30 """
"""&& {sbf} < 0.1 && {nm} >= 2.0'""")
return cmd.format(**locals()) | python | def _lowfreq_linear_filter(tumor_index, is_paired):
if is_paired:
sbf = "FORMAT/SBF[%s]" % tumor_index
nm = "FORMAT/NM[%s]" % tumor_index
else:
sbf = "INFO/SBF"
nm = "INFO/NM"
cmd = ("""bcftools filter --soft-filter 'LowFreqBias' --mode '+' """
"""-e 'FORMAT/AF[{tumor_index}] < 0.02 && FORMAT/VD[{tumor_index}] < 30 """
"""&& {sbf} < 0.1 && {nm} >= 2.0'""")
return cmd.format(**locals()) | [
"def",
"_lowfreq_linear_filter",
"(",
"tumor_index",
",",
"is_paired",
")",
":",
"if",
"is_paired",
":",
"sbf",
"=",
"\"FORMAT/SBF[%s]\"",
"%",
"tumor_index",
"nm",
"=",
"\"FORMAT/NM[%s]\"",
"%",
"tumor_index",
"else",
":",
"sbf",
"=",
"\"INFO/SBF\"",
"nm",
"=",... | Linear classifier for removing low frequency false positives.
Uses a logistic classifier based on 0.5% tumor only variants from the smcounter2 paper:
https://github.com/bcbio/bcbio_validations/tree/master/somatic-lowfreq
The classifier uses strand bias (SBF) and read mismatches (NM) and
applies only for low frequency (<2%) and low depth (<30) variants. | [
"Linear",
"classifier",
"for",
"removing",
"low",
"frequency",
"false",
"positives",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L203-L222 |
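
To make the paired/tumor-only split concrete, here is the filter expression the builder produces in each mode, re-stated standalone (the bcftools --soft-filter wrapper from the record is omitted so only the -e expression is shown):

def lowfreq_expr(tumor_index, is_paired):
    # Paired mode reads strand bias and mismatches from per-sample FORMAT
    # fields indexed by the tumor sample; tumor-only mode reads them from INFO.
    sbf = "FORMAT/SBF[%s]" % tumor_index if is_paired else "INFO/SBF"
    nm = "FORMAT/NM[%s]" % tumor_index if is_paired else "INFO/NM"
    return ("FORMAT/AF[{i}] < 0.02 && FORMAT/VD[{i}] < 30 "
            "&& {sbf} < 0.1 && {nm} >= 2.0").format(i=tumor_index, sbf=sbf, nm=nm)

print(lowfreq_expr(0, False))  # tumor-only: SBF/NM read from INFO
print(lowfreq_expr(1, True))   # paired: SBF/NM read from tumor FORMAT fields
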
237,745 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | add_db_germline_flag | def add_db_germline_flag(line):
"""Adds a DB flag for Germline filters, allowing downstream compatibility with PureCN.
"""
if line.startswith("#CHROM"):
headers = ['##INFO=<ID=DB,Number=0,Type=Flag,Description="Likely germline variant">']
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
if parts[7].find("STATUS=Germline") >= 0:
parts[7] += ";DB"
return "\t".join(parts) | python | def add_db_germline_flag(line):
if line.startswith("#CHROM"):
headers = ['##INFO=<ID=DB,Number=0,Type=Flag,Description="Likely germline variant">']
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
if parts[7].find("STATUS=Germline") >= 0:
parts[7] += ";DB"
return "\t".join(parts) | [
"def",
"add_db_germline_flag",
"(",
"line",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#CHROM\"",
")",
":",
"headers",
"=",
"[",
"'##INFO=<ID=DB,Number=0,Type=Flag,Description=\"Likely germline variant\">'",
"]",
"return",
"\"\\n\"",
".",
"join",
"(",
"headers... | Adds a DB flag for Germline filters, allowing downstream compatibility with PureCN. | [
"Adds",
"a",
"DB",
"flag",
"for",
"Germline",
"filters",
"allowing",
"downstream",
"compatibility",
"with",
"PureCN",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L224-L236 |
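
A toy demonstration of the flagging behavior, assuming add_db_germline_flag from the record above is in scope; the VCF rows are fabricated minimal examples with the standard eight fixed columns so INFO sits at index 7.

header = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO"
germline = "chr1\t100\t.\tA\tG\t50\tPASS\tSTATUS=Germline;AF=0.48"
somatic = "chr1\t200\t.\tC\tT\t60\tPASS\tSTATUS=StrongSomatic;AF=0.12"
for line in (header, germline, somatic):
    print(add_db_germline_flag(line))
# The header gains the ##INFO=<ID=DB,...> definition; only the Germline
# record picks up the trailing ";DB" in its INFO column.
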
237,746 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | depth_freq_filter | def depth_freq_filter(line, tumor_index, aligner):
"""Command line to filter VarDict calls based on depth, frequency and quality.
Looks at regions with low depth for allele frequency (AF * DP < 6, the equivalent
    of < 13bp for heterozygote calls, but generalized). Within these calls, filters if a
    call has:
- Low mapping quality and multiple mismatches in a read (NM)
For bwa only: MQ < 55.0 and NM > 1.0 or MQ < 60.0 and NM > 2.0
- Low depth (DP < 10)
- Low QUAL (QUAL < 45)
Also filters in low allele frequency regions with poor quality, if all of these are
true:
- Allele frequency < 0.2
    - Quality < 45
- P-value (SSF) > 0.06
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=LowAlleleDepth,Description="Low depth per allele frequency '
'along with poor depth, quality, mapping quality and read mismatches.">'),
('##FILTER=<ID=LowFreqQuality,Description="Low frequency read with '
'poor quality and p-value (SSF).">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
qual = utils.safe_to_float(parts[5])
dp = utils.safe_to_float(sample_ft.get("DP"))
af = utils.safe_to_float(sample_ft.get("AF"))
nm = utils.safe_to_float(sample_ft.get("NM"))
mq = utils.safe_to_float(sample_ft.get("MQ"))
ssfs = [x for x in parts[7].split(";") if x.startswith("SSF=")]
pval = utils.safe_to_float(ssfs[0].split("=")[-1] if ssfs else None)
fname = None
if not chromhacks.is_sex(parts[0]) and dp is not None and af is not None:
if dp * af < 6:
if aligner == "bwa" and nm is not None and mq is not None:
if (mq < 55.0 and nm > 1.0) or (mq < 60.0 and nm > 2.0):
fname = "LowAlleleDepth"
if dp < 10:
fname = "LowAlleleDepth"
if qual is not None and qual < 45:
fname = "LowAlleleDepth"
if af is not None and qual is not None and pval is not None:
if af < 0.2 and qual < 45 and pval > 0.06:
fname = "LowFreqQuality"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line | python | def depth_freq_filter(line, tumor_index, aligner):
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=LowAlleleDepth,Description="Low depth per allele frequency '
'along with poor depth, quality, mapping quality and read mismatches.">'),
('##FILTER=<ID=LowFreqQuality,Description="Low frequency read with '
'poor quality and p-value (SSF).">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
qual = utils.safe_to_float(parts[5])
dp = utils.safe_to_float(sample_ft.get("DP"))
af = utils.safe_to_float(sample_ft.get("AF"))
nm = utils.safe_to_float(sample_ft.get("NM"))
mq = utils.safe_to_float(sample_ft.get("MQ"))
ssfs = [x for x in parts[7].split(";") if x.startswith("SSF=")]
pval = utils.safe_to_float(ssfs[0].split("=")[-1] if ssfs else None)
fname = None
if not chromhacks.is_sex(parts[0]) and dp is not None and af is not None:
if dp * af < 6:
if aligner == "bwa" and nm is not None and mq is not None:
if (mq < 55.0 and nm > 1.0) or (mq < 60.0 and nm > 2.0):
fname = "LowAlleleDepth"
if dp < 10:
fname = "LowAlleleDepth"
if qual is not None and qual < 45:
fname = "LowAlleleDepth"
if af is not None and qual is not None and pval is not None:
if af < 0.2 and qual < 45 and pval > 0.06:
fname = "LowFreqQuality"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line | [
"def",
"depth_freq_filter",
"(",
"line",
",",
"tumor_index",
",",
"aligner",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#CHROM\"",
")",
":",
"headers",
"=",
"[",
"(",
"'##FILTER=<ID=LowAlleleDepth,Description=\"Low depth per allele frequency '",
"'along with poo... | Command line to filter VarDict calls based on depth, frequency and quality.
Looks at regions with low depth for allele frequency (AF * DP < 6, the equivalent
of < 13bp for heterozygote calls, but generalized). Within these calls, filters if a
call has:
- Low mapping quality and multiple mismatches in a read (NM)
For bwa only: MQ < 55.0 and NM > 1.0 or MQ < 60.0 and NM > 2.0
- Low depth (DP < 10)
- Low QUAL (QUAL < 45)
Also filters in low allele frequency regions with poor quality, if all of these are
true:
- Allele frequency < 0.2
- Quality < 45
- P-value (SSF) > 0.06 | [
"Command",
"line",
"to",
"filter",
"VarDict",
"calls",
"based",
"on",
"depth",
"frequency",
"and",
"quality",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L238-L293 |
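
A worked numeric example of the low-depth gate: with DP=40 and AF=0.10 the product is 4.0 < 6, so the call enters the LowAlleleDepth checks even though raw depth alone looks acceptable, and the low QUAL then triggers the filter. Values are fabricated; the bwa-specific MQ/NM branch is omitted.

dp, af, qual = 40.0, 0.10, 38.0
flagged = False
if dp * af < 6:                      # low depth per allele frequency
    flagged = dp < 10 or qual < 45   # depth and quality sub-checks
print(flagged)  # True: qual 38.0 is below the 45 cutoff
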
237,747 | bcbio/bcbio-nextgen | bcbio/variation/vardict.py | get_vardict_command | def get_vardict_command(data):
"""
convert variantcaller specification to proper vardict command, handling
string or list specification
"""
vcaller = dd.get_variantcaller(data)
if isinstance(vcaller, list):
vardict = [x for x in vcaller if "vardict" in x]
if not vardict:
return None
vardict = vardict[0]
elif not vcaller:
return None
else:
vardict = vcaller
vardict = "vardict-java" if not vardict.endswith("-perl") else "vardict"
return vardict | python | def get_vardict_command(data):
vcaller = dd.get_variantcaller(data)
if isinstance(vcaller, list):
vardict = [x for x in vcaller if "vardict" in x]
if not vardict:
return None
vardict = vardict[0]
elif not vcaller:
return None
else:
vardict = vcaller
vardict = "vardict-java" if not vardict.endswith("-perl") else "vardict"
return vardict | [
"def",
"get_vardict_command",
"(",
"data",
")",
":",
"vcaller",
"=",
"dd",
".",
"get_variantcaller",
"(",
"data",
")",
"if",
"isinstance",
"(",
"vcaller",
",",
"list",
")",
":",
"vardict",
"=",
"[",
"x",
"for",
"x",
"in",
"vcaller",
"if",
"\"vardict\"",
... | convert variantcaller specification to proper vardict command, handling
string or list specification | [
"convert",
"variantcaller",
"specification",
"to",
"proper",
"vardict",
"command",
"handling",
"string",
"or",
"list",
"specification"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L362-L378 |
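
The normalization above is easiest to see on plain inputs rather than a full bcbio data dictionary; a standalone re-statement with the same branching:

def normalize_vardict(vcaller):
    # Accept either a single caller name or a list of callers; anything
    # matching "vardict" maps to the Java build unless "-perl" is requested.
    if isinstance(vcaller, list):
        matches = [x for x in vcaller if "vardict" in x]
        if not matches:
            return None
        vcaller = matches[0]
    elif not vcaller:
        return None
    return "vardict" if vcaller.endswith("-perl") else "vardict-java"

assert normalize_vardict(["gatk-haplotype", "vardict"]) == "vardict-java"
assert normalize_vardict("vardict-perl") == "vardict"
assert normalize_vardict("vardict-java") == "vardict-java"
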
237,748 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | run | def run(vrn_info, calls_by_name, somatic_info, do_plots=True, handle_failures=True):
"""Run BubbleTree given variant calls, CNVs and somatic
"""
if "seq2c" in calls_by_name:
cnv_info = calls_by_name["seq2c"]
elif "cnvkit" in calls_by_name:
cnv_info = calls_by_name["cnvkit"]
else:
raise ValueError("BubbleTree only currently support CNVkit and Seq2c: %s" % ", ".join(calls_by_name.keys()))
work_dir = _cur_workdir(somatic_info.tumor_data)
class OutWriter:
def __init__(self, out_handle):
self.writer = csv.writer(out_handle)
def write_header(self):
self.writer.writerow(["chrom", "start", "end", "freq"])
def write_row(self, rec, stats):
self.writer.writerow([_to_ucsc_style(rec.chrom), rec.start, rec.stop, stats["tumor"]["freq"]])
vcf_csv = prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"],
work_dir, somatic_info, OutWriter, cnv_info["cns"])
cnv_csv = _prep_cnv_file(cnv_info["cns"], cnv_info["variantcaller"], work_dir,
somatic_info.tumor_data)
wide_lrr = cnv_info["variantcaller"] == "cnvkit" and somatic_info.normal_bam is None
return _run_bubbletree(vcf_csv, cnv_csv, somatic_info.tumor_data, wide_lrr, do_plots,
handle_failures) | python | def run(vrn_info, calls_by_name, somatic_info, do_plots=True, handle_failures=True):
if "seq2c" in calls_by_name:
cnv_info = calls_by_name["seq2c"]
elif "cnvkit" in calls_by_name:
cnv_info = calls_by_name["cnvkit"]
else:
raise ValueError("BubbleTree only currently support CNVkit and Seq2c: %s" % ", ".join(calls_by_name.keys()))
work_dir = _cur_workdir(somatic_info.tumor_data)
class OutWriter:
def __init__(self, out_handle):
self.writer = csv.writer(out_handle)
def write_header(self):
self.writer.writerow(["chrom", "start", "end", "freq"])
def write_row(self, rec, stats):
self.writer.writerow([_to_ucsc_style(rec.chrom), rec.start, rec.stop, stats["tumor"]["freq"]])
vcf_csv = prep_vrn_file(vrn_info["vrn_file"], vrn_info["variantcaller"],
work_dir, somatic_info, OutWriter, cnv_info["cns"])
cnv_csv = _prep_cnv_file(cnv_info["cns"], cnv_info["variantcaller"], work_dir,
somatic_info.tumor_data)
wide_lrr = cnv_info["variantcaller"] == "cnvkit" and somatic_info.normal_bam is None
return _run_bubbletree(vcf_csv, cnv_csv, somatic_info.tumor_data, wide_lrr, do_plots,
handle_failures) | [
"def",
"run",
"(",
"vrn_info",
",",
"calls_by_name",
",",
"somatic_info",
",",
"do_plots",
"=",
"True",
",",
"handle_failures",
"=",
"True",
")",
":",
"if",
"\"seq2c\"",
"in",
"calls_by_name",
":",
"cnv_info",
"=",
"calls_by_name",
"[",
"\"seq2c\"",
"]",
"el... | Run BubbleTree given variant calls, CNVs and somatic | [
"Run",
"BubbleTree",
"given",
"variant",
"calls",
"CNVs",
"and",
"somatic"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L34-L57 |
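
The locally defined OutWriter illustrates a small protocol: prep_vrn_file (below) accepts any class that takes an open handle and exposes write_header and write_row(rec, stats). A minimal alternative writer, sketched under the assumption that rec carries a .chrom attribute and stats the nested tumor frequency as in the record:

import csv

class FreqOnlyWriter:
    # Drop-in writer_class variant recording only chromosome and frequency.
    def __init__(self, out_handle):
        self.writer = csv.writer(out_handle)

    def write_header(self):
        self.writer.writerow(["chrom", "freq"])

    def write_row(self, rec, stats):
        self.writer.writerow([rec.chrom, stats["tumor"]["freq"]])
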
237,749 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _run_bubbletree | def _run_bubbletree(vcf_csv, cnv_csv, data, wide_lrr=False, do_plots=True,
handle_failures=True):
"""Create R script and run on input data
BubbleTree has some internal hardcoded paramters that assume a smaller
distribution of log2 scores. This is not true for tumor-only calls, so if
we specify wide_lrr we scale the calculations to actually get calls. Need a
better long term solution with flexible parameters.
"""
lrr_scale = 10.0 if wide_lrr else 1.0
local_sitelib = utils.R_sitelib()
base = utils.splitext_plus(vcf_csv)[0]
r_file = "%s-run.R" % base
bubbleplot_out = "%s-bubbleplot.pdf" % base
trackplot_out = "%s-trackplot.pdf" % base
calls_out = "%s-calls.rds" % base
freqs_out = "%s-bubbletree_prevalence.txt" % base
sample = dd.get_sample_name(data)
do_plots = "yes" if do_plots else "no"
with open(r_file, "w") as out_handle:
out_handle.write(_script.format(**locals()))
if not utils.file_exists(freqs_out):
cmd = "%s && %s --no-environ %s" % (utils.get_R_exports(), utils.Rscript_cmd(), r_file)
try:
do.run(cmd, "Assess heterogeneity with BubbleTree")
except subprocess.CalledProcessError as msg:
if handle_failures and _allowed_bubbletree_errorstates(str(msg)):
with open(freqs_out, "w") as out_handle:
out_handle.write('bubbletree failed:\n %s"\n' % (str(msg)))
else:
logger.exception()
raise
return {"caller": "bubbletree",
"report": freqs_out,
"plot": {"bubble": bubbleplot_out, "track": trackplot_out}} | python | def _run_bubbletree(vcf_csv, cnv_csv, data, wide_lrr=False, do_plots=True,
handle_failures=True):
lrr_scale = 10.0 if wide_lrr else 1.0
local_sitelib = utils.R_sitelib()
base = utils.splitext_plus(vcf_csv)[0]
r_file = "%s-run.R" % base
bubbleplot_out = "%s-bubbleplot.pdf" % base
trackplot_out = "%s-trackplot.pdf" % base
calls_out = "%s-calls.rds" % base
freqs_out = "%s-bubbletree_prevalence.txt" % base
sample = dd.get_sample_name(data)
do_plots = "yes" if do_plots else "no"
with open(r_file, "w") as out_handle:
out_handle.write(_script.format(**locals()))
if not utils.file_exists(freqs_out):
cmd = "%s && %s --no-environ %s" % (utils.get_R_exports(), utils.Rscript_cmd(), r_file)
try:
do.run(cmd, "Assess heterogeneity with BubbleTree")
except subprocess.CalledProcessError as msg:
if handle_failures and _allowed_bubbletree_errorstates(str(msg)):
with open(freqs_out, "w") as out_handle:
out_handle.write('bubbletree failed:\n %s"\n' % (str(msg)))
else:
logger.exception()
raise
return {"caller": "bubbletree",
"report": freqs_out,
"plot": {"bubble": bubbleplot_out, "track": trackplot_out}} | [
"def",
"_run_bubbletree",
"(",
"vcf_csv",
",",
"cnv_csv",
",",
"data",
",",
"wide_lrr",
"=",
"False",
",",
"do_plots",
"=",
"True",
",",
"handle_failures",
"=",
"True",
")",
":",
"lrr_scale",
"=",
"10.0",
"if",
"wide_lrr",
"else",
"1.0",
"local_sitelib",
"... | Create R script and run on input data
BubbleTree has some internal hardcoded paramters that assume a smaller
distribution of log2 scores. This is not true for tumor-only calls, so if
we specify wide_lrr we scale the calculations to actually get calls. Need a
better long term solution with flexible parameters. | [
"Create",
"R",
"script",
"and",
"run",
"on",
"input",
"data"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L59-L93 |
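
The render-then-execute pattern above (format a Python template into an R script, then run it with Rscript --no-environ) is generic. A hedged standalone sketch with a placeholder template, not bcbio's actual _script body; it assumes Rscript is available on PATH.

import subprocess

TEMPLATE = 'message("sample: {sample}"); write("done", file="{out_file}")'

def run_r_template(sample, out_file, r_file="run.R"):
    # Write the rendered script to disk, then hand it to Rscript.
    with open(r_file, "w") as handle:
        handle.write(TEMPLATE.format(sample=sample, out_file=out_file))
    subprocess.check_call(["Rscript", "--no-environ", r_file])

# run_r_template("tumor1", "result.txt")
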
237,750 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _prep_cnv_file | def _prep_cnv_file(cns_file, svcaller, work_dir, data):
"""Create a CSV file of CNV calls with log2 and number of marks.
"""
in_file = cns_file
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
svcaller))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
writer = csv.writer(out_handle)
writer.writerow(["chrom", "start", "end", "num.mark", "seg.mean"])
header = next(reader)
for line in reader:
cur = dict(zip(header, line))
if chromhacks.is_autosomal(cur["chromosome"]):
writer.writerow([_to_ucsc_style(cur["chromosome"]), cur["start"],
cur["end"], cur["probes"], cur["log2"]])
return out_file | python | def _prep_cnv_file(cns_file, svcaller, work_dir, data):
in_file = cns_file
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
svcaller))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
writer = csv.writer(out_handle)
writer.writerow(["chrom", "start", "end", "num.mark", "seg.mean"])
header = next(reader)
for line in reader:
cur = dict(zip(header, line))
if chromhacks.is_autosomal(cur["chromosome"]):
writer.writerow([_to_ucsc_style(cur["chromosome"]), cur["start"],
cur["end"], cur["probes"], cur["log2"]])
return out_file | [
"def",
"_prep_cnv_file",
"(",
"cns_file",
",",
"svcaller",
",",
"work_dir",
",",
"data",
")",
":",
"in_file",
"=",
"cns_file",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"%s-%s-prep.csv\"",
"%",
"(",
"utils",
".",
"splitext_pl... | Create a CSV file of CNV calls with log2 and number of marks. | [
"Create",
"a",
"CSV",
"file",
"of",
"CNV",
"calls",
"with",
"log2",
"and",
"number",
"of",
"marks",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L104-L123 |
237,751 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | prep_vrn_file | def prep_vrn_file(in_file, vcaller, work_dir, somatic_info, writer_class, seg_file=None, params=None):
"""Select heterozygous variants in the normal sample with sufficient depth.
writer_class implements write_header and write_row to write VCF outputs
from a record and extracted tumor/normal statistics.
"""
data = somatic_info.tumor_data
if not params:
params = PARAMS
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
vcaller))
if not utils.file_uptodate(out_file, in_file):
# ready_bed = _identify_heterogeneity_blocks_seg(in_file, seg_file, params, work_dir, somatic_info)
ready_bed = None
if ready_bed and utils.file_exists(ready_bed):
sub_file = _create_subset_file(in_file, ready_bed, work_dir, data)
else:
sub_file = in_file
max_depth = max_normal_germline_depth(sub_file, params, somatic_info)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = writer_class(out_handle)
writer.write_header()
bcf_in = pysam.VariantFile(sub_file)
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info, max_normal_depth=max_depth)
if chromhacks.is_autosomal(rec.chrom) and stats is not None:
writer.write_row(rec, stats)
return out_file | python | def prep_vrn_file(in_file, vcaller, work_dir, somatic_info, writer_class, seg_file=None, params=None):
data = somatic_info.tumor_data
if not params:
params = PARAMS
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
vcaller))
if not utils.file_uptodate(out_file, in_file):
# ready_bed = _identify_heterogeneity_blocks_seg(in_file, seg_file, params, work_dir, somatic_info)
ready_bed = None
if ready_bed and utils.file_exists(ready_bed):
sub_file = _create_subset_file(in_file, ready_bed, work_dir, data)
else:
sub_file = in_file
max_depth = max_normal_germline_depth(sub_file, params, somatic_info)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = writer_class(out_handle)
writer.write_header()
bcf_in = pysam.VariantFile(sub_file)
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info, max_normal_depth=max_depth)
if chromhacks.is_autosomal(rec.chrom) and stats is not None:
writer.write_row(rec, stats)
return out_file | [
"def",
"prep_vrn_file",
"(",
"in_file",
",",
"vcaller",
",",
"work_dir",
",",
"somatic_info",
",",
"writer_class",
",",
"seg_file",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"data",
"=",
"somatic_info",
".",
"tumor_data",
"if",
"not",
"params",
":... | Select heterozygous variants in the normal sample with sufficient depth.
writer_class implements write_header and write_row to write VCF outputs
from a record and extracted tumor/normal statistics. | [
"Select",
"heterozygous",
"variants",
"in",
"the",
"normal",
"sample",
"with",
"sufficient",
"depth",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L125-L153 |
237,752 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | max_normal_germline_depth | def max_normal_germline_depth(in_file, params, somatic_info):
"""Calculate threshold for excluding potential heterozygotes based on normal depth.
"""
bcf_in = pysam.VariantFile(in_file)
depths = []
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info)
if tz.get_in(["normal", "depth"], stats):
depths.append(tz.get_in(["normal", "depth"], stats))
if depths:
return np.median(depths) * NORMAL_FILTER_PARAMS["max_depth_percent"] | python | def max_normal_germline_depth(in_file, params, somatic_info):
bcf_in = pysam.VariantFile(in_file)
depths = []
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info)
if tz.get_in(["normal", "depth"], stats):
depths.append(tz.get_in(["normal", "depth"], stats))
if depths:
return np.median(depths) * NORMAL_FILTER_PARAMS["max_depth_percent"] | [
"def",
"max_normal_germline_depth",
"(",
"in_file",
",",
"params",
",",
"somatic_info",
")",
":",
"bcf_in",
"=",
"pysam",
".",
"VariantFile",
"(",
"in_file",
")",
"depths",
"=",
"[",
"]",
"for",
"rec",
"in",
"bcf_in",
":",
"stats",
"=",
"_is_possible_loh",
... | Calculate threshold for excluding potential heterozygotes based on normal depth. | [
"Calculate",
"threshold",
"for",
"excluding",
"potential",
"heterozygotes",
"based",
"on",
"normal",
"depth",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L162-L172 |
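
A numeric illustration of the threshold: with normal-sample depths clustered near 60x and max_depth_percent assumed to be 1.25 (an illustrative value, not necessarily the NORMAL_FILTER_PARAMS setting), the cutoff is the median times 1.25.

import numpy as np

depths = [55, 58, 60, 61, 63, 140]  # one unusually deep site
max_depth_percent = 1.25            # assumed value for illustration
print(np.median(depths) * max_depth_percent)  # 75.625
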
237,753 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _identify_heterogeneity_blocks_hmm | def _identify_heterogeneity_blocks_hmm(in_file, params, work_dir, somatic_info):
"""Use a HMM to identify blocks of heterogeneity to use for calculating allele frequencies.
The goal is to subset the genome to a more reasonable section that contains potential
loss of heterogeneity or other allele frequency adjustment based on selection.
"""
def _segment_by_hmm(chrom, freqs, coords):
cur_coords = []
for j, state in enumerate(_predict_states(freqs)):
if state == 0: # heterozygote region
if len(cur_coords) == 0:
num_misses = 0
cur_coords.append(coords[j])
else:
num_misses += 1
if num_misses > params["hetblock"]["allowed_misses"]:
if len(cur_coords) >= params["hetblock"]["min_alleles"]:
yield min(cur_coords), max(cur_coords)
cur_coords = []
if len(cur_coords) >= params["hetblock"]["min_alleles"]:
yield min(cur_coords), max(cur_coords)
return _identify_heterogeneity_blocks_shared(in_file, _segment_by_hmm, params, work_dir, somatic_info) | python | def _identify_heterogeneity_blocks_hmm(in_file, params, work_dir, somatic_info):
def _segment_by_hmm(chrom, freqs, coords):
cur_coords = []
for j, state in enumerate(_predict_states(freqs)):
if state == 0: # heterozygote region
if len(cur_coords) == 0:
num_misses = 0
cur_coords.append(coords[j])
else:
num_misses += 1
if num_misses > params["hetblock"]["allowed_misses"]:
if len(cur_coords) >= params["hetblock"]["min_alleles"]:
yield min(cur_coords), max(cur_coords)
cur_coords = []
if len(cur_coords) >= params["hetblock"]["min_alleles"]:
yield min(cur_coords), max(cur_coords)
return _identify_heterogeneity_blocks_shared(in_file, _segment_by_hmm, params, work_dir, somatic_info) | [
"def",
"_identify_heterogeneity_blocks_hmm",
"(",
"in_file",
",",
"params",
",",
"work_dir",
",",
"somatic_info",
")",
":",
"def",
"_segment_by_hmm",
"(",
"chrom",
",",
"freqs",
",",
"coords",
")",
":",
"cur_coords",
"=",
"[",
"]",
"for",
"j",
",",
"state",
... | Use an HMM to identify blocks of heterogeneity to use for calculating allele frequencies.
The goal is to subset the genome to a more reasonable section that contains potential
loss of heterozygosity or other allele frequency adjustment based on selection. | [
"Use",
"a",
"HMM",
"to",
"identify",
"blocks",
"of",
"heterogeneity",
"to",
"use",
"for",
"calculating",
"allele",
"frequencies",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L195-L216 |
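
The inner segmentation loop tolerates a bounded run of non-heterozygous states before closing a block and only emits blocks above a minimum size. A self-contained re-statement on a toy state sequence:

def het_blocks(states, coords, min_alleles=3, allowed_misses=2):
    cur, misses = [], 0
    for state, coord in zip(states, coords):
        if state == 0:               # heterozygous call extends the block
            if not cur:
                misses = 0
            cur.append(coord)
        else:                        # tolerate a few misses, then close
            misses += 1
            if misses > allowed_misses:
                if len(cur) >= min_alleles:
                    yield min(cur), max(cur)
                cur = []
    if len(cur) >= min_alleles:
        yield min(cur), max(cur)

states = [0, 0, 0, 1, 0, 1, 1, 1, 0, 0]
coords = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
print(list(het_blocks(states, coords)))  # [(10, 50)]
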
237,754 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _predict_states | def _predict_states(freqs):
"""Use frequencies to predict states across a chromosome.
Normalize so heterozygote blocks are assigned state 0 and homozygous
are assigned state 1.
"""
from hmmlearn import hmm
freqs = np.column_stack([np.array(freqs)])
model = hmm.GaussianHMM(2, covariance_type="full")
model.fit(freqs)
states = model.predict(freqs)
freqs_by_state = collections.defaultdict(list)
for i, state in enumerate(states):
freqs_by_state[state].append(freqs[i])
if np.median(freqs_by_state[0]) > np.median(freqs_by_state[1]):
states = [0 if s == 1 else 1 for s in states]
return states | python | def _predict_states(freqs):
from hmmlearn import hmm
freqs = np.column_stack([np.array(freqs)])
model = hmm.GaussianHMM(2, covariance_type="full")
model.fit(freqs)
states = model.predict(freqs)
freqs_by_state = collections.defaultdict(list)
for i, state in enumerate(states):
freqs_by_state[state].append(freqs[i])
if np.median(freqs_by_state[0]) > np.median(freqs_by_state[1]):
states = [0 if s == 1 else 1 for s in states]
return states | [
"def",
"_predict_states",
"(",
"freqs",
")",
":",
"from",
"hmmlearn",
"import",
"hmm",
"freqs",
"=",
"np",
".",
"column_stack",
"(",
"[",
"np",
".",
"array",
"(",
"freqs",
")",
"]",
")",
"model",
"=",
"hmm",
".",
"GaussianHMM",
"(",
"2",
",",
"covari... | Use frequencies to predict states across a chromosome.
Normalize so heterozygote blocks are assigned state 0 and homozygous
are assigned state 1. | [
"Use",
"frequencies",
"to",
"predict",
"states",
"across",
"a",
"chromosome",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L230-L246 |
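A minimal sketch of the two-state labeling in _predict_states, run on synthetic allele frequencies; this assumes the third-party numpy and hmmlearn packages are installed, and the input data is invented for illustration:

import numpy as np
from hmmlearn import hmm

rng = np.random.RandomState(42)
# Heterozygous positions cluster near 0.5, homozygous ones near 1.0.
freqs = np.concatenate([rng.normal(0.5, 0.05, 50), rng.normal(0.97, 0.02, 50)])
X = freqs.reshape(-1, 1)  # hmmlearn expects (n_samples, n_features)

model = hmm.GaussianHMM(n_components=2, covariance_type="full")
model.fit(X)
states = model.predict(X)
# Normalize labels so state 0 is the lower-median (heterozygous) state.
if np.median(X[states == 0]) > np.median(X[states == 1]):
    states = 1 - states
print(states[:5], states[-5:])  # mostly 0s first, mostly 1s last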
237,755 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _freqs_by_chromosome | def _freqs_by_chromosome(in_file, params, somatic_info):
"""Retrieve frequencies across each chromosome as inputs to HMM.
"""
freqs = []
coords = []
cur_chrom = None
with pysam.VariantFile(in_file) as bcf_in:
for rec in bcf_in:
if _is_biallelic_snp(rec) and _passes_plus_germline(rec) and chromhacks.is_autosomal(rec.chrom):
if cur_chrom is None or rec.chrom != cur_chrom:
if cur_chrom and len(freqs) > 0:
yield cur_chrom, freqs, coords
cur_chrom = rec.chrom
freqs = []
coords = []
stats = _tumor_normal_stats(rec, somatic_info)
if tz.get_in(["tumor", "depth"], stats, 0) > params["min_depth"]:
# not a ref only call
if len(rec.samples) == 0 or sum(rec.samples[somatic_info.tumor_name].allele_indices) > 0:
freqs.append(tz.get_in(["tumor", "freq"], stats))
coords.append(rec.start)
if cur_chrom and len(freqs) > 0:
yield cur_chrom, freqs, coords | python | def _freqs_by_chromosome(in_file, params, somatic_info):
freqs = []
coords = []
cur_chrom = None
with pysam.VariantFile(in_file) as bcf_in:
for rec in bcf_in:
if _is_biallelic_snp(rec) and _passes_plus_germline(rec) and chromhacks.is_autosomal(rec.chrom):
if cur_chrom is None or rec.chrom != cur_chrom:
if cur_chrom and len(freqs) > 0:
yield cur_chrom, freqs, coords
cur_chrom = rec.chrom
freqs = []
coords = []
stats = _tumor_normal_stats(rec, somatic_info)
if tz.get_in(["tumor", "depth"], stats, 0) > params["min_depth"]:
# not a ref only call
if len(rec.samples) == 0 or sum(rec.samples[somatic_info.tumor_name].allele_indices) > 0:
freqs.append(tz.get_in(["tumor", "freq"], stats))
coords.append(rec.start)
if cur_chrom and len(freqs) > 0:
yield cur_chrom, freqs, coords | [
"def",
"_freqs_by_chromosome",
"(",
"in_file",
",",
"params",
",",
"somatic_info",
")",
":",
"freqs",
"=",
"[",
"]",
"coords",
"=",
"[",
"]",
"cur_chrom",
"=",
"None",
"with",
"pysam",
".",
"VariantFile",
"(",
"in_file",
")",
"as",
"bcf_in",
":",
"for",
... | Retrieve frequencies across each chromosome as inputs to HMM. | [
"Retrieve",
"frequencies",
"across",
"each",
"chromosome",
"as",
"inputs",
"to",
"HMM",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L248-L270 |
237,756 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _create_subset_file | def _create_subset_file(in_file, het_region_bed, work_dir, data):
"""Subset the VCF to a set of pre-calculated smaller regions.
"""
cnv_regions = shared.get_base_cnv_regions(data, work_dir)
region_bed = bedutils.intersect_two(het_region_bed, cnv_regions, work_dir, data)
out_file = os.path.join(work_dir, "%s-origsubset.bcf" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
regions = ("-R %s" % region_bed) if utils.file_exists(region_bed) else ""
cmd = "bcftools view {regions} -o {tx_out_file} -O b {in_file}"
do.run(cmd.format(**locals()), "Extract regions for BubbleTree frequency determination")
return out_file | python | def _create_subset_file(in_file, het_region_bed, work_dir, data):
cnv_regions = shared.get_base_cnv_regions(data, work_dir)
region_bed = bedutils.intersect_two(het_region_bed, cnv_regions, work_dir, data)
out_file = os.path.join(work_dir, "%s-origsubset.bcf" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
regions = ("-R %s" % region_bed) if utils.file_exists(region_bed) else ""
cmd = "bcftools view {regions} -o {tx_out_file} -O b {in_file}"
do.run(cmd.format(**locals()), "Extract regions for BubbleTree frequency determination")
return out_file | [
"def",
"_create_subset_file",
"(",
"in_file",
",",
"het_region_bed",
",",
"work_dir",
",",
"data",
")",
":",
"cnv_regions",
"=",
"shared",
".",
"get_base_cnv_regions",
"(",
"data",
",",
"work_dir",
")",
"region_bed",
"=",
"bedutils",
".",
"intersect_two",
"(",
... | Subset the VCF to a set of pre-calculated smaller regions. | [
"Subset",
"the",
"VCF",
"to",
"a",
"set",
"of",
"pre",
"-",
"calculated",
"smaller",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L272-L283 |
237,757 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | is_info_germline | def is_info_germline(rec):
"""Check if a variant record is germline based on INFO attributes.
Works with VarDict's annotation of STATUS.
"""
if hasattr(rec, "INFO"):
status = rec.INFO.get("STATUS", "").lower()
else:
status = rec.info.get("STATUS", "").lower()
return status == "germline" or status.find("loh") >= 0 | python | def is_info_germline(rec):
if hasattr(rec, "INFO"):
status = rec.INFO.get("STATUS", "").lower()
else:
status = rec.info.get("STATUS", "").lower()
return status == "germline" or status.find("loh") >= 0 | [
"def",
"is_info_germline",
"(",
"rec",
")",
":",
"if",
"hasattr",
"(",
"rec",
",",
"\"INFO\"",
")",
":",
"status",
"=",
"rec",
".",
"INFO",
".",
"get",
"(",
"\"STATUS\"",
",",
"\"\"",
")",
".",
"lower",
"(",
")",
"else",
":",
"status",
"=",
"rec",
... | Check if a variant record is germline based on INFO attributes.
Works with VarDict's annotation of STATUS. | [
"Check",
"if",
"a",
"variant",
"record",
"is",
"germline",
"based",
"on",
"INFO",
"attributes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L290-L299 |
237,758 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _tumor_normal_stats | def _tumor_normal_stats(rec, somatic_info, vcf_rec):
"""Retrieve depth and frequency of tumor and normal samples.
"""
out = {"normal": {"alt": None, "depth": None, "freq": None},
"tumor": {"alt": 0, "depth": 0, "freq": None}}
if hasattr(vcf_rec, "samples"):
samples = [(s, {}) for s in vcf_rec.samples]
for fkey in ["AD", "AO", "RO", "AF", "DP"]:
try:
for i, v in enumerate(rec.format(fkey)):
samples[i][1][fkey] = v
except KeyError:
pass
# Handle INFO only inputs
elif len(rec.samples) == 0:
samples = [(somatic_info.tumor_name, None)]
else:
samples = rec.samples.items()
for name, sample in samples:
alt, depth, freq = sample_alt_and_depth(rec, sample)
if depth is not None and freq is not None:
if name == somatic_info.normal_name:
key = "normal"
elif name == somatic_info.tumor_name:
key = "tumor"
out[key]["freq"] = freq
out[key]["depth"] = depth
out[key]["alt"] = alt
return out | python | def _tumor_normal_stats(rec, somatic_info, vcf_rec):
out = {"normal": {"alt": None, "depth": None, "freq": None},
"tumor": {"alt": 0, "depth": 0, "freq": None}}
if hasattr(vcf_rec, "samples"):
samples = [(s, {}) for s in vcf_rec.samples]
for fkey in ["AD", "AO", "RO", "AF", "DP"]:
try:
for i, v in enumerate(rec.format(fkey)):
samples[i][1][fkey] = v
except KeyError:
pass
# Handle INFO only inputs
elif len(rec.samples) == 0:
samples = [(somatic_info.tumor_name, None)]
else:
samples = rec.samples.items()
for name, sample in samples:
alt, depth, freq = sample_alt_and_depth(rec, sample)
if depth is not None and freq is not None:
if name == somatic_info.normal_name:
key = "normal"
elif name == somatic_info.tumor_name:
key = "tumor"
out[key]["freq"] = freq
out[key]["depth"] = depth
out[key]["alt"] = alt
return out | [
"def",
"_tumor_normal_stats",
"(",
"rec",
",",
"somatic_info",
",",
"vcf_rec",
")",
":",
"out",
"=",
"{",
"\"normal\"",
":",
"{",
"\"alt\"",
":",
"None",
",",
"\"depth\"",
":",
"None",
",",
"\"freq\"",
":",
"None",
"}",
",",
"\"tumor\"",
":",
"{",
"\"a... | Retrieve depth and frequency of tumor and normal samples. | [
"Retrieve",
"depth",
"and",
"frequency",
"of",
"tumor",
"and",
"normal",
"samples",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L328-L356 |
237,759 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _is_possible_loh | def _is_possible_loh(rec, vcf_rec, params, somatic_info, use_status=False, max_normal_depth=None):
"""Check if the VCF record is a het in the normal with sufficient support.
Only returns SNPs, since indels tend to have less precise frequency measurements.
"""
if _is_biallelic_snp(rec) and _passes_plus_germline(rec, use_status=use_status):
stats = _tumor_normal_stats(rec, somatic_info, vcf_rec)
depths = [tz.get_in([x, "depth"], stats) for x in ["normal", "tumor"]]
depths = [d for d in depths if d is not None]
normal_freq = tz.get_in(["normal", "freq"], stats)
tumor_freq = tz.get_in(["tumor", "freq"], stats)
if all([d > params["min_depth"] for d in depths]):
if max_normal_depth and tz.get_in(["normal", "depth"], stats, 0) > max_normal_depth:
return None
if normal_freq is not None:
if normal_freq >= params["min_freq"] and normal_freq <= params["max_freq"]:
return stats
elif (tumor_freq >= params["tumor_only"]["min_freq"] and
tumor_freq <= params["tumor_only"]["max_freq"]):
if (vcf_rec and not _has_population_germline(vcf_rec)) or is_population_germline(rec):
return stats | python | def _is_possible_loh(rec, vcf_rec, params, somatic_info, use_status=False, max_normal_depth=None):
if _is_biallelic_snp(rec) and _passes_plus_germline(rec, use_status=use_status):
stats = _tumor_normal_stats(rec, somatic_info, vcf_rec)
depths = [tz.get_in([x, "depth"], stats) for x in ["normal", "tumor"]]
depths = [d for d in depths if d is not None]
normal_freq = tz.get_in(["normal", "freq"], stats)
tumor_freq = tz.get_in(["tumor", "freq"], stats)
if all([d > params["min_depth"] for d in depths]):
if max_normal_depth and tz.get_in(["normal", "depth"], stats, 0) > max_normal_depth:
return None
if normal_freq is not None:
if normal_freq >= params["min_freq"] and normal_freq <= params["max_freq"]:
return stats
elif (tumor_freq >= params["tumor_only"]["min_freq"] and
tumor_freq <= params["tumor_only"]["max_freq"]):
if (vcf_rec and not _has_population_germline(vcf_rec)) or is_population_germline(rec):
return stats | [
"def",
"_is_possible_loh",
"(",
"rec",
",",
"vcf_rec",
",",
"params",
",",
"somatic_info",
",",
"use_status",
"=",
"False",
",",
"max_normal_depth",
"=",
"None",
")",
":",
"if",
"_is_biallelic_snp",
"(",
"rec",
")",
"and",
"_passes_plus_germline",
"(",
"rec",
... | Check if the VCF record is a het in the normal with sufficient support.
Only returns SNPs, since indels tend to have less precise frequency measurements. | [
"Check",
"if",
"the",
"VCF",
"record",
"is",
"a",
"het",
"in",
"the",
"normal",
"with",
"sufficient",
"support",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L358-L378 |
237,760 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | _has_population_germline | def _has_population_germline(rec):
"""Check if header defines population annotated germline samples for tumor only.
"""
for k in population_keys:
if k in rec.header.info:
return True
return False | python | def _has_population_germline(rec):
for k in population_keys:
if k in rec.header.info:
return True
return False | [
"def",
"_has_population_germline",
"(",
"rec",
")",
":",
"for",
"k",
"in",
"population_keys",
":",
"if",
"k",
"in",
"rec",
".",
"header",
".",
"info",
":",
"return",
"True",
"return",
"False"
] | Check if header defines population annotated germline samples for tumor only. | [
"Check",
"if",
"header",
"defines",
"population",
"annotated",
"germline",
"samples",
"for",
"tumor",
"only",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L380-L386 |
237,761 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | is_population_germline | def is_population_germline(rec):
"""Identify a germline calls based on annoations with ExAC or other population databases.
"""
min_count = 50
for k in population_keys:
if k in rec.info:
val = rec.info.get(k)
if "," in val:
val = val.split(",")[0]
if isinstance(val, (list, tuple)):
val = max(val)
if int(val) > min_count:
return True
return False | python | def is_population_germline(rec):
min_count = 50
for k in population_keys:
if k in rec.info:
val = rec.info.get(k)
if "," in val:
val = val.split(",")[0]
if isinstance(val, (list, tuple)):
val = max(val)
if int(val) > min_count:
return True
return False | [
"def",
"is_population_germline",
"(",
"rec",
")",
":",
"min_count",
"=",
"50",
"for",
"k",
"in",
"population_keys",
":",
"if",
"k",
"in",
"rec",
".",
"info",
":",
"val",
"=",
"rec",
".",
"info",
".",
"get",
"(",
"k",
")",
"if",
"\",\"",
"in",
"val"... | Identify a germline calls based on annoations with ExAC or other population databases. | [
"Identify",
"a",
"germline",
"calls",
"based",
"on",
"annoations",
"with",
"ExAC",
"or",
"other",
"population",
"databases",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L388-L401 |
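A sketch of the population-count test in is_population_germline on a plain dict standing in for a parsed VCF INFO field. The key names here are hypothetical (the real population_keys list is defined elsewhere in the module), and an explicit string check is added before the comma test:

population_keys = ["exac_ac", "gnomad_ac", "esp_ac"]  # hypothetical names for the sketch

def looks_population_germline(info, min_count=50):
    for k in population_keys:
        if k in info:
            val = info[k]
            if isinstance(val, str) and "," in val:  # multi-allelic: take the first count
                val = val.split(",")[0]
            if isinstance(val, (list, tuple)):
                val = max(val)
            if int(val) > min_count:
                return True
    return False

print(looks_population_germline({"exac_ac": "127,3"}))  # True
print(looks_population_germline({"gnomad_ac": 12}))     # False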
237,762 | bcbio/bcbio-nextgen | bcbio/heterogeneity/bubbletree.py | sample_alt_and_depth | def sample_alt_and_depth(rec, sample):
"""Flexibly get ALT allele and depth counts, handling FreeBayes, MuTect and other cases.
"""
if sample and "AD" in sample:
all_counts = [int(x) for x in sample["AD"]]
alt_counts = sum(all_counts[1:])
depth = sum(all_counts)
elif sample and "AO" in sample and sample.get("RO") is not None:
alts = sample["AO"]
if not isinstance(alts, (list, tuple)):
alts = [alts]
alt_counts = sum([int(x) for x in alts])
depth = alt_counts + int(sample["RO"])
elif "DP" in rec.info and "AF" in rec.info:
af = rec.info["AF"][0] if isinstance(rec.info["AF"], (tuple, list)) else rec.info["AF"]
return None, rec.info["DP"], af
else:
alt_counts = None
if alt_counts is None or depth is None or depth == 0:
return None, None, None
else:
freq = float(alt_counts) / float(depth)
return alt_counts, depth, freq | python | def sample_alt_and_depth(rec, sample):
if sample and "AD" in sample:
all_counts = [int(x) for x in sample["AD"]]
alt_counts = sum(all_counts[1:])
depth = sum(all_counts)
elif sample and "AO" in sample and sample.get("RO") is not None:
alts = sample["AO"]
if not isinstance(alts, (list, tuple)):
alts = [alts]
alt_counts = sum([int(x) for x in alts])
depth = alt_counts + int(sample["RO"])
elif "DP" in rec.info and "AF" in rec.info:
af = rec.info["AF"][0] if isinstance(rec.info["AF"], (tuple, list)) else rec.info["AF"]
return None, rec.info["DP"], af
else:
alt_counts = None
if alt_counts is None or depth is None or depth == 0:
return None, None, None
else:
freq = float(alt_counts) / float(depth)
return alt_counts, depth, freq | [
"def",
"sample_alt_and_depth",
"(",
"rec",
",",
"sample",
")",
":",
"if",
"sample",
"and",
"\"AD\"",
"in",
"sample",
":",
"all_counts",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"sample",
"[",
"\"AD\"",
"]",
"]",
"alt_counts",
"=",
"sum",
"("... | Flexibly get ALT allele and depth counts, handling FreeBayes, MuTect and other cases. | [
"Flexibly",
"get",
"ALT",
"allele",
"and",
"depth",
"counts",
"handling",
"FreeBayes",
"MuTect",
"and",
"other",
"cases",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/bubbletree.py#L403-L425 |
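The FORMAT/AD branch of sample_alt_and_depth reduces to a few lines: depth is the sum of all allele depths and the alt count is everything past the reference depth. A pure-Python sketch on an invented genotype dict:

def ad_alt_depth_freq(sample):
    all_counts = [int(x) for x in sample["AD"]]
    alt_counts = sum(all_counts[1:])
    depth = sum(all_counts)
    if depth == 0:
        return None, None, None
    return alt_counts, depth, float(alt_counts) / depth

print(ad_alt_depth_freq({"AD": (18, 7)}))  # (7, 25, 0.28)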
237,763 | bcbio/bcbio-nextgen | bcbio/bam/ref.py | fasta_idx | def fasta_idx(in_file, config=None):
"""Retrieve samtools style fasta index.
"""
fasta_index = in_file + ".fai"
if not utils.file_exists(fasta_index):
samtools = config_utils.get_program("samtools", config) if config else "samtools"
cmd = "{samtools} faidx {in_file}"
do.run(cmd.format(**locals()), "samtools faidx")
return fasta_index | python | def fasta_idx(in_file, config=None):
fasta_index = in_file + ".fai"
if not utils.file_exists(fasta_index):
samtools = config_utils.get_program("samtools", config) if config else "samtools"
cmd = "{samtools} faidx {in_file}"
do.run(cmd.format(**locals()), "samtools faidx")
return fasta_index | [
"def",
"fasta_idx",
"(",
"in_file",
",",
"config",
"=",
"None",
")",
":",
"fasta_index",
"=",
"in_file",
"+",
"\".fai\"",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"fasta_index",
")",
":",
"samtools",
"=",
"config_utils",
".",
"get_program",
"(",
"\"s... | Retrieve samtools style fasta index. | [
"Retrieve",
"samtools",
"style",
"fasta",
"index",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/ref.py#L9-L17 |
237,764 | bcbio/bcbio-nextgen | bcbio/bam/ref.py | file_contigs | def file_contigs(ref_file, config=None):
"""Iterator of reference contigs and lengths from a reference file.
"""
ContigInfo = collections.namedtuple("ContigInfo", "name size")
with open(fasta_idx(ref_file, config)) as in_handle:
for line in (l for l in in_handle if l.strip()):
name, size = line.split()[:2]
yield ContigInfo(name, int(size)) | python | def file_contigs(ref_file, config=None):
ContigInfo = collections.namedtuple("ContigInfo", "name size")
with open(fasta_idx(ref_file, config)) as in_handle:
for line in (l for l in in_handle if l.strip()):
name, size = line.split()[:2]
yield ContigInfo(name, int(size)) | [
"def",
"file_contigs",
"(",
"ref_file",
",",
"config",
"=",
"None",
")",
":",
"ContigInfo",
"=",
"collections",
".",
"namedtuple",
"(",
"\"ContigInfo\"",
",",
"\"name size\"",
")",
"with",
"open",
"(",
"fasta_idx",
"(",
"ref_file",
",",
"config",
")",
")",
... | Iterator of reference contigs and lengths from a reference file. | [
"Iterator",
"of",
"reference",
"contigs",
"and",
"lengths",
"from",
"a",
"reference",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/ref.py#L19-L26 |
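A sketch of the .fai parsing in file_contigs, using an in-memory two-line index; the contig sizes are illustrative values, not a bcbio fixture:

import collections

ContigInfo = collections.namedtuple("ContigInfo", "name size")

fai_text = "chr1\t248956422\t112\t70\t71\nchr2\t242193529\t252513167\t70\t71\n"
contigs = [ContigInfo(name, int(size))
           for name, size in (line.split()[:2]
                              for line in fai_text.splitlines() if line.strip())]
print(contigs[0])  # ContigInfo(name='chr1', size=248956422)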
237,765 | bcbio/bcbio-nextgen | bcbio/variation/smcounter2.py | run | def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
"""Run tumor only smCounter2 calling.
"""
paired = vcfutils.get_paired_bams(align_bams, items)
assert paired and not paired.normal_bam, ("smCounter2 supports tumor-only variant calling: %s" %
(",".join([dd.get_sample_name(d) for d in items])))
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
out_file = out_file.replace(".vcf.gz", ".vcf")
out_prefix = utils.splitext_plus(os.path.basename(out_file))[0]
if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
cmd = ["smCounter2", "--runPath", os.path.dirname(tx_out_file),
"--outPrefix", out_prefix,
"--bedTarget", target, "--refGenome", ref_file,
"--bamFile", paired.tumor_bam, "--bamType", "consensus",
"--nCPU", dd.get_num_cores(paired.tumor_data)]
do.run(cmd, "smcounter2 variant calling")
for fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "*.smCounter*")):
shutil.move(fname, os.path.join(os.path.dirname(out_file), os.path.basename(fname)))
utils.symlink_plus(os.path.join(os.path.dirname(out_file),
"%s.smCounter.cut.vcf" % out_prefix),
out_file)
return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"], remove_orig=False,
prep_cmd="sed 's#FORMAT\t%s#FORMAT\t%s#' | %s" %
(out_prefix, dd.get_sample_name(paired.tumor_data),
vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file))) | python | def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
paired = vcfutils.get_paired_bams(align_bams, items)
assert paired and not paired.normal_bam, ("smCounter2 supports tumor-only variant calling: %s" %
(",".join([dd.get_sample_name(d) for d in items])))
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
out_file = out_file.replace(".vcf.gz", ".vcf")
out_prefix = utils.splitext_plus(os.path.basename(out_file))[0]
if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
cmd = ["smCounter2", "--runPath", os.path.dirname(tx_out_file),
"--outPrefix", out_prefix,
"--bedTarget", target, "--refGenome", ref_file,
"--bamFile", paired.tumor_bam, "--bamType", "consensus",
"--nCPU", dd.get_num_cores(paired.tumor_data)]
do.run(cmd, "smcounter2 variant calling")
for fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "*.smCounter*")):
shutil.move(fname, os.path.join(os.path.dirname(out_file), os.path.basename(fname)))
utils.symlink_plus(os.path.join(os.path.dirname(out_file),
"%s.smCounter.cut.vcf" % out_prefix),
out_file)
return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"], remove_orig=False,
prep_cmd="sed 's#FORMAT\t%s#FORMAT\t%s#' | %s" %
(out_prefix, dd.get_sample_name(paired.tumor_data),
vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file))) | [
"def",
"run",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"paired",
"=",
"vcfutils",
".",
"get_paired_bams",
"(",
"align_bams",
",",
"items",
")",
"assert",
"pa... | Run tumor only smCounter2 calling. | [
"Run",
"tumor",
"only",
"smCounter2",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/smcounter2.py#L17-L44 |
237,766 | bcbio/bcbio-nextgen | bcbio/bam/readstats.py | number_of_mapped_reads | def number_of_mapped_reads(data, bam_file, keep_dups=True, bed_file=None, target_name=None):
"""Count mapped reads, allow adjustment for duplicates and BED regions.
Since samtools view does not use indexes for BED files
(https://github.com/samtools/samtools/issues/88)
we loop over regions in a BED file and add the counts together.
Uses a global cache file to store counts, making it possible to pass this single
file for CWL runs. For parallel processes it can have concurrent append writes,
so we have a simple file locking mechanism to avoid this.
"""
# Flag explainer https://broadinstitute.github.io/picard/explain-flags.html
callable_flags = ["not unmapped", "not mate_is_unmapped", "not secondary_alignment",
"not failed_quality_control"]
if keep_dups:
query_flags = callable_flags
flag = 780 # not (read unmapped or mate unmapped or fails QC or secondary alignment)
else:
query_flags = callable_flags + ["not duplicate"]
flag = 1804 # as above plus not duplicate
# Back compatible cache
oldcache_file = _backcompatible_cache_file(query_flags, bed_file, target_name, data)
if oldcache_file:
with open(oldcache_file) as f:
return int(f.read().strip())
# New cache
key = json.dumps({"flags": sorted(query_flags),
"region": os.path.basename(bed_file) if bed_file else "",
"sample": dd.get_sample_name(data)},
separators=(",", ":"), sort_keys=True)
cache_file = get_cache_file(data)
if utils.file_exists(cache_file):
with open(cache_file) as in_handle:
for cur_key, cur_val in (l.strip().split("\t") for l in in_handle):
if cur_key == key:
return int(cur_val)
# Calculate stats
count_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage",
dd.get_sample_name(data), "counts"))
if not bed_file:
bed_file = os.path.join(count_dir, "fullgenome.bed")
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for c in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size))
count_file = os.path.join(count_dir,
"%s-%s-counts.txt" % (os.path.splitext(os.path.basename(bed_file))[0], flag))
if not utils.file_exists(count_file):
bam.index(bam_file, data["config"], check_timestamp=False)
num_cores = dd.get_num_cores(data)
with file_transaction(data, count_file) as tx_out_file:
cmd = ("hts_nim_tools count-reads -t {num_cores} -F {flag} {bed_file} {bam_file} > {tx_out_file}")
do.run(cmd.format(**locals()), "Count mapped reads: %s" % (dd.get_sample_name(data)))
count = 0
with open(count_file) as in_handle:
for line in in_handle:
count += int(line.rstrip().split()[-1])
with _simple_lock(cache_file):
with open(cache_file, "a") as out_handle:
out_handle.write("%s\t%s\n" % (key, count))
return count | python | def number_of_mapped_reads(data, bam_file, keep_dups=True, bed_file=None, target_name=None):
# Flag explainer https://broadinstitute.github.io/picard/explain-flags.html
callable_flags = ["not unmapped", "not mate_is_unmapped", "not secondary_alignment",
"not failed_quality_control"]
if keep_dups:
query_flags = callable_flags
flag = 780 # not (read unmapped or mate unmapped or fails QC or secondary alignment)
else:
query_flags = callable_flags + ["not duplicate"]
flag = 1804 # as above plus not duplicate
# Back compatible cache
oldcache_file = _backcompatible_cache_file(query_flags, bed_file, target_name, data)
if oldcache_file:
with open(oldcache_file) as f:
return int(f.read().strip())
# New cache
key = json.dumps({"flags": sorted(query_flags),
"region": os.path.basename(bed_file) if bed_file else "",
"sample": dd.get_sample_name(data)},
separators=(",", ":"), sort_keys=True)
cache_file = get_cache_file(data)
if utils.file_exists(cache_file):
with open(cache_file) as in_handle:
for cur_key, cur_val in (l.strip().split("\t") for l in in_handle):
if cur_key == key:
return int(cur_val)
# Calculate stats
count_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage",
dd.get_sample_name(data), "counts"))
if not bed_file:
bed_file = os.path.join(count_dir, "fullgenome.bed")
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for c in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size))
count_file = os.path.join(count_dir,
"%s-%s-counts.txt" % (os.path.splitext(os.path.basename(bed_file))[0], flag))
if not utils.file_exists(count_file):
bam.index(bam_file, data["config"], check_timestamp=False)
num_cores = dd.get_num_cores(data)
with file_transaction(data, count_file) as tx_out_file:
cmd = ("hts_nim_tools count-reads -t {num_cores} -F {flag} {bed_file} {bam_file} > {tx_out_file}")
do.run(cmd.format(**locals()), "Count mapped reads: %s" % (dd.get_sample_name(data)))
count = 0
with open(count_file) as in_handle:
for line in in_handle:
count += int(line.rstrip().split()[-1])
with _simple_lock(cache_file):
with open(cache_file, "a") as out_handle:
out_handle.write("%s\t%s\n" % (key, count))
return count | [
"def",
"number_of_mapped_reads",
"(",
"data",
",",
"bam_file",
",",
"keep_dups",
"=",
"True",
",",
"bed_file",
"=",
"None",
",",
"target_name",
"=",
"None",
")",
":",
"# Flag explainer https://broadinstitute.github.io/picard/explain-flags.html",
"callable_flags",
"=",
"... | Count mapped reads, allow adjustment for duplicates and BED regions.
Since samtools view does not use indexes for BED files
(https://github.com/samtools/samtools/issues/88)
we loop over regions in a BED file and add the counts together.
Uses a global cache file to store counts, making it possible to pass this single
file for CWL runs. For parallel processes it can have concurrent append writes,
so we have a simple file locking mechanism to avoid this. | [
"Count",
"mapped",
"reads",
"allow",
"adjustment",
"for",
"duplicates",
"and",
"BED",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/readstats.py#L37-L102 |
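The 780/1804 filter values above are just OR-ed SAM flag bits. A sketch of the decomposition, with bit values per the SAM specification (see also the Broad "explain-flags" page linked in the code):

SAM_FLAGS = {
    "unmapped": 0x4,
    "mate_is_unmapped": 0x8,
    "secondary_alignment": 0x100,
    "failed_quality_control": 0x200,
    "duplicate": 0x400,
}

def exclude_flag(names):
    """Combine named flags into a samtools-style -F exclusion value."""
    value = 0
    for name in names:
        value |= SAM_FLAGS[name]
    return value

callable_names = ["unmapped", "mate_is_unmapped", "secondary_alignment",
                  "failed_quality_control"]
print(exclude_flag(callable_names))                  # 780
print(exclude_flag(callable_names + ["duplicate"]))  # 1804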
237,767 | bcbio/bcbio-nextgen | bcbio/bam/readstats.py | _simple_lock | def _simple_lock(f):
"""Simple file lock, times out after 20 second assuming lock is stale
"""
lock_file = f + ".lock"
timeout = 20
curtime = 0
interval = 2
while os.path.exists(lock_file):
time.sleep(interval)
curtime += interval
if curtime > timeout:
os.remove(lock_file)
with open(lock_file, "w") as out_handle:
out_handle.write("locked")
yield
if os.path.exists(lock_file):
os.remove(lock_file) | python | def _simple_lock(f):
lock_file = f + ".lock"
timeout = 20
curtime = 0
interval = 2
while os.path.exists(lock_file):
time.sleep(interval)
curtime += interval
if curtime > timeout:
os.remove(lock_file)
with open(lock_file, "w") as out_handle:
out_handle.write("locked")
yield
if os.path.exists(lock_file):
os.remove(lock_file) | [
"def",
"_simple_lock",
"(",
"f",
")",
":",
"lock_file",
"=",
"f",
"+",
"\".lock\"",
"timeout",
"=",
"20",
"curtime",
"=",
"0",
"interval",
"=",
"2",
"while",
"os",
".",
"path",
".",
"exists",
"(",
"lock_file",
")",
":",
"time",
".",
"sleep",
"(",
"... | Simple file lock, times out after 20 second assuming lock is stale | [
"Simple",
"file",
"lock",
"times",
"out",
"after",
"20",
"second",
"assuming",
"lock",
"is",
"stale"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/readstats.py#L105-L121 |
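_simple_lock is consumed as a context manager (with _simple_lock(cache_file): in number_of_mapped_reads), so the generator is presumably decorated with contextlib.contextmanager at its definition site. A self-contained sketch with the decorator written out:

import contextlib
import os
import time

@contextlib.contextmanager
def simple_lock(f, timeout=20, interval=2):
    lock_file = f + ".lock"
    waited = 0
    while os.path.exists(lock_file):  # wait for any other writer
        time.sleep(interval)
        waited += interval
        if waited > timeout:  # assume the lock is stale and break it
            os.remove(lock_file)
    with open(lock_file, "w") as out_handle:
        out_handle.write("locked")
    try:
        yield
    finally:
        if os.path.exists(lock_file):
            os.remove(lock_file)

with simple_lock("counts.tsv"):
    with open("counts.tsv", "a") as out_handle:
        out_handle.write("key\t1\n")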
237,768 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | get_max_counts | def get_max_counts(samples):
"""Retrieve number of regions that can be processed in parallel from current samples.
"""
counts = []
for data in (x[0] for x in samples):
count = tz.get_in(["config", "algorithm", "callable_count"], data, 1)
vcs = tz.get_in(["config", "algorithm", "variantcaller"], data, [])
if isinstance(vcs, six.string_types):
vcs = [vcs]
if vcs:
count *= len(vcs)
counts.append(count)
return max(counts) | python | def get_max_counts(samples):
counts = []
for data in (x[0] for x in samples):
count = tz.get_in(["config", "algorithm", "callable_count"], data, 1)
vcs = tz.get_in(["config", "algorithm", "variantcaller"], data, [])
if isinstance(vcs, six.string_types):
vcs = [vcs]
if vcs:
count *= len(vcs)
counts.append(count)
return max(counts) | [
"def",
"get_max_counts",
"(",
"samples",
")",
":",
"counts",
"=",
"[",
"]",
"for",
"data",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"samples",
")",
":",
"count",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
... | Retrieve number of regions that can be processed in parallel from current samples. | [
"Retrieve",
"number",
"of",
"regions",
"that",
"can",
"be",
"processed",
"in",
"parallel",
"from",
"current",
"samples",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L16-L28 |
237,769 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | _split_by_regions | def _split_by_regions(dirname, out_ext, in_key):
"""Split a BAM file data analysis into chromosomal regions.
"""
def _do_work(data):
# XXX Need to move retrieval of regions into preparation to avoid
# need for files when running in non-shared filesystems
regions = _get_parallel_regions(data)
def _sort_by_size(region):
_, start, end = region
return end - start
regions.sort(key=_sort_by_size, reverse=True)
bam_file = data[in_key]
if bam_file is None:
return None, []
part_info = []
base_out = os.path.splitext(os.path.basename(bam_file))[0]
nowork = [["nochrom"], ["noanalysis", data["config"]["algorithm"]["non_callable_regions"]]]
for region in regions + nowork:
out_dir = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], region[0])
region_outfile = os.path.join(out_dir, "%s-%s%s" %
(base_out, to_safestr(region), out_ext))
part_info.append((region, region_outfile))
out_file = os.path.join(data["dirs"]["work"], dirname, data["name"][-1],
"%s%s" % (base_out, out_ext))
return out_file, part_info
return _do_work | python | def _split_by_regions(dirname, out_ext, in_key):
def _do_work(data):
# XXX Need to move retrieval of regions into preparation to avoid
# need for files when running in non-shared filesystems
regions = _get_parallel_regions(data)
def _sort_by_size(region):
_, start, end = region
return end - start
regions.sort(key=_sort_by_size, reverse=True)
bam_file = data[in_key]
if bam_file is None:
return None, []
part_info = []
base_out = os.path.splitext(os.path.basename(bam_file))[0]
nowork = [["nochrom"], ["noanalysis", data["config"]["algorithm"]["non_callable_regions"]]]
for region in regions + nowork:
out_dir = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], region[0])
region_outfile = os.path.join(out_dir, "%s-%s%s" %
(base_out, to_safestr(region), out_ext))
part_info.append((region, region_outfile))
out_file = os.path.join(data["dirs"]["work"], dirname, data["name"][-1],
"%s%s" % (base_out, out_ext))
return out_file, part_info
return _do_work | [
"def",
"_split_by_regions",
"(",
"dirname",
",",
"out_ext",
",",
"in_key",
")",
":",
"def",
"_do_work",
"(",
"data",
")",
":",
"# XXX Need to move retrieval of regions into preparation to avoid",
"# need for files when running in non-shared filesystems",
"regions",
"=",
"_get... | Split a BAM file data analysis into chromosomal regions. | [
"Split",
"a",
"BAM",
"file",
"data",
"analysis",
"into",
"chromosomal",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L40-L65 |
237,770 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | _get_parallel_regions | def _get_parallel_regions(data):
"""Retrieve regions to run in parallel, putting longest intervals first.
"""
callable_regions = tz.get_in(["config", "algorithm", "callable_regions"], data)
if not callable_regions:
raise ValueError("Did not find any callable regions for sample: %s\n"
"Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions"
% (dd.get_sample_name(data), dd.get_sample_name(data)))
with open(callable_regions) as in_handle:
regions = [(xs[0], int(xs[1]), int(xs[2])) for xs in
(l.rstrip().split("\t") for l in in_handle) if (len(xs) >= 3 and
not xs[0].startswith(("track", "browser",)))]
return regions | python | def _get_parallel_regions(data):
callable_regions = tz.get_in(["config", "algorithm", "callable_regions"], data)
if not callable_regions:
raise ValueError("Did not find any callable regions for sample: %s\n"
"Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions"
% (dd.get_sample_name(data), dd.get_sample_name(data)))
with open(callable_regions) as in_handle:
regions = [(xs[0], int(xs[1]), int(xs[2])) for xs in
(l.rstrip().split("\t") for l in in_handle) if (len(xs) >= 3 and
not xs[0].startswith(("track", "browser",)))]
return regions | [
"def",
"_get_parallel_regions",
"(",
"data",
")",
":",
"callable_regions",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"callable_regions\"",
"]",
",",
"data",
")",
"if",
"not",
"callable_regions",
":",
"raise",
"ValueError",
... | Retrieve regions to run in parallel, putting longest intervals first. | [
"Retrieve",
"regions",
"to",
"run",
"in",
"parallel",
"putting",
"longest",
"intervals",
"first",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L67-L79 |
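A sketch of the BED parsing in _get_parallel_regions on an in-memory example: keep lines with at least three columns, skip track/browser headers, and coerce coordinates to int.

bed_text = """track name=callable
chr1\t0\t1000000
chr1\t2000000\t2500000
chr2\t0\t500000
"""
regions = [(xs[0], int(xs[1]), int(xs[2]))
           for xs in (l.rstrip().split("\t") for l in bed_text.splitlines())
           if len(xs) >= 3 and not xs[0].startswith(("track", "browser"))]
print(regions)  # [('chr1', 0, 1000000), ('chr1', 2000000, 2500000), ('chr2', 0, 500000)]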
237,771 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | get_parallel_regions | def get_parallel_regions(batch):
"""CWL target to retrieve a list of callable regions for parallelization.
"""
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
return [{"region": "%s:%s-%s" % (c, s, e)} for c, s, e in regions] | python | def get_parallel_regions(batch):
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
return [{"region": "%s:%s-%s" % (c, s, e)} for c, s, e in regions] | [
"def",
"get_parallel_regions",
"(",
"batch",
")",
":",
"samples",
"=",
"[",
"utils",
".",
"to_single_data",
"(",
"d",
")",
"for",
"d",
"in",
"batch",
"]",
"regions",
"=",
"_get_parallel_regions",
"(",
"samples",
"[",
"0",
"]",
")",
"return",
"[",
"{",
... | CWL target to retrieve a list of callable regions for parallelization. | [
"CWL",
"target",
"to",
"retrieve",
"a",
"list",
"of",
"callable",
"regions",
"for",
"parallelization",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L81-L86 |
237,772 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | get_parallel_regions_block | def get_parallel_regions_block(batch):
"""CWL target to retrieve block group of callable regions for parallelization.
Uses blocking to handle multicore runs.
"""
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
out = []
# Currently don't have core information here so aim for about 10 items per partition
n = 10
for region_block in tz.partition_all(n, regions):
out.append({"region_block": ["%s:%s-%s" % (c, s, e) for c, s, e in region_block]})
return out | python | def get_parallel_regions_block(batch):
samples = [utils.to_single_data(d) for d in batch]
regions = _get_parallel_regions(samples[0])
out = []
# Currently don't have core information here so aim for about 10 items per partition
n = 10
for region_block in tz.partition_all(n, regions):
out.append({"region_block": ["%s:%s-%s" % (c, s, e) for c, s, e in region_block]})
return out | [
"def",
"get_parallel_regions_block",
"(",
"batch",
")",
":",
"samples",
"=",
"[",
"utils",
".",
"to_single_data",
"(",
"d",
")",
"for",
"d",
"in",
"batch",
"]",
"regions",
"=",
"_get_parallel_regions",
"(",
"samples",
"[",
"0",
"]",
")",
"out",
"=",
"[",... | CWL target to retrieve block group of callable regions for parallelization.
Uses blocking to handle multicore runs. | [
"CWL",
"target",
"to",
"retrieve",
"block",
"group",
"of",
"callable",
"regions",
"for",
"parallelization",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L88-L100 |
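The blocking in get_parallel_regions_block relies on tz.partition_all; a standard-library-only sketch of the same fixed-size chunking, with a block size of 3 instead of 10 to keep the output short:

def partition_all(n, seq):
    block = []
    for item in seq:
        block.append(item)
        if len(block) == n:
            yield block
            block = []
    if block:
        yield block  # final, possibly short, block

regions = [("chr1", s, s + 100) for s in range(0, 700, 100)]
blocks = [["%s:%s-%s" % (c, s, e) for c, s, e in b] for b in partition_all(3, regions)]
print(blocks[0])    # ['chr1:0-100', 'chr1:100-200', 'chr1:200-300']
print(len(blocks))  # 3 (two full blocks plus one of size 1)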
237,773 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | _add_combine_info | def _add_combine_info(output, combine_map, file_key):
"""Do not actually combine, but add details for later combining work.
Each sample will contain information on the out file and additional files
to merge, enabling other splits and recombines without losing information.
"""
files_per_output = collections.defaultdict(list)
for part_file, out_file in combine_map.items():
files_per_output[out_file].append(part_file)
out_by_file = collections.defaultdict(list)
out = []
for data in output:
# Do not pass along nochrom, noanalysis regions
if data["region"][0] not in ["nochrom", "noanalysis"]:
cur_file = data[file_key]
# If we didn't process, no need to add combine information
if cur_file in combine_map:
out_file = combine_map[cur_file]
if "combine" not in data:
data["combine"] = {}
data["combine"][file_key] = {"out": out_file,
"extras": files_per_output.get(out_file, [])}
out_by_file[out_file].append(data)
elif cur_file:
out_by_file[cur_file].append(data)
else:
out.append([data])
for samples in out_by_file.values():
regions = [x["region"] for x in samples]
region_bams = [x["work_bam"] for x in samples]
assert len(regions) == len(region_bams)
if len(set(region_bams)) == 1:
region_bams = [region_bams[0]]
data = samples[0]
data["region_bams"] = region_bams
data["region"] = regions
data = dd.set_mark_duplicates(data, data["config"]["algorithm"]["orig_markduplicates"])
del data["config"]["algorithm"]["orig_markduplicates"]
out.append([data])
return out | python | def _add_combine_info(output, combine_map, file_key):
files_per_output = collections.defaultdict(list)
for part_file, out_file in combine_map.items():
files_per_output[out_file].append(part_file)
out_by_file = collections.defaultdict(list)
out = []
for data in output:
# Do not pass along nochrom, noanalysis regions
if data["region"][0] not in ["nochrom", "noanalysis"]:
cur_file = data[file_key]
# If we didn't process, no need to add combine information
if cur_file in combine_map:
out_file = combine_map[cur_file]
if "combine" not in data:
data["combine"] = {}
data["combine"][file_key] = {"out": out_file,
"extras": files_per_output.get(out_file, [])}
out_by_file[out_file].append(data)
elif cur_file:
out_by_file[cur_file].append(data)
else:
out.append([data])
for samples in out_by_file.values():
regions = [x["region"] for x in samples]
region_bams = [x["work_bam"] for x in samples]
assert len(regions) == len(region_bams)
if len(set(region_bams)) == 1:
region_bams = [region_bams[0]]
data = samples[0]
data["region_bams"] = region_bams
data["region"] = regions
data = dd.set_mark_duplicates(data, data["config"]["algorithm"]["orig_markduplicates"])
del data["config"]["algorithm"]["orig_markduplicates"]
out.append([data])
return out | [
"def",
"_add_combine_info",
"(",
"output",
",",
"combine_map",
",",
"file_key",
")",
":",
"files_per_output",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"part_file",
",",
"out_file",
"in",
"combine_map",
".",
"items",
"(",
")",
":",
"fi... | Do not actually combine, but add details for later combining work.
Each sample will contain information on the out file and additional files
to merge, enabling other splits and recombines without losing information. | [
"Do",
"not",
"actually",
"combine",
"but",
"add",
"details",
"for",
"later",
"combining",
"work",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L102-L141 |
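The first step of _add_combine_info inverts the part-to-combined mapping so each combined output knows all of its parts. A sketch with made-up file names:

import collections

combine_map = {"s1-chr1-prep.bam": "s1-prep.bam",
               "s1-chr2-prep.bam": "s1-prep.bam"}
files_per_output = collections.defaultdict(list)
for part_file, out_file in combine_map.items():
    files_per_output[out_file].append(part_file)
print(dict(files_per_output))
# {'s1-prep.bam': ['s1-chr1-prep.bam', 's1-chr2-prep.bam']}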
237,774 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | parallel_prep_region | def parallel_prep_region(samples, run_parallel):
"""Perform full pre-variant calling BAM prep work on regions.
"""
file_key = "work_bam"
split_fn = _split_by_regions("bamprep", "-prep.bam", file_key)
# identify samples that do not need preparation -- no recalibration or realignment
extras = []
torun = []
for data in [x[0] for x in samples]:
if data.get("work_bam"):
data["align_bam"] = data["work_bam"]
if (not dd.get_realign(data) and not dd.get_variantcaller(data)):
extras.append([data])
elif not data.get(file_key):
extras.append([data])
else:
# Do not want to re-run duplicate marking after realignment
data["config"]["algorithm"]["orig_markduplicates"] = dd.get_mark_duplicates(data)
data = dd.set_mark_duplicates(data, False)
torun.append([data])
return extras + parallel_split_combine(torun, split_fn, run_parallel,
"piped_bamprep", _add_combine_info, file_key, ["config"]) | python | def parallel_prep_region(samples, run_parallel):
file_key = "work_bam"
split_fn = _split_by_regions("bamprep", "-prep.bam", file_key)
# identify samples that do not need preparation -- no recalibration or realignment
extras = []
torun = []
for data in [x[0] for x in samples]:
if data.get("work_bam"):
data["align_bam"] = data["work_bam"]
if (not dd.get_realign(data) and not dd.get_variantcaller(data)):
extras.append([data])
elif not data.get(file_key):
extras.append([data])
else:
# Do not want to re-run duplicate marking after realignment
data["config"]["algorithm"]["orig_markduplicates"] = dd.get_mark_duplicates(data)
data = dd.set_mark_duplicates(data, False)
torun.append([data])
return extras + parallel_split_combine(torun, split_fn, run_parallel,
"piped_bamprep", _add_combine_info, file_key, ["config"]) | [
"def",
"parallel_prep_region",
"(",
"samples",
",",
"run_parallel",
")",
":",
"file_key",
"=",
"\"work_bam\"",
"split_fn",
"=",
"_split_by_regions",
"(",
"\"bamprep\"",
",",
"\"-prep.bam\"",
",",
"file_key",
")",
"# identify samples that do not need preparation -- no recali... | Perform full pre-variant calling BAM prep work on regions. | [
"Perform",
"full",
"pre",
"-",
"variant",
"calling",
"BAM",
"prep",
"work",
"on",
"regions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L143-L164 |
237,775 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | delayed_bamprep_merge | def delayed_bamprep_merge(samples, run_parallel):
"""Perform a delayed merge on regional prepared BAM files.
"""
if any("combine" in data[0] for data in samples):
return run_parallel("delayed_bam_merge", samples)
else:
return samples | python | def delayed_bamprep_merge(samples, run_parallel):
if any("combine" in data[0] for data in samples):
return run_parallel("delayed_bam_merge", samples)
else:
return samples | [
"def",
"delayed_bamprep_merge",
"(",
"samples",
",",
"run_parallel",
")",
":",
"if",
"any",
"(",
"\"combine\"",
"in",
"data",
"[",
"0",
"]",
"for",
"data",
"in",
"samples",
")",
":",
"return",
"run_parallel",
"(",
"\"delayed_bam_merge\"",
",",
"samples",
")"... | Perform a delayed merge on regional prepared BAM files. | [
"Perform",
"a",
"delayed",
"merge",
"on",
"regional",
"prepared",
"BAM",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L166-L172 |
237,776 | bcbio/bcbio-nextgen | bcbio/pipeline/region.py | clean_sample_data | def clean_sample_data(samples):
"""Clean unnecessary information from sample data, reducing size for message passing.
"""
out = []
for data in (utils.to_single_data(x) for x in samples):
if "dirs" in data:
data["dirs"] = {"work": data["dirs"]["work"], "galaxy": data["dirs"]["galaxy"],
"fastq": data["dirs"].get("fastq")}
data["config"] = {"algorithm": data["config"]["algorithm"],
"resources": data["config"]["resources"]}
for remove_attr in ["config_file", "algorithm"]:
data.pop(remove_attr, None)
out.append([data])
return out | python | def clean_sample_data(samples):
out = []
for data in (utils.to_single_data(x) for x in samples):
if "dirs" in data:
data["dirs"] = {"work": data["dirs"]["work"], "galaxy": data["dirs"]["galaxy"],
"fastq": data["dirs"].get("fastq")}
data["config"] = {"algorithm": data["config"]["algorithm"],
"resources": data["config"]["resources"]}
for remove_attr in ["config_file", "algorithm"]:
data.pop(remove_attr, None)
out.append([data])
return out | [
"def",
"clean_sample_data",
"(",
"samples",
")",
":",
"out",
"=",
"[",
"]",
"for",
"data",
"in",
"(",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"samples",
")",
":",
"if",
"\"dirs\"",
"in",
"data",
":",
"data",
"[",
"\"dirs\"",
... | Clean unnecessary information from sample data, reducing size for message passing. | [
"Clean",
"unnecessary",
"information",
"from",
"sample",
"data",
"reducing",
"size",
"for",
"message",
"passing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L176-L189 |
237,777 | bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | _add_sj_index_commands | def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
newer versions of STAR can generate splice junction databases on the fly
this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd | python | def _add_sj_index_commands(fq1, ref_file, gtf_file):
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd | [
"def",
"_add_sj_index_commands",
"(",
"fq1",
",",
"ref_file",
",",
"gtf_file",
")",
":",
"if",
"_has_sj_index",
"(",
"ref_file",
")",
":",
"return",
"\"\"",
"else",
":",
"rlength",
"=",
"fastq",
".",
"estimate_maximum_read_length",
"(",
"fq1",
")",
"cmd",
"=... | newer versions of STAR can generate splice junction databases on thephfly
this is preferable since we can tailor it to the read lengths | [
"newer",
"versions",
"of",
"STAR",
"can",
"generate",
"splice",
"junction",
"databases",
"on",
"thephfly",
"this",
"is",
"preferable",
"since",
"we",
"can",
"tailor",
"it",
"to",
"the",
"read",
"lengths"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L119-L130 |
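A sketch of the on-the-fly splice junction options built above: STAR's recommended --sjdbOverhang is the maximum read length minus one. The read length and GTF path are illustrative values, not bcbio outputs:

def sj_index_options(max_read_length, gtf_file):
    return " --sjdbGTFfile %s --sjdbOverhang %s " % (gtf_file, max_read_length - 1)

print(sj_index_options(100, "ref-transcripts.gtf"))
# prints: " --sjdbGTFfile ref-transcripts.gtf --sjdbOverhang 99 "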
237,778 | bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | _has_sj_index | def _has_sj_index(ref_file):
"""this file won't exist if we can do on the fly splice junction indexing"""
return (file_exists(os.path.join(ref_file, "sjdbInfo.txt")) and
(file_exists(os.path.join(ref_file, "transcriptInfo.tab")))) | python | def _has_sj_index(ref_file):
return (file_exists(os.path.join(ref_file, "sjdbInfo.txt")) and
(file_exists(os.path.join(ref_file, "transcriptInfo.tab")))) | [
"def",
"_has_sj_index",
"(",
"ref_file",
")",
":",
"return",
"(",
"file_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ref_file",
",",
"\"sjdbInfo.txt\"",
")",
")",
"and",
"(",
"file_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ref_file",
"... | this file won't exist if we can do on the fly splice junction indexing | [
"this",
"file",
"won",
"t",
"exist",
"if",
"we",
"can",
"do",
"on",
"the",
"fly",
"splice",
"junction",
"indexing"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L132-L135 |
237,779 | bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | remap_index_fn | def remap_index_fn(ref_file):
"""Map sequence references to equivalent star indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star") | python | def remap_index_fn(ref_file):
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star") | [
"def",
"remap_index_fn",
"(",
"ref_file",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"ref_file",
")",
")",
",",
"\"star\"",
")"
] | Map sequence references to equivalent star indexes | [
"Map",
"sequence",
"references",
"to",
"equivalent",
"star",
"indexes"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L171-L174 |
237,780 | bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | index | def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir | python | def index(ref_file, out_dir, data):
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir | [
"def",
"index",
"(",
"ref_file",
",",
"out_dir",
",",
"data",
")",
":",
"(",
"ref_dir",
",",
"local_file",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"ref_file",
")",
"gtf_file",
"=",
"dd",
".",
"get_gtf_file",
"(",
"data",
")",
"if",
"not",
"... | Create a STAR index in the defined reference directory. | [
"Create",
"a",
"STAR",
"index",
"in",
"the",
"defined",
"reference",
"directory",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L176-L193 |
237,781 | bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | get_splicejunction_file | def get_splicejunction_file(out_dir, data):
"""
locate the splice junction file starting from the alignment directory
"""
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, "{0}SJ.out.tab").format(samplename)
if file_exists(sjfile):
return sjfile
else:
return None | python | def get_splicejunction_file(out_dir, data):
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, "{0}SJ.out.tab").format(samplename)
if file_exists(sjfile):
return sjfile
else:
return None | [
"def",
"get_splicejunction_file",
"(",
"out_dir",
",",
"data",
")",
":",
"samplename",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"sjfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"os",
".",
"pardir",
",",
"\"{0}SJ.out.tab\"",
... | locate the splice junction file starting from the alignment directory | [
"locate",
"the",
"splicejunction",
"file",
"starting",
"from",
"the",
"alignment",
"directory"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L207-L216 |
237,782 | bcbio/bcbio-nextgen | bcbio/ngsalign/star.py | junction2bed | def junction2bed(junction_file):
"""
reformat the STAR junction file to BED3 format, one end of the splice junction per line
"""
base, _ = os.path.splitext(junction_file)
out_file = base + "-minimized.bed"
if file_exists(out_file):
return out_file
if not file_exists(junction_file):
return None
with file_transaction(out_file) as tx_out_file:
with open(junction_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
tokens = line.split()
chrom, sj1, sj2 = tokens[0:3]
if int(sj1) > int(sj2):
tmp = sj1
sj1 = sj2
sj2 = tmp
out_handle.write("\t".join([chrom, sj1, sj1]) + "\n")
out_handle.write("\t".join([chrom, sj2, sj2]) + "\n")
minimize = bed.minimize(tx_out_file)
minimize.saveas(tx_out_file)
return out_file | python | def junction2bed(junction_file):
base, _ = os.path.splitext(junction_file)
out_file = base + "-minimized.bed"
if file_exists(out_file):
return out_file
if not file_exists(junction_file):
return None
with file_transaction(out_file) as tx_out_file:
with open(junction_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
tokens = line.split()
chrom, sj1, sj2 = tokens[0:3]
if int(sj1) > int(sj2):
tmp = sj1
sj1 = sj2
sj2 = tmp
out_handle.write("\t".join([chrom, sj1, sj1]) + "\n")
out_handle.write("\t".join([chrom, sj2, sj2]) + "\n")
minimize = bed.minimize(tx_out_file)
minimize.saveas(tx_out_file)
return out_file | [
"def",
"junction2bed",
"(",
"junction_file",
")",
":",
"base",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"junction_file",
")",
"out_file",
"=",
"base",
"+",
"\"-minimized.bed\"",
"if",
"file_exists",
"(",
"out_file",
")",
":",
"return",
"out_... | reformat the STAR junction file to BED3 format, one end of the splice junction per line | [
"reformat",
"the",
"STAR",
"junction",
"file",
"to",
"BED3",
"format",
"one",
"end",
"of",
"the",
"splice",
"junction",
"per",
"line"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/star.py#L218-L242 |
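A sketch of the per-line conversion in junction2bed: each STAR SJ.out.tab record becomes two single-position BED3 lines (zero-length intervals, as in the function above), with coordinates ordered low-to-high. The input line is a fabricated example of STAR's tab-separated format:

def sj_line_to_bed(line):
    chrom, sj1, sj2 = line.split()[:3]
    if int(sj1) > int(sj2):
        sj1, sj2 = sj2, sj1
    return ["\t".join([chrom, sj1, sj1]), "\t".join([chrom, sj2, sj2])]

print(sj_line_to_bed("chr1\t14830\t14969\t2\t2\t1\t10\t4\t42"))
# ['chr1\t14830\t14830', 'chr1\t14969\t14969']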
237,783 | bcbio/bcbio-nextgen | bcbio/hla/optitype.py | run | def run(data):
"""HLA typing with OptiType, parsing output from called genotype files.
"""
hlas = []
for hla_fq in tz.get_in(["hla", "fastq"], data, []):
hla_type = re.search("[.-](?P<hlatype>HLA-[\w-]+).fq", hla_fq).group("hlatype")
if hla_type in SUPPORTED_HLAS:
if utils.file_exists(hla_fq):
hlas.append((hla_type, hla_fq))
if len(hlas) > 0:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla",
"OptiType-HLA-A_B_C"))
# When running UMIs and hla typing we want to pick the original fastqs
if len(hlas) > len(SUPPORTED_HLAS):
hlas = [x for x in hlas if os.path.basename(x[1]).find("-cumi") == -1]
if len(hlas) == len(SUPPORTED_HLAS):
hla_fq = combine_hla_fqs(hlas, out_dir + "-input.fq", data)
if utils.file_exists(hla_fq):
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
if len(out_file) > 0:
out_file = out_file[0]
else:
out_file = _call_hla(hla_fq, out_dir, data)
out_file = _prepare_calls(out_file, os.path.dirname(out_dir), data)
data["hla"].update({"call_file": out_file,
"hlacaller": "optitype"})
return data | python | def run(data):
hlas = []
for hla_fq in tz.get_in(["hla", "fastq"], data, []):
hla_type = re.search("[.-](?P<hlatype>HLA-[\w-]+).fq", hla_fq).group("hlatype")
if hla_type in SUPPORTED_HLAS:
if utils.file_exists(hla_fq):
hlas.append((hla_type, hla_fq))
if len(hlas) > 0:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla",
"OptiType-HLA-A_B_C"))
# When running UMIs and hla typing we want to pick the original fastqs
if len(hlas) > len(SUPPORTED_HLAS):
hlas = [x for x in hlas if os.path.basename(x[1]).find("-cumi") == -1]
if len(hlas) == len(SUPPORTED_HLAS):
hla_fq = combine_hla_fqs(hlas, out_dir + "-input.fq", data)
if utils.file_exists(hla_fq):
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
if len(out_file) > 0:
out_file = out_file[0]
else:
out_file = _call_hla(hla_fq, out_dir, data)
out_file = _prepare_calls(out_file, os.path.dirname(out_dir), data)
data["hla"].update({"call_file": out_file,
"hlacaller": "optitype"})
return data | [
"def",
"run",
"(",
"data",
")",
":",
"hlas",
"=",
"[",
"]",
"for",
"hla_fq",
"in",
"tz",
".",
"get_in",
"(",
"[",
"\"hla\"",
",",
"\"fastq\"",
"]",
",",
"data",
",",
"[",
"]",
")",
":",
"hla_type",
"=",
"re",
".",
"search",
"(",
"\"[.-](?P<hlatyp... | HLA typing with OptiType, parsing output from called genotype files. | [
"HLA",
"typing",
"with",
"OptiType",
"parsing",
"output",
"from",
"called",
"genotype",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L23-L50 |
237,784 | bcbio/bcbio-nextgen | bcbio/hla/optitype.py | combine_hla_fqs | def combine_hla_fqs(hlas, out_file, data):
"""OptiType performs best on a combination of all extracted HLAs.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for hla_type, hla_fq in hlas:
if utils.file_exists(hla_fq):
with open(hla_fq) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
return out_file | python | def combine_hla_fqs(hlas, out_file, data):
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for hla_type, hla_fq in hlas:
if utils.file_exists(hla_fq):
with open(hla_fq) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
return out_file | [
"def",
"combine_hla_fqs",
"(",
"hlas",
",",
"out_file",
",",
"data",
")",
":",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"with",
"open",
"... | OptiType performs best on a combination of all extracted HLAs. | [
"OptiType",
"performs",
"best",
"on",
"a",
"combination",
"of",
"all",
"extracted",
"HLAs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L52-L62 |
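A minimal standalone version of the concatenation step, without the transaction and existence checks; copyfileobj streams each per-locus fastq so reads never sit fully in memory:

```python
import shutil

def concat_fastqs(fq_paths, out_path):
    # Append each input fastq to the combined output in order.
    with open(out_path, "w") as out_handle:
        for path in fq_paths:
            with open(path) as in_handle:
                shutil.copyfileobj(in_handle, out_handle)
```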
237,785 | bcbio/bcbio-nextgen | bcbio/hla/optitype.py | _prepare_calls | def _prepare_calls(result_file, out_dir, data):
"""Write summary file of results of HLA typing by allele.
"""
sample = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s-optitype.csv" % (sample))
if not utils.file_uptodate(out_file, result_file):
hla_truth = bwakit.get_hla_truthset(data)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
allele_info = _parse_result_file(result_file)
if len(allele_info) == 1:
writer.writerow(["sample", "locus", "alleles", "expected", "validates"])
else:
writer.writerow(["sample", "local", "index", "alleles", "score"])
for j, (alleles, score) in enumerate(allele_info):
for hla_locus, call_alleles in alleles:
truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
if len(allele_info) == 1:
writer.writerow([sample, hla_locus,
";".join(call_alleles), ";".join(truth_alleles),
bwakit.matches_truth(call_alleles, truth_alleles, data)])
else:
writer.writerow([sample, hla_locus, j, ";".join(call_alleles), score])
return out_file | python | def _prepare_calls(result_file, out_dir, data):
sample = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s-optitype.csv" % (sample))
if not utils.file_uptodate(out_file, result_file):
hla_truth = bwakit.get_hla_truthset(data)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
allele_info = _parse_result_file(result_file)
if len(allele_info) == 1:
writer.writerow(["sample", "locus", "alleles", "expected", "validates"])
else:
writer.writerow(["sample", "local", "index", "alleles", "score"])
for j, (alleles, score) in enumerate(allele_info):
for hla_locus, call_alleles in alleles:
truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])
if len(allele_info) == 1:
writer.writerow([sample, hla_locus,
";".join(call_alleles), ";".join(truth_alleles),
bwakit.matches_truth(call_alleles, truth_alleles, data)])
else:
writer.writerow([sample, hla_locus, j, ";".join(call_alleles), score])
return out_file | [
"def",
"_prepare_calls",
"(",
"result_file",
",",
"out_dir",
",",
"data",
")",
":",
"sample",
"=",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"%s-optitype.csv\"",
"%",
"(",
"sam... | Write summary file of results of HLA typing by allele. | [
"Write",
"summary",
"file",
"of",
"results",
"of",
"HLA",
"typing",
"by",
"allele",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L64-L88 |
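A sketch of the summary layout for the single-solution case, using hypothetical calls; the truth columns are left blank here purely for illustration:

```python
import csv
import sys

calls = [("HLA-A", ["HLA-A*01:01", "HLA-A*02:01"]),
         ("HLA-B", ["HLA-B*08:01", "HLA-B*57:01"])]
writer = csv.writer(sys.stdout)
writer.writerow(["sample", "locus", "alleles", "expected", "validates"])
for locus, alleles in calls:
    writer.writerow(["NA12878", locus, ";".join(alleles), "", ""])
```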
237,786 | bcbio/bcbio-nextgen | bcbio/hla/optitype.py | _call_hla | def _call_hla(hla_fq, out_dir, data):
"""Run OptiType HLA calling for a specific fastq input.
"""
bin_dir = os.path.dirname(os.path.realpath(sys.executable))
out_dir = utils.safe_makedir(out_dir)
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
config_file = os.path.join(tx_out_dir, "config.ini")
with open(config_file, "w") as out_handle:
razers3 = os.path.join(bin_dir, "razers3")
if not os.path.exists(razers3):
raise ValueError("Could not find razers3 executable at %s" % (razers3))
out_handle.write(CONFIG_TMPL.format(razers3=razers3, cores=dd.get_cores(data)))
resources = config_utils.get_resources("optitype", data["config"])
if resources.get("options"):
opts = " ".join([str(x) for x in resources["options"]])
else:
opts = ""
cmd = ("OptiTypePipeline.py -v --dna {opts} -o {tx_out_dir} "
"-i {hla_fq} -c {config_file}")
do.run(cmd.format(**locals()), "HLA typing with OptiType")
for outf in os.listdir(tx_out_dir):
shutil.move(os.path.join(tx_out_dir, outf), os.path.join(out_dir, outf))
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
assert len(out_file) == 1, "Expected one result file for OptiType, found %s" % out_file
return out_file[0] | python | def _call_hla(hla_fq, out_dir, data):
bin_dir = os.path.dirname(os.path.realpath(sys.executable))
out_dir = utils.safe_makedir(out_dir)
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
config_file = os.path.join(tx_out_dir, "config.ini")
with open(config_file, "w") as out_handle:
razers3 = os.path.join(bin_dir, "razers3")
if not os.path.exists(razers3):
raise ValueError("Could not find razers3 executable at %s" % (razers3))
out_handle.write(CONFIG_TMPL.format(razers3=razers3, cores=dd.get_cores(data)))
resources = config_utils.get_resources("optitype", data["config"])
if resources.get("options"):
opts = " ".join([str(x) for x in resources["options"]])
else:
opts = ""
cmd = ("OptiTypePipeline.py -v --dna {opts} -o {tx_out_dir} "
"-i {hla_fq} -c {config_file}")
do.run(cmd.format(**locals()), "HLA typing with OptiType")
for outf in os.listdir(tx_out_dir):
shutil.move(os.path.join(tx_out_dir, outf), os.path.join(out_dir, outf))
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
assert len(out_file) == 1, "Expected one result file for OptiType, found %s" % out_file
return out_file[0] | [
"def",
"_call_hla",
"(",
"hla_fq",
",",
"out_dir",
",",
"data",
")",
":",
"bin_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"sys",
".",
"executable",
")",
")",
"out_dir",
"=",
"utils",
".",
"safe_makedir... | Run OptiType HLA calling for a specific fastq input. | [
"Run",
"OptiType",
"HLA",
"calling",
"for",
"a",
"specific",
"fastq",
"input",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L108-L132 |
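The shape of the command the function assembles; every path below is a placeholder rather than a value the pipeline guarantees:

```python
opts = ""  # extra options come from the "optitype" resources entry, if any
cmd = ("OptiTypePipeline.py -v --dna {opts} -o {tx_out_dir} "
       "-i {hla_fq} -c {config_file}")
print(cmd.format(opts=opts, tx_out_dir="tx/optitype", hla_fq="combined-input.fq",
                 config_file="tx/optitype/config.ini"))
```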
237,787 | bcbio/bcbio-nextgen | bcbio/heterogeneity/chromhacks.py | is_autosomal | def is_autosomal(chrom):
"""Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22
"""
try:
int(chrom)
return True
except ValueError:
try:
int(str(chrom.lower().replace("chr", "").replace("_", "").replace("-", "")))
return True
except ValueError:
return False | python | def is_autosomal(chrom):
try:
int(chrom)
return True
except ValueError:
try:
int(str(chrom.lower().replace("chr", "").replace("_", "").replace("-", "")))
return True
except ValueError:
return False | [
"def",
"is_autosomal",
"(",
"chrom",
")",
":",
"try",
":",
"int",
"(",
"chrom",
")",
"return",
"True",
"except",
"ValueError",
":",
"try",
":",
"int",
"(",
"str",
"(",
"chrom",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\"chr\"",
",",
"\"\"",
")... | Keep chromosomes that are a digit 1-22, or chr prefixed digit chr1-chr22 | [
"Keep",
"chromosomes",
"that",
"are",
"a",
"digit",
"1",
"-",
"22",
"or",
"chr",
"prefixed",
"digit",
"chr1",
"-",
"chr22"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/chromhacks.py#L11-L22 |
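A condensed, behavior-equivalent reimplementation showing which names the check accepts:

```python
def is_autosomal(chrom):
    # Strip "chr"/"_"/"-" and accept anything that parses as an integer.
    try:
        int(str(chrom).lower().replace("chr", "").replace("_", "").replace("-", ""))
        return True
    except ValueError:
        return False

for name in ["1", "chr22", "chrX", "MT", "chr6_ssto_hap7", "GL000192.1"]:
    print(name, is_autosomal(name))
# Only "1" and "chr22" print True.
```

Note the check accepts any all-digit name after stripping, not strictly 1-22 as the docstring suggests.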
237,788 | bcbio/bcbio-nextgen | bcbio/qc/variant.py | _bcftools_stats | def _bcftools_stats(data, out_dir, vcf_file_key=None, germline=False):
"""Run bcftools stats.
"""
vcinfo = get_active_vcinfo(data)
if vcinfo:
out_dir = utils.safe_makedir(out_dir)
vcf_file = vcinfo[vcf_file_key or "vrn_file"]
if dd.get_jointcaller(data) or "gvcf" in dd.get_tools_on(data):
opts = ""
else:
opts = "-f PASS,."
name = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s_bcftools_stats%s.txt" % (name, ("_germline" if germline else "")))
bcftools = config_utils.get_program("bcftools", data["config"])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
orig_out_file = os.path.join(os.path.dirname(tx_out_file), "orig_%s" % os.path.basename(tx_out_file))
cmd = ("{bcftools} stats -s {name} {opts} {vcf_file} > {orig_out_file}")
do.run(cmd.format(**locals()), "bcftools stats %s" % name)
with open(orig_out_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("ID\t"):
parts = line.split("\t")
parts[-1] = "%s\n" % name
line = "\t".join(parts)
out_handle.write(line)
return out_file | python | def _bcftools_stats(data, out_dir, vcf_file_key=None, germline=False):
vcinfo = get_active_vcinfo(data)
if vcinfo:
out_dir = utils.safe_makedir(out_dir)
vcf_file = vcinfo[vcf_file_key or "vrn_file"]
if dd.get_jointcaller(data) or "gvcf" in dd.get_tools_on(data):
opts = ""
else:
opts = "-f PASS,."
name = dd.get_sample_name(data)
out_file = os.path.join(out_dir, "%s_bcftools_stats%s.txt" % (name, ("_germline" if germline else "")))
bcftools = config_utils.get_program("bcftools", data["config"])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
orig_out_file = os.path.join(os.path.dirname(tx_out_file), "orig_%s" % os.path.basename(tx_out_file))
cmd = ("{bcftools} stats -s {name} {opts} {vcf_file} > {orig_out_file}")
do.run(cmd.format(**locals()), "bcftools stats %s" % name)
with open(orig_out_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("ID\t"):
parts = line.split("\t")
parts[-1] = "%s\n" % name
line = "\t".join(parts)
out_handle.write(line)
return out_file | [
"def",
"_bcftools_stats",
"(",
"data",
",",
"out_dir",
",",
"vcf_file_key",
"=",
"None",
",",
"germline",
"=",
"False",
")",
":",
"vcinfo",
"=",
"get_active_vcinfo",
"(",
"data",
")",
"if",
"vcinfo",
":",
"out_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
... | Run bcftools stats. | [
"Run",
"bcftools",
"stats",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L50-L77 |
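The underlying command line, with placeholder file names; with joint calling or gVCF output the PASS filter is dropped so all records are counted:

```python
name, vcf_file = "NA12878", "NA12878-gatk-haplotype.vcf.gz"
opts = "-f PASS,."  # count only passing and unfiltered records
print("bcftools stats -s {name} {opts} {vcf_file} > orig_stats.txt".format(
    name=name, opts=opts, vcf_file=vcf_file))
```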
237,789 | bcbio/bcbio-nextgen | bcbio/qc/variant.py | _add_filename_details | def _add_filename_details(full_f):
"""Add variant callers and germline information standard CWL filenames.
This is an ugly way of working around not having metadata with calls.
"""
out = {"vrn_file": full_f}
f = os.path.basename(full_f)
for vc in list(genotype.get_variantcallers().keys()) + ["ensemble"]:
if f.find("-%s.vcf" % vc) > 0:
out["variantcaller"] = vc
if f.find("-germline-") >= 0:
out["germline"] = full_f
return out | python | def _add_filename_details(full_f):
out = {"vrn_file": full_f}
f = os.path.basename(full_f)
for vc in list(genotype.get_variantcallers().keys()) + ["ensemble"]:
if f.find("-%s.vcf" % vc) > 0:
out["variantcaller"] = vc
if f.find("-germline-") >= 0:
out["germline"] = full_f
return out | [
"def",
"_add_filename_details",
"(",
"full_f",
")",
":",
"out",
"=",
"{",
"\"vrn_file\"",
":",
"full_f",
"}",
"f",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"full_f",
")",
"for",
"vc",
"in",
"list",
"(",
"genotype",
".",
"get_variantcallers",
"(",
... | Add variant callers and germline information from standard CWL filenames.
This is an ugly way of working around not having metadata with calls. | [
"Add",
"variant",
"callers",
"and",
"germline",
"information",
"standard",
"CWL",
"filenames",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L79-L91 |
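A stdlib sketch of the filename heuristics; KNOWN_CALLERS stands in for genotype.get_variantcallers() and lists only an assumed subset:

```python
import os

KNOWN_CALLERS = ["gatk-haplotype", "vardict", "strelka2", "ensemble"]

def filename_details(full_f):
    out = {"vrn_file": full_f}
    f = os.path.basename(full_f)
    for vc in KNOWN_CALLERS:
        if f.find("-%s.vcf" % vc) > 0:
            out["variantcaller"] = vc
    if f.find("-germline-") >= 0:
        out["germline"] = full_f
    return out

print(filename_details("/cwl/B1-germline-vardict.vcf.gz"))
# {'vrn_file': '/cwl/B1-germline-vardict.vcf.gz', 'variantcaller': 'vardict',
#  'germline': '/cwl/B1-germline-vardict.vcf.gz'}
```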
237,790 | bcbio/bcbio-nextgen | bcbio/qc/variant.py | _get_variants | def _get_variants(data):
"""Retrieve variants from CWL and standard inputs for organizing variants.
"""
active_vs = []
if "variants" in data:
variants = data["variants"]
# CWL based list of variants
if isinstance(variants, dict) and "samples" in variants:
variants = variants["samples"]
for v in variants:
# CWL -- a single variant file
if isinstance(v, six.string_types) and os.path.exists(v):
active_vs.append(_add_filename_details(v))
elif (isinstance(v, (list, tuple)) and len(v) > 0 and
isinstance(v[0], six.string_types) and os.path.exists(v[0])):
for subv in v:
active_vs.append(_add_filename_details(subv))
elif isinstance(v, dict):
if v.get("vrn_file"):
active_vs.append(v)
elif v.get("population"):
vrnfile = v.get("population").get("vcf")
active_vs.append(_add_filename_details(vrnfile))
elif v.get("vcf"):
active_vs.append(_add_filename_details(v.get("vcf")))
return active_vs | python | def _get_variants(data):
active_vs = []
if "variants" in data:
variants = data["variants"]
# CWL based list of variants
if isinstance(variants, dict) and "samples" in variants:
variants = variants["samples"]
for v in variants:
# CWL -- a single variant file
if isinstance(v, six.string_types) and os.path.exists(v):
active_vs.append(_add_filename_details(v))
elif (isinstance(v, (list, tuple)) and len(v) > 0 and
isinstance(v[0], six.string_types) and os.path.exists(v[0])):
for subv in v:
active_vs.append(_add_filename_details(subv))
elif isinstance(v, dict):
if v.get("vrn_file"):
active_vs.append(v)
elif v.get("population"):
vrnfile = v.get("population").get("vcf")
active_vs.append(_add_filename_details(vrnfile))
elif v.get("vcf"):
active_vs.append(_add_filename_details(v.get("vcf")))
return active_vs | [
"def",
"_get_variants",
"(",
"data",
")",
":",
"active_vs",
"=",
"[",
"]",
"if",
"\"variants\"",
"in",
"data",
":",
"variants",
"=",
"data",
"[",
"\"variants\"",
"]",
"# CWL based list of variants",
"if",
"isinstance",
"(",
"variants",
",",
"dict",
")",
"and... | Retrieve variants from CWL and standard inputs for organizing variants. | [
"Retrieve",
"variants",
"from",
"CWL",
"and",
"standard",
"inputs",
"for",
"organizing",
"variants",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L93-L118 |
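The input shapes the walker accepts, as literal examples (paths hypothetical):

```python
cwl_single = "/cwl/B1-vardict.vcf.gz"                      # bare file path
cwl_list = ["/cwl/B1-vardict.vcf.gz",
            "/cwl/B1-ensemble.vcf.gz"]                     # list of file paths
std_dict = {"vrn_file": "/work/B1-vardict.vcf.gz"}         # standard pipeline
pop_dict = {"population": {"vcf": "/work/B1-pop.vcf.gz"}}  # joint-called output
```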
237,791 | bcbio/bcbio-nextgen | bcbio/qc/variant.py | get_active_vcinfo | def get_active_vcinfo(data, use_ensemble=True):
"""Use first caller if ensemble is not active
"""
active_vs = _get_variants(data)
if len(active_vs) > 0:
e_active_vs = []
if use_ensemble:
e_active_vs = [v for v in active_vs if v.get("variantcaller") == "ensemble"]
if len(e_active_vs) == 0:
e_active_vs = [v for v in active_vs if v.get("variantcaller") != "ensemble"]
if len(e_active_vs) > 0:
return e_active_vs[0] | python | def get_active_vcinfo(data, use_ensemble=True):
active_vs = _get_variants(data)
if len(active_vs) > 0:
e_active_vs = []
if use_ensemble:
e_active_vs = [v for v in active_vs if v.get("variantcaller") == "ensemble"]
if len(e_active_vs) == 0:
e_active_vs = [v for v in active_vs if v.get("variantcaller") != "ensemble"]
if len(e_active_vs) > 0:
return e_active_vs[0] | [
"def",
"get_active_vcinfo",
"(",
"data",
",",
"use_ensemble",
"=",
"True",
")",
":",
"active_vs",
"=",
"_get_variants",
"(",
"data",
")",
"if",
"len",
"(",
"active_vs",
")",
">",
"0",
":",
"e_active_vs",
"=",
"[",
"]",
"if",
"use_ensemble",
":",
"e_activ... | Use first caller if ensemble is not active | [
"Use",
"first",
"caller",
"if",
"ensemble",
"is",
"not",
"active"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L120-L131 |
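The preference rule reduced to list filtering: take ensemble calls when present, otherwise fall back to the first non-ensemble caller:

```python
calls = [{"variantcaller": "vardict", "vrn_file": "b1-vardict.vcf.gz"},
         {"variantcaller": "ensemble", "vrn_file": "b1-ensemble.vcf.gz"}]
preferred = ([c for c in calls if c.get("variantcaller") == "ensemble"] or
             [c for c in calls if c.get("variantcaller") != "ensemble"])
print(preferred[0]["vrn_file"])  # b1-ensemble.vcf.gz
```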
237,792 | bcbio/bcbio-nextgen | bcbio/qc/variant.py | extract_germline_vcinfo | def extract_germline_vcinfo(data, out_dir):
"""Extract germline VCFs from existing tumor inputs.
"""
supported_germline = set(["vardict", "octopus", "freebayes"])
if dd.get_phenotype(data) in ["tumor"]:
for v in _get_variants(data):
if v.get("variantcaller") in supported_germline:
if v.get("germline"):
return v
else:
d = utils.deepish_copy(data)
d["vrn_file"] = v["vrn_file"]
gd = germline.extract(d, [d], out_dir)
v["germline"] = gd["vrn_file_plus"]["germline"]
return v | python | def extract_germline_vcinfo(data, out_dir):
supported_germline = set(["vardict", "octopus", "freebayes"])
if dd.get_phenotype(data) in ["tumor"]:
for v in _get_variants(data):
if v.get("variantcaller") in supported_germline:
if v.get("germline"):
return v
else:
d = utils.deepish_copy(data)
d["vrn_file"] = v["vrn_file"]
gd = germline.extract(d, [d], out_dir)
v["germline"] = gd["vrn_file_plus"]["germline"]
return v | [
"def",
"extract_germline_vcinfo",
"(",
"data",
",",
"out_dir",
")",
":",
"supported_germline",
"=",
"set",
"(",
"[",
"\"vardict\"",
",",
"\"octopus\"",
",",
"\"freebayes\"",
"]",
")",
"if",
"dd",
".",
"get_phenotype",
"(",
"data",
")",
"in",
"[",
"\"tumor\""... | Extract germline VCFs from existing tumor inputs. | [
"Extract",
"germline",
"VCFs",
"from",
"existing",
"tumor",
"inputs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/variant.py#L133-L147 |
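The gate applied before extraction, in isolation: tumor samples only, and only for callers whose output supports germline/somatic splitting (values below are illustrative):

```python
supported_germline = set(["vardict", "octopus", "freebayes"])
call = {"variantcaller": "vardict", "vrn_file": "tumor-vardict.vcf.gz"}
phenotype = "tumor"  # from sample metadata
if phenotype == "tumor" and call["variantcaller"] in supported_germline:
    print("would extract germline calls from", call["vrn_file"])
```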
237,793 | bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | merge_bam_files | def merge_bam_files(bam_files, work_dir, data, out_file=None, batch=None):
"""Merge multiple BAM files from a sample into a single BAM for processing.
Checks system open file limit and merges in batches if necessary to avoid
file handle limits.
"""
out_file = _merge_outfile_fname(out_file, bam_files, work_dir, batch)
if not utils.file_exists(out_file):
if len(bam_files) == 1 and bam.bam_already_sorted(bam_files[0], data["config"], "coordinate"):
with file_transaction(data, out_file) as tx_out_file:
_create_merge_filelist(bam_files, tx_out_file, data["config"])
out_file = bam_files[0]
samtools = config_utils.get_program("samtools", data["config"])
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
else:
with tx_tmpdir(data) as tmpdir:
with utils.chdir(tmpdir):
with file_transaction(data, out_file) as tx_out_file:
tx_bam_file_list = _create_merge_filelist(bam_files, tx_out_file, data["config"])
samtools = config_utils.get_program("samtools", data["config"])
resources = config_utils.get_resources("samtools", data["config"])
num_cores = dd.get_num_cores(data)
# Aim for 3.5Gb/core memory for BAM merging
num_cores = config_utils.adjust_cores_to_mb_target(
3500, resources.get("memory", "2G"), num_cores)
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
2, "decrease").upper()
if dd.get_mark_duplicates(data):
cmd = _biobambam_merge_dedup_maxcov(data)
else:
cmd = _biobambam_merge_maxcov(data)
do.run(cmd.format(**locals()), "Merge bam files to %s" % os.path.basename(out_file),
None)
do.run('{} quickcheck -v {}'.format(samtools, tx_out_file),
"Check for valid merged BAM")
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
_finalize_merge(out_file, bam_files, data["config"])
bam.index(out_file, data["config"])
return out_file | python | def merge_bam_files(bam_files, work_dir, data, out_file=None, batch=None):
out_file = _merge_outfile_fname(out_file, bam_files, work_dir, batch)
if not utils.file_exists(out_file):
if len(bam_files) == 1 and bam.bam_already_sorted(bam_files[0], data["config"], "coordinate"):
with file_transaction(data, out_file) as tx_out_file:
_create_merge_filelist(bam_files, tx_out_file, data["config"])
out_file = bam_files[0]
samtools = config_utils.get_program("samtools", data["config"])
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
else:
with tx_tmpdir(data) as tmpdir:
with utils.chdir(tmpdir):
with file_transaction(data, out_file) as tx_out_file:
tx_bam_file_list = _create_merge_filelist(bam_files, tx_out_file, data["config"])
samtools = config_utils.get_program("samtools", data["config"])
resources = config_utils.get_resources("samtools", data["config"])
num_cores = dd.get_num_cores(data)
# Aim for 3.5Gb/core memory for BAM merging
num_cores = config_utils.adjust_cores_to_mb_target(
3500, resources.get("memory", "2G"), num_cores)
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
2, "decrease").upper()
if dd.get_mark_duplicates(data):
cmd = _biobambam_merge_dedup_maxcov(data)
else:
cmd = _biobambam_merge_maxcov(data)
do.run(cmd.format(**locals()), "Merge bam files to %s" % os.path.basename(out_file),
None)
do.run('{} quickcheck -v {}'.format(samtools, tx_out_file),
"Check for valid merged BAM")
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
_finalize_merge(out_file, bam_files, data["config"])
bam.index(out_file, data["config"])
return out_file | [
"def",
"merge_bam_files",
"(",
"bam_files",
",",
"work_dir",
",",
"data",
",",
"out_file",
"=",
"None",
",",
"batch",
"=",
"None",
")",
":",
"out_file",
"=",
"_merge_outfile_fname",
"(",
"out_file",
",",
"bam_files",
",",
"work_dir",
",",
"batch",
")",
"if... | Merge multiple BAM files from a sample into a single BAM for processing.
Checks system open file limit and merges in batches if necessary to avoid
file handle limits. | [
"Merge",
"multiple",
"BAM",
"files",
"from",
"a",
"sample",
"into",
"a",
"single",
"BAM",
"for",
"processing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L39-L79 |
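The merge brackets its work with `samtools quickcheck -v`. A helper along these lines (assumes samtools on PATH) returns False for truncated or corrupt BAMs:

```python
import subprocess

def bam_is_valid(path, samtools="samtools"):
    # quickcheck exits non-zero on a missing EOF block or bad header.
    return subprocess.call([samtools, "quickcheck", "-v", path]) == 0
```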
237,794 | bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | _create_merge_filelist | def _create_merge_filelist(bam_files, base_file, config):
"""Create list of input files for merge, ensuring all files are valid.
"""
bam_file_list = "%s.list" % os.path.splitext(base_file)[0]
samtools = config_utils.get_program("samtools", config)
with open(bam_file_list, "w") as out_handle:
for f in sorted(bam_files):
do.run('{} quickcheck -v {}'.format(samtools, f),
"Ensure integrity of input merge BAM files")
out_handle.write("%s\n" % f)
return bam_file_list | python | def _create_merge_filelist(bam_files, base_file, config):
bam_file_list = "%s.list" % os.path.splitext(base_file)[0]
samtools = config_utils.get_program("samtools", config)
with open(bam_file_list, "w") as out_handle:
for f in sorted(bam_files):
do.run('{} quickcheck -v {}'.format(samtools, f),
"Ensure integrity of input merge BAM files")
out_handle.write("%s\n" % f)
return bam_file_list | [
"def",
"_create_merge_filelist",
"(",
"bam_files",
",",
"base_file",
",",
"config",
")",
":",
"bam_file_list",
"=",
"\"%s.list\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"base_file",
")",
"[",
"0",
"]",
"samtools",
"=",
"config_utils",
".",
"get_progr... | Create list of input files for merge, ensuring all files are valid. | [
"Create",
"list",
"of",
"input",
"files",
"for",
"merge",
"ensuring",
"all",
"files",
"are",
"valid",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L81-L91 |
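The list file sits next to the eventual output, swapping the .bam extension for .list; a one-line demonstration with a hypothetical path:

```python
import os

base_file = "work/align/NA12878/NA12878-sort.bam"
print("%s.list" % os.path.splitext(base_file)[0])
# work/align/NA12878/NA12878-sort.list
```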
237,795 | bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | _merge_outfile_fname | def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
"""Derive correct name of BAM file based on batching.
"""
if out_file is None:
out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
if batch is not None:
base, ext = os.path.splitext(out_file)
out_file = "%s-b%s%s" % (base, batch, ext)
return out_file | python | def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
if out_file is None:
out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
if batch is not None:
base, ext = os.path.splitext(out_file)
out_file = "%s-b%s%s" % (base, batch, ext)
return out_file | [
"def",
"_merge_outfile_fname",
"(",
"out_file",
",",
"bam_files",
",",
"work_dir",
",",
"batch",
")",
":",
"if",
"out_file",
"is",
"None",
":",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"os",
".",
"path",
".",
"basename",
"... | Derive correct name of BAM file based on batching. | [
"Derive",
"correct",
"name",
"of",
"BAM",
"file",
"based",
"on",
"batching",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L93-L101 |
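The same naming logic, runnable standalone; batch 0 of a chunked merge picks up a "-b0" suffix before the extension:

```python
import os

def merge_outfile_fname(out_file, bam_files, work_dir, batch):
    # Default to the first (sorted) input's name in the work directory.
    if out_file is None:
        out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
    if batch is not None:
        base, ext = os.path.splitext(out_file)
        out_file = "%s-b%s%s" % (base, batch, ext)
    return out_file

print(merge_outfile_fname(None, ["s2.bam", "s1.bam"], "merge", 0))  # merge/s1-b0.bam
```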
237,796 | bcbio/bcbio-nextgen | bcbio/pipeline/merge.py | _finalize_merge | def _finalize_merge(out_file, bam_files, config):
"""Handle indexes and cleanups of merged BAM and input files.
"""
# Ensure timestamps are up to date on output file and index
# Works around issues on systems with inconsistent times
for ext in ["", ".bai"]:
if os.path.exists(out_file + ext):
subprocess.check_call(["touch", out_file + ext])
for b in bam_files:
utils.save_diskspace(b, "BAM merged to %s" % out_file, config) | python | def _finalize_merge(out_file, bam_files, config):
# Ensure timestamps are up to date on output file and index
# Works around issues on systems with inconsistent times
for ext in ["", ".bai"]:
if os.path.exists(out_file + ext):
subprocess.check_call(["touch", out_file + ext])
for b in bam_files:
utils.save_diskspace(b, "BAM merged to %s" % out_file, config) | [
"def",
"_finalize_merge",
"(",
"out_file",
",",
"bam_files",
",",
"config",
")",
":",
"# Ensure timestamps are up to date on output file and index",
"# Works around issues on systems with inconsistent times",
"for",
"ext",
"in",
"[",
"\"\"",
",",
"\".bai\"",
"]",
":",
"if",... | Handle indexes and cleanups of merged BAM and input files. | [
"Handle",
"indexes",
"and",
"cleanups",
"of",
"merged",
"BAM",
"and",
"input",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/merge.py#L103-L112 |
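An os.utime-based alternative to shelling out to `touch` (not what merge.py does, just the same effect without a subprocess):

```python
import os

for path in ["merged.bam", "merged.bam.bai"]:
    if os.path.exists(path):
        os.utime(path, None)  # set atime/mtime to now
```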
237,797 | bcbio/bcbio-nextgen | bcbio/cwl/create.py | _cwl_workflow_template | def _cwl_workflow_template(inputs, top_level=False):
"""Retrieve CWL inputs shared amongst different workflows.
"""
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []} | python | def _cwl_workflow_template(inputs, top_level=False):
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []} | [
"def",
"_cwl_workflow_template",
"(",
"inputs",
",",
"top_level",
"=",
"False",
")",
":",
"ready_inputs",
"=",
"[",
"]",
"for",
"inp",
"in",
"inputs",
":",
"cur_inp",
"=",
"copy",
".",
"deepcopy",
"(",
"inp",
")",
"for",
"attr",
"in",
"[",
"\"source\"",
... | Retrieve CWL inputs shared amongst different workflows. | [
"Retrieve",
"CWL",
"inputs",
"shared",
"amongst",
"different",
"workflows",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L42-L63 |
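The skeleton the template returns before steps and outputs are attached, shown with one hypothetical input:

```python
import json

template = {"class": "Workflow",
            "cwlVersion": "v1.0",
            "hints": [],
            "requirements": [{"class": "EnvVarRequirement",
                              "envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
                             {"class": "ScatterFeatureRequirement"},
                             {"class": "SubworkflowFeatureRequirement"}],
            "inputs": [{"id": "config__algorithm__aligner", "type": "string"}],
            "outputs": [],
            "steps": []}
print(json.dumps(template, indent=2))
```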
237,798 | bcbio/bcbio-nextgen | bcbio/cwl/create.py | _get_disk_estimates | def _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
cur_remotes, no_files):
"""Retrieve disk usage estimates as CWL ResourceRequirement and hint.
Disk specification for temporary files and outputs.
Also optionally includes disk input estimates as a custom hint for
platforms which need to stage these and don't pre-estimate these when
allocating machine sizes.
"""
tmp_disk, out_disk, in_disk = 0, 0, 0
if file_estimates:
if disk:
for key, multiplier in disk.items():
if key in file_estimates:
out_disk += int(multiplier * file_estimates[key])
for inp in inputs:
scale = 2.0 if inp.get("type") == "array" else 1.0
# Allocating all samples, could remove for `to_rec` when we ensure we
# don't have to stage. Currently dnanexus stages everything so need to consider
if parallel in ["multi-combined", "multi-batch"] and "dnanexus" in cur_remotes:
scale *= (len(samples))
if workflow.is_cwl_record(inp):
for f in _get_record_fields(inp):
if f["name"] in file_estimates:
in_disk += file_estimates[f["name"]] * scale
elif inp["id"] in file_estimates:
in_disk += file_estimates[inp["id"]] * scale
# Round total estimates to integer, assign extra half to temp space
# It's not entirely clear how different runners interpret this
tmp_disk = int(math.ceil(out_disk * 0.5))
out_disk = int(math.ceil(out_disk))
bcbio_docker_disk = (10 if cur_remotes else 1) * 1024 # Minimum requirements for bcbio Docker image
disk_hint = {"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk}
# Skip input disk for steps which require only transformation (and thus no staging)
if no_files:
in_disk = 0
# Avoid accidentally flagging as no staging if we don't know sizes of expected inputs
elif in_disk == 0:
in_disk = 1
input_hint = {"class": "dx:InputResourceRequirement", "indirMin": int(math.ceil(in_disk))}
return disk_hint, input_hint | python | def _get_disk_estimates(name, parallel, inputs, file_estimates, samples, disk,
cur_remotes, no_files):
tmp_disk, out_disk, in_disk = 0, 0, 0
if file_estimates:
if disk:
for key, multiplier in disk.items():
if key in file_estimates:
out_disk += int(multiplier * file_estimates[key])
for inp in inputs:
scale = 2.0 if inp.get("type") == "array" else 1.0
# Allocating all samples, could remove for `to_rec` when we ensure we
# don't have to stage. Currently dnanexus stages everything so need to consider
if parallel in ["multi-combined", "multi-batch"] and "dnanexus" in cur_remotes:
scale *= (len(samples))
if workflow.is_cwl_record(inp):
for f in _get_record_fields(inp):
if f["name"] in file_estimates:
in_disk += file_estimates[f["name"]] * scale
elif inp["id"] in file_estimates:
in_disk += file_estimates[inp["id"]] * scale
# Round total estimates to integer, assign extra half to temp space
# It's not entirely clear how different runners interpret this
tmp_disk = int(math.ceil(out_disk * 0.5))
out_disk = int(math.ceil(out_disk))
bcbio_docker_disk = (10 if cur_remotes else 1) * 1024 # Minimum requirements for bcbio Docker image
disk_hint = {"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk}
# Skip input disk for steps which require only transformation (and thus no staging)
if no_files:
in_disk = 0
# Avoid accidentally flagging as no staging if we don't know sizes of expected inputs
elif in_disk == 0:
in_disk = 1
input_hint = {"class": "dx:InputResourceRequirement", "indirMin": int(math.ceil(in_disk))}
return disk_hint, input_hint | [
"def",
"_get_disk_estimates",
"(",
"name",
",",
"parallel",
",",
"inputs",
",",
"file_estimates",
",",
"samples",
",",
"disk",
",",
"cur_remotes",
",",
"no_files",
")",
":",
"tmp_disk",
",",
"out_disk",
",",
"in_disk",
"=",
"0",
",",
"0",
",",
"0",
"if",... | Retrieve disk usage estimates as CWL ResourceRequirement and hint.
Disk specification for temporary files and outputs.
Also optionally includes disk input estimates as a custom hint for
platforms which need to stage these and don't pre-estimate these when
allocating machine sizes. | [
"Retrieve",
"disk",
"usage",
"estimates",
"as",
"CWL",
"ResourceRequirement",
"and",
"hint",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L65-L107 |
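The rounding and split at the end of the estimate, isolated with a hypothetical summed figure: outputs keep the full estimate, temp space gets half, and remote inputs add a 10Gb floor for the Docker image:

```python
import math

out_disk_est = 12.7                             # hypothetical summed estimate
tmp_disk = int(math.ceil(out_disk_est * 0.5))   # 7
out_disk = int(math.ceil(out_disk_est))         # 13
bcbio_docker_disk = 10 * 1024                   # with remote inputs present
print({"outdirMin": bcbio_docker_disk + out_disk, "tmpdirMin": tmp_disk})
```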
237,799 | bcbio/bcbio-nextgen | bcbio/cwl/create.py | _add_current_quay_tag | def _add_current_quay_tag(repo, container_tags):
"""Lookup the current quay tag for the repository, adding to repo string.
Enables generation of CWL explicitly tied to revisions.
"""
if ':' in repo:
return repo, container_tags
try:
latest_tag = container_tags[repo]
except KeyError:
repo_id = repo[repo.find('/') + 1:]
tags = requests.request("GET", "https://quay.io/api/v1/repository/" + repo_id).json()["tags"]
latest_tag = None
latest_modified = None
for tag, info in tags.items():
if latest_tag:
if (dateutil.parser.parse(info['last_modified']) > dateutil.parser.parse(latest_modified)
and tag != 'latest'):
latest_modified = info['last_modified']
latest_tag = tag
else:
latest_modified = info['last_modified']
latest_tag = tag
container_tags[repo] = str(latest_tag)
latest_pull = repo + ':' + str(latest_tag)
return latest_pull, container_tags | python | def _add_current_quay_tag(repo, container_tags):
if ':' in repo:
return repo, container_tags
try:
latest_tag = container_tags[repo]
except KeyError:
repo_id = repo[repo.find('/') + 1:]
tags = requests.request("GET", "https://quay.io/api/v1/repository/" + repo_id).json()["tags"]
latest_tag = None
latest_modified = None
for tag, info in tags.items():
if latest_tag:
if (dateutil.parser.parse(info['last_modified']) > dateutil.parser.parse(latest_modified)
and tag != 'latest'):
latest_modified = info['last_modified']
latest_tag = tag
else:
latest_modified = info['last_modified']
latest_tag = tag
container_tags[repo] = str(latest_tag)
latest_pull = repo + ':' + str(latest_tag)
return latest_pull, container_tags | [
"def",
"_add_current_quay_tag",
"(",
"repo",
",",
"container_tags",
")",
":",
"if",
"':'",
"in",
"repo",
":",
"return",
"repo",
",",
"container_tags",
"try",
":",
"latest_tag",
"=",
"container_tags",
"[",
"repo",
"]",
"except",
"KeyError",
":",
"repo_id",
"=... | Lookup the current quay tag for the repository, adding to repo string.
Enables generation of CWL explicitly tied to revisions. | [
"Lookup",
"the",
"current",
"quay",
"tag",
"for",
"the",
"repository",
"adding",
"to",
"repo",
"string",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L109-L134 |
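A standalone version of the tag lookup against quay.io's public API (network access required; the repository name is an example). max() replaces the explicit newest-tag loop above:

```python
import requests
import dateutil.parser

repo = "quay.io/bcbio/bcbio-vc"
repo_id = repo[repo.find('/') + 1:]  # "bcbio/bcbio-vc"
tags = requests.get("https://quay.io/api/v1/repository/" + repo_id).json()["tags"]
tag, info = max(((t, i) for t, i in tags.items() if t != "latest"),
                key=lambda kv: dateutil.parser.parse(kv[1]["last_modified"]))
print("%s:%s" % (repo, tag))
```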