repo | path | func_name | language | code | sha | url | partition | idx

bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | get_resources | python

def get_resources(genome, ref_file, data):
"""Retrieve genome information from a genome-references.yaml file.
"""
base_dir = os.path.normpath(os.path.dirname(ref_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", ""))
if not os.path.exists(resource_file):
raise IOError("Did not find resource file for %s: %s\n"
"To update bcbio_nextgen.py with genome resources for standard builds, run:\n"
"bcbio_nextgen.py upgrade -u skip"
% (genome, resource_file))
with open(resource_file) as in_handle:
resources = yaml.safe_load(in_handle)
def resource_file_path(x):
if isinstance(x, six.string_types) and os.path.exists(os.path.join(base_dir, x)):
return os.path.normpath(os.path.join(base_dir, x))
return x
cleaned = utils.dictapply(resources, resource_file_path)
    return ensure_annotations(cleaned, data)
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L24-L42 | train | 218,500

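Example (illustrative, not from the dataset): a minimal usage sketch for get_resources, assuming a bcbio installation and a hypothetical hg38 reference layout; the paths and the stand-in sample dict are assumptions.

    from bcbio.pipeline.genome import get_resources

    data = {"dirs": {"work": "/tmp/work"}, "config": {}}  # minimal stand-in for a bcbio sample dict
    resources = get_resources("hg38", "/refs/hg38/seq/hg38.fa", data)
    # Reads /refs/hg38/seq/hg38-resources.yaml and rewrites any relative paths
    # inside it to absolute paths under /refs/hg38/seq before returning.
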
bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | add_required_resources | python

def add_required_resources(resources):
"""Add default or empty values for required resources referenced in CWL
"""
required = [["variation", "cosmic"], ["variation", "clinvar"], ["variation", "dbsnp"],
["variation", "lcr"], ["variation", "polyx"],
["variation", "encode_blacklist"], ["variation", "gc_profile"],
["variation", "germline_het_pon"],
["variation", "train_hapmap"], ["variation", "train_indels"],
["variation", "editing"], ["variation", "exac"], ["variation", "esp"],
["variation", "gnomad_exome"],
["variation", "1000g"], ["aliases", "human"]]
for key in required:
if not tz.get_in(key, resources):
resources = tz.update_in(resources, key, lambda x: None)
    return resources
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L44-L58 | train | 218,501

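Example (illustrative): a small check of the defaulting behavior, assuming bcbio and its toolz dependency are importable; the input dict is made up.

    from bcbio.pipeline.genome import add_required_resources

    resources = add_required_resources({"variation": {"dbsnp": "/refs/dbsnp.vcf.gz"}})
    assert resources["variation"]["cosmic"] is None                 # missing keys filled with None
    assert resources["variation"]["dbsnp"] == "/refs/dbsnp.vcf.gz"  # existing values kept
    assert resources["aliases"]["human"] is None                    # nested defaults created too
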
bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | ensure_annotations | python

def ensure_annotations(resources, data):
"""Prepare any potentially missing annotations for downstream processing in a local directory.
"""
transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
if transcript_gff and utils.file_exists(transcript_gff):
out_dir = os.path.join(tz.get_in(["dirs", "work"], data),
"inputs", "data", "annotations")
resources["rnaseq"]["gene_bed"] = gtf.gtf_to_bed(transcript_gff, out_dir)
    return resources
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L60-L68 | train | 218,502

bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | abs_file_paths | python

def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,
do_download=True):
"""Normalize any file paths found in a subdirectory of configuration input.
base_dir -- directory to normalize relative paths to
ignore_keys -- algorithm key names to ignore normalize for (keywords, not files/directories)
fileonly_keys -- algorithm key names to only expand files (not directories)
cur_key -- current key when calling recursively
"""
ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)
fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)
if base_dir is None:
base_dir = os.getcwd()
orig_dir = os.getcwd()
os.chdir(base_dir)
input_dir = os.path.join(base_dir, "inputs")
if isinstance(xs, dict):
out = {}
for k, v in xs.items():
if k not in ignore_keys and v and isinstance(v, six.string_types):
if v.lower() == "none":
out[k] = None
else:
out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
elif isinstance(v, (list, tuple)):
out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
for x in v]
else:
out[k] = v
elif isinstance(xs, six.string_types):
if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):
dl = objectstore.download(xs, input_dir)
if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys and not os.path.isfile(dl)):
out = os.path.normpath(os.path.join(base_dir, dl))
else:
out = xs
else:
out = xs
else:
out = xs
os.chdir(orig_dir)
    return out
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L72-L113 | train | 218,503

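Example (illustrative): a hedged sketch of the path normalization; base_dir and the config values are hypothetical.

    from bcbio.pipeline.genome import abs_file_paths

    cfg = {"dbsnp": "variation/dbsnp.vcf.gz",  # relative path, expanded if present on disk
           "aligner": "bwa"}                   # keyword, listed in ignore_keys
    out = abs_file_paths(cfg, base_dir="/refs/hg38", ignore_keys=["aligner"])
    # out["dbsnp"] -> "/refs/hg38/variation/dbsnp.vcf.gz" when that file exists
    # (remote s3:// or gs:// values would instead be downloaded into
    # /refs/hg38/inputs); out["aligner"] stays "bwa".
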
bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | _get_galaxy_tool_info | python

def _get_galaxy_tool_info(galaxy_base):
"""Retrieve Galaxy tool-data information from defaults or galaxy config file.
"""
ini_file = os.path.join(galaxy_base, "universe_wsgi.ini")
info = {"tool_data_table_config_path": os.path.join(galaxy_base, "tool_data_table_conf.xml"),
"tool_data_path": os.path.join(galaxy_base, "tool-data")}
config = configparser.ConfigParser()
config.read(ini_file)
if "app:main" in config.sections():
for option in config.options("app:main"):
if option in info:
info[option] = os.path.join(galaxy_base, config.get("app:main", option))
    return info
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L195-L207 | train | 218,504

bcbio/bcbio-nextgen | bcbio/pipeline/genome.py | get_builds | python

def get_builds(galaxy_base):
"""Retrieve configured genome builds and reference files, using Galaxy configuration files.
Allows multiple dbkey specifications in the same file, using the most recently added.
"""
name = "samtools"
galaxy_config = _get_galaxy_tool_info(galaxy_base)
galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
galaxy_base)
assert not need_remap, "Should not need to remap reference files"
fnames = {}
for dbkey, fname in _galaxy_loc_iter(loc_file, galaxy_dt):
fnames[dbkey] = fname
out = []
for dbkey in sorted(fnames.keys()):
out.append((dbkey, fnames[dbkey]))
    return out
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/genome.py#L268-L285 | train | 218,505

bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _get_jvm_opts | python

def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts) | python | def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts) | [
"def",
"_get_jvm_opts",
"(",
"config",
",",
"tmp_dir",
")",
":",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"varscan\"",
",",
"config",
")",
"jvm_opts",
"=",
"resources",
".",
"get",
"(",
"\"jvm_opts\"",
",",
"[",
"\"-Xmx750m\"",
",",
"\"... | Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF. | [
"Retrieve",
"common",
"options",
"for",
"running",
"VarScan",
".",
"Handles",
"jvm_opts",
"setting",
"user",
"and",
"country",
"to",
"English",
"to",
"avoid",
"issues",
"with",
"different",
"locales",
"producing",
"non",
"-",
"compliant",
"VCF",
"."
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L36-L48 | train | 218,506

bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _varscan_options_from_config | python

def _varscan_options_from_config(config):
"""Retrieve additional options for VarScan from the configuration.
"""
opts = ["--min-coverage 5", "--p-value 0.98", "--strand-filter 1"]
resources = config_utils.get_resources("varscan", config)
if resources.get("options"):
opts += [str(x) for x in resources["options"]]
    return opts
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L51-L58 | train | 218,507

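Example (illustrative): the extra options come from bcbio's resources configuration; the override string below is made up, and the layout assumes the usual resources/varscan/options keys.

    config = {"resources": {"varscan": {"options": ["--min-avg-qual 20"]}}}
    # _varscan_options_from_config(config) would then return:
    # ["--min-coverage 5", "--p-value 0.98", "--strand-filter 1", "--min-avg-qual 20"]
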
bcbio/bcbio-nextgen | bcbio/variation/varscan.py | spv_freq_filter | python

def spv_freq_filter(line, tumor_index):
"""Filter VarScan calls based on the SPV value and frequency.
Removes calls with SPV < 0.05 and a tumor FREQ > 0.35.
False positives dominate these higher frequency, low SPV calls. They appear
to be primarily non-somatic/germline variants not removed by other filters.
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=SpvFreq,Description="High frequency (tumor FREQ > 0.35) '
'and low p-value for somatic (SPV < 0.05)">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
freq = utils.safe_to_float(sample_ft.get("FREQ"))
spvs = [x for x in parts[7].split(";") if x.startswith("SPV=")]
spv = utils.safe_to_float(spvs[0].split("=")[-1] if spvs else None)
fname = None
if spv is not None and freq is not None:
if spv < 0.05 and freq > 0.35:
fname = "SpvFreq"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
    return line
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L61-L91 | train | 218,508

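Example (illustrative): the filter applied to a synthetic VCF record; SPV=0.01 (< 0.05) with tumor FREQ 0.40 (> 0.35) trips the SpvFreq filter. FREQ is written as a plain float here for simplicity.

    from bcbio.variation.varscan import spv_freq_filter

    line = "\t".join(["1", "100", ".", "A", "T", ".", "PASS", "SPV=0.01",
                      "GT:FREQ", "0/1:0.10", "0/1:0.40"])
    out = spv_freq_filter(line, tumor_index=1)  # tumor is the second sample column
    assert out.split("\t")[6] == "SpvFreq"      # PASS replaced by the new filter
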
bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _create_sample_list | python

def _create_sample_list(in_bams, vcf_file):
"""Pull sample names from input BAMs and create input sample list.
"""
out_file = "%s-sample_list.txt" % os.path.splitext(vcf_file)[0]
with open(out_file, "w") as out_handle:
for in_bam in in_bams:
with pysam.Samfile(in_bam, "rb") as work_bam:
for rg in work_bam.header.get("RG", []):
out_handle.write("%s\n" % rg["SM"])
    return out_file
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L274-L283 | train | 218,509

bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _varscan_work | python

def _varscan_work(align_bams, ref_file, items, target_regions, out_file):
"""Perform SNP and indel genotyping with VarScan.
"""
config = items[0]["config"]
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
max_read_depth = "1000"
sample_list = _create_sample_list(align_bams, out_file)
mpileup = samtools.prep_mpileup(align_bams, ref_file, config, max_read_depth,
target_regions=target_regions, want_bcf=False)
# VarScan fails to generate a header on files that start with
# zerocoverage calls; strip these with grep, we're not going to
# call on them
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
# we use ifne from moreutils to ensure we process only on files with input, skipping otherwise
# http://manpages.ubuntu.com/manpages/natty/man1/ifne.1.html
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
min_af = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
export = utils.local_path_export()
cmd = ("{export} {mpileup} | {remove_zerocoverage} | "
"ifne varscan {jvm_opts} mpileup2cns {opts} "
"--vcf-sample-list {sample_list} --min-var-freq {min_af} --output-vcf --variants | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x)' | "
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles > {out_file}")
do.run(cmd.format(**locals()), "Varscan", None,
[do.file_exists(out_file)])
os.remove(sample_list)
# VarScan can create completely empty files in regions without
# variants, so we create a correctly formatted empty file
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if orig_out_file.endswith(".gz"):
        vcfutils.bgzip_and_index(out_file, config)
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L286-L327 | train | 218,510

bcbio/bcbio-nextgen | bcbio/distributed/ipythontasks.py | apply | python

def apply(object, args=None, kwargs=None):
"""Python3 apply replacement for double unpacking of inputs during apply.
Thanks to: https://github.com/stefanholek/apply
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
    return object(*args, **kwargs)
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipythontasks.py#L71-L80 | train | 218,511

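Example: a quick runnable check of the replacement using standard-library callables.

    from bcbio.distributed.ipythontasks import apply

    assert apply(pow, (2, 10)) == 1024
    assert apply(sorted, ([3, 1, 2],), {"reverse": True}) == [3, 2, 1]
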
bcbio/bcbio-nextgen | bcbio/structural/annotate.py | add_genes | python

def add_genes(in_file, data, max_distance=10000, work_dir=None):
"""Add gene annotations to a BED file from pre-prepared RNA-seq data.
max_distance -- only keep annotations within this distance of event
"""
gene_file = regions.get_sv_bed(data, "exons", out_dir=os.path.dirname(in_file))
if gene_file and utils.file_exists(in_file):
out_file = "%s-annotated.bed" % utils.splitext_plus(in_file)[0]
if work_dir:
out_file = os.path.join(work_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
fai_file = ref.fasta_idx(dd.get_ref_file(data))
with file_transaction(data, out_file) as tx_out_file:
_add_genes_to_bed(in_file, gene_file, fai_file, tx_out_file, data, max_distance)
return out_file
else:
        return in_file
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/annotate.py#L17-L33 | train | 218,512

bcbio/bcbio-nextgen | bcbio/structural/annotate.py | _add_genes_to_bed | python

def _add_genes_to_bed(in_file, gene_file, fai_file, out_file, data, max_distance=10000):
"""Re-usable subcomponent that annotates BED file genes from another BED
"""
try:
input_rec = next(iter(pybedtools.BedTool(in_file)))
except StopIteration: # empty file
utils.copy_plus(in_file, out_file)
return
# keep everything after standard chrom/start/end, 1-based
extra_fields = list(range(4, len(input_rec.fields) + 1))
# keep the new gene annotation
gene_index = len(input_rec.fields) + 4
extra_fields.append(gene_index)
columns = ",".join([str(x) for x in extra_fields])
max_column = max(extra_fields) + 1
ops = ",".join(["distinct"] * len(extra_fields))
# swap over gene name to '.' if beyond maximum distance
# cut removes the last distance column which can cause issues
# with bedtools merge: 'ERROR: illegal character '.' found in integer conversion of string'
distance_filter = (r"""awk -F$'\t' -v OFS='\t' '{if ($NF > %s || $NF < -%s) $%s = "."} {print}'""" %
(max_distance, max_distance, gene_index))
sort_cmd = bedutils.get_sort_cmd(os.path.dirname(out_file))
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
# Ensure gene transcripts match reference genome
ready_gene_file = os.path.join(os.path.dirname(out_file), "%s-genomeonly.bed" %
(utils.splitext_plus(os.path.basename(gene_file))[0]))
ready_gene_file = bedutils.subset_to_genome(gene_file, ready_gene_file, data)
exports = "export TMPDIR=%s && %s" % (os.path.dirname(out_file), utils.local_path_export())
bcbio_py = sys.executable
gsort = config_utils.get_program("gsort", data)
cmd = ("{exports}{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | grep -v ^# | "
"{bcbio_py} -c 'from bcbio.variation import bedutils; bedutils.remove_bad()' | "
"{gsort} - {fai_file} | "
"bedtools closest -g {fai_file} "
"-D ref -t first -a - -b <({gsort} {ready_gene_file} {fai_file}) | "
"{distance_filter} | cut -f 1-{max_column} | "
"bedtools merge -i - -c {columns} -o {ops} -delim ',' -d -10 > {out_file}")
    do.run(cmd.format(**locals()), "Annotate BED file with gene info")
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/annotate.py#L35-L72 | train | 218,513

bcbio/bcbio-nextgen | bcbio/distributed/clusterk.py | create | python

def create(parallel):
"""Create a queue based on the provided parallel arguments.
TODO Startup/tear-down. Currently using default queue for testing
"""
queue = {k: v for k, v in parallel.items() if k in ["queue", "cores_per_job", "mem"]}
    yield queue
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/clusterk.py#L11-L17 | train | 218,514

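Example (illustrative): what a caller receives from the generator, assuming it is used bare as shown above (the surrounding repo may wrap it as a context manager); the parallel dict is made up.

    from bcbio.distributed.clusterk import create

    parallel = {"queue": "default", "cores_per_job": 4, "mem": 3.5, "type": "local"}
    queue = next(create(parallel))  # yields only the whitelisted keys
    assert queue == {"queue": "default", "cores_per_job": 4, "mem": 3.5}
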
bcbio/bcbio-nextgen | bcbio/distributed/clusterk.py | runner | python

def runner(queue, parallel):
"""Run individual jobs on an existing queue.
"""
def run(fn_name, items):
logger.info("clusterk: %s" % fn_name)
assert "wrapper" in parallel, "Clusterk requires bcbio-nextgen-vm wrapper"
fn = getattr(__import__("{base}.clusterktasks".format(base=parallel["module"]),
fromlist=["clusterktasks"]),
parallel["wrapper"])
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "pack"])}
out = []
for data in [fn(fn_name, queue, parallel.get("wrapper_args"), wrap_parallel, x) for x in items]:
if data:
out.extend(data)
return out
    return run
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/clusterk.py#L19-L34 | train | 218,515

bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | is_gene_list | python

def is_gene_list(bed_file):
"""Check if the file is only a list of genes, not a BED
"""
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
if len(line.split()) == 1:
return True
else:
                    return False
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L51-L60 | train | 218,516

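Example: a runnable check against a tiny temporary gene-list file (one name per line, no coordinates), assuming bcbio is installed.

    import tempfile
    from bcbio.structural.prioritize import is_gene_list

    with tempfile.NamedTemporaryFile("w", suffix=".bed", delete=False) as fh:
        fh.write("BRCA1\nTP53\n")
    assert is_gene_list(fh.name) is True
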
bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _find_gene_list_from_bed | python

def _find_gene_list_from_bed(bed_file, base_file, data):
"""Retrieve list of gene names from input BED file.
"""
# Check for a gene list, we can just return that.
if is_gene_list(bed_file):
return bed_file
out_file = "%s-genes.txt" % utils.splitext_plus(base_file)[0]
if not os.path.exists(out_file):
genes = set([])
import pybedtools
with utils.open_gzipsafe(bed_file) as in_handle:
for r in pybedtools.BedTool(in_handle):
if r.name:
if not r.name.startswith("{"):
genes.add(r.name)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
if len(genes) > 0:
out_handle.write("\n".join(sorted(list(genes))) + "\n")
if utils.file_exists(out_file):
        return out_file
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L62-L82 | train | 218,517

bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _combine_files | python

def _combine_files(tsv_files, work_dir, data):
"""Combine multiple priority tsv files into a final sorted output.
"""
header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype",
"lof", "annotation", "split_read_support", "paired_support_PE", "paired_support_PR"])
sample = dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
tmpdir = os.path.dirname(tx_out_file)
input_files = " ".join(tsv_files)
sort_cmd = bedutils.get_sort_cmd(tmpdir)
cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}"
do.run(cmd.format(**locals()), "Combine prioritized from multiple callers")
    return out_file
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L145-L159 | train | 218,518

bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _cnvkit_prioritize | python

def _cnvkit_prioritize(sample, genes, allele_file, metrics_file):
"""Summarize non-diploid calls with copy numbers and confidence intervals.
"""
mdf = pd.read_table(metrics_file)
mdf.columns = [x.lower() for x in mdf.columns]
if len(genes) > 0:
mdf = mdf[mdf["gene"].str.contains("|".join(genes))]
mdf = mdf[["chromosome", "start", "end", "gene", "log2", "ci_hi", "ci_lo"]]
adf = pd.read_table(allele_file)
if len(genes) > 0:
adf = adf[adf["gene"].str.contains("|".join(genes))]
if "cn1" in adf.columns and "cn2" in adf.columns:
adf = adf[["chromosome", "start", "end", "cn", "cn1", "cn2"]]
else:
adf = adf[["chromosome", "start", "end", "cn"]]
df = pd.merge(mdf, adf, on=["chromosome", "start", "end"])
df = df[df["cn"] != 2]
if len(df) > 0:
def passes(row):
spread = abs(row["ci_hi"] - row["ci_lo"])
return spread < 0.25
df["passes"] = df.apply(passes, axis=1)
df.insert(0, "sample", [sample] * len(df))
    return df
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L167-L190 | train | 218,519

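Example (illustrative): a sketch with two tiny synthetic tables carrying only the columns the function reads; real CNVkit call and segmetrics files contain more columns, and the gene and values here are assumptions.

    import pandas as pd
    from bcbio.structural.prioritize import _cnvkit_prioritize

    pd.DataFrame({"chromosome": ["chr1"], "start": [100], "end": [200],
                  "gene": ["BRCA1"], "log2": [1.2], "ci_hi": [1.3],
                  "ci_lo": [1.15]}).to_csv("metrics.tsv", sep="\t", index=False)
    pd.DataFrame({"chromosome": ["chr1"], "start": [100], "end": [200],
                  "gene": ["BRCA1"], "cn": [4]}).to_csv("calls.tsv", sep="\t", index=False)
    df = _cnvkit_prioritize("sample1", ["BRCA1"], "calls.tsv", "metrics.tsv")
    # One non-diploid row survives; passes is True since |ci_hi - ci_lo| = 0.15 < 0.25.
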
bcbio/bcbio-nextgen | bcbio/structural/prioritize.py | _cnv_prioritize | python

def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
    return data
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/prioritize.py#L192-L217 | train | 218,520

bcbio/bcbio-nextgen | bcbio/broad/__init__.py | get_default_jvm_opts | python

def get_default_jvm_opts(tmp_dir=None, parallel_gc=False):
"""Retrieve default JVM tuning options
Avoids issues with multiple spun up Java processes running into out of memory errors.
Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency
and responsiveness which are not needed for batch jobs.
https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027
https://wiki.csiro.au/pages/viewpage.action?pageId=545034311
http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect
However, serial GC causes issues with Spark local runs so we use parallel for those cases:
https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070
"""
opts = ["-XX:+UseSerialGC"] if not parallel_gc else []
if tmp_dir:
opts.append("-Djava.io.tmpdir=%s" % tmp_dir)
    return opts
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L23-L38 | train | 218,521

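Example: a runnable check of the two modes (serial GC for batch work, no GC flag for Spark-style local runs).

    from bcbio.broad import get_default_jvm_opts

    assert get_default_jvm_opts("/tmp") == ["-XX:+UseSerialGC", "-Djava.io.tmpdir=/tmp"]
    assert get_default_jvm_opts(parallel_gc=True) == []
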
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | _get_gatk_opts | python

def _get_gatk_opts(config, names, tmp_dir=None, memscale=None, include_gatk=True, parallel_gc=False):
"""Retrieve GATK memory specifications, moving down a list of potential specifications.
"""
if include_gatk and "gatk4" in dd.get_tools_off({"config": config}):
opts = ["-U", "LENIENT_VCF_PROCESSING", "--read_filter",
"BadCigar", "--read_filter", "NotPrimaryAlignment"]
else:
opts = []
jvm_opts = ["-Xms750m", "-Xmx2g"]
for n in names:
resources = config_utils.get_resources(n, config)
if resources and resources.get("jvm_opts"):
jvm_opts = resources.get("jvm_opts")
break
if memscale:
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust": memscale}})
jvm_opts += get_default_jvm_opts(tmp_dir, parallel_gc=parallel_gc)
    return jvm_opts + opts
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L40-L57 | train | 218,522

bcbio/bcbio-nextgen | bcbio/broad/__init__.py | _clean_java_out | python

def _clean_java_out(version_str):
"""Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output.
"""
out = []
for line in version_str.decode().split("\n"):
if line.startswith("Picked up"):
pass
if line.find("setlocale") > 0:
pass
else:
out.append(line)
return "\n".join(out) | python | def _clean_java_out(version_str):
"""Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output.
"""
out = []
for line in version_str.decode().split("\n"):
if line.startswith("Picked up"):
pass
if line.find("setlocale") > 0:
pass
else:
out.append(line)
return "\n".join(out) | [
"def",
"_clean_java_out",
"(",
"version_str",
")",
":",
"out",
"=",
"[",
"]",
"for",
"line",
"in",
"version_str",
".",
"decode",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"Picked up\"",
")",
":",
"pass",
... | Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output. | [
"Remove",
"extra",
"environmental",
"information",
"reported",
"in",
"java",
"when",
"querying",
"for",
"versions",
"."
6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L69-L82 | train | 218,523

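Example: a sanity check with a synthetic `java -version`-style byte string; the assertion reflects the elif control flow in the code above.

    from bcbio.broad import _clean_java_out

    raw = b'Picked up _JAVA_OPTIONS: -Xmx2g\nopenjdk version "1.8.0"'
    assert _clean_java_out(raw) == 'openjdk version "1.8.0"'
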
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | get_mutect_version | python

def get_mutect_version(mutect_jar):
"""Retrieves version from input jar name since there is not an easy way to get MuTect version.
Check mutect jar for SomaticIndelDetector, which is an Appistry feature
"""
cl = ["java", "-Xms128m", "-Xmx256m"] + get_default_jvm_opts() + ["-jar", mutect_jar, "-h"]
with closing(subprocess.Popen(cl, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout) as stdout:
if "SomaticIndelDetector" in stdout.read().strip():
mutect_type = "-appistry"
else:
mutect_type = ""
version = os.path.basename(mutect_jar).lower()
for to_remove in [".jar", "-standalone", "mutect"]:
version = version.replace(to_remove, "")
if version.startswith(("-", ".")):
version = version[1:]
if not version:
raise ValueError("Unable to determine MuTect version from jar file. "
"Need to have version contained in jar (ie. muTect-1.1.5.jar): %s" % mutect_jar)
_check_for_bad_version(version, "MuTect")
return version + mutect_type | python | def get_mutect_version(mutect_jar):
"""Retrieves version from input jar name since there is not an easy way to get MuTect version.
Check mutect jar for SomaticIndelDetector, which is an Appistry feature
"""
cl = ["java", "-Xms128m", "-Xmx256m"] + get_default_jvm_opts() + ["-jar", mutect_jar, "-h"]
with closing(subprocess.Popen(cl, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout) as stdout:
if "SomaticIndelDetector" in stdout.read().strip():
mutect_type = "-appistry"
else:
mutect_type = ""
version = os.path.basename(mutect_jar).lower()
for to_remove in [".jar", "-standalone", "mutect"]:
version = version.replace(to_remove, "")
if version.startswith(("-", ".")):
version = version[1:]
if not version:
raise ValueError("Unable to determine MuTect version from jar file. "
"Need to have version contained in jar (ie. muTect-1.1.5.jar): %s" % mutect_jar)
_check_for_bad_version(version, "MuTect")
return version + mutect_type | [
"def",
"get_mutect_version",
"(",
"mutect_jar",
")",
":",
"cl",
"=",
"[",
"\"java\"",
",",
"\"-Xms128m\"",
",",
"\"-Xmx256m\"",
"]",
"+",
"get_default_jvm_opts",
"(",
")",
"+",
"[",
"\"-jar\"",
",",
"mutect_jar",
",",
"\"-h\"",
"]",
"with",
"closing",
"(",
... | Retrieves version from input jar name since there is not an easy way to get MuTect version.
Check mutect jar for SomaticIndelDetector, which is an Appistry feature | [
"Retrieves",
"version",
"from",
"input",
"jar",
"name",
"since",
"there",
"is",
"not",
"an",
"easy",
"way",
"to",
"get",
"MuTect",
"version",
".",
"Check",
"mutect",
"jar",
"for",
"SomaticIndelDetector",
"which",
"is",
"an",
"Appistry",
"feature"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L110-L129 | train | 218,524 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | gatk_cmd | def gatk_cmd(name, jvm_opts, params, config=None):
"""Retrieve PATH to gatk using locally installed java.
"""
if name == "gatk":
if isinstance(config, dict) and "config" not in config:
data = {"config": config}
else:
data = config
if not data or "gatk4" not in dd.get_tools_off(data):
return _gatk4_cmd(jvm_opts, params, data)
else:
name = "gatk3"
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), name))
# if we can't find via the local executable, fallback to being in the path
if not gatk_cmd:
gatk_cmd = utils.which(name)
if gatk_cmd:
return "%s && export PATH=%s:\"$PATH\" && %s %s %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd), gatk_cmd,
" ".join(jvm_opts), " ".join([str(x) for x in params])) | python | def gatk_cmd(name, jvm_opts, params, config=None):
"""Retrieve PATH to gatk using locally installed java.
"""
if name == "gatk":
if isinstance(config, dict) and "config" not in config:
data = {"config": config}
else:
data = config
if not data or "gatk4" not in dd.get_tools_off(data):
return _gatk4_cmd(jvm_opts, params, data)
else:
name = "gatk3"
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), name))
# if we can't find via the local executable, fallback to being in the path
if not gatk_cmd:
gatk_cmd = utils.which(name)
if gatk_cmd:
return "%s && export PATH=%s:\"$PATH\" && %s %s %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd), gatk_cmd,
" ".join(jvm_opts), " ".join([str(x) for x in params])) | [
"def",
"gatk_cmd",
"(",
"name",
",",
"jvm_opts",
",",
"params",
",",
"config",
"=",
"None",
")",
":",
"if",
"name",
"==",
"\"gatk\"",
":",
"if",
"isinstance",
"(",
"config",
",",
"dict",
")",
"and",
"\"config\"",
"not",
"in",
"config",
":",
"data",
"... | Retrieve PATH to gatk using locally installed java. | [
"Retrieve",
"PATH",
"to",
"gatk",
"using",
"locally",
"installed",
"java",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L530-L549 | train | 218,525 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | _gatk4_cmd | def _gatk4_cmd(jvm_opts, params, data):
"""Retrieve unified command for GATK4, using 'gatk'. GATK3 is 'gatk3'.
"""
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "gatk"))
return "%s && export PATH=%s:\"$PATH\" && gatk --java-options '%s' %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd),
" ".join(jvm_opts), " ".join([str(x) for x in params])) | python | def _gatk4_cmd(jvm_opts, params, data):
"""Retrieve unified command for GATK4, using 'gatk'. GATK3 is 'gatk3'.
"""
gatk_cmd = utils.which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "gatk"))
return "%s && export PATH=%s:\"$PATH\" && gatk --java-options '%s' %s" % \
(utils.clear_java_home(), utils.get_java_binpath(gatk_cmd),
" ".join(jvm_opts), " ".join([str(x) for x in params])) | [
"def",
"_gatk4_cmd",
"(",
"jvm_opts",
",",
"params",
",",
"data",
")",
":",
"gatk_cmd",
"=",
"utils",
".",
"which",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"sys",
... | Retrieve unified command for GATK4, using 'gatk'. GATK3 is 'gatk3'. | [
"Retrieve",
"unified",
"command",
"for",
"GATK4",
"using",
"gatk",
".",
"GATK3",
"is",
"gatk3",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L551-L557 | train | 218,526 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | runner_from_path | def runner_from_path(cmd, config):
"""Simple command line runner that expects a bash cmd in the PATH.
This makes Picard tools back compatible with new approach of a single
jar + bash script.
"""
if cmd.endswith("picard"):
return PicardCmdRunner(cmd, config)
else:
raise ValueError("Do not support PATH running for %s" % cmd) | python | def runner_from_path(cmd, config):
"""Simple command line runner that expects a bash cmd in the PATH.
This makes Picard tools back compatible with new approach of a single
jar + bash script.
"""
if cmd.endswith("picard"):
return PicardCmdRunner(cmd, config)
else:
raise ValueError("Do not support PATH running for %s" % cmd) | [
"def",
"runner_from_path",
"(",
"cmd",
",",
"config",
")",
":",
"if",
"cmd",
".",
"endswith",
"(",
"\"picard\"",
")",
":",
"return",
"PicardCmdRunner",
"(",
"cmd",
",",
"config",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Do not support PATH running for ... | Simple command line runner that expects a bash cmd in the PATH.
This makes Picard tools back compatible with new approach of a single
jar + bash script. | [
"Simple",
"command",
"line",
"runner",
"that",
"expects",
"a",
"bash",
"cmd",
"in",
"the",
"PATH",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L587-L596 | train | 218,527 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner._set_default_versions | def _set_default_versions(self, config):
"""Retrieve pre-computed version information for expensive to retrieve versions.
Starting up GATK takes a lot of resources so we do it once at start of analysis.
"""
out = []
for name in ["gatk", "gatk4", "picard", "mutect"]:
v = tz.get_in(["resources", name, "version"], config)
if not v:
try:
v = programs.get_version(name, config=config)
except KeyError:
v = None
out.append(v)
self._gatk_version, self._gatk4_version, self._picard_version, self._mutect_version = out | python | def _set_default_versions(self, config):
"""Retrieve pre-computed version information for expensive to retrieve versions.
Starting up GATK takes a lot of resources so we do it once at start of analysis.
"""
out = []
for name in ["gatk", "gatk4", "picard", "mutect"]:
v = tz.get_in(["resources", name, "version"], config)
if not v:
try:
v = programs.get_version(name, config=config)
except KeyError:
v = None
out.append(v)
self._gatk_version, self._gatk4_version, self._picard_version, self._mutect_version = out | [
"def",
"_set_default_versions",
"(",
"self",
",",
"config",
")",
":",
"out",
"=",
"[",
"]",
"for",
"name",
"in",
"[",
"\"gatk\"",
",",
"\"gatk4\"",
",",
"\"picard\"",
",",
"\"mutect\"",
"]",
":",
"v",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"resources\""... | Retrieve pre-computed version information for expensive to retrieve versions.
Starting up GATK takes a lot of resources so we do it once at start of analysis. | [
"Retrieve",
"pre",
"-",
"computed",
"version",
"information",
"for",
"expensive",
"to",
"retrieve",
"versions",
".",
"Starting",
"up",
"GATK",
"takes",
"a",
"lot",
"of",
"resources",
"so",
"we",
"do",
"it",
"once",
"at",
"start",
"of",
"analysis",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L171-L184 | train | 218,528 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.new_resources | def new_resources(self, program):
"""Set new resource usage for the given program.
This allows customization of memory usage for particular sub-programs
of GATK like HaplotypeCaller.
"""
resources = config_utils.get_resources(program, self._config)
if resources.get("jvm_opts"):
self._jvm_opts = resources.get("jvm_opts") | python | def new_resources(self, program):
"""Set new resource usage for the given program.
This allows customization of memory usage for particular sub-programs
of GATK like HaplotypeCaller.
"""
resources = config_utils.get_resources(program, self._config)
if resources.get("jvm_opts"):
self._jvm_opts = resources.get("jvm_opts") | [
"def",
"new_resources",
"(",
"self",
",",
"program",
")",
":",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"program",
",",
"self",
".",
"_config",
")",
"if",
"resources",
".",
"get",
"(",
"\"jvm_opts\"",
")",
":",
"self",
".",
"_jvm_opts",... | Set new resource usage for the given program.
This allows customization of memory usage for particular sub-programs
of GATK like HaplotypeCaller. | [
"Set",
"new",
"resource",
"usage",
"for",
"the",
"given",
"program",
".",
"This",
"allows",
"customization",
"of",
"memory",
"usage",
"for",
"particular",
"sub",
"-",
"programs",
"of",
"GATK",
"like",
"HaplotypeCaller",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L186-L193 | train | 218,529 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.run_fn | def run_fn(self, name, *args, **kwds):
"""Run pre-built functionality that used Broad tools by name.
See the gatkrun, picardrun module for available functions.
"""
fn = None
to_check = [picardrun]
for ns in to_check:
try:
fn = getattr(ns, name)
break
except AttributeError:
pass
assert fn is not None, "Could not find function %s in %s" % (name, to_check)
return fn(self, *args, **kwds) | python | def run_fn(self, name, *args, **kwds):
"""Run pre-built functionality that used Broad tools by name.
See the gatkrun, picardrun module for available functions.
"""
fn = None
to_check = [picardrun]
for ns in to_check:
try:
fn = getattr(ns, name)
break
except AttributeError:
pass
assert fn is not None, "Could not find function %s in %s" % (name, to_check)
return fn(self, *args, **kwds) | [
"def",
"run_fn",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"fn",
"=",
"None",
"to_check",
"=",
"[",
"picardrun",
"]",
"for",
"ns",
"in",
"to_check",
":",
"try",
":",
"fn",
"=",
"getattr",
"(",
"ns",
",",
"name... | Run pre-built functionality that used Broad tools by name.
See the gatkrun, picardrun module for available functions. | [
"Run",
"pre",
"-",
"built",
"functionality",
"that",
"used",
"Broad",
"tools",
"by",
"name",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L195-L209 | train | 218,530 |
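
A self-contained sketch of the same getattr-based dispatch, with a stub class standing in for the picardrun module (all names here are illustrative only):

class picard_stub:
    @staticmethod
    def picard_index(runner, in_bam):
        return in_bam + ".bai"

def dispatch(name, *args):
    # Look up the named function across candidate namespaces, first hit wins.
    fn = None
    for ns in [picard_stub]:
        fn = getattr(ns, name, None)
        if fn is not None:
            break
    assert fn is not None, "Could not find function %s" % name
    return fn(None, *args)

print(dispatch("picard_index", "in.bam"))  # -> 'in.bam.bai'
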
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.cl_picard | def cl_picard(self, command, options, memscale=None):
"""Prepare a Picard commandline.
"""
options = ["%s=%s" % (x, y) for x, y in options]
options.append("VALIDATION_STRINGENCY=SILENT")
return self._get_picard_cmd(command, memscale=memscale) + options | python | def cl_picard(self, command, options, memscale=None):
"""Prepare a Picard commandline.
"""
options = ["%s=%s" % (x, y) for x, y in options]
options.append("VALIDATION_STRINGENCY=SILENT")
return self._get_picard_cmd(command, memscale=memscale) + options | [
"def",
"cl_picard",
"(",
"self",
",",
"command",
",",
"options",
",",
"memscale",
"=",
"None",
")",
":",
"options",
"=",
"[",
"\"%s=%s\"",
"%",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"options",
"]",
"options",
".",
"append",
"(",
"\... | Prepare a Picard commandline. | [
"Prepare",
"a",
"Picard",
"commandline",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L211-L216 | train | 218,531 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.run | def run(self, command, options, pipe=False, get_stdout=False, memscale=None):
"""Run a Picard command with the provided option pairs.
"""
cl = self.cl_picard(command, options, memscale=memscale)
if pipe:
subprocess.Popen(cl)
elif get_stdout:
p = subprocess.Popen(cl, stdout=subprocess.PIPE)
stdout = p.stdout.read()
p.wait()
p.stdout.close()
return stdout
else:
do.run(cl, "Picard {0}".format(command), None) | python | def run(self, command, options, pipe=False, get_stdout=False, memscale=None):
"""Run a Picard command with the provided option pairs.
"""
cl = self.cl_picard(command, options, memscale=memscale)
if pipe:
subprocess.Popen(cl)
elif get_stdout:
p = subprocess.Popen(cl, stdout=subprocess.PIPE)
stdout = p.stdout.read()
p.wait()
p.stdout.close()
return stdout
else:
do.run(cl, "Picard {0}".format(command), None) | [
"def",
"run",
"(",
"self",
",",
"command",
",",
"options",
",",
"pipe",
"=",
"False",
",",
"get_stdout",
"=",
"False",
",",
"memscale",
"=",
"None",
")",
":",
"cl",
"=",
"self",
".",
"cl_picard",
"(",
"command",
",",
"options",
",",
"memscale",
"=",
... | Run a Picard command with the provided option pairs. | [
"Run",
"a",
"Picard",
"command",
"with",
"the",
"provided",
"option",
"pairs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L218-L231 | train | 218,532 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.cl_mutect | def cl_mutect(self, params, tmp_dir):
"""Define parameters to run the mutect paired algorithm.
"""
gatk_jar = self._get_jar("muTect", ["mutect"])
# Decrease memory slightly from configuration to avoid memory allocation errors
jvm_opts = config_utils.adjust_opts(self._jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
return ["java"] + jvm_opts + get_default_jvm_opts(tmp_dir) + \
["-jar", gatk_jar] + [str(x) for x in params] | python | def cl_mutect(self, params, tmp_dir):
"""Define parameters to run the mutect paired algorithm.
"""
gatk_jar = self._get_jar("muTect", ["mutect"])
# Decrease memory slightly from configuration to avoid memory allocation errors
jvm_opts = config_utils.adjust_opts(self._jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
return ["java"] + jvm_opts + get_default_jvm_opts(tmp_dir) + \
["-jar", gatk_jar] + [str(x) for x in params] | [
"def",
"cl_mutect",
"(",
"self",
",",
"params",
",",
"tmp_dir",
")",
":",
"gatk_jar",
"=",
"self",
".",
"_get_jar",
"(",
"\"muTect\"",
",",
"[",
"\"mutect\"",
"]",
")",
"# Decrease memory slightly from configuration to avoid memory allocation errors",
"jvm_opts",
"=",... | Define parameters to run the mutect paired algorithm. | [
"Define",
"parameters",
"to",
"run",
"the",
"mutect",
"paired",
"algorithm",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L330-L339 | train | 218,533 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.run_gatk | def run_gatk(self, params, tmp_dir=None, log_error=True,
data=None, region=None, memscale=None, parallel_gc=False, ld_preload=False):
"""Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow
"""
needs_java7 = LooseVersion(self.get_gatk_version()) < LooseVersion("3.6")
# For old Java requirements use global java 7
if needs_java7:
setpath.remove_bcbiopath()
with tx_tmpdir(self._config) as local_tmp_dir:
if tmp_dir is None:
tmp_dir = local_tmp_dir
cl = self.cl_gatk(params, tmp_dir, memscale=memscale, parallel_gc=parallel_gc)
atype_index = params.index("-T") if params.count("-T") > 0 \
else params.index("--analysis_type")
prog = params[atype_index + 1]
cl = fix_missing_spark_user(cl, prog, params)
if ld_preload:
cl = "export LD_PRELOAD=%s/lib/libopenblas.so && %s" % (os.path.dirname(utils.get_bcbio_bin()), cl)
do.run(cl, "GATK: {0}".format(prog), data, region=region,
log_error=log_error)
if needs_java7:
setpath.prepend_bcbiopath() | python | def run_gatk(self, params, tmp_dir=None, log_error=True,
data=None, region=None, memscale=None, parallel_gc=False, ld_preload=False):
"""Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow
"""
needs_java7 = LooseVersion(self.get_gatk_version()) < LooseVersion("3.6")
# For old Java requirements use global java 7
if needs_java7:
setpath.remove_bcbiopath()
with tx_tmpdir(self._config) as local_tmp_dir:
if tmp_dir is None:
tmp_dir = local_tmp_dir
cl = self.cl_gatk(params, tmp_dir, memscale=memscale, parallel_gc=parallel_gc)
atype_index = params.index("-T") if params.count("-T") > 0 \
else params.index("--analysis_type")
prog = params[atype_index + 1]
cl = fix_missing_spark_user(cl, prog, params)
if ld_preload:
cl = "export LD_PRELOAD=%s/lib/libopenblas.so && %s" % (os.path.dirname(utils.get_bcbio_bin()), cl)
do.run(cl, "GATK: {0}".format(prog), data, region=region,
log_error=log_error)
if needs_java7:
setpath.prepend_bcbiopath() | [
"def",
"run_gatk",
"(",
"self",
",",
"params",
",",
"tmp_dir",
"=",
"None",
",",
"log_error",
"=",
"True",
",",
"data",
"=",
"None",
",",
"region",
"=",
"None",
",",
"memscale",
"=",
"None",
",",
"parallel_gc",
"=",
"False",
",",
"ld_preload",
"=",
"... | Top level interface to running a GATK command.
ld_preload injects required libraries for Java JNI calls:
https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow | [
"Top",
"level",
"interface",
"to",
"running",
"a",
"GATK",
"command",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L341-L365 | train | 218,534 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.get_gatk_version | def get_gatk_version(self):
"""Retrieve GATK version, handling locally and config cached versions.
Calling version can be expensive due to all the startup and shutdown
of JVMs, so we prefer cached version information.
"""
if self._gatk_version is None:
self._set_default_versions(self._config)
if "gatk4" not in dd.get_tools_off({"config": self._config}):
            # In cases where we don't have manifest versions. Not possible to get
            # version from commandline with GATK4 alpha version
if self._gatk4_version is None:
self._gatk4_version = "4.0"
return self._gatk4_version
elif self._gatk_version is not None:
return self._gatk_version
else:
if self._has_gatk_conda_wrapper():
gatk_jar = None
else:
gatk_jar = self._get_jar("GenomeAnalysisTK", ["GenomeAnalysisTKLite"], allow_missing=True)
self._gatk_version = get_gatk_version(gatk_jar, config=self._config)
return self._gatk_version | python | def get_gatk_version(self):
"""Retrieve GATK version, handling locally and config cached versions.
Calling version can be expensive due to all the startup and shutdown
of JVMs, so we prefer cached version information.
"""
if self._gatk_version is None:
self._set_default_versions(self._config)
if "gatk4" not in dd.get_tools_off({"config": self._config}):
            # In cases where we don't have manifest versions. Not possible to get
            # version from commandline with GATK4 alpha version
if self._gatk4_version is None:
self._gatk4_version = "4.0"
return self._gatk4_version
elif self._gatk_version is not None:
return self._gatk_version
else:
if self._has_gatk_conda_wrapper():
gatk_jar = None
else:
gatk_jar = self._get_jar("GenomeAnalysisTK", ["GenomeAnalysisTKLite"], allow_missing=True)
self._gatk_version = get_gatk_version(gatk_jar, config=self._config)
return self._gatk_version | [
"def",
"get_gatk_version",
"(",
"self",
")",
":",
"if",
"self",
".",
"_gatk_version",
"is",
"None",
":",
"self",
".",
"_set_default_versions",
"(",
"self",
".",
"_config",
")",
"if",
"\"gatk4\"",
"not",
"in",
"dd",
".",
"get_tools_off",
"(",
"{",
"\"config... | Retrieve GATK version, handling locally and config cached versions.
Calling version can be expensive due to all the startup and shutdown
of JVMs, so we prefer cached version information. | [
"Retrieve",
"GATK",
"version",
"handling",
"locally",
"and",
"config",
"cached",
"versions",
".",
"Calling",
"version",
"can",
"be",
"expensive",
"due",
"to",
"all",
"the",
"startup",
"and",
"shutdown",
"of",
"JVMs",
"so",
"we",
"prefer",
"cached",
"version",
... | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L376-L398 | train | 218,535 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.get_mutect_version | def get_mutect_version(self):
"""Retrieve the Mutect version.
"""
if self._mutect_version is None:
mutect_jar = self._get_jar("muTect", ["mutect"])
self._mutect_version = get_mutect_version(mutect_jar)
return self._mutect_version | python | def get_mutect_version(self):
"""Retrieve the Mutect version.
"""
if self._mutect_version is None:
mutect_jar = self._get_jar("muTect", ["mutect"])
self._mutect_version = get_mutect_version(mutect_jar)
return self._mutect_version | [
"def",
"get_mutect_version",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mutect_version",
"is",
"None",
":",
"mutect_jar",
"=",
"self",
".",
"_get_jar",
"(",
"\"muTect\"",
",",
"[",
"\"mutect\"",
"]",
")",
"self",
".",
"_mutect_version",
"=",
"get_mutect_ver... | Retrieve the Mutect version. | [
"Retrieve",
"the",
"Mutect",
"version",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L400-L406 | train | 218,536 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner.gatk_major_version | def gatk_major_version(self):
"""Retrieve the GATK major version, handling multiple GATK distributions.
Has special cases for GATK nightly builds, Appistry releases and
GATK prior to 2.3.
"""
full_version = self.get_gatk_version()
# Working with a recent version if using nightlies
if full_version.startswith("nightly-"):
return "3.6"
parts = full_version.split("-")
if len(parts) == 4:
appistry_release, version, subversion, githash = parts
elif len(parts) == 3:
version, subversion, githash = parts
elif len(parts) == 2:
version, subversion = parts
elif len(parts) == 1:
version = parts[0]
# version was not properly implemented in earlier GATKs
else:
version = "2.3"
if version.startswith("v"):
version = version[1:]
return version | python | def gatk_major_version(self):
"""Retrieve the GATK major version, handling multiple GATK distributions.
Has special cases for GATK nightly builds, Appistry releases and
GATK prior to 2.3.
"""
full_version = self.get_gatk_version()
# Working with a recent version if using nightlies
if full_version.startswith("nightly-"):
return "3.6"
parts = full_version.split("-")
if len(parts) == 4:
appistry_release, version, subversion, githash = parts
elif len(parts) == 3:
version, subversion, githash = parts
elif len(parts) == 2:
version, subversion = parts
elif len(parts) == 1:
version = parts[0]
# version was not properly implemented in earlier GATKs
else:
version = "2.3"
if version.startswith("v"):
version = version[1:]
return version | [
"def",
"gatk_major_version",
"(",
"self",
")",
":",
"full_version",
"=",
"self",
".",
"get_gatk_version",
"(",
")",
"# Working with a recent version if using nightlies",
"if",
"full_version",
".",
"startswith",
"(",
"\"nightly-\"",
")",
":",
"return",
"\"3.6\"",
"part... | Retrieve the GATK major version, handling multiple GATK distributions.
Has special cases for GATK nightly builds, Appistry releases and
GATK prior to 2.3. | [
"Retrieve",
"the",
"GATK",
"major",
"version",
"handling",
"multiple",
"GATK",
"distributions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L420-L444 | train | 218,537 |
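
For a typical three-part GATK3 version string (the value below is a hypothetical example), the split works like this:

full_version = "3.6-0-g89b7209"  # hypothetical GATK3 version string
version, subversion, githash = full_version.split("-")
print(version)  # -> '3.6'
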
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner._get_picard_cmd | def _get_picard_cmd(self, command, memscale=None):
"""Retrieve the base Picard command, handling both shell scripts and directory of jars.
"""
resources = config_utils.get_resources("picard", self._config)
if memscale:
jvm_opts = get_picard_opts(self._config, memscale=memscale)
elif resources.get("jvm_opts"):
jvm_opts = resources.get("jvm_opts")
else:
jvm_opts = self._jvm_opts
if os.path.isdir(self._picard_ref):
dist_file = self._get_jar(command)
return ["java"] + jvm_opts + get_default_jvm_opts() + ["-jar", dist_file]
else:
# XXX Cannot currently set JVM opts with picard-tools script
return [self._picard_ref, command] | python | def _get_picard_cmd(self, command, memscale=None):
"""Retrieve the base Picard command, handling both shell scripts and directory of jars.
"""
resources = config_utils.get_resources("picard", self._config)
if memscale:
jvm_opts = get_picard_opts(self._config, memscale=memscale)
elif resources.get("jvm_opts"):
jvm_opts = resources.get("jvm_opts")
else:
jvm_opts = self._jvm_opts
if os.path.isdir(self._picard_ref):
dist_file = self._get_jar(command)
return ["java"] + jvm_opts + get_default_jvm_opts() + ["-jar", dist_file]
else:
# XXX Cannot currently set JVM opts with picard-tools script
return [self._picard_ref, command] | [
"def",
"_get_picard_cmd",
"(",
"self",
",",
"command",
",",
"memscale",
"=",
"None",
")",
":",
"resources",
"=",
"config_utils",
".",
"get_resources",
"(",
"\"picard\"",
",",
"self",
".",
"_config",
")",
"if",
"memscale",
":",
"jvm_opts",
"=",
"get_picard_op... | Retrieve the base Picard command, handling both shell scripts and directory of jars. | [
"Retrieve",
"the",
"base",
"Picard",
"command",
"handling",
"both",
"shell",
"scripts",
"and",
"directory",
"of",
"jars",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L446-L461 | train | 218,538 |
bcbio/bcbio-nextgen | bcbio/broad/__init__.py | BroadRunner._get_jar | def _get_jar(self, command, alts=None, allow_missing=False):
"""Retrieve the jar for running the specified command.
"""
dirs = []
for bdir in [self._gatk_dir, self._picard_ref]:
dirs.extend([bdir,
os.path.join(bdir, os.pardir, "gatk")])
if alts is None: alts = []
for check_cmd in [command] + alts:
for dir_check in dirs:
try:
check_file = config_utils.get_jar(check_cmd, dir_check)
return check_file
except ValueError as msg:
if str(msg).find("multiple") > 0:
raise
else:
pass
if allow_missing:
return None
else:
raise ValueError("Could not find jar %s in %s:%s" % (command, self._picard_ref, self._gatk_dir)) | python | def _get_jar(self, command, alts=None, allow_missing=False):
"""Retrieve the jar for running the specified command.
"""
dirs = []
for bdir in [self._gatk_dir, self._picard_ref]:
dirs.extend([bdir,
os.path.join(bdir, os.pardir, "gatk")])
if alts is None: alts = []
for check_cmd in [command] + alts:
for dir_check in dirs:
try:
check_file = config_utils.get_jar(check_cmd, dir_check)
return check_file
except ValueError as msg:
if str(msg).find("multiple") > 0:
raise
else:
pass
if allow_missing:
return None
else:
raise ValueError("Could not find jar %s in %s:%s" % (command, self._picard_ref, self._gatk_dir)) | [
"def",
"_get_jar",
"(",
"self",
",",
"command",
",",
"alts",
"=",
"None",
",",
"allow_missing",
"=",
"False",
")",
":",
"dirs",
"=",
"[",
"]",
"for",
"bdir",
"in",
"[",
"self",
".",
"_gatk_dir",
",",
"self",
".",
"_picard_ref",
"]",
":",
"dirs",
".... | Retrieve the jar for running the specified command. | [
"Retrieve",
"the",
"jar",
"for",
"running",
"the",
"specified",
"command",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L463-L484 | train | 218,539 |
bcbio/bcbio-nextgen | bcbio/utils.py | cpmap | def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown() | python | def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown() | [
"def",
"cpmap",
"(",
"cores",
"=",
"1",
")",
":",
"if",
"int",
"(",
"cores",
")",
"==",
"1",
":",
"yield",
"itertools",
".",
"imap",
"else",
":",
"if",
"futures",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"\"concurrent.futures not available\"",
")"... | Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores | [
"Configurable",
"parallel",
"map",
"context",
"manager",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L34-L48 | train | 218,540 |
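
itertools.imap exists only on Python 2; a rough Python 3 sketch of the same context-manager pattern, using concurrent.futures directly:

import contextlib
from concurrent import futures

@contextlib.contextmanager
def cpmap3(cores=1):
    if int(cores) == 1:
        yield map  # the builtin map is lazy on Python 3, like itertools.imap
    else:
        with futures.ProcessPoolExecutor(cores) as pool:
            yield pool.map  # pool is shut down automatically on exit

with cpmap3(1) as pmap:
    print(list(pmap(str.upper, ["a", "b"])))  # -> ['A', 'B']
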
bcbio/bcbio-nextgen | bcbio/utils.py | map_wrap | def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper | python | def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper | [
"def",
"map_wrap",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | Wrap standard function to easily pass into 'map' processing. | [
"Wrap",
"standard",
"function",
"to",
"easily",
"pass",
"into",
"map",
"processing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L50-L56 | train | 218,541 |
bcbio/bcbio-nextgen | bcbio/utils.py | unpack_worlds | def unpack_worlds(items):
"""Handle all the ways we can pass multiple samples for back-compatibility.
"""
# Unpack nested lists of samples grouped together (old IPython style)
if isinstance(items[0], (list, tuple)) and len(items[0]) == 1:
out = []
for d in items:
assert len(d) == 1 and isinstance(d[0], dict), len(d)
out.append(d[0])
# Unpack a single argument with multiple samples (CWL style)
elif isinstance(items, (list, tuple)) and len(items) == 1 and isinstance(items[0], (list, tuple)):
out = items[0]
else:
out = items
return out | python | def unpack_worlds(items):
"""Handle all the ways we can pass multiple samples for back-compatibility.
"""
# Unpack nested lists of samples grouped together (old IPython style)
if isinstance(items[0], (list, tuple)) and len(items[0]) == 1:
out = []
for d in items:
assert len(d) == 1 and isinstance(d[0], dict), len(d)
out.append(d[0])
# Unpack a single argument with multiple samples (CWL style)
elif isinstance(items, (list, tuple)) and len(items) == 1 and isinstance(items[0], (list, tuple)):
out = items[0]
else:
out = items
return out | [
"def",
"unpack_worlds",
"(",
"items",
")",
":",
"# Unpack nested lists of samples grouped together (old IPython style)",
"if",
"isinstance",
"(",
"items",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"len",
"(",
"items",
"[",
"0",
"]",
")",
... | Handle all the ways we can pass multiple samples for back-compatibility. | [
"Handle",
"all",
"the",
"ways",
"we",
"can",
"pass",
"multiple",
"samples",
"for",
"back",
"-",
"compatibility",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L160-L174 | train | 218,542 |
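
A quick illustration of the two accepted input shapes, assuming the bcbio package is importable:

from bcbio.utils import unpack_worlds

samples = [{"name": "s1"}, {"name": "s2"}]
print(unpack_worlds([[s] for s in samples]))  # old IPython style: list of single-item lists
print(unpack_worlds([samples]))               # CWL style: one argument holding all samples
# both calls return the flat list [{'name': 's1'}, {'name': 's2'}]
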
bcbio/bcbio-nextgen | bcbio/utils.py | safe_makedir | def safe_makedir(dname):
"""Make a directory if it doesn't exist, handling concurrent race conditions.
"""
if not dname:
return dname
num_tries = 0
max_tries = 5
while not os.path.exists(dname):
# we could get an error here if multiple processes are creating
# the directory at the same time. Grr, concurrency.
try:
os.makedirs(dname)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
return dname | python | def safe_makedir(dname):
"""Make a directory if it doesn't exist, handling concurrent race conditions.
"""
if not dname:
return dname
num_tries = 0
max_tries = 5
while not os.path.exists(dname):
# we could get an error here if multiple processes are creating
# the directory at the same time. Grr, concurrency.
try:
os.makedirs(dname)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
return dname | [
"def",
"safe_makedir",
"(",
"dname",
")",
":",
"if",
"not",
"dname",
":",
"return",
"dname",
"num_tries",
"=",
"0",
"max_tries",
"=",
"5",
"while",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dname",
")",
":",
"# we could get an error here if multiple pro... | Make a directory if it doesn't exist, handling concurrent race conditions. | [
"Make",
"a",
"directory",
"if",
"it",
"doesn",
"t",
"exist",
"handling",
"concurrent",
"race",
"conditions",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L176-L193 | train | 218,543 |
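
On Python 3 the common creation race is also covered by the exist_ok flag of os.makedirs; a minimal alternative sketch:

import os

def makedir_py3(dname):
    if dname:
        os.makedirs(dname, exist_ok=True)  # no error if another process created it first
    return dname
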
bcbio/bcbio-nextgen | bcbio/utils.py | tmpfile | def tmpfile(*args, **kwargs):
"""Make a tempfile, safely cleaning up file descriptors on completion.
"""
(fd, fname) = tempfile.mkstemp(*args, **kwargs)
try:
yield fname
finally:
os.close(fd)
if os.path.exists(fname):
os.remove(fname) | python | def tmpfile(*args, **kwargs):
"""Make a tempfile, safely cleaning up file descriptors on completion.
"""
(fd, fname) = tempfile.mkstemp(*args, **kwargs)
try:
yield fname
finally:
os.close(fd)
if os.path.exists(fname):
os.remove(fname) | [
"def",
"tmpfile",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"(",
"fd",
",",
"fname",
")",
"=",
"tempfile",
".",
"mkstemp",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"yield",
"fname",
"finally",
":",
"os",
".",
"clos... | Make a tempfile, safely cleaning up file descriptors on completion. | [
"Make",
"a",
"tempfile",
"safely",
"cleaning",
"up",
"file",
"descriptors",
"on",
"completion",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L221-L230 | train | 218,544 |
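
A usage sketch, assuming the bcbio package is importable and that, as in the upstream module, this generator is wrapped with contextlib.contextmanager:

from bcbio.utils import tmpfile

with tmpfile(suffix=".txt") as fname:
    with open(fname, "w") as out_handle:
        out_handle.write("scratch data")
# on exit the descriptor is closed and the file removed
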
bcbio/bcbio-nextgen | bcbio/utils.py | file_exists | def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False | python | def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False | [
"def",
"file_exists",
"(",
"fname",
")",
":",
"try",
":",
"return",
"fname",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
"and",
"os",
".",
"path",
".",
"getsize",
"(",
"fname",
")",
">",
"0",
"except",
"OSError",
":",
"return",
"Fal... | Check if a file exists and is non-empty. | [
"Check",
"if",
"a",
"file",
"exists",
"and",
"is",
"non",
"-",
"empty",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L232-L238 | train | 218,545 |
bcbio/bcbio-nextgen | bcbio/utils.py | get_size | def get_size(path):
""" Returns the size in bytes if `path` is a file,
or the size of all files in `path` if it's a directory.
Analogous to `du -s`.
"""
if os.path.isfile(path):
return os.path.getsize(path)
return sum(get_size(os.path.join(path, f)) for f in os.listdir(path)) | python | def get_size(path):
""" Returns the size in bytes if `path` is a file,
or the size of all files in `path` if it's a directory.
Analogous to `du -s`.
"""
if os.path.isfile(path):
return os.path.getsize(path)
return sum(get_size(os.path.join(path, f)) for f in os.listdir(path)) | [
"def",
"get_size",
"(",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
"return",
"sum",
"(",
"get_size",
"(",
"os",
".",
"path",
".",
"join",
"(",
... | Returns the size in bytes if `path` is a file,
or the size of all files in `path` if it's a directory.
Analogous to `du -s`. | [
"Returns",
"the",
"size",
"in",
"bytes",
"if",
"path",
"is",
"a",
"file",
"or",
"the",
"size",
"of",
"all",
"files",
"in",
"path",
"if",
"it",
"s",
"a",
"directory",
".",
"Analogous",
"to",
"du",
"-",
"s",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L241-L248 | train | 218,546 |
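
A usage sketch with hypothetical paths, assuming the bcbio package is importable:

from bcbio.utils import get_size

print(get_size("/etc/hosts"))      # single file: size in bytes
print(get_size("/tmp/work_dir"))   # directory: recursive total, analogous to `du -s`
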
bcbio/bcbio-nextgen | bcbio/utils.py | read_galaxy_amqp_config | def read_galaxy_amqp_config(galaxy_config, base_dir):
"""Read connection information on the RabbitMQ server from Galaxy config.
"""
galaxy_config = add_full_path(galaxy_config, base_dir)
config = six.moves.configparser.ConfigParser()
config.read(galaxy_config)
amqp_config = {}
for option in config.options("galaxy_amqp"):
amqp_config[option] = config.get("galaxy_amqp", option)
return amqp_config | python | def read_galaxy_amqp_config(galaxy_config, base_dir):
"""Read connection information on the RabbitMQ server from Galaxy config.
"""
galaxy_config = add_full_path(galaxy_config, base_dir)
config = six.moves.configparser.ConfigParser()
config.read(galaxy_config)
amqp_config = {}
for option in config.options("galaxy_amqp"):
amqp_config[option] = config.get("galaxy_amqp", option)
return amqp_config | [
"def",
"read_galaxy_amqp_config",
"(",
"galaxy_config",
",",
"base_dir",
")",
":",
"galaxy_config",
"=",
"add_full_path",
"(",
"galaxy_config",
",",
"base_dir",
")",
"config",
"=",
"six",
".",
"moves",
".",
"configparser",
".",
"ConfigParser",
"(",
")",
"config"... | Read connection information on the RabbitMQ server from Galaxy config. | [
"Read",
"connection",
"information",
"on",
"the",
"RabbitMQ",
"server",
"from",
"Galaxy",
"config",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L279-L288 | train | 218,547 |
bcbio/bcbio-nextgen | bcbio/utils.py | move_safe | def move_safe(origin, target):
"""
Move file, skip if exists
"""
if origin == target:
return origin
if file_exists(target):
return target
shutil.move(origin, target)
return target | python | def move_safe(origin, target):
"""
Move file, skip if exists
"""
if origin == target:
return origin
if file_exists(target):
return target
shutil.move(origin, target)
return target | [
"def",
"move_safe",
"(",
"origin",
",",
"target",
")",
":",
"if",
"origin",
"==",
"target",
":",
"return",
"origin",
"if",
"file_exists",
"(",
"target",
")",
":",
"return",
"target",
"shutil",
".",
"move",
"(",
"origin",
",",
"target",
")",
"return",
"... | Move file, skip if exists | [
"Move",
"file",
"skip",
"if",
"exists"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L315-L324 | train | 218,548 |
bcbio/bcbio-nextgen | bcbio/utils.py | file_plus_index | def file_plus_index(fname):
"""Convert a file name into the file plus required indexes.
"""
exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi",
".fq.gz": ".gbi"}
ext = splitext_plus(fname)[-1]
if ext in exts:
return [fname, fname + exts[ext]]
else:
return [fname] | python | def file_plus_index(fname):
"""Convert a file name into the file plus required indexes.
"""
exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi",
".fq.gz": ".gbi"}
ext = splitext_plus(fname)[-1]
if ext in exts:
return [fname, fname + exts[ext]]
else:
return [fname] | [
"def",
"file_plus_index",
"(",
"fname",
")",
":",
"exts",
"=",
"{",
"\".vcf\"",
":",
"\".idx\"",
",",
"\".bam\"",
":",
"\".bai\"",
",",
"\".vcf.gz\"",
":",
"\".tbi\"",
",",
"\".bed.gz\"",
":",
"\".tbi\"",
",",
"\".fq.gz\"",
":",
"\".gbi\"",
"}",
"ext",
"="... | Convert a file name into the file plus required indexes. | [
"Convert",
"a",
"file",
"name",
"into",
"the",
"file",
"plus",
"required",
"indexes",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L326-L335 | train | 218,549 |
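
A usage sketch showing the extension table in action, assuming the bcbio package is importable:

from bcbio.utils import file_plus_index

print(file_plus_index("calls.vcf.gz"))  # -> ['calls.vcf.gz', 'calls.vcf.gz.tbi']
print(file_plus_index("reads.fastq"))   # -> ['reads.fastq']  (no known index extension)
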
bcbio/bcbio-nextgen | bcbio/utils.py | remove_plus | def remove_plus(orig):
    """Remove a file, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext):
            remove_safe(orig + ext) | python | def remove_plus(orig):
    """Remove a file, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext):
remove_safe(orig + ext) | [
"def",
"remove_plus",
"(",
"orig",
")",
":",
"for",
"ext",
"in",
"[",
"\"\"",
",",
"\".idx\"",
",",
"\".gbi\"",
",",
"\".tbi\"",
",",
"\".bai\"",
"]",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"orig",
"+",
"ext",
")",
":",
"remove_safe",
"(... | Remove a fils, including biological index files. | [
"Remove",
"a",
"fils",
"including",
"biological",
"index",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L337-L342 | train | 218,550 |
bcbio/bcbio-nextgen | bcbio/utils.py | copy_plus | def copy_plus(orig, new):
    """Copy a file, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
            shutil.copyfile(orig + ext, new + ext) | python | def copy_plus(orig, new):
    """Copy a file, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
shutil.copyfile(orig + ext, new + ext) | [
"def",
"copy_plus",
"(",
"orig",
",",
"new",
")",
":",
"for",
"ext",
"in",
"[",
"\"\"",
",",
"\".idx\"",
",",
"\".gbi\"",
",",
"\".tbi\"",
",",
"\".bai\"",
"]",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"orig",
"+",
"ext",
")",
"and",
"("... | Copy a fils, including biological index files. | [
"Copy",
"a",
"fils",
"including",
"biological",
"index",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L344-L349 | train | 218,551 |
bcbio/bcbio-nextgen | bcbio/utils.py | merge_config_files | def merge_config_files(fnames):
"""Merge configuration files, preferring definitions in latter files.
"""
def _load_yaml(fname):
with open(fname) as in_handle:
config = yaml.safe_load(in_handle)
return config
out = _load_yaml(fnames[0])
for fname in fnames[1:]:
cur = _load_yaml(fname)
for k, v in cur.items():
if k in out and isinstance(out[k], dict):
out[k].update(v)
else:
out[k] = v
return out | python | def merge_config_files(fnames):
"""Merge configuration files, preferring definitions in latter files.
"""
def _load_yaml(fname):
with open(fname) as in_handle:
config = yaml.safe_load(in_handle)
return config
out = _load_yaml(fnames[0])
for fname in fnames[1:]:
cur = _load_yaml(fname)
for k, v in cur.items():
if k in out and isinstance(out[k], dict):
out[k].update(v)
else:
out[k] = v
return out | [
"def",
"merge_config_files",
"(",
"fnames",
")",
":",
"def",
"_load_yaml",
"(",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
")",
"as",
"in_handle",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"in_handle",
")",
"return",
"config",
"out",
"=",
... | Merge configuration files, preferring definitions in latter files. | [
"Merge",
"configuration",
"files",
"preferring",
"definitions",
"in",
"latter",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L475-L490 | train | 218,552 |
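
Later files win, and dictionary values are merged one level deep rather than replaced wholesale; a runnable sketch with hypothetical file names, assuming the bcbio package is importable:

import yaml
from bcbio.utils import merge_config_files

with open("base.yaml", "w") as fh:
    yaml.safe_dump({"resources": {"gatk": {"memory": "2g"}}, "cores": 1}, fh)
with open("override.yaml", "w") as fh:
    yaml.safe_dump({"resources": {"mutect": {"memory": "4g"}}}, fh)
print(merge_config_files(["base.yaml", "override.yaml"]))
# -> {'resources': {'gatk': {'memory': '2g'}, 'mutect': {'memory': '4g'}}, 'cores': 1}
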
bcbio/bcbio-nextgen | bcbio/utils.py | deepish_copy | def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.items():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out | python | def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.items():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out | [
"def",
"deepish_copy",
"(",
"org",
")",
":",
"out",
"=",
"dict",
"(",
")",
".",
"fromkeys",
"(",
"org",
")",
"for",
"k",
",",
"v",
"in",
"org",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"out",
"[",
"k",... | Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/ | [
"Improved",
"speed",
"deep",
"copy",
"for",
"dictionaries",
"of",
"simple",
"python",
"types",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L492-L510 | train | 218,553 |
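
A usage sketch, assuming the bcbio package is importable; the speed trade-off is that objects nested inside lists stay shared with the original:

from bcbio.utils import deepish_copy

org = {"config": {"cores": 8}, "files": ["a.bam", "b.bam"]}
cp = deepish_copy(org)
cp["config"]["cores"] = 1
cp["files"].append("c.bam")
print(org)  # -> {'config': {'cores': 8}, 'files': ['a.bam', 'b.bam']} (unchanged)
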
bcbio/bcbio-nextgen | bcbio/utils.py | reservoir_sample | def reservoir_sample(stream, num_items, item_parser=lambda x: x):
"""
samples num_items from the stream keeping each with equal probability
"""
kept = []
for index, item in enumerate(stream):
if index < num_items:
kept.append(item_parser(item))
else:
r = random.randint(0, index)
if r < num_items:
kept[r] = item_parser(item)
return kept | python | def reservoir_sample(stream, num_items, item_parser=lambda x: x):
"""
samples num_items from the stream keeping each with equal probability
"""
kept = []
for index, item in enumerate(stream):
if index < num_items:
kept.append(item_parser(item))
else:
r = random.randint(0, index)
if r < num_items:
kept[r] = item_parser(item)
return kept | [
"def",
"reservoir_sample",
"(",
"stream",
",",
"num_items",
",",
"item_parser",
"=",
"lambda",
"x",
":",
"x",
")",
":",
"kept",
"=",
"[",
"]",
"for",
"index",
",",
"item",
"in",
"enumerate",
"(",
"stream",
")",
":",
"if",
"index",
"<",
"num_items",
"... | samples num_items from the stream keeping each with equal probability | [
"samples",
"num_items",
"from",
"the",
"stream",
"keeping",
"each",
"with",
"equal",
"probability"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L663-L675 | train | 218,554 |
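
A usage sketch, assuming the bcbio package is importable; every item in the stream ends up in the sample with the same probability, num_items divided by the stream length:

import random
from bcbio.utils import reservoir_sample

random.seed(42)
print(reservoir_sample(range(1000), 5))  # five items drawn uniformly from the stream
print(reservoir_sample(["1\n", "2\n"], 2, item_parser=str.strip))  # -> ['1', '2']
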
bcbio/bcbio-nextgen | bcbio/utils.py | dictapply | def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d | python | def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d | [
"def",
"dictapply",
"(",
"d",
",",
"fn",
")",
":",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"v",
"=",
"dictapply",
"(",
"v",
",",
"fn",
")",
"else",
":",
"d",
"[",
... | apply a function to all non-dict values in a dictionary | [
"apply",
"a",
"function",
"to",
"all",
"non",
"-",
"dict",
"values",
"in",
"a",
"dictionary"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L681-L690 | train | 218,555 |
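
A usage sketch, assuming the bcbio package is importable; the input dictionary is modified in place and also returned:

from bcbio.utils import dictapply

d = {"a": 1, "b": {"c": 2, "d": 3}}
print(dictapply(d, lambda x: x * 10))  # -> {'a': 10, 'b': {'c': 20, 'd': 30}}
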
bcbio/bcbio-nextgen | bcbio/utils.py | Rscript_cmd | def Rscript_cmd():
"""Retrieve path to locally installed Rscript or first in PATH.
Prefers Rscript version installed via conda to a system version.
"""
rscript = which(os.path.join(get_bcbio_bin(), "Rscript"))
if rscript:
return rscript
else:
return which("Rscript") | python | def Rscript_cmd():
"""Retrieve path to locally installed Rscript or first in PATH.
Prefers Rscript version installed via conda to a system version.
"""
rscript = which(os.path.join(get_bcbio_bin(), "Rscript"))
if rscript:
return rscript
else:
return which("Rscript") | [
"def",
"Rscript_cmd",
"(",
")",
":",
"rscript",
"=",
"which",
"(",
"os",
".",
"path",
".",
"join",
"(",
"get_bcbio_bin",
"(",
")",
",",
"\"Rscript\"",
")",
")",
"if",
"rscript",
":",
"return",
"rscript",
"else",
":",
"return",
"which",
"(",
"\"Rscript\... | Retrieve path to locally installed Rscript or first in PATH.
Prefers Rscript version installed via conda to a system version. | [
"Retrieve",
"path",
"to",
"locally",
"installed",
"Rscript",
"or",
"first",
"in",
"PATH",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L692-L701 | train | 218,556 |
bcbio/bcbio-nextgen | bcbio/utils.py | R_package_resource | def R_package_resource(package, resource):
"""
return a path to an R package resource, if it is available
"""
package_path = R_package_path(package)
if not package_path:
return None
package_resource = os.path.join(package_path, resource)
if not file_exists(package_resource):
return None
else:
return package_resource | python | def R_package_resource(package, resource):
"""
return a path to an R package resource, if it is available
"""
package_path = R_package_path(package)
if not package_path:
return None
package_resource = os.path.join(package_path, resource)
if not file_exists(package_resource):
return None
else:
return package_resource | [
"def",
"R_package_resource",
"(",
"package",
",",
"resource",
")",
":",
"package_path",
"=",
"R_package_path",
"(",
"package",
")",
"if",
"not",
"package_path",
":",
"return",
"None",
"package_resource",
"=",
"os",
".",
"path",
".",
"join",
"(",
"package_path"... | return a path to an R package resource, if it is available | [
"return",
"a",
"path",
"to",
"an",
"R",
"package",
"resource",
"if",
"it",
"is",
"available"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L727-L738 | train | 218,557 |
bcbio/bcbio-nextgen | bcbio/utils.py | get_java_binpath | def get_java_binpath(cmd=None):
"""Retrieve path for java to use, handling custom BCBIO_JAVA_HOME
Defaults to the dirname of cmd, or local anaconda directory
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
cmd = test_cmd
if not cmd:
cmd = Rscript_cmd()
return os.path.dirname(cmd) | python | def get_java_binpath(cmd=None):
"""Retrieve path for java to use, handling custom BCBIO_JAVA_HOME
Defaults to the dirname of cmd, or local anaconda directory
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
cmd = test_cmd
if not cmd:
cmd = Rscript_cmd()
return os.path.dirname(cmd) | [
"def",
"get_java_binpath",
"(",
"cmd",
"=",
"None",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"BCBIO_JAVA_HOME\"",
")",
":",
"test_cmd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"\"BCBIO_JAVA_HOME\"",
"]",
","... | Retrieve path for java to use, handling custom BCBIO_JAVA_HOME
Defaults to the dirname of cmd, or local anaconda directory | [
"Retrieve",
"path",
"for",
"java",
"to",
"use",
"handling",
"custom",
"BCBIO_JAVA_HOME"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L740-L751 | train | 218,558 |
bcbio/bcbio-nextgen | bcbio/utils.py | clear_java_home | def clear_java_home():
"""Clear JAVA_HOME environment or reset to BCBIO_JAVA_HOME.
Avoids accidental java injection but respects custom BCBIO_JAVA_HOME
command.
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
return "export JAVA_HOME=%s" % os.environ["BCBIO_JAVA_HOME"]
return "unset JAVA_HOME" | python | def clear_java_home():
"""Clear JAVA_HOME environment or reset to BCBIO_JAVA_HOME.
Avoids accidental java injection but respects custom BCBIO_JAVA_HOME
command.
"""
if os.environ.get("BCBIO_JAVA_HOME"):
test_cmd = os.path.join(os.environ["BCBIO_JAVA_HOME"], "bin", "java")
if os.path.exists(test_cmd):
return "export JAVA_HOME=%s" % os.environ["BCBIO_JAVA_HOME"]
return "unset JAVA_HOME" | [
"def",
"clear_java_home",
"(",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"BCBIO_JAVA_HOME\"",
")",
":",
"test_cmd",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"\"BCBIO_JAVA_HOME\"",
"]",
",",
"\"bin\"",
",",
"\"... | Clear JAVA_HOME environment or reset to BCBIO_JAVA_HOME.
Avoids accidental java injection but respects custom BCBIO_JAVA_HOME
command. | [
"Clear",
"JAVA_HOME",
"environment",
"or",
"reset",
"to",
"BCBIO_JAVA_HOME",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L753-L763 | train | 218,559 |
bcbio/bcbio-nextgen | bcbio/utils.py | perl_cmd | def perl_cmd():
"""Retrieve path to locally installed conda Perl or first in PATH.
"""
perl = which(os.path.join(get_bcbio_bin(), "perl"))
if perl:
return perl
else:
return which("perl") | python | def perl_cmd():
"""Retrieve path to locally installed conda Perl or first in PATH.
"""
perl = which(os.path.join(get_bcbio_bin(), "perl"))
if perl:
return perl
else:
return which("perl") | [
"def",
"perl_cmd",
"(",
")",
":",
"perl",
"=",
"which",
"(",
"os",
".",
"path",
".",
"join",
"(",
"get_bcbio_bin",
"(",
")",
",",
"\"perl\"",
")",
")",
"if",
"perl",
":",
"return",
"perl",
"else",
":",
"return",
"which",
"(",
"\"perl\"",
")"
] | Retrieve path to locally installed conda Perl or first in PATH. | [
"Retrieve",
"path",
"to",
"locally",
"installed",
"conda",
"Perl",
"or",
"first",
"in",
"PATH",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L773-L780 | train | 218,560 |
bcbio/bcbio-nextgen | bcbio/utils.py | get_perl_exports | def get_perl_exports(tmpdir=None):
"""Environmental exports to use conda installed perl.
"""
perl_path = os.path.dirname(perl_cmd())
out = "unset PERL5LIB && export PATH=%s:\"$PATH\"" % (perl_path)
if tmpdir:
out += " && export TMPDIR=%s" % (tmpdir)
return out | python | def get_perl_exports(tmpdir=None):
"""Environmental exports to use conda installed perl.
"""
perl_path = os.path.dirname(perl_cmd())
out = "unset PERL5LIB && export PATH=%s:\"$PATH\"" % (perl_path)
if tmpdir:
out += " && export TMPDIR=%s" % (tmpdir)
return out | [
"def",
"get_perl_exports",
"(",
"tmpdir",
"=",
"None",
")",
":",
"perl_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"perl_cmd",
"(",
")",
")",
"out",
"=",
"\"unset PERL5LIB && export PATH=%s:\\\"$PATH\\\"\"",
"%",
"(",
"perl_path",
")",
"if",
"tmpdir",
... | Environmental exports to use conda installed perl. | [
"Environmental",
"exports",
"to",
"use",
"conda",
"installed",
"perl",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L782-L789 | train | 218,561 |
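A hedged sketch of the shell fragment this returns; the anaconda path shown is illustrative and depends on the local install:

from bcbio.utils import get_perl_exports

print(get_perl_exports("/scratch/tmp"))
# -> unset PERL5LIB && export PATH=/install/anaconda/bin:"$PATH" && export TMPDIR=/scratch/tmp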
bcbio/bcbio-nextgen | bcbio/utils.py | get_all_conda_bins | def get_all_conda_bins():
"""Retrieve all possible conda bin directories, including environments.
"""
bcbio_bin = get_bcbio_bin()
conda_dir = os.path.dirname(bcbio_bin)
if os.path.join("anaconda", "envs") in conda_dir:
conda_dir = os.path.join(conda_dir[:conda_dir.rfind(os.path.join("anaconda", "envs"))], "anaconda")
return [bcbio_bin] + list(glob.glob(os.path.join(conda_dir, "envs", "*", "bin"))) | python | def get_all_conda_bins():
"""Retrieve all possible conda bin directories, including environments.
"""
bcbio_bin = get_bcbio_bin()
conda_dir = os.path.dirname(bcbio_bin)
if os.path.join("anaconda", "envs") in conda_dir:
conda_dir = os.path.join(conda_dir[:conda_dir.rfind(os.path.join("anaconda", "envs"))], "anaconda")
return [bcbio_bin] + list(glob.glob(os.path.join(conda_dir, "envs", "*", "bin"))) | [
"def",
"get_all_conda_bins",
"(",
")",
":",
"bcbio_bin",
"=",
"get_bcbio_bin",
"(",
")",
"conda_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"bcbio_bin",
")",
"if",
"os",
".",
"path",
".",
"join",
"(",
"\"anaconda\"",
",",
"\"envs\"",
")",
"in",
... | Retrieve all possible conda bin directories, including environments. | [
"Retrieve",
"all",
"possible",
"conda",
"bin",
"directories",
"including",
"environments",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L809-L816 | train | 218,562 |
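A runnable trace of the path arithmetic above, using a hypothetical /install/anaconda layout in place of the real get_bcbio_bin() result:

import os

conda_dir = "/install/anaconda/envs/python3"  # hypothetical dirname of get_bcbio_bin()
if os.path.join("anaconda", "envs") in conda_dir:
    conda_dir = os.path.join(conda_dir[:conda_dir.rfind(os.path.join("anaconda", "envs"))], "anaconda")
print(conda_dir)  # -> /install/anaconda; env bins are then globbed as envs/*/bin under it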
bcbio/bcbio-nextgen | bcbio/utils.py | get_program_python | def get_program_python(cmd):
"""Get the full path to a python version linked to the command.
Allows finding python based programs in python 2 versus python 3
environments.
"""
full_cmd = os.path.realpath(which(cmd))
cmd_python = os.path.join(os.path.dirname(full_cmd), "python")
env_python = None
if "envs" in cmd_python:
parts = cmd_python.split(os.sep)
env_python = os.path.join(os.sep.join(parts[:parts.index("envs") + 2]), "bin", "python")
if os.path.exists(cmd_python):
return cmd_python
elif env_python and os.path.exists(env_python):
return env_python
else:
return os.path.realpath(sys.executable) | python | def get_program_python(cmd):
"""Get the full path to a python version linked to the command.
Allows finding python based programs in python 2 versus python 3
environments.
"""
full_cmd = os.path.realpath(which(cmd))
cmd_python = os.path.join(os.path.dirname(full_cmd), "python")
env_python = None
if "envs" in cmd_python:
parts = cmd_python.split(os.sep)
env_python = os.path.join(os.sep.join(parts[:parts.index("envs") + 2]), "bin", "python")
if os.path.exists(cmd_python):
return cmd_python
elif env_python and os.path.exists(env_python):
return env_python
else:
return os.path.realpath(sys.executable) | [
"def",
"get_program_python",
"(",
"cmd",
")",
":",
"full_cmd",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"which",
"(",
"cmd",
")",
")",
"cmd_python",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"full_cmd",
... | Get the full path to a python version linked to the command.
Allows finding python based programs in python 2 versus python 3
environments. | [
"Get",
"the",
"full",
"path",
"to",
"a",
"python",
"version",
"linked",
"to",
"the",
"command",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L818-L835 | train | 218,563 |
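A runnable trace of the env-python resolution with a hypothetical conda layout; the real function also checks the filesystem before choosing:

import os

full_cmd = "/install/anaconda/envs/python3/bin/cutadapt"  # hypothetical which() result
cmd_python = os.path.join(os.path.dirname(full_cmd), "python")
parts = cmd_python.split(os.sep)
env_python = os.path.join(os.sep.join(parts[:parts.index("envs") + 2]), "bin", "python")
print(env_python)  # -> /install/anaconda/envs/python3/bin/python
# get_program_python returns cmd_python if it exists, then env_python,
# and finally falls back to os.path.realpath(sys.executable).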
bcbio/bcbio-nextgen | bcbio/utils.py | local_path_export | def local_path_export(at_start=True, env_cmd=None):
"""Retrieve paths to local install, also including environment paths if env_cmd included.
"""
paths = [get_bcbio_bin()]
if env_cmd:
env_path = os.path.dirname(get_program_python(env_cmd))
if env_path not in paths:
paths.insert(0, env_path)
if at_start:
return "export PATH=%s:\"$PATH\" && " % (":".join(paths))
else:
return "export PATH=\"$PATH\":%s && " % (":".join(paths)) | python | def local_path_export(at_start=True, env_cmd=None):
"""Retrieve paths to local install, also including environment paths if env_cmd included.
"""
paths = [get_bcbio_bin()]
if env_cmd:
env_path = os.path.dirname(get_program_python(env_cmd))
if env_path not in paths:
paths.insert(0, env_path)
if at_start:
return "export PATH=%s:\"$PATH\" && " % (":".join(paths))
else:
return "export PATH=\"$PATH\":%s && " % (":".join(paths)) | [
"def",
"local_path_export",
"(",
"at_start",
"=",
"True",
",",
"env_cmd",
"=",
"None",
")",
":",
"paths",
"=",
"[",
"get_bcbio_bin",
"(",
")",
"]",
"if",
"env_cmd",
":",
"env_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"get_program_python",
"(",
... | Retrieve paths to local install, also including environment paths if env_cmd included. | [
"Retrieve",
"paths",
"to",
"local",
"install",
"also",
"including",
"environment",
"paths",
"if",
"env_cmd",
"included",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L837-L848 | train | 218,564 |
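A sketch of the two output forms; the path is illustrative:

from bcbio.utils import local_path_export

print(local_path_export())
# -> export PATH=/install/anaconda/bin:"$PATH" &&   (bcbio tools take precedence)
print(local_path_export(at_start=False))
# -> export PATH="$PATH":/install/anaconda/bin &&   (existing PATH entries win)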
bcbio/bcbio-nextgen | bcbio/utils.py | rbind | def rbind(dfs):
"""
acts like rbind for pandas dataframes
"""
if len(dfs) == 1:
return dfs[0]
df = dfs[0]
for d in dfs[1:]:
df = df.append(d)
return df | python | def rbind(dfs):
"""
acts like rbind for pandas dataframes
"""
if len(dfs) == 1:
return dfs[0]
df = dfs[0]
for d in dfs[1:]:
df = df.append(d)
return df | [
"def",
"rbind",
"(",
"dfs",
")",
":",
"if",
"len",
"(",
"dfs",
")",
"==",
"1",
":",
"return",
"dfs",
"[",
"0",
"]",
"df",
"=",
"dfs",
"[",
"0",
"]",
"for",
"d",
"in",
"dfs",
"[",
"1",
":",
"]",
":",
"df",
"=",
"df",
".",
"append",
"(",
... | acts like rbind for pandas dataframes | [
"acts",
"like",
"rbind",
"for",
"pandas",
"dataframes"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L902-L911 | train | 218,565 |
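A minimal usage sketch; note that DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, so on modern pandas the equivalent is pd.concat:

import pandas as pd
from bcbio.utils import rbind

df1 = pd.DataFrame({"a": [1, 2]})
df2 = pd.DataFrame({"a": [3, 4]})
merged = rbind([df1, df2])  # rows of df2 stacked below df1, like R's rbind
# modern equivalent: pd.concat([df1, df2])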
bcbio/bcbio-nextgen | bcbio/utils.py | sort_filenames | def sort_filenames(filenames):
"""
sort a list of files by filename only, ignoring the directory names
"""
basenames = [os.path.basename(x) for x in filenames]
indexes = [i[0] for i in sorted(enumerate(basenames), key=lambda x:x[1])]
return [filenames[x] for x in indexes] | python | def sort_filenames(filenames):
"""
sort a list of files by filename only, ignoring the directory names
"""
basenames = [os.path.basename(x) for x in filenames]
indexes = [i[0] for i in sorted(enumerate(basenames), key=lambda x:x[1])]
return [filenames[x] for x in indexes] | [
"def",
"sort_filenames",
"(",
"filenames",
")",
":",
"basenames",
"=",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"x",
")",
"for",
"x",
"in",
"filenames",
"]",
"indexes",
"=",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"sorted",
"(",
"enumerate"... | sort a list of files by filename only, ignoring the directory names | [
"sort",
"a",
"list",
"of",
"files",
"by",
"filename",
"only",
"ignoring",
"the",
"directory",
"names"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L936-L942 | train | 218,566 |
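A quick check of the basename-only ordering:

from bcbio.utils import sort_filenames

files = ["/runA/s2.fq", "/runB/s1.fq", "/runC/s3.fq"]
print(sort_filenames(files))
# -> ['/runB/s1.fq', '/runA/s2.fq', '/runC/s3.fq']  (sorted by basename, dirs ignored)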
bcbio/bcbio-nextgen | bcbio/utils.py | walk_json | def walk_json(d, func):
""" Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result
"""
if isinstance(d, Mapping):
return OrderedDict((k, walk_json(v, func)) for k, v in d.items())
elif isinstance(d, list):
return [walk_json(v, func) for v in d]
else:
return func(d) | python | def walk_json(d, func):
""" Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result
"""
if isinstance(d, Mapping):
return OrderedDict((k, walk_json(v, func)) for k, v in d.items())
elif isinstance(d, list):
return [walk_json(v, func) for v in d]
else:
return func(d) | [
"def",
"walk_json",
"(",
"d",
",",
"func",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"Mapping",
")",
":",
"return",
"OrderedDict",
"(",
"(",
"k",
",",
"walk_json",
"(",
"v",
",",
"func",
")",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items... | Walk over a parsed JSON nested structure `d`, apply `func` to each leaf element and replace it with result | [
"Walk",
"over",
"a",
"parsed",
"JSON",
"nested",
"structure",
"d",
"apply",
"func",
"to",
"each",
"leaf",
"element",
"and",
"replace",
"it",
"with",
"result"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L989-L997 | train | 218,567 |
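A small usage sketch applying a leaf transform to a nested structure:

from bcbio.utils import walk_json

cfg = {"threads": "4", "opts": ["1", "2"], "name": "x"}
out = walk_json(cfg, lambda v: int(v) if str(v).isdigit() else v)
print(out)  # -> OrderedDict([('threads', 4), ('opts', [1, 2]), ('name', 'x')])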
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | _link_bam_file | def _link_bam_file(in_file, new_dir, data):
"""Provide symlinks of BAM file and existing indexes if needed.
"""
new_dir = utils.safe_makedir(new_dir)
out_file = os.path.join(new_dir, os.path.basename(in_file))
if not utils.file_exists(out_file):
out_file = os.path.join(new_dir, "%s-prealign.bam" % dd.get_sample_name(data))
if data.get("cwl_keys"):
# Has indexes, we're okay to go with the original file
if utils.file_exists(in_file + ".bai"):
out_file = in_file
else:
utils.copy_plus(in_file, out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file | python | def _link_bam_file(in_file, new_dir, data):
"""Provide symlinks of BAM file and existing indexes if needed.
"""
new_dir = utils.safe_makedir(new_dir)
out_file = os.path.join(new_dir, os.path.basename(in_file))
if not utils.file_exists(out_file):
out_file = os.path.join(new_dir, "%s-prealign.bam" % dd.get_sample_name(data))
if data.get("cwl_keys"):
# Has indexes, we're okay to go with the original file
if utils.file_exists(in_file + ".bai"):
out_file = in_file
else:
utils.copy_plus(in_file, out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file | [
"def",
"_link_bam_file",
"(",
"in_file",
",",
"new_dir",
",",
"data",
")",
":",
"new_dir",
"=",
"utils",
".",
"safe_makedir",
"(",
"new_dir",
")",
"out_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"new_dir",
",",
"os",
".",
"path",
".",
"basename",... | Provide symlinks of BAM file and existing indexes if needed. | [
"Provide",
"symlinks",
"of",
"BAM",
"file",
"and",
"existing",
"indexes",
"if",
"needed",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L69-L84 | train | 218,568 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | _add_supplemental_bams | def _add_supplemental_bams(data):
"""Add supplemental files produced by alignment, useful for structural
variant calling.
"""
file_key = "work_bam"
if data.get(file_key):
for supext in ["disc", "sr"]:
base, ext = os.path.splitext(data[file_key])
test_file = "%s-%s%s" % (base, supext, ext)
if os.path.exists(test_file):
sup_key = file_key + "_plus"
if sup_key not in data:
data[sup_key] = {}
data[sup_key][supext] = test_file
return data | python | def _add_supplemental_bams(data):
"""Add supplemental files produced by alignment, useful for structural
variant calling.
"""
file_key = "work_bam"
if data.get(file_key):
for supext in ["disc", "sr"]:
base, ext = os.path.splitext(data[file_key])
test_file = "%s-%s%s" % (base, supext, ext)
if os.path.exists(test_file):
sup_key = file_key + "_plus"
if sup_key not in data:
data[sup_key] = {}
data[sup_key][supext] = test_file
return data | [
"def",
"_add_supplemental_bams",
"(",
"data",
")",
":",
"file_key",
"=",
"\"work_bam\"",
"if",
"data",
".",
"get",
"(",
"file_key",
")",
":",
"for",
"supext",
"in",
"[",
"\"disc\"",
",",
"\"sr\"",
"]",
":",
"base",
",",
"ext",
"=",
"os",
".",
"path",
... | Add supplemental files produced by alignment, useful for structural
variant calling. | [
"Add",
"supplemental",
"files",
"produced",
"by",
"alignment",
"useful",
"for",
"structural",
"variant",
"calling",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L86-L100 | train | 218,569 |
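A runnable illustration of the naming convention the helper probes for (paths hypothetical; the real function also checks os.path.exists):

import os

work_bam = "/align/s1.bam"  # hypothetical data["work_bam"]
base, ext = os.path.splitext(work_bam)
for supext in ["disc", "sr"]:
    print("%s-%s%s" % (base, supext, ext))
# -> /align/s1-disc.bam and /align/s1-sr.bam, e.g. as emitted by samblaster;
# files found this way are stored under data["work_bam_plus"][supext].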
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | _add_hla_files | def _add_hla_files(data):
"""Add extracted fastq files of HLA alleles for typing.
"""
if "hla" not in data:
data["hla"] = {}
align_file = dd.get_align_bam(data)
hla_dir = os.path.join(os.path.dirname(align_file), "hla")
if not os.path.exists(hla_dir):
hla_files = None
else:
hla_files = sorted(list(glob.glob(os.path.join(hla_dir, "%s.*.fq" % os.path.basename(align_file)))))
data["hla"]["fastq"] = hla_files
return data | python | def _add_hla_files(data):
"""Add extracted fastq files of HLA alleles for typing.
"""
if "hla" not in data:
data["hla"] = {}
align_file = dd.get_align_bam(data)
hla_dir = os.path.join(os.path.dirname(align_file), "hla")
if not os.path.exists(hla_dir):
hla_files = None
else:
hla_files = sorted(list(glob.glob(os.path.join(hla_dir, "%s.*.fq" % os.path.basename(align_file)))))
data["hla"]["fastq"] = hla_files
return data | [
"def",
"_add_hla_files",
"(",
"data",
")",
":",
"if",
"\"hla\"",
"not",
"in",
"data",
":",
"data",
"[",
"\"hla\"",
"]",
"=",
"{",
"}",
"align_file",
"=",
"dd",
".",
"get_align_bam",
"(",
"data",
")",
"hla_dir",
"=",
"os",
".",
"path",
".",
"join",
... | Add extracted fastq files of HLA alleles for typing. | [
"Add",
"extracted",
"fastq",
"files",
"of",
"HLA",
"alleles",
"for",
"typing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L102-L114 | train | 218,570 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | prep_samples | def prep_samples(*items):
"""Handle any global preparatory steps for samples with potentially shared data.
Avoids race conditions in postprocess alignment when performing prep tasks
on shared files between multiple similar samples.
Cleans input BED files to avoid issues with overlapping input segments.
"""
out = []
for data in (utils.to_single_data(x) for x in items):
data = cwlutils.normalize_missing(data)
data = cwlutils.unpack_tarballs(data, data)
data = clean_inputs(data)
out.append([data])
return out | python | def prep_samples(*items):
"""Handle any global preparatory steps for samples with potentially shared data.
Avoids race conditions in postprocess alignment when performing prep tasks
on shared files between multiple similar samples.
Cleans input BED files to avoid issues with overlapping input segments.
"""
out = []
for data in (utils.to_single_data(x) for x in items):
data = cwlutils.normalize_missing(data)
data = cwlutils.unpack_tarballs(data, data)
data = clean_inputs(data)
out.append([data])
return out | [
"def",
"prep_samples",
"(",
"*",
"items",
")",
":",
"out",
"=",
"[",
"]",
"for",
"data",
"in",
"(",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"items",
")",
":",
"data",
"=",
"cwlutils",
".",
"normalize_missing",
"(",
"data",
... | Handle any global preparatory steps for samples with potentially shared data.
Avoids race conditions in postprocess alignment when performing prep tasks
on shared files between multiple similar samples.
Cleans input BED files to avoid issues with overlapping input segments. | [
"Handle",
"any",
"global",
"preparatory",
"steps",
"for",
"samples",
"with",
"potentially",
"shared",
"data",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L192-L206 | train | 218,571 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | clean_inputs | def clean_inputs(data):
"""Clean BED input files to avoid overlapping segments that cause downstream issues.
Pre-merges inputs to avoid needing to call multiple times during later parallel steps.
"""
if not utils.get_in(data, ("config", "algorithm", "variant_regions_orig")):
data["config"]["algorithm"]["variant_regions_orig"] = dd.get_variant_regions(data)
clean_vr = clean_file(dd.get_variant_regions(data), data, prefix="cleaned-")
merged_vr = merge_overlaps(clean_vr, data)
data["config"]["algorithm"]["variant_regions"] = clean_vr
data["config"]["algorithm"]["variant_regions_merged"] = merged_vr
if dd.get_coverage(data):
if not utils.get_in(data, ("config", "algorithm", "coverage_orig")):
data["config"]["algorithm"]["coverage_orig"] = dd.get_coverage(data)
clean_cov_bed = clean_file(dd.get_coverage(data), data, prefix="cov-", simple=True)
merged_cov_bed = merge_overlaps(clean_cov_bed, data)
data["config"]["algorithm"]["coverage"] = clean_cov_bed
data["config"]["algorithm"]["coverage_merged"] = merged_cov_bed
if 'seq2c' in get_svcallers(data):
seq2c_ready_bed = prep_seq2c_bed(data)
if not seq2c_ready_bed:
logger.warning("Can't run Seq2C without a svregions or variant_regions BED file")
else:
data["config"]["algorithm"]["seq2c_bed_ready"] = seq2c_ready_bed
elif regions.get_sv_bed(data):
dd.set_sv_regions(data, clean_file(regions.get_sv_bed(data), data, prefix="svregions-"))
return data | python | def clean_inputs(data):
"""Clean BED input files to avoid overlapping segments that cause downstream issues.
Pre-merges inputs to avoid needing to call multiple times during later parallel steps.
"""
if not utils.get_in(data, ("config", "algorithm", "variant_regions_orig")):
data["config"]["algorithm"]["variant_regions_orig"] = dd.get_variant_regions(data)
clean_vr = clean_file(dd.get_variant_regions(data), data, prefix="cleaned-")
merged_vr = merge_overlaps(clean_vr, data)
data["config"]["algorithm"]["variant_regions"] = clean_vr
data["config"]["algorithm"]["variant_regions_merged"] = merged_vr
if dd.get_coverage(data):
if not utils.get_in(data, ("config", "algorithm", "coverage_orig")):
data["config"]["algorithm"]["coverage_orig"] = dd.get_coverage(data)
clean_cov_bed = clean_file(dd.get_coverage(data), data, prefix="cov-", simple=True)
merged_cov_bed = merge_overlaps(clean_cov_bed, data)
data["config"]["algorithm"]["coverage"] = clean_cov_bed
data["config"]["algorithm"]["coverage_merged"] = merged_cov_bed
if 'seq2c' in get_svcallers(data):
seq2c_ready_bed = prep_seq2c_bed(data)
if not seq2c_ready_bed:
logger.warning("Can't run Seq2C without a svregions or variant_regions BED file")
else:
data["config"]["algorithm"]["seq2c_bed_ready"] = seq2c_ready_bed
elif regions.get_sv_bed(data):
dd.set_sv_regions(data, clean_file(regions.get_sv_bed(data), data, prefix="svregions-"))
return data | [
"def",
"clean_inputs",
"(",
"data",
")",
":",
"if",
"not",
"utils",
".",
"get_in",
"(",
"data",
",",
"(",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"variant_regions_orig\"",
")",
")",
":",
"data",
"[",
"\"config\"",
"]",
"[",
"\"algorithm\"",
"]",
"[",
... | Clean BED input files to avoid overlapping segments that cause downstream issues.
Pre-merges inputs to avoid needing to call multiple times during later parallel steps. | [
"Clean",
"BED",
"input",
"files",
"to",
"avoid",
"overlapping",
"segments",
"that",
"cause",
"downstream",
"issues",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L208-L236 | train | 218,572 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | postprocess_alignment | def postprocess_alignment(data):
"""Perform post-processing steps required on full BAM files.
Prepares list of callable genome regions allowing subsequent parallelization.
"""
data = cwlutils.normalize_missing(utils.to_single_data(data))
data = cwlutils.unpack_tarballs(data, data)
bam_file = data.get("align_bam") or data.get("work_bam")
ref_file = dd.get_ref_file(data)
if vmulti.bam_needs_processing(data) and bam_file and bam_file.endswith(".bam"):
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data)))
bam_file_ready = os.path.join(out_dir, os.path.basename(bam_file))
if not utils.file_exists(bam_file_ready):
utils.symlink_plus(bam_file, bam_file_ready)
bam.index(bam_file_ready, data["config"])
covinfo = callable.sample_callable_bed(bam_file_ready, ref_file, data)
callable_region_bed, nblock_bed = \
callable.block_regions(covinfo.raw_callable, bam_file_ready, ref_file, data)
data["regions"] = {"nblock": nblock_bed,
"callable": covinfo.raw_callable,
"sample_callable": covinfo.callable,
"mapped_stats": readstats.get_cache_file(data)}
data["depth"] = covinfo.depth_files
data = coverage.assign_interval(data)
data = samtools.run_and_save(data)
data = recalibrate.prep_recal(data)
data = recalibrate.apply_recal(data)
elif dd.get_variant_regions(data):
callable_region_bed, nblock_bed = \
callable.block_regions(dd.get_variant_regions(data), bam_file, ref_file, data)
data["regions"] = {"nblock": nblock_bed, "callable": dd.get_variant_regions(data),
"sample_callable": dd.get_variant_regions(data)}
return [[data]] | python | def postprocess_alignment(data):
"""Perform post-processing steps required on full BAM files.
Prepares list of callable genome regions allowing subsequent parallelization.
"""
data = cwlutils.normalize_missing(utils.to_single_data(data))
data = cwlutils.unpack_tarballs(data, data)
bam_file = data.get("align_bam") or data.get("work_bam")
ref_file = dd.get_ref_file(data)
if vmulti.bam_needs_processing(data) and bam_file and bam_file.endswith(".bam"):
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data)))
bam_file_ready = os.path.join(out_dir, os.path.basename(bam_file))
if not utils.file_exists(bam_file_ready):
utils.symlink_plus(bam_file, bam_file_ready)
bam.index(bam_file_ready, data["config"])
covinfo = callable.sample_callable_bed(bam_file_ready, ref_file, data)
callable_region_bed, nblock_bed = \
callable.block_regions(covinfo.raw_callable, bam_file_ready, ref_file, data)
data["regions"] = {"nblock": nblock_bed,
"callable": covinfo.raw_callable,
"sample_callable": covinfo.callable,
"mapped_stats": readstats.get_cache_file(data)}
data["depth"] = covinfo.depth_files
data = coverage.assign_interval(data)
data = samtools.run_and_save(data)
data = recalibrate.prep_recal(data)
data = recalibrate.apply_recal(data)
elif dd.get_variant_regions(data):
callable_region_bed, nblock_bed = \
callable.block_regions(dd.get_variant_regions(data), bam_file, ref_file, data)
data["regions"] = {"nblock": nblock_bed, "callable": dd.get_variant_regions(data),
"sample_callable": dd.get_variant_regions(data)}
return [[data]] | [
"def",
"postprocess_alignment",
"(",
"data",
")",
":",
"data",
"=",
"cwlutils",
".",
"normalize_missing",
"(",
"utils",
".",
"to_single_data",
"(",
"data",
")",
")",
"data",
"=",
"cwlutils",
".",
"unpack_tarballs",
"(",
"data",
",",
"data",
")",
"bam_file",
... | Perform post-processing steps required on full BAM files.
Prepares list of callable genome regions allowing subsequent parallelization. | [
"Perform",
"post",
"-",
"processing",
"steps",
"required",
"on",
"full",
"BAM",
"files",
".",
"Prepares",
"list",
"of",
"callable",
"genome",
"regions",
"allowing",
"subsequent",
"parallelization",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L238-L270 | train | 218,573 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | _merge_out_from_infiles | def _merge_out_from_infiles(in_files):
"""Generate output merged file name from set of input files.
Handles non-shared filesystems where we don't know output path when setting
up split parts.
"""
fname = os.path.commonprefix([os.path.basename(f) for f in in_files])
while fname.endswith(("-", "_", ".")):
fname = fname[:-1]
ext = os.path.splitext(in_files[0])[-1]
dirname = os.path.dirname(in_files[0])
while dirname.endswith(("split", "merge")):
dirname = os.path.dirname(dirname)
return os.path.join(dirname, "%s%s" % (fname, ext)) | python | def _merge_out_from_infiles(in_files):
"""Generate output merged file name from set of input files.
Handles non-shared filesystems where we don't know output path when setting
up split parts.
"""
fname = os.path.commonprefix([os.path.basename(f) for f in in_files])
while fname.endswith(("-", "_", ".")):
fname = fname[:-1]
ext = os.path.splitext(in_files[0])[-1]
dirname = os.path.dirname(in_files[0])
while dirname.endswith(("split", "merge")):
dirname = os.path.dirname(dirname)
return os.path.join(dirname, "%s%s" % (fname, ext)) | [
"def",
"_merge_out_from_infiles",
"(",
"in_files",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"commonprefix",
"(",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
"for",
"f",
"in",
"in_files",
"]",
")",
"while",
"fname",
".",
"endswith",
... | Generate output merged file name from set of input files.
Handles non-shared filesystems where we don't know output path when setting
up split parts. | [
"Generate",
"output",
"merged",
"file",
"name",
"from",
"set",
"of",
"input",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L272-L285 | train | 218,574 |
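A runnable trace of the merged-name derivation (paths hypothetical):

import os

in_files = ["/work/split/sampleA-1.bam", "/work/split/sampleA-2.bam"]
fname = os.path.commonprefix([os.path.basename(f) for f in in_files])  # "sampleA-"
fname = fname.rstrip("-_.")                                            # "sampleA"
dirname = os.path.dirname(in_files[0])                                 # "/work/split"
# the helper walks up past trailing "split"/"merge" directories, giving:
print(os.path.join(os.path.dirname(dirname), fname + ".bam"))  # -> /work/sampleA.bam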
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | delayed_bam_merge | def delayed_bam_merge(data):
"""Perform a merge on previously prepped files, delayed in processing.
Handles merging of associated split read and discordant files if present.
"""
if data.get("combine"):
assert len(data["combine"].keys()) == 1
file_key = list(data["combine"].keys())[0]
extras = []
for x in data["combine"][file_key].get("extras", []):
if isinstance(x, (list, tuple)):
extras.extend(x)
else:
extras.append(x)
if file_key in data:
extras.append(data[file_key])
in_files = sorted(list(set(extras)))
out_file = tz.get_in(["combine", file_key, "out"], data, _merge_out_from_infiles(in_files))
sup_exts = data.get(file_key + "_plus", {}).keys()
for ext in list(sup_exts) + [""]:
merged_file = None
if os.path.exists(utils.append_stem(out_file, "-" + ext)):
cur_out_file, cur_in_files = out_file, []
if ext:
cur_in_files = list(filter(os.path.exists, (utils.append_stem(f, "-" + ext) for f in in_files)))
cur_out_file = utils.append_stem(out_file, "-" + ext) if len(cur_in_files) > 0 else None
else:
cur_in_files, cur_out_file = in_files, out_file
if cur_out_file:
config = copy.deepcopy(data["config"])
if len(cur_in_files) > 0:
merged_file = merge_bam_files(cur_in_files, os.path.dirname(cur_out_file), data,
out_file=cur_out_file)
else:
assert os.path.exists(cur_out_file)
merged_file = cur_out_file
if merged_file:
if ext:
data[file_key + "_plus"][ext] = merged_file
else:
data[file_key] = merged_file
data.pop("region", None)
data.pop("combine", None)
return [[data]] | python | def delayed_bam_merge(data):
"""Perform a merge on previously prepped files, delayed in processing.
Handles merging of associated split read and discordant files if present.
"""
if data.get("combine"):
assert len(data["combine"].keys()) == 1
file_key = list(data["combine"].keys())[0]
extras = []
for x in data["combine"][file_key].get("extras", []):
if isinstance(x, (list, tuple)):
extras.extend(x)
else:
extras.append(x)
if file_key in data:
extras.append(data[file_key])
in_files = sorted(list(set(extras)))
out_file = tz.get_in(["combine", file_key, "out"], data, _merge_out_from_infiles(in_files))
sup_exts = data.get(file_key + "_plus", {}).keys()
for ext in list(sup_exts) + [""]:
merged_file = None
if os.path.exists(utils.append_stem(out_file, "-" + ext)):
cur_out_file, cur_in_files = out_file, []
if ext:
cur_in_files = list(filter(os.path.exists, (utils.append_stem(f, "-" + ext) for f in in_files)))
cur_out_file = utils.append_stem(out_file, "-" + ext) if len(cur_in_files) > 0 else None
else:
cur_in_files, cur_out_file = in_files, out_file
if cur_out_file:
config = copy.deepcopy(data["config"])
if len(cur_in_files) > 0:
merged_file = merge_bam_files(cur_in_files, os.path.dirname(cur_out_file), data,
out_file=cur_out_file)
else:
assert os.path.exists(cur_out_file)
merged_file = cur_out_file
if merged_file:
if ext:
data[file_key + "_plus"][ext] = merged_file
else:
data[file_key] = merged_file
data.pop("region", None)
data.pop("combine", None)
return [[data]] | [
"def",
"delayed_bam_merge",
"(",
"data",
")",
":",
"if",
"data",
".",
"get",
"(",
"\"combine\"",
")",
":",
"assert",
"len",
"(",
"data",
"[",
"\"combine\"",
"]",
".",
"keys",
"(",
")",
")",
"==",
"1",
"file_key",
"=",
"list",
"(",
"data",
"[",
"\"c... | Perform a merge on previously prepped files, delayed in processing.
Handles merging of associated split read and discordant files if present. | [
"Perform",
"a",
"merge",
"on",
"previously",
"prepped",
"files",
"delayed",
"in",
"processing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L287-L330 | train | 218,575 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | merge_split_alignments | def merge_split_alignments(data):
"""Merge split BAM inputs generated by common workflow language runs.
"""
data = utils.to_single_data(data)
data = _merge_align_bams(data)
data = _merge_hla_fastq_inputs(data)
return [[data]] | python | def merge_split_alignments(data):
"""Merge split BAM inputs generated by common workflow language runs.
"""
data = utils.to_single_data(data)
data = _merge_align_bams(data)
data = _merge_hla_fastq_inputs(data)
return [[data]] | [
"def",
"merge_split_alignments",
"(",
"data",
")",
":",
"data",
"=",
"utils",
".",
"to_single_data",
"(",
"data",
")",
"data",
"=",
"_merge_align_bams",
"(",
"data",
")",
"data",
"=",
"_merge_hla_fastq_inputs",
"(",
"data",
")",
"return",
"[",
"[",
"data",
... | Merge split BAM inputs generated by common workflow language runs. | [
"Merge",
"split",
"BAM",
"inputs",
"generated",
"by",
"common",
"workflow",
"language",
"runs",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L332-L338 | train | 218,576 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | _merge_align_bams | def _merge_align_bams(data):
"""Merge multiple alignment BAMs, including split and discordant reads.
"""
for key in (["work_bam"], ["work_bam_plus", "disc"], ["work_bam_plus", "sr"], ["umi_bam"]):
in_files = tz.get_in(key, data, [])
if not isinstance(in_files, (list, tuple)):
in_files = [in_files]
in_files = [x for x in in_files if x and x != "None"]
if in_files:
ext = "-%s" % key[-1] if len(key) > 1 else ""
out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data),
"%s-sort%s.bam" % (dd.get_sample_name(data), ext))
merged_file = merge_bam_files(in_files, utils.safe_makedir(os.path.dirname(out_file)),
data, out_file=out_file)
data = tz.update_in(data, key, lambda x: merged_file)
else:
data = tz.update_in(data, key, lambda x: None)
if "align_bam" in data and "work_bam" in data:
data["align_bam"] = data["work_bam"]
return data | python | def _merge_align_bams(data):
"""Merge multiple alignment BAMs, including split and discordant reads.
"""
for key in (["work_bam"], ["work_bam_plus", "disc"], ["work_bam_plus", "sr"], ["umi_bam"]):
in_files = tz.get_in(key, data, [])
if not isinstance(in_files, (list, tuple)):
in_files = [in_files]
in_files = [x for x in in_files if x and x != "None"]
if in_files:
ext = "-%s" % key[-1] if len(key) > 1 else ""
out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data),
"%s-sort%s.bam" % (dd.get_sample_name(data), ext))
merged_file = merge_bam_files(in_files, utils.safe_makedir(os.path.dirname(out_file)),
data, out_file=out_file)
data = tz.update_in(data, key, lambda x: merged_file)
else:
data = tz.update_in(data, key, lambda x: None)
if "align_bam" in data and "work_bam" in data:
data["align_bam"] = data["work_bam"]
return data | [
"def",
"_merge_align_bams",
"(",
"data",
")",
":",
"for",
"key",
"in",
"(",
"[",
"\"work_bam\"",
"]",
",",
"[",
"\"work_bam_plus\"",
",",
"\"disc\"",
"]",
",",
"[",
"\"work_bam_plus\"",
",",
"\"sr\"",
"]",
",",
"[",
"\"umi_bam\"",
"]",
")",
":",
"in_file... | Merge multiple alignment BAMs, including split and discordant reads. | [
"Merge",
"multiple",
"alignment",
"BAMs",
"including",
"split",
"and",
"discordant",
"reads",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L340-L359 | train | 218,577 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | _merge_hla_fastq_inputs | def _merge_hla_fastq_inputs(data):
"""Merge HLA inputs from a split initial alignment.
"""
hla_key = ["hla", "fastq"]
hla_sample_files = [x for x in (tz.get_in(hla_key, data) or []) if x and x != "None"]
merged_hlas = None
if hla_sample_files:
out_files = collections.defaultdict(list)
for hla_file in utils.flatten(hla_sample_files):
rehla = re.search(r".hla.(?P<hlatype>[\w-]+).fq", hla_file)
if rehla:
hlatype = rehla.group("hlatype")
out_files[hlatype].append(hla_file)
if len(out_files) > 0:
hla_outdir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla"))
merged_hlas = []
for hlatype, files in out_files.items():
out_file = os.path.join(hla_outdir, "%s-%s.fq" % (dd.get_sample_name(data), hlatype))
optitype.combine_hla_fqs([(hlatype, f) for f in files], out_file, data)
merged_hlas.append(out_file)
data = tz.update_in(data, hla_key, lambda x: merged_hlas)
return data | python | def _merge_hla_fastq_inputs(data):
"""Merge HLA inputs from a split initial alignment.
"""
hla_key = ["hla", "fastq"]
hla_sample_files = [x for x in (tz.get_in(hla_key, data) or []) if x and x != "None"]
merged_hlas = None
if hla_sample_files:
out_files = collections.defaultdict(list)
for hla_file in utils.flatten(hla_sample_files):
rehla = re.search(r".hla.(?P<hlatype>[\w-]+).fq", hla_file)
if rehla:
hlatype = rehla.group("hlatype")
out_files[hlatype].append(hla_file)
if len(out_files) > 0:
hla_outdir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla"))
merged_hlas = []
for hlatype, files in out_files.items():
out_file = os.path.join(hla_outdir, "%s-%s.fq" % (dd.get_sample_name(data), hlatype))
optitype.combine_hla_fqs([(hlatype, f) for f in files], out_file, data)
merged_hlas.append(out_file)
data = tz.update_in(data, hla_key, lambda x: merged_hlas)
return data | [
"def",
"_merge_hla_fastq_inputs",
"(",
"data",
")",
":",
"hla_key",
"=",
"[",
"\"hla\"",
",",
"\"fastq\"",
"]",
"hla_sample_files",
"=",
"[",
"x",
"for",
"x",
"in",
"(",
"tz",
".",
"get_in",
"(",
"hla_key",
",",
"data",
")",
"or",
"[",
"]",
")",
"if"... | Merge HLA inputs from a split initial alignment. | [
"Merge",
"HLA",
"inputs",
"from",
"a",
"split",
"initial",
"alignment",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L361-L383 | train | 218,578 |
bcbio/bcbio-nextgen | bcbio/pipeline/sample.py | prepare_bcbio_samples | def prepare_bcbio_samples(sample):
"""
Function that will use specific function to merge input files
"""
logger.info("Preparing %s files %s to merge into %s." % (sample['name'], sample['files'], sample['out_file']))
if sample['fn'] == "fq_merge":
out_file = fq_merge(sample['files'], sample['out_file'], sample['config'])
elif sample['fn'] == "bam_merge":
out_file = bam_merge(sample['files'], sample['out_file'], sample['config'])
elif sample['fn'] == "query_gsm":
out_file = query_gsm(sample['files'], sample['out_file'], sample['config'])
elif sample['fn'] == "query_srr":
out_file = query_srr(sample['files'], sample['out_file'], sample['config'])
sample['out_file'] = out_file
return [sample] | python | def prepare_bcbio_samples(sample):
"""
Function that will use specific function to merge input files
"""
logger.info("Preparing %s files %s to merge into %s." % (sample['name'], sample['files'], sample['out_file']))
if sample['fn'] == "fq_merge":
out_file = fq_merge(sample['files'], sample['out_file'], sample['config'])
elif sample['fn'] == "bam_merge":
out_file = bam_merge(sample['files'], sample['out_file'], sample['config'])
elif sample['fn'] == "query_gsm":
out_file = query_gsm(sample['files'], sample['out_file'], sample['config'])
elif sample['fn'] == "query_srr":
out_file = query_srr(sample['files'], sample['out_file'], sample['config'])
sample['out_file'] = out_file
return [sample] | [
"def",
"prepare_bcbio_samples",
"(",
"sample",
")",
":",
"logger",
".",
"info",
"(",
"\"Preparing %s files %s to merge into %s.\"",
"%",
"(",
"sample",
"[",
"'name'",
"]",
",",
"sample",
"[",
"'files'",
"]",
",",
"sample",
"[",
"'out_file'",
"]",
")",
")",
"... | Function that will use specific function to merge input files | [
"Function",
"that",
"will",
"use",
"specific",
"function",
"to",
"merge",
"input",
"files"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/sample.py#L385-L399 | train | 218,579 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/__init__.py | _get_calls | def _get_calls(data, cnv_only=False):
"""Retrieve calls, organized by name, to use for heterogeneity analysis.
"""
cnvs_supported = set(["cnvkit", "battenberg"])
out = {}
for sv in data.get("sv", []):
if not cnv_only or sv["variantcaller"] in cnvs_supported:
out[sv["variantcaller"]] = sv
return out | python | def _get_calls(data, cnv_only=False):
"""Retrieve calls, organized by name, to use for heterogeneity analysis.
"""
cnvs_supported = set(["cnvkit", "battenberg"])
out = {}
for sv in data.get("sv", []):
if not cnv_only or sv["variantcaller"] in cnvs_supported:
out[sv["variantcaller"]] = sv
return out | [
"def",
"_get_calls",
"(",
"data",
",",
"cnv_only",
"=",
"False",
")",
":",
"cnvs_supported",
"=",
"set",
"(",
"[",
"\"cnvkit\"",
",",
"\"battenberg\"",
"]",
")",
"out",
"=",
"{",
"}",
"for",
"sv",
"in",
"data",
".",
"get",
"(",
"\"sv\"",
",",
"[",
... | Retrieve calls, organized by name, to use for heterogeneity analysis. | [
"Retrieve",
"calls",
"organized",
"by",
"name",
"to",
"use",
"for",
"heterogeneity",
"analysis",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/__init__.py#L17-L25 | train | 218,580 |
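An illustrative sketch of the filtering; the sv entries are abbreviated, hypothetical structures:

data = {"sv": [{"variantcaller": "cnvkit"}, {"variantcaller": "manta"}]}
# _get_calls(data, cnv_only=True)  -> {"cnvkit": {"variantcaller": "cnvkit"}}
# _get_calls(data)                 -> both entries, keyed by caller name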
bcbio/bcbio-nextgen | bcbio/heterogeneity/__init__.py | get_variants | def get_variants(data, include_germline=False):
"""Retrieve set of variant calls to use for heterogeneity analysis.
"""
data = utils.deepish_copy(data)
supported = ["precalled", "vardict", "vardict-java", "vardict-perl",
"freebayes", "octopus", "strelka2"]
    # Right now mutect2 and mutect do not provide the heterozygous germline
    # calls needed to be useful: https://github.com/bcbio/bcbio-nextgen/issues/2464
# supported += ["mutect2", "mutect"]
if include_germline:
supported.insert(1, "gatk-haplotype")
out = []
# CWL based input
if isinstance(data.get("variants"), dict) and "samples" in data["variants"]:
cur_vs = []
# Unpack single sample list of files
if (isinstance(data["variants"]["samples"], (list, tuple)) and
len(data["variants"]["samples"]) == 1 and isinstance(data["variants"]["samples"][0], (list, tuple))):
data["variants"]["samples"] = data["variants"]["samples"][0]
for fname in data["variants"]["samples"]:
variantcaller = utils.splitext_plus(os.path.basename(fname))[0]
variantcaller = variantcaller.replace(dd.get_sample_name(data) + "-", "")
for batch in dd.get_batches(data):
variantcaller = variantcaller.replace(batch + "-", "")
cur_vs.append({"vrn_file": fname, "variantcaller": variantcaller})
data["variants"] = cur_vs
for v in data.get("variants", []):
if v["variantcaller"] in supported and v.get("vrn_file"):
out.append((supported.index(v["variantcaller"]), v))
out.sort()
return [xs[1] for xs in out] | python | def get_variants(data, include_germline=False):
"""Retrieve set of variant calls to use for heterogeneity analysis.
"""
data = utils.deepish_copy(data)
supported = ["precalled", "vardict", "vardict-java", "vardict-perl",
"freebayes", "octopus", "strelka2"]
    # Right now mutect2 and mutect do not provide the heterozygous germline
    # calls needed to be useful: https://github.com/bcbio/bcbio-nextgen/issues/2464
# supported += ["mutect2", "mutect"]
if include_germline:
supported.insert(1, "gatk-haplotype")
out = []
# CWL based input
if isinstance(data.get("variants"), dict) and "samples" in data["variants"]:
cur_vs = []
# Unpack single sample list of files
if (isinstance(data["variants"]["samples"], (list, tuple)) and
len(data["variants"]["samples"]) == 1 and isinstance(data["variants"]["samples"][0], (list, tuple))):
data["variants"]["samples"] = data["variants"]["samples"][0]
for fname in data["variants"]["samples"]:
variantcaller = utils.splitext_plus(os.path.basename(fname))[0]
variantcaller = variantcaller.replace(dd.get_sample_name(data) + "-", "")
for batch in dd.get_batches(data):
variantcaller = variantcaller.replace(batch + "-", "")
cur_vs.append({"vrn_file": fname, "variantcaller": variantcaller})
data["variants"] = cur_vs
for v in data.get("variants", []):
if v["variantcaller"] in supported and v.get("vrn_file"):
out.append((supported.index(v["variantcaller"]), v))
out.sort()
return [xs[1] for xs in out] | [
"def",
"get_variants",
"(",
"data",
",",
"include_germline",
"=",
"False",
")",
":",
"data",
"=",
"utils",
".",
"deepish_copy",
"(",
"data",
")",
"supported",
"=",
"[",
"\"precalled\"",
",",
"\"vardict\"",
",",
"\"vardict-java\"",
",",
"\"vardict-perl\"",
",",... | Retrieve set of variant calls to use for heterogeneity analysis. | [
"Retrieve",
"set",
"of",
"variant",
"calls",
"to",
"use",
"for",
"heterogeneity",
"analysis",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/__init__.py#L27-L57 | train | 218,581 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/__init__.py | _ready_for_het_analysis | def _ready_for_het_analysis(items):
"""Check if a sample has input information for heterogeneity analysis.
We currently require a tumor/normal sample containing both CNV and variant calls.
"""
paired = vcfutils.get_paired_bams([dd.get_align_bam(d) for d in items], items)
has_het = any(dd.get_hetcaller(d) for d in items)
if has_het and paired:
return get_variants(paired.tumor_data) and _get_calls(paired.tumor_data, cnv_only=True) | python | def _ready_for_het_analysis(items):
"""Check if a sample has input information for heterogeneity analysis.
We currently require a tumor/normal sample containing both CNV and variant calls.
"""
paired = vcfutils.get_paired_bams([dd.get_align_bam(d) for d in items], items)
has_het = any(dd.get_hetcaller(d) for d in items)
if has_het and paired:
return get_variants(paired.tumor_data) and _get_calls(paired.tumor_data, cnv_only=True) | [
"def",
"_ready_for_het_analysis",
"(",
"items",
")",
":",
"paired",
"=",
"vcfutils",
".",
"get_paired_bams",
"(",
"[",
"dd",
".",
"get_align_bam",
"(",
"d",
")",
"for",
"d",
"in",
"items",
"]",
",",
"items",
")",
"has_het",
"=",
"any",
"(",
"dd",
".",
... | Check if a sample has input information for heterogeneity analysis.
We currently require a tumor/normal sample containing both CNV and variant calls. | [
"Check",
"if",
"a",
"sample",
"has",
"input",
"information",
"for",
"heterogeneity",
"analysis",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/__init__.py#L59-L67 | train | 218,582 |
bcbio/bcbio-nextgen | bcbio/heterogeneity/__init__.py | run | def run(items, run_parallel):
"""Top level entry point for calculating heterogeneity, handles organization and job distribution.
"""
to_process = []
extras = []
for batch, cur_items in _group_by_batches(items).items():
if _ready_for_het_analysis(cur_items):
to_process.append((batch, cur_items))
else:
for data in cur_items:
extras.append([data])
processed = run_parallel("heterogeneity_estimate", ([xs, b, xs[0]["config"]] for b, xs in to_process))
return _group_by_sample_and_batch(extras + processed) | python | def run(items, run_parallel):
"""Top level entry point for calculating heterogeneity, handles organization and job distribution.
"""
to_process = []
extras = []
for batch, cur_items in _group_by_batches(items).items():
if _ready_for_het_analysis(cur_items):
to_process.append((batch, cur_items))
else:
for data in cur_items:
extras.append([data])
processed = run_parallel("heterogeneity_estimate", ([xs, b, xs[0]["config"]] for b, xs in to_process))
return _group_by_sample_and_batch(extras + processed) | [
"def",
"run",
"(",
"items",
",",
"run_parallel",
")",
":",
"to_process",
"=",
"[",
"]",
"extras",
"=",
"[",
"]",
"for",
"batch",
",",
"cur_items",
"in",
"_group_by_batches",
"(",
"items",
")",
".",
"items",
"(",
")",
":",
"if",
"_ready_for_het_analysis",... | Top level entry point for calculating heterogeneity, handles organization and job distribution. | [
"Top",
"level",
"entry",
"point",
"for",
"calculating",
"heterogeneity",
"handles",
"organization",
"and",
"job",
"distribution",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/__init__.py#L122-L134 | train | 218,583 |
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | create_inputs | def create_inputs(data):
"""Index input reads and prepare groups of reads to process concurrently.
Allows parallelization of alignment beyond processors available on a single
machine. Prepares a bgzip and grabix indexed file for retrieving sections
of files.
"""
from bcbio.pipeline import sample
data = cwlutils.normalize_missing(data)
aligner = tz.get_in(("config", "algorithm", "aligner"), data)
# CRAM files must be converted to bgzipped fastq, unless not aligning.
# Also need to prep and download remote files.
if not ("files" in data and data["files"] and aligner and (_is_cram_input(data["files"]) or
objectstore.is_remote(data["files"][0]))):
# skip indexing on samples without input files or not doing alignment
if ("files" not in data or not data["files"] or data["files"][0] is None or not aligner):
return [[data]]
data["files_orig"] = data["files"]
data["files"] = prep_fastq_inputs(data["files"], data)
# preparation converts illumina into sanger format
data["config"]["algorithm"]["quality_format"] = "standard"
# Handle any necessary trimming
data = utils.to_single_data(sample.trim_sample(data)[0])
_prep_grabix_indexes(data["files"], data)
data = _set_align_split_size(data)
out = []
if tz.get_in(["config", "algorithm", "align_split_size"], data):
splits = _find_read_splits(data["files"][0], int(data["config"]["algorithm"]["align_split_size"]))
for split in splits:
cur_data = copy.deepcopy(data)
cur_data["align_split"] = split
out.append([cur_data])
else:
out.append([data])
if "output_cwl_keys" in data:
out = cwlutils.samples_to_records([utils.to_single_data(x) for x in out],
["files", "align_split", "config__algorithm__quality_format"])
return out | python | def create_inputs(data):
"""Index input reads and prepare groups of reads to process concurrently.
Allows parallelization of alignment beyond processors available on a single
machine. Prepares a bgzip and grabix indexed file for retrieving sections
of files.
"""
from bcbio.pipeline import sample
data = cwlutils.normalize_missing(data)
aligner = tz.get_in(("config", "algorithm", "aligner"), data)
# CRAM files must be converted to bgzipped fastq, unless not aligning.
# Also need to prep and download remote files.
if not ("files" in data and data["files"] and aligner and (_is_cram_input(data["files"]) or
objectstore.is_remote(data["files"][0]))):
# skip indexing on samples without input files or not doing alignment
if ("files" not in data or not data["files"] or data["files"][0] is None or not aligner):
return [[data]]
data["files_orig"] = data["files"]
data["files"] = prep_fastq_inputs(data["files"], data)
# preparation converts illumina into sanger format
data["config"]["algorithm"]["quality_format"] = "standard"
# Handle any necessary trimming
data = utils.to_single_data(sample.trim_sample(data)[0])
_prep_grabix_indexes(data["files"], data)
data = _set_align_split_size(data)
out = []
if tz.get_in(["config", "algorithm", "align_split_size"], data):
splits = _find_read_splits(data["files"][0], int(data["config"]["algorithm"]["align_split_size"]))
for split in splits:
cur_data = copy.deepcopy(data)
cur_data["align_split"] = split
out.append([cur_data])
else:
out.append([data])
if "output_cwl_keys" in data:
out = cwlutils.samples_to_records([utils.to_single_data(x) for x in out],
["files", "align_split", "config__algorithm__quality_format"])
return out | [
"def",
"create_inputs",
"(",
"data",
")",
":",
"from",
"bcbio",
".",
"pipeline",
"import",
"sample",
"data",
"=",
"cwlutils",
".",
"normalize_missing",
"(",
"data",
")",
"aligner",
"=",
"tz",
".",
"get_in",
"(",
"(",
"\"config\"",
",",
"\"algorithm\"",
","... | Index input reads and prepare groups of reads to process concurrently.
Allows parallelization of alignment beyond processors available on a single
machine. Prepares a bgzip and grabix indexed file for retrieving sections
of files. | [
"Index",
"input",
"reads",
"and",
"prepare",
"groups",
"of",
"reads",
"to",
"process",
"concurrently",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L25-L62 | train | 218,584 |
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _set_align_split_size | def _set_align_split_size(data):
"""Set useful align_split_size, generating an estimate if it doesn't exist.
We try to split on larger inputs and avoid too many pieces, aiming for size
chunks of 5Gb or at most 100 splits.
The size estimate used in calculations is 20 million reads for ~5Gb.
For UMI calculations we skip splitting since we're going to align and
re-align after consensus.
For CWL runs, we pick larger split sizes to avoid overhead of staging each chunk.
"""
if cwlutils.is_cwl_run(data):
target_size = 20 # Gb
target_size_reads = 80 # million reads
else:
target_size = 5 # Gb
target_size_reads = 20 # million reads
max_splits = 100 # Avoid too many pieces, causing merge memory problems
val = dd.get_align_split_size(data)
umi_consensus = dd.get_umi_consensus(data)
if val is None:
if not umi_consensus:
total_size = 0 # Gb
# Use original files if we might have reduced the size of our prepped files
input_files = data.get("files_orig", []) if dd.get_save_diskspace(data) else data.get("files", [])
for fname in input_files:
if os.path.exists(fname):
total_size += os.path.getsize(fname) / (1024.0 * 1024.0 * 1024.0)
# Only set if we have files and are bigger than the target size
if total_size > target_size:
data["config"]["algorithm"]["align_split_size"] = \
int(1e6 * _pick_align_split_size(total_size, target_size,
target_size_reads, max_splits))
elif val:
        assert not umi_consensus, "Cannot set align_split_size to %s with UMI consensus specified" % val
return data | python | def _set_align_split_size(data):
"""Set useful align_split_size, generating an estimate if it doesn't exist.
We try to split on larger inputs and avoid too many pieces, aiming for size
chunks of 5Gb or at most 100 splits.
The size estimate used in calculations is 20 million reads for ~5Gb.
For UMI calculations we skip splitting since we're going to align and
re-align after consensus.
For CWL runs, we pick larger split sizes to avoid overhead of staging each chunk.
"""
if cwlutils.is_cwl_run(data):
target_size = 20 # Gb
target_size_reads = 80 # million reads
else:
target_size = 5 # Gb
target_size_reads = 20 # million reads
max_splits = 100 # Avoid too many pieces, causing merge memory problems
val = dd.get_align_split_size(data)
umi_consensus = dd.get_umi_consensus(data)
if val is None:
if not umi_consensus:
total_size = 0 # Gb
# Use original files if we might have reduced the size of our prepped files
input_files = data.get("files_orig", []) if dd.get_save_diskspace(data) else data.get("files", [])
for fname in input_files:
if os.path.exists(fname):
total_size += os.path.getsize(fname) / (1024.0 * 1024.0 * 1024.0)
# Only set if we have files and are bigger than the target size
if total_size > target_size:
data["config"]["algorithm"]["align_split_size"] = \
int(1e6 * _pick_align_split_size(total_size, target_size,
target_size_reads, max_splits))
elif val:
        assert not umi_consensus, "Cannot set align_split_size to %s with UMI consensus specified" % val
return data | [
"def",
"_set_align_split_size",
"(",
"data",
")",
":",
"if",
"cwlutils",
".",
"is_cwl_run",
"(",
"data",
")",
":",
"target_size",
"=",
"20",
"# Gb",
"target_size_reads",
"=",
"80",
"# million reads",
"else",
":",
"target_size",
"=",
"5",
"# Gb",
"target_size_r... | Set useful align_split_size, generating an estimate if it doesn't exist.
We try to split on larger inputs and avoid too many pieces, aiming for size
chunks of 5Gb or at most 100 splits.
The size estimate used in calculations is 20 million reads for ~5Gb.
For UMI calculations we skip splitting since we're going to align and
re-align after consensus.
For CWL runs, we pick larger split sizes to avoid overhead of staging each chunk. | [
"Set",
"useful",
"align_split_size",
"generating",
"an",
"estimate",
"if",
"it",
"doesn",
"t",
"exist",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L64-L101 | train | 218,585 |
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _pick_align_split_size | def _pick_align_split_size(total_size, target_size, target_size_reads, max_splits):
"""Do the work of picking an alignment split size for the given criteria.
"""
# Too many pieces, increase our target size to get max_splits pieces
if total_size // target_size > max_splits:
piece_size = total_size // max_splits
return int(piece_size * target_size_reads / target_size)
else:
return int(target_size_reads) | python | def _pick_align_split_size(total_size, target_size, target_size_reads, max_splits):
"""Do the work of picking an alignment split size for the given criteria.
"""
# Too many pieces, increase our target size to get max_splits pieces
if total_size // target_size > max_splits:
piece_size = total_size // max_splits
return int(piece_size * target_size_reads / target_size)
else:
return int(target_size_reads) | [
"def",
"_pick_align_split_size",
"(",
"total_size",
",",
"target_size",
",",
"target_size_reads",
",",
"max_splits",
")",
":",
"# Too many pieces, increase our target size to get max_splits pieces",
"if",
"total_size",
"//",
"target_size",
">",
"max_splits",
":",
"piece_size"... | Do the work of picking an alignment split size for the given criteria. | [
"Do",
"the",
"work",
"of",
"picking",
"an",
"alignment",
"split",
"size",
"for",
"the",
"given",
"criteria",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L103-L111 | train | 218,586 |
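A worked pass through the arithmetic (importing the private helper directly, which plain Python allows):

from bcbio.ngsalign.alignprep import _pick_align_split_size

print(_pick_align_split_size(600, 5, 20, 100))  # 600//5=120 > 100 splits, so
# piece_size = 600//100 = 6 Gb -> int(6 * 20 / 5) = 24 million reads per chunk
print(_pick_align_split_size(30, 5, 20, 100))   # 30//5=6 <= 100 -> default 20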
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | split_namedpipe_cls | def split_namedpipe_cls(pair1_file, pair2_file, data):
"""Create a commandline suitable for use as a named pipe with reads in a given region.
"""
if "align_split" in data:
start, end = [int(x) for x in data["align_split"].split("-")]
else:
start, end = None, None
if pair1_file.endswith(".sdf"):
assert not pair2_file, pair2_file
return rtg.to_fastq_apipe_cl(pair1_file, start, end)
else:
out = []
for in_file in pair1_file, pair2_file:
if in_file:
assert _get_grabix_index(in_file), "Need grabix index for %s" % in_file
out.append("<(grabix grab {in_file} {start} {end})".format(**locals()))
else:
out.append(None)
return out | python | def split_namedpipe_cls(pair1_file, pair2_file, data):
"""Create a commandline suitable for use as a named pipe with reads in a given region.
"""
if "align_split" in data:
start, end = [int(x) for x in data["align_split"].split("-")]
else:
start, end = None, None
if pair1_file.endswith(".sdf"):
assert not pair2_file, pair2_file
return rtg.to_fastq_apipe_cl(pair1_file, start, end)
else:
out = []
for in_file in pair1_file, pair2_file:
if in_file:
assert _get_grabix_index(in_file), "Need grabix index for %s" % in_file
out.append("<(grabix grab {in_file} {start} {end})".format(**locals()))
else:
out.append(None)
return out | [
"def",
"split_namedpipe_cls",
"(",
"pair1_file",
",",
"pair2_file",
",",
"data",
")",
":",
"if",
"\"align_split\"",
"in",
"data",
":",
"start",
",",
"end",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"data",
"[",
"\"align_split\"",
"]",
".",
"spl... | Create a commandline suitable for use as a named pipe with reads in a given region. | [
"Create",
"a",
"commandline",
"suitable",
"for",
"use",
"as",
"a",
"named",
"pipe",
"with",
"reads",
"in",
"a",
"given",
"region",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L113-L131 | train | 218,587 |
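
For fastq input the return value is a bash process substitution per pair file, so downstream aligners read the region as if it were a plain file. A hedged illustration with a hypothetical file name:

```python
# With data["align_split"] == "1-2000000", the string built per input is:
data = {"align_split": "1-2000000"}
start, end = [int(x) for x in data["align_split"].split("-")]
in_file = "sample_R1.fastq.gz"  # hypothetical; must carry a grabix .gbi index
cl = "<(grabix grab {in_file} {start} {end})".format(**locals())
assert cl == "<(grabix grab sample_R1.fastq.gz 1 2000000)"
```
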
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _seqtk_fastq_prep_cl | def _seqtk_fastq_prep_cl(data, in_file=None, read_num=0):
"""Provide a commandline for prep of fastq inputs with seqtk.
Handles fast conversion of fastq quality scores and trimming.
"""
needs_convert = dd.get_quality_format(data).lower() == "illumina"
trim_ends = dd.get_trim_ends(data)
seqtk = config_utils.get_program("seqtk", data["config"])
if in_file:
in_file = objectstore.cl_input(in_file)
else:
in_file = "/dev/stdin"
cmd = ""
if needs_convert:
cmd += "{seqtk} seq -Q64 -V {in_file}".format(**locals())
if trim_ends:
left_trim, right_trim = trim_ends[0:2] if data.get("read_num", read_num) == 0 else trim_ends[2:4]
if left_trim or right_trim:
trim_infile = "/dev/stdin" if needs_convert else in_file
pipe = " | " if needs_convert else ""
cmd += "{pipe}{seqtk} trimfq -b {left_trim} -e {right_trim} {trim_infile}".format(**locals())
return cmd | python | def _seqtk_fastq_prep_cl(data, in_file=None, read_num=0):
"""Provide a commandline for prep of fastq inputs with seqtk.
Handles fast conversion of fastq quality scores and trimming.
"""
needs_convert = dd.get_quality_format(data).lower() == "illumina"
trim_ends = dd.get_trim_ends(data)
seqtk = config_utils.get_program("seqtk", data["config"])
if in_file:
in_file = objectstore.cl_input(in_file)
else:
in_file = "/dev/stdin"
cmd = ""
if needs_convert:
cmd += "{seqtk} seq -Q64 -V {in_file}".format(**locals())
if trim_ends:
left_trim, right_trim = trim_ends[0:2] if data.get("read_num", read_num) == 0 else trim_ends[2:4]
if left_trim or right_trim:
trim_infile = "/dev/stdin" if needs_convert else in_file
pipe = " | " if needs_convert else ""
cmd += "{pipe}{seqtk} trimfq -b {left_trim} -e {right_trim} {trim_infile}".format(**locals())
return cmd | [
"def",
"_seqtk_fastq_prep_cl",
"(",
"data",
",",
"in_file",
"=",
"None",
",",
"read_num",
"=",
"0",
")",
":",
"needs_convert",
"=",
"dd",
".",
"get_quality_format",
"(",
"data",
")",
".",
"lower",
"(",
")",
"==",
"\"illumina\"",
"trim_ends",
"=",
"dd",
"... | Provide a commandline for prep of fastq inputs with seqtk.
Handles fast conversion of fastq quality scores and trimming. | [
"Provide",
"a",
"commandline",
"for",
"prep",
"of",
"fastq",
"inputs",
"with",
"seqtk",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L133-L154 | train | 218,588 |
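
Under an assumed configuration of Illumina 1.3-1.7 qualities with trim_ends = [3, 0, 3, 0] (3bp off the left of both reads), the helper composes a conversion-then-trim pipeline. A hedged reconstruction of the resulting string:

```python
# seqtk seq -Q64 -V shifts Illumina-64 quality scores to Sanger-33; the
# converted stream is then piped into seqtk trimfq for the end trimming.
seqtk, in_file, left_trim, right_trim = "seqtk", "sample_R1.fastq.gz", 3, 0
cmd = ("{seqtk} seq -Q64 -V {in_file}"
       " | {seqtk} trimfq -b {left_trim} -e {right_trim} /dev/stdin").format(**locals())
# -> 'seqtk seq -Q64 -V sample_R1.fastq.gz | seqtk trimfq -b 3 -e 0 /dev/stdin'
```
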
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | fastq_convert_pipe_cl | def fastq_convert_pipe_cl(in_file, data):
"""Create an anonymous pipe converting Illumina 1.3-1.7 to Sanger.
Uses seqtk: https://github.com/lh3/seqtk
"""
cmd = _seqtk_fastq_prep_cl(data, in_file)
if not cmd:
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
cmd = cat_cmd + " " + in_file
return "<(%s)" % cmd | python | def fastq_convert_pipe_cl(in_file, data):
"""Create an anonymous pipe converting Illumina 1.3-1.7 to Sanger.
Uses seqtk: https://github.com/lh3/seqtk
"""
cmd = _seqtk_fastq_prep_cl(data, in_file)
if not cmd:
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
cmd = cat_cmd + " " + in_file
return "<(%s)" % cmd | [
"def",
"fastq_convert_pipe_cl",
"(",
"in_file",
",",
"data",
")",
":",
"cmd",
"=",
"_seqtk_fastq_prep_cl",
"(",
"data",
",",
"in_file",
")",
"if",
"not",
"cmd",
":",
"cat_cmd",
"=",
"\"zcat\"",
"if",
"in_file",
".",
"endswith",
"(",
"\".gz\"",
")",
"else",... | Create an anonymous pipe converting Illumina 1.3-1.7 to Sanger.
Uses seqtk: https://github.com/lh3/seqtk | [
"Create",
"an",
"anonymous",
"pipe",
"converting",
"Illumina",
"1",
".",
"3",
"-",
"1",
".",
"7",
"to",
"Sanger",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L156-L165 | train | 218,589 |
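
When no conversion or trimming is needed the helper falls back to a plain cat/zcat inside the process substitution. A simplified sketch of both outcomes (the real version consults the sample configuration via _seqtk_fastq_prep_cl):

```python
def convert_pipe(in_file, needs_convert):
    # Mirrors the branch above for the two common cases.
    if needs_convert:
        cmd = "seqtk seq -Q64 -V %s" % in_file
    else:
        cmd = ("zcat " if in_file.endswith(".gz") else "cat ") + in_file
    return "<(%s)" % cmd

assert convert_pipe("sample.fastq.gz", False) == "<(zcat sample.fastq.gz)"
```
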
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | parallel_multiplier | def parallel_multiplier(items):
"""Determine if we will be parallelizing items during processing.
"""
multiplier = 1
for data in (x[0] for x in items):
if (tz.get_in(["config", "algorithm", "align_split_size"], data) is not False and
tz.get_in(["algorithm", "align_split_size"], data) is not False):
multiplier += 50
return multiplier | python | def parallel_multiplier(items):
"""Determine if we will be parallelizing items during processing.
"""
multiplier = 1
for data in (x[0] for x in items):
if (tz.get_in(["config", "algorithm", "align_split_size"], data) is not False and
tz.get_in(["algorithm", "align_split_size"], data) is not False):
multiplier += 50
return multiplier | [
"def",
"parallel_multiplier",
"(",
"items",
")",
":",
"multiplier",
"=",
"1",
"for",
"data",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"items",
")",
":",
"if",
"(",
"tz",
".",
"get_in",
"(",
"[",
"\"config\"",
",",
"\"algorithm\"",
",",
"\"a... | Determine if we will be parallelizing items during processing. | [
"Determine",
"if",
"we",
"will",
"be",
"parallelizing",
"items",
"during",
"processing",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L169-L177 | train | 218,590 |
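
As a worked example, two samples that both allow splitting yield a multiplier of 1 + 50 + 50 = 101, signalling the scheduler to provision up to that many concurrent jobs (values below are illustrative):

```python
# Neither config disables align_split_size, so each sample adds 50:
items = [[{"config": {"algorithm": {"align_split_size": 20000000}}}],
         [{"config": {"algorithm": {"align_split_size": 20000000}}}]]
# parallel_multiplier(items) -> 101
```
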
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | setup_combine | def setup_combine(final_file, data):
"""Setup the data and outputs to allow merging data back together.
"""
if "align_split" not in data:
return final_file, data
align_dir = os.path.dirname(final_file)
base, ext = os.path.splitext(os.path.basename(final_file))
start, end = [int(x) for x in data["align_split"].split("-")]
out_file = os.path.join(utils.safe_makedir(os.path.join(align_dir, "split")),
"%s-%s_%s%s" % (base, start, end, ext))
data["combine"] = {"work_bam": {"out": final_file, "extras": []}}
return out_file, data | python | def setup_combine(final_file, data):
"""Setup the data and outputs to allow merging data back together.
"""
if "align_split" not in data:
return final_file, data
align_dir = os.path.dirname(final_file)
base, ext = os.path.splitext(os.path.basename(final_file))
start, end = [int(x) for x in data["align_split"].split("-")]
out_file = os.path.join(utils.safe_makedir(os.path.join(align_dir, "split")),
"%s-%s_%s%s" % (base, start, end, ext))
data["combine"] = {"work_bam": {"out": final_file, "extras": []}}
return out_file, data | [
"def",
"setup_combine",
"(",
"final_file",
",",
"data",
")",
":",
"if",
"\"align_split\"",
"not",
"in",
"data",
":",
"return",
"final_file",
",",
"data",
"align_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"final_file",
")",
"base",
",",
"ext",
"="... | Setup the data and outputs to allow merging data back together. | [
"Setup",
"the",
"data",
"and",
"outputs",
"to",
"allow",
"merging",
"data",
"back",
"together",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L181-L192 | train | 218,591 |
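
The effect of the bookkeeping is easiest to see on a concrete split. Assuming a hypothetical final file and an align_split of "1-2000000", the chunk is redirected into a split/ subdirectory with the region embedded in its name:

```python
import os

final_file = "/work/align/sample/sample-sort.bam"  # hypothetical
start, end = 1, 2000000
base, ext = os.path.splitext(os.path.basename(final_file))
out_file = os.path.join(os.path.dirname(final_file), "split",
                        "%s-%s_%s%s" % (base, start, end, ext))
assert out_file == "/work/align/sample/split/sample-sort-1_2000000.bam"
```
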
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | merge_split_alignments | def merge_split_alignments(samples, run_parallel):
"""Manage merging split alignments back into a final working BAM file.
Perform de-duplication on the final merged file.
"""
ready = []
file_key = "work_bam"
to_merge = collections.defaultdict(list)
for data in (xs[0] for xs in samples):
if data.get("combine"):
out_key = tz.get_in(["combine", file_key, "out"], data)
if not out_key:
out_key = data["rgnames"]["lane"]
to_merge[out_key].append(data)
else:
ready.append([data])
ready_merge = []
hla_merges = []
for mgroup in to_merge.values():
cur_data = mgroup[0]
del cur_data["align_split"]
for x in mgroup[1:]:
cur_data["combine"][file_key]["extras"].append(x[file_key])
ready_merge.append([cur_data])
cur_hla = None
for d in mgroup:
hla_files = tz.get_in(["hla", "fastq"], d)
if hla_files:
if not cur_hla:
cur_hla = {"rgnames": {"sample": dd.get_sample_name(cur_data)},
"config": cur_data["config"], "dirs": cur_data["dirs"],
"hla": {"fastq": []}}
cur_hla["hla"]["fastq"].append(hla_files)
if cur_hla:
hla_merges.append([cur_hla])
if not tz.get_in(["config", "algorithm", "kraken"], data):
# kraken requires fasta filenames from data['files'] as input.
# We don't want to remove those files if kraken qc is required.
_save_fastq_space(samples)
merged = run_parallel("delayed_bam_merge", ready_merge)
hla_merge_raw = run_parallel("merge_split_alignments", hla_merges)
hla_merges = {}
for hla_merge in [x[0] for x in hla_merge_raw]:
hla_merges[dd.get_sample_name(hla_merge)] = tz.get_in(["hla", "fastq"], hla_merge)
# Add stable 'align_bam' target to use for retrieving raw alignment
out = []
for data in [x[0] for x in merged + ready]:
if data.get("work_bam"):
data["align_bam"] = data["work_bam"]
if dd.get_sample_name(data) in hla_merges:
data["hla"]["fastq"] = hla_merges[dd.get_sample_name(data)]
else:
hla_files = glob.glob(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla", "*.fq"))
if hla_files:
data["hla"]["fastq"] = hla_files
out.append([data])
return out | python | def merge_split_alignments(samples, run_parallel):
"""Manage merging split alignments back into a final working BAM file.
Perform de-duplication on the final merged file.
"""
ready = []
file_key = "work_bam"
to_merge = collections.defaultdict(list)
for data in (xs[0] for xs in samples):
if data.get("combine"):
out_key = tz.get_in(["combine", file_key, "out"], data)
if not out_key:
out_key = data["rgnames"]["lane"]
to_merge[out_key].append(data)
else:
ready.append([data])
ready_merge = []
hla_merges = []
for mgroup in to_merge.values():
cur_data = mgroup[0]
del cur_data["align_split"]
for x in mgroup[1:]:
cur_data["combine"][file_key]["extras"].append(x[file_key])
ready_merge.append([cur_data])
cur_hla = None
for d in mgroup:
hla_files = tz.get_in(["hla", "fastq"], d)
if hla_files:
if not cur_hla:
cur_hla = {"rgnames": {"sample": dd.get_sample_name(cur_data)},
"config": cur_data["config"], "dirs": cur_data["dirs"],
"hla": {"fastq": []}}
cur_hla["hla"]["fastq"].append(hla_files)
if cur_hla:
hla_merges.append([cur_hla])
if not tz.get_in(["config", "algorithm", "kraken"], data):
# kraken requires fasta filenames from data['files'] as input.
# We don't want to remove those files if kraken qc is required.
_save_fastq_space(samples)
merged = run_parallel("delayed_bam_merge", ready_merge)
hla_merge_raw = run_parallel("merge_split_alignments", hla_merges)
hla_merges = {}
for hla_merge in [x[0] for x in hla_merge_raw]:
hla_merges[dd.get_sample_name(hla_merge)] = tz.get_in(["hla", "fastq"], hla_merge)
# Add stable 'align_bam' target to use for retrieving raw alignment
out = []
for data in [x[0] for x in merged + ready]:
if data.get("work_bam"):
data["align_bam"] = data["work_bam"]
if dd.get_sample_name(data) in hla_merges:
data["hla"]["fastq"] = hla_merges[dd.get_sample_name(data)]
else:
hla_files = glob.glob(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla", "*.fq"))
if hla_files:
data["hla"]["fastq"] = hla_files
out.append([data])
return out | [
"def",
"merge_split_alignments",
"(",
"samples",
",",
"run_parallel",
")",
":",
"ready",
"=",
"[",
"]",
"file_key",
"=",
"\"work_bam\"",
"to_merge",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"data",
"in",
"(",
"xs",
"[",
"0",
"]",
... | Manage merging split alignments back into a final working BAM file.
Perform de-duplication on the final merged file. | [
"Manage",
"merging",
"split",
"alignments",
"back",
"into",
"a",
"final",
"working",
"BAM",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L194-L252 | train | 218,592 |
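
The core of the merge is the grouping step: chunks sharing an output target are collapsed onto the first chunk, whose combine block accumulates the extra BAMs. A reduced sketch of that bookkeeping, independent of the pipeline plumbing:

```python
import collections

chunks = [{"combine": {"work_bam": {"out": "s.bam", "extras": []}}, "work_bam": "s-1.bam"},
          {"combine": {"work_bam": {"out": "s.bam", "extras": []}}, "work_bam": "s-2.bam"}]
groups = collections.defaultdict(list)
for c in chunks:
    groups[c["combine"]["work_bam"]["out"]].append(c)
for group in groups.values():
    first = group[0]
    for extra in group[1:]:
        first["combine"]["work_bam"]["extras"].append(extra["work_bam"])
# groups["s.bam"][0]["combine"]["work_bam"]["extras"] == ["s-2.bam"]
```
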
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _save_fastq_space | def _save_fastq_space(items):
"""Potentially save fastq space prior to merging, since alignments done.
"""
to_cleanup = {}
for data in (utils.to_single_data(x) for x in items):
for fname in data.get("files", []):
if os.path.realpath(fname).startswith(dd.get_work_dir(data)):
to_cleanup[fname] = data["config"]
for fname, config in to_cleanup.items():
utils.save_diskspace(fname, "Cleanup prep files after alignment finished", config) | python | def _save_fastq_space(items):
"""Potentially save fastq space prior to merging, since alignments done.
"""
to_cleanup = {}
for data in (utils.to_single_data(x) for x in items):
for fname in data.get("files", []):
if os.path.realpath(fname).startswith(dd.get_work_dir(data)):
to_cleanup[fname] = data["config"]
for fname, config in to_cleanup.items():
utils.save_diskspace(fname, "Cleanup prep files after alignment finished", config) | [
"def",
"_save_fastq_space",
"(",
"items",
")",
":",
"to_cleanup",
"=",
"{",
"}",
"for",
"data",
"in",
"(",
"utils",
".",
"to_single_data",
"(",
"x",
")",
"for",
"x",
"in",
"items",
")",
":",
"for",
"fname",
"in",
"data",
".",
"get",
"(",
"\"files\"",... | Potentially save fastq space prior to merging, since alignments done. | [
"Potentially",
"save",
"fastq",
"space",
"prior",
"to",
"merging",
"since",
"alignments",
"done",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L254-L263 | train | 218,593 |
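
Cleanup only considers files that physically live under the work directory; realpath resolves symlinks first, so linked-in source fastqs are never removed. A reduced sketch of that filter (hypothetical helper, not the bcbio API):

```python
import os

def cleanup_candidates(fnames, work_dir):
    # os.path.realpath resolves symlinks, excluding linked-in source data
    return [f for f in fnames if os.path.realpath(f).startswith(work_dir)]
```
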
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | total_reads_from_grabix | def total_reads_from_grabix(in_file):
"""Retrieve total reads in a fastq file from grabix index.
"""
gbi_file = _get_grabix_index(in_file)
if gbi_file:
with open(gbi_file) as in_handle:
next(in_handle) # throw away
num_lines = int(next(in_handle).strip())
assert num_lines % 4 == 0, "Expected lines to be multiple of 4"
return num_lines // 4
else:
return 0 | python | def total_reads_from_grabix(in_file):
"""Retrieve total reads in a fastq file from grabix index.
"""
gbi_file = _get_grabix_index(in_file)
if gbi_file:
with open(gbi_file) as in_handle:
next(in_handle) # throw away
num_lines = int(next(in_handle).strip())
assert num_lines % 4 == 0, "Expected lines to be multiple of 4"
return num_lines // 4
else:
return 0 | [
"def",
"total_reads_from_grabix",
"(",
"in_file",
")",
":",
"gbi_file",
"=",
"_get_grabix_index",
"(",
"in_file",
")",
"if",
"gbi_file",
":",
"with",
"open",
"(",
"gbi_file",
")",
"as",
"in_handle",
":",
"next",
"(",
"in_handle",
")",
"# throw away",
"num_line... | Retrieve total reads in a fastq file from grabix index. | [
"Retrieve",
"total",
"reads",
"in",
"a",
"fastq",
"file",
"from",
"grabix",
"index",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L281-L292 | train | 218,594 |
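
The count is read straight off the grabix index: the first line is skipped and the second holds the total compressed line count, which must be a multiple of 4 for fastq. A sketch of that parsing (the .gbi layout here is inferred from the code above, not an independent spec):

```python
def reads_from_gbi(gbi_file):
    with open(gbi_file) as in_handle:
        next(in_handle)                        # header line, discarded
        num_lines = int(next(in_handle).strip())
    assert num_lines % 4 == 0, "fastq records are 4 lines each"
    return num_lines // 4
```
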
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _find_read_splits | def _find_read_splits(in_file, split_size):
"""Determine sections of fastq files to process in splits.
Assumes a 4 line order to input files (name, read, name, quality).
grabix is 1-based inclusive, so return coordinates in that format.
"""
num_lines = total_reads_from_grabix(in_file) * 4
assert num_lines and num_lines > 0, "Did not find grabix index reads: %s %s" % (in_file, num_lines)
split_lines = split_size * 4
chunks = []
last = 1
for chunki in range(num_lines // split_lines + min(1, num_lines % split_lines)):
new = last + split_lines - 1
chunks.append((last, min(new, num_lines)))
last = new + 1
return ["%s-%s" % (s, e) for s, e in chunks] | python | def _find_read_splits(in_file, split_size):
"""Determine sections of fastq files to process in splits.
Assumes a 4 line order to input files (name, read, name, quality).
grabix is 1-based inclusive, so return coordinates in that format.
"""
num_lines = total_reads_from_grabix(in_file) * 4
assert num_lines and num_lines > 0, "Did not find grabix index reads: %s %s" % (in_file, num_lines)
split_lines = split_size * 4
chunks = []
last = 1
for chunki in range(num_lines // split_lines + min(1, num_lines % split_lines)):
new = last + split_lines - 1
chunks.append((last, min(new, num_lines)))
last = new + 1
return ["%s-%s" % (s, e) for s, e in chunks] | [
"def",
"_find_read_splits",
"(",
"in_file",
",",
"split_size",
")",
":",
"num_lines",
"=",
"total_reads_from_grabix",
"(",
"in_file",
")",
"*",
"4",
"assert",
"num_lines",
"and",
"num_lines",
">",
"0",
",",
"\"Did not find grabix index reads: %s %s\"",
"%",
"(",
"... | Determine sections of fastq files to process in splits.
Assumes a 4 line order to input files (name, read, name, quality).
grabix is 1-based inclusive, so return coordinates in that format. | [
"Determine",
"sections",
"of",
"fastq",
"files",
"to",
"process",
"in",
"splits",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L294-L309 | train | 218,595 |
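
A worked example makes the 1-based inclusive coordinates concrete: 25 reads (100 lines) split into chunks of 10 reads (40 lines) gives three regions, the last one short:

```python
# Reproducing the chunking above for 25 reads at split_size=10:
num_lines, split_lines = 25 * 4, 10 * 4
chunks, last = [], 1
for _ in range(num_lines // split_lines + min(1, num_lines % split_lines)):
    new = last + split_lines - 1
    chunks.append((last, min(new, num_lines)))
    last = new + 1
assert chunks == [(1, 40), (41, 80), (81, 100)]
```
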
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _ready_gzip_fastq | def _ready_gzip_fastq(in_files, data, require_bgzip=False):
"""Check if we have gzipped fastq and don't need format conversion or splitting.
Avoid forcing bgzip if we don't need indexed files.
"""
all_gzipped = all([not x or x.endswith(".gz") for x in in_files])
if require_bgzip and all_gzipped:
all_gzipped = all([not x or not _check_gzipped_input(x, data)[0] for x in in_files])
needs_convert = dd.get_quality_format(data).lower() == "illumina"
needs_trim = dd.get_trim_ends(data)
do_splitting = dd.get_align_split_size(data) is not False
return (all_gzipped and not needs_convert and not do_splitting and
not objectstore.is_remote(in_files[0]) and not needs_trim and not get_downsample_params(data)) | python | def _ready_gzip_fastq(in_files, data, require_bgzip=False):
"""Check if we have gzipped fastq and don't need format conversion or splitting.
Avoid forcing bgzip if we don't need indexed files.
"""
all_gzipped = all([not x or x.endswith(".gz") for x in in_files])
if require_bgzip and all_gzipped:
all_gzipped = all([not x or not _check_gzipped_input(x, data)[0] for x in in_files])
needs_convert = dd.get_quality_format(data).lower() == "illumina"
needs_trim = dd.get_trim_ends(data)
do_splitting = dd.get_align_split_size(data) is not False
return (all_gzipped and not needs_convert and not do_splitting and
not objectstore.is_remote(in_files[0]) and not needs_trim and not get_downsample_params(data)) | [
"def",
"_ready_gzip_fastq",
"(",
"in_files",
",",
"data",
",",
"require_bgzip",
"=",
"False",
")",
":",
"all_gzipped",
"=",
"all",
"(",
"[",
"not",
"x",
"or",
"x",
".",
"endswith",
"(",
"\".gz\"",
")",
"for",
"x",
"in",
"in_files",
"]",
")",
"if",
"r... | Check if we have gzipped fastq and don't need format conversion or splitting.
Avoid forcing bgzip if we don't need indexed files. | [
"Check",
"if",
"we",
"have",
"gzipped",
"fastq",
"and",
"don",
"t",
"need",
"format",
"conversion",
"or",
"splitting",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L319-L331 | train | 218,596 |
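
The return value is effectively a conjunction: any single prep requirement disqualifies using the inputs as-is. An equivalent sketch of the decision (the require_bgzip refinement is omitted):

```python
def ready_as_is(all_gzipped, needs_convert, needs_trim, do_splitting,
                is_remote, downsampling):
    # Mirrors the boolean above: all conditions must hold simultaneously.
    return (all_gzipped and not needs_convert and not needs_trim
            and not do_splitting and not is_remote and not downsampling)
```
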
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | prep_fastq_inputs | def prep_fastq_inputs(in_files, data):
"""Prepare bgzipped fastq inputs
"""
if len(in_files) == 1 and _is_bam_input(in_files):
out = _bgzip_from_bam(in_files[0], data["dirs"], data)
elif len(in_files) == 1 and _is_cram_input(in_files):
out = _bgzip_from_cram(in_files[0], data["dirs"], data)
elif len(in_files) in [1, 2] and _ready_gzip_fastq(in_files, data):
out = _symlink_in_files(in_files, data)
else:
if len(in_files) > 2:
fpairs = fastq.combine_pairs(in_files)
pair_types = set([len(xs) for xs in fpairs])
assert len(pair_types) == 1
fpairs.sort(key=lambda x: os.path.basename(x[0]))
organized = [[xs[0] for xs in fpairs]]
if len(fpairs[0]) > 1:
organized.append([xs[1] for xs in fpairs])
in_files = organized
parallel = {"type": "local", "num_jobs": len(in_files),
"cores_per_job": max(1, data["config"]["algorithm"]["num_cores"] // len(in_files))}
inputs = [{"in_file": x, "read_num": i, "dirs": data["dirs"], "config": data["config"],
"is_cwl": "cwl_keys" in data,
"rgnames": data["rgnames"]}
for i, x in enumerate(in_files) if x]
out = run_multicore(_bgzip_from_fastq_parallel, [[d] for d in inputs], data["config"], parallel)
return out | python | def prep_fastq_inputs(in_files, data):
"""Prepare bgzipped fastq inputs
"""
if len(in_files) == 1 and _is_bam_input(in_files):
out = _bgzip_from_bam(in_files[0], data["dirs"], data)
elif len(in_files) == 1 and _is_cram_input(in_files):
out = _bgzip_from_cram(in_files[0], data["dirs"], data)
elif len(in_files) in [1, 2] and _ready_gzip_fastq(in_files, data):
out = _symlink_in_files(in_files, data)
else:
if len(in_files) > 2:
fpairs = fastq.combine_pairs(in_files)
pair_types = set([len(xs) for xs in fpairs])
assert len(pair_types) == 1
fpairs.sort(key=lambda x: os.path.basename(x[0]))
organized = [[xs[0] for xs in fpairs]]
if len(fpairs[0]) > 1:
organized.append([xs[1] for xs in fpairs])
in_files = organized
parallel = {"type": "local", "num_jobs": len(in_files),
"cores_per_job": max(1, data["config"]["algorithm"]["num_cores"] // len(in_files))}
inputs = [{"in_file": x, "read_num": i, "dirs": data["dirs"], "config": data["config"],
"is_cwl": "cwl_keys" in data,
"rgnames": data["rgnames"]}
for i, x in enumerate(in_files) if x]
out = run_multicore(_bgzip_from_fastq_parallel, [[d] for d in inputs], data["config"], parallel)
return out | [
"def",
"prep_fastq_inputs",
"(",
"in_files",
",",
"data",
")",
":",
"if",
"len",
"(",
"in_files",
")",
"==",
"1",
"and",
"_is_bam_input",
"(",
"in_files",
")",
":",
"out",
"=",
"_bgzip_from_bam",
"(",
"in_files",
"[",
"0",
"]",
",",
"data",
"[",
"\"dir... | Prepare bgzipped fastq inputs | [
"Prepare",
"bgzipped",
"fastq",
"inputs"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L333-L359 | train | 218,597 |
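
The least obvious branch is the >2 input case, where per-lane files are paired and regrouped into one list per read number before parallel bgzipping. A reduced illustration with hypothetical lane files:

```python
import os

# fastq.combine_pairs would yield something like this for two PE lanes:
fpairs = [["L2_R1.fq.gz", "L2_R2.fq.gz"], ["L1_R1.fq.gz", "L1_R2.fq.gz"]]
fpairs.sort(key=lambda x: os.path.basename(x[0]))
organized = [[xs[0] for xs in fpairs]]           # all read 1 files
if len(fpairs[0]) > 1:
    organized.append([xs[1] for xs in fpairs])   # all read 2 files
assert organized == [["L1_R1.fq.gz", "L2_R1.fq.gz"],
                     ["L1_R2.fq.gz", "L2_R2.fq.gz"]]
```
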
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _symlink_or_copy_grabix | def _symlink_or_copy_grabix(in_file, out_file, data):
"""We cannot symlink in CWL, but may be able to use inputs or copy
"""
if cwlutils.is_cwl_run(data):
# Has grabix indexes, we're okay to go
if utils.file_exists(in_file + ".gbi"):
out_file = in_file
else:
utils.copy_plus(in_file, out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file | python | def _symlink_or_copy_grabix(in_file, out_file, data):
"""We cannot symlink in CWL, but may be able to use inputs or copy
"""
if cwlutils.is_cwl_run(data):
# Has grabix indexes, we're okay to go
if utils.file_exists(in_file + ".gbi"):
out_file = in_file
else:
utils.copy_plus(in_file, out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file | [
"def",
"_symlink_or_copy_grabix",
"(",
"in_file",
",",
"out_file",
",",
"data",
")",
":",
"if",
"cwlutils",
".",
"is_cwl_run",
"(",
"data",
")",
":",
"# Has grabix indexes, we're okay to go",
"if",
"utils",
".",
"file_exists",
"(",
"in_file",
"+",
"\".gbi\"",
")... | We cannot symlink in CWL, but may be able to use inputs or copy | [
"We",
"cannot",
"symlink",
"in",
"CWL",
"but",
"may",
"be",
"able",
"to",
"use",
"inputs",
"or",
"copy"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L372-L383 | train | 218,598 |
bcbio/bcbio-nextgen | bcbio/ngsalign/alignprep.py | _prep_grabix_indexes | def _prep_grabix_indexes(in_files, data):
"""Parallel preparation of grabix indexes for files.
"""
# if we have gzipped but not bgzipped, add a fake index for CWL support
# Also skips bgzip indexing if we don't need alignment splitting
if _ready_gzip_fastq(in_files, data) and (not _ready_gzip_fastq(in_files, data, require_bgzip=True) or
dd.get_align_split_size(data) is False):
for in_file in in_files:
if not utils.file_exists(in_file + ".gbi"):
with file_transaction(data, in_file + ".gbi") as tx_gbi_file:
with open(tx_gbi_file, "w") as out_handle:
out_handle.write("Not grabix indexed; index added for compatibility.\n")
else:
items = [[{"bgzip_file": x, "config": copy.deepcopy(data["config"])}] for x in in_files if x]
run_multicore(_grabix_index, items, data["config"])
return data | python | def _prep_grabix_indexes(in_files, data):
"""Parallel preparation of grabix indexes for files.
"""
# if we have gzipped but not bgzipped, add a fake index for CWL support
# Also skips bgzip indexing if we don't need alignment splitting
if _ready_gzip_fastq(in_files, data) and (not _ready_gzip_fastq(in_files, data, require_bgzip=True) or
dd.get_align_split_size(data) is False):
for in_file in in_files:
if not utils.file_exists(in_file + ".gbi"):
with file_transaction(data, in_file + ".gbi") as tx_gbi_file:
with open(tx_gbi_file, "w") as out_handle:
out_handle.write("Not grabix indexed; index added for compatibility.\n")
else:
items = [[{"bgzip_file": x, "config": copy.deepcopy(data["config"])}] for x in in_files if x]
run_multicore(_grabix_index, items, data["config"])
return data | [
"def",
"_prep_grabix_indexes",
"(",
"in_files",
",",
"data",
")",
":",
"# if we have gzipped but not bgzipped, add a fake index for CWL support",
"# Also skips bgzip indexing if we don't need alignment splitting",
"if",
"_ready_gzip_fastq",
"(",
"in_files",
",",
"data",
")",
"and",... | Parallel preparation of grabix indexes for files. | [
"Parallel",
"preparation",
"of",
"grabix",
"indexes",
"for",
"files",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L385-L400 | train | 218,599 |
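
The placeholder branch writes a dummy .gbi so CWL file staging finds the expected secondary file even when no random access is needed. A minimal sketch of that fallback (real indexing is delegated to _grabix_index above):

```python
import os

def ensure_placeholder_gbi(in_file):
    gbi = in_file + ".gbi"
    if not os.path.exists(gbi):
        with open(gbi, "w") as out_handle:
            out_handle.write("Not grabix indexed; index added for compatibility.\n")
    return gbi
```
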