repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
prep_samples_and_config
def prep_samples_and_config(run_folder, ldetails, fastq_dir, config):
    """Prepare merged sample fastq files plus a flowcell-wide sample configuration.

    Merges fastq files that were split by lane and by the bcl2fastq
    preparation step, then writes a bcbio YAML configuration covering
    every sample that produced non-empty output.
    """
    merged_dir = utils.safe_makedir(os.path.join(fastq_dir, "merged"))
    num_cores = utils.get_in(config, ("algorithm", "num_cores"), 1)
    grouped = _group_same_samples(ldetails)
    prepped = joblib.Parallel(num_cores)(
        joblib.delayed(_prep_sample_and_config)(group, fastq_dir, merged_dir)
        for group in grouped)
    config_file = _write_sample_config(run_folder, [x for x in prepped if x])
    return config_file, merged_dir
python
def prep_samples_and_config(run_folder, ldetails, fastq_dir, config): """Prepare sample fastq files and provide global sample configuration for the flowcell. Handles merging of fastq files split by lane and also by the bcl2fastq preparation process. """ fastq_final_dir = utils.safe_makedir(os.path.join(fastq_dir, "merged")) cores = utils.get_in(config, ("algorithm", "num_cores"), 1) ldetails = joblib.Parallel(cores)(joblib.delayed(_prep_sample_and_config)(x, fastq_dir, fastq_final_dir) for x in _group_same_samples(ldetails)) config_file = _write_sample_config(run_folder, [x for x in ldetails if x]) return config_file, fastq_final_dir
[ "def", "prep_samples_and_config", "(", "run_folder", ",", "ldetails", ",", "fastq_dir", ",", "config", ")", ":", "fastq_final_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "fastq_dir", ",", "\"merged\"", ")", ")", "cores"...
Prepare sample fastq files and provide global sample configuration for the flowcell. Handles merging of fastq files split by lane and also by the bcl2fastq preparation process.
[ "Prepare", "sample", "fastq", "files", "and", "provide", "global", "sample", "configuration", "for", "the", "flowcell", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L23-L34
train
218,100
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_prep_sample_and_config
def _prep_sample_and_config(ldetail_group, fastq_dir, fastq_final_dir):
    """Build the merged fastq file(s) and configuration for a single sample.

    Returns the sample configuration only when the first merged file is
    non-empty; empty samples yield None so callers can drop them.
    """
    print("->", ldetail_group[0]["name"], len(ldetail_group))
    files = []
    for read in ("R1", "R2"):
        # Collect and de-duplicate per-lane fastq inputs for this read.
        per_lane = (_get_fastq_files(x, read, fastq_dir) for x in ldetail_group)
        fastq_inputs = sorted(set(reduce(operator.add, per_lane)))
        if fastq_inputs:
            files.append(_concat_bgzip_fastq(fastq_inputs, fastq_final_dir,
                                             read, ldetail_group[0]))
    if files and _non_empty(files[0]):
        out = ldetail_group[0]
        out["files"] = files
        return out
python
def _prep_sample_and_config(ldetail_group, fastq_dir, fastq_final_dir): """Prepare output fastq file and configuration for a single sample. Only passes non-empty files through for processing. """ files = [] print("->", ldetail_group[0]["name"], len(ldetail_group)) for read in ["R1", "R2"]: fastq_inputs = sorted(list(set(reduce(operator.add, (_get_fastq_files(x, read, fastq_dir) for x in ldetail_group))))) if len(fastq_inputs) > 0: files.append(_concat_bgzip_fastq(fastq_inputs, fastq_final_dir, read, ldetail_group[0])) if len(files) > 0: if _non_empty(files[0]): out = ldetail_group[0] out["files"] = files return out
[ "def", "_prep_sample_and_config", "(", "ldetail_group", ",", "fastq_dir", ",", "fastq_final_dir", ")", ":", "files", "=", "[", "]", "print", "(", "\"->\"", ",", "ldetail_group", "[", "0", "]", "[", "\"name\"", "]", ",", "len", "(", "ldetail_group", ")", ")...
Prepare output fastq file and configuration for a single sample. Only passes non-empty files through for processing.
[ "Prepare", "output", "fastq", "file", "and", "configuration", "for", "a", "single", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L36-L52
train
218,101
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_write_sample_config
def _write_sample_config(run_folder, ldetails):
    """Write the bcbio-nextgen YAML configuration describing all samples.

    The file is named after the run folder and contains the flowcell
    name/date plus per-sample details sorted by name and description.
    """
    out_file = os.path.join(run_folder, "%s.yaml" % os.path.basename(run_folder))
    with open(out_file, "w") as out_handle:
        fc_name, fc_date = flowcell.parse_dirname(run_folder)
        details = [_prepare_sample(x, run_folder) for x in ldetails]
        details.sort(key=operator.itemgetter("name", "description"))
        out = {"details": details, "fc_name": fc_name, "fc_date": fc_date}
        yaml.safe_dump(out, out_handle, default_flow_style=False,
                       allow_unicode=False)
    return out_file
python
def _write_sample_config(run_folder, ldetails): """Generate a bcbio-nextgen YAML configuration file for processing a sample. """ out_file = os.path.join(run_folder, "%s.yaml" % os.path.basename(run_folder)) with open(out_file, "w") as out_handle: fc_name, fc_date = flowcell.parse_dirname(run_folder) out = {"details": sorted([_prepare_sample(x, run_folder) for x in ldetails], key=operator.itemgetter("name", "description")), "fc_name": fc_name, "fc_date": fc_date} yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file
[ "def", "_write_sample_config", "(", "run_folder", ",", "ldetails", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "run_folder", ",", "\"%s.yaml\"", "%", "os", ".", "path", ".", "basename", "(", "run_folder", ")", ")", "with", "open", "("...
Generate a bcbio-nextgen YAML configuration file for processing a sample.
[ "Generate", "a", "bcbio", "-", "nextgen", "YAML", "configuration", "file", "for", "processing", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L60-L71
train
218,102
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_prepare_sample
def _prepare_sample(data, run_folder):
    """Reduce LIMS sample information to the keys bcbio processing needs.

    Paths are rewritten relative to run_folder, a default algorithm is
    filled in when none was supplied, and name/description are combined
    into a unique per-sample description.
    """
    want = {"description", "files", "genome_build", "name",
            "analysis", "upload", "algorithm"}
    out = {k: _relative_paths(v, run_folder)
           for k, v in data.items() if k in want}
    if "algorithm" not in out:
        analysis, algorithm = _select_default_algorithm(out.get("analysis"))
        out["algorithm"] = algorithm
        out["analysis"] = analysis
    description = "%s-%s" % (out["name"], clean_name(out["description"]))
    out["name"] = [out["name"], description]
    out["description"] = description
    return out
python
def _prepare_sample(data, run_folder): """Extract passed keywords from input LIMS information. """ want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"]) out = {} for k, v in data.items(): if k in want: out[k] = _relative_paths(v, run_folder) if "algorithm" not in out: analysis, algorithm = _select_default_algorithm(out.get("analysis")) out["algorithm"] = algorithm out["analysis"] = analysis description = "%s-%s" % (out["name"], clean_name(out["description"])) out["name"] = [out["name"], description] out["description"] = description return out
[ "def", "_prepare_sample", "(", "data", ",", "run_folder", ")", ":", "want", "=", "set", "(", "[", "\"description\"", ",", "\"files\"", ",", "\"genome_build\"", ",", "\"name\"", ",", "\"analysis\"", ",", "\"upload\"", ",", "\"algorithm\"", "]", ")", "out", "=...
Extract passed keywords from input LIMS information.
[ "Extract", "passed", "keywords", "from", "input", "LIMS", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L73-L88
train
218,103
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_select_default_algorithm
def _select_default_algorithm(analysis): """Provide default algorithm sections from templates or standard """ if not analysis or analysis == "Standard": return "Standard", {"aligner": "bwa", "platform": "illumina", "quality_format": "Standard", "recalibrate": False, "realign": False, "mark_duplicates": True, "variantcaller": False} elif "variant" in analysis: try: config, _ = template.name_to_config(analysis) except ValueError: config, _ = template.name_to_config("freebayes-variant") return "variant", config["details"][0]["algorithm"] else: return analysis, {}
python
def _select_default_algorithm(analysis): """Provide default algorithm sections from templates or standard """ if not analysis or analysis == "Standard": return "Standard", {"aligner": "bwa", "platform": "illumina", "quality_format": "Standard", "recalibrate": False, "realign": False, "mark_duplicates": True, "variantcaller": False} elif "variant" in analysis: try: config, _ = template.name_to_config(analysis) except ValueError: config, _ = template.name_to_config("freebayes-variant") return "variant", config["details"][0]["algorithm"] else: return analysis, {}
[ "def", "_select_default_algorithm", "(", "analysis", ")", ":", "if", "not", "analysis", "or", "analysis", "==", "\"Standard\"", ":", "return", "\"Standard\"", ",", "{", "\"aligner\"", ":", "\"bwa\"", ",", "\"platform\"", ":", "\"illumina\"", ",", "\"quality_format...
Provide default algorithm sections from templates or standard
[ "Provide", "default", "algorithm", "sections", "from", "templates", "or", "standard" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L90-L104
train
218,104
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_relative_paths
def _relative_paths(xs, base_path):
    """Recursively rewrite any paths under base_path to be relative to it.

    Walks strings, lists/tuples and dicts; all other values are returned
    unchanged.
    """
    if isinstance(xs, six.string_types):
        if not xs.startswith(base_path):
            return xs
        return xs.replace(base_path + "/", "", 1)
    if isinstance(xs, (list, tuple)):
        return [_relative_paths(item, base_path) for item in xs]
    if isinstance(xs, dict):
        return {key: _relative_paths(val, base_path)
                for key, val in xs.items()}
    return xs
python
def _relative_paths(xs, base_path): """Adjust paths to be relative to the provided base path. """ if isinstance(xs, six.string_types): if xs.startswith(base_path): return xs.replace(base_path + "/", "", 1) else: return xs elif isinstance(xs, (list, tuple)): return [_relative_paths(x, base_path) for x in xs] elif isinstance(xs, dict): out = {} for k, v in xs.items(): out[k] = _relative_paths(v, base_path) return out else: return xs
[ "def", "_relative_paths", "(", "xs", ",", "base_path", ")", ":", "if", "isinstance", "(", "xs", ",", "six", ".", "string_types", ")", ":", "if", "xs", ".", "startswith", "(", "base_path", ")", ":", "return", "xs", ".", "replace", "(", "base_path", "+",...
Adjust paths to be relative to the provided base path.
[ "Adjust", "paths", "to", "be", "relative", "to", "the", "provided", "base", "path", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L106-L122
train
218,105
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_get_fastq_files
def _get_fastq_files(ldetail, read, fastq_dir): """Retrieve fastq files corresponding to the sample and read number. """ return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"], "Sample_%s" % ldetail["name"], "%s_*_%s_*.fastq.gz" % (ldetail["name"], read)))
python
def _get_fastq_files(ldetail, read, fastq_dir): """Retrieve fastq files corresponding to the sample and read number. """ return glob.glob(os.path.join(fastq_dir, "Project_%s" % ldetail["project_name"], "Sample_%s" % ldetail["name"], "%s_*_%s_*.fastq.gz" % (ldetail["name"], read)))
[ "def", "_get_fastq_files", "(", "ldetail", ",", "read", ",", "fastq_dir", ")", ":", "return", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fastq_dir", ",", "\"Project_%s\"", "%", "ldetail", "[", "\"project_name\"", "]", ",", "\"Sample_%s...
Retrieve fastq files corresponding to the sample and read number.
[ "Retrieve", "fastq", "files", "corresponding", "to", "the", "sample", "and", "read", "number", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L124-L129
train
218,106
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_concat_bgzip_fastq
def _concat_bgzip_fastq(finputs, out_dir, read, ldetail):
    """Concatenate multiple input fastq files into one bgzipped output.

    The output is named <sample>_<read>.fastq.gz in out_dir and is only
    rebuilt when it does not already exist; the concatenation runs inside
    a transactional temporary file.
    """
    import shlex  # local import so the file-level import block is untouched
    out_file = os.path.join(out_dir, "%s_%s.fastq.gz" % (ldetail["name"], read))
    if not utils.file_exists(out_file):
        with file_transaction(out_file) as tx_out_file:
            # Quote every path: previously they were joined unquoted into a
            # shell=True command, so names containing spaces or shell
            # metacharacters would break (or subvert) the zcat|bgzip pipe.
            quoted_inputs = " ".join(shlex.quote(f) for f in finputs)
            cmd = "zcat %s | bgzip -c > %s" % (quoted_inputs,
                                               shlex.quote(tx_out_file))
            subprocess.check_call(cmd, shell=True)
    return out_file
python
def _concat_bgzip_fastq(finputs, out_dir, read, ldetail): """Concatenate multiple input fastq files, preparing a bgzipped output file. """ out_file = os.path.join(out_dir, "%s_%s.fastq.gz" % (ldetail["name"], read)) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out_file: subprocess.check_call("zcat %s | bgzip -c > %s" % (" ".join(finputs), tx_out_file), shell=True) return out_file
[ "def", "_concat_bgzip_fastq", "(", "finputs", ",", "out_dir", ",", "read", ",", "ldetail", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s_%s.fastq.gz\"", "%", "(", "ldetail", "[", "\"name\"", "]", ",", "read", ")", ...
Concatenate multiple input fastq files, preparing a bgzipped output file.
[ "Concatenate", "multiple", "input", "fastq", "files", "preparing", "a", "bgzipped", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L131-L138
train
218,107
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
get_runinfo
def get_runinfo(galaxy_url, galaxy_apikey, run_folder, storedir):
    """Fetch flattened per-lane run details for a directory from Galaxy nglims.

    Errors reported by Galaxy are returned as-is; a ValueError is raised
    when the Galaxy run name matches neither the flowcell date nor name.
    """
    galaxy_api = GalaxyApiAccess(galaxy_url, galaxy_apikey)
    fc_name, fc_date = flowcell.parse_dirname(run_folder)
    galaxy_info = galaxy_api.run_details(fc_name, fc_date)
    if "error" in galaxy_info:
        return galaxy_info
    run_name = galaxy_info["run_name"]
    if not (run_name.startswith(fc_date) or run_name.endswith(fc_name)):
        raise ValueError("Galaxy NGLIMS information %s does not match flowcell %s %s"
                         % (run_name, fc_date, fc_name))
    out = []
    for item in _flatten_lane_details(galaxy_info):
        # Do uploads for all non-controls
        if item["description"] != "control" or item["project_name"] != "control":
            upload = {"method": "galaxy",
                      "run_id": galaxy_info["run_id"],
                      "fc_name": fc_name, "fc_date": fc_date,
                      "dir": storedir,
                      "galaxy_url": galaxy_url,
                      "galaxy_api_key": galaxy_apikey}
            for k in ["lab_association", "private_libs", "researcher",
                      "researcher_id", "sample_id", "galaxy_library",
                      "galaxy_role"]:
                upload[k] = item.pop(k, "")
            item["upload"] = upload
            # NOTE(review): the flattened source is ambiguous about whether
            # controls are appended too; this keeps append inside the
            # non-control branch -- confirm against upstream history.
            out.append(item)
    return out
python
def get_runinfo(galaxy_url, galaxy_apikey, run_folder, storedir): """Retrieve flattened run information for a processed directory from Galaxy nglims API. """ galaxy_api = GalaxyApiAccess(galaxy_url, galaxy_apikey) fc_name, fc_date = flowcell.parse_dirname(run_folder) galaxy_info = galaxy_api.run_details(fc_name, fc_date) if "error" in galaxy_info: return galaxy_info if not galaxy_info["run_name"].startswith(fc_date) and not galaxy_info["run_name"].endswith(fc_name): raise ValueError("Galaxy NGLIMS information %s does not match flowcell %s %s" % (galaxy_info["run_name"], fc_date, fc_name)) ldetails = _flatten_lane_details(galaxy_info) out = [] for item in ldetails: # Do uploads for all non-controls if item["description"] != "control" or item["project_name"] != "control": item["upload"] = {"method": "galaxy", "run_id": galaxy_info["run_id"], "fc_name": fc_name, "fc_date": fc_date, "dir": storedir, "galaxy_url": galaxy_url, "galaxy_api_key": galaxy_apikey} for k in ["lab_association", "private_libs", "researcher", "researcher_id", "sample_id", "galaxy_library", "galaxy_role"]: item["upload"][k] = item.pop(k, "") out.append(item) return out
[ "def", "get_runinfo", "(", "galaxy_url", ",", "galaxy_apikey", ",", "run_folder", ",", "storedir", ")", ":", "galaxy_api", "=", "GalaxyApiAccess", "(", "galaxy_url", ",", "galaxy_apikey", ")", "fc_name", ",", "fc_date", "=", "flowcell", ".", "parse_dirname", "("...
Retrieve flattened run information for a processed directory from Galaxy nglims API.
[ "Retrieve", "flattened", "run", "information", "for", "a", "processed", "directory", "from", "Galaxy", "nglims", "API", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L148-L172
train
218,108
bcbio/bcbio-nextgen
bcbio/galaxy/nglims.py
_flatten_lane_details
def _flatten_lane_details(runinfo):
    """Expand lane details so each multiplexed barcode is its own record.

    Controls missing a project are assigned the "control" project; each
    barcode entry gets a numbered name, its own description and index
    sequence, and a cleaned project name.
    """
    flattened = []
    for lane in runinfo["details"]:
        # Controls can come through without a project; give them one.
        if "project_name" not in lane and lane["description"] == "control":
            lane["project_name"] = "control"
        barcodes = lane.get("multiplex", [{}])
        for idx, barcode in enumerate(barcodes, start=1):
            entry = copy.deepcopy(lane)
            entry["name"] = "%s-%s" % (lane["name"], idx)
            entry["description"] = barcode.get("name", lane["description"])
            entry["bc_index"] = barcode.get("sequence", "")
            entry["project_name"] = clean_name(lane["project_name"])
            flattened.append(entry)
    return flattened
python
def _flatten_lane_details(runinfo): """Provide flattened lane information with multiplexed barcodes separated. """ out = [] for ldetail in runinfo["details"]: # handle controls if "project_name" not in ldetail and ldetail["description"] == "control": ldetail["project_name"] = "control" for i, barcode in enumerate(ldetail.get("multiplex", [{}])): cur = copy.deepcopy(ldetail) cur["name"] = "%s-%s" % (ldetail["name"], i + 1) cur["description"] = barcode.get("name", ldetail["description"]) cur["bc_index"] = barcode.get("sequence", "") cur["project_name"] = clean_name(ldetail["project_name"]) out.append(cur) return out
[ "def", "_flatten_lane_details", "(", "runinfo", ")", ":", "out", "=", "[", "]", "for", "ldetail", "in", "runinfo", "[", "\"details\"", "]", ":", "# handle controls", "if", "\"project_name\"", "not", "in", "ldetail", "and", "ldetail", "[", "\"description\"", "]...
Provide flattened lane information with multiplexed barcodes separated.
[ "Provide", "flattened", "lane", "information", "with", "multiplexed", "barcodes", "separated", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/nglims.py#L174-L189
train
218,109
bcbio/bcbio-nextgen
bcbio/distributed/split.py
grouped_parallel_split_combine
def grouped_parallel_split_combine(args, split_fn, group_fn, parallel_fn,
                                   parallel_name, combine_name, file_key,
                                   combine_arg_keys, split_outfile_i=-1):
    """Run a split/process/combine pipeline on samples grouped for joint analysis.

    Identical to parallel_split_combine except that group_fn first merges
    related samples so they are processed together before being split
    back apart.

    group_fn: groups samples together given their configuration details.
    """
    split_args, combine_map, finished_out, extras = _get_split_tasks(
        group_fn(args), split_fn, file_key, split_outfile_i)
    processed = parallel_fn(parallel_name, split_args)
    combine_args, final_args = _organize_output(processed, combine_map,
                                                file_key, combine_arg_keys)
    parallel_fn(combine_name, combine_args)
    return finished_out + final_args + extras
python
def grouped_parallel_split_combine(args, split_fn, group_fn, parallel_fn, parallel_name, combine_name, file_key, combine_arg_keys, split_outfile_i=-1): """Parallel split runner that allows grouping of samples during processing. This builds on parallel_split_combine to provide the additional ability to group samples and subsequently split them back apart. This allows analysis of related samples together. In addition to the arguments documented in parallel_split_combine, this needs: group_fn: A function that groups samples together given their configuration details. """ grouped_args = group_fn(args) split_args, combine_map, finished_out, extras = _get_split_tasks(grouped_args, split_fn, file_key, split_outfile_i) final_output = parallel_fn(parallel_name, split_args) combine_args, final_args = _organize_output(final_output, combine_map, file_key, combine_arg_keys) parallel_fn(combine_name, combine_args) return finished_out + final_args + extras
[ "def", "grouped_parallel_split_combine", "(", "args", ",", "split_fn", ",", "group_fn", ",", "parallel_fn", ",", "parallel_name", ",", "combine_name", ",", "file_key", ",", "combine_arg_keys", ",", "split_outfile_i", "=", "-", "1", ")", ":", "grouped_args", "=", ...
Parallel split runner that allows grouping of samples during processing. This builds on parallel_split_combine to provide the additional ability to group samples and subsequently split them back apart. This allows analysis of related samples together. In addition to the arguments documented in parallel_split_combine, this needs: group_fn: A function that groups samples together given their configuration details.
[ "Parallel", "split", "runner", "that", "allows", "grouping", "of", "samples", "during", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L18-L39
train
218,110
bcbio/bcbio-nextgen
bcbio/distributed/split.py
parallel_split_combine
def parallel_split_combine(args, split_fn, parallel_fn, parallel_name,
                           combiner, file_key, combine_arg_keys,
                           split_outfile_i=-1):
    """Split inputs, run the split parts in parallel, then combine outputs.

    split_fn: splits an input file into processing parts, returning the
      combined output name plus per-part output names and arguments.
    parallel_fn: run_parallel reference handling single core, multicore
      or distributed execution.
    parallel_name: task name (bcbio.distributed.tasks/multitasks/
      ipythontasks) to run in parallel.
    combiner: task name that merges split outputs into the final file, or
      a callable when combining is delayed.
    split_outfile_i: index of the output file within the split function's
      argument list (default: last item).
    """
    args = [x[0] for x in args]
    split_args, combine_map, finished_out, extras = _get_split_tasks(
        args, split_fn, file_key, split_outfile_i)
    split_output = parallel_fn(parallel_name, split_args)
    if isinstance(combiner, six.string_types):
        combine_args, final_args = _organize_output(split_output, combine_map,
                                                    file_key, combine_arg_keys)
        parallel_fn(combiner, combine_args)
    elif callable(combiner):
        final_args = combiner(split_output, combine_map, file_key)
    else:
        # Previously an unsupported combiner fell through to a confusing
        # NameError on final_args; fail fast with a clear message instead.
        raise ValueError("Unexpected combiner, not a task name or callable: %s"
                         % combiner)
    return finished_out + final_args + extras
python
def parallel_split_combine(args, split_fn, parallel_fn, parallel_name, combiner, file_key, combine_arg_keys, split_outfile_i=-1): """Split, run split items in parallel then combine to output file. split_fn: Split an input file into parts for processing. Returns the name of the combined output file along with the individual split output names and arguments for the parallel function. parallel_fn: Reference to run_parallel function that will run single core, multicore, or distributed as needed. parallel_name: The name of the function, defined in bcbio.distributed.tasks/multitasks/ipythontasks to run in parallel. combiner: The name of the function, also from tasks, that combines the split output files into a final ready to run file. Can also be a callable function if combining is delayed. split_outfile_i: the location of the output file in the arguments generated by the split function. Defaults to the last item in the list. """ args = [x[0] for x in args] split_args, combine_map, finished_out, extras = _get_split_tasks(args, split_fn, file_key, split_outfile_i) split_output = parallel_fn(parallel_name, split_args) if isinstance(combiner, six.string_types): combine_args, final_args = _organize_output(split_output, combine_map, file_key, combine_arg_keys) parallel_fn(combiner, combine_args) elif callable(combiner): final_args = combiner(split_output, combine_map, file_key) return finished_out + final_args + extras
[ "def", "parallel_split_combine", "(", "args", ",", "split_fn", ",", "parallel_fn", ",", "parallel_name", ",", "combiner", ",", "file_key", ",", "combine_arg_keys", ",", "split_outfile_i", "=", "-", "1", ")", ":", "args", "=", "[", "x", "[", "0", "]", "for"...
Split, run split items in parallel then combine to output file. split_fn: Split an input file into parts for processing. Returns the name of the combined output file along with the individual split output names and arguments for the parallel function. parallel_fn: Reference to run_parallel function that will run single core, multicore, or distributed as needed. parallel_name: The name of the function, defined in bcbio.distributed.tasks/multitasks/ipythontasks to run in parallel. combiner: The name of the function, also from tasks, that combines the split output files into a final ready to run file. Can also be a callable function if combining is delayed. split_outfile_i: the location of the output file in the arguments generated by the split function. Defaults to the last item in the list.
[ "Split", "run", "split", "items", "in", "parallel", "then", "combine", "to", "output", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L41-L69
train
218,111
bcbio/bcbio-nextgen
bcbio/distributed/split.py
_get_extra_args
def _get_extra_args(extra_args, arg_keys): """Retrieve extra arguments to pass along to combine function. Special cases like reference files and configuration information are passed as single items, the rest as lists mapping to each data item combined. """ # XXX back compatible hack -- should have a way to specify these. single_keys = set(["sam_ref", "config"]) out = [] for i, arg_key in enumerate(arg_keys): vals = [xs[i] for xs in extra_args] if arg_key in single_keys: out.append(vals[-1]) else: out.append(vals) return out
python
def _get_extra_args(extra_args, arg_keys): """Retrieve extra arguments to pass along to combine function. Special cases like reference files and configuration information are passed as single items, the rest as lists mapping to each data item combined. """ # XXX back compatible hack -- should have a way to specify these. single_keys = set(["sam_ref", "config"]) out = [] for i, arg_key in enumerate(arg_keys): vals = [xs[i] for xs in extra_args] if arg_key in single_keys: out.append(vals[-1]) else: out.append(vals) return out
[ "def", "_get_extra_args", "(", "extra_args", ",", "arg_keys", ")", ":", "# XXX back compatible hack -- should have a way to specify these.", "single_keys", "=", "set", "(", "[", "\"sam_ref\"", ",", "\"config\"", "]", ")", "out", "=", "[", "]", "for", "i", ",", "ar...
Retrieve extra arguments to pass along to combine function. Special cases like reference files and configuration information are passed as single items, the rest as lists mapping to each data item combined.
[ "Retrieve", "extra", "arguments", "to", "pass", "along", "to", "combine", "function", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L71-L87
train
218,112
bcbio/bcbio-nextgen
bcbio/distributed/split.py
_organize_output
def _organize_output(output, combine_map, file_key, combine_arg_keys): """Combine output details for parallelization. file_key is the key name of the output file used in merging. We extract this file from the output data. combine_arg_keys are extra items to pass along to the combine function. """ out_map = collections.defaultdict(list) extra_args = collections.defaultdict(list) final_args = collections.OrderedDict() extras = [] for data in output: cur_file = data.get(file_key) if not cur_file: extras.append([data]) else: cur_out = combine_map[cur_file] out_map[cur_out].append(cur_file) extra_args[cur_out].append([data[x] for x in combine_arg_keys]) data[file_key] = cur_out if cur_out not in final_args: final_args[cur_out] = [data] else: extras.append([data]) combine_args = [[v, k] + _get_extra_args(extra_args[k], combine_arg_keys) for (k, v) in out_map.items()] return combine_args, list(final_args.values()) + extras
python
def _organize_output(output, combine_map, file_key, combine_arg_keys): """Combine output details for parallelization. file_key is the key name of the output file used in merging. We extract this file from the output data. combine_arg_keys are extra items to pass along to the combine function. """ out_map = collections.defaultdict(list) extra_args = collections.defaultdict(list) final_args = collections.OrderedDict() extras = [] for data in output: cur_file = data.get(file_key) if not cur_file: extras.append([data]) else: cur_out = combine_map[cur_file] out_map[cur_out].append(cur_file) extra_args[cur_out].append([data[x] for x in combine_arg_keys]) data[file_key] = cur_out if cur_out not in final_args: final_args[cur_out] = [data] else: extras.append([data]) combine_args = [[v, k] + _get_extra_args(extra_args[k], combine_arg_keys) for (k, v) in out_map.items()] return combine_args, list(final_args.values()) + extras
[ "def", "_organize_output", "(", "output", ",", "combine_map", ",", "file_key", ",", "combine_arg_keys", ")", ":", "out_map", "=", "collections", ".", "defaultdict", "(", "list", ")", "extra_args", "=", "collections", ".", "defaultdict", "(", "list", ")", "fina...
Combine output details for parallelization. file_key is the key name of the output file used in merging. We extract this file from the output data. combine_arg_keys are extra items to pass along to the combine function.
[ "Combine", "output", "details", "for", "parallelization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L89-L116
train
218,113
bcbio/bcbio-nextgen
bcbio/distributed/split.py
_get_split_tasks
def _get_split_tasks(args, split_fn, file_key, outfile_i=-1): """Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list. """ split_args = [] combine_map = {} finished_map = collections.OrderedDict() extras = [] for data in args: out_final, out_parts = split_fn(data) for parts in out_parts: split_args.append([utils.deepish_copy(data)] + list(parts)) for part_file in [x[outfile_i] for x in out_parts]: combine_map[part_file] = out_final if len(out_parts) == 0: if out_final is not None: if out_final not in finished_map: data[file_key] = out_final finished_map[out_final] = [data] else: extras.append([data]) else: extras.append([data]) return split_args, combine_map, list(finished_map.values()), extras
python
def _get_split_tasks(args, split_fn, file_key, outfile_i=-1): """Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list. """ split_args = [] combine_map = {} finished_map = collections.OrderedDict() extras = [] for data in args: out_final, out_parts = split_fn(data) for parts in out_parts: split_args.append([utils.deepish_copy(data)] + list(parts)) for part_file in [x[outfile_i] for x in out_parts]: combine_map[part_file] = out_final if len(out_parts) == 0: if out_final is not None: if out_final not in finished_map: data[file_key] = out_final finished_map[out_final] = [data] else: extras.append([data]) else: extras.append([data]) return split_args, combine_map, list(finished_map.values()), extras
[ "def", "_get_split_tasks", "(", "args", ",", "split_fn", ",", "file_key", ",", "outfile_i", "=", "-", "1", ")", ":", "split_args", "=", "[", "]", "combine_map", "=", "{", "}", "finished_map", "=", "collections", ".", "OrderedDict", "(", ")", "extras", "=...
Split up input files and arguments, returning arguments for parallel processing. outfile_i specifies the location of the output file in the arguments to the processing function. Defaults to the last item in the list.
[ "Split", "up", "input", "files", "and", "arguments", "returning", "arguments", "for", "parallel", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L118-L143
train
218,114
bcbio/bcbio-nextgen
bcbio/ngsalign/snap.py
remap_index_fn
def remap_index_fn(ref_file): """Map sequence references to snap reference directory, using standard layout. """ snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap") assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir return snap_dir
python
def remap_index_fn(ref_file): """Map sequence references to snap reference directory, using standard layout. """ snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap") assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir return snap_dir
[ "def", "remap_index_fn", "(", "ref_file", ")", ":", "snap_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "ref_file", ")", ",", "os", ".", "pardir", ",", "\"snap\"", ")", "assert", "os", ".", "path", ".", "ex...
Map sequence references to snap reference directory, using standard layout.
[ "Map", "sequence", "references", "to", "snap", "reference", "directory", "using", "standard", "layout", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/snap.py#L81-L86
train
218,115
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
use_general_sv_bins
def use_general_sv_bins(data): """Check if we should use a general binning approach for a sample. Checks if CNVkit is enabled and we haven't already run CNVkit. """ if any([c in dd.get_svcaller(data) for c in ["cnvkit", "titancna", "purecn", "gatk-cnv"]]): if not _get_original_coverage(data): return True return False
python
def use_general_sv_bins(data): """Check if we should use a general binning approach for a sample. Checks if CNVkit is enabled and we haven't already run CNVkit. """ if any([c in dd.get_svcaller(data) for c in ["cnvkit", "titancna", "purecn", "gatk-cnv"]]): if not _get_original_coverage(data): return True return False
[ "def", "use_general_sv_bins", "(", "data", ")", ":", "if", "any", "(", "[", "c", "in", "dd", ".", "get_svcaller", "(", "data", ")", "for", "c", "in", "[", "\"cnvkit\"", ",", "\"titancna\"", ",", "\"purecn\"", ",", "\"gatk-cnv\"", "]", "]", ")", ":", ...
Check if we should use a general binning approach for a sample. Checks if CNVkit is enabled and we haven't already run CNVkit.
[ "Check", "if", "we", "should", "use", "a", "general", "binning", "approach", "for", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L31-L39
train
218,116
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
bin_approach
def bin_approach(data): """Check for binning approach from configuration or normalized file. """ for approach in ["cnvkit", "gatk-cnv"]: if approach in dd.get_svcaller(data): return approach norm_file = tz.get_in(["depth", "bins", "normalized"], data) if norm_file.endswith(("-crstandardized.tsv", "-crdenoised.tsv")): return "gatk-cnv" if norm_file.endswith(".cnr"): return "cnvkit"
python
def bin_approach(data): """Check for binning approach from configuration or normalized file. """ for approach in ["cnvkit", "gatk-cnv"]: if approach in dd.get_svcaller(data): return approach norm_file = tz.get_in(["depth", "bins", "normalized"], data) if norm_file.endswith(("-crstandardized.tsv", "-crdenoised.tsv")): return "gatk-cnv" if norm_file.endswith(".cnr"): return "cnvkit"
[ "def", "bin_approach", "(", "data", ")", ":", "for", "approach", "in", "[", "\"cnvkit\"", ",", "\"gatk-cnv\"", "]", ":", "if", "approach", "in", "dd", ".", "get_svcaller", "(", "data", ")", ":", "return", "approach", "norm_file", "=", "tz", ".", "get_in"...
Check for binning approach from configuration or normalized file.
[ "Check", "for", "binning", "approach", "from", "configuration", "or", "normalized", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L41-L51
train
218,117
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_by_type
def _cnvkit_by_type(items, background): """Dispatch to specific CNVkit functionality based on input type. """ if len(items + background) == 1: return _run_cnvkit_single(items[0]) elif vcfutils.get_paired_phenotype(items[0]): return _run_cnvkit_cancer(items, background) else: return _run_cnvkit_population(items, background)
python
def _cnvkit_by_type(items, background): """Dispatch to specific CNVkit functionality based on input type. """ if len(items + background) == 1: return _run_cnvkit_single(items[0]) elif vcfutils.get_paired_phenotype(items[0]): return _run_cnvkit_cancer(items, background) else: return _run_cnvkit_population(items, background)
[ "def", "_cnvkit_by_type", "(", "items", ",", "background", ")", ":", "if", "len", "(", "items", "+", "background", ")", "==", "1", ":", "return", "_run_cnvkit_single", "(", "items", "[", "0", "]", ")", "elif", "vcfutils", ".", "get_paired_phenotype", "(", ...
Dispatch to specific CNVkit functionality based on input type.
[ "Dispatch", "to", "specific", "CNVkit", "functionality", "based", "on", "input", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L63-L71
train
218,118
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_associate_cnvkit_out
def _associate_cnvkit_out(ckouts, items, is_somatic=False): """Associate cnvkit output with individual items. """ assert len(ckouts) == len(items) out = [] upload_counts = collections.defaultdict(int) for ckout, data in zip(ckouts, items): ckout = copy.deepcopy(ckout) ckout["variantcaller"] = "cnvkit" if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]): ckout = _add_seg_to_output(ckout, data) ckout = _add_gainloss_to_output(ckout, data) ckout = _add_segmetrics_to_output(ckout, data) ckout = _add_variantcalls_to_output(ckout, data, items, is_somatic) # ckout = _add_coverage_bedgraph_to_output(ckout, data) ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data) if "svplots" in dd.get_tools_on(data): ckout = _add_plots_to_output(ckout, data) ckout["do_upload"] = upload_counts[ckout.get("vrn_file")] == 0 if "sv" not in data: data["sv"] = [] data["sv"].append(ckout) if ckout.get("vrn_file"): upload_counts[ckout["vrn_file"]] += 1 out.append(data) return out
python
def _associate_cnvkit_out(ckouts, items, is_somatic=False): """Associate cnvkit output with individual items. """ assert len(ckouts) == len(items) out = [] upload_counts = collections.defaultdict(int) for ckout, data in zip(ckouts, items): ckout = copy.deepcopy(ckout) ckout["variantcaller"] = "cnvkit" if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]): ckout = _add_seg_to_output(ckout, data) ckout = _add_gainloss_to_output(ckout, data) ckout = _add_segmetrics_to_output(ckout, data) ckout = _add_variantcalls_to_output(ckout, data, items, is_somatic) # ckout = _add_coverage_bedgraph_to_output(ckout, data) ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data) if "svplots" in dd.get_tools_on(data): ckout = _add_plots_to_output(ckout, data) ckout["do_upload"] = upload_counts[ckout.get("vrn_file")] == 0 if "sv" not in data: data["sv"] = [] data["sv"].append(ckout) if ckout.get("vrn_file"): upload_counts[ckout["vrn_file"]] += 1 out.append(data) return out
[ "def", "_associate_cnvkit_out", "(", "ckouts", ",", "items", ",", "is_somatic", "=", "False", ")", ":", "assert", "len", "(", "ckouts", ")", "==", "len", "(", "items", ")", "out", "=", "[", "]", "upload_counts", "=", "collections", ".", "defaultdict", "(...
Associate cnvkit output with individual items.
[ "Associate", "cnvkit", "output", "with", "individual", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L73-L98
train
218,119
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_run_cnvkit_single
def _run_cnvkit_single(data, background=None): """Process a single input file with BAM or uniform background. """ if not background: background = [] ckouts = _run_cnvkit_shared([data], background) if not ckouts: return [data] else: assert len(ckouts) == 1 return _associate_cnvkit_out(ckouts, [data])
python
def _run_cnvkit_single(data, background=None): """Process a single input file with BAM or uniform background. """ if not background: background = [] ckouts = _run_cnvkit_shared([data], background) if not ckouts: return [data] else: assert len(ckouts) == 1 return _associate_cnvkit_out(ckouts, [data])
[ "def", "_run_cnvkit_single", "(", "data", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "ckouts", "=", "_run_cnvkit_shared", "(", "[", "data", "]", ",", "background", ")", "if", "not", "ckouts", ":...
Process a single input file with BAM or uniform background.
[ "Process", "a", "single", "input", "file", "with", "BAM", "or", "uniform", "background", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L100-L110
train
218,120
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_run_cnvkit_population
def _run_cnvkit_population(items, background): """Run CNVkit on a population of samples. Tries to calculate background based on case/controls, otherwise uses samples from the same batch as background. """ if background and len(background) > 0: inputs = items else: inputs, background = shared.find_case_control(items) # if we have case/control organized background or a single sample if len(inputs) == 1 or len(background) > 0: ckouts = _run_cnvkit_shared(inputs, background) return _associate_cnvkit_out(ckouts, inputs) + background # otherwise run each sample with the others in the batch as background else: out = [] for cur_input in items: background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)] ckouts = _run_cnvkit_shared([cur_input], background) out.extend(_associate_cnvkit_out(ckouts, [cur_input])) return out
python
def _run_cnvkit_population(items, background): """Run CNVkit on a population of samples. Tries to calculate background based on case/controls, otherwise uses samples from the same batch as background. """ if background and len(background) > 0: inputs = items else: inputs, background = shared.find_case_control(items) # if we have case/control organized background or a single sample if len(inputs) == 1 or len(background) > 0: ckouts = _run_cnvkit_shared(inputs, background) return _associate_cnvkit_out(ckouts, inputs) + background # otherwise run each sample with the others in the batch as background else: out = [] for cur_input in items: background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)] ckouts = _run_cnvkit_shared([cur_input], background) out.extend(_associate_cnvkit_out(ckouts, [cur_input])) return out
[ "def", "_run_cnvkit_population", "(", "items", ",", "background", ")", ":", "if", "background", "and", "len", "(", "background", ")", ">", "0", ":", "inputs", "=", "items", "else", ":", "inputs", ",", "background", "=", "shared", ".", "find_case_control", ...
Run CNVkit on a population of samples. Tries to calculate background based on case/controls, otherwise uses samples from the same batch as background.
[ "Run", "CNVkit", "on", "a", "population", "of", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L141-L163
train
218,121
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_prep_cmd
def _prep_cmd(cmd, tx_out_file): """Wrap CNVkit commands ensuring we use local temporary directories. """ cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
python
def _prep_cmd(cmd, tx_out_file): """Wrap CNVkit commands ensuring we use local temporary directories. """ cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
[ "def", "_prep_cmd", "(", "cmd", ",", "tx_out_file", ")", ":", "cmd", "=", "\" \"", ".", "join", "(", "cmd", ")", "if", "isinstance", "(", "cmd", ",", "(", "list", ",", "tuple", ")", ")", "else", "cmd", "return", "\"export TMPDIR=%s && %s\"", "%", "(", ...
Wrap CNVkit commands ensuring we use local temporary directories.
[ "Wrap", "CNVkit", "commands", "ensuring", "we", "use", "local", "temporary", "directories", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L168-L172
train
218,122
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_bam_to_outbase
def _bam_to_outbase(bam_file, work_dir, data): """Convert an input BAM file into CNVkit expected output. Handles previous non-batch cases to avoid re-calculating, returning both new and old values: """ batch = dd.get_batch(data) or dd.get_sample_name(data) out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0] base = os.path.join(work_dir, out_base) return "%s-%s" % (base, batch), base
python
def _bam_to_outbase(bam_file, work_dir, data): """Convert an input BAM file into CNVkit expected output. Handles previous non-batch cases to avoid re-calculating, returning both new and old values: """ batch = dd.get_batch(data) or dd.get_sample_name(data) out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0] base = os.path.join(work_dir, out_base) return "%s-%s" % (base, batch), base
[ "def", "_bam_to_outbase", "(", "bam_file", ",", "work_dir", ",", "data", ")", ":", "batch", "=", "dd", ".", "get_batch", "(", "data", ")", "or", "dd", ".", "get_sample_name", "(", "data", ")", "out_base", "=", "os", ".", "path", ".", "splitext", "(", ...
Convert an input BAM file into CNVkit expected output. Handles previous non-batch cases to avoid re-calculating, returning both new and old values:
[ "Convert", "an", "input", "BAM", "file", "into", "CNVkit", "expected", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L174-L183
train
218,123
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_run_cnvkit_shared
def _run_cnvkit_shared(inputs, backgrounds): """Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually. """ if tz.get_in(["depth", "bins", "normalized"], inputs[0]): ckouts = [] for data in inputs: cnr_file = tz.get_in(["depth", "bins", "normalized"], data) cns_file = os.path.join(_sv_workdir(data), "%s.cns" % dd.get_sample_name(data)) cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, inputs + backgrounds, cns_file) ckouts.append({"cnr": cnr_file, "cns": cns_file, "background": tz.get_in(["depth", "bins", "background"], data)}) return ckouts else: return _run_cnvkit_shared_orig(inputs, backgrounds)
python
def _run_cnvkit_shared(inputs, backgrounds): """Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually. """ if tz.get_in(["depth", "bins", "normalized"], inputs[0]): ckouts = [] for data in inputs: cnr_file = tz.get_in(["depth", "bins", "normalized"], data) cns_file = os.path.join(_sv_workdir(data), "%s.cns" % dd.get_sample_name(data)) cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, inputs + backgrounds, cns_file) ckouts.append({"cnr": cnr_file, "cns": cns_file, "background": tz.get_in(["depth", "bins", "background"], data)}) return ckouts else: return _run_cnvkit_shared_orig(inputs, backgrounds)
[ "def", "_run_cnvkit_shared", "(", "inputs", ",", "backgrounds", ")", ":", "if", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "\"bins\"", ",", "\"normalized\"", "]", ",", "inputs", "[", "0", "]", ")", ":", "ckouts", "=", "[", "]", "for", "data", "...
Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually.
[ "Shared", "functionality", "to", "run", "CNVkit", "parallelizing", "over", "multiple", "BAM", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L201-L218
train
218,124
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_get_general_coverage
def _get_general_coverage(data, itype): """Retrieve coverage information from new shared SV bins. """ work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) return [{"bam": work_bam, "file": tz.get_in(["depth", "bins", "target"], data), "cnntype": "target", "itype": itype, "sample": dd.get_sample_name(data)}, {"bam": work_bam, "file": tz.get_in(["depth", "bins", "antitarget"], data), "cnntype": "antitarget", "itype": itype, "sample": dd.get_sample_name(data)}]
python
def _get_general_coverage(data, itype): """Retrieve coverage information from new shared SV bins. """ work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) return [{"bam": work_bam, "file": tz.get_in(["depth", "bins", "target"], data), "cnntype": "target", "itype": itype, "sample": dd.get_sample_name(data)}, {"bam": work_bam, "file": tz.get_in(["depth", "bins", "antitarget"], data), "cnntype": "antitarget", "itype": itype, "sample": dd.get_sample_name(data)}]
[ "def", "_get_general_coverage", "(", "data", ",", "itype", ")", ":", "work_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "return", "[", "{", "\"bam\"", ":", "work_bam", ",", "\"file\"", ":", ...
Retrieve coverage information from new shared SV bins.
[ "Retrieve", "coverage", "information", "from", "new", "shared", "SV", "bins", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L228-L235
train
218,125
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_segment
def _cnvkit_segment(cnr_file, cov_interval, data, items, out_file=None, detailed=False): """Perform segmentation and copy number calling on normalized inputs """ if not out_file: out_file = "%s.cns" % os.path.splitext(cnr_file)[0] if not utils.file_uptodate(out_file, cnr_file): with file_transaction(data, out_file) as tx_out_file: if not _cna_has_values(cnr_file): with open(tx_out_file, "w") as out_handle: out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n") else: # Scale cores to avoid memory issues with segmentation # https://github.com/etal/cnvkit/issues/346 if cov_interval == "genome": cores = max(1, dd.get_cores(data) // 2) else: cores = dd.get_cores(data) cmd = [_get_cmd(), "segment", "-p", str(cores), "-o", tx_out_file, cnr_file] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome": cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] resources = config_utils.get_resources("cnvkit_segment", data["config"]) user_options = resources.get("options", []) cmd += [str(x) for x in user_options] if cov_interval == "genome" and "--threshold" not in user_options: cmd += ["--threshold", "0.00001"] # For tumors, remove very low normalized regions, avoiding upcaptured noise # https://github.com/bcbio/bcbio-nextgen/issues/2171#issuecomment-348333650 # unless we want detailed segmentation for downstream tools paired = vcfutils.get_paired(items) if paired: #if detailed: # cmd += ["-m", "hmm-tumor"] if "--drop-low-coverage" not in user_options: cmd += ["--drop-low-coverage"] # preferentially use conda installed Rscript export_cmd = ("%s && export TMPDIR=%s && " % (utils.get_R_exports(), os.path.dirname(tx_out_file))) do.run(export_cmd + " ".join(cmd), "CNVkit segment") return out_file
python
def _cnvkit_segment(cnr_file, cov_interval, data, items, out_file=None, detailed=False): """Perform segmentation and copy number calling on normalized inputs """ if not out_file: out_file = "%s.cns" % os.path.splitext(cnr_file)[0] if not utils.file_uptodate(out_file, cnr_file): with file_transaction(data, out_file) as tx_out_file: if not _cna_has_values(cnr_file): with open(tx_out_file, "w") as out_handle: out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n") else: # Scale cores to avoid memory issues with segmentation # https://github.com/etal/cnvkit/issues/346 if cov_interval == "genome": cores = max(1, dd.get_cores(data) // 2) else: cores = dd.get_cores(data) cmd = [_get_cmd(), "segment", "-p", str(cores), "-o", tx_out_file, cnr_file] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome": cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] resources = config_utils.get_resources("cnvkit_segment", data["config"]) user_options = resources.get("options", []) cmd += [str(x) for x in user_options] if cov_interval == "genome" and "--threshold" not in user_options: cmd += ["--threshold", "0.00001"] # For tumors, remove very low normalized regions, avoiding upcaptured noise # https://github.com/bcbio/bcbio-nextgen/issues/2171#issuecomment-348333650 # unless we want detailed segmentation for downstream tools paired = vcfutils.get_paired(items) if paired: #if detailed: # cmd += ["-m", "hmm-tumor"] if "--drop-low-coverage" not in user_options: cmd += ["--drop-low-coverage"] # preferentially use conda installed Rscript export_cmd = ("%s && export TMPDIR=%s && " % (utils.get_R_exports(), os.path.dirname(tx_out_file))) do.run(export_cmd + " ".join(cmd), "CNVkit segment") return out_file
[ "def", "_cnvkit_segment", "(", "cnr_file", ",", "cov_interval", ",", "data", ",", "items", ",", "out_file", "=", "None", ",", "detailed", "=", "False", ")", ":", "if", "not", "out_file", ":", "out_file", "=", "\"%s.cns\"", "%", "os", ".", "path", ".", ...
Perform segmentation and copy number calling on normalized inputs
[ "Perform", "segmentation", "and", "copy", "number", "calling", "on", "normalized", "inputs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L297-L338
train
218,126
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_metrics
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items): """Estimate noise of a sample using a flat background. Only used for panel/targeted data due to memory issues with whole genome samples. """ if cov_interval == "genome": return cnns target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0] background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0] background_file = cnvkit_background([], background_file, items, target_bed, antitarget_bed) cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground") cns_file = _cnvkit_segment(cnr_file, cov_interval, data) metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0] if not utils.file_exists(metrics_file): with file_transaction(data, metrics_file) as tx_metrics_file: cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file] do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics") metrics = _read_metrics_file(metrics_file) out = [] for cnn in cnns: cnn["metrics"] = metrics out.append(cnn) return out
python
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items): """Estimate noise of a sample using a flat background. Only used for panel/targeted data due to memory issues with whole genome samples. """ if cov_interval == "genome": return cnns target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0] background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0] background_file = cnvkit_background([], background_file, items, target_bed, antitarget_bed) cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground") cns_file = _cnvkit_segment(cnr_file, cov_interval, data) metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0] if not utils.file_exists(metrics_file): with file_transaction(data, metrics_file) as tx_metrics_file: cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file] do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics") metrics = _read_metrics_file(metrics_file) out = [] for cnn in cnns: cnn["metrics"] = metrics out.append(cnn) return out
[ "def", "_cnvkit_metrics", "(", "cnns", ",", "target_bed", ",", "antitarget_bed", ",", "cov_interval", ",", "items", ")", ":", "if", "cov_interval", "==", "\"genome\"", ":", "return", "cnns", "target_cnn", "=", "[", "x", "[", "\"file\"", "]", "for", "x", "i...
Estimate noise of a sample using a flat background. Only used for panel/targeted data due to memory issues with whole genome samples.
[ "Estimate", "noise", "of", "a", "sample", "using", "a", "flat", "background", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L340-L364
train
218,127
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_cnvkit_fix
def _cnvkit_fix(cnns, background_cnn, items, ckouts): """Normalize samples, correcting sources of bias. """ return [_cnvkit_fix_base(cnns, background_cnn, items, ckouts)]
python
def _cnvkit_fix(cnns, background_cnn, items, ckouts): """Normalize samples, correcting sources of bias. """ return [_cnvkit_fix_base(cnns, background_cnn, items, ckouts)]
[ "def", "_cnvkit_fix", "(", "cnns", ",", "background_cnn", ",", "items", ",", "ckouts", ")", ":", "return", "[", "_cnvkit_fix_base", "(", "cnns", ",", "background_cnn", ",", "items", ",", "ckouts", ")", "]" ]
Normalize samples, correcting sources of bias.
[ "Normalize", "samples", "correcting", "sources", "of", "bias", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L374-L377
train
218,128
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_select_background_cnns
def _select_background_cnns(cnns): """Select cnns to use for background calculations. Uses background samples in cohort, and will remove CNNs with high on target variability. Uses (number of segments * biweight midvariance) as metric for variability with higher numbers being more unreliable. """ min_for_variability_analysis = 20 pct_keep = 0.10 b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" if len(b_cnns) >= min_for_variability_analysis: b_cnns_w_metrics = [] for b_cnn in b_cnns: unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"] b_cnns_w_metrics.append((unreliability, b_cnn)) b_cnns_w_metrics.sort() to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2) b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" return [x["file"] for x in b_cnns]
python
def _select_background_cnns(cnns): """Select cnns to use for background calculations. Uses background samples in cohort, and will remove CNNs with high on target variability. Uses (number of segments * biweight midvariance) as metric for variability with higher numbers being more unreliable. """ min_for_variability_analysis = 20 pct_keep = 0.10 b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" if len(b_cnns) >= min_for_variability_analysis: b_cnns_w_metrics = [] for b_cnn in b_cnns: unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"] b_cnns_w_metrics.append((unreliability, b_cnn)) b_cnns_w_metrics.sort() to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2) b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep] assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background" return [x["file"] for x in b_cnns]
[ "def", "_select_background_cnns", "(", "cnns", ")", ":", "min_for_variability_analysis", "=", "20", "pct_keep", "=", "0.10", "b_cnns", "=", "[", "x", "for", "x", "in", "cnns", "if", "x", "[", "\"itype\"", "]", "==", "\"background\"", "and", "x", ".", "get"...
Select cnns to use for background calculations. Uses background samples in cohort, and will remove CNNs with high on target variability. Uses (number of segments * biweight midvariance) as metric for variability with higher numbers being more unreliable.
[ "Select", "cnns", "to", "use", "for", "background", "calculations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L400-L420
train
218,129
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
cnvkit_background
def cnvkit_background(background_cnns, out_file, items, target_bed=None, antitarget_bed=None): """Calculate background reference, handling flat case with no normal sample. """ if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(items[0]), "-o", tx_out_file] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] if len(background_cnns) == 0: assert target_bed and antitarget_bed, "Missing CNNs and target BEDs for flat background" cmd += ["-t", target_bed, "-a", antitarget_bed] else: cmd += background_cnns do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background") return out_file
python
def cnvkit_background(background_cnns, out_file, items, target_bed=None, antitarget_bed=None): """Calculate background reference, handling flat case with no normal sample. """ if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(items[0]), "-o", tx_out_file] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] if len(background_cnns) == 0: assert target_bed and antitarget_bed, "Missing CNNs and target BEDs for flat background" cmd += ["-t", target_bed, "-a", antitarget_bed] else: cmd += background_cnns do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background") return out_file
[ "def", "cnvkit_background", "(", "background_cnns", ",", "out_file", ",", "items", ",", "target_bed", "=", "None", ",", "antitarget_bed", "=", "None", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", ...
Calculate background reference, handling flat case with no normal sample.
[ "Calculate", "background", "reference", "handling", "flat", "case", "with", "no", "normal", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L422-L437
train
218,130
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_get_batch_gender
def _get_batch_gender(items): """Retrieve gender for a batch of items if consistent. Better not to specify for mixed populations, CNVkit will work it out https://github.com/bcbio/bcbio-nextgen/commit/1a0e217c8a4d3cee10fa890fb3cfd4db5034281d#r26279752 """ genders = set([population.get_gender(x) for x in items]) if len(genders) == 1: gender = genders.pop() if gender != "unknown": return gender
python
def _get_batch_gender(items): """Retrieve gender for a batch of items if consistent. Better not to specify for mixed populations, CNVkit will work it out https://github.com/bcbio/bcbio-nextgen/commit/1a0e217c8a4d3cee10fa890fb3cfd4db5034281d#r26279752 """ genders = set([population.get_gender(x) for x in items]) if len(genders) == 1: gender = genders.pop() if gender != "unknown": return gender
[ "def", "_get_batch_gender", "(", "items", ")", ":", "genders", "=", "set", "(", "[", "population", ".", "get_gender", "(", "x", ")", "for", "x", "in", "items", "]", ")", "if", "len", "(", "genders", ")", "==", "1", ":", "gender", "=", "genders", "....
Retrieve gender for a batch of items if consistent. Better not to specify for mixed populations, CNVkit will work it out https://github.com/bcbio/bcbio-nextgen/commit/1a0e217c8a4d3cee10fa890fb3cfd4db5034281d#r26279752
[ "Retrieve", "gender", "for", "a", "batch", "of", "items", "if", "consistent", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L440-L451
train
218,131
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
targets_w_bins
def targets_w_bins(cnv_file, access_file, target_anti_fn, work_dir, data): """Calculate target and anti-target files with pre-determined bins. """ target_file = os.path.join(work_dir, "%s-target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s-antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): target_bin, _ = target_anti_fn() with file_transaction(data, target_file) as tx_out_file: cmd = [_get_cmd(), "target", cnv_file, "--split", "-o", tx_out_file, "--avg-size", str(target_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target") if not os.path.exists(anti_file): _, anti_bin = target_anti_fn() with file_transaction(data, anti_file) as tx_out_file: # Create access file without targets to avoid overlap # antitarget in cnvkit is meant to do this but appears to not always happen # after chromosome 1 tx_access_file = os.path.join(os.path.dirname(tx_out_file), os.path.basename(access_file)) pybedtools.BedTool(access_file).subtract(cnv_file).saveas(tx_access_file) cmd = [_get_cmd(), "antitarget", "-g", tx_access_file, cnv_file, "-o", tx_out_file, "--avg-size", str(anti_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget") return target_file, anti_file
python
def targets_w_bins(cnv_file, access_file, target_anti_fn, work_dir, data): """Calculate target and anti-target files with pre-determined bins. """ target_file = os.path.join(work_dir, "%s-target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s-antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): target_bin, _ = target_anti_fn() with file_transaction(data, target_file) as tx_out_file: cmd = [_get_cmd(), "target", cnv_file, "--split", "-o", tx_out_file, "--avg-size", str(target_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target") if not os.path.exists(anti_file): _, anti_bin = target_anti_fn() with file_transaction(data, anti_file) as tx_out_file: # Create access file without targets to avoid overlap # antitarget in cnvkit is meant to do this but appears to not always happen # after chromosome 1 tx_access_file = os.path.join(os.path.dirname(tx_out_file), os.path.basename(access_file)) pybedtools.BedTool(access_file).subtract(cnv_file).saveas(tx_access_file) cmd = [_get_cmd(), "antitarget", "-g", tx_access_file, cnv_file, "-o", tx_out_file, "--avg-size", str(anti_bin)] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget") return target_file, anti_file
[ "def", "targets_w_bins", "(", "cnv_file", ",", "access_file", ",", "target_anti_fn", ",", "work_dir", ",", "data", ")", ":", "target_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-target.bed\"", "%", "dd", ".", "get_sample_name", "("...
Calculate target and anti-target files with pre-determined bins.
[ "Calculate", "target", "and", "anti", "-", "target", "files", "with", "pre", "-", "determined", "bins", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L453-L475
train
218,132
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
targets_from_background
def targets_from_background(back_cnn, work_dir, data): """Retrieve target and antitarget BEDs from background CNN file. """ target_file = os.path.join(work_dir, "%s.target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s.antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): with file_transaction(data, target_file) as tx_out_file: out_base = tx_out_file.replace(".target.bed", "") cmd = [_get_cmd("reference2targets.py"), "-o", out_base, back_cnn] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit targets from background") shutil.copy(out_base + ".antitarget.bed", anti_file) return target_file, anti_file
python
def targets_from_background(back_cnn, work_dir, data): """Retrieve target and antitarget BEDs from background CNN file. """ target_file = os.path.join(work_dir, "%s.target.bed" % dd.get_sample_name(data)) anti_file = os.path.join(work_dir, "%s.antitarget.bed" % dd.get_sample_name(data)) if not utils.file_exists(target_file): with file_transaction(data, target_file) as tx_out_file: out_base = tx_out_file.replace(".target.bed", "") cmd = [_get_cmd("reference2targets.py"), "-o", out_base, back_cnn] do.run(_prep_cmd(cmd, tx_out_file), "CNVkit targets from background") shutil.copy(out_base + ".antitarget.bed", anti_file) return target_file, anti_file
[ "def", "targets_from_background", "(", "back_cnn", ",", "work_dir", ",", "data", ")", ":", "target_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s.target.bed\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "anti_file", ...
Retrieve target and antitarget BEDs from background CNN file.
[ "Retrieve", "target", "and", "antitarget", "BEDs", "from", "background", "CNN", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L477-L488
train
218,133
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_seg_to_output
def _add_seg_to_output(out, data, enumerate_chroms=False): """Export outputs to 'seg' format compatible with IGV and GenePattern. """ out_file = "%s.seg" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", "seg"] if enumerate_chroms: cmd += ["--enumerate-chroms"] cmd += ["-o", tx_out_file, out["cns"]] do.run(cmd, "CNVkit export seg") out["seg"] = out_file return out
python
def _add_seg_to_output(out, data, enumerate_chroms=False): """Export outputs to 'seg' format compatible with IGV and GenePattern. """ out_file = "%s.seg" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", "seg"] if enumerate_chroms: cmd += ["--enumerate-chroms"] cmd += ["-o", tx_out_file, out["cns"]] do.run(cmd, "CNVkit export seg") out["seg"] = out_file return out
[ "def", "_add_seg_to_output", "(", "out", ",", "data", ",", "enumerate_chroms", "=", "False", ")", ":", "out_file", "=", "\"%s.seg\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "not", "utils", ...
Export outputs to 'seg' format compatible with IGV and GenePattern.
[ "Export", "outputs", "to", "seg", "format", "compatible", "with", "IGV", "and", "GenePattern", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L490-L503
train
218,134
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_variantcalls_to_output
def _add_variantcalls_to_output(out, data, items, is_somatic=False): """Call ploidy and convert into VCF and BED representations. """ call_file = "%s-call%s" % os.path.splitext(out["cns"]) if not utils.file_exists(call_file): with file_transaction(data, call_file) as tx_call_file: filters = ["--filter", "cn"] cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \ filters + \ ["--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_call_file, out["cns"]] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]): cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] if not is_somatic: cmd += ["-m", "clonal"] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit call ploidy") calls = {} for outformat in ["bed", "vcf"]: out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat) calls[outformat] = out_file if not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", outformat, "--sample-id", dd.get_sample_name(data), "--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_out_file, call_file] do.run(cmd, "CNVkit export %s" % outformat) out["call_file"] = call_file out["vrn_bed"] = annotate.add_genes(calls["bed"], data) effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff") out["vrn_file"] = effects_vcf or calls["vcf"] out["vrn_file"] = shared.annotate_with_depth(out["vrn_file"], items) return out
python
def _add_variantcalls_to_output(out, data, items, is_somatic=False): """Call ploidy and convert into VCF and BED representations. """ call_file = "%s-call%s" % os.path.splitext(out["cns"]) if not utils.file_exists(call_file): with file_transaction(data, call_file) as tx_call_file: filters = ["--filter", "cn"] cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \ filters + \ ["--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_call_file, out["cns"]] small_vrn_files = _compatible_small_variants(data, items) if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]): cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample] if small_vrn_files[0].normal: cmd += ["--normal-id", small_vrn_files[0].normal] if not is_somatic: cmd += ["-m", "clonal"] gender = _get_batch_gender(items) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit call ploidy") calls = {} for outformat in ["bed", "vcf"]: out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat) calls[outformat] = out_file if not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", outformat, "--sample-id", dd.get_sample_name(data), "--ploidy", str(ploidy.get_ploidy([data])), "-o", tx_out_file, call_file] do.run(cmd, "CNVkit export %s" % outformat) out["call_file"] = call_file out["vrn_bed"] = annotate.add_genes(calls["bed"], data) effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff") out["vrn_file"] = effects_vcf or calls["vcf"] out["vrn_file"] = shared.annotate_with_depth(out["vrn_file"], items) return out
[ "def", "_add_variantcalls_to_output", "(", "out", ",", "data", ",", "items", ",", "is_somatic", "=", "False", ")", ":", "call_file", "=", "\"%s-call%s\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "if", "not", "utils"...
Call ploidy and convert into VCF and BED representations.
[ "Call", "ploidy", "and", "convert", "into", "VCF", "and", "BED", "representations", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L538-L576
train
218,135
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_segmetrics_to_output
def _add_segmetrics_to_output(out, data): """Add metrics for measuring reliability of CNV estimates. """ out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics", "--median", "--iqr", "--ci", "--pi", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] # Use less fine grained bootstrapping intervals for whole genome runs if dd.get_coverage_interval(data) == "genome": cmd += ["--alpha", "0.1", "--bootstrap", "50"] else: cmd += ["--alpha", "0.01", "--bootstrap", "500"] do.run(cmd, "CNVkit segmetrics") out["segmetrics"] = out_file return out
python
def _add_segmetrics_to_output(out, data): """Add metrics for measuring reliability of CNV estimates. """ out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics", "--median", "--iqr", "--ci", "--pi", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] # Use less fine grained bootstrapping intervals for whole genome runs if dd.get_coverage_interval(data) == "genome": cmd += ["--alpha", "0.1", "--bootstrap", "50"] else: cmd += ["--alpha", "0.01", "--bootstrap", "500"] do.run(cmd, "CNVkit segmetrics") out["segmetrics"] = out_file return out
[ "def", "_add_segmetrics_to_output", "(", "out", ",", "data", ")", ":", "out_file", "=", "\"%s-segmetrics.txt\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "not", "utils", ".", "file_exists", "(", ...
Add metrics for measuring reliability of CNV estimates.
[ "Add", "metrics", "for", "measuring", "reliability", "of", "CNV", "estimates", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L578-L594
train
218,136
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_gainloss_to_output
def _add_gainloss_to_output(out, data): """Add gainloss based on genes, helpful for identifying changes in smaller genes. """ out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] gender = _get_batch_gender([data]) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit gainloss") out["gainloss"] = out_file return out
python
def _add_gainloss_to_output(out, data): """Add gainloss based on genes, helpful for identifying changes in smaller genes. """ out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss", "-s", out["cns"], "-o", tx_out_file, out["cnr"]] gender = _get_batch_gender([data]) if gender: cmd += ["--sample-sex", gender] do.run(cmd, "CNVkit gainloss") out["gainloss"] = out_file return out
[ "def", "_add_gainloss_to_output", "(", "out", ",", "data", ")", ":", "out_file", "=", "\"%s-gainloss.txt\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "not", "utils", ".", "file_exists", "(", "o...
Add gainloss based on genes, helpful for identifying changes in smaller genes.
[ "Add", "gainloss", "based", "on", "genes", "helpful", "for", "identifying", "changes", "in", "smaller", "genes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L596-L609
train
218,137
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_coverage_bedgraph_to_output
def _add_coverage_bedgraph_to_output(out, data): """Add BedGraph representation of coverage to the output """ out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0] if utils.file_exists(out_file): out["bedgraph"] = out_file return out bam_file = dd.get_align_bam(data) bedtools = config_utils.get_program("bedtools", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) cns_file = out["cns"] bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name with file_transaction(data, out_file) as tx_out_file: cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; " "{samtools} view -b -L {bed_file} {bam_file} | " "{bedtools} genomecov -bg -ibam - -g {bed_file} >" "{tx_out_file}").format(**locals()) do.run(cmd, "CNVkit bedGraph conversion") os.remove(bed_file) out["bedgraph"] = out_file return out
python
def _add_coverage_bedgraph_to_output(out, data): """Add BedGraph representation of coverage to the output """ out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0] if utils.file_exists(out_file): out["bedgraph"] = out_file return out bam_file = dd.get_align_bam(data) bedtools = config_utils.get_program("bedtools", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) cns_file = out["cns"] bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name with file_transaction(data, out_file) as tx_out_file: cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; " "{samtools} view -b -L {bed_file} {bam_file} | " "{bedtools} genomecov -bg -ibam - -g {bed_file} >" "{tx_out_file}").format(**locals()) do.run(cmd, "CNVkit bedGraph conversion") os.remove(bed_file) out["bedgraph"] = out_file return out
[ "def", "_add_coverage_bedgraph_to_output", "(", "out", ",", "data", ")", ":", "out_file", "=", "\"%s.coverage.bedgraph\"", "%", "os", ".", "path", ".", "splitext", "(", "out", "[", "\"cns\"", "]", ")", "[", "0", "]", "if", "utils", ".", "file_exists", "(",...
Add BedGraph representation of coverage to the output
[ "Add", "BedGraph", "representation", "of", "coverage", "to", "the", "output" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L611-L631
train
218,138
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_add_plots_to_output
def _add_plots_to_output(out, data): """Add CNVkit plots summarizing called copy number values. """ out["plot"] = {} diagram_plot = _add_diagram_plot(out, data) if diagram_plot: out["plot"]["diagram"] = diagram_plot scatter = _add_scatter_plot(out, data) if scatter: out["plot"]["scatter"] = scatter scatter_global = _add_global_scatter_plot(out, data) if scatter_global: out["plot"]["scatter_global"] = scatter_global return out
python
def _add_plots_to_output(out, data): """Add CNVkit plots summarizing called copy number values. """ out["plot"] = {} diagram_plot = _add_diagram_plot(out, data) if diagram_plot: out["plot"]["diagram"] = diagram_plot scatter = _add_scatter_plot(out, data) if scatter: out["plot"]["scatter"] = scatter scatter_global = _add_global_scatter_plot(out, data) if scatter_global: out["plot"]["scatter_global"] = scatter_global return out
[ "def", "_add_plots_to_output", "(", "out", ",", "data", ")", ":", "out", "[", "\"plot\"", "]", "=", "{", "}", "diagram_plot", "=", "_add_diagram_plot", "(", "out", ",", "data", ")", "if", "diagram_plot", ":", "out", "[", "\"plot\"", "]", "[", "\"diagram\...
Add CNVkit plots summarizing called copy number values.
[ "Add", "CNVkit", "plots", "summarizing", "called", "copy", "number", "values", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L633-L646
train
218,139
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
_get_larger_chroms
def _get_larger_chroms(ref_file): """Retrieve larger chromosomes, avoiding the smaller ones for plotting. """ from scipy.cluster.vq import kmeans, vq all_sizes = [] for c in ref.file_contigs(ref_file): all_sizes.append(float(c.size)) all_sizes.sort() if len(all_sizes) > 5: # separate out smaller chromosomes and haplotypes with kmeans centroids, _ = kmeans(np.array(all_sizes), 2) idx, _ = vq(np.array(all_sizes), centroids) little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes))) little_sizes = [x[1] for x in little_sizes] # create one more cluster with the smaller, removing the haplotypes centroids2, _ = kmeans(np.array(little_sizes), 2) idx2, _ = vq(np.array(little_sizes), centroids2) little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes))) little_sizes2 = [x[1] for x in little_sizes2] # get any chromosomes not in haplotype/random bin thresh = max(little_sizes2) else: thresh = 0 larger_chroms = [] for c in ref.file_contigs(ref_file): if c.size > thresh: larger_chroms.append(c.name) return larger_chroms
python
def _get_larger_chroms(ref_file): """Retrieve larger chromosomes, avoiding the smaller ones for plotting. """ from scipy.cluster.vq import kmeans, vq all_sizes = [] for c in ref.file_contigs(ref_file): all_sizes.append(float(c.size)) all_sizes.sort() if len(all_sizes) > 5: # separate out smaller chromosomes and haplotypes with kmeans centroids, _ = kmeans(np.array(all_sizes), 2) idx, _ = vq(np.array(all_sizes), centroids) little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes))) little_sizes = [x[1] for x in little_sizes] # create one more cluster with the smaller, removing the haplotypes centroids2, _ = kmeans(np.array(little_sizes), 2) idx2, _ = vq(np.array(little_sizes), centroids2) little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes))) little_sizes2 = [x[1] for x in little_sizes2] # get any chromosomes not in haplotype/random bin thresh = max(little_sizes2) else: thresh = 0 larger_chroms = [] for c in ref.file_contigs(ref_file): if c.size > thresh: larger_chroms.append(c.name) return larger_chroms
[ "def", "_get_larger_chroms", "(", "ref_file", ")", ":", "from", "scipy", ".", "cluster", ".", "vq", "import", "kmeans", ",", "vq", "all_sizes", "=", "[", "]", "for", "c", "in", "ref", ".", "file_contigs", "(", "ref_file", ")", ":", "all_sizes", ".", "a...
Retrieve larger chromosomes, avoiding the smaller ones for plotting.
[ "Retrieve", "larger", "chromosomes", "avoiding", "the", "smaller", "ones", "for", "plotting", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L648-L675
train
218,140
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
segment_from_cnr
def segment_from_cnr(cnr_file, data, out_base): """Provide segmentation on a cnr file, used in external PureCN integration. """ cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, [data], out_file="%s.cns" % out_base, detailed=True) out = _add_seg_to_output({"cns": cns_file}, data, enumerate_chroms=False) return out["seg"]
python
def segment_from_cnr(cnr_file, data, out_base): """Provide segmentation on a cnr file, used in external PureCN integration. """ cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, [data], out_file="%s.cns" % out_base, detailed=True) out = _add_seg_to_output({"cns": cns_file}, data, enumerate_chroms=False) return out["seg"]
[ "def", "segment_from_cnr", "(", "cnr_file", ",", "data", ",", "out_base", ")", ":", "cns_file", "=", "_cnvkit_segment", "(", "cnr_file", ",", "dd", ".", "get_coverage_interval", "(", "data", ")", ",", "data", ",", "[", "data", "]", ",", "out_file", "=", ...
Provide segmentation on a cnr file, used in external PureCN integration.
[ "Provide", "segmentation", "on", "a", "cnr", "file", "used", "in", "external", "PureCN", "integration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L747-L753
train
218,141
bcbio/bcbio-nextgen
bcbio/structural/cnvkit.py
export_theta
def export_theta(ckout, data): """Provide updated set of data with export information for TheTA2 input. """ cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome") cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome") out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file] do.run(_prep_cmd(cmd, tx_out_file), "Export CNVkit calls as inputs for TheTA2") ckout["theta_input"] = out_file return ckout
python
def export_theta(ckout, data): """Provide updated set of data with export information for TheTA2 input. """ cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome") cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome") out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file] do.run(_prep_cmd(cmd, tx_out_file), "Export CNVkit calls as inputs for TheTA2") ckout["theta_input"] = out_file return ckout
[ "def", "export_theta", "(", "ckout", ",", "data", ")", ":", "cns_file", "=", "chromhacks", ".", "bed_to_standardonly", "(", "ckout", "[", "\"cns\"", "]", ",", "data", ",", "headers", "=", "\"chromosome\"", ")", "cnr_file", "=", "chromhacks", ".", "bed_to_sta...
Provide updated set of data with export information for TheTA2 input.
[ "Provide", "updated", "set", "of", "data", "with", "export", "information", "for", "TheTA2", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L757-L768
train
218,142
bcbio/bcbio-nextgen
bcbio/rnaseq/stringtie.py
_stringtie_expression
def _stringtie_expression(bam, data, out_dir="."): """ only estimate expression the Stringtie, do not assemble new transcripts """ gtf_file = dd.get_gtf_file(data) num_cores = dd.get_num_cores(data) error_message = "The %s file for %s is missing. StringTie has an error." stringtie = config_utils.get_program("stringtie", data, default="stringtie") # don't assemble transcripts unless asked exp_flag = ("-e" if "stringtie" not in dd.get_transcript_assembler(data) else "") base_cmd = ("{stringtie} {exp_flag} -b {out_dir} -p {num_cores} -G {gtf_file} " "-o {out_gtf} {bam}") transcript_file = os.path.join(out_dir, "t_data.ctab") exon_file = os.path.join(out_dir, "e_data.ctab") out_gtf = os.path.join(out_dir, "stringtie-assembly.gtf") if file_exists(transcript_file): return exon_file, transcript_file, out_gtf cmd = base_cmd.format(**locals()) do.run(cmd, "Running Stringtie on %s." % bam) assert file_exists(exon_file), error_message % ("exon", exon_file) assert file_exists(transcript_file), error_message % ("transcript", transcript_file) return transcript_file
python
def _stringtie_expression(bam, data, out_dir="."): """ only estimate expression the Stringtie, do not assemble new transcripts """ gtf_file = dd.get_gtf_file(data) num_cores = dd.get_num_cores(data) error_message = "The %s file for %s is missing. StringTie has an error." stringtie = config_utils.get_program("stringtie", data, default="stringtie") # don't assemble transcripts unless asked exp_flag = ("-e" if "stringtie" not in dd.get_transcript_assembler(data) else "") base_cmd = ("{stringtie} {exp_flag} -b {out_dir} -p {num_cores} -G {gtf_file} " "-o {out_gtf} {bam}") transcript_file = os.path.join(out_dir, "t_data.ctab") exon_file = os.path.join(out_dir, "e_data.ctab") out_gtf = os.path.join(out_dir, "stringtie-assembly.gtf") if file_exists(transcript_file): return exon_file, transcript_file, out_gtf cmd = base_cmd.format(**locals()) do.run(cmd, "Running Stringtie on %s." % bam) assert file_exists(exon_file), error_message % ("exon", exon_file) assert file_exists(transcript_file), error_message % ("transcript", transcript_file) return transcript_file
[ "def", "_stringtie_expression", "(", "bam", ",", "data", ",", "out_dir", "=", "\".\"", ")", ":", "gtf_file", "=", "dd", ".", "get_gtf_file", "(", "data", ")", "num_cores", "=", "dd", ".", "get_num_cores", "(", "data", ")", "error_message", "=", "\"The %s f...
only estimate expression the Stringtie, do not assemble new transcripts
[ "only", "estimate", "expression", "the", "Stringtie", "do", "not", "assemble", "new", "transcripts" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/stringtie.py#L21-L43
train
218,143
bcbio/bcbio-nextgen
bcbio/rnaseq/stringtie.py
run_stringtie_expression
def run_stringtie_expression(data): """ estimate expression from Stringtie, using the bcbio datadict does not do transcriptome assembly """ bam = dd.get_work_bam(data) sample_name = dd.get_sample_name(data) out_dir = os.path.join("stringtie", sample_name) isoform_fpkm = os.path.join(out_dir, sample_name + ".isoform.fpkm") gene_fpkm = os.path.join(out_dir, sample_name + ".fpkm") assembly = os.path.abspath(os.path.join(out_dir, "stringtie-assembly.gtf")) if file_exists(isoform_fpkm) and file_exists(gene_fpkm): data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data with file_transaction(data, out_dir) as tx_out_dir: transcript_file = _stringtie_expression(bam, data, tx_out_dir) df = _parse_ballgown(transcript_file) _write_fpkms(df, tx_out_dir, sample_name) data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data
python
def run_stringtie_expression(data): """ estimate expression from Stringtie, using the bcbio datadict does not do transcriptome assembly """ bam = dd.get_work_bam(data) sample_name = dd.get_sample_name(data) out_dir = os.path.join("stringtie", sample_name) isoform_fpkm = os.path.join(out_dir, sample_name + ".isoform.fpkm") gene_fpkm = os.path.join(out_dir, sample_name + ".fpkm") assembly = os.path.abspath(os.path.join(out_dir, "stringtie-assembly.gtf")) if file_exists(isoform_fpkm) and file_exists(gene_fpkm): data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data with file_transaction(data, out_dir) as tx_out_dir: transcript_file = _stringtie_expression(bam, data, tx_out_dir) df = _parse_ballgown(transcript_file) _write_fpkms(df, tx_out_dir, sample_name) data = dd.set_stringtie_dir(data, out_dir) data = dd.set_fpkm(data, gene_fpkm) data = dd.set_fpkm_isoform(data, isoform_fpkm) if "stringtie" in dd.get_transcript_assembler(data): assembled_gtfs = dd.get_assembled_gtf(data) assembled_gtfs.append(assembly) data = dd.set_assembled_gtf(data, assembled_gtfs) return data
[ "def", "run_stringtie_expression", "(", "data", ")", ":", "bam", "=", "dd", ".", "get_work_bam", "(", "data", ")", "sample_name", "=", "dd", ".", "get_sample_name", "(", "data", ")", "out_dir", "=", "os", ".", "path", ".", "join", "(", "\"stringtie\"", "...
estimate expression from Stringtie, using the bcbio datadict does not do transcriptome assembly
[ "estimate", "expression", "from", "Stringtie", "using", "the", "bcbio", "datadict", "does", "not", "do", "transcriptome", "assembly" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/stringtie.py#L45-L76
train
218,144
bcbio/bcbio-nextgen
bcbio/upload/filesystem.py
update_file
def update_file(finfo, sample_info, config, pass_uptodate=False): """Update the file in local filesystem storage. """ storage_dir = utils.safe_makedir(_get_storage_dir(finfo, config)) if finfo.get("type") == "directory": return _copy_finfo_directory(finfo, storage_dir) else: return _copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate)
python
def update_file(finfo, sample_info, config, pass_uptodate=False): """Update the file in local filesystem storage. """ storage_dir = utils.safe_makedir(_get_storage_dir(finfo, config)) if finfo.get("type") == "directory": return _copy_finfo_directory(finfo, storage_dir) else: return _copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate)
[ "def", "update_file", "(", "finfo", ",", "sample_info", ",", "config", ",", "pass_uptodate", "=", "False", ")", ":", "storage_dir", "=", "utils", ".", "safe_makedir", "(", "_get_storage_dir", "(", "finfo", ",", "config", ")", ")", "if", "finfo", ".", "get"...
Update the file in local filesystem storage.
[ "Update", "the", "file", "in", "local", "filesystem", "storage", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/filesystem.py#L10-L17
train
218,145
bcbio/bcbio-nextgen
bcbio/upload/filesystem.py
_copy_finfo
def _copy_finfo(finfo, storage_dir, pass_uptodate=False): """Copy a file into the output storage directory. """ out_file = _get_file_upload_path(finfo, storage_dir) if not shared.up_to_date(out_file, finfo): logger.info("Storing in local filesystem: %s" % out_file) shutil.copy(finfo["path"], out_file) return out_file if pass_uptodate: return out_file
python
def _copy_finfo(finfo, storage_dir, pass_uptodate=False): """Copy a file into the output storage directory. """ out_file = _get_file_upload_path(finfo, storage_dir) if not shared.up_to_date(out_file, finfo): logger.info("Storing in local filesystem: %s" % out_file) shutil.copy(finfo["path"], out_file) return out_file if pass_uptodate: return out_file
[ "def", "_copy_finfo", "(", "finfo", ",", "storage_dir", ",", "pass_uptodate", "=", "False", ")", ":", "out_file", "=", "_get_file_upload_path", "(", "finfo", ",", "storage_dir", ")", "if", "not", "shared", ".", "up_to_date", "(", "out_file", ",", "finfo", ")...
Copy a file into the output storage directory.
[ "Copy", "a", "file", "into", "the", "output", "storage", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/filesystem.py#L63-L72
train
218,146
bcbio/bcbio-nextgen
bcbio/upload/filesystem.py
_copy_finfo_directory
def _copy_finfo_directory(finfo, out_dir): """Copy a directory into the final output directory. """ out_dir = _get_dir_upload_path(finfo, out_dir) if not shared.up_to_date(out_dir, finfo): logger.info("Storing directory in local filesystem: %s" % out_dir) if os.path.exists(out_dir): shutil.rmtree(out_dir) shutil.copytree(finfo["path"], out_dir) for tmpdir in ["tx", "tmp"]: if os.path.exists(os.path.join(out_dir, tmpdir)): shutil.rmtree(os.path.join(out_dir, tmpdir)) os.utime(out_dir, None) return out_dir
python
def _copy_finfo_directory(finfo, out_dir): """Copy a directory into the final output directory. """ out_dir = _get_dir_upload_path(finfo, out_dir) if not shared.up_to_date(out_dir, finfo): logger.info("Storing directory in local filesystem: %s" % out_dir) if os.path.exists(out_dir): shutil.rmtree(out_dir) shutil.copytree(finfo["path"], out_dir) for tmpdir in ["tx", "tmp"]: if os.path.exists(os.path.join(out_dir, tmpdir)): shutil.rmtree(os.path.join(out_dir, tmpdir)) os.utime(out_dir, None) return out_dir
[ "def", "_copy_finfo_directory", "(", "finfo", ",", "out_dir", ")", ":", "out_dir", "=", "_get_dir_upload_path", "(", "finfo", ",", "out_dir", ")", "if", "not", "shared", ".", "up_to_date", "(", "out_dir", ",", "finfo", ")", ":", "logger", ".", "info", "(",...
Copy a directory into the final output directory.
[ "Copy", "a", "directory", "into", "the", "final", "output", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/filesystem.py#L74-L87
train
218,147
bcbio/bcbio-nextgen
bcbio/variation/naming.py
handle_synonyms
def handle_synonyms(in_file, ref_file, genome_build, work_dir, data): """Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column. """ if genome_build in GMAP and ref_file: mappings = GMAP[genome_build] contigs = set([c.name for c in ref.file_contigs(ref_file)]) out_file = os.path.join(work_dir, "%s-fixed_contigs%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_exists(out_file): if out_file.endswith(".gz"): out_file = out_file.replace(".gz", "") needs_bgzip = True else: needs_bgzip = False checked_file = "%s.checked" % utils.splitext_plus(out_file)[0] if not _matches_contigs(in_file, contigs, checked_file): with file_transaction(data, out_file) as tx_out_file: _write_newname_file(in_file, tx_out_file, mappings) if needs_bgzip: out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file return in_file
python
def handle_synonyms(in_file, ref_file, genome_build, work_dir, data): """Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column. """ if genome_build in GMAP and ref_file: mappings = GMAP[genome_build] contigs = set([c.name for c in ref.file_contigs(ref_file)]) out_file = os.path.join(work_dir, "%s-fixed_contigs%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_exists(out_file): if out_file.endswith(".gz"): out_file = out_file.replace(".gz", "") needs_bgzip = True else: needs_bgzip = False checked_file = "%s.checked" % utils.splitext_plus(out_file)[0] if not _matches_contigs(in_file, contigs, checked_file): with file_transaction(data, out_file) as tx_out_file: _write_newname_file(in_file, tx_out_file, mappings) if needs_bgzip: out_file = vcfutils.bgzip_and_index(out_file, data["config"]) return out_file return in_file
[ "def", "handle_synonyms", "(", "in_file", ",", "ref_file", ",", "genome_build", ",", "work_dir", ",", "data", ")", ":", "if", "genome_build", "in", "GMAP", "and", "ref_file", ":", "mappings", "=", "GMAP", "[", "genome_build", "]", "contigs", "=", "set", "(...
Potentially handle remapping synonymous chromosome names between builds. Handles tab delimited file formats like BED and VCF where the contig is in the first column.
[ "Potentially", "handle", "remapping", "synonymous", "chromosome", "names", "between", "builds", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/naming.py#L101-L124
train
218,148
bcbio/bcbio-nextgen
bcbio/variation/naming.py
_write_newname_file
def _write_newname_file(in_file, out_file, mappings): """Re-write an input file with contigs matching the correct reference. """ with utils.open_gzipsafe(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.split("\t") new_contig = mappings.get(parts[0]) if new_contig: parts[0] = new_contig out_handle.write("\t".join(parts))
python
def _write_newname_file(in_file, out_file, mappings): """Re-write an input file with contigs matching the correct reference. """ with utils.open_gzipsafe(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.split("\t") new_contig = mappings.get(parts[0]) if new_contig: parts[0] = new_contig out_handle.write("\t".join(parts))
[ "def", "_write_newname_file", "(", "in_file", ",", "out_file", ",", "mappings", ")", ":", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "for"...
Re-write an input file with contigs matching the correct reference.
[ "Re", "-", "write", "an", "input", "file", "with", "contigs", "matching", "the", "correct", "reference", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/naming.py#L126-L139
train
218,149
bcbio/bcbio-nextgen
bcbio/variation/naming.py
_matches_contigs
def _matches_contigs(in_file, contigs, checked_file): """Check if the contigs in the input file match the defined contigs in the reference genome. """ tocheck_contigs = 2 if utils.file_exists(checked_file): with open(checked_file) as in_handle: return in_handle.read().strip() == "match" else: with utils.open_gzipsafe(in_file) as in_handle: to_check = set([]) for line in in_handle: if not line.startswith("#"): to_check.add(line.split()[0]) if len(to_check) >= tocheck_contigs: break with open(checked_file, "w") as out_handle: if any([c not in contigs for c in to_check]): out_handle.write("different") return False else: out_handle.write("match") return True
python
def _matches_contigs(in_file, contigs, checked_file): """Check if the contigs in the input file match the defined contigs in the reference genome. """ tocheck_contigs = 2 if utils.file_exists(checked_file): with open(checked_file) as in_handle: return in_handle.read().strip() == "match" else: with utils.open_gzipsafe(in_file) as in_handle: to_check = set([]) for line in in_handle: if not line.startswith("#"): to_check.add(line.split()[0]) if len(to_check) >= tocheck_contigs: break with open(checked_file, "w") as out_handle: if any([c not in contigs for c in to_check]): out_handle.write("different") return False else: out_handle.write("match") return True
[ "def", "_matches_contigs", "(", "in_file", ",", "contigs", ",", "checked_file", ")", ":", "tocheck_contigs", "=", "2", "if", "utils", ".", "file_exists", "(", "checked_file", ")", ":", "with", "open", "(", "checked_file", ")", "as", "in_handle", ":", "return...
Check if the contigs in the input file match the defined contigs in the reference genome.
[ "Check", "if", "the", "contigs", "in", "the", "input", "file", "match", "the", "defined", "contigs", "in", "the", "reference", "genome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/naming.py#L141-L162
train
218,150
bcbio/bcbio-nextgen
bcbio/ngsalign/novoalign.py
align_bam
def align_bam(in_bam, ref_file, names, align_dir, data): """Perform realignment of input BAM file; uses unix pipes for avoid IO. """ config = data["config"] out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"])) novoalign = config_utils.get_program("novoalign", config) samtools = config_utils.get_program("samtools", config) resources = config_utils.get_resources("novoalign", config) num_cores = config["algorithm"].get("num_cores", 1) max_mem = resources.get("memory", "4G").upper() extra_novo_args = " ".join(_novoalign_args_from_config(config, False)) if not file_exists(out_file): with tx_tmpdir(data, base_dir=align_dir) as work_dir: with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file): rg_info = get_rg_info(names) tx_out_prefix = os.path.splitext(tx_out_file)[0] prefix1 = "%s-in1" % tx_out_prefix cmd = ("unset JAVA_HOME && " "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} " "| {novoalign} -o SAM '{rg_info}' -d {ref_file} -f /dev/stdin " " -F BAMPE -c {num_cores} {extra_novo_args} | ") cmd = (cmd + tobam_cl).format(**locals()) do.run(cmd, "Novoalign: %s" % names["sample"], None, [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)]) return out_file
python
def align_bam(in_bam, ref_file, names, align_dir, data): """Perform realignment of input BAM file; uses unix pipes for avoid IO. """ config = data["config"] out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"])) novoalign = config_utils.get_program("novoalign", config) samtools = config_utils.get_program("samtools", config) resources = config_utils.get_resources("novoalign", config) num_cores = config["algorithm"].get("num_cores", 1) max_mem = resources.get("memory", "4G").upper() extra_novo_args = " ".join(_novoalign_args_from_config(config, False)) if not file_exists(out_file): with tx_tmpdir(data, base_dir=align_dir) as work_dir: with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file): rg_info = get_rg_info(names) tx_out_prefix = os.path.splitext(tx_out_file)[0] prefix1 = "%s-in1" % tx_out_prefix cmd = ("unset JAVA_HOME && " "{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} " "| {novoalign} -o SAM '{rg_info}' -d {ref_file} -f /dev/stdin " " -F BAMPE -c {num_cores} {extra_novo_args} | ") cmd = (cmd + tobam_cl).format(**locals()) do.run(cmd, "Novoalign: %s" % names["sample"], None, [do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)]) return out_file
[ "def", "align_bam", "(", "in_bam", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "out_file", "=", "os", ".", "path", ".", "join", "(", "align_dir", ",", "\"{0}-sort.bam\"", ".", "...
Perform realignment of input BAM file; uses unix pipes for avoid IO.
[ "Perform", "realignment", "of", "input", "BAM", "file", ";", "uses", "unix", "pipes", "for", "avoid", "IO", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/novoalign.py#L29-L54
train
218,151
bcbio/bcbio-nextgen
bcbio/ngsalign/novoalign.py
_novoalign_args_from_config
def _novoalign_args_from_config(config, need_quality=True): """Select novoalign options based on configuration parameters. """ if need_quality: qual_format = config["algorithm"].get("quality_format", "").lower() qual_flags = ["-F", "ILMFQ" if qual_format == "illumina" else "STDFQ"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers") if multi_mappers is True: multi_flag = "Random" elif isinstance(multi_mappers, six.string_types): multi_flag = multi_mappers else: multi_flag = "None" multi_flags = ["-r"] + multi_flag.split() resources = config_utils.get_resources("novoalign", config) # default arguments for improved variant calling based on # comparisons to reference materials: turn off soft clipping and recalibrate if resources.get("options") is None: extra_args = ["-o", "FullNW", "-k"] else: extra_args = [str(x) for x in resources.get("options", [])] return qual_flags + multi_flags + extra_args
python
def _novoalign_args_from_config(config, need_quality=True): """Select novoalign options based on configuration parameters. """ if need_quality: qual_format = config["algorithm"].get("quality_format", "").lower() qual_flags = ["-F", "ILMFQ" if qual_format == "illumina" else "STDFQ"] else: qual_flags = [] multi_mappers = config["algorithm"].get("multiple_mappers") if multi_mappers is True: multi_flag = "Random" elif isinstance(multi_mappers, six.string_types): multi_flag = multi_mappers else: multi_flag = "None" multi_flags = ["-r"] + multi_flag.split() resources = config_utils.get_resources("novoalign", config) # default arguments for improved variant calling based on # comparisons to reference materials: turn off soft clipping and recalibrate if resources.get("options") is None: extra_args = ["-o", "FullNW", "-k"] else: extra_args = [str(x) for x in resources.get("options", [])] return qual_flags + multi_flags + extra_args
[ "def", "_novoalign_args_from_config", "(", "config", ",", "need_quality", "=", "True", ")", ":", "if", "need_quality", ":", "qual_format", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"quality_format\"", ",", "\"\"", ")", ".", "lower", "(", "...
Select novoalign options based on configuration parameters.
[ "Select", "novoalign", "options", "based", "on", "configuration", "parameters", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/novoalign.py#L92-L115
train
218,152
bcbio/bcbio-nextgen
bcbio/ngsalign/novoalign.py
remap_index_fn
def remap_index_fn(ref_file): """Map sequence references to equivalent novoalign indexes. """ checks = [os.path.splitext(ref_file)[0].replace("/seq/", "/novoalign/"), os.path.splitext(ref_file)[0] + ".ndx", ref_file + ".bs.ndx", ref_file + ".ndx"] for check in checks: if os.path.exists(check): return check return checks[0]
python
def remap_index_fn(ref_file): """Map sequence references to equivalent novoalign indexes. """ checks = [os.path.splitext(ref_file)[0].replace("/seq/", "/novoalign/"), os.path.splitext(ref_file)[0] + ".ndx", ref_file + ".bs.ndx", ref_file + ".ndx"] for check in checks: if os.path.exists(check): return check return checks[0]
[ "def", "remap_index_fn", "(", "ref_file", ")", ":", "checks", "=", "[", "os", ".", "path", ".", "splitext", "(", "ref_file", ")", "[", "0", "]", ".", "replace", "(", "\"/seq/\"", ",", "\"/novoalign/\"", ")", ",", "os", ".", "path", ".", "splitext", "...
Map sequence references to equivalent novoalign indexes.
[ "Map", "sequence", "references", "to", "equivalent", "novoalign", "indexes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/novoalign.py#L138-L148
train
218,153
bcbio/bcbio-nextgen
bcbio/variation/sentieon.py
license_export
def license_export(data): """Retrieve export statement for sentieon license server. """ resources = config_utils.get_resources("sentieon", data["config"]) server = resources.get("keyfile") if not server: server = tz.get_in(["resources", "sentieon", "keyfile"], data) if not server: raise ValueError("Need to set resources keyfile with URL:port of license server, local license file or " "environmental variables to export \n" "http://bcbio-nextgen.readthedocs.io/en/latest/contents/configuration.html#resources\n" "Configuration: %s" % pprint.pformat(data)) if isinstance(server, six.string_types): return "export SENTIEON_LICENSE=%s && " % server else: assert isinstance(server, dict), server exports = "" for key, val in server.items(): exports += "export %s=%s && " % (key.upper(), val) return exports
python
def license_export(data): """Retrieve export statement for sentieon license server. """ resources = config_utils.get_resources("sentieon", data["config"]) server = resources.get("keyfile") if not server: server = tz.get_in(["resources", "sentieon", "keyfile"], data) if not server: raise ValueError("Need to set resources keyfile with URL:port of license server, local license file or " "environmental variables to export \n" "http://bcbio-nextgen.readthedocs.io/en/latest/contents/configuration.html#resources\n" "Configuration: %s" % pprint.pformat(data)) if isinstance(server, six.string_types): return "export SENTIEON_LICENSE=%s && " % server else: assert isinstance(server, dict), server exports = "" for key, val in server.items(): exports += "export %s=%s && " % (key.upper(), val) return exports
[ "def", "license_export", "(", "data", ")", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"sentieon\"", ",", "data", "[", "\"config\"", "]", ")", "server", "=", "resources", ".", "get", "(", "\"keyfile\"", ")", "if", "not", "server", "...
Retrieve export statement for sentieon license server.
[ "Retrieve", "export", "statement", "for", "sentieon", "license", "server", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/sentieon.py#L25-L44
train
218,154
bcbio/bcbio-nextgen
bcbio/variation/sentieon.py
_get_interval
def _get_interval(variant_regions, region, out_file, items): """Retrieve interval to run analysis in. Handles no targets, BED and regions region can be a single region or list of multiple regions for multicore calling. """ target = shared.subset_variant_regions(variant_regions, region, out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): return "--interval %s" % target else: return "--interval %s" % bamprep.region_to_gatk(target) else: return ""
python
def _get_interval(variant_regions, region, out_file, items): """Retrieve interval to run analysis in. Handles no targets, BED and regions region can be a single region or list of multiple regions for multicore calling. """ target = shared.subset_variant_regions(variant_regions, region, out_file, items) if target: if isinstance(target, six.string_types) and os.path.isfile(target): return "--interval %s" % target else: return "--interval %s" % bamprep.region_to_gatk(target) else: return ""
[ "def", "_get_interval", "(", "variant_regions", ",", "region", ",", "out_file", ",", "items", ")", ":", "target", "=", "shared", ".", "subset_variant_regions", "(", "variant_regions", ",", "region", ",", "out_file", ",", "items", ")", "if", "target", ":", "i...
Retrieve interval to run analysis in. Handles no targets, BED and regions region can be a single region or list of multiple regions for multicore calling.
[ "Retrieve", "interval", "to", "run", "analysis", "in", ".", "Handles", "no", "targets", "BED", "and", "regions" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/sentieon.py#L46-L58
train
218,155
bcbio/bcbio-nextgen
bcbio/variation/sentieon.py
run_tnscope
def run_tnscope(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Call variants with Sentieon's TNscope somatic caller. """ if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): variant_regions = bedutils.population_variant_regions(items, merged=True) interval = _get_interval(variant_regions, region, out_file, items) with file_transaction(items[0], out_file) as tx_out_file: paired = vcfutils.get_paired_bams(align_bams, items) assert paired and paired.normal_bam, "Require normal BAM for Sentieon TNscope" dbsnp = "--dbsnp %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else "" license = license_export(items[0]) cores = dd.get_num_cores(items[0]) cmd = ("{license}sentieon driver -t {cores} -r {ref_file} " "-i {paired.tumor_bam} -i {paired.normal_bam} {interval} " "--algo TNscope " "--tumor_sample {paired.tumor_name} --normal_sample {paired.normal_name} " "{dbsnp} {tx_out_file}") do.run(cmd.format(**locals()), "Sentieon TNscope") return out_file
python
def run_tnscope(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Call variants with Sentieon's TNscope somatic caller. """ if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): variant_regions = bedutils.population_variant_regions(items, merged=True) interval = _get_interval(variant_regions, region, out_file, items) with file_transaction(items[0], out_file) as tx_out_file: paired = vcfutils.get_paired_bams(align_bams, items) assert paired and paired.normal_bam, "Require normal BAM for Sentieon TNscope" dbsnp = "--dbsnp %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else "" license = license_export(items[0]) cores = dd.get_num_cores(items[0]) cmd = ("{license}sentieon driver -t {cores} -r {ref_file} " "-i {paired.tumor_bam} -i {paired.normal_bam} {interval} " "--algo TNscope " "--tumor_sample {paired.tumor_name} --normal_sample {paired.normal_name} " "{dbsnp} {tx_out_file}") do.run(cmd.format(**locals()), "Sentieon TNscope") return out_file
[ "def", "run_tnscope", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", ":", "out_file", "=", "\"%s-variants.vcf.gz\"", "%", "utils", "....
Call variants with Sentieon's TNscope somatic caller.
[ "Call", "variants", "with", "Sentieon", "s", "TNscope", "somatic", "caller", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/sentieon.py#L60-L81
train
218,156
bcbio/bcbio-nextgen
bcbio/variation/sentieon.py
run_gvcftyper
def run_gvcftyper(vrn_files, out_file, region, data): """Produce joint called variants from input gVCF files. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: license = license_export(data) ref_file = dd.get_ref_file(data) input_files = " ".join(vrn_files) region = bamprep.region_to_gatk(region) cmd = ("{license}sentieon driver -r {ref_file} --interval {region} " "--algo GVCFtyper {tx_out_file} {input_files}") do.run(cmd.format(**locals()), "Sentieon GVCFtyper") return out_file
python
def run_gvcftyper(vrn_files, out_file, region, data): """Produce joint called variants from input gVCF files. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: license = license_export(data) ref_file = dd.get_ref_file(data) input_files = " ".join(vrn_files) region = bamprep.region_to_gatk(region) cmd = ("{license}sentieon driver -r {ref_file} --interval {region} " "--algo GVCFtyper {tx_out_file} {input_files}") do.run(cmd.format(**locals()), "Sentieon GVCFtyper") return out_file
[ "def", "run_gvcftyper", "(", "vrn_files", ",", "out_file", ",", "region", ",", "data", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", ...
Produce joint called variants from input gVCF files.
[ "Produce", "joint", "called", "variants", "from", "input", "gVCF", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/sentieon.py#L137-L149
train
218,157
bcbio/bcbio-nextgen
bcbio/variation/sentieon.py
bqsr_table
def bqsr_table(data): """Generate recalibration tables as inputs to BQSR. """ in_file = dd.get_align_bam(data) out_file = "%s-recal-table.txt" % utils.splitext_plus(in_file)[0] if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: assoc_files = dd.get_variation_resources(data) known = "-k %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else "" license = license_export(data) cores = dd.get_num_cores(data) ref_file = dd.get_ref_file(data) cmd = ("{license}sentieon driver -t {cores} -r {ref_file} " "-i {in_file} --algo QualCal {known} {tx_out_file}") do.run(cmd.format(**locals()), "Sentieon QualCal generate table") return out_file
python
def bqsr_table(data): """Generate recalibration tables as inputs to BQSR. """ in_file = dd.get_align_bam(data) out_file = "%s-recal-table.txt" % utils.splitext_plus(in_file)[0] if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: assoc_files = dd.get_variation_resources(data) known = "-k %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else "" license = license_export(data) cores = dd.get_num_cores(data) ref_file = dd.get_ref_file(data) cmd = ("{license}sentieon driver -t {cores} -r {ref_file} " "-i {in_file} --algo QualCal {known} {tx_out_file}") do.run(cmd.format(**locals()), "Sentieon QualCal generate table") return out_file
[ "def", "bqsr_table", "(", "data", ")", ":", "in_file", "=", "dd", ".", "get_align_bam", "(", "data", ")", "out_file", "=", "\"%s-recal-table.txt\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_...
Generate recalibration tables as inputs to BQSR.
[ "Generate", "recalibration", "tables", "as", "inputs", "to", "BQSR", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/sentieon.py#L151-L166
train
218,158
bcbio/bcbio-nextgen
bcbio/ngsalign/rtg.py
to_sdf
def to_sdf(files, data): """Convert a fastq or BAM input into a SDF indexed file. """ # BAM if len(files) == 1 and files[0].endswith(".bam"): qual = [] format = ["-f", "sam-pe" if bam.is_paired(files[0]) else "sam-se"] inputs = [files[0]] # fastq else: qual = ["-q", "illumina" if dd.get_quality_format(data).lower() == "illumina" else "sanger"] format = ["-f", "fastq"] if len(files) == 2: inputs = ["-l", files[0], "-r", files[1]] else: assert len(files) == 1 inputs = [files[0]] work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "align_prep")) out_file = os.path.join(work_dir, "%s.sdf" % utils.splitext_plus(os.path.basename(os.path.commonprefix(files)))[0]) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = _rtg_cmd(["rtg", "format", "-o", tx_out_file] + format + qual + inputs) do.run(cmd, "Format inputs to indexed SDF") return out_file
python
def to_sdf(files, data): """Convert a fastq or BAM input into a SDF indexed file. """ # BAM if len(files) == 1 and files[0].endswith(".bam"): qual = [] format = ["-f", "sam-pe" if bam.is_paired(files[0]) else "sam-se"] inputs = [files[0]] # fastq else: qual = ["-q", "illumina" if dd.get_quality_format(data).lower() == "illumina" else "sanger"] format = ["-f", "fastq"] if len(files) == 2: inputs = ["-l", files[0], "-r", files[1]] else: assert len(files) == 1 inputs = [files[0]] work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "align_prep")) out_file = os.path.join(work_dir, "%s.sdf" % utils.splitext_plus(os.path.basename(os.path.commonprefix(files)))[0]) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = _rtg_cmd(["rtg", "format", "-o", tx_out_file] + format + qual + inputs) do.run(cmd, "Format inputs to indexed SDF") return out_file
[ "def", "to_sdf", "(", "files", ",", "data", ")", ":", "# BAM", "if", "len", "(", "files", ")", "==", "1", "and", "files", "[", "0", "]", ".", "endswith", "(", "\".bam\"", ")", ":", "qual", "=", "[", "]", "format", "=", "[", "\"-f\"", ",", "\"sa...
Convert a fastq or BAM input into a SDF indexed file.
[ "Convert", "a", "fastq", "or", "BAM", "input", "into", "a", "SDF", "indexed", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/rtg.py#L16-L40
train
218,159
bcbio/bcbio-nextgen
bcbio/ngsalign/rtg.py
to_fastq_apipe_cl
def to_fastq_apipe_cl(sdf_file, start=None, end=None): """Return a command lines to provide streaming fastq input. For paired end, returns a forward and reverse command line. For single end returns a single command line and None for the pair. """ cmd = ["rtg", "sdf2fastq", "--no-gzip", "-o", "-"] if start is not None: cmd += ["--start-id=%s" % start] if end is not None: cmd += ["--end-id=%s" % end] if is_paired(sdf_file): out = [] for ext in ["left", "right"]: out.append("<(%s)" % _rtg_cmd(cmd + ["-i", os.path.join(sdf_file, ext)])) return out else: cmd += ["-i", sdf_file] return ["<(%s)" % _rtg_cmd(cmd), None]
python
def to_fastq_apipe_cl(sdf_file, start=None, end=None): """Return a command lines to provide streaming fastq input. For paired end, returns a forward and reverse command line. For single end returns a single command line and None for the pair. """ cmd = ["rtg", "sdf2fastq", "--no-gzip", "-o", "-"] if start is not None: cmd += ["--start-id=%s" % start] if end is not None: cmd += ["--end-id=%s" % end] if is_paired(sdf_file): out = [] for ext in ["left", "right"]: out.append("<(%s)" % _rtg_cmd(cmd + ["-i", os.path.join(sdf_file, ext)])) return out else: cmd += ["-i", sdf_file] return ["<(%s)" % _rtg_cmd(cmd), None]
[ "def", "to_fastq_apipe_cl", "(", "sdf_file", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "cmd", "=", "[", "\"rtg\"", ",", "\"sdf2fastq\"", ",", "\"--no-gzip\"", ",", "\"-o\"", ",", "\"-\"", "]", "if", "start", "is", "not", "None", ":"...
Return a command lines to provide streaming fastq input. For paired end, returns a forward and reverse command line. For single end returns a single command line and None for the pair.
[ "Return", "a", "command", "lines", "to", "provide", "streaming", "fastq", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/rtg.py#L82-L100
train
218,160
bcbio/bcbio-nextgen
bcbio/pipeline/tools.py
get_tabix_cmd
def get_tabix_cmd(config): """Retrieve tabix command, handling new bcftools tabix and older tabix. """ try: bcftools = config_utils.get_program("bcftools", config) # bcftools has terrible error codes and stderr output, swallow those. bcftools_tabix = subprocess.check_output("{bcftools} 2>&1; echo $?".format(**locals()), shell=True).decode().find("tabix") >= 0 except config_utils.CmdNotFound: bcftools_tabix = False if bcftools_tabix: return "{0} tabix".format(bcftools) else: tabix = config_utils.get_program("tabix", config) return tabix
python
def get_tabix_cmd(config): """Retrieve tabix command, handling new bcftools tabix and older tabix. """ try: bcftools = config_utils.get_program("bcftools", config) # bcftools has terrible error codes and stderr output, swallow those. bcftools_tabix = subprocess.check_output("{bcftools} 2>&1; echo $?".format(**locals()), shell=True).decode().find("tabix") >= 0 except config_utils.CmdNotFound: bcftools_tabix = False if bcftools_tabix: return "{0} tabix".format(bcftools) else: tabix = config_utils.get_program("tabix", config) return tabix
[ "def", "get_tabix_cmd", "(", "config", ")", ":", "try", ":", "bcftools", "=", "config_utils", ".", "get_program", "(", "\"bcftools\"", ",", "config", ")", "# bcftools has terrible error codes and stderr output, swallow those.", "bcftools_tabix", "=", "subprocess", ".", ...
Retrieve tabix command, handling new bcftools tabix and older tabix.
[ "Retrieve", "tabix", "command", "handling", "new", "bcftools", "tabix", "and", "older", "tabix", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/tools.py#L10-L24
train
218,161
bcbio/bcbio-nextgen
bcbio/pipeline/tools.py
get_bgzip_cmd
def get_bgzip_cmd(config, is_retry=False): """Retrieve command to use for bgzip, trying to use bgzip parallel threads. By default, parallel bgzip is enabled in bcbio. If it causes problems please report them. You can turn parallel bgzip off with `tools_off: [pbgzip]` """ num_cores = tz.get_in(["algorithm", "num_cores"], config, 1) cmd = config_utils.get_program("bgzip", config) if (not is_retry and num_cores > 1 and "pbgzip" not in dd.get_tools_off({"config": config})): cmd += " --threads %s" % num_cores return cmd
python
def get_bgzip_cmd(config, is_retry=False): """Retrieve command to use for bgzip, trying to use bgzip parallel threads. By default, parallel bgzip is enabled in bcbio. If it causes problems please report them. You can turn parallel bgzip off with `tools_off: [pbgzip]` """ num_cores = tz.get_in(["algorithm", "num_cores"], config, 1) cmd = config_utils.get_program("bgzip", config) if (not is_retry and num_cores > 1 and "pbgzip" not in dd.get_tools_off({"config": config})): cmd += " --threads %s" % num_cores return cmd
[ "def", "get_bgzip_cmd", "(", "config", ",", "is_retry", "=", "False", ")", ":", "num_cores", "=", "tz", ".", "get_in", "(", "[", "\"algorithm\"", ",", "\"num_cores\"", "]", ",", "config", ",", "1", ")", "cmd", "=", "config_utils", ".", "get_program", "("...
Retrieve command to use for bgzip, trying to use bgzip parallel threads. By default, parallel bgzip is enabled in bcbio. If it causes problems please report them. You can turn parallel bgzip off with `tools_off: [pbgzip]`
[ "Retrieve", "command", "to", "use", "for", "bgzip", "trying", "to", "use", "bgzip", "parallel", "threads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/tools.py#L26-L37
train
218,162
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
is_empty
def is_empty(bam_file): """Determine if a BAM file is empty """ bam_file = objectstore.cl_input(bam_file) cmd = ("set -o pipefail; " "samtools view {bam_file} | head -1 | wc -l") p = subprocess.Popen(cmd.format(**locals()), shell=True, executable=do.find_bash(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)) stdout, stderr = p.communicate() stdout = stdout.decode() stderr = stderr.decode() if ((p.returncode == 0 or p.returncode == 141) and (stderr == "" or (stderr.startswith("gof3r") and stderr.endswith("broken pipe")))): return int(stdout) == 0 else: raise ValueError("Failed to check empty status of BAM file: %s" % str(stderr))
python
def is_empty(bam_file): """Determine if a BAM file is empty """ bam_file = objectstore.cl_input(bam_file) cmd = ("set -o pipefail; " "samtools view {bam_file} | head -1 | wc -l") p = subprocess.Popen(cmd.format(**locals()), shell=True, executable=do.find_bash(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)) stdout, stderr = p.communicate() stdout = stdout.decode() stderr = stderr.decode() if ((p.returncode == 0 or p.returncode == 141) and (stderr == "" or (stderr.startswith("gof3r") and stderr.endswith("broken pipe")))): return int(stdout) == 0 else: raise ValueError("Failed to check empty status of BAM file: %s" % str(stderr))
[ "def", "is_empty", "(", "bam_file", ")", ":", "bam_file", "=", "objectstore", ".", "cl_input", "(", "bam_file", ")", "cmd", "=", "(", "\"set -o pipefail; \"", "\"samtools view {bam_file} | head -1 | wc -l\"", ")", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ...
Determine if a BAM file is empty
[ "Determine", "if", "a", "BAM", "file", "is", "empty" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L24-L41
train
218,163
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
fake_index
def fake_index(in_bam, data): """Create a fake index file for namesorted BAMs. bais require by CWL for consistency. """ index_file = "%s.bai" % in_bam if not utils.file_exists(index_file): with file_transaction(data, index_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: out_handle.write("name sorted -- no index") return index_file
python
def fake_index(in_bam, data): """Create a fake index file for namesorted BAMs. bais require by CWL for consistency. """ index_file = "%s.bai" % in_bam if not utils.file_exists(index_file): with file_transaction(data, index_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: out_handle.write("name sorted -- no index") return index_file
[ "def", "fake_index", "(", "in_bam", ",", "data", ")", ":", "index_file", "=", "\"%s.bai\"", "%", "in_bam", "if", "not", "utils", ".", "file_exists", "(", "index_file", ")", ":", "with", "file_transaction", "(", "data", ",", "index_file", ")", "as", "tx_out...
Create a fake index file for namesorted BAMs. bais require by CWL for consistency.
[ "Create", "a", "fake", "index", "file", "for", "namesorted", "BAMs", ".", "bais", "require", "by", "CWL", "for", "consistency", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L67-L75
train
218,164
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
index
def index(in_bam, config, check_timestamp=True): """Index a BAM file, skipping if index present. Centralizes BAM indexing providing ability to switch indexing approaches. """ assert is_bam(in_bam), "%s in not a BAM file" % in_bam index_file = "%s.bai" % in_bam alt_index_file = "%s.bai" % os.path.splitext(in_bam)[0] if check_timestamp: bai_exists = utils.file_uptodate(index_file, in_bam) or utils.file_uptodate(alt_index_file, in_bam) else: bai_exists = utils.file_exists(index_file) or utils.file_exists(alt_index_file) if not bai_exists: # Remove old index files and re-run to prevent linking into tx directory for fname in [index_file, alt_index_file]: utils.remove_safe(fname) samtools = config_utils.get_program("samtools", config) num_cores = config["algorithm"].get("num_cores", 1) with file_transaction(config, index_file) as tx_index_file: cmd = "{samtools} index -@ {num_cores} {in_bam} {tx_index_file}" do.run(cmd.format(**locals()), "Index BAM file: %s" % os.path.basename(in_bam)) return index_file if utils.file_exists(index_file) else alt_index_file
python
def index(in_bam, config, check_timestamp=True): """Index a BAM file, skipping if index present. Centralizes BAM indexing providing ability to switch indexing approaches. """ assert is_bam(in_bam), "%s in not a BAM file" % in_bam index_file = "%s.bai" % in_bam alt_index_file = "%s.bai" % os.path.splitext(in_bam)[0] if check_timestamp: bai_exists = utils.file_uptodate(index_file, in_bam) or utils.file_uptodate(alt_index_file, in_bam) else: bai_exists = utils.file_exists(index_file) or utils.file_exists(alt_index_file) if not bai_exists: # Remove old index files and re-run to prevent linking into tx directory for fname in [index_file, alt_index_file]: utils.remove_safe(fname) samtools = config_utils.get_program("samtools", config) num_cores = config["algorithm"].get("num_cores", 1) with file_transaction(config, index_file) as tx_index_file: cmd = "{samtools} index -@ {num_cores} {in_bam} {tx_index_file}" do.run(cmd.format(**locals()), "Index BAM file: %s" % os.path.basename(in_bam)) return index_file if utils.file_exists(index_file) else alt_index_file
[ "def", "index", "(", "in_bam", ",", "config", ",", "check_timestamp", "=", "True", ")", ":", "assert", "is_bam", "(", "in_bam", ")", ",", "\"%s in not a BAM file\"", "%", "in_bam", "index_file", "=", "\"%s.bai\"", "%", "in_bam", "alt_index_file", "=", "\"%s.ba...
Index a BAM file, skipping if index present. Centralizes BAM indexing providing ability to switch indexing approaches.
[ "Index", "a", "BAM", "file", "skipping", "if", "index", "present", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L77-L98
train
218,165
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
remove
def remove(in_bam): """ remove bam file and the index if exists """ if utils.file_exists(in_bam): utils.remove_safe(in_bam) if utils.file_exists(in_bam + ".bai"): utils.remove_safe(in_bam + ".bai")
python
def remove(in_bam): """ remove bam file and the index if exists """ if utils.file_exists(in_bam): utils.remove_safe(in_bam) if utils.file_exists(in_bam + ".bai"): utils.remove_safe(in_bam + ".bai")
[ "def", "remove", "(", "in_bam", ")", ":", "if", "utils", ".", "file_exists", "(", "in_bam", ")", ":", "utils", ".", "remove_safe", "(", "in_bam", ")", "if", "utils", ".", "file_exists", "(", "in_bam", "+", "\".bai\"", ")", ":", "utils", ".", "remove_sa...
remove bam file and the index if exists
[ "remove", "bam", "file", "and", "the", "index", "if", "exists" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L100-L107
train
218,166
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
idxstats
def idxstats(in_bam, data): """Return BAM index stats for the given file, using samtools idxstats. """ index(in_bam, data["config"], check_timestamp=False) AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"]) samtools = config_utils.get_program("samtools", data["config"]) idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode() out = [] for line in idxstats_out.split("\n"): if line.strip(): contig, length, aligned, unaligned = line.split("\t") out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned))) return out
python
def idxstats(in_bam, data): """Return BAM index stats for the given file, using samtools idxstats. """ index(in_bam, data["config"], check_timestamp=False) AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"]) samtools = config_utils.get_program("samtools", data["config"]) idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode() out = [] for line in idxstats_out.split("\n"): if line.strip(): contig, length, aligned, unaligned = line.split("\t") out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned))) return out
[ "def", "idxstats", "(", "in_bam", ",", "data", ")", ":", "index", "(", "in_bam", ",", "data", "[", "\"config\"", "]", ",", "check_timestamp", "=", "False", ")", "AlignInfo", "=", "collections", ".", "namedtuple", "(", "\"AlignInfo\"", ",", "[", "\"contig\"...
Return BAM index stats for the given file, using samtools idxstats.
[ "Return", "BAM", "index", "stats", "for", "the", "given", "file", "using", "samtools", "idxstats", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L109-L121
train
218,167
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
fai_from_bam
def fai_from_bam(ref_file, bam_file, out_file, data): """Create a fai index with only contigs in the input BAM file. """ contigs = set([x.contig for x in idxstats(bam_file, data)]) if not utils.file_uptodate(out_file, bam_file): with open(ref.fasta_idx(ref_file, data["config"])) as in_handle: with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for line in (l for l in in_handle if l.strip()): if line.split()[0] in contigs: out_handle.write(line) return out_file
python
def fai_from_bam(ref_file, bam_file, out_file, data): """Create a fai index with only contigs in the input BAM file. """ contigs = set([x.contig for x in idxstats(bam_file, data)]) if not utils.file_uptodate(out_file, bam_file): with open(ref.fasta_idx(ref_file, data["config"])) as in_handle: with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for line in (l for l in in_handle if l.strip()): if line.split()[0] in contigs: out_handle.write(line) return out_file
[ "def", "fai_from_bam", "(", "ref_file", ",", "bam_file", ",", "out_file", ",", "data", ")", ":", "contigs", "=", "set", "(", "[", "x", ".", "contig", "for", "x", "in", "idxstats", "(", "bam_file", ",", "data", ")", "]", ")", "if", "not", "utils", "...
Create a fai index with only contigs in the input BAM file.
[ "Create", "a", "fai", "index", "with", "only", "contigs", "in", "the", "input", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L123-L134
train
218,168
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
ref_file_from_bam
def ref_file_from_bam(bam_file, data): """Subset a fasta input file to only a fraction of input contigs. """ new_ref = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "inputs", "ref")), "%s-subset.fa" % dd.get_genome_build(data)) if not utils.file_exists(new_ref): with file_transaction(data, new_ref) as tx_out_file: contig_file = "%s-contigs.txt" % utils.splitext_plus(new_ref)[0] with open(contig_file, "w") as out_handle: for contig in [x.contig for x in idxstats(bam_file, data) if x.contig != "*"]: out_handle.write("%s\n" % contig) cmd = "seqtk subseq -l 100 %s %s > %s" % (dd.get_ref_file(data), contig_file, tx_out_file) do.run(cmd, "Subset %s to BAM file contigs" % dd.get_genome_build(data)) ref.fasta_idx(new_ref, data["config"]) runner = broad.runner_from_path("picard", data["config"]) runner.run_fn("picard_index_ref", new_ref) return {"base": new_ref}
python
def ref_file_from_bam(bam_file, data): """Subset a fasta input file to only a fraction of input contigs. """ new_ref = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "inputs", "ref")), "%s-subset.fa" % dd.get_genome_build(data)) if not utils.file_exists(new_ref): with file_transaction(data, new_ref) as tx_out_file: contig_file = "%s-contigs.txt" % utils.splitext_plus(new_ref)[0] with open(contig_file, "w") as out_handle: for contig in [x.contig for x in idxstats(bam_file, data) if x.contig != "*"]: out_handle.write("%s\n" % contig) cmd = "seqtk subseq -l 100 %s %s > %s" % (dd.get_ref_file(data), contig_file, tx_out_file) do.run(cmd, "Subset %s to BAM file contigs" % dd.get_genome_build(data)) ref.fasta_idx(new_ref, data["config"]) runner = broad.runner_from_path("picard", data["config"]) runner.run_fn("picard_index_ref", new_ref) return {"base": new_ref}
[ "def", "ref_file_from_bam", "(", "bam_file", ",", "data", ")", ":", "new_ref", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"inpu...
Subset a fasta input file to only a fraction of input contigs.
[ "Subset", "a", "fasta", "input", "file", "to", "only", "a", "fraction", "of", "input", "contigs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L136-L152
train
218,169
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
get_downsample_pct
def get_downsample_pct(in_bam, target_counts, data): """Retrieve percentage of file to downsample to get to target counts. Avoids minimal downsample which is not especially useful for improving QC times; 90& or more of reads. """ total = sum(x.aligned for x in idxstats(in_bam, data)) with pysam.Samfile(in_bam, "rb") as work_bam: n_rgs = max(1, len(work_bam.header.get("RG", []))) rg_target = n_rgs * target_counts if total > rg_target: pct = float(rg_target) / float(total) if pct < 0.9: return pct
python
def get_downsample_pct(in_bam, target_counts, data): """Retrieve percentage of file to downsample to get to target counts. Avoids minimal downsample which is not especially useful for improving QC times; 90& or more of reads. """ total = sum(x.aligned for x in idxstats(in_bam, data)) with pysam.Samfile(in_bam, "rb") as work_bam: n_rgs = max(1, len(work_bam.header.get("RG", []))) rg_target = n_rgs * target_counts if total > rg_target: pct = float(rg_target) / float(total) if pct < 0.9: return pct
[ "def", "get_downsample_pct", "(", "in_bam", ",", "target_counts", ",", "data", ")", ":", "total", "=", "sum", "(", "x", ".", "aligned", "for", "x", "in", "idxstats", "(", "in_bam", ",", "data", ")", ")", "with", "pysam", ".", "Samfile", "(", "in_bam", ...
Retrieve percentage of file to downsample to get to target counts. Avoids minimal downsample which is not especially useful for improving QC times; 90& or more of reads.
[ "Retrieve", "percentage", "of", "file", "to", "downsample", "to", "get", "to", "target", "counts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L154-L167
train
218,170
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
downsample
def downsample(in_bam, data, target_counts, work_dir=None): """Downsample a BAM file to the specified number of target counts. """ index(in_bam, data["config"], check_timestamp=False) ds_pct = get_downsample_pct(in_bam, target_counts, data) if ds_pct: out_file = "%s-downsample%s" % os.path.splitext(in_bam) if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: samtools = config_utils.get_program("samtools", data["config"]) num_cores = dd.get_num_cores(data) ds_pct = "42." + "{ds_pct:.3}".format(ds_pct=ds_pct).replace("0.", "") cmd = ("{samtools} view -O BAM -@ {num_cores} -o {tx_out_file} " "-s {ds_pct} {in_bam}") do.run(cmd.format(**locals()), "Downsample BAM file: %s" % os.path.basename(in_bam)) return out_file
python
def downsample(in_bam, data, target_counts, work_dir=None): """Downsample a BAM file to the specified number of target counts. """ index(in_bam, data["config"], check_timestamp=False) ds_pct = get_downsample_pct(in_bam, target_counts, data) if ds_pct: out_file = "%s-downsample%s" % os.path.splitext(in_bam) if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: samtools = config_utils.get_program("samtools", data["config"]) num_cores = dd.get_num_cores(data) ds_pct = "42." + "{ds_pct:.3}".format(ds_pct=ds_pct).replace("0.", "") cmd = ("{samtools} view -O BAM -@ {num_cores} -o {tx_out_file} " "-s {ds_pct} {in_bam}") do.run(cmd.format(**locals()), "Downsample BAM file: %s" % os.path.basename(in_bam)) return out_file
[ "def", "downsample", "(", "in_bam", ",", "data", ",", "target_counts", ",", "work_dir", "=", "None", ")", ":", "index", "(", "in_bam", ",", "data", "[", "\"config\"", "]", ",", "check_timestamp", "=", "False", ")", "ds_pct", "=", "get_downsample_pct", "(",...
Downsample a BAM file to the specified number of target counts.
[ "Downsample", "a", "BAM", "file", "to", "the", "specified", "number", "of", "target", "counts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L177-L194
train
218,171
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
get_maxcov_downsample_cl
def get_maxcov_downsample_cl(data, in_pipe=None): """Retrieve command line for max coverage downsampling, fitting into bamsormadup output. """ max_cov = _get_maxcov_downsample(data) if dd.get_aligner(data) not in ["snap"] else None if max_cov: if in_pipe == "bamsormadup": prefix = "level=0" elif in_pipe == "samtools": prefix = "-l 0" else: prefix = "" # Swap over to multiple cores until after testing #core_arg = "-t %s" % dd.get_num_cores(data) core_arg = "" return ("%s | variant - -b %s --mark-as-qc-fail --max-coverage %s" % (prefix, core_arg, max_cov)) else: if in_pipe == "bamsormadup": prefix = "indexfilename={tx_out_file}.bai" else: prefix = "" return prefix
python
def get_maxcov_downsample_cl(data, in_pipe=None): """Retrieve command line for max coverage downsampling, fitting into bamsormadup output. """ max_cov = _get_maxcov_downsample(data) if dd.get_aligner(data) not in ["snap"] else None if max_cov: if in_pipe == "bamsormadup": prefix = "level=0" elif in_pipe == "samtools": prefix = "-l 0" else: prefix = "" # Swap over to multiple cores until after testing #core_arg = "-t %s" % dd.get_num_cores(data) core_arg = "" return ("%s | variant - -b %s --mark-as-qc-fail --max-coverage %s" % (prefix, core_arg, max_cov)) else: if in_pipe == "bamsormadup": prefix = "indexfilename={tx_out_file}.bai" else: prefix = "" return prefix
[ "def", "get_maxcov_downsample_cl", "(", "data", ",", "in_pipe", "=", "None", ")", ":", "max_cov", "=", "_get_maxcov_downsample", "(", "data", ")", "if", "dd", ".", "get_aligner", "(", "data", ")", "not", "in", "[", "\"snap\"", "]", "else", "None", "if", ...
Retrieve command line for max coverage downsampling, fitting into bamsormadup output.
[ "Retrieve", "command", "line", "for", "max", "coverage", "downsampling", "fitting", "into", "bamsormadup", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L196-L217
train
218,172
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
_get_maxcov_downsample
def _get_maxcov_downsample(data): """Calculate maximum coverage downsampling for whole genome samples. Returns None if we're not doing downsampling. """ from bcbio.bam import ref from bcbio.ngsalign import alignprep, bwa from bcbio.variation import coverage fastq_file = data["files"][0] params = alignprep.get_downsample_params(data) if params: num_reads = alignprep.total_reads_from_grabix(fastq_file) if num_reads: vrs = dd.get_variant_regions_merged(data) total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) if vrs: callable_size = pybedtools.BedTool(vrs).total_coverage() genome_cov_pct = callable_size / float(total_size) else: callable_size = total_size genome_cov_pct = 1.0 if (genome_cov_pct > coverage.GENOME_COV_THRESH and dd.get_coverage_interval(data) in ["genome", None, False]): total_counts, total_sizes = 0, 0 for count, size in bwa.fastq_size_output(fastq_file, 5000): total_counts += int(count) total_sizes += (int(size) * int(count)) read_size = float(total_sizes) / float(total_counts) avg_cov = float(num_reads * read_size) / callable_size if avg_cov >= params["min_coverage_for_downsampling"]: return int(avg_cov * params["maxcov_downsample_multiplier"]) return None
python
def _get_maxcov_downsample(data): """Calculate maximum coverage downsampling for whole genome samples. Returns None if we're not doing downsampling. """ from bcbio.bam import ref from bcbio.ngsalign import alignprep, bwa from bcbio.variation import coverage fastq_file = data["files"][0] params = alignprep.get_downsample_params(data) if params: num_reads = alignprep.total_reads_from_grabix(fastq_file) if num_reads: vrs = dd.get_variant_regions_merged(data) total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) if vrs: callable_size = pybedtools.BedTool(vrs).total_coverage() genome_cov_pct = callable_size / float(total_size) else: callable_size = total_size genome_cov_pct = 1.0 if (genome_cov_pct > coverage.GENOME_COV_THRESH and dd.get_coverage_interval(data) in ["genome", None, False]): total_counts, total_sizes = 0, 0 for count, size in bwa.fastq_size_output(fastq_file, 5000): total_counts += int(count) total_sizes += (int(size) * int(count)) read_size = float(total_sizes) / float(total_counts) avg_cov = float(num_reads * read_size) / callable_size if avg_cov >= params["min_coverage_for_downsampling"]: return int(avg_cov * params["maxcov_downsample_multiplier"]) return None
[ "def", "_get_maxcov_downsample", "(", "data", ")", ":", "from", "bcbio", ".", "bam", "import", "ref", "from", "bcbio", ".", "ngsalign", "import", "alignprep", ",", "bwa", "from", "bcbio", ".", "variation", "import", "coverage", "fastq_file", "=", "data", "["...
Calculate maximum coverage downsampling for whole genome samples. Returns None if we're not doing downsampling.
[ "Calculate", "maximum", "coverage", "downsampling", "for", "whole", "genome", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L219-L250
train
218,173
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
check_header
def check_header(in_bam, rgnames, ref_file, config): """Ensure passed in BAM header matches reference file and read groups names. """ _check_bam_contigs(in_bam, ref_file, config) _check_sample(in_bam, rgnames)
python
def check_header(in_bam, rgnames, ref_file, config): """Ensure passed in BAM header matches reference file and read groups names. """ _check_bam_contigs(in_bam, ref_file, config) _check_sample(in_bam, rgnames)
[ "def", "check_header", "(", "in_bam", ",", "rgnames", ",", "ref_file", ",", "config", ")", ":", "_check_bam_contigs", "(", "in_bam", ",", "ref_file", ",", "config", ")", "_check_sample", "(", "in_bam", ",", "rgnames", ")" ]
Ensure passed in BAM header matches reference file and read groups names.
[ "Ensure", "passed", "in", "BAM", "header", "matches", "reference", "file", "and", "read", "groups", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L253-L257
train
218,174
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
_check_sample
def _check_sample(in_bam, rgnames): """Ensure input sample name matches expected run group names. """ with pysam.Samfile(in_bam, "rb") as bamfile: rg = bamfile.header.get("RG", [{}]) msgs = [] warnings = [] if len(rg) > 1: warnings.append("Multiple read groups found in input BAM. Expect single RG per BAM.") if len(rg) == 0: msgs.append("No read groups found in input BAM. Expect single RG per BAM.") if len(rg) > 0 and any(x.get("SM") != rgnames["sample"] for x in rg): msgs.append("Read group sample name (SM) does not match configuration `description`: %s vs %s" % (rg[0].get("SM"), rgnames["sample"])) if len(msgs) > 0: raise ValueError("Problems with pre-aligned input BAM file: %s\n" % (in_bam) + "\n".join(msgs) + "\nSetting `bam_clean: fixrg`\n" "in the configuration can often fix this issue.") if warnings: print("*** Potential problems in input BAM compared to reference:\n%s\n" % "\n".join(warnings))
python
def _check_sample(in_bam, rgnames): """Ensure input sample name matches expected run group names. """ with pysam.Samfile(in_bam, "rb") as bamfile: rg = bamfile.header.get("RG", [{}]) msgs = [] warnings = [] if len(rg) > 1: warnings.append("Multiple read groups found in input BAM. Expect single RG per BAM.") if len(rg) == 0: msgs.append("No read groups found in input BAM. Expect single RG per BAM.") if len(rg) > 0 and any(x.get("SM") != rgnames["sample"] for x in rg): msgs.append("Read group sample name (SM) does not match configuration `description`: %s vs %s" % (rg[0].get("SM"), rgnames["sample"])) if len(msgs) > 0: raise ValueError("Problems with pre-aligned input BAM file: %s\n" % (in_bam) + "\n".join(msgs) + "\nSetting `bam_clean: fixrg`\n" "in the configuration can often fix this issue.") if warnings: print("*** Potential problems in input BAM compared to reference:\n%s\n" % "\n".join(warnings))
[ "def", "_check_sample", "(", "in_bam", ",", "rgnames", ")", ":", "with", "pysam", ".", "Samfile", "(", "in_bam", ",", "\"rb\"", ")", "as", "bamfile", ":", "rg", "=", "bamfile", ".", "header", ".", "get", "(", "\"RG\"", ",", "[", "{", "}", "]", ")",...
Ensure input sample name matches expected run group names.
[ "Ensure", "input", "sample", "name", "matches", "expected", "run", "group", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L259-L280
train
218,175
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
_check_bam_contigs
def _check_bam_contigs(in_bam, ref_file, config): """Ensure a pre-aligned BAM file matches the expected reference genome. """ # GATK allows chromosome M to be in multiple locations, skip checking it allowed_outoforder = ["chrM", "MT"] ref_contigs = [c.name for c in ref.file_contigs(ref_file, config)] with pysam.Samfile(in_bam, "rb") as bamfile: bam_contigs = [c["SN"] for c in bamfile.header["SQ"]] extra_bcs = [x for x in bam_contigs if x not in ref_contigs] extra_rcs = [x for x in ref_contigs if x not in bam_contigs] problems = [] warnings = [] for bc, rc in zip_longest([x for x in bam_contigs if (x not in extra_bcs and x not in allowed_outoforder)], [x for x in ref_contigs if (x not in extra_rcs and x not in allowed_outoforder)]): if bc != rc: if bc and rc: problems.append("Reference mismatch. BAM: %s Reference: %s" % (bc, rc)) elif bc: warnings.append("Extra BAM chromosomes: %s" % bc) elif rc: warnings.append("Extra reference chromosomes: %s" % rc) for bc in extra_bcs: warnings.append("Extra BAM chromosomes: %s" % bc) for rc in extra_rcs: warnings.append("Extra reference chromosomes: %s" % rc) if problems: raise ValueError("Unexpected order, name or contig mismatches between input BAM and reference file:\n%s\n" "Setting `bam_clean: remove_extracontigs` in the configuration can often fix this issue." % "\n".join(problems)) if warnings: print("*** Potential problems in input BAM compared to reference:\n%s\n" % "\n".join(warnings))
python
def _check_bam_contigs(in_bam, ref_file, config): """Ensure a pre-aligned BAM file matches the expected reference genome. """ # GATK allows chromosome M to be in multiple locations, skip checking it allowed_outoforder = ["chrM", "MT"] ref_contigs = [c.name for c in ref.file_contigs(ref_file, config)] with pysam.Samfile(in_bam, "rb") as bamfile: bam_contigs = [c["SN"] for c in bamfile.header["SQ"]] extra_bcs = [x for x in bam_contigs if x not in ref_contigs] extra_rcs = [x for x in ref_contigs if x not in bam_contigs] problems = [] warnings = [] for bc, rc in zip_longest([x for x in bam_contigs if (x not in extra_bcs and x not in allowed_outoforder)], [x for x in ref_contigs if (x not in extra_rcs and x not in allowed_outoforder)]): if bc != rc: if bc and rc: problems.append("Reference mismatch. BAM: %s Reference: %s" % (bc, rc)) elif bc: warnings.append("Extra BAM chromosomes: %s" % bc) elif rc: warnings.append("Extra reference chromosomes: %s" % rc) for bc in extra_bcs: warnings.append("Extra BAM chromosomes: %s" % bc) for rc in extra_rcs: warnings.append("Extra reference chromosomes: %s" % rc) if problems: raise ValueError("Unexpected order, name or contig mismatches between input BAM and reference file:\n%s\n" "Setting `bam_clean: remove_extracontigs` in the configuration can often fix this issue." % "\n".join(problems)) if warnings: print("*** Potential problems in input BAM compared to reference:\n%s\n" % "\n".join(warnings))
[ "def", "_check_bam_contigs", "(", "in_bam", ",", "ref_file", ",", "config", ")", ":", "# GATK allows chromosome M to be in multiple locations, skip checking it", "allowed_outoforder", "=", "[", "\"chrM\"", ",", "\"MT\"", "]", "ref_contigs", "=", "[", "c", ".", "name", ...
Ensure a pre-aligned BAM file matches the expected reference genome.
[ "Ensure", "a", "pre", "-", "aligned", "BAM", "file", "matches", "the", "expected", "reference", "genome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L282-L315
train
218,176
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
sort
def sort(in_bam, config, order="coordinate", out_dir=None): """Sort a BAM file, skipping if already present. """ assert is_bam(in_bam), "%s in not a BAM file" % in_bam if bam_already_sorted(in_bam, config, order): return in_bam sort_stem = _get_sort_stem(in_bam, order, out_dir) sort_file = sort_stem + ".bam" if not utils.file_exists(sort_file): samtools = config_utils.get_program("samtools", config) cores = config["algorithm"].get("num_cores", 1) with file_transaction(config, sort_file) as tx_sort_file: tx_sort_stem = os.path.splitext(tx_sort_file)[0] tx_dir = utils.safe_makedir(os.path.dirname(tx_sort_file)) order_flag = "-n" if order == "queryname" else "" resources = config_utils.get_resources("samtools", config) # Slightly decrease memory and allow more accurate representation # in Mb to ensure fits within systems like SLURM mem = config_utils.adjust_memory(resources.get("memory", "2G"), 1.25, "decrease", out_modifier="M").upper() cmd = ("{samtools} sort -@ {cores} -m {mem} -O BAM {order_flag} " "-T {tx_sort_stem}-sort -o {tx_sort_file} {in_bam}") do.run(cmd.format(**locals()), "Sort BAM file %s: %s to %s" % (order, os.path.basename(in_bam), os.path.basename(sort_file))) return sort_file
python
def sort(in_bam, config, order="coordinate", out_dir=None): """Sort a BAM file, skipping if already present. """ assert is_bam(in_bam), "%s in not a BAM file" % in_bam if bam_already_sorted(in_bam, config, order): return in_bam sort_stem = _get_sort_stem(in_bam, order, out_dir) sort_file = sort_stem + ".bam" if not utils.file_exists(sort_file): samtools = config_utils.get_program("samtools", config) cores = config["algorithm"].get("num_cores", 1) with file_transaction(config, sort_file) as tx_sort_file: tx_sort_stem = os.path.splitext(tx_sort_file)[0] tx_dir = utils.safe_makedir(os.path.dirname(tx_sort_file)) order_flag = "-n" if order == "queryname" else "" resources = config_utils.get_resources("samtools", config) # Slightly decrease memory and allow more accurate representation # in Mb to ensure fits within systems like SLURM mem = config_utils.adjust_memory(resources.get("memory", "2G"), 1.25, "decrease", out_modifier="M").upper() cmd = ("{samtools} sort -@ {cores} -m {mem} -O BAM {order_flag} " "-T {tx_sort_stem}-sort -o {tx_sort_file} {in_bam}") do.run(cmd.format(**locals()), "Sort BAM file %s: %s to %s" % (order, os.path.basename(in_bam), os.path.basename(sort_file))) return sort_file
[ "def", "sort", "(", "in_bam", ",", "config", ",", "order", "=", "\"coordinate\"", ",", "out_dir", "=", "None", ")", ":", "assert", "is_bam", "(", "in_bam", ")", ",", "\"%s in not a BAM file\"", "%", "in_bam", "if", "bam_already_sorted", "(", "in_bam", ",", ...
Sort a BAM file, skipping if already present.
[ "Sort", "a", "BAM", "file", "skipping", "if", "already", "present", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L406-L431
train
218,177
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
aligner_from_header
def aligner_from_header(in_bam): """Identify aligner from the BAM header; handling pre-aligned inputs. """ from bcbio.pipeline.alignment import TOOLS with pysam.Samfile(in_bam, "rb") as bamfile: for pg in bamfile.header.get("PG", []): for ka in TOOLS.keys(): if pg.get("PN", "").lower().find(ka) >= 0: return ka
python
def aligner_from_header(in_bam): """Identify aligner from the BAM header; handling pre-aligned inputs. """ from bcbio.pipeline.alignment import TOOLS with pysam.Samfile(in_bam, "rb") as bamfile: for pg in bamfile.header.get("PG", []): for ka in TOOLS.keys(): if pg.get("PN", "").lower().find(ka) >= 0: return ka
[ "def", "aligner_from_header", "(", "in_bam", ")", ":", "from", "bcbio", ".", "pipeline", ".", "alignment", "import", "TOOLS", "with", "pysam", ".", "Samfile", "(", "in_bam", ",", "\"rb\"", ")", "as", "bamfile", ":", "for", "pg", "in", "bamfile", ".", "he...
Identify aligner from the BAM header; handling pre-aligned inputs.
[ "Identify", "aligner", "from", "the", "BAM", "header", ";", "handling", "pre", "-", "aligned", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L455-L463
train
218,178
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
sample_name
def sample_name(in_bam): """Get sample name from BAM file. """ with pysam.AlignmentFile(in_bam, "rb", check_sq=False) as in_pysam: try: if "RG" in in_pysam.header: return in_pysam.header["RG"][0]["SM"] except ValueError: return None
python
def sample_name(in_bam): """Get sample name from BAM file. """ with pysam.AlignmentFile(in_bam, "rb", check_sq=False) as in_pysam: try: if "RG" in in_pysam.header: return in_pysam.header["RG"][0]["SM"] except ValueError: return None
[ "def", "sample_name", "(", "in_bam", ")", ":", "with", "pysam", ".", "AlignmentFile", "(", "in_bam", ",", "\"rb\"", ",", "check_sq", "=", "False", ")", "as", "in_pysam", ":", "try", ":", "if", "\"RG\"", "in", "in_pysam", ".", "header", ":", "return", "...
Get sample name from BAM file.
[ "Get", "sample", "name", "from", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L465-L473
train
218,179
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
filter_primary
def filter_primary(bam_file, data): """Filter reads to primary only BAM. Removes: - not primary alignment (0x100) 256 - supplementary alignment (0x800) 2048 """ stem, ext = os.path.splitext(bam_file) out_file = stem + ".primary" + ext if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cores = dd.get_num_cores(data) cmd = ("samtools view -@ {cores} -F 2304 -b {bam_file} > {tx_out_file}") do.run(cmd.format(**locals()), ("Filtering primary alignments in %s." % os.path.basename(bam_file))) return out_file
python
def filter_primary(bam_file, data): """Filter reads to primary only BAM. Removes: - not primary alignment (0x100) 256 - supplementary alignment (0x800) 2048 """ stem, ext = os.path.splitext(bam_file) out_file = stem + ".primary" + ext if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cores = dd.get_num_cores(data) cmd = ("samtools view -@ {cores} -F 2304 -b {bam_file} > {tx_out_file}") do.run(cmd.format(**locals()), ("Filtering primary alignments in %s." % os.path.basename(bam_file))) return out_file
[ "def", "filter_primary", "(", "bam_file", ",", "data", ")", ":", "stem", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "bam_file", ")", "out_file", "=", "stem", "+", "\".primary\"", "+", "ext", "if", "not", "utils", ".", "file_exists", "(",...
Filter reads to primary only BAM. Removes: - not primary alignment (0x100) 256 - supplementary alignment (0x800) 2048
[ "Filter", "reads", "to", "primary", "only", "BAM", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L496-L511
train
218,180
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
estimate_max_mapq
def estimate_max_mapq(in_bam, nreads=1e6): """Guess maximum MAPQ in a BAM file of reads with alignments """ with pysam.Samfile(in_bam, "rb") as work_bam: reads = tz.take(int(nreads), work_bam) return max([x.mapq for x in reads if not x.is_unmapped])
python
def estimate_max_mapq(in_bam, nreads=1e6): """Guess maximum MAPQ in a BAM file of reads with alignments """ with pysam.Samfile(in_bam, "rb") as work_bam: reads = tz.take(int(nreads), work_bam) return max([x.mapq for x in reads if not x.is_unmapped])
[ "def", "estimate_max_mapq", "(", "in_bam", ",", "nreads", "=", "1e6", ")", ":", "with", "pysam", ".", "Samfile", "(", "in_bam", ",", "\"rb\"", ")", "as", "work_bam", ":", "reads", "=", "tz", ".", "take", "(", "int", "(", "nreads", ")", ",", "work_bam...
Guess maximum MAPQ in a BAM file of reads with alignments
[ "Guess", "maximum", "MAPQ", "in", "a", "BAM", "file", "of", "reads", "with", "alignments" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L513-L518
train
218,181
bcbio/bcbio-nextgen
bcbio/bam/__init__.py
convert_cufflinks_mapq
def convert_cufflinks_mapq(in_bam, out_bam=None): """Cufflinks expects the not-valid 255 MAPQ for uniquely mapped reads. This detects the maximum mapping quality in a BAM file and sets reads with that quality to be 255 """ CUFFLINKSMAPQ = 255 if not out_bam: out_bam = os.path.splitext(in_bam)[0] + "-cufflinks.bam" if utils.file_exists(out_bam): return out_bam maxmapq = estimate_max_mapq(in_bam) if maxmapq == CUFFLINKSMAPQ: return in_bam logger.info("Converting MAPQ scores in %s to be Cufflinks compatible." % in_bam) with pysam.Samfile(in_bam, "rb") as in_bam_fh: with pysam.Samfile(out_bam, "wb", template=in_bam_fh) as out_bam_fh: for read in in_bam_fh: if read.mapq == maxmapq and not read.is_unmapped: read.mapq = CUFFLINKSMAPQ out_bam_fh.write(read) return out_bam
python
def convert_cufflinks_mapq(in_bam, out_bam=None): """Cufflinks expects the not-valid 255 MAPQ for uniquely mapped reads. This detects the maximum mapping quality in a BAM file and sets reads with that quality to be 255 """ CUFFLINKSMAPQ = 255 if not out_bam: out_bam = os.path.splitext(in_bam)[0] + "-cufflinks.bam" if utils.file_exists(out_bam): return out_bam maxmapq = estimate_max_mapq(in_bam) if maxmapq == CUFFLINKSMAPQ: return in_bam logger.info("Converting MAPQ scores in %s to be Cufflinks compatible." % in_bam) with pysam.Samfile(in_bam, "rb") as in_bam_fh: with pysam.Samfile(out_bam, "wb", template=in_bam_fh) as out_bam_fh: for read in in_bam_fh: if read.mapq == maxmapq and not read.is_unmapped: read.mapq = CUFFLINKSMAPQ out_bam_fh.write(read) return out_bam
[ "def", "convert_cufflinks_mapq", "(", "in_bam", ",", "out_bam", "=", "None", ")", ":", "CUFFLINKSMAPQ", "=", "255", "if", "not", "out_bam", ":", "out_bam", "=", "os", ".", "path", ".", "splitext", "(", "in_bam", ")", "[", "0", "]", "+", "\"-cufflinks.bam...
Cufflinks expects the not-valid 255 MAPQ for uniquely mapped reads. This detects the maximum mapping quality in a BAM file and sets reads with that quality to be 255
[ "Cufflinks", "expects", "the", "not", "-", "valid", "255", "MAPQ", "for", "uniquely", "mapped", "reads", ".", "This", "detects", "the", "maximum", "mapping", "quality", "in", "a", "BAM", "file", "and", "sets", "reads", "with", "that", "quality", "to", "be"...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L520-L540
train
218,182
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
get_gatk_annotations
def get_gatk_annotations(config, include_depth=True, include_baseqranksum=True, gatk_input=True): """Retrieve annotations to use for GATK VariantAnnotator. If include_depth is false, we'll skip annotating DP. Since GATK downsamples this will undercount on high depth sequencing and the standard outputs from the original callers may be preferable. BaseQRankSum can cause issues with some MuTect2 and other runs, so we provide option to skip it. """ broad_runner = broad.runner_from_config(config) anns = ["MappingQualityRankSumTest", "MappingQualityZero", "QualByDepth", "ReadPosRankSumTest", "RMSMappingQuality"] if include_baseqranksum: anns += ["BaseQualityRankSumTest"] # Some annotations not working correctly with external datasets and GATK 3 if gatk_input or broad_runner.gatk_type() == "gatk4": anns += ["FisherStrand"] if broad_runner.gatk_type() == "gatk4": anns += ["MappingQuality"] else: anns += ["GCContent", "HaplotypeScore", "HomopolymerRun"] if include_depth: anns += ["DepthPerAlleleBySample"] if broad_runner.gatk_type() in ["restricted", "gatk4"]: anns += ["Coverage"] else: anns += ["DepthOfCoverage"] return anns
python
def get_gatk_annotations(config, include_depth=True, include_baseqranksum=True, gatk_input=True): """Retrieve annotations to use for GATK VariantAnnotator. If include_depth is false, we'll skip annotating DP. Since GATK downsamples this will undercount on high depth sequencing and the standard outputs from the original callers may be preferable. BaseQRankSum can cause issues with some MuTect2 and other runs, so we provide option to skip it. """ broad_runner = broad.runner_from_config(config) anns = ["MappingQualityRankSumTest", "MappingQualityZero", "QualByDepth", "ReadPosRankSumTest", "RMSMappingQuality"] if include_baseqranksum: anns += ["BaseQualityRankSumTest"] # Some annotations not working correctly with external datasets and GATK 3 if gatk_input or broad_runner.gatk_type() == "gatk4": anns += ["FisherStrand"] if broad_runner.gatk_type() == "gatk4": anns += ["MappingQuality"] else: anns += ["GCContent", "HaplotypeScore", "HomopolymerRun"] if include_depth: anns += ["DepthPerAlleleBySample"] if broad_runner.gatk_type() in ["restricted", "gatk4"]: anns += ["Coverage"] else: anns += ["DepthOfCoverage"] return anns
[ "def", "get_gatk_annotations", "(", "config", ",", "include_depth", "=", "True", ",", "include_baseqranksum", "=", "True", ",", "gatk_input", "=", "True", ")", ":", "broad_runner", "=", "broad", ".", "runner_from_config", "(", "config", ")", "anns", "=", "[", ...
Retrieve annotations to use for GATK VariantAnnotator. If include_depth is false, we'll skip annotating DP. Since GATK downsamples this will undercount on high depth sequencing and the standard outputs from the original callers may be preferable. BaseQRankSum can cause issues with some MuTect2 and other runs, so we provide option to skip it.
[ "Retrieve", "annotations", "to", "use", "for", "GATK", "VariantAnnotator", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L17-L46
train
218,183
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
finalize_vcf
def finalize_vcf(in_file, variantcaller, items): """Perform cleanup and dbSNP annotation of the final VCF. - Adds contigs to header for bcftools compatibility - adds sample information for tumor/normal """ out_file = "%s-annotated%s" % utils.splitext_plus(in_file) if not utils.file_uptodate(out_file, in_file): header_cl = _add_vcf_header_sample_cl(in_file, items, out_file) contig_cl = _add_contig_cl(in_file, items, out_file) cls = [x for x in (contig_cl, header_cl) if x] if cls: post_cl = " | ".join(cls) + " | " else: post_cl = None dbsnp_file = tz.get_in(("genome_resources", "variation", "dbsnp"), items[0]) if dbsnp_file: out_file = _add_dbsnp(in_file, dbsnp_file, items[0], out_file, post_cl) if utils.file_exists(out_file): return vcfutils.bgzip_and_index(out_file, items[0]["config"]) else: return in_file
python
def finalize_vcf(in_file, variantcaller, items): """Perform cleanup and dbSNP annotation of the final VCF. - Adds contigs to header for bcftools compatibility - adds sample information for tumor/normal """ out_file = "%s-annotated%s" % utils.splitext_plus(in_file) if not utils.file_uptodate(out_file, in_file): header_cl = _add_vcf_header_sample_cl(in_file, items, out_file) contig_cl = _add_contig_cl(in_file, items, out_file) cls = [x for x in (contig_cl, header_cl) if x] if cls: post_cl = " | ".join(cls) + " | " else: post_cl = None dbsnp_file = tz.get_in(("genome_resources", "variation", "dbsnp"), items[0]) if dbsnp_file: out_file = _add_dbsnp(in_file, dbsnp_file, items[0], out_file, post_cl) if utils.file_exists(out_file): return vcfutils.bgzip_and_index(out_file, items[0]["config"]) else: return in_file
[ "def", "finalize_vcf", "(", "in_file", ",", "variantcaller", ",", "items", ")", ":", "out_file", "=", "\"%s-annotated%s\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "in_file", ...
Perform cleanup and dbSNP annotation of the final VCF. - Adds contigs to header for bcftools compatibility - adds sample information for tumor/normal
[ "Perform", "cleanup", "and", "dbSNP", "annotation", "of", "the", "final", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L48-L69
train
218,184
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
_add_vcf_header_sample_cl
def _add_vcf_header_sample_cl(in_file, items, base_file): """Add phenotype information to a VCF header. Encode tumor/normal relationships in VCF header. Could also eventually handle more complicated pedigree information if useful. """ paired = vcfutils.get_paired(items) if paired: toadd = ["##SAMPLE=<ID=%s,Genomes=Tumor>" % paired.tumor_name] if paired.normal_name: toadd.append("##SAMPLE=<ID=%s,Genomes=Germline>" % paired.normal_name) toadd.append("##PEDIGREE=<Derived=%s,Original=%s>" % (paired.tumor_name, paired.normal_name)) new_header = _update_header(in_file, base_file, toadd, _fix_generic_tn_names(paired)) if vcfutils.vcf_has_variants(in_file): cmd = "bcftools reheader -h {new_header} | bcftools view " return cmd.format(**locals())
python
def _add_vcf_header_sample_cl(in_file, items, base_file): """Add phenotype information to a VCF header. Encode tumor/normal relationships in VCF header. Could also eventually handle more complicated pedigree information if useful. """ paired = vcfutils.get_paired(items) if paired: toadd = ["##SAMPLE=<ID=%s,Genomes=Tumor>" % paired.tumor_name] if paired.normal_name: toadd.append("##SAMPLE=<ID=%s,Genomes=Germline>" % paired.normal_name) toadd.append("##PEDIGREE=<Derived=%s,Original=%s>" % (paired.tumor_name, paired.normal_name)) new_header = _update_header(in_file, base_file, toadd, _fix_generic_tn_names(paired)) if vcfutils.vcf_has_variants(in_file): cmd = "bcftools reheader -h {new_header} | bcftools view " return cmd.format(**locals())
[ "def", "_add_vcf_header_sample_cl", "(", "in_file", ",", "items", ",", "base_file", ")", ":", "paired", "=", "vcfutils", ".", "get_paired", "(", "items", ")", "if", "paired", ":", "toadd", "=", "[", "\"##SAMPLE=<ID=%s,Genomes=Tumor>\"", "%", "paired", ".", "tu...
Add phenotype information to a VCF header. Encode tumor/normal relationships in VCF header. Could also eventually handle more complicated pedigree information if useful.
[ "Add", "phenotype", "information", "to", "a", "VCF", "header", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L98-L113
train
218,185
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
_update_header
def _update_header(orig_vcf, base_file, new_lines, chrom_process_fn=None): """Fix header with additional lines and remapping of generic sample names. """ new_header = "%s-sample_header.txt" % utils.splitext_plus(base_file)[0] with open(new_header, "w") as out_handle: chrom_line = None with utils.open_gzipsafe(orig_vcf) as in_handle: for line in in_handle: if line.startswith("##"): out_handle.write(line) else: chrom_line = line break assert chrom_line is not None for line in new_lines: out_handle.write(line + "\n") if chrom_process_fn: chrom_line = chrom_process_fn(chrom_line) out_handle.write(chrom_line) return new_header
python
def _update_header(orig_vcf, base_file, new_lines, chrom_process_fn=None): """Fix header with additional lines and remapping of generic sample names. """ new_header = "%s-sample_header.txt" % utils.splitext_plus(base_file)[0] with open(new_header, "w") as out_handle: chrom_line = None with utils.open_gzipsafe(orig_vcf) as in_handle: for line in in_handle: if line.startswith("##"): out_handle.write(line) else: chrom_line = line break assert chrom_line is not None for line in new_lines: out_handle.write(line + "\n") if chrom_process_fn: chrom_line = chrom_process_fn(chrom_line) out_handle.write(chrom_line) return new_header
[ "def", "_update_header", "(", "orig_vcf", ",", "base_file", ",", "new_lines", ",", "chrom_process_fn", "=", "None", ")", ":", "new_header", "=", "\"%s-sample_header.txt\"", "%", "utils", ".", "splitext_plus", "(", "base_file", ")", "[", "0", "]", "with", "open...
Fix header with additional lines and remapping of generic sample names.
[ "Fix", "header", "with", "additional", "lines", "and", "remapping", "of", "generic", "sample", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L115-L134
train
218,186
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
_add_dbsnp
def _add_dbsnp(orig_file, dbsnp_file, data, out_file=None, post_cl=None): """Annotate a VCF file with dbSNP. vcfanno has flexible matching for NON_REF gVCF positions, matching at position and REF allele, matching ALT NON_REF as a wildcard. """ orig_file = vcfutils.bgzip_and_index(orig_file, data["config"]) if out_file is None: out_file = "%s-wdbsnp.vcf.gz" % utils.splitext_plus(orig_file)[0] if not utils.file_uptodate(out_file, orig_file): with file_transaction(data, out_file) as tx_out_file: conf_file = os.path.join(os.path.dirname(out_file), "dbsnp.conf") with open(conf_file, "w") as out_handle: out_handle.write(_DBSNP_TEMPLATE % os.path.normpath(os.path.join(dd.get_work_dir(data), dbsnp_file))) if not post_cl: post_cl = "" cores = dd.get_num_cores(data) cmd = ("vcfanno -p {cores} {conf_file} {orig_file} | {post_cl} " "bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Annotate with dbSNP") return vcfutils.bgzip_and_index(out_file, data["config"])
python
def _add_dbsnp(orig_file, dbsnp_file, data, out_file=None, post_cl=None): """Annotate a VCF file with dbSNP. vcfanno has flexible matching for NON_REF gVCF positions, matching at position and REF allele, matching ALT NON_REF as a wildcard. """ orig_file = vcfutils.bgzip_and_index(orig_file, data["config"]) if out_file is None: out_file = "%s-wdbsnp.vcf.gz" % utils.splitext_plus(orig_file)[0] if not utils.file_uptodate(out_file, orig_file): with file_transaction(data, out_file) as tx_out_file: conf_file = os.path.join(os.path.dirname(out_file), "dbsnp.conf") with open(conf_file, "w") as out_handle: out_handle.write(_DBSNP_TEMPLATE % os.path.normpath(os.path.join(dd.get_work_dir(data), dbsnp_file))) if not post_cl: post_cl = "" cores = dd.get_num_cores(data) cmd = ("vcfanno -p {cores} {conf_file} {orig_file} | {post_cl} " "bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Annotate with dbSNP") return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_add_dbsnp", "(", "orig_file", ",", "dbsnp_file", ",", "data", ",", "out_file", "=", "None", ",", "post_cl", "=", "None", ")", ":", "orig_file", "=", "vcfutils", ".", "bgzip_and_index", "(", "orig_file", ",", "data", "[", "\"config\"", "]", ")", ...
Annotate a VCF file with dbSNP. vcfanno has flexible matching for NON_REF gVCF positions, matching at position and REF allele, matching ALT NON_REF as a wildcard.
[ "Annotate", "a", "VCF", "file", "with", "dbSNP", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L154-L173
train
218,187
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
get_context_files
def get_context_files(data): """Retrieve pre-installed annotation files for annotating genome context. """ ref_file = dd.get_ref_file(data) all_files = [] for ext in [".bed.gz"]: all_files += sorted(glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "coverage", "problem_regions", "*", "*%s" % ext)))) return sorted(all_files)
python
def get_context_files(data): """Retrieve pre-installed annotation files for annotating genome context. """ ref_file = dd.get_ref_file(data) all_files = [] for ext in [".bed.gz"]: all_files += sorted(glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "coverage", "problem_regions", "*", "*%s" % ext)))) return sorted(all_files)
[ "def", "get_context_files", "(", "data", ")", ":", "ref_file", "=", "dd", ".", "get_ref_file", "(", "data", ")", "all_files", "=", "[", "]", "for", "ext", "in", "[", "\".bed.gz\"", "]", ":", "all_files", "+=", "sorted", "(", "glob", ".", "glob", "(", ...
Retrieve pre-installed annotation files for annotating genome context.
[ "Retrieve", "pre", "-", "installed", "annotation", "files", "for", "annotating", "genome", "context", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L175-L184
train
218,188
bcbio/bcbio-nextgen
bcbio/variation/annotation.py
add_genome_context
def add_genome_context(orig_file, data): """Annotate a file with annotations of genome context using vcfanno. """ out_file = "%s-context.vcf.gz" % utils.splitext_plus(orig_file)[0] if not utils.file_uptodate(out_file, orig_file): with file_transaction(data, out_file) as tx_out_file: config_file = "%s.toml" % (utils.splitext_plus(tx_out_file)[0]) with open(config_file, "w") as out_handle: all_names = [] for fname in dd.get_genome_context_files(data): bt = pybedtools.BedTool(fname) if bt.field_count() >= 4: d, base = os.path.split(fname) _, prefix = os.path.split(d) name = "%s_%s" % (prefix, utils.splitext_plus(base)[0]) out_handle.write("[[annotation]]\n") out_handle.write('file = "%s"\n' % fname) out_handle.write("columns = [4]\n") out_handle.write('names = ["%s"]\n' % name) out_handle.write('ops = ["uniq"]\n') all_names.append(name) out_handle.write("[[postannotation]]\n") out_handle.write("fields = [%s]\n" % (", ".join(['"%s"' % n for n in all_names]))) out_handle.write('name = "genome_context"\n') out_handle.write('op = "concat"\n') out_handle.write('type = "String"\n') cmd = "vcfanno {config_file} {orig_file} | bgzip -c > {tx_out_file}" do.run(cmd.format(**locals()), "Annotate with problem annotations", data) return vcfutils.bgzip_and_index(out_file, data["config"])
python
def add_genome_context(orig_file, data): """Annotate a file with annotations of genome context using vcfanno. """ out_file = "%s-context.vcf.gz" % utils.splitext_plus(orig_file)[0] if not utils.file_uptodate(out_file, orig_file): with file_transaction(data, out_file) as tx_out_file: config_file = "%s.toml" % (utils.splitext_plus(tx_out_file)[0]) with open(config_file, "w") as out_handle: all_names = [] for fname in dd.get_genome_context_files(data): bt = pybedtools.BedTool(fname) if bt.field_count() >= 4: d, base = os.path.split(fname) _, prefix = os.path.split(d) name = "%s_%s" % (prefix, utils.splitext_plus(base)[0]) out_handle.write("[[annotation]]\n") out_handle.write('file = "%s"\n' % fname) out_handle.write("columns = [4]\n") out_handle.write('names = ["%s"]\n' % name) out_handle.write('ops = ["uniq"]\n') all_names.append(name) out_handle.write("[[postannotation]]\n") out_handle.write("fields = [%s]\n" % (", ".join(['"%s"' % n for n in all_names]))) out_handle.write('name = "genome_context"\n') out_handle.write('op = "concat"\n') out_handle.write('type = "String"\n') cmd = "vcfanno {config_file} {orig_file} | bgzip -c > {tx_out_file}" do.run(cmd.format(**locals()), "Annotate with problem annotations", data) return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "add_genome_context", "(", "orig_file", ",", "data", ")", ":", "out_file", "=", "\"%s-context.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "orig_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "orig_...
Annotate a file with annotations of genome context using vcfanno.
[ "Annotate", "a", "file", "with", "annotations", "of", "genome", "context", "using", "vcfanno", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L186-L214
train
218,189
bcbio/bcbio-nextgen
bcbio/illumina/demultiplex.py
_submit_and_wait
def _submit_and_wait(cmd, cores, config, output_dir): """Submit command with batch script specified in configuration, wait until finished """ batch_script = "submit_bcl2fastq.sh" if not os.path.exists(batch_script + ".finished"): if os.path.exists(batch_script + ".failed"): os.remove(batch_script + ".failed") with open(batch_script, "w") as out_handle: out_handle.write(config["process"]["bcl2fastq_batch"].format( cores=cores, bcl2fastq_cmd=" ".join(cmd), batch_script=batch_script)) submit_cmd = utils.get_in(config, ("process", "submit_cmd")) subprocess.check_call(submit_cmd.format(batch_script=batch_script), shell=True) # wait until finished or failure checkpoint file while 1: if os.path.exists(batch_script + ".finished"): break if os.path.exists(batch_script + ".failed"): raise ValueError("bcl2fastq batch script failed: %s" % os.path.join(output_dir, batch_script)) time.sleep(5)
python
def _submit_and_wait(cmd, cores, config, output_dir): """Submit command with batch script specified in configuration, wait until finished """ batch_script = "submit_bcl2fastq.sh" if not os.path.exists(batch_script + ".finished"): if os.path.exists(batch_script + ".failed"): os.remove(batch_script + ".failed") with open(batch_script, "w") as out_handle: out_handle.write(config["process"]["bcl2fastq_batch"].format( cores=cores, bcl2fastq_cmd=" ".join(cmd), batch_script=batch_script)) submit_cmd = utils.get_in(config, ("process", "submit_cmd")) subprocess.check_call(submit_cmd.format(batch_script=batch_script), shell=True) # wait until finished or failure checkpoint file while 1: if os.path.exists(batch_script + ".finished"): break if os.path.exists(batch_script + ".failed"): raise ValueError("bcl2fastq batch script failed: %s" % os.path.join(output_dir, batch_script)) time.sleep(5)
[ "def", "_submit_and_wait", "(", "cmd", ",", "cores", ",", "config", ",", "output_dir", ")", ":", "batch_script", "=", "\"submit_bcl2fastq.sh\"", "if", "not", "os", ".", "path", ".", "exists", "(", "batch_script", "+", "\".finished\"", ")", ":", "if", "os", ...
Submit command with batch script specified in configuration, wait until finished
[ "Submit", "command", "with", "batch", "script", "specified", "in", "configuration", "wait", "until", "finished" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/demultiplex.py#L32-L51
train
218,190
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_prep_items_from_base
def _prep_items_from_base(base, in_files, metadata, separators, force_single=False): """Prepare a set of configuration items for input files. """ details = [] in_files = _expand_dirs(in_files, KNOWN_EXTS) in_files = _expand_wildcards(in_files) ext_groups = collections.defaultdict(list) for ext, files in itertools.groupby( in_files, lambda x: KNOWN_EXTS.get(utils.splitext_plus(x)[-1].lower())): ext_groups[ext].extend(list(files)) for ext, files in ext_groups.items(): if ext == "bam": for f in files: details.append(_prep_bam_input(f, base)) elif ext in ["fastq", "fq", "fasta"]: files, glob_files = _find_glob_matches(files, metadata) for fs in glob_files: details.append(_prep_fastq_input(fs, base)) for fs in fastq.combine_pairs(files, force_single, separators=separators): details.append(_prep_fastq_input(fs, base)) elif ext in ["vcf"]: for f in files: details.append(_prep_vcf_input(f, base)) else: print("Ignoring unexpected input file types %s: %s" % (ext, list(files))) return details
python
def _prep_items_from_base(base, in_files, metadata, separators, force_single=False): """Prepare a set of configuration items for input files. """ details = [] in_files = _expand_dirs(in_files, KNOWN_EXTS) in_files = _expand_wildcards(in_files) ext_groups = collections.defaultdict(list) for ext, files in itertools.groupby( in_files, lambda x: KNOWN_EXTS.get(utils.splitext_plus(x)[-1].lower())): ext_groups[ext].extend(list(files)) for ext, files in ext_groups.items(): if ext == "bam": for f in files: details.append(_prep_bam_input(f, base)) elif ext in ["fastq", "fq", "fasta"]: files, glob_files = _find_glob_matches(files, metadata) for fs in glob_files: details.append(_prep_fastq_input(fs, base)) for fs in fastq.combine_pairs(files, force_single, separators=separators): details.append(_prep_fastq_input(fs, base)) elif ext in ["vcf"]: for f in files: details.append(_prep_vcf_input(f, base)) else: print("Ignoring unexpected input file types %s: %s" % (ext, list(files))) return details
[ "def", "_prep_items_from_base", "(", "base", ",", "in_files", ",", "metadata", ",", "separators", ",", "force_single", "=", "False", ")", ":", "details", "=", "[", "]", "in_files", "=", "_expand_dirs", "(", "in_files", ",", "KNOWN_EXTS", ")", "in_files", "="...
Prepare a set of configuration items for input files.
[ "Prepare", "a", "set", "of", "configuration", "items", "for", "input", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L100-L126
train
218,191
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_find_glob_matches
def _find_glob_matches(in_files, metadata): """Group files that match by globs for merging, rather than by explicit pairs. """ reg_files = copy.deepcopy(in_files) glob_files = [] for glob_search in [x for x in metadata.keys() if "*" in x]: cur = [] for fname in in_files: if fnmatch.fnmatch(fname, "*/%s" % glob_search): cur.append(fname) reg_files.remove(fname) assert cur, "Did not find file matches for %s" % glob_search glob_files.append(cur) return reg_files, glob_files
python
def _find_glob_matches(in_files, metadata): """Group files that match by globs for merging, rather than by explicit pairs. """ reg_files = copy.deepcopy(in_files) glob_files = [] for glob_search in [x for x in metadata.keys() if "*" in x]: cur = [] for fname in in_files: if fnmatch.fnmatch(fname, "*/%s" % glob_search): cur.append(fname) reg_files.remove(fname) assert cur, "Did not find file matches for %s" % glob_search glob_files.append(cur) return reg_files, glob_files
[ "def", "_find_glob_matches", "(", "in_files", ",", "metadata", ")", ":", "reg_files", "=", "copy", ".", "deepcopy", "(", "in_files", ")", "glob_files", "=", "[", "]", "for", "glob_search", "in", "[", "x", "for", "x", "in", "metadata", ".", "keys", "(", ...
Group files that match by globs for merging, rather than by explicit pairs.
[ "Group", "files", "that", "match", "by", "globs", "for", "merging", "rather", "than", "by", "explicit", "pairs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L128-L141
train
218,192
bcbio/bcbio-nextgen
bcbio/workflow/template.py
name_to_config
def name_to_config(template): """Read template file into a dictionary to use as base for all samples. Handles well-known template names, pulled from GitHub repository and local files. """ if objectstore.is_remote(template): with objectstore.open_file(template) as in_handle: config = yaml.safe_load(in_handle) with objectstore.open_file(template) as in_handle: txt_config = in_handle.read() elif os.path.isfile(template): if template.endswith(".csv"): raise ValueError("Expected YAML file for template and found CSV, are arguments switched? %s" % template) with open(template) as in_handle: txt_config = in_handle.read() with open(template) as in_handle: config = yaml.safe_load(in_handle) else: base_url = "https://raw.github.com/bcbio/bcbio-nextgen/master/config/templates/%s.yaml" try: with contextlib.closing(urllib.request.urlopen(base_url % template)) as in_handle: txt_config = in_handle.read().decode() with contextlib.closing(urllib.request.urlopen(base_url % template)) as in_handle: config = yaml.safe_load(in_handle) except (urllib.error.HTTPError, urllib.error.URLError): raise ValueError("Could not find template '%s' locally or in standard templates on GitHub" % template) return config, txt_config
python
def name_to_config(template): """Read template file into a dictionary to use as base for all samples. Handles well-known template names, pulled from GitHub repository and local files. """ if objectstore.is_remote(template): with objectstore.open_file(template) as in_handle: config = yaml.safe_load(in_handle) with objectstore.open_file(template) as in_handle: txt_config = in_handle.read() elif os.path.isfile(template): if template.endswith(".csv"): raise ValueError("Expected YAML file for template and found CSV, are arguments switched? %s" % template) with open(template) as in_handle: txt_config = in_handle.read() with open(template) as in_handle: config = yaml.safe_load(in_handle) else: base_url = "https://raw.github.com/bcbio/bcbio-nextgen/master/config/templates/%s.yaml" try: with contextlib.closing(urllib.request.urlopen(base_url % template)) as in_handle: txt_config = in_handle.read().decode() with contextlib.closing(urllib.request.urlopen(base_url % template)) as in_handle: config = yaml.safe_load(in_handle) except (urllib.error.HTTPError, urllib.error.URLError): raise ValueError("Could not find template '%s' locally or in standard templates on GitHub" % template) return config, txt_config
[ "def", "name_to_config", "(", "template", ")", ":", "if", "objectstore", ".", "is_remote", "(", "template", ")", ":", "with", "objectstore", ".", "open_file", "(", "template", ")", "as", "in_handle", ":", "config", "=", "yaml", ".", "safe_load", "(", "in_h...
Read template file into a dictionary to use as base for all samples. Handles well-known template names, pulled from GitHub repository and local files.
[ "Read", "template", "file", "into", "a", "dictionary", "to", "use", "as", "base", "for", "all", "samples", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L168-L196
train
218,193
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_write_config_file
def _write_config_file(items, global_vars, template, project_name, out_dir, remotes): """Write configuration file, adding required top level attributes. """ config_dir = utils.safe_makedir(os.path.join(out_dir, "config")) out_config_file = os.path.join(config_dir, "%s.yaml" % project_name) out = {"fc_name": project_name, "upload": {"dir": "../final"}, "details": items} if remotes.get("base"): r_base = objectstore.parse_remote(remotes.get("base")) out["upload"]["method"] = r_base.store out["upload"]["bucket"] = r_base.bucket out["upload"]["folder"] = os.path.join(r_base.key, "final") if r_base.key else "final" if r_base.region: out["upload"]["region"] = r_base.region if global_vars: out["globals"] = global_vars for k, v in template.items(): if k not in ["details"]: out[k] = v if os.path.exists(out_config_file): shutil.move(out_config_file, out_config_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) with open(out_config_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_config_file
python
def _write_config_file(items, global_vars, template, project_name, out_dir, remotes): """Write configuration file, adding required top level attributes. """ config_dir = utils.safe_makedir(os.path.join(out_dir, "config")) out_config_file = os.path.join(config_dir, "%s.yaml" % project_name) out = {"fc_name": project_name, "upload": {"dir": "../final"}, "details": items} if remotes.get("base"): r_base = objectstore.parse_remote(remotes.get("base")) out["upload"]["method"] = r_base.store out["upload"]["bucket"] = r_base.bucket out["upload"]["folder"] = os.path.join(r_base.key, "final") if r_base.key else "final" if r_base.region: out["upload"]["region"] = r_base.region if global_vars: out["globals"] = global_vars for k, v in template.items(): if k not in ["details"]: out[k] = v if os.path.exists(out_config_file): shutil.move(out_config_file, out_config_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) with open(out_config_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_config_file
[ "def", "_write_config_file", "(", "items", ",", "global_vars", ",", "template", ",", "project_name", ",", "out_dir", ",", "remotes", ")", ":", "config_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"con...
Write configuration file, adding required top level attributes.
[ "Write", "configuration", "file", "adding", "required", "top", "level", "attributes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L205-L231
train
218,194
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_set_global_vars
def _set_global_vars(metadata): """Identify files used multiple times in metadata and replace with global variables """ fnames = collections.defaultdict(list) for sample in metadata.keys(): for k, v in metadata[sample].items(): if isinstance(v, six.string_types) and os.path.isfile(v): v = _expand_file(v) metadata[sample][k] = v fnames[v].append(k) global_vars = {} # Skip global vars -- more confusing than useful # loc_counts = collections.defaultdict(int) # global_var_sub = {} # for fname, locs in fnames.items(): # if len(locs) > 1: # loc_counts[locs[0]] += 1 # name = "%s%s" % (locs[0], loc_counts[locs[0]]) # global_var_sub[fname] = name # global_vars[name] = fname # for sample in metadata.keys(): # for k, v in metadata[sample].items(): # if isinstance(v, six.string_types) and v in global_var_sub: # metadata[sample][k] = global_var_sub[v] return metadata, global_vars
python
def _set_global_vars(metadata): """Identify files used multiple times in metadata and replace with global variables """ fnames = collections.defaultdict(list) for sample in metadata.keys(): for k, v in metadata[sample].items(): if isinstance(v, six.string_types) and os.path.isfile(v): v = _expand_file(v) metadata[sample][k] = v fnames[v].append(k) global_vars = {} # Skip global vars -- more confusing than useful # loc_counts = collections.defaultdict(int) # global_var_sub = {} # for fname, locs in fnames.items(): # if len(locs) > 1: # loc_counts[locs[0]] += 1 # name = "%s%s" % (locs[0], loc_counts[locs[0]]) # global_var_sub[fname] = name # global_vars[name] = fname # for sample in metadata.keys(): # for k, v in metadata[sample].items(): # if isinstance(v, six.string_types) and v in global_var_sub: # metadata[sample][k] = global_var_sub[v] return metadata, global_vars
[ "def", "_set_global_vars", "(", "metadata", ")", ":", "fnames", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "sample", "in", "metadata", ".", "keys", "(", ")", ":", "for", "k", ",", "v", "in", "metadata", "[", "sample", "]", ".", ...
Identify files used multiple times in metadata and replace with global variables
[ "Identify", "files", "used", "multiple", "times", "in", "metadata", "and", "replace", "with", "global", "variables" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L238-L262
train
218,195
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_clean_string
def _clean_string(v, sinfo): """Test for and clean unicode present in template CSVs. """ if isinstance(v, (list, tuple)): return [_clean_string(x, sinfo) for x in v] else: assert isinstance(v, six.string_types), v try: if hasattr(v, "decode"): return str(v.decode("ascii")) else: return str(v.encode("ascii").decode("ascii")) except UnicodeDecodeError as msg: raise ValueError("Found unicode character in template CSV line %s:\n%s" % (sinfo, str(msg)))
python
def _clean_string(v, sinfo): """Test for and clean unicode present in template CSVs. """ if isinstance(v, (list, tuple)): return [_clean_string(x, sinfo) for x in v] else: assert isinstance(v, six.string_types), v try: if hasattr(v, "decode"): return str(v.decode("ascii")) else: return str(v.encode("ascii").decode("ascii")) except UnicodeDecodeError as msg: raise ValueError("Found unicode character in template CSV line %s:\n%s" % (sinfo, str(msg)))
[ "def", "_clean_string", "(", "v", ",", "sinfo", ")", ":", "if", "isinstance", "(", "v", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "_clean_string", "(", "x", ",", "sinfo", ")", "for", "x", "in", "v", "]", "else", ":", "assert",...
Test for and clean unicode present in template CSVs.
[ "Test", "for", "and", "clean", "unicode", "present", "in", "template", "CSVs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L264-L277
train
218,196
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_parse_metadata
def _parse_metadata(in_handle): """Reads metadata from a simple CSV structured input file. samplename,batch,phenotype ERR256785,batch1,normal """ metadata = {} reader = csv.reader(in_handle) while 1: header = next(reader) if not header[0].startswith("#"): break keys = [x.strip() for x in header[1:]] for sinfo in (x for x in reader if x and not x[0].startswith("#")): sinfo = [_strip_and_convert_lists(x) for x in sinfo] sample = sinfo[0] if isinstance(sample, list): sample = tuple(sample) # sanity check to avoid duplicate rows if sample in metadata: raise ValueError("Sample %s present multiple times in metadata file.\n" "If you need to specify multiple attributes as a list " "use a semi-colon to separate them on a single line.\n" "https://bcbio-nextgen.readthedocs.org/en/latest/" "contents/configuration.html#automated-sample-configuration\n" "Duplicate line is %s" % (sample, sinfo)) vals = [_clean_string(v, sinfo) for v in sinfo[1:]] metadata[sample] = dict(zip(keys, vals)) metadata, global_vars = _set_global_vars(metadata) return metadata, global_vars
python
def _parse_metadata(in_handle): """Reads metadata from a simple CSV structured input file. samplename,batch,phenotype ERR256785,batch1,normal """ metadata = {} reader = csv.reader(in_handle) while 1: header = next(reader) if not header[0].startswith("#"): break keys = [x.strip() for x in header[1:]] for sinfo in (x for x in reader if x and not x[0].startswith("#")): sinfo = [_strip_and_convert_lists(x) for x in sinfo] sample = sinfo[0] if isinstance(sample, list): sample = tuple(sample) # sanity check to avoid duplicate rows if sample in metadata: raise ValueError("Sample %s present multiple times in metadata file.\n" "If you need to specify multiple attributes as a list " "use a semi-colon to separate them on a single line.\n" "https://bcbio-nextgen.readthedocs.org/en/latest/" "contents/configuration.html#automated-sample-configuration\n" "Duplicate line is %s" % (sample, sinfo)) vals = [_clean_string(v, sinfo) for v in sinfo[1:]] metadata[sample] = dict(zip(keys, vals)) metadata, global_vars = _set_global_vars(metadata) return metadata, global_vars
[ "def", "_parse_metadata", "(", "in_handle", ")", ":", "metadata", "=", "{", "}", "reader", "=", "csv", ".", "reader", "(", "in_handle", ")", "while", "1", ":", "header", "=", "next", "(", "reader", ")", "if", "not", "header", "[", "0", "]", ".", "s...
Reads metadata from a simple CSV structured input file. samplename,batch,phenotype ERR256785,batch1,normal
[ "Reads", "metadata", "from", "a", "simple", "CSV", "structured", "input", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L279-L308
train
218,197
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_pname_and_metadata
def _pname_and_metadata(in_file): """Retrieve metadata and project name from the input metadata CSV file. Uses the input file name for the project name and for back compatibility, accepts the project name as an input, providing no metadata. """ if os.path.isfile(in_file): with open(in_file) as in_handle: md, global_vars = _parse_metadata(in_handle) base = os.path.splitext(os.path.basename(in_file))[0] md_file = in_file elif objectstore.is_remote(in_file): with objectstore.open_file(in_file) as in_handle: md, global_vars = _parse_metadata(in_handle) base = os.path.splitext(os.path.basename(in_file))[0] md_file = None else: if in_file.endswith(".csv"): raise ValueError("Did not find input metadata file: %s" % in_file) base, md, global_vars = _safe_name(os.path.splitext(os.path.basename(in_file))[0]), {}, {} md_file = None return _safe_name(base), md, global_vars, md_file
python
def _pname_and_metadata(in_file): """Retrieve metadata and project name from the input metadata CSV file. Uses the input file name for the project name and for back compatibility, accepts the project name as an input, providing no metadata. """ if os.path.isfile(in_file): with open(in_file) as in_handle: md, global_vars = _parse_metadata(in_handle) base = os.path.splitext(os.path.basename(in_file))[0] md_file = in_file elif objectstore.is_remote(in_file): with objectstore.open_file(in_file) as in_handle: md, global_vars = _parse_metadata(in_handle) base = os.path.splitext(os.path.basename(in_file))[0] md_file = None else: if in_file.endswith(".csv"): raise ValueError("Did not find input metadata file: %s" % in_file) base, md, global_vars = _safe_name(os.path.splitext(os.path.basename(in_file))[0]), {}, {} md_file = None return _safe_name(base), md, global_vars, md_file
[ "def", "_pname_and_metadata", "(", "in_file", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "in_file", ")", ":", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "md", ",", "global_vars", "=", "_parse_metadata", "(", "in_handle", ")"...
Retrieve metadata and project name from the input metadata CSV file. Uses the input file name for the project name and for back compatibility, accepts the project name as an input, providing no metadata.
[ "Retrieve", "metadata", "and", "project", "name", "from", "the", "input", "metadata", "CSV", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L318-L339
train
218,198
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_handle_special_yaml_cases
def _handle_special_yaml_cases(v): """Handle values that pass integer, boolean, list or dictionary values. """ if "::" in v: out = {} for part in v.split("::"): k_part, v_part = part.split(":") out[k_part] = v_part.split(";") v = out elif ";" in v: # split lists and remove accidental empty values v = [x for x in v.split(";") if x != ""] elif isinstance(v, list): v = v else: try: v = int(v) except ValueError: if v.lower() == "true": v = True elif v.lower() == "false": v = False return v
python
def _handle_special_yaml_cases(v): """Handle values that pass integer, boolean, list or dictionary values. """ if "::" in v: out = {} for part in v.split("::"): k_part, v_part = part.split(":") out[k_part] = v_part.split(";") v = out elif ";" in v: # split lists and remove accidental empty values v = [x for x in v.split(";") if x != ""] elif isinstance(v, list): v = v else: try: v = int(v) except ValueError: if v.lower() == "true": v = True elif v.lower() == "false": v = False return v
[ "def", "_handle_special_yaml_cases", "(", "v", ")", ":", "if", "\"::\"", "in", "v", ":", "out", "=", "{", "}", "for", "part", "in", "v", ".", "split", "(", "\"::\"", ")", ":", "k_part", ",", "v_part", "=", "part", ".", "split", "(", "\":\"", ")", ...
Handle values that pass integer, boolean, list or dictionary values.
[ "Handle", "values", "that", "pass", "integer", "boolean", "list", "or", "dictionary", "values", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L341-L363
train
218,199