repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_add_ped_metadata
def _add_ped_metadata(name, metadata): """Add standard PED file attributes into metadata if not present. http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped """ ignore = set(["-9", "undefined", "unknown", "."]) def _ped_mapping(x, valmap): try: x = int(x) except ValueError: x = -1 for k, v in valmap.items(): if k == x: return v return None def _ped_to_gender(x): return _ped_mapping(x, {1: "male", 2: "female"}) def _ped_to_phenotype(x): known_phenotypes = set(["unaffected", "affected", "tumor", "normal"]) if x in known_phenotypes: return x else: return _ped_mapping(x, {1: "unaffected", 2: "affected"}) def _ped_to_batch(x): if x not in ignore and x != "0": return x with open(metadata["ped"]) as in_handle: for line in in_handle: parts = line.split("\t")[:6] if parts[1] == str(name): for index, key, convert_fn in [(4, "sex", _ped_to_gender), (0, "batch", _ped_to_batch), (5, "phenotype", _ped_to_phenotype)]: val = convert_fn(parts[index]) if val is not None and key not in metadata: metadata[key] = val break return metadata
python
def _add_ped_metadata(name, metadata): """Add standard PED file attributes into metadata if not present. http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped """ ignore = set(["-9", "undefined", "unknown", "."]) def _ped_mapping(x, valmap): try: x = int(x) except ValueError: x = -1 for k, v in valmap.items(): if k == x: return v return None def _ped_to_gender(x): return _ped_mapping(x, {1: "male", 2: "female"}) def _ped_to_phenotype(x): known_phenotypes = set(["unaffected", "affected", "tumor", "normal"]) if x in known_phenotypes: return x else: return _ped_mapping(x, {1: "unaffected", 2: "affected"}) def _ped_to_batch(x): if x not in ignore and x != "0": return x with open(metadata["ped"]) as in_handle: for line in in_handle: parts = line.split("\t")[:6] if parts[1] == str(name): for index, key, convert_fn in [(4, "sex", _ped_to_gender), (0, "batch", _ped_to_batch), (5, "phenotype", _ped_to_phenotype)]: val = convert_fn(parts[index]) if val is not None and key not in metadata: metadata[key] = val break return metadata
[ "def", "_add_ped_metadata", "(", "name", ",", "metadata", ")", ":", "ignore", "=", "set", "(", "[", "\"-9\"", ",", "\"undefined\"", ",", "\"unknown\"", ",", "\".\"", "]", ")", "def", "_ped_mapping", "(", "x", ",", "valmap", ")", ":", "try", ":", "x", ...
Add standard PED file attributes into metadata if not present. http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
[ "Add", "standard", "PED", "file", "attributes", "into", "metadata", "if", "not", "present", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L365-L401
train
218,200
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_add_metadata
def _add_metadata(item, metadata, remotes, only_metadata=False):
    """Add metadata information from CSV file to current item.

    Retrieves metadata based on 'description' parsed from input CSV file.
    Adds to object and handles special keys:
    - `description`: A new description for the item. Used to relabel items
      based on the pre-determined description from fastq name or BAM read groups.
    - Keys matching supported names in the algorithm section map to key/value
      pairs there instead of metadata.
    """
    # Try the description first, then file- and variant-derived keys.
    candidate_keys = [item["description"]] + _get_file_keys(item) + _get_vrn_keys(item)
    item_md = None
    for key in candidate_keys:
        item_md = metadata.get(key)
        if item_md:
            break
    if not item_md:
        item_md = _find_glob_metadata(item["files"], metadata)
    if remotes.get("region"):
        item["algorithm"]["variant_regions"] = remotes["region"]
    top_level_keys = set(["description", "genome_build", "lane", "vrn_file", "files", "analysis"])
    keep_sample = True
    if item_md and len(item_md) > 0:
        if "metadata" not in item:
            item["metadata"] = {}
        for key, val in item_md.items():
            if not val:
                continue
            if key in top_level_keys:
                item[key] = val
            elif key in run_info.ALGORITHM_KEYS:
                item["algorithm"][key] = _handle_special_yaml_cases(val)
            else:
                item["metadata"][key] = _handle_special_yaml_cases(val)
    elif len(metadata) > 0:
        warn = "Dropped sample" if only_metadata else "Added minimal sample information"
        print("WARNING: %s: metadata not found for %s, %s"
              % (warn, item["description"], [os.path.basename(f) for f in item["files"]]))
        keep_sample = not only_metadata
    if tz.get_in(["metadata", "ped"], item):
        item["metadata"] = _add_ped_metadata(item["description"], item["metadata"])
    return item if keep_sample else None
python
def _add_metadata(item, metadata, remotes, only_metadata=False): """Add metadata information from CSV file to current item. Retrieves metadata based on 'description' parsed from input CSV file. Adds to object and handles special keys: - `description`: A new description for the item. Used to relabel items based on the pre-determined description from fastq name or BAM read groups. - Keys matching supported names in the algorithm section map to key/value pairs there instead of metadata. """ for check_key in [item["description"]] + _get_file_keys(item) + _get_vrn_keys(item): item_md = metadata.get(check_key) if item_md: break if not item_md: item_md = _find_glob_metadata(item["files"], metadata) if remotes.get("region"): item["algorithm"]["variant_regions"] = remotes["region"] TOP_LEVEL = set(["description", "genome_build", "lane", "vrn_file", "files", "analysis"]) keep_sample = True if item_md and len(item_md) > 0: if "metadata" not in item: item["metadata"] = {} for k, v in item_md.items(): if v: if k in TOP_LEVEL: item[k] = v elif k in run_info.ALGORITHM_KEYS: v = _handle_special_yaml_cases(v) item["algorithm"][k] = v else: v = _handle_special_yaml_cases(v) item["metadata"][k] = v elif len(metadata) > 0: warn = "Dropped sample" if only_metadata else "Added minimal sample information" print("WARNING: %s: metadata not found for %s, %s" % (warn, item["description"], [os.path.basename(f) for f in item["files"]])) keep_sample = not only_metadata if tz.get_in(["metadata", "ped"], item): item["metadata"] = _add_ped_metadata(item["description"], item["metadata"]) return item if keep_sample else None
[ "def", "_add_metadata", "(", "item", ",", "metadata", ",", "remotes", ",", "only_metadata", "=", "False", ")", ":", "for", "check_key", "in", "[", "item", "[", "\"description\"", "]", "]", "+", "_get_file_keys", "(", "item", ")", "+", "_get_vrn_keys", "(",...
Add metadata information from CSV file to current item. Retrieves metadata based on 'description' parsed from input CSV file. Adds to object and handles special keys: - `description`: A new description for the item. Used to relabel items based on the pre-determined description from fastq name or BAM read groups. - Keys matching supported names in the algorithm section map to key/value pairs there instead of metadata.
[ "Add", "metadata", "information", "from", "CSV", "file", "to", "current", "item", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L421-L461
train
218,201
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_retrieve_remote
def _retrieve_remote(fnames):
    """Retrieve remote inputs found in the same bucket as the template or metadata files.

    Returns a dict with the remote base directory, candidate input files and a
    single BED region file (when exactly one is present); empty dict when no
    remote file is given.
    """
    for fname in fnames:
        if not objectstore.is_remote(fname):
            continue
        remote_base = os.path.dirname(fname)
        input_exts = tuple(KNOWN_EXTS.keys())
        inputs = []
        regions = []
        for rfname in objectstore.list(remote_base):
            if rfname.endswith(input_exts):
                inputs.append(rfname)
            elif rfname.endswith((".bed", ".bed.gz")):
                regions.append(rfname)
        return {"base": remote_base,
                "inputs": inputs,
                "region": regions[0] if len(regions) == 1 else None}
    return {}
python
def _retrieve_remote(fnames): """Retrieve remote inputs found in the same bucket as the template or metadata files. """ for fname in fnames: if objectstore.is_remote(fname): inputs = [] regions = [] remote_base = os.path.dirname(fname) for rfname in objectstore.list(remote_base): if rfname.endswith(tuple(KNOWN_EXTS.keys())): inputs.append(rfname) elif rfname.endswith((".bed", ".bed.gz")): regions.append(rfname) return {"base": remote_base, "inputs": inputs, "region": regions[0] if len(regions) == 1 else None} return {}
[ "def", "_retrieve_remote", "(", "fnames", ")", ":", "for", "fname", "in", "fnames", ":", "if", "objectstore", ".", "is_remote", "(", "fname", ")", ":", "inputs", "=", "[", "]", "regions", "=", "[", "]", "remote_base", "=", "os", ".", "path", ".", "di...
Retrieve remote inputs found in the same bucket as the template or metadata files.
[ "Retrieve", "remote", "inputs", "found", "in", "the", "same", "bucket", "as", "the", "template", "or", "metadata", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L477-L493
train
218,202
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_convert_to_relpaths
def _convert_to_relpaths(data, work_dir): """Convert absolute paths in the input data to relative paths to the work directory. """ work_dir = os.path.abspath(work_dir) data["files"] = [os.path.relpath(f, work_dir) for f in data["files"]] for topk in ["metadata", "algorithm"]: for k, v in data[topk].items(): if isinstance(v, six.string_types) and os.path.isfile(v) and os.path.isabs(v): data[topk][k] = os.path.relpath(v, work_dir) return data
python
def _convert_to_relpaths(data, work_dir): """Convert absolute paths in the input data to relative paths to the work directory. """ work_dir = os.path.abspath(work_dir) data["files"] = [os.path.relpath(f, work_dir) for f in data["files"]] for topk in ["metadata", "algorithm"]: for k, v in data[topk].items(): if isinstance(v, six.string_types) and os.path.isfile(v) and os.path.isabs(v): data[topk][k] = os.path.relpath(v, work_dir) return data
[ "def", "_convert_to_relpaths", "(", "data", ",", "work_dir", ")", ":", "work_dir", "=", "os", ".", "path", ".", "abspath", "(", "work_dir", ")", "data", "[", "\"files\"", "]", "=", "[", "os", ".", "path", ".", "relpath", "(", "f", ",", "work_dir", ")...
Convert absolute paths in the input data to relative paths to the work directory.
[ "Convert", "absolute", "paths", "in", "the", "input", "data", "to", "relative", "paths", "to", "the", "work", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L495-L504
train
218,203
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_check_all_metadata_found
def _check_all_metadata_found(metadata, items): """Print warning if samples in CSV file are missing in folder""" for name in metadata: seen = False for sample in items: check_file = sample["files"][0] if sample.get("files") else sample["vrn_file"] if isinstance(name, (tuple, list)): if check_file.find(name[0]) > -1: seen = True elif check_file.find(name) > -1: seen = True elif "*" in name and fnmatch.fnmatch(check_file, "*/%s" % name): seen = True if not seen: print("WARNING: sample not found %s" % str(name))
python
def _check_all_metadata_found(metadata, items): """Print warning if samples in CSV file are missing in folder""" for name in metadata: seen = False for sample in items: check_file = sample["files"][0] if sample.get("files") else sample["vrn_file"] if isinstance(name, (tuple, list)): if check_file.find(name[0]) > -1: seen = True elif check_file.find(name) > -1: seen = True elif "*" in name and fnmatch.fnmatch(check_file, "*/%s" % name): seen = True if not seen: print("WARNING: sample not found %s" % str(name))
[ "def", "_check_all_metadata_found", "(", "metadata", ",", "items", ")", ":", "for", "name", "in", "metadata", ":", "seen", "=", "False", "for", "sample", "in", "items", ":", "check_file", "=", "sample", "[", "\"files\"", "]", "[", "0", "]", "if", "sample...
Print warning if samples in CSV file are missing in folder
[ "Print", "warning", "if", "samples", "in", "CSV", "file", "are", "missing", "in", "folder" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L506-L520
train
218,204
bcbio/bcbio-nextgen
bcbio/workflow/template.py
_copy_to_configdir
def _copy_to_configdir(items, out_dir, args):
    """Copy configuration files like PED inputs to working config directory.

    Rewrites each item's metadata `ped` entry to point at the copied file and
    also copies a system configuration file when supplied on the arguments.
    """
    config_dir = os.path.join(out_dir, "config")
    out = []
    for item in items:
        ped_file = tz.get_in(["metadata", "ped"], item)
        if ped_file and os.path.exists(ped_file):
            ped_config_file = os.path.join(config_dir, os.path.basename(ped_file))
            if not os.path.exists(ped_config_file):
                shutil.copy(ped_file, ped_config_file)
            item["metadata"]["ped"] = ped_config_file
        out.append(item)
    if getattr(args, "systemconfig", None):
        shutil.copy(args.systemconfig,
                    os.path.join(config_dir, os.path.basename(args.systemconfig)))
    return out
python
def _copy_to_configdir(items, out_dir, args): """Copy configuration files like PED inputs to working config directory. """ out = [] for item in items: ped_file = tz.get_in(["metadata", "ped"], item) if ped_file and os.path.exists(ped_file): ped_config_file = os.path.join(out_dir, "config", os.path.basename(ped_file)) if not os.path.exists(ped_config_file): shutil.copy(ped_file, ped_config_file) item["metadata"]["ped"] = ped_config_file out.append(item) if hasattr(args, "systemconfig") and args.systemconfig: shutil.copy(args.systemconfig, os.path.join(out_dir, "config", os.path.basename(args.systemconfig))) return out
[ "def", "_copy_to_configdir", "(", "items", ",", "out_dir", ",", "args", ")", ":", "out", "=", "[", "]", "for", "item", "in", "items", ":", "ped_file", "=", "tz", ".", "get_in", "(", "[", "\"metadata\"", ",", "\"ped\"", "]", ",", "item", ")", "if", ...
Copy configuration files like PED inputs to working config directory.
[ "Copy", "configuration", "files", "like", "PED", "inputs", "to", "working", "config", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L522-L536
train
218,205
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
load_system_config
def load_system_config(config_file=None, work_dir=None, allow_missing=False):
    """Load bcbio_system.yaml configuration file, handling standard defaults.

    Looks for configuration file in default location within final base
    directory from a standard installation. Handles both standard installs
    (galaxy/bcbio_system.yaml) and docker installs (config/bcbio_system.yaml).
    """
    docker_config = _get_docker_config()
    if config_file is None:
        config_file = "bcbio_system.yaml"
    if not os.path.exists(config_file):
        # Fall back to the standard galaxy directory inside the install.
        base_dir = get_base_installdir()
        fallback = os.path.join(base_dir, "galaxy", config_file)
        if os.path.exists(fallback):
            config_file = fallback
        elif allow_missing:
            config_file = None
        else:
            raise ValueError("Could not find input system configuration file %s, "
                             "including inside standard directory %s" %
                             (config_file, os.path.join(base_dir, "galaxy")))
    config = load_config(config_file) if config_file else {}
    if docker_config:
        assert work_dir is not None, "Need working directory to merge docker config"
        merged_name = "%s-merged%s" % os.path.splitext(os.path.basename(config_file))
        config_file = os.path.join(work_dir, merged_name)
        config = _merge_system_configs(config, docker_config, config_file)
    if "algorithm" not in config:
        config["algorithm"] = {}
    config["bcbio_system"] = config_file
    return config, config_file
python
def load_system_config(config_file=None, work_dir=None, allow_missing=False): """Load bcbio_system.yaml configuration file, handling standard defaults. Looks for configuration file in default location within final base directory from a standard installation. Handles both standard installs (galaxy/bcbio_system.yaml) and docker installs (config/bcbio_system.yaml). """ docker_config = _get_docker_config() if config_file is None: config_file = "bcbio_system.yaml" if not os.path.exists(config_file): base_dir = get_base_installdir() test_config = os.path.join(base_dir, "galaxy", config_file) if os.path.exists(test_config): config_file = test_config elif allow_missing: config_file = None else: raise ValueError("Could not find input system configuration file %s, " "including inside standard directory %s" % (config_file, os.path.join(base_dir, "galaxy"))) config = load_config(config_file) if config_file else {} if docker_config: assert work_dir is not None, "Need working directory to merge docker config" config_file = os.path.join(work_dir, "%s-merged%s" % os.path.splitext(os.path.basename(config_file))) config = _merge_system_configs(config, docker_config, config_file) if "algorithm" not in config: config["algorithm"] = {} config["bcbio_system"] = config_file return config, config_file
[ "def", "load_system_config", "(", "config_file", "=", "None", ",", "work_dir", "=", "None", ",", "allow_missing", "=", "False", ")", ":", "docker_config", "=", "_get_docker_config", "(", ")", "if", "config_file", "is", "None", ":", "config_file", "=", "\"bcbio...
Load bcbio_system.yaml configuration file, handling standard defaults. Looks for configuration file in default location within final base directory from a standard installation. Handles both standard installs (galaxy/bcbio_system.yaml) and docker installs (config/bcbio_system.yaml).
[ "Load", "bcbio_system", ".", "yaml", "configuration", "file", "handling", "standard", "defaults", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L50-L79
train
218,206
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
_merge_system_configs
def _merge_system_configs(host_config, container_config, out_file=None): """Create a merged system configuration from external and internal specification. """ out = copy.deepcopy(container_config) for k, v in host_config.items(): if k in set(["galaxy_config"]): out[k] = v elif k == "resources": for pname, resources in v.items(): if not isinstance(resources, dict) and pname not in out[k]: out[k][pname] = resources else: for rname, rval in resources.items(): if (rname in set(["cores", "jvm_opts", "memory"]) or pname in set(["gatk", "mutect"])): if pname not in out[k]: out[k][pname] = {} out[k][pname][rname] = rval # Ensure final file is relocatable by mapping back to reference directory if "bcbio_system" in out and ("galaxy_config" not in out or not os.path.isabs(out["galaxy_config"])): out["galaxy_config"] = os.path.normpath(os.path.join(os.path.dirname(out["bcbio_system"]), os.pardir, "galaxy", "universe_wsgi.ini")) if out_file: with open(out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out
python
def _merge_system_configs(host_config, container_config, out_file=None): """Create a merged system configuration from external and internal specification. """ out = copy.deepcopy(container_config) for k, v in host_config.items(): if k in set(["galaxy_config"]): out[k] = v elif k == "resources": for pname, resources in v.items(): if not isinstance(resources, dict) and pname not in out[k]: out[k][pname] = resources else: for rname, rval in resources.items(): if (rname in set(["cores", "jvm_opts", "memory"]) or pname in set(["gatk", "mutect"])): if pname not in out[k]: out[k][pname] = {} out[k][pname][rname] = rval # Ensure final file is relocatable by mapping back to reference directory if "bcbio_system" in out and ("galaxy_config" not in out or not os.path.isabs(out["galaxy_config"])): out["galaxy_config"] = os.path.normpath(os.path.join(os.path.dirname(out["bcbio_system"]), os.pardir, "galaxy", "universe_wsgi.ini")) if out_file: with open(out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out
[ "def", "_merge_system_configs", "(", "host_config", ",", "container_config", ",", "out_file", "=", "None", ")", ":", "out", "=", "copy", ".", "deepcopy", "(", "container_config", ")", "for", "k", ",", "v", "in", "host_config", ".", "items", "(", ")", ":", ...
Create a merged system configuration from external and internal specification.
[ "Create", "a", "merged", "system", "configuration", "from", "external", "and", "internal", "specification", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L84-L110
train
218,207
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
merge_resources
def merge_resources(args):
    """Merge docker local resources and global resource specification in a set of arguments.

    Finds the `data` object within passed arguments and updates the resources
    from a local docker configuration if present.
    """
    docker_config = _get_docker_config()
    if not docker_config:
        return args

    def _update_resources(config):
        # Replace resource settings with the docker-merged version.
        merged = _merge_system_configs(config, docker_config)
        config["resources"] = merged["resources"]
        return config

    return _update_config(args, _update_resources, allow_missing=True)
python
def merge_resources(args): """Merge docker local resources and global resource specification in a set of arguments. Finds the `data` object within passed arguments and updates the resources from a local docker configuration if present. """ docker_config = _get_docker_config() if not docker_config: return args else: def _update_resources(config): config["resources"] = _merge_system_configs(config, docker_config)["resources"] return config return _update_config(args, _update_resources, allow_missing=True)
[ "def", "merge_resources", "(", "args", ")", ":", "docker_config", "=", "_get_docker_config", "(", ")", "if", "not", "docker_config", ":", "return", "args", "else", ":", "def", "_update_resources", "(", "config", ")", ":", "config", "[", "\"resources\"", "]", ...
Merge docker local resources and global resource specification in a set of arguments. Finds the `data` object within passed arguments and updates the resources from a local docker configuration if present.
[ "Merge", "docker", "local", "resources", "and", "global", "resource", "specification", "in", "a", "set", "of", "arguments", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L118-L131
train
218,208
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
load_config
def load_config(config_file):
    """Load YAML config file, replacing environmental variables."""
    with open(config_file) as in_handle:
        config = yaml.safe_load(in_handle)
    config = _expand_paths(config)
    if 'resources' not in config:
        config['resources'] = {}
    # lowercase resource names, the preferred way to specify, for back-compatibility
    lowered = {key.lower(): val
               for key, val in config["resources"].items()
               if key.lower() != key}
    config["resources"].update(lowered)
    return config
python
def load_config(config_file): """Load YAML config file, replacing environmental variables. """ with open(config_file) as in_handle: config = yaml.safe_load(in_handle) config = _expand_paths(config) if 'resources' not in config: config['resources'] = {} # lowercase resource names, the preferred way to specify, for back-compatibility newr = {} for k, v in config["resources"].items(): if k.lower() != k: newr[k.lower()] = v config["resources"].update(newr) return config
[ "def", "load_config", "(", "config_file", ")", ":", "with", "open", "(", "config_file", ")", "as", "in_handle", ":", "config", "=", "yaml", ".", "safe_load", "(", "in_handle", ")", "config", "=", "_expand_paths", "(", "config", ")", "if", "'resources'", "n...
Load YAML config file, replacing environmental variables.
[ "Load", "YAML", "config", "file", "replacing", "environmental", "variables", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L133-L147
train
218,209
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
get_resources
def get_resources(name, config):
    """Retrieve resources for a program, pulling from multiple config sources.

    Falls back to the `default` resource section when the named program has no
    entry of its own.
    """
    fallback = tz.get_in(["resources", "default"], config, {})
    return tz.get_in(["resources", name], config, fallback)
python
def get_resources(name, config): """Retrieve resources for a program, pulling from multiple config sources. """ return tz.get_in(["resources", name], config, tz.get_in(["resources", "default"], config, {}))
[ "def", "get_resources", "(", "name", ",", "config", ")", ":", "return", "tz", ".", "get_in", "(", "[", "\"resources\"", ",", "name", "]", ",", "config", ",", "tz", ".", "get_in", "(", "[", "\"resources\"", ",", "\"default\"", "]", ",", "config", ",", ...
Retrieve resources for a program, pulling from multiple config sources.
[ "Retrieve", "resources", "for", "a", "program", "pulling", "from", "multiple", "config", "sources", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L165-L169
train
218,210
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
get_program
def get_program(name, config, ptype="cmd", default=None):
    """Retrieve program information from the configuration.

    This handles back compatible location specification in input YAML. The
    preferred location for program information is in `resources` but the older
    `program` tag is also supported.
    """
    # support taking in the data dictionary
    config = config.get("config", config)
    try:
        pconfig = config.get("resources", {})[name]
    except KeyError:
        pconfig = {}
    # If have leftover old-style `program` entries, fold them in without
    # overriding the resources section.
    legacy = config.get("program", {}).get(name, None)
    if legacy:
        for key in ["dir", "cmd"]:
            if key not in pconfig:
                pconfig[key] = legacy
    if ptype == "cmd":
        return _get_program_cmd(name, pconfig, config, default)
    if ptype == "dir":
        return _get_program_dir(name, pconfig)
    raise ValueError("Don't understand program type: %s" % ptype)
python
def get_program(name, config, ptype="cmd", default=None): """Retrieve program information from the configuration. This handles back compatible location specification in input YAML. The preferred location for program information is in `resources` but the older `program` tag is also supported. """ # support taking in the data dictionary config = config.get("config", config) try: pconfig = config.get("resources", {})[name] # If have leftover old except KeyError: pconfig = {} old_config = config.get("program", {}).get(name, None) if old_config: for key in ["dir", "cmd"]: if not key in pconfig: pconfig[key] = old_config if ptype == "cmd": return _get_program_cmd(name, pconfig, config, default) elif ptype == "dir": return _get_program_dir(name, pconfig) else: raise ValueError("Don't understand program type: %s" % ptype)
[ "def", "get_program", "(", "name", ",", "config", ",", "ptype", "=", "\"cmd\"", ",", "default", "=", "None", ")", ":", "# support taking in the data dictionary", "config", "=", "config", ".", "get", "(", "\"config\"", ",", "config", ")", "try", ":", "pconfig...
Retrieve program information from the configuration. This handles back compatible location specification in input YAML. The preferred location for program information is in `resources` but the older `program` tag is also supported.
[ "Retrieve", "program", "information", "from", "the", "configuration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L171-L195
train
218,211
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
_get_program_cmd
def _get_program_cmd(name, pconfig, config, default): """Retrieve commandline of a program. """ if pconfig is None: return name elif isinstance(pconfig, six.string_types): return pconfig elif "cmd" in pconfig: return pconfig["cmd"] elif default is not None: return default else: return name
python
def _get_program_cmd(name, pconfig, config, default): """Retrieve commandline of a program. """ if pconfig is None: return name elif isinstance(pconfig, six.string_types): return pconfig elif "cmd" in pconfig: return pconfig["cmd"] elif default is not None: return default else: return name
[ "def", "_get_program_cmd", "(", "name", ",", "pconfig", ",", "config", ",", "default", ")", ":", "if", "pconfig", "is", "None", ":", "return", "name", "elif", "isinstance", "(", "pconfig", ",", "six", ".", "string_types", ")", ":", "return", "pconfig", "...
Retrieve commandline of a program.
[ "Retrieve", "commandline", "of", "a", "program", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L221-L233
train
218,212
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
get_jar
def get_jar(base_name, dname):
    """Retrieve a jar in the provided directory

    Raises ValueError when zero or multiple matching jars are found.
    """
    pattern = os.path.join(expand_path(dname), "%s*.jar" % base_name)
    jars = glob.glob(pattern)
    if not jars:
        raise ValueError("Could not find java jar %s in %s" % (base_name, dname))
    if len(jars) > 1:
        raise ValueError("Found multiple jars for %s in %s. Need single jar: %s" %
                         (base_name, dname, jars))
    return jars[0]
python
def get_jar(base_name, dname): """Retrieve a jar in the provided directory """ jars = glob.glob(os.path.join(expand_path(dname), "%s*.jar" % base_name)) if len(jars) == 1: return jars[0] elif len(jars) > 1: raise ValueError("Found multiple jars for %s in %s. Need single jar: %s" % (base_name, dname, jars)) else: raise ValueError("Could not find java jar %s in %s" % (base_name, dname))
[ "def", "get_jar", "(", "base_name", ",", "dname", ")", ":", "jars", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "expand_path", "(", "dname", ")", ",", "\"%s*.jar\"", "%", "base_name", ")", ")", "if", "len", "(", "jars", ")",...
Retrieve a jar in the provided directory
[ "Retrieve", "a", "jar", "in", "the", "provided", "directory" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L247-L259
train
218,213
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
get_algorithm_config
def get_algorithm_config(xs):
    """Flexibly extract algorithm configuration for a sample from any function arguments.

    Accepts a single dict, a flat list of arguments, or nested argument lists.
    """
    if isinstance(xs, dict):
        xs = [xs]
    for x in xs:
        if is_std_config_arg(x):
            return x["algorithm"]
        if is_nested_config_arg(x):
            return x["config"]["algorithm"]
        if isinstance(x, (list, tuple)) and is_nested_config_arg(x[0]):
            return x[0]["config"]["algorithm"]
    raise ValueError("Did not find algorithm configuration in items: {0}"
                     .format(pprint.pformat(xs)))
python
def get_algorithm_config(xs): """Flexibly extract algorithm configuration for a sample from any function arguments. """ if isinstance(xs, dict): xs = [xs] for x in xs: if is_std_config_arg(x): return x["algorithm"] elif is_nested_config_arg(x): return x["config"]["algorithm"] elif isinstance(x, (list, tuple)) and is_nested_config_arg(x[0]): return x[0]["config"]["algorithm"] raise ValueError("Did not find algorithm configuration in items: {0}" .format(pprint.pformat(xs)))
[ "def", "get_algorithm_config", "(", "xs", ")", ":", "if", "isinstance", "(", "xs", ",", "dict", ")", ":", "xs", "=", "[", "xs", "]", "for", "x", "in", "xs", ":", "if", "is_std_config_arg", "(", "x", ")", ":", "return", "x", "[", "\"algorithm\"", "]...
Flexibly extract algorithm configuration for a sample from any function arguments.
[ "Flexibly", "extract", "algorithm", "configuration", "for", "a", "sample", "from", "any", "function", "arguments", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L269-L282
train
218,214
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
get_dataarg
def get_dataarg(args): """Retrieve the world 'data' argument from a set of input parameters. """ for i, arg in enumerate(args): if is_nested_config_arg(arg): return i, arg elif is_std_config_arg(arg): return i, {"config": arg} elif isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]): return i, arg[0] raise ValueError("Did not find configuration or data object in arguments: %s" % args)
python
def get_dataarg(args): """Retrieve the world 'data' argument from a set of input parameters. """ for i, arg in enumerate(args): if is_nested_config_arg(arg): return i, arg elif is_std_config_arg(arg): return i, {"config": arg} elif isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]): return i, arg[0] raise ValueError("Did not find configuration or data object in arguments: %s" % args)
[ "def", "get_dataarg", "(", "args", ")", ":", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "if", "is_nested_config_arg", "(", "arg", ")", ":", "return", "i", ",", "arg", "elif", "is_std_config_arg", "(", "arg", ")", ":", "return", ...
Retrieve the world 'data' argument from a set of input parameters.
[ "Retrieve", "the", "world", "data", "argument", "from", "a", "set", "of", "input", "parameters", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L284-L294
train
218,215
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
add_cores_to_config
def add_cores_to_config(args, cores_per_job, parallel=None): """Add information about available cores for a job to configuration. Ugly hack to update core information in a configuration dictionary. """ def _update_cores(config): config["algorithm"]["num_cores"] = int(cores_per_job) if parallel: parallel.pop("view", None) config["parallel"] = parallel return config return _update_config(args, _update_cores)
python
def add_cores_to_config(args, cores_per_job, parallel=None): """Add information about available cores for a job to configuration. Ugly hack to update core information in a configuration dictionary. """ def _update_cores(config): config["algorithm"]["num_cores"] = int(cores_per_job) if parallel: parallel.pop("view", None) config["parallel"] = parallel return config return _update_config(args, _update_cores)
[ "def", "add_cores_to_config", "(", "args", ",", "cores_per_job", ",", "parallel", "=", "None", ")", ":", "def", "_update_cores", "(", "config", ")", ":", "config", "[", "\"algorithm\"", "]", "[", "\"num_cores\"", "]", "=", "int", "(", "cores_per_job", ")", ...
Add information about available cores for a job to configuration. Ugly hack to update core information in a configuration dictionary.
[ "Add", "information", "about", "available", "cores", "for", "a", "job", "to", "configuration", ".", "Ugly", "hack", "to", "update", "core", "information", "in", "a", "configuration", "dictionary", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L296-L306
train
218,216
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
_update_config
def _update_config(args, update_fn, allow_missing=False): """Update configuration, nested in argument list, with the provided update function. """ new_i = None for i, arg in enumerate(args): if (is_std_config_arg(arg) or is_nested_config_arg(arg) or (isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]))): new_i = i break if new_i is None: if allow_missing: return args else: raise ValueError("Could not find configuration in args: %s" % str(args)) new_arg = args[new_i] if is_nested_config_arg(new_arg): new_arg["config"] = update_fn(copy.deepcopy(new_arg["config"])) elif is_std_config_arg(new_arg): new_arg = update_fn(copy.deepcopy(new_arg)) elif isinstance(arg, (list, tuple)) and is_nested_config_arg(new_arg[0]): new_arg_first = new_arg[0] new_arg_first["config"] = update_fn(copy.deepcopy(new_arg_first["config"])) new_arg = [new_arg_first] + new_arg[1:] else: raise ValueError("Unexpected configuration dictionary: %s" % new_arg) args = list(args)[:] args[new_i] = new_arg return args
python
def _update_config(args, update_fn, allow_missing=False): """Update configuration, nested in argument list, with the provided update function. """ new_i = None for i, arg in enumerate(args): if (is_std_config_arg(arg) or is_nested_config_arg(arg) or (isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]))): new_i = i break if new_i is None: if allow_missing: return args else: raise ValueError("Could not find configuration in args: %s" % str(args)) new_arg = args[new_i] if is_nested_config_arg(new_arg): new_arg["config"] = update_fn(copy.deepcopy(new_arg["config"])) elif is_std_config_arg(new_arg): new_arg = update_fn(copy.deepcopy(new_arg)) elif isinstance(arg, (list, tuple)) and is_nested_config_arg(new_arg[0]): new_arg_first = new_arg[0] new_arg_first["config"] = update_fn(copy.deepcopy(new_arg_first["config"])) new_arg = [new_arg_first] + new_arg[1:] else: raise ValueError("Unexpected configuration dictionary: %s" % new_arg) args = list(args)[:] args[new_i] = new_arg return args
[ "def", "_update_config", "(", "args", ",", "update_fn", ",", "allow_missing", "=", "False", ")", ":", "new_i", "=", "None", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "if", "(", "is_std_config_arg", "(", "arg", ")", "or", "is_nes...
Update configuration, nested in argument list, with the provided update function.
[ "Update", "configuration", "nested", "in", "argument", "list", "with", "the", "provided", "update", "function", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L308-L336
train
218,217
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
convert_to_bytes
def convert_to_bytes(mem_str): """Convert a memory specification, potentially with M or G, into bytes. """ if str(mem_str)[-1].upper().endswith("G"): return int(round(float(mem_str[:-1]) * 1024 * 1024)) elif str(mem_str)[-1].upper().endswith("M"): return int(round(float(mem_str[:-1]) * 1024)) else: return int(round(float(mem_str)))
python
def convert_to_bytes(mem_str): """Convert a memory specification, potentially with M or G, into bytes. """ if str(mem_str)[-1].upper().endswith("G"): return int(round(float(mem_str[:-1]) * 1024 * 1024)) elif str(mem_str)[-1].upper().endswith("M"): return int(round(float(mem_str[:-1]) * 1024)) else: return int(round(float(mem_str)))
[ "def", "convert_to_bytes", "(", "mem_str", ")", ":", "if", "str", "(", "mem_str", ")", "[", "-", "1", "]", ".", "upper", "(", ")", ".", "endswith", "(", "\"G\"", ")", ":", "return", "int", "(", "round", "(", "float", "(", "mem_str", "[", ":", "-"...
Convert a memory specification, potentially with M or G, into bytes.
[ "Convert", "a", "memory", "specification", "potentially", "with", "M", "or", "G", "into", "bytes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L338-L346
train
218,218
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
adjust_memory
def adjust_memory(val, magnitude, direction="increase", out_modifier="", maximum=None): """Adjust memory based on number of cores utilized. """ modifier = val[-1:] amount = float(val[:-1]) if direction == "decrease": new_amount = amount / float(magnitude) # dealing with a specifier like 1G, need to scale to Mb if new_amount < 1 or (out_modifier.upper().startswith("M") and modifier.upper().startswith("G")): if modifier.upper().startswith("G"): new_amount = (amount * 1024) / magnitude modifier = "M" + modifier[1:] else: raise ValueError("Unexpected decrease in memory: %s by %s" % (val, magnitude)) amount = int(new_amount) elif direction == "increase" and magnitude > 1: # for increases with multiple cores, leave small percentage of # memory for system to maintain process running resource and # avoid OOM killers adjuster = 0.91 amount = int(math.ceil(amount * (adjuster * magnitude))) if out_modifier.upper().startswith("G") and modifier.upper().startswith("M"): modifier = out_modifier amount = int(math.floor(amount / 1024.0)) if out_modifier.upper().startswith("M") and modifier.upper().startswith("G"): modifier = out_modifier modifier = int(amount * 1024) if maximum: max_modifier = maximum[-1] max_amount = float(maximum[:-1]) if modifier.upper() == "G" and max_modifier.upper() == "M": max_amount = max_amount / 1024.0 elif modifier.upper() == "M" and max_modifier.upper() == "G": max_amount = max_amount * 1024.0 amount = min([amount, max_amount]) return "{amount}{modifier}".format(amount=int(math.floor(amount)), modifier=modifier)
python
def adjust_memory(val, magnitude, direction="increase", out_modifier="", maximum=None): """Adjust memory based on number of cores utilized. """ modifier = val[-1:] amount = float(val[:-1]) if direction == "decrease": new_amount = amount / float(magnitude) # dealing with a specifier like 1G, need to scale to Mb if new_amount < 1 or (out_modifier.upper().startswith("M") and modifier.upper().startswith("G")): if modifier.upper().startswith("G"): new_amount = (amount * 1024) / magnitude modifier = "M" + modifier[1:] else: raise ValueError("Unexpected decrease in memory: %s by %s" % (val, magnitude)) amount = int(new_amount) elif direction == "increase" and magnitude > 1: # for increases with multiple cores, leave small percentage of # memory for system to maintain process running resource and # avoid OOM killers adjuster = 0.91 amount = int(math.ceil(amount * (adjuster * magnitude))) if out_modifier.upper().startswith("G") and modifier.upper().startswith("M"): modifier = out_modifier amount = int(math.floor(amount / 1024.0)) if out_modifier.upper().startswith("M") and modifier.upper().startswith("G"): modifier = out_modifier modifier = int(amount * 1024) if maximum: max_modifier = maximum[-1] max_amount = float(maximum[:-1]) if modifier.upper() == "G" and max_modifier.upper() == "M": max_amount = max_amount / 1024.0 elif modifier.upper() == "M" and max_modifier.upper() == "G": max_amount = max_amount * 1024.0 amount = min([amount, max_amount]) return "{amount}{modifier}".format(amount=int(math.floor(amount)), modifier=modifier)
[ "def", "adjust_memory", "(", "val", ",", "magnitude", ",", "direction", "=", "\"increase\"", ",", "out_modifier", "=", "\"\"", ",", "maximum", "=", "None", ")", ":", "modifier", "=", "val", "[", "-", "1", ":", "]", "amount", "=", "float", "(", "val", ...
Adjust memory based on number of cores utilized.
[ "Adjust", "memory", "based", "on", "number", "of", "cores", "utilized", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L361-L396
train
218,219
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
adjust_opts
def adjust_opts(in_opts, config): """Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively. """ memory_adjust = config["algorithm"].get("memory_adjust", {}) out_opts = [] for opt in in_opts: if opt.startswith("-Xmx") or (opt.startswith("-Xms") and memory_adjust.get("direction") == "decrease"): arg = opt[:4] opt = "{arg}{val}".format(arg=arg, val=adjust_memory(opt[4:], memory_adjust.get("magnitude", 1), memory_adjust.get("direction"), maximum=memory_adjust.get("maximum"))) out_opts.append(opt) return out_opts
python
def adjust_opts(in_opts, config): """Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively. """ memory_adjust = config["algorithm"].get("memory_adjust", {}) out_opts = [] for opt in in_opts: if opt.startswith("-Xmx") or (opt.startswith("-Xms") and memory_adjust.get("direction") == "decrease"): arg = opt[:4] opt = "{arg}{val}".format(arg=arg, val=adjust_memory(opt[4:], memory_adjust.get("magnitude", 1), memory_adjust.get("direction"), maximum=memory_adjust.get("maximum"))) out_opts.append(opt) return out_opts
[ "def", "adjust_opts", "(", "in_opts", ",", "config", ")", ":", "memory_adjust", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"memory_adjust\"", ",", "{", "}", ")", "out_opts", "=", "[", "]", "for", "opt", "in", "in_opts", ":", "if", "op...
Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively.
[ "Establish", "JVM", "opts", "adjusting", "memory", "for", "the", "context", "if", "needed", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L398-L415
train
218,220
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
use_vqsr
def use_vqsr(algs, call_file=None): """Processing uses GATK's Variant Quality Score Recalibration. """ from bcbio.variation import vcfutils vqsr_callers = set(["gatk", "gatk-haplotype"]) vqsr_sample_thresh = 50 vqsr_supported = collections.defaultdict(int) coverage_intervals = set([]) for alg in algs: callers = alg.get("variantcaller") if isinstance(callers, six.string_types): callers = [callers] if not callers: # no variant calling, no VQSR continue if "vqsr" in (alg.get("tools_off") or []): # VQSR turned off continue for c in callers: if c in vqsr_callers: if "vqsr" in (alg.get("tools_on") or []): # VQSR turned on: vqsr_supported[c] += 1 coverage_intervals.add("genome") # Do not try VQSR for gVCF inputs elif call_file and vcfutils.is_gvcf_file(call_file): pass else: coverage_intervals.add(alg.get("coverage_interval", "exome").lower()) vqsr_supported[c] += 1 if len(vqsr_supported) > 0: num_samples = max(vqsr_supported.values()) if "genome" in coverage_intervals or num_samples >= vqsr_sample_thresh: return True return False
python
def use_vqsr(algs, call_file=None): """Processing uses GATK's Variant Quality Score Recalibration. """ from bcbio.variation import vcfutils vqsr_callers = set(["gatk", "gatk-haplotype"]) vqsr_sample_thresh = 50 vqsr_supported = collections.defaultdict(int) coverage_intervals = set([]) for alg in algs: callers = alg.get("variantcaller") if isinstance(callers, six.string_types): callers = [callers] if not callers: # no variant calling, no VQSR continue if "vqsr" in (alg.get("tools_off") or []): # VQSR turned off continue for c in callers: if c in vqsr_callers: if "vqsr" in (alg.get("tools_on") or []): # VQSR turned on: vqsr_supported[c] += 1 coverage_intervals.add("genome") # Do not try VQSR for gVCF inputs elif call_file and vcfutils.is_gvcf_file(call_file): pass else: coverage_intervals.add(alg.get("coverage_interval", "exome").lower()) vqsr_supported[c] += 1 if len(vqsr_supported) > 0: num_samples = max(vqsr_supported.values()) if "genome" in coverage_intervals or num_samples >= vqsr_sample_thresh: return True return False
[ "def", "use_vqsr", "(", "algs", ",", "call_file", "=", "None", ")", ":", "from", "bcbio", ".", "variation", "import", "vcfutils", "vqsr_callers", "=", "set", "(", "[", "\"gatk\"", ",", "\"gatk-haplotype\"", "]", ")", "vqsr_sample_thresh", "=", "50", "vqsr_su...
Processing uses GATK's Variant Quality Score Recalibration.
[ "Processing", "uses", "GATK", "s", "Variant", "Quality", "Score", "Recalibration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L419-L450
train
218,221
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
use_bcbio_variation_recall
def use_bcbio_variation_recall(algs): """Processing uses bcbio-variation-recall. Avoids core requirement if not used. """ for alg in algs: jointcaller = alg.get("jointcaller", []) if not isinstance(jointcaller, (tuple, list)): jointcaller = [jointcaller] for caller in jointcaller: if caller not in set(["gatk-haplotype-joint", None, False]): return True return False
python
def use_bcbio_variation_recall(algs): """Processing uses bcbio-variation-recall. Avoids core requirement if not used. """ for alg in algs: jointcaller = alg.get("jointcaller", []) if not isinstance(jointcaller, (tuple, list)): jointcaller = [jointcaller] for caller in jointcaller: if caller not in set(["gatk-haplotype-joint", None, False]): return True return False
[ "def", "use_bcbio_variation_recall", "(", "algs", ")", ":", "for", "alg", "in", "algs", ":", "jointcaller", "=", "alg", ".", "get", "(", "\"jointcaller\"", ",", "[", "]", ")", "if", "not", "isinstance", "(", "jointcaller", ",", "(", "tuple", ",", "list",...
Processing uses bcbio-variation-recall. Avoids core requirement if not used.
[ "Processing", "uses", "bcbio", "-", "variation", "-", "recall", ".", "Avoids", "core", "requirement", "if", "not", "used", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L457-L467
train
218,222
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
program_installed
def program_installed(program, data): """ returns True if the path to a program can be found """ try: path = get_program(program, data) except CmdNotFound: return False return True
python
def program_installed(program, data): """ returns True if the path to a program can be found """ try: path = get_program(program, data) except CmdNotFound: return False return True
[ "def", "program_installed", "(", "program", ",", "data", ")", ":", "try", ":", "path", "=", "get_program", "(", "program", ",", "data", ")", "except", "CmdNotFound", ":", "return", "False", "return", "True" ]
returns True if the path to a program can be found
[ "returns", "True", "if", "the", "path", "to", "a", "program", "can", "be", "found" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L480-L488
train
218,223
bcbio/bcbio-nextgen
bcbio/provenance/versioncheck.py
samtools
def samtools(items): """Ensure samtools has parallel processing required for piped analysis. """ samtools = config_utils.get_program("samtools", items[0]["config"]) p = subprocess.Popen([samtools, "sort", "-h"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, stderr = p.communicate() p.stdout.close() p.stderr.close() if str(output).find("-@") == -1 and str(stderr).find("-@") == -1: return ("Installed version of samtools sort does not have support for " "multithreading (-@ option) " "required to support bwa piped alignment and BAM merging. " "Please upgrade to the latest version " "from http://samtools.sourceforge.net/")
python
def samtools(items): """Ensure samtools has parallel processing required for piped analysis. """ samtools = config_utils.get_program("samtools", items[0]["config"]) p = subprocess.Popen([samtools, "sort", "-h"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, stderr = p.communicate() p.stdout.close() p.stderr.close() if str(output).find("-@") == -1 and str(stderr).find("-@") == -1: return ("Installed version of samtools sort does not have support for " "multithreading (-@ option) " "required to support bwa piped alignment and BAM merging. " "Please upgrade to the latest version " "from http://samtools.sourceforge.net/")
[ "def", "samtools", "(", "items", ")", ":", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "items", "[", "0", "]", "[", "\"config\"", "]", ")", "p", "=", "subprocess", ".", "Popen", "(", "[", "samtools", ",", "\"sort\"", ...
Ensure samtools has parallel processing required for piped analysis.
[ "Ensure", "samtools", "has", "parallel", "processing", "required", "for", "piped", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/versioncheck.py#L11-L25
train
218,224
bcbio/bcbio-nextgen
bcbio/provenance/versioncheck.py
_needs_java
def _needs_java(data): """Check if a caller needs external java for MuTect. No longer check for older GATK (<3.6) versions because of time cost; this won't be relevant to most runs so we skip the sanity check. """ vc = dd.get_variantcaller(data) if isinstance(vc, dict): out = {} for k, v in vc.items(): if not isinstance(v, (list, tuple)): v = [v] out[k] = v vc = out elif not isinstance(vc, (list, tuple)): vc = [vc] if "mutect" in vc or ("somatic" in vc and "mutect" in vc["somatic"]): return True if "gatk" in vc or "gatk-haplotype" in vc or ("germline" in vc and "gatk-haplotype" in vc["germline"]): pass # runner = broad.runner_from_config(data["config"]) # version = runner.get_gatk_version() # if LooseVersion(version) < LooseVersion("3.6"): # return True return False
python
def _needs_java(data): """Check if a caller needs external java for MuTect. No longer check for older GATK (<3.6) versions because of time cost; this won't be relevant to most runs so we skip the sanity check. """ vc = dd.get_variantcaller(data) if isinstance(vc, dict): out = {} for k, v in vc.items(): if not isinstance(v, (list, tuple)): v = [v] out[k] = v vc = out elif not isinstance(vc, (list, tuple)): vc = [vc] if "mutect" in vc or ("somatic" in vc and "mutect" in vc["somatic"]): return True if "gatk" in vc or "gatk-haplotype" in vc or ("germline" in vc and "gatk-haplotype" in vc["germline"]): pass # runner = broad.runner_from_config(data["config"]) # version = runner.get_gatk_version() # if LooseVersion(version) < LooseVersion("3.6"): # return True return False
[ "def", "_needs_java", "(", "data", ")", ":", "vc", "=", "dd", ".", "get_variantcaller", "(", "data", ")", "if", "isinstance", "(", "vc", ",", "dict", ")", ":", "out", "=", "{", "}", "for", "k", ",", "v", "in", "vc", ".", "items", "(", ")", ":",...
Check if a caller needs external java for MuTect. No longer check for older GATK (<3.6) versions because of time cost; this won't be relevant to most runs so we skip the sanity check.
[ "Check", "if", "a", "caller", "needs", "external", "java", "for", "MuTect", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/versioncheck.py#L32-L56
train
218,225
bcbio/bcbio-nextgen
bcbio/provenance/versioncheck.py
java
def java(items): """Check for presence of external Java 1.7 for tools that require it. """ if any([_needs_java(d) for d in items]): min_version = "1.7" max_version = "1.8" with setpath.orig_paths(): java = utils.which("java") if not java: return ("java not found on PATH. Java %s required for MuTect and GATK < 3.6." % min_version) p = subprocess.Popen([java, "-Xms250m", "-Xmx250m", "-version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, _ = p.communicate() p.stdout.close() version = "" for line in output.split("\n"): if line.startswith(("java version", "openjdk version")): version = line.strip().split()[-1] if version.startswith('"'): version = version[1:] if version.endswith('"'): version = version[:-1] if (not version or LooseVersion(version) >= LooseVersion(max_version) or LooseVersion(version) < LooseVersion(min_version)): return ("java version %s required for running MuTect and GATK < 3.6.\n" "It needs to be first on your PATH so running 'java -version' give the correct version.\n" "Found version %s at %s" % (min_version, version, java))
python
def java(items): """Check for presence of external Java 1.7 for tools that require it. """ if any([_needs_java(d) for d in items]): min_version = "1.7" max_version = "1.8" with setpath.orig_paths(): java = utils.which("java") if not java: return ("java not found on PATH. Java %s required for MuTect and GATK < 3.6." % min_version) p = subprocess.Popen([java, "-Xms250m", "-Xmx250m", "-version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, _ = p.communicate() p.stdout.close() version = "" for line in output.split("\n"): if line.startswith(("java version", "openjdk version")): version = line.strip().split()[-1] if version.startswith('"'): version = version[1:] if version.endswith('"'): version = version[:-1] if (not version or LooseVersion(version) >= LooseVersion(max_version) or LooseVersion(version) < LooseVersion(min_version)): return ("java version %s required for running MuTect and GATK < 3.6.\n" "It needs to be first on your PATH so running 'java -version' give the correct version.\n" "Found version %s at %s" % (min_version, version, java))
[ "def", "java", "(", "items", ")", ":", "if", "any", "(", "[", "_needs_java", "(", "d", ")", "for", "d", "in", "items", "]", ")", ":", "min_version", "=", "\"1.7\"", "max_version", "=", "\"1.8\"", "with", "setpath", ".", "orig_paths", "(", ")", ":", ...
Check for presence of external Java 1.7 for tools that require it.
[ "Check", "for", "presence", "of", "external", "Java", "1", ".", "7", "for", "tools", "that", "require", "it", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/versioncheck.py#L58-L84
train
218,226
bcbio/bcbio-nextgen
bcbio/install.py
upgrade_bcbio
def upgrade_bcbio(args): """Perform upgrade of bcbio to latest release, or from GitHub development version. Handles bcbio, third party tools and data. """ print("Upgrading bcbio") args = add_install_defaults(args) if args.upgrade in ["stable", "system", "deps", "development"]: if args.upgrade == "development": anaconda_dir = _update_conda_devel() _check_for_conda_problems() print("Upgrading bcbio-nextgen to latest development version") pip_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "pip") git_tag = "@%s" % args.revision if args.revision != "master" else "" _pip_safe_ssl([[pip_bin, "install", "--upgrade", "--no-deps", "git+%s%s#egg=bcbio-nextgen" % (REMOTES["gitrepo"], git_tag)]], anaconda_dir) print("Upgrade of bcbio-nextgen development code complete.") else: _update_conda_packages() _check_for_conda_problems() print("Upgrade of bcbio-nextgen code complete.") if args.cwl and args.upgrade: _update_bcbiovm() try: _set_matplotlib_default_backend() except OSError: pass if args.tooldir: with bcbio_tmpdir(): print("Upgrading third party tools to latest versions") _symlink_bcbio(args, script="bcbio_nextgen.py") _symlink_bcbio(args, script="bcbio_setup_genome.py") _symlink_bcbio(args, script="bcbio_prepare_samples.py") _symlink_bcbio(args, script="bcbio_fastq_umi_prep.py") if args.cwl: _symlink_bcbio(args, "bcbio_vm.py", "bcbiovm") _symlink_bcbio(args, "python", "bcbiovm", "bcbiovm") upgrade_thirdparty_tools(args, REMOTES) print("Third party tools upgrade complete.") if args.toolplus: print("Installing additional tools") _install_toolplus(args) if args.install_data: for default in DEFAULT_INDEXES: if default not in args.aligners: args.aligners.append(default) if len(args.aligners) == 0: print("Warning: no aligners provided with `--aligners` flag") if len(args.genomes) == 0: print("Data not installed, no genomes provided with `--genomes` flag") else: with bcbio_tmpdir(): print("Upgrading bcbio-nextgen data files") upgrade_bcbio_data(args, REMOTES) 
print("bcbio-nextgen data upgrade complete.") if args.isolate and args.tooldir: print("Isolated tool installation not automatically added to environmental variables") print(" Add:\n {t}/bin to PATH".format(t=args.tooldir)) save_install_defaults(args) args.datadir = _get_data_dir() _install_container_bcbio_system(args.datadir) print("Upgrade completed successfully.") return args
python
def upgrade_bcbio(args): """Perform upgrade of bcbio to latest release, or from GitHub development version. Handles bcbio, third party tools and data. """ print("Upgrading bcbio") args = add_install_defaults(args) if args.upgrade in ["stable", "system", "deps", "development"]: if args.upgrade == "development": anaconda_dir = _update_conda_devel() _check_for_conda_problems() print("Upgrading bcbio-nextgen to latest development version") pip_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "pip") git_tag = "@%s" % args.revision if args.revision != "master" else "" _pip_safe_ssl([[pip_bin, "install", "--upgrade", "--no-deps", "git+%s%s#egg=bcbio-nextgen" % (REMOTES["gitrepo"], git_tag)]], anaconda_dir) print("Upgrade of bcbio-nextgen development code complete.") else: _update_conda_packages() _check_for_conda_problems() print("Upgrade of bcbio-nextgen code complete.") if args.cwl and args.upgrade: _update_bcbiovm() try: _set_matplotlib_default_backend() except OSError: pass if args.tooldir: with bcbio_tmpdir(): print("Upgrading third party tools to latest versions") _symlink_bcbio(args, script="bcbio_nextgen.py") _symlink_bcbio(args, script="bcbio_setup_genome.py") _symlink_bcbio(args, script="bcbio_prepare_samples.py") _symlink_bcbio(args, script="bcbio_fastq_umi_prep.py") if args.cwl: _symlink_bcbio(args, "bcbio_vm.py", "bcbiovm") _symlink_bcbio(args, "python", "bcbiovm", "bcbiovm") upgrade_thirdparty_tools(args, REMOTES) print("Third party tools upgrade complete.") if args.toolplus: print("Installing additional tools") _install_toolplus(args) if args.install_data: for default in DEFAULT_INDEXES: if default not in args.aligners: args.aligners.append(default) if len(args.aligners) == 0: print("Warning: no aligners provided with `--aligners` flag") if len(args.genomes) == 0: print("Data not installed, no genomes provided with `--genomes` flag") else: with bcbio_tmpdir(): print("Upgrading bcbio-nextgen data files") upgrade_bcbio_data(args, REMOTES) 
print("bcbio-nextgen data upgrade complete.") if args.isolate and args.tooldir: print("Isolated tool installation not automatically added to environmental variables") print(" Add:\n {t}/bin to PATH".format(t=args.tooldir)) save_install_defaults(args) args.datadir = _get_data_dir() _install_container_bcbio_system(args.datadir) print("Upgrade completed successfully.") return args
[ "def", "upgrade_bcbio", "(", "args", ")", ":", "print", "(", "\"Upgrading bcbio\"", ")", "args", "=", "add_install_defaults", "(", "args", ")", "if", "args", ".", "upgrade", "in", "[", "\"stable\"", ",", "\"system\"", ",", "\"deps\"", ",", "\"development\"", ...
Perform upgrade of bcbio to latest release, or from GitHub development version. Handles bcbio, third party tools and data.
[ "Perform", "upgrade", "of", "bcbio", "to", "latest", "release", "or", "from", "GitHub", "development", "version", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L51-L115
train
218,227
bcbio/bcbio-nextgen
bcbio/install.py
_pip_safe_ssl
def _pip_safe_ssl(cmds, anaconda_dir): """Run pip, retrying with conda SSL certificate if global certificate fails. """ try: for cmd in cmds: subprocess.check_call(cmd) except subprocess.CalledProcessError: _set_pip_ssl(anaconda_dir) for cmd in cmds: subprocess.check_call(cmd)
python
def _pip_safe_ssl(cmds, anaconda_dir): """Run pip, retrying with conda SSL certificate if global certificate fails. """ try: for cmd in cmds: subprocess.check_call(cmd) except subprocess.CalledProcessError: _set_pip_ssl(anaconda_dir) for cmd in cmds: subprocess.check_call(cmd)
[ "def", "_pip_safe_ssl", "(", "cmds", ",", "anaconda_dir", ")", ":", "try", ":", "for", "cmd", "in", "cmds", ":", "subprocess", ".", "check_call", "(", "cmd", ")", "except", "subprocess", ".", "CalledProcessError", ":", "_set_pip_ssl", "(", "anaconda_dir", ")...
Run pip, retrying with conda SSL certificate if global certificate fails.
[ "Run", "pip", "retrying", "with", "conda", "SSL", "certificate", "if", "global", "certificate", "fails", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L117-L126
train
218,228
bcbio/bcbio-nextgen
bcbio/install.py
_set_pip_ssl
def _set_pip_ssl(anaconda_dir): """Set PIP SSL certificate to installed conda certificate to avoid SSL errors """ if anaconda_dir: cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem") if os.path.exists(cert_file): os.environ["PIP_CERT"] = cert_file
python
def _set_pip_ssl(anaconda_dir): """Set PIP SSL certificate to installed conda certificate to avoid SSL errors """ if anaconda_dir: cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem") if os.path.exists(cert_file): os.environ["PIP_CERT"] = cert_file
[ "def", "_set_pip_ssl", "(", "anaconda_dir", ")", ":", "if", "anaconda_dir", ":", "cert_file", "=", "os", ".", "path", ".", "join", "(", "anaconda_dir", ",", "\"ssl\"", ",", "\"cert.pem\"", ")", "if", "os", ".", "path", ".", "exists", "(", "cert_file", ")...
Set PIP SSL certificate to installed conda certificate to avoid SSL errors
[ "Set", "PIP", "SSL", "certificate", "to", "installed", "conda", "certificate", "to", "avoid", "SSL", "errors" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L128-L134
train
218,229
bcbio/bcbio-nextgen
bcbio/install.py
_set_matplotlib_default_backend
def _set_matplotlib_default_backend(): """ matplotlib will try to print to a display if it is available, but don't want to run it in interactive mode. we tried setting the backend to 'Agg'' before importing, but it was still resulting in issues. we replace the existing backend with 'agg' in the default matplotlibrc. This is a hack until we can find a better solution """ if _matplotlib_installed(): import matplotlib matplotlib.use('Agg', force=True) config = matplotlib.matplotlib_fname() if os.access(config, os.W_OK): with file_transaction(config) as tx_out_file: with open(config) as in_file, open(tx_out_file, "w") as out_file: for line in in_file: if line.split(":")[0].strip() == "backend": out_file.write("backend: agg\n") else: out_file.write(line)
python
def _set_matplotlib_default_backend(): """ matplotlib will try to print to a display if it is available, but don't want to run it in interactive mode. we tried setting the backend to 'Agg'' before importing, but it was still resulting in issues. we replace the existing backend with 'agg' in the default matplotlibrc. This is a hack until we can find a better solution """ if _matplotlib_installed(): import matplotlib matplotlib.use('Agg', force=True) config = matplotlib.matplotlib_fname() if os.access(config, os.W_OK): with file_transaction(config) as tx_out_file: with open(config) as in_file, open(tx_out_file, "w") as out_file: for line in in_file: if line.split(":")[0].strip() == "backend": out_file.write("backend: agg\n") else: out_file.write(line)
[ "def", "_set_matplotlib_default_backend", "(", ")", ":", "if", "_matplotlib_installed", "(", ")", ":", "import", "matplotlib", "matplotlib", ".", "use", "(", "'Agg'", ",", "force", "=", "True", ")", "config", "=", "matplotlib", ".", "matplotlib_fname", "(", ")...
matplotlib will try to print to a display if it is available, but don't want to run it in interactive mode. we tried setting the backend to 'Agg'' before importing, but it was still resulting in issues. we replace the existing backend with 'agg' in the default matplotlibrc. This is a hack until we can find a better solution
[ "matplotlib", "will", "try", "to", "print", "to", "a", "display", "if", "it", "is", "available", "but", "don", "t", "want", "to", "run", "it", "in", "interactive", "mode", ".", "we", "tried", "setting", "the", "backend", "to", "Agg", "before", "importing...
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L136-L155
train
218,230
bcbio/bcbio-nextgen
bcbio/install.py
_symlink_bcbio
def _symlink_bcbio(args, script="bcbio_nextgen.py", env_name=None, prefix=None): """Ensure a bcbio-nextgen script symlink in final tool directory. """ if env_name: bcbio_anaconda = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(sys.executable))), "envs", env_name, "bin", script) else: bcbio_anaconda = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), script) bindir = os.path.join(args.tooldir, "bin") if not os.path.exists(bindir): os.makedirs(bindir) if prefix: script = "%s_%s" % (prefix, script) bcbio_final = os.path.join(bindir, script) if not os.path.exists(bcbio_final): if os.path.lexists(bcbio_final): subprocess.check_call(["rm", "-f", bcbio_final]) subprocess.check_call(["ln", "-s", bcbio_anaconda, bcbio_final])
python
def _symlink_bcbio(args, script="bcbio_nextgen.py", env_name=None, prefix=None): """Ensure a bcbio-nextgen script symlink in final tool directory. """ if env_name: bcbio_anaconda = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(sys.executable))), "envs", env_name, "bin", script) else: bcbio_anaconda = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), script) bindir = os.path.join(args.tooldir, "bin") if not os.path.exists(bindir): os.makedirs(bindir) if prefix: script = "%s_%s" % (prefix, script) bcbio_final = os.path.join(bindir, script) if not os.path.exists(bcbio_final): if os.path.lexists(bcbio_final): subprocess.check_call(["rm", "-f", bcbio_final]) subprocess.check_call(["ln", "-s", bcbio_anaconda, bcbio_final])
[ "def", "_symlink_bcbio", "(", "args", ",", "script", "=", "\"bcbio_nextgen.py\"", ",", "env_name", "=", "None", ",", "prefix", "=", "None", ")", ":", "if", "env_name", ":", "bcbio_anaconda", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ...
Ensure a bcbio-nextgen script symlink in final tool directory.
[ "Ensure", "a", "bcbio", "-", "nextgen", "script", "symlink", "in", "final", "tool", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L164-L181
train
218,231
bcbio/bcbio-nextgen
bcbio/install.py
_install_container_bcbio_system
def _install_container_bcbio_system(datadir): """Install limited bcbio_system.yaml file for setting core and memory usage. Adds any non-specific programs to the exposed bcbio_system.yaml file, only when upgrade happening inside a docker container. """ base_file = os.path.join(datadir, "config", "bcbio_system.yaml") if not os.path.exists(base_file): return expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml") expose = set(["memory", "cores", "jvm_opts"]) with open(base_file) as in_handle: config = yaml.safe_load(in_handle) if os.path.exists(expose_file): with open(expose_file) as in_handle: expose_config = yaml.safe_load(in_handle) else: expose_config = {"resources": {}} for pname, vals in config["resources"].items(): expose_vals = {} for k, v in vals.items(): if k in expose: expose_vals[k] = v if len(expose_vals) > 0 and pname not in expose_config["resources"]: expose_config["resources"][pname] = expose_vals if expose_file and os.path.exists(os.path.dirname(expose_file)): with open(expose_file, "w") as out_handle: yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False) return expose_file
python
def _install_container_bcbio_system(datadir): """Install limited bcbio_system.yaml file for setting core and memory usage. Adds any non-specific programs to the exposed bcbio_system.yaml file, only when upgrade happening inside a docker container. """ base_file = os.path.join(datadir, "config", "bcbio_system.yaml") if not os.path.exists(base_file): return expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml") expose = set(["memory", "cores", "jvm_opts"]) with open(base_file) as in_handle: config = yaml.safe_load(in_handle) if os.path.exists(expose_file): with open(expose_file) as in_handle: expose_config = yaml.safe_load(in_handle) else: expose_config = {"resources": {}} for pname, vals in config["resources"].items(): expose_vals = {} for k, v in vals.items(): if k in expose: expose_vals[k] = v if len(expose_vals) > 0 and pname not in expose_config["resources"]: expose_config["resources"][pname] = expose_vals if expose_file and os.path.exists(os.path.dirname(expose_file)): with open(expose_file, "w") as out_handle: yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False) return expose_file
[ "def", "_install_container_bcbio_system", "(", "datadir", ")", ":", "base_file", "=", "os", ".", "path", ".", "join", "(", "datadir", ",", "\"config\"", ",", "\"bcbio_system.yaml\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "base_file", ")", ...
Install limited bcbio_system.yaml file for setting core and memory usage. Adds any non-specific programs to the exposed bcbio_system.yaml file, only when upgrade happening inside a docker container.
[ "Install", "limited", "bcbio_system", ".", "yaml", "file", "for", "setting", "core", "and", "memory", "usage", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L183-L211
train
218,232
bcbio/bcbio-nextgen
bcbio/install.py
_check_for_conda_problems
def _check_for_conda_problems(): """Identify post-install conda problems and fix. - libgcc upgrades can remove libquadmath, which moved to libgcc-ng """ conda_bin = _get_conda_bin() channels = _get_conda_channels(conda_bin) lib_dir = os.path.join(os.path.dirname(conda_bin), os.pardir, "lib") for l in ["libgomp.so.1", "libquadmath.so"]: if not os.path.exists(os.path.join(lib_dir, l)): subprocess.check_call([conda_bin, "install", "-f", "--yes"] + channels + ["libgcc-ng"])
python
def _check_for_conda_problems(): """Identify post-install conda problems and fix. - libgcc upgrades can remove libquadmath, which moved to libgcc-ng """ conda_bin = _get_conda_bin() channels = _get_conda_channels(conda_bin) lib_dir = os.path.join(os.path.dirname(conda_bin), os.pardir, "lib") for l in ["libgomp.so.1", "libquadmath.so"]: if not os.path.exists(os.path.join(lib_dir, l)): subprocess.check_call([conda_bin, "install", "-f", "--yes"] + channels + ["libgcc-ng"])
[ "def", "_check_for_conda_problems", "(", ")", ":", "conda_bin", "=", "_get_conda_bin", "(", ")", "channels", "=", "_get_conda_channels", "(", "conda_bin", ")", "lib_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "co...
Identify post-install conda problems and fix. - libgcc upgrades can remove libquadmath, which moved to libgcc-ng
[ "Identify", "post", "-", "install", "conda", "problems", "and", "fix", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L218-L228
train
218,233
bcbio/bcbio-nextgen
bcbio/install.py
_update_bcbiovm
def _update_bcbiovm(): """Update or install a local bcbiovm install with tools and dependencies. """ print("## CWL support with bcbio-vm") python_env = "python=3" conda_bin, env_name = _add_environment("bcbiovm", python_env) channels = _get_conda_channels(conda_bin) base_cmd = [conda_bin, "install", "--yes", "--name", env_name] + channels subprocess.check_call(base_cmd + [python_env, "nomkl", "bcbio-nextgen"]) extra_uptodate = ["cromwell"] subprocess.check_call(base_cmd + [python_env, "bcbio-nextgen-vm"] + extra_uptodate)
python
def _update_bcbiovm(): """Update or install a local bcbiovm install with tools and dependencies. """ print("## CWL support with bcbio-vm") python_env = "python=3" conda_bin, env_name = _add_environment("bcbiovm", python_env) channels = _get_conda_channels(conda_bin) base_cmd = [conda_bin, "install", "--yes", "--name", env_name] + channels subprocess.check_call(base_cmd + [python_env, "nomkl", "bcbio-nextgen"]) extra_uptodate = ["cromwell"] subprocess.check_call(base_cmd + [python_env, "bcbio-nextgen-vm"] + extra_uptodate)
[ "def", "_update_bcbiovm", "(", ")", ":", "print", "(", "\"## CWL support with bcbio-vm\"", ")", "python_env", "=", "\"python=3\"", "conda_bin", ",", "env_name", "=", "_add_environment", "(", "\"bcbiovm\"", ",", "python_env", ")", "channels", "=", "_get_conda_channels"...
Update or install a local bcbiovm install with tools and dependencies.
[ "Update", "or", "install", "a", "local", "bcbiovm", "install", "with", "tools", "and", "dependencies", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L230-L240
train
218,234
bcbio/bcbio-nextgen
bcbio/install.py
_get_conda_channels
def _get_conda_channels(conda_bin): """Retrieve default conda channels, checking if they are pre-specified in config. This allows users to override defaults with specific mirrors in their .condarc """ channels = ["bioconda", "conda-forge"] out = [] config = yaml.safe_load(subprocess.check_output([conda_bin, "config", "--show"])) for c in channels: present = False for orig_c in config.get("channels") or []: if orig_c.endswith((c, "%s/" % c)): present = True break if not present: out += ["-c", c] return out
python
def _get_conda_channels(conda_bin): """Retrieve default conda channels, checking if they are pre-specified in config. This allows users to override defaults with specific mirrors in their .condarc """ channels = ["bioconda", "conda-forge"] out = [] config = yaml.safe_load(subprocess.check_output([conda_bin, "config", "--show"])) for c in channels: present = False for orig_c in config.get("channels") or []: if orig_c.endswith((c, "%s/" % c)): present = True break if not present: out += ["-c", c] return out
[ "def", "_get_conda_channels", "(", "conda_bin", ")", ":", "channels", "=", "[", "\"bioconda\"", ",", "\"conda-forge\"", "]", "out", "=", "[", "]", "config", "=", "yaml", ".", "safe_load", "(", "subprocess", ".", "check_output", "(", "[", "conda_bin", ",", ...
Retrieve default conda channels, checking if they are pre-specified in config. This allows users to override defaults with specific mirrors in their .condarc
[ "Retrieve", "default", "conda", "channels", "checking", "if", "they", "are", "pre", "-", "specified", "in", "config", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L255-L271
train
218,235
bcbio/bcbio-nextgen
bcbio/install.py
_update_conda_packages
def _update_conda_packages(): """If installed in an anaconda directory, upgrade conda packages. """ conda_bin = _get_conda_bin() channels = _get_conda_channels(conda_bin) assert conda_bin, ("Could not find anaconda distribution for upgrading bcbio.\n" "Using python at %s but could not find conda." % (os.path.realpath(sys.executable))) req_file = "bcbio-update-requirements.txt" if os.path.exists(req_file): os.remove(req_file) subprocess.check_call(["wget", "-O", req_file, "--no-check-certificate", REMOTES["requirements"]]) subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels + ["--file", req_file]) if os.path.exists(req_file): os.remove(req_file) return os.path.dirname(os.path.dirname(conda_bin))
python
def _update_conda_packages(): """If installed in an anaconda directory, upgrade conda packages. """ conda_bin = _get_conda_bin() channels = _get_conda_channels(conda_bin) assert conda_bin, ("Could not find anaconda distribution for upgrading bcbio.\n" "Using python at %s but could not find conda." % (os.path.realpath(sys.executable))) req_file = "bcbio-update-requirements.txt" if os.path.exists(req_file): os.remove(req_file) subprocess.check_call(["wget", "-O", req_file, "--no-check-certificate", REMOTES["requirements"]]) subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels + ["--file", req_file]) if os.path.exists(req_file): os.remove(req_file) return os.path.dirname(os.path.dirname(conda_bin))
[ "def", "_update_conda_packages", "(", ")", ":", "conda_bin", "=", "_get_conda_bin", "(", ")", "channels", "=", "_get_conda_channels", "(", "conda_bin", ")", "assert", "conda_bin", ",", "(", "\"Could not find anaconda distribution for upgrading bcbio.\\n\"", "\"Using python ...
If installed in an anaconda directory, upgrade conda packages.
[ "If", "installed", "in", "an", "anaconda", "directory", "upgrade", "conda", "packages", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L273-L288
train
218,236
bcbio/bcbio-nextgen
bcbio/install.py
_update_conda_devel
def _update_conda_devel(): """Update to the latest development conda package. """ conda_bin = _get_conda_bin() channels = _get_conda_channels(conda_bin) assert conda_bin, "Could not find anaconda distribution for upgrading bcbio" subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels + ["bcbio-nextgen>=%s" % version.__version__.replace("a0", "a")]) return os.path.dirname(os.path.dirname(conda_bin))
python
def _update_conda_devel(): """Update to the latest development conda package. """ conda_bin = _get_conda_bin() channels = _get_conda_channels(conda_bin) assert conda_bin, "Could not find anaconda distribution for upgrading bcbio" subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels + ["bcbio-nextgen>=%s" % version.__version__.replace("a0", "a")]) return os.path.dirname(os.path.dirname(conda_bin))
[ "def", "_update_conda_devel", "(", ")", ":", "conda_bin", "=", "_get_conda_bin", "(", ")", "channels", "=", "_get_conda_channels", "(", "conda_bin", ")", "assert", "conda_bin", ",", "\"Could not find anaconda distribution for upgrading bcbio\"", "subprocess", ".", "check_...
Update to the latest development conda package.
[ "Update", "to", "the", "latest", "development", "conda", "package", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L290-L298
train
218,237
bcbio/bcbio-nextgen
bcbio/install.py
get_genome_dir
def get_genome_dir(gid, galaxy_dir, data): """Return standard location of genome directories. """ if galaxy_dir: refs = genome.get_refs(gid, None, galaxy_dir, data) seq_file = tz.get_in(["fasta", "base"], refs) if seq_file and os.path.exists(seq_file): return os.path.dirname(os.path.dirname(seq_file)) else: gdirs = glob.glob(os.path.join(_get_data_dir(), "genomes", "*", gid)) if len(gdirs) == 1 and os.path.exists(gdirs[0]): return gdirs[0]
python
def get_genome_dir(gid, galaxy_dir, data): """Return standard location of genome directories. """ if galaxy_dir: refs = genome.get_refs(gid, None, galaxy_dir, data) seq_file = tz.get_in(["fasta", "base"], refs) if seq_file and os.path.exists(seq_file): return os.path.dirname(os.path.dirname(seq_file)) else: gdirs = glob.glob(os.path.join(_get_data_dir(), "genomes", "*", gid)) if len(gdirs) == 1 and os.path.exists(gdirs[0]): return gdirs[0]
[ "def", "get_genome_dir", "(", "gid", ",", "galaxy_dir", ",", "data", ")", ":", "if", "galaxy_dir", ":", "refs", "=", "genome", ".", "get_refs", "(", "gid", ",", "None", ",", "galaxy_dir", ",", "data", ")", "seq_file", "=", "tz", ".", "get_in", "(", "...
Return standard location of genome directories.
[ "Return", "standard", "location", "of", "genome", "directories", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L300-L311
train
218,238
bcbio/bcbio-nextgen
bcbio/install.py
_prepare_cwl_tarballs
def _prepare_cwl_tarballs(data_dir): """Create CWL ready tarballs for complex directories. Avoids need for CWL runners to pass and serialize complex directories of files, which is inconsistent between runners. """ for dbref_dir in filter(os.path.isdir, glob.glob(os.path.join(data_dir, "genomes", "*", "*"))): base_dir, dbref = os.path.split(dbref_dir) for indexdir in TARBALL_DIRECTORIES: cur_target = os.path.join(dbref_dir, indexdir) if os.path.isdir(cur_target): # Some indices, like rtg, have a single nested directory subdirs = [x for x in os.listdir(cur_target) if os.path.isdir(os.path.join(cur_target, x))] if len(subdirs) == 1: cur_target = os.path.join(cur_target, subdirs[0]) create.directory_tarball(cur_target)
python
def _prepare_cwl_tarballs(data_dir): """Create CWL ready tarballs for complex directories. Avoids need for CWL runners to pass and serialize complex directories of files, which is inconsistent between runners. """ for dbref_dir in filter(os.path.isdir, glob.glob(os.path.join(data_dir, "genomes", "*", "*"))): base_dir, dbref = os.path.split(dbref_dir) for indexdir in TARBALL_DIRECTORIES: cur_target = os.path.join(dbref_dir, indexdir) if os.path.isdir(cur_target): # Some indices, like rtg, have a single nested directory subdirs = [x for x in os.listdir(cur_target) if os.path.isdir(os.path.join(cur_target, x))] if len(subdirs) == 1: cur_target = os.path.join(cur_target, subdirs[0]) create.directory_tarball(cur_target)
[ "def", "_prepare_cwl_tarballs", "(", "data_dir", ")", ":", "for", "dbref_dir", "in", "filter", "(", "os", ".", "path", ".", "isdir", ",", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"genomes\"", ",", "\"*\"", ",", ...
Create CWL ready tarballs for complex directories. Avoids need for CWL runners to pass and serialize complex directories of files, which is inconsistent between runners.
[ "Create", "CWL", "ready", "tarballs", "for", "complex", "directories", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L358-L373
train
218,239
bcbio/bcbio-nextgen
bcbio/install.py
_upgrade_genome_resources
def _upgrade_genome_resources(galaxy_dir, base_url): """Retrieve latest version of genome resource YAML configuration files. """ import requests for dbkey, ref_file in genome.get_builds(galaxy_dir): # Check for a remote genome resources file remote_url = base_url % dbkey requests.packages.urllib3.disable_warnings() r = requests.get(remote_url, verify=False) if r.status_code == requests.codes.ok: local_file = os.path.join(os.path.dirname(ref_file), os.path.basename(remote_url)) if os.path.exists(local_file): with open(local_file) as in_handle: local_config = yaml.safe_load(in_handle) remote_config = yaml.safe_load(r.text) needs_update = remote_config["version"] > local_config.get("version", 0) if needs_update: shutil.move(local_file, local_file + ".old%s" % local_config.get("version", 0)) else: needs_update = True if needs_update: print("Updating %s genome resources configuration" % dbkey) with open(local_file, "w") as out_handle: out_handle.write(r.text)
python
def _upgrade_genome_resources(galaxy_dir, base_url): """Retrieve latest version of genome resource YAML configuration files. """ import requests for dbkey, ref_file in genome.get_builds(galaxy_dir): # Check for a remote genome resources file remote_url = base_url % dbkey requests.packages.urllib3.disable_warnings() r = requests.get(remote_url, verify=False) if r.status_code == requests.codes.ok: local_file = os.path.join(os.path.dirname(ref_file), os.path.basename(remote_url)) if os.path.exists(local_file): with open(local_file) as in_handle: local_config = yaml.safe_load(in_handle) remote_config = yaml.safe_load(r.text) needs_update = remote_config["version"] > local_config.get("version", 0) if needs_update: shutil.move(local_file, local_file + ".old%s" % local_config.get("version", 0)) else: needs_update = True if needs_update: print("Updating %s genome resources configuration" % dbkey) with open(local_file, "w") as out_handle: out_handle.write(r.text)
[ "def", "_upgrade_genome_resources", "(", "galaxy_dir", ",", "base_url", ")", ":", "import", "requests", "for", "dbkey", ",", "ref_file", "in", "genome", ".", "get_builds", "(", "galaxy_dir", ")", ":", "# Check for a remote genome resources file", "remote_url", "=", ...
Retrieve latest version of genome resource YAML configuration files.
[ "Retrieve", "latest", "version", "of", "genome", "resource", "YAML", "configuration", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L375-L398
train
218,240
bcbio/bcbio-nextgen
bcbio/install.py
_upgrade_snpeff_data
def _upgrade_snpeff_data(galaxy_dir, args, remotes): """Install or upgrade snpEff databases, localized to reference directory. """ snpeff_version = effects.snpeff_version(args) if not snpeff_version: return for dbkey, ref_file in genome.get_builds(galaxy_dir): resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey) if os.path.exists(resource_file): with open(resource_file) as in_handle: resources = yaml.safe_load(in_handle) snpeff_db, snpeff_base_dir = effects.get_db({"genome_resources": resources, "reference": {"fasta": {"base": ref_file}}}) if snpeff_db: snpeff_db_dir = os.path.join(snpeff_base_dir, snpeff_db) if os.path.exists(snpeff_db_dir) and _is_old_database(snpeff_db_dir, args): shutil.rmtree(snpeff_db_dir) if not os.path.exists(snpeff_db_dir): print("Installing snpEff database %s in %s" % (snpeff_db, snpeff_base_dir)) dl_url = remotes["snpeff_dl_url"].format( snpeff_ver=snpeff_version.replace(".", "_"), genome=snpeff_db) dl_file = os.path.basename(dl_url) with utils.chdir(snpeff_base_dir): subprocess.check_call(["wget", "--no-check-certificate", "-c", "-O", dl_file, dl_url]) subprocess.check_call(["unzip", dl_file]) os.remove(dl_file) dl_dir = os.path.join(snpeff_base_dir, "data", snpeff_db) shutil.move(dl_dir, snpeff_db_dir) os.rmdir(os.path.join(snpeff_base_dir, "data")) if args.cwl: create.directory_tarball(snpeff_db_dir)
python
def _upgrade_snpeff_data(galaxy_dir, args, remotes): """Install or upgrade snpEff databases, localized to reference directory. """ snpeff_version = effects.snpeff_version(args) if not snpeff_version: return for dbkey, ref_file in genome.get_builds(galaxy_dir): resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey) if os.path.exists(resource_file): with open(resource_file) as in_handle: resources = yaml.safe_load(in_handle) snpeff_db, snpeff_base_dir = effects.get_db({"genome_resources": resources, "reference": {"fasta": {"base": ref_file}}}) if snpeff_db: snpeff_db_dir = os.path.join(snpeff_base_dir, snpeff_db) if os.path.exists(snpeff_db_dir) and _is_old_database(snpeff_db_dir, args): shutil.rmtree(snpeff_db_dir) if not os.path.exists(snpeff_db_dir): print("Installing snpEff database %s in %s" % (snpeff_db, snpeff_base_dir)) dl_url = remotes["snpeff_dl_url"].format( snpeff_ver=snpeff_version.replace(".", "_"), genome=snpeff_db) dl_file = os.path.basename(dl_url) with utils.chdir(snpeff_base_dir): subprocess.check_call(["wget", "--no-check-certificate", "-c", "-O", dl_file, dl_url]) subprocess.check_call(["unzip", dl_file]) os.remove(dl_file) dl_dir = os.path.join(snpeff_base_dir, "data", snpeff_db) shutil.move(dl_dir, snpeff_db_dir) os.rmdir(os.path.join(snpeff_base_dir, "data")) if args.cwl: create.directory_tarball(snpeff_db_dir)
[ "def", "_upgrade_snpeff_data", "(", "galaxy_dir", ",", "args", ",", "remotes", ")", ":", "snpeff_version", "=", "effects", ".", "snpeff_version", "(", "args", ")", "if", "not", "snpeff_version", ":", "return", "for", "dbkey", ",", "ref_file", "in", "genome", ...
Install or upgrade snpEff databases, localized to reference directory.
[ "Install", "or", "upgrade", "snpEff", "databases", "localized", "to", "reference", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L404-L435
train
218,241
bcbio/bcbio-nextgen
bcbio/install.py
_is_old_database
def _is_old_database(db_dir, args): """Check for old database versions, supported in snpEff 4.1. """ snpeff_version = effects.snpeff_version(args) if LooseVersion(snpeff_version) >= LooseVersion("4.1"): pred_file = os.path.join(db_dir, "snpEffectPredictor.bin") if not utils.file_exists(pred_file): return True with utils.open_gzipsafe(pred_file, is_gz=True) as in_handle: version_info = in_handle.readline().strip().split("\t") program, version = version_info[:2] if not program.lower() == "snpeff" or LooseVersion(snpeff_version) > LooseVersion(version): return True return False
python
def _is_old_database(db_dir, args): """Check for old database versions, supported in snpEff 4.1. """ snpeff_version = effects.snpeff_version(args) if LooseVersion(snpeff_version) >= LooseVersion("4.1"): pred_file = os.path.join(db_dir, "snpEffectPredictor.bin") if not utils.file_exists(pred_file): return True with utils.open_gzipsafe(pred_file, is_gz=True) as in_handle: version_info = in_handle.readline().strip().split("\t") program, version = version_info[:2] if not program.lower() == "snpeff" or LooseVersion(snpeff_version) > LooseVersion(version): return True return False
[ "def", "_is_old_database", "(", "db_dir", ",", "args", ")", ":", "snpeff_version", "=", "effects", ".", "snpeff_version", "(", "args", ")", "if", "LooseVersion", "(", "snpeff_version", ")", ">=", "LooseVersion", "(", "\"4.1\"", ")", ":", "pred_file", "=", "o...
Check for old database versions, supported in snpEff 4.1.
[ "Check", "for", "old", "database", "versions", "supported", "in", "snpEff", "4", ".", "1", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L437-L450
train
218,242
bcbio/bcbio-nextgen
bcbio/install.py
_get_biodata
def _get_biodata(base_file, args): """Retrieve biodata genome targets customized by install parameters. """ with open(base_file) as in_handle: config = yaml.safe_load(in_handle) config["install_liftover"] = False config["genome_indexes"] = args.aligners ann_groups = config.pop("annotation_groups", {}) config["genomes"] = [_setup_genome_annotations(g, args, ann_groups) for g in config["genomes"] if g["dbkey"] in args.genomes] return config
python
def _get_biodata(base_file, args): """Retrieve biodata genome targets customized by install parameters. """ with open(base_file) as in_handle: config = yaml.safe_load(in_handle) config["install_liftover"] = False config["genome_indexes"] = args.aligners ann_groups = config.pop("annotation_groups", {}) config["genomes"] = [_setup_genome_annotations(g, args, ann_groups) for g in config["genomes"] if g["dbkey"] in args.genomes] return config
[ "def", "_get_biodata", "(", "base_file", ",", "args", ")", ":", "with", "open", "(", "base_file", ")", "as", "in_handle", ":", "config", "=", "yaml", ".", "safe_load", "(", "in_handle", ")", "config", "[", "\"install_liftover\"", "]", "=", "False", "config...
Retrieve biodata genome targets customized by install parameters.
[ "Retrieve", "biodata", "genome", "targets", "customized", "by", "install", "parameters", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L452-L462
train
218,243
bcbio/bcbio-nextgen
bcbio/install.py
_setup_genome_annotations
def _setup_genome_annotations(g, args, ann_groups): """Configure genome annotations to install based on datatarget. """ available_anns = g.get("annotations", []) + g.pop("annotations_available", []) anns = [] for orig_target in args.datatarget: if orig_target in ann_groups: targets = ann_groups[orig_target] else: targets = [orig_target] for target in targets: if target in available_anns: anns.append(target) g["annotations"] = anns if "variation" not in args.datatarget and "validation" in g: del g["validation"] return g
python
def _setup_genome_annotations(g, args, ann_groups): """Configure genome annotations to install based on datatarget. """ available_anns = g.get("annotations", []) + g.pop("annotations_available", []) anns = [] for orig_target in args.datatarget: if orig_target in ann_groups: targets = ann_groups[orig_target] else: targets = [orig_target] for target in targets: if target in available_anns: anns.append(target) g["annotations"] = anns if "variation" not in args.datatarget and "validation" in g: del g["validation"] return g
[ "def", "_setup_genome_annotations", "(", "g", ",", "args", ",", "ann_groups", ")", ":", "available_anns", "=", "g", ".", "get", "(", "\"annotations\"", ",", "[", "]", ")", "+", "g", ".", "pop", "(", "\"annotations_available\"", ",", "[", "]", ")", "anns"...
Configure genome annotations to install based on datatarget.
[ "Configure", "genome", "annotations", "to", "install", "based", "on", "datatarget", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L464-L480
train
218,244
bcbio/bcbio-nextgen
bcbio/install.py
upgrade_thirdparty_tools
def upgrade_thirdparty_tools(args, remotes): """Install and update third party tools used in the pipeline. Creates a manifest directory with installed programs on the system. """ cbl = get_cloudbiolinux(remotes) if args.toolconf and os.path.exists(args.toolconf): package_yaml = args.toolconf else: package_yaml = os.path.join(cbl["dir"], "contrib", "flavor", "ngs_pipeline_minimal", "packages-conda.yaml") sys.path.insert(0, cbl["dir"]) cbl_conda = __import__("cloudbio.package.conda", fromlist=["conda"]) cbl_conda.install_in(_get_conda_bin(), args.tooldir, package_yaml) manifest_dir = os.path.join(_get_data_dir(), "manifest") print("Creating manifest of installed packages in %s" % manifest_dir) cbl_manifest = __import__("cloudbio.manifest", fromlist=["manifest"]) if os.path.exists(manifest_dir): for fname in os.listdir(manifest_dir): if not fname.startswith("toolplus"): os.remove(os.path.join(manifest_dir, fname)) cbl_manifest.create(manifest_dir, args.tooldir)
python
def upgrade_thirdparty_tools(args, remotes): """Install and update third party tools used in the pipeline. Creates a manifest directory with installed programs on the system. """ cbl = get_cloudbiolinux(remotes) if args.toolconf and os.path.exists(args.toolconf): package_yaml = args.toolconf else: package_yaml = os.path.join(cbl["dir"], "contrib", "flavor", "ngs_pipeline_minimal", "packages-conda.yaml") sys.path.insert(0, cbl["dir"]) cbl_conda = __import__("cloudbio.package.conda", fromlist=["conda"]) cbl_conda.install_in(_get_conda_bin(), args.tooldir, package_yaml) manifest_dir = os.path.join(_get_data_dir(), "manifest") print("Creating manifest of installed packages in %s" % manifest_dir) cbl_manifest = __import__("cloudbio.manifest", fromlist=["manifest"]) if os.path.exists(manifest_dir): for fname in os.listdir(manifest_dir): if not fname.startswith("toolplus"): os.remove(os.path.join(manifest_dir, fname)) cbl_manifest.create(manifest_dir, args.tooldir)
[ "def", "upgrade_thirdparty_tools", "(", "args", ",", "remotes", ")", ":", "cbl", "=", "get_cloudbiolinux", "(", "remotes", ")", "if", "args", ".", "toolconf", "and", "os", ".", "path", ".", "exists", "(", "args", ".", "toolconf", ")", ":", "package_yaml", ...
Install and update third party tools used in the pipeline. Creates a manifest directory with installed programs on the system.
[ "Install", "and", "update", "third", "party", "tools", "used", "in", "the", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L482-L503
train
218,245
bcbio/bcbio-nextgen
bcbio/install.py
_install_toolplus
def _install_toolplus(args): """Install additional tools we cannot distribute, updating local manifest. """ manifest_dir = os.path.join(_get_data_dir(), "manifest") toolplus_manifest = os.path.join(manifest_dir, "toolplus-packages.yaml") system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml") # Handle toolplus installs inside Docker container if not os.path.exists(system_config): docker_system_config = os.path.join(_get_data_dir(), "config", "bcbio_system.yaml") if os.path.exists(docker_system_config): system_config = docker_system_config toolplus_dir = os.path.join(_get_data_dir(), "toolplus") for tool in args.toolplus: if tool.name in set(["gatk", "mutect"]): print("Installing %s" % tool.name) _install_gatk_jar(tool.name, tool.fname, toolplus_manifest, system_config, toolplus_dir) else: raise ValueError("Unexpected toolplus argument: %s %s" % (tool.name, tool.fname))
python
def _install_toolplus(args): """Install additional tools we cannot distribute, updating local manifest. """ manifest_dir = os.path.join(_get_data_dir(), "manifest") toolplus_manifest = os.path.join(manifest_dir, "toolplus-packages.yaml") system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml") # Handle toolplus installs inside Docker container if not os.path.exists(system_config): docker_system_config = os.path.join(_get_data_dir(), "config", "bcbio_system.yaml") if os.path.exists(docker_system_config): system_config = docker_system_config toolplus_dir = os.path.join(_get_data_dir(), "toolplus") for tool in args.toolplus: if tool.name in set(["gatk", "mutect"]): print("Installing %s" % tool.name) _install_gatk_jar(tool.name, tool.fname, toolplus_manifest, system_config, toolplus_dir) else: raise ValueError("Unexpected toolplus argument: %s %s" % (tool.name, tool.fname))
[ "def", "_install_toolplus", "(", "args", ")", ":", "manifest_dir", "=", "os", ".", "path", ".", "join", "(", "_get_data_dir", "(", ")", ",", "\"manifest\"", ")", "toolplus_manifest", "=", "os", ".", "path", ".", "join", "(", "manifest_dir", ",", "\"toolplu...
Install additional tools we cannot distribute, updating local manifest.
[ "Install", "additional", "tools", "we", "cannot", "distribute", "updating", "local", "manifest", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L505-L522
train
218,246
bcbio/bcbio-nextgen
bcbio/install.py
_install_gatk_jar
def _install_gatk_jar(name, fname, manifest, system_config, toolplus_dir): """Install a jar for GATK or associated tools like MuTect. """ if not fname.endswith(".jar"): raise ValueError("--toolplus argument for %s expects a jar file: %s" % (name, fname)) version = get_gatk_jar_version(name, fname) store_dir = utils.safe_makedir(os.path.join(toolplus_dir, name, version)) shutil.copyfile(fname, os.path.join(store_dir, os.path.basename(fname))) _update_system_file(system_config, name, {"dir": store_dir}) _update_manifest(manifest, name, version)
python
def _install_gatk_jar(name, fname, manifest, system_config, toolplus_dir): """Install a jar for GATK or associated tools like MuTect. """ if not fname.endswith(".jar"): raise ValueError("--toolplus argument for %s expects a jar file: %s" % (name, fname)) version = get_gatk_jar_version(name, fname) store_dir = utils.safe_makedir(os.path.join(toolplus_dir, name, version)) shutil.copyfile(fname, os.path.join(store_dir, os.path.basename(fname))) _update_system_file(system_config, name, {"dir": store_dir}) _update_manifest(manifest, name, version)
[ "def", "_install_gatk_jar", "(", "name", ",", "fname", ",", "manifest", ",", "system_config", ",", "toolplus_dir", ")", ":", "if", "not", "fname", ".", "endswith", "(", "\".jar\"", ")", ":", "raise", "ValueError", "(", "\"--toolplus argument for %s expects a jar f...
Install a jar for GATK or associated tools like MuTect.
[ "Install", "a", "jar", "for", "GATK", "or", "associated", "tools", "like", "MuTect", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L532-L541
train
218,247
bcbio/bcbio-nextgen
bcbio/install.py
_update_manifest
def _update_manifest(manifest_file, name, version): """Update the toolplus manifest file with updated name and version """ if os.path.exists(manifest_file): with open(manifest_file) as in_handle: manifest = yaml.safe_load(in_handle) else: manifest = {} manifest[name] = {"name": name, "version": version} with open(manifest_file, "w") as out_handle: yaml.safe_dump(manifest, out_handle, default_flow_style=False, allow_unicode=False)
python
def _update_manifest(manifest_file, name, version): """Update the toolplus manifest file with updated name and version """ if os.path.exists(manifest_file): with open(manifest_file) as in_handle: manifest = yaml.safe_load(in_handle) else: manifest = {} manifest[name] = {"name": name, "version": version} with open(manifest_file, "w") as out_handle: yaml.safe_dump(manifest, out_handle, default_flow_style=False, allow_unicode=False)
[ "def", "_update_manifest", "(", "manifest_file", ",", "name", ",", "version", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "manifest_file", ")", ":", "with", "open", "(", "manifest_file", ")", "as", "in_handle", ":", "manifest", "=", "yaml", "....
Update the toolplus manifest file with updated name and version
[ "Update", "the", "toolplus", "manifest", "file", "with", "updated", "name", "and", "version" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L543-L553
train
218,248
bcbio/bcbio-nextgen
bcbio/install.py
_update_system_file
def _update_system_file(system_file, name, new_kvs): """Update the bcbio_system.yaml file with new resource information. """ if os.path.exists(system_file): bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") shutil.copyfile(system_file, bak_file) with open(system_file) as in_handle: config = yaml.safe_load(in_handle) else: utils.safe_makedir(os.path.dirname(system_file)) config = {} new_rs = {} added = False for rname, r_kvs in config.get("resources", {}).items(): if rname == name: for k, v in new_kvs.items(): r_kvs[k] = v added = True new_rs[rname] = r_kvs if not added: new_rs[name] = new_kvs config["resources"] = new_rs with open(system_file, "w") as out_handle: yaml.safe_dump(config, out_handle, default_flow_style=False, allow_unicode=False)
python
def _update_system_file(system_file, name, new_kvs): """Update the bcbio_system.yaml file with new resource information. """ if os.path.exists(system_file): bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") shutil.copyfile(system_file, bak_file) with open(system_file) as in_handle: config = yaml.safe_load(in_handle) else: utils.safe_makedir(os.path.dirname(system_file)) config = {} new_rs = {} added = False for rname, r_kvs in config.get("resources", {}).items(): if rname == name: for k, v in new_kvs.items(): r_kvs[k] = v added = True new_rs[rname] = r_kvs if not added: new_rs[name] = new_kvs config["resources"] = new_rs with open(system_file, "w") as out_handle: yaml.safe_dump(config, out_handle, default_flow_style=False, allow_unicode=False)
[ "def", "_update_system_file", "(", "system_file", ",", "name", ",", "new_kvs", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "system_file", ")", ":", "bak_file", "=", "system_file", "+", "\".bak%s\"", "%", "datetime", ".", "datetime", ".", "now", ...
Update the bcbio_system.yaml file with new resource information.
[ "Update", "the", "bcbio_system", ".", "yaml", "file", "with", "new", "resource", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L555-L578
train
218,249
bcbio/bcbio-nextgen
bcbio/install.py
_install_kraken_db
def _install_kraken_db(datadir, args): """Install kraken minimal DB in genome folder. """ import requests kraken = os.path.join(datadir, "genomes/kraken") url = "https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz" compress = os.path.join(kraken, os.path.basename(url)) base, ext = utils.splitext_plus(os.path.basename(url)) db = os.path.join(kraken, base) tooldir = args.tooldir or get_defaults()["tooldir"] requests.packages.urllib3.disable_warnings() last_mod = urllib.request.urlopen(url).info().get('Last-Modified') last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc()) if os.path.exists(os.path.join(tooldir, "bin", "kraken")): if not os.path.exists(db): is_new_version = True else: cur_file = glob.glob(os.path.join(kraken, "minikraken_*"))[0] cur_version = datetime.datetime.utcfromtimestamp(os.path.getmtime(cur_file)) is_new_version = last_mod.date() > cur_version.date() if is_new_version: shutil.move(cur_file, cur_file.replace('minikraken', 'old')) if not os.path.exists(kraken): utils.safe_makedir(kraken) if is_new_version: if not os.path.exists(compress): subprocess.check_call(["wget", "-O", compress, url, "--no-check-certificate"]) cmd = ["tar", "-xzvf", compress, "-C", kraken] subprocess.check_call(cmd) last_version = glob.glob(os.path.join(kraken, "minikraken_*")) utils.symlink_plus(os.path.join(kraken, last_version[0]), os.path.join(kraken, "minikraken")) utils.remove_safe(compress) else: print("You have the latest version %s." % last_mod) else: raise argparse.ArgumentTypeError("kraken not installed in tooldir %s." % os.path.join(tooldir, "bin", "kraken"))
python
def _install_kraken_db(datadir, args): """Install kraken minimal DB in genome folder. """ import requests kraken = os.path.join(datadir, "genomes/kraken") url = "https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz" compress = os.path.join(kraken, os.path.basename(url)) base, ext = utils.splitext_plus(os.path.basename(url)) db = os.path.join(kraken, base) tooldir = args.tooldir or get_defaults()["tooldir"] requests.packages.urllib3.disable_warnings() last_mod = urllib.request.urlopen(url).info().get('Last-Modified') last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc()) if os.path.exists(os.path.join(tooldir, "bin", "kraken")): if not os.path.exists(db): is_new_version = True else: cur_file = glob.glob(os.path.join(kraken, "minikraken_*"))[0] cur_version = datetime.datetime.utcfromtimestamp(os.path.getmtime(cur_file)) is_new_version = last_mod.date() > cur_version.date() if is_new_version: shutil.move(cur_file, cur_file.replace('minikraken', 'old')) if not os.path.exists(kraken): utils.safe_makedir(kraken) if is_new_version: if not os.path.exists(compress): subprocess.check_call(["wget", "-O", compress, url, "--no-check-certificate"]) cmd = ["tar", "-xzvf", compress, "-C", kraken] subprocess.check_call(cmd) last_version = glob.glob(os.path.join(kraken, "minikraken_*")) utils.symlink_plus(os.path.join(kraken, last_version[0]), os.path.join(kraken, "minikraken")) utils.remove_safe(compress) else: print("You have the latest version %s." % last_mod) else: raise argparse.ArgumentTypeError("kraken not installed in tooldir %s." % os.path.join(tooldir, "bin", "kraken"))
[ "def", "_install_kraken_db", "(", "datadir", ",", "args", ")", ":", "import", "requests", "kraken", "=", "os", ".", "path", ".", "join", "(", "datadir", ",", "\"genomes/kraken\"", ")", "url", "=", "\"https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz\"", "compres...
Install kraken minimal DB in genome folder.
[ "Install", "kraken", "minimal", "DB", "in", "genome", "folder", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L580-L616
train
218,250
bcbio/bcbio-nextgen
bcbio/install.py
_get_install_config
def _get_install_config(): """Return the YAML configuration file used to store upgrade information. """ try: data_dir = _get_data_dir() except ValueError: return None config_dir = utils.safe_makedir(os.path.join(data_dir, "config")) return os.path.join(config_dir, "install-params.yaml")
python
def _get_install_config(): """Return the YAML configuration file used to store upgrade information. """ try: data_dir = _get_data_dir() except ValueError: return None config_dir = utils.safe_makedir(os.path.join(data_dir, "config")) return os.path.join(config_dir, "install-params.yaml")
[ "def", "_get_install_config", "(", ")", ":", "try", ":", "data_dir", "=", "_get_data_dir", "(", ")", "except", "ValueError", ":", "return", "None", "config_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", ...
Return the YAML configuration file used to store upgrade information.
[ "Return", "the", "YAML", "configuration", "file", "used", "to", "store", "upgrade", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L620-L628
train
218,251
bcbio/bcbio-nextgen
bcbio/install.py
save_install_defaults
def save_install_defaults(args): """Save installation information to make future upgrades easier. """ install_config = _get_install_config() if install_config is None: return if utils.file_exists(install_config): with open(install_config) as in_handle: cur_config = yaml.safe_load(in_handle) else: cur_config = {} if args.tooldir: cur_config["tooldir"] = args.tooldir cur_config["isolate"] = args.isolate for attr in ["genomes", "aligners", "datatarget"]: if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(x) # toolplus -- save non-filename inputs attr = "toolplus" if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if not x.fname: if x.name not in cur_config[attr]: cur_config[attr].append(x.name) with open(install_config, "w") as out_handle: yaml.safe_dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
python
def save_install_defaults(args): """Save installation information to make future upgrades easier. """ install_config = _get_install_config() if install_config is None: return if utils.file_exists(install_config): with open(install_config) as in_handle: cur_config = yaml.safe_load(in_handle) else: cur_config = {} if args.tooldir: cur_config["tooldir"] = args.tooldir cur_config["isolate"] = args.isolate for attr in ["genomes", "aligners", "datatarget"]: if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if x not in cur_config[attr]: cur_config[attr].append(x) # toolplus -- save non-filename inputs attr = "toolplus" if not cur_config.get(attr): cur_config[attr] = [] for x in getattr(args, attr): if not x.fname: if x.name not in cur_config[attr]: cur_config[attr].append(x.name) with open(install_config, "w") as out_handle: yaml.safe_dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
[ "def", "save_install_defaults", "(", "args", ")", ":", "install_config", "=", "_get_install_config", "(", ")", "if", "install_config", "is", "None", ":", "return", "if", "utils", ".", "file_exists", "(", "install_config", ")", ":", "with", "open", "(", "instal...
Save installation information to make future upgrades easier.
[ "Save", "installation", "information", "to", "make", "future", "upgrades", "easier", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L630-L659
train
218,252
bcbio/bcbio-nextgen
bcbio/install.py
add_install_defaults
def add_install_defaults(args): """Add any saved installation defaults to the upgrade. """ # Ensure we install data if we've specified any secondary installation targets if len(args.genomes) > 0 or len(args.aligners) > 0 or len(args.datatarget) > 0: args.install_data = True install_config = _get_install_config() if install_config is None or not utils.file_exists(install_config): default_args = {} else: with open(install_config) as in_handle: default_args = yaml.safe_load(in_handle) # if we are upgrading to development, also upgrade the tools if args.upgrade in ["development"] and (args.tooldir or "tooldir" in default_args): args.tools = True if args.tools and args.tooldir is None: if "tooldir" in default_args: args.tooldir = str(default_args["tooldir"]) else: raise ValueError("Default tool directory not yet saved in config defaults. " "Specify the '--tooldir=/path/to/tools' to upgrade tools. " "After a successful upgrade, the '--tools' parameter will " "work for future upgrades.") for attr in ["genomes", "aligners"]: # don't upgrade default genomes if a genome was specified if attr == "genomes" and len(args.genomes) > 0: continue for x in default_args.get(attr, []): x = str(x) new_val = getattr(args, attr) if x not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args = _datatarget_defaults(args, default_args) if "isolate" in default_args and args.isolate is not True: args.isolate = default_args["isolate"] return args
python
def add_install_defaults(args): """Add any saved installation defaults to the upgrade. """ # Ensure we install data if we've specified any secondary installation targets if len(args.genomes) > 0 or len(args.aligners) > 0 or len(args.datatarget) > 0: args.install_data = True install_config = _get_install_config() if install_config is None or not utils.file_exists(install_config): default_args = {} else: with open(install_config) as in_handle: default_args = yaml.safe_load(in_handle) # if we are upgrading to development, also upgrade the tools if args.upgrade in ["development"] and (args.tooldir or "tooldir" in default_args): args.tools = True if args.tools and args.tooldir is None: if "tooldir" in default_args: args.tooldir = str(default_args["tooldir"]) else: raise ValueError("Default tool directory not yet saved in config defaults. " "Specify the '--tooldir=/path/to/tools' to upgrade tools. " "After a successful upgrade, the '--tools' parameter will " "work for future upgrades.") for attr in ["genomes", "aligners"]: # don't upgrade default genomes if a genome was specified if attr == "genomes" and len(args.genomes) > 0: continue for x in default_args.get(attr, []): x = str(x) new_val = getattr(args, attr) if x not in getattr(args, attr): new_val.append(x) setattr(args, attr, new_val) args = _datatarget_defaults(args, default_args) if "isolate" in default_args and args.isolate is not True: args.isolate = default_args["isolate"] return args
[ "def", "add_install_defaults", "(", "args", ")", ":", "# Ensure we install data if we've specified any secondary installation targets", "if", "len", "(", "args", ".", "genomes", ")", ">", "0", "or", "len", "(", "args", ".", "aligners", ")", ">", "0", "or", "len", ...
Add any saved installation defaults to the upgrade.
[ "Add", "any", "saved", "installation", "defaults", "to", "the", "upgrade", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L661-L697
train
218,253
bcbio/bcbio-nextgen
bcbio/install.py
_datatarget_defaults
def _datatarget_defaults(args, default_args): """Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications. """ default_data = default_args.get("datatarget", []) # back-compatible toolplus specifications for x in default_args.get("toolplus", []): val = None if x == "data": val = "gemini" elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken", "gnomad"]: val = x if val and val not in default_data: default_data.append(val) new_val = getattr(args, "datatarget") for x in default_data: if x not in new_val: new_val.append(x) has_std_target = False std_targets = ["variation", "rnaseq", "smallrna"] for target in std_targets: if target in new_val: has_std_target = True break if not has_std_target: new_val = new_val + std_targets setattr(args, "datatarget", new_val) return args
python
def _datatarget_defaults(args, default_args): """Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications. """ default_data = default_args.get("datatarget", []) # back-compatible toolplus specifications for x in default_args.get("toolplus", []): val = None if x == "data": val = "gemini" elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken", "gnomad"]: val = x if val and val not in default_data: default_data.append(val) new_val = getattr(args, "datatarget") for x in default_data: if x not in new_val: new_val.append(x) has_std_target = False std_targets = ["variation", "rnaseq", "smallrna"] for target in std_targets: if target in new_val: has_std_target = True break if not has_std_target: new_val = new_val + std_targets setattr(args, "datatarget", new_val) return args
[ "def", "_datatarget_defaults", "(", "args", ",", "default_args", ")", ":", "default_data", "=", "default_args", ".", "get", "(", "\"datatarget\"", ",", "[", "]", ")", "# back-compatible toolplus specifications", "for", "x", "in", "default_args", ".", "get", "(", ...
Set data installation targets, handling defaults. Sets variation, rnaseq, smallrna as default targets if we're not isolated to a single method. Provides back compatibility for toolplus specifications.
[ "Set", "data", "installation", "targets", "handling", "defaults", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/install.py#L699-L730
train
218,254
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_merge_wf_inputs
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs): """Merge inputs for a sub-workflow, adding any not present inputs in out. Skips inputs that are internally generated or generated and ignored, keeping only as inputs those that we do not generate internally. """ internal_generated_ids = [] for vignore in to_ignore: vignore_id = _get_string_vid(vignore) # ignore anything we generate internally, but not those we need to pull in # from the external process if vignore_id not in [v["id"] for v in wf_outputs]: internal_generated_ids.append(vignore_id) ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs]) cur_ids = set([v["id"] for v in out]) remapped_new = [] for v in new: remapped_v = copy.deepcopy(v) outv = copy.deepcopy(v) outv["id"] = get_base_id(v["id"]) outv["source"] = v["id"] if outv["id"] not in cur_ids and outv["id"] not in ignore_ids: if nested_inputs and v["id"] in nested_inputs: outv = _flatten_nested_input(outv) out.append(outv) if remapped_v["id"] in set([v["source"] for v in out]): remapped_v["source"] = get_base_id(remapped_v["id"]) remapped_new.append(remapped_v) return out, remapped_new
python
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs): """Merge inputs for a sub-workflow, adding any not present inputs in out. Skips inputs that are internally generated or generated and ignored, keeping only as inputs those that we do not generate internally. """ internal_generated_ids = [] for vignore in to_ignore: vignore_id = _get_string_vid(vignore) # ignore anything we generate internally, but not those we need to pull in # from the external process if vignore_id not in [v["id"] for v in wf_outputs]: internal_generated_ids.append(vignore_id) ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs]) cur_ids = set([v["id"] for v in out]) remapped_new = [] for v in new: remapped_v = copy.deepcopy(v) outv = copy.deepcopy(v) outv["id"] = get_base_id(v["id"]) outv["source"] = v["id"] if outv["id"] not in cur_ids and outv["id"] not in ignore_ids: if nested_inputs and v["id"] in nested_inputs: outv = _flatten_nested_input(outv) out.append(outv) if remapped_v["id"] in set([v["source"] for v in out]): remapped_v["source"] = get_base_id(remapped_v["id"]) remapped_new.append(remapped_v) return out, remapped_new
[ "def", "_merge_wf_inputs", "(", "new", ",", "out", ",", "wf_outputs", ",", "to_ignore", ",", "parallel", ",", "nested_inputs", ")", ":", "internal_generated_ids", "=", "[", "]", "for", "vignore", "in", "to_ignore", ":", "vignore_id", "=", "_get_string_vid", "(...
Merge inputs for a sub-workflow, adding any not present inputs in out. Skips inputs that are internally generated or generated and ignored, keeping only as inputs those that we do not generate internally.
[ "Merge", "inputs", "for", "a", "sub", "-", "workflow", "adding", "any", "not", "present", "inputs", "in", "out", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L72-L100
train
218,255
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_merge_wf_outputs
def _merge_wf_outputs(new, cur, parallel): """Merge outputs for a sub-workflow, replacing variables changed in later steps. ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps """ new_ids = set([]) out = [] for v in new: outv = {} outv["source"] = v["id"] outv["id"] = "%s" % get_base_id(v["id"]) outv["type"] = v["type"] if "secondaryFiles" in v: outv["secondaryFiles"] = v["secondaryFiles"] if tz.get_in(["outputBinding", "secondaryFiles"], v): outv["secondaryFiles"] = tz.get_in(["outputBinding", "secondaryFiles"], v) new_ids.add(outv["id"]) out.append(outv) for outv in cur: if outv["id"] not in new_ids: out.append(outv) return out
python
def _merge_wf_outputs(new, cur, parallel): """Merge outputs for a sub-workflow, replacing variables changed in later steps. ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps """ new_ids = set([]) out = [] for v in new: outv = {} outv["source"] = v["id"] outv["id"] = "%s" % get_base_id(v["id"]) outv["type"] = v["type"] if "secondaryFiles" in v: outv["secondaryFiles"] = v["secondaryFiles"] if tz.get_in(["outputBinding", "secondaryFiles"], v): outv["secondaryFiles"] = tz.get_in(["outputBinding", "secondaryFiles"], v) new_ids.add(outv["id"]) out.append(outv) for outv in cur: if outv["id"] not in new_ids: out.append(outv) return out
[ "def", "_merge_wf_outputs", "(", "new", ",", "cur", ",", "parallel", ")", ":", "new_ids", "=", "set", "(", "[", "]", ")", "out", "=", "[", "]", "for", "v", "in", "new", ":", "outv", "=", "{", "}", "outv", "[", "\"source\"", "]", "=", "v", "[", ...
Merge outputs for a sub-workflow, replacing variables changed in later steps. ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps
[ "Merge", "outputs", "for", "a", "sub", "-", "workflow", "replacing", "variables", "changed", "in", "later", "steps", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L102-L123
train
218,256
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_extract_from_subworkflow
def _extract_from_subworkflow(vs, step): """Remove internal variable names when moving from sub-workflow to main. """ substep_ids = set([x.name for x in step.workflow]) out = [] for var in vs: internal = False parts = var["id"].split("/") if len(parts) > 1: if parts[0] in substep_ids: internal = True if not internal: var.pop("source", None) out.append(var) return out
python
def _extract_from_subworkflow(vs, step): """Remove internal variable names when moving from sub-workflow to main. """ substep_ids = set([x.name for x in step.workflow]) out = [] for var in vs: internal = False parts = var["id"].split("/") if len(parts) > 1: if parts[0] in substep_ids: internal = True if not internal: var.pop("source", None) out.append(var) return out
[ "def", "_extract_from_subworkflow", "(", "vs", ",", "step", ")", ":", "substep_ids", "=", "set", "(", "[", "x", ".", "name", "for", "x", "in", "step", ".", "workflow", "]", ")", "out", "=", "[", "]", "for", "var", "in", "vs", ":", "internal", "=", ...
Remove internal variable names when moving from sub-workflow to main.
[ "Remove", "internal", "variable", "names", "when", "moving", "from", "sub", "-", "workflow", "to", "main", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L125-L139
train
218,257
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
is_cwl_record
def is_cwl_record(d): """Check if an input is a CWL record, from any level of nesting. """ if isinstance(d, dict): if d.get("type") == "record": return d else: recs = list(filter(lambda x: x is not None, [is_cwl_record(v) for v in d.values()])) return recs[0] if recs else None else: return None
python
def is_cwl_record(d): """Check if an input is a CWL record, from any level of nesting. """ if isinstance(d, dict): if d.get("type") == "record": return d else: recs = list(filter(lambda x: x is not None, [is_cwl_record(v) for v in d.values()])) return recs[0] if recs else None else: return None
[ "def", "is_cwl_record", "(", "d", ")", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "if", "d", ".", "get", "(", "\"type\"", ")", "==", "\"record\"", ":", "return", "d", "else", ":", "recs", "=", "list", "(", "filter", "(", "lambda", ...
Check if an input is a CWL record, from any level of nesting.
[ "Check", "if", "an", "input", "is", "a", "CWL", "record", "from", "any", "level", "of", "nesting", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L150-L160
train
218,258
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_get_step_inputs
def _get_step_inputs(step, file_vs, std_vs, parallel_ids, wf=None): """Retrieve inputs for a step from existing variables. Potentially nests inputs to deal with merging split variables. If we split previously and are merging now, then we only nest those coming from the split process. """ inputs = [] skip_inputs = set([]) for orig_input in [_get_variable(x, file_vs) for x in _handle_special_inputs(step.inputs, file_vs)]: inputs.append(orig_input) # Only add description and other information for non-record inputs, otherwise batched with records if not any(is_cwl_record(x) for x in inputs): inputs += [v for v in std_vs if get_base_id(v["id"]) not in skip_inputs] nested_inputs = [] if step.parallel in ["single-merge", "batch-merge"]: if parallel_ids: inputs = [_nest_variable(x) if x["id"] in parallel_ids else x for x in inputs] nested_inputs = parallel_ids[:] parallel_ids = [] elif step.parallel in ["multi-combined"]: assert len(parallel_ids) == 0 nested_inputs = [x["id"] for x in inputs] inputs = [_nest_variable(x) for x in inputs] elif step.parallel in ["multi-batch"]: assert len(parallel_ids) == 0 nested_inputs = [x["id"] for x in inputs] # If we're batching,with mixed records/inputs avoid double nesting records inputs = [_nest_variable(x, check_records=(len(inputs) > 1)) for x in inputs] # avoid inputs/outputs with the same name outputs = [_get_string_vid(x["id"]) for x in step.outputs] final_inputs = [] for input in inputs: input["wf_duplicate"] = get_base_id(input["id"]) in outputs final_inputs.append(input) return inputs, parallel_ids, nested_inputs
python
def _get_step_inputs(step, file_vs, std_vs, parallel_ids, wf=None): """Retrieve inputs for a step from existing variables. Potentially nests inputs to deal with merging split variables. If we split previously and are merging now, then we only nest those coming from the split process. """ inputs = [] skip_inputs = set([]) for orig_input in [_get_variable(x, file_vs) for x in _handle_special_inputs(step.inputs, file_vs)]: inputs.append(orig_input) # Only add description and other information for non-record inputs, otherwise batched with records if not any(is_cwl_record(x) for x in inputs): inputs += [v for v in std_vs if get_base_id(v["id"]) not in skip_inputs] nested_inputs = [] if step.parallel in ["single-merge", "batch-merge"]: if parallel_ids: inputs = [_nest_variable(x) if x["id"] in parallel_ids else x for x in inputs] nested_inputs = parallel_ids[:] parallel_ids = [] elif step.parallel in ["multi-combined"]: assert len(parallel_ids) == 0 nested_inputs = [x["id"] for x in inputs] inputs = [_nest_variable(x) for x in inputs] elif step.parallel in ["multi-batch"]: assert len(parallel_ids) == 0 nested_inputs = [x["id"] for x in inputs] # If we're batching,with mixed records/inputs avoid double nesting records inputs = [_nest_variable(x, check_records=(len(inputs) > 1)) for x in inputs] # avoid inputs/outputs with the same name outputs = [_get_string_vid(x["id"]) for x in step.outputs] final_inputs = [] for input in inputs: input["wf_duplicate"] = get_base_id(input["id"]) in outputs final_inputs.append(input) return inputs, parallel_ids, nested_inputs
[ "def", "_get_step_inputs", "(", "step", ",", "file_vs", ",", "std_vs", ",", "parallel_ids", ",", "wf", "=", "None", ")", ":", "inputs", "=", "[", "]", "skip_inputs", "=", "set", "(", "[", "]", ")", "for", "orig_input", "in", "[", "_get_variable", "(", ...
Retrieve inputs for a step from existing variables. Potentially nests inputs to deal with merging split variables. If we split previously and are merging now, then we only nest those coming from the split process.
[ "Retrieve", "inputs", "for", "a", "step", "from", "existing", "variables", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L162-L197
train
218,259
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_nest_variable
def _nest_variable(v, check_records=False): """Nest a variable when moving from scattered back to consolidated. check_records -- avoid re-nesting a record input if it comes from a previous step and is already nested, don't need to re-array. """ if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and v.get("type", {}).get("type") == "array"): return v else: v = copy.deepcopy(v) v["type"] = {"type": "array", "items": v["type"]} return v
python
def _nest_variable(v, check_records=False): """Nest a variable when moving from scattered back to consolidated. check_records -- avoid re-nesting a record input if it comes from a previous step and is already nested, don't need to re-array. """ if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and v.get("type", {}).get("type") == "array"): return v else: v = copy.deepcopy(v) v["type"] = {"type": "array", "items": v["type"]} return v
[ "def", "_nest_variable", "(", "v", ",", "check_records", "=", "False", ")", ":", "if", "(", "check_records", "and", "is_cwl_record", "(", "v", ")", "and", "len", "(", "v", "[", "\"id\"", "]", ".", "split", "(", "\"/\"", ")", ")", ">", "1", "and", "...
Nest a variable when moving from scattered back to consolidated. check_records -- avoid re-nesting a record input if it comes from a previous step and is already nested, don't need to re-array.
[ "Nest", "a", "variable", "when", "moving", "from", "scattered", "back", "to", "consolidated", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L251-L263
train
218,260
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_clean_output
def _clean_output(v): """Remove output specific variables to allow variables to be inputs to next steps. """ out = copy.deepcopy(v) outb = out.pop("outputBinding", {}) if "secondaryFiles" in outb: out["secondaryFiles"] = outb["secondaryFiles"] return out
python
def _clean_output(v): """Remove output specific variables to allow variables to be inputs to next steps. """ out = copy.deepcopy(v) outb = out.pop("outputBinding", {}) if "secondaryFiles" in outb: out["secondaryFiles"] = outb["secondaryFiles"] return out
[ "def", "_clean_output", "(", "v", ")", ":", "out", "=", "copy", ".", "deepcopy", "(", "v", ")", "outb", "=", "out", ".", "pop", "(", "\"outputBinding\"", ",", "{", "}", ")", "if", "\"secondaryFiles\"", "in", "outb", ":", "out", "[", "\"secondaryFiles\"...
Remove output specific variables to allow variables to be inputs to next steps.
[ "Remove", "output", "specific", "variables", "to", "allow", "variables", "to", "be", "inputs", "to", "next", "steps", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L265-L272
train
218,261
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_get_variable
def _get_variable(vid, variables): """Retrieve an input variable from our existing pool of options. """ if isinstance(vid, six.string_types): vid = get_base_id(vid) else: vid = _get_string_vid(vid) for v in variables: if vid == get_base_id(v["id"]): return copy.deepcopy(v) raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
python
def _get_variable(vid, variables): """Retrieve an input variable from our existing pool of options. """ if isinstance(vid, six.string_types): vid = get_base_id(vid) else: vid = _get_string_vid(vid) for v in variables: if vid == get_base_id(v["id"]): return copy.deepcopy(v) raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
[ "def", "_get_variable", "(", "vid", ",", "variables", ")", ":", "if", "isinstance", "(", "vid", ",", "six", ".", "string_types", ")", ":", "vid", "=", "get_base_id", "(", "vid", ")", "else", ":", "vid", "=", "_get_string_vid", "(", "vid", ")", "for", ...
Retrieve an input variable from our existing pool of options.
[ "Retrieve", "an", "input", "variable", "from", "our", "existing", "pool", "of", "options", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L280-L290
train
218,262
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_handle_special_inputs
def _handle_special_inputs(inputs, variables): """Adjust input variables based on special cases. This case handles inputs where we are optional or can have flexible choices. XXX Need to better expose this at a top level definition. """ from bcbio import structural optional = [["config", "algorithm", "coverage"], ["config", "algorithm", "variant_regions"], ["config", "algorithm", "sv_regions"], ["config", "algorithm", "validate"], ["config", "algorithm", "validate_regions"]] all_vs = set([get_base_id(v["id"]) for v in variables]) out = [] for input in inputs: if input == ["reference", "aligner", "indexes"]: for v in variables: vid = get_base_id(v["id"]).split("__") if vid[0] == "reference" and vid[1] in alignment.TOOLS: out.append(vid) elif input == ["reference", "snpeff", "genome_build"]: found_indexes = False for v in variables: vid = get_base_id(v["id"]).split("__") if vid[0] == "reference" and vid[1] == "snpeff": out.append(vid) found_indexes = True assert found_indexes, "Found no snpEff indexes in %s" % [v["id"] for v in variables] elif input == ["config", "algorithm", "background", "cnv_reference"]: for v in variables: vid = get_base_id(v["id"]).split("__") if (vid[:4] == ["config", "algorithm", "background", "cnv_reference"] and structural.supports_cnv_reference(vid[4])): out.append(vid) elif input in optional: if _get_string_vid(input) in all_vs: out.append(input) else: out.append(input) return out
python
def _handle_special_inputs(inputs, variables): """Adjust input variables based on special cases. This case handles inputs where we are optional or can have flexible choices. XXX Need to better expose this at a top level definition. """ from bcbio import structural optional = [["config", "algorithm", "coverage"], ["config", "algorithm", "variant_regions"], ["config", "algorithm", "sv_regions"], ["config", "algorithm", "validate"], ["config", "algorithm", "validate_regions"]] all_vs = set([get_base_id(v["id"]) for v in variables]) out = [] for input in inputs: if input == ["reference", "aligner", "indexes"]: for v in variables: vid = get_base_id(v["id"]).split("__") if vid[0] == "reference" and vid[1] in alignment.TOOLS: out.append(vid) elif input == ["reference", "snpeff", "genome_build"]: found_indexes = False for v in variables: vid = get_base_id(v["id"]).split("__") if vid[0] == "reference" and vid[1] == "snpeff": out.append(vid) found_indexes = True assert found_indexes, "Found no snpEff indexes in %s" % [v["id"] for v in variables] elif input == ["config", "algorithm", "background", "cnv_reference"]: for v in variables: vid = get_base_id(v["id"]).split("__") if (vid[:4] == ["config", "algorithm", "background", "cnv_reference"] and structural.supports_cnv_reference(vid[4])): out.append(vid) elif input in optional: if _get_string_vid(input) in all_vs: out.append(input) else: out.append(input) return out
[ "def", "_handle_special_inputs", "(", "inputs", ",", "variables", ")", ":", "from", "bcbio", "import", "structural", "optional", "=", "[", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"coverage\"", "]", ",", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"...
Adjust input variables based on special cases. This case handles inputs where we are optional or can have flexible choices. XXX Need to better expose this at a top level definition.
[ "Adjust", "input", "variables", "based", "on", "special", "cases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L292-L332
train
218,263
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_create_record
def _create_record(name, field_defs, step_name, inputs, unlist, file_vs, std_vs, parallel): """Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization. """ if field_defs: fields = [] inherit = [] inherit_all = False inherit_exclude = [] for fdef in field_defs: if not fdef.get("type"): if fdef["id"] == "inherit": inherit_all = True inherit_exclude = fdef.get("exclude", []) else: inherit.append(fdef["id"]) else: cur = {"name": _get_string_vid(fdef["id"]), "type": fdef["type"]} fields.append(_add_secondary_to_rec_field(fdef, cur)) if inherit_all: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, exclude=inherit_exclude)) elif inherit: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, inherit)) else: fields = _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel) out = {"id": "%s/%s" % (step_name, name), "type": {"name": name, "type": "record", "fields": fields}} if parallel in ["batch-single", "multi-batch"]: out = _nest_variable(out) return out
python
def _create_record(name, field_defs, step_name, inputs, unlist, file_vs, std_vs, parallel): """Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization. """ if field_defs: fields = [] inherit = [] inherit_all = False inherit_exclude = [] for fdef in field_defs: if not fdef.get("type"): if fdef["id"] == "inherit": inherit_all = True inherit_exclude = fdef.get("exclude", []) else: inherit.append(fdef["id"]) else: cur = {"name": _get_string_vid(fdef["id"]), "type": fdef["type"]} fields.append(_add_secondary_to_rec_field(fdef, cur)) if inherit_all: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, exclude=inherit_exclude)) elif inherit: fields.extend(_infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, inherit)) else: fields = _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel) out = {"id": "%s/%s" % (step_name, name), "type": {"name": name, "type": "record", "fields": fields}} if parallel in ["batch-single", "multi-batch"]: out = _nest_variable(out) return out
[ "def", "_create_record", "(", "name", ",", "field_defs", ",", "step_name", ",", "inputs", ",", "unlist", ",", "file_vs", ",", "std_vs", ",", "parallel", ")", ":", "if", "field_defs", ":", "fields", "=", "[", "]", "inherit", "=", "[", "]", "inherit_all", ...
Create an output record by rearranging inputs. Batching processes create records that reformat the inputs for parallelization.
[ "Create", "an", "output", "record", "by", "rearranging", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L348-L382
train
218,264
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_infer_record_outputs
def _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, to_include=None, exclude=None): """Infer the outputs of a record from the original inputs """ fields = [] unlist = set([_get_string_vid(x) for x in unlist]) input_vids = set([_get_string_vid(v) for v in _handle_special_inputs(inputs, file_vs)]) to_include = set([_get_string_vid(x) for x in to_include]) if to_include else None to_exclude = tuple(set([_get_string_vid(x) for x in exclude])) if exclude else None added = set([]) for raw_v in std_vs + [v for v in file_vs if get_base_id(v["id"]) in input_vids]: # unpack record inside this record and un-nested inputs to avoid double nested cur_record = is_cwl_record(raw_v) if cur_record: # unlist = unlist | set([field["name"] for field in cur_record["fields"]]) nested_vs = [{"id": field["name"], "type": field["type"]} for field in cur_record["fields"]] else: nested_vs = [raw_v] for orig_v in nested_vs: if (get_base_id(orig_v["id"]) not in added and (not to_include or get_base_id(orig_v["id"]) in to_include)): if to_exclude is None or not get_base_id(orig_v["id"]).startswith(to_exclude): cur_v = {} cur_v["name"] = get_base_id(orig_v["id"]) cur_v["type"] = orig_v["type"] if cur_v["name"] in unlist: cur_v = _flatten_nested_input(cur_v) fields.append(_add_secondary_to_rec_field(orig_v, cur_v)) added.add(get_base_id(orig_v["id"])) return fields
python
def _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, to_include=None, exclude=None): """Infer the outputs of a record from the original inputs """ fields = [] unlist = set([_get_string_vid(x) for x in unlist]) input_vids = set([_get_string_vid(v) for v in _handle_special_inputs(inputs, file_vs)]) to_include = set([_get_string_vid(x) for x in to_include]) if to_include else None to_exclude = tuple(set([_get_string_vid(x) for x in exclude])) if exclude else None added = set([]) for raw_v in std_vs + [v for v in file_vs if get_base_id(v["id"]) in input_vids]: # unpack record inside this record and un-nested inputs to avoid double nested cur_record = is_cwl_record(raw_v) if cur_record: # unlist = unlist | set([field["name"] for field in cur_record["fields"]]) nested_vs = [{"id": field["name"], "type": field["type"]} for field in cur_record["fields"]] else: nested_vs = [raw_v] for orig_v in nested_vs: if (get_base_id(orig_v["id"]) not in added and (not to_include or get_base_id(orig_v["id"]) in to_include)): if to_exclude is None or not get_base_id(orig_v["id"]).startswith(to_exclude): cur_v = {} cur_v["name"] = get_base_id(orig_v["id"]) cur_v["type"] = orig_v["type"] if cur_v["name"] in unlist: cur_v = _flatten_nested_input(cur_v) fields.append(_add_secondary_to_rec_field(orig_v, cur_v)) added.add(get_base_id(orig_v["id"])) return fields
[ "def", "_infer_record_outputs", "(", "inputs", ",", "unlist", ",", "file_vs", ",", "std_vs", ",", "parallel", ",", "to_include", "=", "None", ",", "exclude", "=", "None", ")", ":", "fields", "=", "[", "]", "unlist", "=", "set", "(", "[", "_get_string_vid...
Infer the outputs of a record from the original inputs
[ "Infer", "the", "outputs", "of", "a", "record", "from", "the", "original", "inputs" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L390-L419
train
218,265
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_create_variable
def _create_variable(orig_v, step, variables): """Create a new output variable, potentially over-writing existing or creating new. """ # get current variable, and convert to be the output of our process step try: v = _get_variable(orig_v["id"], variables) except ValueError: v = copy.deepcopy(orig_v) if not isinstance(v["id"], six.string_types): v["id"] = _get_string_vid(v["id"]) for key, val in orig_v.items(): if key not in ["id", "type"]: v[key] = val if orig_v.get("type") != "null": v["type"] = orig_v["type"] v["id"] = "%s/%s" % (step.name, get_base_id(v["id"])) return v
python
def _create_variable(orig_v, step, variables): """Create a new output variable, potentially over-writing existing or creating new. """ # get current variable, and convert to be the output of our process step try: v = _get_variable(orig_v["id"], variables) except ValueError: v = copy.deepcopy(orig_v) if not isinstance(v["id"], six.string_types): v["id"] = _get_string_vid(v["id"]) for key, val in orig_v.items(): if key not in ["id", "type"]: v[key] = val if orig_v.get("type") != "null": v["type"] = orig_v["type"] v["id"] = "%s/%s" % (step.name, get_base_id(v["id"])) return v
[ "def", "_create_variable", "(", "orig_v", ",", "step", ",", "variables", ")", ":", "# get current variable, and convert to be the output of our process step", "try", ":", "v", "=", "_get_variable", "(", "orig_v", "[", "\"id\"", "]", ",", "variables", ")", "except", ...
Create a new output variable, potentially over-writing existing or creating new.
[ "Create", "a", "new", "output", "variable", "potentially", "over", "-", "writing", "existing", "or", "creating", "new", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L421-L437
train
218,266
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
_merge_variables
def _merge_variables(new, cur): """Add any new variables to the world representation in cur. Replaces any variables adjusted by previous steps. """ new_added = set([]) out = [] for cur_var in cur: updated = False for new_var in new: if get_base_id(new_var["id"]) == get_base_id(cur_var["id"]): out.append(new_var) new_added.add(new_var["id"]) updated = True break if not updated: out.append(cur_var) for new_var in new: if new_var["id"] not in new_added: out.append(new_var) return out
python
def _merge_variables(new, cur): """Add any new variables to the world representation in cur. Replaces any variables adjusted by previous steps. """ new_added = set([]) out = [] for cur_var in cur: updated = False for new_var in new: if get_base_id(new_var["id"]) == get_base_id(cur_var["id"]): out.append(new_var) new_added.add(new_var["id"]) updated = True break if not updated: out.append(cur_var) for new_var in new: if new_var["id"] not in new_added: out.append(new_var) return out
[ "def", "_merge_variables", "(", "new", ",", "cur", ")", ":", "new_added", "=", "set", "(", "[", "]", ")", "out", "=", "[", "]", "for", "cur_var", "in", "cur", ":", "updated", "=", "False", "for", "new_var", "in", "new", ":", "if", "get_base_id", "(...
Add any new variables to the world representation in cur. Replaces any variables adjusted by previous steps.
[ "Add", "any", "new", "variables", "to", "the", "world", "representation", "in", "cur", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L439-L459
train
218,267
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
split
def split(*items): """Split samples into all possible genomes for alignment. """ out = [] for data in [x[0] for x in items]: dis_orgs = data["config"]["algorithm"].get("disambiguate") if dis_orgs: if not data.get("disambiguate", None): data["disambiguate"] = {"genome_build": data["genome_build"], "base": True} out.append([data]) # handle the instance where a single organism is disambiguated if isinstance(dis_orgs, six.string_types): dis_orgs = [dis_orgs] for dis_org in dis_orgs: dis_data = copy.deepcopy(data) dis_data["disambiguate"] = {"genome_build": dis_org} dis_data["genome_build"] = dis_org dis_data["config"]["algorithm"]["effects"] = False dis_data = run_info.add_reference_resources(dis_data) out.append([dis_data]) else: out.append([data]) return out
python
def split(*items): """Split samples into all possible genomes for alignment. """ out = [] for data in [x[0] for x in items]: dis_orgs = data["config"]["algorithm"].get("disambiguate") if dis_orgs: if not data.get("disambiguate", None): data["disambiguate"] = {"genome_build": data["genome_build"], "base": True} out.append([data]) # handle the instance where a single organism is disambiguated if isinstance(dis_orgs, six.string_types): dis_orgs = [dis_orgs] for dis_org in dis_orgs: dis_data = copy.deepcopy(data) dis_data["disambiguate"] = {"genome_build": dis_org} dis_data["genome_build"] = dis_org dis_data["config"]["algorithm"]["effects"] = False dis_data = run_info.add_reference_resources(dis_data) out.append([dis_data]) else: out.append([data]) return out
[ "def", "split", "(", "*", "items", ")", ":", "out", "=", "[", "]", "for", "data", "in", "[", "x", "[", "0", "]", "for", "x", "in", "items", "]", ":", "dis_orgs", "=", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", ...
Split samples into all possible genomes for alignment.
[ "Split", "samples", "into", "all", "possible", "genomes", "for", "alignment", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L28-L51
train
218,268
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
resolve
def resolve(items, run_parallel): """Combine aligned and split samples into final set of disambiguated reads. """ out = [] to_process = collections.defaultdict(list) for data in [x[0] for x in items]: if "disambiguate" in data: split_part = tuple([int(x) for x in data["align_split"].split("-")]) if data.get("combine") else None to_process[(dd.get_sample_name(data), split_part)].append(data) else: out.append([data]) if len(to_process) > 0: dis1 = run_parallel("run_disambiguate", [(xs, xs[0]["config"]) for xs in to_process.values()]) disambigs_by_name = collections.defaultdict(list) print(len(dis1)) for xs in dis1: assert len(xs) == 1 data = xs[0] disambigs_by_name[dd.get_sample_name(data)].append(data) dis2 = run_parallel("disambiguate_merge_extras", [(xs, xs[0]["config"]) for xs in disambigs_by_name.values()]) else: dis2 = [] return out + dis2
python
def resolve(items, run_parallel): """Combine aligned and split samples into final set of disambiguated reads. """ out = [] to_process = collections.defaultdict(list) for data in [x[0] for x in items]: if "disambiguate" in data: split_part = tuple([int(x) for x in data["align_split"].split("-")]) if data.get("combine") else None to_process[(dd.get_sample_name(data), split_part)].append(data) else: out.append([data]) if len(to_process) > 0: dis1 = run_parallel("run_disambiguate", [(xs, xs[0]["config"]) for xs in to_process.values()]) disambigs_by_name = collections.defaultdict(list) print(len(dis1)) for xs in dis1: assert len(xs) == 1 data = xs[0] disambigs_by_name[dd.get_sample_name(data)].append(data) dis2 = run_parallel("disambiguate_merge_extras", [(xs, xs[0]["config"]) for xs in disambigs_by_name.values()]) else: dis2 = [] return out + dis2
[ "def", "resolve", "(", "items", ",", "run_parallel", ")", ":", "out", "=", "[", "]", "to_process", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "data", "in", "[", "x", "[", "0", "]", "for", "x", "in", "items", "]", ":", "if", ...
Combine aligned and split samples into final set of disambiguated reads.
[ "Combine", "aligned", "and", "split", "samples", "into", "final", "set", "of", "disambiguated", "reads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L53-L77
train
218,269
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
merge_extras
def merge_extras(items, config): """Merge extra disambiguated reads into a final BAM file. """ final = {} for extra_name in items[0]["disambiguate"].keys(): in_files = [] for data in items: in_files.append(data["disambiguate"][extra_name]) out_file = "%s-allmerged%s" % os.path.splitext(in_files[0]) if in_files[0].endswith(".bam"): merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file), items[0], out_file=out_file) else: assert extra_name == "summary", extra_name merged_file = _merge_summary(in_files, out_file, items[0]) final[extra_name] = merged_file out = [] for data in items: data["disambiguate"] = final out.append([data]) return out
python
def merge_extras(items, config): """Merge extra disambiguated reads into a final BAM file. """ final = {} for extra_name in items[0]["disambiguate"].keys(): in_files = [] for data in items: in_files.append(data["disambiguate"][extra_name]) out_file = "%s-allmerged%s" % os.path.splitext(in_files[0]) if in_files[0].endswith(".bam"): merged_file = merge.merge_bam_files(in_files, os.path.dirname(out_file), items[0], out_file=out_file) else: assert extra_name == "summary", extra_name merged_file = _merge_summary(in_files, out_file, items[0]) final[extra_name] = merged_file out = [] for data in items: data["disambiguate"] = final out.append([data]) return out
[ "def", "merge_extras", "(", "items", ",", "config", ")", ":", "final", "=", "{", "}", "for", "extra_name", "in", "items", "[", "0", "]", "[", "\"disambiguate\"", "]", ".", "keys", "(", ")", ":", "in_files", "=", "[", "]", "for", "data", "in", "item...
Merge extra disambiguated reads into a final BAM file.
[ "Merge", "extra", "disambiguated", "reads", "into", "a", "final", "BAM", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L79-L99
train
218,270
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
_merge_summary
def _merge_summary(in_files, out_file, data): """Create one big summary file for disambiguation from multiple splits. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for i, in_file in enumerate(in_files): with open(in_file) as in_handle: for j, line in enumerate(in_handle): if j == 0: if i == 0: out_handle.write(line) else: out_handle.write(line) return out_file
python
def _merge_summary(in_files, out_file, data): """Create one big summary file for disambiguation from multiple splits. """ if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for i, in_file in enumerate(in_files): with open(in_file) as in_handle: for j, line in enumerate(in_handle): if j == 0: if i == 0: out_handle.write(line) else: out_handle.write(line) return out_file
[ "def", "_merge_summary", "(", "in_files", ",", "out_file", ",", "data", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "with", "open", ...
Create one big summary file for disambiguation from multiple splits.
[ "Create", "one", "big", "summary", "file", "for", "disambiguation", "from", "multiple", "splits", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L101-L115
train
218,271
bcbio/bcbio-nextgen
bcbio/pipeline/disambiguate/__init__.py
_run_python
def _run_python(work_bam_a, work_bam_b, out_dir, aligner, prefix, items): """Run python version of disambiguation """ Args = collections.namedtuple("Args", "A B output_dir intermediate_dir " "no_sort prefix aligner") args = Args(work_bam_a, work_bam_b, out_dir, out_dir, True, "", aligner) disambiguate_main(args)
python
def _run_python(work_bam_a, work_bam_b, out_dir, aligner, prefix, items): """Run python version of disambiguation """ Args = collections.namedtuple("Args", "A B output_dir intermediate_dir " "no_sort prefix aligner") args = Args(work_bam_a, work_bam_b, out_dir, out_dir, True, "", aligner) disambiguate_main(args)
[ "def", "_run_python", "(", "work_bam_a", ",", "work_bam_b", ",", "out_dir", ",", "aligner", ",", "prefix", ",", "items", ")", ":", "Args", "=", "collections", ".", "namedtuple", "(", "\"Args\"", ",", "\"A B output_dir intermediate_dir \"", "\"no_sort prefix aligner\...
Run python version of disambiguation
[ "Run", "python", "version", "of", "disambiguation" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/disambiguate/__init__.py#L153-L159
train
218,272
bcbio/bcbio-nextgen
bcbio/cwl/main.py
run
def run(args): """Run a CWL preparation pipeline. """ dirs, config, run_info_yaml = run_info.prep_system(args.sample_config, args.systemconfig) integrations = args.integrations if hasattr(args, "integrations") else {} world = run_info.organize(dirs, config, run_info_yaml, is_cwl=True, integrations=integrations) create.from_world(world, run_info_yaml, integrations=integrations, add_container_tag=args.add_container_tag)
python
def run(args): """Run a CWL preparation pipeline. """ dirs, config, run_info_yaml = run_info.prep_system(args.sample_config, args.systemconfig) integrations = args.integrations if hasattr(args, "integrations") else {} world = run_info.organize(dirs, config, run_info_yaml, is_cwl=True, integrations=integrations) create.from_world(world, run_info_yaml, integrations=integrations, add_container_tag=args.add_container_tag)
[ "def", "run", "(", "args", ")", ":", "dirs", ",", "config", ",", "run_info_yaml", "=", "run_info", ".", "prep_system", "(", "args", ".", "sample_config", ",", "args", ".", "systemconfig", ")", "integrations", "=", "args", ".", "integrations", "if", "hasatt...
Run a CWL preparation pipeline.
[ "Run", "a", "CWL", "preparation", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/main.py#L6-L12
train
218,273
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
assign_interval
def assign_interval(data): """Identify coverage based on percent of genome covered and relation to targets. Classifies coverage into 3 categories: - genome: Full genome coverage - regional: Regional coverage, like exome capture, with off-target reads - amplicon: Amplication based regional coverage without off-target reads """ if not dd.get_coverage_interval(data): vrs = dd.get_variant_regions_merged(data) callable_file = dd.get_sample_callable(data) if vrs: callable_size = pybedtools.BedTool(vrs).total_coverage() else: callable_size = pybedtools.BedTool(callable_file).total_coverage() total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) genome_cov_pct = callable_size / float(total_size) if genome_cov_pct > GENOME_COV_THRESH: cov_interval = "genome" offtarget_pct = 0.0 elif not vrs: cov_interval = "regional" offtarget_pct = 0.0 else: offtarget_pct = _count_offtarget(data, dd.get_align_bam(data) or dd.get_work_bam(data), vrs or callable_file, "variant_regions") if offtarget_pct > OFFTARGET_THRESH: cov_interval = "regional" else: cov_interval = "amplicon" logger.info("%s: Assigned coverage as '%s' with %.1f%% genome coverage and %.1f%% offtarget coverage" % (dd.get_sample_name(data), cov_interval, genome_cov_pct * 100.0, offtarget_pct * 100.0)) data["config"]["algorithm"]["coverage_interval"] = cov_interval return data
python
def assign_interval(data): """Identify coverage based on percent of genome covered and relation to targets. Classifies coverage into 3 categories: - genome: Full genome coverage - regional: Regional coverage, like exome capture, with off-target reads - amplicon: Amplication based regional coverage without off-target reads """ if not dd.get_coverage_interval(data): vrs = dd.get_variant_regions_merged(data) callable_file = dd.get_sample_callable(data) if vrs: callable_size = pybedtools.BedTool(vrs).total_coverage() else: callable_size = pybedtools.BedTool(callable_file).total_coverage() total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) genome_cov_pct = callable_size / float(total_size) if genome_cov_pct > GENOME_COV_THRESH: cov_interval = "genome" offtarget_pct = 0.0 elif not vrs: cov_interval = "regional" offtarget_pct = 0.0 else: offtarget_pct = _count_offtarget(data, dd.get_align_bam(data) or dd.get_work_bam(data), vrs or callable_file, "variant_regions") if offtarget_pct > OFFTARGET_THRESH: cov_interval = "regional" else: cov_interval = "amplicon" logger.info("%s: Assigned coverage as '%s' with %.1f%% genome coverage and %.1f%% offtarget coverage" % (dd.get_sample_name(data), cov_interval, genome_cov_pct * 100.0, offtarget_pct * 100.0)) data["config"]["algorithm"]["coverage_interval"] = cov_interval return data
[ "def", "assign_interval", "(", "data", ")", ":", "if", "not", "dd", ".", "get_coverage_interval", "(", "data", ")", ":", "vrs", "=", "dd", ".", "get_variant_regions_merged", "(", "data", ")", "callable_file", "=", "dd", ".", "get_sample_callable", "(", "data...
Identify coverage based on percent of genome covered and relation to targets. Classifies coverage into 3 categories: - genome: Full genome coverage - regional: Regional coverage, like exome capture, with off-target reads - amplicon: Amplication based regional coverage without off-target reads
[ "Identify", "coverage", "based", "on", "percent", "of", "genome", "covered", "and", "relation", "to", "targets", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L29-L62
train
218,274
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
calculate
def calculate(bam_file, data, sv_bed): """Calculate coverage in parallel using mosdepth. Removes duplicates and secondary reads from the counts: if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue; """ params = {"min": dd.get_coverage_depth_min(data)} variant_regions = dd.get_variant_regions_merged(data) if not variant_regions: variant_regions = _create_genome_regions(data) # Back compatible with previous pre-mosdepth callable files callable_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))), "%s-coverage.callable.bed" % (dd.get_sample_name(data))) if not utils.file_uptodate(callable_file, bam_file): vr_quantize = ("0:1:%s:" % (params["min"]), ["NO_COVERAGE", "LOW_COVERAGE", "CALLABLE"]) to_calculate = [("variant_regions", variant_regions, vr_quantize, None, "coverage_perbase" in dd.get_tools_on(data)), ("sv_regions", bedutils.clean_file(sv_bed, data, prefix="svregions-"), None, None, False), ("coverage", bedutils.clean_file(dd.get_coverage(data), data, prefix="cov-"), None, DEPTH_THRESHOLDS, False)] depth_files = {} for target_name, region_bed, quantize, thresholds, per_base in to_calculate: if region_bed: cur_depth = {} depth_info = run_mosdepth(data, target_name, region_bed, quantize=quantize, thresholds=thresholds, per_base=per_base) for attr in ("dist", "regions", "thresholds", "per_base"): val = getattr(depth_info, attr, None) if val: cur_depth[attr] = val depth_files[target_name] = cur_depth if target_name == "variant_regions": callable_file = depth_info.quantize else: depth_files = {} final_callable = _subset_to_variant_regions(callable_file, variant_regions, data) return final_callable, depth_files
python
def calculate(bam_file, data, sv_bed): """Calculate coverage in parallel using mosdepth. Removes duplicates and secondary reads from the counts: if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue; """ params = {"min": dd.get_coverage_depth_min(data)} variant_regions = dd.get_variant_regions_merged(data) if not variant_regions: variant_regions = _create_genome_regions(data) # Back compatible with previous pre-mosdepth callable files callable_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))), "%s-coverage.callable.bed" % (dd.get_sample_name(data))) if not utils.file_uptodate(callable_file, bam_file): vr_quantize = ("0:1:%s:" % (params["min"]), ["NO_COVERAGE", "LOW_COVERAGE", "CALLABLE"]) to_calculate = [("variant_regions", variant_regions, vr_quantize, None, "coverage_perbase" in dd.get_tools_on(data)), ("sv_regions", bedutils.clean_file(sv_bed, data, prefix="svregions-"), None, None, False), ("coverage", bedutils.clean_file(dd.get_coverage(data), data, prefix="cov-"), None, DEPTH_THRESHOLDS, False)] depth_files = {} for target_name, region_bed, quantize, thresholds, per_base in to_calculate: if region_bed: cur_depth = {} depth_info = run_mosdepth(data, target_name, region_bed, quantize=quantize, thresholds=thresholds, per_base=per_base) for attr in ("dist", "regions", "thresholds", "per_base"): val = getattr(depth_info, attr, None) if val: cur_depth[attr] = val depth_files[target_name] = cur_depth if target_name == "variant_regions": callable_file = depth_info.quantize else: depth_files = {} final_callable = _subset_to_variant_regions(callable_file, variant_regions, data) return final_callable, depth_files
[ "def", "calculate", "(", "bam_file", ",", "data", ",", "sv_bed", ")", ":", "params", "=", "{", "\"min\"", ":", "dd", ".", "get_coverage_depth_min", "(", "data", ")", "}", "variant_regions", "=", "dd", ".", "get_variant_regions_merged", "(", "data", ")", "i...
Calculate coverage in parallel using mosdepth. Removes duplicates and secondary reads from the counts: if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue;
[ "Calculate", "coverage", "in", "parallel", "using", "mosdepth", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L73-L111
train
218,275
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
_create_genome_regions
def _create_genome_regions(data): """Create whole genome contigs we want to process, only non-alts. Skips problem contigs like HLAs for downstream analysis. """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage", dd.get_sample_name(data))) variant_regions = os.path.join(work_dir, "target-genome.bed") with file_transaction(data, variant_regions) as tx_variant_regions: with open(tx_variant_regions, "w") as out_handle: for c in shared.get_noalt_contigs(data): out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size)) return variant_regions
python
def _create_genome_regions(data): """Create whole genome contigs we want to process, only non-alts. Skips problem contigs like HLAs for downstream analysis. """ work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage", dd.get_sample_name(data))) variant_regions = os.path.join(work_dir, "target-genome.bed") with file_transaction(data, variant_regions) as tx_variant_regions: with open(tx_variant_regions, "w") as out_handle: for c in shared.get_noalt_contigs(data): out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size)) return variant_regions
[ "def", "_create_genome_regions", "(", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"coverage\"", ",", "dd", ".", "get_sample_name", "(", "da...
Create whole genome contigs we want to process, only non-alts. Skips problem contigs like HLAs for downstream analysis.
[ "Create", "whole", "genome", "contigs", "we", "want", "to", "process", "only", "non", "-", "alts", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L113-L124
train
218,276
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
_subset_to_variant_regions
def _subset_to_variant_regions(callable_file, variant_regions, data): """Subset output callable file to only variant regions of interest. """ out_file = "%s-vrsubset.bed" % utils.splitext_plus(callable_file)[0] if not utils.file_uptodate(out_file, callable_file): with file_transaction(data, out_file) as tx_out_file: with utils.open_gzipsafe(callable_file) as in_handle: pybedtools.BedTool(in_handle).intersect(variant_regions).saveas(tx_out_file) return out_file
python
def _subset_to_variant_regions(callable_file, variant_regions, data): """Subset output callable file to only variant regions of interest. """ out_file = "%s-vrsubset.bed" % utils.splitext_plus(callable_file)[0] if not utils.file_uptodate(out_file, callable_file): with file_transaction(data, out_file) as tx_out_file: with utils.open_gzipsafe(callable_file) as in_handle: pybedtools.BedTool(in_handle).intersect(variant_regions).saveas(tx_out_file) return out_file
[ "def", "_subset_to_variant_regions", "(", "callable_file", ",", "variant_regions", ",", "data", ")", ":", "out_file", "=", "\"%s-vrsubset.bed\"", "%", "utils", ".", "splitext_plus", "(", "callable_file", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptod...
Subset output callable file to only variant regions of interest.
[ "Subset", "output", "callable", "file", "to", "only", "variant", "regions", "of", "interest", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L126-L134
train
218,277
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
_average_genome_coverage
def _average_genome_coverage(data, bam_file): """Quickly calculate average coverage for whole genome files using indices. Includes all reads, with duplicates. Uses sampling of 10M reads. """ total = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) read_counts = sum(x.aligned for x in bam.idxstats(bam_file, data)) with pysam.Samfile(bam_file, "rb") as pysam_bam: read_size = np.median(list(itertools.islice((a.query_length for a in pysam_bam.fetch()), int(1e7)))) avg_cov = float(read_counts * read_size) / total return avg_cov
python
def _average_genome_coverage(data, bam_file): """Quickly calculate average coverage for whole genome files using indices. Includes all reads, with duplicates. Uses sampling of 10M reads. """ total = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]) read_counts = sum(x.aligned for x in bam.idxstats(bam_file, data)) with pysam.Samfile(bam_file, "rb") as pysam_bam: read_size = np.median(list(itertools.islice((a.query_length for a in pysam_bam.fetch()), int(1e7)))) avg_cov = float(read_counts * read_size) / total return avg_cov
[ "def", "_average_genome_coverage", "(", "data", ",", "bam_file", ")", ":", "total", "=", "sum", "(", "[", "c", ".", "size", "for", "c", "in", "ref", ".", "file_contigs", "(", "dd", ".", "get_ref_file", "(", "data", ")", ",", "data", "[", "\"config\"", ...
Quickly calculate average coverage for whole genome files using indices. Includes all reads, with duplicates. Uses sampling of 10M reads.
[ "Quickly", "calculate", "average", "coverage", "for", "whole", "genome", "files", "using", "indices", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L171-L181
train
218,278
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
regions_coverage
def regions_coverage(bed_file, target_name, data): """Generate coverage over regions of interest using mosdepth. """ ready_bed = tz.get_in(["depth", target_name, "regions"], data) if ready_bed: return ready_bed else: return run_mosdepth(data, target_name, bed_file).regions
python
def regions_coverage(bed_file, target_name, data): """Generate coverage over regions of interest using mosdepth. """ ready_bed = tz.get_in(["depth", target_name, "regions"], data) if ready_bed: return ready_bed else: return run_mosdepth(data, target_name, bed_file).regions
[ "def", "regions_coverage", "(", "bed_file", ",", "target_name", ",", "data", ")", ":", "ready_bed", "=", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "target_name", ",", "\"regions\"", "]", ",", "data", ")", "if", "ready_bed", ":", "return", "ready_bed...
Generate coverage over regions of interest using mosdepth.
[ "Generate", "coverage", "over", "regions", "of", "interest", "using", "mosdepth", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L197-L204
train
218,279
bcbio/bcbio-nextgen
bcbio/variation/coverage.py
coverage_region_detailed_stats
def coverage_region_detailed_stats(target_name, bed_file, data, out_dir): """ Calculate coverage at different completeness cutoff for region in coverage option. """ if bed_file and utils.file_exists(bed_file): ready_depth = tz.get_in(["depth", target_name], data) if ready_depth: cov_file = ready_depth["regions"] dist_file = ready_depth["dist"] thresholds_file = ready_depth.get("thresholds") out_cov_file = os.path.join(out_dir, os.path.basename(cov_file)) out_dist_file = os.path.join(out_dir, os.path.basename(dist_file)) out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file)) \ if thresholds_file and os.path.isfile(thresholds_file) else None if not utils.file_uptodate(out_cov_file, cov_file): utils.copy_plus(cov_file, out_cov_file) utils.copy_plus(dist_file, out_dist_file) utils.copy_plus(thresholds_file, out_thresholds_file) if out_thresholds_file else None return [out_cov_file, out_dist_file] + ([out_thresholds_file] if out_thresholds_file else []) return []
python
def coverage_region_detailed_stats(target_name, bed_file, data, out_dir): """ Calculate coverage at different completeness cutoff for region in coverage option. """ if bed_file and utils.file_exists(bed_file): ready_depth = tz.get_in(["depth", target_name], data) if ready_depth: cov_file = ready_depth["regions"] dist_file = ready_depth["dist"] thresholds_file = ready_depth.get("thresholds") out_cov_file = os.path.join(out_dir, os.path.basename(cov_file)) out_dist_file = os.path.join(out_dir, os.path.basename(dist_file)) out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file)) \ if thresholds_file and os.path.isfile(thresholds_file) else None if not utils.file_uptodate(out_cov_file, cov_file): utils.copy_plus(cov_file, out_cov_file) utils.copy_plus(dist_file, out_dist_file) utils.copy_plus(thresholds_file, out_thresholds_file) if out_thresholds_file else None return [out_cov_file, out_dist_file] + ([out_thresholds_file] if out_thresholds_file else []) return []
[ "def", "coverage_region_detailed_stats", "(", "target_name", ",", "bed_file", ",", "data", ",", "out_dir", ")", ":", "if", "bed_file", "and", "utils", ".", "file_exists", "(", "bed_file", ")", ":", "ready_depth", "=", "tz", ".", "get_in", "(", "[", "\"depth\...
Calculate coverage at different completeness cutoff for region in coverage option.
[ "Calculate", "coverage", "at", "different", "completeness", "cutoff", "for", "region", "in", "coverage", "option", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/coverage.py#L249-L269
train
218,280
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
get_coords
def get_coords(data): """Retrieve coordinates of genes of interest for prioritization. Can read from CIViC input data or a supplied BED file of chrom, start, end and gene information. """ for category, vtypes in [("LOH", {"LOSS", "HETEROZYGOSITY"}), ("amplification", {"AMPLIFICATION"})]: out = tz.get_in([category, dd.get_genome_build(data)], _COORDS, {}) priority_file = dd.get_svprioritize(data) if priority_file: if os.path.basename(priority_file).find("civic") >= 0: for chrom, start, end, gene in _civic_regions(priority_file, vtypes, dd.get_disease(data)): out[gene] = (chrom, start, end) elif os.path.basename(priority_file).find(".bed") >= 0: for line in utils.open_gzipsafe(priority_file): parts = line.strip().split("\t") if len(parts) >= 4: chrom, start, end, gene = parts[:4] out[gene] = (chrom, int(start), int(end)) yield category, out
python
def get_coords(data): """Retrieve coordinates of genes of interest for prioritization. Can read from CIViC input data or a supplied BED file of chrom, start, end and gene information. """ for category, vtypes in [("LOH", {"LOSS", "HETEROZYGOSITY"}), ("amplification", {"AMPLIFICATION"})]: out = tz.get_in([category, dd.get_genome_build(data)], _COORDS, {}) priority_file = dd.get_svprioritize(data) if priority_file: if os.path.basename(priority_file).find("civic") >= 0: for chrom, start, end, gene in _civic_regions(priority_file, vtypes, dd.get_disease(data)): out[gene] = (chrom, start, end) elif os.path.basename(priority_file).find(".bed") >= 0: for line in utils.open_gzipsafe(priority_file): parts = line.strip().split("\t") if len(parts) >= 4: chrom, start, end, gene = parts[:4] out[gene] = (chrom, int(start), int(end)) yield category, out
[ "def", "get_coords", "(", "data", ")", ":", "for", "category", ",", "vtypes", "in", "[", "(", "\"LOH\"", ",", "{", "\"LOSS\"", ",", "\"HETEROZYGOSITY\"", "}", ")", ",", "(", "\"amplification\"", ",", "{", "\"AMPLIFICATION\"", "}", ")", "]", ":", "out", ...
Retrieve coordinates of genes of interest for prioritization. Can read from CIViC input data or a supplied BED file of chrom, start, end and gene information.
[ "Retrieve", "coordinates", "of", "genes", "of", "interest", "for", "prioritization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L29-L49
train
218,281
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_civic_regions
def _civic_regions(civic_file, variant_types=None, diseases=None, drugs=None): """Retrieve gene regions and names filtered by variant_types and diseases. """ if isinstance(diseases, six.string_types): diseases = [diseases] with utils.open_gzipsafe(civic_file) as in_handle: reader = csv.reader(in_handle, delimiter="\t") for chrom, start, end, info_str in reader: info = edn_loads(info_str) if not variant_types or _matches(info["support"]["variants"], variant_types): if not diseases or _matches(info["support"]["diseases"], diseases): if not drugs or _matches(info["support"]["drugs"], drugs): yield (chrom, int(start), int(end), list(info["name"])[0])
python
def _civic_regions(civic_file, variant_types=None, diseases=None, drugs=None): """Retrieve gene regions and names filtered by variant_types and diseases. """ if isinstance(diseases, six.string_types): diseases = [diseases] with utils.open_gzipsafe(civic_file) as in_handle: reader = csv.reader(in_handle, delimiter="\t") for chrom, start, end, info_str in reader: info = edn_loads(info_str) if not variant_types or _matches(info["support"]["variants"], variant_types): if not diseases or _matches(info["support"]["diseases"], diseases): if not drugs or _matches(info["support"]["drugs"], drugs): yield (chrom, int(start), int(end), list(info["name"])[0])
[ "def", "_civic_regions", "(", "civic_file", ",", "variant_types", "=", "None", ",", "diseases", "=", "None", ",", "drugs", "=", "None", ")", ":", "if", "isinstance", "(", "diseases", ",", "six", ".", "string_types", ")", ":", "diseases", "=", "[", "disea...
Retrieve gene regions and names filtered by variant_types and diseases.
[ "Retrieve", "gene", "regions", "and", "names", "filtered", "by", "variant_types", "and", "diseases", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L58-L70
train
218,282
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
summary_status
def summary_status(call, data): """Retrieve status in regions of interest, along with heterogeneity metrics. Provides output with overall purity and ploidy, along with region specific calls. """ out_file = None if call.get("vrn_file") and os.path.exists(call.get("vrn_file")): out_file = os.path.join(os.path.dirname(call["vrn_file"]), "%s-%s-lohsummary.yaml" % (dd.get_sample_name(data), call["variantcaller"])) if not utils.file_uptodate(out_file, call["vrn_file"]): out = {} if call["variantcaller"] == "titancna": out.update(_titancna_summary(call, data)) pass elif call["variantcaller"] == "purecn": out.update(_purecn_summary(call, data)) if out: out["description"] = dd.get_sample_name(data) out["variantcaller"] = call["variantcaller"] with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file if out_file and os.path.exists(out_file) else None
python
def summary_status(call, data): """Retrieve status in regions of interest, along with heterogeneity metrics. Provides output with overall purity and ploidy, along with region specific calls. """ out_file = None if call.get("vrn_file") and os.path.exists(call.get("vrn_file")): out_file = os.path.join(os.path.dirname(call["vrn_file"]), "%s-%s-lohsummary.yaml" % (dd.get_sample_name(data), call["variantcaller"])) if not utils.file_uptodate(out_file, call["vrn_file"]): out = {} if call["variantcaller"] == "titancna": out.update(_titancna_summary(call, data)) pass elif call["variantcaller"] == "purecn": out.update(_purecn_summary(call, data)) if out: out["description"] = dd.get_sample_name(data) out["variantcaller"] = call["variantcaller"] with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out_file if out_file and os.path.exists(out_file) else None
[ "def", "summary_status", "(", "call", ",", "data", ")", ":", "out_file", "=", "None", "if", "call", ".", "get", "(", "\"vrn_file\"", ")", "and", "os", ".", "path", ".", "exists", "(", "call", ".", "get", "(", "\"vrn_file\"", ")", ")", ":", "out_file"...
Retrieve status in regions of interest, along with heterogeneity metrics. Provides output with overall purity and ploidy, along with region specific calls.
[ "Retrieve", "status", "in", "regions", "of", "interest", "along", "with", "heterogeneity", "metrics", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L72-L95
train
218,283
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_check_copy_number_changes
def _check_copy_number_changes(svtype, cn, minor_cn, data): """Check if copy number changes match the expected svtype. """ if svtype == "LOH" and minor_cn == 0: return svtype elif svtype == "amplification" and cn > dd.get_ploidy(data): return svtype else: return "std"
python
def _check_copy_number_changes(svtype, cn, minor_cn, data): """Check if copy number changes match the expected svtype. """ if svtype == "LOH" and minor_cn == 0: return svtype elif svtype == "amplification" and cn > dd.get_ploidy(data): return svtype else: return "std"
[ "def", "_check_copy_number_changes", "(", "svtype", ",", "cn", ",", "minor_cn", ",", "data", ")", ":", "if", "svtype", "==", "\"LOH\"", "and", "minor_cn", "==", "0", ":", "return", "svtype", "elif", "svtype", "==", "\"amplification\"", "and", "cn", ">", "d...
Check if copy number changes match the expected svtype.
[ "Check", "if", "copy", "number", "changes", "match", "the", "expected", "svtype", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L97-L105
train
218,284
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_titancna_summary
def _titancna_summary(call, data): """Summarize purity, ploidy and LOH for TitanCNA. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["subclones"]) as in_handle: header = in_handle.readline().strip().split() for line in in_handle: val = dict(zip(header, line.strip().split())) start = int(val["Start_Position.bp."]) end = int(val["End_Position.bp."]) for region, cur_coords in coords.items(): if val["Chromosome"] == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(val["Copy_Number"]), _to_cn(val["MinorCN"]), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t"))) out["purity"] = vals["purity"] out["ploidy"] = vals["ploidy"] return out
python
def _titancna_summary(call, data): """Summarize purity, ploidy and LOH for TitanCNA. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["subclones"]) as in_handle: header = in_handle.readline().strip().split() for line in in_handle: val = dict(zip(header, line.strip().split())) start = int(val["Start_Position.bp."]) end = int(val["End_Position.bp."]) for region, cur_coords in coords.items(): if val["Chromosome"] == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(val["Copy_Number"]), _to_cn(val["MinorCN"]), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t"))) out["purity"] = vals["purity"] out["ploidy"] = vals["ploidy"] return out
[ "def", "_titancna_summary", "(", "call", ",", "data", ")", ":", "out", "=", "{", "}", "for", "svtype", ",", "coords", "in", "get_coords", "(", "data", ")", ":", "cur_calls", "=", "{", "k", ":", "collections", ".", "defaultdict", "(", "int", ")", "for...
Summarize purity, ploidy and LOH for TitanCNA.
[ "Summarize", "purity", "ploidy", "and", "LOH", "for", "TitanCNA", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L110-L132
train
218,285
bcbio/bcbio-nextgen
bcbio/heterogeneity/loh.py
_purecn_summary
def _purecn_summary(call, data): """Summarize purity, ploidy and LOH for PureCN. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["loh"]) as in_handle: in_handle.readline() # header for line in in_handle: _, chrom, start, end, _, cn, minor_cn = line.split(",")[:7] start = int(start) end = int(end) for region, cur_coords in coords.items(): if chrom == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(cn), _to_cn(minor_cn), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","), in_handle.readline().strip().split(","))) out["purity"] = vals["Purity"] out["ploidy"] = vals["Ploidy"] return out
python
def _purecn_summary(call, data): """Summarize purity, ploidy and LOH for PureCN. """ out = {} for svtype, coords in get_coords(data): cur_calls = {k: collections.defaultdict(int) for k in coords.keys()} with open(call["loh"]) as in_handle: in_handle.readline() # header for line in in_handle: _, chrom, start, end, _, cn, minor_cn = line.split(",")[:7] start = int(start) end = int(end) for region, cur_coords in coords.items(): if chrom == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]): cur_calls[region][_check_copy_number_changes(svtype, _to_cn(cn), _to_cn(minor_cn), data)] += 1 out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()} with open(call["hetsummary"]) as in_handle: vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","), in_handle.readline().strip().split(","))) out["purity"] = vals["Purity"] out["ploidy"] = vals["Ploidy"] return out
[ "def", "_purecn_summary", "(", "call", ",", "data", ")", ":", "out", "=", "{", "}", "for", "svtype", ",", "coords", "in", "get_coords", "(", "data", ")", ":", "cur_calls", "=", "{", "k", ":", "collections", ".", "defaultdict", "(", "int", ")", "for",...
Summarize purity, ploidy and LOH for PureCN.
[ "Summarize", "purity", "ploidy", "and", "LOH", "for", "PureCN", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/heterogeneity/loh.py#L134-L155
train
218,286
bcbio/bcbio-nextgen
scripts/utils/plink_to_vcf.py
fix_vcf_line
def fix_vcf_line(parts, ref_base): """Orient VCF allele calls with respect to reference base. Handles cases with ref and variant swaps. strand complements. """ swap = {"1/1": "0/0", "0/1": "0/1", "0/0": "1/1", "./.": "./."} complements = {"G": "C", "A": "T", "C": "G", "T": "A", "N": "N"} varinfo, genotypes = fix_line_problems(parts) ref, var = varinfo[3:5] # non-reference regions or non-informative, can't do anything if ref_base in [None, "N"] or set(genotypes) == set(["./."]): varinfo = None # matching reference, all good elif ref_base == ref: assert ref_base == ref, (ref_base, parts) # swapped reference and alternate regions elif ref_base == var or ref in ["N", "0"]: varinfo[3] = var varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # reference is on alternate strand elif ref_base != ref and complements.get(ref) == ref_base: varinfo[3] = complements[ref] varinfo[4] = ",".join([complements[v] for v in var.split(",")]) # unspecified alternative base elif ref_base != ref and var in ["N", "0"]: varinfo[3] = ref_base varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # swapped and on alternate strand elif ref_base != ref and complements.get(var) == ref_base: varinfo[3] = complements[var] varinfo[4] = ",".join([complements[v] for v in ref.split(",")]) genotypes = [swap[x] for x in genotypes] else: print "Did not associate ref {0} with line: {1}".format( ref_base, varinfo) if varinfo is not None: return varinfo + genotypes
python
def fix_vcf_line(parts, ref_base): """Orient VCF allele calls with respect to reference base. Handles cases with ref and variant swaps. strand complements. """ swap = {"1/1": "0/0", "0/1": "0/1", "0/0": "1/1", "./.": "./."} complements = {"G": "C", "A": "T", "C": "G", "T": "A", "N": "N"} varinfo, genotypes = fix_line_problems(parts) ref, var = varinfo[3:5] # non-reference regions or non-informative, can't do anything if ref_base in [None, "N"] or set(genotypes) == set(["./."]): varinfo = None # matching reference, all good elif ref_base == ref: assert ref_base == ref, (ref_base, parts) # swapped reference and alternate regions elif ref_base == var or ref in ["N", "0"]: varinfo[3] = var varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # reference is on alternate strand elif ref_base != ref and complements.get(ref) == ref_base: varinfo[3] = complements[ref] varinfo[4] = ",".join([complements[v] for v in var.split(",")]) # unspecified alternative base elif ref_base != ref and var in ["N", "0"]: varinfo[3] = ref_base varinfo[4] = ref genotypes = [swap[x] for x in genotypes] # swapped and on alternate strand elif ref_base != ref and complements.get(var) == ref_base: varinfo[3] = complements[var] varinfo[4] = ",".join([complements[v] for v in ref.split(",")]) genotypes = [swap[x] for x in genotypes] else: print "Did not associate ref {0} with line: {1}".format( ref_base, varinfo) if varinfo is not None: return varinfo + genotypes
[ "def", "fix_vcf_line", "(", "parts", ",", "ref_base", ")", ":", "swap", "=", "{", "\"1/1\"", ":", "\"0/0\"", ",", "\"0/1\"", ":", "\"0/1\"", ",", "\"0/0\"", ":", "\"1/1\"", ",", "\"./.\"", ":", "\"./.\"", "}", "complements", "=", "{", "\"G\"", ":", "\"...
Orient VCF allele calls with respect to reference base. Handles cases with ref and variant swaps. strand complements.
[ "Orient", "VCF", "allele", "calls", "with", "respect", "to", "reference", "base", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/plink_to_vcf.py#L79-L117
train
218,287
bcbio/bcbio-nextgen
scripts/utils/plink_to_vcf.py
fix_nonref_positions
def fix_nonref_positions(in_file, ref_file): """Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed. """ ignore_chrs = ["."] ref2bit = twobit.TwoBitFile(open(ref_file)) out_file = in_file.replace("-raw.vcf", ".vcf") with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.rstrip("\r\n").split("\t") pos = int(parts[1]) # handle chr/non-chr naming if parts[0] not in ref2bit.keys() and parts[0].replace("chr", "") in ref2bit.keys(): parts[0] = parts[0].replace("chr", "") # handle X chromosome elif parts[0] not in ref2bit.keys() and parts[0] == "23": for test in ["X", "chrX"]: if test in ref2bit.keys(): parts[0] == test ref_base = None if parts[0] not in ignore_chrs: try: ref_base = ref2bit[parts[0]].get(pos-1, pos).upper() except Exception as msg: print "Skipping line. Failed to retrieve reference base for %s\n%s" % (str(parts), msg) parts = fix_vcf_line(parts, ref_base) if parts is not None: out_handle.write("\t".join(parts) + "\n") return out_file
python
def fix_nonref_positions(in_file, ref_file): """Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed. """ ignore_chrs = ["."] ref2bit = twobit.TwoBitFile(open(ref_file)) out_file = in_file.replace("-raw.vcf", ".vcf") with open(in_file) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("#"): out_handle.write(line) else: parts = line.rstrip("\r\n").split("\t") pos = int(parts[1]) # handle chr/non-chr naming if parts[0] not in ref2bit.keys() and parts[0].replace("chr", "") in ref2bit.keys(): parts[0] = parts[0].replace("chr", "") # handle X chromosome elif parts[0] not in ref2bit.keys() and parts[0] == "23": for test in ["X", "chrX"]: if test in ref2bit.keys(): parts[0] == test ref_base = None if parts[0] not in ignore_chrs: try: ref_base = ref2bit[parts[0]].get(pos-1, pos).upper() except Exception as msg: print "Skipping line. Failed to retrieve reference base for %s\n%s" % (str(parts), msg) parts = fix_vcf_line(parts, ref_base) if parts is not None: out_handle.write("\t".join(parts) + "\n") return out_file
[ "def", "fix_nonref_positions", "(", "in_file", ",", "ref_file", ")", ":", "ignore_chrs", "=", "[", "\".\"", "]", "ref2bit", "=", "twobit", ".", "TwoBitFile", "(", "open", "(", "ref_file", ")", ")", "out_file", "=", "in_file", ".", "replace", "(", "\"-raw.v...
Fix Genotyping VCF positions where the bases are all variants. The plink/pseq output does not handle these correctly, and has all reference/variant bases reversed.
[ "Fix", "Genotyping", "VCF", "positions", "where", "the", "bases", "are", "all", "variants", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/plink_to_vcf.py#L119-L154
train
218,288
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
run
def run(items, background=None): """Detect copy number variations from batched set of samples using cn.mops. """ if not background: background = [] names = [tz.get_in(["rgnames", "sample"], x) for x in items + background] work_bams = [x["align_bam"] for x in items + background] if len(items + background) < 2: raise ValueError("cn.mops only works on batches with multiple samples") data = items[0] work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", names[0], "cn_mops")) parallel = {"type": "local", "cores": data["config"]["algorithm"].get("num_cores", 1), "progs": ["delly"]} with pysam.Samfile(work_bams[0], "rb") as pysam_work_bam: chroms = [None] if _get_regional_bed_file(items[0]) else pysam_work_bam.references out_files = run_multicore(_run_on_chrom, [(chrom, work_bams, names, work_dir, items) for chrom in chroms], data["config"], parallel) out_file = _combine_out_files(out_files, work_dir, data) out = [] for data in items: if "sv" not in data: data["sv"] = [] data["sv"].append({"variantcaller": "cn_mops", "vrn_file": _prep_sample_cnvs(out_file, data)}) out.append(data) return out
python
def run(items, background=None): """Detect copy number variations from batched set of samples using cn.mops. """ if not background: background = [] names = [tz.get_in(["rgnames", "sample"], x) for x in items + background] work_bams = [x["align_bam"] for x in items + background] if len(items + background) < 2: raise ValueError("cn.mops only works on batches with multiple samples") data = items[0] work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural", names[0], "cn_mops")) parallel = {"type": "local", "cores": data["config"]["algorithm"].get("num_cores", 1), "progs": ["delly"]} with pysam.Samfile(work_bams[0], "rb") as pysam_work_bam: chroms = [None] if _get_regional_bed_file(items[0]) else pysam_work_bam.references out_files = run_multicore(_run_on_chrom, [(chrom, work_bams, names, work_dir, items) for chrom in chroms], data["config"], parallel) out_file = _combine_out_files(out_files, work_dir, data) out = [] for data in items: if "sv" not in data: data["sv"] = [] data["sv"].append({"variantcaller": "cn_mops", "vrn_file": _prep_sample_cnvs(out_file, data)}) out.append(data) return out
[ "def", "run", "(", "items", ",", "background", "=", "None", ")", ":", "if", "not", "background", ":", "background", "=", "[", "]", "names", "=", "[", "tz", ".", "get_in", "(", "[", "\"rgnames\"", ",", "\"sample\"", "]", ",", "x", ")", "for", "x", ...
Detect copy number variations from batched set of samples using cn.mops.
[ "Detect", "copy", "number", "variations", "from", "batched", "set", "of", "samples", "using", "cn", ".", "mops", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L22-L48
train
218,289
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_combine_out_files
def _combine_out_files(chr_files, work_dir, data): """Concatenate all CNV calls into a single file. """ out_file = "%s.bed" % sshared.outname_from_inputs(chr_files) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for chr_file in chr_files: with open(chr_file) as in_handle: is_empty = in_handle.readline().startswith("track name=empty") if not is_empty: with open(chr_file) as in_handle: shutil.copyfileobj(in_handle, out_handle) return out_file
python
def _combine_out_files(chr_files, work_dir, data): """Concatenate all CNV calls into a single file. """ out_file = "%s.bed" % sshared.outname_from_inputs(chr_files) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for chr_file in chr_files: with open(chr_file) as in_handle: is_empty = in_handle.readline().startswith("track name=empty") if not is_empty: with open(chr_file) as in_handle: shutil.copyfileobj(in_handle, out_handle) return out_file
[ "def", "_combine_out_files", "(", "chr_files", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "\"%s.bed\"", "%", "sshared", ".", "outname_from_inputs", "(", "chr_files", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "wit...
Concatenate all CNV calls into a single file.
[ "Concatenate", "all", "CNV", "calls", "into", "a", "single", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L50-L63
train
218,290
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_prep_sample_cnvs
def _prep_sample_cnvs(cnv_file, data): """Convert a multiple sample CNV file into a single BED file for a sample. Handles matching and fixing names where R converts numerical IDs (1234) into strings by adding an X (X1234), and converts other characters into '.'s. http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html """ import pybedtools sample_name = tz.get_in(["rgnames", "sample"], data) def make_names(name): return re.sub("[^\w.]", '.', name) def matches_sample_name(feat): return (feat.name == sample_name or feat.name == "X%s" % sample_name or feat.name == make_names(sample_name)) def update_sample_name(feat): feat.name = sample_name return feat sample_file = os.path.join(os.path.dirname(cnv_file), "%s-cnv.bed" % sample_name) if not utils.file_exists(sample_file): with file_transaction(data, sample_file) as tx_out_file: with shared.bedtools_tmpdir(data): pybedtools.BedTool(cnv_file).filter(matches_sample_name).each(update_sample_name).saveas(tx_out_file) return sample_file
python
def _prep_sample_cnvs(cnv_file, data): """Convert a multiple sample CNV file into a single BED file for a sample. Handles matching and fixing names where R converts numerical IDs (1234) into strings by adding an X (X1234), and converts other characters into '.'s. http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html """ import pybedtools sample_name = tz.get_in(["rgnames", "sample"], data) def make_names(name): return re.sub("[^\w.]", '.', name) def matches_sample_name(feat): return (feat.name == sample_name or feat.name == "X%s" % sample_name or feat.name == make_names(sample_name)) def update_sample_name(feat): feat.name = sample_name return feat sample_file = os.path.join(os.path.dirname(cnv_file), "%s-cnv.bed" % sample_name) if not utils.file_exists(sample_file): with file_transaction(data, sample_file) as tx_out_file: with shared.bedtools_tmpdir(data): pybedtools.BedTool(cnv_file).filter(matches_sample_name).each(update_sample_name).saveas(tx_out_file) return sample_file
[ "def", "_prep_sample_cnvs", "(", "cnv_file", ",", "data", ")", ":", "import", "pybedtools", "sample_name", "=", "tz", ".", "get_in", "(", "[", "\"rgnames\"", ",", "\"sample\"", "]", ",", "data", ")", "def", "make_names", "(", "name", ")", ":", "return", ...
Convert a multiple sample CNV file into a single BED file for a sample. Handles matching and fixing names where R converts numerical IDs (1234) into strings by adding an X (X1234), and converts other characters into '.'s. http://stat.ethz.ch/R-manual/R-devel/library/base/html/make.names.html
[ "Convert", "a", "multiple", "sample", "CNV", "file", "into", "a", "single", "BED", "file", "for", "a", "sample", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L65-L87
train
218,291
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_run_on_chrom
def _run_on_chrom(chrom, work_bams, names, work_dir, items): """Run cn.mops on work BAMs for a specific chromosome. """ local_sitelib = utils.R_sitelib() batch = sshared.get_cur_batch(items) ext = "-%s-cnv" % batch if batch else "-cnv" out_file = os.path.join(work_dir, "%s%s-%s.bed" % (os.path.splitext(os.path.basename(work_bams[0]))[0], ext, chrom if chrom else "all")) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: rcode = "%s-run.R" % os.path.splitext(out_file)[0] with open(rcode, "w") as out_handle: out_handle.write(_script.format(prep_str=_prep_load_script(work_bams, names, chrom, items), out_file=tx_out_file, local_sitelib=local_sitelib)) rscript = utils.Rscript_cmd() try: do.run([rscript, "--no-environ", rcode], "cn.mops CNV detection", items[0], log_error=False) except subprocess.CalledProcessError as msg: # cn.mops errors out if no CNVs found. Just write an empty file. if _allowed_cnmops_errorstates(str(msg)): with open(tx_out_file, "w") as out_handle: out_handle.write('track name=empty description="No CNVs found"\n') else: logger.exception() raise return [out_file]
python
def _run_on_chrom(chrom, work_bams, names, work_dir, items): """Run cn.mops on work BAMs for a specific chromosome. """ local_sitelib = utils.R_sitelib() batch = sshared.get_cur_batch(items) ext = "-%s-cnv" % batch if batch else "-cnv" out_file = os.path.join(work_dir, "%s%s-%s.bed" % (os.path.splitext(os.path.basename(work_bams[0]))[0], ext, chrom if chrom else "all")) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: rcode = "%s-run.R" % os.path.splitext(out_file)[0] with open(rcode, "w") as out_handle: out_handle.write(_script.format(prep_str=_prep_load_script(work_bams, names, chrom, items), out_file=tx_out_file, local_sitelib=local_sitelib)) rscript = utils.Rscript_cmd() try: do.run([rscript, "--no-environ", rcode], "cn.mops CNV detection", items[0], log_error=False) except subprocess.CalledProcessError as msg: # cn.mops errors out if no CNVs found. Just write an empty file. if _allowed_cnmops_errorstates(str(msg)): with open(tx_out_file, "w") as out_handle: out_handle.write('track name=empty description="No CNVs found"\n') else: logger.exception() raise return [out_file]
[ "def", "_run_on_chrom", "(", "chrom", ",", "work_bams", ",", "names", ",", "work_dir", ",", "items", ")", ":", "local_sitelib", "=", "utils", ".", "R_sitelib", "(", ")", "batch", "=", "sshared", ".", "get_cur_batch", "(", "items", ")", "ext", "=", "\"-%s...
Run cn.mops on work BAMs for a specific chromosome.
[ "Run", "cn", ".", "mops", "on", "work", "BAMs", "for", "a", "specific", "chromosome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L91-L117
train
218,292
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_get_regional_bed_file
def _get_regional_bed_file(data): """If we are running a non-genome analysis, pull the regional file for analysis. """ variant_regions = bedutils.merge_overlaps(tz.get_in(["config", "algorithm", "variant_regions"], data), data) is_genome = data["config"]["algorithm"].get("coverage_interval", "exome").lower() in ["genome"] if variant_regions and utils.file_exists(variant_regions) and not is_genome: return variant_regions
python
def _get_regional_bed_file(data): """If we are running a non-genome analysis, pull the regional file for analysis. """ variant_regions = bedutils.merge_overlaps(tz.get_in(["config", "algorithm", "variant_regions"], data), data) is_genome = data["config"]["algorithm"].get("coverage_interval", "exome").lower() in ["genome"] if variant_regions and utils.file_exists(variant_regions) and not is_genome: return variant_regions
[ "def", "_get_regional_bed_file", "(", "data", ")", ":", "variant_regions", "=", "bedutils", ".", "merge_overlaps", "(", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"variant_regions\"", "]", ",", "data", ")", ",", "data", ")", "i...
If we are running a non-genome analysis, pull the regional file for analysis.
[ "If", "we", "are", "running", "a", "non", "-", "genome", "analysis", "pull", "the", "regional", "file", "for", "analysis", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L134-L141
train
218,293
bcbio/bcbio-nextgen
bcbio/structural/cn_mops.py
_population_load_script
def _population_load_script(work_bams, names, chrom, pairmode, items): """Prepare BAMs for assessing CNVs in a population. """ bed_file = _get_regional_bed_file(items[0]) if bed_file: return _population_prep_targeted.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode, bed_file=bed_file) else: return _population_prep.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode)
python
def _population_load_script(work_bams, names, chrom, pairmode, items): """Prepare BAMs for assessing CNVs in a population. """ bed_file = _get_regional_bed_file(items[0]) if bed_file: return _population_prep_targeted.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode, bed_file=bed_file) else: return _population_prep.format(bam_file_str=",".join(work_bams), names_str=",".join(names), chrom=chrom, num_cores=0, pairmode=pairmode)
[ "def", "_population_load_script", "(", "work_bams", ",", "names", ",", "chrom", ",", "pairmode", ",", "items", ")", ":", "bed_file", "=", "_get_regional_bed_file", "(", "items", "[", "0", "]", ")", "if", "bed_file", ":", "return", "_population_prep_targeted", ...
Prepare BAMs for assessing CNVs in a population.
[ "Prepare", "BAMs", "for", "assessing", "CNVs", "in", "a", "population", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cn_mops.py#L143-L152
train
218,294
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
tobam_cl
def tobam_cl(data, out_file, is_paired=False): """Prepare command line for producing de-duplicated sorted output. - If no deduplication, sort and prepare a BAM file. - If paired, then use samblaster and prepare discordant outputs. - If unpaired, use biobambam's bammarkduplicates """ do_dedup = _check_dedup(data) umi_consensus = dd.get_umi_consensus(data) with file_transaction(data, out_file) as tx_out_file: if not do_dedup: yield (sam_to_sortbam_cl(data, tx_out_file), tx_out_file) elif umi_consensus: yield (_sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file), tx_out_file) elif is_paired and _need_sr_disc_reads(data) and not _too_many_contigs(dd.get_ref_file(data)): sr_file = "%s-sr.bam" % os.path.splitext(out_file)[0] disc_file = "%s-disc.bam" % os.path.splitext(out_file)[0] with file_transaction(data, sr_file) as tx_sr_file: with file_transaction(data, disc_file) as tx_disc_file: yield (samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file), tx_out_file) else: yield (_biobambam_dedup_sort(data, tx_out_file), tx_out_file)
python
def tobam_cl(data, out_file, is_paired=False): """Prepare command line for producing de-duplicated sorted output. - If no deduplication, sort and prepare a BAM file. - If paired, then use samblaster and prepare discordant outputs. - If unpaired, use biobambam's bammarkduplicates """ do_dedup = _check_dedup(data) umi_consensus = dd.get_umi_consensus(data) with file_transaction(data, out_file) as tx_out_file: if not do_dedup: yield (sam_to_sortbam_cl(data, tx_out_file), tx_out_file) elif umi_consensus: yield (_sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file), tx_out_file) elif is_paired and _need_sr_disc_reads(data) and not _too_many_contigs(dd.get_ref_file(data)): sr_file = "%s-sr.bam" % os.path.splitext(out_file)[0] disc_file = "%s-disc.bam" % os.path.splitext(out_file)[0] with file_transaction(data, sr_file) as tx_sr_file: with file_transaction(data, disc_file) as tx_disc_file: yield (samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file), tx_out_file) else: yield (_biobambam_dedup_sort(data, tx_out_file), tx_out_file)
[ "def", "tobam_cl", "(", "data", ",", "out_file", ",", "is_paired", "=", "False", ")", ":", "do_dedup", "=", "_check_dedup", "(", "data", ")", "umi_consensus", "=", "dd", ".", "get_umi_consensus", "(", "data", ")", "with", "file_transaction", "(", "data", "...
Prepare command line for producing de-duplicated sorted output. - If no deduplication, sort and prepare a BAM file. - If paired, then use samblaster and prepare discordant outputs. - If unpaired, use biobambam's bammarkduplicates
[ "Prepare", "command", "line", "for", "producing", "de", "-", "duplicated", "sorted", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L30-L52
train
218,295
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
_get_cores_memory
def _get_cores_memory(data, downscale=2): """Retrieve cores and memory, using samtools as baseline. For memory, scaling down because we share with alignment and de-duplication. """ resources = config_utils.get_resources("samtools", data["config"]) num_cores = data["config"]["algorithm"].get("num_cores", 1) max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), downscale, "decrease").upper() return num_cores, max_mem
python
def _get_cores_memory(data, downscale=2): """Retrieve cores and memory, using samtools as baseline. For memory, scaling down because we share with alignment and de-duplication. """ resources = config_utils.get_resources("samtools", data["config"]) num_cores = data["config"]["algorithm"].get("num_cores", 1) max_mem = config_utils.adjust_memory(resources.get("memory", "2G"), downscale, "decrease").upper() return num_cores, max_mem
[ "def", "_get_cores_memory", "(", "data", ",", "downscale", "=", "2", ")", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"samtools\"", ",", "data", "[", "\"config\"", "]", ")", "num_cores", "=", "data", "[", "\"config\"", "]", "[", "\"...
Retrieve cores and memory, using samtools as baseline. For memory, scaling down because we share with alignment and de-duplication.
[ "Retrieve", "cores", "and", "memory", "using", "samtools", "as", "baseline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L69-L78
train
218,296
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
sam_to_sortbam_cl
def sam_to_sortbam_cl(data, tx_out_file, name_sort=False): """Convert to sorted BAM output. Set name_sort to True to sort reads by queryname """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] sort_flag = "-n" if name_sort else "" return ("{samtools} sort -@ {cores} -m {mem} {sort_flag} " "-T {tmp_file} -o {tx_out_file} /dev/stdin".format(**locals()))
python
def sam_to_sortbam_cl(data, tx_out_file, name_sort=False): """Convert to sorted BAM output. Set name_sort to True to sort reads by queryname """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] sort_flag = "-n" if name_sort else "" return ("{samtools} sort -@ {cores} -m {mem} {sort_flag} " "-T {tmp_file} -o {tx_out_file} /dev/stdin".format(**locals()))
[ "def", "sam_to_sortbam_cl", "(", "data", ",", "tx_out_file", ",", "name_sort", "=", "False", ")", ":", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "data", "[", "\"config\"", "]", ")", "cores", ",", "mem", "=", "_get_cores_m...
Convert to sorted BAM output. Set name_sort to True to sort reads by queryname
[ "Convert", "to", "sorted", "BAM", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L80-L90
train
218,297
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
samblaster_dedup_sort
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file): """Deduplicate and sort with samblaster, produces split read and discordant pair files. """ samblaster = config_utils.get_program("samblaster", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] tobam_cmd = ("{samtools} sort {sort_opt} -@ {cores} -m {mem} -T {tmp_prefix}-{dext} {out_file} -") # full BAM -- associate more memory and cores cores, mem = _get_cores_memory(data, downscale=2) # Potentially downsample to maximum coverage here if not splitting and whole genome sample ds_cmd = None if data.get("align_split") else bam.get_maxcov_downsample_cl(data, "samtools") sort_opt = "-n" if data.get("align_split") and dd.get_mark_duplicates(data) else "" if ds_cmd: dedup_cmd = "%s %s > %s" % (tobam_cmd.format(out_file="", dext="full", **locals()), ds_cmd, tx_out_file) else: dedup_cmd = tobam_cmd.format(out_file="-o %s" % tx_out_file, dext="full", **locals()) # split and discordant BAMs -- give less memory/cores since smaller files sort_opt = "" cores, mem = _get_cores_memory(data, downscale=4) splitter_cmd = tobam_cmd.format(out_file="-o %s" % tx_sr_file, dext="spl", **locals()) discordant_cmd = tobam_cmd.format(out_file="-o %s" % tx_disc_file, dext="disc", **locals()) # samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem cmd = ("{samblaster} --addMateTags -M --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) " "| {dedup_cmd}") return cmd.format(**locals())
python
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file): """Deduplicate and sort with samblaster, produces split read and discordant pair files. """ samblaster = config_utils.get_program("samblaster", data["config"]) samtools = config_utils.get_program("samtools", data["config"]) tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] tobam_cmd = ("{samtools} sort {sort_opt} -@ {cores} -m {mem} -T {tmp_prefix}-{dext} {out_file} -") # full BAM -- associate more memory and cores cores, mem = _get_cores_memory(data, downscale=2) # Potentially downsample to maximum coverage here if not splitting and whole genome sample ds_cmd = None if data.get("align_split") else bam.get_maxcov_downsample_cl(data, "samtools") sort_opt = "-n" if data.get("align_split") and dd.get_mark_duplicates(data) else "" if ds_cmd: dedup_cmd = "%s %s > %s" % (tobam_cmd.format(out_file="", dext="full", **locals()), ds_cmd, tx_out_file) else: dedup_cmd = tobam_cmd.format(out_file="-o %s" % tx_out_file, dext="full", **locals()) # split and discordant BAMs -- give less memory/cores since smaller files sort_opt = "" cores, mem = _get_cores_memory(data, downscale=4) splitter_cmd = tobam_cmd.format(out_file="-o %s" % tx_sr_file, dext="spl", **locals()) discordant_cmd = tobam_cmd.format(out_file="-o %s" % tx_disc_file, dext="disc", **locals()) # samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem cmd = ("{samblaster} --addMateTags -M --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) " "| {dedup_cmd}") return cmd.format(**locals())
[ "def", "samblaster_dedup_sort", "(", "data", ",", "tx_out_file", ",", "tx_sr_file", ",", "tx_disc_file", ")", ":", "samblaster", "=", "config_utils", ".", "get_program", "(", "\"samblaster\"", ",", "data", "[", "\"config\"", "]", ")", "samtools", "=", "config_ut...
Deduplicate and sort with samblaster, produces split read and discordant pair files.
[ "Deduplicate", "and", "sort", "with", "samblaster", "produces", "split", "read", "and", "discordant", "pair", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L92-L116
train
218,298
bcbio/bcbio-nextgen
bcbio/ngsalign/postalign.py
_biobambam_dedup_sort
def _biobambam_dedup_sort(data, tx_out_file): """Perform streaming deduplication and sorting with biobambam's bamsormadup """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] if data.get("align_split"): sort_opt = "-n" if data.get("align_split") and _check_dedup(data) else "" cmd = "{samtools} sort %s -@ {cores} -m {mem} -O bam -T {tmp_file}-namesort -o {tx_out_file} -" % sort_opt else: # scale core usage to avoid memory issues with larger WGS samples cores = max(1, int(math.ceil(cores * 0.75))) ds_cmd = bam.get_maxcov_downsample_cl(data, "bamsormadup") bamsormadup = config_utils.get_program("bamsormadup", data) cmd = ("{bamsormadup} inputformat=sam threads={cores} tmpfile={tmp_file}-markdup " "SO=coordinate %s > {tx_out_file}" % ds_cmd) return cmd.format(**locals())
python
def _biobambam_dedup_sort(data, tx_out_file): """Perform streaming deduplication and sorting with biobambam's bamsormadup """ samtools = config_utils.get_program("samtools", data["config"]) cores, mem = _get_cores_memory(data, downscale=2) tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] if data.get("align_split"): sort_opt = "-n" if data.get("align_split") and _check_dedup(data) else "" cmd = "{samtools} sort %s -@ {cores} -m {mem} -O bam -T {tmp_file}-namesort -o {tx_out_file} -" % sort_opt else: # scale core usage to avoid memory issues with larger WGS samples cores = max(1, int(math.ceil(cores * 0.75))) ds_cmd = bam.get_maxcov_downsample_cl(data, "bamsormadup") bamsormadup = config_utils.get_program("bamsormadup", data) cmd = ("{bamsormadup} inputformat=sam threads={cores} tmpfile={tmp_file}-markdup " "SO=coordinate %s > {tx_out_file}" % ds_cmd) return cmd.format(**locals())
[ "def", "_biobambam_dedup_sort", "(", "data", ",", "tx_out_file", ")", ":", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "data", "[", "\"config\"", "]", ")", "cores", ",", "mem", "=", "_get_cores_memory", "(", "data", ",", "d...
Perform streaming deduplication and sorting with biobambam's bamsormadup
[ "Perform", "streaming", "deduplication", "and", "sorting", "with", "biobambam", "s", "bamsormadup" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/postalign.py#L118-L134
train
218,299