repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_fill_prioritization_targets
def _fill_prioritization_targets(data):
    """Fill in globally installed files for prioritization.

    For "svprioritize" and "coverage" algorithm keys whose configured value is
    not an existing local file or a remote object, look up a matching installed
    BED file next to the genome reference (coverage/prioritize directory) or,
    for svprioritize, alongside the simple_sv_annotation.py script. Updates the
    configuration in place and returns the (possibly rebuilt) data dictionary.

    Raises ValueError when an svprioritize input cannot be resolved; missing
    coverage inputs are left as-is since they can be filled in later.
    """
    ref_file = dd.get_ref_file(data)
    for target in ["svprioritize", "coverage"]:
        val = tz.get_in(["config", "algorithm", target], data)
        if val and not os.path.exists(val) and not objectstore.is_remote(val):
            installed_vals = []
            # Check prioritize directory
            for ext in [".bed", ".bed.gz"]:
                installed_vals += glob.glob(os.path.normpath(os.path.join(
                    os.path.dirname(ref_file), os.pardir, "coverage", "prioritize",
                    val + "*%s" % ext)))
            # Check sv-annotation directory for prioritize gene name lists
            if target == "svprioritize":
                simple_sv_bin = utils.which("simple_sv_annotation.py")
                if simple_sv_bin:
                    installed_vals += glob.glob(os.path.join(
                        os.path.dirname(os.path.realpath(simple_sv_bin)),
                        "%s*" % os.path.basename(val)))
            if len(installed_vals) == 0:
                # some targets can be filled in later
                if target not in set(["coverage"]):
                    raise ValueError("Configuration problem. BED file not found for %s: %s" %
                                     (target, val))
                else:
                    installed_val = val
            elif len(installed_vals) == 1:
                installed_val = installed_vals[0]
            else:
                # check for partial matches
                installed_val = None
                for v in installed_vals:
                    if v.endswith(val + ".bed.gz") or v.endswith(val + ".bed"):
                        installed_val = v
                        break
                # handle date-stamped inputs: reverse lexical sort picks the
                # newest stamp (presumably YYYY-MM-DD style names -- confirm)
                if not installed_val:
                    installed_val = sorted(installed_vals, reverse=True)[0]
            data = tz.update_in(data, ["config", "algorithm", target], lambda x: installed_val)
    return data
python
def _fill_prioritization_targets(data):
    """Fill in globally installed files for prioritization.

    For "svprioritize" and "coverage" algorithm keys whose configured value is
    not an existing local file or a remote object, look up a matching installed
    BED file next to the genome reference (coverage/prioritize directory) or,
    for svprioritize, alongside the simple_sv_annotation.py script. Updates the
    configuration in place and returns the (possibly rebuilt) data dictionary.

    Raises ValueError when an svprioritize input cannot be resolved; missing
    coverage inputs are left as-is since they can be filled in later.
    """
    ref_file = dd.get_ref_file(data)
    for target in ["svprioritize", "coverage"]:
        val = tz.get_in(["config", "algorithm", target], data)
        if val and not os.path.exists(val) and not objectstore.is_remote(val):
            installed_vals = []
            # Check prioritize directory
            for ext in [".bed", ".bed.gz"]:
                installed_vals += glob.glob(os.path.normpath(os.path.join(
                    os.path.dirname(ref_file), os.pardir, "coverage", "prioritize",
                    val + "*%s" % ext)))
            # Check sv-annotation directory for prioritize gene name lists
            if target == "svprioritize":
                simple_sv_bin = utils.which("simple_sv_annotation.py")
                if simple_sv_bin:
                    installed_vals += glob.glob(os.path.join(
                        os.path.dirname(os.path.realpath(simple_sv_bin)),
                        "%s*" % os.path.basename(val)))
            if len(installed_vals) == 0:
                # some targets can be filled in later
                if target not in set(["coverage"]):
                    raise ValueError("Configuration problem. BED file not found for %s: %s" %
                                     (target, val))
                else:
                    installed_val = val
            elif len(installed_vals) == 1:
                installed_val = installed_vals[0]
            else:
                # check for partial matches
                installed_val = None
                for v in installed_vals:
                    if v.endswith(val + ".bed.gz") or v.endswith(val + ".bed"):
                        installed_val = v
                        break
                # handle date-stamped inputs: reverse lexical sort picks the
                # newest stamp (presumably YYYY-MM-DD style names -- confirm)
                if not installed_val:
                    installed_val = sorted(installed_vals, reverse=True)[0]
            data = tz.update_in(data, ["config", "algorithm", target], lambda x: installed_val)
    return data
[ "def", "_fill_prioritization_targets", "(", "data", ")", ":", "ref_file", "=", "dd", ".", "get_ref_file", "(", "data", ")", "for", "target", "in", "[", "\"svprioritize\"", ",", "\"coverage\"", "]", ":", "val", "=", "tz", ".", "get_in", "(", "[", "\"config\...
Fill in globally installed files for prioritization.
[ "Fill", "in", "globally", "installed", "files", "for", "prioritization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L276-L315
train
219,200
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_clean_algorithm
def _clean_algorithm(data):
    """Clean algorithm keys, handling items that can be specified as lists or single items.

    Normalizes variantcaller/jointcaller/svcaller values: a single string
    becomes a one-element list, and a single-item list holding a false-y or
    "none"/"false" value collapses to False (caller disabled).
    """
    # convert single items to lists
    for key in ["variantcaller", "jointcaller", "svcaller"]:
        val = tz.get_in(["algorithm", key], data)
        if val:
            if not isinstance(val, (list, tuple)) and isinstance(val, six.string_types):
                val = [val]
            # check for cases like [false] or [None]
            if isinstance(val, (list, tuple)):
                # Parenthesized so the "none"/"false" test only applies to
                # single-item lists; the previous `a and b or c` precedence let
                # a multi-caller list starting with "none" collapse to False.
                if len(val) == 1 and (not val[0] or
                                      (isinstance(val[0], six.string_types) and
                                       val[0].lower() in ["none", "false"])):
                    val = False
            data["algorithm"][key] = val
    return data
python
def _clean_algorithm(data):
    """Clean algorithm keys, handling items that can be specified as lists or single items.

    Normalizes variantcaller/jointcaller/svcaller values: a single string
    becomes a one-element list, and a single-item list holding a false-y or
    "none"/"false" value collapses to False (caller disabled).
    """
    # convert single items to lists
    for key in ["variantcaller", "jointcaller", "svcaller"]:
        val = tz.get_in(["algorithm", key], data)
        if val:
            if not isinstance(val, (list, tuple)) and isinstance(val, six.string_types):
                val = [val]
            # check for cases like [false] or [None]
            if isinstance(val, (list, tuple)):
                # Parenthesized so the "none"/"false" test only applies to
                # single-item lists; the previous `a and b or c` precedence let
                # a multi-caller list starting with "none" collapse to False.
                if len(val) == 1 and (not val[0] or
                                      (isinstance(val[0], six.string_types) and
                                       val[0].lower() in ["none", "false"])):
                    val = False
            data["algorithm"][key] = val
    return data
[ "def", "_clean_algorithm", "(", "data", ")", ":", "# convert single items to lists", "for", "key", "in", "[", "\"variantcaller\"", ",", "\"jointcaller\"", ",", "\"svcaller\"", "]", ":", "val", "=", "tz", ".", "get_in", "(", "[", "\"algorithm\"", ",", "key", "]...
Clean algorithm keys, handling items that can be specified as lists or single items.
[ "Clean", "algorithm", "keys", "handling", "items", "that", "can", "be", "specified", "as", "lists", "or", "single", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L336-L351
train
219,201
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_organize_tools_on
def _organize_tools_on(data, is_cwl):
    """Ensure tools_on inputs match items specified elsewhere.

    CWL runs with a jointcaller require gVCF output, so force "gvcf" into the
    algorithm tools_on list in that case. Returns the updated data dictionary.
    """
    # want tools_on: [gvcf] if joint calling specified in CWL
    if is_cwl:
        if tz.get_in(["algorithm", "jointcaller"], data):
            val = tz.get_in(["algorithm", "tools_on"], data)
            if not val:
                val = []
            if not isinstance(val, (list, tuple)):
                val = [val]
            if "gvcf" not in val:
                val.append("gvcf")
            data["algorithm"]["tools_on"] = val
    return data
python
def _organize_tools_on(data, is_cwl):
    """Ensure tools_on inputs match items specified elsewhere.

    CWL runs with a jointcaller require gVCF output, so force "gvcf" into the
    algorithm tools_on list in that case. Returns the updated data dictionary.
    """
    # want tools_on: [gvcf] if joint calling specified in CWL
    if is_cwl:
        if tz.get_in(["algorithm", "jointcaller"], data):
            val = tz.get_in(["algorithm", "tools_on"], data)
            if not val:
                val = []
            if not isinstance(val, (list, tuple)):
                val = [val]
            if "gvcf" not in val:
                val.append("gvcf")
            data["algorithm"]["tools_on"] = val
    return data
[ "def", "_organize_tools_on", "(", "data", ",", "is_cwl", ")", ":", "# want tools_on: [gvcf] if joint calling specified in CWL", "if", "is_cwl", ":", "if", "tz", ".", "get_in", "(", "[", "\"algorithm\"", ",", "\"jointcaller\"", "]", ",", "data", ")", ":", "val", ...
Ensure tools_on inputs match items specified elsewhere.
[ "Ensure", "tools_on", "inputs", "match", "items", "specified", "elsewhere", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L353-L367
train
219,202
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_clean_background
def _clean_background(data):
    """Clean up background specification, remaining back compatible.

    Accepts either the old-style single string (treated as the "variant"
    background) or a dict limited to "variant"/"cnv_reference" keys; file
    paths are made absolute relative to the current directory. Raises
    ValueError for unexpected keys or input types.
    """
    allowed_keys = set(["variant", "cnv_reference"])
    val = tz.get_in(["algorithm", "background"], data)
    errors = []
    if val:
        out = {}
        # old style specification, single string for variant
        if isinstance(val, six.string_types):
            out["variant"] = _file_to_abs(val, [os.getcwd()])
        elif isinstance(val, dict):
            for k, v in val.items():
                if k in allowed_keys:
                    if isinstance(v, six.string_types):
                        out[k] = _file_to_abs(v, [os.getcwd()])
                    else:
                        # nested mapping of names to files (mutated in place)
                        assert isinstance(v, dict)
                        for ik, iv in v.items():
                            v[ik] = _file_to_abs(iv, [os.getcwd()])
                        out[k] = v
                else:
                    errors.append("Unexpected key: %s" % k)
        else:
            errors.append("Unexpected input: %s" % val)
        if errors:
            raise ValueError("Problematic algorithm background specification for %s:\n %s" %
                             (data["description"], "\n".join(errors)))
        out["cnv_reference"] = structural.standardize_cnv_reference(
            {"config": data, "description": data["description"]})
        data["algorithm"]["background"] = out
    return data
python
def _clean_background(data):
    """Clean up background specification, remaining back compatible.

    Accepts either the old-style single string (treated as the "variant"
    background) or a dict limited to "variant"/"cnv_reference" keys; file
    paths are made absolute relative to the current directory. Raises
    ValueError for unexpected keys or input types.
    """
    allowed_keys = set(["variant", "cnv_reference"])
    val = tz.get_in(["algorithm", "background"], data)
    errors = []
    if val:
        out = {}
        # old style specification, single string for variant
        if isinstance(val, six.string_types):
            out["variant"] = _file_to_abs(val, [os.getcwd()])
        elif isinstance(val, dict):
            for k, v in val.items():
                if k in allowed_keys:
                    if isinstance(v, six.string_types):
                        out[k] = _file_to_abs(v, [os.getcwd()])
                    else:
                        # nested mapping of names to files (mutated in place)
                        assert isinstance(v, dict)
                        for ik, iv in v.items():
                            v[ik] = _file_to_abs(iv, [os.getcwd()])
                        out[k] = v
                else:
                    errors.append("Unexpected key: %s" % k)
        else:
            errors.append("Unexpected input: %s" % val)
        if errors:
            raise ValueError("Problematic algorithm background specification for %s:\n %s" %
                             (data["description"], "\n".join(errors)))
        out["cnv_reference"] = structural.standardize_cnv_reference(
            {"config": data, "description": data["description"]})
        data["algorithm"]["background"] = out
    return data
[ "def", "_clean_background", "(", "data", ")", ":", "allowed_keys", "=", "set", "(", "[", "\"variant\"", ",", "\"cnv_reference\"", "]", ")", "val", "=", "tz", ".", "get_in", "(", "[", "\"algorithm\"", ",", "\"background\"", "]", ",", "data", ")", "errors", ...
Clean up background specification, remaining back compatible.
[ "Clean", "up", "background", "specification", "remaining", "back", "compatible", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L369-L400
train
219,203
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_clean_characters
def _clean_characters(x):
    """Clean problem characters in sample lane or descriptions.

    Non-strings are coerced with str(); strings containing non-ASCII
    characters raise ValueError. Shell/path-unsafe characters are replaced
    with underscores.
    """
    if not isinstance(x, six.string_types):
        x = str(x)
    else:
        if not all(ord(char) < 128 for char in x):
            msg = "Found unicode character in input YAML (%s)" % (x)
            raise ValueError(repr(msg))
    for problem in [" ", ".", "/", "\\", "[", "]", "&", ";", "#", "+", ":", ")", "("]:
        x = x.replace(problem, "_")
    return x
python
def _clean_characters(x):
    """Clean problem characters in sample lane or descriptions.

    Non-strings are coerced with str(); strings containing non-ASCII
    characters raise ValueError. Shell/path-unsafe characters are replaced
    with underscores.
    """
    if not isinstance(x, six.string_types):
        x = str(x)
    else:
        if not all(ord(char) < 128 for char in x):
            msg = "Found unicode character in input YAML (%s)" % (x)
            raise ValueError(repr(msg))
    for problem in [" ", ".", "/", "\\", "[", "]", "&", ";", "#", "+", ":", ")", "("]:
        x = x.replace(problem, "_")
    return x
[ "def", "_clean_characters", "(", "x", ")", ":", "if", "not", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", ":", "x", "=", "str", "(", "x", ")", "else", ":", "if", "not", "all", "(", "ord", "(", "char", ")", "<", "128", "for", "c...
Clean problem characters in sample lane or descriptions.
[ "Clean", "problem", "characters", "in", "sample", "lane", "or", "descriptions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L402-L413
train
219,204
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
prep_rg_names
def prep_rg_names(item, config, fc_name, fc_date):
    """Generate read group names from item inputs.

    Builds the BAM read group dictionary (rg/sample/lane/pl/lb/pu) from the
    sample description, flowcell name/date and configured metadata.
    """
    if fc_name and fc_date:
        lane_name = "%s_%s_%s" % (item["lane"], fc_date, fc_name)
    else:
        lane_name = item["description"]
    return {"rg": item["description"],
            "sample": item["description"],
            "lane": lane_name,
            # Default to illumina when no platform is configured. The previous
            # duplicated get_in call with an "illumina" default still returned
            # an explicit null platform value and crashed on .lower().
            "pl": (tz.get_in(["algorithm", "platform"], item) or "illumina").lower(),
            "lb": tz.get_in(["metadata", "library"], item),
            "pu": tz.get_in(["metadata", "platform_unit"], item) or lane_name}
python
def prep_rg_names(item, config, fc_name, fc_date):
    """Generate read group names from item inputs.

    Builds the BAM read group dictionary (rg/sample/lane/pl/lb/pu) from the
    sample description, flowcell name/date and configured metadata.
    """
    if fc_name and fc_date:
        lane_name = "%s_%s_%s" % (item["lane"], fc_date, fc_name)
    else:
        lane_name = item["description"]
    return {"rg": item["description"],
            "sample": item["description"],
            "lane": lane_name,
            # Default to illumina when no platform is configured. The previous
            # duplicated get_in call with an "illumina" default still returned
            # an explicit null platform value and crashed on .lower().
            "pl": (tz.get_in(["algorithm", "platform"], item) or "illumina").lower(),
            "lb": tz.get_in(["metadata", "library"], item),
            "pu": tz.get_in(["metadata", "platform_unit"], item) or lane_name}
[ "def", "prep_rg_names", "(", "item", ",", "config", ",", "fc_name", ",", "fc_date", ")", ":", "if", "fc_name", "and", "fc_date", ":", "lane_name", "=", "\"%s_%s_%s\"", "%", "(", "item", "[", "\"lane\"", "]", ",", "fc_date", ",", "fc_name", ")", "else", ...
Generate read group names from item inputs.
[ "Generate", "read", "group", "names", "from", "item", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L415-L428
train
219,205
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_for_duplicates
def _check_for_duplicates(xs, attr, check_fn=None): """Identify and raise errors on duplicate items. """ dups = [] for key, vals in itertools.groupby(x[attr] for x in xs): if len(list(vals)) > 1: dups.append(key) if len(dups) > 0: psamples = [] for x in xs: if x[attr] in dups: psamples.append(x) # option to skip problem based on custom input function. if check_fn and check_fn(psamples): return descrs = [x["description"] for x in psamples] raise ValueError("Duplicate '%s' found in input sample configuration.\n" "Required to be unique for a project: %s\n" "Problem found in these samples: %s" % (attr, dups, descrs))
python
def _check_for_duplicates(xs, attr, check_fn=None): """Identify and raise errors on duplicate items. """ dups = [] for key, vals in itertools.groupby(x[attr] for x in xs): if len(list(vals)) > 1: dups.append(key) if len(dups) > 0: psamples = [] for x in xs: if x[attr] in dups: psamples.append(x) # option to skip problem based on custom input function. if check_fn and check_fn(psamples): return descrs = [x["description"] for x in psamples] raise ValueError("Duplicate '%s' found in input sample configuration.\n" "Required to be unique for a project: %s\n" "Problem found in these samples: %s" % (attr, dups, descrs))
[ "def", "_check_for_duplicates", "(", "xs", ",", "attr", ",", "check_fn", "=", "None", ")", ":", "dups", "=", "[", "]", "for", "key", ",", "vals", "in", "itertools", ".", "groupby", "(", "x", "[", "attr", "]", "for", "x", "in", "xs", ")", ":", "if...
Identify and raise errors on duplicate items.
[ "Identify", "and", "raise", "errors", "on", "duplicate", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L432-L450
train
219,206
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_for_batch_clashes
def _check_for_batch_clashes(xs):
    """Check that batch names do not overlap with sample names.

    Raises ValueError listing any batch name that is also used as a sample
    description.
    """
    names = set([x["description"] for x in xs])
    dups = set([])
    for x in xs:
        batches = tz.get_in(("metadata", "batch"), x)
        if batches:
            if not isinstance(batches, (list, tuple)):
                batches = [batches]
            for batch in batches:
                if batch in names:
                    dups.add(batch)
    if len(dups) > 0:
        raise ValueError("Batch names must be unique from sample descriptions.\n"
                         "Clashing batch names: %s" % sorted(list(dups)))
python
def _check_for_batch_clashes(xs):
    """Check that batch names do not overlap with sample names.

    Raises ValueError listing any batch name that is also used as a sample
    description.
    """
    names = set([x["description"] for x in xs])
    dups = set([])
    for x in xs:
        batches = tz.get_in(("metadata", "batch"), x)
        if batches:
            if not isinstance(batches, (list, tuple)):
                batches = [batches]
            for batch in batches:
                if batch in names:
                    dups.add(batch)
    if len(dups) > 0:
        raise ValueError("Batch names must be unique from sample descriptions.\n"
                         "Clashing batch names: %s" % sorted(list(dups)))
[ "def", "_check_for_batch_clashes", "(", "xs", ")", ":", "names", "=", "set", "(", "[", "x", "[", "\"description\"", "]", "for", "x", "in", "xs", "]", ")", "dups", "=", "set", "(", "[", "]", ")", "for", "x", "in", "xs", ":", "batches", "=", "tz", ...
Check that batch names do not overlap with sample names.
[ "Check", "that", "batch", "names", "do", "not", "overlap", "with", "sample", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L452-L467
train
219,207
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_for_problem_somatic_batches
def _check_for_problem_somatic_batches(items, config):
    """Identify problem batch setups for somatic calling.

    We do not support multiple tumors in a single batch and VarDict(Java)
    does not handle pooled calling, only tumor/normal. Raises ValueError for
    unsupported multi-sample batches.
    """
    to_check = []
    for data in items:
        # deep copy so merging custom config does not mutate caller inputs
        data = copy.deepcopy(data)
        data["config"] = config_utils.update_w_custom(config, data)
        to_check.append(data)
    data_by_batches = collections.defaultdict(list)
    for data in to_check:
        batches = dd.get_batches(data)
        if batches:
            for batch in batches:
                data_by_batches[batch].append(data)
    # batch_items renamed from `items` to avoid shadowing the function argument
    for batch, batch_items in data_by_batches.items():
        if vcfutils.get_paired(batch_items):
            vcfutils.check_paired_problems(batch_items)
        elif len(batch_items) > 1:
            vcs = vcfutils.get_somatic_variantcallers(batch_items)
            if "vardict" in vcs:
                raise ValueError("VarDict does not support pooled non-tumor/normal calling, in batch %s: %s"
                                 % (batch, [dd.get_sample_name(data) for data in batch_items]))
            elif "mutect" in vcs or "mutect2" in vcs:
                raise ValueError("MuTect and MuTect2 require a 'phenotype: tumor' sample for calling, "
                                 "in batch %s: %s" % (batch, [dd.get_sample_name(data) for data in batch_items]))
python
def _check_for_problem_somatic_batches(items, config):
    """Identify problem batch setups for somatic calling.

    We do not support multiple tumors in a single batch and VarDict(Java)
    does not handle pooled calling, only tumor/normal. Raises ValueError for
    unsupported multi-sample batches.
    """
    to_check = []
    for data in items:
        # deep copy so merging custom config does not mutate caller inputs
        data = copy.deepcopy(data)
        data["config"] = config_utils.update_w_custom(config, data)
        to_check.append(data)
    data_by_batches = collections.defaultdict(list)
    for data in to_check:
        batches = dd.get_batches(data)
        if batches:
            for batch in batches:
                data_by_batches[batch].append(data)
    # batch_items renamed from `items` to avoid shadowing the function argument
    for batch, batch_items in data_by_batches.items():
        if vcfutils.get_paired(batch_items):
            vcfutils.check_paired_problems(batch_items)
        elif len(batch_items) > 1:
            vcs = vcfutils.get_somatic_variantcallers(batch_items)
            if "vardict" in vcs:
                raise ValueError("VarDict does not support pooled non-tumor/normal calling, in batch %s: %s"
                                 % (batch, [dd.get_sample_name(data) for data in batch_items]))
            elif "mutect" in vcs or "mutect2" in vcs:
                raise ValueError("MuTect and MuTect2 require a 'phenotype: tumor' sample for calling, "
                                 "in batch %s: %s" % (batch, [dd.get_sample_name(data) for data in batch_items]))
[ "def", "_check_for_problem_somatic_batches", "(", "items", ",", "config", ")", ":", "to_check", "=", "[", "]", "for", "data", "in", "items", ":", "data", "=", "copy", ".", "deepcopy", "(", "data", ")", "data", "[", "\"config\"", "]", "=", "config_utils", ...
Identify problem batch setups for somatic calling. We do not support multiple tumors in a single batch and VarDict(Java) does not handle pooled calling, only tumor/normal.
[ "Identify", "problem", "batch", "setups", "for", "somatic", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L469-L497
train
219,208
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_for_misplaced
def _check_for_misplaced(xs, subkey, other_keys): """Ensure configuration keys are not incorrectly nested under other keys. """ problems = [] for x in xs: check_dict = x.get(subkey, {}) for to_check in other_keys: if to_check in check_dict: problems.append((x["description"], to_check, subkey)) if len(problems) > 0: raise ValueError("\n".join(["Incorrectly nested keys found in sample YAML. These should be top level:", " sample | key name | nested under ", "----------------+-----------------+----------------"] + ["% 15s | % 15s | % 15s" % (a, b, c) for (a, b, c) in problems]))
python
def _check_for_misplaced(xs, subkey, other_keys): """Ensure configuration keys are not incorrectly nested under other keys. """ problems = [] for x in xs: check_dict = x.get(subkey, {}) for to_check in other_keys: if to_check in check_dict: problems.append((x["description"], to_check, subkey)) if len(problems) > 0: raise ValueError("\n".join(["Incorrectly nested keys found in sample YAML. These should be top level:", " sample | key name | nested under ", "----------------+-----------------+----------------"] + ["% 15s | % 15s | % 15s" % (a, b, c) for (a, b, c) in problems]))
[ "def", "_check_for_misplaced", "(", "xs", ",", "subkey", ",", "other_keys", ")", ":", "problems", "=", "[", "]", "for", "x", "in", "xs", ":", "check_dict", "=", "x", ".", "get", "(", "subkey", ",", "{", "}", ")", "for", "to_check", "in", "other_keys"...
Ensure configuration keys are not incorrectly nested under other keys.
[ "Ensure", "configuration", "keys", "are", "not", "incorrectly", "nested", "under", "other", "keys", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L499-L512
train
219,209
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_for_degenerate_interesting_groups
def _check_for_degenerate_interesting_groups(items):
    """Make sure interesting_groups specify existing metadata and that the
    interesting_group is not all of the same for all of the samples.

    Raises ValueError when a group is absent from all sample metadata or when
    every sample shares the same value for it.
    """
    igkey = ("algorithm", "bcbiornaseq", "interesting_groups")
    interesting_groups = tz.get_in(igkey, items[0], [])
    if isinstance(interesting_groups, str):
        interesting_groups = [interesting_groups]
    for group in interesting_groups:
        values = [tz.get_in(("metadata", group), x, None) for x in items]
        if all(x is None for x in values):
            raise ValueError("group %s is labelled as an interesting group, "
                             "but does not appear in the metadata." % group)
        if len(list(tz.unique(values))) == 1:
            raise ValueError("group %s is marked as an interesting group, "
                             "but all samples have the same value." % group)
python
def _check_for_degenerate_interesting_groups(items):
    """Make sure interesting_groups specify existing metadata and that the
    interesting_group is not all of the same for all of the samples.

    Raises ValueError when a group is absent from all sample metadata or when
    every sample shares the same value for it.
    """
    igkey = ("algorithm", "bcbiornaseq", "interesting_groups")
    interesting_groups = tz.get_in(igkey, items[0], [])
    if isinstance(interesting_groups, str):
        interesting_groups = [interesting_groups]
    for group in interesting_groups:
        values = [tz.get_in(("metadata", group), x, None) for x in items]
        if all(x is None for x in values):
            raise ValueError("group %s is labelled as an interesting group, "
                             "but does not appear in the metadata." % group)
        if len(list(tz.unique(values))) == 1:
            raise ValueError("group %s is marked as an interesting group, "
                             "but all samples have the same value." % group)
[ "def", "_check_for_degenerate_interesting_groups", "(", "items", ")", ":", "igkey", "=", "(", "\"algorithm\"", ",", "\"bcbiornaseq\"", ",", "\"interesting_groups\"", ")", "interesting_groups", "=", "tz", ".", "get_in", "(", "igkey", ",", "items", "[", "0", "]", ...
Make sure interesting_groups specify existing metadata and that the interesting_group is not all of the same for all of the samples
[ "Make", "sure", "interesting_groups", "specify", "existing", "metadata", "and", "that", "the", "interesting_group", "is", "not", "all", "of", "the", "same", "for", "all", "of", "the", "samples" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L514-L529
train
219,210
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_algorithm_keys
def _check_algorithm_keys(item):
    """Check for unexpected keys in the algorithm section.

    Needs to be manually updated when introducing new keys, but avoids silent
    bugs with typos in key names. Raises ValueError for unknown keys.
    """
    problem_keys = [k for k in item["algorithm"].keys() if k not in ALGORITHM_KEYS]
    if len(problem_keys) > 0:
        raise ValueError("Unexpected configuration keyword in 'algorithm' section: %s\n"
                         "See configuration documentation for supported options:\n%s\n"
                         % (problem_keys, ALG_DOC_URL))
python
def _check_algorithm_keys(item):
    """Check for unexpected keys in the algorithm section.

    Needs to be manually updated when introducing new keys, but avoids silent
    bugs with typos in key names. Raises ValueError for unknown keys.
    """
    problem_keys = [k for k in item["algorithm"].keys() if k not in ALGORITHM_KEYS]
    if len(problem_keys) > 0:
        raise ValueError("Unexpected configuration keyword in 'algorithm' section: %s\n"
                         "See configuration documentation for supported options:\n%s\n"
                         % (problem_keys, ALG_DOC_URL))
[ "def", "_check_algorithm_keys", "(", "item", ")", ":", "problem_keys", "=", "[", "k", "for", "k", "in", "item", "[", "\"algorithm\"", "]", ".", "keys", "(", ")", "if", "k", "not", "in", "ALGORITHM_KEYS", "]", "if", "len", "(", "problem_keys", ")", ">",...
Check for unexpected keys in the algorithm section. Needs to be manually updated when introducing new keys, but avoids silent bugs with typos in key names.
[ "Check", "for", "unexpected", "keys", "in", "the", "algorithm", "section", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L575-L585
train
219,211
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_algorithm_values
def _check_algorithm_values(item):
    """Check for misplaced inputs in the algorithms.

    - Identify incorrect boolean values where a choice is required.
    Raises ValueError listing all offending keys.
    """
    problems = []
    for k, v in item.get("algorithm", {}).items():
        if v is True and k not in ALG_ALLOW_BOOLEANS:
            problems.append("%s set as true" % k)
        elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE):
            problems.append("%s set as false" % k)
    if len(problems) > 0:
        raise ValueError("Incorrect settings in 'algorithm' section for %s:\n%s"
                         "\nSee configuration documentation for supported options:\n%s\n"
                         % (item["description"], "\n".join(problems), ALG_DOC_URL))
python
def _check_algorithm_values(item):
    """Check for misplaced inputs in the algorithms.

    - Identify incorrect boolean values where a choice is required.
    Raises ValueError listing all offending keys.
    """
    problems = []
    for k, v in item.get("algorithm", {}).items():
        if v is True and k not in ALG_ALLOW_BOOLEANS:
            problems.append("%s set as true" % k)
        elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE):
            problems.append("%s set as false" % k)
    if len(problems) > 0:
        raise ValueError("Incorrect settings in 'algorithm' section for %s:\n%s"
                         "\nSee configuration documentation for supported options:\n%s\n"
                         % (item["description"], "\n".join(problems), ALG_DOC_URL))
[ "def", "_check_algorithm_values", "(", "item", ")", ":", "problems", "=", "[", "]", "for", "k", ",", "v", "in", "item", ".", "get", "(", "\"algorithm\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "v", "is", "True", "and", "k", "not",...
Check for misplaced inputs in the algorithms. - Identify incorrect boolean values where a choice is required.
[ "Check", "for", "misplaced", "inputs", "in", "the", "algorithms", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L587-L601
train
219,212
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_toplevel_misplaced
def _check_toplevel_misplaced(item):
    """Check for algorithm keys accidentally placed at the top level.

    Also rejects any top-level key outside the known TOPLEVEL_KEYS set.
    """
    problem_keys = [k for k in item.keys() if k in ALGORITHM_KEYS]
    if len(problem_keys) > 0:
        raise ValueError("Unexpected configuration keywords found in top level of %s: %s\n"
                         "This should be placed in the 'algorithm' section."
                         % (item["description"], problem_keys))
    problem_keys = [k for k in item.keys() if k not in TOPLEVEL_KEYS]
    if len(problem_keys) > 0:
        raise ValueError("Unexpected configuration keywords found in top level of %s: %s\n"
                         % (item["description"], problem_keys))
python
def _check_toplevel_misplaced(item):
    """Check for algorithm keys accidentally placed at the top level.

    Also rejects any top-level key outside the known TOPLEVEL_KEYS set.
    """
    problem_keys = [k for k in item.keys() if k in ALGORITHM_KEYS]
    if len(problem_keys) > 0:
        raise ValueError("Unexpected configuration keywords found in top level of %s: %s\n"
                         "This should be placed in the 'algorithm' section."
                         % (item["description"], problem_keys))
    problem_keys = [k for k in item.keys() if k not in TOPLEVEL_KEYS]
    if len(problem_keys) > 0:
        raise ValueError("Unexpected configuration keywords found in top level of %s: %s\n"
                         % (item["description"], problem_keys))
[ "def", "_check_toplevel_misplaced", "(", "item", ")", ":", "problem_keys", "=", "[", "k", "for", "k", "in", "item", ".", "keys", "(", ")", "if", "k", "in", "ALGORITHM_KEYS", "]", "if", "len", "(", "problem_keys", ")", ">", "0", ":", "raise", "ValueErro...
Check for algorithm keys accidentally placed at the top level.
[ "Check", "for", "algorithm", "keys", "accidentally", "placed", "at", "the", "top", "level", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L604-L615
train
219,213
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_quality_format
def _check_quality_format(items):
    """Check if quality_format="standard" and fastq_format is not sanger.

    Validates the configured quality_format against supported encodings and,
    for local fastq inputs, against the encoding detected from the file
    itself. Raises ValueError on mismatches.
    """
    SAMPLE_FORMAT = {"illumina_1.3+": "illumina",
                     "illumina_1.5+": "illumina",
                     "illumina_1.8+": "standard",
                     "solexa": "solexa",
                     "sanger": "standard"}
    fastq_extensions = ["fq.gz", "fastq.gz", ".fastq", ".fq"]
    for item in items:
        specified_format = item["algorithm"].get("quality_format", "standard").lower()
        if specified_format not in SAMPLE_FORMAT.values():
            # fixed missing space between the two literals ("file" + "is")
            raise ValueError("Quality format specified in the YAML file "
                             "is not supported. Supported values are %s."
                             % (SAMPLE_FORMAT.values()))
        fastq_file = next((f for f in item.get("files") or []
                           if f.endswith(tuple(fastq_extensions))), None)
        if fastq_file and specified_format and not objectstore.is_remote(fastq_file):
            fastq_format = _detect_fastq_format(fastq_file)
            detected_encodings = set([SAMPLE_FORMAT[x] for x in fastq_format])
            if detected_encodings:
                if specified_format not in detected_encodings:
                    raise ValueError("Quality format specified in the YAML "
                                     "file might be a different encoding. "
                                     "'%s' was specified but possible formats "
                                     "detected were %s." % (specified_format,
                                                            ", ".join(detected_encodings)))
python
def _check_quality_format(items): """ Check if quality_format="standard" and fastq_format is not sanger """ SAMPLE_FORMAT = {"illumina_1.3+": "illumina", "illumina_1.5+": "illumina", "illumina_1.8+": "standard", "solexa": "solexa", "sanger": "standard"} fastq_extensions = ["fq.gz", "fastq.gz", ".fastq", ".fq"] for item in items: specified_format = item["algorithm"].get("quality_format", "standard").lower() if specified_format not in SAMPLE_FORMAT.values(): raise ValueError("Quality format specified in the YAML file" "is not supported. Supported values are %s." % (SAMPLE_FORMAT.values())) fastq_file = next((f for f in item.get("files") or [] if f.endswith(tuple(fastq_extensions))), None) if fastq_file and specified_format and not objectstore.is_remote(fastq_file): fastq_format = _detect_fastq_format(fastq_file) detected_encodings = set([SAMPLE_FORMAT[x] for x in fastq_format]) if detected_encodings: if specified_format not in detected_encodings: raise ValueError("Quality format specified in the YAML " "file might be a different encoding. " "'%s' was specified but possible formats " "detected were %s." % (specified_format, ", ".join(detected_encodings)))
[ "def", "_check_quality_format", "(", "items", ")", ":", "SAMPLE_FORMAT", "=", "{", "\"illumina_1.3+\"", ":", "\"illumina\"", ",", "\"illumina_1.5+\"", ":", "\"illumina\"", ",", "\"illumina_1.8+\"", ":", "\"standard\"", ",", "\"solexa\"", ":", "\"solexa\"", ",", "\"s...
Check if quality_format="standard" and fastq_format is not sanger
[ "Check", "if", "quality_format", "=", "standard", "and", "fastq_format", "is", "not", "sanger" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L648-L677
train
219,214
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_aligner
def _check_aligner(item): """Ensure specified aligner is valid choice. """ allowed = set(list(alignment.TOOLS.keys()) + [None, False]) if item["algorithm"].get("aligner") not in allowed: raise ValueError("Unexpected algorithm 'aligner' parameter: %s\n" "Supported options: %s\n" % (item["algorithm"].get("aligner"), sorted(list(allowed))))
python
def _check_aligner(item): """Ensure specified aligner is valid choice. """ allowed = set(list(alignment.TOOLS.keys()) + [None, False]) if item["algorithm"].get("aligner") not in allowed: raise ValueError("Unexpected algorithm 'aligner' parameter: %s\n" "Supported options: %s\n" % (item["algorithm"].get("aligner"), sorted(list(allowed))))
[ "def", "_check_aligner", "(", "item", ")", ":", "allowed", "=", "set", "(", "list", "(", "alignment", ".", "TOOLS", ".", "keys", "(", ")", ")", "+", "[", "None", ",", "False", "]", ")", "if", "item", "[", "\"algorithm\"", "]", ".", "get", "(", "\...
Ensure specified aligner is valid choice.
[ "Ensure", "specified", "aligner", "is", "valid", "choice", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L680-L687
train
219,215
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_variantcaller
def _check_variantcaller(item): """Ensure specified variantcaller is a valid choice. """ allowed = set(list(genotype.get_variantcallers().keys()) + [None, False]) vcs = item["algorithm"].get("variantcaller") if not isinstance(vcs, dict): vcs = {"variantcaller": vcs} for vc_set in vcs.values(): if not isinstance(vc_set, (tuple, list)): vc_set = [vc_set] problem = [x for x in vc_set if x not in allowed] if len(problem) > 0: raise ValueError("Unexpected algorithm 'variantcaller' parameter: %s\n" "Supported options: %s\n" % (problem, sorted(list(allowed)))) # Ensure germline somatic calling only specified with tumor/normal samples if "germline" in vcs or "somatic" in vcs: paired = vcfutils.get_paired_phenotype(item) if not paired: raise ValueError("%s: somatic/germline calling in 'variantcaller' " "but tumor/normal metadata phenotype not specified" % dd.get_sample_name(item))
python
def _check_variantcaller(item): """Ensure specified variantcaller is a valid choice. """ allowed = set(list(genotype.get_variantcallers().keys()) + [None, False]) vcs = item["algorithm"].get("variantcaller") if not isinstance(vcs, dict): vcs = {"variantcaller": vcs} for vc_set in vcs.values(): if not isinstance(vc_set, (tuple, list)): vc_set = [vc_set] problem = [x for x in vc_set if x not in allowed] if len(problem) > 0: raise ValueError("Unexpected algorithm 'variantcaller' parameter: %s\n" "Supported options: %s\n" % (problem, sorted(list(allowed)))) # Ensure germline somatic calling only specified with tumor/normal samples if "germline" in vcs or "somatic" in vcs: paired = vcfutils.get_paired_phenotype(item) if not paired: raise ValueError("%s: somatic/germline calling in 'variantcaller' " "but tumor/normal metadata phenotype not specified" % dd.get_sample_name(item))
[ "def", "_check_variantcaller", "(", "item", ")", ":", "allowed", "=", "set", "(", "list", "(", "genotype", ".", "get_variantcallers", "(", ")", ".", "keys", "(", ")", ")", "+", "[", "None", ",", "False", "]", ")", "vcs", "=", "item", "[", "\"algorith...
Ensure specified variantcaller is a valid choice.
[ "Ensure", "specified", "variantcaller", "is", "a", "valid", "choice", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L689-L708
train
219,216
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_svcaller
def _check_svcaller(item): """Ensure the provide structural variant caller is valid. """ allowed = set(reduce(operator.add, [list(d.keys()) for d in structural._CALLERS.values()]) + [None, False]) svs = item["algorithm"].get("svcaller") if not isinstance(svs, (list, tuple)): svs = [svs] problem = [x for x in svs if x not in allowed] if len(problem) > 0: raise ValueError("Unexpected algorithm 'svcaller' parameters: %s\n" "Supported options: %s\n" % (" ".join(["'%s'" % x for x in problem]), sorted(list(allowed))))
python
def _check_svcaller(item): """Ensure the provide structural variant caller is valid. """ allowed = set(reduce(operator.add, [list(d.keys()) for d in structural._CALLERS.values()]) + [None, False]) svs = item["algorithm"].get("svcaller") if not isinstance(svs, (list, tuple)): svs = [svs] problem = [x for x in svs if x not in allowed] if len(problem) > 0: raise ValueError("Unexpected algorithm 'svcaller' parameters: %s\n" "Supported options: %s\n" % (" ".join(["'%s'" % x for x in problem]), sorted(list(allowed))))
[ "def", "_check_svcaller", "(", "item", ")", ":", "allowed", "=", "set", "(", "reduce", "(", "operator", ".", "add", ",", "[", "list", "(", "d", ".", "keys", "(", ")", ")", "for", "d", "in", "structural", ".", "_CALLERS", ".", "values", "(", ")", ...
Ensure the provide structural variant caller is valid.
[ "Ensure", "the", "provide", "structural", "variant", "caller", "is", "valid", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L710-L721
train
219,217
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_hetcaller
def _check_hetcaller(item): """Ensure upstream SV callers requires to heterogeneity analysis are available. """ svs = _get_as_list(item, "svcaller") hets = _get_as_list(item, "hetcaller") if hets or any([x in svs for x in ["titancna", "purecn"]]): if not any([x in svs for x in ["cnvkit", "gatk-cnv"]]): raise ValueError("Heterogeneity caller used but need CNV calls. Add `gatk4-cnv` " "or `cnvkit` to `svcaller` in sample: %s" % item["description"])
python
def _check_hetcaller(item): """Ensure upstream SV callers requires to heterogeneity analysis are available. """ svs = _get_as_list(item, "svcaller") hets = _get_as_list(item, "hetcaller") if hets or any([x in svs for x in ["titancna", "purecn"]]): if not any([x in svs for x in ["cnvkit", "gatk-cnv"]]): raise ValueError("Heterogeneity caller used but need CNV calls. Add `gatk4-cnv` " "or `cnvkit` to `svcaller` in sample: %s" % item["description"])
[ "def", "_check_hetcaller", "(", "item", ")", ":", "svs", "=", "_get_as_list", "(", "item", ",", "\"svcaller\"", ")", "hets", "=", "_get_as_list", "(", "item", ",", "\"hetcaller\"", ")", "if", "hets", "or", "any", "(", "[", "x", "in", "svs", "for", "x",...
Ensure upstream SV callers requires to heterogeneity analysis are available.
[ "Ensure", "upstream", "SV", "callers", "requires", "to", "heterogeneity", "analysis", "are", "available", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L731-L739
train
219,218
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_jointcaller
def _check_jointcaller(data): """Ensure specified jointcaller is valid. """ allowed = set(joint.get_callers() + [None, False]) cs = data["algorithm"].get("jointcaller", []) if not isinstance(cs, (tuple, list)): cs = [cs] problem = [x for x in cs if x not in allowed] if len(problem) > 0: raise ValueError("Unexpected algorithm 'jointcaller' parameter: %s\n" "Supported options: %s\n" % (problem, sorted(list(allowed), key=lambda x: x or "")))
python
def _check_jointcaller(data): """Ensure specified jointcaller is valid. """ allowed = set(joint.get_callers() + [None, False]) cs = data["algorithm"].get("jointcaller", []) if not isinstance(cs, (tuple, list)): cs = [cs] problem = [x for x in cs if x not in allowed] if len(problem) > 0: raise ValueError("Unexpected algorithm 'jointcaller' parameter: %s\n" "Supported options: %s\n" % (problem, sorted(list(allowed), key=lambda x: x or "")))
[ "def", "_check_jointcaller", "(", "data", ")", ":", "allowed", "=", "set", "(", "joint", ".", "get_callers", "(", ")", "+", "[", "None", ",", "False", "]", ")", "cs", "=", "data", "[", "\"algorithm\"", "]", ".", "get", "(", "\"jointcaller\"", ",", "[...
Ensure specified jointcaller is valid.
[ "Ensure", "specified", "jointcaller", "is", "valid", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L741-L751
train
219,219
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_realign
def _check_realign(data): """Check for realignment, which is not supported in GATK4 """ if "gatk4" not in data["algorithm"].get("tools_off", []) and not "gatk4" == data["algorithm"].get("tools_off"): if data["algorithm"].get("realign"): raise ValueError("In sample %s, realign specified but it is not supported for GATK4. " "Realignment is generally not necessary for most variant callers." % (dd.get_sample_name(data)))
python
def _check_realign(data): """Check for realignment, which is not supported in GATK4 """ if "gatk4" not in data["algorithm"].get("tools_off", []) and not "gatk4" == data["algorithm"].get("tools_off"): if data["algorithm"].get("realign"): raise ValueError("In sample %s, realign specified but it is not supported for GATK4. " "Realignment is generally not necessary for most variant callers." % (dd.get_sample_name(data)))
[ "def", "_check_realign", "(", "data", ")", ":", "if", "\"gatk4\"", "not", "in", "data", "[", "\"algorithm\"", "]", ".", "get", "(", "\"tools_off\"", ",", "[", "]", ")", "and", "not", "\"gatk4\"", "==", "data", "[", "\"algorithm\"", "]", ".", "get", "("...
Check for realignment, which is not supported in GATK4
[ "Check", "for", "realignment", "which", "is", "not", "supported", "in", "GATK4" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L767-L774
train
219,220
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_trim
def _check_trim(data): """Check for valid values for trim_reads. """ trim = data["algorithm"].get("trim_reads") if trim: if trim == "fastp" and data["algorithm"].get("align_split_size") is not False: raise ValueError("In sample %s, `trim_reads: fastp` currently requires `align_split_size: false`" % (dd.get_sample_name(data)))
python
def _check_trim(data): """Check for valid values for trim_reads. """ trim = data["algorithm"].get("trim_reads") if trim: if trim == "fastp" and data["algorithm"].get("align_split_size") is not False: raise ValueError("In sample %s, `trim_reads: fastp` currently requires `align_split_size: false`" % (dd.get_sample_name(data)))
[ "def", "_check_trim", "(", "data", ")", ":", "trim", "=", "data", "[", "\"algorithm\"", "]", ".", "get", "(", "\"trim_reads\"", ")", "if", "trim", ":", "if", "trim", "==", "\"fastp\"", "and", "data", "[", "\"algorithm\"", "]", ".", "get", "(", "\"align...
Check for valid values for trim_reads.
[ "Check", "for", "valid", "values", "for", "trim_reads", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L776-L783
train
219,221
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_check_sample_config
def _check_sample_config(items, in_file, config): """Identify common problems in input sample configuration files. """ logger.info("Checking sample YAML configuration: %s" % in_file) _check_quality_format(items) _check_for_duplicates(items, "lane") _check_for_duplicates(items, "description") _check_for_degenerate_interesting_groups(items) _check_for_batch_clashes(items) _check_for_problem_somatic_batches(items, config) _check_for_misplaced(items, "algorithm", ["resources", "metadata", "analysis", "description", "genome_build", "lane", "files"]) [_check_toplevel_misplaced(x) for x in items] [_check_algorithm_keys(x) for x in items] [_check_algorithm_values(x) for x in items] [_check_aligner(x) for x in items] [_check_variantcaller(x) for x in items] [_check_svcaller(x) for x in items] [_check_hetcaller(x) for x in items] [_check_indelcaller(x) for x in items] [_check_jointcaller(x) for x in items] [_check_hlacaller(x) for x in items] [_check_realign(x) for x in items] [_check_trim(x) for x in items]
python
def _check_sample_config(items, in_file, config): """Identify common problems in input sample configuration files. """ logger.info("Checking sample YAML configuration: %s" % in_file) _check_quality_format(items) _check_for_duplicates(items, "lane") _check_for_duplicates(items, "description") _check_for_degenerate_interesting_groups(items) _check_for_batch_clashes(items) _check_for_problem_somatic_batches(items, config) _check_for_misplaced(items, "algorithm", ["resources", "metadata", "analysis", "description", "genome_build", "lane", "files"]) [_check_toplevel_misplaced(x) for x in items] [_check_algorithm_keys(x) for x in items] [_check_algorithm_values(x) for x in items] [_check_aligner(x) for x in items] [_check_variantcaller(x) for x in items] [_check_svcaller(x) for x in items] [_check_hetcaller(x) for x in items] [_check_indelcaller(x) for x in items] [_check_jointcaller(x) for x in items] [_check_hlacaller(x) for x in items] [_check_realign(x) for x in items] [_check_trim(x) for x in items]
[ "def", "_check_sample_config", "(", "items", ",", "in_file", ",", "config", ")", ":", "logger", ".", "info", "(", "\"Checking sample YAML configuration: %s\"", "%", "in_file", ")", "_check_quality_format", "(", "items", ")", "_check_for_duplicates", "(", "items", ",...
Identify common problems in input sample configuration files.
[ "Identify", "common", "problems", "in", "input", "sample", "configuration", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L786-L811
train
219,222
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_file_to_abs
def _file_to_abs(x, dnames, makedir=False): """Make a file absolute using the supplied base directory choices. """ if x is None or os.path.isabs(x): return x elif isinstance(x, six.string_types) and objectstore.is_remote(x): return x elif isinstance(x, six.string_types) and x.lower() == "none": return None else: for dname in dnames: if dname: normx = os.path.normpath(os.path.join(dname, x)) if os.path.exists(normx): return normx elif makedir: utils.safe_makedir(normx) return normx raise ValueError("Did not find input file %s in %s" % (x, dnames))
python
def _file_to_abs(x, dnames, makedir=False): """Make a file absolute using the supplied base directory choices. """ if x is None or os.path.isabs(x): return x elif isinstance(x, six.string_types) and objectstore.is_remote(x): return x elif isinstance(x, six.string_types) and x.lower() == "none": return None else: for dname in dnames: if dname: normx = os.path.normpath(os.path.join(dname, x)) if os.path.exists(normx): return normx elif makedir: utils.safe_makedir(normx) return normx raise ValueError("Did not find input file %s in %s" % (x, dnames))
[ "def", "_file_to_abs", "(", "x", ",", "dnames", ",", "makedir", "=", "False", ")", ":", "if", "x", "is", "None", "or", "os", ".", "path", ".", "isabs", "(", "x", ")", ":", "return", "x", "elif", "isinstance", "(", "x", ",", "six", ".", "string_ty...
Make a file absolute using the supplied base directory choices.
[ "Make", "a", "file", "absolute", "using", "the", "supplied", "base", "directory", "choices", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L815-L833
train
219,223
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_normalize_files
def _normalize_files(item, fc_dir=None): """Ensure the files argument is a list of absolute file names. Handles BAM, single and paired end fastq, as well as split inputs. """ files = item.get("files") if files: if isinstance(files, six.string_types): files = [files] fastq_dir = flowcell.get_fastq_dir(fc_dir) if fc_dir else os.getcwd() files = [_file_to_abs(x, [os.getcwd(), fc_dir, fastq_dir]) for x in files] files = [x for x in files if x] _sanity_check_files(item, files) item["files"] = files return item
python
def _normalize_files(item, fc_dir=None): """Ensure the files argument is a list of absolute file names. Handles BAM, single and paired end fastq, as well as split inputs. """ files = item.get("files") if files: if isinstance(files, six.string_types): files = [files] fastq_dir = flowcell.get_fastq_dir(fc_dir) if fc_dir else os.getcwd() files = [_file_to_abs(x, [os.getcwd(), fc_dir, fastq_dir]) for x in files] files = [x for x in files if x] _sanity_check_files(item, files) item["files"] = files return item
[ "def", "_normalize_files", "(", "item", ",", "fc_dir", "=", "None", ")", ":", "files", "=", "item", ".", "get", "(", "\"files\"", ")", "if", "files", ":", "if", "isinstance", "(", "files", ",", "six", ".", "string_types", ")", ":", "files", "=", "[",...
Ensure the files argument is a list of absolute file names. Handles BAM, single and paired end fastq, as well as split inputs.
[ "Ensure", "the", "files", "argument", "is", "a", "list", "of", "absolute", "file", "names", ".", "Handles", "BAM", "single", "and", "paired", "end", "fastq", "as", "well", "as", "split", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L835-L848
train
219,224
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_sanity_check_files
def _sanity_check_files(item, files): """Ensure input files correspond with supported approaches. Handles BAM, fastqs, plus split fastqs. """ msg = None file_types = set([("bam" if x.endswith(".bam") else "fastq") for x in files if x]) if len(file_types) > 1: msg = "Found multiple file types (BAM and fastq)" file_type = file_types.pop() if file_type == "bam": if len(files) != 1: msg = "Expect a single BAM file input as input" elif file_type == "fastq": if len(files) not in [1, 2] and item["analysis"].lower() != "scrna-seq": pair_types = set([len(xs) for xs in fastq.combine_pairs(files)]) if len(pair_types) != 1 or pair_types.pop() not in [1, 2]: msg = "Expect either 1 (single end) or 2 (paired end) fastq inputs" if len(files) == 2 and files[0] == files[1]: msg = "Expect both fastq files to not be the same" if msg: raise ValueError("%s for %s: %s" % (msg, item.get("description", ""), files))
python
def _sanity_check_files(item, files): """Ensure input files correspond with supported approaches. Handles BAM, fastqs, plus split fastqs. """ msg = None file_types = set([("bam" if x.endswith(".bam") else "fastq") for x in files if x]) if len(file_types) > 1: msg = "Found multiple file types (BAM and fastq)" file_type = file_types.pop() if file_type == "bam": if len(files) != 1: msg = "Expect a single BAM file input as input" elif file_type == "fastq": if len(files) not in [1, 2] and item["analysis"].lower() != "scrna-seq": pair_types = set([len(xs) for xs in fastq.combine_pairs(files)]) if len(pair_types) != 1 or pair_types.pop() not in [1, 2]: msg = "Expect either 1 (single end) or 2 (paired end) fastq inputs" if len(files) == 2 and files[0] == files[1]: msg = "Expect both fastq files to not be the same" if msg: raise ValueError("%s for %s: %s" % (msg, item.get("description", ""), files))
[ "def", "_sanity_check_files", "(", "item", ",", "files", ")", ":", "msg", "=", "None", "file_types", "=", "set", "(", "[", "(", "\"bam\"", "if", "x", ".", "endswith", "(", "\".bam\"", ")", "else", "\"fastq\"", ")", "for", "x", "in", "files", "if", "x...
Ensure input files correspond with supported approaches. Handles BAM, fastqs, plus split fastqs.
[ "Ensure", "input", "files", "correspond", "with", "supported", "approaches", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L850-L871
train
219,225
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
add_metadata_defaults
def add_metadata_defaults(md): """Central location for defaults for algorithm inputs. """ defaults = {"batch": None, "phenotype": ""} for k, v in defaults.items(): if k not in md: md[k] = v return md
python
def add_metadata_defaults(md): """Central location for defaults for algorithm inputs. """ defaults = {"batch": None, "phenotype": ""} for k, v in defaults.items(): if k not in md: md[k] = v return md
[ "def", "add_metadata_defaults", "(", "md", ")", ":", "defaults", "=", "{", "\"batch\"", ":", "None", ",", "\"phenotype\"", ":", "\"\"", "}", "for", "k", ",", "v", "in", "defaults", ".", "items", "(", ")", ":", "if", "k", "not", "in", "md", ":", "md...
Central location for defaults for algorithm inputs.
[ "Central", "location", "for", "defaults", "for", "algorithm", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L1031-L1039
train
219,226
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_add_algorithm_defaults
def _add_algorithm_defaults(algorithm, analysis, is_cwl): """Central location specifying defaults for algorithm inputs. Converts allowed multiple inputs into lists if specified as a single item. Converts required single items into string if specified as a list """ if not algorithm: algorithm = {} defaults = {"archive": None, "tools_off": [], "tools_on": [], "qc": [], "trim_reads": False, "adapters": [], "effects": "snpeff", "quality_format": "standard", "expression_caller": ["salmon"] if analysis.lower().find("rna-seq") >= 0 else None, "align_split_size": None, "bam_clean": False, "nomap_split_size": 250, "nomap_split_targets": _get_nomap_split_targets(analysis, is_cwl), "mark_duplicates": False if not algorithm.get("aligner") else True, "coverage_interval": None, "min_allele_fraction": 10.0, "recalibrate": False, "realign": False, "ensemble": None, "exclude_regions": [], "variant_regions": None, "svcaller": [], "svvalidate": None, "svprioritize": None, "validate": None, "validate_regions": None, "vcfanno": []} convert_to_list = set(["tools_off", "tools_on", "hetcaller", "variantcaller", "svcaller", "qc", "disambiguate", "vcfanno", "adapters", "custom_trim", "exclude_regions"]) convert_to_single = set(["hlacaller", "indelcaller", "validate_method"]) for k, v in defaults.items(): if k not in algorithm: algorithm[k] = v for k, v in algorithm.items(): if k in convert_to_list: if v and not isinstance(v, (list, tuple)) and not isinstance(v, dict): algorithm[k] = [v] # ensure dictionary specified inputs get converted into individual lists elif v and not isinstance(v, (list, tuple)) and isinstance(v, dict): new = {} for innerk, innerv in v.items(): if innerv and not isinstance(innerv, (list, tuple)) and not isinstance(innerv, dict): innerv = [innerv] new[innerk] = innerv algorithm[k] = new elif v is None: algorithm[k] = [] elif k in convert_to_single: if v and not isinstance(v, six.string_types): if isinstance(v, (list, tuple)) and len(v) == 1: algorithm[k] = v[0] else: 
raise ValueError("Unexpected input in sample YAML; need a single item for %s: %s" % (k, v)) return algorithm
python
def _add_algorithm_defaults(algorithm, analysis, is_cwl): """Central location specifying defaults for algorithm inputs. Converts allowed multiple inputs into lists if specified as a single item. Converts required single items into string if specified as a list """ if not algorithm: algorithm = {} defaults = {"archive": None, "tools_off": [], "tools_on": [], "qc": [], "trim_reads": False, "adapters": [], "effects": "snpeff", "quality_format": "standard", "expression_caller": ["salmon"] if analysis.lower().find("rna-seq") >= 0 else None, "align_split_size": None, "bam_clean": False, "nomap_split_size": 250, "nomap_split_targets": _get_nomap_split_targets(analysis, is_cwl), "mark_duplicates": False if not algorithm.get("aligner") else True, "coverage_interval": None, "min_allele_fraction": 10.0, "recalibrate": False, "realign": False, "ensemble": None, "exclude_regions": [], "variant_regions": None, "svcaller": [], "svvalidate": None, "svprioritize": None, "validate": None, "validate_regions": None, "vcfanno": []} convert_to_list = set(["tools_off", "tools_on", "hetcaller", "variantcaller", "svcaller", "qc", "disambiguate", "vcfanno", "adapters", "custom_trim", "exclude_regions"]) convert_to_single = set(["hlacaller", "indelcaller", "validate_method"]) for k, v in defaults.items(): if k not in algorithm: algorithm[k] = v for k, v in algorithm.items(): if k in convert_to_list: if v and not isinstance(v, (list, tuple)) and not isinstance(v, dict): algorithm[k] = [v] # ensure dictionary specified inputs get converted into individual lists elif v and not isinstance(v, (list, tuple)) and isinstance(v, dict): new = {} for innerk, innerv in v.items(): if innerv and not isinstance(innerv, (list, tuple)) and not isinstance(innerv, dict): innerv = [innerv] new[innerk] = innerv algorithm[k] = new elif v is None: algorithm[k] = [] elif k in convert_to_single: if v and not isinstance(v, six.string_types): if isinstance(v, (list, tuple)) and len(v) == 1: algorithm[k] = v[0] else: 
raise ValueError("Unexpected input in sample YAML; need a single item for %s: %s" % (k, v)) return algorithm
[ "def", "_add_algorithm_defaults", "(", "algorithm", ",", "analysis", ",", "is_cwl", ")", ":", "if", "not", "algorithm", ":", "algorithm", "=", "{", "}", "defaults", "=", "{", "\"archive\"", ":", "None", ",", "\"tools_off\"", ":", "[", "]", ",", "\"tools_on...
Central location specifying defaults for algorithm inputs. Converts allowed multiple inputs into lists if specified as a single item. Converts required single items into string if specified as a list
[ "Central", "location", "specifying", "defaults", "for", "algorithm", "inputs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L1055-L1116
train
219,227
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
_replace_global_vars
def _replace_global_vars(xs, global_vars): """Replace globally shared names from input header with value. The value of the `algorithm` item may be a pointer to a real file specified in the `global` section. If found, replace with the full value. """ if isinstance(xs, (list, tuple)): return [_replace_global_vars(x) for x in xs] elif isinstance(xs, dict): final = {} for k, v in xs.items(): if isinstance(v, six.string_types) and v in global_vars: v = global_vars[v] final[k] = v return final else: return xs
python
def _replace_global_vars(xs, global_vars): """Replace globally shared names from input header with value. The value of the `algorithm` item may be a pointer to a real file specified in the `global` section. If found, replace with the full value. """ if isinstance(xs, (list, tuple)): return [_replace_global_vars(x) for x in xs] elif isinstance(xs, dict): final = {} for k, v in xs.items(): if isinstance(v, six.string_types) and v in global_vars: v = global_vars[v] final[k] = v return final else: return xs
[ "def", "_replace_global_vars", "(", "xs", ",", "global_vars", ")", ":", "if", "isinstance", "(", "xs", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "_replace_global_vars", "(", "x", ")", "for", "x", "in", "xs", "]", "elif", "isinstance...
Replace globally shared names from input header with value. The value of the `algorithm` item may be a pointer to a real file specified in the `global` section. If found, replace with the full value.
[ "Replace", "globally", "shared", "names", "from", "input", "header", "with", "value", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L1118-L1135
train
219,228
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
prep_system
def prep_system(run_info_yaml, bcbio_system=None): """Prepare system configuration information from an input configuration file. This does the work of parsing the system input file and setting up directories for use in 'organize'. """ work_dir = os.getcwd() config, config_file = config_utils.load_system_config(bcbio_system, work_dir) dirs = setup_directories(work_dir, os.path.normpath(os.path.dirname(os.path.dirname(run_info_yaml))), config, config_file) return [dirs, config, run_info_yaml]
python
def prep_system(run_info_yaml, bcbio_system=None): """Prepare system configuration information from an input configuration file. This does the work of parsing the system input file and setting up directories for use in 'organize'. """ work_dir = os.getcwd() config, config_file = config_utils.load_system_config(bcbio_system, work_dir) dirs = setup_directories(work_dir, os.path.normpath(os.path.dirname(os.path.dirname(run_info_yaml))), config, config_file) return [dirs, config, run_info_yaml]
[ "def", "prep_system", "(", "run_info_yaml", ",", "bcbio_system", "=", "None", ")", ":", "work_dir", "=", "os", ".", "getcwd", "(", ")", "config", ",", "config_file", "=", "config_utils", ".", "load_system_config", "(", "bcbio_system", ",", "work_dir", ")", "...
Prepare system configuration information from an input configuration file. This does the work of parsing the system input file and setting up directories for use in 'organize'.
[ "Prepare", "system", "configuration", "information", "from", "an", "input", "configuration", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L1150-L1160
train
219,229
bcbio/bcbio-nextgen
bcbio/variation/platypus.py
run
def run(align_bams, items, ref_file, assoc_files, region, out_file): """Run platypus variant calling, germline whole genome or exome. """ assert out_file.endswith(".vcf.gz") if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: for align_bam in align_bams: bam.index(align_bam, items[0]["config"]) cmd = ["platypus", "callVariants", "--regions=%s" % _subset_regions(region, out_file, items), "--bamFiles=%s" % ",".join(align_bams), "--refFile=%s" % dd.get_ref_file(items[0]), "--output=-", "--logFileName", "/dev/null", "--verbosity=1"] resources = config_utils.get_resources("platypus", items[0]["config"]) if resources.get("options"): # normalize options so we can set defaults without overwriting user specified for opt in resources["options"]: if "=" in opt: key, val = opt.split("=") cmd.extend([key, val]) else: cmd.append(opt) if any("gvcf" in dd.get_tools_on(d) for d in items): cmd += ["--outputRefCalls", "1", "--refCallBlockSize", "50000"] # Adjust default filter thresholds to achieve similar sensitivity/specificity to other callers # Currently not used after doing more cross validation as they increase false positives # which seems to be a major advantage for Platypus users. 
# tuned_opts = ["--hapScoreThreshold", "10", "--scThreshold", "0.99", "--filteredReadsFrac", "0.9", # "--rmsmqThreshold", "20", "--qdThreshold", "0", "--abThreshold", "0.0001", # "--minVarFreq", "0.0", "--assemble", "1"] # for okey, oval in utils.partition_all(2, tuned_opts): # if okey not in cmd: # cmd.extend([okey, oval]) # Avoid filtering duplicates on high depth targeted regions where we don't mark duplicates if any(not dd.get_mark_duplicates(data) for data in items): cmd += ["--filterDuplicates=0"] post_process_cmd = (" | %s | %s | %s | vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | " "vcfstreamsort | bgzip -c > %s" % (vcfutils.fix_ambiguous_cl(), vcfutils.fix_ambiguous_cl(5), vcfutils.add_contig_to_header_cl(dd.get_ref_file(items[0]), tx_out_file), tx_out_file)) do.run(" ".join(cmd) + post_process_cmd, "platypus variant calling") out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) return out_file
python
def run(align_bams, items, ref_file, assoc_files, region, out_file): """Run platypus variant calling, germline whole genome or exome. """ assert out_file.endswith(".vcf.gz") if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: for align_bam in align_bams: bam.index(align_bam, items[0]["config"]) cmd = ["platypus", "callVariants", "--regions=%s" % _subset_regions(region, out_file, items), "--bamFiles=%s" % ",".join(align_bams), "--refFile=%s" % dd.get_ref_file(items[0]), "--output=-", "--logFileName", "/dev/null", "--verbosity=1"] resources = config_utils.get_resources("platypus", items[0]["config"]) if resources.get("options"): # normalize options so we can set defaults without overwriting user specified for opt in resources["options"]: if "=" in opt: key, val = opt.split("=") cmd.extend([key, val]) else: cmd.append(opt) if any("gvcf" in dd.get_tools_on(d) for d in items): cmd += ["--outputRefCalls", "1", "--refCallBlockSize", "50000"] # Adjust default filter thresholds to achieve similar sensitivity/specificity to other callers # Currently not used after doing more cross validation as they increase false positives # which seems to be a major advantage for Platypus users. 
# tuned_opts = ["--hapScoreThreshold", "10", "--scThreshold", "0.99", "--filteredReadsFrac", "0.9", # "--rmsmqThreshold", "20", "--qdThreshold", "0", "--abThreshold", "0.0001", # "--minVarFreq", "0.0", "--assemble", "1"] # for okey, oval in utils.partition_all(2, tuned_opts): # if okey not in cmd: # cmd.extend([okey, oval]) # Avoid filtering duplicates on high depth targeted regions where we don't mark duplicates if any(not dd.get_mark_duplicates(data) for data in items): cmd += ["--filterDuplicates=0"] post_process_cmd = (" | %s | %s | %s | vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | " "vcfstreamsort | bgzip -c > %s" % (vcfutils.fix_ambiguous_cl(), vcfutils.fix_ambiguous_cl(5), vcfutils.add_contig_to_header_cl(dd.get_ref_file(items[0]), tx_out_file), tx_out_file)) do.run(" ".join(cmd) + post_process_cmd, "platypus variant calling") out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"]) return out_file
[ "def", "run", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", ",", "out_file", ")", ":", "assert", "out_file", ".", "endswith", "(", "\".vcf.gz\"", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ...
Run platypus variant calling, germline whole genome or exome.
[ "Run", "platypus", "variant", "calling", "germline", "whole", "genome", "or", "exome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/platypus.py#L19-L62
train
219,230
bcbio/bcbio-nextgen
bcbio/qc/srna.py
run
def run(bam_file, data, out_dir): """Create several log files""" m = {"base": None, "secondary": []} m.update(_mirbase_stats(data, out_dir)) m["secondary"].append(_seqcluster_stats(data, out_dir))
python
def run(bam_file, data, out_dir): """Create several log files""" m = {"base": None, "secondary": []} m.update(_mirbase_stats(data, out_dir)) m["secondary"].append(_seqcluster_stats(data, out_dir))
[ "def", "run", "(", "bam_file", ",", "data", ",", "out_dir", ")", ":", "m", "=", "{", "\"base\"", ":", "None", ",", "\"secondary\"", ":", "[", "]", "}", "m", ".", "update", "(", "_mirbase_stats", "(", "data", ",", "out_dir", ")", ")", "m", "[", "\...
Create several log files
[ "Create", "several", "log", "files" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/srna.py#L14-L18
train
219,231
bcbio/bcbio-nextgen
bcbio/qc/srna.py
_mirbase_stats
def _mirbase_stats(data, out_dir): """Create stats from miraligner""" utils.safe_makedir(out_dir) out_file = os.path.join(out_dir, "%s_bcbio_mirbase.txt" % dd.get_sample_name(data)) out_file_novel = os.path.join(out_dir, "%s_bcbio_mirdeeep2.txt" % dd.get_sample_name(data)) mirbase_fn = data.get("seqbuster", None) if mirbase_fn: _get_stats_from_miraligner(mirbase_fn, out_file, "seqbuster") mirdeep_fn = data.get("seqbuster_novel", None) if mirdeep_fn: _get_stats_from_miraligner(mirdeep_fn, out_file_novel, "mirdeep2") return {"base": out_file, "secondary": [out_file_novel]}
python
def _mirbase_stats(data, out_dir): """Create stats from miraligner""" utils.safe_makedir(out_dir) out_file = os.path.join(out_dir, "%s_bcbio_mirbase.txt" % dd.get_sample_name(data)) out_file_novel = os.path.join(out_dir, "%s_bcbio_mirdeeep2.txt" % dd.get_sample_name(data)) mirbase_fn = data.get("seqbuster", None) if mirbase_fn: _get_stats_from_miraligner(mirbase_fn, out_file, "seqbuster") mirdeep_fn = data.get("seqbuster_novel", None) if mirdeep_fn: _get_stats_from_miraligner(mirdeep_fn, out_file_novel, "mirdeep2") return {"base": out_file, "secondary": [out_file_novel]}
[ "def", "_mirbase_stats", "(", "data", ",", "out_dir", ")", ":", "utils", ".", "safe_makedir", "(", "out_dir", ")", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"%s_bcbio_mirbase.txt\"", "%", "dd", ".", "get_sample_name", "(", "da...
Create stats from miraligner
[ "Create", "stats", "from", "miraligner" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/srna.py#L20-L31
train
219,232
bcbio/bcbio-nextgen
bcbio/qc/srna.py
_seqcluster_stats
def _seqcluster_stats(data, out_dir): """Parse seqcluster output""" name = dd.get_sample_name(data) fn = data.get("seqcluster", {}).get("stat_file", None) if not fn: return None out_file = os.path.join(out_dir, "%s.txt" % name) df = pd.read_csv(fn, sep="\t", names = ["reads", "sample", "type"]) df_sample = df[df["sample"] == name] df_sample.to_csv(out_file, sep="\t") return out_file
python
def _seqcluster_stats(data, out_dir): """Parse seqcluster output""" name = dd.get_sample_name(data) fn = data.get("seqcluster", {}).get("stat_file", None) if not fn: return None out_file = os.path.join(out_dir, "%s.txt" % name) df = pd.read_csv(fn, sep="\t", names = ["reads", "sample", "type"]) df_sample = df[df["sample"] == name] df_sample.to_csv(out_file, sep="\t") return out_file
[ "def", "_seqcluster_stats", "(", "data", ",", "out_dir", ")", ":", "name", "=", "dd", ".", "get_sample_name", "(", "data", ")", "fn", "=", "data", ".", "get", "(", "\"seqcluster\"", ",", "{", "}", ")", ".", "get", "(", "\"stat_file\"", ",", "None", "...
Parse seqcluster output
[ "Parse", "seqcluster", "output" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/srna.py#L61-L71
train
219,233
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
from_flowcell
def from_flowcell(run_folder, lane_details, out_dir=None): """Convert a flowcell into a samplesheet for demultiplexing. """ fcid = os.path.basename(run_folder) if out_dir is None: out_dir = run_folder out_file = os.path.join(out_dir, "%s.csv" % fcid) with open(out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["FCID", "Lane", "Sample_ID", "SampleRef", "Index", "Description", "Control", "Recipe", "Operator", "SampleProject"]) for ldetail in lane_details: writer.writerow(_lane_detail_to_ss(fcid, ldetail)) return out_file
python
def from_flowcell(run_folder, lane_details, out_dir=None): """Convert a flowcell into a samplesheet for demultiplexing. """ fcid = os.path.basename(run_folder) if out_dir is None: out_dir = run_folder out_file = os.path.join(out_dir, "%s.csv" % fcid) with open(out_file, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(["FCID", "Lane", "Sample_ID", "SampleRef", "Index", "Description", "Control", "Recipe", "Operator", "SampleProject"]) for ldetail in lane_details: writer.writerow(_lane_detail_to_ss(fcid, ldetail)) return out_file
[ "def", "from_flowcell", "(", "run_folder", ",", "lane_details", ",", "out_dir", "=", "None", ")", ":", "fcid", "=", "os", ".", "path", ".", "basename", "(", "run_folder", ")", "if", "out_dir", "is", "None", ":", "out_dir", "=", "run_folder", "out_file", ...
Convert a flowcell into a samplesheet for demultiplexing.
[ "Convert", "a", "flowcell", "into", "a", "samplesheet", "for", "demultiplexing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L20-L33
train
219,234
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
_lane_detail_to_ss
def _lane_detail_to_ss(fcid, ldetail): """Convert information about a lane into Illumina samplesheet output. """ return [fcid, ldetail["lane"], ldetail["name"], ldetail["genome_build"], ldetail["bc_index"], ldetail["description"].encode("ascii", "ignore"), "N", "", "", ldetail["project_name"]]
python
def _lane_detail_to_ss(fcid, ldetail): """Convert information about a lane into Illumina samplesheet output. """ return [fcid, ldetail["lane"], ldetail["name"], ldetail["genome_build"], ldetail["bc_index"], ldetail["description"].encode("ascii", "ignore"), "N", "", "", ldetail["project_name"]]
[ "def", "_lane_detail_to_ss", "(", "fcid", ",", "ldetail", ")", ":", "return", "[", "fcid", ",", "ldetail", "[", "\"lane\"", "]", ",", "ldetail", "[", "\"name\"", "]", ",", "ldetail", "[", "\"genome_build\"", "]", ",", "ldetail", "[", "\"bc_index\"", "]", ...
Convert information about a lane into Illumina samplesheet output.
[ "Convert", "information", "about", "a", "lane", "into", "Illumina", "samplesheet", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L35-L40
train
219,235
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
_organize_lanes
def _organize_lanes(info_iter, barcode_ids): """Organize flat lane information into nested YAML structure. """ all_lanes = [] for (fcid, lane, sampleref), info in itertools.groupby(info_iter, lambda x: (x[0], x[1], x[1])): info = list(info) cur_lane = dict(flowcell_id=fcid, lane=lane, genome_build=info[0][3], analysis="Standard") if not _has_barcode(info): cur_lane["description"] = info[0][1] else: # barcoded sample cur_lane["description"] = "Barcoded lane %s" % lane multiplex = [] for (_, _, sample_id, _, bc_seq) in info: bc_type, bc_id = barcode_ids[bc_seq] multiplex.append(dict(barcode_type=bc_type, barcode_id=bc_id, sequence=bc_seq, name=sample_id)) cur_lane["multiplex"] = multiplex all_lanes.append(cur_lane) return all_lanes
python
def _organize_lanes(info_iter, barcode_ids): """Organize flat lane information into nested YAML structure. """ all_lanes = [] for (fcid, lane, sampleref), info in itertools.groupby(info_iter, lambda x: (x[0], x[1], x[1])): info = list(info) cur_lane = dict(flowcell_id=fcid, lane=lane, genome_build=info[0][3], analysis="Standard") if not _has_barcode(info): cur_lane["description"] = info[0][1] else: # barcoded sample cur_lane["description"] = "Barcoded lane %s" % lane multiplex = [] for (_, _, sample_id, _, bc_seq) in info: bc_type, bc_id = barcode_ids[bc_seq] multiplex.append(dict(barcode_type=bc_type, barcode_id=bc_id, sequence=bc_seq, name=sample_id)) cur_lane["multiplex"] = multiplex all_lanes.append(cur_lane) return all_lanes
[ "def", "_organize_lanes", "(", "info_iter", ",", "barcode_ids", ")", ":", "all_lanes", "=", "[", "]", "for", "(", "fcid", ",", "lane", ",", "sampleref", ")", ",", "info", "in", "itertools", ".", "groupby", "(", "info_iter", ",", "lambda", "x", ":", "("...
Organize flat lane information into nested YAML structure.
[ "Organize", "flat", "lane", "information", "into", "nested", "YAML", "structure", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L44-L64
train
219,236
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
_generate_barcode_ids
def _generate_barcode_ids(info_iter): """Create unique barcode IDs assigned to sequences """ bc_type = "SampleSheet" barcodes = list(set([x[-1] for x in info_iter])) barcodes.sort() barcode_ids = {} for i, bc in enumerate(barcodes): barcode_ids[bc] = (bc_type, i+1) return barcode_ids
python
def _generate_barcode_ids(info_iter): """Create unique barcode IDs assigned to sequences """ bc_type = "SampleSheet" barcodes = list(set([x[-1] for x in info_iter])) barcodes.sort() barcode_ids = {} for i, bc in enumerate(barcodes): barcode_ids[bc] = (bc_type, i+1) return barcode_ids
[ "def", "_generate_barcode_ids", "(", "info_iter", ")", ":", "bc_type", "=", "\"SampleSheet\"", "barcodes", "=", "list", "(", "set", "(", "[", "x", "[", "-", "1", "]", "for", "x", "in", "info_iter", "]", ")", ")", "barcodes", ".", "sort", "(", ")", "b...
Create unique barcode IDs assigned to sequences
[ "Create", "unique", "barcode", "IDs", "assigned", "to", "sequences" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L70-L79
train
219,237
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
_read_input_csv
def _read_input_csv(in_file): """Parse useful details from SampleSheet CSV file. """ with io.open(in_file, newline=None) as in_handle: reader = csv.reader(in_handle) next(reader) # header for line in reader: if line: # empty lines (fc_id, lane, sample_id, genome, barcode) = line[:5] yield fc_id, lane, sample_id, genome, barcode
python
def _read_input_csv(in_file): """Parse useful details from SampleSheet CSV file. """ with io.open(in_file, newline=None) as in_handle: reader = csv.reader(in_handle) next(reader) # header for line in reader: if line: # empty lines (fc_id, lane, sample_id, genome, barcode) = line[:5] yield fc_id, lane, sample_id, genome, barcode
[ "def", "_read_input_csv", "(", "in_file", ")", ":", "with", "io", ".", "open", "(", "in_file", ",", "newline", "=", "None", ")", "as", "in_handle", ":", "reader", "=", "csv", ".", "reader", "(", "in_handle", ")", "next", "(", "reader", ")", "# header",...
Parse useful details from SampleSheet CSV file.
[ "Parse", "useful", "details", "from", "SampleSheet", "CSV", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L81-L90
train
219,238
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
_get_flowcell_id
def _get_flowcell_id(in_file, require_single=True): """Retrieve the unique flowcell id represented in the SampleSheet. """ fc_ids = set([x[0] for x in _read_input_csv(in_file)]) if require_single and len(fc_ids) > 1: raise ValueError("There are several FCIDs in the same samplesheet file: %s" % in_file) else: return fc_ids
python
def _get_flowcell_id(in_file, require_single=True): """Retrieve the unique flowcell id represented in the SampleSheet. """ fc_ids = set([x[0] for x in _read_input_csv(in_file)]) if require_single and len(fc_ids) > 1: raise ValueError("There are several FCIDs in the same samplesheet file: %s" % in_file) else: return fc_ids
[ "def", "_get_flowcell_id", "(", "in_file", ",", "require_single", "=", "True", ")", ":", "fc_ids", "=", "set", "(", "[", "x", "[", "0", "]", "for", "x", "in", "_read_input_csv", "(", "in_file", ")", "]", ")", "if", "require_single", "and", "len", "(", ...
Retrieve the unique flowcell id represented in the SampleSheet.
[ "Retrieve", "the", "unique", "flowcell", "id", "represented", "in", "the", "SampleSheet", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L92-L99
train
219,239
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
csv2yaml
def csv2yaml(in_file, out_file=None): """Convert a CSV SampleSheet to YAML run_info format. """ if out_file is None: out_file = "%s.yaml" % os.path.splitext(in_file)[0] barcode_ids = _generate_barcode_ids(_read_input_csv(in_file)) lanes = _organize_lanes(_read_input_csv(in_file), barcode_ids) with open(out_file, "w") as out_handle: out_handle.write(yaml.safe_dump(lanes, default_flow_style=False)) return out_file
python
def csv2yaml(in_file, out_file=None): """Convert a CSV SampleSheet to YAML run_info format. """ if out_file is None: out_file = "%s.yaml" % os.path.splitext(in_file)[0] barcode_ids = _generate_barcode_ids(_read_input_csv(in_file)) lanes = _organize_lanes(_read_input_csv(in_file), barcode_ids) with open(out_file, "w") as out_handle: out_handle.write(yaml.safe_dump(lanes, default_flow_style=False)) return out_file
[ "def", "csv2yaml", "(", "in_file", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", ":", "out_file", "=", "\"%s.yaml\"", "%", "os", ".", "path", ".", "splitext", "(", "in_file", ")", "[", "0", "]", "barcode_ids", "=", "_generate_...
Convert a CSV SampleSheet to YAML run_info format.
[ "Convert", "a", "CSV", "SampleSheet", "to", "YAML", "run_info", "format", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L101-L110
train
219,240
bcbio/bcbio-nextgen
bcbio/illumina/samplesheet.py
run_has_samplesheet
def run_has_samplesheet(fc_dir, config, require_single=True): """Checks if there's a suitable SampleSheet.csv present for the run """ fc_name, _ = flowcell.parse_dirname(fc_dir) sheet_dirs = config.get("samplesheet_directories", []) fcid_sheet = {} for ss_dir in (s for s in sheet_dirs if os.path.exists(s)): with utils.chdir(ss_dir): for ss in glob.glob("*.csv"): fc_ids = _get_flowcell_id(ss, require_single) for fcid in fc_ids: if fcid: fcid_sheet[fcid] = os.path.join(ss_dir, ss) # difflib handles human errors while entering data on the SampleSheet. # Only one best candidate is returned (if any). 0.85 cutoff allows for # maximum of 2 mismatches in fcid potential_fcids = difflib.get_close_matches(fc_name, fcid_sheet.keys(), 1, 0.85) if len(potential_fcids) > 0 and potential_fcids[0] in fcid_sheet: return fcid_sheet[potential_fcids[0]] else: return None
python
def run_has_samplesheet(fc_dir, config, require_single=True): """Checks if there's a suitable SampleSheet.csv present for the run """ fc_name, _ = flowcell.parse_dirname(fc_dir) sheet_dirs = config.get("samplesheet_directories", []) fcid_sheet = {} for ss_dir in (s for s in sheet_dirs if os.path.exists(s)): with utils.chdir(ss_dir): for ss in glob.glob("*.csv"): fc_ids = _get_flowcell_id(ss, require_single) for fcid in fc_ids: if fcid: fcid_sheet[fcid] = os.path.join(ss_dir, ss) # difflib handles human errors while entering data on the SampleSheet. # Only one best candidate is returned (if any). 0.85 cutoff allows for # maximum of 2 mismatches in fcid potential_fcids = difflib.get_close_matches(fc_name, fcid_sheet.keys(), 1, 0.85) if len(potential_fcids) > 0 and potential_fcids[0] in fcid_sheet: return fcid_sheet[potential_fcids[0]] else: return None
[ "def", "run_has_samplesheet", "(", "fc_dir", ",", "config", ",", "require_single", "=", "True", ")", ":", "fc_name", ",", "_", "=", "flowcell", ".", "parse_dirname", "(", "fc_dir", ")", "sheet_dirs", "=", "config", ".", "get", "(", "\"samplesheet_directories\"...
Checks if there's a suitable SampleSheet.csv present for the run
[ "Checks", "if", "there", "s", "a", "suitable", "SampleSheet", ".", "csv", "present", "for", "the", "run" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/samplesheet.py#L112-L133
train
219,241
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
combine_bam
def combine_bam(in_files, out_file, config): """Parallel target to combine multiple BAM files. """ runner = broad.runner_from_path("picard", config) runner.run_fn("picard_merge", in_files, out_file) for in_file in in_files: save_diskspace(in_file, "Merged into {0}".format(out_file), config) bam.index(out_file, config) return out_file
python
def combine_bam(in_files, out_file, config): """Parallel target to combine multiple BAM files. """ runner = broad.runner_from_path("picard", config) runner.run_fn("picard_merge", in_files, out_file) for in_file in in_files: save_diskspace(in_file, "Merged into {0}".format(out_file), config) bam.index(out_file, config) return out_file
[ "def", "combine_bam", "(", "in_files", ",", "out_file", ",", "config", ")", ":", "runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "config", ")", "runner", ".", "run_fn", "(", "\"picard_merge\"", ",", "in_files", ",", "out_file", ")", ...
Parallel target to combine multiple BAM files.
[ "Parallel", "target", "to", "combine", "multiple", "BAM", "files", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L25-L33
train
219,242
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
write_nochr_reads
def write_nochr_reads(in_file, out_file, config): """Write a BAM file of reads that are not mapped on a reference chromosome. This is useful for maintaining non-mapped reads in parallel processes that split processing by chromosome. """ if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: samtools = config_utils.get_program("samtools", config) cmd = "{samtools} view -b -f 4 {in_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Select unmapped reads") return out_file
python
def write_nochr_reads(in_file, out_file, config): """Write a BAM file of reads that are not mapped on a reference chromosome. This is useful for maintaining non-mapped reads in parallel processes that split processing by chromosome. """ if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: samtools = config_utils.get_program("samtools", config) cmd = "{samtools} view -b -f 4 {in_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Select unmapped reads") return out_file
[ "def", "write_nochr_reads", "(", "in_file", ",", "out_file", ",", "config", ")", ":", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file", ":", "samtools", "=", "config_u...
Write a BAM file of reads that are not mapped on a reference chromosome. This is useful for maintaining non-mapped reads in parallel processes that split processing by chromosome.
[ "Write", "a", "BAM", "file", "of", "reads", "that", "are", "not", "mapped", "on", "a", "reference", "chromosome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L57-L68
train
219,243
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
write_noanalysis_reads
def write_noanalysis_reads(in_file, region_file, out_file, config): """Write a BAM file of reads in the specified region file that are not analyzed. We want to get only reads not in analysis regions but also make use of the BAM index to perform well on large files. The tricky part is avoiding command line limits. There is a nice discussion on SeqAnswers: http://seqanswers.com/forums/showthread.php?t=29538 sambamba supports intersection via an input BED file so avoids command line length issues. """ if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bedtools = config_utils.get_program("bedtools", config) sambamba = config_utils.get_program("sambamba", config) cl = ("{sambamba} view -f bam -l 0 -L {region_file} {in_file} | " "{bedtools} intersect -abam - -b {region_file} -f 1.0 -nonamecheck" "> {tx_out_file}") do.run(cl.format(**locals()), "Select unanalyzed reads") return out_file
python
def write_noanalysis_reads(in_file, region_file, out_file, config): """Write a BAM file of reads in the specified region file that are not analyzed. We want to get only reads not in analysis regions but also make use of the BAM index to perform well on large files. The tricky part is avoiding command line limits. There is a nice discussion on SeqAnswers: http://seqanswers.com/forums/showthread.php?t=29538 sambamba supports intersection via an input BED file so avoids command line length issues. """ if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bedtools = config_utils.get_program("bedtools", config) sambamba = config_utils.get_program("sambamba", config) cl = ("{sambamba} view -f bam -l 0 -L {region_file} {in_file} | " "{bedtools} intersect -abam - -b {region_file} -f 1.0 -nonamecheck" "> {tx_out_file}") do.run(cl.format(**locals()), "Select unanalyzed reads") return out_file
[ "def", "write_noanalysis_reads", "(", "in_file", ",", "region_file", ",", "out_file", ",", "config", ")", ":", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file", ":", "...
Write a BAM file of reads in the specified region file that are not analyzed. We want to get only reads not in analysis regions but also make use of the BAM index to perform well on large files. The tricky part is avoiding command line limits. There is a nice discussion on SeqAnswers: http://seqanswers.com/forums/showthread.php?t=29538 sambamba supports intersection via an input BED file so avoids command line length issues.
[ "Write", "a", "BAM", "file", "of", "reads", "in", "the", "specified", "region", "file", "that", "are", "not", "analyzed", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L70-L88
train
219,244
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
subset_bam_by_region
def subset_bam_by_region(in_file, region, config, out_file_base=None): """Subset BAM files based on specified chromosome region. """ if out_file_base is not None: base, ext = os.path.splitext(out_file_base) else: base, ext = os.path.splitext(in_file) out_file = "%s-subset%s%s" % (base, region, ext) if not file_exists(out_file): with pysam.Samfile(in_file, "rb") as in_bam: target_tid = in_bam.gettid(region) assert region is not None, \ "Did not find reference region %s in %s" % \ (region, in_file) with file_transaction(config, out_file) as tx_out_file: with pysam.Samfile(tx_out_file, "wb", template=in_bam) as out_bam: for read in in_bam: if read.tid == target_tid: out_bam.write(read) return out_file
python
def subset_bam_by_region(in_file, region, config, out_file_base=None): """Subset BAM files based on specified chromosome region. """ if out_file_base is not None: base, ext = os.path.splitext(out_file_base) else: base, ext = os.path.splitext(in_file) out_file = "%s-subset%s%s" % (base, region, ext) if not file_exists(out_file): with pysam.Samfile(in_file, "rb") as in_bam: target_tid = in_bam.gettid(region) assert region is not None, \ "Did not find reference region %s in %s" % \ (region, in_file) with file_transaction(config, out_file) as tx_out_file: with pysam.Samfile(tx_out_file, "wb", template=in_bam) as out_bam: for read in in_bam: if read.tid == target_tid: out_bam.write(read) return out_file
[ "def", "subset_bam_by_region", "(", "in_file", ",", "region", ",", "config", ",", "out_file_base", "=", "None", ")", ":", "if", "out_file_base", "is", "not", "None", ":", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "out_file_base", ...
Subset BAM files based on specified chromosome region.
[ "Subset", "BAM", "files", "based", "on", "specified", "chromosome", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L90-L109
train
219,245
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
subset_bed_by_chrom
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None): """Subset a BED file to only have items from the specified chromosome. """ if out_dir is None: out_dir = os.path.dirname(in_file) base, ext = os.path.splitext(os.path.basename(in_file)) out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext)) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: _rewrite_bed_with_chrom(in_file, tx_out_file, chrom) return out_file
python
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None): """Subset a BED file to only have items from the specified chromosome. """ if out_dir is None: out_dir = os.path.dirname(in_file) base, ext = os.path.splitext(os.path.basename(in_file)) out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext)) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: _rewrite_bed_with_chrom(in_file, tx_out_file, chrom) return out_file
[ "def", "subset_bed_by_chrom", "(", "in_file", ",", "chrom", ",", "data", ",", "out_dir", "=", "None", ")", ":", "if", "out_dir", "is", "None", ":", "out_dir", "=", "os", ".", "path", ".", "dirname", "(", "in_file", ")", "base", ",", "ext", "=", "os",...
Subset a BED file to only have items from the specified chromosome.
[ "Subset", "a", "BED", "file", "to", "only", "have", "items", "from", "the", "specified", "chromosome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L111-L121
train
219,246
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
remove_lcr_regions
def remove_lcr_regions(orig_bed, items): """If configured and available, update a BED file to remove low complexity regions. """ lcr_bed = tz.get_in(["genome_resources", "variation", "lcr"], items[0]) if lcr_bed and os.path.exists(lcr_bed) and "lcr" in get_exclude_regions(items): return _remove_regions(orig_bed, [lcr_bed], "nolcr", items[0]) else: return orig_bed
python
def remove_lcr_regions(orig_bed, items): """If configured and available, update a BED file to remove low complexity regions. """ lcr_bed = tz.get_in(["genome_resources", "variation", "lcr"], items[0]) if lcr_bed and os.path.exists(lcr_bed) and "lcr" in get_exclude_regions(items): return _remove_regions(orig_bed, [lcr_bed], "nolcr", items[0]) else: return orig_bed
[ "def", "remove_lcr_regions", "(", "orig_bed", ",", "items", ")", ":", "lcr_bed", "=", "tz", ".", "get_in", "(", "[", "\"genome_resources\"", ",", "\"variation\"", ",", "\"lcr\"", "]", ",", "items", "[", "0", "]", ")", "if", "lcr_bed", "and", "os", ".", ...
If configured and available, update a BED file to remove low complexity regions.
[ "If", "configured", "and", "available", "update", "a", "BED", "file", "to", "remove", "low", "complexity", "regions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L143-L150
train
219,247
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
remove_polyx_regions
def remove_polyx_regions(in_file, items): """Remove polyX stretches, contributing to long variant runtimes. """ ex_bed = tz.get_in(["genome_resources", "variation", "polyx"], items[0]) if ex_bed and os.path.exists(ex_bed): return _remove_regions(in_file, [ex_bed], "nopolyx", items[0]) else: return in_file
python
def remove_polyx_regions(in_file, items): """Remove polyX stretches, contributing to long variant runtimes. """ ex_bed = tz.get_in(["genome_resources", "variation", "polyx"], items[0]) if ex_bed and os.path.exists(ex_bed): return _remove_regions(in_file, [ex_bed], "nopolyx", items[0]) else: return in_file
[ "def", "remove_polyx_regions", "(", "in_file", ",", "items", ")", ":", "ex_bed", "=", "tz", ".", "get_in", "(", "[", "\"genome_resources\"", ",", "\"variation\"", ",", "\"polyx\"", "]", ",", "items", "[", "0", "]", ")", "if", "ex_bed", "and", "os", ".", ...
Remove polyX stretches, contributing to long variant runtimes.
[ "Remove", "polyX", "stretches", "contributing", "to", "long", "variant", "runtimes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L152-L159
train
219,248
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
add_highdepth_genome_exclusion
def add_highdepth_genome_exclusion(items): """Add exclusions to input items to avoid slow runtimes on whole genomes. """ out = [] for d in items: d = utils.deepish_copy(d) if dd.get_coverage_interval(d) == "genome": e = dd.get_exclude_regions(d) if "highdepth" not in e: e.append("highdepth") d = dd.set_exclude_regions(d, e) out.append(d) return out
python
def add_highdepth_genome_exclusion(items): """Add exclusions to input items to avoid slow runtimes on whole genomes. """ out = [] for d in items: d = utils.deepish_copy(d) if dd.get_coverage_interval(d) == "genome": e = dd.get_exclude_regions(d) if "highdepth" not in e: e.append("highdepth") d = dd.set_exclude_regions(d, e) out.append(d) return out
[ "def", "add_highdepth_genome_exclusion", "(", "items", ")", ":", "out", "=", "[", "]", "for", "d", "in", "items", ":", "d", "=", "utils", ".", "deepish_copy", "(", "d", ")", "if", "dd", ".", "get_coverage_interval", "(", "d", ")", "==", "\"genome\"", "...
Add exclusions to input items to avoid slow runtimes on whole genomes.
[ "Add", "exclusions", "to", "input", "items", "to", "avoid", "slow", "runtimes", "on", "whole", "genomes", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L161-L173
train
219,249
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
remove_highdepth_regions
def remove_highdepth_regions(in_file, items): """Remove high depth regions from a BED file for analyzing a set of calls. Tries to avoid spurious errors and slow run times in collapsed repeat regions. Also adds ENCODE blacklist regions which capture additional collapsed repeats around centromeres. """ encode_bed = tz.get_in(["genome_resources", "variation", "encode_blacklist"], items[0]) if encode_bed and os.path.exists(encode_bed): return _remove_regions(in_file, [encode_bed], "glimit", items[0]) else: return in_file
python
def remove_highdepth_regions(in_file, items): """Remove high depth regions from a BED file for analyzing a set of calls. Tries to avoid spurious errors and slow run times in collapsed repeat regions. Also adds ENCODE blacklist regions which capture additional collapsed repeats around centromeres. """ encode_bed = tz.get_in(["genome_resources", "variation", "encode_blacklist"], items[0]) if encode_bed and os.path.exists(encode_bed): return _remove_regions(in_file, [encode_bed], "glimit", items[0]) else: return in_file
[ "def", "remove_highdepth_regions", "(", "in_file", ",", "items", ")", ":", "encode_bed", "=", "tz", ".", "get_in", "(", "[", "\"genome_resources\"", ",", "\"variation\"", ",", "\"encode_blacklist\"", "]", ",", "items", "[", "0", "]", ")", "if", "encode_bed", ...
Remove high depth regions from a BED file for analyzing a set of calls. Tries to avoid spurious errors and slow run times in collapsed repeat regions. Also adds ENCODE blacklist regions which capture additional collapsed repeats around centromeres.
[ "Remove", "high", "depth", "regions", "from", "a", "BED", "file", "for", "analyzing", "a", "set", "of", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L175-L187
train
219,250
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
_remove_regions
def _remove_regions(in_file, remove_beds, ext, data): """Subtract a list of BED files from an input BED. General approach handling none, one and more remove_beds. """ from bcbio.variation import bedutils out_file = "%s-%s.bed" % (utils.splitext_plus(in_file)[0], ext) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with bedtools_tmpdir(data): if len(remove_beds) == 0: to_remove = None elif len(remove_beds) == 1: to_remove = remove_beds[0] else: to_remove = "%s-all.bed" % utils.splitext_plus(tx_out_file)[0] with open(to_remove, "w") as out_handle: for b in remove_beds: with utils.open_gzipsafe(b) as in_handle: for line in in_handle: parts = line.split("\t") out_handle.write("\t".join(parts[:4]).rstrip() + "\n") if utils.file_exists(to_remove): to_remove = bedutils.sort_merge(to_remove, data) if to_remove and utils.file_exists(to_remove): cmd = "bedtools subtract -nonamecheck -a {in_file} -b {to_remove} > {tx_out_file}" do.run(cmd.format(**locals()), "Remove problematic regions: %s" % ext) else: utils.symlink_plus(in_file, out_file) return out_file
python
def _remove_regions(in_file, remove_beds, ext, data): """Subtract a list of BED files from an input BED. General approach handling none, one and more remove_beds. """ from bcbio.variation import bedutils out_file = "%s-%s.bed" % (utils.splitext_plus(in_file)[0], ext) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with bedtools_tmpdir(data): if len(remove_beds) == 0: to_remove = None elif len(remove_beds) == 1: to_remove = remove_beds[0] else: to_remove = "%s-all.bed" % utils.splitext_plus(tx_out_file)[0] with open(to_remove, "w") as out_handle: for b in remove_beds: with utils.open_gzipsafe(b) as in_handle: for line in in_handle: parts = line.split("\t") out_handle.write("\t".join(parts[:4]).rstrip() + "\n") if utils.file_exists(to_remove): to_remove = bedutils.sort_merge(to_remove, data) if to_remove and utils.file_exists(to_remove): cmd = "bedtools subtract -nonamecheck -a {in_file} -b {to_remove} > {tx_out_file}" do.run(cmd.format(**locals()), "Remove problematic regions: %s" % ext) else: utils.symlink_plus(in_file, out_file) return out_file
[ "def", "_remove_regions", "(", "in_file", ",", "remove_beds", ",", "ext", ",", "data", ")", ":", "from", "bcbio", ".", "variation", "import", "bedutils", "out_file", "=", "\"%s-%s.bed\"", "%", "(", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "...
Subtract a list of BED files from an input BED. General approach handling none, one and more remove_beds.
[ "Subtract", "a", "list", "of", "BED", "files", "from", "an", "input", "BED", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L189-L218
train
219,251
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
get_exclude_regions
def get_exclude_regions(items): """Retrieve regions to exclude from a set of items. Includes back compatibility for older custom ways of specifying different exclusions. """ def _get_sample_excludes(d): excludes = dd.get_exclude_regions(d) # back compatible if tz.get_in(("config", "algorithm", "remove_lcr"), d, False): excludes.append("lcr") return excludes out = reduce(operator.add, [_get_sample_excludes(d) for d in items]) return sorted(list(set(out)))
python
def get_exclude_regions(items): """Retrieve regions to exclude from a set of items. Includes back compatibility for older custom ways of specifying different exclusions. """ def _get_sample_excludes(d): excludes = dd.get_exclude_regions(d) # back compatible if tz.get_in(("config", "algorithm", "remove_lcr"), d, False): excludes.append("lcr") return excludes out = reduce(operator.add, [_get_sample_excludes(d) for d in items]) return sorted(list(set(out)))
[ "def", "get_exclude_regions", "(", "items", ")", ":", "def", "_get_sample_excludes", "(", "d", ")", ":", "excludes", "=", "dd", ".", "get_exclude_regions", "(", "d", ")", "# back compatible", "if", "tz", ".", "get_in", "(", "(", "\"config\"", ",", "\"algorit...
Retrieve regions to exclude from a set of items. Includes back compatibility for older custom ways of specifying different exclusions.
[ "Retrieve", "regions", "to", "exclude", "from", "a", "set", "of", "items", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L231-L244
train
219,252
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
to_multiregion
def to_multiregion(region): """Convert a single region or multiple region specification into multiregion list. If a single region (chrom, start, end), returns [(chrom, start, end)] otherwise returns multiregion. """ assert isinstance(region, (list, tuple)), region if isinstance(region[0], (list, tuple)): return region else: assert len(region) == 3 return [tuple(region)]
python
def to_multiregion(region): """Convert a single region or multiple region specification into multiregion list. If a single region (chrom, start, end), returns [(chrom, start, end)] otherwise returns multiregion. """ assert isinstance(region, (list, tuple)), region if isinstance(region[0], (list, tuple)): return region else: assert len(region) == 3 return [tuple(region)]
[ "def", "to_multiregion", "(", "region", ")", ":", "assert", "isinstance", "(", "region", ",", "(", "list", ",", "tuple", ")", ")", ",", "region", "if", "isinstance", "(", "region", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "retu...
Convert a single region or multiple region specification into multiregion list. If a single region (chrom, start, end), returns [(chrom, start, end)] otherwise returns multiregion.
[ "Convert", "a", "single", "region", "or", "multiple", "region", "specification", "into", "multiregion", "list", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L261-L272
train
219,253
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
subset_variant_regions
def subset_variant_regions(variant_regions, region, out_file, items=None, do_merge=True, data=None): """Return BED file subset by a specified chromosome region. variant_regions is a BED file, region is a chromosome name or tuple of (name, start, end) for a genomic region. """ if region is None: return variant_regions elif variant_regions is None: return region elif not isinstance(region, (list, tuple)) and region.find(":") > 0: raise ValueError("Partial chromosome regions not supported") else: merge_text = "-unmerged" if not do_merge else "" subset_file = "{0}".format(utils.splitext_plus(out_file)[0]) subset_file += "%s-regions.bed" % (merge_text) if not os.path.exists(subset_file): data = items[0] if items else data with file_transaction(data, subset_file) as tx_subset_file: if isinstance(region, (list, tuple)): _subset_bed_by_region(variant_regions, tx_subset_file, to_multiregion(region), dd.get_ref_file(data), do_merge=do_merge) else: _rewrite_bed_with_chrom(variant_regions, tx_subset_file, region) if os.path.getsize(subset_file) == 0: return region else: return subset_file
python
def subset_variant_regions(variant_regions, region, out_file, items=None, do_merge=True, data=None): """Return BED file subset by a specified chromosome region. variant_regions is a BED file, region is a chromosome name or tuple of (name, start, end) for a genomic region. """ if region is None: return variant_regions elif variant_regions is None: return region elif not isinstance(region, (list, tuple)) and region.find(":") > 0: raise ValueError("Partial chromosome regions not supported") else: merge_text = "-unmerged" if not do_merge else "" subset_file = "{0}".format(utils.splitext_plus(out_file)[0]) subset_file += "%s-regions.bed" % (merge_text) if not os.path.exists(subset_file): data = items[0] if items else data with file_transaction(data, subset_file) as tx_subset_file: if isinstance(region, (list, tuple)): _subset_bed_by_region(variant_regions, tx_subset_file, to_multiregion(region), dd.get_ref_file(data), do_merge=do_merge) else: _rewrite_bed_with_chrom(variant_regions, tx_subset_file, region) if os.path.getsize(subset_file) == 0: return region else: return subset_file
[ "def", "subset_variant_regions", "(", "variant_regions", ",", "region", ",", "out_file", ",", "items", "=", "None", ",", "do_merge", "=", "True", ",", "data", "=", "None", ")", ":", "if", "region", "is", "None", ":", "return", "variant_regions", "elif", "v...
Return BED file subset by a specified chromosome region. variant_regions is a BED file, region is a chromosome name or tuple of (name, start, end) for a genomic region.
[ "Return", "BED", "file", "subset", "by", "a", "specified", "chromosome", "region", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L275-L302
train
219,254
bcbio/bcbio-nextgen
bcbio/structural/delly.py
_delly_exclude_file
def _delly_exclude_file(items, base_file, chrom): """Prepare a delly-specific exclude file eliminating chromosomes. Delly wants excluded chromosomes listed as just the chromosome, with no coordinates. """ base_exclude = sshared.prepare_exclude_file(items, base_file, chrom) out_file = "%s-delly%s" % utils.splitext_plus(base_exclude) with file_transaction(items[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: with open(base_exclude) as in_handle: for line in in_handle: parts = line.split("\t") if parts[0] == chrom: out_handle.write(line) else: out_handle.write("%s\n" % parts[0]) return out_file
python
def _delly_exclude_file(items, base_file, chrom): """Prepare a delly-specific exclude file eliminating chromosomes. Delly wants excluded chromosomes listed as just the chromosome, with no coordinates. """ base_exclude = sshared.prepare_exclude_file(items, base_file, chrom) out_file = "%s-delly%s" % utils.splitext_plus(base_exclude) with file_transaction(items[0], out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: with open(base_exclude) as in_handle: for line in in_handle: parts = line.split("\t") if parts[0] == chrom: out_handle.write(line) else: out_handle.write("%s\n" % parts[0]) return out_file
[ "def", "_delly_exclude_file", "(", "items", ",", "base_file", ",", "chrom", ")", ":", "base_exclude", "=", "sshared", ".", "prepare_exclude_file", "(", "items", ",", "base_file", ",", "chrom", ")", "out_file", "=", "\"%s-delly%s\"", "%", "utils", ".", "splitex...
Prepare a delly-specific exclude file eliminating chromosomes. Delly wants excluded chromosomes listed as just the chromosome, with no coordinates.
[ "Prepare", "a", "delly", "-", "specific", "exclude", "file", "eliminating", "chromosomes", ".", "Delly", "wants", "excluded", "chromosomes", "listed", "as", "just", "the", "chromosome", "with", "no", "coordinates", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/delly.py#L24-L39
train
219,255
bcbio/bcbio-nextgen
bcbio/structural/delly.py
_run_delly
def _run_delly(bam_files, chrom, ref_file, work_dir, items): """Run delly, calling structural variations for the specified type. """ batch = sshared.get_cur_batch(items) ext = "-%s-svs" % batch if batch else "-svs" out_file = os.path.join(work_dir, "%s%s-%s.bcf" % (os.path.splitext(os.path.basename(bam_files[0]))[0], ext, chrom)) final_file = "%s.vcf.gz" % (utils.splitext_plus(out_file)[0]) cores = min(utils.get_in(items[0], ("config", "algorithm", "num_cores"), 1), len(bam_files)) if not utils.file_exists(out_file) and not utils.file_exists(final_file): with file_transaction(items[0], out_file) as tx_out_file: if sshared.has_variant_regions(items, out_file, chrom): exclude = ["-x", _delly_exclude_file(items, out_file, chrom)] cmd = ["delly", "call", "-g", ref_file, "-o", tx_out_file] + exclude + bam_files multi_cmd = "export OMP_NUM_THREADS=%s && export LC_ALL=C && " % cores try: do.run(multi_cmd + " ".join(cmd), "delly structural variant") except subprocess.CalledProcessError as msg: # Small input samples, write an empty vcf if "Sample has not enough data to estimate library parameters" in str(msg): pass # delly returns an error exit code if there are no variants elif "No structural variants found" not in str(msg): raise return [_bgzip_and_clean(out_file, items)]
python
def _run_delly(bam_files, chrom, ref_file, work_dir, items): """Run delly, calling structural variations for the specified type. """ batch = sshared.get_cur_batch(items) ext = "-%s-svs" % batch if batch else "-svs" out_file = os.path.join(work_dir, "%s%s-%s.bcf" % (os.path.splitext(os.path.basename(bam_files[0]))[0], ext, chrom)) final_file = "%s.vcf.gz" % (utils.splitext_plus(out_file)[0]) cores = min(utils.get_in(items[0], ("config", "algorithm", "num_cores"), 1), len(bam_files)) if not utils.file_exists(out_file) and not utils.file_exists(final_file): with file_transaction(items[0], out_file) as tx_out_file: if sshared.has_variant_regions(items, out_file, chrom): exclude = ["-x", _delly_exclude_file(items, out_file, chrom)] cmd = ["delly", "call", "-g", ref_file, "-o", tx_out_file] + exclude + bam_files multi_cmd = "export OMP_NUM_THREADS=%s && export LC_ALL=C && " % cores try: do.run(multi_cmd + " ".join(cmd), "delly structural variant") except subprocess.CalledProcessError as msg: # Small input samples, write an empty vcf if "Sample has not enough data to estimate library parameters" in str(msg): pass # delly returns an error exit code if there are no variants elif "No structural variants found" not in str(msg): raise return [_bgzip_and_clean(out_file, items)]
[ "def", "_run_delly", "(", "bam_files", ",", "chrom", ",", "ref_file", ",", "work_dir", ",", "items", ")", ":", "batch", "=", "sshared", ".", "get_cur_batch", "(", "items", ")", "ext", "=", "\"-%s-svs\"", "%", "batch", "if", "batch", "else", "\"-svs\"", "...
Run delly, calling structural variations for the specified type.
[ "Run", "delly", "calling", "structural", "variations", "for", "the", "specified", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/delly.py#L43-L68
train
219,256
bcbio/bcbio-nextgen
bcbio/structural/delly.py
_bgzip_and_clean
def _bgzip_and_clean(bcf_file, items): """Create a clean bgzipped VCF output file from bcf for downstream processing. Also corrects problems with missing likelihoods: https://github.com/dellytools/delly/issues/37 GATK does not like missing GLs like '.,.,.'. This converts them to the recognized '.' """ out_file = "%s.vcf.gz" % (utils.splitext_plus(bcf_file)[0]) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: if not utils.file_exists(bcf_file): vcfutils.write_empty_vcf(tx_out_file, samples=[dd.get_sample_name(d) for d in items]) else: cmd = ("bcftools view {bcf_file} | sed 's/\.,\.,\././' | bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Convert and clean delly output") return vcfutils.bgzip_and_index(out_file, items[0]["config"])
python
def _bgzip_and_clean(bcf_file, items): """Create a clean bgzipped VCF output file from bcf for downstream processing. Also corrects problems with missing likelihoods: https://github.com/dellytools/delly/issues/37 GATK does not like missing GLs like '.,.,.'. This converts them to the recognized '.' """ out_file = "%s.vcf.gz" % (utils.splitext_plus(bcf_file)[0]) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: if not utils.file_exists(bcf_file): vcfutils.write_empty_vcf(tx_out_file, samples=[dd.get_sample_name(d) for d in items]) else: cmd = ("bcftools view {bcf_file} | sed 's/\.,\.,\././' | bgzip -c > {tx_out_file}") do.run(cmd.format(**locals()), "Convert and clean delly output") return vcfutils.bgzip_and_index(out_file, items[0]["config"])
[ "def", "_bgzip_and_clean", "(", "bcf_file", ",", "items", ")", ":", "out_file", "=", "\"%s.vcf.gz\"", "%", "(", "utils", ".", "splitext_plus", "(", "bcf_file", ")", "[", "0", "]", ")", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", ...
Create a clean bgzipped VCF output file from bcf for downstream processing. Also corrects problems with missing likelihoods: https://github.com/dellytools/delly/issues/37 GATK does not like missing GLs like '.,.,.'. This converts them to the recognized '.'
[ "Create", "a", "clean", "bgzipped", "VCF", "output", "file", "from", "bcf", "for", "downstream", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/delly.py#L70-L84
train
219,257
bcbio/bcbio-nextgen
bcbio/structural/delly.py
_prep_subsampled_bams
def _prep_subsampled_bams(data, work_dir): """Prepare a subsampled BAM file with discordants from samblaster and minimal correct pairs. This attempts to minimize run times by pre-extracting useful reads mixed with subsampled normal pairs to estimate paired end distributions: https://groups.google.com/d/msg/delly-users/xmia4lwOd1Q/uaajoBkahAIJ Subsamples correctly aligned reads to 100 million based on speedseq defaults and evaluations on NA12878 whole genome data: https://github.com/cc2qe/speedseq/blob/ca624ba9affb0bd0fb88834ca896e9122639ec94/bin/speedseq#L1102 XXX Currently not used as new versions of delly do not get good sensitivity with downsampled BAMs. """ sr_bam, disc_bam = sshared.get_split_discordants(data, work_dir) ds_bam = bam.downsample(dd.get_align_bam(data), data, 1e8, read_filter="-F 'not secondary_alignment and proper_pair'", always_run=True, work_dir=work_dir) out_bam = "%s-final%s" % utils.splitext_plus(ds_bam) if not utils.file_exists(out_bam): bam.merge([ds_bam, sr_bam, disc_bam], out_bam, data["config"]) bam.index(out_bam, data["config"]) return [out_bam]
python
def _prep_subsampled_bams(data, work_dir): """Prepare a subsampled BAM file with discordants from samblaster and minimal correct pairs. This attempts to minimize run times by pre-extracting useful reads mixed with subsampled normal pairs to estimate paired end distributions: https://groups.google.com/d/msg/delly-users/xmia4lwOd1Q/uaajoBkahAIJ Subsamples correctly aligned reads to 100 million based on speedseq defaults and evaluations on NA12878 whole genome data: https://github.com/cc2qe/speedseq/blob/ca624ba9affb0bd0fb88834ca896e9122639ec94/bin/speedseq#L1102 XXX Currently not used as new versions of delly do not get good sensitivity with downsampled BAMs. """ sr_bam, disc_bam = sshared.get_split_discordants(data, work_dir) ds_bam = bam.downsample(dd.get_align_bam(data), data, 1e8, read_filter="-F 'not secondary_alignment and proper_pair'", always_run=True, work_dir=work_dir) out_bam = "%s-final%s" % utils.splitext_plus(ds_bam) if not utils.file_exists(out_bam): bam.merge([ds_bam, sr_bam, disc_bam], out_bam, data["config"]) bam.index(out_bam, data["config"]) return [out_bam]
[ "def", "_prep_subsampled_bams", "(", "data", ",", "work_dir", ")", ":", "sr_bam", ",", "disc_bam", "=", "sshared", ".", "get_split_discordants", "(", "data", ",", "work_dir", ")", "ds_bam", "=", "bam", ".", "downsample", "(", "dd", ".", "get_align_bam", "(",...
Prepare a subsampled BAM file with discordants from samblaster and minimal correct pairs. This attempts to minimize run times by pre-extracting useful reads mixed with subsampled normal pairs to estimate paired end distributions: https://groups.google.com/d/msg/delly-users/xmia4lwOd1Q/uaajoBkahAIJ Subsamples correctly aligned reads to 100 million based on speedseq defaults and evaluations on NA12878 whole genome data: https://github.com/cc2qe/speedseq/blob/ca624ba9affb0bd0fb88834ca896e9122639ec94/bin/speedseq#L1102 XXX Currently not used as new versions of delly do not get good sensitivity with downsampled BAMs.
[ "Prepare", "a", "subsampled", "BAM", "file", "with", "discordants", "from", "samblaster", "and", "minimal", "correct", "pairs", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/delly.py#L88-L112
train
219,258
bcbio/bcbio-nextgen
bcbio/structural/delly.py
run
def run(items): """Perform detection of structural variations with delly. Performs post-call filtering with a custom filter tuned based on NA12878 Moleculo and PacBio data, using calls prepared by @ryanlayer and @cc2qe Filters using the high quality variant pairs (DV) compared with high quality reference pairs (DR). """ work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "structural", dd.get_sample_name(items[0]), "delly")) # Add core request for delly config = copy.deepcopy(items[0]["config"]) delly_config = utils.get_in(config, ("resources", "delly"), {}) delly_config["cores"] = 1 config["resources"]["delly"] = delly_config parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1), "progs": ["delly"]} work_bams = [dd.get_align_bam(d) for d in items] ref_file = dd.get_ref_file(items[0]) exclude_file = _get_full_exclude_file(items, work_bams, work_dir) bytype_vcfs = run_multicore(_run_delly, [(work_bams, chrom, ref_file, work_dir, items) for chrom in sshared.get_sv_chroms(items, exclude_file)], config, parallel) out_file = "%s.vcf.gz" % sshared.outname_from_inputs(bytype_vcfs) combo_vcf = vcfutils.combine_variant_files(bytype_vcfs, out_file, ref_file, config) out = [] upload_counts = collections.defaultdict(int) for data in items: if "sv" not in data: data["sv"] = [] base, ext = utils.splitext_plus(combo_vcf) final_vcf = sshared.finalize_sv(combo_vcf, data, items) if final_vcf: delly_vcf = _delly_count_evidence_filter(final_vcf, data) data["sv"].append({"variantcaller": "delly", "vrn_file": delly_vcf, "do_upload": upload_counts[final_vcf] == 0, # only upload a single file per batch "exclude": exclude_file}) upload_counts[final_vcf] += 1 out.append(data) return out
python
def run(items): """Perform detection of structural variations with delly. Performs post-call filtering with a custom filter tuned based on NA12878 Moleculo and PacBio data, using calls prepared by @ryanlayer and @cc2qe Filters using the high quality variant pairs (DV) compared with high quality reference pairs (DR). """ work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "structural", dd.get_sample_name(items[0]), "delly")) # Add core request for delly config = copy.deepcopy(items[0]["config"]) delly_config = utils.get_in(config, ("resources", "delly"), {}) delly_config["cores"] = 1 config["resources"]["delly"] = delly_config parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1), "progs": ["delly"]} work_bams = [dd.get_align_bam(d) for d in items] ref_file = dd.get_ref_file(items[0]) exclude_file = _get_full_exclude_file(items, work_bams, work_dir) bytype_vcfs = run_multicore(_run_delly, [(work_bams, chrom, ref_file, work_dir, items) for chrom in sshared.get_sv_chroms(items, exclude_file)], config, parallel) out_file = "%s.vcf.gz" % sshared.outname_from_inputs(bytype_vcfs) combo_vcf = vcfutils.combine_variant_files(bytype_vcfs, out_file, ref_file, config) out = [] upload_counts = collections.defaultdict(int) for data in items: if "sv" not in data: data["sv"] = [] base, ext = utils.splitext_plus(combo_vcf) final_vcf = sshared.finalize_sv(combo_vcf, data, items) if final_vcf: delly_vcf = _delly_count_evidence_filter(final_vcf, data) data["sv"].append({"variantcaller": "delly", "vrn_file": delly_vcf, "do_upload": upload_counts[final_vcf] == 0, # only upload a single file per batch "exclude": exclude_file}) upload_counts[final_vcf] += 1 out.append(data) return out
[ "def", "run", "(", "items", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "items", "[", "0", "]", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"structural\"", ",", "dd", ".", "get_sample_name...
Perform detection of structural variations with delly. Performs post-call filtering with a custom filter tuned based on NA12878 Moleculo and PacBio data, using calls prepared by @ryanlayer and @cc2qe Filters using the high quality variant pairs (DV) compared with high quality reference pairs (DR).
[ "Perform", "detection", "of", "structural", "variations", "with", "delly", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/delly.py#L143-L185
train
219,259
bcbio/bcbio-nextgen
bcbio/rnaseq/oncofuse.py
_disambiguate_star_fusion_junctions
def _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam, disambig_out_file, data): """ Disambiguate detected fusions based on alignments to another species. """ out_file = disambig_out_file fusiondict = {} with open(star_junction_file, "r") as in_handle: for my_line in in_handle: my_line_split = my_line.strip().split("\t") if len(my_line_split) < 10: continue fusiondict[my_line_split[9]] = my_line.strip("\n") with pysam.Samfile(contamination_bam, "rb") as samfile: for my_read in samfile: if my_read.is_unmapped or my_read.is_secondary: continue if my_read.qname in fusiondict: fusiondict.pop(my_read.qname) with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, 'w') as myhandle: for my_key in fusiondict: print(fusiondict[my_key], file=myhandle) return out_file
python
def _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam, disambig_out_file, data): """ Disambiguate detected fusions based on alignments to another species. """ out_file = disambig_out_file fusiondict = {} with open(star_junction_file, "r") as in_handle: for my_line in in_handle: my_line_split = my_line.strip().split("\t") if len(my_line_split) < 10: continue fusiondict[my_line_split[9]] = my_line.strip("\n") with pysam.Samfile(contamination_bam, "rb") as samfile: for my_read in samfile: if my_read.is_unmapped or my_read.is_secondary: continue if my_read.qname in fusiondict: fusiondict.pop(my_read.qname) with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, 'w') as myhandle: for my_key in fusiondict: print(fusiondict[my_key], file=myhandle) return out_file
[ "def", "_disambiguate_star_fusion_junctions", "(", "star_junction_file", ",", "contamination_bam", ",", "disambig_out_file", ",", "data", ")", ":", "out_file", "=", "disambig_out_file", "fusiondict", "=", "{", "}", "with", "open", "(", "star_junction_file", ",", "\"r\...
Disambiguate detected fusions based on alignments to another species.
[ "Disambiguate", "detected", "fusions", "based", "on", "alignments", "to", "another", "species", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/oncofuse.py#L167-L189
train
219,260
dylanaraps/pywal
pywal/wallpaper.py
get_desktop_env
def get_desktop_env(): """Identify the current running desktop environment.""" desktop = os.environ.get("XDG_CURRENT_DESKTOP") if desktop: return desktop desktop = os.environ.get("DESKTOP_SESSION") if desktop: return desktop desktop = os.environ.get("GNOME_DESKTOP_SESSION_ID") if desktop: return "GNOME" desktop = os.environ.get("MATE_DESKTOP_SESSION_ID") if desktop: return "MATE" desktop = os.environ.get("SWAYSOCK") if desktop: return "SWAY" desktop = os.environ.get("DESKTOP_STARTUP_ID") if desktop and "awesome" in desktop: return "AWESOME" return None
python
def get_desktop_env(): """Identify the current running desktop environment.""" desktop = os.environ.get("XDG_CURRENT_DESKTOP") if desktop: return desktop desktop = os.environ.get("DESKTOP_SESSION") if desktop: return desktop desktop = os.environ.get("GNOME_DESKTOP_SESSION_ID") if desktop: return "GNOME" desktop = os.environ.get("MATE_DESKTOP_SESSION_ID") if desktop: return "MATE" desktop = os.environ.get("SWAYSOCK") if desktop: return "SWAY" desktop = os.environ.get("DESKTOP_STARTUP_ID") if desktop and "awesome" in desktop: return "AWESOME" return None
[ "def", "get_desktop_env", "(", ")", ":", "desktop", "=", "os", ".", "environ", ".", "get", "(", "\"XDG_CURRENT_DESKTOP\"", ")", "if", "desktop", ":", "return", "desktop", "desktop", "=", "os", ".", "environ", ".", "get", "(", "\"DESKTOP_SESSION\"", ")", "i...
Identify the current running desktop environment.
[ "Identify", "the", "current", "running", "desktop", "environment", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/wallpaper.py#L13-L39
train
219,261
dylanaraps/pywal
pywal/wallpaper.py
set_wm_wallpaper
def set_wm_wallpaper(img): """Set the wallpaper for non desktop environments.""" if shutil.which("feh"): util.disown(["feh", "--bg-fill", img]) elif shutil.which("nitrogen"): util.disown(["nitrogen", "--set-zoom-fill", img]) elif shutil.which("bgs"): util.disown(["bgs", "-z", img]) elif shutil.which("hsetroot"): util.disown(["hsetroot", "-fill", img]) elif shutil.which("habak"): util.disown(["habak", "-mS", img]) elif shutil.which("display"): util.disown(["display", "-backdrop", "-window", "root", img]) else: logging.error("No wallpaper setter found.") return
python
def set_wm_wallpaper(img): """Set the wallpaper for non desktop environments.""" if shutil.which("feh"): util.disown(["feh", "--bg-fill", img]) elif shutil.which("nitrogen"): util.disown(["nitrogen", "--set-zoom-fill", img]) elif shutil.which("bgs"): util.disown(["bgs", "-z", img]) elif shutil.which("hsetroot"): util.disown(["hsetroot", "-fill", img]) elif shutil.which("habak"): util.disown(["habak", "-mS", img]) elif shutil.which("display"): util.disown(["display", "-backdrop", "-window", "root", img]) else: logging.error("No wallpaper setter found.") return
[ "def", "set_wm_wallpaper", "(", "img", ")", ":", "if", "shutil", ".", "which", "(", "\"feh\"", ")", ":", "util", ".", "disown", "(", "[", "\"feh\"", ",", "\"--bg-fill\"", ",", "img", "]", ")", "elif", "shutil", ".", "which", "(", "\"nitrogen\"", ")", ...
Set the wallpaper for non desktop environments.
[ "Set", "the", "wallpaper", "for", "non", "desktop", "environments", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/wallpaper.py#L48-L70
train
219,262
dylanaraps/pywal
pywal/wallpaper.py
set_desktop_wallpaper
def set_desktop_wallpaper(desktop, img): """Set the wallpaper for the desktop environment.""" desktop = str(desktop).lower() if "xfce" in desktop or "xubuntu" in desktop: # XFCE requires two commands since they differ between versions. xfconf("/backdrop/screen0/monitor0/image-path", img) xfconf("/backdrop/screen0/monitor0/workspace0/last-image", img) elif "muffin" in desktop or "cinnamon" in desktop: util.disown(["gsettings", "set", "org.cinnamon.desktop.background", "picture-uri", "file://" + urllib.parse.quote(img)]) elif "gnome" in desktop or "unity" in desktop: util.disown(["gsettings", "set", "org.gnome.desktop.background", "picture-uri", "file://" + urllib.parse.quote(img)]) elif "mate" in desktop: util.disown(["gsettings", "set", "org.mate.background", "picture-filename", img]) elif "sway" in desktop: util.disown(["swaymsg", "output", "*", "bg", img, "fill"]) elif "awesome" in desktop: util.disown(["awesome-client", "require('gears').wallpaper.maximized('{img}')" .format(**locals())]) else: set_wm_wallpaper(img)
python
def set_desktop_wallpaper(desktop, img): """Set the wallpaper for the desktop environment.""" desktop = str(desktop).lower() if "xfce" in desktop or "xubuntu" in desktop: # XFCE requires two commands since they differ between versions. xfconf("/backdrop/screen0/monitor0/image-path", img) xfconf("/backdrop/screen0/monitor0/workspace0/last-image", img) elif "muffin" in desktop or "cinnamon" in desktop: util.disown(["gsettings", "set", "org.cinnamon.desktop.background", "picture-uri", "file://" + urllib.parse.quote(img)]) elif "gnome" in desktop or "unity" in desktop: util.disown(["gsettings", "set", "org.gnome.desktop.background", "picture-uri", "file://" + urllib.parse.quote(img)]) elif "mate" in desktop: util.disown(["gsettings", "set", "org.mate.background", "picture-filename", img]) elif "sway" in desktop: util.disown(["swaymsg", "output", "*", "bg", img, "fill"]) elif "awesome" in desktop: util.disown(["awesome-client", "require('gears').wallpaper.maximized('{img}')" .format(**locals())]) else: set_wm_wallpaper(img)
[ "def", "set_desktop_wallpaper", "(", "desktop", ",", "img", ")", ":", "desktop", "=", "str", "(", "desktop", ")", ".", "lower", "(", ")", "if", "\"xfce\"", "in", "desktop", "or", "\"xubuntu\"", "in", "desktop", ":", "# XFCE requires two commands since they diffe...
Set the wallpaper for the desktop environment.
[ "Set", "the", "wallpaper", "for", "the", "desktop", "environment", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/wallpaper.py#L73-L105
train
219,263
dylanaraps/pywal
pywal/wallpaper.py
set_mac_wallpaper
def set_mac_wallpaper(img): """Set the wallpaper on macOS.""" db_file = "Library/Application Support/Dock/desktoppicture.db" db_path = os.path.join(HOME, db_file) img_dir, _ = os.path.split(img) # Clear the existing picture data and write the image paths sql = "delete from data; " sql += "insert into data values(\"%s\"); " % img_dir sql += "insert into data values(\"%s\"); " % img # Set all monitors/workspaces to the selected image sql += "update preferences set data_id=2 where key=1 or key=2 or key=3; " sql += "update preferences set data_id=1 where key=10 or key=20 or key=30;" subprocess.call(["sqlite3", db_path, sql]) # Kill the dock to fix issues with cached wallpapers. # macOS caches wallpapers and if a wallpaper is set that shares # the filename with a cached wallpaper, the cached wallpaper is # used instead. subprocess.call(["killall", "Dock"])
python
def set_mac_wallpaper(img): """Set the wallpaper on macOS.""" db_file = "Library/Application Support/Dock/desktoppicture.db" db_path = os.path.join(HOME, db_file) img_dir, _ = os.path.split(img) # Clear the existing picture data and write the image paths sql = "delete from data; " sql += "insert into data values(\"%s\"); " % img_dir sql += "insert into data values(\"%s\"); " % img # Set all monitors/workspaces to the selected image sql += "update preferences set data_id=2 where key=1 or key=2 or key=3; " sql += "update preferences set data_id=1 where key=10 or key=20 or key=30;" subprocess.call(["sqlite3", db_path, sql]) # Kill the dock to fix issues with cached wallpapers. # macOS caches wallpapers and if a wallpaper is set that shares # the filename with a cached wallpaper, the cached wallpaper is # used instead. subprocess.call(["killall", "Dock"])
[ "def", "set_mac_wallpaper", "(", "img", ")", ":", "db_file", "=", "\"Library/Application Support/Dock/desktoppicture.db\"", "db_path", "=", "os", ".", "path", ".", "join", "(", "HOME", ",", "db_file", ")", "img_dir", ",", "_", "=", "os", ".", "path", ".", "s...
Set the wallpaper on macOS.
[ "Set", "the", "wallpaper", "on", "macOS", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/wallpaper.py#L108-L129
train
219,264
dylanaraps/pywal
pywal/wallpaper.py
set_win_wallpaper
def set_win_wallpaper(img): """Set the wallpaper on Windows.""" # There's a different command depending on the architecture # of Windows. We check the PROGRAMFILES envar since using # platform is unreliable. if "x86" in os.environ["PROGRAMFILES"]: ctypes.windll.user32.SystemParametersInfoW(20, 0, img, 3) else: ctypes.windll.user32.SystemParametersInfoA(20, 0, img, 3)
python
def set_win_wallpaper(img): """Set the wallpaper on Windows.""" # There's a different command depending on the architecture # of Windows. We check the PROGRAMFILES envar since using # platform is unreliable. if "x86" in os.environ["PROGRAMFILES"]: ctypes.windll.user32.SystemParametersInfoW(20, 0, img, 3) else: ctypes.windll.user32.SystemParametersInfoA(20, 0, img, 3)
[ "def", "set_win_wallpaper", "(", "img", ")", ":", "# There's a different command depending on the architecture", "# of Windows. We check the PROGRAMFILES envar since using", "# platform is unreliable.", "if", "\"x86\"", "in", "os", ".", "environ", "[", "\"PROGRAMFILES\"", "]", ":...
Set the wallpaper on Windows.
[ "Set", "the", "wallpaper", "on", "Windows", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/wallpaper.py#L132-L140
train
219,265
dylanaraps/pywal
pywal/wallpaper.py
change
def change(img): """Set the wallpaper.""" if not os.path.isfile(img): return desktop = get_desktop_env() if OS == "Darwin": set_mac_wallpaper(img) elif OS == "Windows": set_win_wallpaper(img) else: set_desktop_wallpaper(desktop, img) logging.info("Set the new wallpaper.")
python
def change(img): """Set the wallpaper.""" if not os.path.isfile(img): return desktop = get_desktop_env() if OS == "Darwin": set_mac_wallpaper(img) elif OS == "Windows": set_win_wallpaper(img) else: set_desktop_wallpaper(desktop, img) logging.info("Set the new wallpaper.")
[ "def", "change", "(", "img", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "img", ")", ":", "return", "desktop", "=", "get_desktop_env", "(", ")", "if", "OS", "==", "\"Darwin\"", ":", "set_mac_wallpaper", "(", "img", ")", "elif", "OS"...
Set the wallpaper.
[ "Set", "the", "wallpaper", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/wallpaper.py#L143-L159
train
219,266
dylanaraps/pywal
pywal/wallpaper.py
get
def get(cache_dir=CACHE_DIR): """Get the current wallpaper.""" current_wall = os.path.join(cache_dir, "wal") if os.path.isfile(current_wall): return util.read_file(current_wall)[0] return "None"
python
def get(cache_dir=CACHE_DIR): """Get the current wallpaper.""" current_wall = os.path.join(cache_dir, "wal") if os.path.isfile(current_wall): return util.read_file(current_wall)[0] return "None"
[ "def", "get", "(", "cache_dir", "=", "CACHE_DIR", ")", ":", "current_wall", "=", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "\"wal\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "current_wall", ")", ":", "return", "util", ".", "rea...
Get the current wallpaper.
[ "Get", "the", "current", "wallpaper", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/wallpaper.py#L162-L169
train
219,267
dylanaraps/pywal
pywal/util.py
save_file_json
def save_file_json(data, export_file): """Write data to a json file.""" create_dir(os.path.dirname(export_file)) with open(export_file, "w") as file: json.dump(data, file, indent=4)
python
def save_file_json(data, export_file): """Write data to a json file.""" create_dir(os.path.dirname(export_file)) with open(export_file, "w") as file: json.dump(data, file, indent=4)
[ "def", "save_file_json", "(", "data", ",", "export_file", ")", ":", "create_dir", "(", "os", ".", "path", ".", "dirname", "(", "export_file", ")", ")", "with", "open", "(", "export_file", ",", "\"w\"", ")", "as", "file", ":", "json", ".", "dump", "(", ...
Write data to a json file.
[ "Write", "data", "to", "a", "json", "file", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L90-L95
train
219,268
dylanaraps/pywal
pywal/util.py
setup_logging
def setup_logging(): """Logging config.""" logging.basicConfig(format=("[%(levelname)s\033[0m] " "\033[1;31m%(module)s\033[0m: " "%(message)s"), level=logging.INFO, stream=sys.stdout) logging.addLevelName(logging.ERROR, '\033[1;31mE') logging.addLevelName(logging.INFO, '\033[1;32mI') logging.addLevelName(logging.WARNING, '\033[1;33mW')
python
def setup_logging(): """Logging config.""" logging.basicConfig(format=("[%(levelname)s\033[0m] " "\033[1;31m%(module)s\033[0m: " "%(message)s"), level=logging.INFO, stream=sys.stdout) logging.addLevelName(logging.ERROR, '\033[1;31mE') logging.addLevelName(logging.INFO, '\033[1;32mI') logging.addLevelName(logging.WARNING, '\033[1;33mW')
[ "def", "setup_logging", "(", ")", ":", "logging", ".", "basicConfig", "(", "format", "=", "(", "\"[%(levelname)s\\033[0m] \"", "\"\\033[1;31m%(module)s\\033[0m: \"", "\"%(message)s\"", ")", ",", "level", "=", "logging", ".", "INFO", ",", "stream", "=", "sys", ".",...
Logging config.
[ "Logging", "config", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L103-L112
train
219,269
dylanaraps/pywal
pywal/util.py
darken_color
def darken_color(color, amount): """Darken a hex color.""" color = [int(col * (1 - amount)) for col in hex_to_rgb(color)] return rgb_to_hex(color)
python
def darken_color(color, amount): """Darken a hex color.""" color = [int(col * (1 - amount)) for col in hex_to_rgb(color)] return rgb_to_hex(color)
[ "def", "darken_color", "(", "color", ",", "amount", ")", ":", "color", "=", "[", "int", "(", "col", "*", "(", "1", "-", "amount", ")", ")", "for", "col", "in", "hex_to_rgb", "(", "color", ")", "]", "return", "rgb_to_hex", "(", "color", ")" ]
Darken a hex color.
[ "Darken", "a", "hex", "color", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L131-L134
train
219,270
dylanaraps/pywal
pywal/util.py
lighten_color
def lighten_color(color, amount): """Lighten a hex color.""" color = [int(col + (255 - col) * amount) for col in hex_to_rgb(color)] return rgb_to_hex(color)
python
def lighten_color(color, amount): """Lighten a hex color.""" color = [int(col + (255 - col) * amount) for col in hex_to_rgb(color)] return rgb_to_hex(color)
[ "def", "lighten_color", "(", "color", ",", "amount", ")", ":", "color", "=", "[", "int", "(", "col", "+", "(", "255", "-", "col", ")", "*", "amount", ")", "for", "col", "in", "hex_to_rgb", "(", "color", ")", "]", "return", "rgb_to_hex", "(", "color...
Lighten a hex color.
[ "Lighten", "a", "hex", "color", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L137-L140
train
219,271
dylanaraps/pywal
pywal/util.py
blend_color
def blend_color(color, color2): """Blend two colors together.""" r1, g1, b1 = hex_to_rgb(color) r2, g2, b2 = hex_to_rgb(color2) r3 = int(0.5 * r1 + 0.5 * r2) g3 = int(0.5 * g1 + 0.5 * g2) b3 = int(0.5 * b1 + 0.5 * b2) return rgb_to_hex((r3, g3, b3))
python
def blend_color(color, color2): """Blend two colors together.""" r1, g1, b1 = hex_to_rgb(color) r2, g2, b2 = hex_to_rgb(color2) r3 = int(0.5 * r1 + 0.5 * r2) g3 = int(0.5 * g1 + 0.5 * g2) b3 = int(0.5 * b1 + 0.5 * b2) return rgb_to_hex((r3, g3, b3))
[ "def", "blend_color", "(", "color", ",", "color2", ")", ":", "r1", ",", "g1", ",", "b1", "=", "hex_to_rgb", "(", "color", ")", "r2", ",", "g2", ",", "b2", "=", "hex_to_rgb", "(", "color2", ")", "r3", "=", "int", "(", "0.5", "*", "r1", "+", "0.5...
Blend two colors together.
[ "Blend", "two", "colors", "together", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L143-L152
train
219,272
dylanaraps/pywal
pywal/util.py
saturate_color
def saturate_color(color, amount): """Saturate a hex color.""" r, g, b = hex_to_rgb(color) r, g, b = [x/255.0 for x in (r, g, b)] h, l, s = colorsys.rgb_to_hls(r, g, b) s = amount r, g, b = colorsys.hls_to_rgb(h, l, s) r, g, b = [x*255.0 for x in (r, g, b)] return rgb_to_hex((int(r), int(g), int(b)))
python
def saturate_color(color, amount): """Saturate a hex color.""" r, g, b = hex_to_rgb(color) r, g, b = [x/255.0 for x in (r, g, b)] h, l, s = colorsys.rgb_to_hls(r, g, b) s = amount r, g, b = colorsys.hls_to_rgb(h, l, s) r, g, b = [x*255.0 for x in (r, g, b)] return rgb_to_hex((int(r), int(g), int(b)))
[ "def", "saturate_color", "(", "color", ",", "amount", ")", ":", "r", ",", "g", ",", "b", "=", "hex_to_rgb", "(", "color", ")", "r", ",", "g", ",", "b", "=", "[", "x", "/", "255.0", "for", "x", "in", "(", "r", ",", "g", ",", "b", ")", "]", ...
Saturate a hex color.
[ "Saturate", "a", "hex", "color", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L155-L164
train
219,273
dylanaraps/pywal
pywal/util.py
disown
def disown(cmd): """Call a system command in the background, disown it and hide it's output.""" subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
python
def disown(cmd): """Call a system command in the background, disown it and hide it's output.""" subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
[ "def", "disown", "(", "cmd", ")", ":", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "DEVNULL", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")" ]
Call a system command in the background, disown it and hide it's output.
[ "Call", "a", "system", "command", "in", "the", "background", "disown", "it", "and", "hide", "it", "s", "output", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L172-L177
train
219,274
dylanaraps/pywal
pywal/util.py
get_pid
def get_pid(name): """Check if process is running by name.""" if not shutil.which("pidof"): return False try: subprocess.check_output(["pidof", "-s", name]) except subprocess.CalledProcessError: return False return True
python
def get_pid(name): """Check if process is running by name.""" if not shutil.which("pidof"): return False try: subprocess.check_output(["pidof", "-s", name]) except subprocess.CalledProcessError: return False return True
[ "def", "get_pid", "(", "name", ")", ":", "if", "not", "shutil", ".", "which", "(", "\"pidof\"", ")", ":", "return", "False", "try", ":", "subprocess", ".", "check_output", "(", "[", "\"pidof\"", ",", "\"-s\"", ",", "name", "]", ")", "except", "subproce...
Check if process is running by name.
[ "Check", "if", "process", "is", "running", "by", "name", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/util.py#L180-L190
train
219,275
dylanaraps/pywal
pywal/image.py
get_image_dir
def get_image_dir(img_dir): """Get all images in a directory.""" current_wall = wallpaper.get() current_wall = os.path.basename(current_wall) file_types = (".png", ".jpg", ".jpeg", ".jpe", ".gif") return [img.name for img in os.scandir(img_dir) if img.name.lower().endswith(file_types)], current_wall
python
def get_image_dir(img_dir): """Get all images in a directory.""" current_wall = wallpaper.get() current_wall = os.path.basename(current_wall) file_types = (".png", ".jpg", ".jpeg", ".jpe", ".gif") return [img.name for img in os.scandir(img_dir) if img.name.lower().endswith(file_types)], current_wall
[ "def", "get_image_dir", "(", "img_dir", ")", ":", "current_wall", "=", "wallpaper", ".", "get", "(", ")", "current_wall", "=", "os", ".", "path", ".", "basename", "(", "current_wall", ")", "file_types", "=", "(", "\".png\"", ",", "\".jpg\"", ",", "\".jpeg\...
Get all images in a directory.
[ "Get", "all", "images", "in", "a", "directory", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/image.py#L15-L23
train
219,276
dylanaraps/pywal
pywal/image.py
get_random_image
def get_random_image(img_dir): """Pick a random image file from a directory.""" images, current_wall = get_image_dir(img_dir) if len(images) > 2 and current_wall in images: images.remove(current_wall) elif not images: logging.error("No images found in directory.") sys.exit(1) random.shuffle(images) return os.path.join(img_dir, images[0])
python
def get_random_image(img_dir): """Pick a random image file from a directory.""" images, current_wall = get_image_dir(img_dir) if len(images) > 2 and current_wall in images: images.remove(current_wall) elif not images: logging.error("No images found in directory.") sys.exit(1) random.shuffle(images) return os.path.join(img_dir, images[0])
[ "def", "get_random_image", "(", "img_dir", ")", ":", "images", ",", "current_wall", "=", "get_image_dir", "(", "img_dir", ")", "if", "len", "(", "images", ")", ">", "2", "and", "current_wall", "in", "images", ":", "images", ".", "remove", "(", "current_wal...
Pick a random image file from a directory.
[ "Pick", "a", "random", "image", "file", "from", "a", "directory", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/image.py#L26-L38
train
219,277
dylanaraps/pywal
pywal/image.py
get_next_image
def get_next_image(img_dir): """Get the next image in a dir.""" images, current_wall = get_image_dir(img_dir) images.sort(key=lambda img: [int(x) if x.isdigit() else x for x in re.split('([0-9]+)', img)]) try: next_index = images.index(current_wall) + 1 except ValueError: next_index = 0 try: image = images[next_index] except IndexError: image = images[0] return os.path.join(img_dir, image)
python
def get_next_image(img_dir): """Get the next image in a dir.""" images, current_wall = get_image_dir(img_dir) images.sort(key=lambda img: [int(x) if x.isdigit() else x for x in re.split('([0-9]+)', img)]) try: next_index = images.index(current_wall) + 1 except ValueError: next_index = 0 try: image = images[next_index] except IndexError: image = images[0] return os.path.join(img_dir, image)
[ "def", "get_next_image", "(", "img_dir", ")", ":", "images", ",", "current_wall", "=", "get_image_dir", "(", "img_dir", ")", "images", ".", "sort", "(", "key", "=", "lambda", "img", ":", "[", "int", "(", "x", ")", "if", "x", ".", "isdigit", "(", ")",...
Get the next image in a dir.
[ "Get", "the", "next", "image", "in", "a", "dir", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/image.py#L41-L59
train
219,278
dylanaraps/pywal
pywal/image.py
get
def get(img, cache_dir=CACHE_DIR, iterative=False): """Validate image input.""" if os.path.isfile(img): wal_img = img elif os.path.isdir(img): if iterative: wal_img = get_next_image(img) else: wal_img = get_random_image(img) else: logging.error("No valid image file found.") sys.exit(1) wal_img = os.path.abspath(wal_img) # Cache the image file path. util.save_file(wal_img, os.path.join(cache_dir, "wal")) logging.info("Using image \033[1;37m%s\033[0m.", os.path.basename(wal_img)) return wal_img
python
def get(img, cache_dir=CACHE_DIR, iterative=False): """Validate image input.""" if os.path.isfile(img): wal_img = img elif os.path.isdir(img): if iterative: wal_img = get_next_image(img) else: wal_img = get_random_image(img) else: logging.error("No valid image file found.") sys.exit(1) wal_img = os.path.abspath(wal_img) # Cache the image file path. util.save_file(wal_img, os.path.join(cache_dir, "wal")) logging.info("Using image \033[1;37m%s\033[0m.", os.path.basename(wal_img)) return wal_img
[ "def", "get", "(", "img", ",", "cache_dir", "=", "CACHE_DIR", ",", "iterative", "=", "False", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "img", ")", ":", "wal_img", "=", "img", "elif", "os", ".", "path", ".", "isdir", "(", "img", ")",...
Validate image input.
[ "Validate", "image", "input", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/image.py#L62-L84
train
219,279
dylanaraps/pywal
pywal/scripts/gtk_reload.py
gtk_reload
def gtk_reload(): """Reload GTK2 themes.""" events = gtk.gdk.Event(gtk.gdk.CLIENT_EVENT) data = gtk.gdk.atom_intern("_GTK_READ_RCFILES", False) events.data_format = 8 events.send_event = True events.message_type = data events.send_clientmessage_toall()
python
def gtk_reload(): """Reload GTK2 themes.""" events = gtk.gdk.Event(gtk.gdk.CLIENT_EVENT) data = gtk.gdk.atom_intern("_GTK_READ_RCFILES", False) events.data_format = 8 events.send_event = True events.message_type = data events.send_clientmessage_toall()
[ "def", "gtk_reload", "(", ")", ":", "events", "=", "gtk", ".", "gdk", ".", "Event", "(", "gtk", ".", "gdk", ".", "CLIENT_EVENT", ")", "data", "=", "gtk", ".", "gdk", ".", "atom_intern", "(", "\"_GTK_READ_RCFILES\"", ",", "False", ")", "events", ".", ...
Reload GTK2 themes.
[ "Reload", "GTK2", "themes", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/scripts/gtk_reload.py#L17-L24
train
219,280
dylanaraps/pywal
pywal/colors.py
list_backends
def list_backends(): """List color backends.""" return [b.name.replace(".py", "") for b in os.scandir(os.path.join(MODULE_DIR, "backends")) if "__" not in b.name]
python
def list_backends(): """List color backends.""" return [b.name.replace(".py", "") for b in os.scandir(os.path.join(MODULE_DIR, "backends")) if "__" not in b.name]
[ "def", "list_backends", "(", ")", ":", "return", "[", "b", ".", "name", ".", "replace", "(", "\".py\"", ",", "\"\"", ")", "for", "b", "in", "os", ".", "scandir", "(", "os", ".", "path", ".", "join", "(", "MODULE_DIR", ",", "\"backends\"", ")", ")",...
List color backends.
[ "List", "color", "backends", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/colors.py#L15-L19
train
219,281
dylanaraps/pywal
pywal/colors.py
colors_to_dict
def colors_to_dict(colors, img): """Convert list of colors to pywal format.""" return { "wallpaper": img, "alpha": util.Color.alpha_num, "special": { "background": colors[0], "foreground": colors[15], "cursor": colors[15] }, "colors": { "color0": colors[0], "color1": colors[1], "color2": colors[2], "color3": colors[3], "color4": colors[4], "color5": colors[5], "color6": colors[6], "color7": colors[7], "color8": colors[8], "color9": colors[9], "color10": colors[10], "color11": colors[11], "color12": colors[12], "color13": colors[13], "color14": colors[14], "color15": colors[15] } }
python
def colors_to_dict(colors, img): """Convert list of colors to pywal format.""" return { "wallpaper": img, "alpha": util.Color.alpha_num, "special": { "background": colors[0], "foreground": colors[15], "cursor": colors[15] }, "colors": { "color0": colors[0], "color1": colors[1], "color2": colors[2], "color3": colors[3], "color4": colors[4], "color5": colors[5], "color6": colors[6], "color7": colors[7], "color8": colors[8], "color9": colors[9], "color10": colors[10], "color11": colors[11], "color12": colors[12], "color13": colors[13], "color14": colors[14], "color15": colors[15] } }
[ "def", "colors_to_dict", "(", "colors", ",", "img", ")", ":", "return", "{", "\"wallpaper\"", ":", "img", ",", "\"alpha\"", ":", "util", ".", "Color", ".", "alpha_num", ",", "\"special\"", ":", "{", "\"background\"", ":", "colors", "[", "0", "]", ",", ...
Convert list of colors to pywal format.
[ "Convert", "list", "of", "colors", "to", "pywal", "format", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/colors.py#L22-L52
train
219,282
dylanaraps/pywal
pywal/colors.py
generic_adjust
def generic_adjust(colors, light): """Generic color adjustment for themers.""" if light: for color in colors: color = util.saturate_color(color, 0.60) color = util.darken_color(color, 0.5) colors[0] = util.lighten_color(colors[0], 0.95) colors[7] = util.darken_color(colors[0], 0.75) colors[8] = util.darken_color(colors[0], 0.25) colors[15] = colors[7] else: colors[0] = util.darken_color(colors[0], 0.80) colors[7] = util.lighten_color(colors[0], 0.75) colors[8] = util.lighten_color(colors[0], 0.25) colors[15] = colors[7] return colors
python
def generic_adjust(colors, light): """Generic color adjustment for themers.""" if light: for color in colors: color = util.saturate_color(color, 0.60) color = util.darken_color(color, 0.5) colors[0] = util.lighten_color(colors[0], 0.95) colors[7] = util.darken_color(colors[0], 0.75) colors[8] = util.darken_color(colors[0], 0.25) colors[15] = colors[7] else: colors[0] = util.darken_color(colors[0], 0.80) colors[7] = util.lighten_color(colors[0], 0.75) colors[8] = util.lighten_color(colors[0], 0.25) colors[15] = colors[7] return colors
[ "def", "generic_adjust", "(", "colors", ",", "light", ")", ":", "if", "light", ":", "for", "color", "in", "colors", ":", "color", "=", "util", ".", "saturate_color", "(", "color", ",", "0.60", ")", "color", "=", "util", ".", "darken_color", "(", "color...
Generic color adjustment for themers.
[ "Generic", "color", "adjustment", "for", "themers", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/colors.py#L55-L73
train
219,283
dylanaraps/pywal
pywal/colors.py
saturate_colors
def saturate_colors(colors, amount): """Saturate all colors.""" if amount and float(amount) <= 1.0: for i, _ in enumerate(colors): if i not in [0, 7, 8, 15]: colors[i] = util.saturate_color(colors[i], float(amount)) return colors
python
def saturate_colors(colors, amount): """Saturate all colors.""" if amount and float(amount) <= 1.0: for i, _ in enumerate(colors): if i not in [0, 7, 8, 15]: colors[i] = util.saturate_color(colors[i], float(amount)) return colors
[ "def", "saturate_colors", "(", "colors", ",", "amount", ")", ":", "if", "amount", "and", "float", "(", "amount", ")", "<=", "1.0", ":", "for", "i", ",", "_", "in", "enumerate", "(", "colors", ")", ":", "if", "i", "not", "in", "[", "0", ",", "7", ...
Saturate all colors.
[ "Saturate", "all", "colors", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/colors.py#L76-L83
train
219,284
dylanaraps/pywal
pywal/colors.py
get_backend
def get_backend(backend): """Figure out which backend to use.""" if backend == "random": backends = list_backends() random.shuffle(backends) return backends[0] return backend
python
def get_backend(backend): """Figure out which backend to use.""" if backend == "random": backends = list_backends() random.shuffle(backends) return backends[0] return backend
[ "def", "get_backend", "(", "backend", ")", ":", "if", "backend", "==", "\"random\"", ":", "backends", "=", "list_backends", "(", ")", "random", ".", "shuffle", "(", "backends", ")", "return", "backends", "[", "0", "]", "return", "backend" ]
Figure out which backend to use.
[ "Figure", "out", "which", "backend", "to", "use", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/colors.py#L97-L104
train
219,285
dylanaraps/pywal
pywal/colors.py
palette
def palette(): """Generate a palette from the colors.""" for i in range(0, 16): if i % 8 == 0: print() if i > 7: i = "8;5;%s" % i print("\033[4%sm%s\033[0m" % (i, " " * (80 // 20)), end="") print("\n")
python
def palette(): """Generate a palette from the colors.""" for i in range(0, 16): if i % 8 == 0: print() if i > 7: i = "8;5;%s" % i print("\033[4%sm%s\033[0m" % (i, " " * (80 // 20)), end="") print("\n")
[ "def", "palette", "(", ")", ":", "for", "i", "in", "range", "(", "0", ",", "16", ")", ":", "if", "i", "%", "8", "==", "0", ":", "print", "(", ")", "if", "i", ">", "7", ":", "i", "=", "\"8;5;%s\"", "%", "i", "print", "(", "\"\\033[4%sm%s\\033[...
Generate a palette from the colors.
[ "Generate", "a", "palette", "from", "the", "colors", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/colors.py#L107-L118
train
219,286
dylanaraps/pywal
pywal/backends/colorthief.py
gen_colors
def gen_colors(img): """Loop until 16 colors are generated.""" color_cmd = ColorThief(img).get_palette for i in range(0, 10, 1): raw_colors = color_cmd(color_count=8 + i) if len(raw_colors) >= 8: break elif i == 10: logging.error("ColorThief couldn't generate a suitable palette.") sys.exit(1) else: logging.warning("ColorThief couldn't generate a palette.") logging.warning("Trying a larger palette size %s", 8 + i) return [util.rgb_to_hex(color) for color in raw_colors]
python
def gen_colors(img): """Loop until 16 colors are generated.""" color_cmd = ColorThief(img).get_palette for i in range(0, 10, 1): raw_colors = color_cmd(color_count=8 + i) if len(raw_colors) >= 8: break elif i == 10: logging.error("ColorThief couldn't generate a suitable palette.") sys.exit(1) else: logging.warning("ColorThief couldn't generate a palette.") logging.warning("Trying a larger palette size %s", 8 + i) return [util.rgb_to_hex(color) for color in raw_colors]
[ "def", "gen_colors", "(", "img", ")", ":", "color_cmd", "=", "ColorThief", "(", "img", ")", ".", "get_palette", "for", "i", "in", "range", "(", "0", ",", "10", ",", "1", ")", ":", "raw_colors", "=", "color_cmd", "(", "color_count", "=", "8", "+", "...
Loop until 16 colors are generated.
[ "Loop", "until", "16", "colors", "are", "generated", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/backends/colorthief.py#L18-L36
train
219,287
dylanaraps/pywal
pywal/theme.py
list_out
def list_out(): """List all themes in a pretty format.""" dark_themes = [theme.name.replace(".json", "") for theme in list_themes()] ligh_themes = [theme.name.replace(".json", "") for theme in list_themes(dark=False)] user_themes = [theme.name.replace(".json", "") for theme in list_themes_user()] if user_themes: print("\033[1;32mUser Themes\033[0m:") print(" -", "\n - ".join(sorted(user_themes))) print("\033[1;32mDark Themes\033[0m:") print(" -", "\n - ".join(sorted(dark_themes))) print("\033[1;32mLight Themes\033[0m:") print(" -", "\n - ".join(sorted(ligh_themes))) print("\033[1;32mExtra\033[0m:") print(" - random (select a random dark theme)") print(" - random_dark (select a random dark theme)") print(" - random_light (select a random light theme)")
python
def list_out(): """List all themes in a pretty format.""" dark_themes = [theme.name.replace(".json", "") for theme in list_themes()] ligh_themes = [theme.name.replace(".json", "") for theme in list_themes(dark=False)] user_themes = [theme.name.replace(".json", "") for theme in list_themes_user()] if user_themes: print("\033[1;32mUser Themes\033[0m:") print(" -", "\n - ".join(sorted(user_themes))) print("\033[1;32mDark Themes\033[0m:") print(" -", "\n - ".join(sorted(dark_themes))) print("\033[1;32mLight Themes\033[0m:") print(" -", "\n - ".join(sorted(ligh_themes))) print("\033[1;32mExtra\033[0m:") print(" - random (select a random dark theme)") print(" - random_dark (select a random dark theme)") print(" - random_light (select a random light theme)")
[ "def", "list_out", "(", ")", ":", "dark_themes", "=", "[", "theme", ".", "name", ".", "replace", "(", "\".json\"", ",", "\"\"", ")", "for", "theme", "in", "list_themes", "(", ")", "]", "ligh_themes", "=", "[", "theme", ".", "name", ".", "replace", "(...
List all themes in a pretty format.
[ "List", "all", "themes", "in", "a", "pretty", "format", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L13-L35
train
219,288
dylanaraps/pywal
pywal/theme.py
list_themes
def list_themes(dark=True): """List all installed theme files.""" dark = "dark" if dark else "light" themes = os.scandir(os.path.join(MODULE_DIR, "colorschemes", dark)) return [t for t in themes if os.path.isfile(t.path)]
python
def list_themes(dark=True): """List all installed theme files.""" dark = "dark" if dark else "light" themes = os.scandir(os.path.join(MODULE_DIR, "colorschemes", dark)) return [t for t in themes if os.path.isfile(t.path)]
[ "def", "list_themes", "(", "dark", "=", "True", ")", ":", "dark", "=", "\"dark\"", "if", "dark", "else", "\"light\"", "themes", "=", "os", ".", "scandir", "(", "os", ".", "path", ".", "join", "(", "MODULE_DIR", ",", "\"colorschemes\"", ",", "dark", ")"...
List all installed theme files.
[ "List", "all", "installed", "theme", "files", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L38-L42
train
219,289
dylanaraps/pywal
pywal/theme.py
list_themes_user
def list_themes_user(): """List user theme files.""" themes = [*os.scandir(os.path.join(CONF_DIR, "colorschemes/dark/")), *os.scandir(os.path.join(CONF_DIR, "colorschemes/light/"))] return [t for t in themes if os.path.isfile(t.path)]
python
def list_themes_user(): """List user theme files.""" themes = [*os.scandir(os.path.join(CONF_DIR, "colorschemes/dark/")), *os.scandir(os.path.join(CONF_DIR, "colorschemes/light/"))] return [t for t in themes if os.path.isfile(t.path)]
[ "def", "list_themes_user", "(", ")", ":", "themes", "=", "[", "*", "os", ".", "scandir", "(", "os", ".", "path", ".", "join", "(", "CONF_DIR", ",", "\"colorschemes/dark/\"", ")", ")", ",", "*", "os", ".", "scandir", "(", "os", ".", "path", ".", "jo...
List user theme files.
[ "List", "user", "theme", "files", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L45-L49
train
219,290
dylanaraps/pywal
pywal/theme.py
terminal_sexy_to_wal
def terminal_sexy_to_wal(data): """Convert terminal.sexy json schema to wal.""" data["colors"] = {} data["special"] = { "foreground": data["foreground"], "background": data["background"], "cursor": data["color"][9] } for i, color in enumerate(data["color"]): data["colors"]["color%s" % i] = color return data
python
def terminal_sexy_to_wal(data): """Convert terminal.sexy json schema to wal.""" data["colors"] = {} data["special"] = { "foreground": data["foreground"], "background": data["background"], "cursor": data["color"][9] } for i, color in enumerate(data["color"]): data["colors"]["color%s" % i] = color return data
[ "def", "terminal_sexy_to_wal", "(", "data", ")", ":", "data", "[", "\"colors\"", "]", "=", "{", "}", "data", "[", "\"special\"", "]", "=", "{", "\"foreground\"", ":", "data", "[", "\"foreground\"", "]", ",", "\"background\"", ":", "data", "[", "\"backgroun...
Convert terminal.sexy json schema to wal.
[ "Convert", "terminal", ".", "sexy", "json", "schema", "to", "wal", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L52-L64
train
219,291
dylanaraps/pywal
pywal/theme.py
parse
def parse(theme_file): """Parse the theme file.""" data = util.read_file_json(theme_file) if "wallpaper" not in data: data["wallpaper"] = "None" if "alpha" not in data: data["alpha"] = util.Color.alpha_num # Terminal.sexy format. if "color" in data: data = terminal_sexy_to_wal(data) return data
python
def parse(theme_file): """Parse the theme file.""" data = util.read_file_json(theme_file) if "wallpaper" not in data: data["wallpaper"] = "None" if "alpha" not in data: data["alpha"] = util.Color.alpha_num # Terminal.sexy format. if "color" in data: data = terminal_sexy_to_wal(data) return data
[ "def", "parse", "(", "theme_file", ")", ":", "data", "=", "util", ".", "read_file_json", "(", "theme_file", ")", "if", "\"wallpaper\"", "not", "in", "data", ":", "data", "[", "\"wallpaper\"", "]", "=", "\"None\"", "if", "\"alpha\"", "not", "in", "data", ...
Parse the theme file.
[ "Parse", "the", "theme", "file", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L67-L81
train
219,292
dylanaraps/pywal
pywal/theme.py
get_random_theme
def get_random_theme(dark=True): """Get a random theme file.""" themes = [theme.path for theme in list_themes(dark)] random.shuffle(themes) return themes[0]
python
def get_random_theme(dark=True): """Get a random theme file.""" themes = [theme.path for theme in list_themes(dark)] random.shuffle(themes) return themes[0]
[ "def", "get_random_theme", "(", "dark", "=", "True", ")", ":", "themes", "=", "[", "theme", ".", "path", "for", "theme", "in", "list_themes", "(", "dark", ")", "]", "random", ".", "shuffle", "(", "themes", ")", "return", "themes", "[", "0", "]" ]
Get a random theme file.
[ "Get", "a", "random", "theme", "file", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L84-L88
train
219,293
dylanaraps/pywal
pywal/theme.py
file
def file(input_file, light=False): """Import colorscheme from json file.""" util.create_dir(os.path.join(CONF_DIR, "colorschemes/light/")) util.create_dir(os.path.join(CONF_DIR, "colorschemes/dark/")) theme_name = ".".join((input_file, "json")) bri = "light" if light else "dark" user_theme_file = os.path.join(CONF_DIR, "colorschemes", bri, theme_name) theme_file = os.path.join(MODULE_DIR, "colorschemes", bri, theme_name) # Find the theme file. if input_file in ("random", "random_dark"): theme_file = get_random_theme() elif input_file == "random_light": theme_file = get_random_theme(light) elif os.path.isfile(user_theme_file): theme_file = user_theme_file elif os.path.isfile(input_file): theme_file = input_file # Parse the theme file. if os.path.isfile(theme_file): logging.info("Set theme to \033[1;37m%s\033[0m.", os.path.basename(theme_file)) return parse(theme_file) logging.error("No %s colorscheme file found.", bri) logging.error("Try adding '-l' to set light themes.") logging.error("Try removing '-l' to set dark themes.") sys.exit(1)
python
def file(input_file, light=False): """Import colorscheme from json file.""" util.create_dir(os.path.join(CONF_DIR, "colorschemes/light/")) util.create_dir(os.path.join(CONF_DIR, "colorschemes/dark/")) theme_name = ".".join((input_file, "json")) bri = "light" if light else "dark" user_theme_file = os.path.join(CONF_DIR, "colorschemes", bri, theme_name) theme_file = os.path.join(MODULE_DIR, "colorschemes", bri, theme_name) # Find the theme file. if input_file in ("random", "random_dark"): theme_file = get_random_theme() elif input_file == "random_light": theme_file = get_random_theme(light) elif os.path.isfile(user_theme_file): theme_file = user_theme_file elif os.path.isfile(input_file): theme_file = input_file # Parse the theme file. if os.path.isfile(theme_file): logging.info("Set theme to \033[1;37m%s\033[0m.", os.path.basename(theme_file)) return parse(theme_file) logging.error("No %s colorscheme file found.", bri) logging.error("Try adding '-l' to set light themes.") logging.error("Try removing '-l' to set dark themes.") sys.exit(1)
[ "def", "file", "(", "input_file", ",", "light", "=", "False", ")", ":", "util", ".", "create_dir", "(", "os", ".", "path", ".", "join", "(", "CONF_DIR", ",", "\"colorschemes/light/\"", ")", ")", "util", ".", "create_dir", "(", "os", ".", "path", ".", ...
Import colorscheme from json file.
[ "Import", "colorscheme", "from", "json", "file", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/theme.py#L91-L124
train
219,294
dylanaraps/pywal
pywal/backends/wal.py
imagemagick
def imagemagick(color_count, img, magick_command): """Call Imagemagick to generate a scheme.""" flags = ["-resize", "25%", "-colors", str(color_count), "-unique-colors", "txt:-"] img += "[0]" return subprocess.check_output([*magick_command, img, *flags]).splitlines()
python
def imagemagick(color_count, img, magick_command): """Call Imagemagick to generate a scheme.""" flags = ["-resize", "25%", "-colors", str(color_count), "-unique-colors", "txt:-"] img += "[0]" return subprocess.check_output([*magick_command, img, *flags]).splitlines()
[ "def", "imagemagick", "(", "color_count", ",", "img", ",", "magick_command", ")", ":", "flags", "=", "[", "\"-resize\"", ",", "\"25%\"", ",", "\"-colors\"", ",", "str", "(", "color_count", ")", ",", "\"-unique-colors\"", ",", "\"txt:-\"", "]", "img", "+=", ...
Call Imagemagick to generate a scheme.
[ "Call", "Imagemagick", "to", "generate", "a", "scheme", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/backends/wal.py#L13-L19
train
219,295
dylanaraps/pywal
pywal/backends/wal.py
has_im
def has_im(): """Check to see if the user has im installed.""" if shutil.which("magick"): return ["magick", "convert"] if shutil.which("convert"): return ["convert"] logging.error("Imagemagick wasn't found on your system.") logging.error("Try another backend. (wal --backend)") sys.exit(1)
python
def has_im(): """Check to see if the user has im installed.""" if shutil.which("magick"): return ["magick", "convert"] if shutil.which("convert"): return ["convert"] logging.error("Imagemagick wasn't found on your system.") logging.error("Try another backend. (wal --backend)") sys.exit(1)
[ "def", "has_im", "(", ")", ":", "if", "shutil", ".", "which", "(", "\"magick\"", ")", ":", "return", "[", "\"magick\"", ",", "\"convert\"", "]", "if", "shutil", ".", "which", "(", "\"convert\"", ")", ":", "return", "[", "\"convert\"", "]", "logging", "...
Check to see if the user has im installed.
[ "Check", "to", "see", "if", "the", "user", "has", "im", "installed", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/backends/wal.py#L22-L32
train
219,296
dylanaraps/pywal
pywal/backends/wal.py
gen_colors
def gen_colors(img): """Format the output from imagemagick into a list of hex colors.""" magick_command = has_im() for i in range(0, 20, 1): raw_colors = imagemagick(16 + i, img, magick_command) if len(raw_colors) > 16: break elif i == 19: logging.error("Imagemagick couldn't generate a suitable palette.") sys.exit(1) else: logging.warning("Imagemagick couldn't generate a palette.") logging.warning("Trying a larger palette size %s", 16 + i) return [re.search("#.{6}", str(col)).group(0) for col in raw_colors[1:]]
python
def gen_colors(img): """Format the output from imagemagick into a list of hex colors.""" magick_command = has_im() for i in range(0, 20, 1): raw_colors = imagemagick(16 + i, img, magick_command) if len(raw_colors) > 16: break elif i == 19: logging.error("Imagemagick couldn't generate a suitable palette.") sys.exit(1) else: logging.warning("Imagemagick couldn't generate a palette.") logging.warning("Trying a larger palette size %s", 16 + i) return [re.search("#.{6}", str(col)).group(0) for col in raw_colors[1:]]
[ "def", "gen_colors", "(", "img", ")", ":", "magick_command", "=", "has_im", "(", ")", "for", "i", "in", "range", "(", "0", ",", "20", ",", "1", ")", ":", "raw_colors", "=", "imagemagick", "(", "16", "+", "i", ",", "img", ",", "magick_command", ")",...
Format the output from imagemagick into a list of hex colors.
[ "Format", "the", "output", "from", "imagemagick", "into", "a", "list", "of", "hex", "colors", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/backends/wal.py#L35-L54
train
219,297
dylanaraps/pywal
pywal/backends/wal.py
adjust
def adjust(colors, light): """Adjust the generated colors and store them in a dict that we will later save in json format.""" raw_colors = colors[:1] + colors[8:16] + colors[8:-1] # Manually adjust colors. if light: for color in raw_colors: color = util.saturate_color(color, 0.5) raw_colors[0] = util.lighten_color(colors[-1], 0.85) raw_colors[7] = colors[0] raw_colors[8] = util.darken_color(colors[-1], 0.4) raw_colors[15] = colors[0] else: # Darken the background color slightly. if raw_colors[0][1] != "0": raw_colors[0] = util.darken_color(raw_colors[0], 0.40) raw_colors[7] = util.blend_color(raw_colors[7], "#EEEEEE") raw_colors[8] = util.darken_color(raw_colors[7], 0.30) raw_colors[15] = util.blend_color(raw_colors[15], "#EEEEEE") return raw_colors
python
def adjust(colors, light): """Adjust the generated colors and store them in a dict that we will later save in json format.""" raw_colors = colors[:1] + colors[8:16] + colors[8:-1] # Manually adjust colors. if light: for color in raw_colors: color = util.saturate_color(color, 0.5) raw_colors[0] = util.lighten_color(colors[-1], 0.85) raw_colors[7] = colors[0] raw_colors[8] = util.darken_color(colors[-1], 0.4) raw_colors[15] = colors[0] else: # Darken the background color slightly. if raw_colors[0][1] != "0": raw_colors[0] = util.darken_color(raw_colors[0], 0.40) raw_colors[7] = util.blend_color(raw_colors[7], "#EEEEEE") raw_colors[8] = util.darken_color(raw_colors[7], 0.30) raw_colors[15] = util.blend_color(raw_colors[15], "#EEEEEE") return raw_colors
[ "def", "adjust", "(", "colors", ",", "light", ")", ":", "raw_colors", "=", "colors", "[", ":", "1", "]", "+", "colors", "[", "8", ":", "16", "]", "+", "colors", "[", "8", ":", "-", "1", "]", "# Manually adjust colors.", "if", "light", ":", "for", ...
Adjust the generated colors and store them in a dict that we will later save in json format.
[ "Adjust", "the", "generated", "colors", "and", "store", "them", "in", "a", "dict", "that", "we", "will", "later", "save", "in", "json", "format", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/backends/wal.py#L57-L81
train
219,298
dylanaraps/pywal
pywal/__main__.py
parse_args_exit
def parse_args_exit(parser): """Process args that exit.""" args = parser.parse_args() if len(sys.argv) <= 1: parser.print_help() sys.exit(1) if args.v: parser.exit(0, "wal %s\n" % __version__) if args.preview: print("Current colorscheme:", sep='') colors.palette() sys.exit(0) if args.i and args.theme: parser.error("Conflicting arguments -i and -f.") if args.r: reload.colors() sys.exit(0) if args.c: scheme_dir = os.path.join(CACHE_DIR, "schemes") shutil.rmtree(scheme_dir, ignore_errors=True) sys.exit(0) if not args.i and \ not args.theme and \ not args.R and \ not args.backend: parser.error("No input specified.\n" "--backend, --theme, -i or -R are required.") if args.theme == "list_themes": theme.list_out() sys.exit(0) if args.backend == "list_backends": print("\n - ".join(["\033[1;32mBackends\033[0m:", *colors.list_backends()])) sys.exit(0)
python
def parse_args_exit(parser): """Process args that exit.""" args = parser.parse_args() if len(sys.argv) <= 1: parser.print_help() sys.exit(1) if args.v: parser.exit(0, "wal %s\n" % __version__) if args.preview: print("Current colorscheme:", sep='') colors.palette() sys.exit(0) if args.i and args.theme: parser.error("Conflicting arguments -i and -f.") if args.r: reload.colors() sys.exit(0) if args.c: scheme_dir = os.path.join(CACHE_DIR, "schemes") shutil.rmtree(scheme_dir, ignore_errors=True) sys.exit(0) if not args.i and \ not args.theme and \ not args.R and \ not args.backend: parser.error("No input specified.\n" "--backend, --theme, -i or -R are required.") if args.theme == "list_themes": theme.list_out() sys.exit(0) if args.backend == "list_backends": print("\n - ".join(["\033[1;32mBackends\033[0m:", *colors.list_backends()])) sys.exit(0)
[ "def", "parse_args_exit", "(", "parser", ")", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "len", "(", "sys", ".", "argv", ")", "<=", "1", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "if", "arg...
Process args that exit.
[ "Process", "args", "that", "exit", "." ]
c823e3c9dbd0100ca09caf824e77d296685a1c1e
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/__main__.py#L105-L147
train
219,299