bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | classifyplot_from_valfile

def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None,
                              samples=None, callers=None):
    """Create a plot from a summarized validation file.

    Does new-style plotting of summarized metrics of false negative rate
    and false discovery rate.
    https://en.wikipedia.org/wiki/Sensitivity_and_specificity
    """
    mpl.use('Agg', force=True)
    df = pd.read_csv(val_file)
    grouped = df.groupby(["sample", "caller", "vtype"])
    df = grouped.apply(_calculate_fnr_fdr)
    df = df.reset_index()
    if len(df) == 0:
        return []
    else:
        out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype)
        _do_classifyplot(df, out_file, title, size, samples, callers)
        return [out_file]

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L35-L53
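
The grouped helper _calculate_fnr_fdr is not part of this extract. A minimal,
self-contained sketch of the metrics it summarizes, assuming the validation CSV
carries true positive (tp), false negative (fn) and false positive (fp) counts
per sample/caller/vtype group (the column layout here is illustrative, not the
actual bcbio format):

import pandas as pd

# Hypothetical summarized validation counts for a single group.
df = pd.DataFrame([
    {"sample": "NA12878", "caller": "gatk", "vtype": "SNP", "metric": "tp", "value": 950},
    {"sample": "NA12878", "caller": "gatk", "vtype": "SNP", "metric": "fn", "value": 50},
    {"sample": "NA12878", "caller": "gatk", "vtype": "SNP", "metric": "fp", "value": 25},
])

def calculate_fnr_fdr(group):
    # FNR = fn / (tp + fn); FDR = fp / (tp + fp), both as percentages.
    counts = dict(zip(group["metric"], group["value"]))
    return pd.DataFrame([{"fnr": counts["fn"] * 100.0 / (counts["tp"] + counts["fn"]),
                          "fdr": counts["fp"] * 100.0 / (counts["tp"] + counts["fp"])}])

print(df.groupby(["sample", "caller", "vtype"]).apply(calculate_fnr_fdr).reset_index())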

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | create

def create(plot_data, header, ploti, sample_config, out_file_base, outtype="png",
           title=None, size=None):
    """Create plots of validation results for a sample, labeling prep strategies.
    """
    if mpl is None or plt is None or sns is None:
        # eval reports which of the module-level plotting imports failed
        not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None])
        logger.info("No validation plot. Missing imports: %s" % not_found)
        return None
    mpl.use('Agg', force=True)
    if header:
        df = pd.DataFrame(plot_data, columns=header)
    else:
        df = plot_data
    df["aligner"] = [get_aligner(x, sample_config) for x in df["sample"]]
    df["bamprep"] = [get_bamprep(x, sample_config) for x in df["sample"]]
    floors = get_group_floors(df, cat_labels)
    df["value.floor"] = [get_floor_value(x, cat, vartype, floors)
                         for (x, cat, vartype) in zip(df["value"], df["category"],
                                                      df["variant.type"])]
    out = []
    for i, prep in enumerate(df["bamprep"].unique()):
        out.append(plot_prep_methods(df, prep, i + ploti, out_file_base,
                                     outtype, title, size))
    return out

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L145-L167

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | plot_prep_methods

def plot_prep_methods(df, prep, prepi, out_file_base, outtype,
                      title=None, size=None):
    """Plot comparison between BAM preparation methods.
    """
    samples = df[(df["bamprep"] == prep)]["sample"].unique()
    assert len(samples) >= 1, samples
    out_file = "%s-%s.%s" % (out_file_base, samples[0], outtype)
    df = df[df["category"].isin(cat_labels)]
    _seaborn(df, prep, prepi, out_file, title, size)
    return out_file

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L178-L187

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | _seaborn

def _seaborn(df, prep, prepi, out_file, title=None, size=None):
    """Plot using seaborn wrapper around matplotlib.
    """
    plt.ioff()
    sns.set(style='dark')
    vtypes = df["variant.type"].unique()
    callers = sorted(df["caller"].unique())
    cats = _check_cats(["concordant", "discordant-missing-total",
                        "discordant-extra-total", "discordant-shared-total"],
                       vtypes, df, prep, callers)
    fig, axs = plt.subplots(len(vtypes), len(cats))
    width = 0.8
    for i, vtype in enumerate(vtypes):
        ax_row = axs[i] if len(vtypes) > 1 else axs
        for j, cat in enumerate(cats):
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            if len(cats) == 1:
                assert j == 0
                ax = ax_row
            else:
                ax = ax_row[j]
            if i == 0:
                ax.set_title(cat_labels[cat], size=14)
            ax.get_yaxis().set_ticks([])
            if j == 0:
                ax.set_ylabel(vtype_labels[vtype], size=14)
            ax.bar(np.arange(len(callers)), vals, width=width)
            ax.set_ylim(0, maxval)
            if i == len(vtypes) - 1:
                ax.set_xticks(np.arange(len(callers)) + width / 2.0)
                ax.set_xticklabels([caller_labels.get(x, x).replace("__", "\n") if x else ""
                                    for x in callers], size=8, rotation=45)
            else:
                ax.get_xaxis().set_ticks([])
            _annotate(ax, labels, vals, np.arange(len(callers)), width)
    fig.text(.5, .95, prep_labels.get(prep, "") if title is None else title,
             horizontalalignment='center', size=16)
    fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15,
                        wspace=0.1, hspace=0.1)
    x, y = (10, 5) if size is None else size
    fig.set_size_inches(x, y)
    fig.savefig(out_file)

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L189-L228

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | _check_cats

def _check_cats(cats, vtypes, df, prep, callers):
    """Only include categories in the final output if they have values.
    """
    out = []
    for cat in cats:
        all_vals = []
        for vtype in vtypes:
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            all_vals.extend(vals)
        if sum(all_vals) / float(len(all_vals)) > 2:
            out.append(cat)
    if len(out) == 0:
        return cats
    else:
        return out

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L230-L244
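
The keep/drop decision is a simple mean bar height threshold; the function's
inner test on hypothetical values:

all_vals = [0, 1, 0, 2]                          # bar heights for a near-empty category
print(sum(all_vals) / float(len(all_vals)) > 2)  # False: mean 0.75, column dropped
all_vals = [120, 80, 95]
print(sum(all_vals) / float(len(all_vals)) > 2)  # True: column kept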

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | _get_chart_info

def _get_chart_info(df, vtype, cat, prep, callers):
    """Retrieve values for a specific variant type, category and prep method.
    """
    maxval_raw = max(list(df["value.floor"]))
    curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat)
               & (df["bamprep"] == prep)]
    vals = []
    labels = []
    for c in callers:
        row = curdf[df["caller"] == c]
        if len(row) > 0:
            vals.append(list(row["value.floor"])[0])
            labels.append(list(row["value"])[0])
        else:
            vals.append(1)
            labels.append("")
    return vals, labels, maxval_raw

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L246-L262

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | _annotate

def _annotate(ax, annotate, height, left, width):
    """Annotate axis with labels.
    """
    annotate_yrange_factor = 0.010
    xticks = np.array(left) + width / 2.0
    ymin, ymax = ax.get_ylim()
    yrange = ymax - ymin
    # Reset ymax and ymin so there's enough room to see the annotation of
    # the top-most bar
    if ymax > 0:
        ymax += yrange * 0.15
    if ymin < 0:
        ymin -= yrange * 0.15
    ax.set_ylim(ymin, ymax)
    yrange = ymax - ymin
    offset_ = yrange * annotate_yrange_factor
    # collections.Iterable moved to collections.abc in Python 3.3
    # (and was removed from collections in 3.10)
    if isinstance(annotate, collections.Iterable):
        annotations = map(str, annotate)
    else:
        annotations = ['%.3f' % h if type(h) is np.float_ else str(h)
                       for h in height]
    for x, h, annotation in zip(xticks, height, annotations):
        # Adjust the offset to account for negative bars
        offset = offset_ if h >= 0 else -1 * offset_
        verticalalignment = 'bottom' if h >= 0 else 'top'
        if len(str(annotation)) > 6:
            size = 7
        elif len(str(annotation)) > 5:
            size = 8
        else:
            size = 10
        # Finally, add the text to the axes
        ax.annotate(annotation, (x, h + offset),
                    verticalalignment=verticalalignment,
                    horizontalalignment='center',
                    size=size)

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L264-L302

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | _ggplot

def _ggplot(df, out_file):
    """Plot faceted items with ggplot wrapper on top of matplotlib.

    XXX Not yet functional
    """
    import ggplot as gg
    df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]]
    df["category"] = [cat_labels[x] for x in df["category"]]
    df["caller"] = [caller_labels.get(x, None) for x in df["caller"]]
    p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor"))
         + gg.geom_bar()
         + gg.facet_wrap("variant.type", "category")
         + gg.theme_seaborn())
    gg.ggsave(p, out_file)

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L304-L315

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | get_floor_value

def get_floor_value(x, cat, vartype, floors):
    """Modify values so all have the same relative scale for differences.

    Using the chosen base heights, adjusts an individual sub-plot to be
    consistent relative to that height.
    """
    all_base = floors[vartype]
    cur_max = floors[(cat, vartype)]
    if cur_max > all_base:
        diff = cur_max - all_base
        x = max(1, x - diff)
    return x

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L317-L328
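
A quick worked example of the shift, using the function above with a
hypothetical floors mapping (variant-type base 100, concordant sub-plot
max 5000):

floors = {"SNP": 100, ("concordant", "SNP"): 5000}
# Values in the oversized sub-plot are shifted down by the 4900 difference,
# never dropping below 1 so the bar stays visible.
assert get_floor_value(4950, "concordant", "SNP", floors) == 50
assert get_floor_value(4500, "concordant", "SNP", floors) == 1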

bcbio/bcbio-nextgen | bcbio/variation/validateplot.py | get_group_floors

def get_group_floors(df, cat_labels):
    """Retrieve the floor for a given row of comparisons, creating a normalized set of differences.

    We need to set non-zero floors so large numbers (like concordance) don't
    drown out small numbers (like discordance). This defines the height for a
    row of comparisons as either the minimum height of any sub-plot, or the
    maximum difference between higher and lower (plus 10%).
    """
    group_maxes = collections.defaultdict(list)
    group_diffs = collections.defaultdict(list)
    diff_pad = 0.1  # 10% padding onto difference to avoid large numbers looking like zero
    for name, group in df.groupby(["category", "variant.type"]):
        label, stype = name
        if label in cat_labels:
            diff = max(group["value"]) - min(group["value"])
            group_diffs[stype].append(diff + int(diff_pad * diff))
            group_maxes[stype].append(max(group["value"]))
        group_maxes[name].append(max(group["value"]))
    out = {}
    for k, vs in group_maxes.items():
        if k in group_diffs:
            # note: stype still holds the last value from the loop above;
            # group_diffs[k] may have been intended here
            out[k] = max(max(group_diffs[stype]), min(vs))
        else:
            out[k] = min(vs)
    return out

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L330-L354

bcbio/bcbio-nextgen | scripts/bcbio_nextgen.py | _sanity_check_args

def _sanity_check_args(args):
    """Ensure dependent arguments are correctly specified
    """
    if "scheduler" in args and "queue" in args:
        if args.scheduler and not args.queue:
            if args.scheduler != "sge":
                return "IPython parallel scheduler (-s) specified. This also requires a queue (-q)."
        elif args.queue and not args.scheduler:
            return "IPython parallel queue (-q) supplied. This also requires a scheduler (-s)."
        elif args.paralleltype == "ipython" and (not args.queue or not args.scheduler):
            return "IPython parallel requires queue (-q) and scheduler (-s) arguments."

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen.py#L143-L153
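
Checking the validation with a hypothetical argparse namespace, using the
function above ("scheduler" in args works because argparse.Namespace supports
membership tests over its attributes):

import argparse

args = argparse.Namespace(scheduler="slurm", queue=None, paralleltype="ipython")
print(_sanity_check_args(args))
# -> IPython parallel scheduler (-s) specified. This also requires a queue (-q).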

bcbio/bcbio-nextgen | scripts/bcbio_nextgen.py | _add_inputs_to_kwargs

def _add_inputs_to_kwargs(args, kwargs, parser):
    """Convert input system config, flow cell directory and sample yaml to kwargs.

    Handles backwards compatibility with previous command lines while allowing
    flexible specification of input parameters.
    """
    inputs = [x for x in [args.global_config, args.fc_dir] + args.run_config
              if x is not None]
    global_config = "bcbio_system.yaml"  # default configuration if not specified
    if kwargs.get("workflow", "") == "template":
        if args.only_metadata:
            inputs.append("--only-metadata")
        if args.force_single:
            inputs.append("--force-single")
        if args.separators:
            inputs.extend(["--separators", args.separators])
        kwargs["inputs"] = inputs
        return kwargs
    elif len(inputs) == 1:
        if os.path.isfile(inputs[0]):
            fc_dir = None
            run_info_yaml = inputs[0]
        else:
            fc_dir = inputs[0]
            run_info_yaml = None
    elif len(inputs) == 2:
        if os.path.isfile(inputs[0]):
            global_config = inputs[0]
            if os.path.isfile(inputs[1]):
                fc_dir = None
                run_info_yaml = inputs[1]
            else:
                fc_dir = inputs[1]
                run_info_yaml = None
        else:
            fc_dir, run_info_yaml = inputs
    elif len(inputs) == 3:
        global_config, fc_dir, run_info_yaml = inputs
    elif args.version:
        print(version.__version__)
        sys.exit()
    else:
        print("Incorrect input arguments", inputs)
        parser.print_help()
        sys.exit()
    if fc_dir:
        fc_dir = os.path.abspath(fc_dir)
    if run_info_yaml:
        run_info_yaml = os.path.abspath(run_info_yaml)
    if kwargs.get("workflow"):
        kwargs["inputs"] = inputs
    kwargs["config_file"] = global_config
    kwargs["fc_dir"] = fc_dir
    kwargs["run_info_yaml"] = run_info_yaml
    return kwargs

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen.py#L162-L216
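
How the positional inputs are interpreted, as hypothetical invocations (a path
that exists as a file is treated as YAML, otherwise as a flow cell directory):

# bcbio_nextgen.py sample_config.yaml                      -> run_info_yaml only
# bcbio_nextgen.py /path/to/flowcell                       -> fc_dir only
# bcbio_nextgen.py bcbio_system.yaml sample_config.yaml    -> system config + run_info_yaml
# bcbio_nextgen.py /path/to/flowcell sample_config.yaml    -> fc_dir + run_info_yaml
# bcbio_nextgen.py bcbio_system.yaml /path/to/flowcell sample_config.yaml
#                                                          -> all three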

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | _add_commas

def _add_commas(s, sep=','):
    """Add commas to output counts.

    From: http://code.activestate.com/recipes/498181
    """
    if len(s) <= 3:
        return s
    return _add_commas(s[:-3], sep) + sep + s[-3:]

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L428-L436
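
The recursion peels three digits at a time from the right; note it operates on
strings, not ints:

assert _add_commas("250") == "250"
assert _add_commas("1234") == "1,234"
assert _add_commas("2512498") == "2,512,498"
assert _add_commas("1234567", sep=".") == "1.234.567"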

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | bed_to_interval

# Used as a context manager ("with bed_to_interval(...) as x:", see
# PicardMetrics._hybrid_select_metrics below); the @contextlib.contextmanager
# decorator on the source function is not captured in this extract.
def bed_to_interval(orig_bed, bam_file):
    """Add header and format BED bait and target files for Picard if necessary.
    """
    with open(orig_bed) as in_handle:
        line = in_handle.readline()
    if line.startswith("@"):
        yield orig_bed
    else:
        with pysam.Samfile(bam_file, "rb") as bam_handle:
            header = bam_handle.text
        with tmpfile(dir=os.path.dirname(orig_bed), prefix="picardbed") as tmp_bed:
            with open(tmp_bed, "w") as out_handle:
                out_handle.write(header)
                with open(orig_bed) as in_handle:
                    for i, line in enumerate(in_handle):
                        parts = line.rstrip().split("\t")
                        if len(parts) == 4:
                            chrom, start, end, name = parts
                            strand = "+"
                        elif len(parts) >= 3:
                            chrom, start, end = parts[:3]
                            strand = "+"
                            name = "r%s" % i
                        out = [chrom, start, end, strand, name]
                        out_handle.write("\t".join(out) + "\n")
            yield tmp_bed

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L440-L465

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | PicardMetricsParser.get_summary_metrics

def get_summary_metrics(self, align_metrics, dup_metrics, insert_metrics=None,
                        hybrid_metrics=None, vrn_vals=None, rnaseq_metrics=None):
    """Retrieve a high level summary of interesting metrics.
    """
    with open(align_metrics) as in_handle:
        align_vals = self._parse_align_metrics(in_handle)
    if dup_metrics:
        with open(dup_metrics) as in_handle:
            dup_vals = self._parse_dup_metrics(in_handle)
    else:
        dup_vals = {}
    (insert_vals, hybrid_vals, rnaseq_vals) = (None, None, None)
    if insert_metrics and file_exists(insert_metrics):
        with open(insert_metrics) as in_handle:
            insert_vals = self._parse_insert_metrics(in_handle)
    if hybrid_metrics and file_exists(hybrid_metrics):
        with open(hybrid_metrics) as in_handle:
            hybrid_vals = self._parse_hybrid_metrics(in_handle)
    if rnaseq_metrics and file_exists(rnaseq_metrics):
        with open(rnaseq_metrics) as in_handle:
            rnaseq_vals = self._parse_rnaseq_metrics(in_handle)
    return self._tabularize_metrics(align_vals, dup_vals, insert_vals,
                                    hybrid_vals, vrn_vals, rnaseq_vals)

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L24-L48

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | PicardMetricsParser.extract_metrics

def extract_metrics(self, metrics_files):
    """Return summary information for a lane of metrics files.
    """
    extension_maps = dict(
        align_metrics=(self._parse_align_metrics, "AL"),
        dup_metrics=(self._parse_dup_metrics, "DUP"),
        hs_metrics=(self._parse_hybrid_metrics, "HS"),
        insert_metrics=(self._parse_insert_metrics, "INS"),
        rnaseq_metrics=(self._parse_rnaseq_metrics, "RNA"))
    all_metrics = dict()
    for fname in metrics_files:
        ext = os.path.splitext(fname)[-1][1:]
        try:
            parse_fn, prefix = extension_maps[ext]
        except KeyError:
            parse_fn = None
        if parse_fn:
            with open(fname) as in_handle:
                for key, val in parse_fn(in_handle).items():
                    if not key.startswith(prefix):
                        key = "%s_%s" % (prefix, key)
                    all_metrics[key] = val
    return all_metrics

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L50-L72

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | PicardMetrics.report

def report(self, align_bam, ref_file, is_paired, bait_file, target_file,
           variant_region_file, config):
    """Produce report metrics using Picard with sorted aligned BAM file.
    """
    dup_metrics = self._get_current_dup_metrics(align_bam)
    align_metrics = self._collect_align_metrics(align_bam, ref_file)
    # Prefer the GC metrics in FastQC instead of Picard
    # gc_graph, gc_metrics = self._gc_bias(align_bam, ref_file)
    gc_graph = None
    insert_graph, insert_metrics, hybrid_metrics = (None, None, None)
    if is_paired:
        insert_graph, insert_metrics = self._insert_sizes(align_bam)
    if bait_file and target_file:
        assert os.path.exists(bait_file), (bait_file, "does not exist!")
        assert os.path.exists(target_file), (target_file, "does not exist!")
        hybrid_metrics = self._hybrid_select_metrics(align_bam, bait_file, target_file)
    elif (variant_region_file and
          config["algorithm"].get("coverage_interval", "").lower() in ["exome"]):
        assert os.path.exists(variant_region_file), (variant_region_file, "does not exist")
        hybrid_metrics = self._hybrid_select_metrics(
            align_bam, variant_region_file, variant_region_file)
    vrn_vals = self._variant_eval_metrics(align_bam)
    summary_info = self._parser.get_summary_metrics(align_metrics, dup_metrics,
                                                    insert_metrics, hybrid_metrics,
                                                    vrn_vals)
    graphs = []
    if gc_graph and os.path.exists(gc_graph):
        graphs.append((gc_graph, "Distribution of GC content across reads"))
    if insert_graph and os.path.exists(insert_graph):
        graphs.append((insert_graph, "Distribution of paired end insert sizes"))
    return summary_info, graphs

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L287-L319

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | PicardMetrics._get_current_dup_metrics

def _get_current_dup_metrics(self, align_bam):
    """Retrieve duplicate information from input BAM file.
    """
    metrics_file = "%s.dup_metrics" % os.path.splitext(align_bam)[0]
    if not file_exists(metrics_file):
        dups = 0
        with pysam.Samfile(align_bam, "rb") as bam_handle:
            for read in bam_handle:
                # count each pair once (via read1) plus unpaired reads,
                # so mates are not double counted
                if (read.is_paired and read.is_read1) or not read.is_paired:
                    if read.is_duplicate:
                        dups += 1
        with open(metrics_file, "w") as out_handle:
            out_handle.write("# custom bcbio-nextgen metrics\n")
            out_handle.write("READ_PAIR_DUPLICATES\t%s\n" % dups)
    return metrics_file

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L321-L335

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | PicardMetrics._check_metrics_file

def _check_metrics_file(self, bam_name, metrics_ext):
    """Check for an existing metrics file for the given BAM.
    """
    base, _ = os.path.splitext(bam_name)
    try:
        int(base[-1])
        can_glob = False
    except ValueError:
        can_glob = True
    check_fname = "{base}{maybe_glob}.{ext}".format(
        base=base, maybe_glob="*" if can_glob else "", ext=metrics_ext)
    glob_fnames = glob.glob(check_fname)
    if len(glob_fnames) > 0:
        return glob_fnames[0]
    else:
        return "{base}.{ext}".format(base=base, ext=metrics_ext)

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L337-L352
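
The digit check guards the glob: a base name ending in a digit (e.g. sample1)
gets an exact lookup, since a trailing wildcard could also match sample10. A
standalone sketch of just the name construction, with hypothetical file names:

import os

def candidate_name(bam_name, metrics_ext):
    base, _ = os.path.splitext(bam_name)
    try:
        int(base[-1])            # ends in a digit: no wildcard
        can_glob = False
    except ValueError:
        can_glob = True
    return "{base}{maybe_glob}.{ext}".format(
        base=base, maybe_glob="*" if can_glob else "", ext=metrics_ext)

print(candidate_name("sample-sort.bam", "hs_metrics"))  # sample-sort*.hs_metrics
print(candidate_name("sample1.bam", "hs_metrics"))      # sample1.hs_metrics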

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | PicardMetrics._hybrid_select_metrics

def _hybrid_select_metrics(self, dup_bam, bait_file, target_file):
    """Generate metrics for hybrid selection efficiency.
    """
    metrics = self._check_metrics_file(dup_bam, "hs_metrics")
    if not file_exists(metrics):
        with bed_to_interval(bait_file, dup_bam) as ready_bait:
            with bed_to_interval(target_file, dup_bam) as ready_target:
                with file_transaction(metrics) as tx_metrics:
                    opts = [("BAIT_INTERVALS", ready_bait),
                            ("TARGET_INTERVALS", ready_target),
                            ("INPUT", dup_bam),
                            ("OUTPUT", tx_metrics)]
                    try:
                        self._picard.run("CollectHsMetrics", opts)
                    # HsMetrics fails regularly with memory errors
                    # so we catch and skip instead of aborting the
                    # full process
                    except subprocess.CalledProcessError:
                        return None
    return metrics

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L354-L373

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | PicardMetrics._variant_eval_metrics

def _variant_eval_metrics(self, dup_bam):
    """Find metrics for evaluating variant effectiveness.
    """
    base, ext = os.path.splitext(dup_bam)
    end_strip = "-dup"
    base = base[:-len(end_strip)] if base.endswith(end_strip) else base
    mfiles = glob.glob("%s*eval_metrics" % base)
    if len(mfiles) > 0:
        with open(mfiles[0]) as in_handle:
            # pull the metrics as JSON from the last line in the file
            for line in in_handle:
                pass
            metrics = json.loads(line)
        return metrics
    else:
        return None

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L375-L390
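
The bare for/pass loop leaves line bound to the file's final line, which holds
the JSON payload; the same idiom on an in-memory file with made-up contents:

import io
import json

handle = io.StringIO('# eval metrics header\n{"het_hom_ratio": 1.6}\n')
for line in handle:
    pass                      # after the loop, line is the last line
print(json.loads(line))       # {'het_hom_ratio': 1.6}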

bcbio/bcbio-nextgen | bcbio/broad/metrics.py | RNASeqPicardMetrics.report

def report(self, align_bam, ref_file, gtf_file, is_paired=False, rrna_file="null"):
    """Produce report metrics for an RNASeq experiment using Picard with a
    sorted aligned BAM file.
    """
    # collect duplication metrics
    dup_metrics = self._get_current_dup_metrics(align_bam)
    align_metrics = self._collect_align_metrics(align_bam, ref_file)
    insert_graph, insert_metrics = (None, None)
    if is_paired:
        insert_graph, insert_metrics = self._insert_sizes(align_bam)
    rnaseq_metrics = self._rnaseq_metrics(align_bam, gtf_file, rrna_file)
    summary_info = self._parser.get_summary_metrics(align_metrics, dup_metrics,
                                                    insert_metrics=insert_metrics,
                                                    rnaseq_metrics=rnaseq_metrics)
    graphs = []
    if insert_graph and file_exists(insert_graph):
        graphs.append((insert_graph, "Distribution of paired end insert sizes"))
    return summary_info, graphs

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/metrics.py#L470-L493

bcbio/bcbio-nextgen | bcbio/variation/gatk.py | standard_cl_params

def standard_cl_params(items):
    """Shared command line parameters for GATK programs.

    Handles no removal of duplicate reads for amplicon or non mark duplicate
    experiments. If we have pre-aligned inputs we ignore the value of
    mark_duplicates (since they may already be marked in the input BAM).
    """
    out = []

    def _skip_duplicates(data):
        return (dd.get_coverage_interval(data) == "amplicon" or
                (dd.get_aligner(data) and not dd.get_mark_duplicates(data)))

    if any(_skip_duplicates(d) for d in items):
        broad_runner = broad.runner_from_config(items[0]["config"])
        gatk_type = broad_runner.gatk_type()
        if gatk_type == "gatk4":
            out += ["--disable-read-filter", "NotDuplicateReadFilter"]
        elif LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"):
            out += ["-drf", "DuplicateRead"]
    return out

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L17-L36

bcbio/bcbio-nextgen | bcbio/variation/gatk.py | _shared_gatk_call_prep

def _shared_gatk_call_prep(align_bams, items, ref_file, region, out_file, num_cores=1):
    """Shared preparation work for GATK variant calling.
    """
    data = items[0]
    config = data["config"]
    broad_runner = broad.runner_from_config(config)
    gatk_type = broad_runner.gatk_type()
    for x in align_bams:
        bam.index(x, config)
    picard_runner = broad.runner_from_path("picard", config)
    picard_runner.run_fn("picard_index_ref", ref_file)
    params = ["-R", ref_file]
    coverage_depth_min = tz.get_in(["algorithm", "coverage_depth_min"], config)
    if coverage_depth_min and coverage_depth_min < 4:
        confidence = "4.0"
        params += ["--standard_min_confidence_threshold_for_calling", confidence]
    for a in annotation.get_gatk_annotations(config):
        params += ["--annotation", a]
    for x in align_bams:
        params += ["-I", x]
    variant_regions = bedutils.population_variant_regions(items)
    region = subset_variant_regions(variant_regions, region, out_file, items)
    if region:
        if gatk_type == "gatk4":
            params += ["-L", bamprep.region_to_gatk(region),
                       "--interval-set-rule", "INTERSECTION"]
        else:
            params += ["-L", bamprep.region_to_gatk(region),
                       "--interval_set_rule", "INTERSECTION"]
    params += standard_cl_params(items)
    return broad_runner, params

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L38-L66
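
For orientation, the kind of parameter list this assembles (hypothetical paths,
region and annotation choice; GATK3-style interval flag spelling):

params = ["-R", "GRCh37.fa",
          "--annotation", "MappingQualityRankSumTest",
          "-I", "sample1.bam", "-I", "sample2.bam",
          "-L", "1:1000000-2000000", "--interval_set_rule", "INTERSECTION"]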

bcbio/bcbio-nextgen | bcbio/variation/gatk.py | unified_genotyper

def unified_genotyper(align_bams, items, ref_file, assoc_files,
                      region=None, out_file=None):
    """Perform SNP genotyping on the given alignment file.
    """
    if out_file is None:
        out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
    if not utils.file_exists(out_file):
        broad_runner, params = \
            _shared_gatk_call_prep(align_bams, items, ref_file, region, out_file)
        with file_transaction(items[0], out_file) as tx_out_file:
            params += ["-T", "UnifiedGenotyper",
                       "-o", tx_out_file,
                       "-ploidy", (str(ploidy.get_ploidy(items, region))
                                   if broad_runner.gatk_type() == "restricted" else "2"),
                       "--genotype_likelihoods_model", "BOTH"]
            resources = config_utils.get_resources("gatk", items[0]["config"])
            if "options" in resources:
                params += [str(x) for x in resources.get("options", [])]
            broad_runner.run_gatk(params)
    return vcfutils.bgzip_and_index(out_file, items[0]["config"])

https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L68-L87
train
218,024
bcbio/bcbio-nextgen
bcbio/variation/gatk.py
_joint_calling
def _joint_calling(items): """Determine if this call feeds downstream into joint calls. """ jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0]) if jointcaller: assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples" assert tz.get_in(("metadata", "batch"), items[0]) is not None, \ "Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0]) return jointcaller
python
def _joint_calling(items): """Determine if this call feeds downstream into joint calls. """ jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0]) if jointcaller: assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples" assert tz.get_in(("metadata", "batch"), items[0]) is not None, \ "Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0]) return jointcaller
[ "def", "_joint_calling", "(", "items", ")", ":", "jointcaller", "=", "tz", ".", "get_in", "(", "(", "\"config\"", ",", "\"algorithm\"", ",", "\"jointcaller\"", ")", ",", "items", "[", "0", "]", ")", "if", "jointcaller", ":", "assert", "len", "(", "items"...
Determine if this call feeds downstream into joint calls.
[ "Determine", "if", "this", "call", "feeds", "downstream", "into", "joint", "calls", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L89-L97
train
218,025
bcbio/bcbio-nextgen
bcbio/variation/gatk.py
_supports_avx
def _supports_avx(): """Check for support for Intel AVX acceleration.""" if os.path.exists("/proc/cpuinfo"): with open("/proc/cpuinfo") as in_handle: for line in in_handle: if line.startswith("flags") and line.find("avx") > 0: return True
python
def _supports_avx(): """Check for support for Intel AVX acceleration.""" if os.path.exists("/proc/cpuinfo"): with open("/proc/cpuinfo") as in_handle: for line in in_handle: if line.startswith("flags") and line.find("avx") > 0: return True
[ "def", "_supports_avx", "(", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "\"/proc/cpuinfo\"", ")", ":", "with", "open", "(", "\"/proc/cpuinfo\"", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswit...
Check for support for Intel AVX acceleration.
[ "Check", "for", "support", "for", "Intel", "AVX", "acceleration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/gatk.py#L204-L210
train
218,026
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
jar_versioner
def jar_versioner(program_name, jar_name): """Retrieve version information based on jar file. """ def get_version(config): try: pdir = config_utils.get_program(program_name, config, "dir") # not configured except ValueError: return "" jar = os.path.basename(config_utils.get_jar(jar_name, pdir)) for to_remove in [jar_name, ".jar", "-standalone"]: jar = jar.replace(to_remove, "") if jar.startswith(("-", ".")): jar = jar[1:] if not jar: logger.warn("Unable to determine version for program '{}' from jar file {}".format( program_name, config_utils.get_jar(jar_name, pdir))) return jar return get_version
python
def jar_versioner(program_name, jar_name): """Retrieve version information based on jar file. """ def get_version(config): try: pdir = config_utils.get_program(program_name, config, "dir") # not configured except ValueError: return "" jar = os.path.basename(config_utils.get_jar(jar_name, pdir)) for to_remove in [jar_name, ".jar", "-standalone"]: jar = jar.replace(to_remove, "") if jar.startswith(("-", ".")): jar = jar[1:] if not jar: logger.warn("Unable to determine version for program '{}' from jar file {}".format( program_name, config_utils.get_jar(jar_name, pdir))) return jar return get_version
[ "def", "jar_versioner", "(", "program_name", ",", "jar_name", ")", ":", "def", "get_version", "(", "config", ")", ":", "try", ":", "pdir", "=", "config_utils", ".", "get_program", "(", "program_name", ",", "config", ",", "\"dir\"", ")", "# not configured", "...
Retrieve version information based on jar file.
[ "Retrieve", "version", "information", "based", "on", "jar", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L66-L84
train
218,027
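A minimal standalone sketch of the jar-name stripping logic in the jar_versioner record above, pulled out of the bcbio config machinery so it runs on its own; the jar paths are hypothetical examples, not real install locations.

import os

def version_from_jar(jar_path, jar_name):
    # Mirror the stripping rules in jar_versioner: drop the tool name,
    # the .jar suffix and any -standalone marker, then a leading - or .
    jar = os.path.basename(jar_path)
    for to_remove in [jar_name, ".jar", "-standalone"]:
        jar = jar.replace(to_remove, "")
    if jar.startswith(("-", ".")):
        jar = jar[1:]
    return jar

print(version_from_jar("/opt/jars/snpEff-4.3t.jar", "snpEff"))                 # -> 4.3t
print(version_from_jar("/opt/jars/GenomeAnalysisTK.jar", "GenomeAnalysisTK"))  # -> "" (no version in name)

An empty result is what triggers the "Unable to determine version" warning in the original function.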
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_cl_version
def _get_cl_version(p, config): """Retrieve version of a single commandline program. """ if not p.get("has_cl_version", True): return "" try: prog = config_utils.get_program(p["cmd"], config) except config_utils.CmdNotFound: localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"]) if os.path.exists(localpy_cmd): prog = localpy_cmd else: return "" args = p.get("args", "") cmd = "{prog} {args}" subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) with contextlib.closing(subp.stdout) as stdout: if p.get("stdout_flag"): v = _parse_from_stdoutflag(stdout, p["stdout_flag"]) elif p.get("paren_flag"): v = _parse_from_parenflag(stdout, p["paren_flag"]) else: lines = [l.strip() for l in str(stdout.read()).split("\n") if l.strip()] v = lines[-1] if v.endswith("."): v = v[:-1] return v
python
def _get_cl_version(p, config): """Retrieve version of a single commandline program. """ if not p.get("has_cl_version", True): return "" try: prog = config_utils.get_program(p["cmd"], config) except config_utils.CmdNotFound: localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"]) if os.path.exists(localpy_cmd): prog = localpy_cmd else: return "" args = p.get("args", "") cmd = "{prog} {args}" subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) with contextlib.closing(subp.stdout) as stdout: if p.get("stdout_flag"): v = _parse_from_stdoutflag(stdout, p["stdout_flag"]) elif p.get("paren_flag"): v = _parse_from_parenflag(stdout, p["paren_flag"]) else: lines = [l.strip() for l in str(stdout.read()).split("\n") if l.strip()] v = lines[-1] if v.endswith("."): v = v[:-1] return v
[ "def", "_get_cl_version", "(", "p", ",", "config", ")", ":", "if", "not", "p", ".", "get", "(", "\"has_cl_version\"", ",", "True", ")", ":", "return", "\"\"", "try", ":", "prog", "=", "config_utils", ".", "get_program", "(", "p", "[", "\"cmd\"", "]", ...
Retrieve version of a single commandline program.
[ "Retrieve", "version", "of", "a", "single", "commandline", "program", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L115-L145
train
218,028
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_brew_versions
def _get_brew_versions(): """Retrieve versions of tools installed via brew. """ from bcbio import install tooldir = install.get_defaults().get("tooldir") brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew" try: vout = subprocess.check_output([brew_cmd, "list", "--versions"]) except OSError: # brew not installed/used vout = "" out = {} for vstr in vout.split("\n"): if vstr.strip(): parts = vstr.rstrip().split() name = parts[0] v = parts[-1] out[name] = v return out
python
def _get_brew_versions(): """Retrieve versions of tools installed via brew. """ from bcbio import install tooldir = install.get_defaults().get("tooldir") brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew" try: vout = subprocess.check_output([brew_cmd, "list", "--versions"]) except OSError: # brew not installed/used vout = "" out = {} for vstr in vout.split("\n"): if vstr.strip(): parts = vstr.rstrip().split() name = parts[0] v = parts[-1] out[name] = v return out
[ "def", "_get_brew_versions", "(", ")", ":", "from", "bcbio", "import", "install", "tooldir", "=", "install", ".", "get_defaults", "(", ")", ".", "get", "(", "\"tooldir\"", ")", "brew_cmd", "=", "os", ".", "path", ".", "join", "(", "tooldir", ",", "\"bin\...
Retrieve versions of tools installed via brew.
[ "Retrieve", "versions", "of", "tools", "installed", "via", "brew", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L147-L164
train
218,029
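The parsing in _get_brew_versions is easiest to see on concrete input. A minimal sketch with made-up `brew list --versions` style output (the tool names and versions are illustrative, not real brew output):

# Parse "name version [older versions...]" lines the same way as above.
vout = """samtools 1.9
bwa 0.7.17 0.7.15
bcftools 1.9"""

out = {}
for vstr in vout.split("\n"):
    if vstr.strip():
        parts = vstr.rstrip().split()
        # taking parts[-1] means the last listed version wins when
        # multiple versions of a formula are installed
        out[parts[0]] = parts[-1]

print(out)  # {'samtools': '1.9', 'bwa': '0.7.15', 'bcftools': '1.9'}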
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_versions
def _get_versions(config=None): """Retrieve details on all programs available on the system. """ try: from bcbio.pipeline import version if hasattr(version, "__version__"): bcbio_version = ("%s-%s" % (version.__version__, version.__git_revision__) if version.__git_revision__ else version.__version__) else: bcbio_version = "" except ImportError: bcbio_version = "" out = [{"program": "bcbio-nextgen", "version": bcbio_version}] manifest_dir = _get_manifest_dir(config) manifest_vs = _get_versions_manifest(manifest_dir) if manifest_dir else [] if manifest_vs: out += manifest_vs else: assert config is not None, "Need configuration to retrieve from non-manifest installs" brew_vs = _get_brew_versions() for p in _cl_progs: out.append({"program": p["cmd"], "version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else _get_cl_version(p, config))}) for p in _alt_progs: out.append({"program": p["name"], "version": (brew_vs[p["name"]] if p["name"] in brew_vs else p["version_fn"](config))}) out.sort(key=lambda x: x["program"]) return out
python
def _get_versions(config=None): """Retrieve details on all programs available on the system. """ try: from bcbio.pipeline import version if hasattr(version, "__version__"): bcbio_version = ("%s-%s" % (version.__version__, version.__git_revision__) if version.__git_revision__ else version.__version__) else: bcbio_version = "" except ImportError: bcbio_version = "" out = [{"program": "bcbio-nextgen", "version": bcbio_version}] manifest_dir = _get_manifest_dir(config) manifest_vs = _get_versions_manifest(manifest_dir) if manifest_dir else [] if manifest_vs: out += manifest_vs else: assert config is not None, "Need configuration to retrieve from non-manifest installs" brew_vs = _get_brew_versions() for p in _cl_progs: out.append({"program": p["cmd"], "version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else _get_cl_version(p, config))}) for p in _alt_progs: out.append({"program": p["name"], "version": (brew_vs[p["name"]] if p["name"] in brew_vs else p["version_fn"](config))}) out.sort(key=lambda x: x["program"]) return out
[ "def", "_get_versions", "(", "config", "=", "None", ")", ":", "try", ":", "from", "bcbio", ".", "pipeline", "import", "version", "if", "hasattr", "(", "version", ",", "\"__version__\"", ")", ":", "bcbio_version", "=", "(", "\"%s-%s\"", "%", "(", "version",...
Retrieve details on all programs available on the system.
[ "Retrieve", "details", "on", "all", "programs", "available", "on", "the", "system", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L166-L195
train
218,030
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
_get_versions_manifest
def _get_versions_manifest(manifest_dir): """Retrieve versions from a pre-existing manifest of installed software. """ all_pkgs = _manifest_progs + [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs] if os.path.exists(manifest_dir): out = [] for plist in ["toolplus", "python", "r", "debian", "custom"]: pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist) if os.path.exists(pkg_file): with open(pkg_file) as in_handle: pkg_info = yaml.safe_load(in_handle) if not pkg_info: continue added = [] for pkg in all_pkgs: if pkg in pkg_info: added.append(pkg) out.append({"program": pkg, "version": pkg_info[pkg]["version"]}) for x in added: all_pkgs.remove(x) out.sort(key=lambda x: x["program"]) for pkg in all_pkgs: out.append({"program": pkg, "version": ""}) return out
python
def _get_versions_manifest(manifest_dir): """Retrieve versions from a pre-existing manifest of installed software. """ all_pkgs = _manifest_progs + [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs] if os.path.exists(manifest_dir): out = [] for plist in ["toolplus", "python", "r", "debian", "custom"]: pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist) if os.path.exists(pkg_file): with open(pkg_file) as in_handle: pkg_info = yaml.safe_load(in_handle) if not pkg_info: continue added = [] for pkg in all_pkgs: if pkg in pkg_info: added.append(pkg) out.append({"program": pkg, "version": pkg_info[pkg]["version"]}) for x in added: all_pkgs.remove(x) out.sort(key=lambda x: x["program"]) for pkg in all_pkgs: out.append({"program": pkg, "version": ""}) return out
[ "def", "_get_versions_manifest", "(", "manifest_dir", ")", ":", "all_pkgs", "=", "_manifest_progs", "+", "[", "p", ".", "get", "(", "\"name\"", ",", "p", "[", "\"cmd\"", "]", ")", "for", "p", "in", "_cl_progs", "]", "+", "[", "p", "[", "\"name\"", "]",...
Retrieve versions from a pre-existing manifest of installed software.
[ "Retrieve", "versions", "from", "a", "pre", "-", "existing", "manifest", "of", "installed", "software", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L224-L247
train
218,031
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
write_versions
def write_versions(dirs, config=None, is_wrapper=False): """Write CSV file with versions used in analysis pipeline. """ out_file = _get_program_file(dirs) if is_wrapper: assert utils.file_exists(out_file), "Failed to create program versions from VM" elif out_file is None: for p in _get_versions(config): print("{program},{version}".format(**p)) else: with open(out_file, "w") as out_handle: for p in _get_versions(config): out_handle.write("{program},{version}\n".format(**p)) return out_file
python
def write_versions(dirs, config=None, is_wrapper=False): """Write CSV file with versions used in analysis pipeline. """ out_file = _get_program_file(dirs) if is_wrapper: assert utils.file_exists(out_file), "Failed to create program versions from VM" elif out_file is None: for p in _get_versions(config): print("{program},{version}".format(**p)) else: with open(out_file, "w") as out_handle: for p in _get_versions(config): out_handle.write("{program},{version}\n".format(**p)) return out_file
[ "def", "write_versions", "(", "dirs", ",", "config", "=", "None", ",", "is_wrapper", "=", "False", ")", ":", "out_file", "=", "_get_program_file", "(", "dirs", ")", "if", "is_wrapper", ":", "assert", "utils", ".", "file_exists", "(", "out_file", ")", ",", ...
Write CSV file with versions used in analysis pipeline.
[ "Write", "CSV", "file", "with", "versions", "used", "in", "analysis", "pipeline", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L254-L267
train
218,032
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
get_version_manifest
def get_version_manifest(name, data=None, required=False): """Retrieve a version from the currently installed manifest. """ manifest_dir = _get_manifest_dir(data, name) manifest_vs = _get_versions_manifest(manifest_dir) or [] for x in manifest_vs: if x["program"] == name: v = x.get("version", "") if v: return v if required: raise ValueError("Did not find %s in install manifest. Could not check version." % name) return ""
python
def get_version_manifest(name, data=None, required=False): """Retrieve a version from the currently installed manifest. """ manifest_dir = _get_manifest_dir(data, name) manifest_vs = _get_versions_manifest(manifest_dir) or [] for x in manifest_vs: if x["program"] == name: v = x.get("version", "") if v: return v if required: raise ValueError("Did not find %s in install manifest. Could not check version." % name) return ""
[ "def", "get_version_manifest", "(", "name", ",", "data", "=", "None", ",", "required", "=", "False", ")", ":", "manifest_dir", "=", "_get_manifest_dir", "(", "data", ",", "name", ")", "manifest_vs", "=", "_get_versions_manifest", "(", "manifest_dir", ")", "or"...
Retrieve a version from the currently installed manifest.
[ "Retrieve", "a", "version", "from", "the", "currently", "installed", "manifest", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L269-L281
train
218,033
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
add_subparser
def add_subparser(subparsers): """Add command line option for exporting version information. """ parser = subparsers.add_parser("version", help="Export versions of used software to stdout or a file ") parser.add_argument("--workdir", help="Directory export programs to in workdir/provenance/programs.txt", default=None)
python
def add_subparser(subparsers): """Add command line option for exporting version information. """ parser = subparsers.add_parser("version", help="Export versions of used software to stdout or a file ") parser.add_argument("--workdir", help="Directory export programs to in workdir/provenance/programs.txt", default=None)
[ "def", "add_subparser", "(", "subparsers", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "\"version\"", ",", "help", "=", "\"Export versions of used software to stdout or a file \"", ")", "parser", ".", "add_argument", "(", "\"--workdir\"", ",", "help...
Add command line option for exporting version information.
[ "Add", "command", "line", "option", "for", "exporting", "version", "information", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L283-L289
train
218,034
bcbio/bcbio-nextgen
bcbio/provenance/programs.py
get_version
def get_version(name, dirs=None, config=None): """Retrieve the current version of the given program from cached names. """ if dirs: p = _get_program_file(dirs) else: p = tz.get_in(["resources", "program_versions"], config) if p: with open(p) as in_handle: for line in in_handle: prog, version = line.rstrip().split(",") if prog == name and version: return version raise KeyError("Version information not found for %s in %s" % (name, p))
python
def get_version(name, dirs=None, config=None): """Retrieve the current version of the given program from cached names. """ if dirs: p = _get_program_file(dirs) else: p = tz.get_in(["resources", "program_versions"], config) if p: with open(p) as in_handle: for line in in_handle: prog, version = line.rstrip().split(",") if prog == name and version: return version raise KeyError("Version information not found for %s in %s" % (name, p))
[ "def", "get_version", "(", "name", ",", "dirs", "=", "None", ",", "config", "=", "None", ")", ":", "if", "dirs", ":", "p", "=", "_get_program_file", "(", "dirs", ")", "else", ":", "p", "=", "tz", ".", "get_in", "(", "[", "\"resources\"", ",", "\"pr...
Retrieve the current version of the given program from cached names.
[ "Retrieve", "the", "current", "version", "of", "the", "given", "program", "from", "cached", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/programs.py#L291-L304
train
218,035
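The cached program file that get_version reads is a plain two-column CSV of program,version rows. A minimal sketch of the lookup against an in-memory file; the contents are made up:

import io

program_file = io.StringIO("bwa,0.7.17\nsamtools,1.9\ngatk,\n")

def lookup(handle, name):
    for line in handle:
        prog, version = line.rstrip().split(",")
        # an empty version field (like the gatk row) is skipped,
        # matching the "and version" check in get_version
        if prog == name and version:
            return version
    raise KeyError("Version information not found for %s" % name)

print(lookup(program_file, "samtools"))  # -> 1.9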
bcbio/bcbio-nextgen
scripts/utils/hlas_to_pgroups.py
hla_choices
def hla_choices(orig_hla, min_parts=2): """Provide a range of options for HLA type, with decreasing resolution. """ yield orig_hla try: int(orig_hla[-1]) except ValueError: yield orig_hla[:-1] hla_parts = orig_hla.split(":") for sub_i in range(len(hla_parts) - min_parts + 1): yield ":".join(hla_parts[:len(hla_parts) - sub_i])
python
def hla_choices(orig_hla, min_parts=2): """Provide a range of options for HLA type, with decreasing resolution. """ yield orig_hla try: int(orig_hla[-1]) except ValueError: yield orig_hla[:-1] hla_parts = orig_hla.split(":") for sub_i in range(len(hla_parts) - min_parts + 1): yield ":".join(hla_parts[:len(hla_parts) - sub_i])
[ "def", "hla_choices", "(", "orig_hla", ",", "min_parts", "=", "2", ")", ":", "yield", "orig_hla", "try", ":", "int", "(", "orig_hla", "[", "-", "1", "]", ")", "except", "ValueError", ":", "yield", "orig_hla", "[", ":", "-", "1", "]", "hla_parts", "="...
Provide a range of options for HLA type, with decreasing resolution.
[ "Provide", "a", "range", "of", "options", "for", "HLA", "type", "with", "decreasing", "resolution", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hlas_to_pgroups.py#L35-L45
train
218,036
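The generator in hla_choices is clearest when walked on a concrete allele. A self-contained sketch (the function body is copied from the record above so the snippet runs standalone; the allele string is an illustrative 4-field name with an expression suffix):

def hla_choices(orig_hla, min_parts=2):
    yield orig_hla
    try:
        int(orig_hla[-1])
    except ValueError:
        # non-numeric suffix (e.g. N for null expression): also offer
        # the allele with the suffix stripped
        yield orig_hla[:-1]
    hla_parts = orig_hla.split(":")
    for sub_i in range(len(hla_parts) - min_parts + 1):
        yield ":".join(hla_parts[:len(hla_parts) - sub_i])

print(list(hla_choices("HLA-A*01:01:01:02N")))
# ['HLA-A*01:01:01:02N', 'HLA-A*01:01:01:02',
#  'HLA-A*01:01:01:02N', 'HLA-A*01:01:01', 'HLA-A*01:01']

Note the full allele is yielded twice when the suffix is non-numeric; callers evidently tolerate duplicates and stop at the first choice that resolves.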
bcbio/bcbio-nextgen
scripts/utils/hlas_to_pgroups.py
read_pgroups
def read_pgroups(in_file): """Read HLAs and the pgroups they fall in. """ out = {} with open(in_file) as in_handle: for line in (l for l in in_handle if not l.startswith("#")): locus, alleles, group = line.strip().split(";") for allele in alleles.split("/"): out["HLA-%s%s" % (locus, allele)] = group return out
python
def read_pgroups(in_file): """Read HLAs and the pgroups they fall in. """ out = {} with open(in_file) as in_handle: for line in (l for l in in_handle if not l.startswith("#")): locus, alleles, group = line.strip().split(";") for allele in alleles.split("/"): out["HLA-%s%s" % (locus, allele)] = group return out
[ "def", "read_pgroups", "(", "in_file", ")", ":", "out", "=", "{", "}", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "(", "l", "for", "l", "in", "in_handle", "if", "not", "l", ".", "startswith", "(", "\"#\"", ")"...
Read HLAs and the pgroups they fall in.
[ "Read", "HLAs", "and", "the", "pgroups", "they", "fall", "in", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hlas_to_pgroups.py#L47-L56
train
218,037
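A minimal sketch of the read_pgroups parsing on a sample line modeled on the IMGT/HLA hla_nom_p.txt layout (locus;allele1/allele2/...;p-group); the sample content is made up:

import io

pgroup_text = "# comment line\nA*;01:01:01:01/01:01:01:02N;01:01P\n"

out = {}
# comment lines starting with '#' are skipped, as in the original
for line in (l for l in io.StringIO(pgroup_text) if not l.startswith("#")):
    locus, alleles, group = line.strip().split(";")
    for allele in alleles.split("/"):
        out["HLA-%s%s" % (locus, allele)] = group

print(out)
# {'HLA-A*01:01:01:01': '01:01P', 'HLA-A*01:01:01:02N': '01:01P'}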
bcbio/bcbio-nextgen
scripts/utils/hlas_to_pgroups.py
read_hlas
def read_hlas(fasta_fai): """Get HLA alleles from the hg38 fasta fai file. """ out = [] with open(fasta_fai) as in_handle: for line in in_handle: if line.startswith("HLA"): out.append(line.split()[0]) return out
python
def read_hlas(fasta_fai): """Get HLA alleles from the hg38 fasta fai file. """ out = [] with open(fasta_fai) as in_handle: for line in in_handle: if line.startswith("HLA"): out.append(line.split()[0]) return out
[ "def", "read_hlas", "(", "fasta_fai", ")", ":", "out", "=", "[", "]", "with", "open", "(", "fasta_fai", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "line", ".", "startswith", "(", "\"HLA\"", ")", ":", "out", ".", "append...
Get HLA alleles from the hg38 fasta fai file.
[ "Get", "HLA", "alleles", "from", "the", "hg38", "fasta", "fai", "file", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hlas_to_pgroups.py#L58-L66
train
218,038
bcbio/bcbio-nextgen
bcbio/variation/split.py
split_vcf
def split_vcf(in_file, ref_file, config, out_dir=None): """Split a VCF file into separate files by chromosome. """ if out_dir is None: out_dir = os.path.join(os.path.dirname(in_file), "split") out_files = [] with open(ref.fasta_idx(ref_file, config)) as in_handle: for line in in_handle: chrom, size = line.split()[:2] out_file = os.path.join(out_dir, os.path.basename(replace_suffix(append_stem(in_file, "-%s" % chrom), ".vcf"))) subset_vcf(in_file, (chrom, 0, size), out_file, config) out_files.append(out_file) return out_files
python
def split_vcf(in_file, ref_file, config, out_dir=None): """Split a VCF file into separate files by chromosome. """ if out_dir is None: out_dir = os.path.join(os.path.dirname(in_file), "split") out_files = [] with open(ref.fasta_idx(ref_file, config)) as in_handle: for line in in_handle: chrom, size = line.split()[:2] out_file = os.path.join(out_dir, os.path.basename(replace_suffix(append_stem(in_file, "-%s" % chrom), ".vcf"))) subset_vcf(in_file, (chrom, 0, size), out_file, config) out_files.append(out_file) return out_files
[ "def", "split_vcf", "(", "in_file", ",", "ref_file", ",", "config", ",", "out_dir", "=", "None", ")", ":", "if", "out_dir", "is", "None", ":", "out_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "in_file", "...
Split a VCF file into separate files by chromosome.
[ "Split", "a", "VCF", "file", "into", "separate", "files", "by", "chromosome", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/split.py#L12-L25
train
218,039
bcbio/bcbio-nextgen
bcbio/variation/split.py
subset_vcf
def subset_vcf(in_file, region, out_file, config): """Subset VCF in the given region, handling bgzip and indexing of input. """ work_file = vcfutils.bgzip_and_index(in_file, config) if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) region_str = bamprep.region_to_gatk(region) cmd = "{bcftools} view -r {region_str} {work_file} > {tx_out_file}" do.run(cmd.format(**locals()), "subset %s: %s" % (os.path.basename(work_file), region_str)) return out_file
python
def subset_vcf(in_file, region, out_file, config): """Subset VCF in the given region, handling bgzip and indexing of input. """ work_file = vcfutils.bgzip_and_index(in_file, config) if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: bcftools = config_utils.get_program("bcftools", config) region_str = bamprep.region_to_gatk(region) cmd = "{bcftools} view -r {region_str} {work_file} > {tx_out_file}" do.run(cmd.format(**locals()), "subset %s: %s" % (os.path.basename(work_file), region_str)) return out_file
[ "def", "subset_vcf", "(", "in_file", ",", "region", ",", "out_file", ",", "config", ")", ":", "work_file", "=", "vcfutils", ".", "bgzip_and_index", "(", "in_file", ",", "config", ")", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "file_tra...
Subset VCF in the given region, handling bgzip and indexing of input.
[ "Subset", "VCF", "in", "the", "given", "region", "handling", "bgzip", "and", "indexing", "of", "input", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/split.py#L27-L37
train
218,040
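subset_vcf takes a (chrom, start, end) tuple and relies on bamprep.region_to_gatk to render it as a region string. A minimal sketch of assembling the equivalent bcftools command; the region_to_gatk stand-in assumes the usual 0-based to 1-based chrom:start-end conversion (the exact coordinate handling lives in bamprep), and the file names are hypothetical:

def region_to_gatk(region):
    # assumed rendering: 0-based half-open tuple -> 1-based inclusive string
    chrom, start, end = region
    return "%s:%s-%s" % (chrom, int(start) + 1, end)

work_file = "sample-variants.vcf.gz"   # hypothetical bgzipped, indexed VCF
tx_out_file = "sample-chr1.vcf"        # hypothetical transactional output
region_str = region_to_gatk(("chr1", 0, 249250621))
cmd = "bcftools view -r {region_str} {work_file} > {tx_out_file}"
print(cmd.format(**locals()))
# bcftools view -r chr1:1-249250621 sample-variants.vcf.gz > sample-chr1.vcf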
bcbio/bcbio-nextgen
bcbio/variation/recalibrate.py
prep_recal
def prep_recal(data): """Do pre-BQSR recalibration, calculation of recalibration tables. """ if dd.get_recalibrate(data) in [True, "gatk"]: logger.info("Prepare BQSR tables with GATK: %s " % str(dd.get_sample_name(data))) dbsnp_file = tz.get_in(("genome_resources", "variation", "dbsnp"), data) if not dbsnp_file: logger.info("Skipping GATK BaseRecalibrator because no VCF file of known variants was found.") return data broad_runner = broad.runner_from_config(data["config"]) data["prep_recal"] = _gatk_base_recalibrator(broad_runner, dd.get_align_bam(data), dd.get_ref_file(data), dd.get_platform(data), dbsnp_file, dd.get_variant_regions(data) or dd.get_sample_callable(data), data) elif dd.get_recalibrate(data) == "sentieon": logger.info("Prepare BQSR tables with sentieon: %s " % str(dd.get_sample_name(data))) data["prep_recal"] = sentieon.bqsr_table(data) elif dd.get_recalibrate(data): raise NotImplementedError("Unsupported recalibration type: %s" % (dd.get_recalibrate(data))) return data
python
def prep_recal(data): """Do pre-BQSR recalibration, calculation of recalibration tables. """ if dd.get_recalibrate(data) in [True, "gatk"]: logger.info("Prepare BQSR tables with GATK: %s " % str(dd.get_sample_name(data))) dbsnp_file = tz.get_in(("genome_resources", "variation", "dbsnp"), data) if not dbsnp_file: logger.info("Skipping GATK BaseRecalibrator because no VCF file of known variants was found.") return data broad_runner = broad.runner_from_config(data["config"]) data["prep_recal"] = _gatk_base_recalibrator(broad_runner, dd.get_align_bam(data), dd.get_ref_file(data), dd.get_platform(data), dbsnp_file, dd.get_variant_regions(data) or dd.get_sample_callable(data), data) elif dd.get_recalibrate(data) == "sentieon": logger.info("Prepare BQSR tables with sentieon: %s " % str(dd.get_sample_name(data))) data["prep_recal"] = sentieon.bqsr_table(data) elif dd.get_recalibrate(data): raise NotImplementedError("Unsupported recalibration type: %s" % (dd.get_recalibrate(data))) return data
[ "def", "prep_recal", "(", "data", ")", ":", "if", "dd", ".", "get_recalibrate", "(", "data", ")", "in", "[", "True", ",", "\"gatk\"", "]", ":", "logger", ".", "info", "(", "\"Prepare BQSR tables with GATK: %s \"", "%", "str", "(", "dd", ".", "get_sample_na...
Do pre-BQSR recalibration, calculation of recalibration tables.
[ "Do", "pre", "-", "BQSR", "recalibration", "calculation", "of", "recalibration", "tables", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/recalibrate.py#L21-L41
train
218,041
bcbio/bcbio-nextgen
bcbio/variation/recalibrate.py
apply_recal
def apply_recal(data): """Apply recalibration tables to the sorted aligned BAM, producing recalibrated BAM. """ orig_bam = dd.get_align_bam(data) or dd.get_work_bam(data) had_work_bam = "work_bam" in data if dd.get_recalibrate(data) in [True, "gatk"]: if data.get("prep_recal"): logger.info("Applying BQSR recalibration with GATK: %s " % str(dd.get_sample_name(data))) data["work_bam"] = _gatk_apply_bqsr(data) elif dd.get_recalibrate(data) == "sentieon": if data.get("prep_recal"): logger.info("Applying BQSR recalibration with sentieon: %s " % str(dd.get_sample_name(data))) data["work_bam"] = sentieon.apply_bqsr(data) elif dd.get_recalibrate(data): raise NotImplementedError("Unsupported recalibration type: %s" % (dd.get_recalibrate(data))) # CWL does not have work/alignment BAM separation if not had_work_bam and dd.get_work_bam(data): data["align_bam"] = dd.get_work_bam(data) if orig_bam != dd.get_work_bam(data) and orig_bam != dd.get_align_bam(data): utils.save_diskspace(orig_bam, "BAM recalibrated to %s" % dd.get_work_bam(data), data["config"]) return data
python
def apply_recal(data): """Apply recalibration tables to the sorted aligned BAM, producing recalibrated BAM. """ orig_bam = dd.get_align_bam(data) or dd.get_work_bam(data) had_work_bam = "work_bam" in data if dd.get_recalibrate(data) in [True, "gatk"]: if data.get("prep_recal"): logger.info("Applying BQSR recalibration with GATK: %s " % str(dd.get_sample_name(data))) data["work_bam"] = _gatk_apply_bqsr(data) elif dd.get_recalibrate(data) == "sentieon": if data.get("prep_recal"): logger.info("Applying BQSR recalibration with sentieon: %s " % str(dd.get_sample_name(data))) data["work_bam"] = sentieon.apply_bqsr(data) elif dd.get_recalibrate(data): raise NotImplementedError("Unsupported recalibration type: %s" % (dd.get_recalibrate(data))) # CWL does not have work/alignment BAM separation if not had_work_bam and dd.get_work_bam(data): data["align_bam"] = dd.get_work_bam(data) if orig_bam != dd.get_work_bam(data) and orig_bam != dd.get_align_bam(data): utils.save_diskspace(orig_bam, "BAM recalibrated to %s" % dd.get_work_bam(data), data["config"]) return data
[ "def", "apply_recal", "(", "data", ")", ":", "orig_bam", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "had_work_bam", "=", "\"work_bam\"", "in", "data", "if", "dd", ".", "get_recalibrate", "(", "da...
Apply recalibration tables to the sorted aligned BAM, producing recalibrated BAM.
[ "Apply", "recalibration", "tables", "to", "the", "sorted", "aligned", "BAM", "producing", "recalibrated", "BAM", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/recalibrate.py#L43-L63
train
218,042
bcbio/bcbio-nextgen
bcbio/variation/recalibrate.py
_gatk_base_recalibrator
def _gatk_base_recalibrator(broad_runner, dup_align_bam, ref_file, platform, dbsnp_file, intervals, data): """Step 1 of GATK recalibration process, producing table of covariates. For GATK 4 we use local multicore spark runs: https://github.com/broadinstitute/gatk/issues/2345 For GATK3, Large whole genome BAM files take an excessively long time to recalibrate and the extra inputs don't help much beyond a certain point. See the 'Downsampling analysis' plots in the GATK documentation: http://gatkforums.broadinstitute.org/discussion/44/base-quality-score-recalibrator#latest This identifies large files and calculates the fraction to downsample to. spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors """ target_counts = 1e8 # 100 million reads per read group, 20x the plotted max out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data), "%s-recal.grp" % utils.splitext_plus(os.path.basename(dup_align_bam))[0]) if not utils.file_exists(out_file): if has_aligned_reads(dup_align_bam, intervals): with file_transaction(data, out_file) as tx_out_file: gatk_type = broad_runner.gatk_type() assert gatk_type in ["restricted", "gatk4"], \ "Require full version of GATK 2.4+ or GATK4 for BQSR" params = ["-I", dup_align_bam] cores = dd.get_num_cores(data) if gatk_type == "gatk4": resources = config_utils.get_resources("gatk-spark", data["config"]) spark_opts = [str(x) for x in resources.get("options", [])] params += ["-T", "BaseRecalibratorSpark", "--output", tx_out_file, "--reference", dd.get_ref_file(data)] if spark_opts: params += spark_opts else: params += ["--spark-master", "local[%s]" % cores, "--conf", "spark.driver.host=localhost", "--conf", "spark.network.timeout=800", "--conf", "spark.executor.heartbeatInterval=100", "--conf", "spark.local.dir=%s" % os.path.dirname(tx_out_file)] if dbsnp_file: params += ["--known-sites", dbsnp_file] if intervals: params += ["-L", intervals, "--interval-set-rule", "INTERSECTION"] else: params += ["-T", "BaseRecalibrator", "-o", tx_out_file, "-R", ref_file] downsample_pct = bam.get_downsample_pct(dup_align_bam, target_counts, data) if downsample_pct: params += ["--downsample_to_fraction", str(downsample_pct), "--downsampling_type", "ALL_READS"] if platform.lower() == "solid": params += ["--solid_nocall_strategy", "PURGE_READ", "--solid_recal_mode", "SET_Q_ZERO_BASE_N"] if dbsnp_file: params += ["--knownSites", dbsnp_file] if intervals: params += ["-L", intervals, "--interval_set_rule", "INTERSECTION"] memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, parallel_gc=True) else: with open(out_file, "w") as out_handle: out_handle.write("# No aligned reads") return out_file
python
def _gatk_base_recalibrator(broad_runner, dup_align_bam, ref_file, platform, dbsnp_file, intervals, data): """Step 1 of GATK recalibration process, producing table of covariates. For GATK 4 we use local multicore spark runs: https://github.com/broadinstitute/gatk/issues/2345 For GATK3, Large whole genome BAM files take an excessively long time to recalibrate and the extra inputs don't help much beyond a certain point. See the 'Downsampling analysis' plots in the GATK documentation: http://gatkforums.broadinstitute.org/discussion/44/base-quality-score-recalibrator#latest This identifies large files and calculates the fraction to downsample to. spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors """ target_counts = 1e8 # 100 million reads per read group, 20x the plotted max out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data), "%s-recal.grp" % utils.splitext_plus(os.path.basename(dup_align_bam))[0]) if not utils.file_exists(out_file): if has_aligned_reads(dup_align_bam, intervals): with file_transaction(data, out_file) as tx_out_file: gatk_type = broad_runner.gatk_type() assert gatk_type in ["restricted", "gatk4"], \ "Require full version of GATK 2.4+ or GATK4 for BQSR" params = ["-I", dup_align_bam] cores = dd.get_num_cores(data) if gatk_type == "gatk4": resources = config_utils.get_resources("gatk-spark", data["config"]) spark_opts = [str(x) for x in resources.get("options", [])] params += ["-T", "BaseRecalibratorSpark", "--output", tx_out_file, "--reference", dd.get_ref_file(data)] if spark_opts: params += spark_opts else: params += ["--spark-master", "local[%s]" % cores, "--conf", "spark.driver.host=localhost", "--conf", "spark.network.timeout=800", "--conf", "spark.executor.heartbeatInterval=100", "--conf", "spark.local.dir=%s" % os.path.dirname(tx_out_file)] if dbsnp_file: params += ["--known-sites", dbsnp_file] if intervals: params += ["-L", intervals, "--interval-set-rule", "INTERSECTION"] else: params += ["-T", "BaseRecalibrator", "-o", tx_out_file, "-R", ref_file] downsample_pct = bam.get_downsample_pct(dup_align_bam, target_counts, data) if downsample_pct: params += ["--downsample_to_fraction", str(downsample_pct), "--downsampling_type", "ALL_READS"] if platform.lower() == "solid": params += ["--solid_nocall_strategy", "PURGE_READ", "--solid_recal_mode", "SET_Q_ZERO_BASE_N"] if dbsnp_file: params += ["--knownSites", dbsnp_file] if intervals: params += ["-L", intervals, "--interval_set_rule", "INTERSECTION"] memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, parallel_gc=True) else: with open(out_file, "w") as out_handle: out_handle.write("# No aligned reads") return out_file
[ "def", "_gatk_base_recalibrator", "(", "broad_runner", ",", "dup_align_bam", ",", "ref_file", ",", "platform", ",", "dbsnp_file", ",", "intervals", ",", "data", ")", ":", "target_counts", "=", "1e8", "# 100 million reads per read group, 20x the plotted max", "out_file", ...
Step 1 of GATK recalibration process, producing table of covariates. For GATK 4 we use local multicore Spark runs: https://github.com/broadinstitute/gatk/issues/2345 For GATK3, large whole genome BAM files take an excessively long time to recalibrate and the extra inputs don't help much beyond a certain point. See the 'Downsampling analysis' plots in the GATK documentation: http://gatkforums.broadinstitute.org/discussion/44/base-quality-score-recalibrator#latest This identifies large files and calculates the fraction to downsample to. Spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors.
[ "Step", "1", "of", "GATK", "recalibration", "process", "producing", "table", "of", "covariates", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/recalibrate.py#L67-L132
train
218,043
bcbio/bcbio-nextgen
bcbio/variation/recalibrate.py
_gatk_apply_bqsr
def _gatk_apply_bqsr(data): """Parallel BQSR support for GATK4. Normalized qualities to 3 bin outputs at 10, 20 and 30 based on pipeline standard recommendations, which will help with output file sizes: https://github.com/CCDG/Pipeline-Standardization/blob/master/PipelineStandard.md#base-quality-score-binning-scheme https://github.com/gatk-workflows/broad-prod-wgs-germline-snps-indels/blob/5585cdf7877104f2c61b2720ddfe7235f2fad577/PairedEndSingleSampleWf.gatk4.0.wdl#L1081 spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors """ in_file = dd.get_align_bam(data) or dd.get_work_bam(data) out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data), "%s-recal.bam" % utils.splitext_plus(os.path.basename(in_file))[0]) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: broad_runner = broad.runner_from_config(data["config"]) gatk_type = broad_runner.gatk_type() cores = dd.get_num_cores(data) if gatk_type == "gatk4": resources = config_utils.get_resources("gatk-spark", data["config"]) spark_opts = [str(x) for x in resources.get("options", [])] params = ["-T", "ApplyBQSRSpark", "--input", in_file, "--output", tx_out_file, "--bqsr-recal-file", data["prep_recal"], "--static-quantized-quals", "10", "--static-quantized-quals", "20", "--static-quantized-quals", "30"] if spark_opts: params += spark_opts else: params += ["--spark-master", "local[%s]" % cores, "--conf", "spark.local.dir=%s" % os.path.dirname(tx_out_file), "--conf", "spark.driver.host=localhost", "--conf", "spark.network.timeout=800"] else: params = ["-T", "PrintReads", "-R", dd.get_ref_file(data), "-I", in_file, "-BQSR", data["prep_recal"], "-o", tx_out_file] # Avoid problems with intel deflater for GATK 3.8 and GATK4 # https://github.com/bcbio/bcbio-nextgen/issues/2145#issuecomment-343095357 if gatk_type == "gatk4": params += ["--jdk-deflater", "--jdk-inflater"] elif LooseVersion(broad_runner.gatk_major_version()) > LooseVersion("3.7"): params += ["-jdk_deflater", "-jdk_inflater"] memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, parallel_gc=True) bam.index(out_file, data["config"]) return out_file
python
def _gatk_apply_bqsr(data): """Parallel BQSR support for GATK4. Normalized qualities to 3 bin outputs at 10, 20 and 30 based on pipeline standard recommendations, which will help with output file sizes: https://github.com/CCDG/Pipeline-Standardization/blob/master/PipelineStandard.md#base-quality-score-binning-scheme https://github.com/gatk-workflows/broad-prod-wgs-germline-snps-indels/blob/5585cdf7877104f2c61b2720ddfe7235f2fad577/PairedEndSingleSampleWf.gatk4.0.wdl#L1081 spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors """ in_file = dd.get_align_bam(data) or dd.get_work_bam(data) out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data), "%s-recal.bam" % utils.splitext_plus(os.path.basename(in_file))[0]) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: broad_runner = broad.runner_from_config(data["config"]) gatk_type = broad_runner.gatk_type() cores = dd.get_num_cores(data) if gatk_type == "gatk4": resources = config_utils.get_resources("gatk-spark", data["config"]) spark_opts = [str(x) for x in resources.get("options", [])] params = ["-T", "ApplyBQSRSpark", "--input", in_file, "--output", tx_out_file, "--bqsr-recal-file", data["prep_recal"], "--static-quantized-quals", "10", "--static-quantized-quals", "20", "--static-quantized-quals", "30"] if spark_opts: params += spark_opts else: params += ["--spark-master", "local[%s]" % cores, "--conf", "spark.local.dir=%s" % os.path.dirname(tx_out_file), "--conf", "spark.driver.host=localhost", "--conf", "spark.network.timeout=800"] else: params = ["-T", "PrintReads", "-R", dd.get_ref_file(data), "-I", in_file, "-BQSR", data["prep_recal"], "-o", tx_out_file] # Avoid problems with intel deflater for GATK 3.8 and GATK4 # https://github.com/bcbio/bcbio-nextgen/issues/2145#issuecomment-343095357 if gatk_type == "gatk4": params += ["--jdk-deflater", "--jdk-inflater"] elif LooseVersion(broad_runner.gatk_major_version()) > LooseVersion("3.7"): params += ["-jdk_deflater", "-jdk_inflater"] memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None broad_runner.run_gatk(params, os.path.dirname(tx_out_file), memscale=memscale, parallel_gc=True) bam.index(out_file, data["config"]) return out_file
[ "def", "_gatk_apply_bqsr", "(", "data", ")", ":", "in_file", "=", "dd", ".", "get_align_bam", "(", "data", ")", "or", "dd", ".", "get_work_bam", "(", "data", ")", "out_file", "=", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "...
Parallel BQSR support for GATK4. Normalizes qualities to 3 bin outputs at 10, 20 and 30 based on pipeline standard recommendations, which will help with output file sizes: https://github.com/CCDG/Pipeline-Standardization/blob/master/PipelineStandard.md#base-quality-score-binning-scheme https://github.com/gatk-workflows/broad-prod-wgs-germline-snps-indels/blob/5585cdf7877104f2c61b2720ddfe7235f2fad577/PairedEndSingleSampleWf.gatk4.0.wdl#L1081 Spark host and timeout settings help deal with runs on restricted systems where we encounter network and timeout errors.
[ "Parallel", "BQSR", "support", "for", "GATK4", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/recalibrate.py#L134-L179
train
218,044
bcbio/bcbio-nextgen
bcbio/log/__init__.py
create_base_logger
def create_base_logger(config=None, parallel=None): """Setup base logging configuration, also handling remote logging. Correctly sets up for local, multiprocessing and distributed runs. Creates subscribers for non-local runs that will be references from local logging. Retrieves IP address using tips from http://stackoverflow.com/a/1267524/252589 """ if parallel is None: parallel = {} parallel_type = parallel.get("type", "local") cores = parallel.get("cores", 1) if parallel_type == "ipython": from bcbio.log import logbook_zmqpush fqdn_ip = socket.gethostbyname(socket.getfqdn()) ips = [fqdn_ip] if (fqdn_ip and not fqdn_ip.startswith("127.")) else [] if not ips: ips = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] if not ips: ips += [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())[1] for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]] if not ips: sys.stderr.write("Cannot resolve a local IP address that isn't 127.x.x.x " "Your machines might not have a local IP address " "assigned or are not able to resolve it.\n") sys.exit(1) uri = "tcp://%s" % ips[0] subscriber = logbook_zmqpush.ZeroMQPullSubscriber() mport = subscriber.socket.bind_to_random_port(uri) wport_uri = "%s:%s" % (uri, mport) parallel["log_queue"] = wport_uri subscriber.dispatch_in_background(_create_log_handler(config, True)) elif cores > 1: subscriber = IOSafeMultiProcessingSubscriber(mpq) subscriber.dispatch_in_background(_create_log_handler(config)) else: # Do not need to setup anything for local logging pass return parallel
python
def create_base_logger(config=None, parallel=None): """Setup base logging configuration, also handling remote logging. Correctly sets up for local, multiprocessing and distributed runs. Creates subscribers for non-local runs that will be references from local logging. Retrieves IP address using tips from http://stackoverflow.com/a/1267524/252589 """ if parallel is None: parallel = {} parallel_type = parallel.get("type", "local") cores = parallel.get("cores", 1) if parallel_type == "ipython": from bcbio.log import logbook_zmqpush fqdn_ip = socket.gethostbyname(socket.getfqdn()) ips = [fqdn_ip] if (fqdn_ip and not fqdn_ip.startswith("127.")) else [] if not ips: ips = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] if not ips: ips += [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())[1] for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]] if not ips: sys.stderr.write("Cannot resolve a local IP address that isn't 127.x.x.x " "Your machines might not have a local IP address " "assigned or are not able to resolve it.\n") sys.exit(1) uri = "tcp://%s" % ips[0] subscriber = logbook_zmqpush.ZeroMQPullSubscriber() mport = subscriber.socket.bind_to_random_port(uri) wport_uri = "%s:%s" % (uri, mport) parallel["log_queue"] = wport_uri subscriber.dispatch_in_background(_create_log_handler(config, True)) elif cores > 1: subscriber = IOSafeMultiProcessingSubscriber(mpq) subscriber.dispatch_in_background(_create_log_handler(config)) else: # Do not need to setup anything for local logging pass return parallel
[ "def", "create_base_logger", "(", "config", "=", "None", ",", "parallel", "=", "None", ")", ":", "if", "parallel", "is", "None", ":", "parallel", "=", "{", "}", "parallel_type", "=", "parallel", ".", "get", "(", "\"type\"", ",", "\"local\"", ")", "cores"...
Setup base logging configuration, also handling remote logging. Correctly sets up for local, multiprocessing and distributed runs. Creates subscribers for non-local runs that will be referenced from local logging. Retrieves IP address using tips from http://stackoverflow.com/a/1267524/252589
[ "Setup", "base", "logging", "configuration", "also", "handling", "remote", "logging", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/log/__init__.py#L91-L130
train
218,045
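The cryptic list comprehension in create_base_logger's last fallback amounts to the following: open a UDP socket, "connect" it to a public address (UDP connect sends no packets, it only sets the peer), and read back the local interface address the OS would route through. A minimal sketch of the same trick:

import socket

def routable_local_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 53))
        # the socket's own address is the non-loopback interface
        # that would be used to reach the outside world
        return s.getsockname()[0]
    finally:
        s.close()

print(routable_local_ip())  # e.g. 192.168.1.20 on a typical LAN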
bcbio/bcbio-nextgen
bcbio/log/__init__.py
setup_local_logging
def setup_local_logging(config=None, parallel=None): """Setup logging for a local context, directing messages to appropriate base loggers. Handles local, multiprocessing and distributed setup, connecting to handlers created by the base logger. """ if config is None: config = {} if parallel is None: parallel = {} parallel_type = parallel.get("type", "local") cores = parallel.get("cores", 1) wrapper = parallel.get("wrapper", None) if parallel_type == "ipython": from bcbio.log import logbook_zmqpush handler = logbook_zmqpush.ZeroMQPushHandler(parallel["log_queue"]) elif cores > 1: handler = logbook.queues.MultiProcessingHandler(mpq) else: handler = _create_log_handler(config, direct_hostname=wrapper is not None, write_toterm=wrapper is None) handler.push_thread() return handler
python
def setup_local_logging(config=None, parallel=None): """Setup logging for a local context, directing messages to appropriate base loggers. Handles local, multiprocessing and distributed setup, connecting to handlers created by the base logger. """ if config is None: config = {} if parallel is None: parallel = {} parallel_type = parallel.get("type", "local") cores = parallel.get("cores", 1) wrapper = parallel.get("wrapper", None) if parallel_type == "ipython": from bcbio.log import logbook_zmqpush handler = logbook_zmqpush.ZeroMQPushHandler(parallel["log_queue"]) elif cores > 1: handler = logbook.queues.MultiProcessingHandler(mpq) else: handler = _create_log_handler(config, direct_hostname=wrapper is not None, write_toterm=wrapper is None) handler.push_thread() return handler
[ "def", "setup_local_logging", "(", "config", "=", "None", ",", "parallel", "=", "None", ")", ":", "if", "config", "is", "None", ":", "config", "=", "{", "}", "if", "parallel", "is", "None", ":", "parallel", "=", "{", "}", "parallel_type", "=", "paralle...
Setup logging for a local context, directing messages to appropriate base loggers. Handles local, multiprocessing and distributed setup, connecting to handlers created by the base logger.
[ "Setup", "logging", "for", "a", "local", "context", "directing", "messages", "to", "appropriate", "base", "loggers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/log/__init__.py#L132-L151
train
218,046
bcbio/bcbio-nextgen
bcbio/log/__init__.py
setup_script_logging
def setup_script_logging(): """ Use this logger for standalone scripts, or script-like subcommands, such as bcbio_prepare_samples and bcbio_nextgen.py -w template. """ handlers = [logbook.NullHandler()] format_str = ("[{record.time:%Y-%m-%dT%H:%MZ}] " "{record.level_name}: {record.message}") handler = logbook.StreamHandler(sys.stderr, format_string=format_str, level="DEBUG") handler.push_thread() return handler
python
def setup_script_logging(): """ Use this logger for standalone scripts, or script-like subcommands, such as bcbio_prepare_samples and bcbio_nextgen.py -w template. """ handlers = [logbook.NullHandler()] format_str = ("[{record.time:%Y-%m-%dT%H:%MZ}] " "{record.level_name}: {record.message}") handler = logbook.StreamHandler(sys.stderr, format_string=format_str, level="DEBUG") handler.push_thread() return handler
[ "def", "setup_script_logging", "(", ")", ":", "handlers", "=", "[", "logbook", ".", "NullHandler", "(", ")", "]", "format_str", "=", "(", "\"[{record.time:%Y-%m-%dT%H:%MZ}] \"", "\"{record.level_name}: {record.message}\"", ")", "handler", "=", "logbook", ".", "StreamH...
Use this logger for standalone scripts, or script-like subcommands, such as bcbio_prepare_samples and bcbio_nextgen.py -w template.
[ "Use", "this", "logger", "for", "standalone", "scripts", "or", "script", "-", "like", "subcommands", "such", "as", "bcbio_prepare_samples", "and", "bcbio_nextgen", ".", "py", "-", "w", "template", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/log/__init__.py#L153-L165
train
218,047
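A minimal use of the handler setup_script_logging returns: push it onto the thread, then any logbook Logger in that thread writes timestamped lines to stderr. The logger name and message are illustrative:

import sys
import logbook

format_str = ("[{record.time:%Y-%m-%dT%H:%MZ}] "
              "{record.level_name}: {record.message}")
handler = logbook.StreamHandler(sys.stderr, format_string=format_str,
                                level="DEBUG")
handler.push_thread()

log = logbook.Logger("bcbio-script")
log.info("Prepared 3 sample fastq files")
# writes something like: [2024-01-01T12:00Z] INFO: Prepared 3 sample fastq files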
bcbio/bcbio-nextgen
scripts/cwltool2wdl.py
_validate
def _validate(wdl_file): """Run validation on the generated WDL output using wdltool. """ start_dir = os.getcwd() os.chdir(os.path.dirname(wdl_file)) print("Validating", wdl_file) subprocess.check_call(["wdltool", "validate", wdl_file]) os.chdir(start_dir)
python
def _validate(wdl_file): """Run validation on the generated WDL output using wdltool. """ start_dir = os.getcwd() os.chdir(os.path.dirname(wdl_file)) print("Validating", wdl_file) subprocess.check_call(["wdltool", "validate", wdl_file]) os.chdir(start_dir)
[ "def", "_validate", "(", "wdl_file", ")", ":", "start_dir", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "wdl_file", ")", ")", "print", "(", "\"Validating\"", ",", "wdl_file", ")", "subprocess", ...
Run validation on the generated WDL output using wdltool.
[ "Run", "validation", "on", "the", "generated", "WDL", "output", "using", "wdltool", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/cwltool2wdl.py#L53-L60
train
218,048
bcbio/bcbio-nextgen
scripts/cwltool2wdl.py
_wf_to_dict
def _wf_to_dict(wf, records): """Parse a workflow into cwl2wdl style dictionaries for base and sub-workflows. """ inputs, outputs, records = _get_wf_inout(wf, records) out = {"name": _id_to_name(_clean_id(wf.tool["id"])), "inputs": inputs, "outputs": outputs, "steps": [], "subworkflows": [], "requirements": []} for step in wf.steps: is_subworkflow = isinstance(step.embedded_tool, cwltool.workflow.Workflow) inputs, outputs, remapped, prescatter = _get_step_inout(step) inputs, scatter = _organize_step_scatter(step, inputs, remapped) if is_subworkflow: wf_def, records = _wf_to_dict(step.embedded_tool, records) out["subworkflows"].append({"id": "%s.%s" % (wf_def["name"], wf_def["name"]), "definition": wf_def, "inputs": inputs, "outputs": outputs, "scatter": scatter, "prescatter": prescatter}) else: task_def, records = _tool_to_dict(step.embedded_tool, records, remapped) out["steps"].append({"task_id": task_def["name"], "task_definition": task_def, "inputs": inputs, "outputs": outputs, "scatter": scatter, "prescatter": prescatter}) return out, records
python
def _wf_to_dict(wf, records): """Parse a workflow into cwl2wdl style dictionaries for base and sub-workflows. """ inputs, outputs, records = _get_wf_inout(wf, records) out = {"name": _id_to_name(_clean_id(wf.tool["id"])), "inputs": inputs, "outputs": outputs, "steps": [], "subworkflows": [], "requirements": []} for step in wf.steps: is_subworkflow = isinstance(step.embedded_tool, cwltool.workflow.Workflow) inputs, outputs, remapped, prescatter = _get_step_inout(step) inputs, scatter = _organize_step_scatter(step, inputs, remapped) if is_subworkflow: wf_def, records = _wf_to_dict(step.embedded_tool, records) out["subworkflows"].append({"id": "%s.%s" % (wf_def["name"], wf_def["name"]), "definition": wf_def, "inputs": inputs, "outputs": outputs, "scatter": scatter, "prescatter": prescatter}) else: task_def, records = _tool_to_dict(step.embedded_tool, records, remapped) out["steps"].append({"task_id": task_def["name"], "task_definition": task_def, "inputs": inputs, "outputs": outputs, "scatter": scatter, "prescatter": prescatter}) return out, records
[ "def", "_wf_to_dict", "(", "wf", ",", "records", ")", ":", "inputs", ",", "outputs", ",", "records", "=", "_get_wf_inout", "(", "wf", ",", "records", ")", "out", "=", "{", "\"name\"", ":", "_id_to_name", "(", "_clean_id", "(", "wf", ".", "tool", "[", ...
Parse a workflow into cwl2wdl style dictionaries for base and sub-workflows.
[ "Parse", "a", "workflow", "into", "cwl2wdl", "style", "dictionaries", "for", "base", "and", "sub", "-", "workflows", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/cwltool2wdl.py#L62-L83
train
218,049
bcbio/bcbio-nextgen
scripts/cwltool2wdl.py
_organize_step_scatter
python
def _organize_step_scatter(step, inputs, remapped):
    """Add scattering information from inputs, remapping input variables.
    """
    def extract_scatter_id(inp):
        _, ns_var = inp.split("#")
        _, var = ns_var.split("/")
        return var
    scatter_local = {}
    if "scatter" in step.tool:
        assert step.tool["scatterMethod"] == "dotproduct", \
            "Only support dotproduct scattering in conversion to WDL"
        inp_val = collections.OrderedDict()
        for x in inputs:
            inp_val[x["id"]] = x["value"]
        for scatter_key in [extract_scatter_id(x) for x in step.tool["scatter"]]:
            scatter_key = remapped.get(scatter_key) or scatter_key
            val = inp_val[scatter_key]
            if len(val.split(".")) in [1, 2]:
                base_key = val
                attr = None
            elif len(val.split(".")) == 3:
                orig_location, record, attr = val.split(".")
                base_key = "%s.%s" % (orig_location, record)
            else:
                raise ValueError("Unexpected scatter input: %s" % val)
            local_ref = base_key.split(".")[-1] + "_local"
            scatter_local[base_key] = local_ref
            if attr:
                local_ref += ".%s" % attr
            inp_val[scatter_key] = local_ref
        inputs = [{"id": iid, "value": ival} for iid, ival in inp_val.items()]
    return inputs, [(v, k) for k, v in scatter_local.items()]
[ "def", "_organize_step_scatter", "(", "step", ",", "inputs", ",", "remapped", ")", ":", "def", "extract_scatter_id", "(", "inp", ")", ":", "_", ",", "ns_var", "=", "inp", ".", "split", "(", "\"#\"", ")", "_", ",", "var", "=", "ns_var", ".", "split", ...
Add scattering information from inputs, remapping input variables.
[ "Add", "scattering", "information", "from", "inputs", "remapping", "input", "variables", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/cwltool2wdl.py#L133-L164
train
218,050
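To make the id handling concrete, a worked example of the extract_scatter_id closure on an invented CWL scatter reference:

def extract_scatter_id(inp):
    # Copied from the closure above: drop the document fragment and step prefix.
    _, ns_var = inp.split("#")
    _, var = ns_var.split("/")
    return var

# A scatter entry like "file:///wf.cwl#align/files" reduces to "files".
assert extract_scatter_id("file:///wf.cwl#align/files") == "files"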
bcbio/bcbio-nextgen
scripts/cwltool2wdl.py
_variable_type_to_read_fn
python
def _variable_type_to_read_fn(vartype, records):
    """Convert variant types into corresponding WDL standard library functions.
    """
    fn_map = {"String": "read_string", "Array[String]": "read_lines",
              "Array[Array[String]]": "read_tsv",
              "Object": "read_object", "Array[Object]": "read_objects",
              "Array[Array[Object]]": "read_objects",
              "Int": "read_int", "Float": "read_float"}
    for rec_name in records.keys():
        fn_map["%s" % rec_name] = "read_struct"
        fn_map["Array[%s]" % rec_name] = "read_struct"
        fn_map["Array[Array[%s]]" % rec_name] = "read_struct"
    # Read in Files as Strings
    vartype = vartype.replace("File", "String")
    # Can't read arrays of Ints/Floats
    vartype = vartype.replace("Array[Int]", "Array[String]")
    vartype = vartype.replace("Array[Float]", "Array[String]")
    return fn_map[vartype]
[ "def", "_variable_type_to_read_fn", "(", "vartype", ",", "records", ")", ":", "fn_map", "=", "{", "\"String\"", ":", "\"read_string\"", ",", "\"Array[String]\"", ":", "\"read_lines\"", ",", "\"Array[Array[String]]\"", ":", "\"read_tsv\"", ",", "\"Object\"", ":", "\"...
Convert variant types into corresponding WDL standard library functions.
[ "Convert", "variant", "types", "into", "corresponding", "WDL", "standard", "library", "functions", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/cwltool2wdl.py#L216-L233
train
218,051
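A quick sketch of how the mapping resolves a few variable types, assuming the function above is in scope; align_rec is an invented record name:

records = {"align_rec": None}  # hypothetical record type from upstream parsing
print(_variable_type_to_read_fn("File", records))              # -> read_string
print(_variable_type_to_read_fn("Array[File]", records))       # -> read_lines
print(_variable_type_to_read_fn("Array[Int]", records))        # -> read_lines
print(_variable_type_to_read_fn("Array[align_rec]", records))  # -> read_struct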
bcbio/bcbio-nextgen
scripts/cwltool2wdl.py
_requirements_to_dict
python
def _requirements_to_dict(rs):
    """Convert supported requirements into dictionary for output.
    """
    out = []
    added = set([])
    for r in rs:
        if r["class"] == "DockerRequirement" and "docker" not in added:
            added.add("docker")
            out.append({"requirement_type": "docker", "value": r["dockerImageId"]})
        elif r["class"] == "ResourceRequirement":
            if "coresMin" in r and "cpu" not in added:
                added.add("cpu")
                out.append({"requirement_type": "cpu", "value": r["coresMin"]})
            if "ramMin" in r and "memory" not in added:
                added.add("memory")
                out.append({"requirement_type": "memory", "value": "%s MB" % r["ramMin"]})
            if "tmpdirMin" in r and "disks" not in added:
                added.add("disks")
                out.append({"requirement_type": "disks", "value": "local-disk %s HDD" % r["tmpdirMin"]})
    return out
[ "def", "_requirements_to_dict", "(", "rs", ")", ":", "out", "=", "[", "]", "added", "=", "set", "(", "[", "]", ")", "for", "r", "in", "rs", ":", "if", "r", "[", "\"class\"", "]", "==", "\"DockerRequirement\"", "and", "\"docker\"", "not", "in", "added...
Convert supported requirements into dictionary for output.
[ "Convert", "supported", "requirements", "into", "dictionary", "for", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/cwltool2wdl.py#L306-L325
train
218,052
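To make the shape of the output concrete, a worked call with an invented CWL-style requirements list (the docker image id is illustrative):

reqs = [{"class": "DockerRequirement", "dockerImageId": "quay.io/example/image"},
        {"class": "ResourceRequirement", "coresMin": 4, "ramMin": 8192, "tmpdirMin": 30000}]
print(_requirements_to_dict(reqs))
# [{'requirement_type': 'docker', 'value': 'quay.io/example/image'},
#  {'requirement_type': 'cpu', 'value': 4},
#  {'requirement_type': 'memory', 'value': '8192 MB'},
#  {'requirement_type': 'disks', 'value': 'local-disk 30000 HDD'}]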
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_get_jvm_opts
python
def _get_jvm_opts(out_file, data):
    """Retrieve Java options, adjusting memory for available cores.
    """
    resources = config_utils.get_resources("purple", data["config"])
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3500m"])
    jvm_opts = config_utils.adjust_opts(jvm_opts,
                                        {"algorithm": {"memory_adjust": {"direction": "increase",
                                                                         "maximum": "30000M",
                                                                         "magnitude": dd.get_cores(data)}}})
    jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
    return jvm_opts
[ "def", "_get_jvm_opts", "(", "out_file", ",", "data", ")", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"purple\"", ",", "data", "[", "\"config\"", "]", ")", "jvm_opts", "=", "resources", ".", "get", "(", "\"jvm_opts\"", ",", "[", "\...
Retrieve Java options, adjusting memory for available cores.
[ "Retrieve", "Java", "options", "adjusting", "memory", "for", "available", "cores", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L43-L53
train
218,053
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_counts_to_amber
python
def _counts_to_amber(t_vals, n_vals):
    """Converts a line of CollectAllelicCounts into an AMBER line.
    """
    t_depth = int(t_vals["REF_COUNT"]) + int(t_vals["ALT_COUNT"])
    n_depth = int(n_vals["REF_COUNT"]) + int(n_vals["ALT_COUNT"])
    if n_depth > 0 and t_depth > 0:
        t_baf = float(t_vals["ALT_COUNT"]) / float(t_depth)
        n_baf = float(n_vals["ALT_COUNT"]) / float(n_depth)
        return [t_vals["CONTIG"], t_vals["POSITION"], t_baf, _normalize_baf(t_baf), t_depth,
                n_baf, _normalize_baf(n_baf), n_depth]
[ "def", "_counts_to_amber", "(", "t_vals", ",", "n_vals", ")", ":", "t_depth", "=", "int", "(", "t_vals", "[", "\"REF_COUNT\"", "]", ")", "+", "int", "(", "t_vals", "[", "\"ALT_COUNT\"", "]", ")", "n_depth", "=", "int", "(", "n_vals", "[", "\"REF_COUNT\""...
Converts a line of CollectAllelicCounts into an AMBER line.
[ "Converts", "a", "line", "of", "CollectAllelicCounts", "into", "AMBER", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L113-L122
train
218,054
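A hypothetical input pair for _counts_to_amber with invented counts, assuming the function and its _normalize_baf helper (not shown in this record) are in scope:

t_vals = {"CONTIG": "1", "POSITION": "815093", "REF_COUNT": "28", "ALT_COUNT": "22"}
n_vals = {"CONTIG": "1", "POSITION": "815093", "REF_COUNT": "31", "ALT_COUNT": "29"}
row = _counts_to_amber(t_vals, n_vals)
# t_depth = 50 and n_depth = 60, so both sides pass the depth check:
# t_baf = 22 / 50 = 0.44, n_baf = 29 / 60 ~= 0.483
# With a zero-depth side the function falls through and returns None, which is
# what the "if amber_line:" guard in _count_files_to_amber relies on.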
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_count_files_to_amber
python
def _count_files_to_amber(tumor_counts, normal_counts, work_dir, data):
    """Converts tumor and normal counts from GATK CollectAllelicCounts into Amber format.
    """
    amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
    out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(data))
    if not utils.file_uptodate(out_file, tumor_counts):
        with file_transaction(data, out_file) as tx_out_file:
            with open(tumor_counts) as tumor_handle:
                with open(normal_counts) as normal_handle:
                    with open(tx_out_file, "w") as out_handle:
                        writer = csv.writer(out_handle, delimiter="\t")
                        writer.writerow(["Chromosome", "Position", "TumorBAF", "TumorModifiedBAF",
                                         "TumorDepth", "NormalBAF", "NormalModifiedBAF", "NormalDepth"])
                        header = None
                        for t, n in zip(tumor_handle, normal_handle):
                            if header is None and t.startswith("CONTIG"):
                                header = t.strip().split()
                            elif header is not None:
                                t_vals = dict(zip(header, t.strip().split()))
                                n_vals = dict(zip(header, n.strip().split()))
                                amber_line = _counts_to_amber(t_vals, n_vals)
                                if amber_line:
                                    writer.writerow(amber_line)
    return out_file
[ "def", "_count_files_to_amber", "(", "tumor_counts", ",", "normal_counts", ",", "work_dir", ",", "data", ")", ":", "amber_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"amber\"", ")", ")", "out_file", ...
Converts tumor and normal counts from GATK CollectAllelicCounts into Amber format.
[ "Converts", "tumor", "and", "normal", "counts", "from", "GATK", "CollectAllelicCounts", "into", "Amber", "format", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L124-L148
train
218,055
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_amber_het_file
python
def _amber_het_file(method, vrn_files, work_dir, paired):
    """Create file of BAFs in normal heterozygous positions compatible with AMBER.

    Two available methods:
    - pon -- Use panel of normals with likely heterozygous sites.
    - variants -- Use pre-existing variant calls, filtered to likely heterozygotes.

    https://github.com/hartwigmedical/hmftools/tree/master/amber
    https://github.com/hartwigmedical/hmftools/blob/637e3db1a1a995f4daefe2d0a1511a5bdadbeb05/hmf-common/src/test/resources/amber/new.amber.baf
    """
    assert vrn_files, "Did not find compatible variant calling files for PURPLE inputs"
    from bcbio.heterogeneity import bubbletree
    if method == "variants":
        amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
        out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(paired.tumor_data))
        prep_file = bubbletree.prep_vrn_file(vrn_files[0]["vrn_file"], vrn_files[0]["variantcaller"],
                                             work_dir, paired, AmberWriter)
        utils.symlink_plus(prep_file, out_file)
        pcf_file = out_file + ".pcf"
        if not utils.file_exists(pcf_file):
            with file_transaction(paired.tumor_data, pcf_file) as tx_out_file:
                r_file = os.path.join(os.path.dirname(tx_out_file), "bafSegmentation.R")
                with open(r_file, "w") as out_handle:
                    out_handle.write(_amber_seg_script)
                cmd = "%s && %s --no-environ %s %s %s" % (utils.get_R_exports(), utils.Rscript_cmd(),
                                                          r_file, out_file, pcf_file)
                do.run(cmd, "PURPLE: AMBER baf segmentation")
    else:
        assert method == "pon"
        out_file = _run_amber(paired, work_dir)
    return out_file
[ "def", "_amber_het_file", "(", "method", ",", "vrn_files", ",", "work_dir", ",", "paired", ")", ":", "assert", "vrn_files", ",", "\"Did not find compatible variant calling files for PURPLE inputs\"", "from", "bcbio", ".", "heterogeneity", "import", "bubbletree", "if", "...
Create file of BAFs in normal heterozygous positions compatible with AMBER. Two available methods: - pon -- Use panel of normals with likely heterozygous sites. - variants -- Use pre-existing variant calls, filtered to likely heterozygotes. https://github.com/hartwigmedical/hmftools/tree/master/amber https://github.com/hartwigmedical/hmftools/blob/637e3db1a1a995f4daefe2d0a1511a5bdadbeb05/hmf-common/src/test/resources/amber/new.amber.baf
[ "Create", "file", "of", "BAFs", "in", "normal", "heterozygous", "positions", "compatible", "with", "AMBER", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L166-L197
train
218,056
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_run_cobalt
python
def _run_cobalt(paired, work_dir):
    """Run Cobalt for counting read depth across genomic windows.

    PURPLE requires even 1000bp windows so use integrated counting solution directly
    rather than converting from CNVkit calculations. If this approach is useful should
    be moved upstream to be available to other tools as an input comparison.

    https://github.com/hartwigmedical/hmftools/tree/master/count-bam-lines
    """
    cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt"))
    out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data))
    if not utils.file_exists(out_file):
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            cmd = ["COBALT"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
                  ["-reference", paired.normal_name, "-reference_bam", paired.normal_bam,
                   "-tumor", paired.tumor_name, "-tumor_bam", paired.tumor_bam,
                   "-threads", dd.get_num_cores(paired.tumor_data),
                   "-output_dir", os.path.dirname(tx_out_file),
                   "-gc_profile", dd.get_variation_resources(paired.tumor_data)["gc_profile"]]
            cmd = "%s && %s" % (utils.get_R_exports(), " ".join([str(x) for x in cmd]))
            do.run(cmd, "PURPLE: COBALT read depth normalization")
            for f in os.listdir(os.path.dirname(tx_out_file)):
                if f != os.path.basename(tx_out_file):
                    shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
                                os.path.join(cobalt_dir, f))
    return out_file
[ "def", "_run_cobalt", "(", "paired", ",", "work_dir", ")", ":", "cobalt_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"cobalt\"", ")", ")", "out_file", "=", "os", ".", "path", ".", "join", "(", ...
Run Cobalt for counting read depth across genomic windows. PURPLE requires even 1000bp windows so use integrated counting solution directly rather than converting from CNVkit calculations. If this approach is useful should be moved upstream to be available to other tools as an input comparison. https://github.com/hartwigmedical/hmftools/tree/master/count-bam-lines
[ "Run", "Cobalt", "for", "counting", "read", "depth", "across", "genomic", "windows", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L259-L285
train
218,057
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_cobalt_ratio_file
python
def _cobalt_ratio_file(paired, work_dir):
    """Convert CNVkit binning counts into cobalt ratio output.

    This contains read counts plus normalization for GC, from section 7.2
    "Determine read depth ratios for tumor and reference genomes"

    https://www.biorxiv.org/content/biorxiv/early/2018/09/20/415133.full.pdf

    Since CNVkit cnr files already have GC bias correction, we re-center the existing
    log2 ratios to be around 1, rather than zero, which matches the cobalt expectations.

    XXX This doesn't appear to be a worthwhile direction since PURPLE requires 1000bp
    even binning. We'll leave this here as a starting point for future work but
    work on using cobalt directly.
    """
    cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt"))
    out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data))
    if not utils.file_exists(out_file):
        cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data)
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                writer = csv.writer(out_handle, delimiter="\t")
                writer.writerow(["Chromosome", "Position", "ReferenceReadCount", "TumorReadCount",
                                 "ReferenceGCRatio", "TumorGCRatio", "ReferenceGCDiploidRatio"])
        raise NotImplementedError
    return out_file
[ "def", "_cobalt_ratio_file", "(", "paired", ",", "work_dir", ")", ":", "cobalt_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"cobalt\"", ")", ")", "out_file", "=", "os", ".", "path", ".", "join", ...
Convert CNVkit binning counts into cobalt ratio output. This contains read counts plus normalization for GC, from section 7.2 "Determine read depth ratios for tumor and reference genomes" https://www.biorxiv.org/content/biorxiv/early/2018/09/20/415133.full.pdf Since CNVkit cnr files already have GC bias correction, we re-center the existing log2 ratios to be around 1, rather than zero, which matches the cobalt expectations. XXX This doesn't appear to be a worthwhile direction since PURPLE requires 1000bp even binning. We'll leave this here as a starting point for future work but work on using cobalt directly.
[ "Convert", "CNVkit", "binning", "counts", "into", "cobalt", "ratio", "output", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L287-L313
train
218,058
bcbio/bcbio-nextgen
bcbio/structural/purple.py
_export_to_vcf
python
def _export_to_vcf(cur):
    """Convert PURPLE custom output into VCF.
    """
    if float(cur["copyNumber"]) > 2.0:
        svtype = "DUP"
    elif float(cur["copyNumber"]) < 2.0:
        svtype = "DEL"
    else:
        svtype = None
    if svtype:
        info = ["END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])),
                "SVTYPE=%s" % svtype, "CN=%s" % cur["copyNumber"],
                "PROBES=%s" % cur["depthWindowCount"]]
        return [cur["chromosome"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".",
                ";".join(info), "GT", "0/1"]
[ "def", "_export_to_vcf", "(", "cur", ")", ":", "if", "float", "(", "cur", "[", "\"copyNumber\"", "]", ")", ">", "2.0", ":", "svtype", "=", "\"DUP\"", "elif", "float", "(", "cur", "[", "\"copyNumber\"", "]", ")", "<", "2.0", ":", "svtype", "=", "\"DEL...
Convert PURPLE custom output into VCF.
[ "Convert", "PURPLE", "custom", "output", "into", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L324-L337
train
218,059
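A worked call with an invented PURPLE segment dictionary; a copyNumber of exactly 2.0 produces no svtype, so copy-neutral segments implicitly return None and are dropped by callers:

cur = {"chromosome": "7", "start": "140000000", "end": "140100000",
       "copyNumber": "3.2", "depthWindowCount": "100"}
print(_export_to_vcf(cur))
# ['7', '140000000', '.', 'N', '<DUP>', '.', '.',
#  'END=140100000;SVLEN=100000;SVTYPE=DUP;CN=3.2;PROBES=100', 'GT', '0/1']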
bcbio/bcbio-nextgen
bcbio/rnaseq/pizzly.py
make_pizzly_gtf
python
def make_pizzly_gtf(gtf_file, out_file, data):
    """
    pizzly needs the GTF to be in gene -> transcript -> exon order for each
    gene. It also wants the gene biotype set as the source.
    """
    if file_exists(out_file):
        return out_file
    db = gtf.get_gtf_db(gtf_file)
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for gene in db.features_of_type("gene"):
                children = [x for x in db.children(id=gene)]
                for child in children:
                    if child.attributes.get("gene_biotype", None):
                        gene_biotype = child.attributes.get("gene_biotype")
                gene.attributes['gene_biotype'] = gene_biotype
                gene.source = gene_biotype[0]
                print(gene, file=out_handle)
                for child in children:
                    child.source = gene_biotype[0]
                    # gffread produces a version-less FASTA file
                    child.attributes.pop("transcript_version", None)
                    print(child, file=out_handle)
    return out_file
[ "def", "make_pizzly_gtf", "(", "gtf_file", ",", "out_file", ",", "data", ")", ":", "if", "file_exists", "(", "out_file", ")", ":", "return", "out_file", "db", "=", "gtf", ".", "get_gtf_db", "(", "gtf_file", ")", "with", "file_transaction", "(", "data", ","...
pizzly needs the GTF to be in gene -> transcript -> exon order for each gene. It also wants the gene biotype set as the source.
[ "pizzly", "needs", "the", "GTF", "to", "be", "in", "gene", "-", ">", "transcript", "-", ">", "exon", "order", "for", "each", "gene", ".", "it", "also", "wants", "the", "gene", "biotype", "set", "as", "the", "source" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/pizzly.py#L82-L105
train
218,060
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_validate_caller_vcf
python
def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data):
    """Validate a caller VCF against truth within callable regions using SURVIVOR.

    Combines files with SURVIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/)
    """
    stats = _calculate_comparison_stats(truth_vcf)
    call_vcf = _prep_vcf(call_vcf, callable_bed, dd.get_sample_name(data),
                         dd.get_sample_name(data), stats, work_dir, data)
    truth_vcf = _prep_vcf(truth_vcf, callable_bed, vcfutils.get_samples(truth_vcf)[0],
                          "%s-truth" % dd.get_sample_name(data), stats, work_dir, data)
    cmp_vcf = _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data)
    return _comparison_stats_from_merge(cmp_vcf, stats, svcaller, data)
[ "def", "_validate_caller_vcf", "(", "call_vcf", ",", "truth_vcf", ",", "callable_bed", ",", "svcaller", ",", "work_dir", ",", "data", ")", ":", "stats", "=", "_calculate_comparison_stats", "(", "truth_vcf", ")", "call_vcf", "=", "_prep_vcf", "(", "call_vcf", ","...
Validate a caller VCF against truth within callable regions using SURVIVOR. Combines files with SURVIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/)
[ "Validate", "a", "caller", "VCF", "against", "truth", "within", "callable", "regions", "using", "SURVIVOR", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L44-L55
train
218,061
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_survivor_merge
python
def _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data):
    """Perform a merge of two callsets using SURVIVOR.
    """
    out_file = os.path.join(work_dir, "eval-merge.vcf")
    if not utils.file_uptodate(out_file, call_vcf):
        in_call_vcf = call_vcf.replace(".vcf.gz", ".vcf")
        if not utils.file_exists(in_call_vcf):
            with file_transaction(data, in_call_vcf) as tx_in_call_vcf:
                do.run("gunzip -c {call_vcf} > {tx_in_call_vcf}".format(**locals()))
        in_truth_vcf = truth_vcf.replace(".vcf.gz", ".vcf")
        if not utils.file_exists(in_truth_vcf):
            with file_transaction(data, in_truth_vcf) as tx_in_truth_vcf:
                do.run("gunzip -c {truth_vcf} > {tx_in_truth_vcf}".format(**locals()))
        in_list_file = os.path.join(work_dir, "eval-inputs.txt")
        with open(in_list_file, "w") as out_handle:
            out_handle.write("%s\n%s\n" % (in_call_vcf, in_truth_vcf))
        with file_transaction(data, out_file) as tx_out_file:
            cmd = ("SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}")
            do.run(cmd.format(**locals()), "Merge SV files for validation: %s" % dd.get_sample_name(data))
    return out_file
[ "def", "_survivor_merge", "(", "call_vcf", ",", "truth_vcf", ",", "stats", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"eval-merge.vcf\"", ")", "if", "not", "utils", ".", "file_uptodate",...
Perform a merge of two callsets using SURVIVOR.
[ "Perform", "a", "merge", "of", "two", "callsets", "using", "SURVIVOR" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L77-L96
train
218,062
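The SURVIVOR command above is purely positional; going by SURVIVOR's usage text, the arguments after the input list are roughly: maximum breakpoint merge distance, minimum number of supporting callers, type matching flag, strand matching flag, distance-estimation flag, and minimum SV size, so this merge requires only one supporting caller and ignores type and strand. A sketch of the string formatting with invented stats values:

stats = {"merge_size": 500, "min_size": 40}  # invented values for illustration
cmd = "SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}"
print(cmd.format(in_list_file="eval-inputs.txt", stats=stats, tx_out_file="eval-merge.vcf"))
# SURVIVOR merge eval-inputs.txt 500 1 0 0 0 40 eval-merge.vcf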
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_calculate_comparison_stats
python
def _calculate_comparison_stats(truth_vcf):
    """Identify calls to validate from the input truth VCF.
    """
    # Avoid very small events for average calculations
    min_stat_size = 50
    min_median_size = 250
    sizes = []
    svtypes = set([])
    with utils.open_gzipsafe(truth_vcf) as in_handle:
        for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
            stats = _summarize_call(call)
            if stats["size"] > min_stat_size:
                sizes.append(stats["size"])
                svtypes.add(stats["svtype"])
    pct10 = int(np.percentile(sizes, 10))
    pct25 = int(np.percentile(sizes, 25))
    pct50 = int(np.percentile(sizes, 50))
    pct75 = int(np.percentile(sizes, 75))
    ranges_detailed = [(int(min(sizes)), pct10), (pct10, pct25), (pct25, pct50),
                       (pct50, pct75), (pct75, max(sizes))]
    ranges_split = [(int(min(sizes)), pct50), (pct50, max(sizes))]
    return {"min_size": int(min(sizes) * 0.95), "max_size": int(max(sizes) + 1.05),
            "svtypes": svtypes,
            "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)),
            "ranges": []}
[ "def", "_calculate_comparison_stats", "(", "truth_vcf", ")", ":", "# Avoid very small events for average calculations", "min_stat_size", "=", "50", "min_median_size", "=", "250", "sizes", "=", "[", "]", "svtypes", "=", "set", "(", "[", "]", ")", "with", "utils", "...
Identify calls to validate from the input truth VCF.
[ "Identify", "calls", "to", "validate", "from", "the", "input", "truth", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L110-L133
train
218,063
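To make the merge_size computation concrete, a sketch with invented event sizes; only sizes above min_median_size (250) enter that median:

import numpy as np

sizes = [60, 120, 300, 800, 2500, 12000]  # invented, all above min_stat_size
merge_size = int(np.percentile([x for x in sizes if x > 250], 50))
print(merge_size)              # median of [300, 800, 2500, 12000] -> 1650
print(int(min(sizes) * 0.95))  # min_size -> 57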
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_get_start_end
python
def _get_start_end(parts, index=7):
    """Retrieve start and end for a VCF record, skipping BNDs without END coords.
    """
    start = parts[1]
    end = [x.split("=")[-1] for x in parts[index].split(";") if x.startswith("END=")]
    if end:
        end = end[0]
        return start, end
    return None, None
[ "def", "_get_start_end", "(", "parts", ",", "index", "=", "7", ")", ":", "start", "=", "parts", "[", "1", "]", "end", "=", "[", "x", ".", "split", "(", "\"=\"", ")", "[", "-", "1", "]", "for", "x", "in", "parts", "[", "index", "]", ".", "spli...
Retrieve start and end for a VCF record, skipping BNDs without END coords.
[ "Retrieve", "start", "and", "end", "for", "a", "VCF", "record", "skips", "BNDs", "without", "END", "coords" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L135-L143
train
218,064
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_summarize_call
python
def _summarize_call(parts):
    """Provide summary metrics on size and svtype for an SV call.
    """
    svtype = [x.split("=")[1] for x in parts[7].split(";") if x.startswith("SVTYPE=")]
    svtype = svtype[0] if svtype else ""
    start, end = _get_start_end(parts)
    return {"svtype": svtype, "size": int(end) - int(start)}
[ "def", "_summarize_call", "(", "parts", ")", ":", "svtype", "=", "[", "x", ".", "split", "(", "\"=\"", ")", "[", "1", "]", "for", "x", "in", "parts", "[", "7", "]", ".", "split", "(", "\";\"", ")", "if", "x", ".", "startswith", "(", "\"SVTYPE=\""...
Provide summary metrics on size and svtype for an SV call.
[ "Provide", "summary", "metrics", "on", "size", "and", "svtype", "for", "a", "SV", "call", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L145-L151
train
218,065
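A worked example for the two helpers above on a minimal, invented VCF data line:

line = "1\t1000\t.\tN\t<DEL>\t.\tPASS\tSVTYPE=DEL;END=5000"
parts = line.rstrip().split("\t")
print(_get_start_end(parts))   # ('1000', '5000')
print(_summarize_call(parts))  # {'svtype': 'DEL', 'size': 4000}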
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_prep_callable_bed
python
def _prep_callable_bed(in_file, work_dir, stats, data):
    """Sort and merge callable BED regions to prevent SV double counting
    """
    out_file = os.path.join(work_dir, "%s-merge.bed.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
    gsort = config_utils.get_program("gsort", data)
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            fai_file = ref.fasta_idx(dd.get_ref_file(data))
            cmd = ("{gsort} {in_file} {fai_file} | bedtools merge -i - -d {stats[merge_size]} | "
                   "bgzip -c > {tx_out_file}")
            do.run(cmd.format(**locals()), "Prepare SV callable BED regions")
    return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_prep_callable_bed", "(", "in_file", ",", "work_dir", ",", "stats", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"%s-merge.bed.gz\"", "%", "utils", ".", "splitext_plus", "(", "os", ".", "path", "...
Sort and merge callable BED regions to prevent SV double counting
[ "Sort", "and", "merge", "callable", "BED", "regions", "to", "prevent", "SV", "double", "counting" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L176-L187
train
218,066
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_get_anns_to_remove
python
def _get_anns_to_remove(in_file):
    """Find larger annotations, if present in VCF, that slow down processing.
    """
    to_remove = ["ANN", "LOF"]
    to_remove_str = tuple(["##INFO=<ID=%s" % x for x in to_remove])
    cur_remove = []
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            if not line.startswith("#"):
                break
            elif line.startswith(to_remove_str):
                cur_id = line.split("ID=")[-1].split(",")[0]
                cur_remove.append("INFO/%s" % cur_id)
    return ",".join(cur_remove)
[ "def", "_get_anns_to_remove", "(", "in_file", ")", ":", "to_remove", "=", "[", "\"ANN\"", ",", "\"LOF\"", "]", "to_remove_str", "=", "tuple", "(", "[", "\"##INFO=<ID=%s\"", "%", "x", "for", "x", "in", "to_remove", "]", ")", "cur_remove", "=", "[", "]", "...
Find larger annotations, if present in VCF, that slow down processing.
[ "Find", "larger", "annotations", "if", "present", "in", "VCF", "that", "slow", "down", "processing", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L189-L202
train
218,067
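The returned string is a comma-separated list of INFO/<ID> tags, the form that bcftools annotate -x accepts for stripping annotations (an assumption about the downstream consumer, which this record does not show). A standalone rendering of the header scan on an invented header:

header = ['##INFO=<ID=ANN,Number=.,Type=String,Description="Functional annotations">',
          '##INFO=<ID=DP,Number=1,Type=Integer,Description="Approximate read depth">',
          "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO"]
to_remove_str = tuple("##INFO=<ID=%s" % x for x in ["ANN", "LOF"])
cur_remove = [line.split("ID=")[-1].split(",")[0]
              for line in header if line.startswith(to_remove_str)]
print(",".join("INFO/%s" % x for x in cur_remove))  # INFO/ANN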
bcbio/bcbio-nextgen
bcbio/structural/validate.py
cnv_to_event
python
def cnv_to_event(name, data):
    """Convert a CNV to an event name.
    """
    cur_ploidy = ploidy.get_ploidy([data])
    if name.startswith("cnv"):
        num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")])
        if num < cur_ploidy:
            return "DEL"
        elif num > cur_ploidy:
            return "DUP"
        else:
            return name
    else:
        return name
[ "def", "cnv_to_event", "(", "name", ",", "data", ")", ":", "cur_ploidy", "=", "ploidy", ".", "get_ploidy", "(", "[", "data", "]", ")", "if", "name", ".", "startswith", "(", "\"cnv\"", ")", ":", "num", "=", "max", "(", "[", "int", "(", "x", ")", "...
Convert a CNV to an event name.
[ "Convert", "a", "CNV", "to", "an", "event", "name", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L216-L229
train
218,068
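A standalone rendering of the cnv_to_event decision logic with ploidy fixed at 2 for illustration; cnv_to_event_simple is a hypothetical helper, not part of the codebase:

def cnv_to_event_simple(name, cur_ploidy=2):
    # Same decision logic, with ploidy fixed instead of read from sample data.
    if name.startswith("cnv"):
        num = max(int(x) for x in name.split("_")[0].replace("cnv", "").split(";"))
        if num < cur_ploidy:
            return "DEL"
        if num > cur_ploidy:
            return "DUP"
    return name

assert cnv_to_event_simple("cnv0_cnvkit") == "DEL"
assert cnv_to_event_simple("cnv3;4_cnvkit") == "DUP"        # max(3, 4) = 4 > 2
assert cnv_to_event_simple("cnv2_cnvkit") == "cnv2_cnvkit"  # neutral, passed through
assert cnv_to_event_simple("DEL_lumpy") == "DEL_lumpy"      # non-CNV names untouched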
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_evaluate_one
python
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data):
    """Compare ensemble results for a caller against a specific caller and SV type.
    """
    def cnv_matches(name):
        return cnv_to_event(name, data) == svtype
    def is_breakend(name):
        return name.startswith("BND")
    def in_size_range(max_buffer=0):
        def _work(feat):
            minf, maxf = size_range
            buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))
            size = feat.end - feat.start
            return size >= max([0, minf - buffer]) and size < maxf + buffer
        return _work
    def is_caller_svtype(feat):
        for name in feat.name.split(","):
            if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name))
                    and (caller == "sv-ensemble" or name.endswith(caller))):
                return True
        return False
    minf, maxf = size_range
    efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge()
    tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas()
    etotal = efeats.count()
    ttotal = tfeats.count()
    match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count()
    return {"sensitivity": _stat_str(match, ttotal),
            "precision": _stat_str(match, etotal)}
[ "def", "_evaluate_one", "(", "caller", ",", "svtype", ",", "size_range", ",", "ensemble", ",", "truth", ",", "data", ")", ":", "def", "cnv_matches", "(", "name", ")", ":", "return", "cnv_to_event", "(", "name", ",", "data", ")", "==", "svtype", "def", ...
Compare ensemble results for a caller against a specific caller and SV type.
[ "Compare", "a", "ensemble", "results", "for", "a", "caller", "against", "a", "specific", "caller", "and", "SV", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L231-L258
train
218,069
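The in_size_range closure caps its buffer at one tenth of the size-range midpoint, and the calls above pass in_size_range(0), so no buffering applies there. A sketch of the arithmetic with invented values:

size_range = (100, 1000)  # invented range
max_buffer = 50
minf, maxf = size_range
buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))  # min(50, 55) -> 50
print(buffer, 80 >= max([0, minf - buffer]))  # 50 True: an 80bp event passes with buffering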
bcbio/bcbio-nextgen
bcbio/structural/validate.py
_plot_evaluation_event
python
def _plot_evaluation_event(df_csv, svtype):
    """Provide plot of evaluation metrics for an SV event, stratified by event size.
    """
    titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications", "INS": "Insertions"}
    out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype)
    sns.set(style='white')
    if not utils.file_uptodate(out_file, df_csv):
        metrics = ["sensitivity", "precision"]
        df = pd.read_csv(df_csv).fillna("0%")
        df = df[(df["svtype"] == svtype)]
        event_sizes = _find_events_to_include(df, EVENT_SIZES)
        fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True)
        if len(event_sizes) == 1:
            axs = [axs]
        callers = sorted(df["caller"].unique())
        if "sv-ensemble" in callers:
            callers.remove("sv-ensemble")
            callers.append("sv-ensemble")
        for i, size in enumerate(event_sizes):
            size_label = "%s to %sbp" % size
            size = "%s-%s" % size
            for j, metric in enumerate(metrics):
                ax = axs[i][j]
                ax.get_xaxis().set_ticks([])
                ax.spines['bottom'].set_visible(False)
                ax.spines['left'].set_visible(False)
                ax.spines['top'].set_visible(False)
                ax.spines['right'].set_visible(False)
                ax.set_xlim(0, 125.0)
                if i == 0:
                    ax.set_title(metric, size=12, y=1.2)
                vals, labels = _get_plot_val_labels(df, size, metric, callers)
                ax.barh(range(1, len(vals) + 1), vals)
                if j == 0:
                    ax.tick_params(axis='y', which='major', labelsize=8)
                    ax.locator_params(axis="y", tight=True)
                    ax.set_yticks(range(1, len(callers) + 1, 1))
                    ax.set_yticklabels(callers, va="center")
                    ax.text(100, len(callers) + 1, size_label, fontsize=10)
                else:
                    ax.get_yaxis().set_ticks([])
                for ai, (val, label) in enumerate(zip(vals, labels)):
                    ax.annotate(label, (val + 0.75, ai + 1), va='center', size=7)
        if svtype in titles:
            fig.text(0.025, 0.95, titles[svtype], size=14)
        fig.set_size_inches(7, len(event_sizes) + 1)
        fig.savefig(out_file)
    return out_file
[ "def", "_plot_evaluation_event", "(", "df_csv", ",", "svtype", ")", ":", "titles", "=", "{", "\"INV\"", ":", "\"Inversions\"", ",", "\"DEL\"", ":", "\"Deletions\"", ",", "\"DUP\"", ":", "\"Duplications\"", ",", "\"INS\"", ":", "\"Insertions\"", "}", "out_file", ...
Provide plot of evaluation metrics for an SV event, stratified by event size.
[ "Provide", "plot", "of", "evaluation", "metrics", "for", "an", "SV", "event", "stratified", "by", "event", "size", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L300-L348
train
218,070
bcbio/bcbio-nextgen
bcbio/structural/validate.py
evaluate
python
def evaluate(data):
    """Provide evaluations for multiple callers split by structural variant type.
    """
    work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
                                               dd.get_sample_name(data), "validate"))
    truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data)
    if truth_sets and data.get("sv"):
        if isinstance(truth_sets, dict):
            val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data)
            summary_plots = _plot_evaluation(df_csv)
            data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv}
        else:
            assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets
            val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data)
            title = "%s structural variants" % dd.get_sample_name(data)
            summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title)
            data["sv-validate"] = {"csv": val_summary,
                                   "plot": summary_plots[0] if len(summary_plots) > 0 else None}
    return data
[ "def", "evaluate", "(", "data", ")", ":", "work_dir", "=", "utils", ".", "safe_makedir", "(", "os", ".", "path", ".", "join", "(", "data", "[", "\"dirs\"", "]", "[", "\"work\"", "]", ",", "\"structural\"", ",", "dd", ".", "get_sample_name", "(", "data"...
Provide evaluations for multiple callers split by structural variant type.
[ "Provide", "evaluations", "for", "multiple", "callers", "split", "by", "structural", "variant", "type", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L375-L392
train
218,071
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
_add_region_params
python
def _add_region_params(region, out_file, items, gatk_type):
    """Add parameters for selecting by region to command line.
    """
    params = []
    variant_regions = bedutils.population_variant_regions(items)
    region = subset_variant_regions(variant_regions, region, out_file, items)
    if region:
        if gatk_type == "gatk4":
            params += ["-L", bamprep.region_to_gatk(region), "--interval-set-rule", "INTERSECTION"]
        else:
            params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"]
    params += gatk.standard_cl_params(items)
    return params
[ "def", "_add_region_params", "(", "region", ",", "out_file", ",", "items", ",", "gatk_type", ")", ":", "params", "=", "[", "]", "variant_regions", "=", "bedutils", ".", "population_variant_regions", "(", "items", ")", "region", "=", "subset_variant_regions", "("...
Add parameters for selecting by region to command line.
[ "Add", "parameters", "for", "selecting", "by", "region", "to", "command", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L48-L60
train
218,072
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
_prep_inputs
python
def _prep_inputs(align_bams, ref_file, items):
    """Ensure inputs to calling are indexed as expected.
    """
    broad_runner = broad.runner_from_path("picard", items[0]["config"])
    broad_runner.run_fn("picard_index_ref", ref_file)
    for x in align_bams:
        bam.index(x, items[0]["config"])
[ "def", "_prep_inputs", "(", "align_bams", ",", "ref_file", ",", "items", ")", ":", "broad_runner", "=", "broad", ".", "runner_from_path", "(", "\"picard\"", ",", "items", "[", "0", "]", "[", "\"config\"", "]", ")", "broad_runner", ".", "run_fn", "(", "\"pi...
Ensure inputs to calling are indexed as expected.
[ "Ensure", "inputs", "to", "calling", "are", "indexed", "as", "expected", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L70-L76
train
218,073
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
mutect2_caller
def mutect2_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Call variation with GATK's MuTect2. This requires the full non open-source version of GATK 3.5+. """ if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): paired = vcfutils.get_paired_bams(align_bams, items) broad_runner = broad.runner_from_config(items[0]["config"]) gatk_type = broad_runner.gatk_type() _prep_inputs(align_bams, ref_file, items) with file_transaction(items[0], out_file) as tx_out_file: params = ["-T", "Mutect2" if gatk_type == "gatk4" else "MuTect2", "--annotation", "ClippingRankSumTest", "--annotation", "DepthPerSampleHC"] if gatk_type == "gatk4": params += ["--reference", ref_file] else: params += ["-R", ref_file] for a in annotation.get_gatk_annotations(items[0]["config"], include_baseqranksum=False): params += ["--annotation", a] # Avoid issues with BAM CIGAR reads that GATK doesn't like if gatk_type == "gatk4": params += ["--read-validation-stringency", "LENIENT"] params += _add_tumor_params(paired, items, gatk_type) params += _add_region_params(region, out_file, items, gatk_type) # Avoid adding dbSNP/Cosmic so they do not get fed to variant filtering algorithm # Not yet clear how this helps or hurts in a general case. #params += _add_assoc_params(assoc_files) resources = config_utils.get_resources("mutect2", items[0]["config"]) if "options" in resources: params += [str(x) for x in resources.get("options", [])] assert LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"), \ "Require full version of GATK 3.5+ for mutect2 calling" broad_runner.new_resources("mutect2") gatk_cmd = broad_runner.cl_gatk(params, os.path.dirname(tx_out_file)) if gatk_type == "gatk4": tx_raw_prefilt_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) tx_raw_file = "%s-raw-filt%s" % utils.splitext_plus(tx_out_file) filter_cmd = _mutect2_filter(broad_runner, tx_raw_prefilt_file, tx_raw_file, ref_file) cmd = "{gatk_cmd} -O {tx_raw_prefilt_file} && {filter_cmd}" else: tx_raw_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) cmd = "{gatk_cmd} > {tx_raw_file}" do.run(cmd.format(**locals()), "MuTect2") out_file = _af_filter(paired.tumor_data, tx_raw_file, out_file) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
python
def mutect2_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Call variation with GATK's MuTect2. This requires the full non open-source version of GATK 3.5+. """ if out_file is None: out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0] if not utils.file_exists(out_file): paired = vcfutils.get_paired_bams(align_bams, items) broad_runner = broad.runner_from_config(items[0]["config"]) gatk_type = broad_runner.gatk_type() _prep_inputs(align_bams, ref_file, items) with file_transaction(items[0], out_file) as tx_out_file: params = ["-T", "Mutect2" if gatk_type == "gatk4" else "MuTect2", "--annotation", "ClippingRankSumTest", "--annotation", "DepthPerSampleHC"] if gatk_type == "gatk4": params += ["--reference", ref_file] else: params += ["-R", ref_file] for a in annotation.get_gatk_annotations(items[0]["config"], include_baseqranksum=False): params += ["--annotation", a] # Avoid issues with BAM CIGAR reads that GATK doesn't like if gatk_type == "gatk4": params += ["--read-validation-stringency", "LENIENT"] params += _add_tumor_params(paired, items, gatk_type) params += _add_region_params(region, out_file, items, gatk_type) # Avoid adding dbSNP/Cosmic so they do not get fed to variant filtering algorithm # Not yet clear how this helps or hurts in a general case. #params += _add_assoc_params(assoc_files) resources = config_utils.get_resources("mutect2", items[0]["config"]) if "options" in resources: params += [str(x) for x in resources.get("options", [])] assert LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"), \ "Require full version of GATK 3.5+ for mutect2 calling" broad_runner.new_resources("mutect2") gatk_cmd = broad_runner.cl_gatk(params, os.path.dirname(tx_out_file)) if gatk_type == "gatk4": tx_raw_prefilt_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) tx_raw_file = "%s-raw-filt%s" % utils.splitext_plus(tx_out_file) filter_cmd = _mutect2_filter(broad_runner, tx_raw_prefilt_file, tx_raw_file, ref_file) cmd = "{gatk_cmd} -O {tx_raw_prefilt_file} && {filter_cmd}" else: tx_raw_file = "%s-raw%s" % utils.splitext_plus(tx_out_file) cmd = "{gatk_cmd} > {tx_raw_file}" do.run(cmd.format(**locals()), "MuTect2") out_file = _af_filter(paired.tumor_data, tx_raw_file, out_file) return vcfutils.bgzip_and_index(out_file, items[0]["config"])
[ "def", "mutect2_caller", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "if", "out_file", "is", "None", ":", "out_file", "=", "\"%s-variants.vcf.gz\"", "%", "utils", ...
Call variation with GATK's MuTect2. This requires the full, non-open-source version of GATK 3.5+.
[ "Call", "variation", "with", "GATK", "s", "MuTect2", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L78-L126
train
218,074
bcbio/bcbio-nextgen
bcbio/variation/mutect2.py
_mutect2_filter
def _mutect2_filter(broad_runner, in_file, out_file, ref_file): """Filter of MuTect2 calls, a separate step in GATK4. """ params = ["-T", "FilterMutectCalls", "--reference", ref_file, "--variant", in_file, "--output", out_file] return broad_runner.cl_gatk(params, os.path.dirname(out_file))
python
def _mutect2_filter(broad_runner, in_file, out_file, ref_file): """Filter of MuTect2 calls, a separate step in GATK4. """ params = ["-T", "FilterMutectCalls", "--reference", ref_file, "--variant", in_file, "--output", out_file] return broad_runner.cl_gatk(params, os.path.dirname(out_file))
[ "def", "_mutect2_filter", "(", "broad_runner", ",", "in_file", ",", "out_file", ",", "ref_file", ")", ":", "params", "=", "[", "\"-T\"", ",", "\"FilterMutectCalls\"", ",", "\"--reference\"", ",", "ref_file", ",", "\"--variant\"", ",", "in_file", ",", "\"--output...
Filter of MuTect2 calls, a separate step in GATK4.
[ "Filter", "of", "MuTect2", "calls", "a", "separate", "step", "in", "GATK4", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L128-L132
train
218,075
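For illustration, a sketch of the parameter list this helper assembles before broad_runner.cl_gatk turns it into a full command line; the file names below are hypothetical.
# Hypothetical inputs -> the FilterMutectCalls parameter list built above.
params = ["-T", "FilterMutectCalls",
          "--reference", "/refs/hg38.fa",
          "--variant", "sample-raw.vcf.gz",
          "--output", "sample-raw-filt.vcf.gz"]
# In mutect2_caller this becomes the filter_cmd chained after the raw
# GATK4 calling step with "&&".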
bcbio/bcbio-nextgen
bcbio/upload/irods.py
update_file
def update_file(finfo, sample_info, config): """ Update the file to an iRODS repository. """ ffinal = filesystem.update_file(finfo, sample_info, config, pass_uptodate=True) _upload_dir_icommands_cli(config.get("dir"), config.get("folder"), config)
python
def update_file(finfo, sample_info, config): """ Update the file to an iRODS repository. """ ffinal = filesystem.update_file(finfo, sample_info, config, pass_uptodate=True) _upload_dir_icommands_cli(config.get("dir"), config.get("folder"), config)
[ "def", "update_file", "(", "finfo", ",", "sample_info", ",", "config", ")", ":", "ffinal", "=", "filesystem", ".", "update_file", "(", "finfo", ",", "sample_info", ",", "config", ",", "pass_uptodate", "=", "True", ")", "_upload_dir_icommands_cli", "(", "config...
Update the file in an iRODS repository.
[ "Update", "the", "file", "in", "an", "iRODS", "repository", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/irods.py#L25-L31
train
218,076
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_get_callers
def _get_callers(items, stage, special_cases=False): """Retrieve available callers for the provided stage. Handles special cases like CNVkit that can be in initial or standard depending on if fed into Lumpy analysis. """ callers = utils.deepish_copy(_CALLERS[stage]) if special_cases and "cnvkit" in callers: has_lumpy = any("lumpy" in get_svcallers(d) or "lumpy" in d["config"]["algorithm"].get("svcaller_orig", []) for d in items) if has_lumpy and any("lumpy_usecnv" in dd.get_tools_on(d) for d in items): if stage != "initial": del callers["cnvkit"] else: if stage != "standard": del callers["cnvkit"] return callers
python
def _get_callers(items, stage, special_cases=False): """Retrieve available callers for the provided stage. Handles special cases like CNVkit that can be in initial or standard depending on if fed into Lumpy analysis. """ callers = utils.deepish_copy(_CALLERS[stage]) if special_cases and "cnvkit" in callers: has_lumpy = any("lumpy" in get_svcallers(d) or "lumpy" in d["config"]["algorithm"].get("svcaller_orig", []) for d in items) if has_lumpy and any("lumpy_usecnv" in dd.get_tools_on(d) for d in items): if stage != "initial": del callers["cnvkit"] else: if stage != "standard": del callers["cnvkit"] return callers
[ "def", "_get_callers", "(", "items", ",", "stage", ",", "special_cases", "=", "False", ")", ":", "callers", "=", "utils", ".", "deepish_copy", "(", "_CALLERS", "[", "stage", "]", ")", "if", "special_cases", "and", "\"cnvkit\"", "in", "callers", ":", "has_l...
Retrieve available callers for the provided stage. Handles special cases like CNVkit that can be in initial or standard depending on if fed into Lumpy analysis.
[ "Retrieve", "available", "callers", "for", "the", "provided", "stage", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L38-L54
train
218,077
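For illustration, a standalone sketch of the CNVkit special case with the sample lookups stubbed into booleans; the helper name and stub values are assumptions for the example.
# Standalone sketch: CNVkit is scheduled early only when Lumpy will
# consume its output, otherwise it runs in the standard stage.
def cnvkit_stage_filter(callers, stage, has_lumpy, lumpy_usecnv):
    callers = dict(callers)  # stand-in for utils.deepish_copy
    if "cnvkit" in callers:
        keep_stage = "initial" if (has_lumpy and lumpy_usecnv) else "standard"
        if stage != keep_stage:
            del callers["cnvkit"]
    return callers

print(cnvkit_stage_filter({"cnvkit": "run_fn"}, "initial", True, True))   # kept
print(cnvkit_stage_filter({"cnvkit": "run_fn"}, "standard", True, True))  # {}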
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_handle_multiple_svcallers
def _handle_multiple_svcallers(data, stage): """Retrieve configured structural variation caller, handling multiple. """ svs = get_svcallers(data) # special cases -- prioritization if stage == "ensemble" and dd.get_svprioritize(data): svs.append("prioritize") out = [] for svcaller in svs: if svcaller in _get_callers([data], stage): base = copy.deepcopy(data) # clean SV callers present in multiple rounds and not this caller final_svs = [] for sv in data.get("sv", []): if (stage == "ensemble" or sv["variantcaller"] == svcaller or sv["variantcaller"] not in svs or svcaller not in _get_callers([data], stage, special_cases=True)): final_svs.append(sv) base["sv"] = final_svs base["config"]["algorithm"]["svcaller"] = svcaller base["config"]["algorithm"]["svcaller_orig"] = svs out.append(base) return out
python
def _handle_multiple_svcallers(data, stage): """Retrieve configured structural variation caller, handling multiple. """ svs = get_svcallers(data) # special cases -- prioritization if stage == "ensemble" and dd.get_svprioritize(data): svs.append("prioritize") out = [] for svcaller in svs: if svcaller in _get_callers([data], stage): base = copy.deepcopy(data) # clean SV callers present in multiple rounds and not this caller final_svs = [] for sv in data.get("sv", []): if (stage == "ensemble" or sv["variantcaller"] == svcaller or sv["variantcaller"] not in svs or svcaller not in _get_callers([data], stage, special_cases=True)): final_svs.append(sv) base["sv"] = final_svs base["config"]["algorithm"]["svcaller"] = svcaller base["config"]["algorithm"]["svcaller_orig"] = svs out.append(base) return out
[ "def", "_handle_multiple_svcallers", "(", "data", ",", "stage", ")", ":", "svs", "=", "get_svcallers", "(", "data", ")", "# special cases -- prioritization", "if", "stage", "==", "\"ensemble\"", "and", "dd", ".", "get_svprioritize", "(", "data", ")", ":", "svs",...
Retrieve configured structural variation caller, handling multiple.
[ "Retrieve", "configured", "structural", "variation", "caller", "handling", "multiple", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L64-L85
train
218,078
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
finalize_sv
def finalize_sv(samples, config): """Combine results from multiple sv callers into a single ordered 'sv' key. """ by_bam = collections.OrderedDict() for x in samples: batch = dd.get_batch(x) or [dd.get_sample_name(x)] try: by_bam[x["align_bam"], tuple(batch)].append(x) except KeyError: by_bam[x["align_bam"], tuple(batch)] = [x] by_batch = collections.OrderedDict() lead_batches = {} for grouped_calls in by_bam.values(): def orig_svcaller_order(x): orig_callers = tz.get_in(["config", "algorithm", "svcaller_orig"], x) cur_caller = tz.get_in(["config", "algorithm", "svcaller"], x) return orig_callers.index(cur_caller) sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x], key=orig_svcaller_order) final = grouped_calls[0] if len(sorted_svcalls) > 0: final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls]) final["config"]["algorithm"]["svcaller"] = final["config"]["algorithm"].pop("svcaller_orig") batch = dd.get_batch(final) or dd.get_sample_name(final) batches = batch if isinstance(batch, (list, tuple)) else [batch] if len(batches) > 1: lead_batches[(dd.get_sample_name(final), dd.get_phenotype(final) == "germline")] = batches[0] for batch in batches: try: by_batch[batch].append(final) except KeyError: by_batch[batch] = [final] out = [] for batch, items in by_batch.items(): if any("svplots" in dd.get_tools_on(d) for d in items): items = plot.by_regions(items) for data in items: if lead_batches.get((dd.get_sample_name(data), dd.get_phenotype(data) == "germline")) in [batch, None]: out.append([data]) return out
python
def finalize_sv(samples, config): """Combine results from multiple sv callers into a single ordered 'sv' key. """ by_bam = collections.OrderedDict() for x in samples: batch = dd.get_batch(x) or [dd.get_sample_name(x)] try: by_bam[x["align_bam"], tuple(batch)].append(x) except KeyError: by_bam[x["align_bam"], tuple(batch)] = [x] by_batch = collections.OrderedDict() lead_batches = {} for grouped_calls in by_bam.values(): def orig_svcaller_order(x): orig_callers = tz.get_in(["config", "algorithm", "svcaller_orig"], x) cur_caller = tz.get_in(["config", "algorithm", "svcaller"], x) return orig_callers.index(cur_caller) sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x], key=orig_svcaller_order) final = grouped_calls[0] if len(sorted_svcalls) > 0: final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls]) final["config"]["algorithm"]["svcaller"] = final["config"]["algorithm"].pop("svcaller_orig") batch = dd.get_batch(final) or dd.get_sample_name(final) batches = batch if isinstance(batch, (list, tuple)) else [batch] if len(batches) > 1: lead_batches[(dd.get_sample_name(final), dd.get_phenotype(final) == "germline")] = batches[0] for batch in batches: try: by_batch[batch].append(final) except KeyError: by_batch[batch] = [final] out = [] for batch, items in by_batch.items(): if any("svplots" in dd.get_tools_on(d) for d in items): items = plot.by_regions(items) for data in items: if lead_batches.get((dd.get_sample_name(data), dd.get_phenotype(data) == "germline")) in [batch, None]: out.append([data]) return out
[ "def", "finalize_sv", "(", "samples", ",", "config", ")", ":", "by_bam", "=", "collections", ".", "OrderedDict", "(", ")", "for", "x", "in", "samples", ":", "batch", "=", "dd", ".", "get_batch", "(", "x", ")", "or", "[", "dd", ".", "get_sample_name", ...
Combine results from multiple sv callers into a single ordered 'sv' key.
[ "Combine", "results", "from", "multiple", "sv", "callers", "into", "a", "single", "ordered", "sv", "key", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L87-L126
train
218,079
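For illustration, a sketch of the reordering step inside finalize_sv: per-caller results are sorted back into the order the callers were configured in. The caller names are hypothetical.
orig_callers = ["cnvkit", "lumpy", "manta"]   # svcaller_orig order
results = [{"caller": "manta"}, {"caller": "cnvkit"}, {"caller": "lumpy"}]
results.sort(key=lambda x: orig_callers.index(x["caller"]))
# -> cnvkit, lumpy, manta; the per-caller "sv" lists are then concatenated
# in this configured order before batches are assembled.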
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
batch_for_sv
def batch_for_sv(samples): """Prepare a set of samples for parallel structural variant calling. CWL input target -- groups samples into batches and structural variant callers for parallel processing. """ samples = cwlutils.assign_complex_to_samples(samples) to_process, extras, background = _batch_split_by_sv(samples, "standard") out = [cwlutils.samples_to_records(xs) for xs in to_process.values()] + extras return out
python
def batch_for_sv(samples): """Prepare a set of samples for parallel structural variant calling. CWL input target -- groups samples into batches and structural variant callers for parallel processing. """ samples = cwlutils.assign_complex_to_samples(samples) to_process, extras, background = _batch_split_by_sv(samples, "standard") out = [cwlutils.samples_to_records(xs) for xs in to_process.values()] + extras return out
[ "def", "batch_for_sv", "(", "samples", ")", ":", "samples", "=", "cwlutils", ".", "assign_complex_to_samples", "(", "samples", ")", "to_process", ",", "extras", ",", "background", "=", "_batch_split_by_sv", "(", "samples", ",", "\"standard\"", ")", "out", "=", ...
Prepare a set of samples for parallel structural variant calling. CWL input target -- groups samples into batches and structural variant callers for parallel processing.
[ "Prepare", "a", "set", "of", "samples", "for", "parallel", "structural", "variant", "calling", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L133-L142
train
218,080
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
run
def run(samples, run_parallel, stage): """Run structural variation detection. The stage indicates which level of structural variant calling to run. - initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy) - standard, regular batch calling - ensemble, post-calling, combine other callers or prioritize results """ to_process, extras, background = _batch_split_by_sv(samples, stage) processed = run_parallel("detect_sv", ([xs, background, stage] for xs in to_process.values())) finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])]) if len(processed) > 0 else []) return extras + finalized
python
def run(samples, run_parallel, stage): """Run structural variation detection. The stage indicates which level of structural variant calling to run. - initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy) - standard, regular batch calling - ensemble, post-calling, combine other callers or prioritize results """ to_process, extras, background = _batch_split_by_sv(samples, stage) processed = run_parallel("detect_sv", ([xs, background, stage] for xs in to_process.values())) finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])]) if len(processed) > 0 else []) return extras + finalized
[ "def", "run", "(", "samples", ",", "run_parallel", ",", "stage", ")", ":", "to_process", ",", "extras", ",", "background", "=", "_batch_split_by_sv", "(", "samples", ",", "stage", ")", "processed", "=", "run_parallel", "(", "\"detect_sv\"", ",", "(", "[", ...
Run structural variation detection. The stage indicates which level of structural variant calling to run. - initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy) - standard, regular batch calling - ensemble, post-calling, combine other callers or prioritize results
[ "Run", "structural", "variation", "detection", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L174-L187
train
218,081
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
detect_sv
def detect_sv(items, all_items=None, stage="standard"): """Top level parallel target for examining structural variation. """ items = [utils.to_single_data(x) for x in items] items = cwlutils.unpack_tarballs(items, items[0]) svcaller = items[0]["config"]["algorithm"].get("svcaller") caller_fn = _get_callers(items, stage, special_cases=True).get(svcaller) out = [] if svcaller and caller_fn: if (all_items and svcaller in _NEEDS_BACKGROUND and not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)): names = set([dd.get_sample_name(x) for x in items]) background = [x for x in all_items if dd.get_sample_name(x) not in names] for svdata in caller_fn(items, background): out.append([svdata]) else: for svdata in caller_fn(items): out.append([svdata]) else: for data in items: out.append([data]) # Avoid nesting of callers for CWL runs for easier extraction if cwlutils.is_cwl_run(items[0]): out_cwl = [] for data in [utils.to_single_data(x) for x in out]: # Run validation directly from CWL runs since we're single stage data = validate.evaluate(data) data["svvalidate"] = {"summary": tz.get_in(["sv-validate", "csv"], data)} svs = data.get("sv") if svs: assert len(svs) == 1, svs data["sv"] = svs[0] else: data["sv"] = {} data = _add_supplemental(data) out_cwl.append([data]) return out_cwl return out
python
def detect_sv(items, all_items=None, stage="standard"): """Top level parallel target for examining structural variation. """ items = [utils.to_single_data(x) for x in items] items = cwlutils.unpack_tarballs(items, items[0]) svcaller = items[0]["config"]["algorithm"].get("svcaller") caller_fn = _get_callers(items, stage, special_cases=True).get(svcaller) out = [] if svcaller and caller_fn: if (all_items and svcaller in _NEEDS_BACKGROUND and not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)): names = set([dd.get_sample_name(x) for x in items]) background = [x for x in all_items if dd.get_sample_name(x) not in names] for svdata in caller_fn(items, background): out.append([svdata]) else: for svdata in caller_fn(items): out.append([svdata]) else: for data in items: out.append([data]) # Avoid nesting of callers for CWL runs for easier extraction if cwlutils.is_cwl_run(items[0]): out_cwl = [] for data in [utils.to_single_data(x) for x in out]: # Run validation directly from CWL runs since we're single stage data = validate.evaluate(data) data["svvalidate"] = {"summary": tz.get_in(["sv-validate", "csv"], data)} svs = data.get("sv") if svs: assert len(svs) == 1, svs data["sv"] = svs[0] else: data["sv"] = {} data = _add_supplemental(data) out_cwl.append([data]) return out_cwl return out
[ "def", "detect_sv", "(", "items", ",", "all_items", "=", "None", ",", "stage", "=", "\"standard\"", ")", ":", "items", "=", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "items", "]", "items", "=", "cwlutils", ".", "unpack_tarba...
Top level parallel target for examining structural variation.
[ "Top", "level", "parallel", "target", "for", "examining", "structural", "variation", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L189-L226
train
218,082
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_add_supplemental
def _add_supplemental(data): """Add additional supplemental files to CWL sv output, give useful names. """ if "supplemental" not in data["sv"]: data["sv"]["supplemental"] = [] if data["sv"].get("variantcaller"): cur_name = _useful_basename(data) for k in ["cns", "vrn_bed"]: if data["sv"].get(k) and os.path.exists(data["sv"][k]): dname, orig = os.path.split(data["sv"][k]) orig_base, orig_ext = utils.splitext_plus(orig) orig_base = _clean_name(orig_base, data) if orig_base: fname = "%s-%s%s" % (cur_name, orig_base, orig_ext) else: fname = "%s%s" % (cur_name, orig_ext) sup_out_file = os.path.join(dname, fname) utils.symlink_plus(data["sv"][k], sup_out_file) data["sv"]["supplemental"].append(sup_out_file) return data
python
def _add_supplemental(data): """Add additional supplemental files to CWL sv output, give useful names. """ if "supplemental" not in data["sv"]: data["sv"]["supplemental"] = [] if data["sv"].get("variantcaller"): cur_name = _useful_basename(data) for k in ["cns", "vrn_bed"]: if data["sv"].get(k) and os.path.exists(data["sv"][k]): dname, orig = os.path.split(data["sv"][k]) orig_base, orig_ext = utils.splitext_plus(orig) orig_base = _clean_name(orig_base, data) if orig_base: fname = "%s-%s%s" % (cur_name, orig_base, orig_ext) else: fname = "%s%s" % (cur_name, orig_ext) sup_out_file = os.path.join(dname, fname) utils.symlink_plus(data["sv"][k], sup_out_file) data["sv"]["supplemental"].append(sup_out_file) return data
[ "def", "_add_supplemental", "(", "data", ")", ":", "if", "\"supplemental\"", "not", "in", "data", "[", "\"sv\"", "]", ":", "data", "[", "\"sv\"", "]", "[", "\"supplemental\"", "]", "=", "[", "]", "if", "data", "[", "\"sv\"", "]", ".", "get", "(", "\"...
Add additional supplemental files to CWL sv output, giving them useful names.
[ "Add", "additional", "supplemental", "files", "to", "CWL", "sv", "output", "giving", "them", "useful", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L228-L247
train
218,083
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_clean_name
def _clean_name(fname, data): """Remove standard prefixes from a filename before renaming with useful names. """ for to_remove in dd.get_batches(data) + [dd.get_sample_name(data), data["sv"]["variantcaller"]]: for ext in ("-", "_"): if fname.startswith("%s%s" % (to_remove, ext)): fname = fname[len(to_remove) + len(ext):] if fname.startswith(to_remove): fname = fname[len(to_remove):] return fname
python
def _clean_name(fname, data): """Remove standard prefixes from a filename before renaming with useful names. """ for to_remove in dd.get_batches(data) + [dd.get_sample_name(data), data["sv"]["variantcaller"]]: for ext in ("-", "_"): if fname.startswith("%s%s" % (to_remove, ext)): fname = fname[len(to_remove) + len(ext):] if fname.startswith(to_remove): fname = fname[len(to_remove):] return fname
[ "def", "_clean_name", "(", "fname", ",", "data", ")", ":", "for", "to_remove", "in", "dd", ".", "get_batches", "(", "data", ")", "+", "[", "dd", ".", "get_sample_name", "(", "data", ")", ",", "data", "[", "\"sv\"", "]", "[", "\"variantcaller\"", "]", ...
Remove standard prefixes from a filename before renaming with useful names.
[ "Remove", "standard", "prefixes", "from", "a", "filename", "before", "renaming", "with", "useful", "names", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L249-L258
train
218,084
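For illustration, a re-implementation sketch of the stripping loop with the batch, sample, and caller names passed in directly rather than read from data; the file name is hypothetical.
def clean_name(fname, prefixes):
    # Strip each known prefix, with or without a "-"/"_" separator.
    for to_remove in prefixes:
        for sep in ("-", "_"):
            if fname.startswith(to_remove + sep):
                fname = fname[len(to_remove) + len(sep):]
        if fname.startswith(to_remove):
            fname = fname[len(to_remove):]
    return fname

print(clean_name("batch1-sample1-cnvkit-call.cns",
                 ["batch1", "sample1", "cnvkit"]))  # -> "call.cns"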
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
_group_by_sample
def _group_by_sample(items): """Group a set of items by sample names + multiple callers for prioritization """ by_sample = collections.defaultdict(list) for d in items: by_sample[dd.get_sample_name(d)].append(d) out = [] for sample_group in by_sample.values(): cur = utils.deepish_copy(sample_group[0]) svs = [] for d in sample_group: svs.append(d["sv"]) cur["sv"] = svs out.append(cur) return out
python
def _group_by_sample(items): """Group a set of items by sample names + multiple callers for prioritization """ by_sample = collections.defaultdict(list) for d in items: by_sample[dd.get_sample_name(d)].append(d) out = [] for sample_group in by_sample.values(): cur = utils.deepish_copy(sample_group[0]) svs = [] for d in sample_group: svs.append(d["sv"]) cur["sv"] = svs out.append(cur) return out
[ "def", "_group_by_sample", "(", "items", ")", ":", "by_sample", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "d", "in", "items", ":", "by_sample", "[", "dd", ".", "get_sample_name", "(", "d", ")", "]", ".", "append", "(", "d", ")", ...
Group a set of items by sample name, collecting multiple callers for prioritization.
[ "Group", "a", "set", "of", "items", "by", "sample", "name", "collecting", "multiple", "callers", "for", "prioritization", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L269-L283
train
218,085
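For illustration, a condensed sketch of the grouping with dd.get_sample_name replaced by a plain "name" key; the sample and caller names are hypothetical.
import collections

items = [{"name": "S1", "sv": {"caller": "lumpy"}},
         {"name": "S1", "sv": {"caller": "manta"}},
         {"name": "S2", "sv": {"caller": "lumpy"}}]
by_sample = collections.defaultdict(list)
for d in items:
    by_sample[d["name"]].append(d["sv"])
# -> S1 carries both caller results in one "sv" list; S2 carries one.
grouped = [{"name": n, "sv": svs} for n, svs in by_sample.items()]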
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
standardize_cnv_reference
def standardize_cnv_reference(data): """Standardize cnv_reference background to support multiple callers. """ out = tz.get_in(["config", "algorithm", "background", "cnv_reference"], data, {}) cur_callers = set(data["config"]["algorithm"].get("svcaller")) & _CNV_REFERENCE if isinstance(out, six.string_types): if not len(cur_callers) == 1: raise ValueError("Multiple CNV callers and single background reference for %s: %s" % (data["description"], list(cur_callers))) else: out = {cur_callers.pop(): out} return out
python
def standardize_cnv_reference(data): """Standardize cnv_reference background to support multiple callers. """ out = tz.get_in(["config", "algorithm", "background", "cnv_reference"], data, {}) cur_callers = set(data["config"]["algorithm"].get("svcaller")) & _CNV_REFERENCE if isinstance(out, six.string_types): if not len(cur_callers) == 1: raise ValueError("Multiple CNV callers and single background reference for %s: %s" % (data["description"], list(cur_callers))) else: out = {cur_callers.pop(): out} return out
[ "def", "standardize_cnv_reference", "(", "data", ")", ":", "out", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"background\"", ",", "\"cnv_reference\"", "]", ",", "data", ",", "{", "}", ")", "cur_callers", "=", "set", "(",...
Standardize cnv_reference background to support multiple callers.
[ "Standardize", "cnv_reference", "background", "to", "support", "multiple", "callers", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L324-L335
train
218,086
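For illustration, a standalone sketch of the string-to-dict normalization; the membership of _CNV_REFERENCE and the file path are assumptions made for the example.
_CNV_REFERENCE = {"cnvkit", "gatk-cnv"}  # assumed membership for the sketch

def to_caller_dict(background, svcallers):
    cnv_callers = set(svcallers) & _CNV_REFERENCE
    if isinstance(background, str):
        if len(cnv_callers) != 1:
            raise ValueError("Ambiguous CNV background: %s" % sorted(cnv_callers))
        return {cnv_callers.pop(): background}
    return background

print(to_caller_dict("/backgrounds/panel.cnn", ["cnvkit", "lumpy"]))
# -> {'cnvkit': '/backgrounds/panel.cnn'}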
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie2.py
_bowtie2_args_from_config
def _bowtie2_args_from_config(config, curcl): """Configurable high level options for bowtie2. """ qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] user_opts = config_utils.get_resources("bowtie2", config).get("options", []) for flag_opt in (o for o in user_opts if str(o).startswith("-")): if flag_opt in curcl: raise ValueError("Duplicate option %s in resources and bcbio commandline: %s %s" % (flag_opt, user_opts, curcl)) return core_flags + qual_flags + user_opts
python
def _bowtie2_args_from_config(config, curcl): """Configurable high level options for bowtie2. """ qual_format = config["algorithm"].get("quality_format", "") if qual_format.lower() == "illumina": qual_flags = ["--phred64-quals"] else: qual_flags = [] num_cores = config["algorithm"].get("num_cores", 1) core_flags = ["-p", str(num_cores)] if num_cores > 1 else [] user_opts = config_utils.get_resources("bowtie2", config).get("options", []) for flag_opt in (o for o in user_opts if str(o).startswith("-")): if flag_opt in curcl: raise ValueError("Duplicate option %s in resources and bcbio commandline: %s %s" % (flag_opt, user_opts, curcl)) return core_flags + qual_flags + user_opts
[ "def", "_bowtie2_args_from_config", "(", "config", ",", "curcl", ")", ":", "qual_format", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"quality_format\"", ",", "\"\"", ")", "if", "qual_format", ".", "lower", "(", ")", "==", "\"illumina\"", ":...
Configurable high level options for bowtie2.
[ "Configurable", "high", "level", "options", "for", "bowtie2", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie2.py#L15-L30
train
218,087
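For illustration, the flags this helper would produce for an assumed configuration with Illumina 1.3+ quality encoding and eight cores (no user resource options).
config = {"algorithm": {"quality_format": "illumina", "num_cores": 8}}
qual_flags = (["--phred64-quals"]
              if config["algorithm"]["quality_format"].lower() == "illumina"
              else [])
num_cores = config["algorithm"]["num_cores"]
core_flags = ["-p", str(num_cores)] if num_cores > 1 else []
print(core_flags + qual_flags)  # -> ['-p', '8', '--phred64-quals']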
bcbio/bcbio-nextgen
bcbio/ngsalign/bowtie2.py
align
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Alignment with bowtie2. """ config = data["config"] analysis_config = ANALYSIS.get(data["analysis"].lower()) assert analysis_config, "Analysis %s is not supported by bowtie2" % (data["analysis"]) out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie2", config)] cl += extra_args if extra_args is not None else [] cl += ["-q", "-x", ref_file] cl += analysis_config.get("params", []) if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += ["-U", fastq_file] if names and "rg" in names: cl += ["--rg-id", names["rg"]] for key, tag in [("sample", "SM"), ("pl", "PL"), ("pu", "PU"), ("lb", "LB")]: if names.get(key): cl += ["--rg", "%s:%s" % (tag, names[key])] cl += _bowtie2_args_from_config(config, cl) cl = [str(i) for i in cl] cmd = "unset JAVA_HOME && " + " ".join(cl) + " | " + tobam_cl do.run(cmd, "Aligning %s and %s with Bowtie2." % (fastq_file, pair_file)) return out_file
python
def align(fastq_file, pair_file, ref_file, names, align_dir, data, extra_args=None): """Alignment with bowtie2. """ config = data["config"] analysis_config = ANALYSIS.get(data["analysis"].lower()) assert analysis_config, "Analysis %s is not supported by bowtie2" % (data["analysis"]) out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data))) if data.get("align_split"): final_file = out_file out_file, data = alignprep.setup_combine(final_file, data) fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data) else: final_file = None if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)): with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file): cl = [config_utils.get_program("bowtie2", config)] cl += extra_args if extra_args is not None else [] cl += ["-q", "-x", ref_file] cl += analysis_config.get("params", []) if pair_file: cl += ["-1", fastq_file, "-2", pair_file] else: cl += ["-U", fastq_file] if names and "rg" in names: cl += ["--rg-id", names["rg"]] for key, tag in [("sample", "SM"), ("pl", "PL"), ("pu", "PU"), ("lb", "LB")]: if names.get(key): cl += ["--rg", "%s:%s" % (tag, names[key])] cl += _bowtie2_args_from_config(config, cl) cl = [str(i) for i in cl] cmd = "unset JAVA_HOME && " + " ".join(cl) + " | " + tobam_cl do.run(cmd, "Aligning %s and %s with Bowtie2." % (fastq_file, pair_file)) return out_file
[ "def", "align", "(", "fastq_file", ",", "pair_file", ",", "ref_file", ",", "names", ",", "align_dir", ",", "data", ",", "extra_args", "=", "None", ")", ":", "config", "=", "data", "[", "\"config\"", "]", "analysis_config", "=", "ANALYSIS", ".", "get", "(...
Alignment with bowtie2.
[ "Alignment", "with", "bowtie2", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/bowtie2.py#L32-L66
train
218,088
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
create_cromwell_config
def create_cromwell_config(args, work_dir, sample_file): """Prepare a cromwell configuration within the current working directory. """ docker_attrs = ["String? docker", "String? docker_user"] cwl_attrs = ["Int? cpuMin", "Int? cpuMax", "Int? memoryMin", "Int? memoryMax", "String? outDirMin", "String? outDirMax", "String? tmpDirMin", "String? tmpDirMax"] out_file = os.path.join(work_dir, "bcbio-cromwell.conf") run_config = _load_custom_config(args.runconfig) if args.runconfig else {} # Avoid overscheduling jobs for local runs by limiting concurrent jobs # Longer term would like to keep these within defined core window joblimit = args.joblimit if joblimit == 0 and not args.scheduler: joblimit = 1 file_types = _get_filesystem_types(args, sample_file) std_args = {"docker_attrs": "" if args.no_container else "\n ".join(docker_attrs), "submit_docker": 'submit-docker: ""' if args.no_container else "", "joblimit": "concurrent-job-limit = %s" % (joblimit) if joblimit > 0 else "", "cwl_attrs": "\n ".join(cwl_attrs), "filesystem": _get_filesystem_config(file_types), "database": run_config.get("database", DATABASE_CONFIG % {"work_dir": work_dir})} cl_args, conf_args, scheduler, cloud_type = _args_to_cromwell(args) std_args["engine"] = _get_engine_filesystem_config(file_types, args, conf_args) conf_args.update(std_args) main_config = {"hpc": (HPC_CONFIGS[scheduler] % conf_args) if scheduler else "", "cloud": (CLOUD_CONFIGS[cloud_type] % conf_args) if cloud_type else "", "work_dir": work_dir} main_config.update(std_args) # Local run always seems to need docker set because of submit-docker in default configuration # Can we unset submit-docker based on configuration so it doesn't inherit? # main_config["docker_attrs"] = "\n ".join(docker_attrs) with open(out_file, "w") as out_handle: out_handle.write(CROMWELL_CONFIG % main_config) return out_file
python
def create_cromwell_config(args, work_dir, sample_file): """Prepare a cromwell configuration within the current working directory. """ docker_attrs = ["String? docker", "String? docker_user"] cwl_attrs = ["Int? cpuMin", "Int? cpuMax", "Int? memoryMin", "Int? memoryMax", "String? outDirMin", "String? outDirMax", "String? tmpDirMin", "String? tmpDirMax"] out_file = os.path.join(work_dir, "bcbio-cromwell.conf") run_config = _load_custom_config(args.runconfig) if args.runconfig else {} # Avoid overscheduling jobs for local runs by limiting concurrent jobs # Longer term would like to keep these within defined core window joblimit = args.joblimit if joblimit == 0 and not args.scheduler: joblimit = 1 file_types = _get_filesystem_types(args, sample_file) std_args = {"docker_attrs": "" if args.no_container else "\n ".join(docker_attrs), "submit_docker": 'submit-docker: ""' if args.no_container else "", "joblimit": "concurrent-job-limit = %s" % (joblimit) if joblimit > 0 else "", "cwl_attrs": "\n ".join(cwl_attrs), "filesystem": _get_filesystem_config(file_types), "database": run_config.get("database", DATABASE_CONFIG % {"work_dir": work_dir})} cl_args, conf_args, scheduler, cloud_type = _args_to_cromwell(args) std_args["engine"] = _get_engine_filesystem_config(file_types, args, conf_args) conf_args.update(std_args) main_config = {"hpc": (HPC_CONFIGS[scheduler] % conf_args) if scheduler else "", "cloud": (CLOUD_CONFIGS[cloud_type] % conf_args) if cloud_type else "", "work_dir": work_dir} main_config.update(std_args) # Local run always seems to need docker set because of submit-docker in default configuration # Can we unset submit-docker based on configuration so it doesn't inherit? # main_config["docker_attrs"] = "\n ".join(docker_attrs) with open(out_file, "w") as out_handle: out_handle.write(CROMWELL_CONFIG % main_config) return out_file
[ "def", "create_cromwell_config", "(", "args", ",", "work_dir", ",", "sample_file", ")", ":", "docker_attrs", "=", "[", "\"String? docker\"", ",", "\"String? docker_user\"", "]", "cwl_attrs", "=", "[", "\"Int? cpuMin\"", ",", "\"Int? cpuMax\"", ",", "\"Int? memoryMin\"...
Prepare a cromwell configuration within the current working directory.
[ "Prepare", "a", "cromwell", "configuration", "within", "the", "current", "working", "directory", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L8-L40
train
218,089
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_file_paths
def _get_file_paths(cur): """Retrieve a list of file paths, recursively traversing the """ out = [] if isinstance(cur, (list, tuple)): for x in cur: new = _get_file_paths(x) if new: out.extend(new) elif isinstance(cur, dict): if "class" in cur: out.append(cur["path"]) else: for k, v in cur.items(): new = _get_file_paths(v) if new: out.extend(new) return out
python
def _get_file_paths(cur): """Retrieve a list of file paths, recursively traversing the """ out = [] if isinstance(cur, (list, tuple)): for x in cur: new = _get_file_paths(x) if new: out.extend(new) elif isinstance(cur, dict): if "class" in cur: out.append(cur["path"]) else: for k, v in cur.items(): new = _get_file_paths(v) if new: out.extend(new) return out
[ "def", "_get_file_paths", "(", "cur", ")", ":", "out", "=", "[", "]", "if", "isinstance", "(", "cur", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "x", "in", "cur", ":", "new", "=", "_get_file_paths", "(", "x", ")", "if", "new", ":", "...
Retrieve a list of file paths, recursively traversing the input structure.
[ "Retrieve", "a", "list", "of", "file", "paths", "recursively", "traversing", "the", "input", "structure", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L42-L59
train
218,090
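For illustration, the traversal applied to a small CWL-style structure; the paths and keys are hypothetical.
# Any dict carrying a "class" key is treated as a file object and its
# "path" is collected; lists and plain dicts are walked recursively.
sample = {"files": [{"class": "File", "path": "gs://bucket/a.bam"},
                    [{"class": "File", "path": "/data/b.vcf.gz"}]],
          "reference": {"class": "File", "path": "/refs/hg38.fa"}}
# _get_file_paths(sample) would return:
#   ["gs://bucket/a.bam", "/data/b.vcf.gz", "/refs/hg38.fa"]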
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_load_custom_config
def _load_custom_config(run_config): """Load custom configuration input HOCON file for cromwell. """ from pyhocon import ConfigFactory, HOCONConverter, ConfigTree conf = ConfigFactory.parse_file(run_config) out = {} if "database" in conf: out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")})) return out
python
def _load_custom_config(run_config): """Load custom configuration input HOCON file for cromwell. """ from pyhocon import ConfigFactory, HOCONConverter, ConfigTree conf = ConfigFactory.parse_file(run_config) out = {} if "database" in conf: out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")})) return out
[ "def", "_load_custom_config", "(", "run_config", ")", ":", "from", "pyhocon", "import", "ConfigFactory", ",", "HOCONConverter", ",", "ConfigTree", "conf", "=", "ConfigFactory", ".", "parse_file", "(", "run_config", ")", "out", "=", "{", "}", "if", "\"database\""...
Load custom configuration input HOCON file for cromwell.
[ "Load", "custom", "configuration", "input", "HOCON", "file", "for", "cromwell", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L61-L69
train
218,091
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_args_to_cromwell
def _args_to_cromwell(args): """Convert input arguments into cromwell inputs for config and command line. """ default_config = {"slurm": {"timelimit": "1-00:00", "account": ""}, "sge": {"memtype": "mem_free", "pename": "smp"}, "lsf": {"walltime": "24:00", "account": ""}, "htcondor": {}, "torque": {"walltime": "24:00:00", "account": ""}, "pbspro": {"walltime": "24:00:00", "account": "", "cpu_and_mem": "-l select=1:ncpus=${cpu}:mem=${memory_mb}mb"}} prefixes = {("account", "slurm"): "-A ", ("account", "pbspro"): "-A "} custom = {("noselect", "pbspro"): ("cpu_and_mem", "-l ncpus=${cpu} -l mem=${memory_mb}mb")} cl = [] config = {} # HPC scheduling if args.scheduler: if args.scheduler not in default_config: raise ValueError("Scheduler not yet supported by Cromwell: %s" % args.scheduler) if not args.queue and args.scheduler not in ["htcondor"]: raise ValueError("Need to set queue (-q) for running with an HPC scheduler") config = default_config[args.scheduler] cl.append("-Dbackend.default=%s" % args.scheduler.upper()) config["queue"] = args.queue for rs in args.resources: for r in rs.split(";"): parts = r.split("=") if len(parts) == 2: key, val = parts config[key] = prefixes.get((key, args.scheduler), "") + val elif len(parts) == 1 and (parts[0], args.scheduler) in custom: key, val = custom[(parts[0], args.scheduler)] config[key] = val cloud_type = None if args.cloud_project: if args.cloud_root and args.cloud_root.startswith("gs:"): cloud_type = "PAPI" cloud_root = args.cloud_root cloud_region = None elif ((args.cloud_root and args.cloud_root.startswith("s3:")) or (args.cloud_project and args.cloud_project.startswith("arn:"))): cloud_type = "AWSBATCH" cloud_root = args.cloud_root if not cloud_root.startswith("s3://"): cloud_root = "s3://%s" % cloud_root # split region from input Amazon Resource Name, ie arn:aws:batch:us-east-1: cloud_region = args.cloud_project.split(":")[3] else: raise ValueError("Unexpected inputs for Cromwell Cloud support: %s %s" % (args.cloud_project, args.cloud_root)) config = {"cloud_project": args.cloud_project, "cloud_root": cloud_root, "cloud_region": cloud_region} cl.append("-Dbackend.default=%s" % cloud_type) return cl, config, args.scheduler, cloud_type
python
def _args_to_cromwell(args): """Convert input arguments into cromwell inputs for config and command line. """ default_config = {"slurm": {"timelimit": "1-00:00", "account": ""}, "sge": {"memtype": "mem_free", "pename": "smp"}, "lsf": {"walltime": "24:00", "account": ""}, "htcondor": {}, "torque": {"walltime": "24:00:00", "account": ""}, "pbspro": {"walltime": "24:00:00", "account": "", "cpu_and_mem": "-l select=1:ncpus=${cpu}:mem=${memory_mb}mb"}} prefixes = {("account", "slurm"): "-A ", ("account", "pbspro"): "-A "} custom = {("noselect", "pbspro"): ("cpu_and_mem", "-l ncpus=${cpu} -l mem=${memory_mb}mb")} cl = [] config = {} # HPC scheduling if args.scheduler: if args.scheduler not in default_config: raise ValueError("Scheduler not yet supported by Cromwell: %s" % args.scheduler) if not args.queue and args.scheduler not in ["htcondor"]: raise ValueError("Need to set queue (-q) for running with an HPC scheduler") config = default_config[args.scheduler] cl.append("-Dbackend.default=%s" % args.scheduler.upper()) config["queue"] = args.queue for rs in args.resources: for r in rs.split(";"): parts = r.split("=") if len(parts) == 2: key, val = parts config[key] = prefixes.get((key, args.scheduler), "") + val elif len(parts) == 1 and (parts[0], args.scheduler) in custom: key, val = custom[(parts[0], args.scheduler)] config[key] = val cloud_type = None if args.cloud_project: if args.cloud_root and args.cloud_root.startswith("gs:"): cloud_type = "PAPI" cloud_root = args.cloud_root cloud_region = None elif ((args.cloud_root and args.cloud_root.startswith("s3:")) or (args.cloud_project and args.cloud_project.startswith("arn:"))): cloud_type = "AWSBATCH" cloud_root = args.cloud_root if not cloud_root.startswith("s3://"): cloud_root = "s3://%s" % cloud_root # split region from input Amazon Resource Name, ie arn:aws:batch:us-east-1: cloud_region = args.cloud_project.split(":")[3] else: raise ValueError("Unexpected inputs for Cromwell Cloud support: %s %s" % (args.cloud_project, args.cloud_root)) config = {"cloud_project": args.cloud_project, "cloud_root": cloud_root, "cloud_region": cloud_region} cl.append("-Dbackend.default=%s" % cloud_type) return cl, config, args.scheduler, cloud_type
[ "def", "_args_to_cromwell", "(", "args", ")", ":", "default_config", "=", "{", "\"slurm\"", ":", "{", "\"timelimit\"", ":", "\"1-00:00\"", ",", "\"account\"", ":", "\"\"", "}", ",", "\"sge\"", ":", "{", "\"memtype\"", ":", "\"mem_free\"", ",", "\"pename\"", ...
Convert input arguments into cromwell inputs for config and command line.
[ "Convert", "input", "arguments", "into", "cromwell", "inputs", "for", "config", "and", "command", "line", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L77-L128
train
218,092
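For illustration, a worked example of the outputs for an assumed SLURM submission; the queue, account, and time limit values are placeholders.
# Assumed inputs:
#   scheduler="slurm", queue="general",
#   resources=["account=myproj;timelimit=2-00:00"]
# Resulting outputs:
#   cl     == ["-Dbackend.default=SLURM"]
#   config == {"timelimit": "2-00:00", "account": "-A myproj",
#              "queue": "general"}
# The "-A " prefix comes from the prefixes table entry ("account", "slurm");
# unprefixed keys such as timelimit are stored verbatim.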
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_filesystem_types
def _get_filesystem_types(args, sample_file): """Retrieve the types of inputs and staging based on sample JSON and arguments. """ out = set([]) ext = "" if args.no_container else "_container" with open(sample_file) as in_handle: for f in _get_file_paths(json.load(in_handle)): if f.startswith("gs:"): out.add("gcp%s" % ext) elif f.startswith("s3:"): out.add("s3%s" % ext) elif f.startswith(("https:", "http:")): out.add("http%s" % ext) else: out.add("local%s" % ext) return out
python
def _get_filesystem_types(args, sample_file): """Retrieve the types of inputs and staging based on sample JSON and arguments. """ out = set([]) ext = "" if args.no_container else "_container" with open(sample_file) as in_handle: for f in _get_file_paths(json.load(in_handle)): if f.startswith("gs:"): out.add("gcp%s" % ext) elif f.startswith("s3:"): out.add("s3%s" % ext) elif f.startswith(("https:", "http:")): out.add("http%s" % ext) else: out.add("local%s" % ext) return out
[ "def", "_get_filesystem_types", "(", "args", ",", "sample_file", ")", ":", "out", "=", "set", "(", "[", "]", ")", "ext", "=", "\"\"", "if", "args", ".", "no_container", "else", "\"_container\"", "with", "open", "(", "sample_file", ")", "as", "in_handle", ...
Retrieve the types of inputs and staging based on sample JSON and arguments.
[ "Retrieve", "the", "types", "of", "inputs", "and", "staging", "based", "on", "sample", "JSON", "and", "arguments", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L130-L145
train
218,093
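For illustration, a standalone sketch of the scheme detection with containers enabled, so the "_container" suffix applies; the paths are hypothetical.
def scheme_bucket(path, ext="_container"):
    # Mirror the prefix checks: gs -> gcp, s3 -> s3, http(s) -> http,
    # anything else -> local.
    if path.startswith("gs:"):
        return "gcp" + ext
    if path.startswith("s3:"):
        return "s3" + ext
    if path.startswith(("https:", "http:")):
        return "http" + ext
    return "local" + ext

paths = ["gs://bucket/a.bam", "/data/b.vcf.gz", "https://example.org/c.bed"]
print({scheme_bucket(p) for p in paths})
# -> {'gcp_container', 'local_container', 'http_container'} (set order varies)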
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_filesystem_config
def _get_filesystem_config(file_types): """Retrieve filesystem configuration, including support for specified file types. """ out = " filesystems {\n" for file_type in sorted(list(file_types)): if file_type in _FILESYSTEM_CONFIG: out += _FILESYSTEM_CONFIG[file_type] out += " }\n" return out
python
def _get_filesystem_config(file_types): """Retrieve filesystem configuration, including support for specified file types. """ out = " filesystems {\n" for file_type in sorted(list(file_types)): if file_type in _FILESYSTEM_CONFIG: out += _FILESYSTEM_CONFIG[file_type] out += " }\n" return out
[ "def", "_get_filesystem_config", "(", "file_types", ")", ":", "out", "=", "\" filesystems {\\n\"", "for", "file_type", "in", "sorted", "(", "list", "(", "file_types", ")", ")", ":", "if", "file_type", "in", "_FILESYSTEM_CONFIG", ":", "out", "+=", "_FILESYSTE...
Retrieve filesystem configuration, including support for specified file types.
[ "Retrieve", "filesystem", "configuration", "including", "support", "for", "specified", "file", "types", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L147-L155
train
218,094
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
_get_engine_filesystem_config
def _get_engine_filesystem_config(file_types, args, conf_args): """Retriever authorization and engine filesystem configuration. """ file_types = [x.replace("_container", "") for x in list(file_types)] out = "" if "gcp" in file_types: out += _AUTH_CONFIG_GOOGLE if "s3" in file_types: out += _AUTH_CONFIG_AWS % conf_args["cloud_region"] if "gcp" in file_types or "http" in file_types or "s3" in file_types: out += "engine {\n" out += " filesystems {\n" if "gcp" in file_types: out += ' gcs {\n' out += ' auth = "gcp-auth"\n' if args.cloud_project: out += ' project = "%s"\n' % args.cloud_project out += ' }\n' if "http" in file_types: out += ' http {}\n' if "s3" in file_types: out += ' s3 { auth = "default" }' out += " }\n" out += "}\n" return out
python
def _get_engine_filesystem_config(file_types, args, conf_args): """Retriever authorization and engine filesystem configuration. """ file_types = [x.replace("_container", "") for x in list(file_types)] out = "" if "gcp" in file_types: out += _AUTH_CONFIG_GOOGLE if "s3" in file_types: out += _AUTH_CONFIG_AWS % conf_args["cloud_region"] if "gcp" in file_types or "http" in file_types or "s3" in file_types: out += "engine {\n" out += " filesystems {\n" if "gcp" in file_types: out += ' gcs {\n' out += ' auth = "gcp-auth"\n' if args.cloud_project: out += ' project = "%s"\n' % args.cloud_project out += ' }\n' if "http" in file_types: out += ' http {}\n' if "s3" in file_types: out += ' s3 { auth = "default" }' out += " }\n" out += "}\n" return out
[ "def", "_get_engine_filesystem_config", "(", "file_types", ",", "args", ",", "conf_args", ")", ":", "file_types", "=", "[", "x", ".", "replace", "(", "\"_container\"", ",", "\"\"", ")", "for", "x", "in", "list", "(", "file_types", ")", "]", "out", "=", "...
Retrieve authorization and engine filesystem configuration.
[ "Retrieve", "authorization", "and", "engine", "filesystem", "configuration", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L212-L237
train
218,095
bcbio/bcbio-nextgen
bcbio/variation/normalize.py
normalize
def normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True, rerun_effects=True, remove_oldeffects=False, nonrefonly=False, work_dir=None): """Normalizes variants and reruns SnpEFF for resulting VCF """ if remove_oldeffects: out_file = "%s-noeff-nomultiallelic%s" % utils.splitext_plus(in_file) else: out_file = "%s-nomultiallelic%s" % utils.splitext_plus(in_file) if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): if vcfutils.vcf_has_variants(in_file): ready_ma_file = _normalize(in_file, data, passonly=passonly, normalize_indels=normalize_indels, split_biallelic=split_biallelic, remove_oldeffects=remove_oldeffects, nonrefonly=nonrefonly, work_dir=work_dir) if rerun_effects: ann_ma_file, _ = effects.add_to_vcf(ready_ma_file, data) if ann_ma_file: ready_ma_file = ann_ma_file utils.symlink_plus(ready_ma_file, out_file) else: utils.symlink_plus(in_file, out_file) return vcfutils.bgzip_and_index(out_file, data["config"])
python
def normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True, rerun_effects=True, remove_oldeffects=False, nonrefonly=False, work_dir=None): """Normalizes variants and reruns SnpEFF for resulting VCF """ if remove_oldeffects: out_file = "%s-noeff-nomultiallelic%s" % utils.splitext_plus(in_file) else: out_file = "%s-nomultiallelic%s" % utils.splitext_plus(in_file) if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): if vcfutils.vcf_has_variants(in_file): ready_ma_file = _normalize(in_file, data, passonly=passonly, normalize_indels=normalize_indels, split_biallelic=split_biallelic, remove_oldeffects=remove_oldeffects, nonrefonly=nonrefonly, work_dir=work_dir) if rerun_effects: ann_ma_file, _ = effects.add_to_vcf(ready_ma_file, data) if ann_ma_file: ready_ma_file = ann_ma_file utils.symlink_plus(ready_ma_file, out_file) else: utils.symlink_plus(in_file, out_file) return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "normalize", "(", "in_file", ",", "data", ",", "passonly", "=", "False", ",", "normalize_indels", "=", "True", ",", "split_biallelic", "=", "True", ",", "rerun_effects", "=", "True", ",", "remove_oldeffects", "=", "False", ",", "nonrefonly", "=", "Fal...
Normalizes variants and reruns SnpEff for the resulting VCF.
[ "Normalizes", "variants", "and", "reruns", "SnpEff", "for", "the", "resulting", "VCF", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/normalize.py#L59-L84
train
218,096
bcbio/bcbio-nextgen
bcbio/variation/normalize.py
_normalize
def _normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True, remove_oldeffects=False, nonrefonly=False, work_dir=None): """Convert multi-allelic variants into single allelic. `vt normalize` has the -n flag passed (skipping reference checks) because of errors where the reference genome has non GATCN ambiguous bases. These are not supported in VCF, so you'll have a mismatch of N in VCF versus R (or other ambiguous bases) in the genome. """ if remove_oldeffects: out_file = "%s-noeff-decompose%s" % utils.splitext_plus(in_file) old_effects = [a for a in ["CSQ", "ANN"] if a in cyvcf2.VCF(in_file)] if old_effects: clean_effects_cmd = " | bcftools annotate -x %s " % (",".join(["INFO/%s" % x for x in old_effects])) else: clean_effects_cmd = "" else: clean_effects_cmd = "" out_file = "%s-decompose%s" % utils.splitext_plus(in_file) if passonly or nonrefonly: subset_vcf_cmd = " | bcftools view " if passonly: subset_vcf_cmd += "-f 'PASS,.' " if nonrefonly: subset_vcf_cmd += "--min-ac 1:nref " else: subset_vcf_cmd = "" if work_dir: out_file = os.path.join(work_dir, os.path.basename(out_file)) if not utils.file_exists(out_file): ref_file = dd.get_ref_file(data) assert out_file.endswith(".vcf.gz") with file_transaction(data, out_file) as tx_out_file: cmd = ("gunzip -c " + in_file + subset_vcf_cmd + clean_effects_cmd + (" | vcfallelicprimitives -t DECOMPOSED --keep-geno" if split_biallelic else "") + " | sed 's/ID=AD,Number=./ID=AD,Number=R/'" + " | vt decompose -s - " + ((" | vt normalize -n -r " + ref_file + " - ") if normalize_indels else "") + " | awk '{ gsub(\"./-65\", \"./.\"); print $0 }'" + " | sed -e 's/Number=A/Number=1/g'" + " | bgzip -c > " + tx_out_file ) do.run(cmd, "Multi-allelic to single allele") return vcfutils.bgzip_and_index(out_file, data["config"])
python
def _normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True,
               remove_oldeffects=False, nonrefonly=False, work_dir=None):
    """Convert multi-allelic variants into single allelic.

    `vt normalize` has the -n flag passed (skipping reference checks) because
    of errors where the reference genome has non-GATCN ambiguous bases. These
    are not supported in VCF, so you'll have a mismatch of N in the VCF versus
    R (or other ambiguous bases) in the genome.
    """
    if remove_oldeffects:
        out_file = "%s-noeff-decompose%s" % utils.splitext_plus(in_file)
        old_effects = [a for a in ["CSQ", "ANN"] if a in cyvcf2.VCF(in_file)]
        if old_effects:
            clean_effects_cmd = " | bcftools annotate -x %s " % (",".join(["INFO/%s" % x for x in old_effects]))
        else:
            clean_effects_cmd = ""
    else:
        clean_effects_cmd = ""
        out_file = "%s-decompose%s" % utils.splitext_plus(in_file)
    if passonly or nonrefonly:
        subset_vcf_cmd = " | bcftools view "
        if passonly:
            subset_vcf_cmd += "-f 'PASS,.' "
        if nonrefonly:
            subset_vcf_cmd += "--min-ac 1:nref "
    else:
        subset_vcf_cmd = ""
    if work_dir:
        out_file = os.path.join(work_dir, os.path.basename(out_file))
    if not utils.file_exists(out_file):
        ref_file = dd.get_ref_file(data)
        assert out_file.endswith(".vcf.gz")
        with file_transaction(data, out_file) as tx_out_file:
            cmd = ("gunzip -c " + in_file + subset_vcf_cmd + clean_effects_cmd +
                   (" | vcfallelicprimitives -t DECOMPOSED --keep-geno" if split_biallelic else "") +
                   " | sed 's/ID=AD,Number=./ID=AD,Number=R/'" +
                   " | vt decompose -s - " +
                   ((" | vt normalize -n -r " + ref_file + " - ") if normalize_indels else "") +
                   " | awk '{ gsub(\"./-65\", \"./.\"); print $0 }'" +
                   " | sed -e 's/Number=A/Number=1/g'" +
                   " | bgzip -c > " + tx_out_file)
            do.run(cmd, "Multi-allelic to single allele")
    return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_normalize", "(", "in_file", ",", "data", ",", "passonly", "=", "False", ",", "normalize_indels", "=", "True", ",", "split_biallelic", "=", "True", ",", "remove_oldeffects", "=", "False", ",", "nonrefonly", "=", "False", ",", "work_dir", "=", "None",...
Convert multi-allelic variants into single allelic. `vt normalize` has the -n flag passed (skipping reference checks) because of errors where the reference genome has non-GATCN ambiguous bases. These are not supported in VCF, so you'll have a mismatch of N in the VCF versus R (or other ambiguous bases) in the genome.
[ "Convert", "multi", "-", "allelic", "variants", "into", "single", "allelic", "." ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/normalize.py#L86-L130
train
218,097
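To make the `vt decompose` step in `_normalize` concrete, here is a toy sketch of what splitting a multi-allelic record means. It is not vt's implementation, and it deliberately skips the hard part (rewriting Number=A/R INFO and FORMAT fields and the genotypes), which the real pipeline handles:

def decompose_sketch(vcf_line):
    # Turn one VCF body line with ALT "A,T" into two lines with single ALTs.
    fields = vcf_line.rstrip("\n").split("\t")
    out = []
    for alt in fields[4].split(","):
        out.append("\t".join(fields[:4] + [alt] + fields[5:]))
    return out

# One multi-allelic site becomes two single-allelic records.
for rec in decompose_sketch("chr1\t100\t.\tG\tA,T\t50\tPASS\tDP=30"):
    print(rec)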
bcbio/bcbio-nextgen
bcbio/rnaseq/sailfish.py
sleuthify_sailfish
def sleuthify_sailfish(sailfish_dir):
    """
    if installed, use wasabi to create abundance.h5 output
    for use with sleuth
    """
    if not R_package_path("wasabi"):
        return None
    else:
        rscript = Rscript_cmd()
        cmd = """{rscript} --no-environ -e 'library("wasabi"); prepare_fish_for_sleuth(c("{sailfish_dir}"))'"""
        do.run(cmd.format(**locals()), "Converting Sailfish to Sleuth format.")
    return os.path.join(sailfish_dir, "abundance.h5")
python
def sleuthify_sailfish(sailfish_dir):
    """
    if installed, use wasabi to create abundance.h5 output
    for use with sleuth
    """
    if not R_package_path("wasabi"):
        return None
    else:
        rscript = Rscript_cmd()
        cmd = """{rscript} --no-environ -e 'library("wasabi"); prepare_fish_for_sleuth(c("{sailfish_dir}"))'"""
        do.run(cmd.format(**locals()), "Converting Sailfish to Sleuth format.")
    return os.path.join(sailfish_dir, "abundance.h5")
[ "def", "sleuthify_sailfish", "(", "sailfish_dir", ")", ":", "if", "not", "R_package_path", "(", "\"wasabi\"", ")", ":", "return", "None", "else", ":", "rscript", "=", "Rscript_cmd", "(", ")", "cmd", "=", "\"\"\"{rscript} --no-environ -e 'library(\"wasabi\"); prepare_f...
if installed, use wasabi to create abundance.h5 output for use with sleuth
[ "if", "installed", "use", "wasabi", "to", "create", "abundance", ".", "h5", "output", "for", "use", "with", "sleuth" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/sailfish.py#L66-L77
train
218,098
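Illustrative usage of `sleuthify_sailfish` (the directory path below is made up): it returns the path to the HDF5 abundances when the wasabi R package is available and None otherwise, so callers can branch on the result:

h5_file = sleuthify_sailfish("/path/to/work/sailfish/sample1")
if h5_file:
    print("sleuth-ready abundances at %s" % h5_file)
else:
    print("wasabi not installed; skipping sleuth conversion")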
bcbio/bcbio-nextgen
bcbio/rnaseq/sailfish.py
create_combined_fasta
def create_combined_fasta(data):
    """
    if there are genomes to be disambiguated, create a FASTA file of
    all of the transcripts for all genomes
    """
    out_dir = os.path.join(dd.get_work_dir(data), "inputs", "transcriptome")
    items = disambiguate.split([data])
    fasta_files = []
    for i in items:
        odata = i[0]
        gtf_file = dd.get_gtf_file(odata)
        ref_file = dd.get_ref_file(odata)
        out_file = os.path.join(out_dir, dd.get_genome_build(odata) + ".fa")
        if file_exists(out_file):
            fasta_files.append(out_file)
        else:
            out_file = gtf.gtf_to_fasta(gtf_file, ref_file, out_file=out_file)
            fasta_files.append(out_file)
    out_stem = os.path.join(out_dir, dd.get_genome_build(data))
    if dd.get_disambiguate(data):
        out_stem = "-".join([out_stem] + (dd.get_disambiguate(data) or []))
    combined_file = out_stem + ".fa"
    if file_exists(combined_file):
        return combined_file
    fasta_file_string = " ".join(fasta_files)
    cmd = "cat {fasta_file_string} > {tx_out_file}"
    with file_transaction(data, combined_file) as tx_out_file:
        do.run(cmd.format(**locals()), "Combining transcriptome FASTA files.")
    return combined_file
python
def create_combined_fasta(data):
    """
    if there are genomes to be disambiguated, create a FASTA file of
    all of the transcripts for all genomes
    """
    out_dir = os.path.join(dd.get_work_dir(data), "inputs", "transcriptome")
    items = disambiguate.split([data])
    fasta_files = []
    for i in items:
        odata = i[0]
        gtf_file = dd.get_gtf_file(odata)
        ref_file = dd.get_ref_file(odata)
        out_file = os.path.join(out_dir, dd.get_genome_build(odata) + ".fa")
        if file_exists(out_file):
            fasta_files.append(out_file)
        else:
            out_file = gtf.gtf_to_fasta(gtf_file, ref_file, out_file=out_file)
            fasta_files.append(out_file)
    out_stem = os.path.join(out_dir, dd.get_genome_build(data))
    if dd.get_disambiguate(data):
        out_stem = "-".join([out_stem] + (dd.get_disambiguate(data) or []))
    combined_file = out_stem + ".fa"
    if file_exists(combined_file):
        return combined_file
    fasta_file_string = " ".join(fasta_files)
    cmd = "cat {fasta_file_string} > {tx_out_file}"
    with file_transaction(data, combined_file) as tx_out_file:
        do.run(cmd.format(**locals()), "Combining transcriptome FASTA files.")
    return combined_file
[ "def", "create_combined_fasta", "(", "data", ")", ":", "out_dir", "=", "os", ".", "path", ".", "join", "(", "dd", ".", "get_work_dir", "(", "data", ")", ",", "\"inputs\"", ",", "\"transcriptome\"", ")", "items", "=", "disambiguate", ".", "split", "(", "[...
if there are genomes to be disambiguated, create a FASTA file of all of the transcripts for all genomes
[ "if", "there", "are", "genomes", "to", "be", "disambiguated", "create", "a", "FASTA", "file", "of", "all", "of", "the", "transcripts", "for", "all", "genomes" ]
6a9348c0054ccd5baffd22f1bb7d0422f6978b20
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/sailfish.py#L79-L108
train
218,099
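The final step of `create_combined_fasta` shells out to `cat`. Below is a pure-Python sketch of the same concatenation for contexts without a shell; the file names are illustrative, and this skips bcbio's transactional output handling:

import shutil

def concat_fastas(fasta_files, combined_file):
    # Stream each FASTA into the combined output without loading it into memory.
    with open(combined_file, "wb") as out_handle:
        for fname in fasta_files:
            with open(fname, "rb") as in_handle:
                shutil.copyfileobj(in_handle, out_handle)
    return combined_file

concat_fastas(["hg38.fa", "mm10.fa"], "hg38-mm10.fa")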