# NOTE(review): removed dataset-join artifact (table-header residue) that was
# not part of the original source and is not valid Python.
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class Deseq(Process):
    """Run DESeq2 analysis.

    The DESeq2 package estimates variance-mean dependence in count data
    from high-throughput sequencing assays and tests for differential
    expression based on a model using the negative binomial
    distribution. See
    [here](https://www.bioconductor.org/packages/release/bioc/manuals/DESeq2/man/DESeq2.pdf)
    and [here](http://bioconductor.org/packages/devel/bioc/vignettes/DESeq2/inst/doc/DESeq2.html)
    for more information.
    """

    slug = "differentialexpression-deseq2"
    name = "DESeq2"
    process_type = "data:differentialexpression:deseq2"
    # Patch bump: swapped GroupField labels fixed and missing deseq.R
    # return-code check added.
    version = "3.5.1"
    category = "Differential Expression"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {"cores": 1, "memory": 8192},
    }
    data_name = "Differential expression (case vs. control)"

    class Input:
        """Input fields to process Deseq."""

        case = ListField(
            DataField("expression"),
            label="Case",
            description="Case samples (replicates)",
        )
        control = ListField(
            DataField("expression"),
            label="Control",
            description="Control samples (replicates)",
        )
        create_sets = BooleanField(
            label="Create gene sets",
            description="After calculating differential gene "
            "expressions create gene sets for up-regulated genes, "
            "down-regulated genes and all genes.",
            default=False,
        )
        logfc = FloatField(
            label="Log2 fold change threshold for gene sets",
            description="Genes above Log2FC are considered as "
            "up-regulated and genes below -Log2FC as down-regulated.",
            default=1.0,
            hidden="!create_sets",
        )
        fdr = FloatField(
            label="FDR threshold for gene sets",
            default=0.05,
            hidden="!create_sets",
        )

        class Options:
            """Options."""

            beta_prior = BooleanField(
                label="Beta prior",
                default=False,
                description="Whether or not to put a zero-mean normal prior "
                "on the non-intercept coefficients.",
            )

        class FilterOptions:
            """Filtering options."""

            count = BooleanField(
                label="Filter genes based on expression count",
                default=True,
            )
            min_count_sum = IntegerField(
                label="Minimum gene expression count summed over all samples",
                default=10,
                description="Filter genes in the expression matrix input. "
                "Remove genes where the expression count sum over all samples "
                "is below the threshold.",
                hidden="!filter_options.count",
            )
            cook = BooleanField(
                label="Filter genes based on Cook's distance",
                default=False,
            )
            cooks_cutoff = FloatField(
                label="Threshold on Cook's distance",
                required=False,
                description="If one or more samples have Cook's distance "
                "larger than the threshold set here, the p-value for the row "
                "is set to NA. If left empty, the default threshold of 0.99 "
                "quantile of the F(p, m-p) distribution is used, where p is "
                "the number of coefficients being fitted and m is the number "
                "of samples. This test excludes Cook's distance of samples "
                "belonging to experimental groups with only two samples.",
                hidden="!filter_options.cook",
            )
            independent = BooleanField(
                label="Apply independent gene filtering",
                default=True,
            )
            alpha = FloatField(
                label="Significance cut-off used for optimizing independent "
                "gene filtering",
                default=0.1,
                description="The value should be set to adjusted p-value "
                "cut-off (FDR).",
                hidden="!filter_options.independent",
            )

        # BUGFIX: the labels of these two groups were swapped. ``Options``
        # holds DESeq2 analysis settings (beta prior) while ``FilterOptions``
        # holds the gene-filtering settings.
        options = GroupField(Options, label="Differential expression analysis options")
        filter_options = GroupField(FilterOptions, label="Gene filtering options")

    class Output:
        """Output fields of the process Deseq."""

        raw = FileField("Differential expression")
        de_json = JsonField(label="Results table (JSON)")
        de_file = FileField(label="Results table (file)")
        count_matrix = FileField(label="Count matrix")
        source = StringField(label="Gene ID database")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")

    def run(self, inputs, outputs):
        """Run the analysis."""
        expressions = inputs.case + inputs.control
        if any(e.type == "data:expression:microarray:" for e in expressions):
            self.error("Microarray expressions are not supported.")
        # All inputs must share the same Gene ID database, species, build
        # and feature type; compare everything against the first sample.
        for exp in expressions:
            if exp.output.source != expressions[0].output.source:
                self.error(
                    "Input samples are of different Gene ID databases: "
                    f"{exp.output.source} and {expressions[0].output.source}."
                )
            if exp.output.species != expressions[0].output.species:
                self.error(
                    "Input samples are of different Species: "
                    f"{exp.output.species} and {expressions[0].output.species}."
                )
            if exp.output.build != expressions[0].output.build:
                self.error(
                    "Input samples are of different Build: "
                    f"{exp.output.build} and {expressions[0].output.build}."
                )
            if exp.output.feature_type != expressions[0].output.feature_type:
                self.error(
                    "Input samples are of different Feature type: "
                    f"{exp.output.feature_type} and {expressions[0].output.feature_type}."
                )
        for case in inputs.case:
            if case in inputs.control:
                self.error(
                    "Case and Control groups must contain unique "
                    f"samples. Sample {case.sample_name} is in both Case "
                    "and Control group."
                )
        self.progress(0.1)
        # Select the expression file and input format that matches the
        # expression type of the input samples.
        if all(e.type == "data:expression:nanostring:" for e in expressions):
            params = [
                "--cases",
                [e.output.exp.path for e in inputs.case],
                "--controls",
                [e.output.exp.path for e in inputs.control],
                "--format",
                "nanostring",
            ]
        elif all(e.type == "data:expression:rsem:" for e in expressions):
            params = [
                "--cases",
                [e.output.genes.path for e in inputs.case],
                "--controls",
                [e.output.genes.path for e in inputs.control],
                "--format",
                "rsem",
            ]
        elif all(e.type == "data:expression:salmon:" for e in expressions):
            params = [
                "--cases",
                [e.output.quant.path for e in inputs.case],
                "--controls",
                [e.output.quant.path for e in inputs.control],
                "--format",
                "salmon",
                "--tx2gene",
                inputs.case[0].output.txdb.path,
            ]
        else:
            # Generic expression objects must provide raw read counts.
            if not all(hasattr(e.output.rc, "path") for e in expressions):
                self.error("Read counts are required when using DESeq2.")
            params = [
                "--cases",
                [e.output.rc.path for e in inputs.case],
                "--controls",
                [e.output.rc.path for e in inputs.control],
            ]
        if inputs.options.beta_prior:
            params.append("--beta-prior")
        if inputs.filter_options.count:
            params.extend(["--min-count-sum", inputs.filter_options.min_count_sum])
        if inputs.filter_options.cook:
            params.extend(["--cooks-cutoff", inputs.filter_options.cooks_cutoff])
        if inputs.filter_options.independent:
            params.extend(["--independent", "--alpha", inputs.filter_options.alpha])
        return_code, _, _ = Cmd["deseq.R"][params] & TEE(retcode=None)
        # BUGFIX: the return code of deseq.R was previously ignored.
        if return_code:
            self.error("Error computing differential expression (DESeq2).")
        self.progress(0.95)
        deseq_output = "diffexp_deseq2.tab"
        args = [
            deseq_output,
            "de_data.json",
            "de_file.tab.gz",
            "--gene_id",
            "gene_id",
            "--fdr",
            "padj",
            "--pvalue",
            "pvalue",
            "--logfc",
            "log2FoldChange",
            "--stat",
            "stat",
        ]
        return_code, _, _ = Cmd["parse_diffexp.py"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error while parsing DGE results.")
        (Cmd["gzip"][deseq_output])()
        (Cmd["gzip"]["count_matrix.tab"])()
        outputs.raw = f"{deseq_output}.gz"
        outputs.de_json = "de_data.json"
        outputs.de_file = "de_file.tab.gz"
        outputs.count_matrix = "count_matrix.tab.gz"
        outputs.source = expressions[0].output.source
        outputs.species = expressions[0].output.species
        outputs.build = expressions[0].output.build
        outputs.feature_type = expressions[0].output.feature_type
        if inputs.create_sets:
            out_dir = "gene_sets"
            gene_set_args = [
                "--dge_file",
                "de_file.tab.gz",
                "--out_dir",
                out_dir,
                "--analysis_name",
                self.name,
                "--tool",
                "DESeq2",
                "--logfc",
                inputs.logfc,
                "--fdr",
                inputs.fdr,
            ]
            return_code, _, _ = Cmd["create_gene_sets.py"][gene_set_args] & TEE(
                retcode=None
            )
            if return_code:
                self.error("Error while creating gene sets.")
            # Move generated gene sets to the working directory and upload
            # each one as a separate gene-set data object.
            for gene_file in sorted(Path(out_dir).glob("*.tab.gz")):
                gene_file.rename(Path() / gene_file.name)
                process_inputs = {
                    "src": str(gene_file.name),
                    "source": expressions[0].output.source,
                    "species": expressions[0].output.species,
                }
                self.run_process("upload-geneset", process_inputs)
import gzip
import os
from shutil import copy
import pandas as pd
from resolwe.process import (
Cmd,
DataField,
FileField,
ListField,
Process,
SchedulingClass,
)
class ShortHairpinRNADifferentialExpression(Process):
    """
    Performing differential expression on a list of objects.

    Analysis starts by inputting a set of expression files (count matrices) and a parameter file. Parameter file is
    an xlsx file and consists of tabs:
    - `sample_key`: Should have column sample with exact sample name as input expression file(s),
    columns defining treatment and lastly a column which indicates replicate.
    - `contrasts`: Define groups which will be used to perform differential expression analysis. Model for DE
    uses these contrasts and replicate number. In R annotation, this would be ` ~ 1 + group + replicate`. Table
    should have two columns named `group_1` and `group_2`.
    - `overall_contrasts`: This is a layer "above" `contrasts`, where results from two contrasts are compared for
    lethal, beneficial and neutral species. Thresholds governing classification can be found in
    `classification_parameters` tab.
    - `classification_parameters`: This tab holds three columns, `threshold`, `value` and `description`. Only
    the first two are used in the workflow, description is for your benefit.

    This process outputs DESeq2 results, classified results based on provided thresholds and counts of beneficial
    and lethal species.
    """

    slug = "differentialexpression-shrna"
    name = "Differential expression of shRNA"
    process_type = "data:shrna:differentialexpression:"
    # Patch bump: error handling of the result-compression step fixed.
    version = "1.3.1"
    category = "Differential Expression"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
    }
    data_name = '{{ parameter_file.file|default("?") }}'

    class Input:
        """Input fields to process ShortHairpinRNADifferentialExpression."""

        parameter_file = DataField(
            data_type="file",
            label="Excel parameter file (.xlsx)",
            description="Select .xlsx file which holds parameters for analysis. "
            "See [here](https://github.com/genialis/shRNAde/blob/master/inst/extdata/template_doDE_inputs"
            ".xlsx) for a template.",
        )
        expression_data = ListField(
            DataField(
                data_type="expression:shrna2quant:",
                description="Data objects of expressions from process shrna-quant. These inputs should match sample "
                "names specified in parameter file.",
            ),
            label="List of expression files from shrna2quant",
        )

    class Output:
        """Output fields to process ShortHairpinRNADifferentialExpression."""

        deseq_results = FileField(label="DESeq2 results")
        class_results = FileField(
            label="Results classified based on thresholds provided by the user"
        )
        beneficial_counts = FileField(
            label="shRNAs considered as beneficial based on user input"
        )
        lethal_counts = FileField(
            label="shRNAs considered as lethal based on user input"
        )

    def run(self, inputs, outputs):
        """Run differential expression of shRNA.

        These are the steps for running the process:

        1.) Prepare data to be pulled into R for processing. Place data
        objects into expression_files/ folder.
        2.) Pass parameter file for R's shRNAde::doDE() and execute the
        function.
        3.) Prepare outputs.
        """
        dir_expressions = "./expression_files"
        os.mkdir(dir_expressions)
        # (1) Move expression files and extract files.
        sample_list = [
            copy(src=x.output.exp.path, dst=dir_expressions)
            for x in inputs.expression_data
        ]
        # Decompress each gzipped expression file next to its source; R
        # expects plain-text count matrices.
        for fl in sample_list:
            base_filename = os.path.splitext(fl)[0]
            with gzip.open(fl) as in_file:
                with open(base_filename, "wt") as out_file:
                    for line in in_file:
                        out_file.write(line.decode())
        # (2)
        r_input = f'shRNAde::doDE(input = "{inputs.parameter_file.output.file.path}", sample_list = "{dir_expressions}")'
        run_cmd = Cmd["Rscript"]["-e"][r_input]
        run_cmd()
        # (3) Compress results before storing them.
        result_deseq = "deseq_results.txt"
        result_class = "class_results.txt"
        result_beneficial = "beneficial_counts.txt"
        result_lethal = "lethal_counts.txt"
        # Due to stochasticity
        xy = pd.read_csv(filepath_or_buffer=result_deseq, sep="\t")
        xy = xy.round(7)
        xy.to_csv(path_or_buf=result_deseq, sep="\t")
        to_compress = [result_deseq, result_class, result_beneficial, result_lethal]
        for file in to_compress:
            # BUGFIX: FileNotFoundError is raised by open(), so the try block
            # must wrap the `with` statement. Previously the exception was
            # never caught and a string was returned instead of reporting an
            # error to the platform.
            try:
                with open(file, "rb") as txt, gzip.open(file + ".gz", "wb") as gz:
                    gz.writelines(txt)
            except FileNotFoundError:
                self.error("Something went wrong during result compression.")
        outputs.deseq_results = result_deseq + ".gz"
        outputs.class_results = result_class + ".gz"
        outputs.beneficial_counts = result_beneficial + ".gz"
        outputs.lethal_counts = result_lethal + ".gz"
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class Cuffdiff(Process):
    """Run Cuffdiff 2.2 analysis.

    Cuffdiff finds significant changes in transcript expression, splicing, and
    promoter use. You can use it to find differentially expressed genes and
    transcripts, as well as genes that are being differentially regulated at
    the transcriptional and post-transcriptional level. See
    [here](http://cole-trapnell-lab.github.io/cufflinks/cuffdiff/) and
    [here](https://software.broadinstitute.org/cancer/software/genepattern/modules/docs/Cuffdiff/7)
    for more information.
    """

    slug = "cuffdiff"
    name = "Cuffdiff 2.2"
    process_type = "data:differentialexpression:cuffdiff"
    # Patch bump: misleading build-mismatch error message fixed.
    version = "3.4.1"
    category = "Differential Expression"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {"cores": 10, "memory": 8192},
    }
    data_name = "Cuffdiff results"

    class Input:
        """Input fields to process Cuffdiff."""

        case = ListField(
            DataField("cufflinks:cuffquant"),
            label="Case samples",
        )
        control = ListField(
            DataField("cufflinks:cuffquant"),
            label="Control samples",
        )
        labels = ListField(
            StringField(),
            label="Group labels",
            description="Define labels for each sample group.",
            default=["control", "case"],
        )
        annotation = DataField(
            "annotation",
            label="Annotation (GTF/GFF3)",
            description="A transcript annotation file produced by "
            "cufflinks, cuffcompare, or other tool.",
        )
        genome = DataField(
            "seq:nucleotide",
            label="Run bias detection and correction algorithm",
            required=False,
            description="Provide Cufflinks with a multifasta file "
            "(genome file) via this option to instruct it to run a "
            "bias detection and correction algorithm which can "
            "significantly improve accuracy of transcript abundance "
            "estimates.",
        )
        multi_read_correct = BooleanField(
            label="Do initial estimation procedure to more accurately "
            "weight reads with multiple genome mappings",
            default=False,
        )
        create_sets = BooleanField(
            label="Create gene sets",
            description="After calculating differential gene "
            "expressions create gene sets for up-regulated genes, "
            "down-regulated genes and all genes.",
            default=False,
        )
        gene_logfc = FloatField(
            label="Log2 fold change threshold for gene sets",
            description="Genes above Log2FC are considered as "
            "up-regulated and genes below -Log2FC as down-regulated.",
            default=1.0,
            hidden="!create_sets",
        )
        gene_fdr = FloatField(
            label="FDR threshold for gene sets",
            default=0.05,
            hidden="!create_sets",
        )
        fdr = FloatField(
            label="Allowed FDR",
            description="The allowed false discovery rate. The default is 0.05.",
            default=0.05,
        )
        library_type = StringField(
            label="Library type",
            description="In cases where Cufflinks cannot determine the "
            "platform and protocol used to generate input reads, you "
            "can supply this information manually, which will allow "
            "Cufflinks to infer source strand information with certain "
            "protocols. The available options are listed below. For "
            "paired-end data, we currently only support protocols "
            "where reads point towards each other: fr-unstranded - "
            "Reads from the left-most end of the fragment (in "
            "transcript coordinates) map to the transcript strand and "
            "the right-most end maps to the opposite strand; "
            "fr-firststrand - Same as above except we enforce the rule "
            "that the right-most end of the fragment (in transcript "
            "coordinates) is the first sequenced (or only sequenced "
            "for single-end reads). Equivalently, it is assumed that "
            "only the strand generated during first strand synthesis "
            "is sequenced; fr-secondstrand - Same as above except we "
            "enforce the rule that the left-most end of the fragment "
            "(in transcript coordinates) is the first sequenced (or "
            "only sequenced for single-end reads). Equivalently, it is "
            "assumed that only the strand generated during second "
            "strand synthesis is sequenced.",
            default="fr-unstranded",
            choices=[
                ("fr-unstranded", "fr-unstranded"),
                ("fr-firststrand", "fr-firststrand"),
                ("fr-secondstrand", "fr-secondstrand"),
            ],
        )
        library_normalization = StringField(
            label="Library normalization method",
            description="You can control how library sizes (i.e. "
            "sequencing depths) are normalized in Cufflinks and "
            "Cuffdiff. Cuffdiff has several methods that require "
            "multiple libraries in order to work. Library "
            "normalization methods supported by Cufflinks work on one "
            "library at a time.",
            default="geometric",
            choices=[
                ("geometric", "geometric"),
                ("classic-fpkm", "classic-fpkm"),
                ("quartile", "quartile"),
            ],
        )
        dispersion_method = StringField(
            label="Dispersion method",
            description=" Cuffdiff works by modeling the variance in "
            "fragment counts across replicates as a function of the "
            "mean fragment count across replicates. Strictly speaking, "
            "models a quantitity called dispersion - the variance "
            "present in a group of samples beyond what is expected "
            "from a simple Poisson model of RNA_Seq. You can control "
            "how Cuffdiff constructs its model of dispersion in locus "
            "fragment counts. Each condition that has replicates can "
            "receive its own model, or Cuffdiff can use a global model "
            "for all conditions. All of these policies are identical "
            "to those used by DESeq (Anders and Huber, Genome Biology, "
            "2010).",
            default="pooled",
            choices=[
                ("pooled", "pooled"),
                ("per-condition", "per-condition"),
                ("blind", "blind"),
                ("poisson", "poisson"),
            ],
        )

    class Output:
        """Output fields of the process Cuffdiff."""

        raw = FileField("Differential expression")
        de_json = JsonField(label="Results table (JSON)")
        de_file = FileField(label="Results table (file)")
        transcript_diff_exp = FileField(
            label="Differential expression (transcript level)"
        )
        tss_group_diff_exp = FileField(
            label="Differential expression (primary transcript)"
        )
        cds_diff_exp = FileField(label="Differential expression (coding sequence)")
        cuffdiff_output = FileField(label="Cuffdiff output")
        source = StringField(label="Gene ID database")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")

    def run(self, inputs, outputs):
        """Run the analysis."""
        cuffquants = inputs.case + inputs.control
        # All inputs must share the same Gene ID database, species and
        # build; compare everything against the first sample.
        for c in cuffquants:
            if c.output.source != cuffquants[0].output.source:
                self.error(
                    "Input samples are of different Gene ID databases: "
                    f"{c.output.source} and {cuffquants[0].output.source}."
                )
            if c.output.species != cuffquants[0].output.species:
                self.error(
                    "Input samples are of different Species: "
                    f"{c.output.species} and {cuffquants[0].output.species}."
                )
            if c.output.build != cuffquants[0].output.build:
                # BUGFIX: message previously said "Panel types" although a
                # build mismatch is reported here.
                self.error(
                    "Input samples are of different Build: "
                    f"{c.output.build} and {cuffquants[0].output.build}."
                )
        for case in inputs.case:
            if case in inputs.control:
                self.error(
                    "Case and Control groups must contain unique "
                    f"samples. Sample {case.sample_name} is in both Case "
                    "and Control group."
                )
        case_paths = ",".join([case.output.cxb.path for case in inputs.case])
        control_paths = ",".join(
            [control.output.cxb.path for control in inputs.control]
        )
        labels = ",".join(inputs.labels)
        outputs.source = cuffquants[0].output.source
        outputs.species = cuffquants[0].output.species
        outputs.build = cuffquants[0].output.build
        outputs.feature_type = "gene"
        self.progress(0.1)
        params = [
            "-output-dir",
            "./",
            "-num-threads",
            self.requirements.resources.cores,
            "-labels",
            labels,
            "-FDR",
            inputs.fdr,
            "-library-type",
            inputs.library_type,
            "-library-norm-method",
            inputs.library_normalization,
            "-dispersion-method",
            inputs.dispersion_method,
            "-quiet",
        ]
        if inputs.genome:
            params.extend(["-frag-bias-correct", inputs.genome.output.fasta.path])
        if inputs.multi_read_correct:
            params.append("-multi-read-correct")
        return_code, _, _ = Cmd["cuffdiff"][params][
            inputs.annotation.output.annot.path, control_paths, case_paths
        ] & TEE(retcode=None)
        if return_code:
            self.error("Error while computing differential expression with Cuffdiff.")
        self.progress(0.90)
        exp_file = "cuffdiff.tab"
        os.rename("gene_exp.diff", exp_file)
        # Archive the complete Cuffdiff output for download.
        files_list = [
            "cds.*",
            "isoforms.*",
            "genes.*",
            "tss_groups.*",
            "read_groups.*",
            "promoters.diff",
            "splicing.diff",
            "cds_exp.diff",
            exp_file,
            "isoform_exp.diff",
            "tss_group_exp.diff",
        ]
        zip_file = "cuffdiff_output.zip"
        return_code, _, _ = Cmd["zip"][zip_file][files_list] & TEE(retcode=None)
        if return_code:
            self.error("Error while compressing Cuffdiff files.")
        args = [
            exp_file,
            "de_data.json",
            "de_file.tab.gz",
            "--gene_id",
            "gene_id",
            "--fdr",
            "q_value",
            "--pvalue",
            "p_value",
            "--logfc",
            "log2(fold_change)",
            "--stat",
            "test_stat",
        ]
        return_code, _, _ = Cmd["parse_diffexp.py"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error while parsing DGE results.")
        (Cmd["gzip"][exp_file])()
        outputs.raw = f"{exp_file}.gz"
        outputs.de_json = "de_data.json"
        outputs.de_file = "de_file.tab.gz"
        outputs.transcript_diff_exp = "isoform_exp.diff"
        outputs.cds_diff_exp = "cds_exp.diff"
        outputs.tss_group_diff_exp = "tss_group_exp.diff"
        outputs.cuffdiff_output = "cuffdiff_output.zip"
        if inputs.create_sets:
            out_dir = "gene_sets"
            gene_set_args = [
                "--dge_file",
                "de_file.tab.gz",
                "--out_dir",
                out_dir,
                "--analysis_name",
                self.name,
                "--tool",
                "Cuffdiff",
                "--logfc",
                inputs.gene_logfc,
                "--fdr",
                inputs.gene_fdr,
            ]
            return_code, _, _ = Cmd["create_gene_sets.py"][gene_set_args] & TEE(
                retcode=None
            )
            if return_code:
                self.error("Error while creating gene sets.")
            # Move generated gene sets to the working directory and upload
            # each one as a separate gene-set data object.
            for gene_file in sorted(Path(out_dir).glob("*.tab.gz")):
                gene_file.rename(Path() / gene_file.name)
                process_inputs = {
                    "src": str(gene_file.name),
                    "source": cuffquants[0].output.source,
                    "species": cuffquants[0].output.species,
                }
                self.run_process("upload-geneset", process_inputs)
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
IntegerField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class EdgeR(Process):
    """Run EdgeR analysis.

    Empirical Analysis of Digital Gene Expression Data in R (edgeR).
    Differential expression analysis of RNA-seq expression profiles with
    biological replication. Implements a range of statistical methodology
    based on the negative binomial distributions, including empirical Bayes
    estimation, exact tests, generalized linear models and quasi-likelihood
    tests. As well as RNA-seq, it be applied to differential signal analysis
    of other types of genomic data that produce counts, including ChIP-seq,
    Bisulfite-seq, SAGE and CAGE. See
    [here](https://www.bioconductor.org/packages/devel/bioc/vignettes/edgeR/inst/doc/edgeRUsersGuide.pdf)
    for more information.
    """

    slug = "differentialexpression-edger"
    name = "edgeR"
    process_type = "data:differentialexpression:edger"
    # Patch bump: misleading build-mismatch error message fixed and a
    # duplicated sample-uniqueness check removed.
    version = "1.7.1"
    category = "Differential Expression"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {"cores": 1, "memory": 8192},
    }
    data_name = "Differential expression (case vs. control)"

    class Input:
        """Input fields to process EdgeR."""

        case = ListField(
            DataField("expression"),
            label="Case",
            description="Case samples (replicates)",
        )
        control = ListField(
            DataField("expression"),
            label="Control",
            description="Control samples (replicates)",
        )
        count_filter = IntegerField(
            label="Raw counts filtering threshold",
            default=10,
            description="Filter genes in the expression matrix input. "
            "Remove genes where the number of counts in all samples is "
            "below the threshold.",
        )
        create_sets = BooleanField(
            label="Create gene sets",
            description="After calculating differential gene "
            "expressions create gene sets for up-regulated genes, "
            "down-regulated genes and all genes.",
            default=False,
        )
        logfc = FloatField(
            label="Log2 fold change threshold for gene sets",
            description="Genes above Log2FC are considered as "
            "up-regulated and genes below -Log2FC as down-regulated.",
            default=1.0,
            hidden="!create_sets",
        )
        fdr = FloatField(
            label="FDR threshold for gene sets",
            default=0.05,
            hidden="!create_sets",
        )

    class Output:
        """Output fields of the process EdgeR."""

        raw = FileField("Differential expression")
        de_json = JsonField(label="Results table (JSON)")
        de_file = FileField(label="Results table (file)")
        source = StringField(label="Gene ID database")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")

    def run(self, inputs, outputs):
        """Run the analysis."""
        if any(
            e.type == "data:expression:microarray:"
            for e in inputs.case + inputs.control
        ):
            self.error("Microarray expressions are not supported.")
        for t in inputs.case:
            if t in inputs.control:
                self.error(
                    "Case and Control groups must contain unique "
                    f"samples. Sample {t.sample_name} is in both Case "
                    "and Control group."
                )
        # edgeR cannot estimate dispersion without replicates in each group.
        if len(inputs.case) < 2 or len(inputs.control) < 2:
            self.error(
                "Error in calculating edgeR dispersion, please provide more samples"
            )
        try:
            case_paths = [c.output.rc.path for c in inputs.case]
            control_paths = [c.output.rc.path for c in inputs.control]
        except AttributeError:
            self.error("Read counts are required when using edgeR")
        conditions = ["case"] * len(case_paths) + ["control"] * len(control_paths)
        expressions = inputs.case + inputs.control
        # All inputs must share the same Gene ID database, species, build
        # and feature type; compare everything against the first sample.
        for exp in expressions:
            if exp.output.source != expressions[0].output.source:
                self.error(
                    "Input samples are of different Gene ID databases: "
                    f"{exp.output.source} and {expressions[0].output.source}."
                )
            if exp.output.species != expressions[0].output.species:
                self.error(
                    "Input samples are of different Species: "
                    f"{exp.output.species} and {expressions[0].output.species}."
                )
            if exp.output.build != expressions[0].output.build:
                # BUGFIX: message previously said "Panel types" although a
                # build mismatch is reported here.
                self.error(
                    "Input samples are of different Build: "
                    f"{exp.output.build} and {expressions[0].output.build}."
                )
            if exp.output.feature_type != expressions[0].output.feature_type:
                self.error(
                    "Input samples are of different Feature type: "
                    f"{exp.output.feature_type} and {expressions[0].output.feature_type}."
                )
        # NOTE: the case/control uniqueness check that used to be duplicated
        # here was already performed above.
        self.progress(0.1)
        # Merge per-sample read counts into a single count matrix, keeping
        # only genes present in every sample.
        sample_files = case_paths + control_paths
        merge_args = [
            sample_files,
            "--experiments",
            sample_files,
            "--intersection",
            "--out",
            "counts.tab",
        ]
        return_code, _, _ = Cmd["expressionmerge.py"][merge_args] & TEE(retcode=None)
        if return_code:
            self.error("Error merging read counts.")
        filter_args = [
            "-counts",
            "counts.tab",
            "-filter",
            inputs.count_filter,
            "-out",
            "counts_filtered.tab",
        ]
        return_code, _, _ = Cmd["diffexp_filtering.R"][filter_args] & TEE(retcode=None)
        if return_code:
            self.error("Error while filtering read counts.")
        args = [
            "counts_filtered.tab",
            "--sampleConditions",
            conditions,
        ]
        return_code, _, _ = Cmd["run_edger.R"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error computing differential expression (edgeR).")
        self.progress(0.95)
        edger_output = "diffexp_edgeR.tab"
        args = [
            edger_output,
            "de_data.json",
            "de_file.tab.gz",
            "--gene_id",
            "gene_id",
            "--fdr",
            "FDR",
            "--pvalue",
            "PValue",
            "--logfc",
            "logFC",
        ]
        return_code, _, _ = Cmd["parse_diffexp.py"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error while parsing DGE results.")
        (Cmd["gzip"][edger_output])()
        outputs.raw = f"{edger_output}.gz"
        outputs.de_json = "de_data.json"
        outputs.de_file = "de_file.tab.gz"
        outputs.source = expressions[0].output.source
        outputs.species = expressions[0].output.species
        outputs.build = expressions[0].output.build
        outputs.feature_type = expressions[0].output.feature_type
        if inputs.create_sets:
            out_dir = "gene_sets"
            gene_set_args = [
                "--dge_file",
                "de_file.tab.gz",
                "--out_dir",
                out_dir,
                "--analysis_name",
                self.name,
                "--tool",
                "EdgeR",
                "--logfc",
                inputs.logfc,
                "--fdr",
                inputs.fdr,
            ]
            return_code, _, _ = Cmd["create_gene_sets.py"][gene_set_args] & TEE(
                retcode=None
            )
            if return_code:
                self.error("Error while creating gene sets.")
            # Move generated gene sets to the working directory and upload
            # each one as a separate gene-set data object.
            for gene_file in sorted(Path(out_dir).glob("*.tab.gz")):
                gene_file.rename(Path() / gene_file.name)
                process_inputs = {
                    "src": str(gene_file.name),
                    "source": expressions[0].output.source,
                    "species": expressions[0].output.species,
                }
                self.run_process("upload-geneset", process_inputs)
import json
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy.spatial.distance import euclidean
from scipy.stats import pearsonr, spearmanr
from resolwe.process import (
DataField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
def get_expression(fname, sep="\t"):
    """Read expressions from file."""
    expressions = pd.read_csv(
        fname,
        sep=sep,
        header=0,
        index_col=0,
        compression="gzip",
        # First column holds gene IDs (strings), second the expression value.
        dtype={
            0: str,
            1: float,
        },
        keep_default_na=False,
    )
    # Force a string index even when pandas inferred something else.
    expressions.index = expressions.index.map(str)
    return expressions
def get_mean_expression(fnames, name, sep="\t"):
    """Get mean expression for replicates of one time point."""
    # Inner join keeps only genes present in every replicate, then the
    # per-gene mean collapses replicates into a single named series.
    replicates = pd.concat(
        [get_expression(fname, sep=sep) for fname in fnames],
        axis=1,
        join="inner",
    )
    return replicates.mean(axis=1).rename(name)
def join_expressions(positions, labels, sep="\t"):
    """Join mean expressions.

    Join expressions from different time points and return only those that are
    in all samples.
    """
    means = [
        get_mean_expression(replicates, name=labels[position], sep=sep)
        for position, replicates in positions
    ]
    # Genes in the outer join but not the inner join are missing from at
    # least one time point; report them as excluded.
    inner = pd.concat(means, axis=1, join="inner")
    outer = pd.concat(means, axis=1, join="outer")
    excluded = sorted(outer.index.difference(inner.index))
    return inner, excluded
def calculate_spearman(x, y):
    """Calculate Spearman correlation distance between x and y."""
    correlation = spearmanr(x, y).correlation
    return 1.0 - correlation
def calculate_pearson(x, y):
    """Calculate Pearson correlation distance between x and y."""
    correlation, _ = pearsonr(x, y)
    return 1.0 - correlation
def calculate_euclidean(x, y):
    """Return the Euclidean distance between vectors x and y."""
    return euclidean(x, y)
def is_const(values):
    """Return True, if all values are approximately equal, otherwise return False."""
    lo = np.min(values)
    hi = np.max(values)
    total = lo + hi
    if total == 0.0:
        # Symmetric around zero: only exactly-equal values count as constant.
        return lo == hi
    # Relative spread below 1e-6 is treated as constant.
    return (hi - lo) / abs(total) < 1.0e-6
def remove_const_genes(expressions):
    """Remove genes with constant expression profile across samples."""
    # True for rows (genes) whose values vary across samples.
    keep = expressions.apply(lambda row: not is_const(row), axis=1)
    removed = keep[~keep].index.tolist()
    return expressions.loc[keep], removed
class FindSimilar(Process):
    """Find genes with similar expression profile.

    Find genes that have similar expression over time to the query gene.
    """

    slug = "find-similar"
    name = "Find similar genes"
    process_type = "data:similarexpression"
    version = "1.3.1"
    scheduling_class = SchedulingClass.INTERACTIVE
    persistence = Persistence.TEMP
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {"cores": 1, "memory": 4096, "storage": 10},
        # Inputs must be annotated with a time-course ("series") relation.
        "relations": [{"type": "series"}],
    }
    data_name = "Genes similar to {{gene}}"
    category = "Enrichment and Clustering"

    class Input:
        """Input fields to process FindSimilar."""

        expressions = ListField(
            DataField("expression"),
            relation_type="series",
            label="Time series relation",
            description="Select time course to which the expressions belong to.",
        )
        gene = StringField(
            label="Query gene",
            description="Select a gene to which others are compared.",
        )
        distance = StringField(
            label="Distance metric",
            choices=[
                ("euclidean", "Euclidean"),
                ("spearman", "Spearman"),
                ("pearson", "Pearson"),
            ],
            default="spearman",
        )

    class Output:
        """Output field of the process FindSimilar."""

        similar_genes = JsonField(label="Similar genes")
        source = StringField(label="Gene ID database")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")

    def get_data_info(self, data_objects):
        """Get data time course labels and position sorted counts paths.

        Returns ``(labels, positions)``: a dict mapping each time-point
        position to its label, and (position, [expression paths]) pairs.
        """
        data_positions = defaultdict(list)
        labels = {}
        for data in data_objects:
            for relation in data.relations:
                if relation.type == "series":
                    # Locate this sample's partition in the series relation.
                    position, label = next(
                        (p.position, p.label)
                        for p in relation.partitions
                        if p.entity_id == data.entity_id
                    )
                else:
                    self.error(
                        f"Relations of type series are not defined for {data.name}."
                    )
            labels[position] = label
            data_positions[position].append(data.output.exp.path)
        return labels, data_positions.items()

    def run(self, inputs, outputs):
        """Run the analysis."""
        # All inputs must agree on annotation source, species, expression
        # type and feature type; everything is compared to the first sample.
        for exp in inputs.expressions:
            if exp.output.source != inputs.expressions[0].output.source:
                self.error(
                    "Input samples are of different Gene ID databases: "
                    f"{exp.output.source} and {inputs.expressions[0].output.source}."
                )
            if exp.output.species != inputs.expressions[0].output.species:
                self.error(
                    "Input samples are of different Species: "
                    f"{exp.output.species} and {inputs.expressions[0].output.species}."
                )
            if exp.output.exp_type != inputs.expressions[0].output.exp_type:
                self.error(
                    "Input samples are of different Expression types: "
                    f"{exp.output.exp_type} and {inputs.expressions[0].output.exp_type}."
                )
            if exp.output.feature_type != inputs.expressions[0].output.feature_type:
                self.error(
                    "Input samples are of different Feature type: "
                    f"{exp.output.feature_type} and {inputs.expressions[0].output.feature_type}."
                )
        labels, positions = self.get_data_info(inputs.expressions)
        if len(labels) == 1:
            self.error(
                "Only one time point was provided. At least two time "
                "points are required."
            )
        # Mean expression per time point, restricted to shared genes.
        expressions, excluded = join_expressions(positions, labels)
        if len(expressions.index) < 2:
            self.error("At least two genes shared across all samples are required.")
        if excluded:
            suffix = "" if len(excluded) <= 3 else ", ..."
            excluded_genes = ", ".join(excluded[:3])
            self.warning(
                "Genes not present in all of the selected samples are "
                f"excluded from the analysis. Excluded {len(excluded)} "
                f"of them ({excluded_genes + suffix})."
            )
        if inputs.gene not in expressions.index:
            self.error(
                "Selected query gene was not found. Please make sure "
                "the selected gene name can be found in all expression "
                "time courses."
            )
        # Constant profiles carry no distance information; drop them.
        expressions, removed = remove_const_genes(expressions)
        rows = expressions.index
        if len(rows) < 2:
            self.error(
                "There are less than two genes with non-constant "
                "expression across time points. Distances can not be "
                "computed."
            )
        suffix = "" if len(removed) <= 3 else ", ..."
        if removed:
            removed_genes = ", ".join(removed[:3])
            self.warning(
                f"{len(removed)} genes ({removed_genes + suffix}) have "
                "constant expression across time points. Those genes "
                "are excluded from the computation of hierarchical "
                "clustering of genes."
            )
        if inputs.gene in removed:
            self.error(
                f"Query gene ({inputs.gene}) has constant "
                "expression and was removed. Distances can not be "
                "computed."
            )
        distance_map = {
            "pearson": calculate_pearson,
            "spearman": calculate_spearman,
            "euclidean": calculate_euclidean,
        }
        distance_func = distance_map[inputs.distance]
        # Compare the query gene's profile against every other gene.
        selected_etc = expressions.loc[inputs.gene].tolist()
        expressions = expressions.drop(inputs.gene)
        distances = np.array(
            [distance_func(selected_etc, etc) for etc in expressions.values]
        )
        genes = expressions.index
        similarity = [
            {"gene": gene, "distance": d} for gene, d in zip(genes, distances)
        ]
        # Most similar (smallest distance) first.
        similarity.sort(key=lambda x: x["distance"])
        formatted_output = {"search gene": inputs.gene, "similar genes": similarity}
        with open("similar_genes.json", "w") as f:
            json.dump(formatted_output, f)
        outputs.similar_genes = "similar_genes.json"
        outputs.source = inputs.expressions[0].output.source
        outputs.species = inputs.expressions[0].output.species
        outputs.build = inputs.expressions[0].output.build
        outputs.feature_type = inputs.expressions[0].output.feature_type
import json
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.stats import spearmanr, zscore
from resolwe.process import (
BooleanField,
DataField,
GroupField,
JsonField,
ListField,
Persistence,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
def check_compatibility(
    exp_source,
    target_source,
    exp_species,
    target_species,
    exp_type,
    target_exp_type,
    exp_feature_type,
    target_feature_type,
    process_source,
    process_species,
    exp_name,
    target_name,
    error,
    warning,
    genes,
):
    """Check compatibility of inputs.

    Compares one expression object against the reference one and, when a
    gene subset is supplied, against the user-provided gene annotation.
    On a mismatch the *warning* callback receives a general message and
    the *error* callback a detailed one; in the Resolwe runtime *error*
    raises and terminates the process.
    """
    checks = [
        (
            exp_source != target_source,
            "All expression data must be annotated by the same genome database.",
            f"Sample {target_name} has {target_source} gene IDs, "
            f"while sample {exp_name} has {exp_source} gene IDs.",
        ),
        (
            exp_species != target_species,
            "All expressions must be of the same Species.",
            f"Sample {target_name} is {target_species}, while sample {exp_name} is {exp_species}.",
        ),
        (
            exp_type != target_exp_type,
            "All expressions must be of the same Expression type.",
            f"Expression {target_name} has {target_exp_type} expression type, "
            f"while sample {exp_name} has {exp_type} expression type.",
        ),
        (
            exp_feature_type != target_feature_type,
            "All expressions must be of the same Feature type.",
            f"Expression {target_name} has {target_feature_type} feature type, "
            f"while sample {exp_name} has {exp_feature_type} feature type.",
        ),
    ]
    if len(genes) > 0:
        # A gene subset was selected; it must match the expressions too.
        checks.extend(
            [
                (
                    exp_source != process_source,
                    "Selected genes must be annotated by the same genome database as all expression files.",
                    f"Gene IDs are from {process_source} database, "
                    f"while sample {exp_name} has gene IDs from {exp_source} database.",
                ),
                (
                    exp_species != process_species,
                    "Selected genes must be from the same species as all expression files.",
                    f"Selected genes are {process_species}, while expression {exp_name} is {exp_species}.",
                ),
            ]
        )
    for mismatch, warning_message, error_message in checks:
        if mismatch:
            warning(warning_message)
            error(error_message)
def get_expression(fname, sep="\t", gene_set=None):
    """Read expressions from file and return only expressions of genes in gene_set.

    When *gene_set* is empty or None the whole table is returned;
    otherwise only the listed genes that are present in the file, in
    *gene_set* order. (``None`` default avoids the mutable-default-argument
    pitfall; an explicit ``[]`` behaves identically.)
    """
    df = pd.read_csv(
        filepath_or_buffer=fname,
        sep=sep,
        header=0,
        index_col=0,
        compression="gzip",
        dtype={
            0: str,
            1: float,
        },
        keep_default_na=False,
    )
    # Gene IDs may otherwise be parsed as integers.
    df.index = df.index.map(str)
    if not gene_set:
        return df
    # Keep gene_set order; silently skip genes absent from the file.
    intersection = [gene for gene in gene_set if gene in df.index]
    return df.loc[intersection]
def get_expressions(fnames, sep="\t", gene_set=None):
    """Read expressions from files.

    Return only expressions of genes that are listed in all samples and
    in *gene_set* (when given). Returns ``(inner, excluded)`` where
    ``excluded`` is the sorted list of gene IDs dropped by the inner
    join. (``None`` default avoids the mutable-default-argument pitfall.)
    """
    gene_set = gene_set or []
    dfs = [get_expression(fname, sep=sep, gene_set=gene_set) for fname in fnames]
    inner = pd.concat(dfs, axis=1, join="inner")
    outer = pd.concat(dfs, axis=1, join="outer", sort=True)
    if gene_set:
        # Requested genes missing from at least one sample.
        excluded = sorted(set(gene_set).difference(set(inner.index)))
    else:
        # Genes present in some but not all samples.
        excluded = sorted(outer.index.difference(inner.index))
    return inner, excluded
def transform(expressions, error, log2=False, const=1.0, z_score=False, ddof=1):
    """Optionally log2-transform and Z-score normalize expression values.

    Parameters:
    - error: callback invoked with a message when log2 produces NaNs
    - log2: apply log2(x + const) element-wise
    - const: additive constant used in the log2 transform
    - z_score: normalize each gene (row) to zero mean / unit variance
    - ddof: delta degrees of freedom used in the Z-score computation
    """
    if log2:
        expressions = expressions.applymap(lambda value: np.log2(value + const))
        # Inputs with value + const <= 0 yield NaN; report them.
        if expressions.isnull().values.any():
            error("Cannot apply log2 to expression values.")
    if z_score:
        expressions = expressions.apply(
            lambda row: zscore(row, ddof=ddof), axis=1, result_type="broadcast"
        )
        # Constant rows produce NaN Z-scores; flatten those to zero.
        expressions.fillna(value=0.0, inplace=True)
    return expressions
def get_distance_metric(distance_metric):
    """Translate a metric name into what scipy's linkage understands."""
    metric_map = {
        # Spearman distance is not built into scipy; supply a callable.
        "spearman": lambda x, y: 1.0 - spearmanr(x, y).correlation,
        # Pearson distance is scipy's "correlation" metric.
        "pearson": "correlation",
    }
    return metric_map.get(distance_metric, distance_metric)
def get_clustering(
    expressions,
    error,
    distance_metric="euclidean",
    linkage_method="average",
    order=False,
):
    """Compute linkage and dendrogram for the given expression matrix.

    *error* is a reporting callback; in the Resolwe process runtime it
    raises, which terminates the computation on failure.
    """
    try:
        linkage_matrix = linkage(
            expressions,
            method=linkage_method,
            metric=distance_metric,
            optimal_ordering=order,
        )
    except Exception:
        error("Cannot compute linkage.")
    try:
        dendro = dendrogram(linkage_matrix, no_plot=True)
    except Exception:
        error("Cannot compute dendrogram.")
    return linkage_matrix, dendro
def is_const(values):
    """Return True when every value is approximately the same.

    Compares the spread (max - min) relative to |max + min|; exact
    comparison is used when the sum is zero to avoid dividing by zero.
    """
    low = np.min(values)
    high = np.max(values)
    if low + high == 0.0:
        return low == high
    return (high - low) / abs(high + low) < 1.0e-6
def remove_const_samples(expressions):
    """Drop samples (columns) whose expression is constant across genes.

    Returns the filtered DataFrame and a boolean list marking, for each
    original column, whether it was kept.
    """
    keep = expressions.apply(lambda column: not is_const(column), axis=0)
    return expressions.loc[:, keep], keep.values.tolist()
def remove_const_genes(expressions):
    """Drop genes (rows) with a constant expression profile.

    Returns the filtered DataFrame and the list of removed gene IDs.
    """
    keep = expressions.apply(lambda row: not is_const(row), axis=1)
    return expressions.loc[keep], keep[~keep].index.tolist()
def output_json(result=None, fname="cluster.json"):
    """Write *result* (a JSON-serializable mapping) to the file *fname*.

    An omitted *result* writes an empty object. (The previous docstring
    claimed the result is printed when ``fname`` is None, but the code
    always writes to a file; the default is also changed from a mutable
    ``dict()`` to ``None`` — behavior is unchanged.)
    """
    with open(fname, "w") as f:
        json.dump({} if result is None else result, f)
class HierarchicalClusteringSamples(ProcessBio):
    """Hierarchical clustering of samples."""

    slug = "clustering-hierarchical-samples"
    name = "Hierarchical clustering of samples"
    process_type = "data:clustering:hierarchical:sample"
    version = "3.5.2"
    category = "Enrichment and Clustering"
    data_name = "Hierarchical clustering of samples"
    scheduling_class = SchedulingClass.INTERACTIVE
    persistence = Persistence.TEMP
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {"cores": 1, "memory": 4096, "storage": 10},
    }

    class Input:
        """Input fields to process HierarchicalClusteringSamples."""

        exps = ListField(
            DataField("expression"),
            label="Expressions",
            description="Select at least two data objects.",
        )

        class Preprocessing:
            """Preprocessing."""

            genes = ListField(
                StringField(),
                label="Gene subset",
                required=False,
                placeholder="new gene id, e.g. ENSG00000185982 (ENSEMBL database)",
                description="Specify at least two genes or leave this field empty.",
            )
            source = StringField(
                label="Gene ID database of selected genes",
                description="This field is required if gene subset is set, e.g. ENSEMBL, UCSC.",
                required=False,
                hidden="!preprocessing.genes",
            )
            species = StringField(
                label="Species",
                description="Specify species name. This field is required if gene subset is set.",
                allow_custom_choice=True,
                hidden="!preprocessing.genes",
                required=False,
                choices=[
                    ("Homo sapiens", "Homo sapiens"),
                    ("Mus musculus", "Mus musculus"),
                    ("Rattus norvegicus", "Rattus norvegicus"),
                    ("Dictyostelium discoideum", "Dictyostelium discoideum"),
                ],
            )
            log2 = BooleanField(
                label="Log-transform expressions",
                default=True,
                description="Transform expressions with log2(x + 1) before clustering.",
            )
            z_score = BooleanField(
                label="Z-score normalization",
                default=True,
                description="Use Z-score normalization of gene expressions before clustering.",
            )

        class Processing:
            """Processing."""

            distance_metric = StringField(
                label="Distance metric",
                default="pearson",
                choices=[
                    ("euclidean", "Euclidean"),
                    ("pearson", "Pearson"),
                    ("spearman", "Spearman"),
                ],
            )
            linkage_method = StringField(
                label="Linkage method",
                default="average",
                choices=[
                    ("single", "single"),
                    ("average", "average"),
                    ("complete", "complete"),
                ],
            )

        class Postprocessing:
            """Postprocessing."""

            order = BooleanField(
                label="Order samples optimally",
                default=True,
            )

        preprocessing = GroupField(Preprocessing, label="Preprocessing")
        processing = GroupField(Processing, label="Processing")
        postprocessing = GroupField(Postprocessing, label="Postprocessing")

    class Output:
        """Output fields to process HierarchicalClusteringSamples."""

        cluster = JsonField(
            label="Hierarchical clustering",
            required=False,
        )

    def run(self, inputs, outputs):
        """Run analysis."""
        sample_files = []
        sample_ids = []
        sample_names = []
        if inputs.preprocessing.genes:
            gene_labels = inputs.preprocessing.genes
        else:
            gene_labels = []
        # Validate that all inputs agree with the first one (and with the
        # optional gene subset annotation) while collecting file paths.
        for exp in inputs.exps:
            check_compatibility(
                exp_source=exp.output.source,
                target_source=inputs.exps[0].output.source,
                exp_species=exp.output.species,
                target_species=inputs.exps[0].output.species,
                exp_type=exp.output.exp_type,
                target_exp_type=inputs.exps[0].output.exp_type,
                exp_feature_type=exp.output.feature_type,
                target_feature_type=inputs.exps[0].output.feature_type,
                process_source=inputs.preprocessing.source,
                process_species=inputs.preprocessing.species,
                exp_name=exp.entity.name,
                target_name=inputs.exps[0].entity.name,
                warning=self.warning,
                error=self.error,
                genes=gene_labels,
            )
            sample_files.append(exp.output.exp.path)
            sample_ids.append(exp.entity.id)
            sample_names.append(exp.entity.name)
        if len(gene_labels) == 1 and inputs.processing.distance_metric != "euclidean":
            self.error(
                "Select at least two genes to compute hierarchical clustering of samples with "
                "correlation distance metric or use Euclidean distance metric."
            )
        if len(sample_files) < 2:
            self.error(
                "Select at least two samples to compute hierarchical clustering of samples."
            )
        expressions, excluded = get_expressions(
            fnames=sample_files, gene_set=gene_labels
        )
        if len(expressions.index) == 0:
            if not inputs.preprocessing.genes:
                self.error("The selected samples do not have any common genes.")
            else:
                self.error("None of the selected genes are present in all samples.")
        # Feature names are only needed for user-facing messages.
        features = self.feature.filter(
            feature_id__in=list(expressions.index),
            source=inputs.exps[0].output.source,
            species=inputs.exps[0].output.species,
        )
        if (
            len(expressions.index) == 1
            and inputs.processing.distance_metric != "euclidean"
        ):
            if not inputs.preprocessing.genes:
                self.error(
                    "The selected samples contain only one common gene "
                    f"({[feature.name for feature in features][0]}). At least two common "
                    "genes are required to compute hierarchical clustering of samples with "
                    "correlation distance metric. Select a different set of samples or use Euclidean "
                    "distance metric."
                )
            else:
                self.error(
                    f"Only one of the selected genes ({[feature.name for feature in features][0]}) "
                    "is present in all samples but at least two such genes are required to compute "
                    "hierarchical clustering of samples with correlation distance metric. Select more "
                    "genes or use Euclidean distance metric."
                )
        expressions = transform(
            expressions=expressions,
            error=self.error,
            log2=inputs.preprocessing.log2,
            z_score=inputs.preprocessing.z_score,
            const=1.0,
        )
        if inputs.processing.distance_metric != "euclidean":
            # Correlation distances are undefined for constant profiles.
            expressions, matches = remove_const_samples(expressions)
            if len(expressions.columns) == 0:
                self.error(
                    "All of the selected samples have constant expression across genes. Hierarchical "
                    "clustering of samples cannot be computed."
                )
            if len(expressions.columns) == 1:
                samples_name = [
                    name for i, name in enumerate(sample_names) if matches[i]
                ][0]
                self.error(
                    f"Only one of the selected samples ({samples_name}) has a non-constant expression across "
                    "genes. However, hierarchical clustering of samples cannot be computed with "
                    "just one sample."
                )
            removed = [name for i, name in enumerate(sample_names) if not matches[i]]
            if removed:
                suffix = "" if len(removed) <= 3 else ", ..."
                self.warning(
                    f"{len(removed)} of the selected samples ({', '.join(removed[:3]) + suffix}) have "
                    "constant expression across genes. Those samples are excluded from the computation "
                    "of hierarchical clustering of samples with correlation distance metric."
                )
        else:
            matches = [True] * len(sample_files)
        if excluded:
            features = self.feature.filter(
                feature_id__in=excluded[:3],
                source=inputs.exps[0].output.source,
                species=inputs.exps[0].output.species,
            )
            excluded_names = sorted([feature.name for feature in features])
            if len(excluded) == 1:
                if not inputs.preprocessing.genes:
                    self.warning(
                        f"Gene {excluded_names[0]} is present in some but not all of the selected samples. "
                        "This gene is excluded from the computation of hierarchical clustering of "
                        "samples."
                    )
                else:
                    # Fixed: interpolate the count, not the list of IDs.
                    self.warning(
                        f"{len(excluded)} of the selected genes ({excluded_names[0]}) is missing in at least one "
                        "of the selected samples. This gene is excluded from the computation of hierarchical "
                        "clustering of samples."
                    )
            if len(excluded) > 1:
                if not inputs.preprocessing.genes:
                    self.warning(
                        f"{len(excluded)} genes ({', '.join(excluded_names)}) are present in some but "
                        "not all of the selected samples. Those genes are excluded from the computation "
                        "of hierarchical clustering of samples."
                    )
                else:
                    self.warning(
                        f"{len(excluded)} of the selected genes ({', '.join(excluded_names)}) are missing "
                        "in at least one of the selected samples. Those genes are excluded from the "
                        "computation of hierarchical clustering of samples."
                    )
        # Samples are clustered, so the matrix is transposed (samples as rows).
        linkage, dendrogram = get_clustering(
            expressions=expressions.transpose(),
            error=self.error,
            distance_metric=get_distance_metric(inputs.processing.distance_metric),
            linkage_method=inputs.processing.linkage_method,
            order=inputs.postprocessing.order,
        )
        # Keep IDs only for samples that survived the constant-profile filter.
        sample_ids = [sample_id for i, sample_id in enumerate(sample_ids) if matches[i]]
        result = {
            "sample_ids": {
                i: {"id": sample_id} for i, sample_id in enumerate(sample_ids)
            },
            "linkage": linkage.tolist(),
            "order": dendrogram["leaves"],
        }
        output_json(result=result)
        outputs.cluster = "cluster.json"
class HierarchicalClusteringGenes(ProcessBio):
    """Hierarchical clustering of genes."""

    slug = "clustering-hierarchical-genes"
    name = "Hierarchical clustering of genes"
    process_type = "data:clustering:hierarchical:gene"
    version = "3.5.2"
    category = "Enrichment and Clustering"
    data_name = "Hierarchical clustering of genes"
    scheduling_class = SchedulingClass.INTERACTIVE
    persistence = Persistence.TEMP
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {"cores": 1, "memory": 4096, "storage": 10},
    }

    class Input:
        """Input fields to process HierarchicalClusteringGenes."""

        exps = ListField(
            DataField("expression"),
            label="Expressions",
            description="Select at least two data objects.",
        )

        class Preprocessing:
            """Preprocessing."""

            genes = ListField(
                StringField(),
                label="Gene subset",
                required=False,
                placeholder="new gene id, e.g. ENSG00000185982 (ENSEMBL database)",
                description="Specify at least two genes or leave this field empty.",
            )
            source = StringField(
                label="Gene ID database of selected genes",
                description="This field is required if gene subset is set, e.g. ENSEMBL, UCSC.",
                required=False,
                hidden="!preprocessing.genes",
            )
            species = StringField(
                label="Species",
                description="Specify species name. This field is required if gene subset is set.",
                allow_custom_choice=True,
                required=False,
                hidden="!preprocessing.genes",
                choices=[
                    ("Homo sapiens", "Homo sapiens"),
                    ("Mus musculus", "Mus musculus"),
                    ("Rattus norvegicus", "Rattus norvegicus"),
                    ("Dictyostelium discoideum", "Dictyostelium discoideum"),
                ],
            )
            log2 = BooleanField(
                label="Log-transform expressions",
                default=True,
                description="Transform expressions with log2(x + 1) before clustering.",
            )
            z_score = BooleanField(
                label="Z-score normalization",
                default=True,
                description="Use Z-score normalization of gene expressions before clustering.",
            )

        class Processing:
            """Processing."""

            distance_metric = StringField(
                label="Distance metric",
                default="pearson",
                choices=[
                    ("euclidean", "Euclidean"),
                    ("pearson", "Pearson"),
                    ("spearman", "Spearman"),
                ],
            )
            linkage_method = StringField(
                label="Linkage method",
                default="average",
                choices=[
                    ("single", "single"),
                    ("average", "average"),
                    ("complete", "complete"),
                ],
            )

        class Postprocessing:
            """Postprocessing."""

            order = BooleanField(
                label="Order samples optimally",
                default=True,
            )

        preprocessing = GroupField(Preprocessing, label="Preprocessing")
        processing = GroupField(Processing, label="Processing")
        postprocessing = GroupField(Postprocessing, label="Postprocessing")

    class Output:
        """Output fields to process HierarchicalClusteringGenes."""

        cluster = JsonField(
            label="Hierarchical clustering",
            required=False,
        )

    def run(self, inputs, outputs):
        """Run analysis."""
        sample_files = []
        sample_names = []
        if inputs.preprocessing.genes:
            gene_labels = inputs.preprocessing.genes
        else:
            gene_labels = []
        # Validate that all inputs agree with the first one (and with the
        # optional gene subset annotation) while collecting file paths.
        for exp in inputs.exps:
            check_compatibility(
                exp_source=exp.output.source,
                target_source=inputs.exps[0].output.source,
                exp_species=exp.output.species,
                target_species=inputs.exps[0].output.species,
                exp_type=exp.output.exp_type,
                target_exp_type=inputs.exps[0].output.exp_type,
                exp_feature_type=exp.output.feature_type,
                target_feature_type=inputs.exps[0].output.feature_type,
                process_source=inputs.preprocessing.source,
                process_species=inputs.preprocessing.species,
                exp_name=exp.entity.name,
                target_name=inputs.exps[0].entity.name,
                warning=self.warning,
                error=self.error,
                genes=gene_labels,
            )
            sample_files.append(exp.output.exp.path)
            sample_names.append(exp.entity.name)
        if len(gene_labels) == 1:
            self.error(
                "Select at least two genes to compute hierarchical clustering of genes."
            )
        if len(sample_files) == 1 and inputs.processing.distance_metric != "euclidean":
            self.error(
                "Select at least two samples to compute hierarchical clustering of genes with "
                "correlation distance metric or use Euclidean distance metric."
            )
        expressions, excluded = get_expressions(
            fnames=sample_files, gene_set=gene_labels
        )
        if len(expressions.index) == 0:
            if not inputs.preprocessing.genes:
                self.error("The selected samples do not have any common genes.")
            else:
                self.error("None of the selected genes are present in all samples.")
        # Feature names are only needed for user-facing messages.
        features = self.feature.filter(
            feature_id__in=list(expressions.index),
            source=inputs.exps[0].output.source,
            species=inputs.exps[0].output.species,
        )
        if (
            len(expressions.index) == 1
            and inputs.processing.distance_metric != "euclidean"
        ):
            if not inputs.preprocessing.genes:
                self.error(
                    "The selected samples contain only one common gene "
                    f"({[feature.name for feature in features][0]}). At least two common "
                    "genes are required to compute hierarchical clustering of genes with "
                    "correlation distance metric. Select a different set of samples or use Euclidean "
                    "distance metric."
                )
            else:
                self.error(
                    f"Only one of the selected genes ({[feature.name for feature in features][0]}) "
                    "is present in all samples but at least two such genes are required to compute "
                    "hierarchical clustering of genes with correlation distance metric. Select more "
                    "genes or use Euclidean distance metric."
                )
        expressions = transform(
            expressions=expressions,
            error=self.error,
            log2=inputs.preprocessing.log2,
            z_score=inputs.preprocessing.z_score,
            const=1.0,
        )
        if inputs.processing.distance_metric != "euclidean":
            # Correlation distances are undefined for constant profiles.
            expressions, removed = remove_const_genes(expressions=expressions)
            if len(expressions.index) == 0:
                self.error(
                    "All of the selected genes have constant expression across samples. Hierarchical "
                    "clustering of genes cannot be computed."
                )
            if len(expressions.index) == 1:
                features = self.feature.filter(
                    feature_id__in=list(expressions.index),
                    source=inputs.exps[0].output.source,
                    species=inputs.exps[0].output.species,
                )
                gene_names = [feature.name for feature in features]
                self.error(
                    f"Only one of the selected genes ({gene_names[0]}) has a non-constant expression across "
                    "samples. However, hierarchical clustering of genes cannot be computed with "
                    "just one gene."
                )
            if removed:
                suffix = "" if len(removed) <= 3 else ", ..."
                features = self.feature.filter(
                    feature_id__in=removed[:3],
                    source=inputs.exps[0].output.source,
                    species=inputs.exps[0].output.species,
                )
                removed_names = sorted([feature.name for feature in features])
                self.warning(
                    f"{len(removed)} of the selected genes ({', '.join(removed_names) + suffix}) have "
                    "constant expression across samples. Those genes are excluded from the computation "
                    "of hierarchical clustering of genes with correlation distance metric."
                )
        if excluded:
            features = self.feature.filter(
                feature_id__in=excluded[:3],
                source=inputs.exps[0].output.source,
                species=inputs.exps[0].output.species,
            )
            excluded_names = sorted([feature.name for feature in features])
            if len(excluded) == 1:
                if not inputs.preprocessing.genes:
                    # Fixed: report the gene name, not the whole list.
                    self.warning(
                        f"Gene {excluded_names[0]} is present in some but not all of the selected samples. "
                        "This gene is excluded from the computation of hierarchical clustering of "
                        "genes."
                    )
                else:
                    self.warning(
                        f"{len(excluded)} of the selected genes ({excluded_names[0]}) is missing in at least "
                        "one of the selected samples. This gene is excluded from the computation of "
                        "hierarchical clustering of genes."
                    )
            if len(excluded) > 1:
                if not inputs.preprocessing.genes:
                    self.warning(
                        f"{len(excluded)} genes ({', '.join(excluded_names)}) are present in some but "
                        "not all of the selected samples. Those genes are excluded from the computation "
                        "of hierarchical clustering of genes."
                    )
                else:
                    self.warning(
                        f"{len(excluded)} of the selected genes ({', '.join(excluded_names)}) are "
                        "missing in at least one of the selected samples. Those genes are excluded from "
                        "the computation of hierarchical clustering of genes."
                    )
        linkage, dendrogram = get_clustering(
            expressions=expressions,
            error=self.error,
            distance_metric=get_distance_metric(inputs.processing.distance_metric),
            linkage_method=inputs.processing.linkage_method,
            order=inputs.postprocessing.order,
        )
        result = {
            "gene_symbols": {
                i: {"gene": gene} for i, gene in enumerate(expressions.index)
            },
            "linkage": linkage.tolist(),
            "order": dendrogram["leaves"],
        }
        output_json(result=result)
        outputs.cluster = "cluster.json"
import json
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.stats import spearmanr
from resolwe.process import (
BooleanField,
DataField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
def get_expression(fname, sep="\t", gene_set=None):
    """Read expressions from file and return only expressions of genes in gene_set.

    When *gene_set* is empty or None the whole table is returned.
    (``None`` default avoids the mutable-default-argument pitfall; an
    explicit ``[]`` behaves identically.)
    """
    df = pd.read_csv(
        filepath_or_buffer=fname,
        sep=sep,
        header=0,
        index_col=0,
        compression="gzip",
        dtype={
            0: str,
            1: float,
        },
        keep_default_na=False,
    )
    # Gene IDs may otherwise be parsed as integers.
    df.index = df.index.map(str)
    if not gene_set:
        return df
    # Keep gene_set order; silently skip genes absent from the file.
    intersection = [gene for gene in gene_set if gene in df.index]
    return df.loc[intersection]
def get_mean_expression(fnames, name, sep="\t", gene_set=None):
    """Get mean expression for replicates of one time point.

    Only genes present in every replicate (inner join, optionally
    restricted to *gene_set*) contribute; the result is a Series named
    *name*. (``None`` default avoids the mutable-default-argument
    pitfall; an explicit ``[]`` behaves identically.)
    """
    dfs = [get_expression(fname, sep=sep, gene_set=gene_set) for fname in fnames]
    joined = pd.concat(dfs, axis=1, join="inner")
    return joined.mean(axis=1).rename(name)
def join_expressions(positions, labels, sep="\t", gene_set=None):
    """Join mean expressions.

    Join expressions from different time points and return only genes
    that are in all samples (and in *gene_set*, when given). Returns
    ``(inner, excluded)`` where ``excluded`` lists (sorted) the gene IDs
    dropped by the inner join. (``None`` default avoids the
    mutable-default-argument pitfall.)
    """
    gene_set = gene_set or []
    dfs = []
    for position, replicates in positions:
        dfs.append(
            get_mean_expression(
                replicates,
                name=labels[position],
                sep=sep,
                gene_set=gene_set,
            )
        )
    inner = pd.concat(dfs, axis=1, join="inner")
    outer = pd.concat(dfs, axis=1, join="outer")
    if gene_set:
        # Requested genes missing from at least one sample.
        excluded = sorted(set(gene_set).difference(set(inner.index)))
    else:
        # Genes present in some but not all samples.
        excluded = sorted(outer.index.difference(inner.index))
    return inner, excluded
def get_distance_metric(distance_metric):
    """Map a user-facing metric name onto scipy's metric argument."""
    if distance_metric == "pearson":
        # Pearson distance is scipy's built-in "correlation" metric.
        return "correlation"
    if distance_metric == "spearman":
        # Spearman distance is not built into scipy; supply a callable.
        return lambda x, y: 1.0 - spearmanr(x, y).correlation
    return distance_metric
def is_const(values):
    """Return True if all values are approximately equal.

    The spread (max - min) is compared relative to |max + min|; when the
    sum is exactly zero the relative measure is undefined, so an exact
    comparison is used instead.
    """
    smallest = np.min(values)
    largest = np.max(values)
    if smallest + largest == 0.0:
        return smallest == largest
    return (largest - smallest) / abs(largest + smallest) < 1.0e-6
def remove_const_genes(expressions):
    """Drop genes (rows) whose expression profile is constant.

    Returns the filtered DataFrame and the list of removed gene IDs.
    """
    varying = expressions.apply(lambda row: not is_const(row), axis=1)
    return expressions.loc[varying], varying[~varying].index.tolist()
class ClusterTimeCourse(Process):
"""Cluster gene expression time courses.
Hierarchical clustering of expression time courses.
"""
slug = "clustering-hierarchical-etc"
name = "Hierarchical clustering of time courses"
process_type = "data:clustering:hierarchical:etc"
version = "1.3.0"
scheduling_class = SchedulingClass.INTERACTIVE
persistence = Persistence.TEMP
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
},
"resources": {"cores": 1, "memory": 4096, "storage": 10},
"relations": [{"type": "series"}],
}
data_name = "Hierarchical clustering of time courses"
category = "Enrichment and Clustering"
class Input:
"""Input fields to process ClusterTimeCourse."""
expressions = ListField(
DataField("expression"),
relation_type="series",
label="Time series relation",
description="Select time course to which the expressions belong to.",
)
genes = ListField(
StringField(),
label="Gene subset",
required=False,
description="Select at least two genes or leave this field empty.",
)
gene_species = StringField(
label="Species",
description="Species to which the selected genes belong to. "
"This field is required if gene subset is set.",
required=False,
hidden="!genes",
allow_custom_choice=True,
choices=[
("Dictyostelium discoideum", "Dictyostelium discoideum"),
("Homo sapiens", "Homo sapiens"),
("Macaca mulatta", "Macaca mulatta"),
("Mus musculus", "Mus musculus"),
("Rattus norvegicus", "Rattus norvegicus"),
],
)
gene_source = StringField(
label="Gene ID database of selected genes",
description="This field is required if gene subset is set.",
required=False,
hidden="!genes",
)
distance = StringField(
label="Distance metric",
choices=[
("euclidean", "Euclidean"),
("spearman", "Spearman"),
("pearson", "Pearson"),
],
default="spearman",
)
linkage = StringField(
label="Linkage method",
choices=[
("single", "single"),
("average", "average"),
("complete", "complete"),
],
default="average",
)
ordering = BooleanField(
label="Use optimal ordering",
description="Results in a more intuitive tree structure, "
"but may slow down the clustering on large datasets",
default=False,
)
class Output:
"""Output field of the process ClusterTimeCourse."""
cluster = JsonField(label="Hieararhical clustering")
source = StringField(label="Gene ID database")
species = StringField(label="Species")
build = StringField(label="Build")
feature_type = StringField(label="Feature type")
def get_data_info(self, data_objects):
"""Get data time course labels and position sorted counts paths."""
data_positions = defaultdict(list)
labels = {}
for data in data_objects:
for relation in data.relations:
if relation.type == "series":
position, label = next(
(p.position, p.label)
for p in relation.partitions
if p.entity_id == data.entity_id
)
else:
self.error(
f"Relations of type series are not defined for {data.name}."
)
labels[position] = label
data_positions[position].append(data.output.exp.path)
return labels, data_positions.items()
def get_clustering(
self, expressions, linkage_method="average", metric="correlation", order=False
):
"""Compute linkage, order, and produce a dendrogram."""
try:
link = linkage(
y=expressions,
method=linkage_method,
metric=metric,
optimal_ordering=order,
)
except Exception as err:
self.error(f"Cannot compute linkage. Original error was: {err}")
try:
dend = dendrogram(link, no_plot=True)
except Exception as err:
self.error(f"Cannot compute dendrogram. Original error was: {err}")
return link, dend
def run(self, inputs, outputs):
"""Run the analysis."""
for exp in inputs.expressions:
if exp.output.source != inputs.expressions[0].output.source:
self.error(
"Input samples are of different Gene ID databases: "
f"{exp.output.source} and {inputs.expressions[0].output.source}."
)
if exp.output.species != inputs.expressions[0].output.species:
self.error(
"Input samples are of different Species: "
f"{exp.output.species} and {inputs.expressions[0].output.species}."
)
if exp.output.exp_type != inputs.expressions[0].output.exp_type:
self.error(
"Input samples are of different Expression types: "
f"{exp.output.exp_type} and {inputs.expressions[0].output.exp_type}."
)
if exp.output.feature_type != inputs.expressions[0].output.feature_type:
self.error(
"Input samples are of different Feature type: "
f"{exp.output.feature_type} and {inputs.expressions[0].output.feature_type}."
)
if inputs.genes:
if len(inputs.genes) == 1:
self.error("At least two genes have to be selected.")
if inputs.gene_species != inputs.expressions[0].output.species:
self.error(
"Selected genes must belong to the same species as "
"expression files. Instead genes belong to "
f"{inputs.gene_species} while expressions belong "
f"to {inputs.expressions[0].output.species}."
)
if inputs.gene_source != inputs.expressions[0].output.source:
self.error(
"Selected genes must be annotated by the same "
"genome database as expressions. Instead Gene IDs "
f"of genes are from {inputs.gene_source} and "
"expressions have IDs from "
f"{inputs.expressions[0].output.source}."
)
labels, positions = self.get_data_info(inputs.expressions)
if len(labels) == 1:
self.error(
"Only one time point was provided. At least two time "
"points are required to run hierarhical clustering."
)
expressions, excluded = join_expressions(
positions, labels, gene_set=inputs.genes
)
rows = expressions.index
if len(rows) < 2:
if inputs.genes:
self.error(
"At least two of the selected genes have to be "
"present in all samples to run hierarhical "
f"clustering. {len(rows)} found in "
"all samples."
)
else:
self.error(
"At least two genes shared across all samples are "
"required to run hierarhical clustering. "
f"{len(rows)} found in all samples."
)
if excluded:
suffix = "" if len(excluded) <= 3 else ", ..."
excluded_genes = ", ".join(excluded[:3])
self.warning(
"Genes not present in all of the selected samples are "
f"excluded from the analysis. Excluded {len(excluded)} "
f"of them ({excluded_genes + suffix})."
)
expressions, removed = remove_const_genes(expressions)
rows = expressions.index
if len(rows) == 0:
self.error(
"All genes have constant expression across time "
"points. Hierarchical clustering of genes cannot be "
"computed."
)
if len(rows) == 1:
self.error(
f"Only one gene ({rows[0]}) "
"has a non-constant expression across time points. "
"However, hierarchical clustering of genes cannot "
"be computed with just one gene."
)
suffix = "" if len(removed) <= 3 else ", ..."
if removed:
removed_genes = ", ".join(removed[:3])
self.warning(
f"{len(removed)} genes ({removed_genes+suffix}) have "
"constant expression across time points. Those genes "
"are excluded from the computation of hierarchical "
"clustering of genes."
)
link, dend = self.get_clustering(
expressions.values,
linkage_method=inputs.linkage,
metric=get_distance_metric(inputs.distance),
order=inputs.ordering,
)
result = {
"gene_symbols": {i: {"gene": gene} for i, gene in enumerate(rows)},
"linkage": link.tolist(),
"order": dend["leaves"],
}
with open("cluster.json", "w") as f:
json.dump(result, f)
outputs.cluster = "cluster.json"
outputs.source = inputs.expressions[0].output.source
outputs.species = inputs.expressions[0].output.species
outputs.build = inputs.expressions[0].output.build
outputs.feature_type = inputs.expressions[0].output.feature_type | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/processes/clustering/hierarchical_clustering_etc.py | 0.780412 | 0.453504 | hierarchical_clustering_etc.py | pypi |
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class GatkHaplotypeCaller(Process):
    """GATK HaplotypeCaller Variant Calling.

    Call germline SNPs and indels via local re-assembly of haplotypes.
    The HaplotypeCaller is capable of calling SNPs and indels simultaneously via local
    de-novo assembly of haplotypes in an active region. In other words, whenever the program
    encounters a region showing signs of variation, it discards the existing mapping information
    and completely reassembles the reads in that region. This allows the HaplotypeCaller to be
    more accurate when calling regions that are traditionally difficult to call, for example when
    they contain different types of variants close to each other. It also makes the HaplotypeCaller
    much better at calling indels than position-based callers like UnifiedGenotyper.
    """

    slug = "vc-gatk4-hc"
    name = "GATK4 (HaplotypeCaller)"
    category = "GATK"
    process_type = "data:variants:vcf:gatk:hc"
    version = "1.5.0"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    data_name = "{{ alignment|name|default('?') }}"

    class Input:
        """Input fields for GatkHaplotypeCaller."""

        alignment = DataField(
            data_type="alignment:bam", label="Analysis ready BAM file"
        )
        genome = DataField(data_type="seq:nucleotide", label="Reference genome")
        intervals_bed = DataField(
            data_type="bed",
            label="Intervals (from BED file)",
            description="Use this option to perform the analysis over only part of the genome.",
            required=False,
        )
        dbsnp = DataField(
            data_type="variants:vcf",
            label="dbSNP file",
            description="Database of known polymorphic sites.",
        )
        stand_call_conf = IntegerField(
            label="Min call confidence threshold",
            default=30,
            description="The minimum phred-scaled confidence threshold at which "
            "variants should be called.",
        )
        mbq = IntegerField(
            label="Min Base Quality",
            default=20,
            description="Minimum base quality required to consider a base for calling.",
        )
        # Fixed label typo: "aligment" -> "alignment".
        max_reads = IntegerField(
            label="Max reads per alignment start site",
            default=50,
            description="Maximum number of reads to retain per alignment start position. "
            "Reads above this threshold will be downsampled. Set to 0 to disable.",
        )

        class Advanced:
            """Advanced options."""

            interval_padding = IntegerField(
                label="Interval padding",
                required=False,
                description="Amount of padding (in bp) to add to each interval "
                "you are including. The recommended value is 100.",
                hidden="!intervals_bed",
            )
            soft_clipped = BooleanField(
                label="Do not analyze soft clipped bases in the reads",
                default=False,
                description="Suitable option for RNA-seq variant calling.",
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of "
                "the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields for GatkHaplotypeCaller."""

        vcf = FileField(label="VCF file")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # NOTE(review): assumes TMPDIR is set in the execution environment; it
        # is passed straight to GATK via --tmp-dir.
        TMPDIR = os.environ.get("TMPDIR")
        # Derive output file names from the input BAM file name.
        name = Path(inputs.alignment.output.bam.path).stem
        variants = name + ".gatkHC.vcf"
        variants_gz = variants + ".gz"
        variants_index = variants_gz + ".tbi"
        # Never request more GC threads than there are available cores.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced.java_gc_threads
        )
        args = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "-R",
            inputs.genome.output.fasta.path,
            "-I",
            inputs.alignment.output.bam.path,
            "-O",
            variants,
            "--dbsnp",
            inputs.dbsnp.output.vcf.path,
            "--min-base-quality-score",
            inputs.mbq,
            "--max-reads-per-alignment-start",
            inputs.max_reads,
            "--standard-min-confidence-threshold-for-calling",
            inputs.stand_call_conf,
            "--tmp-dir",
            TMPDIR,
        ]
        if inputs.advanced.soft_clipped:
            args.append("--dont-use-soft-clipped-bases")
        if inputs.intervals_bed:
            args.extend(["-L", inputs.intervals_bed.output.bed.path])
        if inputs.advanced.interval_padding:
            args.extend(["--interval-padding", inputs.advanced.interval_padding])
        return_code, stdout, stderr = Cmd["gatk"]["HaplotypeCaller"][args] & TEE(
            retcode=None
        )
        if return_code:
            # Surface the captured tool output before aborting.
            print(stdout, stderr)
            self.error("GATK HaplotypeCaller tool failed.")
        self.progress(0.8)
        # Compress and index the output variants file
        (Cmd["bgzip"]["-c", variants] > variants_gz)()
        self.progress(0.9)
        Cmd["tabix"]["-p", "vcf", variants_gz]()
        self.progress(0.95)
        outputs.vcf = variants_gz
        outputs.tbi = variants_index
        outputs.species = inputs.alignment.output.species
        outputs.build = inputs.alignment.output.build
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
FloatField,
GroupField,
Process,
SchedulingClass,
StringField,
)
class GatkHaplotypeCallerGvcf(Process):
    """Run GATK HaplotypeCaller in GVCF mode.

    Produces a per-sample GVCF (``-ERC GVCF``) suitable for later joint
    genotyping, plus its tabix index.
    """
    slug = "gatk-haplotypecaller-gvcf"
    name = "GATK HaplotypeCaller (GVCF)"
    category = "GATK"
    process_type = "data:variants:gvcf"
    version = "1.3.0"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 1,
            "memory": 8192,
        },
    }
    data_name = "{{ bam|name|default('?') }}"
    class Input:
        """Input fields for GatkHaplotypeCallerGvcf."""
        bam = DataField("alignment:bam", label="Analysis ready BAM file")
        ref_seq = DataField("seq:nucleotide", label="Reference sequence")
        class Options:
            """Options."""
            # Optional BED file restricting calling to given regions (-L).
            intervals = DataField(
                "bed",
                label="Use intervals BED file to limit the analysis to the specified parts of the genome.",
                required=False,
            )
            # Forwarded to HaplotypeCaller as -contamination.
            contamination = FloatField(
                label="Contamination fraction",
                default=0,
                description="Fraction of contamination in sequencing data (for all samples) to aggressively remove.",
            )
        options = GroupField(Options, label="Options")
    class Output:
        """Output fields for GatkHaplotypeCallerGvcf."""
        vcf = FileField(label="GVCF file")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")
    def run(self, inputs, outputs):
        """Run analysis."""
        # NOTE(review): assumes TMPDIR is set in the execution environment; it
        # is passed straight to GATK via --tmp-dir.
        TMPDIR = os.environ.get("TMPDIR")
        # Derive output file names from the input BAM file name.
        name = Path(inputs.bam.output.bam.path).stem
        variants = name + ".g.vcf"
        variants_gz = variants + ".gz"
        variants_index = variants_gz + ".tbi"
        args = [
            "-R",
            inputs.ref_seq.output.fasta.path,
            "-I",
            inputs.bam.output.bam.path,
            "-O",
            variants,
            "-contamination",
            inputs.options.contamination,
            "--tmp-dir",
            TMPDIR,
            # Annotation groups to attach to the calls.
            "-G",
            "StandardAnnotation",
            "-G",
            "StandardHCAnnotation",
            "-G",
            "AS_StandardAnnotation",
            # Genotype-quality band boundaries (10..90) for the GVCF
            # reference blocks.
            "-GQB",
            10,
            "-GQB",
            20,
            "-GQB",
            30,
            "-GQB",
            40,
            "-GQB",
            50,
            "-GQB",
            60,
            "-GQB",
            70,
            "-GQB",
            80,
            "-GQB",
            90,
            "-ERC",
            "GVCF",
        ]
        if inputs.options.intervals:
            args.extend(["-L", inputs.options.intervals.output.bed.path])
        return_code, _, _ = Cmd["gatk"]["HaplotypeCaller"][args] & TEE(retcode=None)
        if return_code:
            self.error("GATK HaplotypeCaller tool failed.")
        # Compress and index the output variants file
        (Cmd["bgzip"]["-c", variants] > variants_gz)()
        Cmd["tabix"]["-p", "vcf", variants_gz]()
        outputs.vcf = variants_gz
        outputs.tbi = variants_index
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
class VariantFiltrationVqsr(Process):
    """Filter WGS variants using Variant Quality Score Recalibration (VQSR) procedure.

    Pipeline: ExcessHet hard-filter -> sites-only VCF -> VariantRecalibrator
    (INDEL, then SNP) -> ApplyVQSR (INDEL, then SNP) -> bgzip + tabix.

    Bug fix: in the SNP ApplyVQSR step, ``--use-allele-specific-annotations``
    was appended to ``apply_vqsr_indels`` (a command that had already run)
    instead of ``apply_vqsr_snp``, so the flag was silently dropped for SNPs.
    """

    slug = "gatk-vqsr"
    name = "GATK filter variants (VQSR)"
    category = "GATK"
    process_type = "data:variants:vcf:vqsr"
    version = "1.2.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 4,
            "memory": 32768,
        },
    }
    data_name = "VQSR filtered variants"

    class Input:
        """Input fields for VariantFiltrationVqsr."""

        vcf = DataField("variants:vcf", label="Input data (VCF)")

        class ResourceFiles:
            """Resource files options."""

            dbsnp = DataField("variants:vcf", label="dbSNP file")
            mills = DataField(
                "variants:vcf",
                label="Mills and 1000G gold standard indels",
                required=False,
            )
            axiom_poly = DataField(
                "variants:vcf",
                label="1000G Axiom genotype data",
                required=False,
            )
            hapmap = DataField(
                "variants:vcf",
                label="HapMap variants",
                required=False,
            )
            omni = DataField(
                "variants:vcf",
                label="1000G Omni variants",
                required=False,
            )
            thousand_genomes = DataField(
                "variants:vcf",
                label="1000G high confidence SNPs",
                required=False,
            )

        class AdvancedOptions:
            """Advanced options."""

            use_as_anno = BooleanField(
                label="--use-allele-specific-annotations", default=False
            )
            indel_anno_fields = ListField(
                StringField(),
                label="Annotation fields (INDEL filtering)",
                default=[
                    "FS",
                    "ReadPosRankSum",
                    "MQRankSum",
                    "QD",
                    "SOR",
                    "DP",
                ],
            )
            snp_anno_fields = ListField(
                StringField(),
                label="Annotation fields (SNP filtering)",
                default=[
                    "QD",
                    "MQRankSum",
                    "ReadPosRankSum",
                    "FS",
                    "MQ",
                    "SOR",
                    "DP",
                ],
            )
            indel_filter_level = FloatField(
                label="--truth-sensitivity-filter-level (INDELs)", default=99.0
            )
            snp_filter_level = FloatField(
                label="--truth-sensitivity-filter-level (SNPs)", default=99.7
            )
            max_gaussians_indels = IntegerField(
                label="--max-gaussians (INDELs)",
                default=4,
                description="This parameter determines the maximum number "
                "of Gaussians that should be used when building a positive "
                "model using the variational Bayes algorithm. This parameter "
                "sets the expected number of clusters in modeling. If a "
                "dataset gives fewer distinct clusters, e.g. as can happen "
                "for smaller data, then the tool will tell you there is "
                "insufficient data with a No data found error message. "
                "In this case, try decrementing the --max-gaussians value.",
            )
            max_gaussians_snps = IntegerField(
                label="--max-gaussians (SNPs)",
                default=6,
                description="This parameter determines the maximum number "
                "of Gaussians that should be used when building a positive "
                "model using the variational Bayes algorithm. This parameter "
                "sets the expected number of clusters in modeling. If a "
                "dataset gives fewer distinct clusters, e.g. as can happen "
                "for smaller data, then the tool will tell you there is "
                "insufficient data with a No data found error message. "
                "In this case, try decrementing the --max-gaussians value.",
            )

        resource_files = GroupField(
            ResourceFiles,
            label="Resource files",
        )
        advanced_options = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for VariantFiltrationVqsr."""

        vcf = FileField(label="GVCF file")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # NOTE(review): assumes TMPDIR is set in the execution environment.
        TMPDIR = os.environ.get("TMPDIR")
        # Final and intermediate file names used by the pipeline below.
        variants = "snp.recalibrated.vcf"
        variants_gz = variants + ".gz"
        variants_index = variants_gz + ".tbi"
        species = inputs.vcf.output.species
        build = inputs.vcf.output.build
        excesshet_vcf = "cohort_excesshet.vcf.gz"
        sites_only_vcf = "cohort_sitesonly.vcf.gz"
        tmp_indel_recal_vcf = "indel.recalibrated.vcf"
        indels_recal = "cohort_indels.recal"
        snps_recal = "cohort_snps.recal"
        indels_tranches = "cohort_indels.tranches"
        snps_tranches = "cohort_snps.tranches"
        # Hard-filter a large cohort callset on ExcessHet
        variant_filtration = [
            "-V",
            inputs.vcf.output.vcf.path,
            "--filter-expression",
            "ExcessHet > 54.69",
            "--filter-name",
            "ExcessHet",
            "-O",
            excesshet_vcf,
            "--TMP_DIR",
            TMPDIR,
        ]
        return_code, _, _ = Cmd["gatk"]["VariantFiltration"][variant_filtration] & TEE(
            retcode=None
        )
        if return_code:
            self.error("GATK VariantFiltration tool failed.")
        # Create sites-only VCF
        make_sites_only_args = [
            "-I",
            excesshet_vcf,
            "-O",
            sites_only_vcf,
            "--TMP_DIR",
            TMPDIR,
        ]
        return_code, _, _ = Cmd["gatk"]["MakeSitesOnlyVcf"][make_sites_only_args] & TEE(
            retcode=None
        )
        if return_code:
            self.error("GATK MakeSitesOnlyVcf tool failed.")
        # Truth-sensitivity tranche thresholds for the recalibration models.
        indel_recalibration_tranche_values = [
            "100.0",
            "99.95",
            "99.9",
            "99.5",
            "99.0",
            "97.0",
            "96.0",
            "95.0",
            "94.0",
            "93.5",
            "93.0",
            "92.0",
            "91.0",
            "90.0",
        ]
        snp_recalibration_tranche_values = [
            "100.0",
            "99.95",
            "99.9",
            "99.8",
            "99.6",
            "99.5",
            "99.4",
            "99.3",
            "99.0",
            "98.0",
            "97.0",
            "90.0",
        ]
        # Build the INDEL recalibration model.
        args_indels = [
            "-V",
            sites_only_vcf,
            "--trust-all-polymorphic",
            "-mode",
            "INDEL",
            "--max-gaussians",
            inputs.advanced_options.max_gaussians_indels,
            "-O",
            indels_recal,
            "--tranches-file",
            indels_tranches,
            "--resource:dbsnp,known=true,training=false,truth=false,prior=2",
            inputs.resource_files.dbsnp.output.vcf.path,
            "--TMP_DIR",
            TMPDIR,
        ]
        if inputs.resource_files.mills:
            args_indels.extend(
                [
                    "--resource:mills,known=false,training=true,truth=true,prior=12",
                    inputs.resource_files.mills.output.vcf.path,
                ]
            )
        if inputs.resource_files.axiom_poly:
            args_indels.extend(
                [
                    "--resource:axiomPoly,known=false,training=true,truth=false,prior=10",
                    inputs.resource_files.axiom_poly.output.vcf.path,
                ]
            )
        for tr_val in indel_recalibration_tranche_values:
            args_indels.extend(["-tranche", tr_val])
        for anno_val in inputs.advanced_options.indel_anno_fields:
            args_indels.extend(["-an", anno_val])
        if inputs.advanced_options.use_as_anno:
            args_indels.append("--use-allele-specific-annotations")
        return_code, _, _ = Cmd["gatk"]["VariantRecalibrator"][args_indels] & TEE(
            retcode=None
        )
        if return_code:
            self.error("GATK VariantRecalibrator (INDELs) tool failed.")
        # Build the SNP recalibration model.
        args_snps = [
            "-V",
            sites_only_vcf,
            "--trust-all-polymorphic",
            "-mode",
            "SNP",
            "--max-gaussians",
            inputs.advanced_options.max_gaussians_snps,
            "-O",
            snps_recal,
            "--tranches-file",
            snps_tranches,
            "-resource:dbsnp,known=true,training=false,truth=false,prior=7",
            inputs.resource_files.dbsnp.output.vcf.path,
            "--TMP_DIR",
            TMPDIR,
        ]
        if inputs.resource_files.hapmap:
            args_snps.extend(
                [
                    "--resource:hapmap,known=false,training=true,truth=true,prior=15",
                    inputs.resource_files.hapmap.output.vcf.path,
                ]
            )
        if inputs.resource_files.omni:
            args_snps.extend(
                [
                    "-resource:omni,known=false,training=true,truth=true,prior=12",
                    inputs.resource_files.omni.output.vcf.path,
                ]
            )
        if inputs.resource_files.thousand_genomes:
            args_snps.extend(
                [
                    "-resource:1000G,known=false,training=true,truth=false,prior=10",
                    inputs.resource_files.thousand_genomes.output.vcf.path,
                ]
            )
        for tr_val in snp_recalibration_tranche_values:
            args_snps.extend(["-tranche", tr_val])
        for anno_val in inputs.advanced_options.snp_anno_fields:
            args_snps.extend(["-an", anno_val])
        if inputs.advanced_options.use_as_anno:
            args_snps.append("--use-allele-specific-annotations")
        return_code, _, _ = Cmd["gatk"]["VariantRecalibrator"][args_snps] & TEE(
            retcode=None
        )
        if return_code:
            self.error("GATK VariantRecalibrator (SNPs) tool failed.")
        # ApplyVQSR
        apply_vqsr_indels = [
            "-O",
            tmp_indel_recal_vcf,
            "-V",
            excesshet_vcf,
            "--recal-file",
            indels_recal,
            "--tranches-file",
            indels_tranches,
            "--truth-sensitivity-filter-level",
            inputs.advanced_options.indel_filter_level,
            "--create-output-variant-index",
            "true",
            "-mode",
            "INDEL",
            "--TMP_DIR",
            TMPDIR,
        ]
        if inputs.advanced_options.use_as_anno:
            apply_vqsr_indels.append("--use-allele-specific-annotations")
        return_code, _, _ = Cmd["gatk"]["ApplyVQSR"][apply_vqsr_indels] & TEE(
            retcode=None
        )
        if return_code:
            self.error("GATK ApplyVQSR (INDEL) tool failed.")
        apply_vqsr_snp = [
            "-O",
            variants,
            "-V",
            tmp_indel_recal_vcf,
            "--recal-file",
            snps_recal,
            "--tranches-file",
            snps_tranches,
            "--truth-sensitivity-filter-level",
            inputs.advanced_options.snp_filter_level,
            "--create-output-variant-index",
            "false",
            "-mode",
            "SNP",
            "--TMP_DIR",
            TMPDIR,
        ]
        if inputs.advanced_options.use_as_anno:
            # BUGFIX: previously this flag was appended to apply_vqsr_indels,
            # whose command had already run, so it never reached the SNP step.
            apply_vqsr_snp.append("--use-allele-specific-annotations")
        return_code, _, _ = Cmd["gatk"]["ApplyVQSR"][apply_vqsr_snp] & TEE(retcode=None)
        if return_code:
            self.error("GATK ApplyVQSR (SNPs) tool failed.")
        # Compress and index the output variants file
        (Cmd["bgzip"]["-c", variants] > variants_gz)()
        Cmd["tabix"]["-p", "vcf", variants_gz]()
        outputs.vcf = variants_gz
        outputs.tbi = variants_index
        outputs.species = species
        outputs.build = build
import os
from pathlib import Path
from joblib import Parallel, delayed, wrap_non_picklable_objects
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
from resolwe.process.fields import DirField
# Temporary directory handed to GATK via --tmp-dir / --TMP_DIR.
# NOTE(review): assumes TMPDIR is set in the execution environment.
TMPDIR = os.environ.get("TMPDIR")
def create_vcf_path(interval_path):
    """Return the per-interval output VCF path, named after the interval file stem."""
    stem = interval_path.stem
    return "cohort_variants/" + stem + ".vcf"
@delayed
@wrap_non_picklable_objects
def run_genotype_gvcfs(interval_path, ref_seq_path, db_path, dbsnp_path, java_memory):
    """Run genotyping on a specified interval.

    Invokes GATK GenotypeGVCFs for a single interval file, reading from the
    GenomicsDB workspace (``gendb://``) and writing a per-interval VCF whose
    path is derived via ``create_vcf_path``. Returns the tool's exit code
    (0 on success); output is streamed via plumbum TEE.
    """
    variants_interval = create_vcf_path(interval_path)
    genotype_gvcfs_inputs = [
        "-R",
        ref_seq_path,
        "-V",
        # Read the consolidated cohort data from a GenomicsDB workspace.
        f"gendb://{db_path}",
        "-O",
        variants_interval,
        "-L",
        interval_path,
        "-D",
        dbsnp_path,
        "-G",
        "StandardAnnotation",
        "-G",
        "AS_StandardAnnotation",
        # Avoid duplicate calls at interval boundaries when results are merged.
        "--only-output-calls-starting-in-intervals",
        "--java-options",
        f"-Xmx{java_memory}g",
        "--tmp-dir",
        TMPDIR,
    ]
    return_code, _, _ = Cmd["gatk"]["GenotypeGVCFs"][genotype_gvcfs_inputs] & TEE(
        retcode=None
    )
    return return_code
class GatkGenotypeGVCFs(Process):
    """Consolidate GVCFs and run joint calling using GenotypeGVCFs tool.

    Bug fix: the SplitIntervals failure check used ``if return_code is None``,
    but with ``TEE(retcode=None)`` plumbum returns the integer exit code and
    never ``None``, so failures were silently ignored. The check is now
    ``if return_code:`` (non-zero exit code), consistent with the other tool
    invocations in this process.
    """

    slug = "gatk-genotype-gvcfs"
    name = "GATK GenotypeGVCFs"
    category = "GATK"
    process_type = "data:variants:vcf:genotypegvcfs"
    version = "2.3.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 16,
            "memory": 32768,
            "storage": 200,
        },
    }
    data_name = "Cohort variants"

    class Input:
        """Input fields for GatkGenotypeGVCFs."""

        database = DataField("genomicsdb", label="GATK GenomicsDB")
        ref_seq = DataField("seq:nucleotide", label="Reference sequence")
        dbsnp = DataField("variants:vcf", label="dbSNP file")

        class AdvancedOptions:
            """Advanced options."""

            # Fixed label typo: "concurent" -> "concurrent".
            n_jobs = IntegerField(
                label="Number of concurrent jobs",
                description="Use a fixed number of jobs for genotyping "
                "instead of determining it based on the number of available "
                "cores.",
                required=False,
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size in GB (Xmx)",
                default=28,
                description="Set the maximum Java heap size.",
            )

        advanced_options = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for GatkGenotypeGVCFs."""

        vcf = FileField(label="GVCF file")
        vcf_dir = DirField(label="Folder with split GVCFs")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        variants = "cohort_variants.vcf"
        variants_gz = variants + ".gz"
        variants_index = variants_gz + ".tbi"
        intervals_path = Path("intervals_folder")
        Path("cohort_variants").mkdir(exist_ok=True)
        intervals_path.mkdir(exist_ok=True)
        # One job per scattered interval; fall back to the available cores.
        if inputs.advanced_options.n_jobs:
            n_jobs = max(inputs.advanced_options.n_jobs, 1)
        else:
            n_jobs = max(self.requirements.resources.cores, 1)
        split_intervals_inputs = [
            "-R",
            inputs.ref_seq.output.fasta.path,
            "-L",
            inputs.database.output.intervals.path,
            "--scatter-count",
            n_jobs,
            "-O",
            str(intervals_path),
            "--tmp-dir",
            TMPDIR,
        ]
        return_code, _, _ = Cmd["gatk"]["SplitIntervals"][split_intervals_inputs] & TEE(
            retcode=None
        )
        # BUGFIX: TEE(retcode=None) yields the integer exit code (never None),
        # so test for a non-zero exit code instead of "is None".
        if return_code:
            self.error("Could not create equally sized intervals from the bedfile.")
        intervals = [path for path in intervals_path.glob("*.interval_list")]
        # Heap per worker: bounded by both the available memory and the user cap.
        java_memory = min(
            int(self.requirements.resources.memory / 1024),
            inputs.advanced_options.max_heap_size,
        )
        # Genotype all intervals in parallel; each call returns an exit code.
        return_codes = Parallel(n_jobs=n_jobs)(
            run_genotype_gvcfs(
                interval_path,
                ref_seq_path=inputs.ref_seq.output.fasta.path,
                db_path=inputs.database.output.database.path,
                dbsnp_path=inputs.dbsnp.output.vcf.path,
                java_memory=java_memory,
            )
            for interval_path in intervals
        )
        outputs.vcf_dir = "cohort_variants"
        if any(return_codes):
            self.error("GATK GenotypeGVCFs tool failed.")
        # Gather the per-interval VCFs in interval order into one cohort VCF.
        merge_list_file = "merge_files.list"
        with open(merge_list_file, "w") as f:
            for interval_path in sorted(intervals):
                f.write(f"{create_vcf_path(interval_path)}\n")
        self.progress(0.8)
        merge_inputs = [
            "-I",
            merge_list_file,
            "-O",
            variants,
            "-R",
            inputs.ref_seq.output.fasta.path,
            "--CREATE_INDEX",
            "false",
            "--TMP_DIR",
            TMPDIR,
        ]
        return_code, _, _ = Cmd["gatk"]["GatherVcfs"][merge_inputs] & TEE(retcode=None)
        if return_code:
            self.error("GATK GatherVcfs tool failed.")
        # Compress and index the output variants file
        (Cmd["bgzip"]["-c", variants] > variants_gz)()
        Cmd["tabix"]["-p", "vcf", variants_gz]()
        outputs.vcf = variants_gz
        outputs.tbi = variants_index
        outputs.species = inputs.database.output.species
        outputs.build = inputs.database.output.build
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
DateField,
FileField,
GroupField,
IntegerField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class CheMut(Process):
    """CheMut variant calling using multiple BAM input files."""
    # Process metadata and execution requirements.
    slug = "vc-chemut"
    name = "Variant calling (CheMut)"
    category = "WGS"
    process_type = "data:variants:vcf:chemut"
    version = "3.0.1"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    data_name = "Called variants (CheMut)"
    class Input:
        """Input fields for CheMut."""
        genome = DataField(data_type="seq:nucleotide", label="Reference genome")
        parental_strains = ListField(
            DataField(data_type="alignment:bam"), label="Parental strains"
        )
        mutant_strains = ListField(
            DataField(data_type="alignment:bam"), label="Mutant strains"
        )
        # When enabled, known_sites and known_indels become mandatory
        # (validated in run()).
        base_recalibration = BooleanField(
            label="Do variant base recalibration", default=False
        )
        known_sites = DataField(
            data_type="variants:vcf",
            label="dbSNP file",
            description="Database of known polymorphic sites.",
            required=False,
        )
        known_indels = ListField(
            DataField(data_type="variants:vcf"),
            label="Known indels",
            required=False,
            hidden="!base_recalibration",
        )
        class ReadsInfo:
            """Reads information.

            These values populate the BAM read-group (RG) tags set by
            AddOrReplaceReadGroups in run().
            """
            PL = StringField(
                label="Platform/technology",
                description="Platform/technology used to produce the reads.",
                choices=[
                    ("Capillary", "Capillary"),
                    ("Ls454", "Ls454"),
                    ("Illumina", "Illumina"),
                    ("SOLiD", "SOLiD"),
                    ("Helicos", "Helicos"),
                    ("IonTorrent", "IonTorrent"),
                    ("Pacbio", "Pacbio"),
                ],
                default="Illumina",
            )
            LB = StringField(label="Library", default="x")
            PU = StringField(
                label="Platform unit",
                default="x",
                description="Platform unit (e.g. flowcell-barcode.lane for "
                "Illumina or slide for SOLiD). Unique identifier.",
            )
            CN = StringField(
                label="Sequencing center",
                default="x",
                description="Name of sequencing center producing the read.",
            )
            DT = DateField(
                label="Date",
                default="2017-01-01",
                description="Date the run was produced.",
            )
        class HaplotypeCaller:
            """Options for GATK HaplotypeCaller."""
            intervals = DataField(
                data_type="bed",
                label="Intervals (from BED file)",
                description="Use this option to perform the analysis over only part of the genome.",
                required=False,
            )
            ploidy = IntegerField(
                label="Sample ploidy",
                description="Ploidy (number of chromosomes) per sample. For pooled data, set "
                "to (Number of samples in each pool * Sample Ploidy).",
                default=2,
            )
            stand_call_conf = IntegerField(
                label="Min call confidence threshold",
                default=30,
                description="The minimum phred-scaled confidence threshold at which "
                "variants should be called.",
            )
            mbq = IntegerField(
                label="Min Base Quality",
                default=10,
                description="Minimum base quality required to consider a base for calling.",
            )
            max_reads = IntegerField(
                label="Max reads per alignment start site",
                default=50,
                description="Maximum number of reads to retain per alignment start position. "
                "Reads above this threshold will be downsampled. Set to 0 to disable.",
            )
        class Advanced:
            """Advanced options."""
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of "
                "the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )
        reads_info = GroupField(ReadsInfo, label="Reads information")
        hc = GroupField(HaplotypeCaller, label="HaplotypeCaller options")
        advanced = GroupField(Advanced, label="Advanced options")
    class Output:
        """Output fields for CheMut."""
        vcf = FileField(label="Called variants file")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")
def run(self, inputs, outputs):
    """Run analysis.

    Each parental/mutant strain BAM is preprocessed (MarkDuplicates,
    read-group assignment, optional BQSR) and the resulting BAM files
    are then passed jointly to GATK HaplotypeCaller.
    """
    # BQSR needs both known-sites and known-indels VCF files.
    if (inputs.base_recalibration and not inputs.known_sites) or (
        inputs.base_recalibration and not inputs.known_indels
    ):
        self.error(
            "Variant base recalibration requires known sites/indels "
            "information in the form of user provided VCF files."
        )

    # Sample names are derived from BAM file names and must be unique.
    samples = [
        Path(bam.output.bam.path).stem
        for bam in inputs.parental_strains + inputs.mutant_strains
    ]
    if len(samples) > len(set(samples)):
        self.error("Sample names must be unique.")

    gc_threads = min(
        self.requirements.resources.cores, inputs.advanced.java_gc_threads
    )
    TMPDIR = os.environ.get("TMPDIR")

    samples_list = "samples.list"
    # The 1-based counter provides a unique read-group ID per sample.
    # (Materializing enumerate() with list() and manually incrementing
    # counter at the end of the loop were both redundant and have been
    # removed; enumerate reassigns counter on every iteration.)
    for counter, bam in enumerate(
        inputs.parental_strains + inputs.mutant_strains, start=1
    ):
        bam_file = Path(bam.output.bam.path).stem

        args_markduplicates = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            f"INPUT={bam.output.bam.path}",
            f"OUTPUT={bam_file}_inds.bam",
            "METRICS_FILE=junk.txt",
            "VALIDATION_STRINGENCY=LENIENT",
            f"TMP_DIR={TMPDIR}",
        ]
        return_code, stdout, stderr = Cmd["gatk"]["MarkDuplicates"][
            args_markduplicates
        ] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("GATK MarkDuplicates tool failed.")

        args_groups = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            f"INPUT={bam_file}_inds.bam",
            f"OUTPUT={bam_file}_indh.bam",
            f"RGID=ReadGroup_{counter}",
            f"RGLB={inputs.reads_info.LB}",
            f"RGPL={inputs.reads_info.PL}",
            f"RGPU={inputs.reads_info.PU}",
            f"RGCN={inputs.reads_info.CN}",
            f"RGDT={inputs.reads_info.DT}",
            f"TMP_DIR={TMPDIR}",
            "CREATE_INDEX=TRUE",
        ]
        # Prefix sample names so parental and mutant strains can be
        # distinguished downstream.
        if bam in inputs.parental_strains:
            args_groups.append(f"RGSM=parental_{bam_file}")
        else:
            args_groups.append(f"RGSM=mut_{bam_file}")
        return_code, stdout, stderr = Cmd["gatk"]["AddOrReplaceReadGroups"][
            args_groups
        ] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("GATK AddOrReplaceReadGroups tool failed.")

        if inputs.base_recalibration:
            args_br = [
                "--java-options",
                f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
                "--input",
                f"{bam_file}_indh.bam",
                "--reference",
                # NOTE(review): BaseRecalibrator uses inputs.genome while
                # ApplyBQSR below uses inputs.reference — confirm both
                # fields point to the same reference FASTA.
                inputs.genome.output.fasta.path,
                "--output",
                "recal_data.table",
                "--tmp-dir",
                TMPDIR,
            ]
            if inputs.known_sites:
                args_br.extend(
                    ["--known-sites", inputs.known_sites.output.vcf.path]
                )
            if inputs.known_indels:
                for indel in inputs.known_indels:
                    args_br.extend(["--known-sites", indel.output.vcf.path])
            return_code, stdout, stderr = Cmd["gatk"]["BaseRecalibrator"][
                args_br
            ] & TEE(retcode=None)
            if return_code:
                print(stdout, stderr)
                self.error("GATK BaseRecalibrator tool failed.")

            args_ab = [
                "--java-options",
                f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
                "--input",
                f"{bam_file}_indh.bam",
                "--output",
                f"{bam_file}_final.bam",
                "--reference",
                inputs.reference.output.fasta.path,
                "--bqsr-recal-file",
                "recal_data.table",
                "--tmp-dir",
                TMPDIR,
            ]
            return_code, stdout, stderr = Cmd["gatk"]["ApplyBQSR"][args_ab] & TEE(
                retcode=None
            )
            if return_code:
                print(stdout, stderr)
                self.error("GATK ApplyBQSR tool failed.")

            with open(samples_list, "a") as f:
                f.write(f"{bam_file}_final.bam\n")
        else:
            with open(samples_list, "a") as f:
                f.write(f"{bam_file}_indh.bam\n")

    variants_gz = "GATKvariants_raw.vcf.gz"
    variants_index = variants_gz + ".tbi"

    # Joint variant calling over all preprocessed BAM files (GATK accepts
    # a *.list file of BAM paths via --input).
    args_hc = [
        "--java-options",
        f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
        "--input",
        samples_list,
        "--reference",
        inputs.genome.output.fasta.path,
        "--output",
        variants_gz,
        "--standard-min-confidence-threshold-for-calling",
        inputs.hc.stand_call_conf,
        "--min-base-quality-score",
        inputs.hc.mbq,
        "--max-reads-per-alignment-start",
        inputs.hc.max_reads,
        "--sample-ploidy",
        inputs.hc.ploidy,
    ]
    if inputs.known_sites:
        # NOTE(review): GATK4 HaplotypeCaller does not document a
        # --known-sites argument (BaseRecalibrator does) — confirm
        # whether this should be --dbsnp.
        args_hc.extend(["--known-sites", inputs.known_sites.output.vcf.path])
    if inputs.hc.intervals:
        args_hc.extend(["-L", inputs.hc.intervals.output.bed.path])
    return_code, stdout, stderr = Cmd["gatk"]["HaplotypeCaller"][args_hc] & TEE(
        retcode=None
    )
    if return_code:
        print(stdout, stderr)
        self.error("GATK HaplotypeCaller tool failed.")

    outputs.vcf = variants_gz
    outputs.tbi = variants_index
    outputs.species = inputs.parental_strains[0].output.species
    outputs.build = inputs.parental_strains[0].output.build
import os
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
from resolwe.process.fields import DirField
class GenomicsDBImport(Process):
    """Import single-sample GVCFs into GenomicsDB before joint genotyping."""

    slug = "gatk-genomicsdb-import"
    name = "GATK GenomicsDBImport"
    category = "GATK"
    process_type = "data:genomicsdb"
    version = "1.3.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 4,
            "memory": 32768,
            "storage": 200,
        },
    }
    data_name = '{{ "GATK GenomicsDB (%s %s)"|format(gvcfs|length, "samples added" if use_existing else "samples" ) }}'

    class Input:
        """Input fields for GenomicsDBImport."""

        gvcfs = ListField(
            DataField("variants:gvcf"),
            label="Input data (GVCF)",
        )
        intervals = DataField(
            "bed",
            label="Intervals file (.bed)",
            description="Intervals file is required if a new database will be "
            "created.",
            required=False,
        )
        use_existing = BooleanField(
            label="Add new samples to an existing GenomicsDB workspace",
            default=False,
        )
        existing_db = DataField(
            "genomicsdb",
            label="Select a GATK GenomicsDB object",
            description="Instead of creating a new database the GVCFs are "
            "added to this database and a new GenomicsDB object is created.",
            required=False,
            hidden="!use_existing",
        )

        class AdvancedOptions:
            """Advanced options."""

            batch_size = IntegerField(
                label="Batch size",
                default=0,
                description="Batch size controls the number of samples "
                "for which readers are open at once and therefore provides "
                "a way to minimize memory consumption. However, it can "
                "take longer to complete. Use the consolidate flag if more "
                "than a hundred batches were used. This will improve feature "
                "read time. batchSize=0 means no batching "
                "(i.e. readers for all samples will be opened at once).",
            )
            consolidate = BooleanField(
                label="Consolidate",
                default=False,
                description="Boolean flag to enable consolidation. If "
                "importing data in batches, a new fragment is created for "
                "each batch. In case thousands of fragments are created, "
                "GenomicsDB feature readers will try to open ~20x as many "
                "files. Also, internally GenomicsDB would consume more "
                "memory to maintain bookkeeping data from all fragments. "
                "Use this flag to merge all fragments into one. Merging "
                "can potentially improve read performance, however overall "
                "benefit might not be noticeable as the top Java layers "
                "have significantly higher overheads. This flag has no "
                "effect if only one batch is used.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size in GB (Xmx)",
                default=28,
                description="Set the maximum Java heap size.",
            )
            use_cms_gc = BooleanField(
                label="Use CMS Garbage Collector in Java",
                default=True,
                description="The Concurrent Mark Sweep (CMS) implementation uses multiple garbage "
                "collector threads for garbage collection.",
            )

        advanced_options = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for GenomicsDBImport."""

        database = DirField(label="GenomicsDB workspace")
        intervals = FileField(label="Intervals file")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        database_folder = "database"
        sample_map_file = "sample_map.txt"

        # All input GVCFs must share the same species and genome build.
        species = inputs.gvcfs[0].output.species
        if any(gvcf.output.species != species for gvcf in inputs.gvcfs):
            self.error("Not all of the input samples are of the same species.")
        build = inputs.gvcfs[0].output.build
        if any(gvcf.output.build != build for gvcf in inputs.gvcfs):
            self.error("Not all of the input samples have the same genome build.")

        # GATK sample map: one "<sample name>\t<gvcf path>" line per sample.
        with open(sample_map_file, "w") as sample_map:
            for gvcf in inputs.gvcfs:
                sample_map.write(f"{gvcf.entity_name}\t{gvcf.output.vcf.path}\n")

        if inputs.use_existing and inputs.existing_db is None:
            self.error(
                "GATK GenomicsDB object has to be provided to add GVCFs to the existing "
                "database."
            )
        elif inputs.use_existing and inputs.existing_db:
            # Update mode: copy the existing workspace and add samples to
            # the copy; intervals are carried over from the existing object.
            if species != inputs.existing_db.output.species:
                self.error("The existing database and GVCFs species differ.")
            if build != inputs.existing_db.output.build:
                self.error("The existing database and GVCFs build differ.")
            shutil.copytree(inputs.existing_db.output.database.path, database_folder)
            db_import_args = [
                "--genomicsdb-update-workspace-path",
                database_folder,
            ]
            intervals = Path(inputs.existing_db.output.intervals.path)
        elif inputs.intervals:
            # Create mode: a brand new workspace needs explicit intervals.
            db_import_args = [
                "--genomicsdb-workspace-path",
                database_folder,
                "-L",
                inputs.intervals.output.bed.path,
            ]
            intervals = Path(inputs.intervals.output.bed.path)
        else:
            # self.error raises, so `intervals` is always bound below.
            self.error("Intervals file is required for creating a new database.")

        # Cap the Java heap at the memory actually granted to the process.
        java_memory = min(
            int(self.requirements.resources.memory / 1024),
            inputs.advanced_options.max_heap_size,
        )
        java_options = f"-Xmx{java_memory}g"
        if inputs.advanced_options.use_cms_gc:
            java_options += " -XX:+UseConcMarkSweepGC"

        db_import_args.extend(
            [
                "--sample-name-map",
                sample_map_file,
                "--batch-size",
                inputs.advanced_options.batch_size,
                "--reader-threads",
                min(self.requirements.resources.cores, 5),
                "--verbosity",
                "DEBUG",
                "--tmp-dir",
                # NOTE(review): assumes TMPDIR is set in the executor
                # environment; os.environ.get returns None otherwise.
                os.environ.get("TMPDIR"),
                "--java-options",
                java_options,
            ]
        )
        if inputs.advanced_options.consolidate:
            db_import_args.append("--consolidate")

        return_code, stdout, stderr = Cmd["gatk"]["GenomicsDBImport"][
            db_import_args
        ] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("GATK GenomicsDBImport tool failed.")

        # Expose the intervals file in the working directory via a symlink.
        output_bed = f"./{intervals.name}"
        Path(output_bed).symlink_to(str(intervals))

        outputs.intervals = output_bed
        outputs.database = database_folder
        outputs.species = species
        outputs.build = build
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
def return_sample_count(vcf, error):
    """Count number of samples in the input VCF file.

    Pipes ``bcftools query -l`` (one sample name per line) through
    ``wc -l``; on any failure the provided ``error`` callback is invoked.
    """
    list_samples = Cmd["bcftools"]["query"]["-l", vcf]
    count_lines = Cmd["wc"]["-l"]
    try:
        raw_count = (list_samples | count_lines)().strip()
        return int(raw_count)
    except Exception as err:  # command failure or unparsable output
        error(
            f"Unable to determine sample count in VCF file. Original error was: {err}"
        )
class GatkVariantFiltration(Process):
    """Filter multi-sample variant calls based on INFO and/or FORMAT annotations.

    This tool is designed for hard-filtering variant calls based on certain criteria.
    Records are hard-filtered by changing the value in the FILTER field to something
    other than PASS. Passing variants are annotated as PASS and failing variants are
    annotated with the name(s) of the filter(s) they failed. If you want to remove
    failing variants, use GATK SelectVariants process.
    """

    slug = "gatk-variant-filtration"
    process_type = "data:variants:vcf:variantfiltration"
    name = "GATK VariantFiltration (multi-sample)"
    version = "1.3.0"
    category = "GATK"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    data_name = "Filtered variants"

    class Input:
        """Input fields for GatkVariantFiltration."""

        vcf = DataField(data_type="variants:vcf", label="Input data (VCF)")
        ref_seq = DataField(data_type="seq:nucleotide", label="Reference sequence")
        filter_expressions = ListField(
            StringField(),
            label="Expressions used with INFO fields to filter",
            description="VariantFiltration accepts any number of JEXL expressions "
            "(so you can have two named filters by using --filter-name One "
            "--filter-expression 'X < 1' --filter-name Two --filter-expression 'X > 2'). "
            "It is preferable to use multiple expressions, each specifying an individual "
            "filter criteria, to a single compound expression that specifies multiple "
            "filter criteria. Input expressions one by one and press ENTER after each "
            "expression. Examples of filter expression: 'FS > 30', 'DP > 10'.",
            required=False,
        )
        filter_name = ListField(
            StringField(),
            label="Names to use for the list of filters",
            description="This name is put in the FILTER field for variants that get "
            "filtered. Note that there must be a 1-to-1 mapping between filter expressions "
            "and filter names. Input expressions one by one and press ENTER after each name. "
            "Warning: filter names should be in the same order as filter expressions. "
            "Example: you specified filter expressions 'FS > 30' and 'DP > 10', now "
            "specify filter names 'FS' and 'DP'.",
            required=False,
        )
        genotype_filter_expressions = ListField(
            StringField(),
            label="Expressions used with FORMAT field to filter",
            description="Similar to the INFO field based expressions, but used on the FORMAT "
            "(genotype) fields instead. VariantFiltration will add the sample-level FT tag to "
            "the FORMAT field of filtered samples (this does not affect the record's FILTER tag). "
            "One can filter normally based on most fields (e.g. 'GQ < 5.0'), but the GT "
            "(genotype) field is an exception. We have put in convenience methods so that "
            "one can now filter out hets ('isHet == 1'), refs ('isHomRef == 1'), or homs "
            "('isHomVar == 1'). Also available are expressions isCalled, isNoCall, isMixed, "
            "and isAvailable, in accordance with the methods of the Genotype object. "
            "To filter by alternative allele depth, use the expression: 'AD.1 < 5'. This "
            "filter expression will filter all the samples in the multi-sample VCF file.",
            required=False,
        )
        genotype_filter_name = ListField(
            StringField(),
            label="Names to use for the list of genotype filters",
            description="Similar to the INFO field based expressions, but used on the FORMAT "
            "(genotype) fields instead. Warning: filter names should be in the same order as "
            "filter expressions.",
            required=False,
        )
        mask = DataField(
            data_type="variants:vcf",
            label="Input mask",
            description="Any variant which overlaps entries from the provided "
            "mask file will be filtered.",
            required=False,
        )
        mask_name = StringField(
            label="The text to put in the FILTER field if a 'mask' is provided",
            description="When using the mask file, the mask name will be annotated in "
            "the variant record.",
            required=False,
            disabled="!mask",
        )

        class Advanced:
            """Advanced options."""

            cluster = IntegerField(
                label="Cluster size",
                default=3,
                description="The number of SNPs which make up a cluster. Must be at least 2.",
            )
            window = IntegerField(
                label="Window size",
                default=0,
                description="The window size (in bases) in which to evaluate clustered SNPs.",
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of "
                "the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(
            Advanced,
            label="Advanced options",
        )

    class Output:
        """Output fields for GatkVariantFiltration."""

        vcf = FileField(label="Filtered variants (VCF)")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        filtered_variants = "filtered_variants.vcf.gz"
        filtered_variants_index = filtered_variants + ".tbi"

        # Check the VCF file content: this process is multi-sample only.
        sample_count = return_sample_count(
            vcf=inputs.vcf.output.vcf.path, error=self.error
        )
        if not sample_count > 1:
            self.error(
                f"The input VCF file should contain data for multiple samples. "
                f"The input contains data for {sample_count} sample(s)."
            )

        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced.java_gc_threads
        )

        args = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "-V",
            inputs.vcf.output.vcf.path,
            "-R",
            inputs.ref_seq.output.fasta.path,
            "-O",
            filtered_variants,
            "--window",
            inputs.advanced.window,
            "--cluster",
            inputs.advanced.cluster,
            "--tmp-dir",
            TMPDIR,
        ]

        if inputs.filter_expressions:
            # The optional filter_name field may be None when omitted;
            # "or []" turns that into a clean mismatch error instead of a
            # TypeError from len(None).
            if len(inputs.filter_expressions) != len(inputs.filter_name or []):
                self.error(
                    "The number of filter expressions and filter names is not the same."
                )
            for name, exp in zip(inputs.filter_name, inputs.filter_expressions):
                args.extend(["--filter-name", name, "--filter-expression", exp])

        if inputs.genotype_filter_expressions:
            # Same None-guard as above for the genotype filter names.
            if len(inputs.genotype_filter_expressions) != len(
                inputs.genotype_filter_name or []
            ):
                self.error(
                    "The number of genotype filter expressions and filter names is not the same."
                )
            for name, exp in zip(
                inputs.genotype_filter_name, inputs.genotype_filter_expressions
            ):
                args.extend(
                    [
                        "--genotype-filter-name",
                        name,
                        "--genotype-filter-expression",
                        exp,
                    ]
                )

        if inputs.mask:
            if not inputs.mask_name:
                self.error(
                    "If you specify a mask file, please specify 'mask name' - the text to "
                    "put in the FILTER field"
                )
            args.extend(
                ["--mask", inputs.mask.output.vcf.path, "--mask-name", inputs.mask_name]
            )

        return_code, stdout, stderr = Cmd["gatk"]["VariantFiltration"][args] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error(
                "GATK VariantFiltration failed. Check standard output for more "
                "information."
            )

        outputs.vcf = filtered_variants
        outputs.tbi = filtered_variants_index
        outputs.species = inputs.vcf.output.species
        outputs.build = inputs.vcf.output.build
class GatkVariantFiltrationSingle(Process):
    """Filter single-sample variant calls based on INFO and/or FORMAT annotations.

    This tool is designed for hard-filtering variant calls based on certain criteria.
    Records are hard-filtered by changing the value in the FILTER field to something
    other than PASS. Passing variants are annotated as PASS and failing variants are
    annotated with the name(s) of the filter(s) they failed. If you want to remove
    failing variants, use GATK SelectVariants process.
    """

    slug = "gatk-variant-filtration-single"
    process_type = "data:variants:vcf:variantfiltration:single"
    name = "GATK VariantFiltration (single-sample)"
    version = "1.3.0"
    entity = {
        "type": "sample",
    }
    category = "GATK"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    data_name = "{{ vcf|name|default('?') }}"

    class Input:
        """Input fields for GatkVariantFiltrationSingle."""

        vcf = DataField(data_type="variants:vcf", label="Input data (VCF)")
        ref_seq = DataField(data_type="seq:nucleotide", label="Reference sequence")
        filter_expressions = ListField(
            StringField(),
            label="Expressions used with INFO fields to filter",
            description="VariantFiltration accepts any number of JEXL expressions "
            "(so you can have two named filters by using --filter-name One "
            "--filter-expression 'X < 1' --filter-name Two --filter-expression 'X > 2'). "
            "It is preferable to use multiple expressions, each specifying an individual "
            "filter criteria, to a single compound expression that specifies multiple "
            "filter criteria. Input expressions one by one and press ENTER after each "
            "expression. Examples of filter expression: 'FS > 30', 'DP > 10'.",
            required=False,
        )
        filter_name = ListField(
            StringField(),
            label="Names to use for the list of filters",
            description="This name is put in the FILTER field for variants that get "
            "filtered. Note that there must be a 1-to-1 mapping between filter expressions "
            "and filter names. Input expressions one by one and press ENTER after each name. "
            "Warning: filter names should be in the same order as filter expressions. "
            "Example: you specified filter expressions 'FS > 30' and 'DP > 10', now "
            "specify filter names 'FS' and 'DP'.",
            required=False,
        )
        genotype_filter_expressions = ListField(
            StringField(),
            label="Expressions used with FORMAT field to filter",
            description="Similar to the INFO field based expressions, but used on the FORMAT "
            "(genotype) fields instead. VariantFiltration will add the sample-level FT tag to "
            "the FORMAT field of filtered samples (this does not affect the record's FILTER tag). "
            "One can filter normally based on most fields (e.g. 'GQ < 5.0'), but the GT "
            "(genotype) field is an exception. We have put in convenience methods so that "
            "one can now filter out hets ('isHet == 1'), refs ('isHomRef == 1'), or homs "
            "('isHomVar == 1'). Also available are expressions isCalled, isNoCall, isMixed, "
            "and isAvailable, in accordance with the methods of the Genotype object. "
            "To filter by alternative allele depth, use the expression: 'AD.1 < 5'.",
            required=False,
        )
        genotype_filter_name = ListField(
            StringField(),
            label="Names to use for the list of genotype filters",
            description="Similar to the INFO field based expressions, but used on the FORMAT "
            "(genotype) fields instead. Warning: filter names should be in the same order as "
            "filter expressions.",
            required=False,
        )
        mask = DataField(
            data_type="variants:vcf",
            label="Input mask",
            description="Any variant which overlaps entries from the provided "
            "mask file will be filtered.",
            required=False,
        )
        mask_name = StringField(
            label="The text to put in the FILTER field if a 'mask' is provided",
            description="When using the mask file, the mask name will be annotated in "
            "the variant record.",
            required=False,
            disabled="!mask",
        )

        class Advanced:
            """Advanced options."""

            cluster = IntegerField(
                label="Cluster size",
                default=3,
                description="The number of SNPs which make up a cluster. Must be at least 2.",
            )
            window = IntegerField(
                label="Window size",
                default=0,
                description="The window size (in bases) in which to evaluate clustered SNPs.",
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of "
                "the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(
            Advanced,
            label="Advanced options",
        )

    class Output:
        """Output fields for GatkVariantFiltrationSingle."""

        vcf = FileField(label="Filtered variants (VCF)")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        filtered_variants = "filtered_variants.vcf.gz"
        filtered_variants_index = filtered_variants + ".tbi"

        # Check the VCF file content: this process is single-sample only.
        sample_count = return_sample_count(
            vcf=inputs.vcf.output.vcf.path, error=self.error
        )
        if sample_count != 1:
            self.error(
                f"The input VCF should contain data for a single sample. "
                f"The input contains data for {sample_count} sample(s)."
            )

        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced.java_gc_threads
        )

        args = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "-V",
            inputs.vcf.output.vcf.path,
            "-R",
            inputs.ref_seq.output.fasta.path,
            "-O",
            filtered_variants,
            "--window",
            inputs.advanced.window,
            "--cluster",
            inputs.advanced.cluster,
            "--tmp-dir",
            TMPDIR,
        ]

        if inputs.filter_expressions:
            # The optional filter_name field may be None when omitted;
            # "or []" turns that into a clean mismatch error instead of a
            # TypeError from len(None).
            if len(inputs.filter_expressions) != len(inputs.filter_name or []):
                self.error(
                    "The number of filter expressions and filter names is not the same."
                )
            for name, exp in zip(inputs.filter_name, inputs.filter_expressions):
                args.extend(["--filter-name", name, "--filter-expression", exp])

        if inputs.genotype_filter_expressions:
            # Same None-guard as above for the genotype filter names.
            if len(inputs.genotype_filter_expressions) != len(
                inputs.genotype_filter_name or []
            ):
                self.error(
                    "The number of genotype filter expressions and filter names is not the same."
                )
            for name, exp in zip(
                inputs.genotype_filter_name, inputs.genotype_filter_expressions
            ):
                args.extend(
                    [
                        "--genotype-filter-name",
                        name,
                        "--genotype-filter-expression",
                        exp,
                    ]
                )

        if inputs.mask:
            if not inputs.mask_name:
                self.error(
                    "If you specify a mask file, please specify 'mask name' - the text to "
                    "put in the FILTER field"
                )
            args.extend(
                ["--mask", inputs.mask.output.vcf.path, "--mask-name", inputs.mask_name]
            )

        return_code, stdout, stderr = Cmd["gatk"]["VariantFiltration"][args] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error(
                "GATK VariantFiltration failed. Check standard output for more "
                "information."
            )

        outputs.vcf = filtered_variants
        outputs.tbi = filtered_variants_index
        outputs.species = inputs.vcf.output.species
        outputs.build = inputs.vcf.output.build
import os
from pathlib import Path
import pandas as pd
from joblib import Parallel, delayed, wrap_non_picklable_objects
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
def prepare_chromosome_sizes(fai_path, bed_path):
    """Prepare a BED file with chromosome sizes.

    Reads a FASTA index (.fai) and writes one whole-chromosome interval
    per line: ``<chr>\\t0\\t<length>``.
    """
    fai_columns = ["chr", "length", "offset", "line_bases", "line_width"]
    index = pd.read_csv(fai_path, sep="\t", header=None, names=fai_columns)
    intervals = pd.DataFrame(
        {"chr": index["chr"], "start": 0, "end": index["length"]}
    )
    intervals.to_csv(bed_path, sep="\t", header=False, index=False)
def prepare_scattered_inputs(results_dir, pattern="*"):
    """Build ``-I <file>`` argument pairs for scattered result files.

    Files in ``results_dir`` are expected to follow the four-digit
    interval naming used by GATK SplitIntervals (e.g. 0001-scattered),
    so lexicographic sorting yields the correct concatenation order.
    """
    return [
        token
        for scattered in sorted(results_dir.glob(pattern))
        for token in ("-I", scattered)
    ]
@delayed
@wrap_non_picklable_objects
def run_base_recalibration(
    intput_bam, known_sites, interval_path, ref_seq_path, tmp, parent_dir
):
    """Run BaseRecalibrator on a specified interval.

    Writes the recalibration table to
    ``<parent_dir>/<interval>.recal_data.csv`` and returns the GATK
    return code (0 on success, non-zero on failure). Decorated for
    parallel execution over scattered intervals via joblib.
    """
    # NOTE(review): parameter name 'intput_bam' is misspelled; kept as-is
    # because renaming would break keyword-argument callers — confirm call
    # sites before fixing.
    recal_interval = f"{parent_dir.name}/{interval_path.stem}.recal_data.csv"
    base_recal_inputs = [
        "-R",
        ref_seq_path,
        "-I",
        intput_bam,
        "--use-original-qualities",
        "--tmp-dir",
        tmp,
        "-L",
        interval_path,
        "-O",
        recal_interval,
    ]
    # Add known sites to the input parameters of BaseRecalibrator.
    for site in known_sites:
        base_recal_inputs.extend(["--known-sites", f"{site.output.vcf.path}"])
    return_code, stdout, stderr = Cmd["gatk"]["BaseRecalibrator"][
        base_recal_inputs
    ] & TEE(retcode=None)
    if return_code:
        print(f"Error in {interval_path.stem} interval.", stdout, stderr)
    return return_code
@delayed
@wrap_non_picklable_objects
def run_apply_bqsr(
    intput_bam, recal_table, interval_path, ref_seq_path, tmp, parent_dir
):
    """Run ApplyBQSR on a specified interval.

    Writes the recalibrated BAM to ``<parent_dir>/<interval>.bam`` and
    returns the GATK return code (0 on success, non-zero on failure).
    Decorated for parallel execution over scattered intervals via joblib.
    """
    # NOTE(review): parameter name 'intput_bam' is misspelled; kept as-is
    # because renaming would break keyword-argument callers — confirm call
    # sites before fixing.
    bqsr_interval_bam = f"{parent_dir.name}/{interval_path.stem}.bam"
    apply_bqsr_inputs = [
        "-R",
        ref_seq_path,
        "-I",
        intput_bam,
        "-O",
        bqsr_interval_bam,
        "-bqsr",
        recal_table,
        # Bin quality scores into three static levels to improve
        # downstream compression.
        "--static-quantized-quals",
        "10",
        "--static-quantized-quals",
        "20",
        "--static-quantized-quals",
        "30",
        "--add-output-sam-program-record",
        "--use-original-qualities",
        "-L",
        interval_path,
        "--tmp-dir",
        tmp,
    ]
    return_code, stdout, stderr = Cmd["gatk"]["ApplyBQSR"][apply_bqsr_inputs] & TEE(
        retcode=None
    )
    if return_code:
        print(f"Error in {interval_path.stem} interval.", stdout, stderr)
    return return_code
class WgsPreprocess_BWA2(Process):
"""Prepare analysis ready BAM file.
This process follows GATK best practices procedure to prepare
analysis-ready BAM file. The steps included are read alignment using
BWA MEM2, marking of duplicates (Picard MarkDuplicates), BAM sorting,
read-group assignment and base quality score recalibration (BQSR).
"""
slug = "wgs-preprocess-bwa2"
name = "WGS preprocess data with bwa-mem2"
process_type = "data:alignment:bam:wgsbwa2"
version = "1.4.0"
category = "WGS"
scheduling_class = SchedulingClass.BATCH
entity = {"type": "sample"}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
},
"resources": {
"cores": 4,
"memory": 32768,
"storage": 600,
},
}
data_name = (
"{{ reads|name|default('?') if reads else aligned_reads|name|default('?') }}"
)
class Input:
"""Input fields to process WgsPreprocess_BWA2."""
reads = DataField(
"reads:fastq:paired", label="Input sample (FASTQ)", required=False
)
aligned_reads = DataField(
"alignment:bam", label="Input sample (BAM)", required=False
)
ref_seq = DataField("seq:nucleotide", label="Reference sequence")
bwa_index = DataField("index:bwamem2", label="BWA-MEM2 genome index")
known_sites = ListField(
DataField("variants:vcf"), label="Known sites of variation (VCF)"
)
class AdvancedOptions:
"""Advanced options."""
pixel_distance = IntegerField(
label="--OPTICAL_DUPLICATE_PIXEL_DISTANCE",
default=2500,
description="Set the optical pixel distance, e.g. "
"distance between clusters. Modify this parameter to "
"ensure compatibility with older Illumina platforms.",
)
n_jobs = IntegerField(
label="Number of concurent jobs",
description="Use a fixed number of jobs for quality score "
"recalibration of determining it based on the number of "
"available cores.",
required=False,
)
advanced_options = GroupField(AdvancedOptions, label="Advanced options")
class Output:
"""Output fields to process WgsPreprocess_BWA2."""
bam = FileField(label="Analysis ready BAM file")
bai = FileField(label="BAM file index")
stats = FileField(label="Alignment statistics")
species = StringField(label="Species")
build = StringField(label="Build")
metrics_file = FileField(label="Metrics from MarkDuplicate process")
def run(self, inputs, outputs):
"""Run analysis."""
TMPDIR = os.environ.get("TMPDIR")
aligned_sam = "aligned.sam"
aligned_bam = "aligned.bam"
marked_dups = "marked_duplicates.bam"
sorted_temp = "sorted_temp.bam"
sorted_bam = "sorted.bam"
sorted_rg = "sorted_rg.bam"
recal_table = "recal_data.csv"
index_fasta_name = Path(inputs.bwa_index.output.fasta.path).name
if not inputs.reads and not inputs.aligned_reads:
self.error("Please provide FASTQ or BAM input files.")
if inputs.reads and inputs.aligned_reads:
self.error(
"Please provide input data in either FASTQ or aligned BAM format, not both."
)
if inputs.reads:
# Define output file names
name = inputs.reads.entity_name
if not name:
mate1_path = Path(inputs.reads.output.fastq[0].path).name
assert mate1_path.endswith(".fastq.gz")
name = mate1_path[:-9]
# Concatenate multi-lane read files
(
Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
> "input_reads_mate1.fastq.gz"
)()
(
Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq2]]
> "input_reads_mate2.fastq.gz"
)()
self.progress(0.05)
# Align reads with BWA MEM2
bwa_inputs = [
"-K 100000000",
"-v 3",
f"-t {self.requirements.resources.cores}",
"-Y",
f"{Path(inputs.bwa_index.output.index.path) / index_fasta_name}",
"input_reads_mate1.fastq.gz",
"input_reads_mate2.fastq.gz",
]
(Cmd["bwa-mem2"]["mem"][bwa_inputs] > aligned_sam)()
self.progress(0.2)
else:
if inputs.aligned_reads.output.species != inputs.bwa_index.output.species:
self.error(
"Species information for the input BAM file doesn't match the BWA-MEM2 index species information."
)
# Define output file names
name = inputs.aligned_reads.entity_name
if not name:
bam_path = Path(inputs.aligned_reads.output.bam.path).name
assert bam_path.endswith(".bam")
name = bam_path[:-4]
collate_inputs = [
f"-@ {self.requirements.resources.cores}",
"-O",
inputs.aligned_reads.output.bam.path,
"-",
]
fastq_inputs = [
f"-@ {self.requirements.resources.cores}",
"-c 9",
"-N",
"-c singletons.fastq",
"-",
]
bwa_inputs = [
"-K 100000000",
"-v 3",
f"-t {self.requirements.resources.cores}",
"-p",
"-Y",
f"{Path(inputs.bwa_index.output.index.path) / index_fasta_name}",
"-",
]
(
Cmd["samtools"]["collate"][collate_inputs]
| Cmd["samtools"]["fastq"][fastq_inputs]
| Cmd["bwa-mem2"]["mem"][bwa_inputs]
> aligned_sam
)()
bam = f"{name}.bam"
bam_stats = f"{name}_stats.txt"
metrics_file = f"{name}_markduplicates_metrics.txt"
# Convert aligned reads to BAM format
# Samtools sort may require 4-5 GB RAM per thread, so the CPU
# limit for this command is set to 4
(
Cmd["samtools"]["view"][
"-1", "-@", min(4, self.requirements.resources.cores), aligned_sam
]
> aligned_bam
)()
self.progress(0.25)
# File cleanup
Path(aligned_sam).unlink(missing_ok=True)
# Mark duplicates
mark_duplicates_inputs = [
"--INPUT",
aligned_bam,
"--OUTPUT",
marked_dups,
"--METRICS_FILE",
metrics_file,
"--VALIDATION_STRINGENCY",
"SILENT",
"--OPTICAL_DUPLICATE_PIXEL_DISTANCE",
inputs.advanced_options.pixel_distance,
"--ASSUME_SORT_ORDER",
"queryname",
"--TMP_DIR",
TMPDIR,
]
return_code, _, _ = Cmd["gatk"]["MarkDuplicates"][mark_duplicates_inputs] & TEE(
retcode=None
)
if return_code:
self.error("MarkDuplicates analysis failed.")
self.progress(0.3)
# File cleanup
Path(aligned_bam).unlink(missing_ok=True)
# Sort BAM file and fix NM and UQ tags
sort_inputs = [
"--INPUT",
marked_dups,
"--OUTPUT",
sorted_temp,
"--TMP_DIR",
TMPDIR,
"--SORT_ORDER",
"coordinate",
"--CREATE_INDEX",
"false",
]
return_code, _, _ = Cmd["gatk"]["SortSam"][sort_inputs] & TEE(retcode=None)
if return_code:
self.error("SortSam analysis failed.")
self.progress(0.35)
set_tag_inputs = [
"--INPUT",
sorted_temp,
"--OUTPUT",
sorted_bam,
"--TMP_DIR",
TMPDIR,
"--CREATE_INDEX",
"true",
"--REFERENCE_SEQUENCE",
inputs.ref_seq.output.fasta.path,
]
return_code, _, _ = Cmd["gatk"]["SetNmMdAndUqTags"][set_tag_inputs] & TEE(
retcode=None
)
if return_code:
self.error("SetNmMdAndUqTags analysis failed.")
self.progress(0.4)
# File cleanup
Path(marked_dups).unlink(missing_ok=True)
Path(sorted_temp).unlink(missing_ok=True)
# Set the read group information (required by BaseRecalibrator)
rg_inputs = [
"--INPUT",
sorted_bam,
"--VALIDATION_STRINGENCY",
"STRICT",
"--OUTPUT",
sorted_rg,
"--TMP_DIR",
TMPDIR,
"--RGLB",
"WGS",
"--RGPL",
"ILLUMINA",
"--RGPU",
"X",
"--RGSM",
name,
"--CREATE_INDEX",
True,
]
return_code, _, _ = Cmd["gatk"]["AddOrReplaceReadGroups"][rg_inputs] & TEE(
retcode=None
)
if return_code:
self.error("AddOrReplaceReadGroups tool failed.")
self.progress(0.45)
# File cleanup
Path(sorted_bam).unlink(missing_ok=True)
# Prepare files for scattering over chromosomes
intervals_path = Path("intervals_folder")
intervals_path.mkdir(exist_ok=True)
if inputs.advanced_options.n_jobs:
n_jobs = max(inputs.advanced_options.n_jobs, 1)
else:
n_jobs = max(self.requirements.resources.cores, 1)
chromosome_sizes = "chromosome_sizes.bed"
prepare_chromosome_sizes(
fai_path=inputs.ref_seq.output.fai.path, bed_path=chromosome_sizes
)
split_intervals_inputs = [
"-R",
inputs.ref_seq.output.fasta.path,
"-L",
chromosome_sizes,
"--scatter-count",
n_jobs,
"--subdivision-mode",
"BALANCING_WITHOUT_INTERVAL_SUBDIVISION_WITH_OVERFLOW",
"-O",
str(intervals_path),
]
return_code, _, _ = Cmd["gatk"]["SplitIntervals"][split_intervals_inputs] & TEE(
retcode=None
)
if return_code:
self.error("SplitIntervals tool failed.")
# BaseRecalibrator
recal_dir = Path("recal_tables")
recal_dir.mkdir(exist_ok=True)
intervals = [path for path in intervals_path.glob("*.interval_list")]
return_codes = Parallel(n_jobs=n_jobs)(
run_base_recalibration(
intput_bam=sorted_rg,
known_sites=inputs.known_sites,
interval_path=interval_path,
ref_seq_path=inputs.ref_seq.output.fasta.path,
tmp=TMPDIR,
parent_dir=recal_dir,
)
for interval_path in intervals
)
if any(return_codes):
self.error("GATK BaseRecalibrator tool failed.")
gather_bqsr_inputs = [
"-O",
recal_table,
*prepare_scattered_inputs(results_dir=recal_dir, pattern="*.csv"),
]
return_code, stdout, stderr = Cmd["gatk"]["GatherBQSRReports"][
gather_bqsr_inputs
] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error("GatherBQSRReports tool failed.")
self.progress(0.6)
# ApplyBQSR
bqsr_dir = Path("bqsr_bams")
bqsr_dir.mkdir(exist_ok=True)
return_codes = Parallel(n_jobs=n_jobs)(
run_apply_bqsr(
intput_bam=sorted_rg,
recal_table=recal_table,
interval_path=interval_path,
ref_seq_path=inputs.ref_seq.output.fasta.path,
tmp=TMPDIR,
parent_dir=bqsr_dir,
)
for interval_path in intervals
)
if any(return_codes):
self.error("GATK ApplyBQSR tool failed.")
gather_bam_inputs = [
"-O",
bam,
*prepare_scattered_inputs(results_dir=bqsr_dir, pattern="*.bam"),
"--TMP_DIR",
TMPDIR,
]
return_code, _, _ = Cmd["gatk"]["GatherBamFiles"][gather_bam_inputs] & TEE(
retcode=None
)
if return_code:
self.error("GatherBamFiles tool failed.")
self.progress(0.9)
# Index the BQSR BAM file
return_code, _, _ = Cmd["samtools"]["index"][bam] & TEE(retcode=None)
if return_code:
self.error("Samtools index command failed.")
(Cmd["samtools"]["flagstat"][f"{bam}"] > bam_stats)()
self.progress(0.95)
outputs.bam = bam
outputs.bai = bam + ".bai"
outputs.stats = bam_stats
outputs.species = inputs.bwa_index.output.species
outputs.build = inputs.bwa_index.output.build
outputs.metrics_file = metrics_file | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/processes/variant_calling/wgs_preprocess_bwa2.py | 0.630344 | 0.325266 | wgs_preprocess_bwa2.py | pypi |
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
class GatkMergeVcfs(Process):
    """Combine multiple variant files into a single variant file using GATK MergeVcfs."""

    slug = "gatk-merge-vcfs"
    name = "GATK MergeVcfs"
    category = "GATK"
    process_type = "data:variants:vcf:mergevcfs"
    version = "1.2.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    data_name = "Combined variants"

    class Input:
        """Input fields for GatkMergeVcfs."""

        vcfs = ListField(DataField("variants:vcf"), label="Input data (VCFs)")

        class AdvancedOptions:
            """Advanced options."""

            ref_seq = DataField(
                "seq:nucleotide",
                label="Reference sequence",
                required=False,
                description="Optionally use a sequence dictionary file (.dict) "
                "if the input VCF does not contain a complete contig list.",
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced_options = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for GatkMergeVcfs."""

        vcf = FileField(label="Merged VCF")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        # Output / intermediate file names.
        merged_vcf = "combined_variants.vcf.gz"
        merged_index = merged_vcf + ".tbi"
        list_path = "input_variant_files.list"

        # Every input VCF must share the species and genome build of the
        # first one.
        species = inputs.vcfs[0].output.species
        build = inputs.vcfs[0].output.build
        if any(vcf.output.species != species for vcf in inputs.vcfs):
            self.error(
                "Species information must be the same for all of the input VCF files."
            )
        if any(vcf.output.build != build for vcf in inputs.vcfs):
            self.error(
                "Genome build information must be the same for all of the input VCF files."
            )

        # MergeVcfs accepts a .list file with one input VCF path per line.
        with open(list_path, "w") as handle:
            handle.write("".join(f"{vcf.output.vcf.path}\n" for vcf in inputs.vcfs))

        # Cap the GC thread count at the number of available cores.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced_options.java_gc_threads
        )
        args = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced_options.max_heap_size}g",
            f"I={list_path}",
            f"O={merged_vcf}",
            f"TMP_DIR={TMPDIR}",
        ]

        ref_seq = inputs.advanced_options.ref_seq
        if ref_seq:
            # The optional sequence dictionary must match the input VCFs.
            if ref_seq.output.species != species:
                self.error(
                    "The species information of the provided reference "
                    "sequence file does not match the species of the input VCFs."
                )
            if ref_seq.output.build != build:
                self.error(
                    "The genome build information of the provided reference "
                    "sequence file does not match the build of the input VCFs."
                )
            args.append(f"D={ref_seq.output.fasta_dict.path}")

        return_code, stdout, stderr = Cmd["gatk"]["MergeVcfs"][args] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("GATK MergeVcfs failed.")

        outputs.vcf = merged_vcf
        outputs.tbi = merged_index
        outputs.species = species
        outputs.build = build
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
Process,
SchedulingClass,
StringField,
)
class GatkGenotypeRefinement(Process):
    """Run GATK Genotype Refinement.

    The goal of the Genotype Refinement workflow is to use
    additional data to improve the accuracy of genotype calls
    and to filter genotype calls that are not reliable enough
    for downstream analysis. In this sense it serves as an
    optional extension of the variant calling workflow, intended
    for researchers whose work requires high-quality identification
    of individual genotypes.

    For additional information, please see
    [manual page](https://gatk.broadinstitute.org/hc/en-us/articles/360035531432-Genotype-Refinement-workflow-for-germline-short-variants)
    """

    slug = "gatk-refine-variants"
    name = "GATK refine variants"
    category = "GATK"
    process_type = "data:variants:vcf:refinevariants"
    version = "1.1.1"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    data_name = "Refined variants"

    class Input:
        """Input fields for GatkGenotypeRefinement."""

        vcf = DataField(
            "variants:vcf", label="The main input, as produced in the GATK VQSR process"
        )
        ref_seq = DataField("seq:nucleotide", label="Reference sequence")
        vcf_pop = DataField(
            "variants:vcf",
            label="Population-level variant set (VCF)",
            required=False,
        )

    class Output:
        """Output fields for GatkGenotypeRefinement."""

        vcf = FileField(label="Refined multi-sample vcf")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        variants_cgp = "variants_recal_genotype_posteriors.vcf.gz"
        variants_refined = "variants.refined.vcf.gz"
        variants_refined_index = variants_refined + ".tbi"

        if inputs.vcf_pop:
            # BUGFIX: the previous chained comparison ``a != b != c`` only
            # tests the two adjacent pairs, so a mismatch between the first
            # and the last value (e.g. vcf_pop == vcf but != ref_seq) went
            # undetected. Collect the values in a set and require a single
            # unique value instead.
            species = {
                inputs.vcf_pop.output.species,
                inputs.vcf.output.species,
                inputs.ref_seq.output.species,
            }
            builds = {
                inputs.vcf_pop.output.build,
                inputs.vcf.output.build,
                inputs.ref_seq.output.build,
            }
            if len(species) > 1:
                self.error(
                    "Species information for the input VCF files and reference sequence don't match."
                )
            elif len(builds) > 1:
                self.error(
                    "Build information for the input VCF files and reference sequence don't match."
                )
        else:
            if inputs.vcf.output.species != inputs.ref_seq.output.species:
                self.error(
                    "Species information for the input VCF file and reference sequence don't match."
                )
            elif inputs.vcf.output.build != inputs.ref_seq.output.build:
                self.error(
                    "Build information for the input VCF file and reference sequence don't match."
                )

        # Recalculate genotype posteriors, optionally supported by a
        # population-level variant set.
        args_cgp = [
            "-R",
            inputs.ref_seq.output.fasta.path,
            "-V",
            inputs.vcf.output.vcf.path,
            "-O",
            variants_cgp,
            "--create-output-variant-index",
            "--tmp-dir",
            TMPDIR,
        ]
        if inputs.vcf_pop:
            args_cgp.extend(["--supporting", inputs.vcf_pop.output.vcf.path])

        return_code, _, _ = Cmd["gatk"]["CalculateGenotypePosteriors"][args_cgp] & TEE(
            retcode=None
        )
        if return_code:
            self.error("GATK CalculateGenotypePosteriors failed.")

        # Flag genotypes with genotype quality below 20 as "lowGQ".
        args_filtration = [
            "-R",
            inputs.ref_seq.output.fasta.path,
            "-V",
            variants_cgp,
            "-O",
            variants_refined,
            "--genotype-filter-name",
            "lowGQ",
            "--create-output-variant-index",
            "--genotype-filter-expression",
            "GQ < 20",
            "--tmp-dir",
            TMPDIR,
        ]
        return_code, _, _ = Cmd["gatk"]["VariantFiltration"][args_filtration] & TEE(
            retcode=None
        )
        if return_code:
            self.error("GATK VariantFiltration failed.")

        outputs.vcf = variants_refined
        outputs.tbi = variants_refined_index
        outputs.species = inputs.ref_seq.output.species
        outputs.build = inputs.ref_seq.output.build
import gzip
import shutil
from pathlib import Path
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FileHtmlField,
GroupField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
def return_sample_count(vcf, error):
    """Count the number of samples in the input VCF file.

    Samples are the columns following the FORMAT column in the ``#CHROM``
    header line of a (bgzipped) VCF. On read/parsing problems ``error`` is
    invoked with an explanatory message. Returns 0 when no ``#CHROM``
    header line is present.
    """
    try:
        with gzip.open(vcf, "rt") as vcf_in:
            for line in vcf_in:
                if line.startswith("#CHROM"):
                    headers = line.rstrip().split()
                    return len(headers[headers.index("FORMAT") + 1 :])
    except Exception as err:
        error(
            f"Unable to determine sample count in VCF file. Original error was: {err}"
        )
    # BUGFIX: previously the function implicitly returned None when the
    # "#CHROM" line was missing, which crashed numeric comparisons in the
    # callers (e.g. ``sample_count > 1``). Report zero samples instead.
    return 0
class SnpEff(Process):
    """Annotate variants with SnpEff.

    SnpEff is a variant annotation and effect prediction tool.
    It annotates and predicts the effects of genetic variants
    (such as amino acid changes).

    This process also allows filtering of variants with ``SnpSift
    filter`` command and extracting specific fields from the VCF
    file with ``SnpSift extractFields`` command.

    This tool works with multi-sample VCF file as an input.
    """

    slug = "snpeff"
    name = "snpEff (General variant annotation) (multi-sample)"
    process_type = "data:variants:vcf:snpeff"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/snpeff:2.1.1"},
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
        },
    }
    category = "WGS"
    data_name = "Annotated variants (SnpEff)"
    version = "1.1.1"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED

    class Input:
        """Input fields to SnpEff process."""

        variants = DataField(
            data_type="variants:vcf",
            label="Variants (VCF)",
        )
        database = StringField(
            label="snpEff database",
            default="GRCh38.99",
            choices=[("GRCh37.75", "GRCh37.75"), ("GRCh38.99", "GRCh38.99")],
        )
        dbsnp = DataField(
            data_type="variants:vcf",
            label="Known variants",
            description="List of known variants for annotation.",
            required=False,
        )
        filtering_options = StringField(
            label="Filtering expressions",
            description="Filter VCF file using arbitraty expressions."
            "Examples of filtering expressions: '(ANN[*].GENE = 'PSD3')' "
            "or '( REF = 'A' )' or "
            "'(countHom() > 3) | (( exists INDEL ) & (QUAL >= 20)) | (QUAL >= 30 )'."
            "For more information checkout the official documentation of [SnpSift]"
            "(https://pcingola.github.io/SnpEff/ss_filter/)",
            required=False,
        )
        sets = ListField(
            DataField(data_type="geneset"),
            label="Files with list of genes",
            description="Use list of genes, if you only want variants reported for "
            "them. Each file must have one string per line.",
            hidden="!filtering_options",
            required=False,
        )
        extract_fields = ListField(
            StringField(),
            label="Fields to extract",
            description="Write fields you want to extract from annonated vcf file "
            "and press Enter after each one. Example of fields: `CHROM POS REF ALT "
            "'ANN[*].GENE'`. For more information follow this [link]"
            "(https://pcingola.github.io/SnpEff/ss_extractfields/).",
            required=False,
        )

        class Advanced:
            """Advanced options."""

            one_per_line = BooleanField(
                label="One effect per line",
                default=False,
                description="If there is more than one effect per variant, write them "
                "to seperate lines.",
            )

        advanced = GroupField(
            Advanced, label="Advanced options", hidden="!extract_fields"
        )

    class Output:
        """Output fields to process SnpEff."""

        vcf = FileField(
            label="Annotated variants (VCF)",
        )
        tbi = FileField(label="Index of annotated variants")
        vcf_extracted = FileField(
            label="Extracted annotated variants (VCF)",
            required=False,
        )
        tbi_extracted = FileField(
            label="Index of extracted variants",
            required=False,
        )
        species = StringField(label="Species")
        build = StringField(label="Build")
        genes = FileField(label="SnpEff genes")
        summary = FileHtmlField(
            label="Summary",
        )

    @staticmethod
    def _compress_and_index(vcf_path):
        """Bgzip-compress and tabix-index a VCF; return (vcf_gz, tbi) paths."""
        (Cmd["bgzip"]["-c", vcf_path] > vcf_path + ".gz")()
        (Cmd["tabix"]["-p", "vcf", vcf_path + ".gz"])()
        return vcf_path + ".gz", vcf_path + ".gz.tbi"

    def run(self, inputs, outputs):
        """Run analysis."""
        output_variants = "snpeff_variants.vcf"
        annotated_variants = "annotated_variants.vcf"
        filtered_variants = "filtered_variants.vcf"
        extracted_variants = "extracted_variants.vcf"

        # The database name prefix (e.g. "GRCh38") must match the genome
        # build of the input variants.
        if not inputs.variants.output.build.startswith(inputs.database[:6]):
            self.error(
                "Genome build for the input variants file and "
                "SnpEff database should be the same. Input variants file is "
                f"based on {inputs.variants.output.build}, while SnpEff "
                f"database is based on {inputs.database[:6]}."
            )

        file_name = Path(inputs.variants.output.vcf.path).name
        shutil.copy(Path(inputs.variants.output.vcf.path), Path.cwd())

        # check the VCF file content
        sample_count = return_sample_count(vcf=Path(file_name), error=self.error)
        # BUGFIX: guard against a missing sample count (None) which would
        # previously crash the ``not sample_count > 1`` comparison.
        if sample_count is None or sample_count < 2:
            self.error(
                f"The input VCF file should contain data for multiple samples. "
                f"The input contains data for {sample_count} sample(s)."
            )

        args_snpeff = [
            inputs.database,
            inputs.variants.output.vcf.path,
        ]
        (Cmd["snpEff"][args_snpeff] > output_variants)()

        if inputs.dbsnp:
            if not inputs.dbsnp.output.build.startswith(inputs.database[:6]):
                self.error(
                    "Genome build for the DBSNP file and used database "
                    "should be the same. DBSNP file is based on "
                    f"{inputs.dbsnp.output.build}, while snpEff database "
                    f"is based on {inputs.database[:6]}."
                )
            args_annotation = [
                "annotate",
                inputs.dbsnp.output.vcf.path,
                output_variants,
            ]
            (Cmd["SnpSift"][args_annotation] > annotated_variants)()

        if inputs.filtering_options:
            args_filtering = [
                "filter",
                inputs.filtering_options,
            ]
            # Filter the dbSNP-annotated VCF when available, otherwise the
            # raw SnpEff output.
            if inputs.dbsnp:
                args_filtering.append(annotated_variants)
            else:
                args_filtering.append(output_variants)
            if inputs.sets:
                # Renamed loop variable: ``set`` shadowed the builtin.
                for gene_set in inputs.sets:
                    args_filtering.extend(["-s", gene_set.output.geneset.path])
            (Cmd["SnpSift"][args_filtering] > filtered_variants)()
            outputs.vcf, outputs.tbi = self._compress_and_index(filtered_variants)
        elif inputs.dbsnp:
            outputs.vcf, outputs.tbi = self._compress_and_index(annotated_variants)
        else:
            outputs.vcf, outputs.tbi = self._compress_and_index(output_variants)

        if inputs.extract_fields:
            args_extract = [
                "extractFields",
            ]
            # Use the most processed VCF available as extraction input.
            if not inputs.dbsnp and not inputs.filtering_options:
                args_extract.append(output_variants)
            elif inputs.dbsnp and not inputs.filtering_options:
                args_extract.append(annotated_variants)
            else:
                args_extract.append(filtered_variants)
            args_extract.extend(inputs.extract_fields)
            if inputs.advanced.one_per_line:
                (
                    Cmd["SnpSift"][args_extract] | Cmd["vcfEffOnePerLine.pl"]
                    > extracted_variants
                )()
            else:
                (Cmd["SnpSift"][args_extract] > extracted_variants)()
            outputs.vcf_extracted, outputs.tbi_extracted = self._compress_and_index(
                extracted_variants
            )

        outputs.species = inputs.variants.output.species
        outputs.build = inputs.variants.output.build
        # File names produced by snpEff in the working directory.
        outputs.genes = "snpEff_genes.txt"
        outputs.summary = "snpEff_summary.html"
class SnpEffSingleSample(Process):
    """Annotate variants with SnpEff.

    SnpEff is a variant annotation and effect prediction tool.
    It annotates and predicts the effects of genetic variants
    (such as amino acid changes).

    This process also allows filtering of variants with ``SnpSift
    filter`` command and extracting specific fields from the VCF
    file with ``SnpSift extractFields`` command.

    This tool works with single-sample VCF file as an input.
    """

    slug = "snpeff-single"
    name = "snpEff (General variant annotation) (single-sample)"
    process_type = "data:variants:vcf:snpeff:single"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/snpeff:2.1.1"},
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
        },
    }
    entity = {
        "type": "sample",
    }
    category = "WGS"
    data_name = "{{ variants|name|default('?') }}"
    version = "1.0.1"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED

    class Input:
        """Input fields to SnpEffSingleSample process."""

        variants = DataField(
            data_type="variants:vcf",
            label="Variants (VCF)",
        )
        database = StringField(
            label="snpEff database",
            default="GRCh38.99",
            choices=[("GRCh37.75", "GRCh37.75"), ("GRCh38.99", "GRCh38.99")],
        )
        dbsnp = DataField(
            data_type="variants:vcf",
            label="Known variants",
            description="List of known variants for annotation.",
            required=False,
        )
        filtering_options = StringField(
            label="Filtering expressions",
            description="Filter VCF file using arbitraty expressions."
            "Examples of filtering expressions: '(ANN[*].GENE = 'PSD3')' "
            "or '( REF = 'A' )' or "
            "'(countHom() > 3) | (( exists INDEL ) & (QUAL >= 20)) | (QUAL >= 30 )'."
            "For more information checkout the official documentation of [SnpSift]"
            "(https://pcingola.github.io/SnpEff/ss_filter/)",
            required=False,
        )
        sets = ListField(
            DataField(data_type="geneset"),
            label="Files with list of genes",
            description="Use list of genes, if you only want variants reported for "
            "them. Each file must have one string per line.",
            hidden="!filtering_options",
            required=False,
        )
        extract_fields = ListField(
            StringField(),
            label="Fields to extract",
            description="Write fields you want to extract from annonated vcf file "
            "and press Enter after each one. Example of fields: `CHROM POS REF ALT "
            "'ANN[*].GENE'`. For more information follow this [link]"
            "(https://pcingola.github.io/SnpEff/ss_extractfields/).",
            required=False,
        )

        class Advanced:
            """Advanced options."""

            one_per_line = BooleanField(
                label="One effect per line",
                default=False,
                description="If there is more than one effect per variant, write them "
                "to seperate lines.",
            )

        advanced = GroupField(
            Advanced, label="Advanced options", hidden="!extract_fields"
        )

    class Output:
        """Output fields to process SnpEffSingleSample."""

        vcf = FileField(
            label="Annotated variants (VCF)",
        )
        tbi = FileField(label="Index of annotated variants")
        vcf_extracted = FileField(
            label="Extracted annotated variants (VCF)",
            required=False,
        )
        tbi_extracted = FileField(
            label="Index of extracted variants",
            required=False,
        )
        species = StringField(label="Species")
        build = StringField(label="Build")
        genes = FileField(label="SnpEff genes")
        summary = FileHtmlField(
            label="Summary",
        )

    @staticmethod
    def _compress_and_index(vcf_path):
        """Bgzip-compress and tabix-index a VCF; return (vcf_gz, tbi) paths."""
        (Cmd["bgzip"]["-c", vcf_path] > vcf_path + ".gz")()
        (Cmd["tabix"]["-p", "vcf", vcf_path + ".gz"])()
        return vcf_path + ".gz", vcf_path + ".gz.tbi"

    def run(self, inputs, outputs):
        """Run analysis."""
        output_variants = "snpeff_variants.vcf"
        annotated_variants = "annotated_variants.vcf"
        filtered_variants = "filtered_variants.vcf"
        extracted_variants = "extracted_variants.vcf"

        # The database name prefix (e.g. "GRCh38") must match the genome
        # build of the input variants.
        if not inputs.variants.output.build.startswith(inputs.database[:6]):
            self.error(
                "Genome build for the input variants file and "
                "SnpEff database should be the same. Input variants file is "
                f"based on {inputs.variants.output.build}, while SnpEff "
                f"database is based on {inputs.database[:6]}."
            )

        file_name = Path(inputs.variants.output.vcf.path).name
        shutil.copy(Path(inputs.variants.output.vcf.path), Path.cwd())

        # check the VCF file content
        sample_count = return_sample_count(vcf=Path(file_name), error=self.error)
        if sample_count != 1:
            self.error(
                f"The input VCF should contain data for a single sample. "
                f"The input contains data for {sample_count} sample(s)."
            )

        args_snpeff = [
            inputs.database,
            inputs.variants.output.vcf.path,
        ]
        (Cmd["snpEff"][args_snpeff] > output_variants)()

        if inputs.dbsnp:
            if not inputs.dbsnp.output.build.startswith(inputs.database[:6]):
                self.error(
                    "Genome build for the DBSNP file and used database "
                    "should be the same. DBSNP file is based on "
                    f"{inputs.dbsnp.output.build}, while snpEff database "
                    f"is based on {inputs.database[:6]}."
                )
            args_annotation = [
                "annotate",
                inputs.dbsnp.output.vcf.path,
                output_variants,
            ]
            (Cmd["SnpSift"][args_annotation] > annotated_variants)()

        if inputs.filtering_options:
            args_filtering = [
                "filter",
                inputs.filtering_options,
            ]
            # Filter the dbSNP-annotated VCF when available, otherwise the
            # raw SnpEff output.
            if inputs.dbsnp:
                args_filtering.append(annotated_variants)
            else:
                args_filtering.append(output_variants)
            if inputs.sets:
                # Renamed loop variable: ``set`` shadowed the builtin.
                for gene_set in inputs.sets:
                    args_filtering.extend(["-s", gene_set.output.geneset.path])
            (Cmd["SnpSift"][args_filtering] > filtered_variants)()
            outputs.vcf, outputs.tbi = self._compress_and_index(filtered_variants)
        elif inputs.dbsnp:
            outputs.vcf, outputs.tbi = self._compress_and_index(annotated_variants)
        else:
            outputs.vcf, outputs.tbi = self._compress_and_index(output_variants)

        if inputs.extract_fields:
            args_extract = [
                "extractFields",
            ]
            # Use the most processed VCF available as extraction input.
            if not inputs.dbsnp and not inputs.filtering_options:
                args_extract.append(output_variants)
            elif inputs.dbsnp and not inputs.filtering_options:
                args_extract.append(annotated_variants)
            else:
                args_extract.append(filtered_variants)
            args_extract.extend(inputs.extract_fields)
            if inputs.advanced.one_per_line:
                (
                    Cmd["SnpSift"][args_extract] | Cmd["vcfEffOnePerLine.pl"]
                    > extracted_variants
                )()
            else:
                (Cmd["SnpSift"][args_extract] > extracted_variants)()
            outputs.vcf_extracted, outputs.tbi_extracted = self._compress_and_index(
                extracted_variants
            )

        outputs.species = inputs.variants.output.species
        outputs.build = inputs.variants.output.build
        # File names produced by snpEff in the working directory.
        outputs.genes = "snpEff_genes.txt"
        outputs.summary = "snpEff_summary.html"
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
ListField,
Process,
SchedulingClass,
StringField,
)
class GatkVariantsToTable(Process):
    """Run GATK VariantsToTable.

    This tool extracts specified fields for each variant in a VCF file
    to a tab-delimited table, which may be easier to work with than a VCF.
    For additional information, please see
    [manual page](https://gatk.broadinstitute.org/hc/en-us/articles/360036711531-VariantsToTable)
    """

    slug = "variants-to-table"
    name = "GATK VariantsToTable"
    category = "GATK"
    process_type = "data:variantstable"
    version = "1.2.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    data_name = "Variants in table"

    class Input:
        """Input fields for GATK VariantsToTable."""

        vcf = DataField("variants:vcf", label="Input VCF file")
        vcf_fields = ListField(
            StringField(),
            label="Select VCF fields",
            description="The name of a standard VCF field or an "
            "INFO field to include in the output table. "
            "The field can be any standard VCF column (e.g. CHROM, ID, QUAL) "
            "or any annotation name in the INFO field (e.g. AC, AF).",
            default=[
                "CHROM",
                "POS",
                "ID",
                "REF",
                "ALT",
            ],
        )

        class AdvancedOptions:
            """Advanced options."""

            gf_fields = ListField(
                StringField(),
                label="Include FORMAT/sample-level fields",
                default=[
                    "GT",
                    "GQ",
                ],
            )
            split_alleles = BooleanField(
                label="Split multi-allelic records into multiple lines",
                description="By default, a variant record with multiple "
                "ALT alleles will be summarized in one line, with per "
                "alt-allele fields (e.g. allele depth) separated by commas."
                "This may cause difficulty when the table is loaded by "
                "an R script, for example. Use this flag to write multi-allelic "
                "records on separate lines of output.",
                default=True,
            )

        advanced_options = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for GATK VariantsToTable."""

        tsv = FileField(label="Tab-delimited file with variants")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")
        table_path = "variants_table.tsv"

        # Base invocation; field selections are appended below.
        args = [
            "-V",
            inputs.vcf.output.vcf.path,
            "-O",
            table_path,
            "--tmp-dir",
            TMPDIR,
        ]
        for vcf_field in inputs.vcf_fields:
            args += ["-F", vcf_field]
        for sample_field in inputs.advanced_options.gf_fields:
            args += ["-GF", sample_field]
        if inputs.advanced_options.split_alleles:
            args += ["--split-multi-allelic"]

        return_code, _, _ = Cmd["gatk"]["VariantsToTable"][args] & TEE(retcode=None)
        if return_code:
            self.error("GATK VariantsToTable failed.")

        outputs.tsv = table_path
        outputs.species = inputs.vcf.output.species
        outputs.build = inputs.vcf.output.build
import os
from pathlib import Path
from plumbum import TEE
from pysam import VariantFile
from resolwe.process import (
Cmd,
DataField,
FileField,
IntegerField,
Persistence,
Process,
SchedulingClass,
StringField,
)
def append_sample_info(vcf_file, summary, warning, error):
    """Extract reference (FASTA) and sample names from the VCF file."""
    try:
        vcf = VariantFile(vcf_file)
    except (OSError, ValueError) as err:
        print(err)
        # ``error`` is expected to raise and abort the process.
        error("Input VCF file does not exist or could not be correctly opened.")
    header = vcf.header
    # Later records with the same key overwrite earlier ones.
    header_records = {rec.key: rec.value for rec in header.records}
    with open(summary, "a") as handle:
        if "reference" in header_records:
            fasta_name = os.path.basename(header_records["reference"])
        else:
            fasta_name = ""
            warning(
                "Reference sequence (FASTA) name could not be recognized from the VCF header."
            )
        handle.write("\nReference (genome) sequence:\n{}\n".format(fasta_name))
        handle.write("\nSamples:\n{}".format("\n".join(list(header.samples))))
class FilteringCheMut(Process):
    """Filtering and annotation of Variant Calling (CheMut).

    Filtering and annotation of Variant Calling data - Chemical
    mutagenesis in _Dictyostelium discoideum_.
    """

    slug = "filtering-chemut"
    name = "Variant filtering (CheMut)"
    category = "WGS"
    process_type = "data:variants:vcf:filtering"
    version = "1.8.2"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = "Filtered variants ({{ analysis_type }})"

    class Input:
        """Input fields for FilteringCheMut."""

        variants = DataField(
            data_type="variants:vcf",
            label="Variants file (VCF)",
        )
        # The selected value is also the name of the chemut R function to run.
        analysis_type = StringField(
            label="Analysis type",
            description="Choice of the analysis type. Use 'SNV' or 'INDEL' options. "
            "Choose options SNV_CHR2 or INDEL_CHR2 to run the GATK analysis "
            "only on the diploid portion of CHR2 (-ploidy 2 -L chr2:2263132-3015703).",
            choices=[
                ("snv", "SNV"),
                ("indel", "INDEL"),
                ("snv_chr2", "SNV_CHR2"),
                ("indel_chr2", "INDEL_CHR2"),
            ],
            default="snv",
        )
        parental_strain = StringField(
            label="Parental strain prefix",
            default="parental",
        )
        mutant_strain = StringField(
            label="Mutant strain prefix",
            default="mut",
        )
        genome = DataField(data_type="seq:nucleotide", label="Reference genome")
        read_depth = IntegerField(
            label="Read Depth Cutoff",
            default=5,
        )

    class Output:
        """Output fields for FilteringCheMut."""

        summary = FileField(
            label="Summary", description="Summarize the input parameters and results."
        )
        vcf = FileField(
            label="Variants",
            description="A genome VCF file of variants that passed the filters.",
        )
        tbi = FileField(label="Tabix index")
        variants_filtered = FileField(
            label="Variants filtered",
            required=False,
            description="A data frame of variants that passed the filters.",
        )
        variants_filtered_alt = FileField(
            label="Variants filtered (multiple alt. alleles)",
            required=False,
            description="A data frame of variants that contain more than two alternative "
            "alleles. These variants are likely to be false positives.",
        )
        gene_list_all = FileField(
            label="Gene list (all)",
            required=False,
            description="Genes that are mutated at least once.",
        )
        gene_list_top = FileField(
            label="Gene list (top)",
            required=False,
            description="Genes that are mutated at least twice.",
        )
        mut_chr = FileField(
            label="Mutations (by chr)",
            required=False,
            description="List mutations in individual chromosomes.",
        )
        mut_strain = FileField(
            label="Mutations (by strain)",
            required=False,
            description="List mutations in individual strains.",
        )
        strain_by_gene = FileField(
            label="Strain (by gene)",
            required=False,
            description="List mutants that carry mutations in individual genes.",
        )
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        base_path = Path(inputs.variants.output.vcf.path)
        assert base_path.name.endswith(".vcf.gz")
        # Decompress the input VCF; GATK and the chemut R package read the
        # plain-text file. ``.stem`` drops only the trailing ``.gz``.
        vcf_file = base_path.stem
        (Cmd["bgzip"]["-dc"][inputs.variants.output.vcf.path] > vcf_file)()
        if inputs.variants.output.species != inputs.genome.output.species:
            self.error(
                "Species for variants and FASTA reference do not match. "
                f"Variants are from {inputs.variants.output.species}, while FASTA "
                f"reference is from {inputs.genome.output.species}."
            )
        if inputs.variants.output.build != inputs.genome.output.build:
            self.error(
                "Genome build for variants and FASTA reference do not match. "
                f"Variants have build {inputs.variants.output.build}, while FASTA "
                f"reference has build {inputs.genome.output.build}."
            )
        selected_variants = f"selected_{vcf_file}"
        input_selected = [
            "-R",
            inputs.genome.output.fasta.path,
            "-V",
            vcf_file,
            "-O",
            selected_variants,
        ]
        # Restrict the selection to the variant class matching the analysis.
        if "snv" in inputs.analysis_type:
            input_selected.extend(["--select-type-to-include", "SNP"])
        elif "indel" in inputs.analysis_type:
            input_selected.extend(["--select-type-to-include", "INDEL"])
        return_code, stdout, stderr = Cmd["gatk"]["SelectVariants"][
            input_selected
        ] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("GATK SelectVariants tool failed.")
        # The chemut R package performs the actual filtering/annotation; the
        # analysis type doubles as the name of the R function to invoke.
        r_input = (
            "library(chemut); "
            f"{inputs.analysis_type}("
            f"input_file = '{selected_variants}', "
            f"parental_strain = '{inputs.parental_strain}', "
            f"mutant_strain = '{inputs.mutant_strain}', "
            f"read_depth = {inputs.read_depth})"
        )
        return_code, _, stderr = Cmd["Rscript"]["-e"][r_input] & TEE(retcode=None)
        if return_code:
            print(stderr)
            self.error(f"Error while running the script {inputs.analysis_type}.R")
        # The R package writes its results into <selected_variants>_<read_depth>/.
        output_dir = Path(f"{selected_variants}_{inputs.read_depth}")
        if not output_dir.exists():
            output_dir.mkdir()
        append_sample_info(
            vcf_file=selected_variants,
            summary=str(output_dir / "summary.txt"),
            warning=self.warning,
            error=self.error,
        )
        outputs.summary = str(output_dir / "summary.txt")
        outputs.species = inputs.variants.output.species
        outputs.build = inputs.variants.output.build
        if (output_dir / "variants.vcf").exists():
            variants_gz = str(output_dir / "variants.vcf.gz")
            (Cmd["bgzip"]["-c", str(output_dir / "variants.vcf")] > variants_gz)()
            Cmd["tabix"]["-p", "vcf", variants_gz]()
            outputs.vcf = variants_gz
            outputs.tbi = variants_gz + ".tbi"
        else:
            self.error("No variants have passed the filters. VCF file was not created.")
        # The remaining outputs are optional; set each one only when the R
        # package actually produced the corresponding file.
        if (output_dir / "variant_filtered.txt").exists():
            outputs.variants_filtered = str(output_dir / "variant_filtered.txt")
        if (output_dir / "variant_mult_alt.txt").exists():
            outputs.variants_filtered_alt = str(output_dir / "variant_mult_alt.txt")
        if (output_dir / "gene_list_all.txt").exists():
            outputs.gene_list_all = str(output_dir / "gene_list_all.txt")
        if (output_dir / "gene_list_top.txt").exists():
            outputs.gene_list_top = str(output_dir / "gene_list_top.txt")
        if (output_dir / "mutations_by_chr.txt").exists():
            outputs.mut_chr = str(output_dir / "mutations_by_chr.txt")
        if (output_dir / "mutations_by_strain.txt").exists():
            outputs.mut_strain = str(output_dir / "mutations_by_strain.txt")
        if (output_dir / "strain_by_gene.txt").exists():
            outputs.strain_by_gene = str(output_dir / "strain_by_gene.txt")
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
def return_sample_count(vcf, error):
    """Count number of samples in the input VCF file.

    Runs ``bcftools query -l`` (one sample name per line) piped into
    ``wc -l``. ``error`` is a callable that reports failure; if it returns
    instead of raising, this function implicitly returns None.
    """
    try:
        return int((Cmd["bcftools"]["query"]["-l", vcf] | Cmd["wc"]["-l"])().strip())
    except Exception as err:
        error(
            f"Unable to determine sample count in VCF file. Original error was: {err}"
        )
class GatkSelectVariants(Process):
    """Select a subset of variants based on various criteria using GATK SelectVariants.
    This tool works with multi-sample VCF file as an input.
    """
    slug = "gatk-select-variants"
    process_type = "data:variants:vcf:selectvariants"
    name = "GATK SelectVariants (multi-sample)"
    version = "1.2.0"
    category = "GATK"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    data_name = "Selected variants"
    class Input:
        """Input fields for GatkSelectVariants."""
        vcf = DataField("variants:vcf", label="Input data (VCF)")
        # Passed to GATK as -L when provided.
        intervals = DataField(
            "bed",
            label="Intervals file (.bed)",
            description="One or more genomic intervals over which to operate. This can also be "
            "used to get data from a specific interval.",
            required=False,
        )
        # Each entry is passed to GATK as a separate -select-type argument.
        select_type = ListField(
            StringField(),
            label="Select only a certain type of variants from the input file",
            description="This argument selects particular kinds of variants out of a list. If "
            "left empty, there is no type selection and all variant types are considered for "
            "other selection criteria. Valid types are INDEL, SNP, MIXED, MNP, SYMBOLIC, "
            "NO_VARIATION. Can be specified multiple times.",
            required=False,
        )
        exclude_filtered = BooleanField(
            label="Don't include filtered sites",
            default=False,
            description="If this flag is enabled, sites that have been marked as filtered (i.e. have "
            "anything other than `.` or `PASS` in the FILTER field) will be excluded from the output.",
        )
        class AdvancedOptions:
            """Advanced options."""
            ref_seq = DataField(
                "seq:nucleotide",
                label="Reference sequence",
                required=False,
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )
        advanced_options = GroupField(AdvancedOptions, label="Advanced options")
    class Output:
        """Output fields for GatkSelectVariants."""
        vcf = FileField(label="Selected variants (VCF)")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")
    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")
        # check the VCF file content
        sample_count = return_sample_count(
            vcf=inputs.vcf.output.vcf.path, error=self.error
        )
        # This process is meant for cohort (multi-sample) VCFs only; the
        # single-sample variant lives in GatkSelectVariantsSingleSample.
        if not sample_count > 1:
            self.error(
                f"The input VCF file should contain data for multiple samples. "
                f"The input contains data for {sample_count} sample(s)."
            )
        selected_variants = "selected_variants.vcf.gz"
        # GATK writes the Tabix index alongside the bgzipped output VCF.
        selected_variants_index = selected_variants + ".tbi"
        species = inputs.vcf.output.species
        build = inputs.vcf.output.build
        # Never request more GC threads than the process has cores.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced_options.java_gc_threads
        )
        args = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced_options.max_heap_size}g",
            "--variant",
            inputs.vcf.output.vcf.path,
            "--output",
            selected_variants,
            "--tmp-dir",
            TMPDIR,
        ]
        if inputs.advanced_options.ref_seq:
            # The optional reference must match the input VCF's metadata.
            if (
                inputs.advanced_options.ref_seq.output.species
                != inputs.vcf.output.species
            ):
                self.error(
                    "The species information of the provided reference "
                    "sequence file does not match the species of the input VCF."
                )
            if inputs.advanced_options.ref_seq.output.build != inputs.vcf.output.build:
                self.error(
                    "The genome build information of the provided reference "
                    "sequence file does not match the build of the input VCFs."
                )
            args.extend(["-R", inputs.advanced_options.ref_seq.output.fasta.path])
        if inputs.intervals:
            args.extend(["-L", inputs.intervals.output.bed.path])
        if inputs.select_type:
            for type in inputs.select_type:
                args.extend(["-select-type", type])
        if inputs.exclude_filtered:
            args.append("--exclude-filtered")
        return_code, stdout, stderr = Cmd["gatk"]["SelectVariants"][args] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error("GATK SelectVariants failed.")
        outputs.vcf = selected_variants
        outputs.tbi = selected_variants_index
        outputs.species = species
        outputs.build = build
class GatkSelectVariantsSingleSample(Process):
    """Select a subset of variants based on various criteria using GATK SelectVariants.

    This tool works with single-sample VCF file as an input.
    """

    slug = "gatk-select-variants-single"
    process_type = "data:variants:vcf:selectvariants:single"
    name = "GATK SelectVariants (single-sample)"
    version = "1.1.0"
    entity = {
        "type": "sample",
    }
    category = "GATK"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    data_name = "{{ vcf|name|default('?') }}"

    class Input:
        """Input fields for GatkSelectVariantsSingleSample."""

        vcf = DataField("variants:vcf", label="Input data (VCF)")
        # Passed to GATK as -L when provided.
        intervals = DataField(
            "bed",
            label="Intervals file (.bed)",
            description="One or more genomic intervals over which to operate. This can also be "
            "used to get data from a specific interval.",
            required=False,
        )
        # Each entry is passed to GATK as a separate -select-type argument.
        select_type = ListField(
            StringField(),
            label="Select only a certain type of variants from the input file",
            description="This argument selects particular kinds of variants out of a list. If "
            "left empty, there is no type selection and all variant types are considered for "
            "other selection criteria. Valid types are INDEL, SNP, MIXED, MNP, SYMBOLIC, "
            "NO_VARIATION. Can be specified multiple times.",
            required=False,
        )
        exclude_filtered = BooleanField(
            label="Don't include filtered sites",
            default=False,
            description="If this flag is enabled, sites that have been marked as filtered (i.e. have "
            "anything other than `.` or `PASS` in the FILTER field) will be excluded from the output.",
        )

        class AdvancedOptions:
            """Advanced options."""

            ref_seq = DataField(
                "seq:nucleotide",
                label="Reference sequence",
                required=False,
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced_options = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for GatkSelectVariantsSingleSample."""

        vcf = FileField(label="Selected variants (VCF)")
        tbi = FileField(label="Tabix index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run GATK SelectVariants on a single-sample VCF."""
        tmp_dir = os.environ.get("TMPDIR")
        # This process is meant for single-sample VCFs only; the cohort
        # variant lives in GatkSelectVariants.
        sample_count = return_sample_count(
            vcf=inputs.vcf.output.vcf.path, error=self.error
        )
        if sample_count != 1:
            self.error(
                f"The input VCF should contain data for a single sample. "
                f"The input contains data for {sample_count} sample(s)."
            )
        selected_variants = "selected_variants.vcf.gz"
        # GATK writes the Tabix index alongside the bgzipped output VCF.
        selected_variants_index = selected_variants + ".tbi"
        species = inputs.vcf.output.species
        build = inputs.vcf.output.build
        # Never request more GC threads than the process has cores.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced_options.java_gc_threads
        )
        args = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced_options.max_heap_size}g",
            "--variant",
            inputs.vcf.output.vcf.path,
            "--output",
            selected_variants,
            "--tmp-dir",
            tmp_dir,
        ]
        if inputs.advanced_options.ref_seq:
            # The optional reference must match the input VCF's metadata.
            if (
                inputs.advanced_options.ref_seq.output.species
                != inputs.vcf.output.species
            ):
                self.error(
                    "The species information of the provided reference "
                    "sequence file does not match the species of the input VCF."
                )
            if inputs.advanced_options.ref_seq.output.build != inputs.vcf.output.build:
                self.error(
                    "The genome build information of the provided reference "
                    "sequence file does not match the build of the input VCF."
                )
            args.extend(["-R", inputs.advanced_options.ref_seq.output.fasta.path])
        if inputs.intervals:
            args.extend(["-L", inputs.intervals.output.bed.path])
        if inputs.select_type:
            # ``variant_type`` avoids shadowing the builtin ``type``.
            for variant_type in inputs.select_type:
                args.extend(["-select-type", variant_type])
        if inputs.exclude_filtered:
            args.append("--exclude-filtered")
        return_code, stdout, stderr = Cmd["gatk"]["SelectVariants"][args] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error("GATK SelectVariants failed.")
        outputs.vcf = selected_variants
        outputs.tbi = selected_variants_index
        outputs.species = species
        outputs.build = build
import json
from resolwe.process import (
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
# Map Salmon library-type codes (the "expected_format" value reported in
# lib_format_counts.json) onto this process's strandedness options.
STRAND_CODES = {
    "IU": "non_specific",
    "U": "non_specific",
    "ISF": "forward",
    "OSF": "forward",
    "SF": "forward",
    "ISR": "reverse",
    "OSR": "reverse",
    "SR": "reverse",
}
class QortsQC(Process):
    """QoRTs QC analysis."""

    slug = "qorts-qc"
    name = "QoRTs QC"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0",
            },
        },
        "resources": {
            "cores": 1,
            "memory": 32768,
        },
    }
    data_name = "{{ alignment|name|default('?') }}"
    version = "1.7.1"
    process_type = "data:qorts:qc"
    category = "QC"
    entity = {
        "type": "sample",
        "input": "alignment",
    }
    scheduling_class = SchedulingClass.BATCH
    description = "Quality of RNA-seq Tool-Set."

    class Input:
        """Input fields."""

        alignment = DataField("alignment:bam", label="Alignment")
        annotation = DataField("annotation:gtf", label="GTF annotation")

        class Options:
            """Options."""

            stranded = StringField(
                label="Assay type",
                default="non_specific",
                choices=[
                    ("non_specific", "Strand non-specific"),
                    ("forward", "Strand-specific forward"),
                    ("reverse", "Strand-specific reverse"),
                    ("auto", "Detect automatically"),
                ],
            )
            # Only used (and shown) when strandedness autodetection is chosen.
            cdna_index = DataField(
                "index:salmon",
                label="cDNA index file",
                required=False,
                hidden="options.stranded != 'auto'",
            )
            n_reads = IntegerField(
                label="Number of reads in subsampled alignment file",
                default=5000000,
                hidden="options.stranded != 'auto'",
            )
            maxPhredScore = IntegerField(
                label="Max Phred Score",
                required=False,
            )
            adjustPhredScore = IntegerField(
                label="Adjust Phred Score",
                required=False,
            )

        options = GroupField(Options, label="Options")

    class Output:
        """Output fields."""

        plot = FileField(label="QC multiplot")
        summary = FileField(label="QC summary")
        qorts_data = FileField(label="QoRTs report data")

    def run(self, inputs, outputs):
        """Run the analysis."""
        lib_strand = ""
        if inputs.options.stranded == "auto":
            # Autodetect strandedness with Salmon on a subsampled alignment;
            # the helper script writes results/lib_format_counts.json.
            detect_strandedness_inputs = [
                inputs.alignment.output.bam.path,
                inputs.options.n_reads,
                inputs.options.cdna_index.output.index.path,
                self.requirements.resources.cores,
            ]
            Cmd["detect_strandedness.sh"](detect_strandedness_inputs)
            try:
                lib_strand = STRAND_CODES[
                    json.load(open("results/lib_format_counts.json")).get(
                        "expected_format", ""
                    )
                ]
            except KeyError:
                self.error(
                    "Library strandedness autodetection failed. Use manual selection options instead."
                )
        # Default and required arguments
        args = [
            "--skipFunctions",
            "writeDESeq,writeDEXSeq",
            "--randomSeed",
            42,
            "--generatePlots",
            inputs.alignment.output.bam.path,
            inputs.annotation.output.annot.path,
            "qorts_output",
        ]
        optional_args = []
        # Detect if aligned reads in BAM file are of single or paired-end type
        # The samtools view command counts the number of reads with the SAM flag "read paired (0x1)"
        if (
            Cmd["samtools"](
                "view", "-c", "-f", "1", inputs.alignment.output.bam.path
            ).strip()
            == "0"
        ):
            optional_args.append("--singleEnded")
        if inputs.options.stranded == "forward" or lib_strand == "forward":
            optional_args.extend(["--stranded", "--stranded_fr_secondstrand"])
        elif inputs.options.stranded == "reverse" or lib_strand == "reverse":
            optional_args.append("--stranded")
        # ``is not None`` (rather than truthiness) so an explicit 0 is honored.
        if inputs.options.maxPhredScore is not None:
            optional_args.extend(["--maxPhredScore", inputs.options.maxPhredScore])
        if inputs.options.adjustPhredScore is not None:
            optional_args.extend(
                ["--adjustPhredScore", inputs.options.adjustPhredScore]
            )
        memory_limit = "-Xmx{}g".format(self.requirements.resources.memory // 1024)
        # join optional and required arguments
        Cmd["QoRTs"]([memory_limit, "QC"] + optional_args + args)
        # Compress QoRTs output folder
        Cmd["zip"](["-r", "qorts_report.zip", "qorts_output"])
        outputs.plot = "qorts_output/QC.multiPlot.pdf"
        outputs.summary = "qorts_output/QC.summary.txt"
        outputs.qorts_data = "qorts_report.zip"
import gzip
import json
import os
import shutil
from pathlib import Path
import pandas as pd
import yaml
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
DirField,
FileHtmlField,
GroupField,
IntegerField,
ListField,
Process,
StringField,
)
def create_symlink(src, dst):
    """Create a symbolic link.

    Uses ``ln -s --backup=numbered`` so an existing destination is backed up
    (dst.~1~, dst.~2~, ...) instead of failing; returns the command output.
    """
    return Cmd["ln"]("-s", "--backup=numbered", src, dst)
def clean_name(sample_name, to_remove, error):
    """Clean a sample name.

    Strip every substring in ``to_remove`` from ``sample_name``. If nothing
    is left, the failure is reported through the ``error`` callable (which
    is expected to raise); the (possibly empty) name is returned otherwise.
    """
    for substr in to_remove:
        sample_name = sample_name.replace(substr, "")
    if not sample_name:
        # Fixed message: add the missing sentence separator and use a
        # normal ", " join instead of the previous " ," separator.
        error(
            "Sample name only contains elements which are removed during sample name cleanup. "
            f"Avoid naming samples with just {', '.join(to_remove)}."
        )
    return sample_name
def create_summary_table(samples, species, build):
    """Prepare sample summary MultiQC table."""
    table_data = {}
    for name, organism, genome in zip(samples, species, build):
        # rRNA/globin entries are QC-only alignments, not sample genomes.
        if genome in ("rRNA", "globin"):
            continue
        table_data[name] = {
            "Species": organism,
            "Genome Build": genome,
        }
    summary = {
        "id": "sample_info",
        "section_name": "Sample Info",
        "plot_type": "table",
        "file_format": "json",
        "data": table_data,
    }
    with open("sample_data_mqc.json", "w") as handle:
        json.dump(summary, handle)
def parse_rnaseqc_report(report):
    """Parse RNA-SeQC QC report file."""
    # Two-column TSV: metric name in the first column, value in the second.
    metrics = pd.read_csv(report, sep="\t")
    return dict(metrics.values)
def create_coverage_table(sample_name, report):
    """Prepare coverage metrics table."""
    # Metric-name -> value mapping from the two-column RNA-SeQC report.
    metrics = dict(pd.read_csv(report, sep="\t").values)
    coverage_stats = [
        "Genes used in 3' bias",
        "Mean 3' bias",
        "Median 3' bias",
        "3' bias Std",
        "3' bias MAD_Std",
        "3' Bias, 25th Percentile",
        "3' Bias, 75th Percentile",
    ]
    coverage_qc_json = {
        "id": "coverage_qc",
        "section_name": "RNA-SeQC Coverage Stats",
        "plot_type": "table",
        "file_format": "json",
        "data": {
            sample_name: {
                stat: metrics[stat] for stat in coverage_stats if stat in metrics
            }
        },
    }
    with open("rnaseqc_coverage_mqc.json", "w") as handle:
        json.dump(coverage_qc_json, handle)
def parse_chip_qc_report(report):
    """Parse ChiP-seq QC report file."""
    # Single-row TSV report; missing cells become empty strings.
    report_df = pd.read_csv(report, sep="\t").fillna("")
    return report_df.to_dict(orient="records")[0]
def create_prepeak_table(sample_names, reports):
    """Prepare ChIP-seq pre-peak MultiQC table."""
    qc_data = {}
    for name, report in zip(sample_names, reports):
        # Single-row TSV report; missing cells become empty strings.
        report_df = pd.read_csv(report, sep="\t").fillna("")
        qc_data[name] = report_df.to_dict(orient="records")[0]
    prepeak_qc_json = {
        "pconfig": {"format": "{:,.2f}"},
        "id": "chip_seq_prepeak_qc",
        "section_name": "ChIP-seq pre-peak QC",
        "plot_type": "table",
        "file_format": "json",
        "data": qc_data,
    }
    with open("chipseq_prepeak_qc_mqc.json", "w") as handle:
        json.dump(prepeak_qc_json, handle)
def create_postpeak_table(sample_names, reports):
    """Prepare ChIP-seq post-peak MultiQC table."""
    qc_data = {}
    for name, report in zip(sample_names, reports):
        # Single-row TSV report; missing cells become empty strings.
        report_df = pd.read_csv(report, sep="\t").fillna("")
        qc_data[name] = report_df.to_dict(orient="records")[0]
    postpeak_qc_json = {
        "pconfig": {"format": "{:,.2f}"},
        "id": "chip_seq_postpeak_qc",
        "section_name": "ChIP-seq post-peak QC",
        "plot_type": "table",
        "file_format": "json",
        "data": qc_data,
    }
    with open("chipseq_postpeak_qc_mqc.json", "w") as handle:
        json.dump(postpeak_qc_json, handle)
def create_lib_strand_table(samples, reports):
    """Prepare library strandedness MultiQC table."""
    # Salmon library-type code -> human-readable description.
    strand_codes = {
        "IU": "Strand non-specific (paired-end; -fr-unstranded)",
        "U": "Strand non-specific (single-end; -fr-unstranded)",
        "ISF": "Strand-specific forward (paired-end; -fr-secondstrand)",
        "OSF": "Strand-specific forward (paired-end; outward facing reads)",
        "SF": "Strand-specific forward (single-end; -fr-secondstrand)",
        "ISR": "Strand-specific reverse (paired-end; -fr-firststrand)",
        "OSR": "Strand-specific reverse (paired-end; outward facing reads)",
        "SR": "Strand-specific reverse (single-end; -fr-firststrand)",
    }
    lib_strand_json = {
        "id": "lib_strandedness",
        "section_name": "Library Strandedness",
        "plot_type": "table",
        "file_format": "json",
        "data": {},
    }
    for sample_name, report in zip(samples, reports):
        with open(report) as handle:
            report_data = json.load(handle)
        if "expected_format" not in report_data:
            raise ValueError("Cannot parse library type information file.")
        strandedness = report_data["expected_format"]
        lib_strand_json["data"][sample_name] = {
            "Strandedness code": strandedness,
            "Description": strand_codes[strandedness],
        }
    with open("lib_strandedness_mqc.json", "w") as handle:
        json.dump(lib_strand_json, handle)
def sum_featurecounts_columns(summary_file, out_file):
    """Prepare input for featureCounts."""
    counts = pd.read_csv(
        summary_file,
        sep="\t",
        index_col="Status",
        dtype={
            "Status": str,
        },
    )
    # Only collapse multi-lane summaries; single-column files are left alone
    # and no output file is written for them.
    if len(counts.columns) <= 1:
        return
    # Column names look like "<sample>:<lane>"; keep the sample part.
    label = counts.columns[0].split(":")[0]
    counts[label] = counts.sum(axis=1).astype(int)
    counts[[label]].to_csv(
        out_file,
        index_label="Status",
        sep="\t",
    )
def process_strand_report_file(data, lib_type_samples, lib_type_reports):
    """Process Strandedness report file if it exists as Data output file."""
    # Data objects without a strandedness report raise AttributeError when
    # the field is accessed; those objects are silently skipped.
    try:
        report_path = data.output.strandedness_report.path
        if os.path.isfile(report_path):
            lib_type_samples.append(data.entity.name)
            lib_type_reports.append(report_path)
    except AttributeError:
        pass
def parse_bsrate_report(report):
    """Parse bsrate report file."""
    stats = {}
    with open(report, "r") as handle:
        first_line = handle.readline()
        # A skipped-process marker yields an empty dict.
        if first_line.startswith("Bisulfite conversion rate process skipped."):
            return stats
        stats["Overall conversion rate"] = first_line.split()[-1]
        # The next two lines hold per-strand rate and nucleotide counts
        # as the last two whitespace-separated fields.
        for strand in ["positive", "negative"]:
            fields = handle.readline().split()
            stats[f"Conversion rate on {strand}"] = fields[-2]
            stats[f"Number of nucleotides used on {strand} strand"] = fields[-1]
    return stats
def create_bsrate_table(samples, reports):
    """Prepare bisulfite sequencing conversion rate MultiQC table."""
    bsrate_json = {
        "id": "wgbs_bsrate",
        "section_name": "WGBS conversion rate ",
        "plot_type": "table",
        "file_format": "json",
        "data": {},
    }
    for sample_name, report in zip(samples, reports):
        stats = parse_bsrate_report(report)
        # Samples whose conversion-rate step was skipped yield empty stats.
        if stats:
            bsrate_json["data"][sample_name] = stats
    # Write the section only when at least one sample has data.
    if bsrate_json["data"]:
        with open("wgbs_bsrate_mqc.json", "w") as handle:
            json.dump(bsrate_json, handle)
def process_markdup_file(report):
    """Process samtools markdup file."""
    # The report is "KEY: value" per line; parsing with header=0 makes the
    # first line the header, so the frame is transposed and re-labelled to
    # get a single record keyed by the stat names.
    stats_df = pd.read_csv(
        report, skiprows=0, sep=": ", header=0, engine="python"
    ).transpose()
    stat_names = stats_df.iloc[0]
    stats_df = stats_df[1:]
    stats_df.columns = stat_names
    stats = stats_df.to_dict(orient="records")[0]
    return {
        "UNIQUE PAIRS": stats["PAIRED"] - stats["DUPLICATE PAIR"],
        "UNIQUE SINGLE": stats["SINGLE"] - stats["DUPLICATE SINGLE"],
        "DUPLICATE PAIRS OPTICAL": stats["DUPLICATE PAIR OPTICAL"],
        "DUPLICATE PAIRS NONOPTICAL": stats["DUPLICATE PAIR"]
        - stats["DUPLICATE PAIR OPTICAL"],
        "DUPLICATE SINGLE OPTICAL": stats["DUPLICATE SINGLE OPTICAL"],
        "DUPLICATE SINGLE NONOPTICAL": stats["DUPLICATE SINGLE"]
        - stats["DUPLICATE SINGLE OPTICAL"],
        "EXCLUDED": stats["EXCLUDED"],
    }
def create_markdup_plot(samples, reports):
    """Prepare samtools markdup MultiQC table."""
    markdup_json = {
        "id": "samtools_markdup",
        "section_name": "Samtools markdup statistics",
        "description": "*Please note that excluded reads are those marked as secondary, supplementary, QC failed or "
        "unmapped reads and are not used for calculating duplicates.",
        "plot_type": "bargraph",
        "pconfig": {
            "id": "rmdup_bargraph",
            "title": "Samtools deduplication stats",
            "ylab": "Number of reads",
        },
        "file_format": "json",
        "data": {},
    }
    for sample_name, report in zip(samples, reports):
        stats = process_markdup_file(report)
        if stats:
            markdup_json["data"][sample_name] = stats
    # Write the section only when at least one sample has data.
    if markdup_json["data"]:
        with open("wgbs_markdup_mqc.json", "w") as handle:
            json.dump(markdup_json, handle)
def parse_nanostring_report(report):
    """Parse Nanostring sample QC report file."""
    columns = [
        "Samples",
        "Zero Counts",
        "Signal",
        "Neg. Controls Mean",
        "Neg. Controls Std Dev",
        "Noise Cutoff",
    ]
    # header=0 discards the file's own header row; the "Samples" column is
    # dropped via usecols since the table is keyed by sample elsewhere.
    qc = pd.read_csv(
        report,
        sep="\t",
        names=columns,
        usecols=columns[1:],
        header=0,
    )
    qc.fillna("NA", inplace=True)
    return qc.to_dict(orient="records")[0]
def create_nanostring_table(sample_names, reports):
    """Prepare Nanostring Sample QC MultiQC table."""
    qc_data = {
        name: parse_nanostring_report(report)
        for name, report in zip(sample_names, reports)
    }
    sample_qc_json = {
        "pconfig": {"format": "{:,.2f}"},
        "id": "nanostring_sample_qc",
        "section_name": "Nanostring sample QC",
        "plot_type": "table",
        "file_format": "json",
        "data": qc_data,
    }
    with open("nanostring_sample_qc_mqc.json", "w") as handle:
        json.dump(sample_qc_json, handle)
def parse_lane_attributes(report):
    """Parse Nanostring lane attributes report file."""
    # Attributes are rows (name<TAB>value); transpose to a single record.
    attrs = pd.read_csv(report, sep="\t", index_col=0, header=None).T
    # The first column holds the attribute-name header row; drop it.
    attrs = attrs.drop(columns=attrs.columns[0])
    return attrs.to_dict(orient="records")[0]
def create_lane_table(sample_names, reports):
    """Prepare Nanostring lane attributes MultiQC table."""
    lane_data = {
        name: parse_lane_attributes(report)
        for name, report in zip(sample_names, reports)
    }
    lane_json = {
        "pconfig": {"format": "{}", "scale": False},
        "id": "nanostring_lane_attributes",
        "section_name": "Nanostring lane attributes",
        "plot_type": "table",
        "file_format": "json",
        "data": lane_data,
    }
    with open("nanostring_lane_attributes_mqc.json", "w") as handle:
        json.dump(lane_json, handle)
def parse_counts_summary(report):
    """Parse STAR quantification summary report file.

    Returns the number of assigned reads and their percentage among all
    mapped reads (multimapping + noFeature + ambiguous + assigned).
    """
    counts = pd.read_csv(
        report,
        sep="\t",
        index_col=0,
    )
    # Scalar lookups replace the previous int(<size-1 ndarray>) conversion,
    # which is deprecated in NumPy and scheduled to raise.
    assigned_reads = int(counts.at["N_assigned", "Read count"])
    mapped_reads_sum = float(
        counts.loc[
            ["N_multimapping", "N_noFeature", "N_ambiguous", "N_assigned"],
            "Read count",
        ].sum()
    )
    percent_assigned = assigned_reads / mapped_reads_sum * 100
    return {
        "Assigned reads": assigned_reads,
        "% of assigned reads": percent_assigned,
    }
def update_generalstats_table(sample_name, report):
    """Update general statistics table with new information."""
    report_data = parse_counts_summary(report)
    # MultiQC general-stats entry name uses the "<name> | <name>" form.
    entry_name = f"{sample_name} | {sample_name}"
    counts_json = {
        "section_name": "STAR quantification",
        "plot_type": "generalstats",
        "file_format": "json",
        "data": {entry_name: dict(report_data)},
    }
    with open("STAR quantification_mqc.json", "w") as handle:
        json.dump(counts_json, handle)
class MultiQC(Process):
    """Aggregate results from bioinformatics analyses across many samples into a single report.

    [MultiQC](http://www.multiqc.info) searches a given directory for analysis logs and compiles a
    HTML report. It's a general purpose tool, perfect for summarising the output from numerous
    bioinformatics tools.
    """

    slug = "multiqc"
    process_type = "data:multiqc"
    name = "MultiQC"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/common:3.0.1"},
        },
        "resources": {
            "cores": 1,
            "memory": 8192,
        },
    }
    entity = {
        "type": "sample",
    }
    category = "QC"
    data_name = "MultiQC report"
    version = "1.20.0"

    class Input:
        """Input fields to process MultiQC."""

        data = ListField(
            DataField(
                data_type="",
                description="Select multiple data objects for which the MultiQC report is to be "
                "generated.",
            ),
            label="Input data",
        )

        class Advanced:
            """Options."""

            dirs = BooleanField(
                label="--dirs",
                default=True,
                description="Prepend directory to sample names.",
            )
            dirs_depth = IntegerField(
                label="--dirs-depth",
                default=-1,
                description="Prepend a specified number of directories to sample names. Enter a "
                "negative number (default) to take from start of path.",
            )
            fullnames = BooleanField(
                label="--fullnames",
                default=False,
                description="Disable the sample name cleaning (leave as full file name).",
            )
            config = BooleanField(
                label="Use configuration file",
                default=True,
                description="Use Genialis configuration file for MultiQC report.",
            )
            cl_config = StringField(
                label="--cl-config",
                required=False,
                description="Enter text with command-line configuration options to override the "
                "defaults (e.g. custom_logo_url: https://www.genialis.com).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields."""

        report = FileHtmlField(label="MultiQC report")
        report_data = DirField(label="Report data")

    def run(self, inputs, outputs):
        """Run the analysis.

        Collect the QC-relevant files of each input data object into a
        per-sample directory (via symlinks or generated custom-content
        JSON files) and run MultiQC over the working directory.
        """
        samples = []
        species = []
        build = []
        lib_type_samples = []
        lib_type_reports = []
        chip_seq_samples = []
        chip_seq_prepeak_reports = []
        chip_seq_postpeak_samples = []
        chip_seq_postpeak_reports = []
        bsrate_samples = []
        bsrate_reports = []
        markdup_samples = []
        markdup_reports = []
        rcc_samples = []
        rcc_reports = []
        rcc_lane_reports = []
        unsupported_data = []

        config_file = "/opt/resolwebio/assets/multiqc_config.yml"
        with open(config_file) as handle:
            mqc_config = yaml.safe_load(handle)

        for d in inputs.data:
            try:
                # Here we have to remove some filename suffixes
                # to avoid missing data in the final report. This
                # workaround is used to avoid duplicated names caused by
                # file name cleaning.
                # For example, `some_sample.fastq.gz/stats_L001.txt`
                # and `some_sample.fastq.gz/stats_L002.txt` would
                # both be simplified to `some_sample` and only the first
                # would be included in the report.
                sample_name = clean_name(
                    sample_name=d.entity.name,
                    to_remove=mqc_config["extra_fn_clean_exts"],
                    error=self.error,
                )
                sample_dir = sample_name
                os.makedirs(sample_dir, exist_ok=True)
                if sample_name and d.output.species and d.output.build:
                    samples.append(sample_name)
                    species.append(d.output.species)
                    build.append(d.output.build)
            except AttributeError:
                pass

            if d.process.type.startswith("data:reads:fastq:single"):
                for fq_report in d.output.fastqc_archive:
                    name = os.path.basename(fq_report.path)
                    create_symlink(fq_report.path, os.path.join(sample_dir, name))
            elif d.process.type.startswith("data:reads:fastq:paired"):
                for fq_report in d.output.fastqc_archive + d.output.fastqc_archive2:
                    name = os.path.basename(fq_report.path)
                    create_symlink(fq_report.path, os.path.join(sample_dir, name))
            elif d.process.type.startswith("data:alignment:bam:markduplicate"):
                name = os.path.basename(d.output.metrics_file.path)
                create_symlink(
                    d.output.metrics_file.path, os.path.join(sample_dir, name)
                )
            elif d.process.type == "data:alignment:bam:star:":
                stats_file = os.path.basename(d.output.stats.path)
                assert stats_file.endswith("_stats.txt")
                bam_name = stats_file[:-10]
                # rRNA/globin alignments get dedicated report names so they
                # show up as separate entries in the report.
                if d.output.build == "rRNA":
                    rrna_report = f"{bam_name}.rRNA.Log.final.out"
                    create_symlink(
                        d.output.stats.path, os.path.join(sample_dir, rrna_report)
                    )
                elif d.output.build == "globin":
                    globin_report = f"{bam_name}.globin.Log.final.out"
                    create_symlink(
                        d.output.stats.path, os.path.join(sample_dir, globin_report)
                    )
                else:
                    report = f"{bam_name}.Log.final.out"
                    create_symlink(
                        d.output.stats.path, os.path.join(sample_dir, report)
                    )
                if d.output.gene_counts:
                    with gzip.open(d.output.gene_counts.path, "rb") as f_in:
                        with open(
                            os.path.join(sample_dir, "ReadsPerGene.out.tab"), "wb"
                        ) as f_out:
                            shutil.copyfileobj(f_in, f_out)
            elif d.process.type == "data:alignment:bam:walt:":
                # Duplicates report exists only in recent WALT objects.
                try:
                    if os.path.isfile(d.output.duplicates_report.path):
                        dup_report_path = d.output.duplicates_report.path
                        name = os.path.basename(dup_report_path)
                        create_symlink(dup_report_path, os.path.join(sample_dir, name))
                        markdup_samples.append(sample_name)
                        markdup_reports.append(dup_report_path)
                        create_markdup_plot(markdup_samples, markdup_reports)
                except AttributeError:
                    pass
            elif d.process.type == "data:alignment:bam:bqsr:":
                name = os.path.basename(d.output.recal_table.path)
                create_symlink(
                    d.output.recal_table.path, os.path.join(sample_dir, name)
                )
            elif d.process.type.startswith("data:alignment:bam"):
                name = os.path.basename(d.output.stats.path)
                create_symlink(d.output.stats.path, os.path.join(sample_dir, name))
            elif d.process.type == "data:expression:featurecounts:":
                name = os.path.basename(d.output.counts_summary.path)
                sum_featurecounts_columns(
                    summary_file=d.output.counts_summary.path,
                    out_file=os.path.join(sample_dir, f"summed_{name}"),
                )
                # Fall back to the raw summary when summing did not produce a file.
                if not os.path.exists(os.path.join(sample_dir, f"summed_{name}")):
                    create_symlink(
                        d.output.counts_summary.path, os.path.join(sample_dir, name)
                    )
                # Strandedness report exists only if auto detection was enabled
                process_strand_report_file(d, lib_type_samples, lib_type_reports)
            elif d.process.type == "data:expression:star:":
                name = os.path.basename(d.output.counts_summary.path)
                # Symlink the file path, not the FileField object itself.
                create_symlink(
                    d.output.counts_summary.path, os.path.join(sample_dir, name)
                )
                update_generalstats_table(
                    sample_name=sample_name, report=d.output.counts_summary.path
                )
            elif d.process.type == "data:chipseq:callpeak:macs2:":
                name = os.path.basename(d.output.called_peaks.path)
                create_symlink(
                    d.output.called_peaks.path, os.path.join(sample_dir, name)
                )
                chip_seq_samples.append(sample_name)
                chip_seq_prepeak_reports.append(d.output.case_prepeak_qc.path)
                chip_seq_postpeak_samples.append(sample_name)
                chip_seq_postpeak_reports.append(d.output.chip_qc.path)
                # MACS2 analysis can be run without the background sample,
                # thus the associated report might not exits
                try:
                    if os.path.isfile(d.output.control_prepeak_qc.path):
                        chip_seq_samples.append(f"Background of {sample_name}")
                        chip_seq_prepeak_reports.append(
                            d.output.control_prepeak_qc.path
                        )
                except AttributeError:
                    pass
            elif d.process.type == "data:samtools:idxstats:":
                name = os.path.basename(d.output.report.path)
                create_symlink(d.output.report.path, os.path.join(sample_dir, name))
            elif d.process.type == "data:qorts:qc:":
                name = os.path.basename(d.output.summary.path)
                create_symlink(d.output.summary.path, os.path.join(sample_dir, name))
            elif d.process.type == "data:rnaseqc:qc:":
                name = os.path.basename(d.output.metrics.path)
                create_symlink(
                    src=d.output.metrics.path, dst=os.path.join(sample_dir, name)
                )
                create_coverage_table(
                    sample_name=sample_name, report=d.output.metrics.path
                )
            elif d.process.type == "data:expression:salmon:":
                # Symlink files/dirs without the parent directory to
                # attach it to the same sample in the general summary.
                for out_file in Path(d.output.salmon_output.path).iterdir():
                    create_symlink(str(out_file), str(Path(sample_dir) / out_file.name))
                # Strandedness report might not exist in legacy Salmon objects
                process_strand_report_file(d, lib_type_samples, lib_type_reports)
            elif d.process.type.startswith("data:picard"):
                name = os.path.basename(d.output.report.path)
                create_symlink(d.output.report.path, os.path.join(sample_dir, name))
            elif d.process.type == "data:wgbs:bsrate:":
                name = os.path.basename(d.output.report.path)
                create_symlink(d.output.report.path, os.path.join(sample_dir, name))
                bsrate_samples.append(sample_name)
                bsrate_reports.append(d.output.report.path)
                create_bsrate_table(bsrate_samples, bsrate_reports)
            elif d.process.type == "data:chipqc:":
                plot_paths = [
                    d.output.ccplot.path,
                    d.output.coverage_histogram.path,
                    d.output.peak_profile.path,
                    d.output.peaks_barplot.path,
                    d.output.peaks_density_plot.path,
                ]
                for path in plot_paths:
                    name = os.path.basename(path)
                    create_symlink(path, os.path.join(sample_dir, name))
                # ChipQC may contain enrichment heatmap
                try:
                    if os.path.isfile(d.output.enrichment_heatmap.path):
                        name = os.path.basename(d.output.enrichment_heatmap.path)
                        create_symlink(
                            src=d.output.enrichment_heatmap.path,
                            dst=os.path.join(sample_dir, name),
                        )
                except AttributeError:
                    pass
            elif d.process.type == "data:nanostring:rcc:":
                # Sample_qc is an optional field
                try:
                    name = os.path.basename(d.output.sample_qc.path)
                    create_symlink(
                        d.output.sample_qc.path, os.path.join(sample_dir, name)
                    )
                    rcc_samples.append(d.entity.name)
                    rcc_reports.append(d.output.sample_qc.path)
                    create_nanostring_table(rcc_samples, rcc_reports)
                    lane_name = os.path.basename(d.output.lane_attributes.path)
                    create_symlink(
                        d.output.lane_attributes.path,
                        os.path.join(sample_dir, lane_name),
                    )
                    rcc_lane_reports.append(d.output.lane_attributes.path)
                    create_lane_table(rcc_samples, rcc_lane_reports)
                except AttributeError:
                    pass
            else:
                unsupported_data.append(d.name)

        if unsupported_data:
            ext = ", ..." if len(unsupported_data) > 5 else ""
            self.warning(
                f"The Input data {', '.join(unsupported_data[:5])}{ext} is not supported "
                f"by the MultiQC analysis."
            )

        create_summary_table(samples, species, build)

        if lib_type_samples and lib_type_reports:
            create_lib_strand_table(lib_type_samples, lib_type_reports)

        if chip_seq_samples and chip_seq_prepeak_reports:
            create_prepeak_table(chip_seq_samples, chip_seq_prepeak_reports)

        if chip_seq_postpeak_samples and chip_seq_postpeak_reports:
            create_postpeak_table(chip_seq_postpeak_samples, chip_seq_postpeak_reports)

        args = [
            "-dd",
            inputs.advanced.dirs_depth,
        ]

        if inputs.advanced.dirs:
            args.append("-d")

        if inputs.advanced.fullnames:
            args.append("-s")

        if inputs.advanced.config:
            args.extend(["-c", config_file])

        if inputs.advanced.cl_config:
            args.extend(["--cl-config", inputs.advanced.cl_config])

        with Cmd.env(LC_ALL="C.UTF-8"):
            return_code, _, _ = Cmd["multiqc"]["."][args] & TEE(retcode=None)
        if return_code:
            self.error("MultiQC analysis failed.")

        # NOTE(review): this errors only when BOTH outputs are missing;
        # confirm whether either one missing should be an error instead.
        if not os.path.isdir("multiqc_data") and not os.path.isfile(
            "multiqc_report.html"
        ):
            self.error("MultiQC finished without creating outputs.")

        outputs.report = "multiqc_report.html"
        outputs.report_data = "multiqc_data"
import gzip
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class AlignmentSummary(Process):
    """Produce a summary of alignment metrics from BAM file.

    Tool from Picard, wrapped by GATK4. See GATK
    CollectAlignmentSummaryMetrics for more information.
    """

    slug = "alignment-summary"
    name = "Picard AlignmentSummary"
    category = "Picard"
    process_type = "data:picard:summary"
    version = "2.3.0"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields for AlignmentSummary."""

        bam = DataField("alignment:bam", label="Alignment BAM file")
        genome = DataField("seq:nucleotide", label="Genome")
        adapters = DataField(
            "seq:nucleotide", label="Adapter sequences", required=False
        )
        validation_stringency = StringField(
            label="Validation stringency",
            description="Validation stringency for all SAM files read by this "
            "program. Setting stringency to SILENT can improve "
            "performance when processing a BAM file in which "
            "variable-length data (read, qualities, tags) do not "
            "otherwise need to be decoded. Default is STRICT.",
            choices=[
                ("STRICT", "STRICT"),
                ("LENIENT", "LENIENT"),
                ("SILENT", "SILENT"),
            ],
            default="STRICT",
        )
        insert_size = IntegerField(label="Maximum insert size", default=100000)
        pair_orientation = StringField(
            label="Pair orientation",
            default="null",
            choices=[
                ("null", "Unspecified"),
                ("FR", "FR"),
                ("RF", "RF"),
                ("TANDEM", "TANDEM"),
            ],
        )
        bisulfite = BooleanField(
            label="BAM file consists of bisulfite sequenced reads", default=False
        )
        assume_sorted = BooleanField(
            label="Sorted BAM file",
            description="If true the sort order in the header file will be ignored.",
            default=False,
        )

    class Output:
        """Output fields for AlignmentSummary."""

        report = FileField(label="Alignement metrics")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def get_sequences(self, fname):
        """Get a list of sequences from FASTA file.

        Header lines (starting with ">") are skipped. Note that each
        line of a multi-line FASTA record is returned as a separate
        list entry.
        """
        # Both branches parsed identically before; only the opener differs.
        opener = gzip.open if fname.endswith(".gz") else open
        with opener(fname, "rt") as fasta:
            return [line.strip() for line in fasta if not line.startswith(">")]

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        basename = os.path.basename(inputs.bam.output.bam.path)
        assert basename.endswith(".bam")
        name = basename[:-4]
        metrics_file = f"{name}_alignment_metrics.txt"

        args = [
            "--INPUT",
            inputs.bam.output.bam.path,
            "--OUTPUT",
            metrics_file,
            "--REFERENCE_SEQUENCE",
            inputs.genome.output.fasta.path,
            "--VALIDATION_STRINGENCY",
            inputs.validation_stringency,
            "--MAX_INSERT_SIZE",
            inputs.insert_size,
            "--ASSUME_SORTED",
            inputs.assume_sorted,
            "--EXPECTED_PAIR_ORIENTATIONS",
            inputs.pair_orientation,
            "--IS_BISULFITE_SEQUENCED",
            inputs.bisulfite,
            "--TMP_DIR",
            TMPDIR,
        ]

        if inputs.adapters:
            # GATK expects --ADAPTER_SEQUENCE to be repeated once per
            # adapter; previously a nested list was passed as a single
            # argument value.
            for adapter in self.get_sequences(inputs.adapters.output.fasta.path):
                args.extend(["--ADAPTER_SEQUENCE", adapter])
        else:
            # Clear the default adapter list implemented in Picard.
            args.extend(["--ADAPTER_SEQUENCE", "null"])

        return_code, _, _ = Cmd["gatk"]["CollectAlignmentSummaryMetrics"][args] & TEE(
            retcode=None
        )
        if return_code:
            self.error("CollectAlignmentSummaryMetrics tool failed.")

        outputs.report = metrics_file
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class CollectRrbsMetrics(Process):
    """Produce metrics for RRBS data based on the methylation status.

    This tool uses reduced representation bisulfite sequencing (Rrbs)
    data to determine cytosine methylation status across all reads of
    a genomic DNA sequence.

    Tool is wrapped by GATK4. See GATK
    CollectRrbsMetrics for more information.
    """

    slug = "rrbs-metrics"
    name = "Picard CollectRrbsMetrics"
    category = "Picard"
    process_type = "data:picard:rrbs"
    version = "2.3.0"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "memory": 32768,
        },
    }
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields for CollectRrbsMetrics."""

        bam = DataField("alignment:bam", label="Alignment BAM file")
        genome = DataField("seq:nucleotide", label="Genome")
        min_quality = IntegerField(
            label="Threshold for base quality of a C base before it is considered",
            default=20,
        )
        next_base_quality = IntegerField(
            label="Threshold for quality of a base next to a C before the C base is considered",
            default=10,
        )
        # Field name keeps the historical typo ("lenght") for backward
        # compatibility with existing callers and stored data.
        min_lenght = IntegerField(label="Minimum read length", default=5)
        mismatch_rate = FloatField(
            label="Maximum fraction of mismatches in a read to be considered (Range: 0 and 1)",
            default=0.1,
        )
        validation_stringency = StringField(
            label="Validation stringency",
            description="Validation stringency for all SAM files read by this "
            "program. Setting stringency to SILENT can improve "
            "performance when processing a BAM file in which "
            "variable-length data (read, qualities, tags) do not "
            "otherwise need to be decoded. Default is STRICT.",
            choices=[
                ("STRICT", "STRICT"),
                ("LENIENT", "LENIENT"),
                ("SILENT", "SILENT"),
            ],
            default="STRICT",
        )
        assume_sorted = BooleanField(
            label="Sorted BAM file",
            description="If true the sort order in the header file will be ignored.",
            default=False,
        )

    class Output:
        """Output fields for CollectRrbsMetrics."""

        report = FileField(label="RRBS summary metrics")
        detailed_report = FileField(label="Detailed RRBS report")
        plot = FileField(label="QC plots")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        basename = os.path.basename(inputs.bam.output.bam.path)
        assert basename.endswith(".bam")
        name = basename[:-4]

        args = [
            "--INPUT",
            inputs.bam.output.bam.path,
            "--REFERENCE",
            inputs.genome.output.fasta.path,
            "--METRICS_FILE_PREFIX",
            name,
            "--C_QUALITY_THRESHOLD",
            inputs.min_quality,
            "--NEXT_BASE_QUALITY_THRESHOLD",
            inputs.next_base_quality,
            "--MINIMUM_READ_LENGTH",
            inputs.min_lenght,
            "--VALIDATION_STRINGENCY",
            inputs.validation_stringency,
            "--ASSUME_SORTED",
            inputs.assume_sorted,
            "--TMP_DIR",
            TMPDIR,
        ]

        if 0 <= inputs.mismatch_rate <= 1:
            args.extend(["--MAX_MISMATCH_RATE", inputs.mismatch_rate])
        else:
            # An out-of-range value was previously ignored silently; warn
            # the user that the tool default is used instead.
            self.warning(
                "Maximum mismatch rate should be between 0 and 1. "
                "Using the tool default value instead."
            )

        return_code, _, _ = Cmd["gatk"]["CollectRrbsMetrics"][args] & TEE(retcode=None)
        if return_code:
            self.error("CollectRrbsMetrics tool failed.")

        # Rename Picard outputs so they carry conventional file extensions.
        report_file = f"{name}_rrbs_summary_metrics.txt"
        os.rename(f"{name}.rrbs_summary_metrics", report_file)
        detailed_file = f"{name}_rrbs_detail_metrics.txt"
        os.rename(f"{name}.rrbs_detail_metrics", detailed_file)
        out_plot = f"{name}_rrbs_qc.pdf"
        os.rename(f"{name}.rrbs_qc.pdf", out_plot)

        outputs.report = report_file
        outputs.detailed_report = detailed_file
        outputs.plot = out_plot
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
import csv
import json
from pathlib import Path
import numpy as np
import pandas as pd
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Persistence,
Process,
SchedulingClass,
StringField,
)
# Map Salmon library-format codes to the strandedness labels used by the
# rest of this module. Codes presumably follow Salmon's library-type
# nomenclature (I/O pairing prefix + S/U strand code) -- verify against
# the Salmon documentation when adding entries.
STRAND_CODES = {
    "IU": "non_specific",
    "U": "non_specific",
    "ISF": "forward",
    "OSF": "forward",
    "SF": "forward",
    "ISR": "reverse",
    "OSR": "reverse",
    "SR": "reverse",
}
def detect_strandedness(bam_path, n_reads, cdna_path, resources_cores):
    """Detect strandedness using SALMON tool.

    Run the ``detect_strandedness.sh`` helper script and map the library
    format code reported by Salmon (in ``results/lib_format_counts.json``)
    to one of the labels in ``STRAND_CODES``. Return ``None`` when the
    reported code is not recognized.
    """
    Cmd["detect_strandedness.sh"]([bam_path, n_reads, cdna_path, resources_cores])
    # Close the report file deterministically instead of leaking the
    # handle from an inline open() call.
    with open("results/lib_format_counts.json") as handle:
        expected_format = json.load(handle).get("expected_format", "")
    try:
        return STRAND_CODES[expected_format]
    except KeyError:
        # Unknown or unsupported library-type code.
        return None
def format_ucsc(annotation_path):
    """Convert UCSC annotation to a format suitable for parsing with collapse_annotation.py script.

    UCSC GTF files typically carry only ``gene_id`` in the attributes
    column, so synthetic gene and transcript records are derived from the
    exon entries and all attribute fields are filled with the (prefixed)
    gene id. Returns the name of the written GTF file.
    """

    def add_attributes(x):
        # Every attribute collapse_annotation.py expects is populated from
        # gene_id, because UCSC GTFs do not provide the other fields.
        new_attrs = ""
        new_attrs += f'gene_name "{x.gene_id}"; '
        new_attrs += f'gene_id "{x.gene_id}"; '
        new_attrs += f'transcript_name "{x.gene_id}"; '
        new_attrs += f'transcript_id "{x.gene_id}"; '
        new_attrs += f'gene_type "{x.gene_id}"; '
        new_attrs += f'transcript_type "{x.gene_id}"; '
        new_attrs = new_attrs.strip()
        return new_attrs

    # Columns 0, 2, 3, 4, 6, 8 of a GTF: seqname, feature, start, end,
    # strand, attributes.
    df = pd.read_csv(
        annotation_path,
        sep="\t",
        comment="#",
        header=None,
        usecols=[0, 2, 3, 4, 6, 8],
        low_memory=False,
    )
    df = pd.DataFrame(
        {
            "chromosome": df[0],
            "feature_type": df[2],
            "start": df[3],
            "end": df[4],
            "strand": df[6],
            "gene_id": df[8].str.extract(r'gene_id "(.*?)"', expand=False).values,
        },
    )
    df.replace("", np.nan, inplace=True)
    # Only exon records are kept; gene/transcript spans are derived below.
    df = df[df["feature_type"] == "exon"]
    columns = {
        "chromosome": "first",
        "feature_type": "first",
        "start": "min",
        "end": "max",
        "strand": "first",
        "gene_id": "first",
    }
    # Prefix gene_id with chromosome and strand so that genes mapping to
    # several loci do not collapse into a single record.
    df["gene_id"] = (
        "chr"
        + df["chromosome"].astype(str)
        + "_"
        + df["strand"].astype(str)
        + "_"
        + df["gene_id"].astype(str)
    )
    chromosome_order = df["chromosome"].unique().tolist()
    hierarchy_order = ["gene", "transcript"]
    # Synthesize one gene and one transcript record per gene_id, spanning
    # the min start / max end of its exons.
    # NOTE(review): aggregating the groupby key ("gene_id") via the agg
    # dict relies on the pinned pandas version keeping that column
    # addressable -- verify when upgrading pandas.
    genes_df = df.groupby(["gene_id"]).agg(columns).assign(feature_type="gene")
    transcripts_df = (
        df.groupby(["gene_id"]).agg(columns).assign(feature_type="transcript")
    )
    df = pd.concat([genes_df, transcripts_df, df], ignore_index=True)
    df["attributes"] = df.apply(lambda x: add_attributes(x), axis=1)
    # Preserve the original chromosome order and sort features as
    # gene -> transcript -> exon within each gene.
    df["chromosome_order"] = df["chromosome"].map(lambda x: chromosome_order.index(x))
    df["hierarchy_order"] = df["feature_type"].map(
        lambda x: hierarchy_order.index(x) if x in hierarchy_order else float("inf")
    )
    df = df.sort_values(
        by=["chromosome_order", "gene_id", "hierarchy_order"], ascending=True
    )
    out_df = pd.DataFrame(
        {
            "chromosome": df["chromosome"],
            "annotation_source": "UCSC",
            "feature_type": df["feature_type"],
            "start": df["start"],
            "end": df["end"],
            "score": ".",
            "strand": df["strand"],
            "genomic_phase": ".",
            "attributes": df["attributes"],
        },
    )
    out_filename = "ucsc_formatted_annotation.gtf"
    # QUOTE_NONE keeps the double quotes inside the attributes column intact.
    out_df.to_csv(
        out_filename,
        sep="\t",
        header=False,
        index=False,
        quoting=csv.QUOTE_NONE,
    )
    return out_filename
class QcRnaseqc(Process):
    """RNA-SeQC QC analysis.

    An efficient new version of RNA-SeQC that computes a comprehensive set of metrics
    for characterizing samples processed by a wide range of protocols.
    It also quantifies gene- and exon-level expression,
    enabling effective quality control of large-scale RNA-seq datasets.

    More information can be found in the
    [GitHub repository](https://github.com/getzlab/rnaseqc)
    and in the [original paper](https://academic.oup.com/bioinformatics/article/37/18/3048/6156810?login=false).
    """

    slug = "rnaseqc-qc"
    name = "RNA-SeQC"
    process_type = "data:rnaseqc:qc"
    version = "1.1.0"
    category = "QC"
    data_name = "{{ alignment|name|default('?') }}"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/qc:1.1.0"}
        },
        "resources": {"cores": 1, "memory": 32768, "storage": 100},
    }
    entity = {
        "type": "sample",
        "input": "alignment",
    }

    class Input:
        """Input fields."""

        alignment = DataField("alignment:bam", label="Input aligned reads (BAM file)")
        annotation = DataField(
            "annotation:gtf",
            label="Annotation file (GTF)",
            description="The input GTF file containing features to check the bam against. "
            "The file should include gene_id in the attributes column for all entries. "
            "During the process the file is formatted so the transcript_id matches the gene_id. "
            "Exons are merged to remove overlaps and exon_id field is then "
            "matched with gene_id including the consecutive exon number.",
        )

        class RnaseqcOptions:
            """RNA-SeQC options."""

            mapping_quality = IntegerField(
                label="Mapping quality [--mapping-quality]",
                default=255,
                description="Set the lower bound on read quality for exon coverage counting. "
                "Reads below this number are excluded from coverage metrics.",
            )
            base_mismatch = IntegerField(
                label="Base mismatch [--base-mismatch]",
                default=6,
                description="Set the maximum number of allowed mismatches "
                "between a read and the reference sequence. "
                "Reads with more than this number of mismatches "
                "are excluded from coverage metrics.",
            )
            offset = IntegerField(
                label="Offset [--offset]",
                default=150,
                description="Set the offset into the gene for the 3' and 5' windows in bias calculation. "
                "A positive value shifts the 3' and 5' windows towards each other, "
                "while a negative value shifts them apart.",
            )
            window_size = IntegerField(
                label="Window size [--window-size]",
                default=100,
                description="Set the offset into the gene for the 3' and 5' windows in bias calculation.",
            )
            # Label fixed from a copy-paste of "Window size".
            gene_length = IntegerField(
                label="Gene length [--gene-length]",
                default=600,
                description="Set the minimum size of a gene for bias calculation. "
                "Genes below this size are ignored in the calculation.",
            )
            detection_threshold = IntegerField(
                label="Detection threshold [--detection-threshold]",
                default=5,
                description="Number of counts on a gene to consider the gene 'detected'. "
                "Additionally, genes below this limit are excluded from 3' bias computation.",
            )
            exclude_chimeric = BooleanField(
                label="Exclude chimeric reads [--exclude-chimeric]",
                default=False,
                description="Exclude chimeric reads from the read counts.",
            )

        class StrandDetectionOptions:
            """Strand detection options."""

            stranded = StringField(
                label="Assay type [--stranded]",
                default="non_specific",
                choices=[
                    ("non_specific", "Strand non-specific"),
                    # Label fixed: it previously duplicated the FR label.
                    ("RF", "Strand-specific reverse then forward"),
                    ("FR", "Strand-specific forward then reverse"),
                    ("auto", "Detect automatically"),
                ],
            )
            cdna_index = DataField(
                "index:salmon",
                label="cDNA index file",
                required=False,
                hidden="strand_detection_options.stranded != 'auto'",
            )
            n_reads = IntegerField(
                label="Number of reads in subsampled alignment file. "
                "Subsampled reads will be used in strandedness detection",
                default=5000000,
                hidden="strand_detection_options.stranded != 'auto'",
            )

        rnaseqc_options = GroupField(RnaseqcOptions, label="RNA-SeQC options")
        strand_detection_options = GroupField(
            StrandDetectionOptions, label="Strand detection options"
        )

    class Output:
        """Output fields."""

        metrics = FileField(label="metrics")

    def run(self, inputs, outputs):
        """Run the analysis."""
        bam_filename = Path(inputs.alignment.output.bam.path).name

        args = [
            "--mapping-quality",
            inputs.rnaseqc_options.mapping_quality,
            "--base-mismatch",
            inputs.rnaseqc_options.base_mismatch,
            "--offset",
            inputs.rnaseqc_options.offset,
            "--window-size",
            inputs.rnaseqc_options.window_size,
            "--gene-length",
            inputs.rnaseqc_options.gene_length,
            "--detection-threshold",
            inputs.rnaseqc_options.detection_threshold,
            # The flag is repeated on purpose to raise verbosity (-vv).
            "--verbose",
            "--verbose",
        ]
        if inputs.rnaseqc_options.exclude_chimeric:
            args.append("--exclude-chimeric")

        # Detect if aligned reads in BAM file are of single or paired-end type
        # The samtools view command counts the number of reads with the SAM flag "read paired (0x1)"
        if (
            Cmd["samtools"](
                "view", "-c", "-f", "1", inputs.alignment.output.bam.path
            ).strip()
            == "0"
        ):
            args.append("--unpaired")  # Required for single-end libraries
        else:
            # Strandedness handling applies to paired-end libraries only.
            lib_strand = ""
            if inputs.strand_detection_options.stranded == "auto":
                lib_strand = detect_strandedness(
                    bam_path=inputs.alignment.output.bam.path,
                    n_reads=inputs.strand_detection_options.n_reads,
                    cdna_path=inputs.strand_detection_options.cdna_index.output.index.path,
                    resources_cores=self.requirements.resources.cores,
                )
                if lib_strand is None:
                    self.error(
                        "Library strandedness autodetection failed. Use manual selection options instead."
                    )
            if (
                inputs.strand_detection_options.stranded == "FR"
                or lib_strand == "forward"
            ):
                args.extend(["--stranded", "FR"])
            elif (
                inputs.strand_detection_options.stranded == "RF"
                or lib_strand == "reverse"
            ):
                args.extend(["--stranded", "RF"])

        self.progress(0.3)

        # UCSC annotations need reformatting before they can be collapsed.
        if inputs.annotation.output.source == "UCSC":
            collapse_args = [
                format_ucsc(annotation_path=inputs.annotation.output.annot.path),
                "collapsed_annotation.gtf",
            ]
        else:
            collapse_args = [
                inputs.annotation.output.annot.path,
                "collapsed_annotation.gtf",
            ]
        if "--stranded" in args:
            collapse_args.append("--collapse_only")

        # Collapsing the annotation with collapse_annotation.py script (included in the qc docker image)
        return_code, _, _ = Cmd["collapse_annotation.py"][collapse_args] & TEE(
            retcode=None
        )
        if return_code:
            self.error("Collapse of GTF file failed.")
        self.progress(0.5)

        args.extend(
            [
                "collapsed_annotation.gtf",
                inputs.alignment.output.bam.path,
                "rnaseqc_output",
            ]
        )
        return_code, _, _ = Cmd["rnaseqc"][args] & TEE(retcode=None)
        if return_code:
            self.error("QC analysis failed.")

        outputs.metrics = f"rnaseqc_output/{bam_filename}.metrics.tsv"
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
Process,
SchedulingClass,
StringField,
)
class InsertSizeMetrics(Process):
    """Collect metrics about the insert size of a paired-end library.

    Tool from Picard, wrapped by GATK4. See GATK
    CollectInsertSizeMetrics for more information.
    """

    slug = "insert-size"
    name = "Picard InsertSizeMetrics"
    category = "Picard"
    process_type = "data:picard:insert"
    version = "2.3.0"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields for InsertSizeMetrics."""

        bam = DataField("alignment:bam", label="Alignment BAM file")
        genome = DataField("seq:nucleotide", label="Genome")
        minimum_fraction = FloatField(
            label="Minimum fraction of reads in a category to be considered ",
            description="When generating the histogram, discard any data "
            "categories (out of FR, TANDEM, RF) that have fewer than this "
            "fraction of overall reads (Range: 0 and 0.5).",
            default=0.05,
        )
        include_duplicates = BooleanField(
            label="Include reads marked as duplicates in the insert size histogram",
            default=False,
        )
        deviations = FloatField(
            label="Deviations limit",
            description="Generate mean, standard deviation and plots by trimming "
            "the data down to MEDIAN + DEVIATIONS*MEDIAN_ABSOLUTE_DEVIATION. "
            "This is done because insert size data typically includes enough "
            "anomalous values from chimeras and other artifacts to make the "
            "mean and standard deviation grossly misleading regarding the real "
            "distribution.",
            default=10.0,
        )
        validation_stringency = StringField(
            label="Validation stringency",
            description="Validation stringency for all SAM files read by this "
            "program. Setting stringency to SILENT can improve "
            "performance when processing a BAM file in which "
            "variable-length data (read, qualities, tags) do not "
            "otherwise need to be decoded. Default is STRICT.",
            choices=[
                ("STRICT", "STRICT"),
                ("LENIENT", "LENIENT"),
                ("SILENT", "SILENT"),
            ],
            default="STRICT",
        )
        assume_sorted = BooleanField(
            label="Sorted BAM file",
            description="If True, the sort order in the header file will be ignored.",
            default=False,
        )

    class Output:
        """Output fields for InsertSizeMetrics."""

        report = FileField(label="Insert size metrics")
        plot = FileField(label="Insert size histogram")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        basename = os.path.basename(inputs.bam.output.bam.path)
        assert basename.endswith(".bam")
        name = basename[:-4]
        metrics_file = f"{name}_insert_size_metrics.txt"
        histogram_file = f"{name}_insert_size.pdf"

        args = [
            "--INPUT",
            inputs.bam.output.bam.path,
            "--OUTPUT",
            metrics_file,
            "--Histogram_FILE",
            histogram_file,
            "--REFERENCE_SEQUENCE",
            inputs.genome.output.fasta.path,
            "--DEVIATIONS",
            inputs.deviations,
            "--INCLUDE_DUPLICATES",
            inputs.include_duplicates,
            "--VALIDATION_STRINGENCY",
            inputs.validation_stringency,
            "--ASSUME_SORTED",
            inputs.assume_sorted,
            "--TMP_DIR",
            TMPDIR,
        ]

        # Out-of-range values are clamped to 0 with a warning instead of
        # failing the Picard run.
        if 0 <= inputs.minimum_fraction <= 0.5:
            args.extend(["--MINIMUM_PCT", inputs.minimum_fraction])
        else:
            self.warning(
                "Minimum fraction of reads should be between 0 and 0.5. "
                "Setting minimum fraction of reads to 0."
            )
            args.extend(["--MINIMUM_PCT", 0])

        return_code, _, _ = Cmd["gatk"]["CollectInsertSizeMetrics"][args] & TEE(
            retcode=None
        )
        if return_code:
            self.error("CollectInsertSizeMetrics tool failed.")

        outputs.report = metrics_file
        outputs.plot = histogram_file
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
import gzip
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import pysam
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
ListField,
Persistence,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
# Sub-field names of the VCF "ANN" annotation entry, in the order this
# module expands them into DataFrame columns. Presumably these follow the
# SnpEff ANN field specification -- verify against the annotation tool in
# use before reordering.
ANN_COLUMNS = [
    "Allele",
    "Annotation",
    "Annotation_Impact",
    "Gene_Name",
    "Gene_ID",
    "Feature_Type",
    "Feature_ID",
    "Transcript_BioType",
    "Rank",
    "HGVS.c",
    "HGVS.p",
    "cDNA.pos/cDNA.length",
    "CDS.pos/CDS.length",
    "AA.pos/AA.length",
    "Distance",
    "ERRORS/WARNINGS/INFO",
]
# Three-letter codes of the 20 standard amino acids; used to validate the
# amino-acid changes given in input mutation strings (see get_mutations).
AMINOACIDS = [
    "Arg",
    "His",
    "Lys",
    "Asp",
    "Glu",
    "Ser",
    "Thr",
    "Asn",
    "Gln",
    "Gly",
    "Pro",
    "Cys",
    "Ala",
    "Val",
    "Ile",
    "Leu",
    "Met",
    "Phe",
    "Tyr",
    "Trp",
]
def get_output_table(mutations, variants_table, output_table, warning):
    """Prepare output table.

    Select from ``variants_table`` the rows matching the requested
    mutations: for a gene with listed mutations, only rows whose HGVS.p
    contains one of them are kept; for a gene with an empty mutation
    list, all of the gene's rows are kept.

    :param mutations: mapping of gene name -> list of mutations (may be empty)
    :param variants_table: annotated variants as a pandas DataFrame
    :param output_table: DataFrame the selected rows are appended to
    :param warning: callable used to report an empty selection
    :return: tuple of (selection DataFrame, list of input gene names)
    """
    genes = list(mutations)
    # Collect all selections first and concatenate once -- repeated
    # pd.concat inside the loop is quadratic in the number of rows.
    selections = []
    for gene, gene_mutations in mutations.items():
        if gene_mutations:
            for mutation in gene_mutations:
                # NOTE: str.contains treats the mutation as a regex pattern.
                selections.append(
                    variants_table.loc[
                        (variants_table["HGVS.p"].str.contains(mutation))
                        & (variants_table["Gene_Name"] == gene)
                    ]
                )
        else:
            selections.append(
                variants_table.loc[variants_table["Gene_Name"] == gene]
            )
    if selections:
        output_table = pd.concat(
            [output_table] + selections, ignore_index=True, axis=0
        )
    if output_table.empty and not variants_table.empty:
        warning("No variants present for the input set of mutations.")
    return output_table, genes
def get_mutations(input_mutations, error):
    """Parse user-supplied mutation strings into a gene -> mutations dict.

    Each entry is either ``"GENE"`` (all variants of the gene) or
    ``"GENE: Mut1, Mut2"`` (specific mutations only). Amino acid prefixes
    are validated against the 20 standard three-letter codes.
    """
    mutations = defaultdict(list)
    for raw_entry in input_mutations:
        entry = raw_entry.replace(" ", "")
        parts = entry.split(sep=":")
        gene = parts[0]
        if len(parts) == 2:
            for aminoacid in parts[1].split(sep=","):
                if aminoacid[:3] not in AMINOACIDS:
                    error(
                        f"The input amino acid {aminoacid[:3]} is in the wrong format "
                        "or is not among the 20 standard amino acids."
                    )
                mutations[gene].append(aminoacid)
        elif len(parts) == 1:
            mutations[gene] = []
        else:
            error("Wrong input format for mutations.")
    return mutations
def prepare_geneset(geneset):
    """Read a gzipped gene set file into a list of gene identifiers."""
    with gzip.open(geneset, "rb") as handle:
        return [raw_line.decode().rstrip() for raw_line in handle]
def prepare_variants_table(variants_table, vcf_fields, ann_fields, gt_fields, warning):
    """Prepare variants table.

    Read the GATK VariantsToTable TSV, expand the SnpEff ANN column into
    separate columns, collapse per-transcript rows back to one row per
    variant and reconcile sample-level (FORMAT) fields with the
    variant-level columns.

    :param variants_table: path to the TSV produced by GATK VariantsToTable
    :param vcf_fields: variant-level columns to keep (caller removes "ANN"
        from this list before calling)
    :param ann_fields: SnpEff ANN sub-columns to keep
    :param gt_fields: FORMAT/genotype fields requested for the sample
    :param warning: callable used to report non-fatal issues
    """
    variants_table = pd.read_csv(
        variants_table,
        sep="\t",
        header=0,
        float_precision="round_trip",
    )
    if variants_table.empty:
        warning("There are no variants in the input VCF file.")
        # Preserve the expected output schema even when no variants exist,
        # so downstream code can rely on the ANN sub-columns being present.
        for ann in ann_fields:
            variants_table[ann] = None
        variants_table.drop("ANN", axis=1, inplace=True)
    else:
        # Genotype columns come prefixed with the sample name; here the
        # sample is assumed to be named SAMPLENAME1 -- TODO confirm where
        # the VCF sample is renamed upstream.
        variants_table = ann_field_to_df(
            variants_table=variants_table,
            ann_fields=ann_fields,
            vcf_fields=vcf_fields,
            gt_fields=[f"SAMPLENAME1.{field}" for field in gt_fields],
        )
        # Collapse multiple transcripts of one variant to one line
        # (group on everything except Feature_ID and join the transcript
        # IDs with commas).
        variants_table = (
            variants_table.groupby(
                vcf_fields
                + [field for field in ann_fields if field != "Feature_ID"]
                + [
                    col
                    for col in variants_table.columns
                    if col.startswith("SAMPLENAME1")
                ],
                dropna=False,
            )["Feature_ID"]
            .apply(",".join)
            .reset_index()
        )
        # Migrate sample-level filter value to the general FILTER field
        if "FT" in gt_fields:
            # Site filter is non-PASS: append the sample-level value.
            variants_table["FILTER"] = np.where(
                (
                    ~variants_table["SAMPLENAME1.FT"].isna()
                    & ~variants_table["FILTER"].str.contains("PASS")
                ),
                variants_table["FILTER"] + ";" + variants_table["SAMPLENAME1.FT"],
                variants_table["FILTER"],
            )
            # Site filter is PASS: the sample-level value replaces it.
            variants_table["FILTER"] = np.where(
                (
                    ~variants_table["SAMPLENAME1.FT"].isna()
                    & variants_table["FILTER"].str.contains("PASS")
                ),
                variants_table["SAMPLENAME1.FT"],
                variants_table["FILTER"],
            )
            variants_table.drop(["SAMPLENAME1.FT"], axis=1, inplace=True)
        if "DP" in gt_fields:
            # Genotype-level DP takes precedence over the site-level DP.
            if "DP" in vcf_fields:
                variants_table.drop(["DP"], axis=1, inplace=True)
            variants_table.rename(columns={"SAMPLENAME1.DP": "DP"}, inplace=True)
    return variants_table
def ann_field_to_df(variants_table, ann_fields, vcf_fields, gt_fields=None):
    """Transform SnpEff ANN field to multiple rows and columns.

    Each variant line carries one comma-separated ANN entry per
    transcript; the entry itself is pipe-separated. The table is first
    exploded to one row per transcript, then the ANN string is split
    into the standard SnpEff sub-columns.
    """
    # One row per transcript: split ANN on commas and stack the pieces
    # back onto the variant rows.
    transcript_ann = (
        variants_table["ANN"]
        .str.split(",", expand=True)
        .stack()
        .reset_index(level=1, drop=True)
        .rename("ANN")
    )
    expanded = variants_table.drop("ANN", axis=1).join(transcript_ann)
    # Split the pipe-separated ANN entry into named sub-columns.
    expanded[ANN_COLUMNS] = expanded["ANN"].str.split("|", expand=True)
    # Keep only the requested subset of columns.
    wanted_columns = vcf_fields + ann_fields + (gt_fields or [])
    expanded = expanded[wanted_columns]
    expanded.reset_index(inplace=True, drop=True)
    return expanded
def get_depth(variants_table, bam):
    """Calculate depth for every positions in the variants table.

    Adds per-base (A/C/G/T) and total depth columns in place, sorts the
    table by position and writes an intermediate "mutations.tsv" file.
    Returns ``None`` (the return value of ``DataFrame.to_csv`` with a
    path argument).
    """
    base_columns = ["Base_A", "Base_C", "Base_G", "Base_T"]
    if variants_table.empty:
        # Keep the output schema stable even without variants.
        for column in base_columns + ["Total_depth"]:
            variants_table[column] = None
    else:
        for base_index, column in enumerate(base_columns):
            # count_coverage returns one array per base (A, C, G, T);
            # a single-position window is queried, hence [base_index][0].
            variants_table[column] = variants_table.apply(
                lambda row, i=base_index: bam.count_coverage(
                    contig=str(row["CHROM"]), start=row["POS"] - 1, stop=row["POS"]
                )[i][0],
                axis=1,
            )
        variants_table["Total_depth"] = variants_table[base_columns].sum(axis=1)
        variants_table["POS"] = variants_table["POS"].astype(int)
        variants_table.sort_values(by=["POS"], inplace=True)
    return variants_table.to_csv("mutations.tsv", sep="\t")
class MutationsTable(ProcessBio):
    """Report mutations in a table from RNA-seq Variant Calling Pipeline.

    This process reports only mutations selected in the process input.
    For example, if you want to know, if the mutation Gly12X in the gene
    KRAS is present in the sample, use VCF file annotated with SnpEff as
    input together with the desired gene and mutation. One of the inputs
    should also be annotated dbSNP VCF file. The process also calculates
    sequencing coverage at positions of desired mutations, so one of the
    inputs should also be a BAM file from BQSR or SplitNCigarReads process.
    """

    slug = "mutations-table"
    name = "Mutations table"
    process_type = "data:mutationstable"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"},
        },
        "resources": {
            "cores": 1,
            # NOTE(review): 8196 looks like a typo for 8192 -- confirm
            # before changing.
            "memory": 8196,
        },
    }
    entity = {
        "type": "sample",
    }
    category = "WGS"
    data_name = "{{ variants|name|default('?') }}"
    version = "2.1.0"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED

    class Input:
        """Input fields to ReportVariants."""

        variants = DataField(
            data_type="variants:vcf:snpeff",
            label="Annotated variants",
            description="Variants annotated with SnpEff. VCF file used for "
            "annotation should only be filtered but should include the filtered "
            "variants.",
        )
        mutations = ListField(
            StringField(),
            label="Gene and its mutations",
            description="Insert the gene you are interested in, together "
            "with mutations. First enter the name of the gene and then "
            "the mutations. Seperate gene from mutations with ':' and mutations "
            "with ','. Example of an input: 'KRAS: Gly12, Gly61'. Press enter "
            "after each input (gene + mutations). NOTE: Field only accepts "
            "three character amino acid symbols.",
            disabled="geneset",
            required=False,
        )
        geneset = DataField(
            data_type="geneset",
            label="Gene set",
            description="Select a gene set with genes you are interested in. "
            "Only variants of genes in the selected gene set will be in the "
            "output.",
            disabled="mutations",
            required=False,
        )
        vcf_fields = ListField(
            StringField(),
            label="Select VCF fields",
            description="The name of a standard VCF field or an "
            "INFO field to include in the output table. "
            "The field can be any standard VCF column (e.g. CHROM, ID, QUAL) "
            "or any annotation name in the INFO field (e.g. AC, AF). "
            "Required fields are CHROM, POS, ID, REF and ANN. If your variants "
            "file was annotated with clinvar information then fields CLNDN, "
            "CLNSIG and CLNSIGCONF might be of your interest.",
            default=[
                "CHROM",
                "POS",
                "ID",
                "QUAL",
                "REF",
                "ALT",
                "DP",
                "FILTER",
                "ANN",
            ],
        )
        ann_fields = ListField(
            StringField(),
            label="ANN fields to use",
            description="Only use specific fields from the SnpEff ANN "
            "field. All available fields: Allele | Annotation | Annotation_Impact "
            "| Gene_Name | Gene_ID | Feature_Type | Feature_ID | Transcript_BioType "
            "| Rank | HGVS.c | HGVS.p | cDNA.pos / cDNA.length | CDS.pos / CDS.length "
            "| AA.pos / AA.length | Distance | ERRORS / WARNINGS / INFO' ."
            "Fields are seperated by '|'. For more information, follow this [link]"
            "(https://pcingola.github.io/SnpEff/se_inputoutput/#ann-field-vcf-output-files).",
            default=[
                "Allele",
                "Annotation",
                "Annotation_Impact",
                "Gene_Name",
                "Feature_ID",
                "HGVS.p",
            ],
        )
        bam = DataField(
            data_type="alignment:bam",
            label="Bam file used for coverage calculation",
            description="Output BAM file from BQSR or SplitNCigarReads should be used. BAM "
            "file should be from the same sample as the input variants file.",
        )

        class Advanced:
            """Advanced options."""

            split_alleles = BooleanField(
                label="Split multi-allelic records into multiple lines",
                description="By default, a variant record with multiple "
                "ALT alleles will be summarized in one line, with per "
                "alt-allele fields (e.g. allele depth) separated by commas."
                "This may cause difficulty when the table is loaded by "
                "an R script, for example. Use this flag to write multi-allelic "
                "records on separate lines of output.",
                default=True,
            )
            show_filtered = BooleanField(
                label="Include filtered records in the output",
                default=True,
                description="Include filtered records in the output of the GATK "
                "VariantsToTable.",
            )
            gf_fields = ListField(
                StringField(),
                label="Include FORMAT/sample-level fields. Note: If you specify DP "
                "from genotype field, it will overwrite the original DP field.",
                default=[
                    "GT",
                ],
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields to ReportVariants."""

        tsv = FileField(
            label="Mutations table",
        )
        genes = ListField(StringField(), label="Input genes")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Validate inputs before spending time on GATK.
        if not inputs.mutations and not inputs.geneset:
            self.error(
                "Mutations or geneset were not specified. You must either enter desired "
                "mutations or select your geneset of interest."
            )
        if not all(
            field in inputs.vcf_fields for field in ["CHROM", "POS", "ID", "REF", "ANN"]
        ):
            self.error(
                "Input VCF fields do not contain all required values. "
                "Required fields are CHROM, POS, ID, REF and ANN."
            )
        # Variants and BAM must come from the same sample, since the BAM
        # is used to compute coverage at the variant positions.
        if inputs.variants.entity.id != inputs.bam.entity.id:
            self.error(
                "Sample ids of input annotated variants and input bam file do not match. "
                f"Annotated variants have sample id {inputs.variants.entity.id}, while bam "
                f"file has sample id {inputs.bam.entity.id}."
            )
        TMPDIR = os.environ.get("TMPDIR")
        variants_table = "variants_table.tsv"
        # Export the annotated VCF into a flat TSV with GATK
        # VariantsToTable (-F: site-level columns, -GF: genotype fields).
        args = [
            "-V",
            inputs.variants.output.vcf.path,
            "-O",
            variants_table,
            "--tmp-dir",
            TMPDIR,
        ]
        for field in inputs.vcf_fields:
            args.extend(["-F", field])
        for gf_field in inputs.advanced.gf_fields:
            args.extend(["-GF", gf_field])
        if inputs.advanced.split_alleles:
            args.append("--split-multi-allelic")
        if inputs.advanced.show_filtered:
            args.append("--show-filtered")
        return_code, stdout, stderr = Cmd["gatk"]["VariantsToTable"][args] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error("GATK VariantsToTable failed.")
        # ANN is consumed by prepare_variants_table (expanded into the
        # ann_fields columns), so it is removed from the plain VCF columns.
        vcf_fields = inputs.vcf_fields
        vcf_fields.remove("ANN")
        variants_table = prepare_variants_table(
            variants_table=variants_table,
            vcf_fields=vcf_fields,
            ann_fields=inputs.ann_fields,
            gt_fields=inputs.advanced.gf_fields,
            warning=self.warning,
        )
        # Build the gene -> mutations mapping, either from the explicit
        # mutation strings or from a gene set (all mutations per gene).
        if inputs.mutations:
            mutations = get_mutations(
                input_mutations=inputs.mutations, error=self.error
            )
        elif inputs.geneset:
            geneset = prepare_geneset(inputs.geneset.output.geneset.path)
            # Map gene IDs from the gene set to gene symbols via the
            # knowledge base, since the variants table uses Gene_Name.
            feature_filters = {
                "source": inputs.geneset.output.source,
                "species": inputs.geneset.output.species,
                "feature_id__in": geneset,
            }
            geneset = [f.name for f in self.feature.filter(**feature_filters)]
            if len(geneset) == 0:
                self.error(
                    "Geneset is either empty or no gene IDs were mapped to gene symbols."
                )
            mutations = defaultdict(list)
            for gene in geneset:
                mutations[gene] = []
        bam = pysam.AlignmentFile(inputs.bam.output.bam.path, "rb")
        # Set up the output dataframe
        output_table = pd.DataFrame()
        output_table, genes = get_output_table(
            mutations=mutations,
            variants_table=variants_table,
            output_table=output_table,
            warning=self.warning,
        )
        # get_depth adds per-base/total depth columns in place; it also
        # writes an intermediate mutations.tsv that is overwritten by the
        # index-free to_csv below -- presumably intentional; TODO confirm.
        get_depth(variants_table=output_table, bam=bam)
        output_table.reset_index(inplace=True, drop=True)
        output_table.to_csv("mutations.tsv", sep="\t", index=False)
        outputs.tsv = "mutations.tsv"
        outputs.genes = genes
        outputs.species = inputs.variants.output.species
        outputs.build = inputs.variants.output.build
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
FloatField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class BamToBedpe(Process):
    """Takes in a BAM file and calculates a normalization factor in BEDPE format.

    Done by sorting with Samtools and transformed with Bedtools.
    """

    slug = "bedtools-bamtobed"
    name = "Bedtools bamtobed"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {"cores": 1, "memory": 8192},
    }
    data_name = "{{ alignment|name|default('?') }}"
    version = "1.3.1"
    process_type = "data:bedpe"
    category = "BAM processing"
    entity = {"type": "sample"}
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields."""

        alignment = DataField("alignment:bam", label="Alignment BAM file")

    class Output:
        """Output fields."""

        bedpe = FileField(label="BEDPE file")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run the analysis."""
        path = inputs.alignment.output.bam.path
        basename = os.path.basename(path)
        assert basename.endswith(".bam")
        name = basename[:-4]
        bedpe_file = f"{name}.bedpe"
        # Sort by read name ("-n") first: per the bedtools docs,
        # bamtobed -bedpe expects mate pairs adjacent in the input.
        samtools_param = ["-n", path]
        bedtools_param = ["-bedpe", "-i"]
        (
            Cmd["samtools"]["sort"][samtools_param]
            | Cmd["bedtools"]["bamtobed"][bedtools_param]
            > bedpe_file
        )()
        # Guard against a silently missing output file (a non-zero exit
        # in the pipeline presumably raises before this -- plumbum default).
        if not os.path.exists(bedpe_file):
            self.error("Converting BAM to BEDPE with Bedtools bamtobed failed.")
        outputs.bedpe = bedpe_file
        outputs.species = inputs.alignment.output.species
        outputs.build = inputs.alignment.output.build
class CalculateBigWig(Process):
    """Calculate bigWig coverage track.

    Deeptools bamCoverage takes an alignment of reads or fragments as
    input (BAM file) and generates a coverage track (bigWig) as output.
    The coverage is calculated as the number of reads per bin, where
    bins are short consecutive counting windows of a defined size. For
    more information is available in the
    [bamCoverage documentation](https://deeptools.readthedocs.io/en/latest/content/tools/bamCoverage.html).
    """

    slug = "calculate-bigwig"
    name = "Calculate coverage (bamCoverage)"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {"cores": 1, "memory": 16384},
    }
    data_name = "{{ alignment|name|default('?') }}"
    version = "2.0.1"
    process_type = "data:coverage:bigwig"
    category = "BAM processing"
    entity = {"type": "sample"}
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields."""

        alignment = DataField("alignment:bam", label="Alignment BAM file")
        bedpe = DataField(
            "bedpe",
            label="BEDPE Normalization factor",
            description="The BEDPE file describes disjoint genome features, "
            "such as structural variations or paired-end sequence alignments. "
            "It is used to estimate the scale factor [--scaleFactor].",
            required=False,
        )
        scale = FloatField(
            label="Scale for the normalization factor",
            description="Magnitude of the scale factor. The scaling factor "
            "[--scaleFactor] is calculated by dividing the scale with the "
            "number of features in BEDPE (scale/(number of features)).",
            disabled="!bedpe",
            default=10000,
        )
        bin_size = IntegerField(
            label="Bin size[--binSize]",
            description="Size of the bins (in bp) for the output bigWig file. "
            "A smaller bin size value will result in a higher resolution of "
            "the coverage track but also in a larger file size.",
            default=50,
        )

    class Output:
        """Output fields."""

        bigwig = FileField(label="Coverage file (bigWig)")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run the analysis."""
        bam_path = Path(inputs.alignment.output.bam.path)
        assert bam_path.name.endswith(".bam")
        name = bam_path.stem
        if inputs.bedpe:
            # Each BEDPE line is one feature; coverage is normalized by
            # the feature count (scale / number of features).
            with open(inputs.bedpe.output.bedpe.path, "rb") as f:
                spike_count = sum(1 for _ in f)
            if spike_count == 0:
                self.error("BEDPE file is empty there were no features found.")
            scale_factor = inputs.scale / spike_count
            out_file = Path(f"{name}.SInorm.bigwig")
        else:
            # No normalization requested -- use raw coverage.
            scale_factor = 1
            out_file = Path(f"{name}.bigwig")
        self.progress(0.1)
        bam_coverage_param = [
            "--bam",
            bam_path,
            "--scaleFactor",
            scale_factor,
            "--outFileName",
            out_file,
            "--numberOfProcessors",
            self.requirements.resources.cores,
            "--outFileFormat",
            "bigwig",
            "--binSize",
            inputs.bin_size,
        ]
        return_code, _, stderr = Cmd["bamCoverage"][bam_coverage_param] & TEE(
            retcode=None
        )
        if return_code:
            print(stderr)
            self.error("Calculating coverage with bamCoverage failed.")
        if not out_file.is_file():
            self.error("Generation of a scaled bigWig file failed.")
        self.progress(0.9)
        outputs.bigwig = str(out_file)
        outputs.species = inputs.alignment.output.species
        outputs.build = inputs.alignment.output.build
from pathlib import Path
from resolwe.process import Data, DataField, ListField, Process, SchedulingClass
def get_label(data, warning):
    """Get relation partition label of data object.

    Return the label of the Replicate-relation partition that matches
    ``data``'s entity, or ``None`` when the object is in no Replicate
    relation. A non-Replicate relation that includes the entity only
    triggers a warning.
    """
    label = None
    for relation in data.relations:
        if relation.category == "Replicate":
            # next() with a default: a Replicate relation that does not
            # include this entity must not raise StopIteration (the
            # original call without a default crashed here).
            match = next(
                (
                    partition.label
                    for partition in relation.partitions
                    if partition.entity_id == data.entity_id
                ),
                None,
            )
            if match is not None:
                label = match
        else:
            in_relation = next(
                (
                    partition
                    for partition in relation.partitions
                    if partition.entity_id == data.entity_id
                ),
                None,
            )
            if in_relation is not None:
                warning(
                    f"Sample {data.entity.name} has defined {relation.category} "
                    "relation. Samples will only be merged based on Replicate relations."
                )
    return label
def create_symlinks(paths):
    """Create symlinks with unique file names.

    Each link is named ``<index>_<original file name>`` so same-named
    files from different directories do not collide. Returns the list
    of link names in input order.
    """
    link_names = []
    for index, source in enumerate(paths):
        link_name = f"{index}_{Path(source).name}"
        Path(link_name).symlink_to(source)
        link_names.append(link_name)
    return link_names
def group_paths(data_objects, warning, second_pair=False):
    """Group read paths grouped by relation labels.

    Returns an ``items()`` view mapping each Replicate label (``None``
    when a sample has no Replicate relation) to the list of FASTQ paths
    of all samples carrying that label. With ``second_pair`` the mate-2
    files are collected instead of mate-1.
    """
    fastq_attribute = "fastq2" if second_pair else "fastq"
    grouped = {}
    for data in data_objects:
        label = get_label(data=data, warning=warning)
        paths = [fastq.path for fastq in getattr(data.output, fastq_attribute)]
        grouped.setdefault(label, []).extend(paths)
    return grouped.items()
class MergeFastqSingle(Process):
    """Merge single-end FASTQs into one sample.

    Samples are merged based on the defined replicate group relations
    and then uploaded as separate samples.
    """

    slug = "merge-fastq-single"
    name = "Merge FASTQ (single-end)"
    process_type = "data:mergereads:single"
    version = "2.2.1"
    category = "FASTQ processing"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "relations": [{"type": "group"}],
    }
    data_name = "Merge FASTQ (single-end)"

    class Input:
        """Input fields to process MergeFastqSingle."""

        reads = ListField(
            DataField(data_type="reads:fastq:single:"),
            label="Select relations",
            description="Define and select replicate relations.",
            relation_type="group",
        )

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Check if user has selected multiple read objects from the same sample
        data_by_sample = {}
        for data in inputs.reads:
            if data.entity_id in data_by_sample:
                self.warning(
                    "There are multiple read objects for "
                    f"{data.entity_name}. Using only the first one."
                )
                # Keep the object with the lowest id (the oldest one).
                if int(data.id) < int(data_by_sample[data.entity_id].id):
                    data_by_sample[data.entity_id] = data
            else:
                data_by_sample[data.entity_id] = data
        reads = [*data_by_sample.values()]
        labeled_reads = group_paths(data_objects=reads, warning=self.warning)
        for label, paths in labeled_reads:
            # A None label means the sample is not in any Replicate relation.
            if label is None:
                self.error(
                    "Missing replicate relations. Please make sure you have selected and defined "
                    "replicate sample relations."
                )
            # Unique link names avoid clashes between same-named lanes
            # coming from different samples.
            symlinks = create_symlinks(paths=paths)
            self.run_process(
                slug="upload-fastq-single",
                inputs={
                    "src": symlinks,
                    "merge_lanes": True,
                },
            )
            # The merged sample is looked up by the first file's name --
            # presumably the upload process names the sample after it;
            # verify against upload-fastq-single.
            merged_objects = Data.filter(entity__name=symlinks[0])
            # Sort by id and select the newest data object.
            merged_objects.sort(key=lambda x: x.id)
            merged_data = merged_objects[-1]
            merged_data.name = label
            merged_data.entity.name = label
class MergeFastqPaired(Process):
    """Merge paired-end FASTQs into one sample.

    Samples are merged based on the defined replicate group relations
    and then uploaded as separate samples.
    """

    slug = "merge-fastq-paired"
    name = "Merge FASTQ (paired-end)"
    process_type = "data:mergereads:paired"
    version = "2.2.1"
    category = "FASTQ processing"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "relations": [{"type": "group"}],
    }
    data_name = "Merge FASTQ (paired-end)"

    class Input:
        """Input fields to process MergeFastqPaired."""

        reads = ListField(
            DataField(data_type="reads:fastq:paired:"),
            label="Select relations",
            description="Define and select Replicate relations.",
            relation_type="group",
        )

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Check if user has selected multiple read objects from the same sample
        data_by_sample = {}
        for data in inputs.reads:
            if data.entity_id in data_by_sample:
                self.warning(
                    "There are multiple read objects for "
                    f"{data.entity_name}. Using only the first one."
                )
                # Keep the object with the lowest id (the oldest one).
                if int(data.id) < int(data_by_sample[data.entity_id].id):
                    data_by_sample[data.entity_id] = data
            else:
                data_by_sample[data.entity_id] = data
        reads = [*data_by_sample.values()]
        # Mate-1 and mate-2 paths are grouped separately but iterate the
        # same samples, so the two views stay aligned when zipped.
        labeled_reads = group_paths(data_objects=reads, warning=self.warning)
        labeled_reads_2 = group_paths(
            data_objects=reads, second_pair=True, warning=self.warning
        )
        for (label, paths), (_, paths_2) in zip(labeled_reads, labeled_reads_2):
            # A None label means the sample is not in any Replicate relation.
            if label is None:
                self.error(
                    "Missing replicate relations. Please make sure you have selected and defined "
                    "replicate sample relations."
                )
            # Unique link names avoid clashes between same-named lanes
            # coming from different samples.
            symlinks = create_symlinks(paths=paths)
            symlinks_2 = create_symlinks(paths=paths_2)
            self.run_process(
                slug="upload-fastq-paired",
                inputs={
                    "src1": symlinks,
                    "src2": symlinks_2,
                    "merge_lanes": True,
                },
            )
            # The merged sample is looked up by the first file's name --
            # presumably the upload process names the sample after it;
            # verify against upload-fastq-paired.
            merged_objects = Data.filter(entity__name=symlinks[0])
            # Sort by id and select the newest data object.
            merged_objects.sort(key=lambda x: x.id)
            merged_data = merged_objects[-1]
            merged_data.name = label
            merged_data.entity.name = label
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
def replace_metrics_class(fname):
    """Replace metrics class name.

    This temporary fix is needed due to compatibility issue with GATK
    4.1.2.0 and MultiQC 1.8. MultiQC searches for CollectWgsMetrics
    instead of WgsMetrics in the report file. Note that this should be
    resolved in the 1.9 release of MultiQC.
    """
    target = "## METRICS CLASS\tpicard.analysis.WgsMetrics\n"
    replacement = "## METRICS CLASS\tCollectWgsMetrics$WgsMetrics\n"
    # Read everything first, then rewrite in place; the original if/else
    # duplicated the append logic for matching and non-matching lines.
    with open(fname, "r") as report:
        lines = report.readlines()
    with open(fname, "w") as report:
        report.writelines(
            replacement if line == target else line for line in lines
        )
# NOTE(review): the class is named InsertSizeMetrics but it wraps Picard
# CollectWgsMetrics (slug "wgs-metrics") -- likely a copy/paste leftover.
# Renaming would affect importers, so the name is left unchanged here.
class InsertSizeMetrics(Process):
    """Collect metrics about coverage of whole genome sequencing.

    Tool from Picard, wrapped by GATK4. See GATK
    CollectWgsMetrics for more information.
    """

    slug = "wgs-metrics"
    name = "Picard WGS Metrics"
    category = "Picard"
    process_type = "data:picard:wgsmetrics"
    version = "2.4.0"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields for CollectWgsMetrics."""

        bam = DataField("alignment:bam", label="Alignment BAM file")
        genome = DataField("seq:nucleotide", label="Genome")
        read_length = IntegerField(label="Average read length", default=150)
        create_histogram = BooleanField(
            label="Include data for base quality histogram in the metrics file",
            default=False,
        )

        class Options:
            """Options."""

            min_map_quality = IntegerField(
                label="Minimum mapping quality for a read to contribute coverage",
                default=20,
            )
            min_quality = IntegerField(
                label="Minimum base quality for a base to contribute coverage",
                description="N bases will be treated as having a base quality of "
                "negative infinity and will therefore be excluded from coverage "
                "regardless of the value of this parameter.",
                default=20,
            )
            coverage_cap = IntegerField(
                label="Maximum coverage cap",
                description="Treat positions with coverage exceeding this value as "
                "if they had coverage at this set value.",
                default=250,
            )
            accumulation_cap = IntegerField(
                label="Ignore positions with coverage above this value",
                description="At positions with coverage exceeding this value, "
                "completely ignore reads that accumulate beyond this value",
                default=100000,
            )
            count_unpaired = BooleanField(
                label="Count unpaired reads and paired reads with one end unmapped",
                default=False,
            )
            sample_size = IntegerField(
                label="Sample Size used for Theoretical Het Sensitivity sampling",
                default=10000,
            )
            validation_stringency = StringField(
                label="Validation stringency",
                description="Validation stringency for all SAM files read by this "
                "program. Setting stringency to SILENT can improve "
                "performance when processing a BAM file in which "
                "variable-length data (read, qualities, tags) do not "
                "otherwise need to be decoded. Default is STRICT.",
                choices=[
                    ("STRICT", "STRICT"),
                    ("LENIENT", "LENIENT"),
                    ("SILENT", "SILENT"),
                ],
                default="STRICT",
            )

        options = GroupField(Options, label="Options")

    class Output:
        """Output fields for CollectWgsMetrics."""

        report = FileField(label="WGS metrics report")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")
        basename = os.path.basename(inputs.bam.output.bam.path)
        assert basename.endswith(".bam")
        name = basename[:-4]
        metrics_file = f"{name}_wgs_metrics.txt"
        args = [
            "--INPUT",
            inputs.bam.output.bam.path,
            "--OUTPUT",
            metrics_file,
            "--REFERENCE_SEQUENCE",
            inputs.genome.output.fasta.path,
            "--READ_LENGTH",
            inputs.read_length,
            "--INCLUDE_BQ_HISTOGRAM",
            inputs.create_histogram,
            "--MINIMUM_MAPPING_QUALITY",
            inputs.options.min_map_quality,
            "--MINIMUM_BASE_QUALITY",
            inputs.options.min_quality,
            "--COVERAGE_CAP",
            inputs.options.coverage_cap,
            "--LOCUS_ACCUMULATION_CAP",
            inputs.options.accumulation_cap,
            "--COUNT_UNPAIRED",
            inputs.options.count_unpaired,
            "--SAMPLE_SIZE",
            inputs.options.sample_size,
            "--VALIDATION_STRINGENCY",
            inputs.options.validation_stringency,
            "--TMP_DIR",
            TMPDIR,
        ]
        return_code, _, _ = Cmd["gatk"]["CollectWgsMetrics"][args] & TEE(retcode=None)
        if return_code:
            self.error("CollectWgsMetrics tool failed.")
        # Rewrite the metrics-class header so MultiQC (<=1.8) recognizes
        # the report; see replace_metrics_class above.
        replace_metrics_class(metrics_file)
        outputs.report = metrics_file
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
FileHtmlField,
ListField,
Process,
StringField,
)
class ReverseComplementSingle(Process):
    """Reverse complement single-end FASTQ reads file using Seqtk."""

    slug = "seqtk-rev-complement-single"
    process_type = "data:reads:fastq:single:seqtk"
    name = "Reverse complement FASTQ (single-end)"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"},
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }
    entity = {
        "type": "sample",
    }
    data_name = "{{ reads|name|default('?') }}"
    version = "1.3.1"
    category = "FASTQ processing"

    class Input:
        """Input fields to process ReverseComplementSingle."""

        reads = DataField("reads:fastq:single", label="Reads")

    class Output:
        """Output fields."""

        fastq = ListField(FileField(), label="Reverse complemented FASTQ file")
        fastqc_url = ListField(FileHtmlField(), label="Quality control with FastQC")
        fastqc_archive = ListField(FileField(), label="Download FastQC archive")

    def run(self, inputs, outputs):
        """Run the analysis."""
        basename = os.path.basename(inputs.reads.output.fastq[0].path)
        assert basename.endswith(".fastq.gz")
        name = basename[:-9]
        complemented_name = f"{name}_complemented.fastq"
        # Concatenate multilane reads
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
            > "input_reads.fastq.gz"
        )()
        # Reverse complement reads
        (Cmd["seqtk"]["seq", "-r", "input_reads.fastq.gz"] > complemented_name)()
        # stderr is inspected for failure markers -- presumably because
        # FastQC does not always signal these failures via its exit code.
        _, _, stderr = (
            Cmd["fastqc"][complemented_name, "--extract", "--outdir=./"] & TEE
        )
        if "Failed to process" in stderr or "Skipping" in stderr:
            self.error("Failed while processing with FastQC.")
        (Cmd["gzip"][complemented_name])()
        outputs.fastq = [f"{complemented_name}.gz"]
        outputs.fastqc_url = [f"{name}_complemented_fastqc.html"]
        outputs.fastqc_archive = [f"{name}_complemented_fastqc.zip"]
class ReverseComplementPaired(Process):
    """Reverse complement paired-end FASTQ reads file using Seqtk.

    Depending on the ``select_mate`` input, either mate 1, mate 2 or both
    mates are reverse complemented with ``seqtk seq -r``. Every resulting
    FASTQ file (complemented or untouched) is quality-checked with FastQC
    and compressed before being saved to the outputs.
    """

    slug = "seqtk-rev-complement-paired"
    process_type = "data:reads:fastq:paired:seqtk"
    name = "Reverse complement FASTQ (paired-end)"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"},
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }
    entity = {
        "type": "sample",
    }
    data_name = "{{ reads|name|default('?') }}"
    version = "1.2.1"
    category = "FASTQ processing"

    class Input:
        """Input fields to process ReverseComplementPaired."""

        reads = DataField("reads:fastq:paired", label="Reads")
        select_mate = StringField(
            label="Select mate",
            description="Select which mate should be reverse complemented.",
            choices=[("Mate 1", "Mate 1"), ("Mate 2", "Mate 2"), ("Both", "Both")],
            default="Mate 1",
        )

    class Output:
        """Output fields."""

        fastq = ListField(FileField(), label="Reverse complemented FASTQ file")
        fastq2 = ListField(FileField(), label="Remaining mate")
        fastqc_url = ListField(
            FileHtmlField(), label="Quality control with FastQC (Mate 1)"
        )
        fastqc_archive = ListField(
            FileField(), label="Download FastQC archive (Mate 1)"
        )
        fastqc_url2 = ListField(
            FileHtmlField(), label="Quality control with FastQC (Mate 2)"
        )
        fastqc_archive2 = ListField(
            FileField(), label="Download FastQC archive (Mate 2)"
        )

    def _fastqc(self, reads_file):
        """Run FastQC on ``reads_file`` and abort the process on failure."""
        _, _, stderr = Cmd["fastqc"][reads_file, "--extract", "--outdir=./"] & TEE
        if "Failed to process" in stderr or "Skipping" in stderr:
            self.error("Failed while processing with FastQC.")

    def _reverse_complement(self, original, complemented):
        """Reverse complement ``original`` into ``complemented``, QC and gzip it."""
        (Cmd["seqtk"]["seq", "-r", original] > complemented)()
        self._fastqc(complemented)
        (Cmd["gzip"][complemented])()

    def run(self, inputs, outputs):
        """Run the analysis."""
        basename_mate1 = os.path.basename(inputs.reads.output.fastq[0].path)
        basename_mate2 = os.path.basename(inputs.reads.output.fastq2[0].path)
        assert basename_mate1.endswith(".fastq.gz")
        assert basename_mate2.endswith(".fastq.gz")
        name_mate1 = basename_mate1[:-9]
        name_mate2 = basename_mate2[:-9]
        original_mate1 = f"{name_mate1}_original.fastq.gz"
        original_mate2 = f"{name_mate2}_original.fastq.gz"
        complemented_mate1 = f"{name_mate1}_complemented.fastq"
        complemented_mate2 = f"{name_mate2}_complemented.fastq"
        # Concatenate possible multi-lane files into a single file per mate.
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
            > original_mate1
        )()
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq2]]
            > original_mate2
        )()
        if inputs.select_mate == "Mate 1":
            self._reverse_complement(original_mate1, complemented_mate1)
            # The untouched mate still gets a FastQC report.
            self._fastqc(original_mate2)
            outputs.fastq = [f"{complemented_mate1}.gz"]
            outputs.fastq2 = [original_mate2]
            outputs.fastqc_url = [f"{name_mate1}_complemented_fastqc.html"]
            outputs.fastqc_archive = [f"{name_mate1}_complemented_fastqc.zip"]
            outputs.fastqc_url2 = [f"{name_mate2}_original_fastqc.html"]
            outputs.fastqc_archive2 = [f"{name_mate2}_original_fastqc.zip"]
        elif inputs.select_mate == "Mate 2":
            self._fastqc(original_mate1)
            self._reverse_complement(original_mate2, complemented_mate2)
            outputs.fastq = [original_mate1]
            outputs.fastq2 = [f"{complemented_mate2}.gz"]
            outputs.fastqc_url = [f"{name_mate1}_original_fastqc.html"]
            outputs.fastqc_archive = [f"{name_mate1}_original_fastqc.zip"]
            outputs.fastqc_url2 = [f"{name_mate2}_complemented_fastqc.html"]
            outputs.fastqc_archive2 = [f"{name_mate2}_complemented_fastqc.zip"]
        else:
            # "Both": reverse complement each mate independently.
            self._reverse_complement(original_mate1, complemented_mate1)
            self._reverse_complement(original_mate2, complemented_mate2)
            outputs.fastq = [f"{complemented_mate1}.gz"]
            outputs.fastq2 = [f"{complemented_mate2}.gz"]
            outputs.fastqc_url = [f"{name_mate1}_complemented_fastqc.html"]
            outputs.fastqc_archive = [f"{name_mate1}_complemented_fastqc.zip"]
            outputs.fastqc_url2 = [f"{name_mate2}_complemented_fastqc.html"]
            outputs.fastqc_archive2 = [f"{name_mate2}_complemented_fastqc.zip"]
import os
from glob import glob
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
Process,
SchedulingClass,
StringField,
)
class UmiToolsDedup(Process):
    """Deduplicate reads using UMI and mapping coordinates."""

    slug = "umi-tools-dedup"
    name = "UMI-tools dedup"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0",
            },
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }
    data_name = "{{ alignment|name|default('?') }}"
    version = "1.5.1"
    process_type = "data:alignment:bam:umitools:dedup"
    category = "FASTQ processing"
    entity = {
        "type": "sample",
        "input": "alignment",
    }
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields."""

        alignment = DataField("alignment:bam", label="Alignment")

    class Output:
        """Output fields."""

        bam = FileField(label="Clipped BAM file")
        bai = FileField(label="Index of clipped BAM file")
        stats = FileField(label="Alignment statistics")
        dedup_log = FileField(label="Deduplication log")
        dedup_stats = FileField(label="Deduplication stats")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run the analysis."""
        bam_path = inputs.alignment.output.bam.path
        bam_basename = os.path.basename(bam_path)
        assert bam_basename.endswith(".bam")
        sample_name = bam_basename[:-4]

        # File names derived from the input BAM name.
        deduplicated_bam = f"{sample_name}_dedup.bam"
        dedup_log = f"{sample_name}_dedup.log"
        flagstat_file = f"{sample_name}_stats.txt"
        stats_archive = f"{sample_name}_dedup_stats.zip"

        dedup_args = [
            "dedup",
            "-I",
            bam_path,
            "-S",
            deduplicated_bam,
            "-L",
            dedup_log,
            "--multimapping-detection-method=NH",
            "--output-stats=dedup_stats",
        ]

        # Detect whether the aligned reads are single or paired-end by counting
        # reads carrying the SAM flag "read paired (0x1)".
        paired_reads = Cmd["samtools"]("view", "-c", "-f", "1", bam_path).strip()
        if paired_reads != "0":
            dedup_args.append("--paired")

        # Run UMI-tools dedup.
        return_code, _, _ = Cmd["umi_tools"][dedup_args] & TEE(retcode=None)
        if return_code:
            self.error(f"Deduplication of {sample_name}.bam failed.")

        # Collect all deduplication stats files into a single archive.
        Cmd["zip"]([stats_archive, *glob("dedup_stats_*")])

        # Index the deduplicated BAM file.
        return_code, _, _ = Cmd["samtools"]["index", deduplicated_bam] & TEE(
            retcode=None
        )
        if return_code:
            self.error(f"Indexing of {deduplicated_bam} failed.")

        # Compute alignment statistics for the deduplicated BAM file.
        (Cmd["samtools"]["flagstat", deduplicated_bam] > flagstat_file)()

        outputs.bam = deduplicated_bam
        outputs.bai = f"{deduplicated_bam}.bai"
        outputs.stats = flagstat_file
        outputs.dedup_log = dedup_log
        outputs.dedup_stats = stats_archive
        outputs.species = inputs.alignment.output.species
        outputs.build = inputs.alignment.output.build
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FileHtmlField,
FloatField,
GroupField,
IntegerField,
ListField,
Persistence,
Process,
SchedulingClass,
)
class SeqtkSampleSingle(Process):
    """Subsample reads from FASTQ file (single-end).

    [Seqtk](https://github.com/lh3/seqtk) is a fast and lightweight tool for
    processing sequences in the FASTA or FASTQ format. The Seqtk "sample" command
    enables subsampling of the large FASTQ file(s).
    """

    slug = "seqtk-sample-single"
    name = "Subsample FASTQ (single-end)"
    process_type = "data:reads:fastq:single:seqtk"
    version = "1.5.1"
    category = "FASTQ processing"
    data_name = "{{ reads|name|default('?') }}"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }

    class Input:
        """Input fields to process SeqtkSampleSingle."""

        reads = DataField(
            "reads:fastq:single",
            label="Reads",
        )
        n_reads = IntegerField(
            label="Number of reads",
            default=1000000,
        )

        class Advanced:
            """Advanced options."""

            seed = IntegerField(
                label="Seed",
                default=11,
            )
            fraction = FloatField(
                label="Fraction",
                required=False,
                description="Use the fraction of reads [0 - 1.0] from the "
                "original input file instead of the absolute number of reads. "
                "If set, this will override the 'Number of reads' input parameter.",
            )
            two_pass = BooleanField(
                label="2-pass mode",
                default=False,
                description="Enable two-pass mode when down-sampling. "
                "Two-pass mode is twice as slow but with much reduced memory.",
            )

        advanced = GroupField(
            Advanced,
            label="Advanced options",
        )

    class Output:
        """Output fields to process SeqtkSampleSingle."""

        fastq = ListField(
            FileField(),
            label="Remaining reads",
        )
        fastqc_url = ListField(
            FileHtmlField(),
            label="Quality control with FastQC",
        )
        fastqc_archive = ListField(
            FileField(),
            label="Download FastQC archive",
        )

    def run(self, inputs, outputs):
        """Run analysis."""
        # Compare against None so that fraction == 0.0 is validated (and
        # rejected) instead of silently falling through to n_reads.
        if inputs.advanced.fraction is not None and not (
            0 < inputs.advanced.fraction <= 1.0
        ):
            self.error("Fraction of reads should be between 0 and 1.")

        basename = Path(inputs.reads.output.fastq[0].path).name
        assert basename.endswith(".fastq.gz")
        name = basename[:-9]
        input_reads = "input_reads.fastq.gz"
        final_reads = name + "_downsampled.fastq"

        # Concatenate possible multi-lane files into a single input file.
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
            > input_reads
        )()

        # Build the seqtk command line. Options must precede the positional
        # input file; the previous ordering appended "-2" after it and relied
        # on GNU getopt argument permutation to work.
        args = [
            "-s",
            inputs.advanced.seed,
        ]
        if inputs.advanced.two_pass:
            args.append("-2")
        args.append(input_reads)
        # The subsample size (fraction or absolute count) is a positional
        # argument that follows the input file.
        if inputs.advanced.fraction is not None:
            args.append(inputs.advanced.fraction)
        else:
            args.append(inputs.n_reads)

        (Cmd["seqtk"]["sample"][args] > final_reads)()
        Cmd["pigz"][final_reads]()

        # fastqc.sh presumably saves the fastqc_archive/fastqc_url outputs
        # itself (they are not assigned below) — confirm against the script.
        args_fastqc = [
            f"{final_reads}.gz",
            "fastqc",
            "fastqc_archive",
            "fastqc_url",
        ]
        return_code, _, _ = Cmd["fastqc.sh"][args_fastqc] & TEE(retcode=None)
        if return_code:
            self.error("Error while preparing FASTQC report.")

        outputs.fastq = [f"{final_reads}.gz"]
class SeqtkSamplePaired(Process):
    """Subsample reads from FASTQ files (paired-end).

    [Seqtk](https://github.com/lh3/seqtk) is a fast and lightweight tool for
    processing sequences in the FASTA or FASTQ format. The Seqtk "sample" command
    enables subsampling of the large FASTQ file(s).
    """

    slug = "seqtk-sample-paired"
    name = "Subsample FASTQ (paired-end)"
    process_type = "data:reads:fastq:paired:seqtk"
    version = "1.5.1"
    category = "FASTQ processing"
    data_name = "{{ reads|name|default('?') }}"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }

    class Input:
        """Input fields to process SeqtkSamplePaired."""

        reads = DataField(
            "reads:fastq:paired",
            label="Reads",
        )
        n_reads = IntegerField(
            label="Number of reads",
            default=1000000,
        )

        class Advanced:
            """Advanced options."""

            seed = IntegerField(
                label="Seed",
                default=11,
            )
            fraction = FloatField(
                label="Fraction",
                required=False,
                description="Use the fraction of reads [0 - 1.0] from the "
                "original input file instead of the absolute number of reads. "
                "If set, this will override the 'Number of reads' input parameter.",
            )
            two_pass = BooleanField(
                label="2-pass mode",
                default=False,
                description="Enable two-pass mode when down-sampling. "
                "Two-pass mode is twice as slow but with much reduced memory.",
            )

        advanced = GroupField(
            Advanced,
            label="Advanced options",
        )

    class Output:
        """Output fields to process SeqtkSamplePaired."""

        fastq = ListField(
            FileField(),
            label="Remaining mate 1 reads",
        )
        fastq2 = ListField(
            FileField(),
            label="Remaining mate 2 reads",
        )
        fastqc_url = ListField(
            FileHtmlField(),
            label="Mate 1 quality control with FastQC",
        )
        fastqc_url2 = ListField(
            FileHtmlField(),
            label="Mate 2 quality control with FastQC",
        )
        fastqc_archive = ListField(
            FileField(),
            label="Download mate 1 FastQC archive",
        )
        fastqc_archive2 = ListField(
            FileField(),
            label="Download mate 2 FastQC archive",
        )

    def run(self, inputs, outputs):
        """Run analysis."""
        # Compare against None so that fraction == 0.0 is validated (and
        # rejected) instead of silently falling through to n_reads.
        if inputs.advanced.fraction is not None and not (
            0 < inputs.advanced.fraction <= 1.0
        ):
            self.error("Fraction of reads should be between 0 and 1.")

        basename1 = Path(inputs.reads.output.fastq[0].path).name
        basename2 = Path(inputs.reads.output.fastq2[0].path).name
        assert basename1.endswith(".fastq.gz")
        assert basename2.endswith(".fastq.gz")
        name_mate1 = basename1[:-9]
        name_mate2 = basename2[:-9]
        input_mate1 = "input_mate1.fastq.gz"
        input_mate2 = "input_mate2.fastq.gz"
        final_mate1 = name_mate1 + "_downsampled.fastq"
        final_mate2 = name_mate2 + "_downsampled.fastq"

        # Concatenate possible multi-lane files into a single file per mate.
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
            > input_mate1
        )()
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq2]]
            > input_mate2
        )()

        # Build the seqtk command lines. Options must precede the positional
        # input file; the previous ordering appended "-2" after it and relied
        # on GNU getopt argument permutation to work. Using the same seed for
        # both mates keeps read pairs in sync.
        args1 = [
            "-s",
            inputs.advanced.seed,
        ]
        args2 = [
            "-s",
            inputs.advanced.seed,
        ]
        for arg, input_mate in [(args1, input_mate1), (args2, input_mate2)]:
            if inputs.advanced.two_pass:
                arg.append("-2")
            arg.append(input_mate)
            # The subsample size (fraction or absolute count) is a positional
            # argument that follows the input file.
            if inputs.advanced.fraction is not None:
                arg.append(inputs.advanced.fraction)
            else:
                arg.append(inputs.n_reads)

        (Cmd["seqtk"]["sample"][args1] > final_mate1)()
        (Cmd["seqtk"]["sample"][args2] > final_mate2)()
        Cmd["pigz"][final_mate1]()
        Cmd["pigz"][final_mate2]()

        # fastqc.sh presumably saves the fastqc_archive*/fastqc_url* outputs
        # itself (they are not assigned below) — confirm against the script.
        args_fastqc1 = [
            f"{final_mate1}.gz",
            "fastqc",
            "fastqc_archive",
            "fastqc_url",
        ]
        args_fastqc2 = [
            f"{final_mate2}.gz",
            "fastqc",
            "fastqc_archive2",
            "fastqc_url2",
        ]
        for arg in [args_fastqc1, args_fastqc2]:
            return_code, _, _ = Cmd["fastqc.sh"][arg] & TEE(retcode=None)
            if return_code:
                self.error("Error while preparing FASTQC report.")

        outputs.fastq = [f"{final_mate1}.gz"]
        outputs.fastq2 = [f"{final_mate2}.gz"]
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class SamtoolsView(Process):
    """Samtools view.

    With no options or regions specified, saves all alignments in
    the specified input alignment file in BAM format to standard output
    also in BAM format.
    You may specify one or more space-separated region specifications
    to restrict output to only those alignments which overlap the specified
    region(s). For more information about samtools view, click
    [here](https://www.htslib.org/doc/samtools-view.html).
    """

    slug = "samtools-view"
    process_type = "data:alignment:bam:samtools"
    name = "Samtools view"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
        },
    }
    category = "Samtools"
    data_name = "{{ bam|name|default('?') }}"
    version = "1.0.1"
    entity = {"type": "sample"}
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields for SamtoolsView."""

        bam = DataField(data_type="alignment:bam", label="Input BAM file")
        region = StringField(
            label="Region",
            description="Region can be specified as: RNAME:STARTPOS-ENDPOS "
            "and all position coordinates are 1-based, where RNAME is the "
            "name of the contig. If the input BAM file was generated by "
            "General RNA-seq pipeline, you should use only chromosome "
            "numbers to subset the input file, e.g. 3:30293-39103.",
            hidden="bedfile",
            required=False,
        )
        bedfile = DataField(
            data_type="bed",
            label="Target BED file",
            description="Target BED file with regions to extract. "
            "If the input BAM file was generated by General RNA-seq "
            "pipeline, you should use only chromosome numbers to subset "
            "the input file, e.g. 3:30292-39103.",
            hidden="region",
            required=False,
        )

        class AdvancedOptions:
            """Advanced options."""

            include_header = BooleanField(
                label="Include the header in the output",
                default=True,
                disabled="advanced.only_header",
            )
            only_header = BooleanField(
                label="Output the header only",
                default=False,
                disabled="advanced.include_header",
                description="Selecting this option overrides all other options.",
            )
            subsample = FloatField(
                label="Fraction of the input alignments",
                required=False,
                range=[0.0, 1.0],
                description="Output only a proportion of the input alignments, as "
                "specified by 0.0 ≤ FLOAT ≤ 1.0, which gives the fraction of "
                "templates/pairs to be kept. This subsampling acts in the same "
                "way on all of the alignment records in the same template or read "
                "pair, so it never keeps a read but not its mate.",
            )
            subsample_seed = IntegerField(
                label="Subsampling seed",
                default=11,
                description="Subsampling seed used to influence which subset of "
                "reads is kept. When subsampling data that has previously been "
                "subsampled, be sure to use a different seed value from those used "
                "previously; otherwise more reads will be retained than expected.",
                hidden="!advanced.subsample",
            )
            threads = IntegerField(
                label="Number of threads",
                default=2,
                description="Number of BAM compression threads to use in addition "
                "to main thread.",
            )

        advanced = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for SamtoolsView."""

        bam = FileField(label="Output BAM file")
        bai = FileField(label="Output index file")
        stats = FileField(label="Alignment statistics", required=False)
        build = StringField(label="Build")
        species = StringField(label="Species")

    def run(self, inputs, outputs):
        """Run the analysis."""
        name = f"{Path(inputs.bam.output.bam.path).stem}_subset"
        bam_name = f"{name}.bam"
        bai_name = f"{bam_name}.bai"
        stats = f"{name}_stats.txt"

        if not inputs.region and not inputs.bedfile and not inputs.advanced.only_header:
            self.error("No region or BED file specified.")
        if inputs.bedfile:
            # Guard against subsetting with annotation from a different genome.
            if inputs.bedfile.output.species != inputs.bam.output.species:
                self.error(
                    "Input BAM file and BED file are of different species. "
                    f"BAM file is from {inputs.bam.output.species}, "
                    f"while BED file is from {inputs.bedfile.output.species}."
                )
            if inputs.bedfile.output.build != inputs.bam.output.build:
                self.error(
                    "Input BAM file and BED file have different genome build, "
                    "but it should be the same. BAM file has build "
                    f"{inputs.bam.output.build}, while BED file has build "
                    f"{inputs.bedfile.output.build}."
                )

        if inputs.advanced.only_header:
            # -H prints only the header; -b keeps the output in BAM format.
            return_code, stdout, stderr = Cmd["samtools"]["view"][
                "-Hbo", bam_name, inputs.bam.output.bam.path
            ] & TEE(retcode=None)
            if return_code:
                self.error(f"Samtools view failed. {stdout}, {stderr}")
            return_code, stdout, stderr = Cmd["samtools"]["index"][bam_name] & TEE(
                retcode=None
            )
            if return_code:
                self.error(f"Samtools index failed. {stdout}, {stderr}")
            self.warning("Only header is present in the output BAM file.")
        else:
            input_options = [
                "-b",
                "--threads",
                # Do not request more threads than the process was allocated.
                min(inputs.advanced.threads, self.requirements.resources.cores),
            ]
            if inputs.advanced.include_header:
                input_options.append("-h")
            # Compare against None so that a subsample fraction of 0.0 (a valid
            # value within the declared [0.0, 1.0] range) is not silently dropped.
            if inputs.advanced.subsample is not None:
                input_options.extend(
                    [
                        "--subsample",
                        inputs.advanced.subsample,
                        "--subsample-seed",
                        inputs.advanced.subsample_seed,
                    ]
                )
            if inputs.bedfile:
                input_options.extend(
                    [
                        "-L",
                        inputs.bedfile.output.bed.path,
                        "-o",
                        bam_name,
                        inputs.bam.output.bam.path,
                    ]
                )
            else:
                input_options.extend(
                    ["-o", bam_name, inputs.bam.output.bam.path, inputs.region]
                )
            return_code, stdout, stderr = Cmd["samtools"]["view"][input_options] & TEE(
                retcode=None
            )
            if return_code:
                self.error(f"Samtools view failed. {stdout}, {stderr}")
            # A region mismatch may still leave a usable (possibly empty) BAM;
            # surface it as a warning instead of failing.
            if "invalid region or unknown reference" in stderr:
                self.warning(stderr)
            return_code, stdout, stderr = Cmd["samtools"]["index"][bam_name] & TEE(
                retcode=None
            )
            if return_code:
                self.error(f"Samtools index failed. {stdout}, {stderr}")

        # Alignment statistics are computed for both modes.
        (Cmd["samtools"]["flagstat"][bam_name] > stats)()
        outputs.stats = stats
        outputs.bam = bam_name
        outputs.bai = bai_name
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
class SamtoolsCoverageMulti(Process):
    """Samtools coverage for multiple BAM files.

    Computes the depth at each position or region and creates tabulated text.
    Coverage is calculated from the merged alignments of all input BAM files.
    For more information about samtools coverage, click
    [here](https://www.htslib.org/doc/samtools-coverage.html).
    """

    slug = "samtools-coverage-multi"
    process_type = "data:samtoolscoverage:multi"
    name = "Samtools coverage (multi-sample)"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
        },
    }
    category = "Samtools"
    data_name = "Samtools coverage"
    version = "1.0.0"
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields for SamtoolsCoverageMulti."""

        bam = ListField(
            DataField(data_type="alignment:bam"),
            label="Input BAM files",
            description="Select BAM file(s) for the analysis. Coverage information will "
            "be calculated from the merged alignments.",
        )
        region = StringField(
            label="Region",
            description="Region can be specified as: RNAME:STARTPOS-ENDPOS "
            "and all position coordinates are 1-based, where RNAME is the "
            "name of the contig. If the input BAM file was generated by "
            "General RNA-seq pipeline, you should use only chromosome "
            "numbers to subset the input file, e.g. 3:30293-39103.",
            required=False,
        )

        class AdvancedOptions:
            """Advanced options."""

            min_read_length = IntegerField(
                label="Minimum read length",
                required=False,
                description="Ignore reads shorter than specified number of "
                "base pairs.",
            )
            min_mq = IntegerField(
                label="Minimum mapping quality",
                required=False,
                description="Minimum mapping quality for an alignment to be used.",
            )
            min_bq = IntegerField(
                label="Minimum base quality",
                required=False,
                description="Minimum base quality for a base to be considered.",
            )
            excl_flags = ListField(
                StringField(),
                label="Filter flags",
                default=["UNMAP", "SECONDARY", "QCFAIL", "DUP"],
                description="Filter flags: skip reads with mask bits set. "
                "Press ENTER after each flag.",
            )
            depth = IntegerField(
                label="Maximum allowed coverage depth",
                default=1000000,
                description="If 0, depth is set to the maximum integer value "
                "effectively removing any depth limit.",
            )
            no_header = BooleanField(
                label="No header",
                default=False,
                description="Do not output header.",
            )

        advanced = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for SamtoolsCoverageMulti."""

        table = FileField(label="Output coverage table")
        build = StringField(label="Build")
        species = StringField(label="Species")

    def run(self, inputs, outputs):
        """Run the analysis."""
        output_name = "samtools_coverage.tsv"

        if len(inputs.bam) == 1:
            self.error(
                "Only one BAM file was selected. Please select more than one "
                "input BAM file or use process Samtools coverage (single-sample)."
            )
        # All inputs must share species and genome build; compare each file
        # against the first one.
        for bam in inputs.bam[1:]:
            if inputs.bam[0].output.species != bam.output.species:
                self.error(
                    "Not all BAM files are from the same species. "
                    f"BAM file {Path(inputs.bam[0].output.bam.path).name} is from "
                    f"{inputs.bam[0].output.species}, "
                    f"while file {Path(bam.output.bam.path).name} is from {bam.output.species}."
                )
            if inputs.bam[0].output.build != bam.output.build:
                self.error(
                    "Not all BAM files have the same genome build. "
                    f"BAM file {Path(inputs.bam[0].output.bam.path).name} has build "
                    f"{inputs.bam[0].output.build}, "
                    f"while file {Path(bam.output.bam.path).name} has build {bam.output.build}."
                )

        input_options = [
            "-o",
            output_name,
        ]
        if inputs.region:
            input_options.extend(["--region", inputs.region])
        if inputs.advanced.min_read_length:
            input_options.extend(["--min-read-len", inputs.advanced.min_read_length])
        if inputs.advanced.min_mq:
            input_options.extend(["--min-MQ", inputs.advanced.min_mq])
        if inputs.advanced.min_bq:
            input_options.extend(["--min-BQ", inputs.advanced.min_bq])
        if inputs.advanced.excl_flags:
            flags = ",".join(inputs.advanced.excl_flags)
            input_options.extend(["--excl-flags", flags])
        # Compare against None: "--depth 0" means "no depth limit" to samtools,
        # so a plain truthiness test would silently drop that setting.
        if inputs.advanced.depth is not None:
            input_options.extend(["--depth", inputs.advanced.depth])
        if inputs.advanced.no_header:
            input_options.append("--no-header")
        input_options.extend([bam.output.bam.path for bam in inputs.bam])

        return_code, stdout, stderr = Cmd["samtools"]["coverage"][input_options] & TEE(
            retcode=None
        )
        if return_code:
            self.error(f"Samtools coverage failed. {stdout}, {stderr}")

        outputs.table = output_name
        outputs.species = inputs.bam[0].output.species
        outputs.build = inputs.bam[0].output.build
class SamtoolsCoverageSingle(Process):
    """Samtools coverage for a single BAM file.

    Computes the depth at each position or region and creates tabulated text.
    For more information about samtools coverage, click
    [here](https://www.htslib.org/doc/samtools-coverage.html).
    """

    slug = "samtools-coverage-single"
    process_type = "data:samtoolscoverage:single"
    name = "Samtools coverage (single-sample)"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
        },
    }
    category = "Samtools"
    data_name = "{{ bam|name|default('?') }}"
    version = "1.0.0"
    entity = {"type": "sample"}
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields for SamtoolsCoverageSingle."""

        bam = DataField(
            data_type="alignment:bam",
            label="Input BAM file",
            description="Select BAM file for the analysis",
        )
        region = StringField(
            label="Region",
            description="Region can be specified as: RNAME:STARTPOS-ENDPOS "
            "and all position coordinates are 1-based, where RNAME is the "
            "name of the contig. If the input BAM file was generated by "
            "General RNA-seq pipeline, you should use only chromosome "
            "numbers to subset the input file, e.g. 3:30293-39103.",
            required=False,
        )

        class AdvancedOptions:
            """Advanced options."""

            min_read_length = IntegerField(
                label="Minimum read length",
                required=False,
                description="Ignore reads shorter than specified number of "
                "base pairs.",
            )
            min_mq = IntegerField(
                label="Minimum mapping quality",
                required=False,
                description="Minimum mapping quality for an alignment to be used.",
            )
            min_bq = IntegerField(
                label="Minimum base quality",
                required=False,
                description="Minimum base quality for a base to be considered.",
            )
            excl_flags = ListField(
                StringField(),
                label="Filter flags",
                default=["UNMAP", "SECONDARY", "QCFAIL", "DUP"],
                description="Filter flags: skip reads with mask bits set. "
                "Press ENTER after each flag.",
            )
            depth = IntegerField(
                label="Maximum allowed coverage depth",
                default=1000000,
                description="If 0, depth is set to the maximum integer value "
                "effectively removing any depth limit.",
            )
            no_header = BooleanField(
                label="No header",
                default=False,
                description="Do not output header.",
            )

        advanced = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields for SamtoolsCoverageSingle."""

        table = FileField(label="Output coverage table")
        build = StringField(label="Build")
        species = StringField(label="Species")

    def run(self, inputs, outputs):
        """Run the analysis."""
        output_name = f"{Path(inputs.bam.output.bam.path).stem}_coverage.tsv"

        input_options = [
            "-o",
            output_name,
        ]
        if inputs.region:
            input_options.extend(["--region", inputs.region])
        if inputs.advanced.min_read_length:
            input_options.extend(["--min-read-len", inputs.advanced.min_read_length])
        if inputs.advanced.min_mq:
            input_options.extend(["--min-MQ", inputs.advanced.min_mq])
        if inputs.advanced.min_bq:
            input_options.extend(["--min-BQ", inputs.advanced.min_bq])
        if inputs.advanced.excl_flags:
            flags = ",".join(inputs.advanced.excl_flags)
            input_options.extend(["--excl-flags", flags])
        # Compare against None: "--depth 0" means "no depth limit" to samtools,
        # so a plain truthiness test would silently drop that setting.
        if inputs.advanced.depth is not None:
            input_options.extend(["--depth", inputs.advanced.depth])
        if inputs.advanced.no_header:
            input_options.append("--no-header")
        input_options.append(inputs.bam.output.bam.path)

        return_code, stdout, stderr = Cmd["samtools"]["coverage"][input_options] & TEE(
            retcode=None
        )
        if return_code:
            self.error(f"Samtools coverage failed. {stdout}, {stderr}")

        outputs.table = output_name
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import Cmd, DataField, DirField, FileField, Process, StringField
class WaltIndex(Process):
    """Create WALT genome index."""

    slug = "walt-index"
    process_type = "data:index:walt"
    name = "WALT genome index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }
    category = "WGBS"
    data_name = '{{ ref_seq.fasta.file|basename|default("?") }}'
    version = "1.2.1"

    class Input:
        """Input fields for WaltIndex."""

        ref_seq = DataField(
            "seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
        )

    class Output:
        """Output fields to process WaltIndex."""

        index = DirField(label="WALT index")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        fasta_name = Path(inputs.ref_seq.output.fasta.path).name
        assert fasta_name.endswith(".fasta")
        base = fasta_name[:-6]

        output_dir = Path("walt_index")
        output_dir.mkdir()

        # Copy the reference files into the working directory so they can be
        # referenced from the data object outputs.
        workdir = Path.cwd()
        for source in (
            inputs.ref_seq.output.fasta.path,
            inputs.ref_seq.output.fastagz.path,
            inputs.ref_seq.output.fai.path,
        ):
            shutil.copy(Path(source), workdir)

        index_args = [
            "-c",
            inputs.ref_seq.output.fasta.path,
            "-o",
            output_dir / f"{base}.dbindex",
        ]
        return_code, _, _ = Cmd["makedb-walt"][index_args] & TEE(retcode=None)
        if return_code:
            self.error("Error occurred while preparing the WALT index.")

        outputs.index = output_dir.name
        outputs.fasta = f"{base}.fasta"
        outputs.fastagz = f"{base}.fasta.gz"
        outputs.fai = f"{base}.fasta.fai"
        outputs.species = inputs.ref_seq.output.species
        outputs.build = inputs.ref_seq.output.build
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
IntegerField,
Process,
SchedulingClass,
)
class BsConversionRate(Process):
    """Estimate bisulfite conversion rate in a control set.

    The program bsrate included in [Methpipe]
    (https://github.com/smithlabcode/methpipe) will estimate the bisulfite
    conversion rate.
    """

    slug = "bs-conversion-rate"
    name = "Bisulfite conversion rate"
    process_type = "data:wgbs:bsrate"
    version = "1.3.1"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/wgbs:3.0.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }
    data_name = "{{ mr|name|default('?') }}"
    category = "WGBS"

    class Input:
        """Input fields for BsConversionRate."""

        mr = DataField(
            "alignment:bam:walt",
            label="Aligned reads from bisulfite sequencing",
            description="Bisulfite specific alignment such as WALT is required as .mr file type is "
            "used. Duplicates should be removed to reduce any bias introduced by incomplete "
            "conversion on PCR duplicate reads.",
        )
        skip = BooleanField(
            label="Skip Bisulfite conversion rate step",
            description="Bisulfite conversion rate step can be skipped.",
            default=False,
        )
        sequence = DataField(
            "seq:nucleotide",
            label="Unmethylated control sequence",
            description="Separate unmethylated control sequence FASTA file is required to "
            "estimate bisulfite conversion rate.",
            required=False,
        )
        count_all = BooleanField(
            label="Count all cytosines including CpGs", default=True
        )
        # NOTE(review): read_length is declared but never referenced in run();
        # confirm whether it should be forwarded to bsrate or removed.
        read_length = IntegerField(label="Average read length", default=150)
        max_mismatch = FloatField(
            label="Maximum fraction of mismatches", required=False
        )
        a_rich = BooleanField(label="Reads are A-rich", default=False)

    class Output:
        """Output fields."""

        report = FileField(label="Bisulfite conversion rate report")

    def run(self, inputs, outputs):
        """Run the analysis.

        Decompresses the spike-in alignment, runs MethPipe ``bsrate``
        against the unmethylated control FASTA and writes the report.
        When the step is skipped (explicitly, or because the spike-in
        alignment or the control sequence is missing) a placeholder
        report is produced instead.
        """
        basename = os.path.basename(inputs.mr.output.mr.path)
        assert basename.endswith(".mr.gz")
        name = basename[:-6]
        report_file = f"{name}_spikein_bsrate.txt"
        skip_process = inputs.skip
        # Both the spike-in alignment and the control FASTA are required;
        # fall back to the skip path when either one is missing.
        try:
            inputs.mr.output.spikein_mr.path
        except AttributeError:
            self.warning(
                "Selected sample lacks the alignment file for unmethylated control reads."
            )
            skip_process = True
        try:
            inputs.sequence.output.fasta.path
        except AttributeError:
            self.warning("Unmethylated control sequence was not provided.")
            skip_process = True
        if not skip_process:
            # bsrate reads an uncompressed .mr file.
            (Cmd["pigz"]["-cd", inputs.mr.output.spikein_mr.path] > f"{name}.mr")()
            args = [
                "-chrom",
                inputs.sequence.output.fasta.path,
                "-output",
                report_file,
            ]
            if inputs.count_all:
                args.append("-all")
            # Compare against None so an explicit 0.0 threshold is honored
            # (a bare truthiness test would silently drop it).
            if inputs.max_mismatch is not None:
                args.extend(["-max", inputs.max_mismatch])
            if inputs.a_rich:
                args.append("-a-rich")
            return_code, _, _ = Cmd["bsrate"][args][f"{name}.mr"] & TEE(retcode=None)
            if return_code:
                self.error("Bsrate analysis failed.")
        else:
            with open(report_file, "w") as f:
                f.write("Bisulfite conversion rate process skipped.")
        outputs.report = report_file
import gzip
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
# Species for which a missing sample species annotation may be filled in
# automatically from the genome index (see AlignmentStar.run, the
# KeyError branch of the species check).
SPECIES = [
    "Caenorhabditis elegans",
    "Cricetulus griseus",
    "Dictyostelium discoideum",
    "Dictyostelium purpureum",
    "Drosophila melanogaster",
    "Homo sapiens",
    "Macaca mulatta",
    "Mus musculus",
    "Odocoileus virginianus texanus",
    "Rattus norvegicus",
    "Solanum tuberosum",
]
def get_fastq_name(fastq_path):
    """Return the FASTQ file name without its ``.fastq.gz`` suffix.

    :param fastq_path: ``pathlib.Path`` pointing to a ``*.fastq.gz`` file.
    :raises ValueError: if the file name does not end with ``.fastq.gz``.
    """
    suffix = ".fastq.gz"
    fastq_file = fastq_path.name
    # Raise explicitly instead of asserting: asserts are stripped under
    # ``python -O`` and would let a malformed name through silently.
    if not fastq_file.endswith(suffix):
        raise ValueError(f"Expected a .fastq.gz file, got {fastq_file}")
    return fastq_file[: -len(suffix)]
class AlignmentStar(Process):
    """Align reads with STAR aligner.

    Spliced Transcripts Alignment to a Reference (STAR) software is
    based on an alignment algorithm that uses sequential maximum
    mappable seed search in uncompressed suffix arrays followed by seed
    clustering and stitching procedure. In addition to unbiased de novo
    detection of canonical junctions, STAR can discover non-canonical
    splices and chimeric (fusion) transcripts, and is also capable of
    mapping full-length RNA sequences. More information can be found in
    the [STAR manual](https://github.com/alexdobin/STAR/blob/master/doc/STARmanual.pdf)
    and in the [original paper](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3530905/).

    The current version of STAR is 2.7.10b.
    """

    slug = "alignment-star"
    name = "STAR"
    process_type = "data:alignment:bam:star"
    version = "5.0.1"
    category = "Align"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.2.0"}
        },
        "resources": {
            "cores": 4,
            "memory": 32768,
        },
    }
    data_name = "{{ reads|name|default('?') }}"

    class Input:
        """Input fields to process AlignmentStar."""

        reads = DataField("reads:fastq", label="Input reads (FASTQ)")
        genome = DataField(
            "index:star",
            label="Indexed reference genome",
            description="Genome index prepared by STAR aligner indexing tool.",
        )
        annotation = DataField(
            "annotation",
            label="Annotation file (GTF/GFF3)",
            required=False,
            description="Insert known annotations into genome indices at the mapping stage.",
        )
        unstranded = BooleanField(
            label="The data is unstranded [--outSAMstrandField intronMotif]",
            default=False,
            description="For unstranded RNA-seq data, Cufflinks/Cuffdiff require spliced "
            "alignments with XS strand attribute, which STAR will generate with "
            "--outSAMstrandField intronMotif option. As required, the XS strand attribute will be "
            "generated for all alignments that contain splice junctions. The spliced alignments "
            "that have undefined strand (i.e. containing only non-canonical unannotated "
            "junctions) will be suppressed. If you have stranded RNA-seq data, you do not need to "
            "use any specific STAR options. Instead, you need to run Cufflinks with the library "
            "option --library-type options. For example, cufflinks --library-type fr-firststrand "
            "should be used for the standard dUTP protocol, including Illumina's stranded "
            "Tru-Seq. This option has to be used only for Cufflinks runs and not for STAR runs.",
        )
        # NOTE: the "noncannonical" misspelling is part of the public input
        # schema; renaming the field would break existing callers.
        noncannonical = BooleanField(
            label="Remove non-canonical junctions (Cufflinks compatibility)",
            default=False,
            description="It is recommended to remove the non-canonical junctions for Cufflinks "
            "runs using --outFilterIntronMotifs RemoveNoncanonical.",
        )
        gene_counts = BooleanField(
            label="Gene count [--quantMode GeneCounts]",
            description="With this option set to True STAR will count the number of reads per gene "
            "while mapping. A read is counted if it overlaps (1nt or more) one and only one "
            "gene. Both ends of the paired-end read are checked for overlaps. The counts coincide "
            "with those produced by htseq-count with default parameters.",
            default=False,
        )

        class AnnotationOptions:
            """Annotation file options."""

            feature_exon = StringField(
                label="Feature type [--sjdbGTFfeatureExon]",
                default="exon",
                description="Feature type in GTF file to be used as exons for building "
                "transcripts.",
            )
            sjdb_overhang = IntegerField(
                label="Junction length [--sjdbOverhang]",
                default=100,
                description="This parameter specifies the length of the genomic sequence around "
                "the annotated junction to be used in constructing the splice junction database. "
                "Ideally, this length should be equal to the ReadLength-1, where ReadLength is "
                "the length of the reads. For instance, for Illumina 2x100b paired-end reads, the "
                "ideal value is 100-1=99. In the case of reads of varying length, the ideal value "
                "is max(ReadLength)-1. In most cases, the default value of 100 will work as well "
                "as the ideal value.",
            )

        class ChimericReadsOptions:
            """Chimeric reads options."""

            chimeric = BooleanField(
                label="Detect chimeric and circular alignments [--chimOutType SeparateSAMold]",
                default=False,
                description="To switch on detection of chimeric (fusion) alignments (in addition "
                "to normal mapping), --chimSegmentMin should be set to a positive value. Each "
                "chimeric alignment consists of two segments.Each segment is non-chimeric on "
                "its own, but the segments are chimeric to each other (i.e. the segments belong "
                "to different chromosomes, or different strands, or are far from each other). "
                "Both segments may contain splice junctions, and one of the segments may contain "
                "portions of both mates. --chimSegmentMin parameter controls the minimum mapped "
                "length of the two segments that is allowed. For example, if you have 2x75 reads "
                "and used --chimSegmentMin 20, a chimeric alignment with 130b on one chromosome "
                "and 20b on the other will be output, while 135 + 15 won't be.",
            )
            chim_segment_min = IntegerField(
                label="Minimum length of chimeric segment [--chimSegmentMin]",
                default=20,
                disabled="!detect_chimeric.chimeric",
            )

        class TranscriptOutputOptions:
            """Transcript coordinate output options."""

            quant_mode = BooleanField(
                label="Output in transcript coordinates [--quantMode TranscriptomeSAM]",
                default=False,
                description="With --quantMode TranscriptomeSAM option STAR will output alignments "
                "translated into transcript coordinates in the Aligned.toTranscriptome.out.bam "
                "file (in addition to alignments in genomic coordinates in Aligned.*.sam/bam "
                "files). These transcriptomic alignments can be used with various transcript "
                "quantification software that require reads to be mapped to transcriptome, such "
                "as RSEM or eXpress.",
            )
            single_end = BooleanField(
                label="Allow soft-clipping and indels [--quantTranscriptomeBan Singleend]",
                default=False,
                disabled="!t_coordinates.quant_mode",
                description="By default, the output satisfies RSEM requirements: soft-clipping or "
                "indels are not allowed. Use --quantTranscriptomeBan Singleend to allow "
                "insertions, deletions and soft-clips in the transcriptomic alignments, which "
                "can be used by some expression quantification softwares (e.g. eXpress).",
            )

        class FilteringOptions:
            """Output filtering options."""

            out_filter_type = StringField(
                label="Type of filtering [--outFilterType]",
                default="Normal",
                choices=[
                    ("Normal", "Normal"),
                    ("BySJout", "BySJout"),
                ],
                description="Normal: standard filtering using only current alignment; BySJout: "
                "keep only those reads that contain junctions that passed filtering into "
                "SJ.out.tab.",
            )
            out_multimap_max = IntegerField(
                label="Maximum number of loci [--outFilterMultimapNmax]",
                required=False,
                description="Maximum number of loci the read is allowed to map to. Alignments "
                "(all of them) will be output only if the read maps to no more loci than this "
                "value. Otherwise no alignments will be output, and the read will be counted as "
                "'mapped to too many loci' (default: 10).",
            )
            out_mismatch_max = IntegerField(
                label="Maximum number of mismatches [--outFilterMismatchNmax]",
                required=False,
                description="Alignment will be output only if it has fewer mismatches than this "
                "value (default: 10). Large number (e.g. 999) switches off this filter.",
            )
            out_mismatch_nl_max = FloatField(
                label="Maximum no. of mismatches (map length) [--outFilterMismatchNoverLmax]",
                required=False,
                range=[0.0, 1.0],
                description="Alignment will be output only if its ratio of mismatches to *mapped* "
                "length is less than or equal to this value (default: 0.3). The value should be "
                "between 0.0 and 1.0.",
            )
            out_score_min = IntegerField(
                label="Minumum alignment score [--outFilterScoreMin]",
                required=False,
                description="Alignment will be output only if its score is higher than or equal "
                "to this value (default: 0).",
            )
            out_mismatch_nrl_max = FloatField(
                label="Maximum no. of mismatches (read length) [--outFilterMismatchNoverReadLmax]",
                required=False,
                range=[0.0, 1.0],
                description="Alignment will be output only if its ratio of mismatches to *read* "
                "length is less than or equal to this value (default: 1.0). Using 0.04 for "
                "2x100bp, the max number of mismatches is calculated as 0.04*200=8 for the paired "
                "read. The value should be between 0.0 and 1.0.",
            )

        class AlignmentOptions:
            """Alignment options."""

            align_overhang_min = IntegerField(
                label="Minimum overhang [--alignSJoverhangMin]",
                required=False,
                description="Minimum overhang (i.e. block size) for spliced alignments "
                "(default: 5).",
            )
            align_sjdb_overhang_min = IntegerField(
                label="Minimum overhang (sjdb) [--alignSJDBoverhangMin]",
                required=False,
                description="Minimum overhang (i.e. block size) for annotated (sjdb) spliced "
                "alignments (default: 3).",
            )
            align_intron_size_min = IntegerField(
                label="Minimum intron size [--alignIntronMin]",
                required=False,
                description="Minimum intron size: the genomic gap is considered an intron if its "
                "length >= alignIntronMin, otherwise it is considered Deletion (default: 21).",
            )
            align_intron_size_max = IntegerField(
                label="Maximum intron size [--alignIntronMax]",
                required=False,
                description="Maximum intron size, if 0, max intron size will be determined by "
                "(2pow(winBinNbits)*winAnchorDistNbins)(default: 0).",
            )
            align_gap_max = IntegerField(
                label="Minimum gap between mates [--alignMatesGapMax]",
                required=False,
                description="Maximum gap between two mates, if 0, max intron gap will be "
                "determined by (2pow(winBinNbits)*winAnchorDistNbins) (default: 0).",
            )
            align_end_alignment = StringField(
                label="Read ends alignment [--alignEndsType]",
                required=False,
                choices=[
                    ("Local", "Local"),
                    ("EndToEnd", "EndToEnd"),
                    ("Extend5pOfRead1", "Extend5pOfRead1"),
                    ("Extend5pOfReads12", "Extend5pOfReads12"),
                ],
                description="Type of read ends alignment (default: Local). Local: standard local "
                "alignment with soft-clipping allowed. EndToEnd: force end-to-end read alignment, "
                "do not soft-clip. Extend5pOfRead1: fully extend only the 5p of the read1, all "
                "other ends: local alignment. Extend5pOfReads12: fully extend only the 5' of the "
                "both read1 and read2, all other ends use local alignment.",
            )

        class TwoPassOptions:
            """Two-pass mapping options."""

            two_pass_mode = BooleanField(
                label="Use two pass mode [--twopassMode]",
                default=False,
                description="Use two-pass maping instead of first-pass only. In two-pass mode we "
                "first perform first-pass mapping, extract junctions, insert them into genome "
                "index, and re-map all reads in the second mapping pass.",
            )

        class OutputOptions:
            """Output options."""

            out_unmapped = BooleanField(
                label="Output unmapped reads (SAM) [--outSAMunmapped Within]",
                default=False,
                description="Output of unmapped reads in the SAM format.",
            )
            out_sam_attributes = StringField(
                label="Desired SAM attributes [--outSAMattributes]",
                default="Standard",
                choices=[
                    ("Standard", "Standard"),
                    ("All", "All"),
                    ("NH HI NM MD", "NH HI NM MD"),
                    ("None", "None"),
                ],
                description="A string of desired SAM attributes, in the order desired for the "
                "output SAM.",
            )
            out_rg_line = StringField(
                label="SAM/BAM read group line [--outSAMattrRGline]",
                required=False,
                description="The first word contains the read group identifier and must start "
                "with ID:, e.g. --outSAMattrRGline ID:xxx CN:yy ”DS:z z z” xxx will be added as "
                "RG tag to each output alignment. Any spaces in the tag values have to be double "
                "quoted. Comma separated RG lines correspons to different (comma separated) input "
                "files in –readFilesIn. Commas have to be surrounded by spaces, e.g. "
                "–outSAMattrRGline ID:xxx , ID:zzz ”DS:z z” , ID:yyy DS:yyyy.",
            )

        class Limits:
            """Limits."""

            # Two values: input buffer size and output buffer size (bytes);
            # run() passes them as two consecutive command-line arguments.
            limit_buffer_size = ListField(
                IntegerField(),
                label="Buffer size [--limitIObufferSize]",
                default=[30000000, 50000000],
                description="Maximum available buffers size (bytes) for input/output, per thread. "
                "Parameter requires two numbers - separate sizes for input and output buffers.",
            )
            limit_sam_records = IntegerField(
                label="Maximum size of the SAM record [--limitOutSAMoneReadBytes]",
                default=100000,
                description="Maximum size of the SAM record (bytes) for one read. Recommended "
                "value: >(2*(LengthMate1+LengthMate2+100)*outFilterMultimapNmax.",
            )
            limit_junction_reads = IntegerField(
                label="Maximum number of junctions [--limitOutSJoneRead]",
                default=1000,
                description="Maximum number of junctions for one read (including all "
                "multi-mappers).",
            )
            limit_collapsed_junctions = IntegerField(
                label="Maximum number of collapsed junctions [--limitOutSJcollapsed]",
                default=1000000,
            )
            limit_inserted_junctions = IntegerField(
                label="Maximum number of junction to be inserted [--limitSjdbInsertNsj]",
                default=1000000,
                description="Maximum number of junction to be inserted to the genome on the fly "
                "at the mapping stage, including those from annotations and those detected in the "
                "1st step of the 2-pass run.",
            )

        # Group fields expose the nested option classes in the input schema.
        # The "disabled"/"hidden" expressions above reference these group
        # names (e.g. "!detect_chimeric.chimeric", "!t_coordinates.quant_mode").
        annotation_options = GroupField(
            AnnotationOptions, label="Annotation file options", hidden="!annotation"
        )
        detect_chimeric = GroupField(
            ChimericReadsOptions, label="Chimeric and circular alignments"
        )
        t_coordinates = GroupField(
            TranscriptOutputOptions, label="Transcript coordinates output"
        )
        filtering = GroupField(FilteringOptions, label="Output Filtering")
        alignment = GroupField(AlignmentOptions, label="Alignment and Seeding")
        two_pass_mapping = GroupField(TwoPassOptions, label="Two-pass mapping")
        output_options = GroupField(OutputOptions, label="Output options")
        limits = GroupField(Limits, label="Limits")

    class Output:
        """Output fields to process AlignmentStar."""

        bam = FileField(label="Alignment file")
        bai = FileField(label="BAM file index")
        unmapped_1 = FileField(label="Unmapped reads (mate 1)", required=False)
        unmapped_2 = FileField(label="Unmapped reads (mate 2)", required=False)
        sj = FileField(label="Splice junctions")
        chimeric = FileField(label="Chimeric alignments", required=False)
        alignment_transcriptome = FileField(
            label="Alignment (transcriptome coordinates)", required=False
        )
        gene_counts = FileField(label="Gene counts", required=False)
        stats = FileField(label="Statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
try:
if (
inputs.reads.entity.descriptor["general"]["species"]
!= inputs.genome.output.species
):
self.warning(
f"Species of reads ({inputs.reads.entity.descriptor['general']['species']}) "
f"and genome ({inputs.genome.output.species}) do not match."
)
except KeyError:
if inputs.genome.output.species in SPECIES:
self.update_entity_descriptor(
{"general.species": inputs.genome.output.species}
)
self.info(
"Sample species was automatically annotated to match the genome."
)
mate1_name = get_fastq_name(Path(inputs.reads.output.fastq[0].path))
mate_1 = [fastq.path for fastq in inputs.reads.output.fastq]
if inputs.reads.type.startswith("data:reads:fastq:paired:"):
mate2_name = get_fastq_name(Path(inputs.reads.output.fastq2[0].path))
mate_2 = [fastq.path for fastq in inputs.reads.output.fastq2]
self.progress(0.05)
star_params = [
"--runThreadN",
self.requirements.resources.cores,
"--genomeDir",
inputs.genome.output.index.path,
"--outReadsUnmapped",
"Fastx",
"--limitIObufferSize",
inputs.limits.limit_buffer_size[0],
inputs.limits.limit_buffer_size[1],
"--limitOutSAMoneReadBytes",
inputs.limits.limit_sam_records,
"--limitOutSJoneRead",
inputs.limits.limit_junction_reads,
"--limitOutSJcollapsed",
inputs.limits.limit_collapsed_junctions,
"--limitSjdbInsertNsj",
inputs.limits.limit_inserted_junctions,
"--outFilterType",
inputs.filtering.out_filter_type,
"--outSAMtype",
"BAM",
"Unsorted",
]
if inputs.reads.type.startswith("data:reads:fastq:single:"):
star_params.extend(
["--readFilesIn", ",".join(mate_1), "--readFilesCommand", "zcat"]
)
elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
star_params.extend(
[
"--readFilesIn",
",".join(mate_1),
",".join(mate_2),
"--readFilesCommand",
"zcat",
]
)
else:
self.error("Wrong reads input type.")
if inputs.annotation:
star_params.extend(
[
"--sjdbGTFfile",
inputs.annotation.output.annot.path,
"--sjdbOverhang",
inputs.annotation_options.sjdb_overhang,
"--sjdbGTFfeatureExon",
inputs.annotation_options.feature_exon,
]
)
if inputs.annotation.type.startswith("data:annotation:gff3:"):
star_params.extend(["--sjdbGTFtagExonParentTranscript", "Parent"])
if inputs.unstranded:
star_params.extend(["--outSAMstrandField", "intronMotif"])
if inputs.noncannonical:
star_params.extend(["--outFilterIntronMotifs", "RemoveNoncanonical"])
if inputs.detect_chimeric.chimeric:
star_params.extend(
[
"--chimOutType",
"SeparateSAMold",
"--chimSegmentMin",
inputs.detect_chimeric.chim_segment_min,
]
)
gene_segments = Path(inputs.genome.output.index.path) / "geneInfo.tab"
if inputs.t_coordinates.quant_mode:
if not gene_segments.is_file() and not inputs.annotation:
self.error(
"Output in transcript coordinates requires genome annotation file."
)
if inputs.gene_counts:
star_params.extend(["--quantMode", "TranscriptomeSAM", "GeneCounts"])
else:
star_params.extend(["--quantMode", "TranscriptomeSAM"])
if inputs.t_coordinates.single_end:
star_params.extend(["--quantTranscriptomeBan", "Singleend"])
elif inputs.gene_counts:
if not gene_segments.is_file() and not inputs.annotation:
self.error(
"Counting the number of reads per gene requires a genome "
"annotation file."
)
star_params.extend(["--quantMode", "GeneCounts"])
if inputs.filtering.out_multimap_max:
star_params.extend(
["--outFilterMultimapNmax", inputs.filtering.out_multimap_max]
)
if inputs.filtering.out_mismatch_max:
star_params.extend(
["--outFilterMismatchNmax", inputs.filtering.out_mismatch_max]
)
if inputs.filtering.out_mismatch_nl_max:
star_params.extend(
["--outFilterMismatchNoverLmax", inputs.filtering.out_mismatch_nl_max]
)
if inputs.filtering.out_score_min:
star_params.extend(["--outFilterScoreMin", inputs.filtering.out_score_min])
if inputs.filtering.out_mismatch_nrl_max:
star_params.extend(
[
"--outFilterMismatchNoverReadLmax",
inputs.filtering.out_mismatch_nrl_max,
]
)
if inputs.alignment.align_overhang_min:
star_params.extend(
["--alignSJoverhangMin", inputs.alignment.align_overhang_min]
)
if inputs.alignment.align_sjdb_overhang_min:
star_params.extend(
["--alignSJDBoverhangMin", inputs.alignment.align_sjdb_overhang_min]
)
if inputs.alignment.align_intron_size_min:
star_params.extend(
["--alignIntronMin", inputs.alignment.align_intron_size_min]
)
if inputs.alignment.align_intron_size_max:
star_params.extend(
["--alignIntronMax", inputs.alignment.align_intron_size_max]
)
if inputs.alignment.align_gap_max:
star_params.extend(["--alignMatesGapMax", inputs.alignment.align_gap_max])
if inputs.alignment.align_end_alignment:
star_params.extend(
["--alignMatesGapMax", inputs.alignment.align_end_alignment]
)
if inputs.two_pass_mapping.two_pass_mode:
star_params.extend(["--twopassMode", "Basic"])
if inputs.output_options.out_unmapped:
star_params.extend(["--outSAMunmapped", "Within"])
if inputs.output_options.out_sam_attributes:
# Create a list from string of out_sam_attributes to avoid unknown/unimplemented
# SAM attrribute error due to Plumbum command passing problems.
attributes = inputs.output_options.out_sam_attributes.split(" ")
star_params.extend(["--outSAMattributes", attributes])
if inputs.output_options.out_rg_line:
star_params.extend(
["--outSAMattrRGline", inputs.output_options.out_rg_line]
)
elif len(mate_1) > 1:
read_groups = [
f"ID:{Path(file_path).name[:-9].replace(' ', '_')} SM:sample1"
for file_path in mate_1
]
star_params.append("--outSAMattrRGline " + " , ".join(read_groups))
self.progress(0.1)
return_code, _, _ = Cmd["STAR"][star_params] & TEE(retcode=None)
log_file = Path("Log.out")
# Log contains useful information for debugging.
if log_file.is_file():
with open(log_file, "r") as log:
print(log.read())
if return_code:
self.error("Reads alignment failed.")
self.progress(0.7)
star_unmapped_r1 = Path("Unmapped.out.mate1")
if star_unmapped_r1.is_file():
unmapped_out_1 = f"{mate1_name}_unmapped.out.mate1.fastq"
star_unmapped_r1.rename(unmapped_out_1)
return_code, _, _ = Cmd["pigz"][unmapped_out_1] & TEE(retcode=None)
if return_code:
self.error("Compression of unmapped mate 1 reads failed.")
outputs.unmapped_1 = f"{unmapped_out_1}.gz"
star_unmapped_r2 = Path("Unmapped.out.mate2")
if (
inputs.reads.type.startswith("data:reads:fastq:paired:")
and star_unmapped_r2.is_file()
):
unmapped_out_2 = f"{mate2_name}_unmapped.out.mate2.fastq"
star_unmapped_r2.rename(unmapped_out_2)
return_code, _, _ = Cmd["pigz"][unmapped_out_2] & TEE(retcode=None)
if return_code:
self.error("Compression of unmapped mate 2 reads failed.")
outputs.unmapped_2 = f"{unmapped_out_2}.gz"
self.progress(0.8)
out_bam = f"{mate1_name}.bam"
out_bai = f"{out_bam}.bai"
sort_params = [
"Aligned.out.bam",
"-o",
out_bam,
"-@",
self.requirements.resources.cores,
]
return_code, _, _ = Cmd["samtools"]["sort"][sort_params] & TEE(retcode=None)
if return_code:
self.error("Samtools sort command failed.")
outputs.bam = out_bam
return_code, _, _ = Cmd["samtools"]["index"][out_bam, out_bai] & TEE(
retcode=None
)
if return_code:
self.error("Samtools index command failed.")
outputs.bai = out_bai
self.progress(0.9)
if inputs.detect_chimeric.chimeric:
out_chimeric = f"{mate1_name}_chimeric.out.sam"
Path("Chimeric.out.sam").rename(out_chimeric)
outputs.chimeric = out_chimeric
if inputs.t_coordinates.quant_mode:
out_transcriptome = f"{mate1_name}_aligned.toTranscriptome.out.bam"
Path("Aligned.toTranscriptome.out.bam").rename(out_transcriptome)
outputs.alignment_transcriptome = out_transcriptome
if inputs.gene_counts:
out_counts = f"{mate1_name}_ReadsPerGene.out.tab.gz"
with open(file="ReadsPerGene.out.tab", mode="rb") as f_in:
with gzip.open(filename=out_counts, mode="wb") as f_out:
shutil.copyfileobj(f_in, f_out)
outputs.gene_counts = out_counts
out_stats = f"{mate1_name}_stats.txt"
Path("Log.final.out").rename(out_stats)
outputs.stats = out_stats
out_sj = f"{mate1_name}_SJ.out.tab"
Path("SJ.out.tab").rename(out_sj)
outputs.sj = out_sj
outputs.species = inputs.genome.output.species
        outputs.build = inputs.genome.output.build
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import Cmd, DataField, DirField, FileField, Process, StringField
class BWAMEM2Index(Process):
    """Create BWA-MEM2 genome index."""

    slug = "bwamem2-index"
    process_type = "data:index:bwamem2"
    name = "BWA-MEM2 genome index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"},
        },
        "resources": {
            "cores": 6,
            "memory": 98304,
        },
    }
    category = "Genome index"
    data_name = '{{ ref_seq.fasta.file|basename|default("?") }}'
    version = "1.1.0"

    class Input:
        """Input fields for BWAMEM2Index."""

        ref_seq = DataField(
            "seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
        )

    class Output:
        """Output fields to process BWAMEM2Index."""

        index = DirField(label="BWA-MEM2 index")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Derive the output base name from the input FASTA file name.
        basename = Path(inputs.ref_seq.output.fasta.path).name
        # NOTE(review): asserts are stripped under `python -O`; an explicit
        # error would be more robust here.
        assert basename.endswith(".fasta")
        name = basename[:-6]
        index_dir = Path("BWAMEM2_index")
        index_dir.mkdir()
        # Copy the reference files into the working directory; the FASTA
        # output fields below reference these local copies.
        shutil.copy(Path(inputs.ref_seq.output.fasta.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fastagz.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fai.path), Path.cwd())
        # -p sets the prefix of the generated index files inside index_dir.
        args = [
            "-p",
            index_dir / f"{name}.fasta",
            inputs.ref_seq.output.fasta.path,
        ]
        return_code, _, _ = Cmd["bwa-mem2"]["index"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error occurred while preparing the BWA-MEM2 index.")
        outputs.index = index_dir.name
        outputs.fasta = f"{name}.fasta"
        outputs.fastagz = f"{name}.fasta.gz"
        outputs.fai = f"{name}.fasta.fai"
        outputs.species = inputs.ref_seq.output.species
        outputs.build = inputs.ref_seq.output.build
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import Cmd, DataField, DirField, FileField, Process, StringField
class Bowtie2Index(Process):
    """Create Bowtie2 genome index."""

    slug = "bowtie2-index"
    process_type = "data:index:bowtie2"
    name = "Bowtie2 genome index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    category = "Genome index"
    data_name = '{{ ref_seq.fasta.file|basename|default("?") }}'
    version = "1.2.1"

    class Input:
        """Input fields for Bowtie2Index."""

        ref_seq = DataField(
            "seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
        )

    class Output:
        """Output fields to process Bowtie2Index."""

        index = DirField(label="Bowtie2 index")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Derive the output base name from the input FASTA file name.
        basename = Path(inputs.ref_seq.output.fasta.path).name
        # NOTE(review): asserts are stripped under `python -O`; an explicit
        # error would be more robust here.
        assert basename.endswith(".fasta")
        name = basename[:-6]
        index_dir = Path("bowtie2_index")
        index_dir.mkdir()
        # Copy the reference files into the working directory; the FASTA
        # output fields below reference these local copies.
        shutil.copy(Path(inputs.ref_seq.output.fasta.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fastagz.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fai.path), Path.cwd())
        # bowtie2-build <reference_in> <bt2_index_base>
        args = [
            inputs.ref_seq.output.fasta.path,
            index_dir / f"{name}_index",
            "--threads",
            self.requirements.resources.cores,
        ]
        return_code, _, _ = Cmd["bowtie2-build"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error occurred while preparing the Bowtie2 index.")
        outputs.index = index_dir.name
        outputs.fasta = f"{name}.fasta"
        outputs.fastagz = f"{name}.fasta.gz"
        outputs.fai = f"{name}.fasta.fai"
        outputs.species = inputs.ref_seq.output.species
        outputs.build = inputs.ref_seq.output.build
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import Cmd, DataField, DirField, FileField, Process, StringField
class BowtieIndex(Process):
    """Create Bowtie genome index."""

    slug = "bowtie-index"
    process_type = "data:index:bowtie"
    name = "Bowtie genome index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    category = "Genome index"
    data_name = '{{ ref_seq.fasta.file|basename|default("?") }}'
    version = "1.2.1"

    class Input:
        """Input fields for BowtieIndex."""

        ref_seq = DataField(
            "seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
        )

    class Output:
        """Output fields to process BowtieIndex."""

        index = DirField(label="Bowtie index")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Derive the output base name from the input FASTA file name.
        basename = Path(inputs.ref_seq.output.fasta.path).name
        # NOTE(review): asserts are stripped under `python -O`; an explicit
        # error would be more robust here.
        assert basename.endswith(".fasta")
        name = basename[:-6]
        index_dir = Path("bowtie_index")
        index_dir.mkdir()
        # Copy the reference files into the working directory; the FASTA
        # output fields below reference these local copies.
        shutil.copy(Path(inputs.ref_seq.output.fasta.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fastagz.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fai.path), Path.cwd())
        # bowtie-build <reference_in> <ebwt_outfile_base>
        args = [
            inputs.ref_seq.output.fasta.path,
            index_dir / f"{name}_index",
            "--threads",
            self.requirements.resources.cores,
        ]
        return_code, _, _ = Cmd["bowtie-build"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error occurred while preparing the Bowtie index.")
        outputs.index = index_dir.name
        outputs.fasta = f"{name}.fasta"
        outputs.fastagz = f"{name}.fasta.gz"
        outputs.fai = f"{name}.fasta.fai"
        outputs.species = inputs.ref_seq.output.species
        outputs.build = inputs.ref_seq.output.build
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import Cmd, DataField, DirField, FileField, Process, StringField
class Hisat2Index(Process):
    """Create HISAT2 genome index."""

    slug = "hisat2-index"
    process_type = "data:index:hisat2"
    name = "HISAT2 genome index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    category = "Genome index"
    data_name = '{{ ref_seq.fasta.file|basename|default("?") }}'
    version = "1.2.1"

    class Input:
        """Input fields for Hisat2Index."""

        ref_seq = DataField(
            "seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
        )

    class Output:
        """Output fields to process Hisat2Index."""

        index = DirField(label="HISAT2 index")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Derive the output base name from the input FASTA file name.
        basename = Path(inputs.ref_seq.output.fasta.path).name
        # NOTE(review): asserts are stripped under `python -O`; an explicit
        # error would be more robust here.
        assert basename.endswith(".fasta")
        name = basename[:-6]
        index_dir = Path("hisat2_index")
        index_dir.mkdir()
        # Copy the reference files into the working directory; the FASTA
        # output fields below reference these local copies.
        shutil.copy(Path(inputs.ref_seq.output.fasta.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fastagz.path), Path.cwd())
        shutil.copy(Path(inputs.ref_seq.output.fai.path), Path.cwd())
        # hisat2-build <reference_in> <ht2_index_base>; -p sets thread count.
        args = [
            inputs.ref_seq.output.fasta.path,
            index_dir / f"{name}_index",
            "-p",
            self.requirements.resources.cores,
        ]
        return_code, _, _ = Cmd["hisat2-build"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error occurred while preparing the HISAT2 index.")
        outputs.index = index_dir.name
        outputs.fasta = f"{name}.fasta"
        outputs.fastagz = f"{name}.fasta.gz"
        outputs.fai = f"{name}.fasta.fai"
        outputs.species = inputs.ref_seq.output.species
        outputs.build = inputs.ref_seq.output.build
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import Cmd, DataField, DirField, FileField, Process, StringField
class BWAIndex(Process):
    """Create BWA genome index.

    Builds a BWA index from a nucleotide FASTA reference and copies the
    reference files (plain, gzipped and ``.fai`` index) alongside it so that
    downstream alignment processes can consume them together.
    """

    slug = "bwa-index"
    process_type = "data:index:bwa"
    name = "BWA genome index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
        },
    }
    category = "Genome index"
    data_name = '{{ ref_seq.fasta.file|basename|default("?") }}'
    version = "1.2.0"

    class Input:
        """Input fields for BWAIndex."""

        ref_seq = DataField(
            "seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
        )

    class Output:
        """Output fields to process BWAIndex."""

        index = DirField(label="BWA index")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Build the BWA index and register reference files as outputs."""
        basename = Path(inputs.ref_seq.output.fasta.path).name
        # Validate through the process error mechanism instead of an assert,
        # which is silently stripped when Python runs with -O.
        if not basename.endswith(".fasta"):
            self.error("The reference sequence file must have a .fasta extension.")
        name = basename[: -len(".fasta")]

        index_dir = Path("BWA_index")
        index_dir.mkdir()

        # Copy the plain, compressed and indexed reference FASTA files into
        # the working directory so they can be registered as outputs.
        for src in (
            inputs.ref_seq.output.fasta.path,
            inputs.ref_seq.output.fastagz.path,
            inputs.ref_seq.output.fai.path,
        ):
            shutil.copy(Path(src), Path.cwd())

        args = [
            "-p",
            index_dir / f"{name}.fasta",
            inputs.ref_seq.output.fasta.path,
        ]
        return_code, _, _ = Cmd["bwa"]["index"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error occurred while preparing the BWA index.")

        outputs.index = index_dir.name
        outputs.fasta = f"{name}.fasta"
        outputs.fastagz = f"{name}.fasta.gz"
        outputs.fai = f"{name}.fasta.fai"
        outputs.species = inputs.ref_seq.output.species
        outputs.build = inputs.ref_seq.output.build
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
DirField,
FileField,
GroupField,
IntegerField,
Process,
StringField,
)
class StarIndex(Process):
    """Generate STAR genome index.

    Generate genome indices files from the supplied reference genome
    sequence and GTF files. The current version of STAR is 2.7.10b.
    """

    slug = "alignment-star-index"
    name = "STAR genome index"
    process_type = "data:index:star"
    version = "4.0.0"
    category = "Genome index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.2.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 32768,
        },
    }
    data_name = "{{ ref_seq.fasta.file|basename|default('?') }}"

    class Input:
        """Input fields to process StarIndex."""

        ref_seq = DataField(
            "seq:nucleotide", label="Reference sequence (nucleotide FASTA)"
        )
        annotation = DataField(
            "annotation",
            label="Annotation file (GTF/GFF3)",
            required=False,
            description="Insert known annotations into genome indices at the indexing stage.",
        )
        source = StringField(
            label="Gene ID Database Source",
            disabled="annotation",
            required=False,
            allow_custom_choice=True,
            choices=[
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )

        class AnnotationOptions:
            """Annotation file options."""

            feature_exon = StringField(
                label="Feature type [--sjdbGTFfeatureExon]",
                default="exon",
                description="Feature type in GTF file to be used as exons for building "
                "transcripts.",
            )
            sjdb_overhang = IntegerField(
                label="Junction length [--sjdbOverhang]",
                default=100,
                description="This parameter specifies the length of the genomic sequence around "
                "the annotated junction to be used in constructing the splice junction database. "
                "Ideally, this length should be equal to the ReadLength-1, where ReadLength is "
                "the length of the reads. For instance, for Illumina 2x100b paired-end reads, the "
                "ideal value is 100-1=99. In case of reads of varying length, the ideal value is "
                "max(ReadLength)-1. In most cases, the default value of 100 will work as well as "
                "the ideal value.",
            )

        class AdvancedOptions:
            """Advanced options."""

            genome_sa_string_len = IntegerField(
                label="Small genome adjustment [--genomeSAindexNbases]",
                required=False,
                description="For small genomes, the parameter --genomeSAindexNbases needs to be "
                "scaled down, with a typical value of min(14, log2(GenomeLength)/2 - 1). For "
                "example, for 1 megaBase genome, this is equal to 9, for 100 kiloBase genome, "
                "this is equal to 7.",
            )
            genome_chr_bin_size = IntegerField(
                label="Bin size for genome storage [--genomeChrBinNbits]",
                required=False,
                description="If you are using a genome with a large (>5,000) number of references "
                "(chrosomes/scaffolds), you may need to reduce the --genomeChrBinNbits to reduce "
                "RAM consumption. The following scaling is recommended: --genomeChrBinNbits = "
                "min(18, log2(GenomeLength / NumberOfReferences)). For example, for 3 gigaBase "
                "genome with 100,000 chromosomes/scaffolds, this is equal to 15.",
            )
            genome_sa_sparsity = IntegerField(
                label="Suffix array sparsity [--genomeSAsparseD]",
                required=False,
                description="Suffix array sparsity, i.e. distance between indices: use bigger "
                "numbers to decrease needed RAM at the cost of mapping speed reduction (integer > "
                "0, default = 1).",
            )

        annotation_options = GroupField(
            AnnotationOptions, label="Annotation file options", hidden="!annotation"
        )
        advanced = GroupField(AdvancedOptions, label="Advanced options")

    class Output:
        """Output fields to process StarIndex."""

        index = DirField(label="Indexed genome")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Build the STAR genome index."""
        # Without an annotation the gene ID source cannot be inferred, so it
        # must be given explicitly.
        if not inputs.source and not inputs.annotation:
            self.error(
                "Gene ID database Source information must be provided when annotation GTF is not "
                "selected."
            )

        # Copy the reference files next to the working directory and register
        # them as outputs alongside the index.
        fasta = Path(inputs.ref_seq.output.fasta.path).name
        shutil.copy(inputs.ref_seq.output.fasta.path, fasta)
        outputs.fasta = fasta

        fastagz = Path(inputs.ref_seq.output.fastagz.path).name
        shutil.copy(inputs.ref_seq.output.fastagz.path, fastagz)
        outputs.fastagz = fastagz

        fai = Path(inputs.ref_seq.output.fai.path).name
        shutil.copy(inputs.ref_seq.output.fai.path, fai)
        outputs.fai = fai

        self.progress(0.1)

        index_dir = Path("star_index")
        index_dir.mkdir()

        index_params = [
            "--runThreadN",
            self.requirements.resources.cores,
            "--runMode",
            "genomeGenerate",
            "--genomeDir",
            str(index_dir),
            "--genomeFastaFiles",
            str(Path(inputs.ref_seq.output.fasta.path).name),
        ]

        if inputs.annotation:
            index_params.extend(
                [
                    "--sjdbGTFfile",
                    inputs.annotation.output.annot.path,
                    "--sjdbOverhang",
                    inputs.annotation_options.sjdb_overhang,
                    "--sjdbGTFfeatureExon",
                    inputs.annotation_options.feature_exon,
                ]
            )
            if inputs.annotation.type.startswith("data:annotation:gff3:"):
                # The option name and its value must be separate argv entries:
                # plumbum does not word-split list items, so a single
                # "--option value" string would reach STAR as one
                # (unrecognized) argument.
                index_params.extend(
                    ["--sjdbGTFtagExonParentTranscript", "Parent"]
                )

        if inputs.advanced.genome_sa_string_len:
            index_params.extend(
                ["--genomeSAindexNbases", inputs.advanced.genome_sa_string_len]
            )
        if inputs.advanced.genome_chr_bin_size:
            index_params.extend(
                ["--genomeChrBinNbits", inputs.advanced.genome_chr_bin_size]
            )
        if inputs.advanced.genome_sa_sparsity:
            index_params.extend(
                ["--genomeSAsparseD", inputs.advanced.genome_sa_sparsity]
            )

        return_code, _, _ = Cmd["STAR"][index_params] & TEE(retcode=None)
        if return_code:
            self.error("Genome index build failed.")

        self.progress(0.8)

        outputs.index = str(index_dir)

        # Metadata follows the annotation when one is given; otherwise it
        # comes from the reference sequence and the explicit source input.
        if inputs.annotation:
            outputs.source = inputs.annotation.output.source
            outputs.species = inputs.annotation.output.species
            outputs.build = inputs.annotation.output.build
        else:
            outputs.source = inputs.source
            outputs.species = inputs.ref_seq.output.species
            outputs.build = inputs.ref_seq.output.build
import csv
import tempfile
from collections import defaultdict
from resolwe.process import (
Cmd,
DataField,
FileField,
FloatField,
IntegerField,
JsonField,
ListField,
Persistence,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
class GOEnrichmentAnalysis(ProcessBio):
    """Identify significantly enriched Gene Ontology terms for given genes."""

    slug = "goenrichment"
    name = "GO Enrichment analysis"
    process_type = "data:goea"
    version = "3.6.3"
    category = "Enrichment and Clustering"
    data_name = 'GO Enrichment analysis for {{genes|join(", ")|default("?")}}'
    scheduling_class = SchedulingClass.INTERACTIVE
    persistence = Persistence.TEMP
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {"cores": 1, "memory": 4096, "storage": 10},
    }

    class Input:
        """Input fields to process GOEnrichmentAnalysis."""

        ontology = DataField("ontology:obo", label="Gene Ontology")
        gaf = DataField("gaf", label="GO annotation file (GAF v2.0)")
        genes = ListField(
            StringField(), label="List of genes", placeholder="new gene id"
        )
        source = StringField(label="Gene ID database of selected genes")
        species = StringField(
            label="Species",
            allow_custom_choice=True,
            description="Specify species name. This field is required if gene subset is set.",
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
            ],
        )
        pval_threshold = FloatField(
            label="P-value threshold",
            default=0.1,
        )
        min_genes = IntegerField(
            label="Minimum number of genes",
            default=1,
            description="Minimum number of genes on a GO term.",
        )

    class Output:
        """Output fields to process GOEnrichmentAnalysis."""

        terms = JsonField(label="Enriched terms")
        ids = FileField(label="Mapped ids", required=False)
        source = StringField(label="Source")
        species = StringField(label="Species")

    def run(self, inputs, outputs):
        """Run analysis."""
        # GAF annotations are species specific, so the query genes must come
        # from the same species as the GAF file.
        if inputs.species != inputs.gaf.output.species:
            self.warning(
                "Selected genes Species must be the same as the Species field of the GAF file."
            )
            self.error(
                f"Selected genes are from {inputs.species}, "
                f"while GAF file has defined {inputs.gaf.output.species} under Species field."
            )

        # Sanity check: the query genes must exist in the knowledge base.
        org_features = self.feature.filter(
            source=inputs.source, species=inputs.species, feature_id__in=inputs.genes
        )
        if len(org_features) == 0:
            self.error("No genes were fetched from the knowledge base.")

        if inputs.source == inputs.gaf.output.source:
            # Gene IDs already use the GAF namespace; no mapping needed.
            target_ids = inputs.genes
        else:
            # Translate the query genes into the GAF file's gene-ID namespace
            # through the knowledge-base mapping.
            mapping_res = self.mapping.filter(
                source_db=inputs.source,
                source_species=inputs.species,
                target_db=inputs.gaf.output.source,
                target_species=inputs.gaf.output.species,
                source_id__in=inputs.genes,
            )
            if len(mapping_res) == 0:
                self.error("Failed to map features.")

            # ids collects every target ID per source gene so the full
            # (possibly one-to-many) mapping can be reported to the user.
            ids = defaultdict(list)
            target_ids = []
            for m in mapping_res:
                if m.source_id in inputs.genes:
                    target_ids.append(m.target_id)
                    if m.source_id in ids:
                        self.warning(f"Mapping {m.source_id} returned multiple times.")
                    ids[m.source_id].append(m.target_id)

            if len(inputs.genes) > len(ids):
                self.warning("Not all features could be mapped.")

            # Persist the source -> target ID pairs for inspection.
            if len(target_ids) > 0:
                with open("mapped_ids.txt", "w") as f:
                    writer = csv.writer(f, delimiter="\t", lineterminator="\n")
                    writer.writerow([inputs.source, inputs.gaf.output.source])
                    for key, value in ids.items():
                        for v in value:
                            writer.writerow([key, v])
                outputs.ids = "mapped_ids.txt"

        # The external "processor" tool reads the gene list from a file, so
        # write the (mapped) IDs to a temporary file and run the enrichment.
        with tempfile.NamedTemporaryFile() as input_genes:
            input_genes.write(" ".join(target_ids).encode("UTF-8"))
            input_genes.flush()

            args = [
                str(inputs.pval_threshold),
                str(inputs.min_genes),
                inputs.ontology.output.obo_obj.path,
                inputs.gaf.output.gaf_obj.path,
                input_genes.name,
            ]
            (Cmd["processor"][args] > "terms.json")()

        outputs.source = inputs.gaf.output.source
        outputs.species = inputs.gaf.output.species
        outputs.terms = "terms.json"
import gzip
import io
import json
from pathlib import Path
import pandas as pd
from resolwe.process import (
DataField,
FileField,
JsonField,
Persistence,
Process,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
def gzopen(fname):
    """Return a buffered, text-mode reader over a gzip-compressed file."""
    raw = gzip.open(fname)
    buffered = io.BufferedReader(raw)
    return io.TextIOWrapper(buffered)
def isfloat(value):
    """Tell whether *value* can be parsed as a float."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def expression_to_json(infile, outfile):
    """Convert a gzipped two-column gene expression file to JSON.

    Only rows that split into exactly two tab-separated fields, with a
    numeric second field, are kept.
    """
    genes = {}
    with gzopen(infile) as handle:
        for line in handle:
            fields = line.split("\t")
            if len(fields) == 2 and isfloat(fields[1]):
                genes[fields[0]] = float(fields[1])
    with open(outfile, "w") as handle:
        json.dump({"genes": genes}, handle)
def rename_cols(infile, outfile):
    """Rename columns in expression file.

    Reads the gzipped tab-separated *infile*, keeps only the ``Gene`` and
    ``Expression`` columns, and writes them to *outfile* under the
    ``FEATURE_ID`` / ``log2 normalized intensity signal`` headers.
    """
    exp = pd.read_csv(
        infile,
        compression="gzip",
        sep="\t",
        skip_blank_lines=True,
        usecols=["Gene", "Expression"],
        index_col="Gene",
        dtype={
            "Gene": str,
            "Expression": float,
        },
        # The read_csv(squeeze=True) keyword was removed in pandas 2.0;
        # DataFrame.squeeze("columns") gives the same single-column Series.
    ).squeeze("columns")
    return exp.to_csv(
        outfile,
        index_label="FEATURE_ID",
        header=["log2 normalized intensity signal"],
        sep="\t",
    )
def prepare_expression_set(exp_file, feature_dict, outfile_name):
    """Prepare expression set output data.

    :param exp_file: tab-separated file with ``FEATURE_ID`` and expression columns
    :param feature_dict: mapping from feature IDs to gene symbols
    :param outfile_name: basename for the ``.txt.gz`` and ``.json`` outputs
    """
    exp = pd.read_csv(exp_file, sep="\t", float_precision="round_trip")
    exp["FEATURE_ID"] = exp["FEATURE_ID"].astype("str")
    exp["GENE_SYMBOL"] = exp["FEATURE_ID"].map(feature_dict)
    input_features = exp["FEATURE_ID"].tolist()
    # Check if all of the input feature IDs could be mapped to the gene symbols
    if not all(f_id in feature_dict for f_id in input_features):
        print(
            f"{sum(exp.isnull().values.ravel())} feature(s) "
            f"could not be mapped to the associated feature symbols."
        )
    # Reorder columns and take an explicit copy so the NaN replacement below
    # does not mutate a view of the original frame (avoids the chained
    # assignment / SettingWithCopyWarning hazard of fillna(inplace=True)).
    columns = ["FEATURE_ID", "GENE_SYMBOL", "log2 normalized intensity signal"]
    exp_set = exp[columns].copy()
    # Replace NaN values with empty string
    exp_set = exp_set.fillna("")
    # Write to file
    exp_set.to_csv(
        outfile_name + ".txt.gz",
        header=True,
        index=False,
        sep="\t",
        compression="gzip",
    )
    # Write to JSON
    df_dict = exp_set.set_index("FEATURE_ID").to_dict(orient="index")
    with open(outfile_name + ".json", "w") as f:
        json.dump({"genes": df_dict}, f, allow_nan=False)
class ImportMicroarrayExpression(Process):
    """Import unmapped microarray expression data."""

    slug = "upload-microarray-expression"
    name = "Upload microarray expression (unmapped)"
    process_type = "data:microarray:normalized"
    version = "1.1.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 4096,
            "network": True,
        },
    }
    data_name = '{{ exp.file|default("?") }}'

    class Input:
        """Input fields to process ImportMicroarrayExpression."""

        exp = FileField(
            label="Normalized expression",
            description="Normalized expression file with the original probe IDs. Supported file extensions are "
            ".tab.*, .tsv.*, .txt.*",
        )
        exp_type = StringField(
            label="Normalization type",
        )
        platform = StringField(
            label="Microarray platform name",
        )
        platform_id = StringField(
            label="GEO platform ID",
            description="Platform ID according to the GEO database. This can be used in following steps to "
            "automatically map probe IDs to genes.",
            required=False,
        )
        species = StringField(
            label="Species",
            description="Select a species name from the dropdown menu or write a custom species name in the species "
            "field",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Macaca mulatta", "Macaca mulatta"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
            ],
        )

    class Output:
        """Output fields to process ImportMicroarrayExpression."""

        exp = FileField(label="Uploaded normalized expression")
        exp_type = StringField(label="Normalization type")
        platform = StringField(label="Microarray platform type")
        platform_id = StringField(label="GEO platform ID", required=False)
        species = StringField(label="Species")

    def run(self, inputs, outputs):
        """Run the analysis."""
        exp = inputs.exp.import_file(imported_format="compressed")

        supported_extensions = (".tab", ".tsv", ".txt")
        # The imported file keeps a double extension (e.g. ".tsv.gz"), so the
        # inner extension is checked on the stem of the compressed file name.
        if not Path(exp).stem.endswith(supported_extensions):
            self.error(
                f"The imported file has unsupported file name extension. "
                f"The supported extensions are {supported_extensions}."
            )

        # The GEO platform ID is optional; propagate it only when given.
        if inputs.platform_id:
            outputs.platform_id = inputs.platform_id

        outputs.exp = exp
        outputs.exp_type = inputs.exp_type
        outputs.platform = inputs.platform
        outputs.species = inputs.species
class MicroarrayExpression(ProcessBio):
    """Upload normalized and mapped microarray expression data."""

    slug = "mapped-microarray-expression"
    name = "Mapped microarray expression"
    process_type = "data:expression:microarray"
    version = "1.3.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 4096,
            "network": True,
        },
    }
    data_name = "{{ exp_unmapped|name|default('?') }}"

    class Input:
        """Input fields to process MicroarrayExpression."""

        exp_unmapped = DataField(
            "microarray:normalized",
            label="Unmapped normalized expressions",
            description="Unmapped normalized expression with the original probe IDs.",
        )
        exp = FileField(
            label="Normalized and mapped expressions file",
            description="Files should have two columns one with GeneIDs and the other one with expression values."
            "Expected column names are 'Gene' and 'Expression'.Supported file extensions are .tab.*, .tsv.*, .txt.*",
        )
        source = StringField(
            label="Gene ID source",
            allow_custom_choice=True,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        build = StringField(
            label="Genome build",
        )
        probe_mapping = StringField(
            label="Probe to transcript mapping used",
        )

    class Output:
        """Output fields."""

        exp = FileField(label="Normalized expression")
        exp_json = JsonField(label="Expression (json)")
        exp_type = StringField(label="Expression type")
        platform = StringField(label="Microarray platform type")
        platform_id = StringField(label="GEO platform ID", required=False)
        exp_set = FileField(label="Expressions")
        exp_set_json = JsonField(label="Expressions (json)")
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")
        probe_mapping = StringField(label="Probe to transcript mapping used")

    def run(self, inputs, outputs):
        """Run the analysis."""
        exp = inputs.exp.import_file(imported_format="compressed")

        exp_stem = Path(exp).stem
        supported_extensions = (".tab", ".tsv", ".txt")
        if not exp_stem.endswith(supported_extensions):
            self.error(
                "The imported file has unsupported file name extension. "
                f"The supported extensions are {supported_extensions}."
            )
        # All supported extensions are four characters long, so stripping the
        # last four characters of the stem leaves the bare sample name.
        name = exp_stem[:-4]

        expression_to_json(exp, "json.txt")

        # Rename columns of the expression file
        exp_renamed = f"{exp}_renamed"
        rename_cols(infile=exp, outfile=exp_renamed)

        # Prepare the expression set outputs
        feature_ids = pd.read_csv(
            exp_renamed, sep="\t", index_col="FEATURE_ID"
        ).index.tolist()

        feature_filters = {
            "source": inputs.source,
            "species": inputs.exp_unmapped.output.species,
            "feature_id__in": feature_ids,
        }
        # Resolve gene symbols for the measured features from the knowledge base.
        feature_ids_to_names = {
            f.feature_id: f.name for f in self.feature.filter(**feature_filters)
        }
        prepare_expression_set(
            exp_file=exp_renamed,
            feature_dict=feature_ids_to_names,
            outfile_name=f"{name}_expressions",
        )

        # The GEO platform ID is optional on the unmapped input.
        if inputs.exp_unmapped.output.platform_id:
            outputs.platform_id = inputs.exp_unmapped.output.platform_id

        outputs.exp = exp
        outputs.exp_json = "json.txt"
        outputs.exp_type = inputs.exp_unmapped.output.exp_type
        outputs.platform = inputs.exp_unmapped.output.platform
        outputs.exp_set = name + "_expressions.txt.gz"
        outputs.exp_set_json = name + "_expressions.json"
        outputs.source = inputs.source
        outputs.species = inputs.exp_unmapped.output.species
        outputs.build = inputs.build
        outputs.feature_type = "gene"
        outputs.probe_mapping = inputs.probe_mapping
import gzip
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
Persistence,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
class MethylationArraySesame(ProcessBio):
    """Illumina methylation array SeSAMe process.

    Implemented SeSAMe method for analyzing methylation array from
    Illumina. For more information on the pipeline, please see
    https://www.bioconductor.org/packages/release/bioc/html/sesame.html

    This process will input IDAT methylation array files and
    produce two results. One is the quality control file with
    some basic statistics, such as mean beta, fraction of
    (un)methylated, GCT, predicted ethnicity, gender and age.
    Methylation data file holds betas, mvals and pvals for probe ids.
    In addition, Ensembl IDs and HGNC gene symbol names are provided,
    along with chromosome and start/end positions of CpG sites
    (1-based).
    """

    slug = "methylation-array-sesame"
    name = "Methylation analysis (SeSAMe)"
    process_type = "data:methylation:sesame"
    version = "1.4.1"
    category = "Methylation arrays"
    data_name = 'SeSAMe array ({{ idat_file.red_channel.file|default("?") }})'
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/s4q6j6e8/resolwebio/methylation_arrays:1.1.0"
            }
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }

    class Input:
        """Input fields to process MethylationArraySesame."""

        idat_file = DataField(
            data_type="methylationarray:idat",
            label="Illumina methylation array IDAT file",
            description="Illumina methylation array BeadChip raw IDAT file.",
        )

    class Output:
        """Output fields to process MethylationArraySesame."""

        methylation_data = FileField(label="A gzipped tab delimited file (txt.gz)")
        qc_data = FileField(label="Quality control information from SeSAMe analysis")
        species = StringField(label="Species")
        platform = StringField(label="Platform used in the analysis")

    def run(self, inputs, outputs):
        """Run MethylationArraySesame process."""
        # The sesame.R script expects uncompressed IDAT files in ./data.
        dirdata = Path("./data")
        if not dirdata.exists():
            dirdata.mkdir()

        red = inputs.idat_file.output.red_channel.path
        green = inputs.idat_file.output.green_channel.path

        # Decompress the input IDAT files to the destination folder. This
        # fixes an edge case where calling the sesame.R script using
        # Plumbum was failing for some compressed IDAT inputs due to the
        # file encoding issues
        for idat in [red, green]:
            with gzip.open(idat, "rb") as in_file:
                # Strip the trailing ".gz" from the destination file name.
                with open(
                    dirdata / Path(idat).name.rsplit(".", 1)[0], "wb"
                ) as out_file:
                    shutil.copyfileobj(in_file, out_file)

        platform = inputs.idat_file.output.platform
        # The probe manifest file name is derived from the platform (hg38).
        manifest = f"{platform}.hg38.manifest"

        sesame_args = [
            f"--platform={platform}",
            f"--manifest={manifest}",
        ]
        rc, _, _ = Cmd["sesame.R"][sesame_args] & TEE(retcode=None)
        # Returns QC_data.txt and beta_values_annotated.txt.gz
        if rc:
            self.error(
                "An error was encountered during the running of SeSAMe pipeline."
            )

        outputs.qc_data = "QC_data.txt"
        outputs.methylation_data = "beta_values_annotated.txt.gz"
        outputs.species = inputs.idat_file.output.species
        outputs.platform = platform
import re
from io import StringIO
from pathlib import Path
import pandas as pd
from bioservices import BioMart
from resolwe.process import (
DataField,
FileField,
ListField,
Process,
SchedulingClass,
StringField,
)
# Mapping from GEO platform accession (GPL*) to the corresponding Ensembl
# BioMart probe attribute name, used to translate microarray probe IDs into
# Ensembl gene IDs via a BioMart query.
PLATFORM_MAP = {
    "GPL74": "affy_hc_g110",
    "GPL201": "affy_hg_focus",
    "GPL96": "affy_hg_u133a",
    "GPL571": "affy_hg_u133a_2",
    "GPL97": "affy_hg_u133b",
    "GPL570": "affy_hg_u133_plus_2",
    "GPL91": "affy_hg_u95a",
    "GPL8300": "affy_hg_u95av2",
    "GPL92": "affy_hg_u95b",
    "GPL93": "affy_hg_u95c",
    "GPL94": "affy_hg_u95d",
    "GPL95": "affy_hg_u95e",
    "GPL17586": "affy_hta_2_0",
    "GPL5175": "affy_huex_1_0_st_v2",
    "GPL80": "affy_hugenefl",
    "GPL6244": "affy_hugene_1_0_st_v1",
    "GPL16686": "affy_hugene_2_0_st_v1",
    "GPL15207": "affy_primeview",
    "GPL1352": "affy_u133_x3p",
    "GPL11068": "agilent_cgh_44b",
    "GPL26966": "agilent_gpl26966",
    "GPL6848": "agilent_gpl6848",
    "GPL14550": "agilent_sureprint_g3_ge_8x60k",
    "GPL17077": "agilent_sureprint_g3_ge_8x60k_v2",
    "GPL16981": "agilent_wholegenome",
    "GPL6480": "agilent_wholegenome_4x44k_v1",
    "GPL13497": "agilent_wholegenome_4x44k_v2",
    "GPL6947": "illumina_humanht_12_v3",
    "GPL10558": "illumina_humanht_12_v4",
    "GPL6883": "illumina_humanref_8_v3",
    "GPL13376": "illumina_humanwg_6_v2",
    "GPL6884": "illumina_humanwg_6_v3",
    "GPL6254": "phalanx_onearray",
}
def get_exp_table(fname, sample_name):
    """Read one probe expression table and return it as a named Series.

    The probe ID index is taken from the ``ID_REF`` column when present,
    otherwise from the first column; the values come from the ``VALUE``
    column when present, otherwise from the first remaining column.
    """
    frame = pd.read_csv(fname, sep="\t")
    index_col = "ID_REF" if "ID_REF" in frame.columns else frame.columns[0]
    frame = frame.set_index(index_col)
    series = frame["VALUE"] if "VALUE" in frame.columns else frame.iloc[:, 0]
    return series.rename(sample_name)
def join_expressions(expressions):
    """Outer-join per-sample expression tables into one probe-indexed frame."""
    tables = []
    for expression in expressions:
        tables.append(
            get_exp_table(expression.output.exp.path, expression.entity.name)
        )
    joined = pd.concat(tables, axis=1, join="outer")
    joined.index = joined.index.astype(str)
    joined.index.name = "probe"
    return joined
class MapMicroarrayProbes(Process):
    """Map microarray probes to Gene IDs.

    Mapping can be done automatically or using a custom mapping file.
    For automatic probe mapping all 'Normalized expression' objects
    should have a GEO platform ID. If the platform is supported the
    provided probe IDs will be mapped to the corresponding Ensembl IDs.

    Currently supported platforms are: GPL74, GPL201, GPL96, GPL571,
    GPL97, GPL570, GPL91, GPL8300, GPL92, GPL93, GPL94, GPL95, GPL17586,
    GPL5175, GPL80, GPL6244, GPL16686, GPL15207, GPL1352, GPL11068,
    GPL26966, GPL6848, GPL14550, GPL17077, GPL16981, GPL13497, GPL6947,
    GPL10558, GPL6883, GPL13376,GPL6884, GPL6254.
    """

    slug = "map-microarray-probes"
    name = "Map microarray probes"
    process_type = "data:microarray:mapping"
    version = "1.1.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
    }
    data_name = "Probe mapping"

    class Input:
        """Input fields to process MapMicroarrayProbes."""

        expressions = ListField(
            DataField("microarray:normalized"),
            label="Normalized expressions",
        )
        mapping_file = FileField(
            label="File with probe ID mappings",
            description="The file should be tab-separated and contain two columns with their column names. The first "
            "column should contain Gene IDs and the second one should contain probe names. Supported file extensions "
            "are .tab.*, .tsv.*, .txt.*",
            required=False,
        )
        source = StringField(
            label="Gene ID source",
            description="Gene ID source used for probe mapping is required when using a custom file.",
            allow_custom_choice=True,
            required=False,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        build = StringField(
            label="Genome build",
            description="Genome build of mapping file is required when using a custom file.",
            required=False,
        )

    class Output:
        """Output fields to process MapMicroarrayProbes."""

        mapped_exp = FileField(label="Mapped expressions")
        probe_mapping = StringField(label="Probe to transcript mapping used")
        mapping = FileField(label="Mapping file")
        platform = StringField(label="Microarray platform type")
        platform_id = StringField(label="GEO platform ID", required=False)

    def run(self, inputs, outputs):
        """Run the analysis."""
        # All samples must share species, normalization, platform and GEO
        # platform ID; compare everything against the first sample.
        for exp in inputs.expressions:
            if exp.output.species != inputs.expressions[0].output.species:
                self.error(
                    "Input samples are of different Species: "
                    f"{exp.output.species} and {inputs.expressions[0].output.species}."
                )
            if exp.output.exp_type != inputs.expressions[0].output.exp_type:
                self.error(
                    "Input samples have different Normalization types: "
                    f"{exp.output.exp_type} and {inputs.expressions[0].output.exp_type}."
                )
            if exp.output.platform != inputs.expressions[0].output.platform:
                self.error(
                    "Input samples have different Microarray platform types: "
                    f"{exp.output.platform} and {inputs.expressions[0].output.platform}."
                )
            if exp.output.platform_id != inputs.expressions[0].output.platform_id:
                self.error(
                    "Input samples have different GEO platform IDs: "
                    f"{exp.output.platform_id} and {inputs.expressions[0].output.platform_id}."
                )

        species = inputs.expressions[0].output.species
        platform = inputs.expressions[0].output.platform
        platform_id = inputs.expressions[0].output.platform_id

        joined_expressions = join_expressions(inputs.expressions)
        probe_ids = joined_expressions.index.unique()

        if inputs.mapping_file:
            # A user-supplied gene-to-probe mapping file is used verbatim.
            mapping_file = inputs.mapping_file.import_file(imported_format="compressed")

            stem = Path(mapping_file).stem
            supported_extensions = (".tab", ".tsv", ".txt")
            if not stem.endswith(supported_extensions):
                self.error(
                    "Mapping file has unsupported file name extension. "
                    f"The supported extensions are {supported_extensions}."
                )

            mapping = pd.read_csv(
                mapping_file,
                sep="\t",
                header=0,
                names=["ensembl_id", "probe"],
                dtype=str,
            )
            mapping = mapping.drop_duplicates()

            if inputs.source:
                source = inputs.source
            else:
                self.error(
                    "Custom probe id mapping file was provided but no source was selected."
                )
            if inputs.build:
                build = inputs.build
            else:
                self.error(
                    "Custom probe id mapping file was provided but genome build was not defined."
                )
            probe_mapping = "Custom"
        else:
            # Automatic mapping goes through Ensembl BioMart, keyed by the
            # GEO platform ID.
            if not platform_id:
                self.error(
                    "Custom mapping file should be provided when samples do not have a GEO platform defined"
                )
            if platform_id not in PLATFORM_MAP:
                self.error(f"GEO platform {platform_id} is not supported.")

            # E.g. "Homo sapiens" -> "hsapiens_gene_ensembl".
            species_low = species.lower()
            dataset = f"{species_low[0]}{species_low.split(' ')[1]}_gene_ensembl"
            probe_mapping = PLATFORM_MAP[platform_id]

            try:
                b = BioMart()
            except IOError:
                raise Exception("None of the ENSEMBL Biomart hosts is reachable.")
            except Exception as e:
                raise Exception(f"Unexpected biomart error: {e}")

            b.add_dataset_to_xml(dataset)
            b.add_attribute_to_xml("ensembl_gene_id")
            b.add_attribute_to_xml(probe_mapping)  # type of microarray
            b.add_filter_to_xml(probe_mapping, ",".join(probe_ids))
            xml_query = b.get_xml()
            res = b.query(xml_query)

            mapping = pd.read_csv(
                StringIO(res),
                sep="\t",
                header=None,
                names=["ensembl_id", "probe"],
                dtype=str,
            )
            mapping = mapping.drop_duplicates()
            mapping_file = f"{platform}_mapping.tsv"
            mapping.to_csv(mapping_file, sep="\t", index=False)

            dataset_names = b.get_datasets("ENSEMBL_MART_ENSEMBL")
            display_name = dataset_names.loc[dataset_names["name"] == dataset][
                "description"
            ].to_string()
            # Typical display name would be Human genes (GRCh38.p13).
            # Use a raw string for the regex so "\(" is not treated as an
            # (invalid) string escape sequence.
            build = re.search(r"\((.+?)\)", display_name).group(1)
            source = "ENSEMBL"

        # Drop probes that map to more than one gene; they are ambiguous.
        mapping = mapping.drop_duplicates(subset=["probe"], keep=False)

        # Take an explicit copy before adding a column so we never assign
        # into a view of the joined expression frame.
        data = joined_expressions.loc[mapping["probe"]].copy()
        data["ensembl_id"] = mapping["ensembl_id"].tolist()
        data = data.reset_index()

        # For Ensembl IDs with multiple probe IDs retain the one with highest expression.
        data["mean"] = data.loc[
            :, data.columns.difference(["probe", "ensembl_id"])
        ].mean(axis=1)
        idx_max = data.groupby(["ensembl_id"])["mean"].idxmax()
        data = data.loc[idx_max].set_index("ensembl_id")
        data = data.drop(columns=["probe", "mean"])
        data.index.name = "Gene"

        mapped_file = "mapped_expressions.tsv.gz"
        data.to_csv(mapped_file, sep="\t", index=True, compression="gzip")

        # Spawn one mapped-expression data object per input sample.
        for column, exp in zip(data.columns, inputs.expressions):
            mapped_column = f"{column}_mapped_exp.tsv.gz"
            data.to_csv(
                mapped_column,
                sep="\t",
                index=True,
                columns=[column],
                header=["Expression"],
                index_label="Gene",
                compression="gzip",
            )
            self.run_process(
                "mapped-microarray-expression",
                {
                    "exp_unmapped": exp.id,
                    "exp": mapped_column,
                    "source": source,
                    "build": build,
                    "probe_mapping": probe_mapping,
                },
            )

        outputs.mapped_exp = mapped_file
        outputs.mapping = mapping_file
        outputs.probe_mapping = probe_mapping
        outputs.platform = platform
        if platform_id:
            outputs.platform_id = platform_id
import os
from pathlib import Path
from shutil import copy, copytree
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
DirField,
FileField,
FileHtmlField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class CellRangerMkref(Process):
    """Reference preparation tool for 10x Genomics Cell Ranger.

    Build a Cell Ranger-compatible reference from genome FASTA and gene GTF files.
    https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/advanced/references
    """

    slug = "cellranger-mkref"
    name = "Cell Ranger Mkref"
    process_type = "data:genomeindex:10x"
    version = "2.1.3"
    category = "scRNA-seq"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/scseq:2.1.0"}
        },
        "resources": {
            # Memory is in MB; cellranger mkref gets ~90% of it (see run()).
            "memory": 32768,
            "cores": 4,
        },
    }
    # Name the data object after the genome FASTA file.
    data_name = '{{ genome.output.fasta.file|default("?") }}'

    class Input:
        """Input fields to process CellRangerMkref."""

        genome = DataField(
            data_type="seq:nucleotide:",
            label="Reference genome",
        )
        annotation = DataField(
            data_type="annotation:gtf:",
            label="Annotation",
        )

    class Output:
        """Output fields to process CellRangerMkref."""

        genome_index = DirField(label="Indexed genome")
        build = StringField(label="Build")
        species = StringField(label="Species")
        source = StringField(label="Gene ID source")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Guard: genome FASTA and GTF annotation must share the same build,
        # otherwise the resulting index would be internally inconsistent.
        genome_build = inputs.genome.output.build
        annotation_build = inputs.annotation.output.build
        if genome_build != annotation_build:
            self.error(
                "Builds of the genome {} and annotation {} do not match. Please provide genome "
                "and annotation with the same build.".format(
                    genome_build, annotation_build
                )
            )
        # Same consistency guard for species.
        genome_species = inputs.genome.output.species
        annotation_species = inputs.annotation.output.species
        if genome_species != annotation_species:
            self.error(
                "Species of genome {} and annotation {} do not match. Please provide genome "
                "and annotation with the same species.".format(
                    genome_species, annotation_species
                )
            )
        # Build the cellranger mkref command incrementally (plumbum syntax).
        cmd = Cmd["cellranger"]["mkref"]
        cmd = cmd["--genome={}".format(genome_build)]
        cmd = cmd["--genes={}".format(inputs.annotation.output.annot_sorted.path)]
        cmd = cmd["--fasta={}".format(inputs.genome.output.fasta.path)]
        cmd = cmd["--nthreads={}".format(self.requirements.resources.cores)]
        # cellranger expects GB; requirements are in MB. Leave ~10% headroom
        # below the container limit so the tool is not OOM-killed.
        cmd = cmd[
            "--memgb={}".format(int(self.requirements.resources.memory * 0.9 / 1024))
        ]
        # TEE streams stdout/stderr into the process log while capturing the
        # exit status; retcode=None disables plumbum's own exception raising.
        return_code, _, _ = cmd & TEE(retcode=None)
        if return_code:
            self.error("Error while running cellranger mkref.")
        # mkref writes the index into a directory named after --genome;
        # rename it so the output directory name is stable across builds.
        os.rename(genome_build, "cellranger_index")
        outputs.genome_index = "cellranger_index"
        outputs.source = inputs.annotation.output.source
        outputs.species = genome_species
        outputs.build = genome_build
class CellRangerCount(Process):
    """Perform gene expression analysis.

    Generate single cell feature counts for a single library.
    https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/using/count
    """

    slug = "cellranger-count"
    name = "Cell Ranger Count"
    process_type = "data:scexpression:10x"
    # 1.2.3: fixed the SC5P-PE chemistry identifier (was the invalid
    # "C5P-PE") and the Input/Output docstrings copy-pasted from
    # ImportScRNA10x.
    version = "1.2.3"
    category = "scRNA-seq"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/scseq:2.1.0"}
        },
        "resources": {
            # Memory is in MB; cellranger count gets ~90% of it (see run()).
            "memory": 32768,
            "cores": 4,
        },
    }
    # Name the data object after the input reads object.
    data_name = "{{ reads|name|default('?') }}"

    class Input:
        """Input fields to process CellRangerCount."""

        reads = DataField(
            data_type="screads:10x:",
            label="10x reads data object",
        )
        genome_index = DataField(
            data_type="genomeindex:10x:",
            label="10x genome index data object",
        )
        chemistry = StringField(
            label="Chemistry",
            required=False,
            default="auto",
            description=(
                "Assay configuration. By default the assay configuration is detected "
                "automatically, which is the recommended mode. You should only specify "
                "chemistry if there is an error in automatic detection."
            ),
            # NOTE(review): other StringFields in these modules list choices
            # as (value, label) pairs; these tuples appear swapped (label
            # first). Confirm which element resolwe stores before relying on
            # non-default chemistry values.
            choices=[
                ("auto", "auto"),
                ("Single Cell 3'", "threeprime"),
                ("Single Cell 5'", "fiveprime"),
                ("Single Cell 3' v1", "SC3Pv1"),
                ("Single Cell 3' v2", "SC3Pv2"),
                ("Single Cell 3' v3", "SC3Pv3"),
                # Cell Ranger names this chemistry SC5P-PE (was "C5P-PE").
                ("Single Cell 5' paired-end", "SC5P-PE"),
                ("Single Cell 5' R2-only", "SC5P-R2"),
            ],
        )
        trim_r1 = IntegerField(
            label="Trim R1",
            required=False,
            description=(
                "Hard-trim the input R1 sequence to this length. Note that the length "
                "includes the Barcode and UMI sequences so do not set this below 26 for "
                "Single Cell 3' v2 or Single Cell 5'. This and \"Trim R2\" are useful for "
                "determining the optimal read length for sequencing."
            ),
        )
        trim_r2 = IntegerField(
            label="Trim R2",
            required=False,
            description="Hard-trim the input R2 sequence to this length.",
        )
        expected_cells = IntegerField(
            label="Expected number of recovered cells",
            default=3000,
        )
        force_cells = IntegerField(
            label="Force cell number",
            required=False,
            description=(
                "Force pipeline to use this number of cells, bypassing the cell "
                "detection algorithm. Use this if the number of cells estimated by Cell "
                "Ranger is not consistent with the barcode rank plot."
            ),
        )

    class Output:
        """Output fields to process CellRangerCount."""

        matrix_filtered = FileField(label="Matrix (filtered)")
        genes_filtered = FileField(label="Genes (filtered)")
        barcodes_filtered = FileField(label="Barcodes (filtered)")
        matrix_raw = FileField(label="Matrix (raw)")
        genes_raw = FileField(label="Genes (raw)")
        barcodes_raw = FileField(label="Barcodes (raw)")
        report = FileHtmlField(label="Report")
        build = StringField(label="Build")
        species = StringField(label="Species")
        source = StringField(label="Gene ID source")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Derive the sample name from the sample entity; strip a trailing
        # ".fastq.gz" so it can be reused as the cellranger run ID.
        sample_name = inputs.reads.entity_name
        if sample_name.endswith(".fastq.gz"):
            sample_name = sample_name[:-9]
        dir_fastqs = "./fastqs"
        os.mkdir(dir_fastqs)
        # Format cellranger count fastq input so it follows the correct naming convention and
        # folder structure: <sample>_S1_L<lane>_R[12]_001.fastq.gz, with
        # barcodes as R1 and cDNA reads as R2, one "lane" per file pair.
        for i, fastqs in enumerate(
            zip(inputs.reads.output.barcodes, inputs.reads.output.reads)
        ):
            os.symlink(
                fastqs[0].path,
                os.path.join(
                    dir_fastqs,
                    "{}_S1_L{}_R1_001.fastq.gz".format(
                        sample_name,
                        str(i + 1).zfill(3),
                    ),
                ),
            )
            os.symlink(
                fastqs[1].path,
                os.path.join(
                    dir_fastqs,
                    "{}_S1_L{}_R2_001.fastq.gz".format(
                        sample_name,
                        str(i + 1).zfill(3),
                    ),
                ),
            )
        # Build the cellranger count command incrementally (plumbum syntax).
        cmd = Cmd["cellranger"]["count"]
        cmd = cmd["--id={}".format(sample_name)]
        cmd = cmd["--fastqs={}".format(dir_fastqs)]
        cmd = cmd[
            "--transcriptome={}".format(inputs.genome_index.output.genome_index.path)
        ]
        cmd = cmd["--localcores={}".format(self.requirements.resources.cores)]
        # cellranger expects GB; requirements are in MB. Leave ~10% headroom.
        cmd = cmd[
            "--localmem={}".format(int(self.requirements.resources.memory * 0.9 / 1024))
        ]
        cmd = cmd["--chemistry={}".format(inputs.chemistry)]
        cmd = cmd["--expect-cells={}".format(inputs.expected_cells)]
        # Optional trimming/forcing flags are added only when set (unset
        # optional IntegerFields are falsy).
        if inputs.trim_r1:
            cmd = cmd["--r1-length={}".format(inputs.trim_r1)]
        if inputs.trim_r2:
            cmd = cmd["--r2-length={}".format(inputs.trim_r2)]
        if inputs.force_cells:
            cmd = cmd["--force-cells={}".format(inputs.force_cells)]
        # TEE streams stdout/stderr into the process log while capturing the
        # exit status; retcode=None disables plumbum's own exception raising.
        return_code, _, _ = cmd & TEE(retcode=None)
        if return_code:
            self.error("Error while running cellranger count.")
        # cellranger writes results under <run id>/outs; copy the report and
        # the filtered/raw matrices out of it for registration as outputs.
        output_dir = Path(f"{sample_name}/outs")
        report_file = "report_summary.html"
        copy(output_dir / "web_summary.html", report_file)
        filtered_dir = Path("filtered_feature_bc_matrix")
        raw_dir = Path("raw_feature_bc_matrix")
        copytree(output_dir / filtered_dir, filtered_dir)
        copytree(output_dir / raw_dir, raw_dir)
        outputs.matrix_filtered = str(filtered_dir / "matrix.mtx.gz")
        outputs.genes_filtered = str(filtered_dir / "features.tsv.gz")
        outputs.barcodes_filtered = str(filtered_dir / "barcodes.tsv.gz")
        outputs.matrix_raw = str(raw_dir / "matrix.mtx.gz")
        outputs.genes_raw = str(raw_dir / "features.tsv.gz")
        outputs.barcodes_raw = str(raw_dir / "barcodes.tsv.gz")
        outputs.report = report_file
        outputs.build = inputs.genome_index.output.build
        outputs.species = inputs.genome_index.output.species
        outputs.source = inputs.genome_index.output.source
        # Spawn upload-bam process so the position-sorted alignment is also
        # registered as a standalone BAM data object on the same sample.
        bam_path = output_dir / "possorted_genome_bam.bam"
        bai_path = output_dir / "possorted_genome_bam.bam.bai"
        copy(bam_path, f"{sample_name}.bam")
        copy(bai_path, f"{sample_name}.bam.bai")
        process_inputs = {
            "src": f"{sample_name}.bam",
            "src2": f"{sample_name}.bam.bai",
            "reads": inputs.reads.id,
            "species": inputs.genome_index.output.species,
            "build": inputs.genome_index.output.build,
        }
        self.run_process("upload-bam-scseq-indexed", process_inputs)
from resolwe.process import (
BooleanField,
Data,
DataField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
StringField,
)
from resolwe.process.models import Process as BioProcess
class WorkflowWgsGvcf(Process):
    """Whole genome sequencing pipeline (GATK GVCF).

    The pipeline follows GATK best practices recommendations and prepares
    single-sample paired-end sequencing data for a joint-genotyping step.
    The pipeline steps include read trimming (Trimmomatic), read alignment
    (BWA-MEM2), marking of duplicates (Picard MarkDuplicates), recalibration
    of base quality scores (ApplyBQSR) and calling of variants
    (GATK HaplotypeCaller in GVCF mode). The QC reports (FASTQC report,
    Picard AlignmentSummaryMetrics, CollectWgsMetrics and InsertSizeMetrics)
    are summarized using MultiQC.
    """

    slug = "workflow-wgs-gvcf"
    name = "WGS analysis (GVCF)"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1",
            },
        },
    }
    # Name after whichever of the two mutually exclusive inputs was given.
    data_name = "WGS GVCF analysis ({{ reads|name|default('?') if reads else aligned_reads|name|default('?') }})"
    version = "2.3.0"
    process_type = "data:workflow:wgs:gvcf"
    category = "Pipeline"
    entity = {
        "type": "sample",
    }

    class Input:
        """Input fields."""

        # Exactly one of `reads` / `aligned_reads` must be provided;
        # this is enforced at runtime in run().
        reads = DataField(
            "reads:fastq:paired",
            label="Input sample (FASTQ)",
            required=False,
            disabled="aligned_reads",
            description="Input data in FASTQ format. This input type allows for optional "
            "read trimming procedure and is mutually exclusive with the BAM input file type.",
        )
        aligned_reads = DataField(
            "alignment:bam",
            label="Input sample (BAM)",
            required=False,
            disabled="reads",
            description="Input data in BAM format. This input file type is mutually exclusive "
            "with the FASTQ input file type and does not allow for read trimming procedure.",
        )
        ref_seq = DataField("seq:nucleotide", label="Reference sequence")
        bwa_index = DataField("index:bwamem2", label="BWA genome index")
        known_sites = ListField(
            DataField("variants:vcf"), label="Known sites of variation (VCF)"
        )

        class Trimming:
            """Trimming parameters (passed to the Trimmomatic process)."""

            enable_trimming = BooleanField(
                label="Trim and quality filter input data",
                description="Enable or disable adapter trimming and QC filtering procedure.",
                default=False,
            )
            adapters = DataField(
                "seq:nucleotide",
                label="Adapter sequences",
                required=False,
                description="Adapter sequences in FASTA format that will "
                "be removed from the reads.",
                disabled="!trimming_options.enable_trimming",
            )
            seed_mismatches = IntegerField(
                label="Seed mismatches",
                required=False,
                disabled="!trimming_options.adapters",
                description="Specifies the maximum mismatch count which "
                "will still allow a full match to be performed. This field "
                "is required to perform adapter trimming.",
            )
            simple_clip_threshold = IntegerField(
                label="Simple clip threshold",
                required=False,
                disabled="!trimming_options.adapters",
                description="Specifies how accurate the match between any "
                "adapter sequence must be against a read. This field is "
                "required to perform adapter trimming.",
            )
            min_adapter_length = IntegerField(
                label="Minimum adapter length",
                default=8,
                disabled="!trimming_options.seed_mismatches && "
                "!trimming_options.simple_clip_threshold && "
                "!trimming_options.palindrome_clip_threshold",
                description="In addition to the alignment score, palindrome "
                "mode can verify that a minimum length of adapter has been "
                "detected. If unspecified, this defaults to 8 bases, for "
                "historical reasons. However, since palindrome mode has a "
                "very low false positive rate, this can be safely reduced, "
                "even down to 1, to allow shorter adapter fragments to be "
                "removed.",
            )
            palindrome_clip_threshold = IntegerField(
                label="Palindrome clip threshold",
                required=False,
                disabled="!trimming_options.adapters",
                description="Specifies how accurate the match between the "
                "two adapter ligated reads must be for PE palindrome read "
                "alignment. This field is required to perform adapter "
                "trimming.",
            )
            leading = IntegerField(
                label="Leading quality",
                required=False,
                description="Remove low quality bases from the beginning, "
                "if below a threshold quality.",
                disabled="!trimming_options.enable_trimming",
            )
            trailing = IntegerField(
                label="Trailing quality",
                required=False,
                description="Remove low quality bases from the end, if "
                "below a threshold quality.",
                disabled="!trimming_options.enable_trimming",
            )
            minlen = IntegerField(
                label="Minimum length",
                required=False,
                description="Drop the read if it is below a specified length.",
                disabled="!trimming_options.enable_trimming",
            )

        class GatkOptions:
            """Options."""

            intervals = DataField(
                "bed",
                label="Intervals BED file",
                description="Use intervals BED file to limit the analysis to "
                "the specified parts of the genome.",
                required=False,
            )
            # NOTE(review): the description says "fraction" but the field is
            # an integer — confirm the downstream HaplotypeCaller process
            # really expects an integer here.
            contamination = IntegerField(
                label="Contamination fraction",
                default=0,
                description="Fraction of contamination in sequencing "
                "data (for all samples) to aggressively remove.",
            )

        class AlignmentSummary:
            """AlignmentSummary parameters (Picard AlignmentSummaryMetrics)."""

            adapters = DataField(
                "seq:nucleotide",
                label="Adapter sequences",
                required=False,
            )
            max_insert_size = IntegerField(
                label="Maximum insert size",
                default=100000,
            )
            pair_orientation = StringField(
                label="Pair orientation",
                default="null",
                choices=[
                    ("null", "Unspecified"),
                    ("FR", "FR"),
                    ("RF", "RF"),
                    ("TANDEM", "TANDEM"),
                ],
            )

        class PicardWGSMetrics:
            """PicardWGSMetrics parameters (Picard CollectWgsMetrics)."""

            read_length = IntegerField(
                label="Average read length",
                default=150,
            )
            min_map_quality = IntegerField(
                label="Minimum mapping quality for a read to contribute coverage",
                default=20,
            )
            # NOTE(review): min_quality is collected from the user but never
            # forwarded to the wgs-metrics process in run() — confirm whether
            # it should be passed through.
            min_quality = IntegerField(
                label="Minimum base quality for a base to contribute coverage",
                default=20,
                description="N bases will be treated as having a base quality of "
                "negative infinity and will therefore be excluded from "
                "coverage regardless of the value of this parameter.",
            )
            coverage_cap = IntegerField(
                label="Maximum coverage cap",
                default=250,
                description="Treat positions with coverage exceeding this "
                "value as if they had coverage at this set value.",
            )
            accumulation_cap = IntegerField(
                label="Ignore positions with coverage above this value",
                default=100000,
                description="At positions with coverage exceeding this value, "
                "completely ignore reads that accumulate beyond this value.",
            )
            sample_size = IntegerField(
                label="Sample size used for Theoretical Het Sensitivity sampling",
                default=10000,
            )

        class InsertSizeMetrics:
            """InsertSizeMetrics parameters (Picard CollectInsertSizeMetrics)."""

            minimum_fraction = FloatField(
                label="Minimum fraction of reads in a category to be considered",
                default=0.05,
                description="When generating the histogram, discard any data "
                "categories (out of FR, TANDEM, RF) that have fewer than "
                "this fraction of overall reads (Range: 0 and 0.5).",
            )
            include_duplicates = BooleanField(
                label="Include reads marked as duplicates in the insert size histogram",
                default=False,
            )
            deviations = FloatField(
                label="Deviations limit",
                default=10.0,
                description="Generate mean, standard deviation and plots "
                "by trimming the data down to MEDIAN + DEVIATIONS * "
                "MEDIAN_ABSOLUTE_DEVIATION. This is done because insert "
                "size data typically includes enough anomalous values "
                "from chimeras and other artifacts to make the mean and "
                "standard deviation grossly misleading regarding the real "
                "distribution.",
            )

        trimming_options = GroupField(Trimming, label="Trimming options")
        gatk_options = GroupField(GatkOptions, label="GATK options")
        alignment_summary = GroupField(
            AlignmentSummary, label="Alignment summary options"
        )
        wgs_metrics = GroupField(PicardWGSMetrics, label="Picard WGS metrics options")
        insert_size = GroupField(
            InsertSizeMetrics, label="Picard InsertSizeMetrics options"
        )

    class Output:
        """Output fields."""

    def run(self, inputs, outputs):
        """Run the workflow."""
        # Enforce the FASTQ-xor-BAM input contract declared in Input.
        if not inputs.reads and not inputs.aligned_reads:
            self.error("Please provide FASTQ or BAM input files.")
        if inputs.reads and inputs.aligned_reads:
            self.error(
                "Please provide input data in either FASTQ or aligned BAM format, not both."
            )
        preprocess_inputs = {
            "ref_seq": inputs.ref_seq,
            "bwa_index": inputs.bwa_index,
            "known_sites": inputs.known_sites,
        }
        # Optionally trim FASTQ reads with Trimmomatic before preprocessing;
        # BAM input skips trimming entirely.
        if inputs.reads and inputs.trimming_options.enable_trimming:
            trimmomatic = Data.create(
                process=BioProcess.get_latest(slug="trimmomatic-paired"),
                input={
                    "reads": inputs.reads,
                    "illuminaclip": {
                        "adapters": inputs.trimming_options.adapters,
                        "seed_mismatches": inputs.trimming_options.seed_mismatches,
                        "simple_clip_threshold": inputs.trimming_options.simple_clip_threshold,
                        "palindrome_clip_threshold": inputs.trimming_options.palindrome_clip_threshold,
                        "min_adapter_length": inputs.trimming_options.min_adapter_length,
                    },
                    "trim_bases": {
                        "trailing": inputs.trimming_options.trailing,
                        "leading": inputs.trimming_options.leading,
                    },
                    "reads_filtering": {"minlen": inputs.trimming_options.minlen},
                },
            )
            preprocess_inputs.update(reads=trimmomatic)
        elif inputs.reads and not inputs.trimming_options.enable_trimming:
            preprocess_inputs.update(reads=inputs.reads)
        else:
            preprocess_inputs.update(aligned_reads=inputs.aligned_reads)
        # Alignment, duplicate marking and BQSR (see class docstring).
        bam = Data.create(
            process=BioProcess.get_latest(slug="wgs-preprocess-bwa2"),
            input=preprocess_inputs,
        )
        # Variant calling in GVCF mode; the resulting data object is attached
        # to the sample, so the return value is intentionally not bound.
        Data.create(
            process=BioProcess.get_latest(slug="gatk-haplotypecaller-gvcf"),
            input={
                "bam": bam,
                "ref_seq": inputs.ref_seq,
                "options": {
                    "intervals": inputs.gatk_options.intervals,
                    "contamination": inputs.gatk_options.contamination,
                },
            },
        )
        # QC step 1: Picard AlignmentSummaryMetrics.
        alignment_summary_inputs = {
            "bam": bam,
            "genome": inputs.ref_seq,
            "insert_size": inputs.alignment_summary.max_insert_size,
            "pair_orientation": inputs.alignment_summary.pair_orientation,
            "bisulfite": False,
            "assume_sorted": True,
        }
        if inputs.alignment_summary.adapters:
            alignment_summary_inputs.update(
                {"adapters": inputs.alignment_summary.adapters}
            )
        summary = Data.create(
            process=BioProcess.get_latest(slug="alignment-summary"),
            input=alignment_summary_inputs,
        )
        # QC step 2: Picard CollectWgsMetrics.
        wgs_metrics = Data.create(
            process=BioProcess.get_latest(slug="wgs-metrics"),
            input={
                "bam": bam,
                "genome": inputs.ref_seq,
                "read_length": inputs.wgs_metrics.read_length,
                "create_histogram": False,
                "options": {
                    "min_map_quality": inputs.wgs_metrics.min_map_quality,
                    "coverage_cap": inputs.wgs_metrics.coverage_cap,
                    "accumulation_cap": inputs.wgs_metrics.accumulation_cap,
                    "count_unpaired": False,
                    "sample_size": inputs.wgs_metrics.sample_size,
                },
            },
        )
        # QC step 3: Picard CollectInsertSizeMetrics.
        insert_size = Data.create(
            process=BioProcess.get_latest(slug="insert-size"),
            input={
                "bam": bam,
                "genome": inputs.ref_seq,
                "minimum_fraction": inputs.insert_size.minimum_fraction,
                "include_duplicates": inputs.insert_size.include_duplicates,
                "deviations": inputs.insert_size.deviations,
                "assume_sorted": True,
            },
        )
        # Aggregate all QC reports (plus raw/trimmed reads when the FASTQ
        # path was taken) into a single MultiQC report.
        multiqc_inputs = [
            bam,
            summary,
            wgs_metrics,
            insert_size,
        ]
        if inputs.reads:
            multiqc_inputs.append(inputs.reads)
        if inputs.reads and inputs.trimming_options.enable_trimming:
            multiqc_inputs.append(trimmomatic)
        Data.create(
            process=BioProcess.get_latest(slug="multiqc"),
            input={"data": multiqc_inputs},
        )
from resolwe.process import (
BooleanField,
Data,
DataField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
StringField,
)
from resolwe.process.models import Process as BioProcess
class WorkflowBBDukStarFcQC(Process):
"""RNA-seq pipeline comprised of preprocessing, alignment and quantification.
First, reads are preprocessed by __BBDuk__ which removes adapters, trims
reads for quality from the 3'-end, and discards reads that are too short
after trimming. Compared to similar tools, BBDuk is regarded for its
computational efficiency. Next, preprocessed reads are aligned by __STAR__
aligner. At the time of implementation, STAR is considered a
state-of-the-art tool that consistently produces accurate results from
diverse sets of reads, and performs well even with default settings. For
more information see [this comparison of RNA-seq
aligners](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5792058/). Finally,
aligned reads are summarized to genes by __featureCounts__. Gaining wide
adoption among the bioinformatics community, featureCounts yields
expressions in a computationally efficient manner. All three tools in
this workflow support parallelization to accelerate the analysis.
rRNA contamination rate in the sample is determined using the STAR aligner.
Quality-trimmed reads are down-sampled (using __Seqtk__ tool) and aligned to the
rRNA reference sequences. The alignment rate indicates the percentage of the
reads in the sample that are derived from the rRNA sequences.
"""
slug = "workflow-bbduk-star-featurecounts-qc"
name = "BBDuk - STAR - featureCounts - QC"
requirements = {
"expression-engine": "jinja",
}
data_name = "{{ reads|name|default('?') }}"
entity = {
"type": "sample",
}
version = "6.0.0"
process_type = "data:workflow:rnaseq:featurecounts:qc"
category = "Pipeline"
class Input:
"""Input fields."""
reads = DataField(
data_type="reads:fastq",
label="Reads (FASTQ)",
description="Reads in FASTQ file, single or paired end.",
)
genome = DataField(
data_type="index:star",
label="Indexed reference genome",
description="Genome index prepared by STAR aligner indexing tool.",
)
annotation = DataField(
data_type="annotation",
label="Annotation",
description="GTF and GFF3 annotation formats are supported.",
)
assay_type = StringField(
label="Assay type",
choices=[
("non_specific", "Strand non-specific"),
("forward", "Strand-specific forward"),
("reverse", "Strand-specific reverse"),
("auto", "Detect automatically"),
],
description="In strand non-specific assay a read is considered overlapping with a "
"feature regardless of whether it is mapped to the same or the opposite "
"strand as the feature. In strand-specific forward assay and single "
"reads, the read has to be mapped to the same strand as the feature. "
"For paired-end reads, the first read has to be on the same strand and "
"the second read on the opposite strand. In strand-specific reverse "
"assay these rules are reversed.",
default="non_specific",
)
cdna_index = DataField(
data_type="index:salmon",
label="cDNA index file",
required=False,
description="Transcriptome index file created using the Salmon indexing tool. "
"cDNA (transcriptome) sequences used for index file creation must be "
"derived from the same species as the input sequencing reads to "
"obtain the reliable analysis results.",
hidden="assay_type != 'auto'",
)
rrna_reference = DataField(
data_type="index:star",
label="Indexed rRNA reference sequence",
description="Reference sequence index prepared by STAR aligner indexing tool.",
)
globin_reference = DataField(
data_type="index:star",
label="Indexed Globin reference sequence",
description="Reference sequence index prepared by STAR aligner indexing tool.",
)
class Preprocessing:
"""Preprocessing with BBDuk."""
adapters = ListField(
inner=DataField(data_type="seq:nucleotide"),
label="Adapters",
required=False,
description="FASTA file(s) with adapters.",
)
custom_adapter_sequences = ListField(
inner=StringField(),
label="Custom adapter sequences",
required=False,
default=[],
description="Custom adapter sequences can be specified by inputting them "
"one by one and pressing Enter after each sequence.",
)
kmer_length = IntegerField(
label="K-mer length [k=]",
default=23,
description="Kmer length used for finding contaminants. "
"Contaminants shorter than kmer length will not be found. "
"Kmer length must be at least 1.",
)
min_k = IntegerField(
label="Minimum k-mer length at right end of reads used for trimming [mink=]",
default=11,
disabled="preprocessing.adapters.length === 0 && preprocessing.custom_adapter_sequences.length === 0",
)
hamming_distance = IntegerField(
label="Maximum Hamming distance for k-mers [hammingdistance=]",
default=1,
description="Hamming distance i.e. the number of mismatches allowed in the kmer.",
)
maxns = IntegerField(
label="Max Ns after trimming [maxns=]",
default=-1,
description="If non-negative, reads with more Ns than this (after trimming) will be discarded.",
)
trim_quality = IntegerField(
label="Average quality below which to trim region [trimq=]",
default=10,
description="Phred algorithm is used, which is more accurate than naive trimming.",
)
min_length = IntegerField(
label="Minimum read length [minlength=]",
default=20,
description="Reads shorter than minimum read length after trimming are discarded.",
)
quality_encoding_offset = StringField(
label="Quality encoding offset [qin=]",
choices=[
("33", "Sanger / Illumina 1.8+"),
("64", "Illumina up to 1.3+, 1.5+"),
("auto", "Auto"),
],
default="auto",
description="Quality encoding offset for input FASTQ files.",
)
ignore_bad_quality = BooleanField(
label="Ignore bad quality [ignorebadquality]",
default=False,
description="Don't crash if quality values appear to be incorrect.",
)
class Alignment:
"""Alignment with STAR."""
unstranded = BooleanField(
label="The data is unstranded [--outSAMstrandField intronMotif]",
default=False,
description="For unstranded RNA-seq data, Cufflinks/Cuffdiff require spliced "
"alignments with XS strand attribute, which STAR will generate with "
"--outSAMstrandField intronMotif option. As required, the XS strand "
"attribute will be generated for all alignments that contain splice "
"junctions. The spliced alignments that have undefined strand "
"(i.e. containing only non-canonical unannotated junctions) will be "
"suppressed. If you have stranded RNA-seq data, you do not need to "
"use any specific STAR options. Instead, you need to run Cufflinks with "
"the library option --library-type options. For example, "
"cufflinks --library-type fr-firststrand should be used for the standard "
"dUTP protocol, including Illumina's stranded Tru-Seq. "
"This option has to be used only for Cufflinks runs and not for STAR runs.",
)
noncannonical = BooleanField(
label="Remove non-cannonical junctions (Cufflinks compatibility)",
default=False,
description="It is recommended to remove the non-canonical junctions for Cufflinks "
"runs using --outFilterIntronMotifs RemoveNoncanonical.",
)
class ChimericReadsOptions:
"""Chimeric reads options."""
chimeric = BooleanField(
label="Detect chimeric and circular alignments [--chimOutType SeparateSAMold]",
default=False,
description="To switch on detection of chimeric (fusion) alignments (in addition "
"to normal mapping), --chimSegmentMin should be set to a positive value. Each "
"chimeric alignment consists of two segments. Each segment is non-chimeric on "
"its own, but the segments are chimeric to each other (i.e. the segments belong "
"to different chromosomes, or different strands, or are far from each other). "
"Both segments may contain splice junctions, and one of the segments may contain "
"portions of both mates. --chimSegmentMin parameter controls the minimum mapped "
"length of the two segments that is allowed. For example, if you have 2x75 reads "
"and used --chimSegmentMin 20, a chimeric alignment with 130b on one chromosome "
"and 20b on the other will be output, while 135 + 15 won't be.",
)
chim_segment_min = IntegerField(
label="Minimum length of chimeric segment [--chimSegmentMin]",
default=20,
disabled="!alignment.chimeric_reads.chimeric",
)
class TranscriptOutputOptions:
"""Transcript coordinate output options."""
quant_mode = BooleanField(
label="Output in transcript coordinates [--quantMode]",
default=False,
description="With --quantMode TranscriptomeSAM option STAR will output alignments "
"translated into transcript coordinates in the Aligned.toTranscriptome.out.bam "
"file (in addition to alignments in genomic coordinates in Aligned.*.sam/bam "
"files). These transcriptomic alignments can be used with various transcript "
"quantification software that require reads to be mapped to transcriptome, such "
"as RSEM or eXpress.",
)
single_end = BooleanField(
label="Allow soft-clipping and indels [--quantTranscriptomeBan Singleend]",
default=False,
disabled="!t_coordinates.quant_mode",
description="By default, the output satisfies RSEM requirements: soft-clipping or "
"indels are not allowed. Use --quantTranscriptomeBan Singleend to allow "
"insertions, deletions and soft-clips in the transcriptomic alignments, which "
"can be used by some expression quantification softwares (e.g. eXpress).",
)
class FilteringOptions:
"""Output filtering options."""
out_filter_type = StringField(
label="Type of filtering [--outFilterType]",
default="Normal",
choices=[
("Normal", "Normal"),
("BySJout", "BySJout"),
],
description="Normal: standard filtering using only current alignment; BySJout: "
"keep only those reads that contain junctions that passed filtering into "
"SJ.out.tab.",
)
out_multimap_max = IntegerField(
label="Maximum number of loci [--outFilterMultimapNmax]",
required=False,
description="Maximum number of loci the read is allowed to map to. Alignments "
"(all of them) will be output only if the read maps to no more loci than this "
"value. Otherwise no alignments will be output, and the read will be counted as "
"'mapped to too many loci' (default: 10).",
)
out_mismatch_max = IntegerField(
label="Maximum number of mismatches [--outFilterMismatchNmax]",
required=False,
description="Alignment will be output only if it has fewer mismatches than this "
"value (default: 10). Large number (e.g. 999) switches off this filter.",
)
out_mismatch_nl_max = FloatField(
label="Maximum no. of mismatches (map length) [--outFilterMismatchNoverLmax]",
required=False,
range=[0.0, 1.0],
description="Alignment will be output only if its ratio of mismatches to *mapped* "
"length is less than or equal to this value (default: 0.3). The value should be "
"between 0.0 and 1.0.",
)
out_score_min = IntegerField(
label="Minimum alignment score [--outFilterScoreMin]",
required=False,
description="Alignment will be output only if its score is higher than or equal "
"to this value (default: 0).",
)
out_mismatch_nrl_max = FloatField(
label="Maximum no. of mismatches (read length) [--outFilterMismatchNoverReadLmax]",
required=False,
range=[0.0, 1.0],
description="Alignment will be output only if its ratio of mismatches to *read* "
"length is less than or equal to this value (default: 1.0). Using 0.04 for "
"2x100bp, the max number of mismatches is calculated as 0.04*200=8 for the paired "
"read. The value should be between 0.0 and 1.0.",
)
class AlignmentOptions:
"""Alignment and Seeding."""
align_overhang_min = IntegerField(
label="Minimum overhang [--alignSJoverhangMin]",
required=False,
description="Minimum overhang (i.e. block size) for spliced alignments "
"(default: 5).",
)
align_sjdb_overhang_min = IntegerField(
label="Minimum overhang (sjdb) [--alignSJDBoverhangMin]",
required=False,
description="Minimum overhang (i.e. block size) for annotated (sjdb) spliced "
"alignments (default: 3).",
)
align_intron_size_min = IntegerField(
label="Minimum intron size [--alignIntronMin]",
required=False,
description="Minimum intron size: the genomic gap is considered an intron if its "
"length >= alignIntronMin, otherwise it is considered Deletion (default: 21).",
)
align_intron_size_max = IntegerField(
label="Maximum intron size [--alignIntronMax]",
required=False,
description="Maximum intron size, if 0, max intron size will be determined by "
"(2pow(winBinNbits)*winAnchorDistNbins)(default: 0).",
)
align_gap_max = IntegerField(
label="Minimum gap between mates [--alignMatesGapMax]",
required=False,
description="Maximum gap between two mates, if 0, max intron gap will be "
"determined by (2pow(winBinNbits)*winAnchorDistNbins) (default: 0).",
)
align_end_alignment = StringField(
label="Read ends alignment [--alignEndsType]",
choices=[
("Local", "Local"),
("EndToEnd", "EndToEnd"),
("Extend5pOfRead1", "Extend5pOfRead1"),
("Extend5pOfReads12", "Extend5pOfReads12"),
],
description="Type of read ends alignment (default: Local). Local: standard local "
"alignment with soft-clipping allowed. EndToEnd: force end-to-end read alignment, "
"do not soft-clip. Extend5pOfRead1: fully extend only the 5p of the read1, all "
"other ends: local alignment. Extend5pOfReads12: fully extend only the 5' of the "
"both read1 and read2, all other ends use local alignment.",
default="Local",
)
class OutputOptions:
"""Output options."""
out_unmapped = BooleanField(
label="Output unmapped reads (SAM) [--outSAMunmapped Within]",
default=False,
description="Output of unmapped reads in the SAM format.",
)
out_sam_attributes = StringField(
label="Desired SAM attributes [--outSAMattributes]",
default="Standard",
choices=[
("Standard", "Standard"),
("All", "All"),
("NH HI NM MD", "NH HI NM MD"),
("None", "None"),
],
description="A string of desired SAM attributes, in the order desired for the "
"output SAM.",
)
out_rg_line = StringField(
label="SAM/BAM read group line [--outSAMattrRGline]",
required=False,
description="The first word contains the read group identifier and must start "
"with ID:, e.g. --outSAMattrRGline ID:xxx CN:yy ”DS:z z z” xxx will be added as "
"RG tag to each output alignment. Any spaces in the tag values have to be double "
"quoted. Comma separated RG lines correspons to different (comma separated) input "
"files in -readFilesIn. Commas have to be surrounded by spaces, e.g. "
"-outSAMattrRGline ID:xxx , ID:zzz ”DS:z z” , ID:yyy DS:yyyy.",
)
chimeric_reads = GroupField(
ChimericReadsOptions,
label="Chimeric reads options",
)
transcript_output = GroupField(
TranscriptOutputOptions,
label="Transcript coordinate output options",
)
filtering_options = GroupField(
FilteringOptions,
label="Output filtering options",
)
alignment_options = GroupField(
AlignmentOptions,
label="Alignment options",
)
output_options = GroupField(
OutputOptions,
label="Output options",
)
        class Quantification:
            """Quantification (featureCounts)."""

            # Subsample size used for strandedness detection; the field is
            # hidden unless the assay type is set to automatic detection.
            n_reads = IntegerField(
                label="Number of reads in subsampled alignment file",
                default=5000000,
                hidden="assay_type != 'auto'",
                description="Alignment (.bam) file subsample size. Increase the number of reads "
                "to make automatic detection more reliable. Decrease the number of "
                "reads to make automatic detection run faster.",
            )
            # GTF/GFF3 feature class (3rd column) counted by featureCounts (-t).
            feature_class = StringField(
                label="Feature class [-t]",
                default="exon",
                description="Feature class (3rd column in GTF/GFF3 file) to be used. All other "
                "features will be ignored.",
            )
            # Summarization level; must agree with 'id_attribute' below.
            feature_type = StringField(
                label="Feature type",
                default="gene",
                choices=[
                    ("gene", "gene"),
                    ("transcript", "transcript"),
                ],
                description="The type of feature the quantification program summarizes over "
                "(e.g. gene or transcript-level analysis). The value of this "
                "parameter needs to be chosen in line with 'ID attribute' below.",
            )
            # Annotation attribute used as the feature ID (featureCounts -g).
            id_attribute = StringField(
                label="ID attribute [-g]",
                default="gene_id",
                allow_custom_choice=True,
                choices=[
                    ("gene_id", "gene_id"),
                    ("transcript_id", "transcript_id"),
                    ("ID", "ID"),
                    ("geneid", "geneid"),
                ],
                description="GTF/GFF3 attribute to be used as feature ID. Several GTF/GFF3 lines "
                "with the same feature ID will be considered as parts of the same "
                "feature. The feature ID is used to identify the counts in the "
                "output table. In GTF files this is usually 'gene_id', in GFF3 files "
                "this is often 'ID', and 'transcript_id' is frequently a valid "
                "choice for both annotation formats.",
            )
            # Requires RG tags in the input BAM files (see description).
            by_read_group = BooleanField(
                label="Assign reads by read group",
                description="RG tag is required to be present in the input BAM files.",
                default=True,
            )
class Downsampling:
"""Downsampling (Seqtk)."""
n_reads = IntegerField(
label="Number of reads",
default=1000000,
description="Number of reads to include in subsampling.",
)
class Advanced:
"""Advanced options for downsampling."""
seed = IntegerField(
label="Seed [-s]",
default=11,
description="Using the same random seed makes reads subsampling more reproducible "
"in different environments.",
)
fraction = FloatField(
label="Fraction of reads used",
required=False,
range=[0.0, 1.0],
description="Use the fraction of reads [0.0 - 1.0] from the orignal input file instead "
"of the absolute number of reads. If set, this will override the 'Number of reads' "
"input parameter.",
)
two_pass = BooleanField(
label="2-pass mode [-2]",
default=False,
description="Enable two-pass mode when down-sampling. Two-pass mode is twice "
"as slow but with much reduced memory.",
)
advanced = GroupField(
Advanced,
label="Advanced options for downsampling",
)
preprocessing = GroupField(
Preprocessing,
label="Preprocessing with BBDuk",
)
alignment = GroupField(
Alignment,
label="Alignment with STAR",
)
quantification = GroupField(
Quantification,
label="Quantification with featureCounts",
)
downsampling = GroupField(
Downsampling,
label="Downsampling with Seqtk",
)
    class Output:
        """Output fields.

        Intentionally empty: the workflow only orchestrates other
        processes, which own the actual result objects.
        """

        # Workflows do not have output fields.
def run(self, inputs, outputs):
"""Run the workflow."""
if not inputs.cdna_index and inputs.assay_type == "auto":
self.error(
"The input cDNA index file is necessary for 'Detect automatically' "
"assay type."
)
input_bbduk = {
"reads": inputs.reads,
"min_length": inputs.preprocessing.min_length,
"reference": {
"sequences": inputs.preprocessing.adapters or [],
"literal_sequences": inputs.preprocessing.custom_adapter_sequences,
},
"processing": {
"kmer_length": inputs.preprocessing.kmer_length,
"hamming_distance": inputs.preprocessing.hamming_distance,
},
"operations": {
"quality_trim": "r",
"trim_quality": inputs.preprocessing.trim_quality,
"quality_encoding_offset": inputs.preprocessing.quality_encoding_offset,
"ignore_bad_quality": inputs.preprocessing.ignore_bad_quality,
"maxns": inputs.preprocessing.maxns,
},
}
if (
inputs.preprocessing.adapters
or inputs.preprocessing.custom_adapter_sequences
):
input_bbduk["operations"]["k_trim"] = "r"
else:
input_bbduk["operations"]["k_trim"] = "f"
if (
inputs.preprocessing.adapters
or inputs.preprocessing.custom_adapter_sequences
):
input_bbduk["operations"]["min_k"] = inputs.preprocessing.min_k
else:
input_bbduk["operations"]["min_k"] = -1
if inputs.reads.type.startswith("data:reads:fastq:single:"):
slug_bbduk = "bbduk-single"
elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
input_bbduk["operations"]["trim_pairs_evenly"] = True
input_bbduk["operations"]["trim_by_overlap"] = True
slug_bbduk = "bbduk-paired"
else:
self.error("Wrong reads input type.")
preprocessing = Data.create(
process=BioProcess.get_latest(slug=slug_bbduk),
input=input_bbduk,
name=f"Trimmed ({inputs.reads.name})",
)
input_star = {
"reads": preprocessing,
"genome": inputs.genome,
"unstranded": inputs.alignment.unstranded,
"noncannonical": inputs.alignment.noncannonical,
"detect_chimeric": {
"chimeric": inputs.alignment.chimeric_reads.chimeric,
"chim_segment_min": inputs.alignment.chimeric_reads.chim_segment_min,
},
"t_coordinates": {
"quant_mode": inputs.alignment.transcript_output.quant_mode,
"single_end": inputs.alignment.transcript_output.single_end,
},
"filtering": {
"out_filter_type": inputs.alignment.filtering_options.out_filter_type,
},
"alignment": {
"align_end_alignment": inputs.alignment.alignment_options.align_end_alignment
},
"output_options": {
"out_unmapped": inputs.alignment.output_options.out_unmapped,
"out_sam_attributes": inputs.alignment.output_options.out_sam_attributes,
},
}
if inputs.alignment.filtering_options.out_multimap_max:
input_star["filtering"][
"out_multimap_max"
] = inputs.alignment.filtering_options.out_multimap_max
if inputs.alignment.filtering_options.out_mismatch_max:
input_star["filtering"][
"out_missmatch_max"
] = inputs.alignment.filtering_options.out_mismatch_max
if inputs.alignment.filtering_options.out_mismatch_nl_max:
input_star["filtering"][
"out_missmatch_nl_max"
] = inputs.alignment.filtering_options.out_mismatch_nl_max
if inputs.alignment.filtering_options.out_score_min:
input_star["filtering"][
"out_score_min"
] = inputs.alignment.filtering_options.out_score_min
if inputs.alignment.filtering_options.out_mismatch_nrl_max:
input_star["filtering"][
"out_mismatch_nrl_max"
] = inputs.alignment.filtering_options.out_mismatch_nrl_max
if inputs.alignment.alignment_options.align_overhang_min:
input_star["alignment"][
"align_overhang_min"
] = inputs.alignment.alignment_options.align_overhang_min
if inputs.alignment.alignment_options.align_sjdb_overhang_min:
input_star["alignment"][
"align_sjdb_overhang_min"
] = inputs.alignment.alignment_options.align_sjdb_overhang_min
if inputs.alignment.alignment_options.align_intron_size_min:
input_star["alignment"][
"align_intron_size_min"
] = inputs.alignment.alignment_options.align_intron_size_min
if inputs.alignment.alignment_options.align_intron_size_max:
input_star["alignment"][
"align_intron_size_max"
] = inputs.alignment.alignment_options.align_intron_size_max
if inputs.alignment.alignment_options.align_gap_max:
input_star["alignment"][
"align_gap_max"
] = inputs.alignment.alignment_options.align_gap_max
if inputs.alignment.output_options.out_rg_line:
input_star["output_options"][
"out_rg_line"
] = inputs.alignment.output_options.out_rg_line
alignment = Data.create(
process=BioProcess.get_latest(slug="alignment-star"),
input=input_star,
name=f"Aligned ({inputs.reads.name})",
)
input_featurecounts = {
"aligned_reads": alignment,
"n_reads": inputs.quantification.n_reads,
"assay_type": inputs.assay_type,
"annotation": inputs.annotation,
"feature_class": inputs.quantification.feature_class,
"feature_type": inputs.quantification.feature_type,
"id_attribute": inputs.quantification.id_attribute,
"general": {
"by_read_group": inputs.quantification.by_read_group,
},
}
if inputs.cdna_index:
input_featurecounts["cdna_index"] = inputs.cdna_index
quantification = Data.create(
process=BioProcess.get_latest(slug="feature_counts"),
input=input_featurecounts,
name=f"Quantified ({inputs.reads.name})",
)
input_seqtk = {
"reads": preprocessing,
"n_reads": inputs.downsampling.n_reads,
"advanced": {
"seed": inputs.downsampling.advanced.seed,
"fraction": inputs.downsampling.advanced.fraction,
"two_pass": inputs.downsampling.advanced.two_pass,
},
}
if inputs.reads.type.startswith("data:reads:fastq:single:"):
slug_seqtk = "seqtk-sample-single"
elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
slug_seqtk = "seqtk-sample-paired"
else:
self.error("Wrong reads input type.")
downsampling = Data.create(
process=BioProcess.get_latest(slug=slug_seqtk),
input=input_seqtk,
name=f"Subsampled ({inputs.reads.name})",
)
alignment_qc_rrna = Data.create(
process=BioProcess.get_latest(slug="alignment-star"),
input={
"reads": downsampling,
"genome": inputs.rrna_reference,
},
name=f"rRNA aligned ({inputs.reads.name})",
)
alignment_qc_globin = Data.create(
process=BioProcess.get_latest(slug="alignment-star"),
input={
"reads": downsampling,
"genome": inputs.globin_reference,
},
name=f"Globin aligned ({inputs.reads.name})",
)
idxstats = Data.create(
process=BioProcess.get_latest(slug="samtools-idxstats"),
input={
"alignment": alignment,
},
name=f"Alignment summary ({inputs.reads.name})",
)
input_multiqc = {
"data": [
inputs.reads,
preprocessing,
alignment,
downsampling,
quantification,
alignment_qc_rrna,
alignment_qc_globin,
idxstats,
],
"advanced": {"dirs": True, "config": True},
}
Data.create(process=BioProcess.get_latest(slug="multiqc"), input=input_multiqc) | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/processes/workflows/bbduk_star_featurecounts_qc.py | 0.875455 | 0.517937 | bbduk_star_featurecounts_qc.py | pypi |
import re
from pathlib import Path
import GEOparse
import pandas as pd
import requests
from resolwe.process import (
BooleanField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
from resolwe.process.models import Data
def parse_sample(gse, db_accession, gse_name):
    """Flatten the metadata of a single GEO sample into a dict.

    Single-valued metadata entries are copied as-is. Multi-valued
    entries where every value looks like "key: value" are expanded into
    separate keys; any other multi-valued entry is joined into one
    space-separated string.
    """
    parsed = {"Database accession": db_accession}
    metadata = gse.gsms[gse_name].metadata
    for field, values in metadata.items():
        if len(values) == 1:
            parsed[field] = values[0]
        elif all(": " in entry for entry in values):
            # Characteristics-style lists become individual key/value pairs.
            for entry in values:
                entry_key, entry_value = entry.split(": ", 1)
                parsed[entry_key] = entry_value
        else:
            parsed[field] = " ".join(values)
    return parsed
def create_metadata(gse, run_info):
    """Build a tab-separated-ready metadata table for a GEO series.

    One row per entry of ``run_info`` (columns ``Accession`` and
    ``SampleName``); the table is indexed by the GEO sample title,
    which is also kept as the "Sample name" column.
    """
    records = []
    for _, run_row in run_info.iterrows():
        records.append(parse_sample(gse, run_row["Accession"], run_row["SampleName"]))
    table = pd.json_normalize(records)
    table.insert(0, "Sample name", table["title"])
    return table.set_index(["Sample name"], drop=False)
def construct_descriptor(metadata, sample_name):
    """Construct a descriptor from sample metadata.

    Build a dictionary with GEO metadata that matches the sample
    descriptor schema. General attributes that have no predetermined
    choices are copied over when present in the metadata; fields with
    a fixed set of choices and the experiment section are filled in
    separately.
    """
    row = metadata.loc[sample_name].fillna("")
    columns = metadata.columns
    descriptor = {"general": {}, "experiment": {}}

    # TODO Replace fixed values with calls to descriptor schema once available.
    # Also organ / tissue should be added then.
    species = [
        "Caenorhabditis elegans",
        "Cricetulus griseus",
        "Dictyostelium discoideum",
        "Dictyostelium purpureum",
        "Drosophila melanogaster",
        "Homo sapiens",
        "Macaca mulatta",
        "Mus musculus",
        "Odocoileus virginianus texanus",
        "Rattus norvegicus",
        "Solanum tuberosum",
    ]
    molecule_choices = [
        "total_rna",
        "polya_rna",
        "cytoplasmic_rna",
        "nuclear_rna",
        "genomic_dna",
        "protein",
        "other",
    ]
    assay_types = [
        "rna-seq",
        "chip-seq",
        "atac-seq",
        "other",
    ]
    platform_types = [
        "nextseq_500",
        "hiseq_2500",
        "hiseq_2000",
        "novaseq_6000",
        "other",
    ]
    general_attributes = {
        "description": "description",
        "cell type": "cell_type",
        "source_name_ch1": "biosample_source",
        "growth_protocol_ch1": "growth_protocol",
        "treatment_protocol_ch1": "treatment_protocol",
    }

    if "organism_ch1" in columns and row["organism_ch1"] in species:
        descriptor["general"]["species"] = row["organism_ch1"]

    if "cell line" in columns:
        descriptor["general"]["biosample_type"] = "cell_line"
        descriptor["general"]["cell_line"] = row["cell line"]
    elif "tissue" in columns:
        descriptor["general"]["biosample_type"] = "tissue"

    if "contact_name" in columns:
        # GEO stores contact names as "First,,Last".
        descriptor["general"]["annotator"] = row["contact_name"].replace(",,", " ")

    for geo_attribute, attribute in general_attributes.items():
        if geo_attribute in columns:
            descriptor["general"][attribute] = row[geo_attribute]

    if "library_strategy" in columns:
        assay = row["library_strategy"].lower().replace(" ", "-")
        if assay in assay_types:
            descriptor["experiment"]["assay_type"] = assay

    if "extract_protocol_ch1" in columns:
        descriptor["experiment"]["extract_protocol"] = row["extract_protocol_ch1"]

    if "molecule_ch1" in columns:
        molecule = row["molecule_ch1"].lower().replace(" ", "_")
        if molecule in molecule_choices:
            descriptor["experiment"]["molecule"] = molecule

    if "instrument_model" in columns:
        platform = (
            row["instrument_model"].replace("Illumina ", "").lower().replace(" ", "_")
        )
        if platform in platform_types:
            descriptor["experiment"]["platform"] = platform

    return descriptor
class GeoImport(Process):
    """Import all runs from a GEO Series.

    WARNING: Additional costs for storage and processing may be incurred
    if a very large data set is selected.

    RNA-seq ChIP-Seq, ATAC-Seq and expression microarray datasets can be
    uploaded.

    For RNA-Seq data sets this runs the SRA import process for each
    experiment (SRX) from the selected RNA-Seq GEO Series. The same
    procedure is followed for ChIP-Seq and ATAC-Seq data sets.

    If GSE contains microarray data, it downloads individual samples and
    uploads them as microarray expression objects. Probe IDs can be
    mapped to the Ensembl IDs if the corresponding GPL platform is
    supported, otherwise, a custom mapping file should be provided.
    Currently supported platforms are: GPL74, GPL201, GPL96, GPL571,
    GPL97, GPL570, GPL91, GPL8300, GPL92, GPL93, GPL94, GPL95, GPL17586,
    GPL5175, GPL80, GPL6244, GPL16686, GPL15207, GPL1352, GPL11068,
    GPL26966, GPL6848, GPL14550, GPL17077, GPL16981, GPL13497, GPL6947,
    GPL10558, GPL6883, GPL13376,GPL6884, GPL6254.

    In addition metadata table with sample information is created and
    uploaded to the same collection.
    """

    slug = "geo-import"
    name = "GEO import"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0",
            },
        },
        "resources": {
            "cores": 1,
            "memory": 16384,
            "network": True,
        },
    }
    data_name = "{{ gse_accession }}"
    version = "2.6.2"
    process_type = "data:geo"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields."""

        gse_accession = StringField(
            label="GEO accession", description="Enter a GEO series accession number."
        )

        class Advanced:
            """Advanced options."""

            prefetch = BooleanField(label="Prefetch SRA file", default=True)
            max_size_prefetch = StringField(
                label="Maximum file size to download in KB",
                default="20G",
                description="A unit prefix can be used instead of a value in KB (e.g. 1024M or 1G).",
            )
            min_spot_id = IntegerField(label="Minimum spot ID", required=False)
            max_spot_id = IntegerField(label="Maximum spot ID", required=False)
            min_read_len = IntegerField(label="Minimum read length", required=False)
            clip = BooleanField(label="Clip adapter sequences", default=False)
            aligned = BooleanField(label="Dump only aligned sequences", default=False)
            unaligned = BooleanField(
                label="Dump only unaligned sequences", default=False
            )
            mapping_file = FileField(
                label="File with probe ID mappings",
                description="The file should be tab-separated and contain two columns with their column names. The "
                "first column should contain Gene IDs and the second one should contain probe names. Supported file "
                "extensions are .tab.*, .tsv.*, .txt.*",
                required=False,
            )
            source = StringField(
                label="Gene ID source",
                description="Gene ID source used for probe mapping is required when using a custom file.",
                allow_custom_choice=True,
                required=False,
                choices=[
                    ("AFFY", "AFFY"),
                    ("DICTYBASE", "DICTYBASE"),
                    ("ENSEMBL", "ENSEMBL"),
                    ("NCBI", "NCBI"),
                    ("UCSC", "UCSC"),
                ],
            )
            build = StringField(
                label="Genome build",
                description="Genome build of mapping file is required when using a custom file.",
                required=False,
            )

        advanced = GroupField(Advanced, label="Advanced options")

    def upload_rna_gse(self, inputs, gse):
        """Upload RNA samples from GEO series.

        Find SRX accessions on a GEO sample (GSM) and fetch the
        corresponding Run Info from SRA. Use run info to retrieve
        individual run accessions (SRR) and library layouts needed for
        sra-import. Samples are renamed to their SRA experiment
        accessions (SRX).
        """
        process_inputs = {
            "sra_accession": [],
            "advanced": {
                "prefetch": inputs.advanced.prefetch,
                "max_size_prefetch": inputs.advanced.max_size_prefetch,
                "clip": inputs.advanced.clip,
                "aligned": inputs.advanced.aligned,
                "unaligned": inputs.advanced.unaligned,
                "min_spot_id": inputs.advanced.min_spot_id,
                "max_spot_id": inputs.advanced.max_spot_id,
                "min_read_len": inputs.advanced.min_read_len,
            },
        }
        # Maps SRX/ERX accession -> GSM name; returned for metadata creation.
        sample_info = {}
        for name, gsm in gse.gsms.items():
            if "SRA" in gse.gsms[name].relations:
                # Match NCBI's SRX and EBI's ERX sample accessions.
                sample_found = re.findall(
                    r"([SE]RX\d{6,8})", str(gse.gsms[name].relations["SRA"])
                )
            else:
                sample_found = None
            if sample_found:
                for srx_id in sample_found:
                    sample_info[srx_id] = name
                    info_file = f"{gse.name}.csv"
                    run_info = requests.get(
                        url="https://eutils.ncbi.nlm.nih.gov/Traces/sra/sra.cgi",
                        params={
                            "save": "efetch",
                            "db": "sra",
                            "rettype": "runinfo",
                            "term": srx_id,
                        },
                    )
                    if run_info.status_code != 200:
                        self.error(
                            f"Failed to fetch SRA runs for project {srx_id} belonging to {gse.name}."
                        )
                    elif run_info.text.isspace():
                        self.error(
                            f"Got an empty response from SRA for SRX ID {srx_id} belonging to {gse.name}."
                        )
                    else:
                        with open(info_file, "wb") as handle:
                            handle.write(run_info.content)
                        run_info = pd.read_csv(
                            info_file, usecols=["Run", "SampleName", "LibraryLayout"]
                        )
                        run_info = run_info.set_index("Run", drop=False)
                        process_inputs["sra_accession"] = run_info.index.values.tolist()
                        # All runs of one experiment must share a library
                        # layout. Explicit error instead of `assert`, which
                        # is stripped under `python -O`.
                        if run_info["LibraryLayout"].nunique() != 1:
                            self.error(
                                f"Runs of experiment {srx_id} have mixed library layouts."
                            )
                        lib_type = run_info["LibraryLayout"].iloc[0]
                        if lib_type == "PAIRED":
                            self.run_process("import-sra-paired", process_inputs)
                        elif lib_type == "SINGLE":
                            self.run_process("import-sra-single", process_inputs)
                        else:
                            self.error(
                                f"Unsupported library layout expected SINGLE or PAIRED but got {lib_type}."
                            )
                        # Rename the newly imported sample to the GSM title.
                        entity_name = process_inputs["sra_accession"][0]
                        sra_data = Data.filter(entity__name=entity_name)[-1]
                        sra_data.entity.name = gsm.metadata["title"][0]
            else:
                self.warning(
                    f"Matching SRX accession number for {gsm.metadata['title'][0]} ({name}) "
                    "was not found in GEO metadata."
                )
        return pd.DataFrame(
            sample_info.items(), columns=["Accession", "SampleName"]
        ).set_index("Accession", drop=False)

    def upload_ma_gse(self, inputs, gse):
        """Upload microarray samples from a GEO series."""
        unmapped_data = []
        for name, gsm in gse.gsms.items():
            exp_path = f"{name}.tsv"
            gsm.table.to_csv(exp_path, sep="\t", index=False)
            platform_id = gsm.metadata["platform_id"][0]
            process_inputs = {
                "exp": exp_path,
                "exp_type": gsm.columns.loc["VALUE", "description"],
                "platform": gse.gpls[platform_id].metadata["title"][0],
                "platform_id": platform_id,
                "species": gsm.metadata["organism_ch1"][0],
            }
            self.run_process("upload-microarray-expression", process_inputs)
            ma_data = Data.filter(entity__name=exp_path)[-1]
            ma_data.entity.name = gsm.metadata["title"][0]
            unmapped_data.append(ma_data.id)
        names = [gsm for gsm in gse.gsms.keys()]
        info_df = {"Accession": names, "SampleName": names}
        mapping_inputs = {"expressions": unmapped_data}
        if inputs.advanced.mapping_file:
            mapping_file = inputs.advanced.mapping_file.import_file(
                imported_format="compressed"
            )
            # Imported files keep a compression suffix; the stem carries
            # the actual data-file extension.
            stem = Path(mapping_file).stem
            supported_extensions = (".tab", ".tsv", ".txt")
            if not stem.endswith(supported_extensions):
                self.error(
                    "Mapping file has unsupported file name extension. "
                    f"The supported extensions are {supported_extensions}."
                )
            mapping_inputs["mapping_file"] = mapping_file
            if inputs.advanced.source:
                mapping_inputs["source"] = inputs.advanced.source
            else:
                self.error(
                    "Custom probe id mapping file was provided but no source was selected."
                )
            if inputs.advanced.build:
                mapping_inputs["build"] = inputs.advanced.build
            else:
                self.error(
                    "Custom probe id mapping file was provided but genome build was not defined."
                )
        self.run_process("map-microarray-probes", mapping_inputs)
        return pd.DataFrame(info_df).set_index("Accession", drop=False)

    def run(self, inputs, outputs):
        """Run the analysis."""
        if not re.match(r"(GSE\d{1,8})", inputs.gse_accession):
            self.error(
                f"GEO series accessions (GSE) are supported but {inputs.gse_accession} was provided."
            )
        try:
            gse = GEOparse.get_GEO(geo=inputs.gse_accession, destdir="./")
        except IOError:
            self.error(
                f"Download of {inputs.gse_accession} failed. ID could be incorrect or the data might not be "
                "public yet."
            )
        except Exception as err:
            self.error(
                f"Download of {inputs.gse_accession} failed. GEO parse failed with {err}"
            )
        supported = [
            "Expression profiling by high throughput sequencing",
            "Expression profiling by array",
            "Genome binding/occupancy profiling by high throughput sequencing",
        ]
        gse_type = gse.get_type() if type(gse.get_type()) is list else [gse.get_type()]
        if set(gse_type).intersection(set(supported)):
            if "SuperSeries of" in gse.relations:
                # This is a mixed GSE series which needs to be unpacked.
                super_series = [
                    GEOparse.get_GEO(geo=accession, destdir="./")
                    for accession in gse.relations["SuperSeries of"]
                ]
            else:
                super_series = [gse]
        else:
            self.error(
                f"No supported series types found. Got {', '.join(gse_type)} but only {', '.join(supported)} "
                "are supported."
            )
        metadata_tables = {}
        for series in super_series:
            series_type = series.get_type()
            if series_type == "Expression profiling by high throughput sequencing":
                run_info = self.upload_rna_gse(inputs, series)
                if run_info.empty:
                    self.warning(
                        f"No samples with SRA files were found for GEO series {series.name}."
                    )
                    continue
                metadata_tables[series.name] = create_metadata(series, run_info)
            elif series_type == "Expression profiling by array":
                run_info = self.upload_ma_gse(inputs, series)
                metadata_tables[series.name] = create_metadata(series, run_info)
            elif (
                series_type
                == "Genome binding/occupancy profiling by high throughput sequencing"
            ):
                # ChIP-Seq / ATAC-Seq follow the same SRA import path as RNA-Seq.
                run_info = self.upload_rna_gse(inputs, series)
                if run_info.empty:
                    self.warning(
                        f"No samples with SRA files were found for GEO series {series.name}."
                    )
                    continue
                metadata_tables[series.name] = create_metadata(series, run_info)
            else:
                self.warning(
                    f"The upload of {series_type} is currently not supported. Samples from {series.name} will be "
                    "skipped."
                )
        if len(metadata_tables) == 0:
            self.error("No supported data found.")
        meta_file = f"{inputs.gse_accession}_metadata.tsv"
        metadata = pd.concat(metadata_tables.values(), join="outer", ignore_index=False)
        metadata.to_csv(meta_file, sep="\t", index=False)
        self.run_process("upload-metadata-unique", {"src": meta_file})
        for entity_name in metadata["Sample name"].values:
            objects = Data.filter(entity__name=entity_name)
            if len(objects) > 1:
                self.warning(
                    f"Multiple samples with entity name {entity_name} are present, descriptor will be added only "
                    "to the last one"
                )
            obj = objects[-1]
            # The metadata table is indexed by "Sample name", which is the
            # entity name used in the filter above.
            obj.entity.descriptor = construct_descriptor(metadata, entity_name)
from resolwe.process import (
BooleanField,
Data,
DataField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
StringField,
)
from resolwe.process.models import Process as BioProcess
class WorkflowSTAR(Process):
"""STAR-based RNA-seq pipeline.
First, reads are preprocessed by __BBDuk__ which removes adapters, trims
reads for quality from the 3'-end, and discards reads that are too short
after trimming. Compared to similar tools, BBDuk is regarded for its
computational efficiency. Next, preprocessed reads are aligned by __STAR__
aligner. At the time of implementation, STAR is considered a
state-of-the-art tool that consistently produces accurate results from
diverse sets of reads, and performs well even with default settings. STAR aligner
counts and reports the number of aligned reads per gene while mapping.
STAR version used is 2.7.10b. For more information see
[this comparison of RNA-seq aligners](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5792058/).
rRNA contamination rate in the sample is determined using the STAR aligner.
Quality-trimmed reads are downsampled (using __Seqtk__ tool) and aligned to the
rRNA reference sequences. The alignment rate indicates the percentage of the
reads in the sample that are derived from the rRNA sequences. Final step of the
workflow is QoRTs QC analysis with downsampled reads.
"""
slug = "workflow-bbduk-star-qc"
name = "STAR-based gene quantification workflow"
requirements = {
"expression-engine": "jinja",
}
data_name = "{{ reads|name|default('?') }}"
version = "1.2.0"
entity = {
"type": "sample",
}
process_type = "data:workflow:rnaseq:star:qc"
category = "Pipeline"
class Input:
"""Input fields."""
reads = DataField(
data_type="reads:fastq",
label="Reads (FASTQ)",
description="Reads in FASTQ file, single or paired end.",
)
genome = DataField(
data_type="index:star",
label="Indexed reference genome",
description="Genome index prepared by STAR aligner indexing tool.",
)
annotation = DataField(
data_type="annotation",
label="Annotation",
description="GTF and GFF3 annotation formats are supported.",
)
assay_type = StringField(
label="Assay type",
choices=[
("non_specific", "Strand non-specific"),
("forward", "Strand-specific forward"),
("reverse", "Strand-specific reverse"),
("auto", "Detect automatically"),
],
description="In strand non-specific assay a read is considered overlapping with a "
"feature regardless of whether it is mapped to the same or the opposite "
"strand as the feature. In strand-specific forward assay and single "
"reads, the read has to be mapped to the same strand as the feature. "
"For paired-end reads, the first read has to be on the same strand and "
"the second read on the opposite strand. In strand-specific reverse "
"assay these rules are reversed.",
default="non_specific",
)
cdna_index = DataField(
data_type="index:salmon",
label="Indexed cDNA reference sequence",
required=False,
description="Transcriptome index file created using the Salmon indexing tool. "
"cDNA (transcriptome) sequences used for index file creation must be "
"derived from the same species as the input sequencing reads to "
"obtain the reliable analysis results.",
hidden="assay_type != 'auto'",
)
rrna_reference = DataField(
data_type="index:star",
label="Indexed rRNA reference sequence",
description="Reference sequence index prepared by STAR aligner indexing tool.",
)
globin_reference = DataField(
data_type="index:star",
label="Indexed Globin reference sequence",
description="Reference sequence index prepared by STAR aligner indexing tool.",
)
        class Preprocessing:
            """Preprocessing with BBDuk."""

            # Adapter reference sequences (FASTA); optional.
            adapters = ListField(
                inner=DataField(data_type="seq:nucleotide"),
                label="Adapters",
                required=False,
                description="FASTA file(s) with adapters.",
            )
            custom_adapter_sequences = ListField(
                inner=StringField(),
                label="Custom adapter sequences",
                required=False,
                default=[],
                description="Custom adapter sequences can be specified by inputting them "
                "one by one and pressing Enter after each sequence.",
            )
            kmer_length = IntegerField(
                label="K-mer length [k=]",
                default=23,
                description="K-mer length used for finding contaminants. "
                "Contaminants shorter than k-mer length will not be found. "
                "K-mer length must be at least 1.",
            )
            # Only meaningful when adapter trimming is active, hence the
            # disabled expression tied to both adapter inputs.
            min_k = IntegerField(
                label="Minimum k-mer length at right end of reads used for trimming [mink=]",
                default=11,
                disabled="preprocessing.adapters.length === 0 && preprocessing.custom_adapter_sequences.length === 0",
            )
            hamming_distance = IntegerField(
                label="Maximum Hamming distance for k-mers [hammingdistance=]",
                default=1,
                description="Hamming distance i.e. the number of mismatches allowed in the k-mer.",
            )
            # -1 disables the N-count filter.
            maxns = IntegerField(
                label="Max Ns after trimming [maxns=]",
                default=-1,
                description="If non-negative, reads with more Ns than this (after trimming) will be discarded.",
            )
            trim_quality = IntegerField(
                label="Average quality below which to trim region [trimq=]",
                default=10,
                description="Phred algorithm is used, which is more accurate than naive trimming.",
            )
            min_length = IntegerField(
                label="Minimum read length [minlength=]",
                default=20,
                description="Reads shorter than minimum read length after trimming are discarded.",
            )
            quality_encoding_offset = StringField(
                label="Quality encoding offset [qin=]",
                choices=[
                    ("33", "Sanger / Illumina 1.8+"),
                    ("64", "Illumina up to 1.3+, 1.5+"),
                    ("auto", "Auto"),
                ],
                default="auto",
                description="Quality encoding offset for input FASTQ files.",
            )
            ignore_bad_quality = BooleanField(
                label="Ignore bad quality [ignorebadquality]",
                default=False,
                description="Don't crash if quality values appear to be incorrect.",
            )
class Alignment:
    """Alignment with STAR.

    Groups the STAR aligner options exposed by this workflow. The
    nested classes below are attached to the workflow schema through
    the ``GroupField`` declarations at the bottom of this class.
    """

    unstranded = BooleanField(
        label="The data is unstranded [--outSAMstrandField intronMotif]",
        default=False,
        description="For unstranded RNA-seq data, Cufflinks/Cuffdiff require spliced "
        "alignments with XS strand attribute, which STAR will generate with "
        "--outSAMstrandField intronMotif option. As required, the XS strand "
        "attribute will be generated for all alignments that contain splice "
        "junctions. The spliced alignments that have undefined strand "
        "(i.e. containing only non-canonical unannotated junctions) will be "
        "suppressed. If you have stranded RNA-seq data, you do not need to "
        "use any specific STAR options. Instead, you need to run Cufflinks with "
        "the library option --library-type options. For example, "
        "cufflinks --library-type fr-firststrand should be used for the standard "
        "dUTP protocol, including Illumina's stranded Tru-Seq. "
        "This option has to be used only for Cufflinks runs and not for STAR runs.",
    )
    noncannonical = BooleanField(
        label="Remove non-canonical junctions (Cufflinks compatibility)",
        default=False,
        description="It is recommended to remove the non-canonical junctions for Cufflinks "
        "runs using --outFilterIntronMotifs RemoveNoncanonical.",
    )

    class ChimericReadsOptions:
        """Chimeric reads options."""

        chimeric = BooleanField(
            label="Detect chimeric and circular alignments [--chimOutType SeparateSAMold]",
            default=False,
            description="To switch on detection of chimeric (fusion) alignments (in addition "
            "to normal mapping), --chimSegmentMin should be set to a positive value. Each "
            "chimeric alignment consists of two segments. Each segment is non-chimeric on "
            "its own, but the segments are chimeric to each other (i.e. the segments belong "
            "to different chromosomes, or different strands, or are far from each other). "
            "Both segments may contain splice junctions, and one of the segments may contain "
            "portions of both mates. --chimSegmentMin parameter controls the minimum mapped "
            "length of the two segments that is allowed. For example, if you have 2x75 reads "
            "and used --chimSegmentMin 20, a chimeric alignment with 130b on one chromosome "
            "and 20b on the other will be output, while 135 + 15 won't be.",
        )
        chim_segment_min = IntegerField(
            label="Minimum length of chimeric segment [--chimSegmentMin]",
            default=20,
            # Only meaningful when chimeric detection is switched on.
            disabled="!alignment.chimeric_reads.chimeric",
        )

    class TranscriptOutputOptions:
        """Transcript coordinate output options."""

        quant_mode = BooleanField(
            label="Output in transcript coordinates [--quantMode]",
            default=False,
            description="With --quantMode TranscriptomeSAM option STAR will output alignments "
            "translated into transcript coordinates in the Aligned.toTranscriptome.out.bam "
            "file (in addition to alignments in genomic coordinates in Aligned.*.sam/bam "
            "files). These transcriptomic alignments can be used with various transcript "
            "quantification software that require reads to be mapped to transcriptome, such "
            "as RSEM or eXpress.",
        )
        single_end = BooleanField(
            label="Allow soft-clipping and indels [--quantTranscriptomeBan Singleend]",
            default=False,
            disabled="!t_coordinates.quant_mode",
            description="By default, the output satisfies RSEM requirements: soft-clipping or "
            "indels are not allowed. Use --quantTranscriptomeBan Singleend to allow "
            "insertions, deletions and soft-clips in the transcriptomic alignments, which "
            "can be used by some expression quantification softwares (e.g. eXpress).",
        )

    class FilteringOptions:
        """Output filtering options."""

        out_filter_type = StringField(
            label="Type of filtering [--outFilterType]",
            default="Normal",
            choices=[
                ("Normal", "Normal"),
                ("BySJout", "BySJout"),
            ],
            description="Normal: standard filtering using only current alignment; BySJout: "
            "keep only those reads that contain junctions that passed filtering into "
            "SJ.out.tab.",
        )
        out_multimap_max = IntegerField(
            label="Maximum number of loci [--outFilterMultimapNmax]",
            required=False,
            description="Maximum number of loci the read is allowed to map to. Alignments "
            "(all of them) will be output only if the read maps to no more loci than this "
            "value. Otherwise no alignments will be output, and the read will be counted as "
            "'mapped to too many loci' (default: 10).",
        )
        out_mismatch_max = IntegerField(
            label="Maximum number of mismatches [--outFilterMismatchNmax]",
            required=False,
            description="Alignment will be output only if it has fewer mismatches than this "
            "value (default: 10). Large number (e.g. 999) switches off this filter.",
        )
        out_mismatch_nl_max = FloatField(
            label="Maximum no. of mismatches (map length) [--outFilterMismatchNoverLmax]",
            required=False,
            range=[0.0, 1.0],
            description="Alignment will be output only if its ratio of mismatches to *mapped* "
            "length is less than or equal to this value (default: 0.3). The value should be "
            "between 0.0 and 1.0.",
        )
        out_score_min = IntegerField(
            label="Minimum alignment score [--outFilterScoreMin]",
            required=False,
            description="Alignment will be output only if its score is higher than or equal "
            "to this value (default: 0).",
        )
        out_mismatch_nrl_max = FloatField(
            label="Maximum no. of mismatches (read length) [--outFilterMismatchNoverReadLmax]",
            required=False,
            range=[0.0, 1.0],
            description="Alignment will be output only if its ratio of mismatches to *read* "
            "length is less than or equal to this value (default: 1.0). Using 0.04 for "
            "2x100bp, the max number of mismatches is calculated as 0.04*200=8 for the paired "
            "read. The value should be between 0.0 and 1.0.",
        )

    class AlignmentOptions:
        """Alignment and Seeding."""

        align_overhang_min = IntegerField(
            label="Minimum overhang [--alignSJoverhangMin]",
            required=False,
            description="Minimum overhang (i.e. block size) for spliced alignments "
            "(default: 5).",
        )
        align_sjdb_overhang_min = IntegerField(
            label="Minimum overhang (sjdb) [--alignSJDBoverhangMin]",
            required=False,
            description="Minimum overhang (i.e. block size) for annotated (sjdb) spliced "
            "alignments (default: 3).",
        )
        align_intron_size_min = IntegerField(
            label="Minimum intron size [--alignIntronMin]",
            required=False,
            description="Minimum intron size: the genomic gap is considered an intron if its "
            "length >= alignIntronMin, otherwise it is considered Deletion (default: 21).",
        )
        align_intron_size_max = IntegerField(
            label="Maximum intron size [--alignIntronMax]",
            required=False,
            description="Maximum intron size, if 0, max intron size will be determined by "
            "(2pow(winBinNbits)*winAnchorDistNbins)(default: 0).",
        )
        # Label corrected from "Minimum": --alignMatesGapMax sets the
        # *maximum* gap between mates, as the description below states.
        align_gap_max = IntegerField(
            label="Maximum gap between mates [--alignMatesGapMax]",
            required=False,
            description="Maximum gap between two mates, if 0, max intron gap will be "
            "determined by (2pow(winBinNbits)*winAnchorDistNbins) (default: 0).",
        )
        align_end_alignment = StringField(
            label="Read ends alignment [--alignEndsType]",
            choices=[
                ("Local", "Local"),
                ("EndToEnd", "EndToEnd"),
                ("Extend5pOfRead1", "Extend5pOfRead1"),
                ("Extend5pOfReads12", "Extend5pOfReads12"),
            ],
            description="Type of read ends alignment (default: Local). Local: standard local "
            "alignment with soft-clipping allowed. EndToEnd: force end-to-end read alignment, "
            "do not soft-clip. Extend5pOfRead1: fully extend only the 5p of the read1, all "
            "other ends: local alignment. Extend5pOfReads12: fully extend only the 5' of the "
            "both read1 and read2, all other ends use local alignment.",
            default="Local",
        )

    class TwoPassOptions:
        """Two-pass mapping options."""

        two_pass_mode = BooleanField(
            label="Use two pass mode [--twopassMode]",
            default=True,
            description="Use two-pass maping instead of first-pass only. In two-pass mode we "
            "first perform first-pass mapping, extract junctions, insert them into genome "
            "index, and re-map all reads in the second mapping pass.",
        )

    class OutputOptions:
        """Output options."""

        out_unmapped = BooleanField(
            label="Output unmapped reads (SAM) [--outSAMunmapped Within]",
            default=True,
            description="Output of unmapped reads in the SAM format.",
        )
        out_sam_attributes = StringField(
            label="Desired SAM attributes [--outSAMattributes]",
            default="Standard",
            choices=[
                ("Standard", "Standard"),
                ("All", "All"),
                ("NH HI NM MD", "NH HI NM MD"),
                ("None", "None"),
            ],
            description="A string of desired SAM attributes, in the order desired for the "
            "output SAM.",
        )
        out_rg_line = StringField(
            label="SAM/BAM read group line [--outSAMattrRGline]",
            required=False,
            description="The first word contains the read group identifier and must start "
            "with ID:, e.g. --outSAMattrRGline ID:xxx CN:yy ”DS:z z z” xxx will be added as "
            "RG tag to each output alignment. Any spaces in the tag values have to be double "
            "quoted. Comma separated RG lines corresponds to different (comma separated) input "
            "files in -readFilesIn. Commas have to be surrounded by spaces, e.g. "
            "-outSAMattrRGline ID:xxx , ID:zzz ”DS:z z” , ID:yyy DS:yyyy.",
        )

    # Wire the option groups into the workflow input schema.
    chimeric_reads = GroupField(
        ChimericReadsOptions,
        label="Chimeric reads options",
    )
    transcript_output = GroupField(
        TranscriptOutputOptions,
        label="Transcript coordinate output options",
    )
    filtering_options = GroupField(
        FilteringOptions,
        label="Output filtering options",
    )
    alignment_options = GroupField(
        AlignmentOptions,
        label="Alignment options",
    )
    two_pass_mapping = GroupField(TwoPassOptions, label="Two-pass mapping")
    output_options = GroupField(
        OutputOptions,
        label="Output options",
    )
class Quantification:
    """Quantification."""

    # Subsample size used for automatic strandedness detection; the
    # field is hidden unless assay_type is set to 'auto'.
    n_reads = IntegerField(
        label="Number of reads in subsampled alignment file for strandedness detection",
        default=5000000,
        hidden="assay_type != 'auto'",
        description="Alignment (.bam) file subsample size. Increase the number of reads "
        "to make automatic detection more reliable. Decrease the number of "
        "reads to make automatic detection run faster.",
    )
class Downsampling:
    """Downsampling (Seqtk)."""

    # Absolute number of reads kept for the QC subsample; may be
    # overridden by the 'fraction' advanced option below.
    n_reads = IntegerField(
        label="Number of reads",
        default=1000000,
        description="Number of reads to include in downsampling.",
    )

    class Advanced:
        """Advanced options for downsampling."""

        seed = IntegerField(
            label="Seed [-s]",
            default=11,
            description="Using the same random seed makes reads downsampling more reproducible "
            "in different environments.",
        )
        # When set, the fraction takes precedence over n_reads above.
        fraction = FloatField(
            label="Fraction of reads used",
            required=False,
            range=[0.0, 1.0],
            description="Use the fraction of reads [0.0 - 1.0] from the original input file instead "
            "of the absolute number of reads. If set, this will override the 'Number of reads' "
            "input parameter.",
        )
        two_pass = BooleanField(
            label="2-pass mode [-2]",
            default=False,
            description="Enable two-pass mode when downsampling. Two-pass mode is twice "
            "as slow but with much reduced memory.",
        )

    advanced = GroupField(
        Advanced,
        label="Advanced options for downsampling",
    )
preprocessing = GroupField(
Preprocessing,
label="Preprocessing with BBDuk",
)
alignment = GroupField(
Alignment,
label="Alignment with STAR",
)
quantification = GroupField(
Quantification,
label="Quantification",
)
downsampling = GroupField(
Downsampling,
label="Downsampling with Seqtk",
)
class Output:
    """Output fields.

    Intentionally empty: the workflow only spawns other Data objects
    in ``run`` and exposes no outputs of its own.
    """

    # Workflows do not have output fields.
def run(self, inputs, outputs):
    """Run the workflow.

    Chains BBDuk trimming, STAR alignment, quantification, Seqtk
    downsampling, rRNA/globin contamination alignments and QC steps,
    then aggregates everything with MultiQC.
    """
    # The cDNA (transcriptome) index is needed to auto-detect strandedness.
    if not inputs.cdna_index and inputs.assay_type == "auto":
        self.error(
            "The input cDNA index file is necessary for 'Detect automatically' "
            "assay type."
        )

    # --- Preprocessing with BBDuk ---------------------------------------
    input_bbduk = {
        "reads": inputs.reads,
        "min_length": inputs.preprocessing.min_length,
        "reference": {
            "sequences": inputs.preprocessing.adapters or [],
            "literal_sequences": inputs.preprocessing.custom_adapter_sequences,
        },
        "processing": {
            "kmer_length": inputs.preprocessing.kmer_length,
            "hamming_distance": inputs.preprocessing.hamming_distance,
        },
        "operations": {
            "quality_trim": "r",
            "trim_quality": inputs.preprocessing.trim_quality,
            "quality_encoding_offset": inputs.preprocessing.quality_encoding_offset,
            "ignore_bad_quality": inputs.preprocessing.ignore_bad_quality,
            "maxns": inputs.preprocessing.maxns,
        },
    }

    # Enable adapter k-mer trimming only when adapter sequences were given.
    if (
        inputs.preprocessing.adapters
        or inputs.preprocessing.custom_adapter_sequences
    ):
        input_bbduk["operations"]["k_trim"] = "r"
    else:
        input_bbduk["operations"]["k_trim"] = "f"

    # min_k of -1 disables the shorter-kmer pass when no adapters are given.
    if (
        inputs.preprocessing.adapters
        or inputs.preprocessing.custom_adapter_sequences
    ):
        input_bbduk["operations"]["min_k"] = inputs.preprocessing.min_k
    else:
        input_bbduk["operations"]["min_k"] = -1

    # Pick the single/paired-end BBDuk process from the reads data type.
    if inputs.reads.type.startswith("data:reads:fastq:single:"):
        slug_bbduk = "bbduk-single"
    elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
        input_bbduk["operations"]["trim_pairs_evenly"] = True
        input_bbduk["operations"]["trim_by_overlap"] = True
        slug_bbduk = "bbduk-paired"
    else:
        self.error("Wrong reads input type.")

    preprocessing = Data.create(
        process=BioProcess.get_latest(slug=slug_bbduk),
        input=input_bbduk,
        name=f"Trimmed ({inputs.reads.name})",
    )

    # --- Alignment with STAR --------------------------------------------
    input_star = {
        "reads": preprocessing,
        "genome": inputs.genome,
        "unstranded": inputs.alignment.unstranded,
        "noncannonical": inputs.alignment.noncannonical,
        "gene_counts": True,
        "detect_chimeric": {
            "chimeric": inputs.alignment.chimeric_reads.chimeric,
            "chim_segment_min": inputs.alignment.chimeric_reads.chim_segment_min,
        },
        "t_coordinates": {
            "quant_mode": inputs.alignment.transcript_output.quant_mode,
            "single_end": inputs.alignment.transcript_output.single_end,
        },
        "filtering": {
            "out_filter_type": inputs.alignment.filtering_options.out_filter_type,
        },
        "alignment": {
            "align_end_alignment": inputs.alignment.alignment_options.align_end_alignment
        },
        "two_pass_mapping": {
            "two_pass_mode": inputs.alignment.two_pass_mapping.two_pass_mode
        },
        "output_options": {
            "out_unmapped": inputs.alignment.output_options.out_unmapped,
            "out_sam_attributes": inputs.alignment.output_options.out_sam_attributes,
        },
    }

    # Optional STAR parameters are forwarded only when set.
    # NOTE(review): these truthiness checks treat 0 / 0.0 as "unset", so an
    # explicit zero cannot be passed through — confirm this is intended.
    if inputs.alignment.filtering_options.out_multimap_max:
        input_star["filtering"][
            "out_multimap_max"
        ] = inputs.alignment.filtering_options.out_multimap_max
    if inputs.alignment.filtering_options.out_mismatch_max:
        input_star["filtering"][
            "out_mismatch_max"
        ] = inputs.alignment.filtering_options.out_mismatch_max
    if inputs.alignment.filtering_options.out_mismatch_nl_max:
        input_star["filtering"][
            "out_mismatch_nl_max"
        ] = inputs.alignment.filtering_options.out_mismatch_nl_max
    if inputs.alignment.filtering_options.out_score_min:
        input_star["filtering"][
            "out_score_min"
        ] = inputs.alignment.filtering_options.out_score_min
    if inputs.alignment.filtering_options.out_mismatch_nrl_max:
        input_star["filtering"][
            "out_mismatch_nrl_max"
        ] = inputs.alignment.filtering_options.out_mismatch_nrl_max
    if inputs.alignment.alignment_options.align_overhang_min:
        input_star["alignment"][
            "align_overhang_min"
        ] = inputs.alignment.alignment_options.align_overhang_min
    if inputs.alignment.alignment_options.align_sjdb_overhang_min:
        input_star["alignment"][
            "align_sjdb_overhang_min"
        ] = inputs.alignment.alignment_options.align_sjdb_overhang_min
    if inputs.alignment.alignment_options.align_intron_size_min:
        input_star["alignment"][
            "align_intron_size_min"
        ] = inputs.alignment.alignment_options.align_intron_size_min
    if inputs.alignment.alignment_options.align_intron_size_max:
        input_star["alignment"][
            "align_intron_size_max"
        ] = inputs.alignment.alignment_options.align_intron_size_max
    if inputs.alignment.alignment_options.align_gap_max:
        input_star["alignment"][
            "align_gap_max"
        ] = inputs.alignment.alignment_options.align_gap_max
    if inputs.alignment.output_options.out_rg_line:
        input_star["output_options"][
            "out_rg_line"
        ] = inputs.alignment.output_options.out_rg_line

    alignment = Data.create(
        process=BioProcess.get_latest(slug="alignment-star"),
        input=input_star,
        name=f"Aligned ({inputs.reads.name})",
    )

    # --- Quantification -------------------------------------------------
    input_quant = {
        "aligned_reads": alignment,
        "n_reads": inputs.quantification.n_reads,
        "assay_type": inputs.assay_type,
        "annotation": inputs.annotation,
    }
    if inputs.cdna_index:
        input_quant["cdna_index"] = inputs.cdna_index

    counts = Data.create(
        process=BioProcess.get_latest(slug="star-quantification"),
        input=input_quant,
        name=f"Quantified ({inputs.reads.name})",
    )

    # --- Downsampling with Seqtk (QC subsample) -------------------------
    input_seqtk = {
        "reads": preprocessing,
        "n_reads": inputs.downsampling.n_reads,
        "advanced": {
            "seed": inputs.downsampling.advanced.seed,
            "fraction": inputs.downsampling.advanced.fraction,
            "two_pass": inputs.downsampling.advanced.two_pass,
        },
    }
    if inputs.reads.type.startswith("data:reads:fastq:single:"):
        slug_seqtk = "seqtk-sample-single"
    elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
        slug_seqtk = "seqtk-sample-paired"
    else:
        self.error("Wrong reads input type.")

    downsampling = Data.create(
        process=BioProcess.get_latest(slug=slug_seqtk),
        input=input_seqtk,
        name=f"Subsampled ({inputs.reads.name})",
    )

    # --- QC: rRNA / globin contamination, idxstats, QoRTs ---------------
    alignment_qc_rrna = Data.create(
        process=BioProcess.get_latest(slug="alignment-star"),
        input={
            "reads": downsampling,
            "genome": inputs.rrna_reference,
        },
        name=f"rRNA aligned ({inputs.reads.name})",
    )
    alignment_qc_globin = Data.create(
        process=BioProcess.get_latest(slug="alignment-star"),
        input={
            "reads": downsampling,
            "genome": inputs.globin_reference,
        },
        name=f"Globin aligned ({inputs.reads.name})",
    )
    idxstats = Data.create(
        process=BioProcess.get_latest(slug="samtools-idxstats"),
        input={
            "alignment": alignment,
        },
        name=f"Alignment summary ({inputs.reads.name})",
    )
    # QoRTs runs on an alignment of the downsampled reads, not the full set.
    alignment_qorts = Data.create(
        process=BioProcess.get_latest(slug="alignment-star"),
        input={
            "reads": downsampling,
            "genome": inputs.genome,
        },
        name=f"Aligned subset ({inputs.reads.name})",
    )
    input_qorts = {
        "alignment": alignment_qorts,
        "annotation": inputs.annotation,
        "options": {
            "stranded": inputs.assay_type,
        },
    }
    if inputs.cdna_index:
        input_qorts["options"]["cdna_index"] = inputs.cdna_index
    qorts = Data.create(
        process=BioProcess.get_latest(slug="qorts-qc"),
        input=input_qorts,
        name=f"QoRTs QC report ({inputs.reads.name})",
    )

    # --- Aggregate all per-step reports with MultiQC --------------------
    input_multiqc = {
        "data": [
            inputs.reads,
            preprocessing,
            alignment,
            counts,
            downsampling,
            alignment_qc_rrna,
            alignment_qc_globin,
            idxstats,
            qorts,
        ],
        "advanced": {"dirs": True, "config": True},
    }
    Data.create(process=BioProcess.get_latest(slug="multiqc"), input=input_multiqc)
from resolwe.process import (
BooleanField,
Data,
DataField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
StringField,
)
from resolwe.process.models import Process as BioProcess
class WorkflowQuantSeq(Process):
    """3' mRNA-Seq pipeline.

    Reads are preprocessed by __BBDuk__ or __Cutadapt__ which removes adapters,
    trims reads for quality from the 3'-end, and discards reads that are too
    short after trimming. Preprocessed reads are aligned by __STAR__
    aligner. For read-count quantification, the __FeatureCounts__ tool
    is used. QoRTs QC and Samtools idxstats tools are used to report
    alignment QC metrics.

    QC steps include downsampling, QoRTs QC analysis and alignment of
    input reads to the rRNA/globin reference sequences. The reported
    alignment rate is used to assess the rRNA/globin sequence depletion
    rate.
    """

    slug = "workflow-quantseq"
    name = "QuantSeq workflow"
    requirements = {
        "expression-engine": "jinja",
    }
    data_name = "{{ reads|name|default('?') }}"
    version = "5.1.0"
    entity = {
        "type": "sample",
    }
    process_type = "data:workflow:quant:featurecounts"
    category = "Pipeline"

    class Input:
        """Input fields."""

        trimming_tool = StringField(
            label="Trimming tool",
            choices=[
                ("bbduk", "BBDuk"),
                ("cutadapt", "Cutadapt"),
            ],
            description="Select the trimming tool. If you select BBDuk then "
            "please provide adapter sequences in fasta file(s). If you select Cutadapt "
            "as a trimming tool, pre-determined adapter sequences will be removed.",
        )
        reads = DataField(
            data_type="reads:fastq",
            label="Input reads (FASTQ)",
            description="Reads in FASTQ file, single or paired end.",
        )
        genome = DataField(
            data_type="index:star",
            label="Indexed reference genome",
            description="Genome index prepared by STAR aligner indexing tool.",
        )
        adapters = ListField(
            inner=DataField(data_type="seq:nucleotide"),
            label="Adapters",
            required=False,
            hidden="trimming_tool != 'bbduk'",
            description="Provide a list of sequencing adapters files (.fasta) "
            "to be removed by BBDuk.",
        )
        annotation = DataField(
            data_type="annotation",
            label="Annotation",
            description="GTF and GFF3 annotation formats are supported.",
        )
        assay_type = StringField(
            label="Assay type",
            choices=[
                ("forward", "Strand-specific forward"),
                ("reverse", "Strand-specific reverse"),
            ],
            description="In strand-specific forward assay and single "
            "reads, the read has to be mapped to the same strand as the feature. "
            "For paired-end reads, the first read has to be on the same strand and "
            "the second read on the opposite strand. In strand-specific reverse "
            "assay these rules are reversed.",
            required=False,
        )
        rrna_reference = DataField(
            data_type="index:star",
            label="Indexed rRNA reference sequence",
            description="Reference sequence index prepared by STAR aligner indexing tool.",
            required=False,
        )
        globin_reference = DataField(
            data_type="index:star",
            label="Indexed Globin reference sequence",
            description="Reference sequence index prepared by STAR aligner indexing tool.",
            required=False,
        )

        class Preprocessing:
            """Preprocessing with BBDuk."""

            quality_encoding_offset = StringField(
                label="Quality encoding offset",
                choices=[
                    ("33", "Sanger / Illumina 1.8+"),
                    ("64", "Illumina up to 1.3+, 1.5+"),
                    ("auto", "Auto"),
                ],
                default="auto",
                description="Quality encoding offset for input FASTQ files.",
            )
            ignore_bad_quality = BooleanField(
                label="Ignore bad quality",
                default=False,
                description="Don't crash if quality values appear to be incorrect.",
            )

        class Cutadapt:
            """Cutadapt filtering."""

            quality_cutoff = IntegerField(
                label="Reads quality cutoff",
                required=False,
                description="Trim low-quality bases from 3' end of each read before "
                "adapter removal. The use of this option will override the use "
                "of NextSeq/NovaSeq-specific trim option.",
            )

        class Downsampling:
            """Downsampling (Seqtk)."""

            n_reads = IntegerField(
                label="Number of reads",
                default=1000000,
                description="Number of reads to include in subsampling.",
            )

            class Advanced:
                """Advanced options for downsampling."""

                # Label corrected from "Number of reads" (copy-paste error):
                # this field is the random seed, as the description states.
                seed = IntegerField(
                    label="Seed",
                    default=11,
                    description="Using the same random seed makes reads subsampling reproducible "
                    "in different environments.",
                )
                fraction = FloatField(
                    label="Fraction",
                    required=False,
                    range=[0, 1.0],
                    description="Use the fraction of reads [0 - 1.0] from the orignal input file instead "
                    "of the absolute number of reads. If set, this will override the"
                    "'Number of reads' input parameter.",
                )
                two_pass = BooleanField(
                    label="2-pass mode",
                    default=False,
                    description="Enable two-pass mode when down-sampling. Two-pass mode is twice "
                    "as slow but with much reduced memory.",
                )

            advanced = GroupField(Advanced, label="Advanced options for downsampling")

        cutadapt = GroupField(
            Cutadapt, label="Cutadapt filtering", hidden="trimming_tool != 'cutadapt'"
        )
        downsampling = GroupField(
            Downsampling,
            label="Downsampling with Seqtk",
        )
        preprocessing = GroupField(
            Preprocessing,
            label="Preprocessing with BBDuk",
            hidden="trimming_tool != 'bbduk'",
        )

    class Output:
        """Output fields."""

    def run(self, inputs, outputs):
        """Run the workflow."""
        if inputs.trimming_tool == "bbduk" and not inputs.adapters:
            self.error(
                "Please provide fasta file of adapters, if you want to use BBDuk as a trimming tool."
            )

        # --- Trimming: BBDuk (single or paired) or Cutadapt (single only) ---
        if inputs.trimming_tool == "bbduk":
            input_preprocessing = {
                "reads": inputs.reads,
                "min_length": 20,
                "reference": {"sequences": inputs.adapters},
                "processing": {"kmer_length": 13},
                "operations": {
                    "k_trim": "r",
                    "min_k": 6,
                    "quality_trim": "r",
                    "trim_quality": 10,
                    "quality_encoding_offset": inputs.preprocessing.quality_encoding_offset,
                    "ignore_bad_quality": inputs.preprocessing.ignore_bad_quality,
                },
                "fastqc": {
                    "nogroup": True,
                },
            }
            if inputs.reads.type.startswith("data:reads:fastq:single:"):
                process_slug = "bbduk-single"
            elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
                process_slug = "bbduk-paired"
            else:
                self.error("Wrong reads input type was provided.")
        else:
            if inputs.reads.type.startswith("data:reads:fastq:single:"):
                input_preprocessing = {
                    "reads": inputs.reads,
                    "options": {
                        "quality_cutoff": inputs.cutadapt.quality_cutoff,
                    },
                }
                process_slug = "cutadapt-3prime-single"
            else:
                self.error(
                    "Only single-end reads are supported when Cutadapt is "
                    "selected as a trimming tool."
                )

        preprocessing = Data.create(
            process=BioProcess.get_latest(slug=process_slug),
            input=input_preprocessing,
            name=f"Trimmed ({inputs.reads.name})",
        )

        # --- Alignment with STAR --------------------------------------------
        input_star = {
            "reads": preprocessing,
            "genome": inputs.genome,
            "filtering": {
                "out_filter_type": "BySJout",
                "out_multimap_max": 20,
                "out_mismatch_max": 999,
                "out_mismatch_nl_max": 0.6,
            },
            "alignment": {
                "align_overhang_min": 8,
                "align_sjdb_overhang_min": 1,
                "align_intron_size_min": 20,
                "align_intron_size_max": 1000000,
                "align_gap_max": 1000000,
            },
        }
        # Cutadapt-trimmed (3' QuantSeq) data is treated as unstranded by STAR.
        if inputs.trimming_tool == "cutadapt":
            star_overrides = {
                "unstranded": True,
                "output_options": {"out_sam_attributes": "NH HI NM MD"},
            }
            input_star.update(star_overrides)

        alignment = Data.create(
            process=BioProcess.get_latest(slug="alignment-star"),
            input=input_star,
            name=f"Aligned ({inputs.reads.name})",
        )

        # --- Quantification with featureCounts ------------------------------
        input_featurecounts = {
            "aligned_reads": alignment,
            "normalization_type": "CPM",
            "assay_type": inputs.assay_type,
            "annotation": inputs.annotation,
        }
        # The Cutadapt 3' protocol is always strand-specific forward.
        if inputs.trimming_tool == "cutadapt":
            featurecounts_overrides = {"assay_type": "forward"}
            input_featurecounts.update(featurecounts_overrides)

        quantification = Data.create(
            process=BioProcess.get_latest(slug="feature_counts"),
            input=input_featurecounts,
            name=f"Quantified ({inputs.reads.name})",
        )

        # --- Downsampling with Seqtk (QC subsample) -------------------------
        input_seqtk = {
            "reads": preprocessing,
            "n_reads": inputs.downsampling.n_reads,
            "advanced": {
                "seed": inputs.downsampling.advanced.seed,
                "fraction": inputs.downsampling.advanced.fraction,
                "two_pass": inputs.downsampling.advanced.two_pass,
            },
        }
        if inputs.reads.type.startswith("data:reads:fastq:single:"):
            process_slug = "seqtk-sample-single"
        elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
            process_slug = "seqtk-sample-paired"
        else:
            self.error("Wrong reads input type was provided.")

        downsampling = Data.create(
            process=BioProcess.get_latest(slug=process_slug),
            input=input_seqtk,
            name=f"Subsampled ({inputs.reads.name})",
        )

        # --- QC: idxstats and QoRTs -----------------------------------------
        idxstats = Data.create(
            process=BioProcess.get_latest(slug="samtools-idxstats"),
            input={
                "alignment": alignment,
            },
            name=f"Alignment summary ({inputs.reads.name})",
        )
        # QoRTs runs on an alignment of the downsampled reads, not the full set.
        alignment_qorts = Data.create(
            process=BioProcess.get_latest(slug="alignment-star"),
            input={
                "reads": downsampling,
                "genome": inputs.genome,
            },
            name=f"Aligned subset ({inputs.reads.name})",
        )
        input_qorts = {
            "alignment": alignment_qorts,
            "annotation": inputs.annotation,
            "options": {
                "stranded": inputs.assay_type,
            },
        }
        if inputs.trimming_tool == "cutadapt":
            qorts_overrides = {"options": {"stranded": "forward"}}
            input_qorts.update(qorts_overrides)
        qorts = Data.create(
            process=BioProcess.get_latest(slug="qorts-qc"),
            input=input_qorts,
            name=f"QoRTs QC report ({inputs.reads.name})",
        )

        # --- rRNA / globin contamination (only if both references given) ----
        multiqc_inputs = [
            inputs.reads,
            preprocessing,
            alignment,
            downsampling,
            quantification,
            idxstats,
            qorts,
        ]
        if inputs.rrna_reference and inputs.globin_reference:
            alignment_qc_rrna = Data.create(
                process=BioProcess.get_latest(slug="alignment-star"),
                input={
                    "reads": downsampling,
                    "genome": inputs.rrna_reference,
                },
            )
            alignment_qc_globin = Data.create(
                process=BioProcess.get_latest(slug="alignment-star"),
                input={
                    "reads": downsampling,
                    "genome": inputs.globin_reference,
                },
            )
            multiqc_inputs.extend([alignment_qc_rrna, alignment_qc_globin])

        # --- Aggregate all per-step reports with MultiQC --------------------
        inputs_multiqc = {"data": multiqc_inputs}
        Data.create(process=BioProcess.get_latest(slug="multiqc"), input=inputs_multiqc)
from resolwe.process import (
BooleanField,
Data,
DataField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
StringField,
)
from resolwe.process.models import Process as BioProcess
class WorkflowBbdukSalmonQc(Process):
"""Alignment-free RNA-Seq pipeline.
Salmon tool and tximport package are used in quantification step to
produce gene-level abundance estimates.
rRNA and globin-sequence contamination rate in the sample is
determined using STAR aligner. Quality-trimmed reads are down-sampled
(using Seqtk tool) and aligned to the genome, rRNA and globin
reference sequences. The rRNA and globin-sequence alignment rates
indicate the percentage of the reads in the sample that are of
rRNA and globin origin, respectively. Alignment of down-sampled data
to a whole genome reference sequence is used to produce an alignment
file suitable for Samtools and QoRTs QC analysis.
Per-sample analysis results and QC data is summarized by the MultiQC
tool.
"""
slug = "workflow-bbduk-salmon-qc"
name = "BBDuk - Salmon - QC"
requirements = {
"expression-engine": "jinja",
}
data_name = "{{ reads|name|default('?') }}"
entity = {
"type": "sample",
}
version = "4.2.1"
process_type = "data:workflow:rnaseq:salmon"
category = "Pipeline"
class Input:
"""Input fields."""
reads = DataField(
data_type="reads:fastq",
label="Select sample(s) (FASTQ)",
description="Reads in FASTQ file, single or paired end.",
)
salmon_index = DataField(
data_type="index:salmon",
label="Salmon index",
description="Transcriptome index file created using the Salmon indexing tool.",
)
genome = DataField(
data_type="index:star",
label="Indexed reference genome",
description="Genome index prepared by STAR aligner indexing tool.",
)
annotation = DataField(
data_type="annotation",
label="Annotation",
description="GTF and GFF3 annotation formats are supported.",
)
rrna_reference = DataField(
data_type="index:star",
label="Indexed rRNA reference sequence",
description="Reference sequence index prepared by STAR aligner indexing tool.",
)
globin_reference = DataField(
data_type="index:star",
label="Indexed Globin reference sequence",
description="Reference sequence index prepared by STAR aligner indexing tool.",
)
class Preprocessing:
    """Preprocessing with BBDuk."""

    adapters = ListField(
        inner=DataField(data_type="seq:nucleotide"),
        label="Adapters",
        description="FASTA file(s) with adapters.",
        required=False,
    )
    custom_adapter_sequences = ListField(
        inner=StringField(),
        label="Custom adapter sequences",
        required=False,
        default=[],
        description="Custom adapter sequences can be specified by inputting them "
        "one by one and pressing Enter after each sequence.",
    )
    kmer_length = IntegerField(
        label="K-mer length",
        default=23,
        description="K-mer length must be smaller or equal to the length of adapters.",
    )
    # Disabled (via the JS expression) when no adapter source is provided.
    min_k = IntegerField(
        label="Minimum k-mer length at right end of reads used for trimming",
        default=11,
        disabled="preprocessing.adapters.length === 0 && preprocessing.custom_adapter_sequences.length === 0",
    )
    hamming_distance = IntegerField(
        label="Maximum Hamming distance for k-mers",
        default=1,
    )
    # A negative value disables the N-count filter.
    maxns = IntegerField(
        label="Max Ns after trimming",
        default=-1,
        description="If non-negative, reads with more Ns than this (after trimming) will be discarded.",
    )
    trim_quality = IntegerField(
        label="Quality below which to trim reads from the right end",
        default=10,
        description="Phred algorithm is used, which is more accurate than naive trimming.",
    )
    min_length = IntegerField(
        label="Minimum read length",
        default=20,
        description="Reads shorter than minimum read length after trimming are discarded.",
    )
    quality_encoding_offset = StringField(
        label="Quality encoding offset",
        choices=[
            ("33", "Sanger / Illumina 1.8+"),
            ("64", "Illumina up to 1.3+, 1.5+"),
            ("auto", "Auto"),
        ],
        default="auto",
        description="Quality encoding offset for input FASTQ files.",
    )
    ignore_bad_quality = BooleanField(
        label="Ignore bad quality",
        default=False,
        description="Don't crash if quality values appear to be incorrect.",
    )
class Quantification:
"""Quantification (Salmon)."""
seq_bias = BooleanField(
label="Perform sequence-specific bias correction",
default=True,
description="Perform sequence-specific bias correction.",
)
gc_bias = BooleanField(
label="Perform fragment GC bias correction",
required=False,
description="Perform fragment GC bias correction. If single-end reads are selected "
"as input in this workflow, it is recommended that you set this option to False. If you "
"selected paired-end reads as input in this workflow, it is recommended that you set "
"this option to True.",
)
consensus_slack = FloatField(
label="Consensus slack",
required=False,
description="The amount of slack allowed in the quasi-mapping consensus mechanism. "
"Normally, a transcript must cover all hits to be considered for mapping. "
"If this is set to a fraction, X, greater than 0 (and in [0,1)), then a transcript "
"can fail to cover up to (100 * X)% of the hits before it is discounted as a "
"mapping candidate. The default value of this option is 0.2 in selective alignment mode "
"and 0 otherwise.",
)
min_score_fraction = FloatField(
label="Minimum alignment score fraction",
default=0.65,
description="The fraction of the optimal possible alignment score that a mapping "
"must achieve in order to be considered valid - should be in (0,1].",
)
range_factorization_bins = IntegerField(
label="Range factorization bins",
default=4,
description="Factorizes the likelihood used in quantification by adopting a "
"new notion of equivalence classes based on the conditional probabilities with which "
"fragments are generated from different transcripts. This is a more fine-grained "
"factorization than the normal rich equivalence classes. The default value (4) "
"corresponds to the default used in Zakeri et al. 2017 and larger values imply a more "
"fine-grained factorization. If range factorization is enabled, a common value to "
"select for this parameter is 4. A value of 0 signifies the use of basic rich "
"equivalence classes.",
)
min_assigned_frag = IntegerField(
label="Minimum number of assigned fragments",
default=10,
description="The minimum number of fragments that must be assigned to the "
"transcriptome for quantification to proceed.",
)
num_bootstraps = IntegerField(
label="--numBootstraps",
description="Salmon has the ability to optionally "
"compute bootstrapped abundance estimates. This is "
"done by resampling (with replacement) from the counts "
"assigned to the fragment equivalence classes, and then "
"re-running the optimization procedure, either the EM or VBEM, "
"for each such sample. The values of these different bootstraps "
"allows us to assess technical variance in the main abundance "
"estimates we produce. Such estimates can be useful for downstream "
"(e.g. differential expression) tools that can make use of such "
"uncertainty estimates. This option takes a positive integer that "
"dictates the number of bootstrap samples to compute. The more samples "
"computed, the better the estimates of varaiance, but the more "
"computation (and time) required.",
disabled="quantification.num_gibbs_samples",
required=False,
)
num_gibbs_samples = IntegerField(
label="--numGibbsSamples",
description="Just as with the bootstrap procedure above, this option "
"produces samples that allow us to estimate the variance in abundance "
"estimates. However, in this case the samples are generated using posterior "
"Gibbs sampling over the fragment equivalence classes rather than "
"bootstrapping. We are currently analyzing these different approaches to "
"assess the potential trade-offs in time / accuracy. The --numBootstraps "
"and --numGibbsSamples options are mutually exclusive (i.e. in a given run, "
"you must set at most one of these options to a positive integer.)",
disabled="quantification.num_bootstraps",
required=False,
)
class Downsampling:
"""Downsampling (Seqtk)."""
n_reads = IntegerField(
label="Number of reads",
default=10000000,
description="Number of reads to include in subsampling.",
)
seed = IntegerField(
label="Number of reads",
default=11,
description="Using the same random seed makes reads subsampling reproducible "
"in different environments.",
)
fraction = FloatField(
label="Fraction of reads",
required=False,
range=[0.0, 1.0],
description="Use the fraction of reads [0.0 - 1.0] from the orignal input file instead "
"of the absolute number of reads. If set, this will override the 'Number of reads' "
"input parameter.",
)
two_pass = BooleanField(
label="2-pass mode",
default=False,
description="Enable two-pass mode when down-sampling. Two-pass mode is twice "
"as slow but with much reduced memory usage.",
)
preprocessing = GroupField(
Preprocessing,
label="Preprocessing with BBDuk",
)
quantification = GroupField(
Quantification,
label="Quantification (Salmon)",
)
downsampling = GroupField(
Downsampling,
label="Downsampling with Seqtk",
)
    class Output:
        """Output fields."""

        # Workflows do not have output fields.
def run(self, inputs, outputs):
"""Run the workflow."""
input_bbduk = {
"reads": inputs.reads,
"min_length": inputs.preprocessing.min_length,
"reference": {
"sequences": inputs.preprocessing.adapters or [],
"literal_sequences": inputs.preprocessing.custom_adapter_sequences,
},
"processing": {
"kmer_length": inputs.preprocessing.kmer_length,
"hamming_distance": inputs.preprocessing.hamming_distance,
},
"operations": {
"quality_trim": "r",
"trim_quality": inputs.preprocessing.trim_quality,
"quality_encoding_offset": inputs.preprocessing.quality_encoding_offset,
"ignore_bad_quality": inputs.preprocessing.ignore_bad_quality,
"maxns": inputs.preprocessing.maxns,
},
}
if (
inputs.preprocessing.adapters
or inputs.preprocessing.custom_adapter_sequences
):
input_bbduk["operations"]["k_trim"] = "r"
else:
input_bbduk["operations"]["k_trim"] = "f"
if (
inputs.preprocessing.adapters
or inputs.preprocessing.custom_adapter_sequences
):
input_bbduk["operations"]["min_k"] = inputs.preprocessing.min_k
else:
input_bbduk["operations"]["min_k"] = -1
if inputs.reads.type.startswith("data:reads:fastq:single:"):
bbduk_slug = "bbduk-single"
elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
input_bbduk["operations"]["trim_pairs_evenly"] = True
input_bbduk["operations"]["trim_by_overlap"] = True
bbduk_slug = "bbduk-paired"
else:
self.error("Wrong reads input type.")
preprocessing = Data.create(
process=BioProcess.get_latest(bbduk_slug),
input=input_bbduk,
name=f"Trimmed ({inputs.reads.name})",
)
input_salmon = {
"reads": preprocessing,
"salmon_index": inputs.salmon_index,
"annotation": inputs.annotation,
"options": {
"seq_bias": inputs.quantification.seq_bias,
"min_score_fraction": inputs.quantification.min_score_fraction,
"range_factorization_bins": inputs.quantification.range_factorization_bins,
"min_assigned_frag": inputs.quantification.min_assigned_frag,
},
}
if inputs.quantification.consensus_slack:
input_salmon["options"][
"consensus_slack"
] = inputs.quantification.consensus_slack
if inputs.quantification.gc_bias:
input_salmon["options"]["gc_bias"] = inputs.quantification.gc_bias
if inputs.quantification.num_bootstraps:
input_salmon["options"][
"num_bootstraps"
] = inputs.quantification.num_bootstraps
if inputs.quantification.num_gibbs_samples:
input_salmon["options"][
"num_gibbs_samples"
] = inputs.quantification.num_gibbs_samples
quantification = Data.create(
process=BioProcess.get_latest(slug="salmon-quant"),
input=input_salmon,
name=f"Quantified ({inputs.reads.name})",
)
input_seqtk = {
"reads": preprocessing,
"n_reads": inputs.downsampling.n_reads,
"advanced": {
"seed": inputs.downsampling.seed,
"fraction": inputs.downsampling.fraction,
"two_pass": inputs.downsampling.two_pass,
},
}
if inputs.reads.type.startswith("data:reads:fastq:single:"):
seqtk_slug = "seqtk-sample-single"
elif inputs.reads.type.startswith("data:reads:fastq:paired:"):
seqtk_slug = "seqtk-sample-paired"
else:
self.error("Wrong reads input type.")
downsampling = Data.create(
process=BioProcess.get_latest(slug=seqtk_slug),
input=input_seqtk,
name=f"Subsampled ({inputs.reads.name})",
)
alignment_qc = Data.create(
process=BioProcess.get_latest(slug="alignment-star"),
input={
"reads": downsampling,
"genome": inputs.genome,
},
name=f"Aligned subset ({inputs.reads.name})",
)
alignment_qc_rrna = Data.create(
process=BioProcess.get_latest(slug="alignment-star"),
input={
"reads": downsampling,
"genome": inputs.rrna_reference,
},
name=f"rRNA aligned ({inputs.reads.name})",
)
alignment_qc_globin = Data.create(
process=BioProcess.get_latest(slug="alignment-star"),
input={
"reads": downsampling,
"genome": inputs.globin_reference,
},
name=f"Globin aligned ({inputs.reads.name})",
)
qorts = Data.create(
process=BioProcess.get_latest(slug="qorts-qc"),
input={
"alignment": alignment_qc,
"annotation": inputs.annotation,
"options": {
"stranded": "auto",
"cdna_index": inputs.salmon_index,
"n_reads": 5000000,
},
},
name=f"QoRTs QC report ({inputs.reads.name})",
)
idxstats = Data.create(
process=BioProcess.get_latest(slug="samtools-idxstats"),
input={
"alignment": alignment_qc,
},
name=f"Alignment summary ({inputs.reads.name})",
)
input_multiqc = {
"data": [
inputs.reads,
preprocessing,
quantification,
downsampling,
alignment_qc,
alignment_qc_rrna,
alignment_qc_globin,
qorts,
idxstats,
]
}
Data.create(process=BioProcess.get_latest(slug="multiqc"), input=input_multiqc) | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/processes/workflows/bbduk_salmon_qc.py | 0.873323 | 0.537223 | bbduk_salmon_qc.py | pypi |
from typing import Any, Dict, List
from resolwe.process.communicator import communicator
from resolwe.process.models import Model
class Feature(Model):
    """Expose Feature model in Python Processes."""

    _app_name = "resolwe_bio_kb"
    _model_name = "Feature"
    # Default set of fields returned by :meth:`filter` when the caller
    # does not narrow them with the ``__fields`` keyword argument.
    _filter_response_fields = [
        "source",
        "feature_id",
        "species",
        "type",
        "sub_type",
        "name",
        "full_name",
        "description",
        "aliases",
    ]

    @classmethod
    def filter(cls, **filters: Dict[str, Any]) -> List["Model"]:
        """Filter features from the database."""
        # Make sure attributes have 'id' in the first place.
        attributes = filters.pop("__fields", None)
        attributes = attributes or cls._filter_response_fields
        attributes = ["id"] + [
            attribute for attribute in attributes if attribute != "id"
        ]
        # The exact (source, species, feature_id__in) combination has a
        # dedicated listener command; any other filter combination goes
        # through the generic object-filtering endpoint.
        if set(filters.keys()) == {"source", "species", "feature_id__in"}:
            objects = communicator.filter_features(
                (
                    filters["source"],
                    filters["species"],
                    filters["feature_id__in"],
                    attributes,
                )
            )
        else:
            objects = communicator.filter_objects(
                cls._app_name, cls._model_name, filters, attributes
            )
        # Each returned entry is a list of values ordered the same way
        # as ``attributes``; entry[0] is therefore always the model id.
        models = []
        for entry in objects:
            model = cls(entry[0])
            for field_name, value in zip(attributes[1:], entry[1:]):
                field = model.fields[field_name]
                model._cache[field_name] = field.clean(value)
            models.append(model)
        return models
class Mapping(Model):
    """Expose Mapping model in Python Processes."""

    # NOTE: docstring used to say "Feature" (copy-paste); corrected.
    _app_name = "resolwe_bio_kb"
    _model_name = "Mapping"
    # Fields returned for mapping queries.
    _filter_response_fields = [
        "relation_type",
        "source_db",
        "source_id",
        "source_species",
        "target_db",
        "target_id",
        "target_species",
    ]
from resolwe.flow.expression_engines.jinja.filters import id_, type_
from resolwe.flow.models.entity import Entity, RelationPartition
def background_pairs(data):
    """Get a list of (case, background) pairs for given data.

    A list of data objects is re-arranged to a list of (case, background) pairs
    based on the background relation between their corresponding samples.
    """
    if not isinstance(data, list):
        raise ValueError("Argument data must be a list")
    if not data:
        return []

    types = {type_(item) for item in data}
    if len(types) != 1:
        raise ValueError("All data must be of the same type")
    (data_type,) = types

    data_ids = [id_(item) for item in data]

    # Background partitions whose relation also contains one of the
    # given data objects labelled as "case".
    partition_rows = RelationPartition.objects.filter(
        label="background",
        entity__data__process__type=data_type,
        relation__type__name="background",
        relation__relationpartition__label="case",
        relation__relationpartition__entity__data__in=data_ids,
    ).values("entity__data", "relation__relationpartition__entity__data")

    pairs = []
    paired_ids = set()
    for row in partition_rows:
        case_id = row["relation__relationpartition__entity__data"]
        background_id = row["entity__data"]
        # Case must have been given in function args.
        assert case_id in data_ids
        pairs.append((case_id, background_id))
        paired_ids.add(case_id)
        paired_ids.add(background_id)

    # Append data without background
    pairs.extend((orphan, None) for orphan in set(data_ids) - paired_ids)

    return sorted(pairs)
def replicate_groups(data):
    """Get replicate groups."""
    if not isinstance(data, list):
        raise ValueError("List of data must be given")

    data_ids = [id_(item) for item in data]
    if len(data_ids) != len(set(data_ids)):
        raise ValueError("Repeated data objects not allowed")

    samples = Entity.objects.filter(data__id__in=data_ids)
    if len(samples) != len(data_ids):
        raise ValueError("Can not get replicates of data without sample")

    partitions = RelationPartition.objects.filter(
        relation__category="Replicate", relation__type__name="group", entity__in=samples
    )

    # Map each sample to its (single) replicate-group label.
    label_by_sample = {}
    for partition in partitions:
        sample_id = partition.entity.id
        if sample_id in label_by_sample:
            raise ValueError(
                "More than one replicate relation on sample: {}".format(sample_id)
            )
        label_by_sample[sample_id] = partition.label

    index_by_label = {}
    next_index = 1
    groups = []
    # Ensure the correct order
    for data_id in data_ids:
        # This is slow because we are fetching samples one by one
        sample_id = (
            Entity.objects.filter(data__id=data_id).values("id").first()["id"]
        )
        if sample_id in label_by_sample:
            label = label_by_sample[sample_id]
            if label not in index_by_label:
                index_by_label[label] = next_index
                next_index += 1
            groups.append(index_by_label[label])
        else:
            # Sample is in no replicate group: it gets its own index.
            groups.append(next_index)
            next_index += 1
    return groups
# A dictionary of filters that will be registered.
# Maps filter name to the implementing callable.
filters = {
    "background_pairs": background_pairs,
    "replicate_groups": replicate_groups,
}
from functools import reduce
import django_filters as filters
from django.contrib.postgres.search import SearchQuery, SearchRank
from django.db import models
from django.db.models import F
from resolwe.flow.filters import TEXT_LOOKUPS, CheckQueryParamsMixin
from .models import Feature, Mapping
class FullTextFilter(filters.BaseCSVFilter):
    """Filter for full-text search."""

    def filter(self, queryset, values):
        """Filter field by text-search vector.

        Values, given in csv format are joined with the OR operator.

        Filtering by more than 135 values raises recursion error, so we have
        to do it in chunks.
        """
        if not values:
            return queryset

        values = [SearchQuery(v, config="simple") for v in values]

        step = 100
        result_qs = queryset.none()
        for i in range(0, len(values), step):
            # OR-combine this chunk of search queries into one query.
            query = reduce(SearchQuery.__or__, values[i : i + step])
            result_qs = result_qs.union(
                queryset.filter(**{self.field_name: query})
                # This assumes that field is already a TextSearch vector and thus
                # doesn't need to be transformed. To achieve that F function is
                # required.
                .annotate(rank=SearchRank(F(self.field_name), query))
            )

        # Most relevant (highest-ranked) results first.
        return result_qs.order_by("-rank")
class MultichoiceCharFilter(filters.BaseCSVFilter):
    """Filter by comma-separated strings."""

    def filter(self, queryset, value):
        """Perform the filtering."""
        if value:
            lookup = "{}__in".format(self.field_name)
            return queryset.filter(**{lookup: value})
        return queryset
class FeatureFilter(CheckQueryParamsMixin, filters.FilterSet):
    """Filter the feature endpoint."""

    # Full-text search against the precomputed ``search`` vector field.
    query = FullTextFilter(field_name="search")

    class Meta:
        """Filter configuration."""

        model = Feature
        fields = {
            "source": ["exact"],
            "species": ["exact"],
            "type": ["exact"],
            "feature_id": ["exact", "in"],
        }
        # CharField-backed filters accept comma-separated value lists.
        filter_overrides = {
            models.CharField: {
                "filter_class": MultichoiceCharFilter,
            },
        }
class MappingFilter(CheckQueryParamsMixin, filters.FilterSet):
    """Filter the mapping endpoint."""

    class Meta:
        """Filter configuration."""

        model = Mapping
        # ``[:]`` gives each field its own copy of the lookup list.
        fields = {
            "source_db": TEXT_LOOKUPS[:],
            "source_id": TEXT_LOOKUPS[:],
            "source_species": TEXT_LOOKUPS[:],
            "target_db": TEXT_LOOKUPS[:],
            "target_id": TEXT_LOOKUPS[:],
            "target_species": TEXT_LOOKUPS[:],
        }
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchVectorField
from django.db import models
# NOTE: Feature are manually inserted in management command, so take
# care that it is synced with model definition.
class Feature(models.Model):
    """Describes a feature in the knowledge base."""

    TYPE_GENE = "gene"
    TYPE_TRANSCRIPT = "transcript"
    TYPE_EXON = "exon"
    TYPE_PROBE = "probe"
    TYPE_CHOICES = (
        (TYPE_GENE, "Gene"),
        (TYPE_TRANSCRIPT, "Transcript"),
        (TYPE_EXON, "Exon"),
        (TYPE_PROBE, "Probe"),
    )

    SUBTYPE_PROTEIN_CODING = "protein-coding"
    SUBTYPE_PSEUDO = "pseudo"
    SUBTYPE_RRNA = "rRNA"
    SUBTYPE_NCRNA = "ncRNA"
    SUBTYPE_SNRNA = "snRNA"
    SUBTYPE_SNORNA = "snoRNA"
    SUBTYPE_TRNA = "tRNA"
    SUBTYPE_ASRNA = "asRNA"
    SUBTYPE_OTHER = "other"
    SUBTYPE_UNKNOWN = "unknown"
    SUBTYPE_CHOICES = (
        (SUBTYPE_PROTEIN_CODING, "Protein-coding"),
        (SUBTYPE_PSEUDO, "Pseudo"),
        (SUBTYPE_RRNA, "rRNA"),
        (SUBTYPE_NCRNA, "ncRNA"),
        (SUBTYPE_SNRNA, "snRNA"),
        (SUBTYPE_SNORNA, "snoRNA"),
        (SUBTYPE_TRNA, "tRNA"),
        (SUBTYPE_ASRNA, "asRNA"),
        (SUBTYPE_OTHER, "Other"),
        (SUBTYPE_UNKNOWN, "Unknown"),
    )

    # Because Django ORM cannot handle composite primary keys, each feature is
    # still assigned an internal numeric 'id' and the ('source', 'feature_id',
    # 'species') combination is used to uniquely identify a feature.
    source = models.CharField(max_length=20)
    feature_id = models.CharField(max_length=50)
    species = models.CharField(max_length=50)
    type = models.CharField(max_length=20, choices=TYPE_CHOICES)
    sub_type = models.CharField(max_length=20, choices=SUBTYPE_CHOICES)
    name = models.CharField(max_length=1024)
    full_name = models.CharField(max_length=350, blank=True)
    description = models.TextField(blank=True)
    aliases = ArrayField(models.CharField(max_length=256), default=list, blank=True)

    #: field used for full-text search
    search = SearchVectorField(null=True)

    class Meta:
        """Feature Meta options."""

        # Uniqueness is enforced on the composite natural key.
        constraints = [
            models.UniqueConstraint(
                fields=["source", "feature_id", "species"],
                name="uniq_feature_source_feature_id_species",
            ),
        ]
        indexes = [
            models.Index(name="idx_feature_source", fields=["source"]),
            models.Index(name="idx_feature_species", fields=["species"]),
            models.Index(name="idx_feature_feature_id", fields=["feature_id"]),
            models.Index(name="idx_feature_type", fields=["type"]),
            GinIndex(name="idx_feature_search", fields=["search"]),
        ]

    def __str__(self):
        """Represent a feature instance as a string."""
        return "{source}: {feature_id} ({species})".format(
            source=self.source,
            feature_id=self.feature_id,
            species=self.species,
        )
# NOTE: Mappings are manually inserted in management command, so take
# care that it is synced with model definition.
class Mapping(models.Model):
    """Describes a mapping between features from different sources."""

    RELATION_TYPE_CROSSDB = "crossdb"
    RELATION_TYPE_ORTHOLOG = "ortholog"
    RELATION_TYPE_TRANSCRIPT = "transcript"
    RELATION_TYPE_EXON = "exon"
    RELATION_TYPE_CHOICES = (
        (RELATION_TYPE_CROSSDB, "Crossdb"),
        (RELATION_TYPE_ORTHOLOG, "Ortholog"),
        (RELATION_TYPE_TRANSCRIPT, "Transcript"),
        (RELATION_TYPE_EXON, "Exon"),
    )

    relation_type = models.CharField(max_length=20, choices=RELATION_TYPE_CHOICES)
    # Source and target endpoints of the mapping.
    source_db = models.CharField(max_length=20)
    source_id = models.CharField(max_length=50)
    source_species = models.CharField(max_length=50)
    target_db = models.CharField(max_length=20)
    target_id = models.CharField(max_length=50)
    target_species = models.CharField(max_length=50)

    class Meta:
        """Mapping Meta options."""

        # One mapping per (source, target, relation type) combination.
        constraints = [
            models.UniqueConstraint(
                fields=[
                    "source_db",
                    "source_id",
                    "source_species",
                    "target_db",
                    "target_id",
                    "target_species",
                    "relation_type",
                ],
                name="uniq_mapping_source_target_type",
            ),
        ]
        indexes = [
            models.Index(
                name="idx_feature_source_target",
                fields=[
                    "source_db",
                    "source_id",
                    "source_species",
                    "target_db",
                    "target_species",
                ],
            ),
            models.Index(
                name="idx_feature_target",
                fields=["target_db", "target_id", "target_species"],
            ),
        ]

    def __str__(self):
        """Represent a feature instance as a string."""
        return "{src_db}: {src_id} ({src_species}) -> {dst_db}: {dst_id} ({dst_species})".format(
            src_db=self.source_db,
            src_id=self.source_id,
            src_species=self.source_species,
            dst_db=self.target_db,
            dst_id=self.target_id,
            dst_species=self.target_species,
        )
from typing import Tuple
from django.contrib.auth import get_user_model
from django.db.models import QuerySet
from resolwe.flow.executors.socket_utils import Message, Response
from resolwe.flow.managers.listener.listener import Processor
from resolwe.flow.managers.listener.permission_plugin import ExposeObjectPlugin
from resolwe.flow.managers.listener.plugin import (
ListenerPlugin,
listener_plugin_manager,
)
from resolwe.flow.managers.listener.redis_cache import CachedObjectPlugin, cache_manager
from resolwe_bio.kb.models import Feature
UserClass = get_user_model()
class ExposeFeature(ExposeObjectPlugin):
    """Expose the Feature knowledge base model in listener."""

    full_model_name = "resolwe_bio_kb.Feature"

    def filter_objects(self, user: UserClass, queryset: QuerySet, data) -> QuerySet:
        """Filter the objects for the given user."""
        # No per-user filtering: the queryset is returned unchanged.
        return queryset
class ExposeMapping(ExposeObjectPlugin):
    """Expose the Mapping knowledge base model in listener."""

    full_model_name = "resolwe_bio_kb.Mapping"

    def filter_objects(self, user: UserClass, queryset: QuerySet, data) -> QuerySet:
        """Filter the objects for the given user."""
        # No per-user filtering: the queryset is returned unchanged.
        return queryset
class FeatureCache(CachedObjectPlugin):
    """Cache the Feature knowledge base model in Redis."""

    model = Feature
    # Fields stored for every cached feature.
    cached_fields = [
        "id",
        "source",
        "feature_id",
        "species",
        "type",
        "sub_type",
        "name",
        "full_name",
        "description",
        "aliases",
    ]
    # Natural key of a feature; the tuple order defines the order of
    # values in cache keys (see handle_filter_features).
    identifier_fields = ("feature_id", "source", "species")
    expiration_time = None  # Never expire.
class KnowledgeBasePlugin(ListenerPlugin):
    """Handler methods for KnowledgeBase methods."""

    name = "KnowledgeBase plugin"
    plugin_manager = listener_plugin_manager

    def handle_filter_features(
        self,
        data_id: int,
        message: Message[Tuple[str, str, list[str], list[str]]],
        manager: "Processor",
    ) -> Response[dict]:
        """Get the feature data based on the given identifiers."""
        source, species, feature_ids, requested_fields = message.message_data
        identifiers_list = list()
        for feature_id in feature_ids:
            # Identifiers must be ordered same as FeatureCache identifier_fields.
            data = {"feature_id": feature_id, "source": source, "species": species}
            identifiers = (data.get(name) for name in FeatureCache.identifier_fields)
            identifiers_list.append(tuple(identifiers))
        # The get itself will wait for up to one minute when lock is set to any entry.
        cached_data = cache_manager.mget(Feature, identifiers_list)
        # NOTE(review): cache hits are emitted first and database reads
        # appended afterwards, so the result order does not follow the
        # requested feature_ids — confirm callers do not rely on order.
        to_return = [
            [entry[field] for field in requested_fields]
            for entry in cached_data
            if entry is not None
        ]
        missing_identifiers = []
        for position, (identifier, value) in enumerate(
            zip(identifiers_list, cached_data)
        ):
            if value is None:
                missing_identifiers.append(identifier)
        # Read the missing data from the database.
        if missing_identifiers:
            try:
                # Lock missing entries so concurrent handlers wait on the
                # cache instead of querying the database for the same rows.
                cache_manager.lock(Feature, missing_identifiers)
                feature_id_index = FeatureCache.identifier_fields.index("feature_id")
                missing_feature_ids = [
                    identifier[feature_id_index] for identifier in missing_identifiers
                ]
                missing_features = Feature.objects.filter(
                    source=source, species=species, feature_id__in=missing_feature_ids
                )
                # Cache the missing data.
                cache_manager.mcache(missing_features)
                # Fill the missing values to the to_return list.
                to_return.extend(missing_features.values_list(*requested_fields))
            finally:
                # Unlock the entries.
                cache_manager.unlock(Feature, missing_identifiers)
        return message.respond(to_return)
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add species columns to Mapping and extend type/choice sets.

    Adds ``source_species``/``target_species`` to Mapping, widens the
    Feature ``type``/``sub_type`` and Mapping ``relation_type`` choices,
    and updates the unique/index constraints to include species.
    """

    dependencies = [
        ("resolwe_bio_kb", "0004_add_unique_together"),
    ]

    operations = [
        migrations.AddField(
            model_name="mapping",
            name="source_species",
            field=models.CharField(default="unknown", max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="mapping",
            name="target_species",
            field=models.CharField(default="unknown", max_length=50),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="feature",
            name="sub_type",
            field=models.CharField(
                choices=[
                    ("protein-coding", "Protein-coding"),
                    ("pseudo", "Pseudo"),
                    ("rRNA", "rRNA"),
                    ("ncRNA", "ncRNA"),
                    ("snRNA", "snRNA"),
                    ("snoRNA", "snoRNA"),
                    ("tRNA", "tRNA"),
                    ("asRNA", "asRNA"),
                    ("other", "Other"),
                    ("unknown", "Unknown"),
                ],
                max_length=20,
            ),
        ),
        migrations.AlterField(
            model_name="feature",
            name="type",
            field=models.CharField(
                choices=[
                    ("gene", "Gene"),
                    ("transcript", "Transcript"),
                    ("exon", "Exon"),
                    ("probe", "Probe"),
                ],
                max_length=20,
            ),
        ),
        migrations.AlterField(
            model_name="mapping",
            name="relation_type",
            field=models.CharField(
                choices=[
                    ("crossdb", "Crossdb"),
                    ("ortholog", "Ortholog"),
                    ("transcript", "Transcript"),
                    ("exon", "Exon"),
                ],
                max_length=20,
            ),
        ),
        migrations.AlterUniqueTogether(
            name="feature",
            unique_together=set([("source", "feature_id", "species")]),
        ),
        migrations.AlterUniqueTogether(
            name="mapping",
            unique_together=set(
                [
                    (
                        "source_db",
                        "source_id",
                        "source_species",
                        "target_db",
                        "target_id",
                        "target_species",
                        "relation_type",
                    )
                ]
            ),
        ),
        migrations.AlterIndexTogether(
            name="mapping",
            index_together=set(
                [
                    ("source_db", "source_id", "source_species", "target_db"),
                    ("target_db", "target_id", "target_species"),
                ]
            ),
        ),
    ]
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration creating the Feature and Mapping models."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Feature",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("source", models.CharField(max_length=20)),
                ("feature_id", models.CharField(max_length=50)),
                ("species", models.CharField(max_length=50)),
                (
                    "type",
                    models.CharField(
                        choices=[
                            (b"gene", b"Gene"),
                            (b"transcript", b"Transcript"),
                            (b"exon", b"Exon"),
                            (b"probe", b"Probe"),
                        ],
                        max_length=20,
                    ),
                ),
                (
                    "sub_type",
                    models.CharField(
                        choices=[
                            (b"protein-coding", b"Protein-coding"),
                            (b"pseudo", b"Pseudo"),
                            (b"rRNA", b"rRNA"),
                            (b"ncRNA", b"ncRNA"),
                            (b"snRNA", b"snRNA"),
                            (b"snoRNA", b"snoRNA"),
                            (b"tRNA", b"tRNA"),
                            (b"other", b"Other"),
                            (b"unknown", b"Unknown"),
                        ],
                        max_length=20,
                    ),
                ),
                ("name", models.CharField(max_length=20)),
                ("full_name", models.CharField(blank=True, max_length=200)),
                ("description", models.TextField(blank=True)),
                (
                    "aliases",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=20),
                        blank=True,
                        default=[],
                        size=None,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Mapping",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "relation_type",
                    models.CharField(
                        choices=[(b"crossdb", b"Crossdb"), (b"ortholog", b"Ortholog")],
                        max_length=20,
                    ),
                ),
                ("source_db", models.CharField(max_length=20)),
                ("source_id", models.CharField(max_length=50)),
                ("target_db", models.CharField(max_length=20)),
                ("target_id", models.CharField(max_length=50)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name="feature",
            unique_together=set([("source", "feature_id")]),
        ),
    ]
"""Create IGV session for archive."""
import argparse
import os
from lxml import etree
# Command-line interface: a single required argument pointing to the
# input file (build on the first line, one file path per further line).
parser = argparse.ArgumentParser(description="Create igv session.")
parser.add_argument(
    "-f", "--input_file", required=True, help="File with paths to files for IGV."
)
args = parser.parse_args()
def get_build_info(input_file):
    """Get build information in UCSC notation, if possible.

    The build name is read from the first line of ``input_file``. It is
    translated to a UCSC name either by exact match or by prefix match
    against the known synonyms; an empty string is returned otherwise.
    """
    ucsc_synonyms = {
        "hg38": ["GRCh38"],
        "hg19": ["GRCh37", "b37"],
        "mm10": ["GRCm38"],
        "mm9": ["MGSCv37"],
        "rn6": ["Rnor_6.0"],
    }
    with open(input_file, "r") as handle:
        build = handle.readline().strip()
    for ucsc_name, synonyms in ucsc_synonyms.items():
        if build == ucsc_name or build.startswith(tuple(synonyms)):
            return ucsc_name
    return ""
def make_xml_tree(input_file):
    """Make xml tree for IGV session.

    Builds a ``Global`` element with one ``Resource`` child per file
    path listed in ``input_file`` (first line holds the build and is
    skipped) and returns the serialized XML as bytes.
    """
    session = etree.Element(
        "Global",
        genome=get_build_info(input_file),
        version="3",
    )
    resources = etree.SubElement(session, "Resources")

    with open(input_file, "r") as handle:
        next(handle)  # Skip the line with the build.
        for line in handle:
            path = line.rstrip()
            # replace None (dir folder if species and build are not defined) with other_data
            if os.path.dirname(os.path.dirname(path)) == "None":
                path = path.replace("None", "other_data")
            etree.SubElement(
                resources,
                "Resource",
                name=os.path.basename(path),
                path=os.path.join("..", path),
            )

    return etree.tostring(
        session,
        pretty_print=True,
        xml_declaration=True,
        encoding="UTF-8",
    )
def write_xml_file(input_file, doc):
    """Compose a name and write output file.

    The serialized session ``doc`` (bytes) is written into the ``IGV``
    directory under a name derived from ``input_file``.
    """
    os.makedirs("IGV", exist_ok=True)
    target = os.path.join("IGV", input_file.replace("temp_igv.txt", "igv.xml"))
    with open(target, "wb") as handle:
        handle.write(doc)
def main():
    """Invoke when run directly as a program."""
    session_xml = make_xml_tree(args.input_file)
    write_xml_file(args.input_file, session_xml)


if __name__ == "__main__":
    main()
"""Merge columns of multiple experiments by gene id with sample name for header."""
import argparse
import multiprocessing
import numpy as np
import pandas as pd
# Command-line interface: parallel lists describing every expression
# file (path, sample name, build, species and optional expression type).
parser = argparse.ArgumentParser(
    description="Merge columns of multiple experiments by gene id."
)
parser.add_argument(
    "-f",
    "--file-paths",
    required=True,
    nargs="+",
    help="List of paths to expression files.",
)
parser.add_argument(
    "-n", "--sample-names", required=True, nargs="+", help="List of sample names."
)
parser.add_argument(
    "-b", "--builds", required=True, nargs="+", help="List of sample builds."
)
parser.add_argument(
    "-s", "--species", required=True, nargs="+", help="List of species of samples."
)
parser.add_argument(
    "-e",
    "--exp-types",
    required=False,
    nargs="+",
    help="List of expression types of samples.",
)

# Placeholder written for missing values in the merged tables.
na_rep = "NA"
if __name__ == "__main__":
    args = parser.parse_args()

    # Use file in exp output field of expression data objects.
    # This is a two-column file with feature ids and expression values.
    if args.exp_types:
        # Group samples that share the same (build, species, expression
        # type); each group is merged into its own output file.
        data = {}
        for exp_file, build, species, exp_type, sample_name in zip(
            args.file_paths,
            args.builds,
            args.species,
            args.exp_types,
            args.sample_names,
        ):
            data.setdefault((build, species, exp_type), []).append(
                [exp_file, sample_name]
            )

        for (build, species, exp_type), data_values in data.items():
            df = pd.DataFrame(np.nan, index=[], columns=[])
            header = []
            for exp_file, sample_name in data_values:
                header.append(sample_name)
                reader = pd.read_csv(
                    exp_file, index_col="Gene", delimiter="\t", dtype=str
                )
                df = pd.concat([df, reader], axis=1)
            df.columns = header
            name = "_".join([species, build, exp_type, "all_expressions.txt"])
            df.to_csv(name, sep="\t", na_rep=na_rep)

    # Use file in exp_set output field of expression data objects.
    # This is a multi-column file with feature ids, gene symbols and one or more expression values.
    else:
        multi_index = ["FEATURE_ID", "GENE_SYMBOL"]

        def read_csv(file_path):
            """Return DataFrame representation of CSV file."""
            return pd.read_csv(
                file_path, index_col=multi_index, delimiter="\t", dtype=str
            )

        # BUG FIX: pools were previously created with
        # ``multiprocessing.Pool().map(...)`` and never closed or
        # joined, leaking worker processes; the context manager
        # terminates the pool once the work is done.
        with multiprocessing.Pool() as pool:
            expressions = pool.map(read_csv, args.file_paths)

        # Collect every distinct (species, build, column) combination.
        items = []
        for species, build, expression in zip(args.species, args.builds, expressions):
            for column_label in expression.columns:
                if column_label in multi_index:
                    continue
                item = {
                    "species": species,
                    "build": build,
                    "column_label": column_label,
                }
                if item not in items:
                    items.append(item)

        def merge_expressions(item):
            """Merge expressions of multiple samples and save them into CSV file."""
            dfs = [
                expressions[i][item["column_label"]]
                for i in range(len(args.file_paths))
                if (
                    args.species[i] == item["species"]
                    and args.builds[i] == item["build"]
                    and item["column_label"] in expressions[i].columns
                )
            ]
            df = pd.concat(dfs, axis=1)
            header = [
                args.sample_names[i]
                for i in range(len(args.file_paths))
                if (
                    args.species[i] == item["species"]
                    and args.builds[i] == item["build"]
                    and item["column_label"] in expressions[i].columns
                )
            ]
            df.columns = header
            name = "_".join(
                [
                    item["species"],
                    item["build"],
                    item["column_label"],
                    "all_expressions.txt",
                ]
            )
            df.to_csv(name, sep="\t", na_rep=na_rep)

        with multiprocessing.Pool() as pool:
            pool.map(merge_expressions, items)
"""Principal components analysis."""
import argparse
import json
import numpy as np
import pandas as pd
from resolwe_runtime_utils import send_message, warning
from sklearn.decomposition import PCA
def get_args():
    """Build and parse the command-line interface for the PCA tool."""
    arg_parser = argparse.ArgumentParser(description="PCA")
    arg_parser.add_argument(
        "--sample-files", "-f", nargs="+", help="Sample file names", required=True
    )
    arg_parser.add_argument(
        "--sample-ids", "-i", nargs="+", help="Sample IDs", required=True
    )
    arg_parser.add_argument("--gene-labels", "-g", nargs="+", help="Filter genes by label")
    arg_parser.add_argument(
        "--components", "-c", help="Number of PCA components", type=int, default=2
    )
    arg_parser.add_argument("--output-fn", "-o", help="Output file name")
    return arg_parser.parse_args()
def component_top_factors(component, allgenes_array, max_size=20):
    """Return the genes with the largest absolute loadings of one component.

    Results are (gene, loading) pairs, sorted by decreasing |loading|,
    truncated to at most ``max_size`` entries.
    """
    magnitudes = np.abs(component)
    keep = min(component.size, max_size)
    # argpartition puts the `keep` largest magnitudes last; sort just those.
    top_unsorted = np.argpartition(magnitudes, -keep)[-keep:]
    order = np.argsort(magnitudes[top_unsorted])[::-1]
    top = top_unsorted[order]
    if top.size == 0:
        return []
    genes = np.array(allgenes_array)[top].tolist()
    weights = component[top].tolist()
    return list(zip(genes, weights))
def get_pca(expressions=None, n_components=2, gene_labels=None):
    """Compute PCA of a genes-by-samples expression matrix.

    :param expressions: genes x samples DataFrame (defaults to empty).
    :param n_components: number of principal components to fit.
    :param gene_labels: labels used to report component loadings; falls back
        to the expression index when empty/None.
    :return: dict with sample coordinates, per-component top loadings,
        explained variance ratios and gene labels missing from the data.
    """
    # Create fresh defaults per call -- the previous `pd.DataFrame()` / `[]`
    # defaults were single objects evaluated once at import time.
    if expressions is None:
        expressions = pd.DataFrame()
    if not gene_labels:
        gene_labels = expressions.index
    skipped_gene_labels = list(set(gene_labels).difference(expressions.index))

    if expressions.shape[0] < 2 or expressions.shape[1] < 2:
        # Too little data for a meaningful PCA; return zeroed placeholders.
        coordinates = [[0.0, 0.0] for i in range(expressions.shape[1])]
        all_components = [[], []]
        all_explained_variance_ratios = [0.0, 0.0]
    else:
        pca = PCA(n_components=n_components, whiten=True)
        pca_expressions = pca.fit_transform(expressions.transpose())

        # Keep the first two coordinates per sample; pad 1-D results with 0.
        coordinates = [
            t[:2].tolist() if len(t) > 1 else [t[0], 0.0] for t in pca_expressions
        ]
        all_components = [
            component_top_factors(component, gene_labels)
            for component in pca.components_
        ]
        if np.isnan(pca.explained_variance_ratio_).any():
            all_explained_variance_ratios = [0.0 for _ in pca.explained_variance_ratio_]
        else:
            all_explained_variance_ratios = pca.explained_variance_ratio_.tolist()

    result = {
        "coordinates": coordinates,
        "all_components": all_components,
        "all_explained_variance_ratios": all_explained_variance_ratios,
        "skipped_gene_labels": skipped_gene_labels,
        "warning": None,
    }

    if expressions.empty:
        send_message(
            warning(
                "Gene selection and filtering resulted in no genes. Please select different samples or genes."
            )
        )

    return result
def save_pca(result=None, sample_ids=None, output_fn=None, max_size=10):
    """Serialize PCA results to JSON.

    :param result: dict produced by :func:`get_pca` (required in practice).
    :param sample_ids: sample identifiers matching the PCA coordinates.
    :param output_fn: output file name; when falsy the JSON is printed.
    :param max_size: number of components/ratios kept in the truncated keys.
    """
    # Fresh defaults per call; the previous `{}` / `[]` defaults were shared
    # mutable objects created at import time.
    if result is None:
        result = {}
    if sample_ids is None:
        sample_ids = []
    data = {
        "flot": {
            "data": result["coordinates"],
            "xlabel": "PC 1",
            "ylabel": "PC 2",
            "sample_ids": sample_ids,
        },
        "zero_gene_symbols": result["skipped_gene_labels"],
        "components": result["all_components"][:max_size],
        "all_components": result["all_components"],
        "explained_variance_ratios": result["all_explained_variance_ratios"][:max_size],
        "all_explained_variance_ratios": result["all_explained_variance_ratios"],
    }
    if output_fn:
        with open(output_fn, "w") as outfile:
            json.dump(data, outfile, separators=(",", ":"), allow_nan=False)
    else:
        print(json.dumps(data, separators=(",", ":"), allow_nan=False))
def read_csv(fname):
    """Load one expression file (TSV: gene id, expression) as a DataFrame.

    The gene-id index is forced to strings so files with numeric ids merge
    cleanly with files using string ids.
    """
    frame = pd.read_csv(
        filepath_or_buffer=fname,
        sep="\t",
        header=0,
        index_col=0,
        dtype={
            0: str,
            1: float,
        },
        keep_default_na=False,
    )
    frame.index = frame.index.map(str)
    return frame
def get_csv(fnames):
    """Load every expression file and inner-join them on the gene index."""
    frames = []
    for fname in fnames:
        frames.append(read_csv(fname))
    return pd.concat(frames, axis=1, join="inner")
def main():
    """Entry point: load expressions, compute the PCA and store the JSON."""
    args = get_args()
    expressions = get_csv(args.sample_files)
    if args.gene_labels:
        # Keep only requested genes that actually exist in the data.
        selected = set(args.gene_labels).intersection(expressions.index)
        expressions = expressions.loc[selected]
    result = get_pca(expressions, args.components, args.gene_labels)
    save_pca(result, args.sample_ids, args.output_fn)
if __name__ == "__main__":
main() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/pca.py | 0.852045 | 0.423756 | pca.py | pypi |
import argparse
import sys
from collections import namedtuple
def parse_arguments():
    """Parse command line arguments.

    Fixes user-facing typos in the CLI help text ("overlaping"/"filterd").
    """
    parser = argparse.ArgumentParser(description="Remove overlapping indels.")
    parser.add_argument("in_vcf", help="Input VCF file.")
    parser.add_argument("out_vcf", help="Output (filtered) VCF file.")
    return parser.parse_args()
def write_var(var, out_file):
    """Append one variant to *out_file* as a tab-separated VCF line."""
    # Position is stored as int internally; stringify it for output.
    fields = var._replace(pos=str(var.pos))
    out_file.write("\t".join(fields) + "\n")
def vcf_line_to_var(line):
    """Parse the first eight VCF columns of *line* into a VCFEntry.

    The position field is converted to int for arithmetic downstream.
    """
    VCFEntry = namedtuple(
        "VCFEntry", ["chrom", "pos", "dbsnpid", "ref", "alt", "qual", "filter", "info"]
    )
    entry = VCFEntry(*line.rstrip().split("\t")[:8])
    return entry._replace(pos=int(entry.pos))
def af_from_var(var):
    """Return the allele frequency (AF=...) from the INFO column, or None."""
    return next(
        (
            float(field[3:])
            for field in var.info.split(";")
            if field.startswith("AF=")
        ),
        None,
    )
def qual_from_var(var):
    """Return a sortable quality score for a variant.

    A missing QUAL (".") ranks highest; otherwise the integer QUAL plus the
    allele frequency is used so AF breaks ties between equal QUAL values.
    """
    if var.qual == ".":
        # `sys.maxint` was removed in Python 3 and raised AttributeError
        # here; `sys.maxsize` is the equivalent "bigger than any score".
        return sys.maxsize
    else:
        # add AF to deal with ties
        return int(var.qual) + af_from_var(var)
def overlap(v1, v2):
    """Return True when the reference spans affected by two variants intersect."""
    # A variant affects max(len(ref), len(alt)) bases starting at pos.
    end1 = v1.pos + max(len(v1.ref), len(v1.alt))
    end2 = v2.pos + max(len(v2.ref), len(v2.alt))
    return max(v1.pos, v2.pos) < min(end1, end2)
def main():
    """Invoke when run directly as a program.

    Streams the input VCF once, collecting runs of overlapping variants on a
    stack; for each run only the best variant (highest QUAL, AF as the
    tie-break) is written to the output.
    NOTE(review): assumes the input VCF is coordinate-sorted -- confirm
    upstream producers guarantee this.
    """
    args = parse_arguments()
    with open(args.in_vcf) as in_vcf:
        with open(args.out_vcf, "w") as out_vcf:
            pick_best_func = qual_from_var
            # Stack of consecutive, mutually overlapping variants.
            prev_vars = []
            for line in in_vcf:
                line = line.rstrip()
                # Write header lines to the output VCF file
                if line.startswith("#"):
                    out_vcf.write("{}\n".format(line))
                    continue
                cur_var = vcf_line_to_var(line)
                if len(prev_vars):
                    # A chromosome change or a gap ends the current run.
                    if cur_var.chrom != prev_vars[-1].chrom or not overlap(
                        prev_vars[-1], cur_var
                    ):
                        # pick highest qual/af from stack and empty stack
                        picked_var = sorted(
                            prev_vars, key=lambda e: pick_best_func(e), reverse=True
                        )[0]
                        write_var(picked_var, out_vcf)
                        prev_vars = []
                prev_vars.append(cur_var)
            # don't forget remaining ones
            if prev_vars:
                picked_var = sorted(
                    prev_vars, key=lambda e: pick_best_func(e), reverse=True
                )[0]
                write_var(picked_var, out_vcf)
if __name__ == "__main__":
main() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/lofreq2_indel_ovlp.py | 0.524882 | 0.263074 | lofreq2_indel_ovlp.py | pypi |
"""Parse Diff Exp output files."""
import argparse
import json
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from resolwe_runtime_utils import error, send_message
def parse_arguments():
    """Build and parse the CLI for the differential-expression parser."""
    arg_parser = argparse.ArgumentParser(description="Parse Diff Exp output files")
    arg_parser.add_argument("raw_file", help="DE analysis output file (.tab).")
    arg_parser.add_argument("output_json", help="Output JSON")
    arg_parser.add_argument("output_file", help="Output file")
    arg_parser.add_argument("--gene_id", help="Gene_IDs column name", type=str)
    arg_parser.add_argument("--fdr", help="FDR column name", type=str)
    arg_parser.add_argument("--pvalue", help="Pvalue column name", type=str)
    arg_parser.add_argument("--fwer", help="FWER column name", type=str)
    arg_parser.add_argument("--logodds", help="Log Odds column name", type=str)
    arg_parser.add_argument("--logfc", help="logfc column name", type=str)
    arg_parser.add_argument("--stat", help="Statistics column name", type=str)
    return arg_parser.parse_args()
def main():
    """Invoke when run directly as a program.

    Reads a differential-expression table, validates its numeric columns,
    extracts the requested columns under canonical names and writes them as
    JSON plus a gzipped TSV.
    """
    args = parse_arguments()

    de_data = pd.read_csv(args.raw_file, sep="\t")
    # Files written with an unnamed index column expose it as "Unnamed: 0".
    de_data.rename(columns={"Unnamed: 0": "gene_id"}, inplace=True)
    # Missing statistics are treated as maximally non-significant.
    de_data.fillna(value=1, inplace=True)

    # Make sure all listed numeric columns are valid numeric variables based
    # on a union of numeric column names from cuffdiff, edgeR, deseq2 and test
    # files. (Duplicate "stat" entry removed.)
    numeric_columns = [
        "baseMean",
        "log2FoldChange",
        "lfcSE",
        "stat",
        "pvalue",
        "padj",
        "value_1",
        "value_2",
        "log2(fold_change)",
        "test_stat",
        "p_value",
        "q_value",
        "logfc",
        "fdr",
        "logFC",
        "logCPM",
        "LR",
        "Pvalue",
        "FDR",
    ]
    for column in numeric_columns:
        if column not in de_data.columns:
            continue
        if not is_numeric_dtype(de_data[column]):
            msg = (
                f"Column {column} is not numeric. Please make sure "
                f"that the input file has valid numeric values (i.e. "
                f"periods for decimal places)."
            )
            send_message(error(msg))
            raise ValueError(msg)

    columns = {}
    col_order = []

    if args.gene_id:
        # The special value "index" selects the DataFrame index.
        source = de_data.index if args.gene_id == "index" else de_data[args.gene_id]
        columns["gene_id"] = list(source.astype(str))
        col_order.append("gene_id")

    if args.logfc:
        # Replace +/-inf fold changes with 0 so JSON serialization succeeds.
        col = np.array(de_data[args.logfc])
        col[np.isinf(col)] = 0
        columns["logfc"] = list(col)
        col_order.append("logfc")

    # The remaining optional statistics columns share one extraction pattern.
    for out_name, in_name in (
        ("fdr", args.fdr),
        ("pvalue", args.pvalue),
        ("fwer", args.fwer),
        ("logodds", args.logodds),
        ("stat", args.stat),
    ):
        if in_name:
            columns[out_name] = list(de_data[in_name])
            col_order.append(out_name)

    with open(args.output_json, "w") as f:
        json.dump(columns, f, separators=(",", ":"), allow_nan=False)

    outdf = pd.DataFrame(columns)
    outdf = outdf[col_order]
    outdf.to_csv(args.output_file, sep="\t", index=False, compression="gzip")
if __name__ == "__main__":
main() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/parse_diffexp.py | 0.732018 | 0.245927 | parse_diffexp.py | pypi |
import argparse
import matplotlib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from resolwe_runtime_utils import send_message, warning
from scipy import stats
import utils
# File with known ERCC concentrations.
ERCC_CONC_FILE = "/opt/resolwebio/assets/ERCC_table.txt"
# Known SIRV concentrations per SIRV gene (presumably attomoles/ul, matching
# the ERCC table columns -- TODO confirm).
SIRV_CONC = {
    "SIRV1": 8000,
    "SIRV2": 6000,
    "SIRV3": 11000,
    "SIRV4": 7000,
    "SIRV5": 12000,
    "SIRV6": 18000,
    "SIRV7": 7000,
}
# Maps the CLI mix choice to the concentration column in the ERCC table.
ERCC_MIXES = {
    "ercc_mix1": {
        "name": "concentration in Mix 1 (attomoles/ul)",
    },
    "ercc_mix2": {
        "name": "concentration in Mix 2 (attomoles/ul)",
    },
    "sirv_set3": {
        "name": "concentration in SIRV Set 3 (attomoles/ul)",
    },
}
# Histogram and scatter plot x-limit (log2 concentration range).
XLIM = (-9, 16)
def parse_arguments():
    """Build and parse the command-line interface."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument("--sample_names", help="List of sample names.", nargs="+")
    arg_parser.add_argument("--sample_exps", help="List of expressions.", nargs="+")
    arg_parser.add_argument(
        "--exp_types", help="List of expression types (TPM, SMP, FPKM...)", nargs="+"
    )
    arg_parser.add_argument(
        "--spikeins_mix", help="Used spike-ins mix.", choices=ERCC_MIXES.keys()
    )
    return arg_parser.parse_args()
def validate_inputs(args):
    """Validate inputs.

    Warns (without failing) when samples mix expression types, and raises
    ``ValueError`` when the per-sample argument lists have different lengths.
    """
    # Validate that all expression types are equal.
    exp_type_set = set(args.exp_types)
    if len(exp_type_set) != 1:
        msg = "All samples should have the same expression type, but multiple expression types were given: {}."
        msg = msg.format(", ".join(exp_type_set))
        send_message(warning(msg))
    # Validate that same number of sample names, expression files and
    # expression types are given. Raise instead of `assert` so the check
    # survives `python -O` (asserts are stripped with optimization on).
    if not (len(args.sample_names) == len(args.sample_exps) == len(args.exp_types)):
        raise ValueError(
            "The number of sample names, expression files and expression types must match."
        )
def get_expected(spikeins_mix, log2=False):
    """Get expected ERCC / SIRV concentrations for given spike-in mix.

    If specified, also log2 transform.
    """
    ercc_data = pd.read_csv(
        ERCC_CONC_FILE, delimiter="\t", dtype=str, index_col="ERCC ID"
    )
    if spikeins_mix == "sirv_set3":
        # This mix contains both: SIRV's and ERCC Mix 1.
        ercc_part = ercc_data.loc[:, ERCC_MIXES["ercc_mix1"]["name"]].astype("float")
        concentrations = pd.concat([ercc_part, pd.Series(SIRV_CONC)], axis=0)
    elif spikeins_mix in ("ercc_mix1", "ercc_mix2"):
        concentrations = ercc_data.loc[:, ERCC_MIXES[spikeins_mix]["name"]].astype(
            "float"
        )
    else:
        raise ValueError("Invalid spikein mix.")
    return np.log2(concentrations) if log2 else concentrations
def get_measured(
    sample_exp, sample_name, exp_type, only_zero=False, only_nonzero=False, log2=False
):
    """Load one sample's measured expressions as a float Series.

    Optionally keep only zero or only nonzero values, and log2 transform.
    """
    source = utils.gzopen(sample_exp)
    measured = pd.read_csv(source, delimiter="\t", index_col="Gene")
    measured = measured.loc[:, "Expression"].astype("float")
    # The two filters are mutually exclusive.
    assert not (only_zero and only_nonzero)
    if only_zero:
        measured = measured[measured == 0]
    elif only_nonzero:
        measured = measured.iloc[measured.to_numpy().nonzero()[0]]
    if log2:
        measured = np.log2(measured)
    return measured
def merge_expected_measured(expected, measured):
    """Join expected concentrations with measured values, keeping only
    the genes present in the measured series."""
    combined = pd.concat([expected, measured], axis=1)
    return combined.reindex(measured.index)
def plot_histogram(histogram, all_spikeins, nonzero_spikeins, spikein_type):
    """Bar-plot counts of all (grey) vs. detected (green) spike-ins."""
    # Grey bars first so green (detected) overlays them.
    for counts, color in ((all_spikeins, "grey"), (nonzero_spikeins, "green")):
        histogram.bar(counts.index, counts, color=color, alpha=0.5, width=0.4)
    histogram.grid(alpha=0.5, axis="y")
    histogram.set_ylabel("No. of {}s".format(spikein_type))
    histogram.set_xlim(XLIM)
def plot_scatter(scatter, zero, nonzero, exp_type):
    """Scatter measured vs. expected spike-in expressions (log2 scales)."""
    expected_nz = nonzero.iloc[:, 0]
    measured_nz = nonzero.iloc[:, 1]
    scatter.scatter(expected_nz, measured_nz, alpha=0.5, color="green")
    # Fit the detected spike-ins; undetected ones are drawn on the fit line.
    slope, intercept, r_value, _, _ = stats.linregress(expected_nz, measured_nz)
    if not zero.empty:
        expected_z = zero.iloc[:, 0]
        scatter.scatter(
            expected_z,
            intercept + expected_z * slope,
            alpha=0.5,
            color="grey",
        )
    # Text box with fit quality and detection counts.
    annotation = "\n".join(
        [
            "$R^2$ = {}".format(round(r_value**2, 2)),
            "{} / {} transcripts not detected".format(
                len(zero), len(zero) + len(nonzero)
            ),
        ]
    )
    scatter.text(
        x=0.05,
        y=0.9,
        s=annotation,
        horizontalalignment="left",
        verticalalignment="top",
        transform=scatter.transAxes,
        bbox=dict(boxstyle="round", facecolor="white", alpha=0.5, edgecolor="gray"),
    )
    # Styling.
    scatter.grid()
    scatter.set_xlim(XLIM)
    scatter.set_xlabel("log2(Concentration [attomoles/ul])")
    scatter.set_ylabel("Sample log2({})".format(exp_type))
def plot_histogram_scatter(
    expected, zero, nonzero, spikein_type, sample_name, exp_type
):
    """Render the two-panel (histogram above scatter) figure and save a PNG."""
    figure = plt.figure(
        figsize=(8, 6),
        dpi=200,
    )
    grid = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[1, 4])
    plot_histogram(
        histogram=plt.subplot(grid[0]),
        all_spikeins=expected.value_counts(),
        nonzero_spikeins=nonzero.iloc[:, 0].value_counts(),
        spikein_type=spikein_type,
    )
    plot_scatter(
        scatter=plt.subplot(grid[1]),
        zero=zero,
        nonzero=nonzero,
        exp_type=exp_type,
    )
    title = "{} ({} spike-in's)".format(sample_name, spikein_type)
    figure.suptitle(title)
    plt.savefig(title + ".png", format="png")
    plt.close()
def main():
    """Invoke when run directly as a program."""
    args = parse_arguments()
    validate_inputs(args)
    # All samples share one expression type (validated above).
    exp_type = args.exp_types[0]
    spikeins_mix = args.spikeins_mix
    expected = get_expected(spikeins_mix, log2=True)
    min_one_has_spikeins = False  # At least one sample has spikeins = False
    warnings = []
    for sample_name, sample_exp in zip(args.sample_names, args.sample_exps):
        # Zero expressions are plotted separately (on the fitted line).
        measured_zero = get_measured(sample_exp, sample_name, exp_type, only_zero=True)
        measured_nonzero = get_measured(
            sample_exp, sample_name, exp_type, only_nonzero=True, log2=True
        )
        merged_zero = merge_expected_measured(expected, measured_zero)
        merged_nonzero = merge_expected_measured(expected, measured_nonzero)
        # Get only ERCC spike-in's and plot the histogram-scatter figure.
        if merged_nonzero.iloc[merged_nonzero.index.str.startswith("ERCC"), :].empty:
            warnings.append(
                "All ERCC spike-ins have zero expression in sample {}".format(
                    sample_name
                )
            )
            continue
        min_one_has_spikeins = True
        plot_histogram_scatter(
            expected=expected.iloc[expected.index.str.startswith("ERCC")],
            zero=merged_zero.iloc[merged_zero.index.str.startswith("ERCC"), :],
            nonzero=merged_nonzero.iloc[merged_nonzero.index.str.startswith("ERCC"), :],
            spikein_type="ERCC",
            sample_name=sample_name,
            exp_type=exp_type,
        )
    if min_one_has_spikeins:
        for message in warnings:
            send_message(warning(message))
    else:
        # In case all samples have zero expression for all spikeins,
        # rather print one warning that says so (instead of printing
        # warning for each of the samples).
        send_message(warning("All ERCC spike-ins in all samples have zero expression."))
if __name__ == "__main__":
main() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/spikein_pairwise.py | 0.675872 | 0.544862 | spikein_pairwise.py | pypi |
"""Build expression time course."""
from __future__ import absolute_import, division, print_function
import argparse
import gzip
import json
from collections import Counter
import numpy as np
from six import iteritems
import utils
parser = argparse.ArgumentParser(description="Build expression time course.")
parser.add_argument("expression_files", nargs="+", help="gene expression files")
parser.add_argument("--names", nargs="+", help="expression names")
parser.add_argument("--mean", action="store_true", help="group by time and average")
args = parser.parse_args()
if args.names and len(args.names) != len(args.expression_files):
raise AttributeError("Names length must match files length.")
files = args.expression_files
progress = 0
series = {"timePoints": [], "genes": {}}
# Read time points from file names
times = []
if args.names:
for name in args.names:
ndx = name.lower().find("hr")
if ndx > 0:
try:
times.append(int(name[ndx + 2 : ndx + 4]))
except Exception:
try:
times.append(int(name[ndx - 2 : ndx]))
except Exception:
pass
if len(times) > 0 and len(times) != len(files):
raise AttributeError("Found some times but not for all time points.")
if len(times) == 0:
times = range(len(files))
def is_gzipped(f):
    """Check if file is gzipped (starts with the 0x1f 0x8b magic bytes)."""
    with open(f, "rb") as rpkm_file:
        magic = rpkm_file.read(2)
    # The file is opened in binary mode, so the magic number must be compared
    # as bytes; the previous str literal never matched on Python 3.
    return magic == b"\037\213"
# Go through files in time order and collect one expression column per file.
for t, f in sorted(zip(times, files)):
    myopen = utils.gzopen if is_gzipped(f) else open
    with myopen(f) as rpkm_file:
        rpkm_file.readline()  # skip first (header) line
        # TODO: see above
        series["timePoints"].append(t)
        # Go through lines and parse them
        for line in rpkm_file:
            entry = line.split()
            gene = entry[0]
            value = float(entry[1])
            # Add gene if missing
            if gene not in series["genes"]:
                series["genes"][gene] = []
            # Set values from previous experiment to 0 if missing and add value
            series["genes"][gene].extend(
                [0] * (len(series["timePoints"]) - 1 - len(series["genes"][gene]))
            )
            series["genes"][gene].append(value)
    # Print progress
    progress += 1.0 / len(files)
    print('{{"proc.progress":{0}}}'.format(progress))
# Add 0s at the end of genes if values are missing
for gene in series["genes"]:
    series["genes"][gene].extend(
        [0] * (len(series["timePoints"]) - len(series["genes"][gene]))
    )
# Average time points for replicates
if args.mean:
    tp, tp_counts = zip(*sorted(iteritems(Counter(series["timePoints"]))))
    for gene in series["genes"]:
        s = 0
        tp_averages = []
        for c in tp_counts:
            tp_averages.append(np.mean(series["genes"][gene][s : s + c]))
            s += c
        series["genes"][gene] = tp_averages
    series["timePoints"] = tp
# Print result in json. Open the gzip member in text mode and close it
# explicitly: `gzip.open(..., "wb").write(str)` raised TypeError on
# Python 3 and leaked the file handle.
etcjson = '{"etc":%s}' % json.dumps(series, separators=(",", ":"))
with gzip.open("etc.json.gz", "wt") as gz_out:
    gz_out.write(etcjson)
with open("etc.json", "w") as out_json:
    out_json.write(json.dumps(series, separators=(",", ":")))
"""Merge Bowtie2 statistics from paired-end reads."""
import sys
if len(sys.argv) < 2:
sys.stderr.write("No stats file given.\n")
exit(1)
stats = []
t_unique, t_multiple, t_discord = 0, 0, 0
with open(sys.argv[1]) as f:
iteration, processed, not_aligned, unique, multiple, discord, mapped = (
"Initial alignment",
0,
0,
0,
0,
0,
0,
)
for line in f:
vals = line.strip().split(" ")
if "reads; of these" in line:
processed = int(vals[0])
if "pairs aligned 0 times" in line:
not_aligned = int(vals[0])
if "aligned concordantly exactly 1 time" in line:
unique = int(vals[0])
t_unique += unique
if "aligned concordantly >1 times" in line:
multiple = int(vals[0])
t_multiple += multiple
if "aligned discordantly 1 time" in line:
discord = int(vals[0])
t_discord += discord
if "Trimming iteration" in line:
iteration = line.strip()
if "overall alignment rate" in line:
mapped = float(vals[0].strip("%"))
stats.append(
(iteration, processed, not_aligned, unique, multiple, discord, mapped)
)
processed, not_aligned, unique, multiple, discord, mapped = (
0,
0,
0,
0,
0,
0,
)
with open("stats.tab", "w") as f:
f.write(
"Alignment\tPairs processed\tPairs aligned 0 times\t"
"Pairs aligned exactly 1 time\tPairs aligned >1 times\t"
"Pairs aligned discordantly\tOverall alignment rate (%)\n"
)
for vals in stats:
f.write("\t".join(map(str, vals)) + "\n")
t_mapped = round(((t_unique + t_multiple + t_discord) / stats[0][1]) * 100, 1)
f.write(
"\t".join(
map(
str,
(
"Total",
stats[0][1],
stats[-1][2],
t_unique,
t_multiple,
t_discord,
t_mapped,
),
)
)
+ "\n"
) | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/mergebowtie2stats_paired.py | 0.444806 | 0.313853 | mergebowtie2stats_paired.py | pypi |
"""Expression aggregator."""
import argparse
import csv
import gzip
import json
import math
import numpy as np
def get_args():
    """Build and parse the command-line interface of the aggregator."""
    arg_parser = argparse.ArgumentParser(description="Expression aggregator")
    arg_parser.add_argument(
        "-e", "--expressions", nargs="+", help="Expressions", required=True
    )
    arg_parser.add_argument(
        "-d", "--descriptors", nargs="+", help="Descriptors", required=True
    )
    arg_parser.add_argument("-s", "--source", help="Source", required=True)
    arg_parser.add_argument(
        "-t", "--expression-type", help="Expression type", required=True
    )
    arg_parser.add_argument("-g", "--group-by", help="Group by", required=True)
    arg_parser.add_argument("-a", "--aggregator", help="Aggregator")
    arg_parser.add_argument("-b", "--box-plot-output", help="Box plot output file name")
    arg_parser.add_argument(
        "-l", "--log-box-plot-output", help="Log box plot output file name"
    )
    arg_parser.add_argument(
        "-x", "--expressions-output", help="Expressions output file name"
    )
    return arg_parser.parse_args()
def load_expression(fn=None, sep="\t"):
    """Read a gzipped two-column (Gene, Expression) file into a dict."""
    with gzip.open(fn, "rt") as handle:
        return {
            row["Gene"]: float(row["Expression"])
            for row in csv.DictReader(handle, delimiter=sep)
        }
def get_indices(descriptors, descriptor):
    """Return the set of positions where *descriptor* occurs in *descriptors*."""
    return {i for i, value in enumerate(descriptors) if value == descriptor}


def get_values(expressions, descriptors, gene, descriptor):
    """Collect expressions of *gene* from samples annotated with *descriptor*."""
    matching = get_indices(descriptors, descriptor)
    values = []
    for i, expression in enumerate(expressions):
        if i in matching and gene in expression:
            values.append(expression[gene])
    return values
def load_expressions(aggregator, expression_fns=None, sep="\t", descriptors=None):
    """Read expressions from files and group the values by descriptor.

    :param aggregator: previously saved aggregator state, or None.
    :param expression_fns: gzipped expression file paths.
    :param sep: column separator within the expression files.
    :param descriptors: one descriptor per expression file.
    :return: (raw_expressions, descriptors, grouped_expressions)
    """
    # Build fresh per-call lists: the previous `=[]` defaults were shared
    # between calls, and `descriptors` was even extended in place, leaking
    # state into the caller's list and across invocations.
    expression_fns = [] if expression_fns is None else list(expression_fns)
    descriptors = [] if descriptors is None else list(descriptors)
    raw_expressions = [
        load_expression(expression_fn, sep) for expression_fn in expression_fns
    ]
    if aggregator:
        raw_expressions.extend(aggregator["raw_expressions"])
        descriptors.extend(aggregator["descriptors"])
    genes = {key for raw_expression in raw_expressions for key in raw_expression.keys()}
    grouped_expressions = {
        gene: {
            descriptor: get_values(raw_expressions, descriptors, gene, descriptor)
            for descriptor in sorted(set(descriptors))
            if get_values(raw_expressions, descriptors, gene, descriptor)
        }
        for gene in genes
    }
    return raw_expressions, descriptors, grouped_expressions
def get_log_expressions(grouped_expressions):
    """Return log2(expression + 1) for every value, preserving the grouping."""
    return {
        gene: {
            descriptor: [math.log(value + 1.0, 2.0) for value in values]
            for descriptor, values in groups.items()
        }
        for gene, groups in grouped_expressions.items()
    }
def generate_statistic(expression, gene, attribute, expression_type):
    """Compute box-plot statistics for one gene/attribute group of values."""
    lowest = min(expression)
    highest = max(expression)
    q1 = np.percentile(expression, 25.0)
    median = np.percentile(expression, 50.0)
    q3 = np.percentile(expression, 75.0)
    spread = q3 - q1  # inter-quartile range
    return {
        "attribute": attribute,
        "gene": gene,
        "exp_types": [expression_type],
        "min": lowest,
        "max": highest,
        "median": median,
        "q1": q1,
        "q3": q3,
        # Whiskers are clamped to the observed data range.
        "lowerwhisker": max(lowest, q1 - 1.5 * spread),
        "upperwhisker": min(highest, q3 + 1.5 * spread),
        "data_count": len(expression),
    }
def get_statistics(expressions, expression_type):
    """Compute box-plot statistics for every gene and attribute group."""
    statistics = {}
    for gene, groups in expressions.items():
        statistics[gene] = [
            generate_statistic(values, gene, attribute, expression_type)
            for attribute, values in groups.items()
        ]
    return statistics
def output_json(statistics, fname=None, compressed=False):
    """Serialize *statistics* to a plain or gzip-compressed JSON file."""
    opener = gzip.open if compressed else open
    mode = "wt" if compressed else "w"
    with opener(fname, mode) as handle:
        json.dump(statistics, handle)
def load_json(fname):
    """Deserialize a gzip-compressed JSON file."""
    with gzip.open(fname, "rt") as handle:
        return json.loads(handle.read())
def check_aggregator(aggregator, source, expression_type, group_by):
    """Ensure a saved aggregator is compatible with the current run."""
    checks = (
        (
            "source",
            source,
            "All expressions must be annotated by the same genome database (NCBI, UCSC, ENSEMBLE,...).",
        ),
        ("expression_type", expression_type, "All expressions must be of the same type."),
        ("group_by", group_by, "Group by field must be the same."),
    )
    for key, expected, message in checks:
        if aggregator[key] != expected:
            raise ValueError(message)
def get_expressions_out(
    raw_expressions, descriptors, source, expression_type, group_by
):
    """Bundle raw data and run metadata for the aggregator output file."""
    keys = ("raw_expressions", "descriptors", "source", "expression_type", "group_by")
    values = (raw_expressions, descriptors, source, expression_type, group_by)
    return dict(zip(keys, values))
def main():
    """Compute expression statistics."""
    args = get_args()
    aggregator = None
    if args.aggregator:
        # Extend a previously saved aggregator; its metadata must match.
        aggregator = load_json(args.aggregator)
        check_aggregator(aggregator, args.source, args.expression_type, args.group_by)
    raw_expressions, descriptors, expressions = load_expressions(
        aggregator, args.expressions, "\t", args.descriptors
    )
    log_expressions = get_log_expressions(expressions)
    # Each output is optional and controlled by its own CLI flag.
    if args.box_plot_output:
        statistics = get_statistics(expressions, args.expression_type)
        output_json(statistics, args.box_plot_output)
    if args.log_box_plot_output:
        log_statistics = get_statistics(log_expressions, args.expression_type)
        output_json(log_statistics, args.log_box_plot_output)
    if args.expressions_output:
        # Raw state is persisted gzipped so future runs can extend it.
        expressions_out = get_expressions_out(
            raw_expressions,
            descriptors,
            args.source,
            args.expression_type,
            args.group_by,
        )
        output_json(expressions_out, args.expressions_output, True)
if __name__ == "__main__":
main() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/expression_aggregator.py | 0.827026 | 0.395601 | expression_aggregator.py | pypi |
"""Recalculate from STAR SJ.out.tab file to BED12 format."""
import argparse
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("sj_file", help="STAR SJ.out.tab output file")
args = parser.parse_args()
# STAR SJ.out.tab file consists of the following columns:
# column 1: chromosome
# column 2: first base of the intron (1-based)
# column 3: last base of the intron (1-based)
# column 4: strand (0: undefined, 1: +, 2: -)
# column 5: intron motif: 0: non-canonical; 1: GT/AG, 2: CT/AC, 3: GC/AG, 4: CT/GC, 5: AT/AC, 6: GT/AT
# column 6: 0: unannotated, 1: annotated (only if splice junctions database is used)
# column 7: number of uniquely mapping reads crossing the junction
# column 8: number of multi-mapping reads crossing the junction
# column 9: maximum spliced alignment overhang
sj_file = pd.read_csv(args.sj_file, delimiter="\t", header=None)
# BED12 consists of 12 columns:
header = [
    "chromosome",
    "sj_start",
    "sj_end",
    "sj_name",
    "score",
    "strand",
    "thick_start",
    "thick_end",
    "item_rgb",
    "block_counts",
    "block_sizes",
    "block_starts",
]
bed_file = pd.DataFrame(index=sj_file.index, columns=header)
# 1: chromosome = first column from STAR SJ.out.tab
bed_file.loc[:, "chromosome"] = sj_file.iloc[:, 0].values
# 2: SJ start (0-based) =
# (first base of the intron (1-based) - maximum spliced alignment overhang) -1 (to recalculate to
# 0 based system)
bed_file.loc[:, "sj_start"] = (sj_file.iloc[:, 1]) - (sj_file.iloc[:, 8]) - 1
# 3: SJ end (0-based) =
# (last base of the intron (1-based) + maximum spliced alignment overhang)
bed_file.loc[:, "sj_end"] = (sj_file.iloc[:, 2]) + (sj_file.iloc[:, 8])
# 4: SJ name, zero-padded so names sort lexicographically (JUNC0...)
rows_num_length = len(str(len(sj_file.index)))
bed_file.loc[:, "sj_name"] = (
    (sj_file.index + 1).astype(str).map(lambda x: "JUNC0" + x.zfill(rows_num_length))
)
# 5: score = number of uniquely and multi mapping reads crossing the junction
bed_file.loc[:, "score"] = sj_file.iloc[:, 6].values + sj_file.iloc[:, 7].values
# 6: strand = 0: '.' (undefined) , 1: '+', 2: '-
conditions = [sj_file.iloc[:, 3] == 0, sj_file.iloc[:, 3] == 1, sj_file.iloc[:, 3] == 2]
choices_strand = [".", "+", "-"]
bed_file.loc[:, "strand"] = np.select(conditions, choices_strand)
# 7: thick start is the same as SJ start
bed_file.loc[:, "thick_start"] = (sj_file.iloc[:, 1]) - (sj_file.iloc[:, 8]) - 1
# 8: thick end is the same as SJ end
bed_file.loc[:, "thick_end"] = (sj_file.iloc[:, 2]) + (sj_file.iloc[:, 8])
# 9: item RGB = 255,0,0 (red color) for '-' strand, 0,0,255 (blue color) for '+' strand
# and 0,0,0 (black) for undefined
choices_rgb = ["0,0,0", "0,0,255", "255,0,0"]
bed_file.loc[:, "item_rgb"] = np.select(conditions, choices_rgb)
# 10: block counts = 2
bed_file.loc[:, "block_counts"] = "2"
# 11: block sizes = maximum spliced alignment overhang, maximum spliced alignment overhang
bed_file.loc[:, "block_sizes"] = (
    (sj_file.iloc[:, 8]).astype(str) + "," + (sj_file.iloc[:, 8]).astype(str)
)
# 12: block starts (a comma-separated list of block starts, relative to SJ start)
# = 0, (SJ end - SJ start + maximum spliced alignment overhang +1 )
bed_file.loc[:, "block_starts"] = (
    "0"  # first block always starts at SJ start
    + ","
    + ((sj_file.iloc[:, 2]) - (sj_file.iloc[:, 1]) + (sj_file.iloc[:, 8]) + 1).astype(
        str
    )
)
# Write the unsorted BED12 output (extraction artifact removed from this line).
bed_file.to_csv("junctions_unsorted.bed", sep="\t", index=False, header=False)
"""Create gene expression profiles."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import multiprocessing
import os
import biox
# Command-line interface: the script expects a genome FASTA file, a GFF3
# annotation and the sequencing read length.
parser = argparse.ArgumentParser(description="Create gene expression profiles.")
parser.add_argument("genome_file", help="genome fasta file")
parser.add_argument("gff_file", help="gff3 file")
parser.add_argument("length", type=int, help="read length")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose output")
args = parser.parse_args()
# Validate inputs before doing any expensive work.
if not os.path.isfile(args.genome_file):
    raise ValueError("File {} does not exist".format(args.genome_file))
genome_name, genome_ext = os.path.splitext(args.genome_file)
if genome_ext != ".fasta":
    raise ValueError("Expected .fasta file got {}".format(args.genome_file))
if not os.path.isfile(args.gff_file):
    raise ValueError("File {} does not exist".format(args.gff_file))
if os.path.splitext(args.gff_file)[1] not in [".gtf", ".gff3"]:
    raise ValueError("Expected .gff file got {}".format(args.gff_file))
if args.verbose:
    biox.utils.verbosity(logging.INFO)
# Build a read-length tiling of the genome and map it back with Bowtie to
# measure mappability. set_m(1)/enable_v(0) presumably correspond to
# Bowtie's -m 1 (unique alignments only) and -v 0 (no mismatches) -- TODO
# confirm against the biox.map.Bowtie implementation.
fasta_file = "{}_mappability_{}.fasta".format(genome_name, args.length)
biox.data.prepare_fasta_mapability(args.genome_file, fasta_file, args.length)
b = biox.map.Bowtie()
b.set_m(1)
b.enable_v(0)
b.set_mode_fasta()
# Cap at two cores to keep resource usage predictable.
b.set_processors(min(multiprocessing.cpu_count(), 2))
b.map(
    "{}_index".format(genome_name),
    fasta_file,
    "{}_mappability_{}".format(genome_name, args.length),
    verbose=args.verbose,
)
# Convert the GFF3 annotation to a minimal GTF ("foo.gtf") that biox can
# consume. Gene-level (parent) records are cached in `parents` so that
# their name/description/type can be attached to CDS/exon child lines.
# NOTE(review): this relies on parent records appearing before their
# CDS/exon children in the GFF file; a KeyError is raised otherwise.
f_gtf = open("foo.gtf", "w")
parents = {}
with open(args.gff_file) as f:
    for line in f:
        if line.strip().startswith("#"):
            continue
        vals = line.split("\t")
        # The last GFF column holds ";"-separated key=value attributes.
        attrlist = vals[-1].strip().split(";")
        attrs = {}
        for att in attrlist:
            key, val = att.split("=")
            attrs[key] = val
        if vals[2] == "CDS" or vals[2] == "exon":
            gene_id = attrs["Parent"]
            # Re-emit the feature as a GTF "exon" line annotated with the
            # parent gene's metadata.
            f_gtf.write(
                "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                    vals[0],
                    "",
                    "exon",
                    vals[3],
                    vals[4],
                    ".",
                    vals[6],
                    ".",
                    'gene_id "{}"; gene_name "{}"; gene_description "{}"; gene_type "{}";'.format(
                        gene_id,
                        parents[gene_id]["name"],
                        parents[gene_id]["description"],
                        parents[gene_id]["gene_type"],
                    ),
                )
            )
        else:
            # Any non-CDS/exon record is treated as a potential parent.
            parents[attrs["ID"]] = {"gene_type": vals[2]}
            parents[attrs["ID"]]["name"] = (
                attrs["Name"] if "Name" in attrs else attrs["ID"]
            )
            parents[attrs["ID"]]["description"] = (
                attrs["Note"] if "Note" in attrs else ""
            )
f_gtf.close()
# Compute per-gene read coverage of the mappability alignment, then derive
# exon (coding) and full gene lengths from the GTF annotation.
results = biox.expression.gene_expression(
    "foo.gtf", "{}_mappability_{}.bam".format(genome_name, args.length)
)
gtf = biox.data.Gtf("foo.gtf")
coding_len = {}
gene_len = {}
# .items() instead of the Python-2-only .iteritems(); this also matches
# the sibling xexpression.py script.
for gene_id, gene in gtf.genes.items():
    coding = 0
    for feature in gene.features:
        if feature.type != "exon":
            continue
        # GTF coordinates are 1-based and inclusive, hence the +1.
        coding += feature.stop - feature.start + 1
    coding_len[gene_id] = coding
    gene_len[gene_id] = gene.stop - gene.start + 1
# Write the per-gene mappability table; `f` is closed right after this
# chunk. NOTE(review): a gene with no exon features would give
# coding_len == 0 and raise ZeroDivisionError below -- confirm whether
# such records can occur in the input annotation.
f = open("{}_mappability_{}.tab".format(genome_name, args.length), "wt")
header = ["gene_id", "coverage", "coding_len", "gene_len", "mapability"]
f.write("\t".join(header) + "\n")
# sorted() instead of `keys(); .sort()` -- dict views have no .sort()
# method on Python 3.
for gene_id in sorted(results):
    coverage = results[gene_id]
    row = [
        gene_id,
        coverage,
        coding_len[gene_id],
        gene_len[gene_id],
        "%.3f" % (float(coverage) / coding_len[gene_id]),
    ]
    f.write("\t".join(str(x) for x in row) + "\n")
f.close() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/mappability.py | 0.63023 | 0.208904 | mappability.py | pypi |
"""Create gene expression profiles."""
from __future__ import absolute_import, division, print_function
import argparse
import gzip
import logging
import math
import os
import biox
# Command-line interface.
parser = argparse.ArgumentParser(description="Create gene expression profiles.")
parser.add_argument("gff_file", help="GFF file")
parser.add_argument("bam_file", help="aligned BAM file")
parser.add_argument(
    "--rc", action="store_true", help="reads uniquely mapped to gene exons"
)
parser.add_argument("--rpkm", action="store_true", help="reads scaled by exon length")
parser.add_argument(
    "--rpkum", help="reads scaled by uniquely mappable part of exons <mappability_file>"
)
parser.add_argument(
    "--mrna", action="store_true", help="scale with reads that map to polyA transcripts"
)
parser.add_argument(
    "--ncrna",
    action="store_true",
    help="Exclude reads that map to chrR from scaling factor",
)
parser.add_argument("--stranded", action="store_true", help="Stranded library type")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose output")
args = parser.parse_args()
# Fail early on unexpected file extensions.
if os.path.splitext(args.bam_file)[1] != ".bam":
    raise ValueError("Expected .bam file, got {}.".format(args.bam_file))
if os.path.splitext(args.gff_file)[1] not in [".gff", ".gff3"]:
    raise ValueError("Expected .gff file, got {}.".format(args.gff_file))
if args.verbose:
    biox.utils.verbosity(logging.INFO)
gff_file = args.gff_file
gtf_file = "foo.gtf"  # intermediate GTF produced below
bam_file = args.bam_file
# Output file names get a "_polya" suffix when only mRNA genes are counted.
suffix = "_polya" if args.mrna else ""
# Convert the GFF3 annotation into a minimal GTF that biox can consume.
# Gene-level (parent) records are cached in `parents` so that their
# name/description/type can be attached to CDS/exon child lines.
# NOTE(review): this relies on parent records appearing before their
# CDS/exon children in the GFF file; a KeyError is raised otherwise.
f_gtf = open(gtf_file, "w")
parents = {}
with open(gff_file) as f:
    for line in f:
        if line.strip().startswith("#"):
            continue
        vals = line.split("\t")
        # The last GFF column holds ";"-separated key=value attributes.
        attrlist = vals[-1].strip().split(";")
        attrs = {}
        for att in attrlist:
            key, val = att.split("=")
            attrs[key] = val
        if vals[2] == "CDS" or vals[2] == "exon":
            gene_id = attrs["Parent"]
            # Re-emit the feature as a GTF "exon" line annotated with the
            # parent gene's metadata.
            f_gtf.write(
                "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                    vals[0],
                    "",
                    "exon",
                    vals[3],
                    vals[4],
                    ".",
                    vals[6],
                    ".",
                    'gene_id "{}"; gene_name "{}"; gene_description "{}"; gene_type "{}";'.format(
                        gene_id,
                        parents[gene_id]["name"],
                        parents[gene_id]["description"],
                        parents[gene_id]["gene_type"],
                    ),
                )
            )
        else:
            # Any non-CDS/exon record is treated as a potential parent.
            parents[attrs["ID"]] = {"gene_type": vals[2]}
            parents[attrs["ID"]]["name"] = (
                attrs["Name"] if "Name" in attrs else attrs["ID"]
            )
            parents[attrs["ID"]]["description"] = (
                attrs["Note"] if "Note" in attrs else ""
            )
f_gtf.close()
gtf = biox.data.Gtf(gtf_file)
# With --mrna, restrict counting to polyA (mRNA) genes only.
genes = None
if args.mrna:
    genes = set()
    for gene_id, gene in gtf.genes.items():
        # gene_type may carry stray quotes inherited from the GFF column.
        if gene.attrs["gene_type"] == "mRNA" or gene.attrs["gene_type"] == "'mRNA'":
            genes.add(gene_id)
def gene_expression_overlap_stranded(gtf_file, bam_file, quality=30):
    """Count reads overlapping gene exons in a strand-aware manner.

    For every exon, alignments are counted with ``samtools view -c`` so
    that only mates consistent with the gene's strand (standard Illumina
    dUTP library orientation) contribute. Returns a dict mapping
    gene_id -> summed exon read count.

    ``quality`` is the minimum mapping quality passed to samtools.
    Fix: previously the parameter was accepted but a hard-coded 30 was
    substituted into every samtools command.
    """

    def count_reads(flags, region):
        """Return the number of alignments in `region` passing `flags`."""
        command = "{samtools} view {flags} -q {quality} -c {bam_file} {region}".format(
            samtools=os.path.join(biox.samtools_folder, "samtools"),
            flags=flags,
            quality=quality,
            bam_file=bam_file,
            region=region,
        )
        output, _ = biox.utils.cmd(command)
        # samtools may produce empty output; treat it as zero reads.
        return int(output) if output != "" else 0

    gtf = biox.data.Gtf(gtf_file)
    genes_exp = {gene_id: 0 for gene_id in gtf.genes}
    current = 0
    for gene_id, gene in gtf.genes.items():
        current += 1
        if current % 300 == 0:
            # Progress indicator: fraction of genes processed so far.
            print("%.2f" % (float(current) / len(gtf.genes)), bam_file)
        for feature in gene.features:
            if feature.type != "exon":
                continue
            assert feature.start <= feature.stop
            region = "{chr}:{start}-{stop}".format(
                chr=gene.chr, start=feature.start, stop=feature.stop
            )
            if gene.strand == "-":
                # For a std. Illumina library prep these count reads in the
                # "plus" (sequencing) orientation of a minus-strand gene:
                # second in pair mapped to the forward strand (-f 128 -F 16)
                # plus the "-F 80" filter.
                # NOTE(review): the historical comment said "first in pair
                # on the reverse strand", which would be "-f 80"; the
                # original "-F 80" filter (exclude first-in-pair and
                # reverse-strand reads) is preserved here -- confirm intent.
                count = count_reads("-f 128 -F 16", region) + count_reads(
                    "-F 80", region
                )
            else:
                # Second in pair mapped to the reverse strand (-f 144) plus
                # first in pair mapped to the forward strand (-f 64 -F 16).
                count = count_reads("-f 144", region) + count_reads(
                    "-f 64 -F 16", region
                )
            genes_exp[gene_id] += count
    return genes_exp
# Choose the counting strategy: the stranded variant counts mates on the
# correct strand per exon; otherwise biox's generic overlap counter is
# used (optionally restricted to the polyA/mRNA gene set built above).
print("Expression profile overlap...")
if args.stranded:
    results = gene_expression_overlap_stranded(gtf_file, bam_file)
else:
    results = biox.expression.gene_expression_overlap(gtf_file, bam_file, genes=genes)
def save_expression_profile(file_name, exp=results.get):
    """Save one gene-expression profile as a gzipped two-column TSV.

    ``exp`` maps a gene id to its expression value; by default the raw
    counts from the module-level ``results`` dict are used. Genes are
    written in sorted order, one per line, after a header row.
    """
    # "wt" (text mode): gzip files opened "wb" reject str on Python 3.
    with gzip.open(file_name, "wt") as f:
        f.write("Gene\tExpression\n")
        # sorted() -- dict views have no .sort() method on Python 3.
        for gene_id in sorted(results):
            f.write("{}\t{}\n".format(gene_id, str(exp(gene_id))))
if args.rc:
    print("Writing read counts...")
    # Raw per-gene read counts (uses the default exp=results.get).
    save_expression_profile("expression_rc{}.tab.gz".format(suffix))
if args.rpkm or args.rpkum:
    # Determine the scaling factor N (effective library size).
    # Fix: previously the "all mapped reads" count computed for the
    # default (neither --mrna nor --ncrna) case was unconditionally
    # overwritten by `N = sum(results.values())`, because the final
    # `else` was paired with `if args.ncrna` -- the samtools result was
    # dead code. Restructured as if/elif/else so each mode keeps its
    # intended value.
    if args.ncrna:
        # Effective library size excludes reads mapping to chrR.
        command = "{samtools} view -F 4 -q {quality} -c {bam_file}".format(
            samtools=os.path.join(biox.samtools_folder, "samtools"),
            bam_file=bam_file,
            quality=30,
        )
        output, error = biox.utils.cmd(command)
        all_reads = int(output)
        print("Number of all reads: ", all_reads)
        command = "{samtools} view -F 4 -q {quality} -c {bam_file} chrR".format(
            samtools=os.path.join(biox.samtools_folder, "samtools"),
            bam_file=bam_file,
            quality=30,
        )
        output, error = biox.utils.cmd(command)
        chrR_reads = int(output)
        print("Number of reads that map to chrR: ", chrR_reads)
        N = all_reads - chrR_reads
        print("Effective library size: ", N)
    elif args.mrna:
        # polyA mode: scale by reads assigned to the counted genes only.
        N = sum(results.values())
    else:
        # All mapped reads with mapping quality >= 30 (-F 4: exclude
        # unmapped reads).
        command = "{samtools} view -F 4 -q {quality} -c {bam_file}".format(
            samtools=os.path.join(biox.samtools_folder, "samtools"),
            bam_file=bam_file,
            quality=30,
        )
        output, error = biox.utils.cmd(command)
        N = int(output)
if args.rpkm:
    print("Writing RPKM...")
    # Sum exon lengths per gene (GTF coordinates are 1-based, inclusive).
    gene_exon_lens = {}
    for gene_id, gene in gtf.genes.items():
        exon_len = 0
        for feature in gene.features:
            if feature.type != "exon":
                continue
            exon_len += feature.stop - feature.start + 1
        gene_exon_lens[gene_id] = exon_len
    def exp(gene_id):
        """Compute RPKM: reads per kilobase of exon per million mapped reads."""
        return (math.pow(10, 9) * results[gene_id]) / (N * gene_exon_lens[gene_id])
    save_expression_profile("expression_rpkm{}.tab.gz".format(suffix), exp)
if args.rpkum:
    print("Processing RPKUM...")
    # Load per-gene mappability (the uniquely mappable portion of each
    # gene's exons) from the table produced by mappability.py and passed
    # via --rpkum.
    mapability_file = args.rpkum
    data_mapability = {}
    f = biox.data.TabReader(mapability_file)
    while f.readline():
        data_mapability[f.gene_id] = f.coverage
    def exp_rpkum(gene_id):
        """Compute RPKUM; genes with zero mappable length get expression 0."""
        if data_mapability[gene_id] == 0:
            return 0
        else:
            return (math.pow(10, 9) * results[gene_id]) / (N * data_mapability[gene_id])
save_expression_profile("expression_rpkum{}.tab.gz".format(suffix), exp_rpkum) | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/xexpression.py | 0.639511 | 0.210563 | xexpression.py | pypi |
"""Validate input FASTQ file(s)."""
import argparse
import collections
from os.path import basename, isfile
import dnaio
from dnaio.exceptions import FastqFormatError, FileFormatError
from resolwe_runtime_utils import error, send_message
SUPPORTED_EXTENSIONS = (
".fastq",
".fastq.gz",
".fq",
".fq.gz",
)
def parse_arguments():
    """Build and evaluate the command-line parser for FASTQ validation."""
    arg_parser = argparse.ArgumentParser(
        description="Validate single/paired-end FASTQ files."
    )
    arg_parser.add_argument(
        "-fq", nargs="+", type=str, required=True, help="FASTQ file(s)."
    )
    arg_parser.add_argument(
        "-fq2",
        nargs="+",
        type=str,
        required=False,
        help="Second-in-pair FASTQ file(s).",
    )
    return arg_parser.parse_args()
def set_error(msg):
    """Report *msg* to the Resolwe listener and abort with ValueError."""
    payload = error(msg)
    send_message(payload)
    raise ValueError(msg)
def main():
    """Invoke when run directly as a program.

    Validation steps, in order: every input file must exist and carry a
    supported FASTQ extension; file names must be unique; paired inputs
    must come in matching lane counts; and each file (or mate pair) must
    parse as FASTQ via dnaio. Any violation is reported through
    ``set_error``, which raises ValueError.
    """
    args = parse_arguments()
    # Lanes from both mates go through the same per-file checks.
    if args.fq2:
        input_fastq = args.fq + args.fq2
    else:
        input_fastq = args.fq
    for fq_file in input_fastq:
        if not isfile(fq_file):
            set_error("Input file {} does not exist".format(basename(fq_file)))
        if not fq_file.lower().endswith(SUPPORTED_EXTENSIONS):
            set_error(
                "Unrecognized file name extension in file {}. "
                "Supported file name extensions are {}.".format(
                    fq_file, SUPPORTED_EXTENSIONS
                )
            )
    # Reduce the probability of uploading the FASTQ files with the same
    # content multiple times (as multiple lanes or mates).
    if len(set(input_fastq)) != len(input_fastq):
        seen_files = [
            item
            for item, count in collections.Counter(input_fastq).items()
            if count > 1
        ]
        set_error("Non-unique input file names detected: {}.".format(seen_files))
    if args.fq2 and len(args.fq) != len(args.fq2):
        set_error(
            "The number of mate-pair files in split-lane samples must match. "
            "{} and {} input files were given for the -fq and -fq2 inputs, "
            "respectively.".format(len(args.fq), len(args.fq2))
        )
    if args.fq2:
        for mate1, mate2 in zip(args.fq, args.fq2):
            try:
                paired_reads = dnaio.open(mate1, file2=mate2, fileformat="fastq")
                # any() consumes the first record pair: empty inputs fail here.
                if not any(paired_reads):
                    set_error(
                        "Mate-pair files {} and {} contain no read sequences.".format(
                            basename(mate1), basename(mate2)
                        )
                    )
                else:
                    # Drain the iterator so dnaio parses every remaining
                    # record; malformed records raise the errors caught below.
                    for read in paired_reads:
                        continue
                    print(
                        "Successfully validated mate-pair files {} and {}.".format(
                            basename(mate1), basename(mate2)
                        )
                    )
            except (FastqFormatError, FileFormatError) as dnaio_error:
                set_error(
                    "Format error in mate-pairs {} and {}. {}".format(
                        basename(mate1), basename(mate2), str(dnaio_error)
                    )
                )
    else:
        for fq in args.fq:
            try:
                reads = dnaio.open(fq, fileformat="fastq")
                # any() consumes the first record: empty files fail here.
                if not any(reads):
                    set_error(
                        "Input file {} contains no read sequences.".format(basename(fq))
                    )
                else:
                    # Iterate to the end to force full-file validation.
                    for read in reads:
                        continue
                    print("Successfully validated reads file {}.".format(basename(fq)))
            except (FastqFormatError, FileFormatError) as dnaio_error:
                set_error("Error in file {}. {}".format(basename(fq), str(dnaio_error)))
if __name__ == "__main__":
main() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/validate_fastq.py | 0.61832 | 0.21794 | validate_fastq.py | pypi |
"""Create gene set table."""
import argparse
import re
from pathlib import Path
import pandas as pd
from resolwe_runtime_utils import send_message, warning
def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Create gene sets from DGE results table."
    )
    # Required string options, declared as (flag, help-text) pairs.
    required_options = (
        ("--dge_file", "Differential expressions file."),
        ("--out_dir", "Output directory."),
        ("--analysis_name", "Analysis name."),
        ("--tool", "Tool name."),
    )
    for flag, help_text in required_options:
        parser.add_argument(flag, type=str, required=True, help=help_text)
    parser.add_argument("--logfc", type=float, default=1, help="LogFC threshold.")
    parser.add_argument("--fdr", type=float, default=0.05, help="FDR threshold")
    return parser.parse_args()
def save_genes(data_frame, outfname):
    """Write the ``gene_id`` column of *data_frame* to a gzipped file.

    One gene identifier per line, without header or index. Returns the
    output file name.
    """
    gene_ids = data_frame["gene_id"]
    gene_ids.to_csv(
        outfname,
        header=False,
        index=False,
        sep="\n",
        compression="gzip",
    )
    return outfname
def generate_name(analysis_name, tool_name, logfc=1, fdr=0.05):
    """Build a filesystem-safe base name for gene-set files."""

    def sanitize(text):
        # Drop surrounding whitespace, turn inner spaces into underscores
        # and strip every character outside word chars, '-' and '.'.
        underscored = text.strip().replace(" ", "_")
        return re.sub(r"[^-\w.]", "", underscored)

    return "{}_{}_logFC{}_FDR{}".format(
        sanitize(analysis_name), sanitize(tool_name), logfc, fdr
    )
def create_gene_sets(exp_file, logfc=1, fdr=0.05):
    """Split a DGE results table into up/down/all gene sets.

    Up-regulated genes satisfy ``logfc > threshold`` with ``fdr < cutoff``;
    down-regulated genes satisfy ``logfc < -threshold`` with
    ``fdr < cutoff``. The "all" set is the unfiltered table.
    """
    table = pd.read_csv(
        filepath_or_buffer=exp_file,
        sep="\t",
        header=0,
        compression="gzip",
        keep_default_na=False,
    )
    significant = table["fdr"] < fdr
    return {
        "up": table.loc[(table["logfc"] > logfc) & significant],
        "down": table.loc[(table["logfc"] < -logfc) & significant],
        "all": table,
    }
def main():
    """Script entry point: write gene-set files for up/down/all genes."""
    args = parse_arguments()
    gene_sets = create_gene_sets(args.dge_file, args.logfc, args.fdr)
    prefix = generate_name(args.analysis_name, args.tool, args.logfc, args.fdr)
    output_dir = Path(args.out_dir)
    if not output_dir.exists():
        output_dir.mkdir()
    for set_name, table in gene_sets.items():
        if table.empty:
            # An empty gene set is reported as a warning, not an error.
            send_message(
                warning(f"No {set_name}-regulated genes. Gene set was not created.")
            )
        else:
            save_genes(table, output_dir / f"{prefix}_{set_name}.tab.gz")
if __name__ == "__main__":
main() | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/tools/create_gene_sets.py | 0.74055 | 0.172869 | create_gene_sets.py | pypi |
===================
Resolwe Flow Design
===================
The Resolwe Flow workflow engine comprises the execution framework and other
layers which make up the internal data model and facilitate dependency
resolution, permissions enforcement and process filtering.
Overview
========
Execution Framework
-------------------
Flow consists of a number of active services which need to be running before job
execution can proceed.
The core message transport and coordination facility, as currently used, is
`Redis`_. It serves as the central status hub for keeping track of shared
dynamic information used by parts of the framework, and as a contact point for
those parts of the framework that run remotely. These connect to well-known
'channels' (specially named Redis list objects), into which they can deposit
JSON-formatted messages and commands.
.. _Redis: https://redis.io
Flow's execution manager, or just the 'manager', is an independent service which
runs as a `Django Channels`_ event consumer. When objects are added to the
database to be executed, they will trigger events for the appropriate channels.
These will be processed by the manager, which will carry out all the preparatory
tasks necessary to start execution and then communicate with a concrete workload
management system so that the job can eventually be scheduled and run on a
worker node.
.. _Django Channels: https://github.com/django/channels
Finally, the jobs are executed by the aptly named 'executors'. These are run on
worker nodes and act as local execution managers: preparing a controlled
execution environment, running the job's code, collecting results and
communicating them back to the manager which stores them in the database.
Utility Layers
--------------
Django's facilities are used for interfacing with the database, thus all models
used in Flow are actually Django Model objects. The most important two models
are the `Data` model and the `Process` model.
A Data object represents a single instance of data to be processed, i.e. a node
in the flow graph being executed. It contains properties which mainly concern
execution, such as various process and task IDs, output statuses and the results
produced by executors.
A Process object represents the way in which its Data object will be 'executed',
i.e. the type of node in the flow graph and the associated code. It contains
properties defining its relationship to other nodes in the graph currently being
executed, the permissions that control access rights for users and other
processes, and the actual code that is run by the executors.
The code in the process object can be either final code that is already ready
for execution, or it can be a form of template, for which an 'expression engine'
is needed. An expression engine (the only one currently in use is `Jinja`_)
pre-processes the process' code to produce text that can then be executed by an
'execution engine'.
.. _Jinja: http://jinja.pocoo.org
An execution engine is, simply put, the interpreter that will run the processed
code, just after an additional metadata discovery step. It is done by the
execution engine because the encoding might be language-dependent. The
properties to be discovered include process resource limits, secret
requirements, etc. These properties are passed on to the executor, so that it
can properly set up the target environment. The only currently supported
execution engine is Bash.
Technicalities
==============
The Manager
-----------
Being a Django Channels consumer application, the Flow Manager is entirely
event-driven and mostly contextless. The main input events are data object
creation, processing termination and intermediate executor messages. Once run,
it consists of two distinct servers and a modularized connection framework used
to interface with workload managers used by the deployment site.
Dispatcher
^^^^^^^^^^
This is the central job scheduler. On receipt of an appropriate event through
Django Channels (in this service, only data object creation and processing
termination), the dispatcher will scan the database for outstanding data
objects. For each object found to still not be processed, dependencies will be
calculated and scanned for completion. If all the requirements are satisfied,
its execution cycle will commence. The manager-side of this cycle entails job
pre-processing and a part of the environment preparation steps:
- The data object's process is loaded, its code preprocessed with the
configured expression engine and the result of that fed into the selected
execution engine to discover further details about the process' environmental
requirements (resource limits).
- The runtime directories on the global shared file system are prepared: file
dependencies are copied out from the database, the process' processed code
(as output by the expression engine) is stored into a file that the executor
will run.
- The executor platform is created by copying the Flow Executor source code to
the destination (per-data) directories on the shared file system, along with
serialized (JSON) settings and support metadata (file lists, directory paths,
Docker configuration and other information the configured executor will need
for execution).
- After all this is done, control is handed over to the configured 'workload
connector', see below for a description.
Listener
^^^^^^^^
As the name might imply to some, the purpose of the listener is to listen for
status updates and error reports sent by executors. The service itself is
an independent (`i.e.` not Django Channels-based) process which waits for events to
arrive on the executor contact point channels in Redis.
The events are JSON-formatted messages and include:
- processing status updates, such as execution progress and any computed output
values,
- spawn commands, with which a process can request the creation of new data
objects,
- execution termination, upon which the listener will finalize the Data object
in question: delete temporary files from the global shared file system, update
process exit code fields in the database, store the process' standard output
and standard error sent by the executor and notify the dispatcher about the
termination, so that any state internal to it may be updated properly,
- ancillary status updates from the executor, such as logging. Because executors
are running remotely with respect to the manager's host machine, they may not
have access to any centralized logging infrastructure, so the listener is used
as a proxy.
Workload Connectors
^^^^^^^^^^^^^^^^^^^
Workload connectors are thin glue libraries which communicate with the concrete
workload managers used on the deployment site. The dispatcher only contains
logic to prepare execution environments and generate the command line necessary
to kick off an executor instance. The purpose of the workload connector is to
submit that command line to the workload manager which will then execute it on
one of its worker nodes. The currently supported workload managers are
`Celery`_, `SLURM`_ and a simple local dummy for test environments.
.. _Celery: http://www.celeryproject.org
.. _SLURM: https://slurm.schedmd.com
The Executor
------------
The Flow Executor is the program that controls Process execution on a worker
node managed by the site workload manager, for which it is a job. Depending on
the configured executor, it further prepares an execution environment,
configures runtime limitations enforced by the system and spawns the code in the
Process object. The currently supported executor types are a simple local
executor for testing deployments and a `Docker`_-based one.
.. _Docker: https://www.docker.com
Once started, the executor will carry out any additional preparation based on
its type (`e.g.` the Docker executor constructs a command line to create an
instance of a pre-prepared Docker container, with all necessary file system
mappings and communication conduits). After that, it executes the Process code
as prepared by the manager, by running a command to start it (this need not be
anything more complicated than a simple `subprocess.Popen`).
Following this, the executor acts as a proxy between the process and the
database by relaying messages generated by the process to the manager-side
listener. When the process is finished (or when it terminates abnormally), the
executor will send a final cleanup message and terminate, thus finishing the job
from the point of view of the workload manager.
Example Execution, from Start to Finish
=======================================
- Flow services are started: the dispatcher Django Channels application and the
listener process.
- The user, through any applicable intricacy, creates a Data object.
- Django signals will fire on creation and submit a data scan event to the
dispatcher through Django Channels.
- The dispatcher will scan the database for outstanding data objects
(alternatively, only for a specific one, given an ID). The following steps are
then performed for each discovered data object whose dependencies are all
processed:
- The runtime directory is populated with data files, executor source and
configuration files.
- The process code template is run through an expression engine to transform it
into executable text. This is also scanned with an execution engine to
discover runtime resource limits and other process-local configuration.
- A command line is generated which can be run on a processing node to start an
executor.
- The command line is handed over to a workload connector, which submits it as a
job to the workload manager installed on the site.
- At this point, the dispatcher's job for this data object is done. Eventually,
the workload manager will start processing the submitted job, thereby spawning
an executor.
- The executor will prepare a safe runtime context, such as a Docker container,
configure it with appropriate communication channels (stdin/out redirection or
sockets) and run the command to execute the process code.
- The code executes, periodically generating status update messages. These are
received by the executor and re-sent to the listener. The listener responds
appropriately, updating database fields for the data object, notifying the
dispatcher about lifetime events or forwarding log messages to any configured
infrastructure.
- Once the process is done, the executor will send a finalizing command to the
listener and terminate.
- The listener will notify the dispatcher about the termination and finalize the
database status of this data object (processing exit code, outputs).
- The dispatcher will update processing states and statistics, and re-scan the
database for data objects which might have dependencies on the one that just
finished and could therefore potentially be started up.
| /resolwe-36.0.0a2.tar.gz/resolwe-36.0.0a2/docs/flow.rst | 0.929899 | 0.809163 | flow.rst | pypi |
=================
Writing processes
=================
Process is a central building block of the Resolwe's dataflow. Formally, a
process is an algorithm that transforms inputs to outputs. For example, a `Word
Count` process would take a text file as input and report the number of words
on the output.
.. figure:: images/proc_01.png
`Word Count` process with input ``doc`` of type ``basic:file`` and output
``words`` of type ``basic:integer``.
When you execute the process, Resolwe creates a new ``Data`` object with
information about the process instance. In this case the `document` and the
`words` would be saved to the same ``Data`` object. What if you would like
to execute another analysis on the same document, say count the number of
lines? We could create a similar process `Number of Lines` that would also take
the file and report the number of lines. However, when we would execute the
process we would have 2 copies of the same `document` file stored on the
platform. In most cases it makes sense to split the upload (data storage) from
the analysis. For example, we could create 3 processes: `Upload Document`,
`Word Count` and `Number of Lines`.
.. figure:: images/proc_02_flow.png
Separate the data storage (`Upload Document`) and analysis (`Word Count`,
`Number of Lines`). Notice that the `Word Count` and `Number of Lines`
processes accept ``Data`` objects of type ``data:doc``---the type of the
`Upload Document` process.
Resolwe handles the execution of the dataflow automatically. If you were to
execute all three processes at the same time, Resolwe would delay the execution
of `Word Count` and `Number of Lines` until the completion of `Upload
Document`. Resolwe resolves dependencies between processes.
A process is defined by:
- Inputs
- Outputs
- Meta-data
- Algorithm
Processes are stored in the database in the ``Process`` model. A process'
algorithm runs automatically when you create a new ``Data`` object. The inputs
and the process name are required at ``Data`` create, the outputs are saved by
the algorithm, and users can update the meta-data at any time. The
:ref:`process-syntax` chapter explains how to add a process definition to the
``Process`` database model.
Processes can be chained into a dataflow. Each process is assigned a type
(`e.g.,` ``data:wc``). The ``Data`` object created by a process is implicitly
assigned a type of that process. When you define a new process, you can specify
which data types are required on the input. In the figure below, the `Word
Count` process accepts ``Data`` objects of type ``data:doc`` on the input.
Types are hierarchical with each level of the hierarchy separated by a colon.
For instance, ``data:doc:text`` would be a sub-type of ``data:doc``. A process
that accepts ``Data`` objects of type ``data:doc``, also accepts ``Data``
objects of type ``data:doc:text``. However, a process that accepts ``Data``
objects of type ``data:doc:text``, does not accept ``Data`` objects of type
``data:doc``.
.. figure:: images/proc_03_types.png
Types are hierarchical. When you define the type on the input, keep in mind
that the process should also handle all sub-types.
.. _process-syntax:
Process syntax
==============
A process can be written in any syntax as long as you can save it to the
``Process`` model. The most straight-forward would be to write in Python, using
the Django ORM::
p = Process(name='Word Count',
slug='wc-basic',
type='data:wc:',
inputs = [{
'name': 'document',
'type': 'basic:file:'
}],
outputs = [{
'name': 'words',
'type': 'basic:integer:'
}],
run = {
'bash': 'WORDS=`wc {{ document.file }}\n`' +
'echo {"words": $WORDS}'
})
p.save()
We suggest to write processes in the YAML syntax. Resolwe includes a
``register`` Django command that parses .yml files in the ``processes``
directory and adds the discovered processes to the ``Process`` model::
./manage.py register
Do not forget to re-register the process after you make changes to the .yml
file. You have to increase the process version each time you register it. For
development, you can use the ``--force`` option (or ``-f`` for short)::
./manage.py register -f
This is an example of :download:`the smallest processor
<example/example/processes/minimal.yml>` in YAML syntax:
.. literalinclude:: example/example/processes/minimal.yml
:language: yaml
:linenos:
This is the example of the :download:`basic Word Count
<example/example/processes/example_basic.yml>` implementation in the YAML
syntax (with the document file as input):
.. literalinclude:: example/example/processes/example_basic.yml
:language: yaml
:linenos:
If you would like to review the examples of the three processes mentioned above
(`Upload Document`, `Word Count` and `Number of Lines`), :download:`follow this
link <example/example/processes/example.yml>`. Read more about the process
options in :ref:`process-schema` below.
.. _process-schema:
Process schema
==============
Process is defined by a set of fields in the ``Process`` model. We will
describe how to write the process schema in YAML syntax. Some fields in the
YAML syntax have different name or values than the actual fields in the
``Process`` model. See an :download:`example of a process with all fields
<example/example/processes/all_fields.yml>`. Fields in a process schema:
================================ ===================== ======== ==============
Field Short description Required Default
================================ ===================== ======== ==============
:ref:`slug <slug>` unique id required
:ref:`name <name>` human readable name required
:ref:`description <description>` detailed description optional <empty string>
:ref:`version <version>` version numbering optional
:ref:`type <type>` data type required
:ref:`category <category>` menu category optional <empty string>
:ref:`entity <entity>` automatic grouping optional
:ref:`persistence <persistence>` storage optimization optional RAW
:ref:`scheduling_class <sch>` scheduling class optional batch
:ref:`input <io>` list of input fields optional <empty list>
:ref:`output <io>` list of result fields optional <empty list>
:ref:`run <run>` the algorithm required
:ref:`requirements <reqs>` requirements optional <empty dict>
================================ ===================== ======== ==============
.. _slug:
Slug
----
TODO
.. _name:
Name
----
TODO
.. _description:
Description
-----------
TODO
.. _version:
Version
-------
TODO
.. _type:
Type
----
TODO
.. _category:
Category
--------
The category is used to arrange processes in a GUI. A category can be any
string of lowercase letters, numbers, - and :. The colon is used to split
categories into sub-categories (`e.g.,` ``analyses:alignment``).
We have predefined three top categories: upload, import and analyses. Processes
without this top category will not be displayed in the GenBoard interface, but
will be available on the platform.
.. _entity:
Entity
------
With defining the ``entity`` field in the process, new data objects will be
automatically attached to a new or existing Entity, depending on its parents
and the definition of the field.
``entity`` field has 3 subfields:
* ``type`` is required and defines the type of entity that the new ``Data``
object is attached to
* ``input`` limits the group of parents' entities to a single field (dot
separated path to the field in the definition of input)
* ``descriptor_schema`` specifies the slug of the descriptor schema that is
attached to newly created entity. It defaults to the value of ``type``
.. _persistence:
Persistence
-----------
Use RAW for imports. CACHED or TMP processes should be idempotent.
.. _sch:
Scheduling class
----------------
The scheduling class specifies how the process should be treated by the
scheduler. There are two possible values:
* ``batch`` is for long running tasks, which require high throughput.
* ``interactive`` is for short running tasks, which require low latency.
Processes in this scheduling class are given a limited amount of time
to execute (default: 30 seconds).
The default value for processes is ``batch``.
.. _io:
Input and Output
----------------
A list of `Resolwe Fields` that define the inputs and outputs of a process. A
`Resolwe Field` is defined as a dictionary of the following properties:
Required `Resolwe Field` properties:
- ``name`` - unique name of the field
- ``label`` - human readable name
- ``type`` - type of field (either ``basic:<...>`` or ``data:<...>``)
Optional `Resolwe Field` properties (except for ``group``):
- ``description`` - displayed under titles or as a tooltip
- ``required`` - (choices: `true`, `false`)
- ``disabled`` - (choices: `true`, `false`)
- ``hidden`` - (choices: `true`, `false`)
- ``default`` - initial value
- ``placeholder`` - placeholder value displayed if nothing is specified
- ``validate_regex`` - client-side validation with regular expression
- ``choices`` - list of choices to select from (``label``, ``value`` pairs)
Optional `Resolwe Field` properties for ``group`` fields:
- ``description`` - displayed under titles or as a tooltip
- ``disabled`` - (choices: `true`, `false`)
- ``hidden`` - (choices: `true`, `false`)
- ``collapsed`` - (choices: `true`, `false`)
- ``group`` - list of process fields
TODO: explain what is field schema. For field schema details see
fieldSchema.json.
.. _run:
Run
---
The algorithm that transforms inputs into outputs. Bash and workflow languages
are currently supported and we envision more language support in the future (`e.g.,`
directly writing processes in Python or R). Commands should be written to a
``program`` subfield.
TODO: link a few lines from the all_fields.yml process
.. _reqs:
Requirements
------------
A dictionary defining optional features that should be available in order for the process
to run. There are several different types of requirements that may be specified:
- ``expression-engine`` defines the name of the engine that should be used to evaluate
expressions embedded in the ``run`` section. Currently, only the ``jinja`` expression
engine is supported. By default no expression engine is set, so expressions cannot be
used and will be ignored.
- ``executor`` defines executor-specific options. The value should be a dictionary,
where each key defines requirements for a specific executor. The following executor
requirements are available:
- ``docker``:
- ``image`` defines the name of the Docker container image that the process should
run under.
- ``resources`` define resources that should be made available to the process. The
following resources may be requested:
- ``cores`` defines the number of CPU cores available to the process. By default, this
value is set to ``1`` core.
- ``memory`` defines the amount of memory (in megabytes) that the process may use. By
default, this value is set to ``4096`` MiB.
- ``network`` should be a boolean value, specifying whether the process requires network
access. By default this value is ``false``.
Types
=====
Types are defined for processes and `Resolwe Fields`. ``Data`` objects have
implicitly defined types, based on the corresponding processor. Types define
the type of objects that are passed as inputs to the process or saved as
outputs of the process. Resolwe uses 2 kinds of types:
- ``basic:``
- ``data:``
``Basic:`` types are defined by Resolwe and represent the data building blocks.
``Data:`` types are defined by processes. In terms of programming languages you
could think of ``basic:`` as primitive types (like integer, float or boolean)
and of ``data:`` types as classes.
Resolwe matches inputs based on the type. Types are hierarchical, so the same
or more specific inputs are matched. For example:
- ``data:genome:fasta:`` will match the ``data:genome:`` input, but
- ``data:genome:`` will not match the ``data:genome:fasta:`` input.
.. note::
Types in a process schema do not have to end with a colon. The last colon
can be omitted for readability and is added automatically by Resolwe.
Basic types
-----------
Basic types are entered by the user. Resolwe implements the backend handling
(storage and retrieval) of basic types and GenBoard supports the HTML5
controls.
The following basic types are supported:
- ``basic:boolean:`` - boolean
- ``basic:date:`` - date (format `yyyy-mm-dd`)
- ``basic:datetime:`` - date and time (format `yyyy-mm-dd hh:mm:ss`)
- ``basic:decimal:`` - decimal number (`e.g.,` `-123.345`)
- ``basic:integer:`` - whole number (`e.g.,` `-123`)
- ``basic:string:`` - short string
- ``basic:text:`` - multi-line string
- ``basic:url:link:`` - visit link
- ``basic:url:download:`` - download link
- ``basic:url:view:`` - view link (in a popup or iframe)
- ``basic:file:`` - a file, stored on shared file system
- ``basic:dir:`` - a directory, stored on shared file system
- ``basic:json:`` - a JSON object, stored in MongoDB collection
- ``basic:group:`` - list of form fields (default if nothing specified)
The values of basic data types are different for each type, for example:
``basic:file:`` data type is a JSON dictionary: {"file": "file name"}
``basic:dir:`` data type is a JSON dictionary: {"dir": "directory name"}
``basic:string:`` data type is just a JSON string
Resolwe treats types differently. All but ``basic:file:``,
``basic:dir:`` and ``basic:json:`` are treated as meta-data.
``basic:file:`` and ``basic:dir:`` objects are saved to the shared
file storage, and ``basic:json:`` objects are stored in PostgreSQL
bjson field. Meta-data entries have references to ``basic:file:``,
``basic:dir:`` and ``basic:json:`` objects.
Data types
----------
``Data`` types are defined by processes. Each process is itself a ``data:``
sub-type named with the ``type`` attribute. A ``data:`` sub-type is defined by
a list of process outputs. All processes of the same ``type`` should have the same
outputs.
``Data`` type name:
- ``data:<type>[:<sub-type>[...]]:``
The algorithm
=============
Algorithm is the key component of a process. The algorithm transforms process's
inputs into outputs. It is written as a sequence of Bash commands in process's
``run.program`` field.
.. note::
In this section, we assume that the program is written using the ``bash``
language and having the ``expression-engine`` requirement set to ``jinja``.
To write the algorithm in a different language (`e.g.,` Python), just put it in
a file with an appropriate *shebang* at the top (`e.g.,` ``#!/usr/bin/env
python2`` for Python2 programs) and add it to the `tools` directory. To run it
simply call the script with appropriate arguments.
For example, to compute a Volcano plot of the baySeq data, use:
.. code-block:: bash
volcanoplot.py diffexp_bayseq.tab
.. _algorithm-utilities:
Platform utilities
------------------
Resolwe provides some convenience utilities for writing processes:
* ``re-import``
is a convenience utility that copies/downloads a file from the given
temporary location, extracts/compresses it and moves it to the given final
location. It takes six arguments:
1. file's temporary location or URL
2. file's final location
3. file's input format, which can have one of the following forms:
* ``ending1|ending2``: matches files that end with ``ending1`` or
``ending2`` or a combination of
``(ending1|ending2).(gz|bz2|zip|rar|7z|tgz|tar.gz|tar.bz2)``
* ``ending1|ending2|compression``: matches files that end with
``ending1`` or ``ending2`` or a combination of
``(ending1|ending2).(gz|bz2|zip|rar|7z|tgz|tar.gz|tar.bz2)`` or just
with a supported compression format like
``(gz|bz2|zip|rar|7z)``
4. file's output format (`e.g.,` ``fasta``)
5. maximum progress at the end of transfer (a number between 0.0 and 1.0)
6. file's output format, which can be one of the following:
* ``compress``: to produce a compressed file
* ``extract``: to produce an extracted file
If this argument is not given, both, the compressed and the extracted
file are produced.
For storing the results to process's output fields, Resolwe provides a series
of utilities. They are described in the :ref:`algorithm-outputs` section.
Runtime
-------
TODO: Write about BioLinux and what is available in the Docker runtime.
Inputs
------
To access values stored in process's input fields, use `Jinja2's template
language syntax for accessing variables`_. For example, to access the value
of process's ``fastq`` input field, write ``{{ fastq }}``.
In addition to all process's input fields, Resolwe provides the following
system variables:
* ``proc.case_ids``: ids of the corresponding cases
* ``proc.data_id``: id of the data object
* ``proc.slugs_path``: file system path to Resolwe's slugs
Resolwe also provides some custom built-in filters to access the fields of the
referenced data objects:
* ``id``: returns the id of the referenced data object
* ``type``: returns the type of the referenced data object
* ``name``: returns the value of the ``static.name`` field if it exists
For example, to use these filters on the ``reads`` field, use
``{{ reads|id }}``, ``{{ reads|type }}`` or ``{{ reads|name }}``, respectively.
You can also use any `Jinja2's built in template tags and filters`_ in your
algorithm.
.. note::
All input variables should be considered *unsafe* and will be automatically
quoted when used in your scripts. For example, the following call:
.. code-block:: bash
volcanoplot.py {{ reads.fastq.0.file }}
will actually be transformed into something like (depending on the value):
.. code-block:: bash
volcanoplot.py '/path/to/reads with spaces.gz'
If you do not want this behaviour for a certain variable and you are sure
that it is safe to do so, you can use the ``safe`` filter as follows:
.. code-block:: bash
volcanoplot.py {{ known_good_input | safe }}
.. _Jinja2's template language syntax for accessing variables: http://jinja.pocoo.org/docs/2.9/templates/#variables
.. _Jinja2's built in template tags and filters: http://jinja.pocoo.org/docs/2.9/templates/#builtin-filters
.. _algorithm-outputs:
Outputs
-------
Processes have three options for storing the results:
* as files in data object's directory (i.e. ``{{ proc.data_dir }}``)
* as constants in process's output fields
* as entries in the MongoDB data storage
.. note::
Files are stored on a shared file system that supports fast read and write
access by the processes. Accessing MongoDB from a process requires more
time and is suggested for interactive data retrieval from GenPackages only.
Saving status
`````````````
There are two special fields that you should use:
* ``proc.rc``: the return code of the process
* ``proc.progress``: the process's progress
If you set the ``proc.rc`` field to a positive value, the process will fail
and its status will be set to ``ERROR``. All processes that depend on this
process will subsequently fail and their status will be set to ``ERROR`` as
well.
The ``proc.progress`` field can be used to report processing progress
interactively. You can set it to a value between 0 and 1 that represents an
estimate for process's progress.
To set them, use the ``re-progress`` and ``re-checkrc`` utilities described
in the :ref:`algorithm-outputs-re-save-and-friends` section.
Resolwe provides some specialized utilities for reporting process status:
* ``re-error``
takes one argument and stores it to ``proc.error`` field. For example:
.. code-block:: bash
re-error "Error! Something went wrong."
* ``re-warning``
takes one argument and stores it to ``proc.warning`` field. For example:
.. code-block:: bash
re-warning "Be careful there might be a problem."
* ``re-info``
takes one argument and stores it to ``proc.info`` field. For example:
.. code-block:: bash
re-info "Just say hello."
* ``re-progress``
takes one argument and stores it to ``proc.progress`` field. The argument
should be a float between 0 and 1 and represents an estimate for
process's progress. For example, to estimate the progress to 42%, use:
.. code-block:: bash
re-progress 0.42
* ``re-checkrc``
saves the return code of the previous command to ``proc.rc`` field.
To use it, just call:
.. code-block:: bash
re-checkrc
As some programs exit with a non-zero return code, even though they
finished successfully, you can pass additional return codes as arguments to
the ``re-checkrc`` command and they will be translated to zero. For
example:
.. code-block:: bash
re-checkrc 2 15
will set ``proc.rc`` to 0 if the return code is 0, 2 or 15, and to the
actual return code otherwise.
It is also possible to set the ``proc.error`` field with this command in
case the return code is not zero (or is not given as one of the acceptable
return codes). To do that, just pass the error message as the last argument
to the ``re-checkrc`` command. For example:
.. code-block:: bash
re-checkrc "Error occurred."
re-checkrc 2 "Return code was not 0 or 2."
.. _algorithm-outputs-re-save-and-friends:
Saving constants
````````````````
To store a value in a process's output field, use the ``re-save`` utility.
The ``re-save`` utility requires two arguments, a key (i.e. field's name) and
a value (i.e. field's value).
For example, executing:
.. code-block:: bash
re-save quality_mean $QUALITY_MEAN
will store the value of the ``QUALITY_MEAN`` Bash variable in process's
``quality_mean`` field.
.. note::
To use the ``re-save`` utility, add ``re-require common`` to the
beginning of the algorithm. For more details, see
:ref:`algorithm-utilities`.
You can pass any JSON object as the second argument to the ``re-save``
utility, `e.g.`:
.. code-block:: bash
re-save foo '{"extra_output": "output.txt"}'
.. note::
Make sure to put the second argument into quotes (`e.g.,` "" or '') if you
pass a JSON object containing a space to the ``re-save`` utility.
Saving files
````````````
A convenience function for saving files is:
.. code-block:: bash
re-save-file
It takes two arguments and stores the value of the second argument in the
first argument's ``file`` subfield. For example:
.. code-block:: bash
re-save-file fastq $NAME.fastq.gz
stores ``$NAME.fastq.gz`` to the ``fastq.file`` field which has to be of
type ``basic:file:``.
To reference additional files/folders, pass them as extra arguments to the
``re-save-file`` utility. They will be saved to the ``refs`` subfield of
type ``basic:file:``. For example:
.. code-block:: bash
re-save-file fastq $NAME.fastq.gz fastqc/${NAME}_fastqc
stores ``fastqc/${NAME}_fastqc`` to the ``fastq.refs`` field in addition to
storing ``$NAME.fastq.gz`` to the ``fastq.file`` field.
.. note::
Resolwe will automatically add files' sizes to the
files' ``size`` subfields.
.. warning::
After the process has finished, Resolwe will automatically check if all
the referenced files exist. If any file is missing, it will set the data
object's status to ``ERROR``. Files that are not referenced are
automatically deleted by the platform, so make sure to reference all the
files you want to keep!
Saving JSON blobs in MongoDB
````````````````````````````
To store a JSON blob to the MongoDB storage, simply create a field of type
``data:json:`` and use the ``re-save`` utility to store it. The platform will
automatically detect that you are trying to store to a ``data:json:`` field and
it will store the blob to a separate collection.
For example:
.. code-block:: bash
re-save etc { JSON blob }
will store the ``{ JSON blob }`` to the ``etc`` field.
.. note::
Printing a lot of data to standard output can cause problems when using
the Docker executor due to its current implementation. Therefore, it is
advised to save big JSON blobs to a file and only pass the file name to the
``re-save`` function.
For example:
.. code-block:: bash
command_that_generates_large_json > json.txt
re-save etc json.txt
.. warning::
Do not store large JSON blobs into the data collection directly as this
will slow down the retrieval of data objects.
| /resolwe-36.0.0a2.tar.gz/resolwe-36.0.0a2/docs/proc.rst | 0.870638 | 0.885334 | proc.rst | pypi |
import pandas as pd
def cohort(compressed_df: pd.DataFrame, interval_time: int = 3600):
    """Creates a dataframe of cohorts using a compressed detection file.

    Two animals form a cohort pair when they are detected at the same
    station and their detection intervals fall within ``interval_time``
    seconds of each other. Each unordered pair is reported only once.

    Args:
        compressed_df (pd.DataFrame): compressed dataframe. Must contain the
            columns ``catalognumber``, ``seq_num``, ``station``,
            ``startdate``, ``enddate``, ``startunqdetecid``,
            ``endunqdetecid`` and ``total_count``.
        interval_time (int, optional): cohort detection time interval (in
            seconds). Defaults to 3600.

    Returns:
        pd.DataFrame: cohort dataframe with the following columns:

            * anml_1
            * anml_1_seq
            * station
            * anml_2
            * anml_2_seq
            * anml_2_arrive
            * anml_2_depart
            * anml_2_startunqdetecid
            * anml_2_endunqdetecid
            * anml_2_detcount
    """
    output_columns = ['anml_1',
                      'anml_1_seq',
                      'station',
                      'anml_2',
                      'anml_2_seq',
                      'anml_2_arrive',
                      'anml_2_depart',
                      'anml_2_startunqdetecid',
                      'anml_2_endunqdetecid',
                      'anml_2_detcount']

    # Convert input int interval_time into a timedelta object
    interval_time = pd.to_timedelta(interval_time, unit='s')

    # Sort input compressed data file so pairs are discovered in a
    # deterministic order.
    cmps = compressed_df.sort_values(['catalognumber', 'seq_num'])

    # Loop through rows in the compressed data file and choose cohorts if
    # the times are within range. ``seen`` holds the (row, row) index pairs
    # already reported so the reverse pairing is not emitted again; a set
    # gives O(1) membership tests for the per-row ``isin`` check.
    seen = set()
    final_set = []
    for idx1, item1 in cmps.iterrows():
        match = cmps[
            (cmps.station == item1.station) &
            (cmps.catalognumber != item1.catalognumber) &
            (~cmps.index.to_series().apply(lambda x: (x, idx1)).isin(seen)) &
            # Overlap test: either endpoint of the candidate's interval lies
            # within interval_time of item1's interval.
            ((cmps.startdate > item1.startdate - interval_time) &
             (cmps.startdate < item1.enddate + interval_time) |
             (cmps.enddate > item1.startdate - interval_time) &
             (cmps.enddate < item1.enddate + interval_time))]
        if not match.empty:
            match.insert(0, 'anml_1', item1.catalognumber)
            match.insert(1, 'anml_1_seq', item1.seq_num)
            final_set.extend(match[['anml_1', 'anml_1_seq', 'station',
                                    'catalognumber', 'seq_num', 'startdate',
                                    'enddate', 'startunqdetecid',
                                    'endunqdetecid', 'total_count'
                                    ]].values.tolist())
            seen.update((idx1, i) for i in match.index.tolist())

    # Supplying the column names at construction time keeps this safe when
    # no cohorts were found: the previous implementation assigned
    # ``output_df.columns`` after the fact, which raises ValueError on an
    # empty (zero-column) frame.
    return pd.DataFrame(final_set, columns=output_columns)
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from geopy.distance import geodesic
from resonate.library.exceptions import GenericException
def get_distance_matrix(detections: pd.DataFrame):
    """Build a station-by-station distance matrix for an array or line.

    Each station's position is the mean latitude/longitude of all its
    detections; pairwise geodesic distances (in metres) between those mean
    positions fill a square DataFrame indexed by station on both axes.

    Args:
        detections (pd.DataFrame): detections with ``station``,
            ``latitude`` and ``longitude`` columns.

    Returns:
        pd.DataFrame: matrix of station-to-station distances in metres.
    """
    # Mean position per station (NaN station labels form their own group).
    mean_locs = detections.groupby('station', dropna=False)[
        ['longitude', 'latitude']].mean()
    stations = mean_locs.index
    n_stations = len(stations)

    dist_mtx = pd.DataFrame(
        np.zeros((n_stations, n_stations)),
        index=stations, columns=stations)

    for col_station in stations:
        col_point = (mean_locs.loc[col_station, 'latitude'],
                     mean_locs.loc[col_station, 'longitude'])
        for row_station in stations:
            row_point = (mean_locs.loc[row_station, 'latitude'],
                         mean_locs.loc[row_station, 'longitude'])
            dist_mtx.loc[row_station, col_station] = geodesic(
                col_point, row_point).m

    dist_mtx.index.name = None
    return dist_mtx
def filter_detections(detections: pd.DataFrame, suspect_file=None,
                      min_time_buffer=3600,
                      distance_matrix=False, add_column:bool=True):
    """Filters isolated detections that are more than min_time_buffer apart from
    other dets. for a series of detections in detection_file. Returns Filtered
    and Suspect dataframes.

    suspect_file can be a file of existing suspect detections to remove before
    filtering. NOTE(review): when suspect_file is given, the time-gap filter
    and the mandatory-column validation below are both skipped entirely —
    the function only subtracts the listed rows from the input.

    dist_matrix is created as a matrix of between-station distances from
    stations defined in the input file.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of acoustic detections
        suspect_file (str, optional): Path to a user specified suspect file, same format as
            the detections. Defaults to None.
        min_time_buffer (int, optional): The minimum of time required for outlier detections
            in seconds. Defaults to 3600.
        distance_matrix (bool, optional): A boolean of whether or not to generate the
            distance matrix. Defaults to False.
        add_column (bool, optional): If true, add a column to specify if the row passed the filtered
            or not. Otherwise, split into 2 dataframes. Defaults to True.

    Raises:
        GenericException: Triggered if detections file is missing required columns for filtering.
        GenericException: Triggered if detections file is missing required columns for generating
            the distance matrix

    Returns:
        dict|pd.DataFrame: if add_column is True, a Pandas Dataframe. Otherwise, a list of Pandas DataFrames of filtered detections and suspect
        detections.
    """
    # Set of mandatory column names for detection_file; also used as the
    # duplicate key when subtracting suspect-file rows below.
    mandatory_columns = set(['station',
                             'unqdetecid',
                             'datecollected',
                             'catalognumber'])

    # Subtract all detections found in the user defined suspect file
    if suspect_file:
        print("Found suspect file {0}. Subtracting detections from input".format(
            suspect_file))
        susp_dets = pd.read_csv(suspect_file)
        # Concatenate and drop *all* duplicates (keep=False): any detection
        # present in the suspect file disappears from good_dets.
        good_dets = pd.concat([detections, susp_dets], ignore_index=True)
        good_dets.drop_duplicates(mandatory_columns, keep=False, inplace=True)
    elif mandatory_columns.issubset(detections.columns):
        # calculate detections to filtered
        # For each individual catalognumber:
        # Determine the space between each detection.
        # If the space before + after > min_time_buffer
        # Remove that detection row from the detections and add it to suspect detections.
        # SQL that does this is in load_to_postgresql under createSuspect
        detections = detections.copy(deep=True)
        ind = detections['catalognumber'].unique()
        detections.loc[:, 'datecollected'] = pd.to_datetime(
            detections['datecollected'])
        user_int = timedelta(seconds=min_time_buffer)
        good_dets = pd.DataFrame()
        susp_dets = pd.DataFrame()
        grouped = detections.groupby('catalognumber', dropna=False)
        for anm in ind:
            anm_dets = grouped.get_group(anm).sort_values(
                'datecollected', ascending=True)
            # Gap to the previous detection (NaT for the first row of each
            # animal) ...
            intervals = anm_dets['datecollected'] - \
                anm_dets['datecollected'].shift(1)
            # ... and gap to the next detection (NaT for the last row).
            post_intervals = anm_dets['datecollected'].shift(
                -1) - anm_dets['datecollected']
            # A detection is kept when it has a neighbour within the buffer
            # on either side; NaT comparisons evaluate False, so isolated
            # single detections fail both tests and become suspect.
            good_dets = pd.concat([
                good_dets,
                anm_dets[
                    (intervals <= user_int) | (post_intervals <= user_int)
                ]
            ])
        # If they aren't a good det, they're suspect!
        # TODO: Reporting: Decide if we want to report the big 'before/after'
        # triplicate in Suspect Dets
        # If so, building susp_dets gets tougher, involves a merge and then a
        # append.
        # For now, just a matter of putting the complement of the good dets in
        # the susp_dets
        susp_dets = detections.loc[~detections['unqdetecid'].isin(
            good_dets['unqdetecid'])].copy(deep=True)
    else:
        raise GenericException("Missing required input columns: {}".format(
            mandatory_columns - set(detections.columns)))

    print("Total detections in filtered dataframe: {0}".format(
        len(good_dets.index)))
    print("{0} suspect detections removed".format(len(susp_dets.index)))
    output_dict = {"filtered": good_dets, "suspect": susp_dets}
    if distance_matrix:
        # Must now have lat and long columns as well.
        dm_mandatory_columns = set(['latitude', 'longitude'])
        if dm_mandatory_columns.issubset(detections.columns):
            output_dict['dist_mtrx'] = get_distance_matrix(detections)
            print("There are {0} station locations in the distance \
matrix".format(len(output_dict['dist_mtrx'].index)))
        else:
            raise GenericException("Missing required input columns for \
distance matrix calc: {}".format(
                dm_mandatory_columns - set(detections.columns)))
    if add_column:
        # Flag each row instead of splitting, then re-merge into a single
        # frame for the caller.
        output_dict['filtered'].loc[:, 'passed_detection_filter'] = True
        output_dict['suspect'].loc[output_dict['suspect'].index, 'passed_detection_filter'] = False
        output_df = pd.concat(output_dict.values()).reset_index(drop=True)
        if distance_matrix:
            return {'detections': output_df, 'dist_mtrx': output_dict['dist_mtrx']}
        else:
            return output_df
    return output_dict
def distance_filter(detections: pd.DataFrame, maximum_distance=100000, add_column:bool=True):
    """Flags or splits detections whose neighbouring detections are too far apart.

    For each animal (catalognumber), the previous and next station visited are
    attached to every detection, and the distances to them are looked up in the
    station distance matrix.

    Args:
        detections (pd.DataFrame): a Pandas DataFrame of acoustic detections
        maximum_distance (int, optional): a number in meters. Defaults to 100000.
        add_column (bool, optional): If true, add a column to specify if the row
            passed the filter or not. Otherwise, split into 2 dataframes.
            Defaults to True.

    Raises:
        GenericException: Triggered if detections file is missing required columns

    Returns:
        dict|pd.DataFrame: if add_column is True, a Pandas Dataframe. Otherwise,
            a dict of filtered detections and suspect detections.
    """
    # Silence chained-assignment warnings for the per-group column writes below.
    pd.options.mode.chained_assignment = None
    required = set(['station',
                    'unqdetecid',
                    'datecollected',
                    'catalognumber'])
    if not required.issubset(detections.columns):
        raise GenericException("Missing required input columns: {}".format(
            required - set(detections.columns)))
    station_distances = get_distance_matrix(detections)
    # Per animal (in time order): note the previous/next station visited.
    annotated_parts = []
    for _, animal_dets in detections.sort_values(['datecollected']).groupby(['catalognumber'], dropna=False):
        animal_dets['lag_station'] = animal_dets.station.shift(1).fillna(animal_dets.station)
        animal_dets['lead_station'] = animal_dets.station.shift(-1).fillna(animal_dets.station)
        annotated_parts.append(animal_dets)
    annotated = pd.concat(annotated_parts) if annotated_parts else pd.DataFrame()
    del detections
    # Per (station, lag, lead) triple: look the distances up once and broadcast.
    distance_parts = []
    for _, triple in annotated.groupby(['station', 'lag_station', 'lead_station'], dropna=False):
        current_stn = triple.station.unique()[0]
        previous_stn = triple.lag_station.unique()[0]
        following_stn = triple.lead_station.unique()[0]
        triple['lag_distance_m'] = station_distances.loc[current_stn, previous_stn]
        triple['lead_distance_m'] = station_distances.loc[current_stn, following_stn]
        distance_parts.append(triple)
    distance_df = pd.concat(distance_parts) if distance_parts else pd.DataFrame()
    del annotated
    distance_df.sort_index(inplace=True)
    near_enough = ((distance_df.lag_distance_m <= maximum_distance) &
                   (distance_df.lead_distance_m <= maximum_distance))
    if add_column:
        distance_df['passed_distance_filter'] = near_enough
        return distance_df
    return {
        'filtered': distance_df[near_enough].reset_index(drop=True),
        'suspect': distance_df[(distance_df.lag_distance_m > maximum_distance) |
                               (distance_df.lead_distance_m > maximum_distance)].reset_index(drop=True),
    }
def velocity_filter(detections: pd.DataFrame, maximum_velocity=10, add_column:bool=True):
    """Filters detections based on the time it took to travel between locations.

    Args:
        detections (pd.DataFrame): a Pandas DataFrame of acoustic detections
        maximum_velocity (int, optional): The maximum velocity (m/s) the animals
            can travel. Defaults to 10.
        add_column (bool, optional): If true, add a column to specify if the row
            passed the filter or not. Otherwise, split into 2 dataframes.
            Defaults to True.

    Raises:
        GenericException: Triggered if detections file is missing required columns

    Returns:
        dict|pd.DataFrame: if add_column is True, a Pandas Dataframe. Otherwise,
            a dict of Pandas DataFrames of filtered detections and suspect
            detections.
    """
    # Silence chained-assignment warnings for the per-group column writes below.
    pd.options.mode.chained_assignment = None
    mandatory_columns = set(['station',
                             'unqdetecid',
                             'datecollected',
                             'catalognumber'])
    if mandatory_columns.issubset(detections.columns):
        dm = get_distance_matrix(detections)
        lead_lag_df = pd.DataFrame()
        # Per animal (in time order): note the previous/next station and the
        # time gap to each neighbouring detection.
        for _, group in detections.sort_values(['datecollected']).groupby(['catalognumber'], dropna=False):
            group['lag_station'] = group.station.shift(1).fillna(group.station)
            group['lead_station'] = group.station.shift(
                -1).fillna(group.station)
            group.datecollected = pd.to_datetime(group.datecollected)
            # First/last detections get a 1-second placeholder gap so their
            # velocities stay finite.
            group['lag_time_diff'] = group.datecollected.diff().fillna(
                timedelta(seconds=1))
            group['lead_time_diff'] = group.lag_time_diff.shift(
                -1).fillna(timedelta(seconds=1))
            lead_lag_df = pd.concat([lead_lag_df, group])
        del detections
        vel_df = pd.DataFrame()
        # Per (station, lag, lead) triple: look the distances up once and broadcast.
        for _, group in lead_lag_df.groupby(['station', 'lag_station', 'lead_station'], dropna=False):
            stn = group.station.unique()[0]
            lag_stn = group.lag_station.unique()[0]
            lead_stn = group.lead_station.unique()[0]
            group['lag_distance_m'] = dm.loc[stn, lag_stn]
            group['lead_distance_m'] = dm.loc[stn, lead_stn]
            vel_df = pd.concat([vel_df, group])
        del lead_lag_df
        vel_df['lag_velocity'] = vel_df.lag_distance_m / \
            vel_df.lag_time_diff.dt.total_seconds()
        vel_df['lead_velocity'] = vel_df.lead_distance_m / \
            vel_df.lead_time_diff.dt.total_seconds()
        vel_df.sort_index(inplace=True)
        if add_column:
            # BUG FIX: the final term previously re-checked lag_velocity
            # instead of lead_velocity; the mask now mirrors the suspect
            # split below (NaN in either velocity fails the filter).
            vel_df['passed_velocity_filter'] = (
                (vel_df.lag_velocity <= maximum_velocity) &
                (vel_df.lead_velocity <= maximum_velocity) &
                vel_df.lag_velocity.notna() & vel_df.lead_velocity.notna())
            return vel_df
        else:
            filtered_detections = dict()
            filtered_detections['filtered'] = vel_df[
                (vel_df.lag_velocity <= maximum_velocity) &
                (vel_df.lead_velocity <= maximum_velocity)].reset_index(drop=True)
            filtered_detections['suspect'] = vel_df[
                (vel_df.lag_velocity > maximum_velocity) |
                (vel_df.lead_velocity > maximum_velocity) |
                (vel_df.lag_velocity.isna()) |
                (vel_df.lead_velocity.isna())
            ].reset_index(drop=True)
            return filtered_detections
    else:
        raise GenericException("Missing required input columns: {}".format(
            mandatory_columns - set(detections.columns)))
def filter_all(detections: pd.DataFrame, min_time_buffer=3600, maximum_distance=100000, maximum_velocity=10):
    """Runs all 3 filters on a given detection dataframe, returning 1 dataframe
    with 3 columns specifying if the row passed each filter test. Does not
    return a distance matrix.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of acoustic detections
        min_time_buffer (int, optional): The minimum of time required for
            outlier detections in seconds. Defaults to 3600.
        maximum_distance (int, optional): a number in meters. Defaults to 100000.
        maximum_velocity (int, optional): The maximum velocity the animals can
            travel. Defaults to 10.

    Raises:
        GenericException: Triggered if detections file is missing required columns

    Returns:
        pd.DataFrame: A dataframe with all 3 filter columns added and populated
    """
    required = set(['station',
                    'unqdetecid',
                    'datecollected',
                    'catalognumber'])
    missing = required - set(detections.columns)
    if missing:
        raise GenericException("Missing required input columns: {}".format(missing))
    # Work on a copy, then chain the three filters in add_column mode so each
    # one appends its own pass/fail column.
    result = detections.copy(deep=True)
    result = filter_detections(result, min_time_buffer=min_time_buffer)
    result = distance_filter(result, maximum_distance=maximum_distance)
    result = velocity_filter(result, maximum_velocity=maximum_velocity)
    return result
import geopy
import pandas as pd
def interval_data(compressed_df: pd.DataFrame, dist_matrix_df: pd.DataFrame, station_radius_df: pd.DataFrame=None):
    """Creates a detection interval file from compressed detection, distance
    matrix and (optional) station detection radius DataFrames.

    Each output row pairs one compressed residence event with the next one for
    the same animal, and reports the travel distance, elapsed time and average
    speed between them.

    Args:
        compressed_df (pd.DataFrame): compressed detection dataframe
        dist_matrix_df (pd.DataFrame): station distance matrix dataframe
        station_radius_df (pd.DataFrame, optional): station distance radius
            dataframe (indexed by station; its 'radius' values expose a ``.m``
            attribute, presumably geopy Distance objects -- TODO confirm).
            Defaults to None.

    Returns:
        pd.DataFrame: interval detection Dataframe
    """
    # Create two dataframes from input compressed detections and decrement the
    # second's seq_num so that event N joins onto event N+1.
    event_columns = ['catalognumber', 'station', 'seq_num', 'total_count',
                     'startdate', 'enddate', 'endunqdetecid']
    fst = compressed_df[event_columns].copy()
    snd = compressed_df[event_columns].copy()
    snd.seq_num -= 1
    # Rename columns
    fst.columns = ['catalognumber', 'from_station', 'seq_num', 'from_detcnt',
                   'from_arrive', 'from_leave', 'unqdetid_from']
    snd.columns = ['catalognumber', 'to_station', 'seq_num', 'to_detcnt',
                   'to_arrive', 'to_leave', 'unqdetid_arrive']
    # Merge the two DataFrames together linking catalognumber and seq_num
    merged = pd.merge(fst, snd, how='left', on=['catalognumber', 'seq_num'])
    # Create additional column placeholders
    merged['intervaltime'] = None
    merged['intervalseconds'] = None
    merged['distance_m'] = None
    merged['metres_per_second'] = None
    # Loop through all rows linking distance matrices and calculating average
    # speed between intervals.
    for idx, item in merged.iterrows():
        # If any of the station pairs don't exist, skip processing current row
        if not (pd.isnull(item['from_station']) or pd.isnull(item['to_station'])):
            # Get station matrix distance.
            # NOTE(review): the comment here originally said the matrix is in
            # metres, yet the value is multiplied by 1000 below -- confirm the
            # matrix units (it may actually be km).
            matrix_distance_m = dist_matrix_df.loc[item['from_station'],
                                                   item['to_station']]
            # If matrix pair exists do distance calculations
            if matrix_distance_m:
                if isinstance(station_radius_df, pd.DataFrame):
                    # Shrink the separation by both detection radii, floored at 0.
                    stn1_radius = station_radius_df.loc[item['from_station'], 'radius']
                    stn2_radius = station_radius_df.loc[item['to_station'], 'radius']
                    distance = max(geopy.distance.Distance(
                        0).m, matrix_distance_m - stn1_radius.m - stn2_radius.m) * 1000
                else:
                    distance = max(geopy.distance.Distance(
                        0).m, matrix_distance_m) * 1000
                merged.loc[idx, 'distance_m'] = distance
                time_interval = item['to_arrive'] - item['from_leave']
                merged.loc[idx, 'intervaltime'] = time_interval
                merged.loc[idx, 'intervalseconds'] = time_interval.total_seconds()
                # BUG FIX: previously divided by time_interval.seconds, which
                # is only the time-of-day component of the Timedelta and
                # mis-computed speeds for intervals spanning one day or more.
                if time_interval.total_seconds() != 0:
                    merged.loc[idx, 'metres_per_second'] = distance / \
                        time_interval.total_seconds()
    return merged
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
from resonate.library.exceptions import GenericException
def bubble_plot(detections: pd.DataFrame, type='detections', ipython_display=True,
                title='Bubble Plot', height=700, width=1000,
                plotly_geo=None, filename=None, mapbox_token=None,
                marker_size=10, colorscale='Viridis'):
    """Plots per-station counts as colored markers on a Plotly geo map, or on a
    Mapbox map when an access token is supplied.

    Args:
        detections (pd.DataFrame): detection dataframe
        type (str, optional): counts unique individuals per station if
            'individual'; otherwise counts detections. Defaults to 'detections'.
        ipython_display (bool, optional): a boolean to show in a notebook. Defaults to True.
        title (str, optional): the title of the plot. Defaults to 'Bubble Plot'.
        height (int, optional): the height of the plot (notebook display only). Defaults to 700.
        width (int, optional): the width of the plot (notebook display only). Defaults to 1000.
        plotly_geo (dict, optional): an optional dictionary to control the
            geographic aspects of the plot. Defaults to None.
        filename (str, optional): Plotly filename to write to. Defaults to None.
        mapbox_token (str, optional): A string of mapbox access token. Defaults to None.
        marker_size (int, optional): An int to indicate the diameter in pixels. Defaults to 10.
        colorscale (str, optional): A string to indicate the color index. Defaults to 'Viridis'.

    Raises:
        GenericException: Triggers if detections isn't a dataframe
        GenericException: Triggers if detections is missing required columns

    Returns:
        (None|Any): A plotly geoscatter plot or mapbox plot
    """
    if not isinstance(detections, pd.DataFrame):
        raise GenericException('input parameter must be a Pandas dataframe')
    mandatory_columns = set(
        ['station', 'catalognumber', 'unqdetecid', 'latitude', 'longitude', 'datecollected'])
    if mandatory_columns.issubset(detections.columns):
        detections = detections[['station', 'catalognumber', 'unqdetecid',
                                 'latitude', 'longitude', 'datecollected']].reset_index(drop=True)
        # For individual counts, collapse to one row per animal per location
        # before counting.
        if type == 'individual':
            detections = detections.drop(
                ['unqdetecid', 'datecollected'], axis=1).drop_duplicates()
        # One row per station location, with its detection (or animal) count.
        detections = detections.groupby(
            ['station', 'latitude', 'longitude'], dropna=False).size().reset_index(name='counts')
        map_type = 'scattergeo'
        if mapbox_token is not None:
            map_type = 'scattermapbox'
            mapbox = dict(
                accesstoken=mapbox_token,
                center=dict(
                    lon=detections.longitude.mean(),
                    lat=detections.latitude.mean()
                ),
                zoom=5,
                style='light'
            )
        data = [
            {
                'lon': detections.longitude.tolist(),
                'lat': detections.latitude.tolist(),
                'text': detections.station + " : " + detections.counts.astype(str),
                'mode': 'markers',
                'marker': {
                    'color': detections.counts.tolist(),
                    'size': marker_size,
                    'showscale': True,
                    'colorscale': colorscale,
                    # NOTE(review): the colorbar always reads 'Detection Count',
                    # even when type='individual' -- confirm if intentional.
                    'colorbar': {
                        'title': 'Detection Count'
                    }
                },
                'type': map_type
            }
        ]
        # Default world-map styling, used only when the caller supplies none.
        if plotly_geo is None:
            plotly_geo = dict(
                showland=True,
                landcolor="rgb(255, 255, 255)",
                showocean=True,
                oceancolor="rgb(212,212,212)",
                showlakes=True,
                lakecolor="rgb(212,212,212)",
                showrivers=True,
                rivercolor="rgb(212,212,212)",
                resolution=50,
                showcoastlines=False,
                showframe=False,
                projection=dict(
                    type='mercator',
                )
            )
        # Center and bound the view on the stations actually plotted.
        plotly_geo.update(
            center=dict(
                lon=detections.longitude.mean(),
                lat=detections.latitude.mean()
            ),
            lonaxis=dict(
                range=[detections.longitude.min(), detections.longitude.max()],
            ),
            lataxis=dict(
                range=[detections.latitude.min(), detections.latitude.max()]
            )
        )
        if mapbox_token is None:
            layout = dict(
                geo=plotly_geo,
                title=title
            )
        else:
            layout = dict(title=title,
                          autosize=True,
                          hovermode='closest',
                          mapbox=mapbox
                          )
        if ipython_display:
            # Notebook path: fix the figure size and render inline.
            layout.update(
                height=height,
                width=width
            )
            fig = {'data': data, 'layout': layout}
            py.init_notebook_mode()
            return py.iplot(fig)
        else:
            # Offline path: write the plot to `filename`.
            fig = {'data': data, 'layout': layout}
            return py.plot(fig, filename=filename)
    else:
        raise GenericException("Missing required input columns: {}".format(
            mandatory_columns - set(detections.columns)))
import pandas as pd
def create_att_dictionary_format(dets_file: str=None, tags_file: str=None, deployment_file: str=None, preprocessed:dict=None) -> dict:
    """Creates a dictionary with dataframes containing detections, tag metadata, and station metadata.

    Heavily inspired by VTrack's ATT format. Either the 3 file args must not be none or the preprocessed
    arg must not be none. If all are not none, preprocessed will be used.

    Args:
        dets_file (str, optional): path to the OTN detection extract file. Defaults to None.
        tags_file (str, optional): path to the OTN tagging metadata excel file. Defaults to None.
        deployment_file (str, optional): path to the OTN deployment metadata excel file. Defaults to None.
        preprocessed (dict, optional): dictionary containing ALL THREE dataframes normally created by
            the file args. Dict keys are 'dets', 'tags', 'deploys'. Defaults to None.

    Raises:
        RuntimeError: If neither a complete preprocessed dict nor all three file paths are supplied.

    Returns:
        dict: keys 'tag_metadata', 'station_information' and 'tag_detections',
        each a dataframe for detections, tagging metadata, and station metadata
    """
    att = {}
    # Make sure preprocessed is there and that everything is in it
    if preprocessed is not None and all(x in preprocessed for x in ['dets', 'tags', 'deploys']):
        dets = preprocessed['dets']
        tags = preprocessed['tags']
        deploys = preprocessed['deploys']
    # Check if all file args are there
    elif all(x is not None for x in [dets_file, tags_file, deployment_file]):
        dets = pd.read_csv(dets_file)
        tags = setup_tag_sheet(tags_file)
        deploys = setup_deployment_sheet(deployment_file)
    else:
        raise RuntimeError("Arguments incorrect, please insure that all file args are there or the preprocessed dict is there.")
    # clean up station and receiver so they don't show (lost/found)
    dets['station_name'] = dets['station'].str.extract(
        '([A-Za-z0-9]*)(\\(lost/found\\))?')[0]
    dets['receiver'] = dets['receiver'].str.extract(
        '([0-9]*)(\\(lost/found\\))?')[0]
    dets.rename({'tagname': 'transmitter_id'}, inplace=True, axis=1)
    dets_joined_tags = dets.merge(
        tags, how='left', left_on='transmitter_id', right_on='transmitter_id') # Add tag data to dets
    dets_joined_tags.rename({
        'catalognumber': 'tag_id',
        'transmitter_id': 'transmitter',
        'collectioncode': 'tag_project',
    }, inplace=True, axis=1)
    # We don't track tag_status or bio, set to none
    dets_joined_tags['tag_status'] = None
    dets_joined_tags['bio'] = None
    # Select all required columns and drop dupes
    att['tag_metadata'] = dets_joined_tags[
        ['tag_id', 'transmitter', 'common_name', 'sci_name', 'tag_project', 'release_latitude',
         'release_longitude', 'release_date', 'sex', 'tag_life', 'tag_status', 'bio']
    ].drop_duplicates()
    att['tag_metadata'] = reindex_df(att['tag_metadata'])
    dets_joined_full = dets_joined_tags.merge(
        deploys, how="left", left_on='station_name', right_on='station_name') # Add station data to dets
    # installation and receiver_status are not tracked either; set to none.
    dets_joined_full['installation'] = None
    dets_joined_full['receiver_status'] = None
    # Create full receiver, if it's not already complete
    # (a '-' in ins_model_no is taken to mean 'model-serial' is already joined)
    if not dets_joined_full['ins_model_no'].str.contains('-').all():
        dets_joined_full['receiver'] = dets_joined_full['ins_model_no'] + \
            '-' + dets_joined_full['receiver']
    att['station_information'] = dets_joined_full[
        ['station_name', 'receiver', 'installation', 'receiver_project',
         'deploy_date_time', 'recover_date_time', 'deploy_lat', 'deploy_long', 'receiver_status']]
    att['station_information'] = att['station_information'].drop_duplicates(
    ).sort_values('station_name')
    att['station_information'] = reindex_df(att['station_information'])
    att['tag_detections'] = dets_joined_full[
        ['datecollected', 'transmitter', 'station_name', 'receiver',
         'latitude', 'longitude', 'sensorvalue', 'sensorunit']]
    return att
def setup_tag_sheet(path: str):
    """Imports the tag sheet and extracts the parts that are needed for the ATT like dict.

    Args:
        path (str): Path to the tagging sheet (Excel; data on the second sheet,
            header on row 5)

    Returns:
        pd.DataFrame: Dataframe needed by 'create_att_dictionary_format'
    """
    tags = pd.read_excel(path, sheet_name=1, header=4)
    # Full transmitter id is '<code space>-<id code>'.
    tags['transmitter_id'] = tags['TAG_CODE_SPACE'].astype(
        str) + '-' + tags['TAG_ID_CODE'].astype(str)
    tags['tag_life'] = tags['EST_TAG_LIFE'].apply(get_days_from_string)
    # (source column, output name) pairs; note UTC_RELEASE_DATE_TIME is
    # deliberately used twice ('time' and 'release_date').
    column_pairs = [
        ('ANIMAL_ID (floy tag ID, pit tag code, etc.)', 'animal_id'),
        ('UTC_RELEASE_DATE_TIME', 'time'),
        ('SEX', 'sex'),
        ('RELEASE_LATITUDE', 'release_latitude'),
        ('RELEASE_LONGITUDE', 'release_longitude'),
        ('UTC_RELEASE_DATE_TIME', 'release_date'),
        ('COMMON_NAME_E', 'common_name'),
        ('SCIENTIFIC_NAME', 'sci_name'),
        ('transmitter_id', 'transmitter_id'),
        ('tag_life', 'tag_life'),
    ]
    sources = [pair[0] for pair in column_pairs]
    targets = [pair[1] for pair in column_pairs]
    return subset_rename_df(tags, sources, targets)
def setup_deployment_sheet(path:str, pad_station:bool=True):
    """Imports the deployment sheet and extracts the parts that are needed for the ATT like dict.

    Args:
        path (str): Path to the deployment sheet
        pad_station (bool, optional): If the station number should be padded to
            be a 3 digit number (34 -> 034). Defaults to True.

    Returns:
        pd.DataFrame: Dataframe needed by 'create_att_dictionary_format'
    """
    deploys = pd.read_excel(path)
    if pad_station:
        deploys['STATION_NO'] = deploys['STATION_NO'].apply(pad_number)
    # Station name is the array code followed by the (padded) station number.
    deploys['station_name'] = deploys['OTN_ARRAY'].astype(str) + deploys['STATION_NO'].astype(str)
    # (source column, output name) pairs, applied positionally.
    column_pairs = [
        ('DEPLOY_LAT', 'deploy_lat'),
        ('DEPLOY_LONG', 'deploy_long'),
        ('INS_MODEL_NO', 'ins_model_no'),
        ('DEPLOY_DATE_TIME (yyyy-mm-ddThh:mm:ss)', 'deploy_date_time'),
        ('RECOVER_DATE_TIME (yyyy-mm-ddThh:mm:ss)', 'recover_date_time'),
        ('OTN_ARRAY', 'receiver_project'),
        ('station_name', 'station_name'),
    ]
    sources = [pair[0] for pair in column_pairs]
    targets = [pair[1] for pair in column_pairs]
    return subset_rename_df(deploys, sources, targets)
def subset_rename_df(df: pd.DataFrame, subset: list, names: list):
    """Selects a subset of a dataframe's columns and renames them positionally.

    Args:
        df (pd.DataFrame): A Pandas DataFrame
        subset (list): A list of columns to select (duplicates allowed)
        names (list): Replacement column names, in the same order as 'subset'

    Raises:
        RuntimeError: Raised if the length of 'subset' and 'names' aren't the same

    Returns:
        pd.DataFrame: New dataframe with the columns from 'subset', renamed to 'names'
    """
    if len(subset) != len(names):
        raise RuntimeError("'subset' and 'names' must be the same length. %s != %s" % (
            len(subset), len(names)))
    # Positional assignment (not a rename mapping) so a duplicated source
    # column in 'subset' can map to two different output names.
    selected = df[subset]
    selected.columns = names
    return selected
def get_days_from_string(string: str):
    """Converts a tag-life value to a whole number of days.

    Accepts either a bare number (e.g. "365") or a pandas-parsable duration
    string (e.g. "365 days").

    Args:
        string (str): The tag life as a number or a duration string

    Raises:
        Exception: If the value cannot be interpreted as a number of days.

    Returns:
        int: The tag life in days
    """
    try:
        return int(string)
    except ValueError:
        pass  # Not a bare number; try parsing as a duration below.
    try:
        return pd.to_timedelta(string).days
    # BUG FIX: pd.to_timedelta raises ValueError for unparsable strings, which
    # previously escaped the `except KeyError` handler uncaught; both are now
    # translated into the friendly message (KeyError kept for compatibility).
    except (ValueError, KeyError):
        raise Exception("Please change the estimated tag life to days.")
def pad_number(num, size=3):
    """Pads a number with zeros in the front until 'size' is reached.
    Does not shorten values already 'size' characters or longer.

    Args:
        num (int): The number to be padded
        size (int, optional): The length of the number after padding. Defaults to 3.

    Returns:
        str: The given number padded on the front with zeros
    """
    # rjust left-fills with '0' and never truncates -- same result as
    # prepending zeros one at a time.
    return str(num).rjust(size, '0')
def reindex_df(df: pd.DataFrame):
    """Sets the index of a dataframe, in place, to 0 to (size - 1).

    Args:
        df (pd.DataFrame): A dataframe (mutated: its index is replaced)

    Returns:
        pd.DataFrame: The same dataframe object with a 0 to N-1 index.
    """
    df.index = pd.RangeIndex(len(df))
    return df
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
from resonate.library.exceptions import GenericException
def abacus_plot(detections: pd.DataFrame, ycolumn:str='catalognumber', color_column:str=None, ipython_display=True, title:str='Abacus Plot', filename:str=None):
    """Creates a plotly abacus plot from a pandas dataframe

    Args:
        detections (pd.DataFrame): detection dataframe
        ycolumn (str, optional): the series/column for the y axis of the plot. Defaults to 'catalognumber'.
        color_column (str, optional): the series/column to group by and assign a color. Defaults to None.
        ipython_display (bool, optional): a boolean to show in a notebook. Defaults to True.
        title (str, optional): the title of the plot. Defaults to 'Abacus Plot'.
        filename (str, optional): Plotly filename to write to. Defaults to None.

    Raises:
        GenericException: Triggers if detections argument isn't a dataframe
        GenericException: Triggers if the dataframe is missing required columns

    Returns:
        (None|Any): A plotly scatter plot or None if ipython_display is True
    """
    if not isinstance(detections, pd.DataFrame):
        raise GenericException('input parameter must be a Pandas dataframe')
    # BUG FIX: 'unqdetecid' is read below to drop release records, so it must
    # be part of the mandatory-column check; previously its absence surfaced
    # as a raw AttributeError/KeyError instead of a GenericException.
    mandatory_columns = set(['datecollected', 'unqdetecid', ycolumn])
    if color_column is not None:
        mandatory_columns.add(color_column)
    if mandatory_columns.issubset(detections.columns):
        # Release records are not receiver detections; drop them.
        detections = detections[~detections.unqdetecid.str.contains(
            'release')].reset_index(drop=True)
        if color_column is not None:
            # One scatter trace per color group so each gets its own color
            # and legend entry.
            data = list()
            for group in detections.groupby(color_column):
                data.append(
                    {
                        'x': group[1].datecollected.tolist(),
                        'y': group[1][ycolumn].tolist(),
                        'mode': 'markers',
                        'name': group[0]
                    }
                )
        else:
            data = [
                {
                    'x': detections.datecollected.tolist(),
                    'y': detections[ycolumn].tolist(),
                    'mode': 'markers',
                }
            ]
        layout = dict(
            title=title,
            xaxis=dict(
                autorange=False,
                # Pin the x axis to the observed date range.
                range=[detections.datecollected.min(
                ), detections.datecollected.max()]
            ),
            yaxis=dict(
                autorange=True
            ),
            # Wide left margin so long y labels are not clipped.
            margin=dict(
                l=175
            )
        )
        fig = {'data': data, 'layout': layout}
        if ipython_display:
            py.init_notebook_mode()
            return py.iplot(fig)
        else:
            return py.plot(fig, filename=filename)
    else:
        raise GenericException("Missing required input columns: {}".format(
            mandatory_columns - set(detections.columns)))
import datetime
import numpy as np
import pandas as pd
from resonate.library.exceptions import GenericException
def REI(detections: pd.DataFrame, deployments: pd.DataFrame) -> pd.DataFrame:
    """Calculates and returns each station's Receiver Efficiency Index (REI).

    Each station's share of unique tags, unique species and days with
    detections is weighed against how long the station was deployed, relative
    to the array as a whole; the results are normalized to sum to 1.

    Args:
        detections (pd.DataFrame): a pandas DataFrame of detections
        deployments (pd.DataFrame): a pandas DataFrame of station deployment histories

    Raises:
        GenericException: Triggers when detections argument isn't a dataframe
        GenericException: Triggers when either input is missing required columns

    Returns:
        pd.DataFrame: a pandas DataFrame of station, REI, latitude, and longitude
    """
    # Check for proper dataframe and the correct columns
    if not isinstance(detections, pd.DataFrame):
        raise GenericException('input parameter must be a Pandas dataframe')
    mandatory_detection_columns = set(
        ['datecollected', 'fieldnumber', 'scientificname', 'station'])
    mandatory_deployment_columns = set(
        ['station_name', 'deploy_date', 'recovery_date', 'last_download'])
    if mandatory_detection_columns.issubset(detections.columns) and \
            mandatory_deployment_columns.issubset(deployments.columns):
        # Work on copies so the caller's dataframes are not mutated.
        deployments = deployments.copy(deep=True)
        detections = detections.copy(deep=True)
        # If recovery_date is not already datetime64, pull a parsable
        # yyyy-mm-dd out of the raw strings and stash any free-text notes.
        if deployments.recovery_date.dtype != np.dtype('<M8[ns]'):
            deployments['recovery_notes'] = deployments.recovery_date.str.extract(
                r'([A-Za-z\//:]+)', expand=False)
            deployments.recovery_date = deployments.recovery_date.str.extract(
                r'(\d+-\d+-\d+)', expand=False)
            deployments = deployments.replace('-', np.nan)
        # Receivers never recovered fall back to their last download date.
        deployments.loc[deployments.recovery_date.isnull(
        ), 'recovery_date'] = deployments.last_download
        deployments = deployments[~deployments.recovery_date.isnull()]
        # Cast the date columns to a datetime
        deployments.deploy_date = pd.to_datetime(deployments.deploy_date)
        deployments.recovery_date = pd.to_datetime(deployments.recovery_date)
        deployments.last_download = pd.to_datetime(deployments.last_download)
        # Calculate each receivers total days deployed
        deployments['days_deployed'] = deployments.recovery_date - \
            deployments.deploy_date
        days_active = deployments.groupby('station_name', dropna=False).agg(
            {'days_deployed': 'sum'}).reset_index()
        days_active.set_index('station_name', inplace=True)
        # Exclude all detections that are not registered with receivers in the
        # deployments
        detections = detections[detections.station.isin(
            deployments.station_name)]
        # Array-wide totals: the denominators of each per-station ratio.
        array_unique_tags = len(detections.fieldnumber.unique())
        array_unique_species = len(detections.scientificname.unique())
        days_with_detections = len(pd.to_datetime(
            detections.datecollected).dt.date.unique())
        array_days_active = (max(deployments.last_download.fillna(deployments.deploy_date.min()).max(
        ), deployments.recovery_date.max()) - min(deployments.deploy_date)).days
        station_reis = pd.DataFrame({
            'station': pd.Series(dtype='str'),
            'rei': pd.Series(dtype='float'),
            'latitude': pd.Series(dtype='float'),
            'longitude': pd.Series(dtype='float'),
        })
        detections.datecollected = pd.to_datetime(
            detections.datecollected).dt.date
        # Loop through each station in the detections and calculate REI for
        # each station
        for name, data in detections.groupby('station', dropna=False):
            receiver_unique_tags = len(data.fieldnumber.unique())
            receiver_unique_species = len(data.scientificname.unique())
            receiver_days_with_detections = len(
                pd.to_datetime(data.datecollected).dt.date.unique())
            if name in days_active.index:
                receiver_days_active = days_active.loc[name].days_deployed.days
                if receiver_days_active > 0:
                    rei = (receiver_unique_tags / array_unique_tags) * \
                        (receiver_unique_species / array_unique_species) * \
                        (receiver_days_with_detections / days_with_detections) * \
                        (array_days_active / receiver_days_active)
                    # NOTE(review): latitude/longitude are read here but are
                    # not in the mandatory column check -- confirm callers
                    # always supply them.
                    station_reis = pd.concat([station_reis, pd.DataFrame({
                        'station': [name],
                        'rei': [rei],
                        'latitude': [data.latitude.mean()],
                        'longitude': [data.longitude.mean()]})], ignore_index=True)
            else:
                print("No valid deployment record for " + name)
        # (BUG FIX: removed a leftover debug print of the full result frame.)
        # Normalize REIs to value from 0 to 1
        station_reis.rei = station_reis.rei / station_reis.rei.sum()
        # Cleanup and return the station REI's
        del deployments
        return station_reis
    else:
        # BUG FIX: report missing columns from BOTH inputs; previously only
        # detection columns were listed, even when deployments was at fault.
        missing = ((mandatory_detection_columns - set(detections.columns)) |
                   (mandatory_deployment_columns - set(deployments.columns)))
        raise GenericException("Missing required input columns: {}".format(missing))
from datetime import datetime
import numpy as np
import pandas as pd
import plotly.offline as py
from plotly.graph_objs import *
py.init_notebook_mode()
def consolidate_data(detections: pd.DataFrame):
    """Takes a set of detections and cleans and summarises them, per animal,
    day and station, for the timeline.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of animal detections

    Returns:
        pd.DataFrame: catalognumber, station, date, mean latitude/longitude,
        daily detection counts (scaled for marker sizing) and a per-animal
        color index
    """
    dets = detections.copy(deep=True)
    # Drop release records; they are not real receiver detections.
    if 'receiver' in dets.columns:
        dets = dets[~(dets.receiver == 'release')]
    dets['date'] = pd.to_datetime(dets.datecollected).dt.date
    dets['det_counts'] = 0
    summary = dets.groupby(
        ['catalognumber', 'date', 'station'], as_index=False, dropna=False).agg({
            'det_counts': 'count',
            'latitude': 'mean',
            'longitude': 'mean',
        })[['catalognumber',
            'date',
            'station',
            'det_counts',
            'latitude',
            'longitude']]
    # Scale daily counts into a marker size (count/10 + 5 pixels).
    summary.det_counts = (summary.det_counts / 10.0) + 5
    summary['date'] = pd.to_datetime(summary['date'])
    summary = summary.sort_values('date')
    # One color index per animal, numbered by first appearance in time.
    palette = pd.DataFrame(
        summary.catalognumber.unique(), columns=['catalognumber'])
    palette['color'] = palette.index + 1
    return summary.merge(palette, on='catalognumber')
def create_grid(detections: pd.DataFrame):
    """Takes a set of consolidated detections (the output from
    ``consolidate_data()``) and organizes them into a Plotly grid like format.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of catalognumber, station,
            date, latitude, longitude and detection counts by day

    Returns:
        pd.DataFrame: one 'x-/y-/size-/station-/color-/catalognumber-<date>'
        column group per distinct day, each on a fresh 0..N-1 index
    """
    daily_frames = []
    for day, day_dets in detections.groupby('date'):
        tag = str(day.date())
        frame = pd.DataFrame({
            'x-' + tag: day_dets.longitude,
            'y-' + tag: day_dets.latitude,
            'size-' + tag: day_dets.det_counts,
            'station-' + tag: day_dets.station,
            'color-' + tag: day_dets.color,
            'catalognumber-' + tag: day_dets.catalognumber,
        }).reset_index(drop=True)
        daily_frames.append(frame)
    if not daily_frames:
        return pd.DataFrame()
    # Column-wise concat; days with fewer rows pad with NaN.
    return pd.concat(daily_frames, ignore_index=False, axis=1)
def create_trace(detections: pd.DataFrame, total_grid, is_mapbox=False,
                 colorscale='Viridis'):
    """Builds the initial scatter trace for the timeline from the first day's
    grid columns.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of catalognumber, station,
            date, latitude, longitude and detection counts by day
        total_grid (pd.DataFrame): A Pandas DataFrame from ``create_grid()``
        is_mapbox (bool, optional): A boolean indicating whether to return a
            Scattermapbox or Scattergeo trace. Defaults to False.
        colorscale (str, optional): A string to indicate the color index.
            Defaults to 'Viridis'.

    Returns:
        Figure: A plotly scattergeo or a scattermapbox
    """
    # All grid columns for the first day share this date suffix.
    first_day = str(detections.date.min().date())
    trace = dict(
        lon=total_grid['x-' + first_day].dropna(),
        lat=total_grid['y-' + first_day].dropna(),
        text=total_grid['catalognumber-' + first_day].dropna(),
        mode="markers",
        hoverinfo="lon+lat+text",
        marker=dict(
            size=total_grid['size-' + first_day].dropna(),
            color=total_grid['color-' + first_day].dropna(),
            # Fix the color range over all animals so colors stay stable
            # across frames.
            cmin=detections.color.min(),
            cmax=detections.color.max(),
            autocolorscale=False,
            colorscale=colorscale
        )
    )
    return Scattermapbox(trace) if is_mapbox else Scattergeo(trace)
def create_frames(detections: pd.DataFrame, total_grid: pd.DataFrame, is_mapbox=False):
    """Create one Plotly animation frame per day in the detection date range.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of catalognumber, station, date,
            latitude, longitude and detection counts by day
        total_grid (pd.DataFrame): A Pandas DataFrame from ``create_grid()``
        is_mapbox (bool, optional): A boolean indicating whether to return a Scattermapbox or
            Scattergeo trace. Defaults to False.

    Returns:
        list: An array of Plotly Frames
    """
    frames = []
    for date in pd.date_range(detections.date.min(), detections.date.max()):
        date = date.date()
        if 'x-' + str(date) in total_grid.columns:
            # Detections occurred on this day; plot their positions.
            frame_trace = dict(
                lon=total_grid['x-' + str(date)].dropna(),
                lat=total_grid['y-' + str(date)].dropna(),
                text=total_grid['catalognumber-' + str(date)].dropna(),
                mode="markers",
                hoverinfo="lon+lat+text",
                marker=dict(
                    size=total_grid['size-' + str(date)].dropna(),
                    color=total_grid['color-' + str(date)].dropna()
                )
            )
        else:
            # No detections this day: emit a single zero-size placeholder
            # marker so the animation still advances through the date.
            frame_trace = dict(
                lon=[0],
                lat=[0],
                text=[0],
                mode="markers",
                marker=dict(
                    size=[0]
                )
            )
        if is_mapbox:
            date_trace = Scattermapbox(frame_trace)
        else:
            date_trace = Scattergeo(frame_trace)
        frame = Frame(
            name=str(date),
            data=[date_trace],
            traces=[0]
        )
        # BUG FIX: the original did ``frames = pd.concat([frames, frame])``,
        # but ``frames`` is a plain list and ``frame`` a Plotly Frame, which
        # raises a TypeError and contradicts the documented list return.
        frames.append(frame)
    return frames
def define_updatemenus(animation_time=1000, transition_time=300):
    """Build the Play/Pause button configuration for the animation.

    Args:
        animation_time (int, optional): The amount of time in milliseconds for each frame.
            Defaults to 1000.
        transition_time (int, optional): The amount of time in milliseconds between frames.
            Defaults to 300.

    Returns:
        dict: dictionary of updatemenu settings
    """
    play_button = dict(
        method="animate",
        label="Play",
        args=[
            None,
            dict(
                # redraw=False is quicker but disables some animations
                frame=dict(duration=animation_time, redraw=False),
                fromcurrent=True,
                transition=dict(duration=transition_time,
                                easing="quadratic-in-out"),
                mode="immediate",
            ),
        ],
    )
    pause_button = dict(
        method="animate",
        label="Pause",
        args=[
            # A list containing None (as opposed to bare None) is what tells
            # Plotly to pause rather than restart the animation.
            [None],
            dict(
                frame=dict(duration=0, redraw=False),
                mode="immediate",
                transition=dict(duration=0),
            ),
        ],
    )
    # General placement/appearance of the button group.
    return dict(
        type="buttons",
        showactive=False,
        x=0.1,
        y=0,
        pad=dict(t=60, r=10),
        xanchor="right",
        yanchor="top",
        direction="left",
        buttons=[play_button, pause_button],
    )
def define_sliders(detections: pd.DataFrame, animation_time=300, slider_transition_time=300):
    """Build the date slider configuration for the animation.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of catalognumber, station, date,
            latitude, longitude and detection counts by day
        animation_time (int, optional): The amount of time in milliseconds between frames.
            Defaults to 300.
        slider_transition_time (int, optional): The amount of time in milliseconds between
            frames for the slider. Defaults to 300.

    Returns:
        dict: A Plotly sliders dictionary
    """
    # One slider step per calendar day spanned by the detections; each step
    # targets the frame of the same name created by create_frames().
    steps = []
    for day in pd.date_range(detections.date.min(), detections.date.max()):
        day_label = str(day.date())
        steps.append(dict(
            method="animate",
            value=day_label,
            label=day_label,
            args=[
                [day_label],
                dict(
                    frame=dict(duration=animation_time, redraw=False),
                    transition=dict(duration=slider_transition_time),
                    mode="immediate",
                ),
            ],
        ))
    return dict(
        active=0,
        steps=steps,
        currentvalue=dict(
            font=dict(size=16),
            prefix="Year : ",
            xanchor="right",
            visible=True,
        ),
        transition=dict(
            duration=slider_transition_time,
            easing="cubic-in-out",
        ),
        # Placement below the plot area.
        x=0.1,
        y=0,
        pad=dict(t=40, b=10),
        len=0.9,
        xanchor="left",
        yanchor="top",
    )
def define_layout(detections: pd.DataFrame, title, plotly_geo=None, mapbox_token=None,
                  style='light'):
    """Build the Plotly layout, using Mapbox tiles when a token is supplied.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of catalognumber, station, date,
            latitude, longitude and detection counts by day
        title (str): the title of the plot
        plotly_geo (dict, optional): an optional dictionary to control the
            geographic aspects of the plot. Defaults to None.
        mapbox_token (str, optional): A string of mapbox access token. Defaults to None.
        style (str, optional): The style for the Mapbox tileset:
            https://plot.ly/python/reference/#layout-mapbox-style.
            Defaults to 'light'.

    Returns:
        dict: A plotly layout dictionary
    """
    center_lon = detections.longitude.mean()
    center_lat = detections.latitude.mean()

    if mapbox_token is not None:
        # Tile-based rendering: plotly_geo is ignored in this mode.
        return dict(title=title,
                    autosize=True,
                    hovermode='closest',
                    showlegend=False,
                    mapbox=dict(
                        accesstoken=mapbox_token,
                        center=dict(
                            lon=center_lon,
                            lat=center_lat
                        ),
                        zoom=5,
                        style=style
                    ))

    if plotly_geo is None:
        # Default grey-ocean/white-land geographic styling.
        plotly_geo = dict(
            showland=True,
            landcolor="rgb(255, 255, 255)",
            showocean=True,
            oceancolor="rgb(212,212,212)",
            showlakes=True,
            lakecolor="rgb(212,212,212)",
            showrivers=True,
            rivercolor="rgb(212,212,212)",
            resolution=50,
            showcoastlines=False,
            showframe=False,
            projection=dict(
                type='mercator',
            )
        )
    # Center the view and clamp the axes to the extent of the detections.
    plotly_geo.update(
        center=dict(
            lon=center_lon,
            lat=center_lat
        ),
        lonaxis=dict(
            range=[detections.longitude.min(), detections.longitude.max()],
        ),
        lataxis=dict(
            range=[detections.latitude.min(), detections.latitude.max()],
        )
    )
    return dict(
        geo=plotly_geo,
        title=title,
        hovermode='closest',
        showlegend=False,
    )
def timeline(detections: pd.DataFrame, title='Timeline', height=700, width=1000,
             ipython_display=True, mapbox_token=None, plotly_geo=None,
             animation_time=1000, transition_time=300,
             slider_transition_time=300, colorscale='Rainbow', style='light'):
    """Render an animated detection timeline as a Plotly map.

    Args:
        detections (pd.DataFrame): A Pandas DataFrame of catalognumber, station, date,
            latitude, longitude and detection counts by day
        title (str, optional): the title of the plot. Defaults to 'Timeline'.
        height (int, optional): the height of the plotly. Defaults to 700.
        width (int, optional): the width of the plotly. Defaults to 1000.
        ipython_display (bool, optional): a boolean to show in a notebook. Defaults to True.
        mapbox_token (str, optional): A string of mapbox access token. Defaults to None.
        plotly_geo (dict, optional): an optional dictionary to control the
            geographic aspects of the plot. Defaults to None.
        animation_time (int, optional): The amount of time in milliseconds for each frame.
            Defaults to 1000.
        transition_time (int, optional): The amount of time in milliseconds between frames.
            Defaults to 300.
        slider_transition_time (int, optional): The amount of time in milliseconds between
            frames for the slider. Defaults to 300.
        colorscale (str, optional): A string to indicate the color index. See here for options:
            https://community.plot.ly/t/what-colorscales-are-available-in-plotly-and-which-are-the-default/2079.
            Defaults to 'Rainbow'.
        style (str, optional): The style for the Mapbox tileset:
            https://plot.ly/python/reference/#layout-mapbox-style
            Defaults to 'light'.

    Returns:
        (None|Any): A plotly object or None if ipython_display is True
    """
    detections = consolidate_data(detections)
    total_grid = create_grid(detections)

    # A Mapbox token switches every component to tile-based rendering;
    # folding the flag removes the duplicated if/else branches.
    is_mapbox = mapbox_token is not None
    trace = create_trace(detections, total_grid,
                         is_mapbox=is_mapbox, colorscale=colorscale)
    frames = create_frames(detections, total_grid, is_mapbox=is_mapbox)

    layout = define_layout(detections, title, plotly_geo=plotly_geo,
                           mapbox_token=mapbox_token, style=style)
    layout.update(
        updatemenus=[define_updatemenus(animation_time, transition_time)],
        sliders=[define_sliders(detections, animation_time,
                                slider_transition_time)],
        height=height,
        width=width,
    )

    fig = {'data': [trace], 'layout': layout, 'frames': frames}
    if ipython_display:
        py.init_notebook_mode()
        return py.iplot(fig)
    # NOTE: the original source line carried dataset-extraction residue
    # fused onto this statement; it has been removed.
    return py.plot(fig, filename="{}.html".format(
        title.lower().replace(" ", "_")))
import math
from datetime import datetime
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
import resonate.compress as cp
def total_days_diff(detections: pd.DataFrame):
    """Determine the total days difference.

    The difference is taken between the minimal startdate of every detection
    and the maximum enddate of every detection. Both are parsed into
    datetimes, subtracted, and the resulting timedelta is expressed in days
    (seconds / 86400). Returns a floating point number of days
    (i.e. 503.76834).

    Args:
        detections (pd.DataFrame): Pandas DataFrame pulled from the compressed detections CSV

    Returns:
        float: A float in the number of days
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    earliest = datetime.strptime(detections.startdate.min(), fmt)
    latest = datetime.strptime(detections.enddate.max(), fmt)
    return (latest - earliest).total_seconds() / 86400.0
def total_days_count(detections: pd.DataFrame):
    """Determine the number of distinct days on which detections were seen.

    Both the startdate and enddate columns are converted (in place) to plain
    calendar dates, then the number of unique dates occurring in either
    column is returned as an integer.

    *** NOTE ***
    Possible rounding error may occur as a detection on 2016-01-01 23:59:59
    and a detection on 2016-01-02 00:00:01 would be counted as days when it
    is really 2-3 seconds.

    Args:
        detections (pd.DataFrame): Pandas DataFrame pulled from the compressed detections CSV

    Returns:
        int: An int in the number of days
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    # Truncate both timestamp columns to calendar dates; this mutates the
    # frame passed in, so callers hand over a copy (see get_days()).
    for column in ('startdate', 'enddate'):
        detections[column] = detections[column].apply(
            datetime.strptime, args=(fmt,)).apply(datetime.date)
    unique_days = pd.unique(detections[['startdate', 'enddate']].values.ravel())
    return unique_days.size
def aggregate_total_with_overlap(detections: pd.DataFrame):
    """Aggregate the timedelta of startdate and enddate of each detection into
    a final timedelta, then return a float of the number of days. If the
    startdate and enddate are the same, a timedelta of one second is assumed.

    Args:
        detections (pd.DataFrame): Pandas DataFrame pulled from the compressed detections CSV

    Returns:
        float: A float in the number of days
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    # Parse in place (callers pass a copy; see get_days()) and derive the
    # per-row duration column, matching the original's side effects.
    detections['startdate'] = detections['startdate'].apply(
        datetime.strptime, args=(fmt,))
    detections['enddate'] = detections['enddate'].apply(
        datetime.strptime, args=(fmt,))
    detections['timedelta'] = detections['enddate'] - detections['startdate']
    # Vectorized replacement for the original iterrows() loop: clamp
    # zero-length detections to one second, then sum all durations at once.
    clamped = detections['timedelta'].where(
        detections['timedelta'] > pd.Timedelta(0), pd.Timedelta('1 second'))
    return clamped.sum().total_seconds() / 86400.0
def aggregate_total_no_overlap(detections: pd.DataFrame):
    """Aggregate the timedelta of startdate and enddate, excluding overlap
    between detections.

    Any overlap between two detections is merged into a single detection
    using the earlier startdate and the latest enddate. If the startdate and
    enddate are the same, a timedelta of one second is assumed.

    Args:
        detections (pd.DataFrame): pandas DataFrame pulled from the compressed detections CSV

    Returns:
        float: A float in the number of days
    """
    total = pd.Timedelta(0)
    # sort and convert datetimes; note both column assignments mutate the
    # frame passed in (callers hand over a copy, see get_days()).
    # Sorting descending means list.pop() below (which takes from the end)
    # yields detections in chronological order.
    detections = detections.sort_values(
        by='startdate', ascending=False).reset_index(drop=True)
    detections['startdate'] = detections['startdate'].apply(
        datetime.strptime, args=("%Y-%m-%d %H:%M:%S",))
    detections['enddate'] = detections['enddate'].apply(
        datetime.strptime, args=("%Y-%m-%d %H:%M:%S",))
    # A stack is used as an easy way to organize and maintain the detections:
    # each element is a row rendered as a {column: value} dict.
    detection_stack = list(detections.T.to_dict().values())
    # Run the loop while the stack is not empty
    while len(detection_stack) > 0:
        current_time_block = detection_stack.pop()
        # Make sure the current element is not empty — a False sentinel is
        # pushed back when the stack runs out (see below), and popping it
        # here is what terminates the final iteration.
        if current_time_block:
            # Pop the next item if the stack is not empty
            if len(detection_stack) > 0:
                next_time_block = detection_stack.pop()
            else:
                next_time_block = False
            # Check to see if we are down to the last item in the stack or there is no overlap
            if not next_time_block or next_time_block['startdate'] > current_time_block['enddate']:
                # Create the timedelta and add it to the total, assuming 1 second if the timedelta equals 0
                diff = pd.Timedelta(0)
                diff += current_time_block['enddate'] - \
                    current_time_block['startdate']
                if diff == pd.Timedelta(0):
                    diff = pd.Timedelta('1 second')
                total += diff
                # Add the next block back into the stack so that it can be used in the next iteration
                detection_stack.append(next_time_block)
            else:
                # If there is overlap take a new endate, eliminating the overlap, and add it back into the stack for the next iteration.
                # The merged block keeps current's (earlier) startdate, which
                # is valid because the stack is processed in startdate order.
                current_time_block['enddate'] = max(
                    [current_time_block['enddate'], next_time_block['enddate']])
                detection_stack.append(current_time_block)
    # Return the value as a float in days
    return total.total_seconds() / 86400.0
def get_days(dets: pd.DataFrame, calculation_method='kessel'):
    """Determine which calculation method to use for the residency index.

    Wrapper method for the calculation methods above.

    Args:
        dets (pd.DataFrame): A Pandas DataFrame pulled from the compressed detections CSV
        calculation_method (str, optional): determines which method above will be used to
            count total time and station time. Defaults to 'kessel'.

    Returns:
        int: An int in the number of days
    """
    if calculation_method == 'aggregate_with_overlap':
        return aggregate_total_with_overlap(dets)
    if calculation_method == 'aggregate_no_overlap':
        return aggregate_total_no_overlap(dets)
    if calculation_method == 'timedelta':
        return total_days_diff(dets)
    # Any other value (including the default 'kessel') counts distinct days.
    return total_days_count(dets)
def get_station_location(station: str, detections: pd.DataFrame):
    """Return the longitude and latitude of a station/receiver given the
    station and the table name.

    Args:
        station (str): String that contains the station name
        detections (pd.DataFrame): the table name in which to find the station

    Returns:
        pd.DataFrame: A Pandas DataFrame of station, latitude, and longitude
    """
    # Only the first matching row is needed; every detection at a station
    # carries the same coordinates.
    first_match = detections.loc[detections.station == station].head(1)
    return first_match[['station', 'longitude', 'latitude']]
def plot_ri(ri_data: pd.DataFrame, ipython_display=True,
            title='Bubble Plot', height=700,
            width=1000, plotly_geo=None, filename=None,
            marker_size=6, scale_markers=False,
            colorscale='Viridis', mapbox_token=None):
    """Creates a bubble plot of residency index data.

    Args:
        ri_data (pd.DataFrame): A Pandas DataFrame generated from ``residency_index()``
        ipython_display (bool, optional): a boolean to show in a notebook. Defaults to True.
        title (str, optional): the title of the plot. Defaults to 'Bubble Plot'.
        height (int, optional): the height of the plotly. Defaults to 700.
        width (int, optional): the width of the plotly. Defaults to 1000.
        plotly_geo (dict, optional): an optional dictionary to control the
            geographic aspects of the plot. Defaults to None.
        filename (str, optional): Plotly filename to write to. Defaults to None.
        marker_size (int, optional): An int to indicate the diameter in pixels. Defaults to 6.
        scale_markers (bool, optional): A boolean to indicate whether or not markers are
            scaled by their value. Defaults to False.
        colorscale (str, optional): A string to indicate the color index. See here for options:
            https://community.plot.ly/t/what-colorscales-are-available-in-plotly-and-which-are-the-default/2079. Defaults to 'Viridis'.
        mapbox_token (str, optional): A string of mapbox access token. Defaults to None.

    Returns:
        (None|Any): A plotly geoscatter or None if ipython_display is True
    """
    # Ascending sort so stations with the highest RI are drawn last (on top).
    ri_data = ri_data.sort_values('residency_index')
    map_type = 'scattergeo'
    if mapbox_token is not None:
        # A Mapbox token switches the plot to tile-based rendering.
        map_type = 'scattermapbox'
        mapbox = dict(
            accesstoken=mapbox_token,
            center=dict(
                lon=ri_data.longitude.mean(),
                lat=ri_data.latitude.mean()
            ),
            zoom=5,
            style='light'
        )
    if scale_markers:
        # Marker diameter proportional to RI; +5 keeps near-zero values visible.
        marker_size = (ri_data.residency_index * marker_size + 5).tolist()
    else:
        # Fixed diameter for every station.
        marker_size += 5
    data = [
        {
            'lon': ri_data.longitude.tolist(),
            'lat': ri_data.latitude.tolist(),
            'text': ri_data.station + " : " + ri_data.residency_index.astype(str),
            'mode': 'markers',
            'marker': {
                'color': ri_data.residency_index.tolist(),
                'size': marker_size,
                'showscale': True,
                'colorscale': colorscale,
                'colorbar': {
                    'title': 'Detection Count'
                }
            },
            'type': map_type
        }
    ]
    if plotly_geo is None:
        # Default grey-ocean/white-land geographic styling.
        plotly_geo = dict(
            showland=True,
            landcolor="rgb(255, 255, 255)",
            showocean=True,
            oceancolor="rgb(212,212,212)",
            showlakes=True,
            lakecolor="rgb(212,212,212)",
            showrivers=True,
            rivercolor="rgb(212,212,212)",
            resolution=50,
            showcoastlines=False,
            showframe=False,
            projection=dict(
                type='mercator',
            )
        )
    # Center the view and clamp the axes to the extent of the stations.
    # NOTE(review): this mutates a caller-supplied plotly_geo dict in place.
    plotly_geo.update(
        center=dict(
            lon=ri_data.longitude.mean(),
            lat=ri_data.latitude.mean()
        ),
        lonaxis=dict(
            range=[ri_data.longitude.min(), ri_data.longitude.max()],
        ),
        lataxis=dict(
            range=[ri_data.latitude.min(), ri_data.latitude.max()],
        )
    )
    if mapbox_token is None:
        layout = dict(
            geo=plotly_geo,
            title=title
        )
    else:
        # plotly_geo is ignored in Mapbox mode.
        layout = dict(title=title,
                      autosize=True,
                      hovermode='closest',
                      mapbox=mapbox
                      )
    if ipython_display:
        # Render inline in a Jupyter notebook.
        layout.update(
            height=height,
            width=width
        )
        fig = {'data': data, 'layout': layout}
        py.init_notebook_mode()
        return py.iplot(fig)
    else:
        # Write a standalone HTML file instead.
        fig = {'data': data, 'layout': layout}
        return py.plot(fig, filename=filename)
def residency_index(detections: pd.DataFrame, calculation_method='kessel'):
    """Take in a detections DataFrame and determine the residency index for
    each station.

    Residence Index (RI) was calculated as the number of days an individual
    fish was detected at each receiver station divided by the total number of
    days the fish was detected anywhere on the acoustic array. - Kessel et al.

    Args:
        detections (pd.DataFrame): Dataframe of detections
        calculation_method (str, optional): determines which method above will be used to
            count total time and station time. Defaults to 'kessel'.

    Returns:
        pd.DataFrame: A residence index DataFrame with the following columns

            * days_detected
            * latitude
            * longitude
            * residency_index
            * station
    """
    dets = cp.compress_detections(detections)
    # Converting start and end date to strings (the day-counting helpers
    # expect string timestamps they can parse).
    dets['startdate'] = dets['startdate'].astype(str)
    dets['enddate'] = dets['enddate'].astype(str)
    # Remove any release locations
    dets = dets[~dets['startunqdetecid'].astype(str).str.contains("release")]
    print('Creating the residency index using the {0} method.\nPlease be patient, I am currently working...'.format(
        calculation_method))
    # Determine the total days from a copy of the DataFrame, because the
    # helpers mutate their input columns in place.
    total_days = get_days(dets.copy(), calculation_method)
    # For each unique station determine the total number of days there were
    # detections at the station, and its RI relative to the whole array.
    station_list = []
    for station in dets['station'].unique():
        st_dets = pd.DataFrame(dets[dets['station'] == station])
        total = get_days(st_dets.copy(), calculation_method)
        location = get_station_location(station, detections)
        station_list.append({
            'days_detected': total,
            'latitude': location['latitude'].values[0],
            'longitude': location['longitude'].values[0],
            'residency_index': (total / (float(total_days))),
            'station': station,
        })
    # Convert the station list to a DataFrame, sorted by days detected.
    all_stations = pd.DataFrame(station_list).sort_values(
        by='days_detected', ascending=False).reset_index(drop=True)
    print("OK!")
    # NOTE: the original return line carried dataset-extraction residue
    # fused onto it; it has been removed.
    return all_stations
================================================================
Creating and Exercising a Custom Single Degree of Freedom System
================================================================
.. note::
You can download this example as a Python script:
:jupyter-download:script:`custom-sdof-system` or Jupyter notebook:
:jupyter-download:notebook:`custom-sdof-system`.
Creating a new system
=====================
The first step is to import a "blank" ``SingleDoFLinearSystem`` and initialize
it.
.. jupyter-execute::
from resonance.linear_systems import SingleDoFLinearSystem
msd_sys = SingleDoFLinearSystem()
Now define the constant variables for the system. In this case, the single
degree of freedom system will be described by its mass, natural frequency, and
damping ratio.
.. jupyter-execute::
msd_sys.constants['m'] = 1.0 # kg
msd_sys.constants['fn'] = 1.0 # Hz
msd_sys.constants['zeta'] = 0.1 # unitless
msd_sys.constants
Define the coordinate and speed. The software assumes that the speed is defined
as the time derivative of the coordinate, i.e. :math:`v = \dot{x}`.
.. jupyter-execute::
msd_sys.coordinates['x'] = 1.0 # m
msd_sys.speeds['v'] = 0.0 # m/s
.. jupyter-execute::
msd_sys.coordinates
.. jupyter-execute::
msd_sys.speeds
.. jupyter-execute::
msd_sys.states
Now that the coordinate, speed, and constants are defined the equations of
motion can be defined. For a single degree of freedom system a Python function
must be defined that uses the system's constants to compute the coefficients to
the canonical second order equation, :math:`m \dot{v} + c v + k x = 0`.
The inputs to the function are the constants. The variable names should match
those defined above on the system.
.. jupyter-execute::
import numpy as np
def calculate_canonical_coefficients(m, fn, zeta):
"""Returns the system's mass, damping, and stiffness coefficients given
the system's constants."""
wn = 2*np.pi*fn
k = m*wn**2
c = zeta*2*wn*m
return m, c, k
msd_sys.canonical_coeffs_func = calculate_canonical_coefficients
Once this function is defined and added to the system :math:`m,c,k` can be
computed using:
.. jupyter-execute::
msd_sys.canonical_coefficients()
The period of the natural frequency can be computed with:
.. jupyter-execute::
msd_sys.period()
All information about the system can be displayed:
.. jupyter-execute::
msd_sys
Simulating the free response
============================
The ``free_response()`` function simulates the now fully defined system given
as an initial value problem. One or both of the coordinates and speeds must be
set to provide a free response. The following shows the response to both
:math:`x` and :math:`v` being set to some initial values.
.. jupyter-execute::
msd_sys.coordinates['x'] = -5.0
msd_sys.speeds['v'] = 8.0
``free_response()`` returns a Pandas ``DataFrame`` with the time values as the
index and columns for the coordinate, speed, and additionally the time
derivative of the speed (acceleration in this case). See
https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html for
an introduction to ``DataFrame``.
.. jupyter-execute::
trajectories = msd_sys.free_response(5.0)
trajectories
There are a variety of plotting methods associated with the ``DataFrame`` that
can be used to quickly plot the trajectories of the coordinate, speed, and
acceleration. See more about plotting ``DataFrames`` at
https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html.
.. jupyter-execute::
axes = trajectories.plot(subplots=True)
Response to change in constants
-------------------------------
This system is *parameterized* by its mass, natural frequency, and damping
ratio. It can be useful to plot the trajectories of position for different
values of :math:`\zeta` for example.
Set the initial conditions back to simply stretching the spring 1 meter:
.. jupyter-execute::
msd_sys.coordinates['x'] = 1.0
msd_sys.speeds['v'] = 0.0
Now change :math:`\zeta` to different values and simulate the free response to
see the different damping regimes:
Un-damped, :math:`\zeta=0`
.. jupyter-execute::
msd_sys.constants['zeta'] = 0.0 # Unitless
trajectories = msd_sys.free_response(5.0)
axes = trajectories['x'].plot()
Under-damped, :math:`0<\zeta<1`
.. jupyter-execute::
msd_sys.constants['zeta'] = 0.5 # Unitless
trajectories = msd_sys.free_response(5.0)
axes = trajectories['x'].plot()
Critically damped, :math:`\zeta=1`
.. jupyter-execute::
msd_sys.constants['zeta'] = 1.0 # Unitless
trajectories = msd_sys.free_response(5.0)
axes = trajectories['x'].plot()
Over-damped, :math:`\zeta>1`
.. jupyter-execute::
msd_sys.constants['zeta'] = 2.0 # Unitless
trajectories = msd_sys.free_response(5.0)
axes = trajectories['x'].plot()
Adding measurements
===================
It is often useful to calculate the trajectories of other quantities. Systems
in resonance allow "measurements" to be defined. These measurements are
functions of the constants, coordinates, speeds, and/or time. To create a new
measurement, create a function that returns the quantity of interest. Here a
measurement function is defined that calculates the kinetic energy
(:math:`\frac{1}{2}mv^2`) of the system and then added to the system with
variable name ``KE``.
.. jupyter-execute::
def calculate_kinetic_energy(m, v):
return m*v**2/2
msd_sys.add_measurement('KE', calculate_kinetic_energy)
Once added, the measurement will be computed and added to the ``DataFrame``
containing the trajectories:
.. jupyter-execute::
msd_sys.constants['zeta'] = 0.5 # Unitless
trajectories = msd_sys.free_response(5.0)
trajectories
and can be plotted like any other column:
.. jupyter-execute::
axes = trajectories['KE'].plot()
Plotting the configuration
==========================
``resonance`` systems can plot and animate at the system's configuration. To do
so, a custom function that generates a configuration plot using matplotlib must
be defined and associated with the system. Below a plot is created to show an
orange block representing the mass and a spring attached to the block. The
``spring()`` function conveniently provides the x and y data needed to plot the
spring.
.. jupyter-execute::
import matplotlib.pyplot as plt
from resonance.functions import spring
# create a new constant to describe the block's dimension, l
msd_sys.constants['l'] = 0.2 # m
def create_configuration_figure(x, l):
# create a figure with one or more axes
fig, ax = plt.subplots()
# the `spring()` function creates the x and y data for plotting a simple
# spring
spring_x_data, spring_y_data = spring(0.0, x, l/2, l/2, l/8, n=3)
lines = ax.plot(spring_x_data, spring_y_data, color='purple')
spring_line = lines[0]
# add a square that represents the mass
square = plt.Rectangle((x, 0.0), width=l, height=l, color='orange')
ax.add_patch(square)
# add a vertical line representing the spring's attachment point
ax.axvline(0.0, linewidth=4.0, color='black')
# set axis limits and aspect ratio such that the entire motion will appear
ax.set_ylim((-l/2, 3*l/2))
ax.set_xlim((-np.abs(x) - l, np.abs(x) + l))
ax.set_aspect('equal')
ax.set_xlabel('$x$ [m]')
ax.set_ylabel('$y$ [m]')
# this function must return the figure as the first item
# but you also may return any number of objects that you'd like to have
# access to modify, e.g. for an animation update
return fig, ax, spring_line, square
# associate the function with the system
msd_sys.config_plot_func = create_configuration_figure
Now the configuration plot can be generated with ``plot_configuration()``. This
returns the same results as the function defined above.
.. jupyter-execute::
fig, ax, spring_line, square = msd_sys.plot_configuration()
Animating the configuration
===========================
Reset to un-damped motion and simulate again
.. jupyter-execute::
msd_sys.constants['zeta'] = 0.1
trajectories = msd_sys.free_response(5.0)
To animate the configuration, create a function that updates the various
matplotlib objects using any constants, coordinates, speeds, and/or the special
variable ``time``. The last input arguments to this function must be all of the
extra outputs of ``plot_configuration()`` (excluding the figure which is the
first output). The order of these must match the order of the
``plot_configuration()`` outputs.
.. jupyter-execute::
def update_configuration(x, l, time, # any variables you need for updating
ax, spring_line, square): # returned items from plot_configuration() in same order
ax.set_title('{:1.2f} [s]'.format(time))
xs, ys = spring(0.0, x, l/2, l/2, l/8, n=3)
spring_line.set_data(xs, ys)
square.set_xy((x, 0.0))
msd_sys.config_plot_update_func = update_configuration
Now that the update function is associated, ``animate_configuration()`` will
create the animation. Here the frames-per-second are set to an explicit value.
.. jupyter-execute::
animation = msd_sys.animate_configuration(fps=30)
If using the notebook interactively with ``%matplotlib widget`` set, the
animation above will play. But ``animate_configuration()`` returns a matplotlib
``FuncAnimation`` object which has other options that allow the generation of
different formats, see
https://matplotlib.org/api/_as_gen/matplotlib.animation.FuncAnimation.html for
options. One option is to create a Javascript/HTML versions that displays
nicely in the notebook with different play options:
.. jupyter-execute::
from IPython.display import HTML
HTML(animation.to_jshtml(fps=30))
Response to sinusoidal forcing
==============================
The response to a sinusoidal forcing input, i.e.:
.. math::
m\dot{v} + cv + kx = F_o \sin(\omega t)
can be simulated with ``sinusoidal_forcing_response()``. This works the same as
``free_response`` except it requires a forcing amplitude and frequency.
.. jupyter-execute::
msd_sys.coordinates['x'] = 0.0 # m
msd_sys.speeds['v'] = 0.0 # m/s
Fo = 10.0
omega = 2*np.pi*3.0 # rad/s
forced_trajectory = msd_sys.sinusoidal_forcing_response(Fo, omega, 5.0)
Note that there is now a ``forcing_function`` column. This is the applied
forcing function.
.. jupyter-execute::
forced_trajectory
The trajectories can be plotted and animated as above:
.. jupyter-execute::
axes = forced_trajectory.plot(subplots=True)
.. jupyter-execute::
fps = 30
animation = msd_sys.animate_configuration(fps=fps)
.. jupyter-execute::
HTML(animation.to_jshtml(fps=fps))
Frequency response
==================
The frequency response to sinusoidal forcing at different frequencies can be
plotted with ``frequency_response_plot()`` for a specific forcing amplitude.
.. jupyter-execute::
axes = msd_sys.frequency_response_plot(Fo)
Response to periodic forcing
============================
Any periodic forcing function can be applied given the Fourier series
coefficients of the approximating function. The following function calculates
the Fourier series coefficients for a "sawtooth" shaped periodic input.
.. jupyter-execute::
def sawtooth_fourier_coeffs(A, N):
"""
A : sawtooth amplitude, Newtons
N : number of Fourier series terms
"""
n = np.arange(1, N+1)
an = A*(8*(-1)**n - 8) / 2 / np.pi**2 / n**2
return 0, an, np.zeros_like(an)
a0, an, bn = sawtooth_fourier_coeffs(Fo, 20)
These coefficients can be provided to ``periodic_forcing_response()`` to
simulate the response:
.. jupyter-execute::
wb = 2*np.pi*3.0 # rad/s
trajectory = msd_sys.periodic_forcing_response(a0, an, bn, wb, 5.0)
trajectory
.. jupyter-execute::
axes = trajectory.plot(subplots=True)
.. jupyter-execute::
fps = 30
animation = msd_sys.animate_configuration(fps=fps)
.. jupyter-execute::
HTML(animation.to_jshtml(fps=fps))
| /resonance-0.22.0.tar.gz/resonance-0.22.0/docs/custom-sdof-system.rst | 0.947733 | 0.800458 | custom-sdof-system.rst | pypi |
===============
Topical Outline
===============
The course is taught over 20 two hour class periods during a quarter system of
10 weeks of instructions and 1 week for examinations. One of the 20 class
periods is reserved for a midterm examination, 2 hours are reserved for exam
reviews leaving 36 hours of in class time. The following lists the topics for
each of the class periods which correspond to the detailed headers below:
+----+----------+-----------------------+
| L# | Date | Notebook # |
+====+==========+=======================+
| 01 | W Sep 27 | 1, 2 |
+----+----------+-----------------------+
| 02 | M Oct 02 | 3 |
+----+----------+-----------------------+
| 03 | W Oct 04 | 4 |
+----+----------+-----------------------+
| 04 | M Oct 09 | 5 |
+----+----------+-----------------------+
| 05 | W Oct 11 | 6 |
+----+----------+-----------------------+
| 06 | M Oct 16 | 7 |
+----+----------+-----------------------+
| 07 | W Oct 18 | 8 |
+----+----------+-----------------------+
| 08 | M Oct 23 | 9 |
+----+----------+-----------------------+
| NA | T Oct 24 | Drop Date |
+----+----------+-----------------------+
| 09 | W Oct 25 | 10 |
+----+----------+-----------------------+
| 10 | M Oct 30 | 11 |
+----+----------+-----------------------+
| 11 | W Nov 01 | 12 |
+----+----------+-----------------------+
| 12 | M Nov 06 | Exam |
+----+----------+-----------------------+
| 13 | W Nov 08 | 13 |
+----+----------+-----------------------+
| NA | F Nov 10 | Veterans Day Holiday |
+----+----------+-----------------------+
| 14 | M Nov 13 | 14 |
+----+----------+-----------------------+
| 15 | W Nov 15 | 15 |
+----+----------+-----------------------+
| 16 | M Nov 20 | 16 |
+----+----------+-----------------------+
| 17 | W Nov 22 | 17 |
+----+----------+-----------------------+
| NA | R Nov 23 | Thanksgiving Holiday |
+----+----------+-----------------------+
| NA | F Nov 24 | Thanksgiving Holiday |
+----+----------+-----------------------+
| 18 | M Nov 27 | 18 |
+----+----------+-----------------------+
| 19 | W Nov 29 | 19 |
+----+----------+-----------------------+
| 20 | M Dec 04 | 20 |
+----+----------+-----------------------+
| 21 | W Dec 06 | 21 |
+----+----------+-----------------------+
| NA | T Dec 12 | Final Exam @ 6:00 PM |
+----+----------+-----------------------+
Analyzing Vibrating Systems
===========================
1. Introduction to Jupyter
--------------------------
This notebook introduces students to the Jupyter notebook environment and
establishes good practices for creating computational notebooks and scientific
python programming.
After the completion of this assignment students will be able to:
- open Jupyter notebooks and operate basic functionality
- fetch assignments, complete exercises, submit work and view the graded work
- solve basic scientific python problems
- create a well formatted and fully executing notebook
2. Introduction to vibrations: Book Balancing on a Cup
------------------------------------------------------
This notebook introduces a single degree of freedom vibratory system in which a
textbook balances on a cylindrical cup. The system is implemented as a model
that students can interact with in order to visualize its free response and
compare to the demonstration in the classroom.
After the completion of this assignment students will be able to:
- visualize a system's free response
- estimate the period of a sinusoidal vibration from a time series
- compare a computer simulation result to experimental result
- interactively adjust the book inertia to see the affect on system response
- understand the concept of natural frequency and its relationship to
mass/inertia
3. Measuring a Bicycle Wheel's Inertia
--------------------------------------
This notebook introduces the concept of using vibratory characteristics to
estimate parameters of an existing system. It discusses how vibrations can be
measured and how these measurements might relate to parameters of interest,
such as the inertia of a bicycle wheel.
After the completion of this assignment students will be able to:
- describe different methods of measuring vibrations
- choose appropriate sensors and sensor placement
- visualize the vibrational measurements
- use curve fitting to estimate the period of oscillation
- understand the concept of natural frequency and its relationship to
mass/inertia and stiffness
- state two of the three fundamental characteristics that govern vibration
(mass/inertia and stiffness)
- use frequency domain techniques to characterize a system's behavior
4. Clock Pendulum with Air Drag Damping
---------------------------------------
This notebook introduces the third fundamental characteristic of vibration:
energy dissipation through damping. A simple pendulum model is implemented that
allows students to vary the damping parameters and visualize the three regimes
of linear damping.
After the completion of this assignment students will be able to:
- understand the concept of damped natural frequency and its relationship to
mass/inertia, stiffness, and damping
- state the three fundamental characteristics that make a system vibrate
- compute the free response of a linear system with viscous-damping in all
three damping regimes
- identify critically damped, underdamped, and overdamped behavior
- determine whether a linear system is over/under/critically damped given its
dynamic properties
- understand the difference between underdamping, overdamping, and critical
damping
5. Clock Pendulum with Air Drag and Joint Friction
--------------------------------------------------
This notebook builds on the previous one by introducing nonlinear damping
through Coulomb friction. Students will be able to work with both a linear and
nonlinear version of the same system (pendulum) in order to compare the free
response in both cases.
After the completion of this assignment students will be able to:
- identify the function that governs the decay envelope
- compare this non-linear behavior to the linear behavior
- estimate the period of oscillation
- compute the free response of a non-linear system with viscous and coulomb
damping
6. Vertical Vibration of a Bus Driver's Seat
--------------------------------------------
This notebook introduces external forcing of a vibratory system, where the
external force is modeled as a sinusoidal input to the bottom of a bus driver's
seat.
After the completion of this assignment students will be able to:
- excite a system with a sinusoidal input
- understand the difference in transient and steady state solutions
- use autocorrelation to determine period
- relate the frequency response to the time series
- create a frequency response plot
- define resonance and determine the parameters that cause resonance
7. Vertical vibration of a Bus Driver's Seat with a Leaf Spring
---------------------------------------------------------------
This notebook builds on the previous one by replacing the linear spring with
a realistic leaf spring.
After the completion of this assignment students will be able to:
- create a force versus displacement curve for a leaf spring
- describe the time response and frequency response of a non-linear system
- show that sinusoidal fitting does not necessarily describe non-linear
vibration
8. Bicycle Lateral Vibration
----------------------------
This notebook introduces a simple lean and steer bicycle model as an example of
a system with multiple degrees of freedom. Coupling and modes are discussed
from a data analysis perspective.
After the completion of this assignment students will be able to:
- get a sense of the coupling of input to output through frequency response
plots
- simulate a 2 DoF vibratory model
- identify a MDoF system and see effects of coupling through time and frequency
domain
- determine if a general 2 DoF is stable
- sweep through input frequencies to discover modal frequencies
9. Simulating a building during an earthquake
---------------------------------------------
This notebook uses a lumped parameter multi-story building model as a
many-degree-of-freedom system with all oscillatory modes.
After the completion of this assignment students will be able to:
- examine time domain and frequency coupling with MDoF
- sweeping through frequencies to discover modal frequencies
- visualize the system's response at modal frequencies to see mode shapes
Modeling Vibrating Systems
==========================
10. Modeling the Bicycle Wheel Inertia Measurement System
---------------------------------------------------------
This notebook walks through modeling two different test rigs for determining
the vibrational characteristics of a bicycle wheel. After coming up with a
simple model the students will use the canonical linear form of the equations
of motion to derive various vibrational parameters.
After the completion of this assignment students will be able to:
- derive the equations of motion of a compound pendulum with Lagrange's method
- derive the equations of motion of a torsional pendulum with Lagrange's method
- linearize the compound pendulum equation
- put equations in canonical form
- review solutions to ODEs
11. Modeling a non-linear spring
--------------------------------
TODO : Think this out more.
After the completion of this assignment students will be able to:
- derive the nonlinear equations of motion of a system with
  simple kinematics with Lagrange's method
12. Modeling the car on the bumpy road
--------------------------------------
Here we will present the base excitation single degree of freedom system and
the students will derive the equations of motion. They will then explore the
displacement and force transmissibility frequency response functions.
After the completion of this assignment students will be able to:
- derive the linear equations of motion of a system with simple kinematics using
  Lagrange's method
- create a system object with custom equations of motion and simulate the system
13. Modeling the book on a cup
------------------------------
The book balancing on the cup will be revisited. The students will derive the
equations of motion which require more complex kinematic analysis and explore
the analytical equations of motion. The stability thresholds will be determined
as well as the period from the linear model.
After the completion of this assignment students will be able to:
- derive the equations of motion of a system with non-trivial kinematics with
  Lagrange's method
- apply a linearization procedure to non-linear equations of motion
- determine the stability of a linear system analytically and verify through
simulation
14. Balancing your car tire at the autoshop
-------------------------------------------
The mass imbalance problem will be presented through the analytical model of an
unbalance car tire. The frequency response will be derived and examined.
After the completion of this assignment students will be able to:
- derive the equations of motion for a mass imbalance system
15. Engine cam non-sinusoidal periodic forcing
----------------------------------------------
Using an engine cam piecewise periodic function the students will learn how a
Fourier series can be used to find the solution to the differential equations
symbolically.
After the completion of this assignment students will be able to:
- generate a Fourier series of a periodic function
- find the analytic solution of the mass-spring-damper system
16. Modeling a building during an earthquake
--------------------------------------------
We will revisit the multi-story building model and derive the equations of
motion for the system. The students will use eigenanalysis of the simple system
to discover the modes of motion and simulate the behavior.
After the completion of this assignment students will be able to:
- perform modal analysis of the system to determine its modal frequencies and
mode shapes
- represent the model using a matrix equation of motion (canonical form)
- formulate the equations of motion for a MDoF system
- use eigenvalue analysis to determine the mode shapes of a MDoF system
- plot the motion of a MDoF system (with no damping) using the analytical
  solution
- form a MDoF model corresponding to a chain of floors in a building
17. Bicycle Model
-----------------
The students will be given the analytical canonical form of the bicycle
equations that do not have simple damping. They will have to convert to state
space form and do a full eigenanalysis of the general form. The modes will be
examined and the nature of the bicycle motion discovered.
After the completion of this assignment students will be able to:
- convert the canonical linear form into state space form
- interpret eigenvalues and eigenvectors of a general 2 DoF linear system
Designing Vibrating Systems
===========================
18. Design a Clock that Keeps Time
----------------------------------
The students will be presented with a compound pendulum model of a clock's bob
that does not keep time well due to friction and air drag. They will be tasked
with designing a system that adds in the right amount of additional energy so
that the pendulum has the desired constant period.
After the completion of this assignment students will be able to:
- develop an analytic model of a energy injection system
- simulate the motion of clock and determine its time varying period
- choose the energy injection system parameters that will cause the clock to
work as intended
19. Isolator Selection
----------------------
The students will be presented with a model of X and asked to select and/or
design a commercially available vibration isolator that ensures the system
meets specific vibrational design criteria.
After the completion of this assignment students will be able to:
- discuss and justify trade-offs and design decisions
- model the system with additional damping provided by isolation
- select/design a vibration isolator to meet given vibration specifications
- analyze a system's motion to determine its vibrational characteristics
20. Designing a Tuned Mass Damper to Earthquake Proof a Building
----------------------------------------------------------------
Students will be presented with a single (or multi?) floor building model. They
will need to modify the model to include a laterally actuated mass on the
roof. They will be asked to design an actuation scheme that prevents the
building from having too large of displacements or resonance while excited by a
earthquake-like vibration at its base.
After the completion of this assignment students will be able to:
- add a generic vibration absorber to a building model
- use a building model to simulate the motion of a building without damping
- choose design criteria for the building and justify decisions (with ISO
standards)
- design an absorber that meets their design criteria
- use the frequency response function to demonstrate the effect of the
vibration absorber
21. Designing a stable bicycle
------------------------------
The students will be presented with a 2 DoF linear model of a bicycle in
canonical form with analytical expressions for the M, C, and K matrix entries
that are functions of the 25 bicycle parameters. The students will be asked to
discover bicycle designs that meet certain criteria through eigenanalysis and
simulation.
After the completion of this assignment students will be able to:
- determine parameters which cause the 2 DoF system to be stable/unstable
- simulate and visualize the motion of a bicycle with different parameters
- determine and describe the influence of the physical parameters, initial
conditions, and steering input on the dynamics of the vehicle
- design a bicycle that meets specific design criteria
22. Designing Shock Absorption for a Car
----------------------------------------
The students will be presented with 2D planar data generated from a "ground
truth" 3 DoF half car model. Their job will be to design a quarter car model
that behaves similarly to the ground truth model. Once they have a working
simple model, then they will design an improved shock absorber for the quarter
car model using analytic and computational methods. The instructors will then
provide the students with the ground truth model, i.e. the "real" car, and the
students will need to show that the ride quality is improved and that design
criteria is met.
After the completion of this assignment students will be able to:
- develop a simple analytic model that predicts motion provided from
planar 2D "experimental" data
- select springs and dampers to meet given design criteria by demonstrating
performance with the simple analytic model
- demonstrate that the designed shock absorber works well for the "real" car
- discuss why the design does or does not meet the design criteria
- reflect on their modeling and design decisions after having tested it against
the ground truth model
| /resonance-0.22.0.tar.gz/resonance-0.22.0/docs/outline.rst | 0.933249 | 0.908901 | outline.rst | pypi |
# Mean-Motion Resonances
[](https://github.com/psf/black)
[](https://github.com/smirik/resonances/actions/workflows/ci.yml)
`resonances` is an open-source package dedicated to the identification of mean-motion resonances of small bodies. Many examples are for the Solar system; however, you might use the package for any possible planetary system, including exoplanets.
For more information, [read the documentation](https://smirik.github.io/resonances/).
**Note:** while this app has many functional and integration tests built in, it is still in the dev stage. Hence, it might include some inconsistencies. So, any community help is appreciated!
## Features
The package:
- can automatically identify two-body and three-body mean-motion resonance in the Solar system,
- accurately differentiates different types of resonances (pure, transient, uncertain),
- provides an interface for mass tasks (i.e. find resonant areas in a planetary system),
- can plot time series and periodograms,
- and, yeah, it is well tested ;)
It actively uses [REBOUND integrator](https://rebound.readthedocs.io) maintained by Hanno Rein and others.
## Installation
To install resonances on your system, follow the instructions on the appropriate [installation guide](https://smirik.github.io/resonances/install/)
## Mean-motion resonances
For those who are not familiar with the mean-motion resonances, here is the list of papers used to develop this package:
### Papers about the automatic identification of resonant asteroids
1. Smirnov, E. A. & Dovgalev, I. S. Identification of Asteroids in Two-Body Resonances. Solar System Research 52, 347–354 (2018).
2. Smirnov, E. A. (2023). A new python package for identifying celestial bodies trapped in mean-motion resonances. Astronomy and Computing, 100707. https://doi.org/10.1016/j.ascom.2023.100707
3. Smirnov, E. A. & Shevchenko, I. I. Massive identification of asteroids in three-body resonances. Icarus 222, 220–228 (2013).
4. Smirnov, E. A., Dovgalev, I. S. & Popova, E. A. Asteroids in three-body mean motion resonances with planets. Icarus (2017) doi:10.1016/j.icarus.2017.09.032.
5. Nesvorný, D. & Morbidelli, A. Three-Body Mean Motion Resonances and the Chaotic Structure of the Asteroid Belt. The Astronomical Journal 116, 3029–3037 (1998).
### Papers about mean-motion resonances
1. Chirikov, B. V. A universal instability of many-dimensional oscillator systems. Physics reports 52, 263–379 (1979).
1. Gallardo, T. Strength, stability and three dimensional structure of mean motion resonances in the solar system. Icarus 317, 121–134 (2019).
1. Gallardo, T. Atlas of the mean motion resonances in the Solar System. Icarus 184, 29–38 (2006).
1. Gallardo, T., Coito, L. & Badano, L. Planetary and satellite three body mean motion resonances. Icarus 274, 83–98 (2016).
1. Milani, A., Cellino, A., Knezevic, Z., Novaković, B. & Spoto, F. Asteroid families classification: Exploiting very large datasets. Icarus 239, 46–73 (2014).
1. Murray, N. & Holman, M. Diffusive chaos in the outer asteroid belt. The Astronomical Journal 114, 1246 (1997).
1. Murray, N., Holman, M. & Potter, M. On the Origin of Chaos in the Asteroid Belt. The Astronomical Journal 116, 2583–2589 (1998).
1. Shevchenko, I. I. On the Lyapunov exponents of the asteroidal motion subject to resonances and encounters. Proc. IAU 2, 15–30 (2006).
### Books
1. Murray, C. D. & Dermott, S. F. Solar system dynamics. (Cambridge Univ. Press, 2012).
1. Morbidelli, A. Modern celestial mechanics: aspects of solar system dynamics. (2002).
## References
Whenever you use this package, we are kindly asking you to refer to one of the following papers (please choose the appropriate):
1. **The package itself**:
* Smirnov, E. A. (2023). A new python package for identifying celestial bodies trapped in mean-motion resonances. Astronomy and Computing. https://doi.org/10.1016/j.ascom.2023.100707
```tex
@article{Smirnov2023,
title = {A new python package for identifying celestial bodies trapped in mean-motion resonances},
journal = {Astronomy and Computing},
year = {2023},
issn = {2213-1337},
doi = {https://doi.org/10.1016/j.ascom.2023.100707},
url = {https://www.sciencedirect.com/science/article/pii/S2213133723000227},
author = {E.A. Smirnov},
keywords = {Mean-motion resonances, Python, Identification, Asteroids},
abstract = {In this paper, a new open-source package ‘resonances’ written in python is introduced. It allows to find, analyse, and plot two-body and three-body mean-motion eccentricity-type resonances in the Solar and other planetary systems. The package has a better accuracy of the automatic identification procedure for resonant objects compared to previous studies. Furthermore, it has built-in integrations with AstDyS and NASA JPL catalogues. The code is extensively documented and tested with automatic tests. The package is available on GitHub under MIT Licence.}
}
```
2. **The Libration module and automatic identification of librations**:
* Smirnov, E. A. (2023). A new python package for identifying celestial bodies trapped in mean-motion resonances. Astronomy and Computing, 100707. https://doi.org/10.1016/j.ascom.2023.100707
3. **Mass identification of mean-motion resonances:**
* Smirnov, E. A., & Dovgalev, I. S. (2018). Identification of Asteroids in Two-Body Resonances. Solar System Research, 52(4), 347–354. https://doi.org/10.1134/S0038094618040056
* Smirnov, E. A., Dovgalev, I. S. & Popova, E. A. Asteroids in three-body mean motion resonances with planets. Icarus (2017) doi:10.1016/j.icarus.2017.09.032.
## Authors
The authors of the package:
- [Evgeny Smirnov](https://github.com/smirik) ([FB](https://facebook.com/smirik), [Telegram](https://t.me/smirik))
## Acknowledgement
- Many thanks to the co-authors of the papers (prof. I. I. Shevchenko, I. Dovgalev, and Dr. E. Popova).
- The creators of [REBOUND integrator](https://rebound.readthedocs.io).
- The creators of [Astropy](http://astropy.org).
- The creators of `numpy`, `scipy`, `pandas`, and `matplotlib`.
## Contributing
Feel free to contribute to the code by sending pull requests [to the repository](https://github.com/smirik/resonances).
## License
MIT
| /resonances-0.2.7.tar.gz/resonances-0.2.7/README.md | 0.763219 | 0.94079 | README.md | pypi |
import warnings
import numpy as np
from scipy.signal import periodogram
class noisedata(object):
    """Noise spectra of homodyne IQ data measured on a microwave resonator.

    One-sided power spectral densities (periodograms) of the I and Q
    quadratures are computed for a trace taken on resonance (``IQ``) and a
    reference trace taken far off resonance (``IQref``).  Helper methods
    convert the raw spectra (V^2/Hz) into normalized, power, phase,
    frequency and fractional-frequency noise spectra.
    """

    def __init__(self, IQ, IQref, fr, Ql, fs, gain_corr=(1., 1.), Z=50):
        '''
        units are assumed to be in volts
        -> IQ = I+1j*Q ; with amplitude signal on Q and phase on I
           this signal is measured on resonance
        -> IQref = Iref+1j*Qref ; with amplitude signal on Qref and phase on Iref
           this signal is measured far off resonance
        IMPORTANT: IQ and IQref describe signals on opposite sides of the resonance circle.
        Therefore, take care that Q and Qref have the correct signs in order that
        the program can determine the diameter of the resonance circle.
        -> fr: resonance frequency
        -> Ql: loaded Q of the resonator
        -> fs: sampling rate
        -> gain_corr = (1., 1.) ; enter here if the gain of IQ and IQref signals
           are different
        -> Z: impedance
        The signals will be normalized to the reference such that IQref = 1.
        '''
        self.Z = Z
        self.fr = fr
        self.Ql = Ql
        # NOTE(review): gain_corr is accepted but never applied anywhere --
        # confirm whether IQ/IQref should be divided by it before analysis.
        self.gain_corr = gain_corr
        self.offrespoint = np.mean(np.imag(IQref))
        # bugfix: the on-resonance point must come from IQ; the original code
        # used IQref for both points, which made the radius always zero.
        self.respoint = np.mean(np.imag(IQ))
        # normalized diameter of the resonance circle (IQref normalized to 1)
        self.radius = (self.offrespoint - self.respoint) / self.offrespoint
        # scipy.signal.periodogram returns a (frequencies, PSD) tuple; unpack
        # it so the P_* attributes are plain PSD arrays (the original stored
        # the whole tuple, which broke every arithmetic operation on them).
        self.freq, self.P_I = periodogram(self._demean(np.real(IQ)), fs=fs)
        _, self.P_Q = periodogram(self._demean(np.imag(IQ)), fs=fs)
        self.freq_ref, self.P_Iref = periodogram(self._demean(np.real(IQref)), fs=fs)
        _, self.P_Qref = periodogram(self._demean(np.imag(IQref)), fs=fs)

    #################################
    # functions to evaluate multiple things
    def P_I_eval_all(self):
        '''
        returns a 2D numpy array with all the results
        and a 1D list with the description
        '''
        comment = ['P_I', 'P_Inorm', 'P_Ipower', 'P_dtheta', 'P_dphi', 'P_df', 'P_']
        return np.vstack((self.P_I, self.P_Inorm(), self.P_Ipower(), self.P_dtheta(),
                          self.P_dphi(), self.P_df(), self.P_())), comment

    def P_Iref_eval_all(self):
        '''
        returns a 2D numpy array with all the results
        and a 1D list with the description
        '''
        comment = ['P_Iref', 'P_Irefnorm', 'P_Irefpower', 'P_refdtheta', 'P_refdphi', 'P_refdf', 'P_ref']
        return np.vstack((self.P_Iref, self.P_Irefnorm(), self.P_Irefpower(), self.P_refdtheta(),
                          self.P_refdphi(), self.P_refdf(), self.P_ref())), comment

    def P_Q_eval_all(self):
        '''
        returns a 2D numpy array with all the results
        and a 1D list with the description
        '''
        comment = ['P_Q', 'P_Qnorm', 'P_Qpower']
        return np.vstack((self.P_Q, self.P_Qnorm(), self.P_Qpower())), comment

    def P_Qref_eval_all(self):
        '''
        returns a 2D numpy array with all the results
        and a 1D list with the description
        '''
        comment = ['P_Qref', 'P_Qrefnorm', 'P_Qrefpower']
        return np.vstack((self.P_Qref, self.P_Qrefnorm(), self.P_Qrefpower())), comment

    #################################
    # helpers
    def _demean(self, x):
        '''
        removes the mean value from x
        '''
        return x - x.mean()

    #################################
    # noise on I
    def P_Inorm(self):
        '''
        V^2/Hz normalized to the off-resonance level -> 1/Hz
        '''
        return self.P_I / (self.offrespoint ** 2)

    def P_Ipower(self):
        '''
        W/Hz
        '''
        return self.P_I / self.Z

    def P_dtheta(self):
        '''
        rad^2/Hz
        phase noise on the resonator circle phase
        (this is not the real measured phase)
        '''
        # bugfix: the original referenced self.r, which was never defined
        return self.P_Inorm() / self.radius ** 2

    def P_dphi(self):
        '''
        rad^2/Hz
        phase noise on the phase measured with the VNA
        '''
        return self.P_Inorm() / np.absolute(self.respoint ** 2)

    def P_df(self):
        '''
        Hz^2/Hz
        frequency noise
        '''
        # bugfix: the original called the non-existent self.P_theta()
        return self.P_dtheta() * self.fr ** 2 / (16. * self.Ql ** 2)

    def P_(self):
        '''
        1/Hz
        fractional frequency noise
        '''
        return self.P_dtheta() / (16. * self.Ql ** 2)

    #################################
    # noise on Iref
    def P_Irefnorm(self):
        '''
        V^2/Hz normalized to the off-resonance level -> 1/Hz
        '''
        return self.P_Iref / (self.offrespoint ** 2)

    def P_Irefpower(self):
        '''
        W/Hz
        '''
        return self.P_Iref / self.Z

    def P_refdtheta(self):
        '''
        rad^2/Hz
        phase noise on the resonator circle phase
        (this is not the real measured phase)
        '''
        # bugfix: the original referenced self.r, which was never defined
        return self.P_Irefnorm() / self.radius ** 2

    def P_refdphi(self):
        '''
        rad^2/Hz
        phase noise on the phase measured with the VNA
        '''
        return self.P_Irefnorm() / np.absolute(self.respoint ** 2)

    def P_refdf(self):
        '''
        Hz^2/Hz
        frequency noise
        '''
        # bugfix: the original called the non-existent self.P_reftheta()
        return self.P_refdtheta() * self.fr ** 2 / (16. * self.Ql ** 2)

    def P_ref(self):
        '''
        1/Hz
        fractional frequency noise
        '''
        return self.P_refdtheta() / (16. * self.Ql ** 2)

    #################################
    # noise on Q
    def P_Qnorm(self):
        '''
        V^2/Hz normalized to the off-resonance level -> 1/Hz
        '''
        return self.P_Q / (self.offrespoint ** 2)

    def P_Qpower(self):
        '''
        W/Hz
        '''
        return self.P_Q / self.Z

    #################################
    # noise on Qref
    def P_Qrefnorm(self):
        '''
        V^2/Hz normalized to the off-resonance level -> 1/Hz
        '''
        return self.P_Qref / (self.offrespoint ** 2)

    def P_Qrefpower(self):
        '''
        W/Hz
        '''
        return self.P_Qref / self.Z
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
    '''
    Helper routines for manually calibrating complex resonator data:
    normalization against reference traces and asymmetric-least-squares
    baseline estimation for amplitude and phase.
    '''

    def normalize_zdata(self, z_data, cal_z_data):
        '''Divide complex data by a complex calibration trace.'''
        return z_data / cal_z_data

    def normalize_amplitude(self, z_data, cal_ampdata):
        '''Divide complex data by an amplitude calibration trace.'''
        return z_data / cal_ampdata

    def normalize_phase(self, z_data, cal_phase):
        '''Remove a phase calibration (radians) from complex data.'''
        return z_data * np.exp(-1j * cal_phase)

    def normalize_by_func(self, f_data, z_data, func):
        '''Divide complex data by a calibration function evaluated at f_data.'''
        return z_data / func(f_data)

    def _baseline_als(self, y, lam, p, niter=10):
        '''
        Asymmetric least squares baseline estimation after
        P. Eilers and H. Boelens (2005), see
        http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
        and http://stackoverflow.com/questions/29156532/python-baseline-correction-library

        There are two parameters: p for asymmetry and lam for smoothness.
        Both have to be tuned to the data at hand. Generally
        0.001 <= p <= 0.1 is a good choice (for a signal with positive
        peaks) and 10e2 <= lam <= 10e9, but exceptions may occur.
        '''
        n_points = len(y)
        # second-order difference operator; lam * D D^T penalizes curvature
        diff2 = sparse.csc_matrix(np.diff(np.eye(n_points), 2))
        smoother = lam * diff2.dot(diff2.transpose())
        weights = np.ones(n_points)
        baseline = y
        for _ in range(niter):
            weight_matrix = sparse.spdiags(weights, 0, n_points, n_points)
            baseline = sparse.linalg.spsolve(weight_matrix + smoother, weights * y)
            # asymmetric reweighting: p above the baseline, 1-p below
            weights = p * (y > baseline) + (1 - p) * (y < baseline)
        return baseline

    def fit_baseline_amp(self, z_data, lam, p, niter=10):
        '''
        for this to work, you need to analyze a large part of the baseline
        tune lam and p until you get the desired result
        '''
        return self._baseline_als(np.absolute(z_data), lam, p, niter=niter)

    def baseline_func_amp(self, z_data, f_data, lam, p, niter=10):
        '''
        for this to work, you need to analyze a large part of the baseline
        tune lam and p until you get the desired result
        returns the amplitude baseline as a function; points in between the
        datapoints are computed by cubic interpolation
        '''
        baseline = self._baseline_als(np.absolute(z_data), lam, p, niter=niter)
        return interp1d(f_data, baseline, kind='cubic')

    def baseline_func_phase(self, z_data, f_data, lam, p, niter=10):
        '''
        for this to work, you need to analyze a large part of the baseline
        tune lam and p until you get the desired result
        returns the phase baseline as a function; points in between the
        datapoints are computed by cubic interpolation
        '''
        baseline = self._baseline_als(np.angle(z_data), lam, p, niter=niter)
        return interp1d(f_data, baseline, kind='cubic')

    def fit_baseline_phase(self, z_data, lam, p, niter=10):
        '''
        for this to work, you need to analyze a large part of the baseline
        tune lam and p until you get the desired result
        '''
        return self._baseline_als(np.angle(z_data), lam, p, niter=niter)
from datetime import datetime
from typing import ClassVar, Dict, Optional, List, Type, cast
from attrs import define, field
from resoto_plugin_aws.resource.autoscaling import AwsAutoScalingGroup
from resoto_plugin_aws.resource.base import AwsResource, GraphBuilder, AwsApiSpec
from resoto_plugin_aws.resource.iam import AwsIamRole
from resotolib.baseresources import ModelReference
from resotolib.graph import Graph
from resotolib.json_bender import Bender, S, Bend, ForallBend
from resotolib.types import Json
from resoto_plugin_aws.aws_client import AwsClient
service_name = "eks"
# noinspection PyUnresolvedReferences
class EKSTaggable:
    """Mixin that adds EKS tag/untag support to AWS resource classes."""

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        # Guard clauses: only an AwsResource with a defined API spec can be
        # tagged via the EKS API; otherwise report failure.
        if not isinstance(self, AwsResource):
            return False
        spec = self.api_spec
        if not spec:
            return False
        client.call(
            aws_service=spec.service,
            action="tag-resource",
            result_name=None,
            resourceArn=self.arn,
            tags={key: value},
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        # Mirror of update_resource_tag using the untag-resource action.
        if not isinstance(self, AwsResource):
            return False
        spec = self.api_spec
        if not spec:
            return False
        client.call(
            aws_service=spec.service,
            action="untag-resource",
            result_name=None,
            resourceArn=self.arn,
            tagKeys=[key],
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        # The two EKS APIs invoked by the tag mutators above.
        tag_api = AwsApiSpec(service_name, "tag-resource")
        untag_api = AwsApiSpec(service_name, "untag-resource")
        return [tag_api, untag_api]

    @classmethod
    def service_name(cls) -> str:
        return service_name
@define(eq=False, slots=False)
class AwsEksNodegroupScalingConfig:
    """Scaling boundaries (min/max/desired size) of an EKS node group."""

    kind: ClassVar[str] = "aws_eks_nodegroup_scaling_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "min_size": S("minSize"),
        "max_size": S("maxSize"),
        "desired_size": S("desiredSize"),
    }
    min_size: Optional[int] = field(default=None)
    max_size: Optional[int] = field(default=None)
    desired_size: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsEksRemoteAccessConfig:
    """Remote (SSH) access configuration of an EKS node group."""

    kind: ClassVar[str] = "aws_eks_remote_access_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ec2_ssh_key": S("ec2SshKey"),
        "source_security_groups": S("sourceSecurityGroups", default=[]),
    }
    ec2_ssh_key: Optional[str] = field(default=None)
    source_security_groups: List[str] = field(factory=list)
@define(eq=False, slots=False)
class AwsEksTaint:
    """A Kubernetes taint (key/value/effect) applied to an EKS node group."""

    kind: ClassVar[str] = "aws_eks_taint"
    mapping: ClassVar[Dict[str, Bender]] = {"key": S("key"), "value": S("value"), "effect": S("effect")}
    key: Optional[str] = field(default=None)
    value: Optional[str] = field(default=None)
    effect: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsEksNodegroupResources:
    """Resources backing an EKS node group: the names of its auto-scaling
    groups and, if remote access is enabled, its security group."""

    kind: ClassVar[str] = "aws_eks_nodegroup_resources"
    mapping: ClassVar[Dict[str, Bender]] = {
        # flatten [{"name": ...}, ...] to a list of ASG names
        "auto_scaling_groups": S("autoScalingGroups", default=[]) >> ForallBend(S("name")),
        "remote_access_security_group": S("remoteAccessSecurityGroup"),
    }
    auto_scaling_groups: List[str] = field(factory=list)
    remote_access_security_group: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsEksIssue:
    """A health issue reported for an EKS node group."""

    kind: ClassVar[str] = "aws_eks_issue"
    mapping: ClassVar[Dict[str, Bender]] = {
        "code": S("code"),
        "message": S("message"),
        "resource_ids": S("resourceIds", default=[]),
    }
    code: Optional[str] = field(default=None)
    message: Optional[str] = field(default=None)
    resource_ids: List[str] = field(factory=list)
@define(eq=False, slots=False)
class AwsEksNodegroupHealth:
    """Health status of an EKS node group: the list of current issues."""

    kind: ClassVar[str] = "aws_eks_nodegroup_health"
    mapping: ClassVar[Dict[str, Bender]] = {"issues": S("issues", default=[]) >> ForallBend(AwsEksIssue.mapping)}
    issues: List[AwsEksIssue] = field(factory=list)
@define(eq=False, slots=False)
class AwsEksNodegroupUpdateConfig:
    """Rolling-update configuration of an EKS node group (max unavailable
    nodes, absolute or as a percentage)."""

    kind: ClassVar[str] = "aws_eks_nodegroup_update_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "max_unavailable": S("maxUnavailable"),
        "max_unavailable_percentage": S("maxUnavailablePercentage"),
    }
    max_unavailable: Optional[int] = field(default=None)
    max_unavailable_percentage: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsEksLaunchTemplateSpecification:
    """Reference (name/version/id) to the EC2 launch template used by an
    EKS node group."""

    kind: ClassVar[str] = "aws_eks_launch_template_specification"
    mapping: ClassVar[Dict[str, Bender]] = {"name": S("name"), "version": S("version"), "id": S("id")}
    name: Optional[str] = field(default=None)
    version: Optional[str] = field(default=None)
    id: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsEksNodegroup(EKSTaggable, AwsResource):
    """An EKS managed node group.

    Note: this resource has no own api_spec; it is collected while
    collecting AwsEksCluster (see AwsEksCluster.collect).
    """

    kind: ClassVar[str] = "aws_eks_nodegroup"
    reference_kinds: ClassVar[ModelReference] = {
        "predecessors": {"default": ["aws_eks_cluster"], "delete": ["aws_eks_cluster", "aws_autoscaling_group"]},
        "successors": {"default": ["aws_autoscaling_group"]},
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("nodegroupName"),
        "name": S("nodegroupName"),
        "tags": S("tags", default={}),
        "cluster_name": S("clusterName"),
        "ctime": S("createdAt"),
        "arn": S("nodegroupArn"),
        # fixed: this entry was keyed "version", which matches no declared
        # field of this class; it now populates group_version (mirroring
        # AwsEksCluster, where "cluster_version" maps S("version")).
        "group_version": S("version"),
        "group_release_version": S("releaseVersion"),
        "group_modified_at": S("modifiedAt"),
        "group_status": S("status"),
        "group_capacity_type": S("capacityType"),
        "group_scaling_config": S("scalingConfig") >> Bend(AwsEksNodegroupScalingConfig.mapping),
        "group_instance_types": S("instanceTypes", default=[]),
        "group_subnets": S("subnets", default=[]),
        "group_remote_access": S("remoteAccess") >> Bend(AwsEksRemoteAccessConfig.mapping),
        "group_ami_type": S("amiType"),
        "group_node_role": S("nodeRole"),
        "group_labels": S("labels"),
        "group_taints": S("taints", default=[]) >> ForallBend(AwsEksTaint.mapping),
        "group_resources": S("resources") >> Bend(AwsEksNodegroupResources.mapping),
        "group_disk_size": S("diskSize"),
        "group_health": S("health") >> Bend(AwsEksNodegroupHealth.mapping),
        "group_update_config": S("updateConfig") >> Bend(AwsEksNodegroupUpdateConfig.mapping),
        "group_launch_template": S("launchTemplate") >> Bend(AwsEksLaunchTemplateSpecification.mapping),
    }
    cluster_name: Optional[str] = field(default=None)
    # NOTE(review): never populated by the mapping (the ARN is mapped to the
    # base "arn" attribute); kept for backward compatibility.
    group_nodegroup_arn: Optional[str] = field(default=None)
    group_version: Optional[str] = field(default=None)
    group_release_version: Optional[str] = field(default=None)
    group_modified_at: Optional[datetime] = field(default=None)
    group_status: Optional[str] = field(default=None)
    group_capacity_type: Optional[str] = field(default=None)
    group_scaling_config: Optional[AwsEksNodegroupScalingConfig] = field(default=None)
    group_instance_types: List[str] = field(factory=list)
    group_subnets: List[str] = field(factory=list)
    group_remote_access: Optional[AwsEksRemoteAccessConfig] = field(default=None)
    group_ami_type: Optional[str] = field(default=None)
    group_node_role: Optional[str] = field(default=None)
    group_labels: Optional[Dict[str, str]] = field(default=None)
    group_taints: List[AwsEksTaint] = field(factory=list)
    group_resources: Optional[AwsEksNodegroupResources] = field(default=None)
    group_disk_size: Optional[int] = field(default=None)
    group_health: Optional[AwsEksNodegroupHealth] = field(default=None)
    group_update_config: Optional[AwsEksNodegroupUpdateConfig] = field(default=None)
    group_launch_template: Optional[AwsEksLaunchTemplateSpecification] = field(default=None)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Link this node group to its owning cluster and to the
        auto-scaling groups that back it."""
        if cluster_name := self.cluster_name:
            builder.dependant_node(
                self, clazz=AwsEksCluster, reverse=True, delete_same_as_default=True, name=cluster_name
            )
        if self.group_resources:
            for rid in self.group_resources.auto_scaling_groups:
                builder.dependant_node(self, clazz=AwsAutoScalingGroup, id=rid)

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this node group via eks delete-nodegroup."""
        client.call(
            aws_service=service_name,
            action="delete-nodegroup",
            result_name=None,
            clusterName=self.cluster_name,
            nodegroupName=self.name,
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        return super().called_mutator_apis() + [AwsApiSpec(service_name, "delete-nodegroup")]
@define(eq=False, slots=False)
class AwsEksVpcConfigResponse:
    """VPC configuration of an EKS cluster: subnets, security groups and
    endpoint access settings."""

    kind: ClassVar[str] = "aws_eks_vpc_config_response"
    mapping: ClassVar[Dict[str, Bender]] = {
        "subnet_ids": S("subnetIds", default=[]),
        "security_group_ids": S("securityGroupIds", default=[]),
        "cluster_security_group_id": S("clusterSecurityGroupId"),
        "vpc_id": S("vpcId"),
        "endpoint_public_access": S("endpointPublicAccess"),
        "endpoint_private_access": S("endpointPrivateAccess"),
        "public_access_cidrs": S("publicAccessCidrs", default=[]),
    }
    subnet_ids: List[str] = field(factory=list)
    security_group_ids: List[str] = field(factory=list)
    cluster_security_group_id: Optional[str] = field(default=None)
    vpc_id: Optional[str] = field(default=None)
    endpoint_public_access: Optional[bool] = field(default=None)
    endpoint_private_access: Optional[bool] = field(default=None)
    public_access_cidrs: List[str] = field(factory=list)
@define(eq=False, slots=False)
class AwsEksKubernetesNetworkConfigResponse:
    """Kubernetes service network configuration of an EKS cluster
    (service CIDRs and IP family)."""

    kind: ClassVar[str] = "aws_eks_kubernetes_network_config_response"
    mapping: ClassVar[Dict[str, Bender]] = {
        "service_ipv4_cidr": S("serviceIpv4Cidr"),
        "service_ipv6_cidr": S("serviceIpv6Cidr"),
        "ip_family": S("ipFamily"),
    }
    service_ipv4_cidr: Optional[str] = field(default=None)
    service_ipv6_cidr: Optional[str] = field(default=None)
    ip_family: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsEksLogSetup:
    """A single log configuration entry of an EKS cluster: which log types
    are covered and whether they are enabled."""

    kind: ClassVar[str] = "aws_eks_log_setup"
    mapping: ClassVar[Dict[str, Bender]] = {"types": S("types", default=[]), "enabled": S("enabled")}
    types: List[str] = field(factory=list)
    enabled: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class AwsEksLogging:
    """Control-plane logging configuration of an EKS cluster."""

    kind: ClassVar[str] = "aws_eks_logging"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cluster_logging": S("clusterLogging", default=[]) >> ForallBend(AwsEksLogSetup.mapping)
    }
    cluster_logging: List[AwsEksLogSetup] = field(factory=list)
@define(eq=False, slots=False)
class AwsEksIdentity:
    """Identity provider information of an EKS cluster: the OIDC issuer URL."""

    kind: ClassVar[str] = "aws_eks_identity"
    mapping: ClassVar[Dict[str, Bender]] = {"oidc": S("oidc", "issuer")}
    oidc: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsEksEncryptionConfig:
    """Encryption configuration of an EKS cluster: encrypted resources and
    the KMS key ARN used as the provider."""

    kind: ClassVar[str] = "aws_eks_encryption_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "resources": S("resources", default=[]),
        "provider": S("provider", "keyArn"),
    }
    resources: List[str] = field(factory=list)
    provider: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsEksConnectorConfig:
    """Connector configuration of a registered (external) EKS cluster."""

    kind: ClassVar[str] = "aws_eks_connector_config"
    mapping: ClassVar[Dict[str, Bender]] = {
        "activation_id": S("activationId"),
        "activation_code": S("activationCode"),
        "activation_expiry": S("activationExpiry"),
        "provider": S("provider"),
        "role_arn": S("roleArn"),
    }
    activation_id: Optional[str] = field(default=None)
    activation_code: Optional[str] = field(default=None)
    activation_expiry: Optional[datetime] = field(default=None)
    provider: Optional[str] = field(default=None)
    role_arn: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsEksCluster(EKSTaggable, AwsResource):
    """An EKS cluster.

    Collected via list-clusters; each cluster name is then described
    individually and its node groups are collected alongside it
    (see collect below).
    """

    kind: ClassVar[str] = "aws_eks_cluster"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-clusters", "clusters")
    reference_kinds: ClassVar[ModelReference] = {
        "predecessors": {
            "default": ["aws_iam_role"],
            "delete": ["aws_iam_role"],
        }
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name"),
        "tags": S("tags", default={}),
        "name": S("name"),
        "arn": S("arn"),
        "ctime": S("createdAt"),
        "cluster_version": S("version"),
        "cluster_endpoint": S("endpoint"),
        "cluster_role_arn": S("roleArn"),
        "cluster_resources_vpc_config": S("resourcesVpcConfig") >> Bend(AwsEksVpcConfigResponse.mapping),
        "cluster_kubernetes_network_config": S("kubernetesNetworkConfig")
        >> Bend(AwsEksKubernetesNetworkConfigResponse.mapping),
        "cluster_logging": S("logging") >> Bend(AwsEksLogging.mapping),
        "cluster_identity": S("identity") >> Bend(AwsEksIdentity.mapping),
        "cluster_status": S("status"),
        "cluster_certificate_authority": S("certificateAuthority", "data"),
        "cluster_client_request_token": S("clientRequestToken"),
        "cluster_platform_version": S("platformVersion"),
        "cluster_encryption_config": S("encryptionConfig", default=[]) >> ForallBend(AwsEksEncryptionConfig.mapping),
        "cluster_connector_config": S("connectorConfig") >> Bend(AwsEksConnectorConfig.mapping),
    }
    cluster_version: Optional[str] = field(default=None)
    cluster_endpoint: Optional[str] = field(default=None)
    cluster_role_arn: Optional[str] = field(default=None)
    cluster_resources_vpc_config: Optional[AwsEksVpcConfigResponse] = field(default=None)
    cluster_kubernetes_network_config: Optional[AwsEksKubernetesNetworkConfigResponse] = field(default=None)
    cluster_logging: Optional[AwsEksLogging] = field(default=None)
    cluster_identity: Optional[AwsEksIdentity] = field(default=None)
    cluster_status: Optional[str] = field(default=None)
    cluster_certificate_authority: Optional[str] = field(default=None)
    cluster_client_request_token: Optional[str] = field(default=None)
    cluster_platform_version: Optional[str] = field(default=None)
    cluster_encryption_config: List[AwsEksEncryptionConfig] = field(factory=list)
    cluster_connector_config: Optional[AwsEksConnectorConfig] = field(default=None)

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        return [
            cls.api_spec,
            AwsApiSpec(service_name, "describe-cluster"),
            AwsApiSpec(service_name, "list-nodegroups"),
            AwsApiSpec(service_name, "describe-nodegroup"),
        ]

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        # list-clusters yields plain cluster names, not JSON objects;
        # describe each cluster and then each of its node groups.
        for name in cast(List[str], json):
            cluster_json = builder.client.get(service_name, "describe-cluster", "cluster", name=name)
            if cluster_json is not None:
                if cluster := AwsEksCluster.from_api(cluster_json, builder):
                    builder.add_node(cluster, cluster_json)
                for ng_name in builder.client.list(service_name, "list-nodegroups", "nodegroups", clusterName=name):
                    ng_json = builder.client.get(
                        service_name, "describe-nodegroup", "nodegroup", clusterName=name, nodegroupName=ng_name
                    )
                    if ng_json is not None and (ng := AwsEksNodegroup.from_api(ng_json, builder)):
                        builder.add_node(ng, ng_json)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        # the cluster depends on its IAM service role
        builder.dependant_node(
            self, reverse=True, delete_same_as_default=True, clazz=AwsIamRole, arn=self.cluster_role_arn
        )

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this cluster via eks delete-cluster."""
        client.call(aws_service=self.api_spec.service, action="delete-cluster", result_name=None, name=self.name)
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        return super().called_mutator_apis() + [AwsApiSpec(service_name, "delete-cluster")]
# EKS resource kinds exported by this module for collector registration.
# (fixed: removed concatenation residue that trailed this line and broke the syntax)
resources: List[Type[AwsResource]] = [AwsEksNodegroup, AwsEksCluster]
from datetime import datetime
from typing import ClassVar, Dict, List, Optional, Type
from attrs import define, field
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder
from resoto_plugin_aws.resource.kms import AwsKmsKey
from resotolib.baseresources import ModelReference
from resotolib.graph import Graph
from resotolib.json_bender import F, Bender, S, AsInt, AsBool, Bend, ParseJson
from resotolib.types import Json
from resotolib.utils import utc_str
# AWS service identifier used for all SQS API specs and client calls in this module.
service_name = "sqs"
@define(eq=False, slots=False)
class AwsSqsRedrivePolicy:
    """Dead-letter redrive policy of an SQS queue (parsed from the
    RedrivePolicy JSON attribute)."""

    kind: ClassVar[str] = "aws_sqs_redrive_policy"
    mapping: ClassVar[Dict[str, Bender]] = {
        "dead_letter_target_arn": S("deadLetterTargetArn"),
        "max_receive_count": S("maxReceiveCount"),
    }
    # use attrs field() defaults for consistency with every other resource
    # class in this plugin (behavior is unchanged)
    dead_letter_target_arn: Optional[str] = field(default=None)
    max_receive_count: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsSqsQueue(AwsResource):
    """An SQS queue.

    Collected via list-queues (which returns only queue URLs); attributes
    and tags are fetched per queue in collect below.
    """

    kind: ClassVar[str] = "aws_sqs_queue"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-queues", "QueueUrls")
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["aws_kms_key"]},
        "predecessors": {"delete": ["aws_kms_key"]},
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("QueueName"),
        "name": S("QueueName"),
        # timestamps arrive as epoch-second strings
        # NOTE(review): datetime.utcfromtimestamp is deprecated since
        # Python 3.12 — consider datetime.fromtimestamp(x, tz=timezone.utc)
        "ctime": S("CreatedTimestamp") >> AsInt() >> F(lambda x: utc_str(datetime.utcfromtimestamp(x))),
        "mtime": S("LastModifiedTimestamp") >> AsInt() >> F(lambda x: utc_str(datetime.utcfromtimestamp(x))),
        "arn": S("QueueArn"),
        "sqs_queue_url": S("QueueUrl"),
        "sqs_approximate_number_of_messages": S("ApproximateNumberOfMessages") >> AsInt(),
        "sqs_approximate_number_of_messages_not_visible": S("ApproximateNumberOfMessagesNotVisible") >> AsInt(),
        "sqs_approximate_number_of_messages_delayed": S("ApproximateNumberOfMessagesDelayed") >> AsInt(),
        "sqs_policy": S("Policy") >> ParseJson(keys_to_snake=True),
        "sqs_redrive_policy": S("RedrivePolicy") >> ParseJson() >> Bend(AwsSqsRedrivePolicy.mapping),
        "sqs_fifo_queue": S("FifoQueue"),
        "sqs_content_based_deduplication": S("ContentBasedDeduplication") >> AsBool(),
        "sqs_kms_master_key_id": S("KmsMasterKeyId"),
        "sqs_kms_data_key_reuse_period_seconds": S("KmsDataKeyReusePeriodSeconds") >> AsInt(),
        "sqs_deduplication_scope": S("DeduplicationScope"),
        "sqs_fifo_throughput_limit": S("FifoThroughputLimit"),
        "sqs_redrive_allow_policy": S("RedriveAllowPolicy") >> ParseJson() >> S("redrivePermission"),
        "sqs_visibility_timeout": S("VisibilityTimeout") >> AsInt(),
        "sqs_maximum_message_size": S("MaximumMessageSize") >> AsInt(),
        "sqs_message_retention_period": S("MessageRetentionPeriod") >> AsInt(),
        "sqs_delay_seconds": S("DelaySeconds") >> AsInt(),
        "sqs_receive_message_wait_time_seconds": S("ReceiveMessageWaitTimeSeconds") >> AsInt(),
        "sqs_managed_sse_enabled": S("SqsManagedSseEnabled") >> AsBool(),
    }
    sqs_queue_url: Optional[str] = field(default=None)
    sqs_approximate_number_of_messages: Optional[int] = field(default=None)
    sqs_approximate_number_of_messages_not_visible: Optional[int] = field(default=None)
    sqs_approximate_number_of_messages_delayed: Optional[int] = field(default=None)
    sqs_policy: Optional[Json] = field(default=None)
    sqs_redrive_policy: Optional[AwsSqsRedrivePolicy] = field(default=None)
    sqs_fifo_queue: Optional[bool] = field(default=None)
    sqs_content_based_deduplication: Optional[bool] = field(default=None)
    sqs_kms_master_key_id: Optional[str] = field(default=None)
    sqs_kms_data_key_reuse_period_seconds: Optional[int] = field(default=None)
    sqs_deduplication_scope: Optional[str] = field(default=None)
    sqs_fifo_throughput_limit: Optional[str] = field(default=None)
    sqs_redrive_allow_policy: Optional[str] = field(default=None)
    sqs_visibility_timeout: Optional[int] = field(default=None)
    sqs_maximum_message_size: Optional[int] = field(default=None)
    sqs_message_retention_period: Optional[int] = field(default=None)
    sqs_delay_seconds: Optional[int] = field(default=None)
    sqs_receive_message_wait_time_seconds: Optional[int] = field(default=None)
    sqs_managed_sse_enabled: Optional[bool] = field(default=None)

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        return [
            cls.api_spec,
            AwsApiSpec(service_name, "get-queue-attributes"),
            AwsApiSpec(service_name, "list-queue-tags"),
        ]

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        # `json` is a list of queue URL strings (see api_spec result QueueUrls)
        def add_instance(queue_url: str) -> None:
            queue_attributes = builder.client.get(
                service_name, "get-queue-attributes", "Attributes", QueueUrl=queue_url, AttributeNames=["All"]
            )
            if queue_attributes is not None:
                # QueueUrl/QueueName are not part of the attributes response;
                # inject them so the mapping above can pick them up
                queue_attributes["QueueUrl"] = queue_url
                queue_attributes["QueueName"] = queue_url.rsplit("/", 1)[-1]
                if instance := cls.from_api(queue_attributes, builder):
                    builder.add_node(instance)
                    builder.submit_work(service_name, add_tags, instance)

        def add_tags(queue: AwsSqsQueue) -> None:
            tags = builder.client.get(service_name, "list-queue-tags", result_name="Tags", QueueUrl=queue.sqs_queue_url)
            if tags:
                queue.tags = tags

        for queue_url in json:
            if isinstance(queue_url, str):
                add_instance(queue_url)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        # link to the KMS key used for server-side encryption, if any
        if self.sqs_kms_master_key_id:
            builder.dependant_node(
                self,
                clazz=AwsKmsKey,
                id=self.sqs_kms_master_key_id,
            )

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        client.call(
            aws_service=service_name,
            action="tag-queue",
            result_name=None,
            QueueUrl=self.sqs_queue_url,
            Tags={key: value},
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        client.call(
            aws_service=service_name, action="untag-queue", result_name=None, QueueUrl=self.sqs_queue_url, TagKeys=[key]
        )
        return True

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this queue via sqs delete-queue."""
        client.call(aws_service=service_name, action="delete-queue", result_name=None, QueueUrl=self.sqs_queue_url)
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        return [
            AwsApiSpec(service_name, "tag-queue"),
            AwsApiSpec(service_name, "untag-queue"),
            AwsApiSpec(service_name, "delete-queue"),
        ]
# SQS resource kinds exported by this module for collector registration.
# (fixed: removed concatenation residue that trailed this line and broke the syntax)
resources: List[Type[AwsResource]] = [AwsSqsQueue]
import time
from datetime import datetime
from typing import Any, ClassVar, Dict, Literal, Optional, List, Type, cast
from attrs import define, field
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder
from resoto_plugin_aws.utils import ToDict
from resotolib.baseresources import BaseStack
from resotolib.graph import ByNodeId, BySearchCriteria, Graph
from resotolib.json_bender import Bender, S, Bend, ForallBend, F
from resotolib.types import Json
# AWS service identifier used for all CloudFormation API specs and client calls in this module.
service_name = "cloudformation"
@define(eq=False, slots=False)
class AwsCloudFormationRollbackTrigger:
    """A rollback trigger (ARN and type) attached to a CloudFormation stack."""

    kind: ClassVar[str] = "aws_cloudformation_rollback_trigger"
    mapping: ClassVar[Dict[str, Bender]] = {"arn": S("Arn"), "type": S("Type")}
    arn: Optional[str] = field(default=None)
    type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudFormationRollbackConfiguration:
    """Rollback configuration of a CloudFormation stack: triggers and the
    monitoring window in minutes."""

    kind: ClassVar[str] = "aws_cloudformation_rollback_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "rollback_triggers": S("RollbackTriggers", default=[]) >> ForallBend(AwsCloudFormationRollbackTrigger.mapping),
        "monitoring_time_in_minutes": S("MonitoringTimeInMinutes"),
    }
    rollback_triggers: List[AwsCloudFormationRollbackTrigger] = field(factory=list)
    monitoring_time_in_minutes: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudFormationOutput:
    """A single output (key/value pair with optional export name) of a
    CloudFormation stack."""

    kind: ClassVar[str] = "aws_cloudformation_output"
    mapping: ClassVar[Dict[str, Bender]] = {
        "output_key": S("OutputKey"),
        "output_value": S("OutputValue"),
        "description": S("Description"),
        "export_name": S("ExportName"),
    }
    output_key: Optional[str] = field(default=None)
    output_value: Optional[str] = field(default=None)
    description: Optional[str] = field(default=None)
    export_name: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudFormationStackDriftInformation:
    """Drift status of a CloudFormation stack and when it was last checked."""

    kind: ClassVar[str] = "aws_cloudformation_stack_drift_information"
    mapping: ClassVar[Dict[str, Bender]] = {
        "stack_drift_status": S("StackDriftStatus"),
        "last_check_timestamp": S("LastCheckTimestamp"),
    }
    stack_drift_status: Optional[str] = field(default=None)
    last_check_timestamp: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudFormationStack(AwsResource, BaseStack):
    """A CloudFormation stack.

    Tag changes are implemented as a full stack update with
    UsePreviousTemplate (there is no dedicated tagging API), so a tag
    mutation first waits for any in-progress stack operation to finish.
    """

    kind: ClassVar[str] = "aws_cloudformation_stack"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "describe-stacks", "Stacks")
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("StackId"),
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("StackName"),
        "ctime": S("CreationTime"),
        "mtime": S("LastUpdatedTime"),
        "stack_status": S("StackStatus", default=""),
        "stack_status_reason": S("StackStatusReason", default=""),
        "stack_parameters": S("Parameters", default=[]) >> ToDict("ParameterKey", "ParameterValue"),
        "stack_change_set_id": S("ChangeSetId"),
        "description": S("Description"),
        "stack_deletion_time": S("DeletionTime"),
        "stack_rollback_configuration": S("RollbackConfiguration")
        >> Bend(AwsCloudFormationRollbackConfiguration.mapping),
        "stack_disable_rollback": S("DisableRollback"),
        "stack_notification_ar_ns": S("NotificationARNs", default=[]),
        "stack_timeout_in_minutes": S("TimeoutInMinutes"),
        "stack_capabilities": S("Capabilities", default=[]),
        "stack_outputs": S("Outputs", default=[]) >> ForallBend(AwsCloudFormationOutput.mapping),
        "stack_role_arn": S("RoleARN"),
        "stack_enable_termination_protection": S("EnableTerminationProtection"),
        "stack_parent_id": S("ParentId"),
        "stack_root_id": S("RootId"),
        "stack_drift_information": S("DriftInformation") >> Bend(AwsCloudFormationStackDriftInformation.mapping),
    }
    stack_change_set_id: Optional[str] = field(default=None)
    description: Optional[str] = field(default=None)
    stack_deletion_time: Optional[datetime] = field(default=None)
    stack_rollback_configuration: Optional[AwsCloudFormationRollbackConfiguration] = field(default=None)
    stack_disable_rollback: Optional[bool] = field(default=None)
    stack_notification_ar_ns: List[str] = field(factory=list)
    stack_timeout_in_minutes: Optional[int] = field(default=None)
    stack_capabilities: List[str] = field(factory=list)
    stack_outputs: List[AwsCloudFormationOutput] = field(factory=list)
    stack_role_arn: Optional[str] = field(default=None)
    stack_enable_termination_protection: Optional[bool] = field(default=None)
    stack_parent_id: Optional[str] = field(default=None)
    stack_root_id: Optional[str] = field(default=None)
    stack_drift_information: Optional[AwsCloudFormationStackDriftInformation] = field(default=None)

    def _modify_tag(self, client: AwsClient, key: str, value: Optional[str], mode: Literal["delete", "update"]) -> bool:
        """Add/overwrite or delete one tag by issuing an update-stack call
        that keeps the previous template and parameters.

        Raises KeyError when deleting a tag that is not set; raises
        RuntimeError (chained) when the update-stack call fails.
        """
        tags = dict(self.tags)
        if mode == "delete":
            if not self.tags.get(key):
                raise KeyError(key)
            del tags[key]
        elif mode == "update":
            # short-circuit: nothing to do if the tag already has this value
            if self.tags.get(key) == value:
                return True
            tags.update({key: value})
        else:
            return False
        service = self.api_spec.service
        # fetch the current stack state and wait until no operation is in progress
        stack = cast(
            Json,
            client.list(aws_service=service, action="describe-stacks", result_name="Stacks", StackName=self.name)[0],
        )
        stack = self._wait_for_completion(client, stack, service)
        try:
            client.call(
                aws_service=service_name,
                action="update-stack",
                result_name=None,
                StackName=self.name,
                Capabilities=["CAPABILITY_NAMED_IAM"],
                UsePreviousTemplate=True,
                Tags=[{"Key": label, "Value": value} for label, value in tags.items()],
                # keep all existing parameter values unchanged
                Parameters=[
                    {"ParameterKey": parameter, "UsePreviousValue": True} for parameter in self.stack_parameters.keys()
                ],
            )
        except Exception as e:
            raise RuntimeError(f"Error updating AWS Cloudformation Stack {self.dname} for {mode} of tag {key}") from e
        return True

    def _wait_for_completion(self, client: AwsClient, stack: Json, service: str, timeout: int = 300) -> Json:
        """Poll describe-stacks every 5s until the stack leaves any
        *_IN_PROGRESS status; raise TimeoutError after `timeout` seconds."""
        start_utime = time.time()
        while stack["StackStatus"].endswith("_IN_PROGRESS"):
            if time.time() > start_utime + timeout:
                raise TimeoutError(
                    (
                        f"AWS Cloudformation Stack {self.dname} tag update timed out "
                        f"after {timeout} seconds with status {stack['StackStatus']}"
                    )
                )
            time.sleep(5)
            stack = cast(
                Json,
                client.list(aws_service=service, action="describe-stacks", result_name="Stacks", StackName=self.name)[
                    0
                ],
            )
        return stack

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        return self._modify_tag(client, key, value, "update")

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        return self._modify_tag(client, key, None, "delete")

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this stack via cloudformation delete-stack."""
        client.call(aws_service=self.api_spec.service, action="delete-stack", result_name=None, StackName=self.name)
        return True

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        return [cls.api_spec, AwsApiSpec(service_name, "list-stacks")]

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        return [AwsApiSpec(service_name, "update-stack"), AwsApiSpec(service_name, "delete-stack")]
@define(eq=False, slots=False)
class AwsCloudFormationAutoDeployment:
    """Auto-deployment settings of a CloudFormation stack set."""

    kind: ClassVar[str] = "aws_cloudformation_auto_deployment"
    mapping: ClassVar[Dict[str, Bender]] = {
        "enabled": S("Enabled"),
        "retain_stacks_on_account_removal": S("RetainStacksOnAccountRemoval"),
    }
    enabled: Optional[bool] = field(default=None)
    retain_stacks_on_account_removal: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class AwsCloudFormationStackSet(AwsResource):
    """A CloudFormation stack set (only ACTIVE ones are listed).

    Like AwsCloudFormationStack, tag mutation is implemented as a full
    update-stack-set call with UsePreviousTemplate.
    """

    kind: ClassVar[str] = "aws_cloudformation_stack_set"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-stack-sets", "Summaries", dict(Status="ACTIVE"))
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("StackSetId"),
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("StackSetName"),
        "description": S("Description"),
        "stack_set_status": S("Status"),
        "stack_set_auto_deployment": S("AutoDeployment") >> Bend(AwsCloudFormationAutoDeployment.mapping),
        "stack_set_permission_model": S("PermissionModel"),
        "stack_set_drift_status": S("DriftStatus"),
        "stack_set_last_drift_check_timestamp": S("LastDriftCheckTimestamp"),
        "stack_set_managed_execution": S("ManagedExecution", "Active"),
        "stack_set_parameters": S("Parameters", default=[]) >> ToDict("ParameterKey", "ParameterValue"),
    }
    description: Optional[str] = field(default=None)
    stack_set_status: Optional[str] = field(default=None)
    stack_set_auto_deployment: Optional[AwsCloudFormationAutoDeployment] = field(default=None)
    stack_set_permission_model: Optional[str] = field(default=None)
    stack_set_drift_status: Optional[str] = field(default=None)
    stack_set_last_drift_check_timestamp: Optional[datetime] = field(default=None)
    stack_set_managed_execution: Optional[bool] = field(default=None)
    stack_set_parameters: Optional[Dict[str, Any]] = None

    @classmethod
    def collect(cls, json: List[Json], builder: GraphBuilder) -> None:
        # for each stack set also collect its stack instances and defer an
        # edge to the concrete stack (which may be collected in a different
        # account/region, hence the search-criteria edge)
        def stack_set_instances(ss: AwsCloudFormationStackSet) -> None:
            for sij in builder.client.list(service_name, "list-stack-instances", "Summaries", StackSetName=ss.name):
                if sii := AwsCloudFormationStackInstanceSummary.from_api(sij, builder):
                    builder.add_node(sii, sij)
                    builder.add_edge(ss, node=sii)
                    builder.graph.add_deferred_edge(
                        ByNodeId(ss.chksum),
                        BySearchCriteria(
                            f'is(aws_cloudformation_stack) and reported.id="{sii.stack_instance_stack_id}"'
                        ),
                    )

        for js in json:
            if stack_set := cls.from_api(js, builder):
                builder.add_node(stack_set, js)
                builder.submit_work(service_name, stack_set_instances, stack_set)

    def _modify_tag(self, client: AwsClient, key: str, value: Optional[str], mode: Literal["update", "delete"]) -> bool:
        """Add/overwrite or delete one tag by issuing an update-stack-set
        call that keeps the previous template and parameters.

        Raises KeyError when deleting a tag that is not set; raises
        RuntimeError (chained) when the update-stack-set call fails.
        """
        tags = dict(self.tags)
        if mode == "delete":
            if not self.tags.get(key):
                raise KeyError(key)
            del tags[key]
        elif mode == "update":
            # short-circuit: nothing to do if the tag already has this value
            if self.tags.get(key) == value:
                return True
            tags.update({key: value})
        else:
            return False
        try:
            client.call(
                aws_service=service_name,
                action="update-stack-set",
                result_name=None,
                StackSetName=self.name,
                Capabilities=["CAPABILITY_NAMED_IAM"],
                UsePreviousTemplate=True,
                Tags=[{"Key": label, "Value": value} for label, value in tags.items()],
                # keep all existing parameter values unchanged
                Parameters=[
                    {"ParameterKey": parameter, "UsePreviousValue": True}
                    for parameter in (self.stack_set_parameters or {}).keys()
                ],
            )
        except Exception as e:
            raise RuntimeError(
                "Error updating AWS Cloudformation Stack Set" f" {self.dname} for {mode} of tag {key}"
            ) from e
        return True

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        return self._modify_tag(client, key, value, "update")

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        return self._modify_tag(client, key, None, "delete")

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this stack set via cloudformation delete-stack-set."""
        client.call(
            aws_service=self.api_spec.service,
            action="delete-stack-set",
            result_name=None,
            StackSetName=self.name,
        )
        return True

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        return [cls.api_spec, AwsApiSpec(service_name, "list-stack-instances")]

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        return [AwsApiSpec(service_name, "update-stack-set"), AwsApiSpec(service_name, "delete-stack-set")]
def _stack_instance_id(stack: Json) -> str:
    """Synthesize a unique id for a stack instance summary from its stack
    set id, the last segment of the stack id, account and region."""
    stack_set_part = stack.get("StackSetId", "")
    # only the last path segment of the stack id
    stack_part = stack.get("StackId", "").rsplit("/", 1)[-1]
    account_part = stack.get("Account", "")
    region_part = stack.get("Region", "")
    return "/".join((stack_set_part, stack_part, account_part, region_part))
@define(eq=False, slots=False)
class AwsCloudFormationStackInstanceSummary(AwsResource):
    """A stack instance of a stack set (one stack per account/region).

    Note: this resource is collected via AwsCloudFormationStackSet; its id
    is synthesized by _stack_instance_id since the API provides none.
    """

    kind: ClassVar[str] = "aws_cloud_formation_stack_instance_summary"
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": F(_stack_instance_id),
        "stack_instance_stack_set_id": S("StackSetId"),
        "stack_instance_region": S("Region"),
        "stack_instance_account": S("Account"),
        "stack_instance_stack_id": S("StackId"),
        "stack_instance_status": S("Status"),
        "stack_instance_status_reason": S("StatusReason"),
        "stack_instance_stack_instance_status": S("StackInstanceStatus", "DetailedStatus"),
        "stack_instance_organizational_unit_id": S("OrganizationalUnitId"),
        "stack_instance_drift_status": S("DriftStatus"),
        "stack_instance_last_drift_check_timestamp": S("LastDriftCheckTimestamp"),
        "stack_instance_last_operation_id": S("LastOperationId"),
    }
    stack_instance_stack_set_id: Optional[str] = field(default=None)
    stack_instance_region: Optional[str] = field(default=None)
    stack_instance_account: Optional[str] = field(default=None)
    stack_instance_stack_id: Optional[str] = field(default=None)
    stack_instance_status: Optional[str] = field(default=None)
    stack_instance_status_reason: Optional[str] = field(default=None)
    stack_instance_stack_instance_status: Optional[str] = field(default=None)
    stack_instance_organizational_unit_id: Optional[str] = field(default=None)
    stack_instance_drift_status: Optional[str] = field(default=None)
    stack_instance_last_drift_check_timestamp: Optional[datetime] = field(default=None)
    stack_instance_last_operation_id: Optional[str] = field(default=None)

    @classmethod
    def service_name(cls) -> str:
        return service_name
# All resource kinds exported by this module for collection.
# Fix: removed dataset-extraction residue that was fused onto the closing
# bracket line and made the module syntactically invalid.
resources: List[Type[AwsResource]] = [
    AwsCloudFormationStack,
    AwsCloudFormationStackSet,
    AwsCloudFormationStackInstanceSummary,
]
from typing import ClassVar, Dict, Optional, List
from attrs import define, field
from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder
from resoto_plugin_aws.resource.kms import AwsKmsKey
from resotolib.baseresources import ModelReference
from resotolib.graph import Graph
from resotolib.json_bender import Bender, S, Bend, ForallBend, K
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.utils import ToDict
from typing import Type
from datetime import datetime
from resotolib.types import Json
from resoto_plugin_aws.resource.ec2 import AwsEc2Vpc, AwsEc2SecurityGroup, AwsEc2Subnet
from resoto_plugin_aws.resource.iam import AwsIamRole
service_name = "redshift"
@define(eq=False, slots=False)
class AwsRedshiftNetworkInterface:
    """Network interface attached to a Redshift VPC endpoint."""

    kind: ClassVar[str] = "aws_redshift_network_interface"
    mapping: ClassVar[Dict[str, Bender]] = {
        "network_interface_id": S("NetworkInterfaceId"),
        "subnet_id": S("SubnetId"),
        "private_ip_address": S("PrivateIpAddress"),
        "availability_zone": S("AvailabilityZone"),
    }
    network_interface_id: Optional[str] = field(default=None)
    subnet_id: Optional[str] = field(default=None)
    private_ip_address: Optional[str] = field(default=None)
    availability_zone: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftVpcEndpoint:
    """VPC endpoint of a Redshift cluster, including its network interfaces."""

    kind: ClassVar[str] = "aws_redshift_vpc_endpoint"
    mapping: ClassVar[Dict[str, Bender]] = {
        "vpc_endpoint_id": S("VpcEndpointId"),
        "vpc_id": S("VpcId"),
        "network_interfaces": S("NetworkInterfaces", default=[]) >> ForallBend(AwsRedshiftNetworkInterface.mapping),
    }
    vpc_endpoint_id: Optional[str] = field(default=None)
    vpc_id: Optional[str] = field(default=None)
    network_interfaces: List[AwsRedshiftNetworkInterface] = field(factory=list)
@define(eq=False, slots=False)
class AwsRedshiftEndpoint:
    """Connection endpoint (address/port) of a Redshift cluster."""

    kind: ClassVar[str] = "aws_redshift_endpoint"
    mapping: ClassVar[Dict[str, Bender]] = {
        "address": S("Address"),
        "port": S("Port"),
        "vpc_endpoints": S("VpcEndpoints", default=[]) >> ForallBend(AwsRedshiftVpcEndpoint.mapping),
    }
    address: Optional[str] = field(default=None)
    port: Optional[int] = field(default=None)
    vpc_endpoints: List[AwsRedshiftVpcEndpoint] = field(factory=list)
@define(eq=False, slots=False)
class AwsRedshiftClusterSecurityGroupMembership:
    """Membership of the cluster in a (classic) Redshift cluster security group."""

    kind: ClassVar[str] = "aws_redshift_cluster_security_group_membership"
    mapping: ClassVar[Dict[str, Bender]] = {
        "cluster_security_group_name": S("ClusterSecurityGroupName"),
        "status": S("Status"),
    }
    cluster_security_group_name: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftVpcSecurityGroupMembership:
    """Membership of the cluster in an EC2 VPC security group."""

    kind: ClassVar[str] = "aws_redshift_vpc_security_group_membership"
    mapping: ClassVar[Dict[str, Bender]] = {"vpc_security_group_id": S("VpcSecurityGroupId"), "status": S("Status")}
    vpc_security_group_id: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftClusterParameterStatus:
    """Apply status of a single parameter in a cluster parameter group."""

    kind: ClassVar[str] = "aws_redshift_cluster_parameter_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "parameter_name": S("ParameterName"),
        "parameter_apply_status": S("ParameterApplyStatus"),
        "parameter_apply_error_description": S("ParameterApplyErrorDescription"),
    }
    parameter_name: Optional[str] = field(default=None)
    parameter_apply_status: Optional[str] = field(default=None)
    parameter_apply_error_description: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftClusterParameterGroupStatus:
    """Status of a parameter group attached to the cluster, with per-parameter detail."""

    kind: ClassVar[str] = "aws_redshift_cluster_parameter_group_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "parameter_group_name": S("ParameterGroupName"),
        "parameter_apply_status": S("ParameterApplyStatus"),
        "cluster_parameter_status_list": S("ClusterParameterStatusList", default=[])
        >> ForallBend(AwsRedshiftClusterParameterStatus.mapping),
    }
    parameter_group_name: Optional[str] = field(default=None)
    parameter_apply_status: Optional[str] = field(default=None)
    cluster_parameter_status_list: List[AwsRedshiftClusterParameterStatus] = field(factory=list)
@define(eq=False, slots=False)
class AwsRedshiftPendingModifiedValues:
    """Cluster settings that have been requested but not yet applied."""

    kind: ClassVar[str] = "aws_redshift_pending_modified_values"
    mapping: ClassVar[Dict[str, Bender]] = {
        "master_user_password": S("MasterUserPassword"),
        "node_type": S("NodeType"),
        "number_of_nodes": S("NumberOfNodes"),
        "cluster_type": S("ClusterType"),
        "cluster_version": S("ClusterVersion"),
        "automated_snapshot_retention_period": S("AutomatedSnapshotRetentionPeriod"),
        "cluster_identifier": S("ClusterIdentifier"),
        "publicly_accessible": S("PubliclyAccessible"),
        "enhanced_vpc_routing": S("EnhancedVpcRouting"),
        "maintenance_track_name": S("MaintenanceTrackName"),
        "encryption_type": S("EncryptionType"),
    }
    master_user_password: Optional[str] = field(default=None)
    node_type: Optional[str] = field(default=None)
    number_of_nodes: Optional[int] = field(default=None)
    cluster_type: Optional[str] = field(default=None)
    cluster_version: Optional[str] = field(default=None)
    automated_snapshot_retention_period: Optional[int] = field(default=None)
    cluster_identifier: Optional[str] = field(default=None)
    publicly_accessible: Optional[bool] = field(default=None)
    enhanced_vpc_routing: Optional[bool] = field(default=None)
    maintenance_track_name: Optional[str] = field(default=None)
    encryption_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftRestoreStatus:
    """Progress of a restore-from-snapshot operation on the cluster."""

    kind: ClassVar[str] = "aws_redshift_restore_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "status": S("Status"),
        "current_restore_rate_in_mega_bytes_per_second": S("CurrentRestoreRateInMegaBytesPerSecond"),
        "snapshot_size_in_mega_bytes": S("SnapshotSizeInMegaBytes"),
        "progress_in_mega_bytes": S("ProgressInMegaBytes"),
        "elapsed_time_in_seconds": S("ElapsedTimeInSeconds"),
        "estimated_time_to_completion_in_seconds": S("EstimatedTimeToCompletionInSeconds"),
    }
    status: Optional[str] = field(default=None)
    current_restore_rate_in_mega_bytes_per_second: Optional[float] = field(default=None)
    snapshot_size_in_mega_bytes: Optional[int] = field(default=None)
    progress_in_mega_bytes: Optional[int] = field(default=None)
    elapsed_time_in_seconds: Optional[int] = field(default=None)
    estimated_time_to_completion_in_seconds: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftDataTransferProgress:
    """Progress of a data transfer (e.g. during a classic resize)."""

    kind: ClassVar[str] = "aws_redshift_data_transfer_progress"
    mapping: ClassVar[Dict[str, Bender]] = {
        "status": S("Status"),
        "current_rate_in_mega_bytes_per_second": S("CurrentRateInMegaBytesPerSecond"),
        "total_data_in_mega_bytes": S("TotalDataInMegaBytes"),
        "data_transferred_in_mega_bytes": S("DataTransferredInMegaBytes"),
        "estimated_time_to_completion_in_seconds": S("EstimatedTimeToCompletionInSeconds"),
        "elapsed_time_in_seconds": S("ElapsedTimeInSeconds"),
    }
    status: Optional[str] = field(default=None)
    current_rate_in_mega_bytes_per_second: Optional[float] = field(default=None)
    total_data_in_mega_bytes: Optional[int] = field(default=None)
    data_transferred_in_mega_bytes: Optional[int] = field(default=None)
    estimated_time_to_completion_in_seconds: Optional[int] = field(default=None)
    elapsed_time_in_seconds: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftHsmStatus:
    """Status of the cluster's hardware security module configuration."""

    kind: ClassVar[str] = "aws_redshift_hsm_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "hsm_client_certificate_identifier": S("HsmClientCertificateIdentifier"),
        "hsm_configuration_identifier": S("HsmConfigurationIdentifier"),
        "status": S("Status"),
    }
    hsm_client_certificate_identifier: Optional[str] = field(default=None)
    hsm_configuration_identifier: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftClusterSnapshotCopyStatus:
    """Cross-region snapshot copy settings of the cluster."""

    kind: ClassVar[str] = "aws_redshift_cluster_snapshot_copy_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "destination_region": S("DestinationRegion"),
        "retention_period": S("RetentionPeriod"),
        "manual_snapshot_retention_period": S("ManualSnapshotRetentionPeriod"),
        "snapshot_copy_grant_name": S("SnapshotCopyGrantName"),
    }
    destination_region: Optional[str] = field(default=None)
    retention_period: Optional[int] = field(default=None)
    manual_snapshot_retention_period: Optional[int] = field(default=None)
    snapshot_copy_grant_name: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftClusterNode:
    """A single node of the cluster with its role and IP addresses."""

    kind: ClassVar[str] = "aws_redshift_cluster_node"
    mapping: ClassVar[Dict[str, Bender]] = {
        "node_role": S("NodeRole"),
        "private_ip_address": S("PrivateIPAddress"),
        "public_ip_address": S("PublicIPAddress"),
    }
    node_role: Optional[str] = field(default=None)
    private_ip_address: Optional[str] = field(default=None)
    public_ip_address: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftElasticIpStatus:
    """Status of the elastic IP address attached to the cluster."""

    kind: ClassVar[str] = "aws_redshift_elastic_ip_status"
    mapping: ClassVar[Dict[str, Bender]] = {"elastic_ip": S("ElasticIp"), "status": S("Status")}
    elastic_ip: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftClusterIamRole:
    """IAM role associated with the cluster and its apply status."""

    kind: ClassVar[str] = "aws_redshift_cluster_iam_role"
    mapping: ClassVar[Dict[str, Bender]] = {"iam_role_arn": S("IamRoleArn"), "apply_status": S("ApplyStatus")}
    iam_role_arn: Optional[str] = field(default=None)
    apply_status: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftDeferredMaintenanceWindow:
    """Time window during which cluster maintenance is deferred."""

    kind: ClassVar[str] = "aws_redshift_deferred_maintenance_window"
    mapping: ClassVar[Dict[str, Bender]] = {
        "defer_maintenance_identifier": S("DeferMaintenanceIdentifier"),
        "defer_maintenance_start_time": S("DeferMaintenanceStartTime"),
        "defer_maintenance_end_time": S("DeferMaintenanceEndTime"),
    }
    defer_maintenance_identifier: Optional[str] = field(default=None)
    defer_maintenance_start_time: Optional[datetime] = field(default=None)
    defer_maintenance_end_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftResizeInfo:
    """Type of a running resize operation and whether it can be cancelled."""

    kind: ClassVar[str] = "aws_redshift_resize_info"
    mapping: ClassVar[Dict[str, Bender]] = {
        "resize_type": S("ResizeType"),
        "allow_cancel_resize": S("AllowCancelResize"),
    }
    resize_type: Optional[str] = field(default=None)
    allow_cancel_resize: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftAquaConfiguration:
    """AQUA (Advanced Query Accelerator) configuration of the cluster."""

    kind: ClassVar[str] = "aws_redshift_aqua_configuration"
    mapping: ClassVar[Dict[str, Bender]] = {
        "aqua_status": S("AquaStatus"),
        "aqua_configuration_status": S("AquaConfigurationStatus"),
    }
    aqua_status: Optional[str] = field(default=None)
    aqua_configuration_status: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftReservedNodeExchangeStatus:
    """Status of a reserved node exchange request (source and target node details)."""

    kind: ClassVar[str] = "aws_redshift_reserved_node_exchange_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "reserved_node_exchange_request_id": S("ReservedNodeExchangeRequestId"),
        "status": S("Status"),
        "request_time": S("RequestTime"),
        "source_reserved_node_id": S("SourceReservedNodeId"),
        "source_reserved_node_type": S("SourceReservedNodeType"),
        "source_reserved_node_count": S("SourceReservedNodeCount"),
        "target_reserved_node_offering_id": S("TargetReservedNodeOfferingId"),
        "target_reserved_node_type": S("TargetReservedNodeType"),
        "target_reserved_node_count": S("TargetReservedNodeCount"),
    }
    reserved_node_exchange_request_id: Optional[str] = field(default=None)
    status: Optional[str] = field(default=None)
    request_time: Optional[datetime] = field(default=None)
    source_reserved_node_id: Optional[str] = field(default=None)
    source_reserved_node_type: Optional[str] = field(default=None)
    source_reserved_node_count: Optional[int] = field(default=None)
    target_reserved_node_offering_id: Optional[str] = field(default=None)
    target_reserved_node_type: Optional[str] = field(default=None)
    target_reserved_node_count: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsRedshiftCluster(AwsResource):
    """AWS Redshift cluster.

    Collected via describe-clusters. During graph connection, edges are
    created to the VPC, the VPC security groups, the associated IAM roles,
    the subnet (by subnet group name) and the KMS key the cluster uses.
    """

    kind: ClassVar[str] = "aws_redshift_cluster"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "describe-clusters", "Clusters")
    reference_kinds: ClassVar[ModelReference] = {
        "predecessors": {
            "default": ["aws_vpc", "aws_ec2_security_group", "aws_iam_role", "aws_ec2_subnet"],
            "delete": ["aws_kms_key", "aws_vpc", "aws_ec2_security_group", "aws_iam_role", "aws_ec2_subnet"],
        },
        "successors": {
            "default": ["aws_kms_key"],
        },
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("ClusterIdentifier"),
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("ClusterIdentifier"),
        "ctime": S("ClusterCreateTime"),
        # the API exposes no modification/access time for clusters
        "mtime": K(None),
        "atime": K(None),
        "redshift_node_type": S("NodeType"),
        "redshift_cluster_status": S("ClusterStatus"),
        "redshift_cluster_availability_status": S("ClusterAvailabilityStatus"),
        "redshift_modify_status": S("ModifyStatus"),
        "redshift_master_username": S("MasterUsername"),
        "redshift_db_name": S("DBName"),
        "redshift_endpoint": S("Endpoint") >> Bend(AwsRedshiftEndpoint.mapping),
        "redshift_automated_snapshot_retention_period": S("AutomatedSnapshotRetentionPeriod"),
        "redshift_manual_snapshot_retention_period": S("ManualSnapshotRetentionPeriod"),
        "redshift_cluster_security_groups": S("ClusterSecurityGroups", default=[])
        >> ForallBend(AwsRedshiftClusterSecurityGroupMembership.mapping),
        "redshift_vpc_security_groups": S("VpcSecurityGroups", default=[])
        >> ForallBend(AwsRedshiftVpcSecurityGroupMembership.mapping),
        "redshift_cluster_parameter_groups": S("ClusterParameterGroups", default=[])
        >> ForallBend(AwsRedshiftClusterParameterGroupStatus.mapping),
        "redshift_cluster_subnet_group_name": S("ClusterSubnetGroupName"),
        "redshift_vpc_id": S("VpcId"),
        "redshift_availability_zone": S("AvailabilityZone"),
        "redshift_preferred_maintenance_window": S("PreferredMaintenanceWindow"),
        "redshift_pending_modified_values": S("PendingModifiedValues")
        >> Bend(AwsRedshiftPendingModifiedValues.mapping),
        "redshift_cluster_version": S("ClusterVersion"),
        "redshift_allow_version_upgrade": S("AllowVersionUpgrade"),
        "redshift_number_of_nodes": S("NumberOfNodes"),
        "redshift_publicly_accessible": S("PubliclyAccessible"),
        "redshift_encrypted": S("Encrypted"),
        "redshift_restore_status": S("RestoreStatus") >> Bend(AwsRedshiftRestoreStatus.mapping),
        "redshift_data_transfer_progress": S("DataTransferProgress") >> Bend(AwsRedshiftDataTransferProgress.mapping),
        "redshift_hsm_status": S("HsmStatus") >> Bend(AwsRedshiftHsmStatus.mapping),
        "redshift_cluster_snapshot_copy_status": S("ClusterSnapshotCopyStatus")
        >> Bend(AwsRedshiftClusterSnapshotCopyStatus.mapping),
        "redshift_cluster_public_key": S("ClusterPublicKey"),
        "redshift_cluster_nodes": S("ClusterNodes", default=[]) >> ForallBend(AwsRedshiftClusterNode.mapping),
        "redshift_elastic_ip_status": S("ElasticIpStatus") >> Bend(AwsRedshiftElasticIpStatus.mapping),
        "redshift_cluster_revision_number": S("ClusterRevisionNumber"),
        "redshift_kms_key_id": S("KmsKeyId"),
        "redshift_enhanced_vpc_routing": S("EnhancedVpcRouting"),
        "redshift_iam_roles": S("IamRoles", default=[]) >> ForallBend(AwsRedshiftClusterIamRole.mapping),
        "redshift_pending_actions": S("PendingActions", default=[]),
        "redshift_maintenance_track_name": S("MaintenanceTrackName"),
        "redshift_elastic_resize_number_of_node_options": S("ElasticResizeNumberOfNodeOptions"),
        "redshift_deferred_maintenance_windows": S("DeferredMaintenanceWindows", default=[])
        >> ForallBend(AwsRedshiftDeferredMaintenanceWindow.mapping),
        "redshift_snapshot_schedule_identifier": S("SnapshotScheduleIdentifier"),
        "redshift_snapshot_schedule_state": S("SnapshotScheduleState"),
        "redshift_expected_next_snapshot_schedule_time": S("ExpectedNextSnapshotScheduleTime"),
        "redshift_expected_next_snapshot_schedule_time_status": S("ExpectedNextSnapshotScheduleTimeStatus"),
        "redshift_next_maintenance_window_start_time": S("NextMaintenanceWindowStartTime"),
        "redshift_resize_info": S("ResizeInfo") >> Bend(AwsRedshiftResizeInfo.mapping),
        "redshift_availability_zone_relocation_status": S("AvailabilityZoneRelocationStatus"),
        "redshift_cluster_namespace_arn": S("ClusterNamespaceArn"),
        "redshift_total_storage_capacity_in_mega_bytes": S("TotalStorageCapacityInMegaBytes"),
        "redshift_aqua_configuration": S("AquaConfiguration") >> Bend(AwsRedshiftAquaConfiguration.mapping),
        "redshift_default_iam_role_arn": S("DefaultIamRoleArn"),
        "redshift_reserved_node_exchange_status": S("ReservedNodeExchangeStatus")
        >> Bend(AwsRedshiftReservedNodeExchangeStatus.mapping),
    }
    redshift_node_type: Optional[str] = field(default=None)
    redshift_cluster_status: Optional[str] = field(default=None)
    redshift_cluster_availability_status: Optional[str] = field(default=None)
    redshift_modify_status: Optional[str] = field(default=None)
    redshift_master_username: Optional[str] = field(default=None)
    redshift_db_name: Optional[str] = field(default=None)
    redshift_endpoint: Optional[AwsRedshiftEndpoint] = field(default=None)
    redshift_automated_snapshot_retention_period: Optional[int] = field(default=None)
    redshift_manual_snapshot_retention_period: Optional[int] = field(default=None)
    redshift_cluster_security_groups: List[AwsRedshiftClusterSecurityGroupMembership] = field(factory=list)
    redshift_vpc_security_groups: List[AwsRedshiftVpcSecurityGroupMembership] = field(factory=list)
    redshift_cluster_parameter_groups: List[AwsRedshiftClusterParameterGroupStatus] = field(factory=list)
    redshift_cluster_subnet_group_name: Optional[str] = field(default=None)
    redshift_vpc_id: Optional[str] = field(default=None)
    redshift_availability_zone: Optional[str] = field(default=None)
    redshift_preferred_maintenance_window: Optional[str] = field(default=None)
    redshift_pending_modified_values: Optional[AwsRedshiftPendingModifiedValues] = field(default=None)
    redshift_cluster_version: Optional[str] = field(default=None)
    redshift_allow_version_upgrade: Optional[bool] = field(default=None)
    redshift_number_of_nodes: Optional[int] = field(default=None)
    redshift_publicly_accessible: Optional[bool] = field(default=None)
    redshift_encrypted: Optional[bool] = field(default=None)
    redshift_restore_status: Optional[AwsRedshiftRestoreStatus] = field(default=None)
    redshift_data_transfer_progress: Optional[AwsRedshiftDataTransferProgress] = field(default=None)
    redshift_hsm_status: Optional[AwsRedshiftHsmStatus] = field(default=None)
    redshift_cluster_snapshot_copy_status: Optional[AwsRedshiftClusterSnapshotCopyStatus] = field(default=None)
    redshift_cluster_public_key: Optional[str] = field(default=None)
    redshift_cluster_nodes: List[AwsRedshiftClusterNode] = field(factory=list)
    redshift_elastic_ip_status: Optional[AwsRedshiftElasticIpStatus] = field(default=None)
    redshift_cluster_revision_number: Optional[str] = field(default=None)
    redshift_kms_key_id: Optional[str] = field(default=None)
    redshift_enhanced_vpc_routing: Optional[bool] = field(default=None)
    redshift_iam_roles: List[AwsRedshiftClusterIamRole] = field(factory=list)
    redshift_pending_actions: List[str] = field(factory=list)
    redshift_maintenance_track_name: Optional[str] = field(default=None)
    redshift_elastic_resize_number_of_node_options: Optional[str] = field(default=None)
    redshift_deferred_maintenance_windows: List[AwsRedshiftDeferredMaintenanceWindow] = field(factory=list)
    redshift_snapshot_schedule_identifier: Optional[str] = field(default=None)
    redshift_snapshot_schedule_state: Optional[str] = field(default=None)
    redshift_expected_next_snapshot_schedule_time: Optional[datetime] = field(default=None)
    redshift_expected_next_snapshot_schedule_time_status: Optional[str] = field(default=None)
    redshift_next_maintenance_window_start_time: Optional[datetime] = field(default=None)
    redshift_resize_info: Optional[AwsRedshiftResizeInfo] = field(default=None)
    redshift_availability_zone_relocation_status: Optional[str] = field(default=None)
    redshift_cluster_namespace_arn: Optional[str] = field(default=None)
    redshift_total_storage_capacity_in_mega_bytes: Optional[int] = field(default=None)
    redshift_aqua_configuration: Optional[AwsRedshiftAquaConfiguration] = field(default=None)
    redshift_default_iam_role_arn: Optional[str] = field(default=None)
    redshift_reserved_node_exchange_status: Optional[AwsRedshiftReservedNodeExchangeStatus] = field(default=None)

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Create cluster nodes from the raw API response.

        The describe-clusters response carries no arn, so it is derived
        from the cluster identifier before adding the node.
        """
        for js in json:
            if cluster := cls.from_api(js, builder):
                cluster.set_arn(builder=builder, resource=f"cluster:{cluster.id}")
                builder.add_node(cluster, js)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Create edges to the VPC, security groups, IAM roles, subnet and KMS key."""
        if self.redshift_vpc_id:
            builder.dependant_node(
                self, reverse=True, delete_same_as_default=True, clazz=AwsEc2Vpc, id=self.redshift_vpc_id
            )
        for vsg in self.redshift_vpc_security_groups:
            if vsg.vpc_security_group_id:
                builder.dependant_node(
                    self,
                    reverse=True,
                    delete_same_as_default=True,
                    clazz=AwsEc2SecurityGroup,
                    id=vsg.vpc_security_group_id,
                )
        for role in self.redshift_iam_roles:
            if role.iam_role_arn:
                builder.dependant_node(
                    self, reverse=True, delete_same_as_default=True, clazz=AwsIamRole, arn=role.iam_role_arn
                )
        if self.redshift_cluster_subnet_group_name:
            # NOTE(review): this matches a subnet whose *name* equals the cluster
            # *subnet group* name - confirm this is the intended lookup.
            builder.dependant_node(
                self,
                reverse=True,
                delete_same_as_default=True,
                clazz=AwsEc2Subnet,
                name=self.redshift_cluster_subnet_group_name,
            )
        if self.redshift_kms_key_id:
            builder.dependant_node(self, clazz=AwsKmsKey, id=self.redshift_kms_key_id)

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Create or update tag `key` with `value` on this cluster."""
        client.call(
            aws_service=self.api_spec.service,
            action="create-tags",
            result_name=None,
            ResourceName=self.arn,
            Tags=[{"Key": key, "Value": value}],
        )
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove tag `key` from this cluster."""
        client.call(
            aws_service=self.api_spec.service,
            action="delete-tags",
            result_name=None,
            ResourceName=self.arn,
            TagKeys=[key],
        )
        return True

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete the cluster without taking a final snapshot."""
        client.call(
            aws_service=self.api_spec.service,
            action="delete-cluster",
            result_name=None,
            ClusterIdentifier=self.id,
            SkipFinalClusterSnapshot=True,
        )
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """Return all APIs that may be called by mutating operations on this kind."""
        return [
            AwsApiSpec(service_name, "create-tags"),
            AwsApiSpec(service_name, "delete-tags"),
            AwsApiSpec(service_name, "delete-cluster"),
        ]
# All resource kinds exported by this module for collection.
# Fix: removed dataset-extraction residue fused onto this line.
resources: List[Type[AwsResource]] = [AwsRedshiftCluster]
from datetime import datetime
from typing import ClassVar, Dict, Optional, List, Type
from attr import define, field
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.resource.base import AwsApiSpec, GraphBuilder, parse_json
from resoto_plugin_aws.resource.base import AwsResource
from resoto_plugin_aws.utils import ToDict
from resotolib.graph import Graph
from resotolib.json_bender import Bender, S, Bend, bend
from resotolib.types import Json
service_name = "config"
@define(eq=False, slots=False)
class AwsConfigRecorderStatus:
    """Current status of an AWS Config configuration recorder.

    Fetched via describe-configuration-recorder-status and merged into the
    recorder resource during collection.
    """

    kind: ClassVar[str] = "aws_config_recorder_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_start_time": S("lastStartTime"),
        "last_stop_time": S("lastStopTime"),
        "recording": S("recording"),
        "last_status": S("lastStatus"),
        "last_error_code": S("lastErrorCode"),
        "last_error_message": S("lastErrorMessage"),
        "last_status_change_time": S("lastStatusChangeTime"),
    }
    last_start_time: Optional[datetime] = field(default=None)
    last_stop_time: Optional[datetime] = field(default=None)
    recording: Optional[bool] = field(default=None)
    last_status: Optional[str] = field(default=None)
    last_error_code: Optional[str] = field(default=None)
    last_error_message: Optional[str] = field(default=None)
    last_status_change_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class AwsConfigRecordingGroup:
    """Set of resource types recorded by a configuration recorder."""

    kind: ClassVar[str] = "aws_config_recording_group"
    mapping: ClassVar[Dict[str, Bender]] = {
        "all_supported": S("allSupported"),
        "include_global_resource_types": S("includeGlobalResourceTypes"),
        "resource_types": S("resourceTypes", default=[]),
    }
    all_supported: Optional[bool] = field(default=None)
    include_global_resource_types: Optional[bool] = field(default=None)
    resource_types: List[str] = field(factory=list)
@define(eq=False, slots=False)
class AwsConfigRecorder(AwsResource):
    """AWS Config configuration recorder.

    The recorder status is fetched with a separate API call and merged into
    each resource during collection.
    """

    kind: ClassVar[str] = "aws_config_recorder"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(
        service_name, "describe-configuration-recorders", "ConfigurationRecorders"
    )
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("name"),
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("name"),
        "arn": S("roleARN"),
        "recorder_group": S("recordingGroup") >> Bend(AwsConfigRecordingGroup.mapping),
    }
    recorder_group: Optional[AwsConfigRecordingGroup] = field(default=None)
    recorder_status: Optional[AwsConfigRecorderStatus] = field(default=None)

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Collect all recorders and enrich each with its current status."""
        # fetch the status of all recorders with a single call and index it by name
        statuses: Dict[str, AwsConfigRecorderStatus] = {}
        for r in builder.client.list(
            service_name, "describe-configuration-recorder-status", "ConfigurationRecordersStatus"
        ):
            if status := parse_json(bend(AwsConfigRecorderStatus.mapping, r), AwsConfigRecorderStatus, builder):
                statuses[r["name"]] = status
        for js in json:
            if instance := AwsConfigRecorder.from_api(js, builder):
                if status := statuses.get(instance.id):
                    instance.recorder_status = status
                    # the status change time is the best available modification time
                    instance.mtime = status.last_status_change_time
                builder.add_node(instance, js)

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete this configuration recorder.

        Bug fix: the recorder name was previously passed as the third
        positional argument of client.call, where it was interpreted as
        `result_name`; the required ConfigurationRecorderName parameter of
        the DeleteConfigurationRecorder API was therefore never sent.
        """
        client.call(
            aws_service=service_name,
            action="delete-configuration-recorder",
            result_name=None,
            ConfigurationRecorderName=self.name,
        )
        return True

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """Return all APIs that are called while collecting this resource kind."""
        return [cls.api_spec, AwsApiSpec(service_name, "describe-configuration-recorder-status")]

    # this resource does not allow tags
    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """Return all APIs that may be called by mutating operations on this kind."""
        return [AwsApiSpec(service_name, "delete-configuration-recorder")]
# All resource kinds exported by this module for collection.
# Fix: removed dataset-extraction residue fused onto this line.
resources: List[Type[AwsResource]] = [AwsConfigRecorder]
from datetime import datetime
from typing import ClassVar, Dict, List, Optional, Type
from attrs import define, field
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder
from resoto_plugin_aws.resource.kinesis import AwsKinesisStream
from resoto_plugin_aws.resource.kms import AwsKmsKey
from resoto_plugin_aws.utils import ToDict
from resotolib.baseresources import ModelReference
from resotolib.graph import Graph
from resotolib.json_bender import S, Bend, Bender, ForallBend, bend
from resotolib.types import Json
service_name = "dynamodb"
# noinspection PyUnresolvedReferences
class DynamoDbTaggable:
    """Mixin implementing tag mutation for DynamoDB resources.

    Intended to be mixed into an AwsResource subclass (which provides `arn`);
    the isinstance guard makes the methods no-ops on anything else.
    """

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Add or update tag `key` with `value` via the tag-resource API."""
        if isinstance(self, AwsResource):
            client.call(
                aws_service=service_name,
                action="tag-resource",
                result_name=None,
                ResourceArn=self.arn,
                Tags=[{"Key": key, "Value": value}],
            )
            return True
        return False

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove tag `key` via the untag-resource API."""
        if isinstance(self, AwsResource):
            client.call(
                aws_service=service_name,
                action="untag-resource",
                result_name=None,
                ResourceArn=self.arn,
                TagKeys=[key],
            )
            return True
        return False

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """Return all APIs that may be called by mutating operations on this kind."""
        return [AwsApiSpec(service_name, "tag-resource"), AwsApiSpec(service_name, "untag-resource")]
@define(eq=False, slots=False)
class AwsDynamoDbAttributeDefinition:
    """Name and type of an attribute in a DynamoDB table schema."""

    kind: ClassVar[str] = "aws_dynamo_db_attribute_definition"
    mapping: ClassVar[Dict[str, Bender]] = {"attribute_name": S("AttributeName"), "attribute_type": S("AttributeType")}
    attribute_name: Optional[str] = field(default=None)
    attribute_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbKeySchemaElement:
    """A single element (attribute and key role) of a DynamoDB key schema."""

    kind: ClassVar[str] = "aws_dynamo_db_key_schema_element"
    mapping: ClassVar[Dict[str, Bender]] = {"attribute_name": S("AttributeName"), "key_type": S("KeyType")}
    attribute_name: Optional[str] = field(default=None)
    key_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbProvisionedThroughputDescription:
    """Provisioned read/write capacity of a table or index and its change history."""

    kind: ClassVar[str] = "aws_dynamo_db_provisioned_throughput_description"
    mapping: ClassVar[Dict[str, Bender]] = {
        "last_increase_date_time": S("LastIncreaseDateTime"),
        "last_decrease_date_time": S("LastDecreaseDateTime"),
        "number_of_decreases_today": S("NumberOfDecreasesToday"),
        "read_capacity_units": S("ReadCapacityUnits"),
        "write_capacity_units": S("WriteCapacityUnits"),
    }
    last_increase_date_time: Optional[datetime] = field(default=None)
    last_decrease_date_time: Optional[datetime] = field(default=None)
    number_of_decreases_today: Optional[int] = field(default=None)
    read_capacity_units: Optional[int] = field(default=None)
    write_capacity_units: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbBillingModeSummary:
    """Billing mode of the table and when pay-per-request was last set."""

    kind: ClassVar[str] = "aws_dynamo_db_billing_mode_summary"
    mapping: ClassVar[Dict[str, Bender]] = {
        "billing_mode": S("BillingMode"),
        "last_update_to_pay_per_request_date_time": S("LastUpdateToPayPerRequestDateTime"),
    }
    billing_mode: Optional[str] = field(default=None)
    last_update_to_pay_per_request_date_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbProjection:
    """Attribute projection of a secondary index."""

    kind: ClassVar[str] = "aws_dynamo_db_projection"
    mapping: ClassVar[Dict[str, Bender]] = {
        "projection_type": S("ProjectionType"),
        "non_key_attributes": S("NonKeyAttributes", default=[]),
    }
    projection_type: Optional[str] = field(default=None)
    non_key_attributes: List[str] = field(factory=list)
@define(eq=False, slots=False)
class AwsDynamoDbLocalSecondaryIndexDescription:
    """Description of a local secondary index of a DynamoDB table."""

    kind: ClassVar[str] = "aws_dynamo_db_local_secondary_index_description"
    mapping: ClassVar[Dict[str, Bender]] = {
        "index_name": S("IndexName"),
        "key_schema": S("KeySchema", default=[]) >> ForallBend(AwsDynamoDbKeySchemaElement.mapping),
        "projection": S("Projection") >> Bend(AwsDynamoDbProjection.mapping),
        "index_size_bytes": S("IndexSizeBytes"),
        "item_count": S("ItemCount"),
        "index_arn": S("IndexArn"),
    }
    index_name: Optional[str] = field(default=None)
    key_schema: List[AwsDynamoDbKeySchemaElement] = field(factory=list)
    projection: Optional[AwsDynamoDbProjection] = field(default=None)
    index_size_bytes: Optional[int] = field(default=None)
    item_count: Optional[int] = field(default=None)
    index_arn: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbGlobalSecondaryIndexDescription:
    """Description of a global secondary index (GSI), including status and its own throughput."""

    kind: ClassVar[str] = "aws_dynamo_db_global_secondary_index_description"
    mapping: ClassVar[Dict[str, Bender]] = {
        "index_name": S("IndexName"),
        "key_schema": S("KeySchema", default=[]) >> ForallBend(AwsDynamoDbKeySchemaElement.mapping),
        "projection": S("Projection") >> Bend(AwsDynamoDbProjection.mapping),
        "index_status": S("IndexStatus"),
        "backfilling": S("Backfilling"),
        "provisioned_throughput": S("ProvisionedThroughput")
        >> Bend(AwsDynamoDbProvisionedThroughputDescription.mapping),
        "index_size_bytes": S("IndexSizeBytes"),
        "item_count": S("ItemCount"),
        "index_arn": S("IndexArn"),
    }
    index_name: Optional[str] = field(default=None)
    key_schema: List[AwsDynamoDbKeySchemaElement] = field(factory=list)
    projection: Optional[AwsDynamoDbProjection] = field(default=None)
    index_status: Optional[str] = field(default=None)
    backfilling: Optional[bool] = field(default=None)
    provisioned_throughput: Optional[AwsDynamoDbProvisionedThroughputDescription] = field(default=None)
    index_size_bytes: Optional[int] = field(default=None)
    item_count: Optional[int] = field(default=None)
    index_arn: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbStreamSpecification:
    """DynamoDB Streams configuration of a table (enabled flag and view type)."""

    kind: ClassVar[str] = "aws_dynamo_db_stream_specification"
    mapping: ClassVar[Dict[str, Bender]] = {
        "stream_enabled": S("StreamEnabled"),
        "stream_view_type": S("StreamViewType"),
    }
    stream_enabled: Optional[bool] = field(default=None)
    stream_view_type: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbReplicaGlobalSecondaryIndexDescription:
    """Per-replica GSI description.

    NOTE(review): the mapping extracts only ``ReadCapacityUnits`` from
    ``ProvisionedThroughputOverride`` — the write side is intentionally dropped; confirm.
    """

    kind: ClassVar[str] = "aws_dynamo_db_replica_global_secondary_index_description"
    mapping: ClassVar[Dict[str, Bender]] = {
        "index_name": S("IndexName"),
        "provisioned_throughput_override": S("ProvisionedThroughputOverride", "ReadCapacityUnits"),
    }
    index_name: Optional[str] = field(default=None)
    provisioned_throughput_override: Optional[int] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbTableClassSummary:
    """Table storage class (e.g. standard vs. infrequent access) and when it last changed."""

    kind: ClassVar[str] = "aws_dynamo_db_table_class_summary"
    mapping: ClassVar[Dict[str, Bender]] = {
        "table_class": S("TableClass"),
        "last_update_date_time": S("LastUpdateDateTime"),
    }
    table_class: Optional[str] = field(default=None)
    last_update_date_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbReplicaDescription:
    """One regional replica of a (global) table, with status, KMS key and per-replica GSIs."""

    kind: ClassVar[str] = "aws_dynamo_db_replica_description"
    mapping: ClassVar[Dict[str, Bender]] = {
        "region_name": S("RegionName"),
        "replica_status": S("ReplicaStatus"),
        "replica_status_description": S("ReplicaStatusDescription"),
        "replica_status_percent_progress": S("ReplicaStatusPercentProgress"),
        "kms_master_key_id": S("KMSMasterKeyId"),
        # only the read side of the override is kept — see the per-replica GSI class
        "provisioned_throughput_override": S("ProvisionedThroughputOverride", "ReadCapacityUnits"),
        "global_secondary_indexes": S("GlobalSecondaryIndexes", default=[])
        >> ForallBend(AwsDynamoDbReplicaGlobalSecondaryIndexDescription.mapping),
        "replica_inaccessible_date_time": S("ReplicaInaccessibleDateTime"),
        "replica_table_class_summary": S("ReplicaTableClassSummary") >> Bend(AwsDynamoDbTableClassSummary.mapping),
    }
    region_name: Optional[str] = field(default=None)
    replica_status: Optional[str] = field(default=None)
    replica_status_description: Optional[str] = field(default=None)
    replica_status_percent_progress: Optional[str] = field(default=None)
    kms_master_key_id: Optional[str] = field(default=None)
    provisioned_throughput_override: Optional[int] = field(default=None)
    global_secondary_indexes: List[AwsDynamoDbReplicaGlobalSecondaryIndexDescription] = field(factory=list)
    replica_inaccessible_date_time: Optional[datetime] = field(default=None)
    replica_table_class_summary: Optional[AwsDynamoDbTableClassSummary] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbRestoreSummary:
    """Details of a restore operation, if the table was restored from a backup or another table."""

    kind: ClassVar[str] = "aws_dynamo_db_restore_summary"
    mapping: ClassVar[Dict[str, Bender]] = {
        "source_backup_arn": S("SourceBackupArn"),
        "source_table_arn": S("SourceTableArn"),
        "restore_date_time": S("RestoreDateTime"),
        "restore_in_progress": S("RestoreInProgress"),
    }
    source_backup_arn: Optional[str] = field(default=None)
    source_table_arn: Optional[str] = field(default=None)
    restore_date_time: Optional[datetime] = field(default=None)
    restore_in_progress: Optional[bool] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbSSEDescription:
    """Server-side encryption settings of a table, including the KMS master key ARN."""

    kind: ClassVar[str] = "aws_dynamo_db_sse_description"
    mapping: ClassVar[Dict[str, Bender]] = {
        "status": S("Status"),
        "sse_type": S("SSEType"),
        "kms_master_key_arn": S("KMSMasterKeyArn"),
        "inaccessible_encryption_date_time": S("InaccessibleEncryptionDateTime"),
    }
    status: Optional[str] = field(default=None)
    sse_type: Optional[str] = field(default=None)
    kms_master_key_arn: Optional[str] = field(default=None)
    inaccessible_encryption_date_time: Optional[datetime] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbArchivalSummary:
    """Archival state of a table (when, why, and the backup ARN it was archived to)."""

    kind: ClassVar[str] = "aws_dynamo_db_archival_summary"
    mapping: ClassVar[Dict[str, Bender]] = {
        "archival_date_time": S("ArchivalDateTime"),
        "archival_reason": S("ArchivalReason"),
        "archival_backup_arn": S("ArchivalBackupArn"),
    }
    archival_date_time: Optional[datetime] = field(default=None)
    archival_reason: Optional[str] = field(default=None)
    archival_backup_arn: Optional[str] = field(default=None)
@define(eq=False, slots=False)
class AwsDynamoDbTable(DynamoDbTaggable, AwsResource):
    """A single DynamoDB table.

    Collection is two-phase: ``list-tables`` yields only table names, then each
    table is described individually via ``describe-table`` and tagged via
    ``list-tags-of-resource``.
    """

    kind: ClassVar[str] = "aws_dynamo_db_table"
    # list-tables returns plain name strings, not JSON objects — see collect() below
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-tables", "TableNames")
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["aws_kinesis_stream", "aws_kms_key"]},
        "predecessors": {"delete": ["aws_kinesis_stream", "aws_kms_key"]},
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("TableId"),
        "name": S("TableName"),
        "ctime": S("CreationDateTime"),
        "arn": S("TableArn"),
        "dynamodb_attribute_definitions": S("AttributeDefinitions", default=[])
        >> ForallBend(AwsDynamoDbAttributeDefinition.mapping),
        "dynamodb_key_schema": S("KeySchema", default=[]) >> ForallBend(AwsDynamoDbKeySchemaElement.mapping),
        "dynamodb_table_status": S("TableStatus"),
        "dynamodb_provisioned_throughput": S("ProvisionedThroughput")
        >> Bend(AwsDynamoDbProvisionedThroughputDescription.mapping),
        "dynamodb_table_size_bytes": S("TableSizeBytes"),
        "dynamodb_item_count": S("ItemCount"),
        "dynamodb_billing_mode_summary": S("BillingModeSummary") >> Bend(AwsDynamoDbBillingModeSummary.mapping),
        "dynamodb_local_secondary_indexes": S("LocalSecondaryIndexes", default=[])
        >> ForallBend(AwsDynamoDbLocalSecondaryIndexDescription.mapping),
        "dynamodb_global_secondary_indexes": S("GlobalSecondaryIndexes", default=[])
        >> ForallBend(AwsDynamoDbGlobalSecondaryIndexDescription.mapping),
        "dynamodb_stream_specification": S("StreamSpecification") >> Bend(AwsDynamoDbStreamSpecification.mapping),
        "dynamodb_latest_stream_label": S("LatestStreamLabel"),
        "dynamodb_latest_stream_arn": S("LatestStreamArn"),
        "dynamodb_global_table_version": S("GlobalTableVersion"),
        "dynamodb_replicas": S("Replicas", default=[]) >> ForallBend(AwsDynamoDbReplicaDescription.mapping),
        "dynamodb_restore_summary": S("RestoreSummary") >> Bend(AwsDynamoDbRestoreSummary.mapping),
        "dynamodb_sse_description": S("SSEDescription") >> Bend(AwsDynamoDbSSEDescription.mapping),
        "dynamodb_archival_summary": S("ArchivalSummary") >> Bend(AwsDynamoDbArchivalSummary.mapping),
        "dynamodb_table_class_summary": S("TableClassSummary") >> Bend(AwsDynamoDbTableClassSummary.mapping),
    }
    arn: Optional[str] = field(default=None)
    dynamodb_attribute_definitions: List[AwsDynamoDbAttributeDefinition] = field(factory=list)
    dynamodb_key_schema: List[AwsDynamoDbKeySchemaElement] = field(factory=list)
    dynamodb_table_status: Optional[str] = field(default=None)
    dynamodb_provisioned_throughput: Optional[AwsDynamoDbProvisionedThroughputDescription] = field(default=None)
    dynamodb_table_size_bytes: Optional[int] = field(default=None)
    dynamodb_item_count: Optional[int] = field(default=None)
    dynamodb_billing_mode_summary: Optional[AwsDynamoDbBillingModeSummary] = field(default=None)
    dynamodb_local_secondary_indexes: List[AwsDynamoDbLocalSecondaryIndexDescription] = field(factory=list)
    dynamodb_global_secondary_indexes: List[AwsDynamoDbGlobalSecondaryIndexDescription] = field(factory=list)
    dynamodb_stream_specification: Optional[AwsDynamoDbStreamSpecification] = field(default=None)
    dynamodb_latest_stream_label: Optional[str] = field(default=None)
    dynamodb_latest_stream_arn: Optional[str] = field(default=None)
    dynamodb_global_table_version: Optional[str] = field(default=None)
    dynamodb_replicas: List[AwsDynamoDbReplicaDescription] = field(factory=list)
    dynamodb_restore_summary: Optional[AwsDynamoDbRestoreSummary] = field(default=None)
    dynamodb_sse_description: Optional[AwsDynamoDbSSEDescription] = field(default=None)
    dynamodb_archival_summary: Optional[AwsDynamoDbArchivalSummary] = field(default=None)
    dynamodb_table_class_summary: Optional[AwsDynamoDbTableClassSummary] = field(default=None)

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """All APIs used during collection (for permission preflight checks)."""
        return [
            cls.api_spec,
            AwsApiSpec(service_name, "describe-table"),
            AwsApiSpec(service_name, "list-tags-of-resource"),
        ]

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Describe each listed table, add it to the graph and fetch its tags."""

        def add_instance(table: str) -> None:
            # the list call only yields names; fetch the full table description here
            table_description = builder.client.get(service_name, "describe-table", "Table", TableName=table)
            if table_description is not None:
                if instance := cls.from_api(table_description, builder):
                    builder.add_node(instance, table_description)
                    # tags require one extra API call per table, so do it concurrently
                    builder.submit_work(service_name, add_tags, instance)

        def add_tags(table: AwsDynamoDbTable) -> None:
            tags = builder.client.list(service_name, "list-tags-of-resource", "Tags", ResourceArn=table.arn)
            if tags:
                table.tags = bend(ToDict(), tags)

        for js in json:
            # list-tables returns plain strings (table names)
            if isinstance(js, str):
                add_instance(js)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Create edges to the table's Kinesis stream and any KMS keys in use."""
        if self.dynamodb_latest_stream_arn:
            builder.dependant_node(
                self,
                clazz=AwsKinesisStream,
                arn=self.dynamodb_latest_stream_arn,
            )
        for replica in self.dynamodb_replicas:
            if replica.kms_master_key_id:
                builder.dependant_node(
                    self,
                    clazz=AwsKmsKey,
                    id=replica.kms_master_key_id,
                )
        if self.dynamodb_sse_description and self.dynamodb_sse_description.kms_master_key_arn:
            builder.dependant_node(
                self,
                clazz=AwsKmsKey,
                arn=self.dynamodb_sse_description.kms_master_key_arn,
            )

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete the table; deletion is asynchronous on the AWS side."""
        client.call(aws_service=self.api_spec.service, action="delete-table", result_name=None, TableName=self.name)
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs used by mutating operations (tagging comes from DynamoDbTaggable)."""
        return super().called_mutator_apis() + [AwsApiSpec(service_name, "delete-table")]
@define(eq=False, slots=False)
class AwsDynamoDbGlobalTable(DynamoDbTaggable, AwsResource):
    """A DynamoDB global table (multi-region replicated table).

    Collection is two-phase: ``list-global-tables`` yields name stubs, then each
    table is described via ``describe-global-table`` and tagged via
    ``list-tags-of-resource``.
    """

    kind: ClassVar[str] = "aws_dynamo_db_global_table"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-global-tables", "GlobalTables")
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {"default": ["aws_kms_key"]},
        "predecessors": {"delete": ["aws_kms_key"]},
    }
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("GlobalTableName"),
        "name": S("GlobalTableName"),
        "ctime": S("CreationDateTime"),
        "arn": S("GlobalTableArn"),
        "dynamodb_replication_group": S("ReplicationGroup", default=[])
        >> ForallBend(AwsDynamoDbReplicaDescription.mapping),
        "dynamodb_global_table_status": S("GlobalTableStatus"),
    }
    arn: Optional[str] = field(default=None)
    dynamodb_replication_group: List[AwsDynamoDbReplicaDescription] = field(factory=list)
    dynamodb_global_table_status: Optional[str] = field(default=None)

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """All APIs used during collection (for permission preflight checks)."""
        return [
            cls.api_spec,
            AwsApiSpec(service_name, "describe-global-table"),
            AwsApiSpec(service_name, "list-tags-of-resource"),
        ]

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Describe each listed global table, add it to the graph and fetch its tags."""

        def add_instance(table: Dict[str, str]) -> None:
            table_description = builder.client.get(
                service_name,
                "describe-global-table",
                "GlobalTableDescription",
                GlobalTableName=table["GlobalTableName"],
            )
            if table_description:
                if instance := cls.from_api(table_description, builder):
                    builder.add_node(instance, table_description)
                    # tags require one extra API call per table, so do it concurrently
                    builder.submit_work(service_name, add_tags, instance)

        def add_tags(table: AwsDynamoDbGlobalTable) -> None:
            tags = builder.client.list(service_name, "list-tags-of-resource", "Tags", ResourceArn=table.arn)
            if tags:
                table.tags = bend(ToDict(), tags)

        for js in json:
            add_instance(js)

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Connect each replica's KMS key to this table.

        Bug fix: the previous guard ``if self.dynamodb_replication_group is not []``
        compared identity against a freshly created list and was therefore always
        true. Plain iteration handles the empty case correctly on its own.
        """
        for replica in self.dynamodb_replication_group:
            if replica.kms_master_key_id:
                builder.dependant_node(
                    self,
                    clazz=AwsKmsKey,
                    id=replica.kms_master_key_id,
                )

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete the table; deletion is asynchronous on the AWS side."""
        client.call(aws_service=self.api_spec.service, action="delete-table", result_name=None, TableName=self.name)
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs used by mutating operations (tagging comes from DynamoDbTaggable)."""
        return super().called_mutator_apis() + [AwsApiSpec(service_name, "delete-table")]
# Registry of resource kinds exported by this module.
# Global tables are collected once per account; regular tables once per region.
global_resources: List[Type[AwsResource]] = [AwsDynamoDbGlobalTable]
resources: List[Type[AwsResource]] = [AwsDynamoDbTable]
from datetime import datetime
from typing import ClassVar, Dict, Optional, Type, List
from attr import define, field as attrs_field
from resoto_plugin_aws.aws_client import AwsClient
from resoto_plugin_aws.resource.base import AwsApiSpec, GraphBuilder, AwsResource, parse_json
from resoto_plugin_aws.resource.cloudwatch import AwsCloudwatchLogGroup
from resoto_plugin_aws.resource.iam import AwsIamRole
from resoto_plugin_aws.resource.kms import AwsKmsKey
from resoto_plugin_aws.resource.s3 import AwsS3Bucket
from resoto_plugin_aws.resource.sns import AwsSnsTopic
from resoto_plugin_aws.utils import ToDict
from resotolib.graph import Graph
from resotolib.types import Json
from resotolib.baseresources import ModelReference, EdgeType
from resotolib.json_bender import Bender, S, bend, ForallBend, EmptyToNone, F
service_name = "cloudtrail"
@define(eq=False, slots=False)
class AwsCloudTrailAdvancedFieldSelector:
    """Match conditions of one advanced event-selector field.

    NOTE(review): the mapping extracts "Field" into a ``field`` key, but this
    class declares no matching attribute — the value is consumed by
    AwsCloudTrailEventSelector's grouping lambda instead. Confirm this is
    intentional before adding an attribute.
    """

    kind: ClassVar[str] = "aws_cloud_trail_advanced_field_selector"
    mapping: ClassVar[Dict[str, Bender]] = {
        "field": S("Field"),
        "equals": S("Equals"),
        "starts_with": S("StartsWith"),
        "ends_with": S("EndsWith"),
        "not_equals": S("NotEquals"),
        "not_starts_with": S("NotStartsWith"),
        "not_ends_with": S("NotEndsWith"),
    }
    equals: Optional[List[str]] = attrs_field(default=None)
    starts_with: Optional[List[str]] = attrs_field(default=None)
    ends_with: Optional[List[str]] = attrs_field(default=None)
    not_equals: Optional[List[str]] = attrs_field(default=None)
    not_starts_with: Optional[List[str]] = attrs_field(default=None)
    not_ends_with: Optional[List[str]] = attrs_field(default=None)
@define(eq=False, slots=False)
class AwsCloudTrailEventSelector:
    """An advanced event selector of a trail: named set of field selectors keyed by field name."""

    kind: ClassVar[str] = "aws_cloud_trail_event_selector"
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("Name"),
        # group the list of field selectors into a dict keyed by each selector's "field" value
        "field_selectors": S("FieldSelectors", default=[])
        >> ForallBend(AwsCloudTrailAdvancedFieldSelector.mapping)
        >> F(lambda x: {a["field"]: a for a in x}),
    }
    name: Optional[str] = attrs_field(default=None)
    field_selectors: Optional[Dict[str, AwsCloudTrailAdvancedFieldSelector]] = attrs_field(default=None)
@define(eq=False, slots=False)
class AwsCloudTrailStatus:
    """Runtime status of a trail as returned by ``get-trail-status``.

    EmptyToNone normalizes the empty strings AWS returns for unset timestamps.

    NOTE(review): ``latest_notification_attempt_succeeded`` and
    ``latest_delivery_attempt_succeeded`` are typed ``datetime`` here, but the
    CloudTrail API documents them as strings — confirm against real responses.
    """

    kind: ClassVar[str] = "aws_cloud_trail_status"
    mapping: ClassVar[Dict[str, Bender]] = {
        "is_logging": S("IsLogging"),
        "latest_delivery_error": S("LatestDeliveryError"),
        "latest_notification_error": S("LatestNotificationError"),
        "latest_delivery_time": S("LatestDeliveryTime") >> EmptyToNone,
        "latest_notification_time": S("LatestNotificationTime") >> EmptyToNone,
        "start_logging_time": S("StartLoggingTime") >> EmptyToNone,
        "stop_logging_time": S("StopLoggingTime") >> EmptyToNone,
        "latest_cloud_watch_logs_delivery_error": S("LatestCloudWatchLogsDeliveryError"),
        "latest_cloud_watch_logs_delivery_time": S("LatestCloudWatchLogsDeliveryTime"),
        "latest_digest_delivery_time": S("LatestDigestDeliveryTime") >> EmptyToNone,
        "latest_digest_delivery_error": S("LatestDigestDeliveryError"),
        "latest_delivery_attempt_time": S("LatestDeliveryAttemptTime") >> EmptyToNone,
        "latest_notification_attempt_time": S("LatestNotificationAttemptTime") >> EmptyToNone,
        "latest_notification_attempt_succeeded": S("LatestNotificationAttemptSucceeded") >> EmptyToNone,
        "latest_delivery_attempt_succeeded": S("LatestDeliveryAttemptSucceeded") >> EmptyToNone,
        "time_logging_started": S("TimeLoggingStarted") >> EmptyToNone,
        "time_logging_stopped": S("TimeLoggingStopped") >> EmptyToNone,
    }
    is_logging: Optional[bool] = attrs_field(default=None)
    latest_delivery_error: Optional[str] = attrs_field(default=None)
    latest_notification_error: Optional[str] = attrs_field(default=None)
    latest_delivery_time: Optional[datetime] = attrs_field(default=None)
    latest_notification_time: Optional[datetime] = attrs_field(default=None)
    start_logging_time: Optional[datetime] = attrs_field(default=None)
    stop_logging_time: Optional[datetime] = attrs_field(default=None)
    latest_cloud_watch_logs_delivery_error: Optional[str] = attrs_field(default=None)
    latest_cloud_watch_logs_delivery_time: Optional[datetime] = attrs_field(default=None)
    latest_digest_delivery_time: Optional[datetime] = attrs_field(default=None)
    latest_digest_delivery_error: Optional[str] = attrs_field(default=None)
    latest_delivery_attempt_time: Optional[datetime] = attrs_field(default=None)
    latest_notification_attempt_time: Optional[datetime] = attrs_field(default=None)
    latest_notification_attempt_succeeded: Optional[datetime] = attrs_field(default=None)
    latest_delivery_attempt_succeeded: Optional[datetime] = attrs_field(default=None)
    time_logging_started: Optional[datetime] = attrs_field(default=None)
    time_logging_stopped: Optional[datetime] = attrs_field(default=None)
@define(eq=False, slots=False)
class AwsCloudTrail(AwsResource):
    """A CloudTrail trail, enriched with its status, event selectors and insight selectors.

    ``list-trails`` returns only name/region/ARN stubs; the full description is
    fetched per trail via ``get-trail``. Trails that live in another region or
    account are linked with a deferred edge instead of being collected twice.
    """

    kind: ClassVar[str] = "aws_cloud_trail"
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-trails", "Trails")
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("Name"),
        "name": S("Name"),
        "trail_s3_bucket_name": S("S3BucketName"),
        "trail_s3_key_prefix": S("S3KeyPrefix"),
        "trail_sns_topic_name": S("SnsTopicName"),
        "trail_sns_topic_arn": S("SnsTopicARN"),
        "trail_include_global_service_events": S("IncludeGlobalServiceEvents"),
        "trail_is_multi_region_trail": S("IsMultiRegionTrail"),
        "trail_home_region": S("HomeRegion"),
        "arn": S("TrailARN"),
        "trail_log_file_validation_enabled": S("LogFileValidationEnabled"),
        "trail_cloud_watch_logs_log_group_arn": S("CloudWatchLogsLogGroupArn"),
        "trail_cloud_watch_logs_role_arn": S("CloudWatchLogsRoleArn"),
        "trail_kms_key_id": S("KmsKeyId"),
        "trail_has_custom_event_selectors": S("HasCustomEventSelectors"),
        "trail_has_insight_selectors": S("HasInsightSelectors"),
        "trail_is_organization_trail": S("IsOrganizationTrail"),
    }
    # consistency fix: connect_in_graph also creates edges to the CloudWatch log
    # group and the IAM role, so declare those successor kinds as well
    reference_kinds: ClassVar[ModelReference] = {
        "successors": {
            "default": ["aws_s3_bucket", "aws_sns_topic", "aws_kms_key", "aws_cloudwatch_log_group", "aws_iam_role"]
        },
    }
    trail_s3_bucket_name: Optional[str] = attrs_field(default=None)
    trail_s3_key_prefix: Optional[str] = attrs_field(default=None)
    trail_sns_topic_name: Optional[str] = attrs_field(default=None)
    trail_sns_topic_arn: Optional[str] = attrs_field(default=None)
    trail_include_global_service_events: Optional[bool] = attrs_field(default=None)
    trail_is_multi_region_trail: Optional[bool] = attrs_field(default=None)
    trail_home_region: Optional[str] = attrs_field(default=None)
    arn: Optional[str] = attrs_field(default=None)
    trail_log_file_validation_enabled: Optional[bool] = attrs_field(default=None)
    trail_cloud_watch_logs_log_group_arn: Optional[str] = attrs_field(default=None)
    trail_cloud_watch_logs_role_arn: Optional[str] = attrs_field(default=None)
    trail_kms_key_id: Optional[str] = attrs_field(default=None)
    trail_has_custom_event_selectors: Optional[bool] = attrs_field(default=None)
    trail_has_insight_selectors: Optional[bool] = attrs_field(default=None)
    trail_is_organization_trail: Optional[bool] = attrs_field(default=None)
    trail_status: Optional[AwsCloudTrailStatus] = attrs_field(default=None)
    trail_event_selectors: Optional[List[AwsCloudTrailEventSelector]] = attrs_field(default=None)
    trail_insight_selectors: Optional[List[str]] = attrs_field(default=None)

    @classmethod
    def called_collect_apis(cls) -> List[AwsApiSpec]:
        """All APIs used during collection (for permission preflight checks)."""
        return [
            AwsApiSpec(service_name, "list-trails"),
            AwsApiSpec(service_name, "get-trail"),
            AwsApiSpec(service_name, "get-trail-status"),
            AwsApiSpec(service_name, "list-tags"),
            AwsApiSpec(service_name, "get-event-selectors"),
            AwsApiSpec(service_name, "get-insight-selectors"),
        ]

    @classmethod
    def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None:
        """Describe every trail homed in the current account/region and add it to the graph."""

        def collect_trail(trail_arn: str) -> None:
            if trail_raw := builder.client.get(service_name, "get-trail", "Trail", Name=trail_arn):
                if instance := AwsCloudTrail.from_api(trail_raw, builder):
                    # Bug fix: use trail_raw as the node's source json. This function
                    # runs asynchronously via submit_work, so the loop variable `js`
                    # (previously used here) could already point at a later trail.
                    builder.add_node(instance, trail_raw)
                    collect_status(instance)
                    collect_tags(instance)
                    # the extra selector calls are only valid when the trail has them
                    if instance.trail_has_custom_event_selectors:
                        collect_event_selectors(instance)
                    if instance.trail_has_insight_selectors:
                        collect_insight_selectors(instance)

        def collect_event_selectors(trail: AwsCloudTrail) -> None:
            trail.trail_event_selectors = []
            for item in builder.client.list(
                service_name, "get-event-selectors", "AdvancedEventSelectors", TrailName=trail.arn
            ):
                mapped = bend(AwsCloudTrailEventSelector.mapping, item)
                if es := parse_json(mapped, AwsCloudTrailEventSelector, builder):
                    trail.trail_event_selectors.append(es)

        def collect_insight_selectors(trail: AwsCloudTrail) -> None:
            trail.trail_insight_selectors = []
            for item in builder.client.list(
                service_name,
                "get-insight-selectors",
                "InsightSelectors",
                TrailName=trail.arn,
                expected_errors=["InsightNotEnabledException"],
            ):
                trail.trail_insight_selectors.append(item["InsightType"])

        def collect_status(trail: AwsCloudTrail) -> None:
            # robustness: guard against a failed status call instead of bending None
            if status_raw := builder.client.get(service_name, "get-trail-status", Name=trail.arn):
                mapped = bend(AwsCloudTrailStatus.mapping, status_raw)
                if status := parse_json(mapped, AwsCloudTrailStatus, builder):
                    trail.trail_status = status
                    trail.ctime = status.start_logging_time
                    trail.mtime = status.latest_delivery_time

        def collect_tags(trail: AwsCloudTrail) -> None:
            for tr in builder.client.list(
                service_name,
                "list-tags",
                "ResourceTagList",
                ResourceIdList=[trail.arn],
                expected_errors=["CloudTrailARNInvalidException", "AccessDeniedException"],
            ):
                trail.tags = bend(S("TagsList", default=[]) >> ToDict(), tr)

        for js in json:
            arn = js["TrailARN"]
            # list trails will return multi account trails in all regions
            if js["HomeRegion"] == builder.region.name and builder.account.id in arn:
                # only collect trails in the current account and current region
                builder.submit_work(service_name, collect_trail, arn)
            else:
                # add a deferred edge to the trails in another account or region
                builder.add_deferred_edge(
                    builder.region, EdgeType.default, f'is(aws_cloud_trail) and reported.arn=="{arn}"'
                )

    def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None:
        """Create edges to the S3 bucket, SNS topic, KMS key, log group and IAM role in use."""
        if s3 := self.trail_s3_bucket_name:
            builder.add_edge(self, clazz=AwsS3Bucket, name=s3)
        if sns := self.trail_sns_topic_arn:
            builder.add_edge(self, clazz=AwsSnsTopic, arn=sns)
        if kms := self.trail_kms_key_id:
            builder.add_edge(self, clazz=AwsKmsKey, id=AwsKmsKey.normalise_id(kms))
        if log_group := self.trail_cloud_watch_logs_log_group_arn:
            builder.add_edge(self, clazz=AwsCloudwatchLogGroup, arn=log_group)
        if log_role := self.trail_cloud_watch_logs_role_arn:
            builder.add_edge(self, clazz=AwsIamRole, arn=log_role)

    def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool:
        """Add or overwrite one tag on the trail."""
        client.call(service_name, "add-tags", ResourceId=self.arn, TagsList=[{"Key": key, "Value": value}])
        return True

    def delete_resource_tag(self, client: AwsClient, key: str) -> bool:
        """Remove one tag from the trail."""
        client.call(service_name, "remove-tags", ResourceId=self.arn, TagsList=[{"Key": key}])
        return True

    def delete_resource(self, client: AwsClient, graph: Graph) -> bool:
        """Delete the trail itself (log files already delivered to S3 are kept by AWS)."""
        client.call(service_name, "delete-trail", Name=self.arn)
        return True

    @classmethod
    def called_mutator_apis(cls) -> List[AwsApiSpec]:
        """APIs used by mutating operations."""
        return [
            AwsApiSpec(service_name, "add-tags"),
            AwsApiSpec(service_name, "remove-tags"),
            AwsApiSpec(service_name, "delete-trail"),
        ]
resources: List[Type[AwsResource]] = [AwsCloudTrail] | /resoto-plugin-aws-3.6.5.tar.gz/resoto-plugin-aws-3.6.5/resoto_plugin_aws/resource/cloudtrail.py | 0.740644 | 0.162081 | cloudtrail.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.