code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from typing import Any, Sequence, Union
import numpy as np
from scipy.sparse import issparse
from reservoirpy.utils.validation import check_vector
def _check_values(array_or_list: Union[Sequence, np.ndarray], value: Any):
"""Check if the given array or list contains the given value."""
if value == np.nan:
assert (
np.isnan(array_or_list).any() == False
), f"{array_or_list} should not contain NaN values."
if value is None:
if type(array_or_list) is list:
assert (
np.count_nonzero(array_or_list == None) == 0
), f"{array_or_list} should not contain None values."
elif type(array_or_list) is np.array:
# None is transformed to np.nan when it is in an array
assert (
np.isnan(array_or_list).any() == False
), f"{array_or_list} should not contain NaN values."
def check_input_lists(X, dim_in, Y=None, dim_out=None):
    """Normalize and validate input (and optional target) timeseries.

    Bare arrays are wrapped into one-element lists. Every sequence is
    checked to be 2-dimensional (time x features) and to match the
    expected feature dimensions.

    Parameters
    ----------
    X : np.ndarray or list of np.ndarray
        Input sequences of shape (time, dim_in).
    dim_in : int
        Expected number of input features.
    Y : np.ndarray or list of np.ndarray, optional
        Target sequences of shape (time, dim_out).
    dim_out : int, optional
        Expected number of target features.

    Returns
    -------
    list, list or None
        Inputs and targets, each wrapped in a list if necessary.

    Raises
    ------
    ValueError
        On any shape or length inconsistency.
    """
    # Promote bare arrays to singleton lists so the rest can assume lists.
    if isinstance(X, np.ndarray):
        X = [X]
    if Y is not None:
        if isinstance(Y, np.ndarray):
            Y = [Y]
        if len(X) != len(Y):
            raise ValueError(
                f"Inconsistent number of inputs and targets: "
                f"found {len(X)} input sequences, but {len(Y)} "
                f"target sequences."
            )
    for i, x_seq in enumerate(X):
        x = check_vector(x_seq, allow_reshape=False)
        if x.ndim != 2:
            raise ValueError(
                f"Input {i} has shape {x.shape} but should "
                f"be 2-dimensional, with first axis representing "
                f"time and second axis representing features."
            )
        if x.shape[1] != dim_in:
            raise ValueError(
                f"Input {i} has {x.shape[1]} features but ESN expects "
                f"{dim_in} features as input."
            )
        if Y is None:
            continue
        y = check_vector(Y[i], allow_reshape=False)
        if y.ndim != 2:
            raise ValueError(
                f"Target {i} has shape {y.shape} but should "
                f"be 2-dimensional, with first axis representing "
                f"time and second axis representing features."
            )
        if x.shape[0] != y.shape[0]:
            raise ValueError(
                f"Inconsistent inputs and targets lengths: "
                f"input {i} has length {x.shape[0]} but "
                f"corresponding target {i} has length "
                f"{y.shape[0]}."
            )
        if dim_out is not None and y.shape[1] != dim_out:
            raise ValueError(
                f"Target {i} has {y.shape[1]} features but ESN "
                f"expects {dim_out} features as feedback."
            )
    return X, Y
def check_reservoir_matrices(W, Win, Wout=None, Wfb=None, caller=None):
    """Validate types and shape consistency of ESN weight matrices.

    Parameters
    ----------
    W : array of shape (units, units)
        Recurrent reservoir weight matrix; must be square.
    Win : array of shape (units, dim_in [+ bias])
        Input weight matrix.
    Wout : array of shape (dim_out, units + bias), optional
        Readout weight matrix.
    Wfb : array of shape (units, dim_out), optional
        Feedback weight matrix.
    caller : object, optional
        Object requesting the check; its class name prefixes error messages.

    Returns
    -------
    tuple
        The validated ``(W, Win, Wout, Wfb)`` matrices.

    Raises
    ------
    ValueError
        If any matrix shape is inconsistent with the others.
    """
    caller_name = f"{caller.__class__.__name__} :" if caller is not None else ""
    W = check_datatype(W, caller=caller, name="W")
    Win = check_datatype(Win, caller=caller, name="Win")
    in_shape = Win.shape
    res_shape = W.shape
    # W shape is (units, units)
    if res_shape[0] != res_shape[1]:
        raise ValueError(
            f"{caller_name} reservoir matrix W should be square but has "
            f"shape {res_shape}."
        )
    # Win shape is (units, dim_in [+ bias])
    if in_shape[0] != res_shape[0]:
        raise ValueError(
            f"{caller_name} dimension mismatch between W and Win: "
            f"W is of shape {res_shape} and Win is of shape {in_shape} "
            f"({res_shape[0]} != {in_shape[0]})."
        )
    # Wout shape is (dim_out, units + bias)
    out_shape = None
    if Wout is not None:
        Wout = check_datatype(Wout, caller=caller, name="Wout")
        out_shape = Wout.shape
        # +1 accounts for the bias column appended to reservoir states.
        if out_shape[1] != res_shape[0] + 1:
            raise ValueError(
                f"{caller_name} dimension mismatch between W and Wout: "
                f"W is of shape {res_shape} and Wout is of shape {out_shape} "
                f"({res_shape[0]} + bias (1) != {out_shape[1]})."
            )
    # Wfb shape is (units, dim_out)
    if Wfb is not None:
        Wfb = check_datatype(Wfb, caller=caller, name="Wfb")
        fb_shape = Wfb.shape
        # Only comparable to Wout when a readout matrix was provided.
        if out_shape is not None:
            if fb_shape[1] != out_shape[0]:
                raise ValueError(
                    f"{caller_name} dimension mismatch between Wfb and Wout: "
                    f"Wfb is of shape {fb_shape} and Wout is of sh"
                    f"ape {out_shape} "
                    f"({fb_shape[1]} != {out_shape[0]})."
                )
        if fb_shape[0] != res_shape[0]:
            raise ValueError(
                f"{caller_name} dimension mismatch between W and Wfb: "
                f"W is of shape {res_shape} and Wfb is of shape {fb_shape} "
                f"({res_shape[0]} != {fb_shape[0]})."
            )
    return W, Win, Wout, Wfb
def check_datatype(array, caller=None, name=None, allow_inf=False, allow_nan=False):
    """Check that `array` holds numerical data, free of NaN/inf by default.

    Non-array inputs are converted with ``np.asarray``. Sparse matrices are
    accepted and checked through their stored values.

    Parameters
    ----------
    array : array-like or scipy sparse matrix
        Data to validate.
    caller : object, optional
        Object requesting the check; its class name prefixes error messages.
    name : str, optional
        Array name used in error messages; defaults to the array's class name.
    allow_inf : bool, default False
        If True, infinite values are accepted.
    allow_nan : bool, default False
        If True, NaN values are accepted.

    Returns
    -------
    np.ndarray or sparse matrix
        The validated array.

    Raises
    ------
    TypeError
        If the array has a non-numerical dtype.
    ValueError
        If the array contains disallowed NaN or inf values.
    """
    caller_name = f"{caller.__class__.__name__} :" if caller is not None else ""
    # Bug fix: attribute was misspelled `__name___` (three trailing
    # underscores), raising AttributeError whenever `name` was not a string.
    array_name = name if isinstance(name, str) else array.__class__.__name__
    if not isinstance(array, np.ndarray) and not issparse(array):
        array = np.asarray(array)
    if not np.issubdtype(array.dtype, np.number):
        raise TypeError(
            f"{caller_name} Impossible to operate on non-numerical data, "
            f"in array '{array_name}' of type {array.dtype}: {array}"
        )
    # Sparse matrices expose their stored values through `.data`.
    values = array.data if issparse(array) else array
    if not allow_nan and np.any(np.isnan(values)):
        raise ValueError(
            f"{caller_name} Impossible to operate on NaN value, "
            f"in array '{array_name}': {array}."
        )
    if not allow_inf and np.any(np.isinf(values)):
        raise ValueError(
            f"{caller_name} Impossible to operate on inf value, "
            f"in array '{array_name}': {array}."
        )
    return array
import json
import os
import time
import dill
import numpy as np
from scipy import sparse
from ..._version import __version__
from .. import regression_models
def _save(esn, directory: str):
    """Base utility for saving an ESN model, based on the ESN class.

    Serializes the ESN weight matrices (as ``.npy``/``.npz`` files), the
    feedback function and scikit-learn model (with dill), the ESN class
    itself, and writes a JSON summary file ``esn.json`` into a new
    directory.

    Arguments:
        esn {ESN} -- ESN model to save.
        directory {str or Path} -- Directory to store the model.
    """
    # create new directory (refuse to overwrite an existing one)
    savedir = os.path.join(directory)
    if not os.path.isdir(savedir):
        os.mkdir(savedir)
    else:
        raise OSError(f"Directory '{savedir}' already exists.")
    # timestamp used as a serial number in every saved file name
    current_time = time.time()
    # store matrices
    Win_path = f"esn-Win-{current_time}.npy"
    Wout_path = None
    Wfb_path = None
    # W may be sparse (saved as .npz) or dense (saved as .npy)
    if sparse.issparse(esn.W):
        W_path = f"esn-W-{current_time}.npz"
        sparse.save_npz(os.path.join(savedir, W_path), esn.W)
    else:
        W_path = f"esn-W-{current_time}.npy"
        np.save(os.path.join(savedir, W_path), esn.W)
    np.save(os.path.join(savedir, Win_path), esn.Win)
    dim_out = None
    if esn.Wout is not None:
        Wout_path = f"esn-Wout-{current_time}.npy"
        np.save(os.path.join(savedir, Wout_path), esn.Wout)
        dim_out = esn.dim_out
    fbfunc = None
    fbfunc_info = None
    if esn.Wfb is not None:
        Wfb_path = f"esn-Wfb-{current_time}.npy"
        np.save(os.path.join(savedir, Wfb_path), esn.Wfb)
        # fbfunc is serialized and stored
        fbfunc_info = {"cls": str(esn.fbfunc.__class__), "name": esn.fbfunc.__name__}
        fbfunc = f"fbfunc_save-{current_time}"
        dim_out = esn.dim_out
        with open(os.path.join(savedir, fbfunc), "wb+") as f:
            dill.dump(esn.fbfunc, f)
    sklearn_model = None
    if getattr(esn.model, "model", None) is not None:
        sklearn_model = f"sklearn_func_save-{current_time}"
        # scikit-learn model is serialized and stored;
        # will require scikit-learn to be imported when loading the ESN.
        with open(os.path.join(savedir, sklearn_model), "wb+") as f:
            dill.dump(esn.model.model, f)
    # a copy of the ESN class is also serialized.
    # This allows loading an ESN without necessarily using
    # the same version of ReservoirPy.
    cls_path = f"cls_bin-{current_time}"
    with open(os.path.join(savedir, cls_path), "wb+") as f:
        dill.dump(esn.__class__, f)
    # summary of everything that was saved; file names reference the
    # serialized artifacts written above
    attr = {
        "cls": esn.__class__.__name__,
        "cls_bin": cls_path,
        "version": __version__,
        "serial": current_time,
        "attr": {
            "_W": W_path,
            "_Win": Win_path,
            "_Wfb": Wfb_path,
            "_Wout": Wout_path,
            "_N": esn._N,
            "lr": esn.lr,
            "_input_bias": esn.input_bias,
            "_dim_in": esn._dim_in,
            "_dim_out": dim_out,
            "_ridge": esn.ridge,
            "typefloat": esn.typefloat.__name__,
            "sklearn_model": sklearn_model,
            "fbfunc": fbfunc,
            "noise_in": esn.noise_in,
            "noise_out": esn.noise_out,
            "noise_rc": esn.noise_rc,
            "seed": esn.seed,
            "model": esn.model.__class__.__name__,
        },
        "misc": {"fbfunc_info": fbfunc_info},
    }
    # save a summary file
    with open(os.path.join(savedir, "esn.json"), "w+") as f:
        json.dump(attr, f)
def _new_from_save(base_cls, restored_attr):
    """Rebuild an ESN instance from restored attributes without __init__."""
    esn = object.__new__(base_cls)
    for key, value in restored_attr.items():
        try:
            setattr(esn, key, value)
        except AttributeError as err:
            print(err)
            print(key, value)
    # The regression model is rebuilt from its class name and parameters.
    model_cls = getattr(regression_models, esn.model)
    esn.model = model_cls(esn._ridge, esn.sklearn_model)
    # These attributes were only needed to rebuild the model.
    del esn.sklearn_model
    del esn._ridge
    return esn
def load(directory: str):
    """Load an ESN model in v0.2 format.

    Warning
    -------
    v0.2 models are deprecated. Consider using :py:func:`load_compat` to
    translate saved models from v0.2 to new Node API (see :ref:`node`)
    introduced in v0.3.

    Parameters
    ----------
    directory : str or Path
        Saved model directory.

    Returns
    -------
    :py:class:`compat.ESN`
        Loaded ESN.
    """
    # read the JSON summary produced by _save
    with open(os.path.join(directory, "esn.json"), "r") as f:
        attr = json.load(f)
    model_attr = attr["attr"]
    # W was saved either dense (.npy) or sparse (.npz)
    if os.path.splitext(model_attr["_W"])[1] == ".npy":
        model_attr["_W"] = np.load(os.path.join(directory, model_attr["_W"]))
    else:
        model_attr["_W"] = sparse.load_npz(os.path.join(directory, model_attr["_W"]))
    model_attr["_Win"] = np.load(os.path.join(directory, model_attr["_Win"]))
    if model_attr["_Wout"] is not None:
        model_attr["_Wout"] = np.load(os.path.join(directory, model_attr["_Wout"]))
    if model_attr["_Wfb"] is not None:
        model_attr["_Wfb"] = np.load(os.path.join(directory, model_attr["_Wfb"]))
        with open(os.path.join(directory, model_attr["fbfunc"]), "rb") as f:
            model_attr["fbfunc"] = dill.load(f)
    # NOTE(review): because of this `elif`, the scikit-learn model is only
    # deserialized when no feedback matrix is present, although _save can
    # store both — confirm whether this should be an independent `if`.
    elif model_attr["sklearn_model"] is not None:
        with open(os.path.join(directory, model_attr["sklearn_model"]), "rb") as f:
            model_attr["sklearn_model"] = dill.load(f)
    # typefloat was saved by name (e.g. "float64"); resolve it back
    model_attr["typefloat"] = getattr(np, model_attr["typefloat"])
    with open(os.path.join(directory, attr["cls_bin"]), "rb") as f:
        base_cls = dill.load(f)
    # older saves may lack an activation entry; default to tanh
    if model_attr.get("activation") is None:
        model_attr["activation"] = np.tanh
    model = _new_from_save(base_cls, model_attr)
    return model
<div align="center">
<!-- <img src="https://github.com/reservoirpy/reservoirpy/raw/master/static/rpy_banner_bw.png"><br> !-->
<img src="./static/rpy_banner_bw_small-size.jpg"><br>
</div>

[](https://badge.fury.io/py/reservoirpy)
[](https://reservoirpy.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/reservoirpy/reservoirpy/actions/workflows/test.yml)
[](https://codecov.io/gh/reservoirpy/reservoirpy)
# ReservoirPy (v0.3.9) 🌀🧠
**Simple and flexible code for Reservoir Computing architectures like Echo State Networks (ESN).**
[](https://mybinder.org/v2/gh/reservoirpy/reservoirpy/HEAD)
```python
from reservoirpy.nodes import Reservoir, Ridge, Input
data = Input(input_dim=1)
reservoir = Reservoir(100, lr=0.3, sr=1.1)
readout = Ridge(ridge=1e-6)
esn = data >> reservoir >> readout
forecast = esn.fit(X, y).run(timeseries)
```
ReservoirPy is a simple user-friendly library based on Python scientific modules.
It provides a **flexible interface to implement efficient Reservoir Computing** (RC)
architectures with a particular focus on *Echo State Networks* (ESN).
Advanced features of ReservoirPy allow to improve computation time efficiency
on a simple laptop compared to basic Python implementation, with datasets of
any size.
Some of its features are: **offline and online training**, **parallel implementation**,
**sparse matrix computation**, fast spectral initialization, **advanced learning rules**
(e.g. *Intrinsic Plasticity*) etc. It also makes possible
to **easily create complex architectures with multiple reservoirs** (e.g. *deep reservoirs*),
readouts, and **complex feedback loops**.
Moreover, graphical tools are included to **easily explore hyperparameters**
with the help of the *hyperopt* library.
Finally, it includes several tutorials exploring exotic architectures
and examples of scientific papers reproduction.
This library works for **Python 3.8** and higher.
[Follow @reservoirpy](https://twitter.com/reservoirpy) updates and new releases on Twitter.
## Official documentation 📖
See [the official ReservoirPy's documentation](https://reservoirpy.readthedocs.io/en/latest/?badge=latest)
to learn more about the main features of ReservoirPy, its API and the installation process. Or you can access directly the [User Guide with tutorials](https://reservoirpy.readthedocs.io/en/latest/user_guide/index.html#user-guide).
## Quick example of how to code a deep reservoir

## Installation
```bash
pip install reservoirpy
```
(See below for more advanced installation options)
## Quick try ⚡
### An example on Chaotic timeseries prediction (MackeyGlass)
**Step 1: Load the dataset**
ReservoirPy comes with some handy data generator able to create synthetic timeseries
for well-known tasks such as Mackey-Glass timeseries forecasting.
```python
from reservoirpy.datasets import mackey_glass
X = mackey_glass(n_timesteps=2000)
```
**Step 2: Create an Echo State Network...**
...or any kind of model you wish to use to solve your task. In this simple
use case, we will try out Echo State Networks (ESNs), one of the
most minimal architecture of Reservoir Computing machines.
An ESN is made of
a *reservoir*, a random recurrent network used to encode our
inputs in a high-dimensional (non-linear) space, and a *readout*, a simple
feed-forward layer of neurons in charge with *reading-out* the desired output from
the activations of the reservoir.
```python
from reservoirpy.nodes import Reservoir, Ridge
reservoir = Reservoir(units=100, lr=0.3, sr=1.25)
readout = Ridge(output_dim=1, ridge=1e-5)
```
We here obtain a reservoir with 100 neurons, a *spectral radius* of 1.25 and
a *leak rate* of 0.3 (you can learn more about these hyperparameters going through
the tutorial
[Understand and optimize hyperparameters](./tutorials/4-Understand_and_optimize_hyperparameters.ipynb)).
Here, our readout layer is just a single unit, which will receive connections from (all the units of) the reservoir.
Note that only the readout layer connections are trained.
This is one of the cornerstone of all Reservoir Computing techniques. In our
case, we will train these connections using linear regression, with a regularization
coefficient of 10<sup>-5</sup>.
Now, let's connect everything using the `>>` operator.
```python
esn = reservoir >> readout
```
That's it! Next step: fit the readout weights to perform the task we want.
We will train the ESN to make one-step-ahead forecasts of our timeseries.
**Step 3: Fit and run the ESN**
We train our ESN on the first 500 timesteps of the timeseries, with 100 steps used to warm up the reservoir states.
```python
esn.fit(X[:500], X[1:501], warmup=100)
```
Our ESN is now trained and ready to use. Let's run it on the remainder of the timeseries:
```python
predictions = esn.run(X[501:-1])
```
As a shortcut, both operations can be performed in just one line!
```python
predictions = esn.fit(X[:500], X[1:501]).run(X[501:-1])
```
Let's now evaluate its performances.
**Step 4: Evaluate the ESN**
```python
from reservoirpy.observables import rmse, rsquare
print("RMSE:", rmse(X[502:], predictions), "R^2 score:", rsquare(X[502:], predictions))
```
Run and analyse this simple file (in the "tutorials/Simple Examples with Mackey-Glass" folder) to see a complete example of timeseries prediction with ESNs:
- simple_example_MackeyGlass.py (using the ESN class)
```bash
python simple_example_MackeyGlass.py
```
If you have some issues testing some examples, have a look at the [extended packages requirements in readthedocs](https://reservoirpy.readthedocs.io/en/latest/developer_guide/advanced_install.html?highlight=requirements#additional-dependencies-and-requirements).
## More installation options
To install it, use one of the following command:
```bash
pip install reservoirpy
```
or
```bash
pip install reservoirpy==0.3.5
```
If you want to run the Python Notebooks of the _tutorials_ folder, install the packages in requirements file (warning: this may downgrade the version of hyperopt installed):
```bash
pip install -r tutorials/requirements.txt
```
If you want to use the previous version 0.2.4, you can install ReservoirPy using:
```bash
pip install reservoirpy==0.2.4
```
If you want to enable the `hyper` package and its hyperparameter optimization helpers using
[hyperopt](http://hyperopt.github.io/hyperopt/), use:
```bash
pip install reservoirpy[hyper]
```
## More examples and tutorials 🎓
[Go to the tutorial folder](./tutorials/) for tutorials in Jupyter Notebooks.
[Go to the examples folder](./examples/) for examples and papers with codes, also in Jupyter Notebooks.
## Paper with tutorials
Tutorial for ReservoirPy (v0.2) can be found in this [Paper (Trouvain et al. 2020)](https://hal.inria.fr/hal-02595026).
## Explore Hyper-Parameters with Hyperopt
A quick tutorial on how to explore hyperparameters with ReservoirPy and Hyperopt can be found in this [paper (Trouvain et al. 2020)](https://hal.inria.fr/hal-02595026).
Take a look at our **advices and our method to explore hyperparameters** for reservoirs in our [recent paper: (Hinaut et al 2021)](https://hal.inria.fr/hal-03203318/) [HTML](https://link.springer.com/chapter/10.1007/978-3-030-86383-8_7) [HAL](https://hal.inria.fr/hal-03203318)
[Tutorial and Jupyter Notebook for hyper-parameter exploration](./tutorials/4-Understand_and_optimize_hyperparameters.ipynb)
More info on hyperopt: [Official website](http://hyperopt.github.io/hyperopt/)
## Papers and projects using ReservoirPy
If you want your paper to appear here, please contact us (see contact link below).
- Chaix-Eichel et al. (2022) From implicit learning to explicit representations. arXiv preprint arXiv:2204.02484. [arXiv](https://arxiv.org/abs/2204.02484) [PDF](https://arxiv.org/pdf/2204.02484)
- Trouvain & Hinaut (2021) Canary Song Decoder: Transduction and Implicit Segmentation with ESNs and LTSMs. ICANN 2021 [HTML](https://link.springer.com/chapter/10.1007/978-3-030-86383-8_6) [HAL](https://hal.inria.fr/hal-03203374) [PDF](https://hal.inria.fr/hal-03203374/document)
- Pagliarini et al. (2021) Canary Vocal Sensorimotor Model with RNN Decoder and Low-dimensional GAN Generator. ICDL 2021. [HTML](https://ieeexplore.ieee.org/abstract/document/9515607?casa_token=QbpNhxjtfFQAAAAA:3klJ9jDfA0EEbckAdPFeyfIwQf5qEicaKS-U94aIIqf2q5xkX74gWJcm3w9zxYy9SYOC49mQt6vF)
- Pagliarini et al. (2021) What does the Canary Say? Low-Dimensional GAN Applied to Birdsong. HAL preprint. [HAL](https://hal.inria.fr/hal-03244723/) [PDF](https://hal.inria.fr/hal-03244723/document)
- Which Hype for My New Task? Hints and Random Search for Echo State Networks Hyperparameters. ICANN 2021 [HTML](https://link.springer.com/chapter/10.1007/978-3-030-86383-8_7) [HAL](https://hal.inria.fr/hal-03203318) [PDF](https://hal.inria.fr/hal-03203318)
## Contact
If you have a question regarding the library, please open an Issue. If you have more general question or feedback you can [contact us on twitter](https://twitter.com/reservoirpy) or by email to xavier dot hinaut the-famous-home-symbol inria dot fr.
## Citing ReservoirPy
Trouvain, N., Pedrelli, L., Dinh, T. T., Hinaut, X. (2020) Reservoirpy: an efficient and user-friendly library to design echo state networks. In International Conference on Artificial Neural Networks (pp. 494-505). Springer, Cham. [HTML](https://link.springer.com/chapter/10.1007/978-3-030-61616-8_40) [HAL](https://hal.inria.fr/hal-02595026) [PDF](https://hal.inria.fr/hal-02595026/document)
If you're using ReservoirPy in your work, please cite our package using the following bibtex entry:
```
@incollection{Trouvain2020,
doi = {10.1007/978-3-030-61616-8_40},
url = {https://doi.org/10.1007/978-3-030-61616-8_40},
year = {2020},
publisher = {Springer International Publishing},
pages = {494--505},
author = {Nathan Trouvain and Luca Pedrelli and Thanh Trung Dinh and Xavier Hinaut},
title = {{ReservoirPy}: An Efficient and User-Friendly Library to Design Echo State Networks},
booktitle = {Artificial Neural Networks and Machine Learning {\textendash} {ICANN} 2020}
}
```
<div align="left">
<img src="./static/inr_logo_rouge.jpg" width=300><br>
</div>
This package is developed and supported by Inria at Bordeaux, France in [Mnemosyne](https://team.inria.fr/mnemosyne/) group. [Inria](https://www.inria.fr/en) is a French Research Institute in Digital Sciences (Computer Science, Mathematics, Robotics, ...).
| /reservoirpy-0.3.9.post1.tar.gz/reservoirpy-0.3.9.post1/README.md | 0.793386 | 0.991764 | README.md | pypi |
ResFinder documentation
=============
ResFinder identifies acquired antimicrobial resistance genes in total or partial
sequenced isolates of bacteria.
## Important if you are updating from a previous ResFinder version
It is no longer recommended to clone the ResFinder bitbucket repository unless you plan to do development work on ResFinder.
Instead we recommend installing ResFinder using pip as described below.
There are several good reasons why the recommended installation procedure has changed, among them the increasing size of the repository, which has risen to several hundreds of megabytes due to the long history of ResFinder. It's also easier for users, and it makes sure your installation will be a tested release of the application.
## Installation
ResFinder consists of an application and 1-3 databases. The databases can be used without the application, but not the other way around. Below ResFinder, the application, will be installed first and then the databases will be installed and configured to work with ResFinder the application.
### Dependencies
ResFinder uses two external alignment tools that must be installed.
* BLAST
* KMA
#### BLAST
If you don't want to specify the path of BLAST every time you run ResFinder, make sure that "blastn" is in you PATH or set the environment variable specified in the "Environment Variables Table" in this README.
Blastn can be obtained from:
```url
https://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/
```
```bash
# Example of how to set the environment variable in the bash shell. Remember this is only temporary, if you want it set every time you log in you need to add this line to your .bashrc, .zshrc file.
export CGE_BLASTN="/path/to/some/dir/blastn"
```
#### KMA
If you don't want to specify the path of KMA every time you run ResFinder, make sure that KMA is in you PATH or set the environment variable specified in the "Environment Variables Table" in this README.
KMA can be obtained from:
```url
https://bitbucket.org/genomicepidemiology/kma.git
```
```bash
# Example of how to set the environment variable in the bash shell. Remember this is only temporary, if you want it set every time you log in you need to add this line to your .bashrc, .zshrc file.
export CGE_KMA="/path/to/some/dir/kma/kma"
```
### Install ResFinder the application using pip
**Important**: This will install ResFinder in the environment where you run pip and potentially update the Python modules ResFinder depends on. It is recommended to run ResFinder in its own environment, in order to avoid breaking existing installations and prevent ResFinder from getting broken by future unrelated pip installations. This is described in the optional step below.
#### Optional: Create virtual environment ####
Go to the location where you want to store your environment.
```bash
# Create environment
python3 -m venv resfinder_env
# Activate environment
source resfinder_env/bin/activate
# When you are finished using ResFinder deactivate the environment
deactivate
```
#### Install ResFinder ####
```bash
pip install resfinder
```
#### Databases
If you don't want to specify the path to the databases every time you run ResFinder, you need to set the environment variable specified in the "Environment Variables Table" in this README.
Go to the location where you want to store the databases. Clone the databases you need.
**Note**: We are currently working on hosting tarballed versions of the databases that can be downloaded, so that cloning can be avoided.
```bash
git clone https://bitbucket.org/genomicepidemiology/resfinder_db/
git clone https://bitbucket.org/genomicepidemiology/pointfinder_db/
git clone https://bitbucket.org/genomicepidemiology/disinfinder_db/
```
Set the appropriate environment variables.
```bash
# Example of how to set the environment variable in the bash shell. Remember this is only temporary, if you want it set every time you log in you need to add this line to for example your .bashrc file.
export CGE_RESFINDER_RESGENE_DB="/path/to/some/dir/resfinder_db"
export CGE_RESFINDER_RESPOINT_DB="/path/to/some/dir/pointfinder_db"
export CGE_DISINFINDER_DB="/path/to/some/dir/disinfinder_db"
```
### Install ResFinder with Docker
The ResFinder application and the 3 databases has been build into a single image on docker hub named "genomicepidemiology/resfinder". Below is an example run, where the current working directory is bound to the container "/app" path which is the container working directory.
```bash
docker run -v "$(pwd):/app" genomicepidemiology/resfinder -ifa data/test_isolate_01.fa -o test1 -s ecoli --acquired --point
```
### Test data
Test data can be found in the sub-directory tests/data
## Usage
You can run resfinder command line using python.
**NOTE**: Species should be entered with their full scientific names (e.g. "escherichia coli"), using quotation marks, not case sensitive.
An attempt has been made to capture some deviations like "ecoli" and "e.coli", but it is far from all deviations that will be captured.
```bash
# Example of running resfinder
python -m resfinder -o path/to/outdir -s "Escherichia coli" -l 0.6 -t 0.8 --acquired --point -ifq test_isolate_01_*
# The program can be invoked with the -h option
usage: __main__.py [-h] [-ifa INPUTFASTA] [-ifq INPUTFASTQ [INPUTFASTQ ...]] [--nanopore] -o OUTPUTPATH [-j OUT_JSON] [-b BLASTPATH] [-k KMAPATH] [-s SPECIES] [--ignore_missing_species] [-db_res DB_PATH_RES]
[-db_res_kma DB_PATH_RES_KMA] [-acq] [-ao ACQ_OVERLAP] [-l MIN_COV] [-t THRESHOLD] [-d] [-db_disinf DB_PATH_DISINF] [-db_disinf_kma DB_PATH_DISINF_KMA] [-c] [-db_point DB_PATH_POINT]
[-db_point_kma DB_PATH_POINT_KMA] [-g SPECIFIC_GENE [SPECIFIC_GENE ...]] [-u] [-l_p MIN_COV_POINT] [-t_p THRESHOLD_POINT] [--ignore_indels] [--ignore_stop_codons] [-v] [--pickle]
options:
-h, --help show this help message and exit
-ifa INPUTFASTA, --inputfasta INPUTFASTA
Input fasta file.
-ifq INPUTFASTQ [INPUTFASTQ ...], --inputfastq INPUTFASTQ [INPUTFASTQ ...]
Input fastq file(s). Assumed to be single-end fastq if only one file is provided, and assumed to be paired-end data if two files are provided.
--nanopore If nanopore data is used
-o OUTPUTPATH, --outputPath OUTPUTPATH
Output directory. If it doesnt exist, it will be created.
-j OUT_JSON, --out_json OUT_JSON
Specify JSON filename and output directory. If the directory doesnt exist, it will be created.
-b BLASTPATH, --blastPath BLASTPATH
Path to blastn
-k KMAPATH, --kmaPath KMAPATH
Path to KMA
-s SPECIES, --species SPECIES
Species in the sample
--ignore_missing_species
If set, species is provided and --point flag is set, will not throw an error if no database is found for the provided species. If species is not found. Point mutations will silently be ignored.
-db_res DB_PATH_RES, --db_path_res DB_PATH_RES
Path to the databases for ResFinder.
-db_res_kma DB_PATH_RES_KMA, --db_path_res_kma DB_PATH_RES_KMA
Path to the ResFinder databases indexed with KMA. Defaults to the value of the --db_res flag.
-acq, --acquired Run resfinder for acquired resistance genes
-ao ACQ_OVERLAP, --acq_overlap ACQ_OVERLAP
Genes are allowed to overlap this number of nucleotides. Default: 30.
-l MIN_COV, --min_cov MIN_COV
Minimum (breadth-of) coverage of ResFinder within the range 0-1.
-t THRESHOLD, --threshold THRESHOLD
Threshold for identity of ResFinder within the range 0-1.
-d, --disinfectant Run resfinder for disinfectant resistance genes
-db_disinf DB_PATH_DISINF, --db_path_disinf DB_PATH_DISINF
Path to the databases for DisinFinder.
-db_disinf_kma DB_PATH_DISINF_KMA, --db_path_disinf_kma DB_PATH_DISINF_KMA
Path to the DisinFinder databases indexed with KMA. Defaults to the value of the --db_res flag.
-c, --point Run pointfinder for chromosomal mutations
-db_point DB_PATH_POINT, --db_path_point DB_PATH_POINT
Path to the databases for PointFinder
-db_point_kma DB_PATH_POINT_KMA, --db_path_point_kma DB_PATH_POINT_KMA
Path to the PointFinder databases indexed with KMA. Defaults to the value of the --db_path_point flag.
-g SPECIFIC_GENE [SPECIFIC_GENE ...], --specific_gene SPECIFIC_GENE [SPECIFIC_GENE ...]
Specify genes existing in the database to search for - if none is specified all genes are included in the search.
-u, --unknown_mut Show all mutations found even if in unknown to the resistance database
-l_p MIN_COV_POINT, --min_cov_point MIN_COV_POINT
Minimum (breadth-of) coverage of Pointfinder within the range 0-1. If None is selected, the minimum coverage of ResFinder will be used.
-t_p THRESHOLD_POINT, --threshold_point THRESHOLD_POINT
Threshold for identity of Pointfinder within the range 0-1. If None is selected, the minimum coverage of ResFinder will be used.
--ignore_indels Ignore frameshift-causing indels in Pointfinder.
--ignore_stop_codons Ignore premature stop codons in Pointfinder.
-v, --version Show programs version number and exit
--pickle Create a pickle dump of the Isolate object. Currently needed in the CGE webserver. Dependency and this option is being removed.
```
### Environment Variables
Environment variables recognized by ResFinder, the flag they replace and the default value for the flag. Provided commandline flags will always take precedence. Set environment variables takes precedence over default flag values.
Additional Environment variables can be added by appending entries to the file named "environment_variables.md".
#### Environment Variables Table
| Environment Variable | Flag | Default Value |
|----------------------------|---------------------|----------------|
| CGE_KMA | kmaPath | kma |
| CGE_BLASTN | blastPath | blastn |
| CGE_RESFINDER_RESGENE_DB | db_path_res | None |
| CGE_RESFINDER_RESPOINT_DB | db_path_point | None |
| CGE_RESFINDER_GENE_COV | min_cov | 0.60 |
| CGE_RESFINDER_GENE_ID | threshold | 0.80 |
| CGE_RESFINDER_POINT_COV | min_cov_point | 0.60 |
| CGE_RESFINDER_POINT_ID | threshold_point | 0.80 |
| CGE_DISINFINDER_DB | db_path_disinf | None |
| CGE_DISINFINDER_DB_KMA | db_path_disinf_kma | kma |
### Species Abbreviations
ResFinder understands the species abbreviations listed in the Species Abbreviations Table. Additional species abbreviations can be added by appending entries to the file "species_abbreviations.md".
#### Species Abbreviations Table
| Species | Abbreviation |
|-------------------------------|-------------------------|
| campylobacter jejuni | c. jejuni |
| campylobacter jejuni | c.jejuni |
| campylobacter jejuni | c jejuni |
| campylobacter jejuni | cjejuni |
| campylobacter coli | c. coli |
| campylobacter coli | c.coli |
| campylobacter coli | c coli |
| campylobacter coli | ccoli |
| escherichia coli | e. coli |
| escherichia coli | e.coli |
| escherichia coli | e coli |
| escherichia coli | ecoli |
| salmonella enterica | s. enterica |
| salmonella enterica | s.enterica |
| salmonella enterica | s enterica |
| salmonella enterica | senterica |
### Web-server
A webserver implementing the methods is available at the [CGE
website](http://www.genomicepidemiology.org/) and can be found here:
https://cge.food.dtu.dk/services/ResFinder/
### ResFinder result files
ResFinder outputs several files. A brief description of these is given below.
* pheno_table_species.txt: table with species specific AMR phenotypes.
* pheno_table.txt: table with all AMR phenotypes.
* PointFinder_prediction.txt: tab separated table. 1 is given to a predicted resistance against an antibiotic class, 0 is given to no resistance detected.
* PointFinder_results.txt: tab separated table with predicted point mutations leading to antibiotic resistance.
* PointFinder_table.txt: predicted point mutations grouped into genes to which they belong.
* ResFinder_Hit_in_genome_seq.fsa: fasta sequence of resistance gene hits found in the input data (query).
* ResFinder_Resistance_gene_seq.fsa: fasta sequence of resistance gene hits found in the database (reference).
* ResFinder_results_table.txt: predicted resistance genes grouped by antibiotic class.
* ResFinder_results_tab.txt: tab separated table with predicted resistance genes.
* ResFinder_results.txt: predicted resistance genes grouped by antibiotic class and hit alignments to reference resistance genes.
* <input_filename>.json: Output written to a CGE standardized json file. All results can be derived from this file. The format is defined here: https://bitbucket.org/genomicepidemiology/cgelib/src/master/src/cgelib/output/templates_json/beone/
Citation
=======
When using the method please cite:
ResFinder 4.0 for predictions of phenotypes from genotypes.
Bortolaia V, Kaas RS, Ruppe E, Roberts MC, Schwarz S, Cattoir V, Philippon A, Allesoe RL, Rebelo AR, Florensa AR, Fagelhauer L,
Chakraborty T, Neumann B, Werner G, Bender JK, Stingl K, Nguyen M, Coppens J, Xavier BB, Malhotra-Kumar S, Westh H, Pinholt M,
Anjum MF, Duggett NA, Kempf I, Nykasenoja S, Olkkola S, Wieczorek K, Amaro A, Clemente L, Mossong J, Losch S, Ragimbeau C, Lund O, Aarestrup FM.
Journal of Antimicrobial Chemotherapy. 2020 Aug 11.
PMID: 32780112 doi: 10.1093/jac/dkaa345
[Epub ahead of print]
References
=======
1. Camacho C, Coulouris G, Avagyan V, Ma N, Papadopoulos J, Bealer K, Madden TL. BLAST+: architecture and applications. BMC Bioinformatics 2009; 10:421.
2. Clausen PTLC, Aarestrup FM, Lund O. Rapid and precise alignment of raw reads against redundant databases with KMA. BMC Bioinformatics 2018; 19:307.
License
=======
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| /resfinder-4.3.2.tar.gz/resfinder-4.3.2/README.md | 0.662469 | 0.806052 | README.md | pypi |
import os.path as op
import sys
# Default layout position for every known track type.
# NOTE: the original literal repeated several keys ("chromosome-labels",
# "horizontal-multivec", "heatmap"); Python silently keeps the last
# occurrence, so "heatmap" resolved to "center".  The duplicates are removed
# here with the effective (last-wins) values preserved.
_track_default_position = {
    "2d-rectangle-domains": "center",
    "bedlike": "top",
    "horizontal-bar": "top",
    "horizontal-chromosome-labels": "top",
    "chromosome-labels": "top",
    "horizontal-gene-annotations": "top",
    "horizontal-heatmap": "top",
    "horizontal-1d-heatmap": "top",
    "horizontal-line": "top",
    "horizontal-multivec": "top",
    "bar": "top",
    "gene-annotations": "top",
    "heatmap": "center",
    "1d-heatmap": "top",
    "line": "top",
    "left-axis": "left",
    "osm-tiles": "center",
    "top-axis": "top",
    "viewport-projection-center": "center",
    "viewport-projection-horizontal": "top",
}
# Default track type used to render each datatype
# (looked up by datatype_to_tracktype below).
_datatype_default_track = {
    "2d-rectangle-domains": "2d-rectangle-domains",
    "bedlike": "bedlike",
    "chromsizes": "horizontal-chromosome-labels",
    "gene-annotations": "horizontal-gene-annotations",
    "matrix": "heatmap",
    "vector": "horizontal-bar",
    "multivec": "horizontal-multivec",
}
def tracktype_default_position(tracktype: str):
    """
    Look up the default layout position for a track type.

    For example, the default position for a heatmap is 'center'.

    Parameters
    ----------
    tracktype: str
        The track type to check

    Returns
    -------
    str:
        The default position, or None for unknown track types.
    """
    return _track_default_position.get(tracktype)
def datatype_to_tracktype(datatype):
    """
    Infer a default track type and its position for a data type.  Other
    track types may also be able to display the datatype; this returns
    the default one.

    Parameters
    ----------
    datatype: str
        A datatype identifier (e.g. 'matrix')

    Returns
    -------
    str, str:
        A track type (e.g. 'heatmap') and position (e.g. 'top'); either
        may be None when no default is known.
    """
    track = _datatype_default_track.get(datatype)
    return track, _track_default_position.get(track)
def position_to_viewport_projection_type(position):
    """Return the viewport-projection track type matching a layout position.

    Unknown positions fall back to the generic 'viewport-projection'.
    """
    projection_by_position = {
        "center": "viewport-projection-center",
        "top": "viewport-projection-horizontal",
        "bottom": "viewport-projection-horizontal",
        "left": "viewport-projection-vertical",
        "right": "viewport-projection-vertical",
    }
    return projection_by_position.get(position, "viewport-projection")
def recommend_filetype(filename):
    """Suggest a filetype to try for *filename* based on its extension.

    Returns 'bedfile' for .bed, 'bedpe' for .bedpe, otherwise None.
    """
    # The original computed op.splitext three times and left the first
    # result (a (root, ext) tuple) in a dead variable; compute once.
    ext = op.splitext(filename)[1]
    if ext == ".bed":
        return "bedfile"
    if ext == ".bedpe":
        return "bedpe"
    return None
def recommend_datatype(filetype):
    """Suggest a datatype to try for *filetype*; None when nothing applies."""
    return "bedlike" if filetype == "bedfile" else None
# Registry of supported file formats: a human-readable description, the
# file extensions used by infer_filetype, and the datatypes the format can
# hold (the first entry is the default picked by infer_datatype).
FILETYPES = {
    "cooler": {
        "description": "multi-resolution cooler file",
        "extensions": [".mcool"],
        "datatypes": ["matrix"],
    },
    "bigwig": {
        "description": "Genomics focused multi-resolution vector file",
        "extensions": [".bw", ".bigwig"],
        "datatypes": ["vector"],
    },
    "beddb": {
        "description": "SQLite-based multi-resolution annotation file",
        "extensions": [".beddb", ".multires.db"],
        "datatypes": ["bedlike", "gene-annotations"],
    },
    "hitile": {
        "description": "Multi-resolution vector file",
        "extensions": [".hitile"],
        "datatypes": ["vector"],
    },
    "time-interval-json": {
        "description": "Time interval notation",
        "extensions": [".htime"],
        "datatypes": ["time-interval"],
    },
}
def infer_filetype(filename):
    """Return the FILETYPES key whose extensions match *filename*, else None.

    Matching is case-insensitive on the extension.
    """
    extension = op.splitext(filename)[1].lower()
    for name, meta in FILETYPES.items():
        if extension in meta["extensions"]:
            return name
    return None
def infer_datatype(filetype):
    """Return the default datatype of *filetype*, or None if it is unknown."""
    meta = FILETYPES.get(filetype)
    return meta["datatypes"][0] if meta is not None else None
def fill_filetype_and_datatype(filename, filetype=None, datatype=None):
    """
    Fill in any missing filetype or datatype by inferring them from the
    given filename.

    Parameters:
    ----------
    filename: str
        The name of the file
    filetype: str
        The type of the file (can be None)
    datatype: str
        The datatype for the data in the file (can be None)
    Returns:
    --------
    (filetype, datatype): (str, str)
        Filled in filetype and datatype based on the given filename
    """
    if filetype is None:
        filetype = infer_filetype(filename)
    if filetype is None:
        # Inference failed: warn on stderr, maybe suggest, and bail out.
        suggestion = recommend_filetype(filename)
        print(
            "Unknown filetype, please specify using the --filetype option",
            file=sys.stderr,
        )
        if suggestion is not None:
            print(
                "Based on the filename, you may want to try the filetype: {}".format(
                    suggestion
                )
            )
        return (None, None)
    if datatype is None:
        datatype = infer_datatype(filetype)
    if datatype is None:
        suggestion = recommend_datatype(filetype)
        print(
            "Unknown datatype, please specify using the --datatype option",
            file=sys.stderr,
        )
        if suggestion is not None:
            print(
                "Based on the filetype, you may want to try the datatype: {}".format(
                    suggestion
                )
            )
    return filetype, datatype
[](https://travis-ci.org/dhilst/resguard)
resguard
========
This module provides function for parsing response data, based on
dataclass defined schemas.
The user define arbitrary schema using dataclass. One dataclass
can refer to others to represent nested structures.
```python
>>> @dataclass
... class Foo:
... pass
>>> @dataclass
... class Bar:
... foo: Foo
```
While made with parsing json decoded data from REST responses in mind, the
approach is pretty generic and may work for other use cases.
So suppose that you're in charging to do another API client.. if you started
doing this once you know that you'll gonna work with JSON and that JSON become
plain dicts and lists in python, it's easy to lose the track of these objects
and start to spread KeyError and IndexError handlers all over the codebase.
It became usual to me to write representation of the response data as objects
and instantiating these objects, and with objects I can have some type
checking, much better than with dicts... and can keep track of the fields.
But writing ad-hoc classes and parsers from dict -> myobject became boring
too.. so I created this! Much more declarative and type checking friendly
So let's write an API to cat facts, we can find the docs here
https://alexwohlbruck.github.io/cat-facts/docs/endpoints/facts.html
We're implementing the /facts/random endpoint. The documentation said that it
will respond like this:
```json
{
"_id": "591f9894d369931519ce358f",
"__v": 0,
"text": "A female cat will be pregnant for approximately 9 weeks - between 62 and 65 days from conception to delivery.",
"updatedAt": "2018-01-04T01:10:54.673Z",
"deleted": false,
"source": "api",
"used": false
}
```
So is a list of facts, a fact can be defined like this
```python
>>> from datetime import datetime
>>> @dataclass
... class Fact:
... _id: str
... __v: int
... text: str
... updatedAt: datetime
... deleted: bool
... source: str
... used: bool
... user: Optional[str]
```
To parse a respone you call `parse_dc`, where `dc` stands for dataclass. You
call it with the dataclass and the response data:
```python
>>> import requests as r
>>> url = "https://cat-fact.herokuapp.com"
>>> res = r.get(f"{url}/facts/random")
>>> parse_dc(Fact, res.json())
Traceback (most recent call last):
...
TypeError: Unknow field type for Fact(_id,_Fact__v,text,updatedAt,deleted,source,used,user)
```
You may notice that I put a `user: Optional[str]` on the `Fact` definition too.
This is how you express optional fields, that may or may not be present on
response. Missing optional fields become `None` in the dataclass.
What happens here is that the documentation is outdated: there is a `type` field
that was not expected in response. `parse_dc` raise a TypeError if anything
goes out of rails. Let's see in response what we have in `type` field
```python
>>> type_ = res.json()['type']
>>> type_, type(type_)
('cat', <class 'str'>)
```
We do not want that our software breaks because the API put a brand new
field in the response. You can ignore unknown fields by passing `strict=False`
to `parse_dc`. If you want this by default you can memoise the parse_dc like
below:
```python
>>> from functools import partial
>>> parse_dc = partial(parse_dc, strict=False)
```
So let's update our `Fact` definition
```python
>>> @dataclass
... class Fact:
... _id: str
... __v: int
... text: str
... updatedAt: datetime
... deleted: bool
... source: str
... used: bool
... user: Optional[str]
... type: str # <- we added this
```
And parse again. This time it works, but it doesn't properly initialize the
dataclasses fields. Well, dataclass don't do runtime type checking.
```python
>>> dc = parse_dc(Fact, res.json())
>>> dc
Fact(...)
>>> type(dc.updatedAt)
<class 'str'>
```
If you pass it a string, it doesn't matter if the field type says datetime,
constructor will put the string there and it's done. But the standard library
provides a way to handle this. You need to provide an `__post_init__` method.
It will not receive any arguments and it's called by the constructor after
initializing self.
```python
>>> @dataclass
... class Fact:
... _id: str
... __v: int
... text: str
... updatedAt: datetime
... deleted: bool
... source: str
... used: bool
... user: Optional[str]
... type: str
...
... def __post_init__(self):
... if isinstance(self.updatedAt, str):
... self.updatedAt = datetime.strptime(self.updatedAt, "%Y-%m-%dT%H:%M:%S.%fZ")
>>> dc = parse_dc(Fact, res.json())
>>> dc
Fact(...)
>>> type(dc.updatedAt)
<class 'datetime.datetime'>
```
Now what if we want to go in the opposite direction: given some JSON, construct
a dataclass. Well resguard can be invoked as `curl something | python -m resguard fromjson`
and it will output a dataclass definition for that JSON.
The type inference is pretty simple, but it is already better than writing all
that dataclasses by hand. Let's see it in action
```python
>>> print(print_dc(fromjson("Root", '{"foo": "foo", "bar": { "bar": "bar" }}')))
@dataclass
class bar:
bar: str
<BLANKLINE>
<BLANKLINE>
@dataclass
class Root:
foo: str
bar: bar
<BLANKLINE>
```
To use it from command line (much simpler)
```shell
curl -s https://cat-fact.herokuapp.com/facts/random | python -m resguard fromjson
@dataclass
class status:
verified: bool
sentCount: int
@dataclass
class Root:
used: bool
source: str
type: str
deleted: bool
_id: str
__v: int
text: str
updatedAt: str
createdAt: str
status: status
user: str
```
That's it, check below for function docs
# parse_dc(dc, data, strict=True)
Build tree of dataclasses initialized with data
It doesn't type-check, just instantiates the dataclasses recursively. Just
note that dataclass don't check at runtime too, so, this doesn't typecheck
but it works at runtime
>>> from dataclasses import dataclass, asdict
>>> @dataclass
... class Foo:
... foo: str
... __bar: str
>>> asdict(Foo(foo=1, _Foo__bar=1))
{'foo': 1, '_Foo__bar': 1}
But mypy will detect the `foo=1` there.
Let's parse something :-)
```python
>>> from enum import Enum
>>> FooEnum = Enum("FooEnum", "foo bar")
>>>
>>> @dataclass
... class Bar:
... bar: str
>>>
>>> @dataclass
... class Foo:
... foo: str
... bar: Bar
>>> parse_dc(Foo, {"foo": "foo", "num": 1, "bar": {"bar": "bar"}})
Foo(foo='foo', bar=Bar(bar='bar'))
>>> from datetime import datetime
>>> @dataclass
... class Date:
... d: datetime
>>> Date(d="20010101T00:00Z").d
20010101T00:00Z
>>> @dataclass
... class Date:
... d: datetime
... def __post_init__(self):
... if isinstance(self.d, str):
... self.d = datetime.strptime("%Y%m%dT%H%MZ")
>>> Date(d="20010101T00:00Z").d
```
# create_base(base)
A function decorator. It replace the function by a class
which call the decorated function in its new method, for
example
```python
>>> from datetime import datetime
>>> @create_base(datetime)
... def date_br(s):
... return datetime.strptime(s, r"%d/%m/%Y")
>>> issubclass(date_br, datetime)
True
>>> date_br("01/01/2001")
datetime.datetime(2001, 1, 1, 0, 0)
```
# unpack_union(union: Union[~T, Any, NoneType]) -> ~T
Takes a Union and returns another union with the same arguments
as input, but with None and Any filtered
```python
>>> unpack_union(Optional[str])
<class 'str'>
>>> unpack_union(List[str])
<class 'str'>
```
It respect concrete types
```python
>>> unpack_union(int)
<class 'int'>
```
If the input is a literal, it returns itself. Literals are types
and values at same time, like enums
```python
>>> unpack_union(1)
1
>>> unpack_union([1,2])
[1, 2]
```
# Dataclass(*args, **kwds)
Dataclass static type
https://stackoverflow.com/a/55240861/652528
| /resguard-0.15.tar.gz/resguard-0.15/README.md | 0.603698 | 0.913445 | README.md | pypi |
from io import StringIO
import logging
import json
import re
from typing import *
from ast import literal_eval
from dataclasses import dataclass, fields, is_dataclass, make_dataclass
try:
from typing_extensions import Protocol, Literal
except ImportError:
pass
# Generic type variable used in unpack_union's signature.
T = TypeVar("T")
log = logging.getLogger(__name__)
if __debug__:
    # Without python -O, emit debug-level log records from this module.
    log.setLevel(logging.DEBUG)
class Dataclass(Protocol):
    """
    Dataclass static type
    https://stackoverflow.com/a/55240861/652528
    """

    # Marker attribute added by @dataclass; its presence is what
    # structurally identifies a dataclass for type checkers.
    __dataclass_fields__: Dict
def create_base(base):
    """
    Decorator factory: wrap a plain factory function in a class subclassing
    *base*, so the result both behaves like the function when "called" and
    passes ``issubclass(..., base)`` checks.

    ```python
    >>> from datetime import datetime
    >>> @create_base(datetime)
    ... def date_br(s):
    ...     return datetime.strptime(s, r"%d/%m/%Y")
    >>> issubclass(date_br, datetime)
    True
    >>> date_br("01/01/2001")
    datetime.datetime(2001, 1, 1, 0, 0)
    ```
    """

    def decorator(factory):
        # __new__ delegates straight to the factory, so "instantiating"
        # the class simply runs the wrapped function.
        class _TypeHelper(base):
            def __new__(cls, *args, **kwargs):
                return factory(*args, **kwargs)

        _TypeHelper.__name__ = factory.__name__
        return _TypeHelper

    return decorator
def unpack_union(union: Union[T, Any, None]) -> T:
"""
Takes an Unin and return another union with the same arguments
as input, but with None and Any filtered
```python
>>> unpack_union(Optional[str])
<class 'str'>
>>> unpack_union(List[str])
<class 'str'>
```
It respect concrete types
```python
>>> unpack_union(int)
<class 'int'>
```
If the input is a literal, it returns itself. Literals are types
and values at same time, like enums
```python
>>> unpack_union(1)
1
>>> unpack_union([1,2])
[1, 2]
```
"""
try:
return Union[
tuple(t for t in union.__args__ if t not in (type(None), Any)) # type: ignore
]
except AttributeError: # no __args__
return union
def parse_dc_typecheck(cls: Dataclass, data: dict, ignore_unknows=False) -> Dataclass:
    """
    Given an arbitrary dataclass and a dict this function will
    recursively parse the data, checking data types against cls
    dataclass.
    It raises TypeError if something goes wrong. It tries to improve
    the common errors by reraising then with better messages.
    First declare some dataclasses that define the data that you want
    to parse.
    ```python
    >>> @dataclass
    ... class Foo:
    ...     name: Literal[0, 1]
    ...
    >>> @dataclass
    ... class Bar:
    ...     l: List[int]
    ...     foo: Dict[str, int]
    ...     Foo: Foo
    ...     age: Optional[int] = None
    ```
    Now suppose that you get this data from a network response. I'm
    expecting it to be plain json parsed to dicts, lists and so on,
    but no objects, just decoded json:
    ```python
    >>> data = {"foo": {"bar": 1}, "l": [], "Foo": {"name": 1}}
    ```
    Hmm, it seems to match, lets try to parse this
    ```python
    >>> parse_dc_typecheck(Bar, data)
    Bar(l=[], foo={'bar': 1}, Foo=Foo(name=1), age=None)
    ```
    You can see that it creates nested dataclasses too, cool. But this
    was easy, this was the happy path, what about the not so happy path.
    Let's change data, and see how parse_dc handle errors
    ```python
    >>> data["badkey"] = "bad things"
    >>> parse_dc_typecheck(Bar, data)
    Traceback (most recent call last):
    ...
    TypeError: Unknow field badkey for Bar. Expected one of (l,foo,Foo,age)
    ```
    Hmm... interesting... It knows that badkey is not in Bar dataclass
    definition and it shows what are the expected keys. Let's try another thing
    ```python
    >>> @dataclass
    ... class Foo:
    ...     foo: int
    >>> parse_dc_typecheck(Foo, {"foo": "an string"})
    Traceback (most recent call last):
    ...
    TypeError: in dataclass Foo, 'an string' is not int: invalid literal for int() with base 10: 'an string'
    ```
    So I passed a bad (by little margin) value in "foo" key. It expects an int
    and received an string. The "invalid literal ..." part is from an error that
    raises when parce_dc tries to pass "an string" to int(), it handles the error
    and reraise as TypeError. The idea is that calle should catch TypeError.
    Now if it uses the dataclass field type as constructor, this works
    ```python
    >>> @dataclass
    ... class Foo:
    ...     foo: str
    >>> parse_dc_typecheck(Foo, {"foo": 1}).foo
    '1'
    ```
    It works because str(1) just .. works .. So think about str as the Any type
    from typing module. Almost anything can be encoded as string, so take care
    of yours, since they point to holes on type checking, but provide a nice
    generic system
    """
    res = {}
    # Map field name -> declared type for the target dataclass.
    fields_ = {f.name: f.type for f in fields(cls)}
    for k, v in data.items():
        # avoid python mangling: rewrite a leading "__" to "_<Class>__"
        # so incoming keys match the mangled dataclass field names.
        k = re.sub(r"^__", f"_{cls.__name__}__", k)
        if k not in fields_:
            msg = "Unknow field {} for {}. Expected one of ({})".format(
                k, cls.__name__, ",".join(fields_.keys())
            )
            # NOTE(review): this warns only when __debug__ is False (i.e.
            # under `python -O`); the condition looks inverted — confirm.
            if not __debug__:
                log.warning("%s", msg)
            if ignore_unknows:
                continue
            raise TypeError(msg)
        # None values are skipped so optional fields fall back to defaults.
        if v is None:
            continue
        typev = fields_[k]
        is_literal = False
        # @FIXME
        # This code is bad, lift this to another
        # function that returns the concrete_type
        if hasattr(typev, "__origin__"):
            # Parameterized typing construct: pick the runtime container
            # type and remember its element types where needed.
            if typev.__origin__ in (list, List):
                concrete_typev = list
                list_subtype = unpack_union(typev)
            elif typev.__origin__ in (dict, Dict):
                concrete_typev = dict
                dict_subtype_key = typev.__args__[0]
                dict_subtype_val = typev.__args__[1]
            elif typev.__origin__ in (Union,):
                concrete_typev = unpack_union(fields_[k])
            elif typev.__origin__ is Literal:
                is_literal = True
                literals = typev.__args__
            else:
                raise NotImplementedError(
                    f"Can't find a way to determine concrete type for {v}"
                )
        else:
            # typing_extensions.Literal has no __origin__
            if str(typev).startswith("typing_extensions.Literal"):
                is_literal = True
                literals = literal_eval(
                    str(typev).replace("typing_extensions.Literal", "")
                )
            else:
                concrete_typev = typev
        scalar = (float, int, bool, str)
        if is_literal:
            # Literals are validated by membership, not constructed.
            if v not in literals:
                raise TypeError(
                    f"while creating Literal for dataclass {cls.__name__}, it seems that {v} is not in literal values {literals}"
                )
            res[k] = v
        elif is_dataclass(concrete_typev):
            # NOTE(review): recurses through parse_dc (no type checking and
            # no ignore_unknows propagation) — confirm this is intended.
            res[k] = parse_dc(concrete_typev, v)
        elif issubclass(concrete_typev, scalar):
            try:
                res[k] = concrete_typev(v)
            except ValueError as e:
                raise TypeError(
                    f"in dataclass {cls.__name__}, {repr(v)} is not {concrete_typev.__name__}: {e}"
                ) from e
        elif concrete_typev is list:
            try:
                res[k] = [list_subtype(x) for x in v]
            except ValueError as e:
                raise TypeError(
                    f"in dataclass {cls.__name__} while trying to construct a list of type {list_subtype} with values [{', '.join(v)}]: {e}"
                ) from e
        elif concrete_typev is dict:
            res[k] = {dict_subtype_key(k): dict_subtype_val(v) for k, v in v.items()}
        elif callable(concrete_typev):
            # Fallback: any callable annotation is used as a constructor.
            try:
                res[k] = concrete_typev(v)
            except TypeError as e:
                raise TypeError(
                    f" in dataclass {cls.__name__} while trying to construct value from {concrete_typev.__name__}({repr(v)}): {e}"
                ) from e
        else:
            raise NotImplementedError(
                "This should never happen, please open an issue with an stack trace"
            )
    try:
        return cls(**res)
    except TypeError as e:
        raise TypeError(
            f"while calling {cls.__name__}(**data) with this data {data}: {e}"
        ) from e
# Registry of every dataclass created through create_dc, keyed by name.
_created_dataclasses = {}


def create_dc(dcname: str, fields):
    """
    Create a dataclass named *dcname* with the given fields and remember
    it in the module-level registry.

    >>> from dataclasses import is_dataclass, fields, asdict
    >>> Foo = create_dc("Foo", (("foo", str),) )
    >>> Foo.__name__
    'Foo'
    >>> is_dataclass(Foo)
    True
    >>> asdict(Foo(foo="foo"))
    {'foo': 'foo'}
    """
    new_dc = make_dataclass(dcname, fields)
    _created_dataclasses[dcname] = new_dc
    return new_dc
def fromdict(dcname: str, data: dict):
    """
    Infer a dataclass named *dcname* whose fields mirror the dict *data*.

    Nested dicts become nested dataclasses named after their key; lists
    become ``List[...]`` (or ``Tuple[...]`` when elements are mixed);
    scalars use their runtime type.  Values of other types are dropped.

    >>> from dataclasses import fields
    >>> Foo = fromdict("Foo", {"foo": "foo", "bar": {"bar": "bar"}})
    >>> [f.type.__name__ for f in fields(Foo)]
    ['str', 'bar']
    """
    inferred = []
    for key, value in data.items():
        if isinstance(value, dict):
            inferred.append((key, fromdict(key, value)))
        elif isinstance(value, list):
            if not value:
                inferred.append((key, List[Any]))
            elif len({type(item) for item in value}) == 1:
                inferred.append((key, List[type(value[0])]))
            else:
                inferred.append((key, Tuple[tuple(type(item) for item in value)]))
        elif isinstance(value, (int, float, bool, str)):
            inferred.append((key, type(value)))
    return create_dc(dcname, inferred)
def print_dc(dcroot) -> str:
    """
    Render *dcroot* (and any nested dataclass field types) as dataclass
    source text.  Nested dataclasses are emitted before the classes that
    reference them, so the output is valid top-to-bottom.

    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Bar:
    ...     bar: str
    >>> @dataclass
    ... class Foo:
    ...     foo: str
    ...     bar: Bar
    >>> print(print_dc(Foo))
    @dataclass
    class Bar:
        bar: str
    <BLANKLINE>
    <BLANKLINE>
    @dataclass
    class Foo:
        foo: str
        bar: Bar
    <BLANKLINE>
    """
    rendered = "@dataclass\nclass {}:\n".format(dcroot.__name__)
    for field in fields(dcroot):
        if is_dataclass(field.type):
            # Prepend the nested class definition ahead of everything
            # rendered so far, separated by a blank line.
            rendered = print_dc(field.type) + "\n\n" + rendered
        rendered += "    {}: {}\n".format(field.name, field.type.__name__)
    return rendered
def fromjson(dcname: str, jsondata: str):
    """
    Convenience wrapper: decode *jsondata* with json.loads and hand the
    resulting dict to fromdict.
    """
    decoded = json.loads(jsondata)
    return fromdict(dcname, decoded)
def parse_dc(dc, data, strict=True):
    """
    Build a tree of dataclasses initialized with *data*.

    No runtime type checking is performed — dataclasses don't check types
    at runtime either — the dict is simply instantiated recursively.
    Values are passed through untouched; use ``__post_init__`` on the
    dataclass to coerce e.g. ISO strings into datetime objects.

    Keys starting with ``__`` are rewritten to their name-mangled form
    (``_<Class>__name``) so they match the dataclass field names.
    Unknown keys raise TypeError when *strict* is true (the default) and
    are dropped with a warning otherwise.

    ```python
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Bar:
    ...     bar: str
    >>> @dataclass
    ... class Foo:
    ...     foo: str
    ...     bar: Bar
    >>> parse_dc(Foo, {"foo": "foo", "num": 1, "bar": {"bar": "bar"}}, strict=False)
    Foo(foo='foo', bar=Bar(bar='bar'))
    ```
    """
    flds = {f.name: f.type for f in fields(dc)}
    cpy = data.copy()
    for k, v in data.items():
        if k not in flds.keys():
            if k.startswith("__"):
                # Apply Python's name mangling for dunder-prefixed keys.
                # NOTE(review): if the mangled name is also unknown this
                # raises KeyError below rather than TypeError — confirm.
                new_k = f"_{dc.__name__}{k}"
                cpy[new_k] = v
                del cpy[k]
                k = new_k
            else:
                # log.warn is a deprecated alias of log.warning.
                log.warning(f"Unknow field {k}={v} for {dc.__name__}")
                if strict:
                    fields_ = ",".join([
                        f"{t.name}" for t in fields(dc)
                    ])
                    raise TypeError(f"Unknow field {k} for {dc.__name__}({fields_})")
                del cpy[k]
                continue
        if is_dataclass(flds[k]):
            # Recurse into nested dataclass fields.
            cpy[k] = parse_dc(flds[k], v)
    return dc(**cpy)
if __name__ == "__main__":
    import doctest
    import sys

    # Sub-commands: "test" runs the module doctests, "fromjson" reads JSON
    # from stdin and prints inferred dataclass definitions.
    cli_args = sys.argv[1:]
    if not cli_args:
        print(f"Usage: python -m resguard {{fromjson [dcname]|test}}", file=sys.stderr)
        sys.exit(1)
    command = cli_args[0]
    if command == "test":
        doctest.testmod(optionflags=doctest.ELLIPSIS)
    elif command == "fromjson":
        root_name = cli_args[1] if len(cli_args) > 1 else "Root"
        print(print_dc(fromjson(root_name, sys.stdin.read())))
import argparse
import logging
import os
from collections import defaultdict, Counter

import numpy as np
import pandas as pd
import scipy.spatial
import six
from osgeo import ogr
from tqdm import trange
def find_feature(layer, lng, lat):
    '''Return the first feature that contains (lng, lat) in the layer'''
    pt = ogr.Geometry(ogr.wkbPoint)
    pt.SetPoint_2D(0, lng, lat)
    # Narrow candidates to features whose bounds intersect the point.
    # NOTE(review): this leaves the spatial filter set on the layer; the
    # caller (main) later resets it with layer.SetSpatialFilter(None).
    layer.SetSpatialFilter(pt)
    for feature in layer:
        ply = feature.GetGeometryRef()  # the candidate feature's geometry
        if ply.Contains(pt):
            return feature
    # Implicitly returns None when no feature contains the point.
def split(geometry, coords):
    """Yield one piece of *geometry* per coordinate via a Voronoi split.

    Each yielded geometry is the intersection of *geometry* with the
    Voronoi cell of the corresponding row of *coords* (an (n, 2) array).
    """
    # Add four distant sentinel points so every real point gets a finite,
    # closed Voronoi region covering the area of interest.
    m = max(abs(coords.max()), abs(coords.min())) * 1000
    # pd.np was a deprecated alias removed in pandas 2.0; use numpy directly.
    inf_coords = np.append(coords, [[m, m], [m, -m], [-m, -m], [-m, m]], axis=0)
    voronoi = scipy.spatial.Voronoi(inf_coords)
    # point_region maps each input point to its region; skip the sentinels.
    for region_index in voronoi.point_region[:len(coords)]:
        ring = ogr.Geometry(ogr.wkbLinearRing)
        region = voronoi.regions[region_index]
        for vertex_index in region:
            ring.AddPoint(*voronoi.vertices[vertex_index])
        ring.AddPoint(*voronoi.vertices[region[0]])  # close the ring
        poly = ogr.Geometry(ogr.wkbPolygon)
        poly.AddGeometry(ring)
        yield geometry.Intersection(poly)
def main(infile, points, outfile, group, cols, lat='latitude', lon='longitude'):
    """Assign each feature of *infile* to a group and write merged shapes.

    Every row of *points* (a DataFrame with *lat*/*lon* columns) is located
    in a feature of the input shapefile.  Features containing points from
    several groups are split with a Voronoi tessellation; features with no
    points adopt the group of the nearest point.  All geometries of a group
    are then unioned and written to *outfile* with an 'id' field plus the
    requested attribute columns *cols*.
    """
    driver = ogr.GetDriverByName('ESRI Shapefile')
    shape = driver.Open(infile)
    layer = shape.GetLayer()
    # If any of the groups are not present, ignore those rows
    points = points.dropna(subset=[group])
    lookup = points.set_index(group)
    logging.info('Get centroids of features to locate nearest feature...')
    feature_coords = []
    for index in trange(layer.GetFeatureCount()):
        feature = layer.GetFeature(index)
        feature_coords.append(feature.GetGeometryRef().Centroid().GetPoints()[0])
    feature_tree = scipy.spatial.cKDTree(feature_coords)
    # feature_points[fid] lists all points falling inside that feature.
    logging.info('Find nearest feature for each point...')
    feature_points = defaultdict(list)
    outside_points = []
    for index in trange(len(points)):
        point = points.iloc[index, :]
        feature = find_feature(layer, point[lon], point[lat])
        # If the point is not in any feature, find the nearest
        if feature is None:
            feature_index = feature_tree.query([[point[lon], point[lat]]])[1][0]
            feature = layer.GetFeature(feature_index)
            outside_points.append([feature, point])
        feature_points[feature.GetFID()].append(point)
    # logging.warn is a deprecated alias of logging.warning.
    logging.warning('%d points were outside shape. Using nearest features', len(outside_points))
    point_coords = points[[lon, lat]].values
    point_tree = scipy.spatial.cKDTree(point_coords)
    logging.info('Splitting features by group')
    layer.SetSpatialFilter(None)
    group_freq = Counter()          # group_freq[0] = # of features with 0 groups, etc
    target = defaultdict(list)      # target[group] = [geometries corresponding to group]
    for index in trange(layer.GetFeatureCount()):
        pts = feature_points.get(index, [])
        groups = {point[group] for point in pts}
        feature = layer.GetFeature(index)
        geometry = feature.GetGeometryRef().Clone()
        group_freq[len(groups)] += 1
        # For features that don't have a group, pick the nearest point's group
        if len(groups) == 0:
            centroid = geometry.Centroid()
            p = centroid.GetPoints()
            feature_centroid = p[:2]
            nearest_point_index = point_tree.query(feature_centroid)[1][0]
            target[points.iloc[nearest_point_index][group]].append(geometry)
        # For features with exactly 1 group, assign the group to that feature
        if len(groups) == 1:
            target[list(groups)[0]].append(geometry)
        # For features that have multiple points, split them
        elif len(groups) > 1:
            # pd.np was removed in pandas 2.0; use numpy directly.
            coords = np.array([[pt[lon], pt[lat]] for pt in pts])
            for index, new_geometry in enumerate(split(geometry, coords)):
                target[pts[index][group]].append(new_geometry)
    # Save into shapefile
    logging.info('Merging broken features by group')
    if os.path.exists(outfile):
        driver.DeleteDataSource(outfile)
    data_source = driver.CreateDataSource(outfile)
    out_layer = data_source.CreateLayer('target', geom_type=ogr.wkbPolygon)
    feature_defn = out_layer.GetLayerDefn()
    for col in ['id'] + cols:
        # TODO: use the inferred type. Don't use string for everything
        field = ogr.FieldDefn(col, ogr.OFTString)
        out_layer.CreateField(field)
    groups = list(target.keys())
    for index in trange(len(groups)):
        # Note: `group` now rebinds the parameter to each group value;
        # lookup below is indexed by that value, which is intended.
        group = groups[index]
        geometries = target[group]
        geom = geometries[0]
        for geometry in geometries[1:]:
            geom = geom.Union(geometry)
        feature = ogr.Feature(feature_defn)
        feature.SetGeometry(geom)
        feature.SetField('id', six.text_type(group))
        for col in cols:
            feature.SetField(col, lookup[col][group])
        out_layer.CreateFeature(feature)
    data_source.Destroy()
    logging.info('Created %s', outfile)
def cmdline():
    """Command-line entry point: parse arguments and delegate to main()."""
    logging.basicConfig(level=logging.INFO)
    # NOTE(review): relies on this module having a docstring; if __doc__ is
    # None, .strip() raises AttributeError — confirm a module docstring exists.
    cli = argparse.ArgumentParser(
        description=__doc__.strip().splitlines()[0],  # first line of the docstring
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,  # print default values
    )
    cli.add_argument('input', help='input shape file')
    cli.add_argument('points', help='CSV file with lat-lng data')
    cli.add_argument('output', help='output shape file')
    cli.add_argument('--id', default='id',
                     help='CSV column to create shapes for (e.g. branchname)')
    cli.add_argument('--col', nargs='*', default=[], help='Additional columns to add')
    cli.add_argument('--lat', default='latitude', help='Latitude column in points CSV file')
    cli.add_argument('--lng', default='longitude', help='Longitude column in points CSV file')
    opts = cli.parse_args()
    point_table = pd.read_csv(opts.points, encoding='cp1252')
    main(opts.input, point_table, opts.output, opts.id, opts.col, opts.lat, opts.lng)


if __name__ == '__main__':
    cmdline()
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import numpy as np
from scipy.stats import t
# Module-wide default for the detector's `verbose` flag.
VERBOSE = True
class ResidualAnomalyDetector:
    """Anomaly detector that predicts each attribute from all the others and
    flags instances with extreme prediction residuals via an iterated
    Grubbs' outlier test."""

    # Short-name registries of the supported estimator classes.
    classifiers = dict(dt=DecisionTreeClassifier)
    regressors = dict(dt=DecisionTreeRegressor)
def __init__(
self,
classifier="dt",
regressor="dt",
significance_level=0.05,
clf_kwargs=dict(),
rgr_kwargs=dict(),
verbose=VERBOSE,
**algorithm_kwargs
):
# General params
self.verbose = verbose
# Metadata
self.n_instances = None
self.n_attributes = None
self._attr_ids = None
self._nominal_ids = None
# Models
self._models = None
self._desc_ids = None
self._targ_ids = None
self.regressor_algorithm = regressor
self.classifier_algorithm = classifier
self.classifier_config = {**clf_kwargs, **algorithm_kwargs}
self.regressor_config = {**rgr_kwargs, **algorithm_kwargs}
# Residuals/Anomaly Scores
self.significance_level = significance_level
self._residuals = None
self._scores = None
self._labels = None
return
    def fit(self, X, nominal_ids=None):
        """Fit one predictive model per attribute and score the training data.

        X is a 2D array (instances x attributes); nominal_ids identifies
        the attributes to treat as categorical (fitted with classifiers).
        """
        self.n_instances, self.n_attributes = X.shape
        # attr_ids/nominal_ids go through property setters defined outside
        # this excerpt; presumably attr_ids becomes the set of all column
        # indices, since set operations are used on it later — TODO confirm.
        self.attr_ids = self.n_attributes
        self.nominal_ids = nominal_ids
        # Fit Models
        self._init_desc_and_targ_ids()
        self._init_models(X)
        self._fit_models(X)
        # Get Scores
        self._labels = self._init_labels()
        self._residuals = self._get_residuals(X)
        self._scores = self._get_scores()
        return
    def predict(self):
        """Iteratively flag outliers until a pass finds no new ones.

        Each iteration applies Grubbs' test over the still-normal
        instances.  Returns the label vector (1 = anomaly, 0 = normal).
        """
        progress_to_be_made = True
        while progress_to_be_made:
            n_anomalies_start = self.n_anomalies
            outlier_idxs = self._detect_outliers()
            self._update_labels(outlier_idxs)
            n_anomalies_after = self.n_anomalies
            # Stop once a full pass marks no additional anomalies.
            progress_to_be_made = n_anomalies_after > n_anomalies_start
            if self.verbose:
                msg = """
                In this iteration, I found anomalies: {}
                Total n_anomalies now: {}
                """.format(
                    outlier_idxs, self.n_anomalies
                )
                print(msg)
        return self.labels
@staticmethod
def normalize_residuals(residuals):
avg_residuals = np.mean(residuals, axis=0)
std_residuals = np.std(residuals, axis=0)
R = (np.abs(residuals - avg_residuals)) / std_residuals
return R
# Private Methods
def _init_desc_and_targ_ids(self):
d = []
t = []
for a in self.attr_ids:
d.append(self.attr_ids - {a})
t.append({a})
self._desc_ids = d
self._targ_ids = t
return
def _init_models(self, X):
self.models = [
self.classifier_algorithm(**self.classifier_config)
if a in self.nominal_ids
else self.regressor_algorithm(**self.regressor_config)
for a in self.attr_ids
]
return
    def _init_labels(self):
        """Start with every instance labelled normal (0)."""
        # Initialize everything normal
        return np.zeros(self.n_instances)
def _fit_models(self, X):
for m_idx in range(self.n_models):
desc_ids = list(self.desc_ids[m_idx])
targ_ids = list(self.targ_ids[m_idx])
x = X[:, desc_ids]
y = X[:, targ_ids]
self.models[m_idx].fit(x, y)
return
    def _predict_models(self, X_true):
        """Predict every attribute from the others; returns an array shaped
        like X_true with each column filled by its own model."""
        X_pred = np.zeros_like(X_true)
        for m_idx in range(self.n_models):
            desc_ids = list(self.desc_ids[m_idx])
            targ_ids = list(self.targ_ids[m_idx])
            y_pred = self.models[m_idx].predict(X_true[:, desc_ids])
            # Models may return a flat vector; force shape (n, n_targets).
            X_pred[:, targ_ids] = y_pred.reshape(-1, len(targ_ids))
        return X_pred
    def _get_residuals(self, X_true):
        """Observed minus predicted values, one column per attribute."""
        X_pred = self._predict_models(X_true)
        return X_true - X_pred
    def _get_scores(self):
        """Anomaly score = worst (max) normalized residual across attributes."""
        return np.max(self.normalize_residuals(self.residuals),axis=1)
def _get_grubbs_statistic(self):
labels = self.labels
all_residuals = self.residuals
flt_residuals = all_residuals[labels == 0, :]
flt_label_idx = np.arange(self.n_instances, dtype=int)[labels == 0]
nrm_residuals = self.normalize_residuals(flt_residuals)
degrees_of_freedom = nrm_residuals.shape[0]
return (
np.max(nrm_residuals, axis=0),
flt_label_idx[np.argmax(nrm_residuals, axis=0)],
degrees_of_freedom,
)
@staticmethod
def _get_grubbs_threshold(dof, critical_t_value):
factor_01 = (dof - 1) / np.sqrt(dof)
factor_02 = np.sqrt(
(critical_t_value ** 2) / (dof - 2 + (critical_t_value ** 2))
)
return factor_01 * factor_02
@staticmethod
def _get_critical_t_value(a=0.05, dof=100):
p = 1.0 - a / 2
return t.ppf(p, dof)
def _detect_outliers(self):
    """Run one pass of the Grubbs test and return indices of new outliers.

    Computes the per-attribute Grubbs statistic over the instances still
    labelled normal, compares it against the Grubbs threshold and
    returns the original row indices whose statistic exceeds it.

    :returns: array of instance indices detected as outliers this pass
    """
    grubbs_statistic, potential_outlier_idxs, dof = self._get_grubbs_statistic()
    # The t-distribution quantile is taken at N - 2 degrees of freedom.
    critical_t_value = self._get_critical_t_value(
        a=self.significance_level, dof=dof - 2
    )
    grubbs_threshold = self._get_grubbs_threshold(dof, critical_t_value)
    if self.verbose:
        msg = """
potential_outlier_idxs: {}
critical_t_value(dof={}, significance={}): {}
grubbs_statistic: {}
grubbs_threshold: {}
""".format(
            potential_outlier_idxs,
            dof,
            self.significance_level,
            critical_t_value,
            grubbs_statistic,
            grubbs_threshold,
        )
        print(msg)
    # Only attributes whose statistic exceeds the threshold contribute an
    # outlier; map them back to the candidate instance indices.
    attributes_with_outliers = np.where(grubbs_statistic > grubbs_threshold)[0]
    outlier_idxs = potential_outlier_idxs[attributes_with_outliers]
    return outlier_idxs
def _update_labels(self, outlier_idxs):
self.labels[outlier_idxs] = 1
return
# Properties
@property
def models(self):
    """Per-attribute estimator list, one model per attribute."""
    return self._models

@models.setter
def models(self, value):
    # NOTE(review): validation via ``assert`` is stripped under ``python -O``;
    # consider raising TypeError/ValueError instead.
    assert isinstance(value, list)
    assert (
        len(value) == self.n_attributes
    ), "The amount of models must equal the amount of attributes"
    self._models = value
    return

@property
def n_models(self):
    """Number of models (equals the number of attributes)."""
    return len(self.models)
@property
def residuals(self):
    """Signed prediction errors computed during ``fit``."""
    return self._residuals

@property
def decision_scores_(self):
    """Alias of :attr:`scores`."""
    return self.scores

@property
def scores(self):
    """Per-instance anomaly scores (worst normalized residual)."""
    return self._scores
@property
def labels(self):
    """Current label vector: 0 marks a normal instance, 1 an anomaly."""
    return self._labels

@labels.setter
def labels(self, value):
    """Replace the label vector after validating its type and length.

    :param value: a numpy array with one entry per instance
    """
    assert isinstance(value, np.ndarray)
    assert value.shape[0] == self.n_instances
    # Bug fix: the original used ``==`` (a no-op comparison) instead of
    # ``=``, so the setter silently discarded ``value``.
    self._labels = value
    return
@property
def n_anomalies(self):
    """Total number of instances currently labelled anomalous."""
    return np.sum(self.labels)

@property
def classifier_algorithm(self):
    """Estimator class used for nominal attributes."""
    return self._classifier_algorithm

@property
def regressor_algorithm(self):
    """Estimator class used for numeric attributes."""
    return self._regressor_algorithm

@classifier_algorithm.setter
def classifier_algorithm(self, value):
    # ``value`` is a key into the ``classifiers`` registry, not a class.
    self._classifier_algorithm = self.classifiers[value]
    return

@regressor_algorithm.setter
def regressor_algorithm(self, value):
    # ``value`` is a key into the ``regressors`` registry, not a class.
    self._regressor_algorithm = self.regressors[value]
    return
@property
def desc_ids(self):
    """Descriptive attribute sets, one per model (all attributes but one)."""
    return self._desc_ids

@property
def targ_ids(self):
    """Target attribute sets, one singleton per model."""
    return self._targ_ids

@property
def attr_ids(self):
    """Set of all attribute indices, ``{0, ..., n_attributes - 1}``."""
    return self._attr_ids

@attr_ids.setter
def attr_ids(self, n):
    # The setter takes a *count* ``n`` and stores the index set {0, ..., n-1}.
    self._attr_ids = set(range(n))
    return

@property
def nominal_ids(self):
    """Indices of nominal (categorical) attributes; empty set when none."""
    return self._nominal_ids

@nominal_ids.setter
def nominal_ids(self, value):
    if value is None:
        self._nominal_ids = set()
    else:
        # NOTE(review): ``assert`` validation disappears under ``python -O``.
        assert (
            value <= self.attr_ids
        ), "Nominal attributes have to be a subset of all attributes."
        self._nominal_ids = set(value)
    return

@property
def numeric_ids(self):
    """Indices of numeric attributes: all attributes minus the nominal ones."""
    return self.attr_ids - self.nominal_ids
# noqa: D205, D400
"""
Script for adding a task
=========================
- for every theory file in a given folder
- creates a new file with the same name in another given folder
- the output file includes the same one lemma as the input
- but the ‘task’ is changed according to the script’s parameters
Possible task types:
- ``TaskType.NITPICK``, demands a ``cardinality`` to be provided as well
(finite model search)
- ``TaskType.SLEDGEHAMMER`` (automated proof search)
Both task types have a hard-coded timeout of ``1000000`` seconds.
"""
import os
import re
from enum import Enum
class TaskType(Enum):
    """Type of tasks to ask ``isabelle`` server to perform."""

    # Automated proof search with a (practically unlimited) timeout.
    SLEDGEHAMMER = "sledgehammer[timeout=1000000]"
    # Finite counter-model search (``max_threads=0`` — presumably "let
    # Nitpick decide"; verify against the Nitpick manual).
    NITPICK = "nitpick[timeout=1000000,max_threads=0]"
def add_task(
    source_path: str,
    target_path: str,
    task_type: "TaskType",
    cardinality: int = 1,
) -> None:
    """
    Take theory files from an existing folder and change tasks in them.

    :param source_path: a directory where to get theory files to add tasks to
    :param target_path: where to put new theory files with added tasks
    :param task_type: use Nitpick or Sledgehammer (disprove by finding a
        finite counter-example or prove)
    :param cardinality: a cardinality of finite model to find (only for
        Nitpick tasks)
    """
    if not os.path.exists(target_path):
        os.mkdir(target_path)
    # One constructor per element of the desired finite model: C0 | C1 | ...
    constants = " | ".join(f"C{index}" for index in range(cardinality))
    for theory_name in os.listdir(source_path):
        source_file = os.path.join(source_path, theory_name)
        with open(source_file, "r", encoding="utf-8") as theory_file:
            original_text = theory_file.read()
        # First splice the task line between the lemma statement and ``oops``.
        with_task = re.sub(
            '"\n.*\n?oops',
            '"\n' + task_type.value + "\noops",
            original_text,
        )
        # Then rewrite the datatype line for the requested cardinality.
        new_text = re.sub(
            "datatype finite_type =.*\n",
            "datatype finite_type = " + constants + "\n",
            with_task,
        )
        with open(
            os.path.join(target_path, theory_name), "w", encoding="utf-8"
        ) as theory_file:
            theory_file.write(new_text)
# noqa: D205, D400
"""
Lattice
========
"""
from typing import Dict, List, Tuple
import graphviz
from residuated_binars.algebraic_structure import BOT, TOP, AlgebraicStructure
from residuated_binars.axiom_checkers import absorbs, associative, commutative
class Lattice(AlgebraicStructure):
    r"""
    A representation of a lattice.

    >>> join = {"0": {"0": "0", "1": "1"}, "1": {"0": "1", "1": "1"}}
    >>> meet = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}
    >>> lattice = Lattice("test", {"join": join, "meet": meet})
    >>> print(lattice.mace4_format)
    0 v 0 = 0.
    0 v 1 = 1.
    1 v 0 = 1.
    1 v 1 = 1.
    0 ^ 0 = 0.
    0 ^ 1 = 0.
    1 ^ 0 = 0.
    1 ^ 1 = 1.
    <BLANKLINE>
    >>> lattice.canonise_symbols()
    >>> print(lattice.graphviz_repr)
    graph {
    "⟙" -- "⟘"
    }
    >>> join["0"]["1"] = "0"
    >>> Lattice("test", {"join": join, "meet": meet})
    Traceback (most recent call last):
    ...
    ValueError: join is not commutative
    >>> join["0"]["1"] = "1"
    >>> meet["0"]["1"] = "1"
    >>> Lattice("test", {"join": join, "meet": meet})
    Traceback (most recent call last):
    ...
    ValueError: meet is not commutative
    >>> meet["0"]["1"] = "0"
    >>> join["0"]["0"] = "1"
    >>> Lattice("test", {"join": join, "meet": meet})
    Traceback (most recent call last):
    ...
    ValueError: absorption laws fail
    >>> join = {'0': {'0': '1', '1': '0'}, '1': {'0': '0', '1': '0'}}
    >>> meet = {'0': {'0': '0', '1': '0'}, '1': {'0': '0', '1': '1'}}
    >>> Lattice("test", {"join": join, "meet": meet})
    Traceback (most recent call last):
    ...
    ValueError: join is not associative
    >>> join = {'0': {'0': '0', '1': '0'}, '1': {'0': '0', '1': '1'}}
    >>> meet = {'0': {'0': '1', '1': '0'}, '1': {'0': '0', '1': '0'}}
    >>> Lattice("test", {"join": join, "meet": meet})
    Traceback (most recent call last):
    ...
    ValueError: meet is not associative
    """

    def check_axioms(self) -> None:  # noqa: D102
        self._check_commutativity_and_associativity()
        # Absorption must hold both ways: a ^ (a v b) = a and a v (a ^ b) = a.
        if not absorbs(
            self.operations["meet"], self.operations["join"]
        ) or not absorbs(self.operations["join"], self.operations["meet"]):
            raise ValueError("absorption laws fail")

    def _check_commutativity_and_associativity(self):
        """Raise ``ValueError`` unless join and meet are commutative and associative."""
        if not commutative(self.operations["join"]):
            raise ValueError("join is not commutative")
        if not commutative(self.operations["meet"]):
            raise ValueError("meet is not commutative")
        if not associative(self.operations["join"]):
            raise ValueError("join is not associative")
        if not associative(self.operations["meet"]):
            raise ValueError("meet is not associative")

    @property
    def operation_map(self) -> Dict[str, str]:  # noqa: D102
        return {"meet": "^", "join": "v"}

    @property
    def more(self) -> Dict[str, List[str]]:
        """Return a representation of a 'more' relation of the lattice."""
        # ``two`` is strictly below ``one`` iff ``one ^ two == two``.
        relation: Dict[str, List[str]] = {}
        for one in self.symbols:
            relation[one] = []
            for two in self.symbols:
                if self.operations["meet"][one][two] == two and one != two:
                    relation[one].append(two)
        return relation

    @property
    def hasse(self) -> List[Tuple[str, str]]:
        """Return a representation of a Hasse diagram of a lattice."""
        # Order elements by descending number of elements below them.
        more = {
            pair[0]: pair[1]
            for pair in sorted(
                self.more.items(),
                key=lambda key_value: -len(key_value[1]),
            )
        }
        hasse = []
        for higher in more:
            # Remove everything reachable through a smaller element; what
            # remains are the immediate (covering) neighbours of ``higher``.
            nearest = set(more[higher])
            for lower in more[higher]:
                nearest = nearest.difference(set(more[lower]))
            hasse += [(higher, neighbour) for neighbour in list(nearest)]
        return hasse

    @property
    def graphviz_repr(self) -> "graphviz.Graph":
        """Return a ``graphviz`` graph of the lattice's Hasse diagram."""
        graph = graphviz.Graph()
        for pair in self.hasse:
            graph.edge(pair[0], pair[1])
        return graph

    def canonise_symbols(self) -> None:
        """Enumerate lattice's items in a canonical way."""
        # Sort elements by (number of elements below, name): the bottom has
        # nothing below it, the top has everything, so the canonical names
        # BOT, a, b, ..., TOP line up with the sorted order.
        symbol_map = {
            pair[1]: pair[0]
            for pair in zip(
                [BOT]
                + [chr(ord("a") + i) for i in range(self.cardinality - 2)]
                + [TOP],
                [
                    key
                    for key, value in sorted(
                        self.more.items(),
                        key=lambda key_value: (
                            len(key_value[1]),
                            key_value[0],
                        ),
                    )
                ],
            )
        }
        self.remap_symbols(symbol_map)
# noqa: D205, D400
"""
Algebraic Structure
====================
"""
from typing import Any, Dict, List, Union
CayleyTable = Dict[str, Dict[str, str]]
TOP = r"⟙"
BOT = r"⟘"
class AlgebraicStructure:
    r"""
    A base class for different algebraic structures.

    >>> magma_with_involution = AlgebraicStructure(
    ...     label="test",
    ...     operations={
    ...         "mult": {"0": {"0": "0", "1": "1"}, "1": {"0": "1", "1": "1"}},
    ...         "invo": {"0": "1", "1": "0"}
    ...     }
    ... )
    >>> magma_with_involution
    {'mult': [[0, 1], [1, 1]], 'invo': [1, 0]}
    >>> magma_with_involution.symbols
    ['0', '1']
    >>> magma_with_involution.remap_symbols({"0": "c1", "1": "c2"})
    >>> magma_with_involution
    {'mult': [[0, 1], [1, 1]], 'invo': [1, 0]}
    >>> magma_with_involution.symbols
    ['c1', 'c2']
    >>> print(magma_with_involution.mace4_format)
    mult(c1, c1) = c1.
    mult(c1, c2) = c2.
    mult(c2, c1) = c2.
    mult(c2, c2) = c2.
    invo(c1) = c2.
    invo(c2) = c1.
    <BLANKLINE>
    >>> class Magma(AlgebraicStructure):
    ...     ''' an example with redefined operation symbol '''
    ...
    ...     @property
    ...     def operation_map(self) -> Dict[str, str]:
    ...         return {"mult": "*"}
    >>> magma = Magma("test", {"mult": {"c": {"c": "c"}}})
    >>> print(magma.mace4_format)
    c * c = c.
    <BLANKLINE>
    """

    def __init__(
        self,
        label: str,
        operations: Dict[str, Dict[str, Any]],
    ):
        """
        Only binary and unary operations are supported.

        :param label: an arbitrary name for an algebraic structure
        :param operations: a dictionary of operations and their names
        """
        self.label = label
        self.operations = operations
        # Subclasses validate their axioms at construction time.
        self.check_axioms()

    def check_axioms(self) -> None:
        """Check axioms specific to that algebraic structure.

        If any axiom fails, raise an error.
        """

    @property
    def cardinality(self) -> int:
        """Return the number of items in the algebraic structure."""
        # Every operation shares the same carrier set, so any table's key
        # count gives the cardinality.
        return len(next(iter(self.operations.items()))[1].keys())

    def remap_symbols(self, symbol_map: Dict[str, str]) -> None:
        """
        Rename symbols in a given way.

        :param symbol_map: what map to what
        """
        for op_label, operation in self.operations.items():
            # A binary operation's values are rows (dicts); a unary
            # operation's values are plain symbols.
            if isinstance(next(iter(operation.items()))[1], Dict):
                new_table: CayleyTable = {}
                for one in symbol_map.keys():
                    new_table[symbol_map[one]] = {}
                    for two in symbol_map.keys():
                        new_table[symbol_map[one]][
                            symbol_map[two]
                        ] = symbol_map[operation[one][two]]
                self.operations[op_label] = new_table
            else:
                new_op: Dict[str, str] = {}
                for one in symbol_map.keys():
                    new_op[symbol_map[one]] = symbol_map[operation[one]]
                self.operations[op_label] = new_op

    @property
    def symbols(self) -> List[str]:
        """Return a list of symbols denoting items of the structure."""
        keys = list(next(iter(self.operations.items()))[1].keys())
        pure_keys = keys.copy()
        if TOP in pure_keys:
            pure_keys.remove(TOP)
        if BOT in pure_keys:
            pure_keys.remove(BOT)
        # Canonical order: bottom first, top last, the rest sorted.
        return (
            ([BOT] if BOT in keys else [])
            + list(sorted(pure_keys))
            + ([TOP] if TOP in keys else [])
        )

    @property
    def operation_map(self) -> Dict[str, str]:
        """Return a map from operation labels to operation symbols.

        Labels are used for the infix notation and symbols ---
        for functional one.
        """
        return {}

    @property
    def mace4_format(self) -> str:
        """
        Represent the algebraic structure in ``Prover9/Mace4`` format.

        :returns: a string representation
        """
        result = ""
        for op_label, operation in self.operations.items():
            # Infix notation when a symbol is declared, functional otherwise.
            op_symbol = self.operation_map.get(op_label, None)
            if isinstance(list(operation.items())[0][1], Dict):
                for i in self.symbols:
                    for j in self.symbols:
                        if op_symbol is not None:
                            result += (
                                f"{i} {op_symbol} {j} = {operation[i][j]}.\n"
                            )
                        else:
                            result += (
                                f"{op_label}({i}, {j}) = {operation[i][j]}.\n"
                            )
            else:
                for i in self.symbols:
                    result += f"{op_label}({i}) = {operation[i]}.\n"
        return result

    def _operation_tabular_view(
        self, operation: Dict[str, Any]
    ) -> Union[List[List[int]], List[int]]:
        # Translate symbolic Cayley tables into integer tables indexed by
        # each symbol's position in ``self.symbols``.
        inverse_index = {symbol: i for i, symbol in enumerate(self.symbols)}
        if isinstance(next(iter(operation.items()))[1], Dict):
            table: List[List[int]] = []
            for one in self.symbols:
                table.append([])
                for two in self.symbols:
                    table[inverse_index[one]].append(
                        inverse_index[operation[one][two]]
                    )
            return table
        new_operation: List[int] = list(range(len(operation)))
        for one in self.symbols:
            new_operation[inverse_index[one]] = inverse_index[operation[one]]
        return new_operation

    @property
    def tabular_format(self) -> Dict[str, Union[List[List[int]], List[int]]]:
        """Return a dictionary of Cayley tables as lists of lists."""
        return {
            op_label: self._operation_tabular_view(operation)
            for op_label, operation in self.operations.items()
        }

    def __repr__(self):
        """Return a default representation --- the tabular format."""
        return str(self.tabular_format)
# noqa: D205, D400
"""
Generate Theories
==================
This script creates a folder with a specified name and fills it with valid
Isabelle theory files. Each theory file contains only one lemma without a proof
and does not depend on any other theories. The exact statement of lemmas
is hard-coded. It may have the following assumptions:
- lattice axioms
- residuation axioms
- existence of the latest and the greatest elements of the lattice
(they always exist in finite models)
- a definition of the involution operation
- some combination of abstract distributivity laws (from
``ASSUMPTIONS`` variable of ``constants.py``)
The consequent of the lemma is one of the laws from ``ASSUMPTIONS``
which is not in the antecedent. So, if one has six laws in
``ASSUMPTIONS`` list, there will be ``6 * (2 ^ 5 - 1) = 186`` original
hypotheses to check for models of different cardinalities.
The theory files are called ``T[number].thy`` where ``number``
enumerates theory files starting from zero.
"""
import os
from itertools import combinations
from typing import List, Optional
def generate_isabelle_theory_file(
    theory_name: str, assumptions: List[str], goal: Optional[str] = None
) -> List[str]:
    """
    Generate a text of Isabelle theory file with only one lemma inside.

    :param theory_name: name of a theory file
    :param assumptions: a list of lemma assumptions in Isabelle language
    :param goal: the lemma goal in Isabelle language; when ``None`` the
        lemma consists of the assumptions alone
    :returns: a list of lines of a theory file
    """
    lines = [
        f"theory {theory_name}",
        "imports Main",
        "begin",
        "datatype finite_type = finite_type_constants",
        'lemma "(',
        " &\n".join(assumptions),
    ]
    if goal is None:
        lines.append(")")
    else:
        lines.extend([") \\<longrightarrow>", goal])
    lines.extend(['"', "oops", "end"])
    return lines
def independence_case(
    path: str,
    independent_assumptions: List[str],
    assumption_indices: List[int],
    goal_index: int,
    additional_assumptions: List[str],
) -> None:
    """
    Generate theory of independence of an assumption from a subset of the rest.

    :param path: a folder for storing theory files
    :param independent_assumptions: a list of assumptions whose independence
        we want to check
    :param assumption_indices: indices of assumptions to use
    :param goal_index: index of a goal to prove
    :param additional_assumptions: a list of additional assumptions about
        the binars like the lattice reduct distributivity, existence of
        an involution operation, and multiplication associativity
    """
    # Theory names encode both the assumption subset and the goal index,
    # e.g. ``T013_2.thy``.
    indices_tag = "".join(str(index) for index in list(assumption_indices))
    theory_name = f"T{indices_tag}_{goal_index}"
    selected = [independent_assumptions[index] for index in assumption_indices]
    theory_text = generate_isabelle_theory_file(
        theory_name,
        selected + additional_assumptions,
        independent_assumptions[goal_index],
    )
    target_file = os.path.join(path, f"{theory_name}.thy")
    with open(target_file, "w", encoding="utf-8") as theory_file:
        theory_file.write("\n".join(theory_text))
def independence_check(
    path: str,
    independent_assumptions: List[str],
    additional_assumptions: List[str],
    check_subset_independence: bool,
) -> None:
    """
    Generate theory files to check independence given additional assumptions.

    :param path: a folder for storing theory files
    :param independent_assumptions: a list of assumptions whose independence
        we want to check
    :param additional_assumptions: a list of additional assumptions
    :param check_subset_independence: whether to check every assumption from
        the list against all the rest or against any combination of the rest
    """
    if not os.path.exists(path):
        os.mkdir(path)
    total = len(independent_assumptions)
    for goal_index in range(total):
        # All assumption indices except the current goal.
        rest = tuple(
            index for index in range(total) if index != goal_index
        )
        for assumptions_count in range(1, total):
            if check_subset_independence:
                index_combinations = list(combinations(rest, assumptions_count))
            else:
                index_combinations = [rest]
            for assumption_indices in index_combinations:
                independence_case(
                    path,
                    independent_assumptions,
                    list(assumption_indices),
                    goal_index,
                    additional_assumptions,
                )
# noqa: D205, D400
"""
Check Assumptions
==================
- gets all theory files from a given directory
- constructs a command for Isabelle server to process these files
- saves the log of Isabelle server replies to the file named
``isabelle.out`` in the directory where the theory files are
This script depends on `Python client for Isabelle
server <https://pypi.org/project/isabelle-client>`__.
"""
import logging
import os
import sys
from typing import Optional
import nest_asyncio
from isabelle_client import get_isabelle_client
from isabelle_client.utils import start_isabelle_server
def get_customised_logger(task_folder: str) -> logging.Logger:
    """
    Get a logger writing timestamped lines to ``<task_folder>/isabelle.out``.

    Any pre-existing log file is removed first, so every run starts fresh.

    :param task_folder: a base folder (and a task name)
    :returns: a configured logger at ``INFO`` level
    """
    logfile_name = os.path.join(task_folder, "isabelle.out")
    if os.path.exists(logfile_name):
        os.remove(logfile_name)
    logger = logging.getLogger(os.path.basename(task_folder))
    file_handler = logging.FileHandler(logfile_name)
    file_handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    logger.addHandler(file_handler)
    logger.setLevel(logging.INFO)
    return logger
def check_assumptions(path: str, server_info: Optional[str] = None) -> None:
    """
    Ask Isabelle server to process all theory files in a given path.

    :param path: a folder with theory files
    :param server_info: an info string of an Isabelle server; when ``None``,
        a fresh server is started and shut down afterwards
    """
    # The Isabelle client drives an asyncio loop internally; ``nest_asyncio``
    # makes that safe when this function is itself called from a running loop.
    nest_asyncio.apply()
    # Base names of all ``*.thy`` files in the folder.
    theories = [
        theory_name[0]
        for theory_name in [
            os.path.splitext(theory_file) for theory_file in os.listdir(path)
        ]
        if theory_name[1] == ".thy"
    ]
    new_server_info = _start_server_if_needed(path, server_info)
    isabelle_client = get_isabelle_client(new_server_info)
    isabelle_client.logger = get_customised_logger(path)
    # ``watchdog_timeout=0`` — presumably disables the client-side watchdog;
    # verify against the isabelle-client documentation.
    isabelle_client.use_theories(
        theories=theories, master_dir=get_abs_path(path), watchdog_timeout=0
    )
    # Only shut down a server this call started itself.
    if server_info is None:
        isabelle_client.shutdown()
def get_abs_path(path: str) -> str:
    """
    Get an absolute path on Windows or Linux.

    :param path: a path
    :returns: an absolute path, corrected to a CygWin path on Windows
    """
    absolute = os.path.abspath(path)
    if sys.platform != "win32":
        return absolute
    # CygWin expects ``/cygdrive/c/...`` with forward slashes.
    return absolute.replace("C:\\", "/cygdrive/c/").replace("\\", "/")
def _start_server_if_needed(path: str, server_info: Optional[str]) -> str:
if server_info is None:
new_server_info, _ = start_isabelle_server(
log_file=os.path.join(path, "server.log"),
name=os.path.basename(path),
)
else:
new_server_info = server_info
return new_server_info | /residuated-binars-0.0.4.tar.gz/residuated-binars-0.0.4/residuated_binars/check_assumptions.py | 0.705988 | 0.182044 | check_assumptions.py | pypi |
# noqa: D205, D400
"""
Axiom Checkers
===============
"""
from typing import Dict
from residuated_binars.algebraic_structure import CayleyTable
def associative(cayley_table: "CayleyTable") -> bool:
    """
    Check associativity.

    >>> associative({"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "0"}})
    True
    >>> associative({"0": {"0": "1", "1": "0"}, "1": {"0": "0", "1": "0"}})
    False

    :param cayley_table: a multiplication table of a binary operation
    :returns: whether the operation is associative or not
    """
    elements = cayley_table.keys()
    return all(
        cayley_table[one][cayley_table[two][three]]
        == cayley_table[cayley_table[one][two]][three]
        for one in elements
        for two in elements
        for three in elements
    )
def is_left_identity(cayley_table: "CayleyTable", identity: str) -> bool:
    """
    Check left identity.

    >>> is_left_identity(
    ...     {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}, "1"
    ... )
    True
    >>> is_left_identity(
    ...     {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}, "0"
    ... )
    False

    :param cayley_table: a multiplication table of a binary operation
    :param identity: a symbol for identity
    :returns: whether ``identity`` is a left identity for a Cayley table
    """
    return all(
        cayley_table[identity][element] == element
        for element in cayley_table.keys()
    )
def is_right_identity(cayley_table: "CayleyTable", identity: str) -> bool:
    """
    Check right identity.

    >>> is_right_identity(
    ...     {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}, "1"
    ... )
    True
    >>> is_right_identity(
    ...     {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}, "0"
    ... )
    False

    :param cayley_table: a multiplication table of a binary operation
    :param identity: a symbol for identity
    :returns: whether ``identity`` is a right identity for a Cayley table
    """
    return all(
        cayley_table[element][identity] == element
        for element in cayley_table.keys()
    )
def is_left_inverse(
    cayley_table: "CayleyTable", inverse: Dict[str, str], identity: str
) -> bool:
    """
    Check left inverse.

    >>> op = {"0": {"0": "1", "1": "1"}, "1": {"0": "1", "1": "1"}}
    >>> inv = {"0": "1", "1": "1"}
    >>> is_left_inverse(op, inv, "1")
    True
    >>> is_left_inverse(op, inv, "0")
    False

    :param cayley_table: a multiplication table of a binary operation
    :param inverse: a map for the operation of inversion
    :param identity: a symbol for identity
    :returns: whether ``inverse`` is a left inverse for a Cayley table
    """
    return all(
        cayley_table[inverse[element]][element] == identity
        for element in cayley_table.keys()
    )
def is_right_inverse(
    cayley_table: "CayleyTable", inverse: Dict[str, str], identity: str
) -> bool:
    """
    Check right inverse.

    >>> op = {"0": {"0": "1", "1": "1"}, "1": {"0": "1", "1": "1"}}
    >>> inv = {"0": "1", "1": "1"}
    >>> is_right_inverse(op, inv, "1")
    True
    >>> is_right_inverse(op, inv, "0")
    False

    :param cayley_table: a multiplication table of a binary operation
    :param inverse: a map for the operation of inversion
    :param identity: a symbol for identity
    :returns: whether ``inverse`` is a right inverse for a Cayley table
    """
    return all(
        cayley_table[element][inverse[element]] == identity
        for element in cayley_table.keys()
    )
def commutative(cayley_table: "CayleyTable") -> bool:
    """
    Check commutativity.

    >>> commutative({"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "0"}})
    True
    >>> commutative({"0": {"0": "0", "1": "1"}, "1": {"0": "0", "1": "0"}})
    False

    :param cayley_table: a multiplication table of a binary operation
    :returns: whether the operation is commutative or not
    """
    elements = cayley_table.keys()
    return all(
        cayley_table[one][two] == cayley_table[two][one]
        for one in elements
        for two in elements
    )
def idempotent(cayley_table: "CayleyTable") -> bool:
    """
    Check idempotency.

    >>> idempotent({"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}})
    True
    >>> idempotent({"0": {"0": "0", "1": "1"}, "1": {"0": "0", "1": "0"}})
    False

    :param cayley_table: a multiplication table of a binary operation
    :returns: whether the operation is idempotent or not
    """
    return all(
        cayley_table[element][element] == element
        for element in cayley_table.keys()
    )
def left_distributive(table1: "CayleyTable", table2: "CayleyTable") -> bool:
    """
    Check left distributivity.

    >>> operation = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "0"}}
    >>> left_distributive(operation, operation)
    True
    >>> operation = {"0": {"0": "1", "1": "0"}, "1": {"0": "0", "1": "0"}}
    >>> left_distributive(operation, operation)
    False

    :param table1: a multiplication table of a binary operation
    :param table2: a multiplication table of another binary operation
    :returns: whether the first operation is left distributive with respect
        to the second one or not
    """
    elements = table1.keys()
    return all(
        table1[one][table2[two][three]]
        == table2[table1[one][two]][table1[one][three]]
        for one in elements
        for two in elements
        for three in elements
    )
def right_distributive(table1: "CayleyTable", table2: "CayleyTable") -> bool:
    """
    Check right distributivity.

    >>> operation = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "0"}}
    >>> right_distributive(operation, operation)
    True
    >>> operation = {"0": {"0": "0", "1": "1"}, "1": {"0": "0", "1": "0"}}
    >>> right_distributive(operation, operation)
    False

    :param table1: a multiplication table of a binary operation
    :param table2: a multiplication table of another binary operation
    :returns: whether the first operation is right distributive with respect
        to the second one or not
    """
    elements = table1.keys()
    return all(
        table1[table2[one][two]][three]
        == table2[table1[one][three]][table1[two][three]]
        for one in elements
        for two in elements
        for three in elements
    )
def absorbs(table1: "CayleyTable", table2: "CayleyTable") -> bool:
    """
    Check an absorption law.

    >>> conjunction = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}
    >>> disjunction = {"0": {"0": "0", "1": "1"}, "1": {"0": "1", "1": "1"}}
    >>> absorbs(conjunction, disjunction)
    True
    >>> absorbs(conjunction, conjunction)
    False

    :param table1: a multiplication table of a binary operation
    :param table2: a multiplication table of another binary operation
    :returns: whether the absorption law is true with respect to
        two binary operations
    """
    elements = table1.keys()
    return all(
        table1[one][table2[one][two]] == one
        for one in elements
        for two in elements
    )
def is_left_zero(cayley_table: "CayleyTable", zero: str) -> bool:
    """
    Check left zero.

    >>> table = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}
    >>> is_left_zero(table, "0")
    True
    >>> is_left_zero(table, "1")
    False

    :param cayley_table: a multiplication table of a binary operation
    :param zero: a symbol for the zero
    :returns: whether ``zero`` is a left zero for a Cayley table
    """
    return all(
        cayley_table[zero][element] == zero
        for element in cayley_table.keys()
    )
def is_right_zero(cayley_table: "CayleyTable", zero: str) -> bool:
    """
    Check right zero.

    >>> table = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}
    >>> is_right_zero(table, "0")
    True
    >>> is_right_zero(table, "1")
    False

    :param cayley_table: a multiplication table of a binary operation
    :param zero: a symbol for the zero
    :returns: whether ``zero`` is a right zero for a Cayley table
    """
    return all(
        cayley_table[element][zero] == zero
        for element in cayley_table.keys()
    )
# noqa: D205, D400
"""
Pseudo-weak-:math:`R_0` Algebra
===============================
"""
from residuated_binars.algebraic_structure import BOT, TOP
from residuated_binars.axiom_checkers import (
is_left_identity,
left_distributive,
)
from residuated_binars.bounded_lattice import BoundedLattice
class PseudoWeakR0Algebra(BoundedLattice):
    """
    A representation of a pseudo-weak-:math:`R_0` algebra.

    For more info look `here <https://doi.org/10.1155/2014/854168>`__

    >>> join = {BOT: {BOT: BOT, TOP: TOP}, TOP: {BOT: TOP, TOP: TOP}}
    >>> meet = {BOT: {BOT: BOT, TOP: BOT}, TOP: {BOT: BOT, TOP: TOP}}
    >>> inv = {BOT: BOT, TOP: TOP}
    >>> imp = {BOT: {BOT: TOP, TOP: TOP}, TOP: {BOT: BOT, TOP: TOP}}
    >>> operations = {"imp1": imp, "imp2": imp, "inv1": inv, "inv2": inv,
    ...               "join": join, "meet": meet}
    >>> PseudoWeakR0Algebra("no P1", operations)
    Traceback (most recent call last):
    ...
    ValueError: P1 axiom doesn't hold
    >>> imp = {BOT: {BOT: TOP, TOP: TOP}, TOP: {BOT: TOP, TOP: TOP}}
    >>> operations = {"imp1": imp, "imp2": imp, "inv1": inv, "inv2": inv,
    ...               "join": join, "meet": meet}
    >>> PseudoWeakR0Algebra("no P1", operations)
    Traceback (most recent call last):
    ...
    ValueError: P2 axiom doesn't hold
    """

    # Each ``_check_p*`` helper returns a single space when its axiom holds
    # and a human-readable error message otherwise; ``check_axioms`` joins
    # and strips these, raising only when something non-blank remains.

    def _check_p1(self) -> str:
        """Check axiom P1: contraposition linking the two implications."""
        try:
            for one in self.operations["meet"].keys():
                for two in self.operations["meet"].keys():
                    # x ->1 y must equal ~1(y) ->2 ~1(x)
                    assert (
                        self.operations["imp1"][one][two]
                        == self.operations["imp2"][
                            self.operations["inv1"][two]
                        ][self.operations["inv1"][one]]
                    )
                    # x ->2 y must equal ~2(y) ->1 ~2(x)
                    assert (
                        self.operations["imp2"][one][two]
                        == self.operations["imp1"][
                            self.operations["inv2"][two]
                        ][self.operations["inv2"][one]]
                    )
            return " "
        except AssertionError:
            return "P1 axiom doesn't hold"

    def _check_p2(self) -> str:
        """Check axiom P2: the top is a left identity of both implications."""
        try:
            assert is_left_identity(self.operations["imp1"], TOP)
            assert is_left_identity(self.operations["imp2"], TOP)
            return " "
        except AssertionError:
            return "P2 axiom doesn't hold"

    def _check_p3(self) -> str:
        """Check axiom P3: both implications are monotone in the lattice order.

        ``i -> j`` must be less than or equal to ``(k -> i) -> (k -> j)``
        (order taken from the inherited ``more`` relation).
        """
        try:
            for i in self.operations["join"].keys():
                for j in self.operations["join"].keys():
                    for k in self.operations["join"].keys():
                        smaller = self.operations["imp1"][i][j]
                        greater = self.operations["imp1"][
                            self.operations["imp1"][k][i]
                        ][self.operations["imp1"][k][j]]
                        assert (
                            smaller == greater or smaller in self.more[greater]
                        )
                        smaller = self.operations["imp2"][i][j]
                        greater = self.operations["imp2"][
                            self.operations["imp2"][k][i]
                        ][self.operations["imp2"][k][j]]
                        assert (
                            smaller == greater or smaller in self.more[greater]
                        )
            return " "
        except AssertionError:
            return "P3 axiom doesn't hold"

    def _check_p4(self) -> str:
        """Check axiom P4: both implications left-distribute over join."""
        try:
            assert left_distributive(
                self.operations["imp1"], self.operations["join"]
            )
            assert left_distributive(
                self.operations["imp2"], self.operations["join"]
            )
            return " "
        except AssertionError:
            return "P4 axiom doesn't hold"

    def check_axioms(self) -> None:  # noqa: D102
        super().check_axioms()
        # The two pseudo-involutions must be mutually inverse on the bounds.
        assert (
            self.operations["inv1"][self.operations["inv2"][BOT]] == BOT
            and self.operations["inv2"][self.operations["inv1"][BOT]] == BOT
            and self.operations["inv1"][self.operations["inv2"][TOP]] == TOP
            and self.operations["inv2"][self.operations["inv1"][TOP]] == TOP
        ), "Pseudo-inverse axioms don't hold"
        # Collect per-axiom diagnostics; blank means every axiom holds.
        res = " ".join(
            (
                self._check_p1(),
                self._check_p2(),
                self._check_p3(),
                self._check_p4(),
            )
        ).strip()
        if res != "":
            raise ValueError(res)
# noqa: D205, D400
"""
Residuated Binar
=================
"""
from typing import Dict
from residuated_binars.axiom_checkers import (
left_distributive,
right_distributive,
)
from residuated_binars.lattice import BOT, Lattice
class ResiduatedBinar(Lattice):
    r"""
    A representation of a residuated binar (with involution).
    >>> join = {"0": {"0": "0", "1": "1"}, "1": {"0": "1", "1": "1"}}
    >>> meet = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "1"}}
    >>> mult = {"0": {"0": "0", "1": "0"}, "1": {"0": "0", "1": "0"}}
    >>> const = {"0": {"0": "1", "1": "1"}, "1": {"0": "1", "1": "1"}}
    >>> binar = ResiduatedBinar(
    ...     label="test",
    ...     operations={
    ...         "join": join, "meet": meet, "mult": mult, "over": const,
    ...         "undr": const, "invo": {"0": "1", "1": "0"}
    ...     }
    ... )
    >>> print(binar.latex_mult_table)
    \begin{table}[]
    \begin{tabular}{l|ll}
    $\cdot$ & $0$ & $1$\\\hline
    $0$ & $0$ & $0$ & \\
    $1$ & $0$ & $0$ & \\
    \end{tabular}
    \end{table}
    <BLANKLINE>
    >>> print(binar.markdown_mult_table)
    |*|0|1|
    |-|-|-|
    |**0**|0|0|
    |**1**|0|0|
    <BLANKLINE>
    >>> mult["0"]["0"] = "1"
    >>> ResiduatedBinar("test", {"join": join, "meet": meet, "mult": mult,
    ...     "over": const, "undr": const})
    Traceback (most recent call last):
        ...
    ValueError: multiplication must be distributive over join
    >>> mult["0"]["0"] = "0"
    >>> const["0"]["0"] = "0"
    >>> ResiduatedBinar("test", {"join": join, "meet": meet, "mult": mult,
    ...     "over": const, "undr": const})
    Traceback (most recent call last):
        ...
    ValueError: check residuated binars axioms!
    >>> mult = {"0": {"0": "0", "1": "0"}, "1": {"0": "1", "1": "1"}}
    >>> undr = {"0": {"0": "1", "1": "1"}, "1": {"0": "0", "1": "1"}}
    >>> ResiduatedBinar("test", {"join": join, "meet": meet, "mult": mult,
    ...     "over": mult, "undr": undr})
    Traceback (most recent call last):
        ...
    ValueError: check residuated binars axioms!
    >>> print(binar.mace4_format[:10])
    0 v 0 = 0.
    """
    def check_axioms(self) -> None:  # noqa: D102
        super().check_axioms()
        # multiplication must distribute over join on both sides
        if not left_distributive(
            self.operations["mult"], self.operations["join"]
        ) or not right_distributive(
            self.operations["mult"], self.operations["join"]
        ):
            raise ValueError("multiplication must be distributive over join")
        if not self._check_residuated_binars_axioms():
            raise ValueError("check residuated binars axioms!")
    def _check_residuated_binars_axioms(self) -> bool:
        # Verify the residuation laws relating ``mult`` with the two
        # divisions; the order x <= y is expressed here as x v y == y
        # (or x ^ y == x).
        for one in self.symbols:
            for two in self.symbols:
                # (one / two) * two <= one  and  two * (two \ one) <= one
                if (
                    self.operations["join"][
                        self.operations["mult"][
                            self.operations["over"][one][two]
                        ][two]
                    ][one]
                    != one
                    or self.operations["join"][
                        self.operations["mult"][two][
                            self.operations["undr"][two][one]
                        ]
                    ][one]
                    != one
                ):
                    return False
                for three in self.symbols:
                    # one <= (one * two v three) / two
                    # and two <= one \ (one * two v three)
                    if (
                        self.operations["meet"][one][
                            self.operations["over"][
                                self.operations["join"][
                                    self.operations["mult"][one][two]
                                ][three]
                            ][two]
                        ]
                        != one
                        or self.operations["meet"][two][
                            self.operations["undr"][one][
                                self.operations["join"][
                                    self.operations["mult"][one][two]
                                ][three]
                            ]
                        ]
                        != two
                    ):
                        return False
        return True
    @property
    def operation_map(self) -> Dict[str, str]:  # noqa: D102
        # extend the parent symbol map with the binar-specific operations
        res = super().operation_map
        res.update({"over": "/", "undr": "\\", "mult": "*"})
        return res
    @property
    def latex_mult_table(self) -> str:
        """Return a LaTeX representation of a multiplication table."""
        # header: one 'l' column per symbol plus the row-label column
        table = (
            "\\begin{table}[]\n"
            + "\\begin{tabular}"
            + f"{{l|{''.join((self.cardinality) * 'l')}}}\n"
            + "$"
            + "$ & $".join([r"\cdot"] + self.symbols)
            + "$\\\\\\hline\n"
        )
        for row in self.symbols:
            table += "$" + row + "$ & "
            for col in self.symbols:
                table += "$" + self.operations["mult"][row][col] + "$"
                # NOTE(review): the BOT comparisons are meant to skip the
                # separator after the last column/row, which assumes BOT is
                # the final symbol; the doctest above shows a trailing " & "
                # instead, so BOT apparently never matches here -- confirm.
                if col != BOT:
                    table += " & "
            if row != BOT:
                table += r"\\"
            table += "\n"
        table += "\\end{tabular}\n" + "\\end{table}\n"
        return table
    @property
    def markdown_mult_table(self) -> str:
        """Return a Markdown representation of a multiplication table."""
        # header row with the operation symbol, then the divider row
        table = "|*|" + "|".join(self.symbols) + "|\n"
        table += "|" + (1 + len(self.symbols)) * "-|" + "\n"
        for row in self.symbols:
            table += "|**" + row + "**|"
            for col in self.symbols:
                table += self.operations["mult"][row][col] + "|"
            table += "\n"
        return table
# noqa: D205, D400
"""
Pseudo-:math:`R_0` Algebra
==========================
"""
from residuated_binars.algebraic_structure import TOP
from residuated_binars.pseudo_weak_r0_algebra import PseudoWeakR0Algebra
class PseudoR0Algebra(PseudoWeakR0Algebra):
    """
    A representation of a pseudo-:math:`R_0` algebra.
    For more info, look `here <https://doi.org/10.1155/2014/854168>`__
    >>> from residuated_binars.algebraic_structure import BOT
    >>> imp = {
    ...     BOT: {BOT: TOP, TOP: TOP, "C3": TOP, "C2": TOP},
    ...     TOP: {BOT: BOT, TOP: TOP, "C3": "C3", "C2": "C2"},
    ...     "C3": {BOT: "C2", TOP: TOP, "C3": TOP, "C2": "C3"},
    ...     "C2": {BOT: "C3", TOP: TOP, "C3": TOP, "C2": TOP}
    ... }
    >>> inv = {BOT: TOP, TOP: BOT, "C3": "C2", "C2": "C3"}
    >>> join = {
    ...     BOT: {BOT: BOT, TOP: TOP, "C3": "C3", "C2": "C2"},
    ...     TOP: {BOT: TOP, TOP: TOP, "C3": TOP, "C2": TOP},
    ...     "C3": {BOT: "C3", TOP: TOP, "C3": "C3", "C2": "C3"},
    ...     "C2": {BOT: "C2", TOP: TOP, "C3": "C3", "C2": "C2"}
    ... }
    >>> meet = {
    ...     BOT: {BOT: BOT, TOP: BOT, "C3": BOT, "C2": BOT},
    ...     TOP: {BOT: BOT, TOP: TOP, "C3": "C3", "C2": "C2"},
    ...     "C3": {BOT: BOT, TOP: "C3", "C3": "C3", "C2": "C2"},
    ...     "C2": {BOT: BOT, TOP: "C2", "C3": "C2", "C2": "C2"}
    ... }
    >>> operations = {"imp1": imp, "imp2": imp, "inv1": inv, "inv2": inv,
    ...     "join": join, "meet": meet}
    >>> PseudoR0Algebra("no P5", operations)
    Traceback (most recent call last):
        ...
    ValueError: P5 axiom doesn't hold
    >>> imp = {
    ...     BOT: {BOT: TOP, TOP: TOP, "C3": TOP, "C2": TOP},
    ...     TOP: {BOT: BOT, TOP: TOP, "C3": "C3", "C2": "C2"},
    ...     "C2": {BOT: "C3", TOP: TOP, "C2": TOP, "C3": "C3"},
    ...     "C3": {BOT: "C2", TOP: TOP, "C2": "C2", "C3": TOP}
    ... }
    >>> operations = {"imp1": imp, "imp2": imp, "inv1": inv, "inv2": inv,
    ...     "join": join, "meet": meet}
    >>> PseudoR0Algebra("no P5", operations)
    Traceback (most recent call last):
        ...
    ValueError: P4 axiom doesn't hold
    >>> imp = {
    ...     BOT: {BOT: TOP, TOP: TOP, "C3": TOP, "C2": TOP},
    ...     TOP: {BOT: BOT, TOP: TOP, "C3": "C3", "C2": "C2"},
    ...     "C2": {BOT: "C2", TOP: TOP, "C2": TOP, "C3": TOP},
    ...     "C3": {BOT: "C3", TOP: TOP, "C2": TOP, "C3": TOP}
    ... }
    >>> inv = {BOT: TOP, TOP: BOT, "C3": "C3", "C2": "C2"}
    >>> operations = {"imp1": imp, "imp2": imp, "inv1": inv, "inv2": inv,
    ...     "join": join, "meet": meet}
    >>> PseudoR0Algebra("no P5", operations)
    Traceback (most recent call last):
        ...
    ValueError: P3 axiom doesn't hold
    """
    def check_axioms(self) -> None:  # noqa: D102
        super().check_axioms()
        try:
            # Axiom P5, checked for both implication/inversion pairs over
            # every pair of elements:
            #   (x ->1 y) v ((x ->1 y) ->2 (~1 x v y)) == TOP
            #   (x ->2 y) v ((x ->2 y) ->1 (~2 x v y)) == TOP
            for one in self.operations["meet"].keys():
                for two in self.operations["meet"].keys():
                    # first pairing: imp1/inv1, residuated through imp2
                    assert (
                        self.operations["join"][
                            self.operations["imp1"][one][two]
                        ][
                            self.operations["imp2"][
                                self.operations["imp1"][one][two]
                            ][
                                self.operations["join"][
                                    self.operations["inv1"][one]
                                ][two]
                            ]
                        ]
                        == TOP
                    )
                    # second pairing: imp2/inv2, residuated through imp1
                    assert (
                        self.operations["join"][
                            self.operations["imp2"][one][two]
                        ][
                            self.operations["imp1"][
                                self.operations["imp2"][one][two]
                            ][
                                self.operations["join"][
                                    self.operations["inv2"][one]
                                ][two]
                            ]
                        ]
                        == TOP
                    )
        except AssertionError as error:
            raise ValueError("P5 axiom doesn't hold") from error
# noqa: D205, D400
"""
Parser
=======
"""
import json
import re
from typing import Any, Dict, List, Union
from residuated_binars.algebraic_structure import (
AlgebraicStructure,
CayleyTable,
)
from residuated_binars.lattice import Lattice
from residuated_binars.residuated_binar import ResiduatedBinar
def parse_binary_operation(line: str) -> CayleyTable:
    """
    Parse text describing a binary operation in Isabelle server response.

    :param line: a part of Isabelle server response, representing a binary
        operation
    :returns: a Cayley table
    """
    table: CayleyTable = {}
    # entries look like ``(a, b) := c``; arguments may contain word
    # characters and Isabelle symbol escapes such as ``\<bottom>`` or ``^``
    regex = re.compile(
        r"\(([\w\\\^\<\>]+), ([\w\\\^\<\>]+)\) := ([\w\\\^\<\>]+)"
    )
    # ``finditer`` replaces the previous manual search/position-tracking
    # loop; the greedy character classes guarantee no match can start at
    # the position the old loop skipped, so the results are identical
    for match in regex.finditer(line):
        first, second, value = match.groups()
        table.setdefault(first, {})[second] = value
    return table
def parse_unary_operation(line: str) -> Dict[str, str]:
    """
    Parse text describing a unary operation in Isabelle server response.

    :param line: a part of Isabelle server response, representing an unary
        operation
    :returns: an inner representation of an unary operation
    """
    table: Dict[str, str] = {}
    # entries look like ``a := b``
    regex = re.compile(r"([\w\\\<\>\^]+) := ([\w\\\<\>\^]+)")
    # ``finditer`` replaces the previous manual search/position-tracking
    # loop with identical results (no match can start where the old loop
    # skipped, since the greedy class would have consumed that character)
    for match in regex.finditer(line):
        source, target = match.groups()
        table[source] = target
    return table
def choose_algebraic_structure(
    label: str, operations: Dict[str, Dict[str, Any]]
) -> AlgebraicStructure:
    """
    Decide in which algebraic structure to save the parsed result.

    :param label: a name of that particular algebraic structure example
    :param operations: a dictionary of unary and binary operations
    :returns: an algebraic structure of a concrete type (depending on the
        signature)
    """
    # the sorted operation names form the structure's signature
    signature = sorted(operations.keys())
    if signature == ["join", "meet"]:
        return Lattice(label, operations)
    residuated_signatures = (
        ["join", "meet", "mult", "over", "undr"],
        ["invo", "join", "meet", "mult", "over", "undr"],
    )
    if signature in residuated_signatures:
        return ResiduatedBinar(label, operations)
    # fall back to the generic container for unknown signatures
    return AlgebraicStructure(label, operations)
def isabelle_format_to_algebra(
    isabelle_message: str, label: str
) -> AlgebraicStructure:
    """
    Parse the textual representation of operations to ``AlgebraicStructure``.

    :param isabelle_message: a body of reply from Isabelle server (in JSON)
    :param label: a name of the theory for which we got a reply from server
    :returns: a residuated binar
    """
    # each operation is printed as ``name = (\<lambda>x. _)(<entries>)``
    pattern = re.compile(
        r" (\w+) =\n? +\(\\<lambda>x\. _\)\n? *\(([^\.]+)\)\n?",
        re.DOTALL,
    )
    operations: Dict[str, Union[CayleyTable, Dict[str, str]]] = {}
    found = pattern.search(isabelle_message)
    while found is not None:
        entries = found.group(2)
        # try a binary interpretation first; an empty table means the
        # entries describe a unary operation instead
        parsed: Union[CayleyTable, Dict[str, str]] = parse_binary_operation(
            entries
        )
        if not parsed:
            parsed = parse_unary_operation(entries)
        operations[found.group(1)] = parsed
        # resume the search just after the start of the previous match
        found = pattern.search(isabelle_message, found.start() + 1)
    return choose_algebraic_structure(label, operations)
def isabelle_response_to_algebra(filename: str) -> List[AlgebraicStructure]:
    """
    Read file with replies from ``isabelle`` server and parse them.

    >>> import sys
    >>> if sys.version_info.major == 3 and sys.version_info.minor >= 9:
    ...     from importlib.resources import files
    ... else:
    ...     from importlib_resources import files
    >>> import os
    >>> len(isabelle_response_to_algebra(
    ...     files("residuated_binars")
    ...     .joinpath(os.path.join("resources", "isabelle2.out"))
    ... ))
    6

    :param filename: a name of a file to which all replies from Isabelle server
        were written
    :returns: a list of algebraic structures
    """
    # the relevant reply is the first FINISHED line mentioning a lambda
    # term; its JSON payload starts after the "FINISHED " prefix (9 chars)
    with open(filename, "r", encoding="utf-8") as isabelle_log:
        finished_lines = [
            line
            for line in isabelle_log.readlines()
            if "FINISHED" in line and "lambda" in line
        ]
    nodes = json.loads(finished_lines[0][9:])["nodes"]
    algebras = []
    for node in nodes:
        # keep only messages containing a lambda term; the first one
        # carries the printed model of the theory
        lambda_messages = [
            message["message"]
            for message in node["messages"]
            if "lambda" in message["message"]
        ]
        if lambda_messages:
            theory_label = node["theory_name"].split(".")[1]
            algebras.append(
                isabelle_format_to_algebra(lambda_messages[0], theory_label)
            )
    return algebras
# noqa: D205, D400
"""
Use Nitpick
============
A wrapper ‘do all’ script.
- runs ``generate_theories.py`` which creates a new ``hyp2`` folder
with initial hypotheses templates
- then for each cardinality from 2 to 100 (hard-coded)
- runs ``add_task.py`` which creates a ``task[n]`` folder for a
particular cardinality with a respective task for ``Nitpick`` added
to the templates in ``hyp[n]``
- runs ``check_assumptions.py`` on a ``task[n]`` folder
- runs ``filter_theories.py`` which filter theories with no
counter-examples found to a new folder ``hyp[n+1]``
- if the ``hyp[n+1]`` folder is empty, the script stops (that means
counter-examples were found for all original hypotheses)
"""
import os
from typing import List, Optional
from residuated_binars.add_task import TaskType, add_task
from residuated_binars.check_assumptions import check_assumptions
from residuated_binars.filter_theories import filter_theories
from residuated_binars.generate_theories import independence_check
def use_nitpick(
    max_cardinality: int,
    independent_assumptions: List[str],
    additional_assumptions: List[str],
    check_subset_independence: bool,
    server_info: Optional[str] = None,
) -> None:
    """
    Incrementally search for finite counter-examples.

    :param max_cardinality: maximal cardinality of a model to search for
    :param independent_assumptions: a list of assumption which independence
        we want to check
    :param additional_assumptions: a list of additional assumptions
    :param check_subset_independence: whether to check every assumption from
        the list against all the rest or against any combination of the rest
    :param server_info: an info string of an Isabelle server
    """
    # seed the pipeline with the initial hypotheses templates
    cardinality = 2
    hypotheses_dir = f"hyp{cardinality}"
    independence_check(
        hypotheses_dir,
        independent_assumptions,
        additional_assumptions,
        check_subset_independence,
    )
    # iterate until the cardinality limit is reached or every hypothesis
    # got a counter-example (i.e. the next hypotheses folder is empty)
    while cardinality <= max_cardinality and os.listdir(hypotheses_dir):
        tasks_dir = f"task{cardinality}"
        add_task(hypotheses_dir, tasks_dir, TaskType.NITPICK, cardinality)
        check_assumptions(tasks_dir, server_info)
        cardinality += 1
        hypotheses_dir = f"hyp{cardinality}"
        filter_theories(tasks_dir, hypotheses_dir)
# noqa: D205, D400
"""
Filter Theories
================
- reads from ``isabelle.out`` file from the input directory (this
directory should be an output of ``check_assumption.py`` script)
- filters only those theory files, for which neither finite model was
found, nor the proof (depending on the task type)
- copies filtered theory files from the input directory to another
given directory
"""
import json
import os
import re
import shutil
def filter_theories(source_path: str, target_path: str) -> None:
    """
    Filter theories which have neither a counter-example nor a proof yet.

    Get theory files from an existing folder and copy to another existing
    folder those of them for which neither a finite counter-example nor a
    proof was found.

    :param source_path: where to look for processed theory files; should
        include an ``isabelle.out`` file with server's output
    :param target_path: where to put theory files without proofs or
        counter-examples
    :raises ValueError: if there is no usable FINISHED message in Isabelle
        server response
    """
    if not os.path.exists(target_path):
        os.mkdir(target_path)
    # keep only the final status line of a Nitpick/Sledgehammer run
    with open(
        os.path.join(source_path, "isabelle.out"), "r", encoding="utf-8"
    ) as out_file:
        candidates = [
            line
            for line in out_file.readlines()
            if "FINISHED" in line
            and ("Sledgehammering" in line or "Nitpick" in line)
        ]
    # the previous implementation indexed the list blindly, crashing with
    # IndexError when no line matched; raise the documented error instead
    if not candidates:
        raise ValueError("No FINISHED message found in Isabelle server response")
    final_line = re.match(".*FINISHED (.*)\n?", candidates[0])
    if final_line is None:
        # report the offending line (the old message always printed None)
        raise ValueError(f"Unexpected Isabelle server response: {candidates[0]}")
    # map each theory name (without the "Draft." prefix) to the first
    # prover message describing the outcome for that theory
    results = {
        node["theory_name"][6:]: [
            message["message"]
            for message in node["messages"]
            if any(
                nitpick_message in message["message"]
                for nitpick_message in (
                    "Try this: ",
                    "Timed out",
                    "Nitpick found a potentially spurious counterexample",
                    "Nitpick found a counterexample",
                    "Nitpick found no counterexample",
                )
            )
        ][0]
        for node in json.loads(final_line.group(1))["nodes"]
    }
    # copy only the theories which are still undecided
    for theory_name, outcome in results.items():
        if (
            "Nitpick found no counterexample" in outcome
            or "Timed out" in outcome
        ):
            shutil.copy(
                os.path.join(source_path, theory_name + ".thy"),
                os.path.join(target_path, theory_name + ".thy"),
            )
# residuecontact - A utility package to generate 3D residue distance graph
The residue-contact package provides a set of utilities for generating 3D residue distance graph from PDB files.
## Quick Run
```python
from residuecontact import get_sifts_pdbresidue_to_uniprot_map, build_PDB_residues_connection_graph
import os
spmap = get_sifts_pdbresidue_to_uniprot_map("pdbresiduemapping.txt")
build_PDB_residues_connection_graph(
["1HIP.pdb"], # A list of files, or PDB structures, or structure from PDB bundles
spmap, # A dictionary of PDB residue ID to Uniprot residue ID
10, # The maximum distance between two residues to be put in the graph. Note that generating a full distance matrix is very memory intensive.
"CA", # CA or all. When measuring distance between two residues res-A and res-B, whether to use the distance between the two c-alpha atoms from res-A and res-B, or the shortest distance of all pairs of atoms between res-A and res-B.
"intra", # intra or inter or all.
lambda i: os.path.basename(i).split(".")[0],
output="1HIP_graph.graphml"
)
```
## Detailed usage
To generate a graph, we need (1) a dictionary of PDB residue ID to uniprot residue ID; and (2) the 3D structure files (in PDB format)
### Preparation of PDB residue map
Users can generate their own dictionary, with keys of the form PDBID_CHAINID_RESSEQ and values of the form UNIPROTID_SEQ.
Here we provide two methods to process the map from sifts and alphafold.
```python
from residuecontact import get_sifts_pdbresidue_to_uniprot_map, get_alphafold_pdbresidue_to_uniprot_map
spmap_sifts = get_sifts_pdbresidue_to_uniprot_map("pdbresiduemapping.txt")
spmap_alphafold = get_alphafold_pdbresidue_to_uniprot_map("alphafold2residuemapping.txt")
```
Example of `pdbresiduemapping` file:
```
PDB Chain UniProt MappableResInPDBChainOnUniprotBasis MappableResInPDBChainOnPDBBasis
1HIP A P00260 [38-47,49-81,83-122] [1-10,12-44,46-85]
5UFW A P03372 [309-380,382-416,418-461,465-529,531-535,537-548] [309-380,382-416,418-461,465-529,531-535,537-548]
5UFW B P03372 [306-337,341-380,382-416,418-460,470-529,531-535,537-546] [306-337,341-380,382-416,418-460,470-529,531-535,537-546]
2LQ8 A P29397 [1-41,230-353] [3-43,54-177]
```
Example of `alphafold2residuemapping` file:
```
UniProt Species Gene Structure Fragment_Num Total_Fragments Avg_pLDDT Avg_Confidence PDB_Resi UniProt_Resi Total_Resi N_Very_High Very_High_Resi N_High High_Resi N_Low Low_Resi N_Very_Low Very_Low_Resi
A0A0A7EPL0 ARATH PIAL1 AF-A0A0A7EPL0-F1-model_v1.pdb.gz 1 1 56.94 Low [1-847] [1-847] 847 239 [22,24-40,47-48,51,71-94,117-144,150-156,166-174,177-202,209-247,254-258,277-347,351-356,362-364] 89 [16-21,23,41-46,49-50,52-70,95-101,115-116,145-149,157-162,164-165,175-176,203-208,248-253,259-261,271-276,348-350,357-361,365-366] 31 [1-15,102-107,113-114,163,262-263,268-270,367,382] 488 [108-112,264-267,368-381,383-847]
A0A140JWM8 ARATH C7162 AF-A0A140JWM8-F1-model_v1.pdb.gz 1 1 92.29 Very High [1-473] [1-473] 473 350 [28-39,44-69,73-78,81-104,106,138-189,194-195,197-205,216-217,219-249,258-259,277-278,281-320,324-416,419-448,452-455,458-471] 115 [1-27,40-43,70-72,79-80,105,107-116,125-137,190-193,196,206-215,218,250-257,260-276,279-280,321-323,417-418,449-451,456-457,472-473] 6 [117-119,122-124] 2 [120-121]
```
### Preparation of input structures
For legacy PDB files, one could directly use the files as input.
```python
input_structures = ["1HIP.pdb"]
```
Larger structures are not supported by legacy PDB format, and they were packed in bundle. To read a bundle file, use the following:
```python
structure = PDB_bundle_structure(*(extract_PDB_bundle("7a01-pdb-bundle.tar.gz")))
input_structures = [structure]
```
One could input multiple structures at the same time, and only the shortest distance is reported:
```python
input_structures = ["1HIP.pdb", "4MZI.pdb"]
```
### Graph generation
```python
from residuecontact import build_PDB_residues_connection_graph
build_PDB_residues_connection_graph(
pdbfiles=["1HIP.pdb"], # A list of files, or PDB structures, or structure from PDB bundles
spmap, # A dictionary of PDB residue ID to Uniprot residue ID
10,
"CA",
"all",
pdb_id_func=lambda i: os.path.basename(i).split(".")[0],
residue_subset=None,
extra_residue_filter=None,
output="1HIP_graph.graphml"
)
```
```
pdbfiles: A list of PDB files (or Structures)
spmap: A dictionary to map pdb residue into uniprot residue, with key as PDBID_CHAINID_RESSEQ, value as UNIPROTID_SEQ
max_distance: Maximum distance between any two residues as connected. One can set a large maximum distance to capture a full distance matrix, but generating a full matrix is very memory intensive.
atommode: calpha/ca or all. When measuring distance between two residues res-A and res-B, whether to use the distance between the two c-alpha atoms from res-A and res-B, or the shortest distance of all pairs of available atoms between res-A and res-B. Distance calculated based on all atoms are always smaller than or equal to that based on only calpha atoms.
chainmode: all or inter or intra. When set to Intra, only pairs of residues from the same chain are analyzed. When set to Inter, only pairs of residues from different chains are analyzed. When set to All, all pairs of residues are analyzed.
pdb_id_func: Convert pdbfile to pdb_id. Not required if a list of structures is provided in pdbfiles
residue_subset: A subset of residues to include.
extra_residue_filter: A filter to remove certain residues that meet the criteria. For example, residue with a high uncertainty in its position in the structure could be removed from the analysis to avoid noise.
output: The output graph file in graphml format
```
### Output
The output graph is in graphml format.
Each node has an attribute of ID which correspond to the uniprot residues.
```xml
<node id="P36217_204"/>
```
Each edge has the source and target nodes, plus two additional fields: the source field (d1) identifies the pair of PDB residues with the shortest distance, and the distance field (d0) gives the distance between that residue pair.
```xml
<edge source="P36217_204" target="P36217_80">
<data key="d0">7.443631014932368</data>
<data key="d1">P36217_204:5ZIW_A_171;P36217_80:5ZIW_A_47</data>
</edge>
```
### Graph merging
When multiple structures are used to determine the closest distance between two uniprot residues, the graph generated previously can be merged as one by using the following:
```python
from residuecontact import merge_PDB_residues_connection_graphs
merged_graph = merge_PDB_residues_connection_graphs([graph1, graph2])
```
### Validation
When you want to quickly find out the shortest distances of selected pairs of PDB residues, one could use the following:
```python
from residuecontact import find_PDB_residues_distances_separated_by_models
find_PDB_residues_distances_separated_by_models("1HIP", "1HIP.pdb", pairs, "CA")
```
| /residuecontact-0.0.5.tar.gz/residuecontact-0.0.5/README.md | 0.567937 | 0.778986 | README.md | pypi |
from resif_delivery_stats_plotter.services.report_builders.html import ServiceReportBuilderHtml
from resif_delivery_stats_plotter.errors import NoDataError
from resif_delivery_stats_plotter.factories.network import NetworkFactory
class Report:
    """
    Defines a report about the delivery statistics of a network.
    """
    FORMAT_HTML = 'html'
    FORMAT_PNG = 'png'

    def __init__(self, year=None, network=None, stations=None):
        """
        Initializes a report.

        :param year: The year covered by the report
        :param network: The network covered by the report
        :param stations: An optional explicit list of stations
        """
        self.year = year
        self.network = network
        # Use a None sentinel instead of the previous mutable default
        # argument ('stations=[]'), which shared a single list between
        # every Report instance.
        self._stations = stations if stations is not None else []

    @property
    def stations(self):
        """
        Gets the list of stations of the current network

        :return: A collection of stations
        :rtype: list
        """
        if self._stations:
            return self._stations
        elif self.network:
            return self.network.stations
        # implicitly returns None when neither stations nor network are set

    @stations.setter
    def stations(self, value):
        """
        Sets the list of stations

        :param value: The list of station to set
        """
        self._stations = value

    def build(self, output_format=FORMAT_PNG, unit=None, width=None, height=None, with_plots=True):
        """
        Builds the report

        :param output_format: The output format ('png'|'html')
        :param unit: The unit to use for data plots
        :param width: The width of generated images
        :param height: The height of generated images
        :param with_plots: Include plots
        :return: The output filename of the report
        :rtype: str
        :raises ValueError: If no builder supports the requested format
        """
        if output_format in (self.FORMAT_HTML, self.FORMAT_PNG):
            try:
                return ServiceReportBuilderHtml.build(self, output_format=output_format, unit=unit, width=width, height=height, with_plots=with_plots)
            except NoDataError:
                # TODO: handle the 'no data' case gracefully
                raise
        else:
            # ValueError is a subclass of Exception, so callers catching
            # the previously raised bare Exception keep working
            raise ValueError("Format '%s' is not supported by any builder" % output_format)

    @property
    def networks(self):
        """
        Gets the list of available networks

        :return: The list of available networks
        :rtype: list
        """
        return NetworkFactory.list(year=self.year)
import logging
import os
import pathlib
import shutil
import datetime
from jinja2 import Environment, PackageLoader
from . import ServiceReportBuilderAbstract
from resif_delivery_stats_plotter import __version__
logger = logging.getLogger(__name__)
class ServiceReportBuilderHtml(ServiceReportBuilderAbstract):
    """
    HTML report builder
    """
    # Both flavors produce an HTML page; 'png' embeds static images while
    # 'html' embeds interactive Plotly <div> snippets.
    FORMAT_PNG = 'png'
    FORMAT_HTML = 'html'
    @classmethod
    def build(cls, report, output_format=FORMAT_PNG, unit=None, width=None, height=None, use_local_assets=True, with_plots=True):
        """
        Builds an HTML report

        :param report: The report object
        :param output_format: The plot format (default='png')
        :param unit: The unit in which the amount of data are converted
        :param width: The width of generated images
        :param height: The height of generated images
        :param use_local_assets: Enables the usage of local assets instead of on-line assets
        :param with_plots: Include plots
        :return: The output file path
        :rtype: str
        """
        logger.debug("ServiceReportBuilderHtml.build(%s, %s, %s, %s, %s, %s, %s)" % (report, output_format, unit, width, height, use_local_assets, with_plots))
        jinja_env = Environment(loader=PackageLoader('resif_delivery_stats_plotter'),
                                lstrip_blocks=True)  # , trim_blocks=True)
        # Get output directory path
        output_dirpath = cls.output_dir(report, True)
        # Copy assets if needed (the plotly JS bundle only for 'html')
        if use_local_assets:
            cls.copy_assets(output_dirpath, (output_format == cls.FORMAT_HTML))
        # Render the page content and get the output filename
        output_filename = None
        if output_format == cls.FORMAT_PNG:
            output = cls.render_png(report, jinja_env, unit, width, height, use_local_assets, with_plots)
            output_filename = 'report_png.html'
        elif output_format == cls.FORMAT_HTML:
            output = cls.render_html(report, jinja_env, unit, width, height, use_local_assets, with_plots)
            output_filename = 'report.html'
        else:
            raise Exception("Format '%s' is not supported by any renderer" % output_format)
        # Write the output content to output file
        output_filepath = os.path.join(output_dirpath, output_filename)
        with open(output_filepath, 'w') as output_file:
            output_file.write(output)
        logger.info("Report saved as %s" % output_filepath)
        return output_filename
    @staticmethod
    def render_png(report, jinja_env, unit, width, height, use_local_assets, with_plots):
        """
        Render an HTML report using static PNG plots

        :param report: The report object
        :param jinja_env: The Jinja2 environment
        :param unit: The unit in which the amount of data are converted
        :param width: The width of generated images
        :param height: The height of generated images
        :param use_local_assets: Enables the usage of local assets instead of on-line assets
        :param with_plots: Include plots
        :return: The HTML report content
        :rtype: str
        """
        logger.debug("ServiceReportBuilderHtml.render_png(%s, %s)" % (report, jinja_env))
        params = {
            'report': report,
            'unit': unit,
            'use_local_assets': use_local_assets,
            'use_plotly': False,
            'version': __version__,
            'today': datetime.datetime.utcnow().strftime("%Y-%m-%d at %Hh%M UTC"),
        }
        if with_plots:
            # the PNG plotter writes image files and returns their names,
            # which the template references from <img> tags
            from resif_delivery_stats_plotter.services.plotters.png import ServicePlotterPNG as service_plotter
            params.update({
                'plot_availability_network': service_plotter.plot_network_availability(report.network, report.year, width, height),
                'plot_data_send_yearly': service_plotter.plot_data_send_yearly(report.network, report.year, width, height, unit=unit),
                'plot_data_send_monthly': service_plotter.plot_data_send_monthly(report.network, report.year, width, height, unit=unit),
                'plot_data_stored_yearly': service_plotter.plot_data_stored_yearly(report.network, report.year, width, height, unit=unit),
                'plot_requests_yearly': service_plotter.plot_requests_yearly(report.network, report.year, width, height),
                'plot_requests_monthly': service_plotter.plot_requests_monthly(report.network, report.year, width, height),
                'map_requests_by_country': service_plotter.plot_map_requests_by_country(report.network, report.year, width, height),
                'map_clients_by_country': service_plotter.plot_map_clients_by_country(report.network, report.year, width, height),
                'map_stations': service_plotter.plot_map_network_stations(report.network, report.year, width, height),  # FIXME
            })
        jinja_tpl = jinja_env.get_template('report_html_png.jinja2')
        jinja_out = jinja_tpl.render(**params)
        return jinja_out
    @staticmethod
    def render_html(report, jinja_env, unit, width, height, use_local_assets, with_plots):
        """
        Render an HTML report using dynamic JS plots

        :param report: The report object
        :param jinja_env: The Jinja2 environment
        :param unit: The unit in which the amount of data are converted
        :param width: The width of generated images
        :param height: The height of generated images
        :param use_local_assets: Enables the usage of local assets instead of on-line assets
        :param with_plots: Include plots
        :return: The HTML report content
        :rtype: str
        """
        logger.debug("ServiceReportBuilderHtml.render_html(%s, %s)" % (report, jinja_env))
        params = {
            'report': report,
            'unit': unit,
            'use_local_assets': use_local_assets,
            'use_plotly': True,
            'version': __version__
        }
        if with_plots:
            # the HTML plotter returns plotly <div> snippets that are
            # embedded directly into the rendered page
            from resif_delivery_stats_plotter.services.plotters.html import ServicePlotterHTML as service_plotter
            params.update({
                'plot_availability_network': service_plotter.plot_network_availability(report.network, report.year, width, height, output_format=service_plotter.FORMAT_DIV),
                'plot_data_send_yearly': service_plotter.plot_data_send_yearly(report.network, report.year, width, height, output_format=service_plotter.FORMAT_DIV, unit=unit),
                'plot_data_send_monthly': service_plotter.plot_data_send_monthly(report.network, report.year, width, height, output_format=service_plotter.FORMAT_DIV, unit=unit),
                'map_requests_by_country': service_plotter.plot_map_requests_by_country(report.network, report.year, width, height, output_format=service_plotter.FORMAT_DIV),
                'map_clients_by_country': service_plotter.plot_map_clients_by_country(report.network, report.year, width, height, output_format=service_plotter.FORMAT_DIV),
                'map_stations': service_plotter.plot_map_network_stations(report.network, report.year, width, height, output_format=service_plotter.FORMAT_DIV),
            })
        jinja_tpl = jinja_env.get_template('report_html.jinja2')
        jinja_out = jinja_tpl.render(**params)
        return jinja_out
    @staticmethod
    def copy_assets(output_dir, use_plotly=False):
        """
        Copy assets to output directory

        :param output_dir: The output directory path
        :param use_plotly: Also copy the Plotly JS bundle (needed only by
            the interactive 'html' flavor)
        """
        logger.debug("ServiceReportBuilderHtml.copy_assets(%s, %s)" % (output_dir, use_plotly))
        # List the assets to copy
        assets = [
            'resif_logo.png',
            'bootstrap.min.css',
            'bootstrap.bundle.min.js',
            'jquery-3.5.1.slim.min.js',
            'jquery.dataTables.min.css',
            'jquery.dataTables.min.js',
        ]
        if use_plotly:
            assets.append('plotly-latest.min.js')
        # Get the assets source directory path (package root)
        root_dir = pathlib.Path(__file__).resolve().parent.parent.parent
        # Copy each asset to output directory
        for asset in assets:
            shutil.copyfile(os.path.join(root_dir, 'assets', asset), os.path.join(output_dir, asset))
import logging
import os
import pathlib
import pint
import datetime
import arrow
import math
import geopandas
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as mpatches
import matplotlib.cm as mcolormap
from iso3166 import countries
from staticmap import StaticMap, IconMarker
from resif_delivery_stats_plotter.services.clients.statistics import ServiceClientStatistics as service_stats
from resif_delivery_stats_plotter.services.clients.availabilty import ServiceClientAvailability as service_availability
from . import ServicePlotterAbstract
logger = logging.getLogger(__name__)
class ServicePlotterPNG(ServicePlotterAbstract):
    """
    Static PNG plotter

    Renders network statistics (availability, data volumes, request counts,
    world maps) as static PNG files using matplotlib, geopandas and staticmap.
    """
    # Default raster dimensions (pixels) used when the caller does not
    # provide explicit width/height values.
    DEFAULT_PX_WIDTH = 800
    DEFAULT_PX_HEIGHT = 600
    # Lower bound applied to computed widths so labels and legends stay readable.
    MINIMAL_PX_WIDTH = 380
@classmethod
def _footer_bar(cls, fig, text=None, fontsize=10, pad=5, xpos=20, ypos=7.5, rect_kw = {"facecolor":"grey", "edgecolor":None}, text_kw = {"color":"w"}):
"""
Adds a footer bar to matplotlib plots
:param fig: The matplotlib figure
:param text: The text of the footer bar
:param fontsize: The fontsize (default=10)
:param pad: The padding of the footer bar (default=5)
:param xpos: The X position of the footer bar (default=20)
:param ypos: The Y position of the footer bar (default=7.5)
:param rect_kw: The other parameters of the footer bar
:param text_kw: The other parameters of the footer text
"""
today = datetime.datetime.utcnow()
text = text or "Generated by Résif on %s" % today.strftime("%Y-%m-%d at %Hh%M UTC")
w, h = fig.get_size_inches()
height = ((fontsize+2*pad)/fig.dpi)/h
rect = plt.Rectangle((0, 0), 1, height, transform=fig.transFigure, clip_on=False, **rect_kw)
fig.axes[0].add_patch(rect)
fig.text(xpos/fig.dpi/w, ypos/fig.dpi/h, text, fontsize=fontsize, **text_kw)
fig.subplots_adjust(bottom=fig.subplotpars.bottom+height)
@classmethod
def _colors(cls, colormap=None, number=None, default_colormap='tab20'):
"""
Gets a list of colors from the selected matplotlib colormap
:param colormap: The colormap
:param number: The number of colors to return
:param default_colormap: The default colormap
:return: A list of colors
:rtype: list
"""
logger.debug("ServicePlotterPNG._colors(%s, %s, %s)" % (colormap, number, default_colormap))
colormap = colormap or default_colormap
colors = mcolormap.get_cmap(colormap, number).colors
if number:
logger.debug("Using %i colors from '%s' colormap: %s" % (number, colormap, colors))
else:
logger.debug("Using '%s' colormap" % colormap)
return colors
    @classmethod
    def plot_network_availability(cls, network, year=None, width=None, height=None, vertical=False, instrument=None, dpi=100):
        """
        Plots the annual availability of each station of a network

        :param network: The network object
        :param year: The year (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param vertical: Enables the vertical layout (default=false)
        :param instrument: The instrument code to filter
        :param dpi: The DPI (default=100)
        :return: The plot output file name, or None when no channel has availability data
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_network_availability(%s, %s, %s, %s, %s, %s)" % (network, year, width, height, vertical, instrument))
        year = year or datetime.datetime.now().year
        logger.info("Plotting availability of network %s in %i" % (network.code, year))
        # Get plot data: {"NET.STA.LOC": {quality: [(start, end), ...]}}
        # NOTE(review): 'instrument' is accepted but never used below — confirm
        # whether channel filtering by instrument was intended.
        plot_data = {}
        for station in network.stations:
            for channel in station.representative_channels(year):
                station_data = {}
                qualities = channel.availability(year)
                if qualities:
                    for quality, timespans in qualities.items():
                        station_data[quality] = []
                        for timespan in timespans:
                            # Normalize both timespan bounds to datetime objects
                            station_data[quality].append((arrow.get(timespan[0]).datetime, arrow.get(timespan[1]).datetime))
                if station_data:
                    plot_data["%s.%s.%s" % (network.code, station.code, channel.location_code)] = station_data
        nb_lines = len(plot_data)
        logger.debug("Got %i channels to plot" % nb_lines)
        if nb_lines > 0:
            # Calculate figure dimensions: one 15px line per channel along the trace axis
            line_px = 15
            margin_px_top = 20
            margin_px_right = 10
            if vertical:
                margin_px_left = 100
                margin_px_bottom = 180
                width_px = width or max(cls.MINIMAL_PX_WIDTH, (line_px * nb_lines + margin_px_left + margin_px_right))
                height_px = height or cls.DEFAULT_PX_HEIGHT
            else:
                margin_px_left = 180
                margin_px_bottom = 100
                width_px = width or cls.DEFAULT_PX_WIDTH
                height_px = height or (line_px * nb_lines + margin_px_top + margin_px_bottom)
            # Convert pixel sizes to inches, then to figure-relative axes coordinates
            width_in = width_px / dpi
            height_in = height_px / dpi
            margin_in_left = margin_px_left / dpi
            margin_in_right = margin_px_right / dpi
            margin_in_top = margin_px_top / dpi
            margin_in_bottom = margin_px_bottom / dpi
            x_plot = margin_in_left / width_in
            y_plot = margin_in_bottom / height_in
            w_plot = (width_in - margin_in_left - margin_in_right) / width_in
            h_plot = (height_in - margin_in_bottom - margin_in_top) / height_in
            # Initialize image
            logger.debug("Figure size (Pixels): w=%s, h=%s" % (width_px, height_px))
            fig = plt.figure(figsize=(width_in, height_in))
            ax = plt.axes([x_plot, y_plot, w_plot, h_plot])
            # Initialize colors: one color shared by the 'valid' qualities (Q/M),
            # another shared by the 'raw' qualities (R/D)
            colors = {}
            handles = []
            color = ax._get_lines.get_next_color()
            for quality in (service_availability.QUALITY_Q_CONTROLLED, service_availability.QUALITY_M_MODIFIED):
                colors[quality] = color
            handles.append(mpatches.Patch(color=color, label='Valid'))
            color = ax._get_lines.get_next_color()
            for quality in (service_availability.QUALITY_R_RAW, service_availability.QUALITY_D_INDETERMINATE):
                colors[quality] = color
            handles.append(mpatches.Patch(color=color, label='Raw'))
            # Plot data: one line segment per availability timespan
            if vertical:
                for trace_id in sorted(plot_data.keys()):
                    for quality in plot_data[trace_id]:
                        color = colors.get(quality)
                        trace = (trace_id, trace_id)
                        for segment in plot_data[trace_id][quality]:
                            ax.plot(trace, segment, color=color)
            else:
                for trace_id in sorted(plot_data.keys()):
                    for quality in plot_data[trace_id]:
                        color = colors.get(quality)
                        trace = (trace_id, trace_id)
                        for segment in plot_data[trace_id][quality]:
                            ax.plot(segment, trace, color=color)
            # Configure axis: month ticks (weekday minor ticks) on the time axis
            ax.xaxis.set_tick_params(rotation=90)
            if vertical:
                ax.yaxis.set_major_formatter(mdates.DateFormatter("%Y-%m"))
                ax.yaxis.set_major_locator(mdates.MonthLocator())
                ax.yaxis.set_minor_locator(mdates.WeekdayLocator())
                ax.grid(axis='y')
                ax.invert_xaxis()
            else:
                ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m"))
                ax.xaxis.set_major_locator(mdates.MonthLocator())
                ax.xaxis.set_minor_locator(mdates.WeekdayLocator())
                ax.grid(axis='x')
                ax.invert_yaxis()
            # Add title
            #ax.set_title("%s %i" % (network.code, year), loc='left', fontsize=10)
            # Add legend
            ax.legend(handles=handles, bbox_to_anchor=(1, 1), loc='lower right', borderaxespad=0, ncol=2, frameon=False)
            # Add footer
            cls._footer_bar(fig)
            # Save file
            output_filename = 'plot_network_availability.png'
            output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
            fig.savefig(output_filepath, transparent=True)
            logger.info("Plot saved as %s" % output_filepath)
            return output_filename
        else:
            logger.warning("Unable to find available channels to plot for network %s" % network.code)
    @classmethod
    def plot_data_send_yearly(cls, network, year=None, width=None, height=None, vertical=True, start_year=None, unit=None, colormap=None):
        """
        Plots the amount of data send yearly for a network

        :param network: The network object
        :param year: The last year to plot (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param vertical: Enables the vertical layout (default=True)
        :param start_year: The starting year (clamped to the network's first year)
        :param unit: The unit in which the values are converted
        :param colormap: The colormap to use
        :return: The plot output file name, or None when there is no data to plot
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_data_send_yearly(%s, %s, %s, %s, %s, %s, %s, %s)" % (network, year, width, height, vertical, start_year, unit, colormap))
        year = year or datetime.datetime.now().year
        logger.info("Plotting data send yearly by network %s until %i" % (network.code, year))
        # Register pint units with matplotlib so byte quantities can be plotted directly
        ureg = pint.UnitRegistry()
        ureg.setup_matplotlib(True)
        # Calculate starting year
        if start_year:
            start_year = max(network.year_start, start_year)
        else:
            start_year = network.year_start
        logger.debug('Starting year: %s' % start_year)
        # Get plot data; the 'has_data' latch skips leading years without any data
        plot_data_years = []
        plot_data_dataselect = []
        plot_data_seedlink = []
        has_data = False
        for y in range(start_year, year+1):
            send_dataselect = network.data_send_dataselect(year=y)
            send_seedlink = network.data_send_seedlink(year=y)
            if has_data or send_dataselect or send_seedlink:
                has_data = True
                plot_data_years.append(arrow.get(y, 1, 1).datetime)
                plot_data_dataselect.append((send_dataselect or 0) * ureg.byte)
                plot_data_seedlink.append((send_seedlink or 0) * ureg.byte)
        # Build image
        if plot_data_years:
            logger.debug("Plotted years: %s" % len(plot_data_years))
            bar_width = 0.75 * ureg.year
            fig, ax = plt.subplots()
            # Calculate dimensions of image: grow along the bar axis with the year count
            bar_px = 50
            margin_px_top = 20
            margin_px_right = 10
            margin_px_left_bottom = 100
            if vertical:
                width_px = width or max(cls.MINIMAL_PX_WIDTH, (bar_px * len(plot_data_years) + margin_px_left_bottom + margin_px_right))
                height_px = height or cls.DEFAULT_PX_HEIGHT
            else:
                width_px = width or cls.DEFAULT_PX_WIDTH
                height_px = height or (bar_px * len(plot_data_years) + margin_px_top + margin_px_left_bottom)
            logger.debug("Figure size (Pixels): w=%s, h=%s" % (width_px, height_px))
            fig.set_size_inches(width_px/fig.dpi, height_px/fig.dpi)
            # Initialize colors
            colors = cls._colors(colormap, 2)
            # Configure axis and plot data (stacked bars: seedlink first, dataselect on top)
            ax.xaxis.set_tick_params(rotation=90)
            if vertical:
                ax.yaxis.set_label_text('Data send (%s) for %s' % (unit or 'Bytes', network.code))
                if unit:
                    ax.yaxis.set_units(ureg.Unit(unit))
                ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
                ax.xaxis.set_major_locator(mdates.YearLocator())
                ax.bar(plot_data_years, plot_data_seedlink, bar_width, label='Seedlink', color=colors[0])
                ax.bar(plot_data_years, plot_data_dataselect, bar_width, bottom=plot_data_seedlink, label='Dataselect', color=colors[1])
            else:
                ax.xaxis.set_label_text('Data send (%s) for %s' % (unit or 'Bytes', network.code))
                if unit:
                    ax.xaxis.set_units(ureg.Unit(unit))
                ax.yaxis.set_major_formatter(mdates.DateFormatter("%Y"))
                ax.yaxis.set_major_locator(mdates.YearLocator())
                ax.barh(plot_data_years, plot_data_seedlink, bar_width, label='Seedlink', color=colors[0])
                ax.barh(plot_data_years, plot_data_dataselect, bar_width, left=plot_data_seedlink, label='Dataselect', color=colors[1])
            # Add title
            #ax.set_title(network.code, loc='left')
            # Add legend
            ax.legend(bbox_to_anchor=(1, 1), loc='lower right', borderaxespad=0, ncol=2, frameon=False)
            # Crop image
            plt.tight_layout()
            # Add footer
            cls._footer_bar(fig)
            # Save file
            output_filename = 'plot_data_send_yearly.png'
            output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
            fig.savefig(output_filepath, transparent=True)
            logger.info("Plot saved as %s" % output_filepath)
            return output_filename
    @classmethod
    def plot_data_send_monthly(cls, network, year=None, width=None, height=None, unit=None, colormap=None):
        """
        Plots the amount of data send monthly for a network

        :param network: The network object
        :param year: The year (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param unit: The unit in which the values are converted
        :param colormap: The colormap to use
        :return: The plot output file name
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_data_send_monthly(%s, %s, %s, %s, %s, %s)" % (network, year, width, height, unit, colormap))
        year = year or datetime.datetime.now().year
        logger.info("Plotting data send monthly by network %s in %i" % (network.code, year))
        # Register pint units with matplotlib so byte quantities can be plotted directly
        ureg = pint.UnitRegistry()
        ureg.setup_matplotlib(True)
        # Get plot data: one entry per month, missing values coerced to 0
        plot_data_months = []
        plot_data_dataselect = []
        plot_data_seedlink = []
        for month in range(1, 13):
            send_dataselect = network.data_send_dataselect(year=year, month=month) or 0
            send_seedlink = network.data_send_seedlink(year=year, month=month) or 0
            plot_data_dataselect.append(send_dataselect * ureg.byte)
            plot_data_seedlink.append(send_seedlink * ureg.byte)
            plot_data_months.append(arrow.get(year, month, 1).datetime)
        # Build image
        bar_width = 0.75 * ureg.month
        fig, ax = plt.subplots()
        # Calculate dimensions of image
        height = height or cls.DEFAULT_PX_HEIGHT
        width = width or cls.DEFAULT_PX_WIDTH
        fig.set_size_inches(width/fig.dpi, height/fig.dpi)
        # Initialize colors
        colors = cls._colors(colormap, 2)
        # Configure axis and plot data (stacked bars: seedlink first, dataselect on top)
        ax.yaxis.set_label_text('Data send (%s) for %s in %s' % (unit or 'Bytes', network.code, year))
        if unit:
            ax.yaxis.set_units(ureg.Unit(unit))
        ax.xaxis.set_tick_params(rotation=90)
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%b"))
        ax.xaxis.set_major_locator(mdates.MonthLocator())
        ax.bar(plot_data_months, plot_data_seedlink, bar_width, label='Seedlink', color=colors[0])
        ax.bar(plot_data_months, plot_data_dataselect, bar_width, bottom=plot_data_seedlink, label='Dataselect', color=colors[1])
        # Add title
        #ax.set_title("%s %i" % (network.code, year), loc='left')
        # Add legend
        ax.legend(bbox_to_anchor=(1, 1), loc='lower right', borderaxespad=0, ncol=2, frameon=False)
        # Crop image
        plt.tight_layout()
        # Add footer
        cls._footer_bar(fig)
        # Save file
        output_filename = 'plot_data_send_monthly.png'
        output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
        fig.savefig(output_filepath, transparent=True)
        logger.info("Plot saved as %s" % output_filepath)
        return output_filename
    @classmethod
    def plot_data_send_by_other_networks(cls, network, year=None, width=None, height=None, unit=None):
        """
        Plots a pie chart with the amount of data send yearly for a network against the other networks

        :param network: The network object
        :param year: The year (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param unit: The unit in which the values are converted
        :return: The plot output file name
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_data_send_by_other_networks(%s, %s, %s, %s, %s)" % (network, year, width, height, unit))
        year = year or datetime.datetime.now().year
        logger.info("Plotting ratio of data send for network %s against all networks in %i" % (network.code, year))
        # Get plot data: this network's share vs the remainder of all networks
        # NOTE(review): 'unit' is accepted but never used below — a pie chart only
        # shows ratios, so conversion may be intentionally skipped; confirm.
        send_network = network.data_send(year=year)
        send_all = service_stats.data_send(year=year)
        plot_data_values = [send_network, send_all-send_network]
        plot_data_labels = [network.code, 'Other']
        # Detach (explode) the network's slice for emphasis
        plot_data_explode = [0.1, 0]
        # Build image
        fig, ax = plt.subplots()
        # Calculate dimensions of image
        height = height or cls.DEFAULT_PX_HEIGHT
        width = width or cls.DEFAULT_PX_WIDTH
        fig.set_size_inches(width/fig.dpi, height/fig.dpi)
        # Configure axis and plot data ('equal' keeps the pie circular)
        ax.pie(plot_data_values, labels=plot_data_labels, explode=plot_data_explode, startangle=90)
        ax.axis('equal')
        # Crop image
        plt.tight_layout()
        # Add footer
        cls._footer_bar(fig)
        # Save file
        output_filename = 'plot_data_send_by_other_networks.png'
        output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
        fig.savefig(output_filepath, transparent=True)
        logger.info("Plot saved as %s" % output_filepath)
        return output_filename
    @classmethod
    def plot_data_stored_yearly(cls, network, year=None, width=None, height=None, vertical=True, start_year=None, unit=None, colormap=None):
        """
        Plots the amount of data stored yearly for a network

        :param network: The network object
        :param year: The last year to plot (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param vertical: Enables the vertical layout (default=True)
        :param start_year: The starting year (clamped to the network's first year)
        :param unit: The unit in which the values are converted
        :param colormap: The colormap to use
        :return: The plot output file name, or None when there is no data to plot
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_data_stored_yearly(%s, %s, %s, %s, %s, %s, %s, %s)" % (network, year, width, height, vertical, start_year, unit, colormap))
        year = year or datetime.datetime.now().year
        logger.info("Plotting data stored yearly by network %s until %i" % (network.code, year))
        # Register pint units with matplotlib so byte quantities can be plotted directly
        ureg = pint.UnitRegistry()
        ureg.setup_matplotlib(True)
        # Calculate starting year
        if start_year:
            start_year = max(network.year_start, start_year)
        else:
            start_year = network.year_start
        logger.debug('Starting year: %s' % start_year)
        # Get plot data; the 'has_data' latch skips leading years without any data
        plot_data_years = []
        plot_data_buffer = []
        plot_data_validated = []
        has_data = False
        for y in range(start_year, year+1):
            stored_buffer = network.data_stored_buffer(year=y)
            stored_validated = network.data_stored_validated(year=y)
            if has_data or stored_buffer or stored_validated:
                has_data = True
                plot_data_years.append(arrow.get(y, 1, 1).datetime)
                plot_data_buffer.append((stored_buffer or 0) * ureg.byte)
                plot_data_validated.append((stored_validated or 0) * ureg.byte)
        # Build image
        if plot_data_years:
            logger.debug("Plotted years: %s" % len(plot_data_years))
            bar_width = 0.75 * ureg.year
            fig, ax = plt.subplots()
            # Calculate dimensions of image: grow along the bar axis with the year count
            bar_px = 50
            margin_px_top = 20
            margin_px_right = 10
            margin_px_left_bottom = 100
            if vertical:
                width_px = width or max(cls.MINIMAL_PX_WIDTH, (bar_px * len(plot_data_years) + margin_px_left_bottom + margin_px_right))
                height_px = height or cls.DEFAULT_PX_HEIGHT
            else:
                width_px = width or cls.DEFAULT_PX_WIDTH
                height_px = height or (bar_px * len(plot_data_years) + margin_px_top + margin_px_left_bottom)
            logger.debug("Figure size (Pixels): w=%s, h=%s" % (width_px, height_px))
            fig.set_size_inches(width_px/fig.dpi, height_px/fig.dpi)
            # Initialize colors
            colors = cls._colors(colormap, 2)
            # Configure axis and plot data (stacked bars: validated first, buffer on top)
            ax.xaxis.set_tick_params(rotation=90)
            if vertical:
                ax.yaxis.set_label_text('Data stored (%s) for %s' % (unit or 'Bytes', network.code))
                if unit:
                    ax.yaxis.set_units(ureg.Unit(unit))
                ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
                ax.xaxis.set_major_locator(mdates.YearLocator())
                ax.bar(plot_data_years, plot_data_validated, bar_width, label='Validated', color=colors[0])
                ax.bar(plot_data_years, plot_data_buffer, bar_width, bottom=plot_data_validated, label='Non-validated', color=colors[1])
            else:
                ax.xaxis.set_label_text('Data stored (%s) for %s' % (unit or 'Bytes', network.code))
                if unit:
                    ax.xaxis.set_units(ureg.Unit(unit))
                ax.yaxis.set_major_formatter(mdates.DateFormatter("%Y"))
                ax.yaxis.set_major_locator(mdates.YearLocator())
                ax.barh(plot_data_years, plot_data_validated, bar_width, label='Validated', color=colors[0])
                ax.barh(plot_data_years, plot_data_buffer, bar_width, left=plot_data_validated, label='Non-validated', color=colors[1])
            # Add title
            #ax.set_title(network.code, loc='left')
            # Add legend
            ax.legend(bbox_to_anchor=(1, 1), loc='lower right', borderaxespad=0, ncol=2, frameon=False)
            # Crop image
            plt.tight_layout()
            # Add footer
            cls._footer_bar(fig)
            # Save file
            output_filename = 'plot_data_stored_yearly.png'
            output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
            fig.savefig(output_filepath, transparent=True)
            logger.info("Plot saved as %s" % output_filepath)
            return output_filename
@classmethod
def plot_requests_yearly(cls, network, year=None, width=None, height=None, vertical=True, start_year=None, colormap=None):
"""
Plots the number of requests querying yearly a network
:param network: The network object
:param year: The year
:param width: The width of the output image
:param height: The height of the output image
:param vertical: Enables the vertical layout (default=false)
:param start_year: The starting year
:param colormap: The colormap to use
:return: The plot output file path
:rtype: str
"""
logger.debug("ServicePlotterPNG.plot_requests_yearly(%s, %s, %s, %s, %s)" % (network, year, width, height, colormap))
year = year or datetime.datetime.now().year
logger.info("Plotting requests yearly by network %s until %i" % (network.code, year))
ureg = pint.UnitRegistry()
ureg.setup_matplotlib(True)
# Calculate starting year
if start_year:
start_year = max(network.year_start, start_year)
else:
start_year = network.year_start
logger.debug('Starting year: %s' % start_year)
# Get plot data
plot_data_years = []
plot_data_dataselect = []
plot_data_station = []
has_data = False
for y in range(start_year, year+1):
requests_dataselect = network.requests_dataselect(year=y)
requests_station = network.requests_station(year=y)
if has_data or requests_dataselect or requests_station:
has_data = True
plot_data_years.append(arrow.get(y, 1, 1).datetime)
plot_data_dataselect.append(requests_dataselect or 0)
plot_data_station.append(requests_station or 0)
# Build image
if plot_data_years:
logger.debug("Plotted years: %s" % len(plot_data_years))
bar_width = 0.75 * ureg.year
fig, ax = plt.subplots()
# Calculate dimensions of image
bar_px = 50
margin_px_top = 20
margin_px_right = 10
margin_px_left_bottom = 100
if vertical:
width_px = width or max(cls.MINIMAL_PX_WIDTH, (bar_px * len(plot_data_years) + margin_px_left_bottom + margin_px_right))
height_px = height or cls.DEFAULT_PX_HEIGHT
else:
width_px = width or cls.DEFAULT_PX_WIDTH
height_px = height or (bar_px * len(plot_data_years) + margin_px_top + margin_px_left_bottom)
logger.debug("Figure size (Pixels): w=%s, h=%s" % (width_px, height_px))
fig.set_size_inches(width_px/fig.dpi, height_px/fig.dpi)
# Initialize colors
colors = cls._colors(colormap, 2)
# Configure axis and plot data
ax.xaxis.set_tick_params(rotation=90)
if vertical:
ax.yaxis.set_label_text('Requests for %s' % network.code)
ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.bar(plot_data_years, plot_data_station, bar_width, label='Station', color=colors[0])
ax.bar(plot_data_years, plot_data_dataselect, bar_width, bottom=plot_data_station, label='Dataselect', color=colors[1])
else:
ax.xaxis.set_label_text('Requests for %s' % network.code)
ax.yaxis.set_major_formatter(mdates.DateFormatter("%Y"))
ax.yaxis.set_major_locator(mdates.YearLocator())
ax.barh(plot_data_years, plot_data_station, bar_width, label='Station', color=colors[0])
ax.barh(plot_data_years, plot_data_dataselect, bar_width, left=plot_data_station, label='Dataselect', color=colors[1])
# Add title
#ax.set_title(network.code, loc='left')
# Add legend
ax.legend(bbox_to_anchor=(1, 1), loc='lower right', borderaxespad=0, ncol=2, frameon=False)
# Crop image
plt.tight_layout()
# Add footer
cls._footer_bar(fig)
# Save file
output_filename = 'plot_requests_yearly.png'
output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
fig.savefig(output_filepath, transparent=True)
logger.info("Plot saved as %s" % output_filepath)
return output_filename
    @classmethod
    def plot_requests_monthly(cls, network, year=None, width=None, height=None, colormap=None):
        """
        Plots the number of requests querying monthly a network

        :param network: The network object
        :param year: The year (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param colormap: The colormap to use
        :return: The plot output file name
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_requests_monthly(%s, %s, %s, %s, %s)" % (network, year, width, height, colormap))
        year = year or datetime.datetime.now().year
        logger.info("Plotting requests monthly by network %s in %i" % (network.code, year))
        # Register pint units with matplotlib (used for the bar width in 'month' units)
        ureg = pint.UnitRegistry()
        ureg.setup_matplotlib(True)
        # Get plot data: one entry per month, missing values coerced to 0
        plot_data_months = []
        plot_data_dataselect = []
        plot_data_station = []
        for month in range(1, 13):
            requests_dataselect = network.requests_dataselect(year=year, month=month) or 0
            requests_station = network.requests_station(year=year, month=month) or 0
            plot_data_dataselect.append(requests_dataselect)
            plot_data_station.append(requests_station)
            plot_data_months.append(arrow.get(year, month, 1).datetime)
        # Build image
        bar_width = 0.75 * ureg.month
        fig, ax = plt.subplots()
        # Calculate dimensions of image
        height = height or cls.DEFAULT_PX_HEIGHT
        width = width or cls.DEFAULT_PX_WIDTH
        fig.set_size_inches(width/fig.dpi, height/fig.dpi)
        # Initialize colors
        colors = cls._colors(colormap, 2)
        # Configure axis and plot data (stacked bars: station first, dataselect on top)
        ax.yaxis.set_label_text('Requests for %s in %s' % (network.code, year))
        ax.xaxis.set_tick_params(rotation=90)
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%b"))
        ax.xaxis.set_major_locator(mdates.MonthLocator())
        ax.bar(plot_data_months, plot_data_station, bar_width, label='Station', color=colors[0])
        ax.bar(plot_data_months, plot_data_dataselect, bar_width, bottom=plot_data_station, label='Dataselect', color=colors[1])
        # Add title
        #ax.set_title("%s %i" % (network.code, year), loc='left')
        # Add legend
        ax.legend(bbox_to_anchor=(1, 1), loc='lower right', borderaxespad=0, ncol=2, frameon=False)
        # Crop image
        plt.tight_layout()
        # Add footer
        cls._footer_bar(fig)
        # Save file
        output_filename = 'plot_requests_monthly.png'
        output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
        fig.savefig(output_filepath, transparent=True)
        logger.info("Plot saved as %s" % output_filepath)
        return output_filename
    @classmethod
    def plot_map_requests_by_country(cls, network, year=None, width=None, height=None, colormap='Blues', dpi=100):
        """
        Plots a world map colored by number of requests querying a network

        :param network: The network object
        :param year: The year (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param colormap: The colormap to use (default='Blues')
        :param dpi: The DPI (default=100)
        :return: The plot output file name, or None when no request stats are available
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_map_requests_by_country(%s, %s, %s, %s, %s, %s)" % (network, year, width, height, colormap, dpi))
        year = year or datetime.datetime.now().year
        logger.info("Plotting map of requests by country for network %s in %i" % (network.code, year))
        # Initialize world data: one row per country, with an empty 'requests' column
        world = geopandas.read_file(
            geopandas.datasets.get_path("naturalearth_lowres"),
            ignore_fields=["pop_est", "continent", "gdp_md_est"]
        )
        world['requests'] = math.nan
        # Add plot data
        with_antartica = False
        network_requests = network.requests_by_country(year=year)
        if network_requests:
            for country_code, country_requests in network_requests.items():
                try:
                    country = countries.get(country_code)
                    # NOTE(review): matching on the English short name; some iso3166
                    # names differ from naturalearth's 'name' column — see the
                    # commented iso_a3 variant below. Confirm coverage.
                    #world.at[world.loc[world['iso_a3'] == country.alpha3].index, 'requests'] = int(country_requests)
                    world.at[world.loc[world['name'] == country.name].index, 'requests'] = int(country_requests)
                    if not with_antartica and country.alpha3 == 'ATA':
                        with_antartica = True
                except KeyError:
                    logger.debug("Unable to resolve country code '%s'" % country_code)
                    continue
            # Drop Antarctica from the map unless it actually has requests
            if not with_antartica:
                world.drop(world.loc[world['iso_a3']=='ATA'].index, inplace=True)
            # Initialize figure (no ticks: the map has no meaningful axes)
            height = height or cls.DEFAULT_PX_HEIGHT
            width = width or cls.DEFAULT_PX_WIDTH
            fig = plt.figure(figsize=(width/dpi, height/dpi))
            ax = fig.add_subplot(1, 1, 1)
            ax.tick_params(axis='both', bottom=False, left=False, labelbottom=False, labelleft=False)
            # Plot data: choropleth with grey fill for countries without requests
            world.plot("requests", ax=ax, cmap=colormap, edgecolor="grey",
                       legend=True, legend_kwds={"label":"requests", 'orientation':"horizontal"},
                       missing_kwds={"color": "lightgrey", "label":"Missing Values", 'edgecolor':None}
                       )
            # Add title
            # ax.set_title("Requests for %s in %s" % (network.code, year))
            # Crop image
            #plt.tight_layout()
            # Add footer
            cls._footer_bar(fig)
            # Save file
            output_filename = 'plot_map_requests_by_country.png'
            output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
            fig.savefig(output_filepath, transparent=True)
            logger.info("Plot saved as %s" % output_filepath)
            return output_filename
        else:
            logger.warning("Unable to find requests stats for network %s" % network.code)
    @classmethod
    def plot_map_clients_by_country(cls, network, year=None, width=None, height=None, colormap='Blues', dpi=100):
        """
        Plots a world map colored by number of clients querying a network

        :param network: The network object
        :param year: The year (default: current year)
        :param width: The width of the output image
        :param height: The height of the output image
        :param colormap: The colormap to use (default='Blues')
        :param dpi: The DPI (default=100)
        :return: The plot output file name, or None when no client stats are available
        :rtype: str
        """
        logger.debug("ServicePlotterPNG.plot_map_clients_by_country(%s, %s, %s, %s, %s, %s)" % (network, year, width, height, colormap, dpi))
        year = year or datetime.datetime.now().year
        logger.info("Plotting map of clients by country for network %s in %i" % (network.code, year))
        # Initialize world data: one row per country, with an empty 'clients' column
        world = geopandas.read_file(
            geopandas.datasets.get_path("naturalearth_lowres"),
            ignore_fields=["pop_est", "continent", "gdp_md_est"]
        )
        world['clients'] = math.nan
        # Add plot data
        with_antartica = False
        network_clients = network.clients_by_country(year=year)
        if network_clients:
            for country_code, country_clients in network_clients.items():
                try:
                    country = countries.get(country_code)
                    # NOTE(review): matching on the English short name; some iso3166
                    # names differ from naturalearth's 'name' column — see the
                    # commented iso_a3 variant below. Confirm coverage.
                    #world.at[world.loc[world['iso_a3'] == country.alpha3].index, 'clients'] = int(country_clients)
                    world.at[world.loc[world['name'] == country.name].index, 'clients'] = int(country_clients)
                    if not with_antartica and country.alpha3 == 'ATA':
                        with_antartica = True
                except KeyError:
                    logger.debug("Unable to resolve country code '%s'" % country_code)
                    continue
            # Drop Antarctica from the map unless it actually has clients
            if not with_antartica:
                world.drop(world.loc[world['iso_a3']=='ATA'].index, inplace=True)
            # Initialize figure (no ticks: the map has no meaningful axes)
            height = height or cls.DEFAULT_PX_HEIGHT
            width = width or cls.DEFAULT_PX_WIDTH
            fig = plt.figure(figsize=(width/dpi, height/dpi))
            ax = fig.add_subplot(1, 1, 1)
            ax.tick_params(axis='both', bottom=False, left=False, labelbottom=False, labelleft=False)
            # Plot data: choropleth with grey fill for countries without clients
            world.plot("clients", ax=ax, cmap=colormap, edgecolor="grey",
                       legend=True, legend_kwds={"label":"Clients", 'orientation':"horizontal"},
                       missing_kwds={"color": "lightgrey", "label":"Missing Values", 'edgecolor':None}
                       )
            # Add title
            # ax.set_title("clients for %s in %s" % (network.code, year))
            # Crop image
            #plt.tight_layout()
            # Add footer
            cls._footer_bar(fig)
            # Save file
            output_filename = 'plot_map_clients_by_country.png'
            output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
            fig.savefig(output_filepath, transparent=True)
            logger.info("Plot saved as %s" % output_filepath)
            return output_filename
        else:
            logger.warning("Unable to find clients stats for network %s" % network.code)
@classmethod
def plot_map_network_stations(cls, network, year=None, width=None, height=None, zoom=None):
"""
Plots a world map locating the stations of a network
:param network: The network object
:param year: The year
:param width: The width of the output image
:param height: The height of the output image
:param zoom: The map zoom
:return: The plot output file path
:rtype: str
"""
logger.debug("ServicePlotterPNG.plot_map_network_stations(%s, %s, %s, %s, %s)" % (network, year, width, height, zoom))
year = year or datetime.datetime.now().year
logger.info("Plotting map of stations for network %s in %i" % (network.code, year))
root_path = pathlib.Path(__file__).resolve().parent.parent.parent
marker_filepath = os.path.join(root_path, 'assets', 'triangle-10.png')
width = width or cls.DEFAULT_PX_WIDTH
height = height or cls.DEFAULT_PX_HEIGHT
map = StaticMap(width, height)
for station in network.active_stations(year):
marker = IconMarker((station.longitude, station.latitude), marker_filepath, 0, 0)
map.add_marker(marker)
output_filename = 'plot_map_network_stations.png'
output_filepath = os.path.join(cls.output_dir(network, year), output_filename)
image = map.render(zoom=zoom)
image.save(output_filepath)
logger.info("Plot saved as %s" % output_filepath)
return output_filename | /resif-delivery-stats-plotter-0.1.3.tar.gz/resif-delivery-stats-plotter-0.1.3/resif_delivery_stats_plotter/services/plotters/png.py | 0.762424 | 0.28814 | png.py | pypi |
import logging
from datetime import date
from obspy.clients.fdsn import Client
from obspy.clients.fdsn.header import FDSNNoDataException, FDSNException
from .enums import EnumObspyLevel
from ..errors import NoDataError, ApiError
from ..models.channel import Channel, ChannelTimeslot
logger = logging.getLogger(__name__)
class ChannelFactory:
    """
    Factory to build Channel objects from an ObsPy instance
    """

    @classmethod
    def from_obspy(cls, obspy_channel, with_timeslot=True, year=None):
        """
        Create a Channel object from an ObsPy instance

        :param obspy_channel: The ObsPy instance
        :param with_timeslot: Creates the first ChannelTimeslot object (default=True)
        :param year: The year to filter opened timeslots
        :return: A Channel initialized object
        :rtype: Channel
        """
        logger.debug('ChannelFactory.from_obspy(%s, %s, %s)' % (obspy_channel.code, with_timeslot, year))
        channel = Channel(
            code=obspy_channel.code,
            location=obspy_channel.location_code,
        )
        if with_timeslot:
            timeslot = ChannelTimeslotFactory.from_obspy(obspy_channel)
            # Keep the timeslot only when no year filter is set, or the epoch
            # was open during the requested year
            if year is None or timeslot.is_open(year):
                channel.add_timeslot(timeslot)
        return channel

    @classmethod
    def location_code_from_obspy(cls, obspy_channel):
        """
        Generates a location code from an ObsPy instance

        :param obspy_channel: The ObsPy instance
        :return: The location code of the channel ('<location>.<code>' when a
                 location code is set, otherwise just the channel code)
        :rtype: str
        """
        # The string 'None' can appear when the upstream value was stringified
        if hasattr(obspy_channel, 'location_code') and obspy_channel.location_code not in (None, '', 'None'):
            return "%s.%s" % (obspy_channel.location_code, obspy_channel.code)
        else:
            return obspy_channel.code

    @classmethod
    def list(cls, code_network, code_station, level=EnumObspyLevel.channel, year=None):
        """
        Lists the channels of a station from the FDSN 'station' webservice

        :param code_network: The code of the network to get
        :param code_station: The code of the station to get
        :param level: The ObsPy query level (default='channel')
        :param year: The year to filter opened stations and channels
        :return: A list of Channel initialized objects
        :rtype: list
        :raises NoDataError: When the service has no channel for this station
        :raises ApiError: On any other FDSN service error
        """
        logger.debug('ChannelFactory.list(%s, %s, %s, %s)' % (code_network, code_station, level, year))
        try:
            params = {
                'network': code_network,
                'station': code_station,
                'level': level,
            }
            if year:
                # Restrict the query to epochs overlapping the requested year
                params['starttime'] = date(year, 1, 1)
                params['endtime'] = date(year, 12, 31)
            channels = []
            # Flatten the returned inventory: networks -> stations -> channels
            for obspy_network in Client('RESIF').get_stations(**params):
                for obspy_station in obspy_network.stations:
                    for obspy_channel in obspy_station.channels:
                        channel = cls.from_obspy(obspy_channel, year=year)
                        channels.append(channel)
            return channels
        except FDSNNoDataException:
            logger.warning("No channel found for station %s.%s" % (code_network, code_station))
            raise NoDataError()
        except FDSNException as e:
            raise ApiError(e)
class ChannelTimeslotFactory:
    """
    Factory building ChannelTimeslot objects from ObsPy channel instances
    """

    @classmethod
    def from_obspy(cls, obspy_channel):
        """
        Build a ChannelTimeslot object covering the channel's operating period

        :param obspy_channel: The ObsPy instance
        :return: A ChannelTimeslot initialized object
        :rtype: ChannelTimeslot
        """
        logger.debug('ChannelTimeslotFactory.from_obspy(<%s, %s, %s>)' % (obspy_channel.code, obspy_channel.start_date, obspy_channel.end_date))
        return ChannelTimeslot(
            start_date=obspy_channel.start_date,
            end_date=obspy_channel.end_date,
        )
import logging
import arrow
from datetime import date
from obspy.clients.fdsn import Client
from obspy.clients.fdsn.header import FDSNNoDataException, FDSNException
from .station import StationFactory
from .enums import EnumObspyLevel
from ..errors import NoDataError, ApiError
from ..models.network import Network
logger = logging.getLogger(__name__)
class NetworkFactory:
    """
    Factory to builds Network objects from an ObsPy instance
    """

    @classmethod
    def from_obspy(cls, obspy_network, level, channel_year=None):
        """
        Create a Network object from an ObsPy instance

        :param obspy_network: The ObsPy instance
        :param level: The ObsPy query level
        :param channel_year: The year to filter opened channels
        :return: A Network initialized object
        :rtype: Network
        """
        logger.debug('NetworkFactory.from_obspy(%s, %s, %s)' % (obspy_network.code, level, channel_year))
        network = Network(
            code=obspy_network.code,
            start_date=obspy_network.start_date,
            end_date=obspy_network.end_date,
        )
        # Stations are only present in the inventory when queried at
        # 'station' or 'channel' level
        if level in (EnumObspyLevel.station, EnumObspyLevel.channel):
            for obspy_station in obspy_network.stations:
                network.add_station(StationFactory.from_obspy(obspy_station, level, channel_year, obspy_network.code))
        return network

    @classmethod
    def factory(cls, code_network, level=EnumObspyLevel.station, channel=None, channel_year=None):  # FIXME: '?HZ,?NZ'
        """
        Gets a Network object from its code

        Note: only the first network of the returned inventory is used.

        :param code_network: The code of the network to get
        :param level: The ObsPy query level (default='station')
        :param channel: The channel code to filter
        :param channel_year: The year to filter opened channels
        :return: A Network initialized object
        :rtype: Network
        :raises NoDataError: when the network is unknown to the FDSN service
        :raises ApiError: on any other FDSN error
        """
        logger.debug('NetworkFactory.factory(%s, %s, %s, %s)' % (code_network, level, channel, channel_year))
        try:
            params = {
                'network': code_network,
                'level': level,
                'channel': channel,
            }
            inventory = Client('RESIF').get_stations(**params)
            return cls.from_obspy(inventory[0], level, channel_year)
        except FDSNNoDataException:
            logger.warning("Network %s not found" % code_network)
            raise NoDataError()
        except FDSNException as e:
            raise ApiError(e)

    @classmethod
    def list(cls, level=EnumObspyLevel.network, channel=None, year=None, month=None):
        """
        List the available networks from ObsPy

        :param level: The ObsPy query level (default='network')
        :param channel: The channel code to filter
        :param year: The year to filter opened networks
        :param month: The month to filter opened networks (only applied when
                      'year' is also given; ignored otherwise)
        :return: The list the available networks from ObsPy
        :rtype: list
        :raises NoDataError: when no network matches
        :raises ApiError: on any other FDSN error
        """
        logger.debug('NetworkFactory.list(%s, %s, %s, %s)' % (level, channel, year, month))
        try:
            params = {
                'level': level,
                'channel': channel,
            }
            if year and month:
                # Query window: first to last day of the requested month.
                # (Fixed: the former 'month or 1' was dead code, month is
                # always truthy inside this branch.)
                date_start = date(year, month, 1)
                date_end = arrow.get(date_start).shift(months=1).shift(days=-1).format('YYYY-MM-DD')
                params['starttime'] = date_start
                params['endtime'] = date_end
            elif year:
                params['starttime'] = date(year, 1, 1)
                params['endtime'] = date(year, 12, 31)
            networks = []
            for item in Client('RESIF').get_stations(**params):
                networks.append(cls.from_obspy(item, level))
            return networks
        except FDSNNoDataException:
            logger.warning("No network found")
            raise NoDataError()
        except FDSNException as e:
            raise ApiError(e)
import logging
from six import string_types
LOG = logging.getLogger(__name__)
class PAMPluginInterface:
    """
    Abstract base class that every PAM (Privileged Access Management)
    app-config plugin must subclass; it defines the methods each plugin
    is required to implement.
    """

    # Names of the protected secrets shared by all PAM plugins
    PAM_VERIFY_SERVER_CERT = "PAM_VERIFY_SERVER_CERT"
    PAM_ADDRESS = "PAM_ADDRESS"
    PAM_APP_ID = "PAM_APP_ID"

    def __init__(self, protected_secrets_manager, key):
        """
        Subclasses must override this constructor. They should keep a
        reference to ``protected_secrets_manager`` (used to retrieve
        protected secrets, mostly PAM credentials) and to ``key``, the
        app.config dict key at which the plugin is positioned.

        :param protected_secrets_manager: obj for retrieving protected secrets
        :type protected_secrets_manager: resilient.app_config.ProtectedSecretsManager
        :param key: dict key indicating the level at which this position is situated
        :type key: str
        """
        raise NotImplementedError("Cannot instantiate object of type {0}".format(type(self)))

    def get(self, plain_text_value, default=None):
        """
        Retrieve a value from the external PAM provider.

        ``plain_text_value`` is the raw app.config value: for
        ``password=^PASS.WORD`` it will be "^PASS.WORD". Strip the leading
        "^" — typically with
        ``plain_text_value.lstrip(constants.PAM_SECRET_PREFIX)`` — then
        parse the remainder into whatever your PAM solution's API expects.
        Any secrets needed to authenticate should be read through the
        protected_secrets_manager saved by the constructor.

        :param plain_text_value: plain text value from app.config (starts with "^")
        :type plain_text_value: str
        :param default: value to return if item is not found in PAM
        :type default: str
        """
        raise NotImplementedError("Implementation for PAM Interface 'get()' method is required")

    def selftest(self):
        """
        Validate the plugin configuration. This should 1) ensure every
        required property is set in protected secrets and 2) ensure the
        endpoint accepts the credentials (usually via some "log in" API
        call). On failure, return as useful a reason as possible.

        :return: (True|False, reason) where True means success and False
            means failure with the reason for the failure
        :rtype: tuple(bool, str)
        """
        raise NotImplementedError("Implementation for PAM Interface 'selftest()' method is required")
def get_verify_from_string(str_val):
    """
    Translate the raw 'verify' value read from app.config into the form
    accepted by ``requests.request(..., verify=<>)``: True, False, or a
    path to a certificate file.

    :param str_val: value in app.config
    :type str_val: str
    :return: value readable by ``requests.request(..., verify=<>)``
    :rtype: bool|str
    """
    # Anything that isn't a string (including None) means "use the default"
    if not isinstance(str_val, string_types):
        return True
    lowered = str_val.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    # Otherwise assume it is a path to a certificate file
    return str_val
from requests import Session
from typing import Union, Dict
from .exceptions import UnsupportedProxyType
def proxy_to_dict(proxy: str) -> Dict[str, str]:
    """Convert a proxy given as "ip:port" or "ip:port:login:password" into
    the mapping format expected by requests' ``proxies`` argument.

    Args:
        proxy (str): proxy to convert

    Returns:
        Dict[str, str]: proxy in json format

    Raises:
        UnsupportedProxyType: if the proxy is in a bad format

    Example:
        >>> from resilenter_caller import proxy_to_dict
        >>>
        >>> proxy_to_dict("123:123")
        {'http': 'http://123:123', 'https': 'http://123:123'}
        >>> proxy_to_dict("123:123:login:password")
        {'http': 'http://login:password@123:123/', 'https': 'http://login:password@123:123/'}
    """
    parts = proxy.replace('\n', '').split(':')
    if len(parts) == 2:
        url = 'http://{}'.format(proxy)
    elif len(parts) == 4:
        host, port, login, password = parts
        url = 'http://{}:{}@{}:{}/'.format(login, password, host, port)
    else:
        raise UnsupportedProxyType("Proxy must be in the format ip:port or ip:port:login:password")
    return {'http': url, 'https': url}
def update_session_proxy(session: Session, proxy: Union[str, Dict[str, str]]) -> Union[Dict[str, str], bool]:
    """Updates the proxy of a session.

    If ``proxy`` is a string ("ip:port" or "ip:port:login:password") it is
    first converted with :func:`proxy_to_dict`; if it is already a mapping
    it is applied as-is. (Bug fix: a dict proxy was previously accepted but
    never applied to the session.)

    Args:
        session (Session): session to change
        proxy (Union[str, Dict[str, str]]): proxy to set

    Returns:
        Union[dict, bool]: the proxy that was applied

    Raises:
        UnsupportedProxyType: if a proxy string is in a bad format
            (propagated from :func:`proxy_to_dict`)

    Example:
        >>> from requests import Session
        >>> from resilient_caller import update_session_proxy
        >>>
        >>> session = Session()
        >>> update_session_proxy(session, "123:123")
        >>> session.proxies
        {'http': 'http://123:123', 'https': 'http://123:123'}
        >>> update_session_proxy(session, {'http': 'http://123:123'})
        >>> session.proxies
        {'http': 'http://123:123', 'https': 'http://123:123'}
    """
    if isinstance(proxy, str):
        # proxy_to_dict raises on malformed input, so no falsy-result
        # check is needed here
        session.proxies.update(proxy_to_dict(proxy))
    elif isinstance(proxy, dict):
        session.proxies.update(proxy)
    return proxy
from requests import Session
from typing import Union, Dict
from .exceptions import UnsupportedProxyType
def proxy_to_dict(proxy: str) -> Dict[str, str]:
    """Convert a proxy given as "ip:port" or "ip:port:login:password" into
    the mapping format expected by requests' ``proxies`` argument.

    Args:
        proxy (str): proxy to convert

    Returns:
        Dict[str, str]: proxy in json format

    Raises:
        UnsupportedProxyType: if the proxy is in a bad format

    Example:
        >>> from resilenter_caller import proxy_to_dict
        >>>
        >>> proxy_to_dict("123:123")
        {'http': 'http://123:123', 'https': 'http://123:123'}
        >>> proxy_to_dict("123:123:login:password")
        {'http': 'http://login:password@123:123/', 'https': 'http://login:password@123:123/'}
    """
    parts = proxy.replace('\n', '').split(':')
    if len(parts) == 2:
        url = 'http://{}'.format(proxy)
    elif len(parts) == 4:
        host, port, login, password = parts
        url = 'http://{}:{}@{}:{}/'.format(login, password, host, port)
    else:
        raise UnsupportedProxyType("Proxy must be in the format ip:port or ip:port:login:password")
    return {'http': url, 'https': url}
def update_session_proxy(session: Session, proxy: Union[str, Dict[str, str]]) -> Union[Dict[str, str], bool]:
    """Updates the proxy of a session.

    If ``proxy`` is a string ("ip:port" or "ip:port:login:password") it is
    first converted with :func:`proxy_to_dict`; if it is already a mapping
    it is applied as-is. (Bug fix: a dict proxy was previously accepted but
    never applied to the session.)

    Args:
        session (Session): session to change
        proxy (Union[str, Dict[str, str]]): proxy to set

    Returns:
        Union[dict, bool]: the proxy that was applied

    Raises:
        UnsupportedProxyType: if a proxy string is in a bad format
            (propagated from :func:`proxy_to_dict`)

    Example:
        >>> from requests import Session
        >>> from resilenter_caller import update_session_proxy
        >>>
        >>> session = Session()
        >>> update_session_proxy(session, "123:123")
        >>> session.proxies
        {'http': 'http://123:123', 'https': 'http://123:123'}
        >>> update_session_proxy(session, {'http': 'http://123:123'})
        >>> session.proxies
        {'http': 'http://123:123', 'https': 'http://123:123'}
    """
    if isinstance(proxy, str):
        # proxy_to_dict raises on malformed input, so no falsy-result
        # check is needed here
        session.proxies.update(proxy_to_dict(proxy))
    elif isinstance(proxy, dict):
        session.proxies.update(proxy)
    return proxy
import shelve
import time
import queue
import random
import string
import math
import logging
import requests
from typing import Optional, Text, Union
from datetime import datetime
from .exceptions import DataTypeError
from collections import namedtuple
# Lightweight description of a SQL table column, as extracted from
# information_schema: name, mapped Python type, ordinal position,
# nullability and precision (max chars / numeric / datetime precision).
ColumnDescription = namedtuple("ColumnDescription",
                               ["col_name", "data_type", "ordinal_position",
                                "is_nullable", "precision"])
logger = logging.getLogger(__name__)
# Mapping from SQL type names to the Python types accepted for them,
# keyed by database flavour (only "postgres" is covered for now).
sql_datatypes_map = {
    "postgres": {
        "boolean": bool,
        "character varying": str,
        "varchar": str,
        "character": str,
        "char": str,
        "text": str,
        "real": float,
        "double precision": float,
        "decimal": float,
        "numeric": float,
        "money": float,
        "integer": int,
        "smallint": int,
        "bigint": int,
        "smallserial": int,
        "serial": int,
        "bigserial": int,
        "timestamp": datetime,
        "timestamp without time zone": datetime,
        "interval": str,
        "cidr": str,
        "inet": str,
        "macaddr": str,
        "macaddr8": str,
        "USER-DEFINED": str,
        "ARRAY": list
    }
}
def generate_rand_name() -> str:
    """Generate a random name of the form "exporters_XXXXXX" where XXXXXX
    are 6 random characters (uppercase ASCII letters or digits).

    Returns:
        str: the generated name
    """
    suffix = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
    return f"exporters_{suffix}"
def is_able_to_connect(url: Optional[Text] = None) -> bool:
    """Check reachability of an endpoint with a short HTTP GET request.

    Args:
        url (str): an URL (with its schema); defaults to
            "https://www.google.com" when None

    Returns:
        bool: False when the request fails with a connection error or a
        timeout, True otherwise.
    """
    target = "https://www.google.com" if url is None else url
    try:
        requests.get(target, timeout=.5)
    except (requests.ConnectionError, requests.Timeout):
        return False
    return True
def _stringify_sql(value):
if isinstance(value, str):
return f"'{value}'"
elif value is None:
return "NULL"
else:
return str(value)
def _transform_data_for_sql_query(data: Union[dict, tuple]):
generator = enumerate(data) if isinstance(data, tuple) else data.items()
for k, v in generator:
if isinstance(v, bool):
data[k] = "true" if v else "false"
if isinstance(v, datetime):
data[k] = str(v)
if isinstance(v, float):
if v == math.inf:
data[k] = 'Infinity'
elif v == -math.inf:
data[k] = '-Infinity'
elif math.isnan(v):
data[k] = 'NaN'
if isinstance(v, list):
data[k] = str(v).replace("[", "{").replace("]", "}")
columns = None
if isinstance(data, dict):
columns = ",".join(data.keys())
values = ",".join([f"'{val}'" if isinstance(val, str) else _stringify_sql(val)
for val in data.values()])
values = values.replace("None", "NULL")
return columns, values
def _describe_postgres_column(col: tuple):
    """Build a ColumnDescription from an information_schema.columns row.

    Input column is a tuple of the form:
    (table_name, column_name, data_type, ordinal_position, is_nullable,
    character_maximum_length, numeric_precision, datetime_precision)
    """
    # Use the first precision field that is set: character length first,
    # then numeric precision, falling back to datetime precision
    precision = col[5] or col[6] or col[7]
    try:
        data_type = sql_datatypes_map["postgres"][col[2]]
    except KeyError:
        logger.error(f"Data type not found in mapping: {col[2]}")
        data_type = None
    return ColumnDescription(col[1], data_type, col[3], col[4] == 'YES', precision)
def _validate_data_for_sql_table(data: dict, table: dict):
"""Validates data based on a table's schema.
`table` is a dictionary where keys are column names and values are of the type
`resilient_exporters.utils.ColumnDescription`.
"""
for key, val in data.items():
if val is None:
if not table[key].is_nullable:
raise DataTypeError(message=f"Column '{key}' is not nullable, but \
value provided is None.")
elif not isinstance(val, table[key].data_type):
raise DataTypeError(message=f"Invalid data type for '{key}'.")
elif isinstance(val, str):
if isinstance(table[key].precision, int):
if len(val) > table[key].precision:
raise DataTypeError(message=f"String of chars too long for '{key}'. \
It must be {table[key][3]} chars maximum.")
return
class _DataStore:
    """FIFO buffer for data awaiting (re-)export, backed either by an
    in-memory queue.Queue or by an on-disk shelve file, with a maximum
    capacity.

    NOTE(review): __instantiated is declared but never incremented here, and
    __used_filenames is shared by all instances and never cleaned up, so a
    shelf filename can never be reused within a process — confirm intended.
    """

    # Class-level bookkeeping shared across every instance
    __instantiated = 0
    __used_filenames = []

    def __new__(cls, *args, **kwargs):
        # Refuse to reuse a shelf file already claimed by another instance.
        # NOTE(review): only catches shelf_filename passed as a keyword; a
        # positional value bypasses this check — confirm intended.
        if "shelf_filename" in kwargs.keys():
            if kwargs["shelf_filename"] in cls.__used_filenames:
                raise ValueError(f"File {kwargs['shelf_filename']} is already \
                                 being used.")
        return super(_DataStore, cls).__new__(cls)

    def __init__(self,
                 use_memory: bool = True,
                 shelf_filename: Optional[Text] = None,
                 max_size: int = 100 * 100 * 100,
                 *args, **kwargs):
        """Create the store.

        :param use_memory: buffer in RAM when True, in a shelve file otherwise
        :param shelf_filename: shelf file name; a random one is generated
            when None
        :param max_size: maximum number of items held (default 1,000,000)
        """
        super().__init__(*args, **kwargs)
        self.__use_memory = use_memory
        self.__filename = generate_rand_name() \
                          if shelf_filename is None \
                          else shelf_filename
        self.__used_filenames.append(self.__filename)
        self.__size = 0
        self.max_size = max_size
        # Exactly one backend is active at a time; the other stays None
        self.__queue = queue.Queue() if use_memory else None
        self.__shelf = None if use_memory else shelve.open(self.__filename)

    @property
    def size(self):
        # Current number of stored items
        return self.__size

    @property
    def use_memory(self) -> bool:
        # True when buffering in memory, False when buffering on disk
        return self.__use_memory

    @use_memory.setter
    def use_memory(self, new_val: bool) -> bool:
        # Switching backends migrates all pending items to the other side
        if self.__use_memory and new_val is False:
            # memory -> shelf
            self.export_queue_to_shelf()
            self.__use_memory = new_val
            self.__queue = None
        elif self.__use_memory is False and new_val:
            # shelf -> memory
            self.__queue = self.import_queue_from_shelf()
            self.__use_memory = new_val
            self.__shelf.close()
            self.__shelf = None
        else:
            print(f"WARNING - use_memory is already set to {new_val}")
        # NOTE(review): a property setter's return value is discarded by
        # Python — this return has no effect.
        return new_val

    def put(self, data) -> bool:
        """Store one item; returns False (without storing) when full."""
        if self.size >= self.max_size:
            # Cannot save more data
            return False
        if self.use_memory:
            self.__put_in_memory(data)
        else:
            self.__put_in_shelf(data)
        self.__size += 1
        return True

    def __put_in_memory(self, data):
        self.__queue.put(data)

    def __put_in_shelf(self, data):
        # Timestamp string keys; assumed to preserve insertion order when
        # iterating the shelf later — TODO confirm shelve key ordering
        self.__shelf[str(time.time())] = data

    def get(self):
        """Remove and return the oldest stored item.

        NOTE(review): raises a bare Exception when empty — a more specific
        exception type would be easier for callers to catch.
        """
        if self.size <= 0:
            raise Exception("No saved data left.")
        if self.use_memory:
            data = self.__get_from_memory()
        else:
            data = self.__get_from_shelf()
        self.__size -= 1
        return data

    def __get_from_memory(self):
        return self.__get_from_memory_queue() if False else self.__queue.get()

    def __get_from_shelf(self):
        # Pop the first key the shelf yields
        generator = iter(self.__shelf.keys())
        try:
            key = next(generator)
        except StopIteration:
            return None
        else:
            # NOTE(review): assumes stored items support .copy() — confirm
            res = self.__shelf[key].copy()
            del self.__shelf[key]
            return res

    def export_queue_to_shelf(self):
        """Drain the in-memory queue into a (new) shelf file."""
        self.__shelf = shelve.open(self.__filename, "n")
        while not self.__queue.empty():
            data = self.__queue.get()
            self.__shelf[str(time.time())] = data

    def import_queue_from_shelf(self):
        """Load every shelved item into a fresh in-memory queue."""
        q = queue.Queue()
        generator = iter(self.__shelf.keys())
        for key in generator:
            q.put(self.__shelf[key])
        return q

    def __len__(self):
        return self.size

    def __iter__(self):
        return self

    def __next__(self):
        # Iterating the store consumes it: each yielded item is popped
        if self.size:
            return self.get()
        raise StopIteration

    def __del__(self):
        # Best-effort close of the shelf file on garbage collection
        if self.__shelf:
            self.__shelf.close()
import logging
from typing import Text, Union
from ..exporters import Exporter, ExportResult
from ..utils import _validate_data_for_sql_table, \
_transform_data_for_sql_query, \
_describe_postgres_column
from ..exceptions import MissingConfigError, \
InvalidConfigError
logger = logging.getLogger(__name__)
try:
import psycopg2
except ModuleNotFoundError:
logger.error("""Module psycopg2 not available. Install using:
pip install resilient-exporters[postgres]""")
raise
class PostgreSQLExporter(Exporter):
    """Exporter for PostgreSQL.

    Args:
        conn_string (str): a libpq connection string; alternative to the
            individual host/port/user/password/database parameters
        target_host (str): hostname of the PostgreSQL server
        database (str): name of the database to connect to
        target_port (int): server port (default 5432)
        username (str): user to authenticate as
        password (str): password for the user
        default_table (str): table used by ``send`` when none is given
        **kwargs : the keyword arguments to pass down to parent class Exporter

    Raises:
        InvalidConfigError: if it cannot retrieve the server information, which
            is likely due an invalid configuration of the target.

    .. admonition:: Example

        .. code-block:: python

            from resilient_exporters.exporters import PostgreSQLExporter

            exporter = PostgreSQLExporter(target_host="myserver.domain.net",
                                          username="username",
                                          password="my-password",
                                          database="profiles",
                                          default_table="scientists")

            data = {"name": "Richard Feynman",
                    "age": 69}

            exporter.send(data)
    """
    def __init__(self,
                 conn_string: Text = None,
                 target_host: Text = None,
                 database: Text = None,
                 target_port: int = 5432,
                 username: Text = None,
                 password: Text = None,
                 default_table: Text = None,
                 **kwargs):
        assert conn_string or (target_host and target_port), \
               "Either a connection string or target host is needed."
        super(PostgreSQLExporter, self).__init__(**kwargs)

        # A full connection string takes precedence over individual fields
        if conn_string:
            self.__conn = psycopg2.connect(conn_string)
        else:
            self.__conn = psycopg2.connect(host=target_host,
                                           port=target_port,
                                           user=username,
                                           password=password,
                                           dbname=database)
        self.__cur = self.__conn.cursor()
        self.__default_table = default_table

        # List tables in the database
        try:
            self.__cur.execute("SELECT * FROM pg_catalog.pg_tables \
                                WHERE schemaname != 'pg_catalog' \
                                AND schemaname != 'information_schema';")
            self.__all_tables = {}
            for row in self.__cur.fetchall():
                self.__all_tables[row[1]] = {}
        except Exception as e:
            # NOTE(review): uses logging.error instead of the module-level
            # 'logger' used elsewhere — confirm intended
            logging.error(e)
            raise InvalidConfigError(self, "Cannot retrieve database info. \
                                            Is the configuration valid? \
                                            Does the user have read rights?")

        # Get information on columns for each table
        for tabname in self.__all_tables.keys():
            self.__cur.execute(f"SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE, \
                                 ORDINAL_POSITION, IS_NULLABLE, \
                                 CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, \
                                 DATETIME_PRECISION FROM \
                                 information_schema.columns \
                                 WHERE table_name='{tabname}';")
            for col in self.__cur.fetchall():
                self.__all_tables[tabname][col[1]] = _describe_postgres_column(col)

    def send(self,
             data: Union[dict, tuple],
             table: Text = None,
             upsert_on: Union[Text, tuple, list] = None) -> ExportResult:
        """Inserts data into a table. Reuses default database and
        tables names, if provided at initialisation.

        Args:
            data (Union[dict, tuple]): a dict or tuple representing the
                document to insert into the collection. If a dict, the
                keys must be the column names. If a tuple, there must be
                as many elements as there are columns in the table.
            table (str): name of the target table. If `None`, will use
                the default value set at initialisation. Default is `None`.
            upsert_on (Union[Text, tuple, list]): upsert on given columns.
                If `None`, it will not do an upsert. Default is `None`.

        Returns:
            ExportResult: the result in the form (Object, True) if successful,
            (None, False) otherwise.

        Raises:
            MissingConfigError: if it cannot find a database and/or collection
                in the arguments and default values.
        """
        if table is None:
            if self.__default_table is None:
                raise MissingConfigError(self, "No table given by argument \
                                               nor default table configured.")
            table = self.__default_table

        if table in self.__all_tables.keys():
            # Validates the data, raises errors in case something is wrong
            _validate_data_for_sql_table(data, self.__all_tables[table])

            columns, values = _transform_data_for_sql_query(data)
            # NOTE(review): the query is built by f-string interpolation —
            # values are not parameterized/escaped, an SQL injection risk
            # when data comes from untrusted sources
            if isinstance(data, dict):
                query = f"INSERT INTO {table}({columns}) VALUES({values})"
            else:
                query = f"INSERT INTO {table} VALUES({values})"

            if upsert_on:
                # Normalize upsert_on to a comma-joined column list
                if isinstance(upsert_on, str):
                    upsert_on = [upsert_on]
                if isinstance(upsert_on, list) \
                   or isinstance(upsert_on, tuple):
                    tmp = ""
                    for item in upsert_on:
                        # NOTE(review): substring test — 'id' also matches
                        # 'uid'; comparing against columns.split(",") would
                        # be exact — confirm
                        assert item in columns, \
                               f"{item} is not a column of the table {table}"
                        tmp += item + ","
                    upsert_on = tmp[:-1]
                else:
                    raise InvalidConfigError("upsert_on type not supported")

                query += f" ON CONFLICT ({upsert_on}) DO UPDATE SET"
                for colname in columns.split(","):
                    if colname == upsert_on:
                        continue
                    query += f" {colname} = EXCLUDED.{colname},"
                query = query[:-1]  # removes last ',' character

            query += ";"
            logger.debug(f"Final query: {query}")
            try:
                # NOTE(review): 'table' is passed as psycopg2's vars argument
                # although the query holds no placeholders — confirm this
                # shouldn't simply be execute(query)
                self.__cur.execute(query, table)
                success = bool(self.__cur.rowcount)
                self.__conn.commit()
                return ExportResult(None, success)
            except psycopg2.Error as e:
                logging.error(e)
                return ExportResult(None, False)
        else:
            raise InvalidConfigError(self, f"Table {table} does not exist in \
                                            database. Provide the name of \
                                            an existing table")
import logging
from typing import Optional, Text, Iterable
from resilient_exporters.exporters import Exporter, ExportResult
from resilient_exporters.exceptions import MissingModuleError, \
MissingConfigError, \
InvalidConfigError
logger = logging.getLogger(__name__)
try:
import elasticsearch
except ModuleNotFoundError:
logger.error("""Elasticsearch not available. Install using:
pip install resilient-transmitter[elastic]""")
raise MissingModuleError
class ElasticSearchExporter(Exporter):
    """Exporter for ElasticSearch.

    Args:
        target_ip (str): an IP address of a ElasticSearch server.
        target_port (int): the port to connect to. Default to 9300.
        username (str): the username for authentication.
        password (str): the password as plain text for authentication.
            Use an environement variable for security.
        cluster_hosts (Iterable[Text]): cluster of hosts, passed to ES's client
            application.
        cloud_id (str): cloud id used to connect to a Elastic Cloud server.
            A username and password is most likely required to be
            able to connect.
        api_key (str): a base64 encoded token to authenticate to an
            ElasticSearchserver.
        sniff_on_start (bool): see Elasticsearch documentation.
        default_index (str): a default index to use when ``send`` is called. If
            None, an index will have to be provided as an argument
            when calling ``send``.
        use_ssl (bool): enable TLS for the connection.
        ssl_certfile (str): path to a client certificate file.
        ssl_ca_certs (str): path to the CA bundle used to verify the server.
        **kwargs : the keyword arguments to pass down to parent class Exporter

    .. admonition:: Warning

        If ``target_ip`` is provided, it will supercede ``cluster_hosts``.
    """
    def __init__(self,
                 target_ip: Text = None,
                 target_port: int = 9300,
                 username: Text = None,
                 password: Text = None,
                 cluster_hosts: Iterable[Text] = None,
                 cloud_id: Text = None,
                 api_key: Text = None, # base 64 encoded token
                 sniff_on_start: bool = True,
                 default_index: Optional[Text] = None,
                 use_ssl: bool = False,
                 ssl_certfile: Text = None,
                 ssl_ca_certs: Text = None,
                 **kwargs):
        # NOTE(review): 9300 is Elasticsearch's transport port; the HTTP
        # client usually targets 9200 — confirm this default.
        super(ElasticSearchExporter, self).__init__(**kwargs)
        self.target_ip = target_ip
        self.target_port = target_port
        self.cluster_hosts = cluster_hosts
        self.cloud_id = cloud_id
        self.api_key = api_key
        self.sniff_on_start = sniff_on_start
        self.default_index = default_index
        self.use_ssl = use_ssl
        self.ssl_certfile = ssl_certfile
        self.ssl_ca_certs = ssl_ca_certs

        # Need to provide an address
        if self.target_ip is None \
           and self.cluster_hosts is None \
           and self.cloud_id is None:
            logger.error("No target address provided.")
            raise ValueError

        # Build the keyword arguments forwarded to the Elasticsearch client,
        # adding only the options that were actually configured.
        # (Note: this rebinds the local name 'kwargs'; the constructor's
        # **kwargs were already consumed by super().__init__ above.)
        kwargs = {}
        if username and password:
            kwargs["http_auth"] = (username, password)
            #kwargs["scheme"] = "https"
        if self.cloud_id:
            kwargs["cloud_id"] = self.cloud_id
        if self.api_key:
            kwargs["api_key"] = self.api_key
        if self.sniff_on_start:
            kwargs["sniff_on_start"] = self.sniff_on_start
        if self.use_ssl:
            kwargs["use_ssl"] = self.use_ssl
        if self.ssl_certfile:
            kwargs["client_cert"] = self.ssl_certfile
        if self.ssl_ca_certs:
            kwargs["ca_certs"] = self.ssl_ca_certs

        # A single host (target_ip) supersedes the cluster_hosts list
        hosts = None
        if self.target_ip:
            hosts = {"host": self.target_ip}
            if self.target_port:
                hosts["port"] = self.target_port
        elif self.cluster_hosts:
            hosts = self.cluster_hosts

        if hosts:
            self.__client = elasticsearch.Elasticsearch(hosts, **kwargs)
        else:
            # Cloud-id / api-key only configuration
            self.__client = elasticsearch.Elasticsearch(**kwargs)

    @property
    def client(self) -> elasticsearch.Elasticsearch:
        """The Elasticsearch client. It cannot be replaced."""
        return self.__client

    def send(self, data: dict, index: Optional[Text] = None) -> ExportResult:
        """Indexes the data into an ElasicSearch index.

        Args:
            data (dict): the data, as a dict, to index.
            index (str): the index name. If `None`, it uses the default value
                provided at initialisation.

        Returns:
            ExportResult: (Object, True) if successful, (None, False) otherwise

        Raises:
            MissingConfigError: if no index is found.
            InvalidConfigError: if cannot send data because of a configuration
                issue (authentication or other type of issues).
        """
        if index is None:
            index = self.default_index
        if index is None:
            raise MissingConfigError(self, "No index found.")

        try:
            res = self.__client.index(index=index, body=data)
            return ExportResult(res, True)
        except elasticsearch.exceptions.ConnectionError:
            # Transient network failure: fall through and report a failed
            # result instead of raising — presumably so the caller can
            # retry later (TODO confirm against base Exporter behaviour)
            logger.warning("elasticsearch.exceptions.ConnectionError")
        except elasticsearch.exceptions.RequestError:
            # Malformed request: not retryable, surfaced as a config error
            logger.error("elasticsearch.exceptions.RequestError")
            raise InvalidConfigError(self,
                                     "elasticsearch.exceptions.RequestError")
        return ExportResult(None, False)
import pathlib
import logging
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Union, Optional, List, Iterable, Text, Any
from resilient_exporters.exporters import Exporter, ExportResult
from resilient_exporters.exceptions import InvalidConfigError
from resilient_exporters.utils import is_able_to_connect
logger = logging.getLogger(__name__)
class ExporterPool(Exporter):
"""Enables pooling of exporters for improved efficiency and performance.
All the exporters will be managed by the pool, including the saving of
unsent data, with only one ``send`` call to be used. It also offers a
multithreading option to run the exporters' ``send`` functions in parallel,
to speed up the execution of ``ExporterPool.send``.
Args:
exporters (Iterable[Exporter]): a list of
``resilient_exporters.exporters.Exporter``.
transform (Callable): a function to be invoked at each ``send`` call on
the passed data. It must return data or exceptions will be raised.
num_threads (int): the number of threads to use for ``send`` calls.
Must be greater than 1. If 1, multithreading is disabled.
Default to 1.
wait_for_result (bool): value to decide if the instance has to wait for
the result of ``send`` calls or not when multithreading is enabled.
manual_reexport (bool): if True, the user is responsible to call the
function ``send_unsent_data`` when appropriate. If False, the
instance will manage that automatically by assessing the necessity
to call the function at each ``send`` call; the criterias are 1)
there's unsent data, 2) there's an Internet connection. Default to
False.
.. admonition:: Note
If using parallelism, the default behaviour is to wait for the results
of all calls. One can disable this behaviour with `wait_for_result` set
to ``False``, and ``ExporterPool.send`` will be then non-blocking, but
will return ``None``.
Raises:
InvalidConfigError: if ``num_threads`` is < 1.
.. admonition:: Example
.. code-block:: python
import resilient_exporters as rex
exporter1 = rex.exporters.FileExporter("local_file.txt")
exporter2 = rex.exporters.FileExporter("/path/to/network/file.txt")
pool = ExporterPool([exporter1, exporter2], num_threads=2)
line = "A string to be written in a file"
pool.send(line)
Attributes:
num_threads (int): number of threads used by the instance.
wait_for_result (bool): value to decide if the instance has to wait for
the results of ``send`` calls or not when multithreading is enabled
"""
__futures = []
__instantiated = 0
def __init__(self,
             exporters: Optional[Iterable[Exporter]],
             transform: Optional[Callable] = None,
             num_threads: int = 1,
             wait_for_result: bool = True,
             use_memory: bool = True,
             manual_reexport: bool = False,
             *,
             tmp_file: Union[Text, pathlib.Path, None] = None,
             save_unsent_data: bool = True):
    """Initialises the pool and registers the given exporters.

    Args:
        exporters (Iterable[Exporter]): exporters to manage, or None.
        transform (Callable): function applied to data at each ``send``.
        num_threads (int): number of threads for ``send``; must be >= 1.
        wait_for_result (bool): whether ``send`` blocks on thread results.
        use_memory (bool): whether unsent data is kept in memory.
        manual_reexport (bool): if True, the caller triggers
            ``send_unsent_data`` itself.
        tmp_file: optional path used to persist unsent data.
        save_unsent_data (bool): whether failed exports are saved.

    Raises:
        InvalidConfigError: if ``num_threads`` is < 1.
    """
    # @TODO: name: Optional[Text] = None
    super(ExporterPool, self).__init__(transform=transform,
                                       use_memory=use_memory,
                                       tmp_file=tmp_file,
                                       manual_reexport=manual_reexport,
                                       save_unsent_data=save_unsent_data)

    if num_threads < 1:
        raise InvalidConfigError(self, "num_threads must be >= 1; use \
num_threads=1 to disable multithreading.")

    self.num_threads = num_threads
    self.wait_for_result = wait_for_result

    self.__exporters = {}
    if exporters is not None:
        for exporter in exporters:
            # The pool centralises unsent-data storage: exporters share the
            # pool's datastore, the pool runs the transform once, and when
            # the pool saves unsent data the exporters must not also save.
            exporter._replace_datastore(self._datastore)
            if self._run_transform:
                exporter._run_transform = False
            exporter._save_unsent_data = not self._save_unsent_data
            self.__exporters[exporter.name] = exporter

    # Bug fix: ``self.__instantiated += 1`` only created an *instance*
    # attribute, so the class-level counter stayed at 0 and every pool was
    # named "exporterpool_0". Increment the class attribute instead.
    ExporterPool.__instantiated += 1
    self.name = f"exporterpool_{ExporterPool.__instantiated}"
@property
def exporters(self) -> dict:
    """Dictionary mapping exporter names to the pooled exporters."""
    contained = self.__exporters
    return contained
def add_exporter(self, exporter: Exporter) -> None:
    """Adds an exporter to the pool. Use this function to add an exporter
    after the pool has been initialised. It removes the responsibility to
    run the `transform` method from the exporter, and to save unsent data.

    Args:
        exporter (Exporter): an exporter.
    """
    exporter.use_memory = self.use_memory
    if self._run_transform:
        exporter._run_transform = False
    # Bug fix: ``self.save_unsent_data`` is a *method*, so the previous
    # ``not self.save_unsent_data`` always evaluated to False. Use the
    # boolean flag ``self._save_unsent_data``, as done in __init__.
    exporter._save_unsent_data = not self._save_unsent_data
    # NOTE(review): unlike __init__, this does not call
    # exporter._replace_datastore(self._datastore) -- confirm whether
    # late-added exporters should share the pool's datastore too.
    self.__exporters[exporter.name] = exporter
def send(self, data: Any, **kwargs) -> List[ExportResult]:
    """Runs the `send` method of all its exporters. If the pool's
    `num_threads` attribute is > 1, it will execute all the calls in
    separate threads.

    .. admonition:: Note
        The key arguments passed at the call of the method will be passed
        down to all the exporters. Make sure they all have different
        keywords.

    Args:
        data (Any): the data to export.
        **kwargs (Any): the keyword arguments to pass down to the
            exporters' `send` methods.

    Returns:
        List[ExportResult]: a list of the exporters' results.
    """
    results = []
    if self.num_threads <= 1:
        for exporter in self:
            results.append(exporter.send(data, **kwargs))
    else:
        # use multithreading
        futures = []
        with ThreadPoolExecutor(max_workers=self.num_threads) as executor:
            for exporter in self:
                future = executor.submit(exporter.send, data, **kwargs)
                futures.append(future)

                # Bug fix: the callback previously captured ``exporter`` by
                # closure (late binding), so by the time any callback ran it
                # saw the *last* exporter of the loop. Binding the current
                # exporter as a default argument freezes it per iteration.
                def save_if_failed(future, exporter=exporter):
                    # NOTE(review): relies on a falsy result to signal
                    # failure -- confirm ExportResult defines truthiness;
                    # a plain namedtuple would always be truthy here.
                    if not future.result():
                        self.save_unsent_data(data, kwargs, exporter.name)

                future.add_done_callback(save_if_failed)

        if self.wait_for_result:
            results += [f.result() for f in futures]
        else:
            self.__futures += futures

    return results
def _process_result(self,
                    results: Iterable[ExportResult],
                    data: Any,
                    kwargs: dict) -> Union[bool, List[bool]]:
    """Post-processes the results of a broadcast ``send`` call.

    Saves the data for later re-export when some or all exporters failed,
    and opportunistically re-sends previously unsent data when everything
    succeeded and a connection is available.

    :param results: results returned by the exporters' ``send`` calls
    :param data: the data that was sent
    :param kwargs: the keyword arguments that were passed to ``send``
    :return: the (possibly extended) list of results
    """
    summed_res = sum([r.successful for r in results])
    if summed_res == len(results):
        # all expeditions have been successful
        if not self.manual_reexport \
            and is_able_to_connect(self.TEST_URL) \
            and self.has_unsent_data():
            logger.info("Attempt to send previously unsent data.")
            # NOTE(review): this concatenates ExportResult items with
            # whatever send_unsent_data() returns -- confirm callers
            # expect a possibly heterogeneous list here.
            return results + self.send_unsent_data()
    elif summed_res == 0 and self._save_unsent_data:
        # all have failed: save once under the pool's own name
        self.save_unsent_data(data, kwargs, self.name)
    else:
        # mixed results: save separately for each exporter that failed
        for exporter, res in zip(self, results):
            if not res.successful and self._save_unsent_data:
                self.save_unsent_data(data, kwargs, exporter.name)
    return results
def send_unsent_data(self) -> List[ExportResult]:
    """Tries to send the previously saved, unsent data.

    Returns:
        List[ExportResult]: list of the results of the export jobs.
    """
    self.__is_sending_unsent_data = True
    # Bug fix: ``send`` only accepts (data, **kwargs); the exporter name
    # stored with each entry was previously passed as a second positional
    # argument, raising TypeError on every call. Each broadcast returns a
    # list, so the results are flattened into a single list of
    # ExportResult (matching the declared return type and the
    # concatenation done in _process_result).
    results = [result
               for d in self._datastore
               for result in self.send(d["data"], **d["kwargs"])]
    self.__is_sending_unsent_data = False
    return results
def __len__(self) -> int:
    """Number of exporters currently registered in the pool."""
    return len(self.exporters)
def __iter__(self):
    """Starts a fresh iteration over a snapshot of the pooled exporters."""
    self.__exporters_as_list = list(self.exporters.values())
    self.__iterator_count = 0
    return self
def __next__(self) -> Optional[Exporter]:
    """Returns the next exporter, raising StopIteration when exhausted."""
    if self.__iterator_count < len(self):
        res = self.__exporters_as_list[self.__iterator_count]
        self.__iterator_count += 1
        return res
    # Bug fix: the snapshot list was deleted unconditionally, so calling
    # __next__ again on an already-exhausted iterator raised
    # AttributeError instead of StopIteration.
    try:
        del self.__exporters_as_list
    except AttributeError:
        pass
    raise StopIteration
def __del__(self):
    """Drains any still-pending futures before the pool is reclaimed.

    When ``wait_for_result`` is False, ``send`` stores its futures in
    ``self.__futures`` instead of blocking; this destructor waits on them
    so that no export is silently dropped at shutdown.
    NOTE(review): blocking (and possibly raising) inside __del__ happens
    at unpredictable GC / interpreter-shutdown time -- confirm this is
    acceptable for users of the pool.
    """
    if not self.wait_for_result:
        _ = [f.result() for f in self.__futures]
        del _
    del self.__futures
import urllib
import logging
from typing import Text
from resilient_exporters import utils
from resilient_exporters.exporters import Exporter, ExportResult
from resilient_exporters.exceptions import MissingConfigError, \
InvalidConfigError, \
MissingModuleError, \
ExportError
logger = logging.getLogger(__name__)
try:
import _mssql
except ModuleNotFoundError:
logger.error("""Module pymssql not available. Install using:
pip install resilient-exporters[sqlserver]""")
raise
except ImportError as e:
logger.error(e)
logger.error("""Is FreeTDS installed in the system?""")
raise
class SQLServerExporter(Exporter):
    """Exporter for Microsoft SQL Server, Azure SQL DB and MySQL.

    Args:
        target_host (str): hostname of the database server.
        database (str): name of the database to use.
        target_port (int): TCP port of the server. Defaults to 1433.
        username (str): user to authenticate as.
        password (str): password for ``username``.
        default_table (str): table used by ``send`` when none is given.
        **kwargs : the keyword arguments to pass down to parent's class Exporter

    Raises:
        InvalidConfigError: if it cannot retrieve the server information, which
            is likely due to an invalid configuration of the target.

    .. admonition:: Example

        .. code-block:: python

            import os
            from resilient_exporters.exporters import SQLServerExporter

            exporter = SQLServerExporter(target_host="myserver.domain.net",
                                         username="username",
                                         password="my-password",
                                         database="profiles",
                                         default_table="scientists")

            data = {"name": "Richard Feynman",
                    "age": 69}

            exporter.send(data)
    """
    def __init__(self,
                 target_host: Text,
                 database: Text,
                 target_port: int = 1433,
                 username: Text = None,
                 password: Text = None,
                 default_table: Text = None,
                 **kwargs):
        super(SQLServerExporter, self).__init__(**kwargs)

        # Bug fix: ``target_port`` was accepted but never forwarded to the
        # connection, so non-default ports were silently ignored.
        self.__conn = _mssql.connect(server=target_host,
                                     port=target_port,
                                     user=username,
                                     password=password,
                                     database=database)

        self.__default_table = default_table

        # List tables in the database
        try:
            self.__conn.execute_query("SELECT * FROM INFORMATION_SCHEMA.TABLES")
            self.__all_tables = {}
            for row in self.__conn:
                self.__all_tables[row["TABLE_NAME"]] = {}
        # Bug fix: the exception class is spelled MSSQLDatabaseException;
        # the previous "MSSQLDataBaseException" does not exist and would
        # itself have raised AttributeError.
        except _mssql.MSSQLDatabaseException as e:
            # Bug fix: use the module logger, not the logging root.
            logger.error(e)
            raise InvalidConfigError(self, "Cannot retrieve database info. Is \
the configuration valid? Does the user have read rights?")

        # Get information on columns for each table; precision is taken from
        # whichever of the three INFORMATION_SCHEMA precision columns applies.
        for table_name in self.__all_tables.keys():
            self.__conn.execute_query(f"""SELECT COLUMN_NAME, DATA_TYPE, ORDINAL_POSITION, IS_NULLABLE, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, DATETIME_PRECISION FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='{table_name}'""")
            for col in self.__conn:
                if col["CHARACTER_MAXIMUM_LENGTH"]:
                    precision = col["CHARACTER_MAXIMUM_LENGTH"]
                elif col["NUMERIC_PRECISION"]:
                    precision = col["NUMERIC_PRECISION"]
                else:
                    # Bug fix: was misspelled "precison", which left
                    # ``precision`` unbound (NameError) or stale for
                    # datetime columns.
                    precision = col["DATETIME_PRECISION"]
                col_info = (col["DATA_TYPE"], col["ORDINAL_POSITION"], col["IS_NULLABLE"], precision)
                self.__all_tables[table_name][col["COLUMN_NAME"]] = col_info

    def send(self,
             data: dict,
             table: Text = None) -> ExportResult:
        """Inserts data into a table. Reuses the default table name, if
        provided at initialisation.

        Args:
            data (dict): a dict of column name -> value to insert.
            table (str): name of the target table. If `None`, will use
                the default value. Default is `None`.

        Returns:
            ExportResult: the result in the form (None, True) if successful.

        Raises:
            MissingConfigError: if no table is given and no default table
                is configured.
            InvalidConfigError: if the table does not exist in the database.
        """
        if table is None:
            if self.__default_table is None:
                logger.error("""No table name given and no default table
                configured.""")
                raise MissingConfigError(self, """No table given by
                    argument nor default table configured. Please
                    provide a table name.""")
            table = self.__default_table

        if table in self.__all_tables.keys():
            # Validates the data, raises errors in case something is wrong
            utils.validate_data_for_sql_table(data, self.__all_tables[table])

            # Security/bug fix: values were previously interpolated directly
            # into the SQL text (injection risk), and ``table`` was passed as
            # execute_non_query's *params* argument, which expects
            # substitution parameters. Use %s placeholders with the values
            # tuple. ``table`` itself is safe to interpolate because it was
            # checked against the schema read at __init__; the column names
            # are checked by validate_data_for_sql_table above.
            columns = ",".join(data.keys())
            placeholders = ",".join(["%s"] * len(data))
            self.__conn.execute_non_query(
                f"INSERT INTO {table}({columns}) VALUES({placeholders})",
                tuple(data.values()))
            return ExportResult(None, True)
        else:
            logger.error(f"Invalid table name provided: {table}")
            raise InvalidConfigError(self, f"""Table {table} does not exist in
                                    database. Provide the name of
                                    an existing table""")
from .common import get_incident_tabs
import six
import logging
LOG = logging.getLogger(__name__)
TABS_LABEL = "step_label"
UI_TAB_ELEMENT = "tab"
UI_TAB_FIELD_TYPE = "incident"
class RequiredTabFields(type):
    """
    Metaclass that validates user-defined subclasses of Tab at
    class-creation time: every subclass must define the attributes listed
    in REQUIRED_TAB_ATTRIBUTES, or an AttributeError is raised as soon as
    the class body is executed.
    """
    REQUIRED_TAB_ATTRIBUTES = ["UUID", "NAME", "SECTION", "CONTAINS"]

    def __init__(cls, name, bases, attrs):
        # The base class itself is created with no bases and is exempt
        # from the check -- only subclasses must provide the attributes.
        if not bases:
            return
        missing = [attr for attr in cls.REQUIRED_TAB_ATTRIBUTES
                   if getattr(cls, attr, None) is None]
        if missing:
            raise AttributeError(
                "{} is missing from class definition of a Tab".format(missing[0]))
class Tab(six.with_metaclass(RequiredTabFields)):
    """
    Base class to be subclassed in integrations to create and update tabs
    related to those integrations.

    Subclasses must define the UUID, NAME, SECTION and CONTAINS attributes
    (enforced by the RequiredTabFields metaclass):

    - UUID: a constant string identifying the tab uniquely in all UIs.
      Generate one with ``uuid.uuid4()`` in a Python shell.
    - NAME: the name of the tab as it should appear in the UI.
    - SECTION: the integration's section in the app.config; can contain a
      ``ui_lock`` to prevent changes being made to the tab.
    - CONTAINS: list of UI elements (Field, Datatable) with api names to
      be listed in the tab.

    Example usage:

    .. code-block:: python

        class QRadarTab(UITab):
            UUID = "abcdefg"
            NAME = "QRadar Tab"
            SECTION = "fn_qradar_integration"

            CONTAINS = [
                Field("api_name"),
                Datatable("api_name")
            ]

        create_tab(QRadarTab)
    """
    UUID = None
    NAME = None
    SECTION = None
    CONTAINS = None

    @classmethod
    def exists(cls, client):
        """True if this tab already exists in Resilient."""
        tabs = get_incident_tabs(client)
        return cls.exists_in(tabs)

    @classmethod
    def exists_in(cls, tabs):
        """True if this tab is present in the given list of tab DTOs."""
        return cls.get_from_tabs(tabs) is not None

    @classmethod
    def get_from_tabs(cls, tabs):
        """Return this tab's DTO from ``tabs``, or None if absent."""
        for tab in tabs:
            if tab.get("predefined_uuid") == cls.UUID:
                return tab
        return None

    @classmethod
    def as_dto(cls):
        """Serialise the tab definition to the layout DTO format."""
        if not hasattr(cls, "SHOW_IF"):
            cls.SHOW_IF = []
        fields = []
        if cls.CONTAINS:
            fields = [field.as_dto() for field in cls.CONTAINS]
        return {
            "step_label": cls.NAME,
            "fields": fields,
            "show_if": cls.SHOW_IF,
            "element": UI_TAB_ELEMENT,
            "field_type": UI_TAB_FIELD_TYPE,
            "predefined_uuid": cls.UUID,
            "show_link_header": False
        }

    @classmethod
    def get_missing_conditions(cls, tabs):
        """SHOW_IF conditions required here but absent from the deployed tab."""
        if not hasattr(cls, "SHOW_IF") or not cls.exists_in(tabs):
            return None
        deployed = cls.get_from_tabs(tabs).get("show_if", [])
        return [condition for condition in cls.SHOW_IF
                if condition not in deployed]

    @classmethod
    def get_missing_fields(cls, tabs):
        """
        Given all the tabs, find which fields required by ``cls`` are
        missing from the deployed tab.
        """
        if not cls.exists_in(tabs):
            return None
        deployed_fields = cls.get_from_tabs(tabs).get('fields', [])
        return [field.as_dto() for field in cls.CONTAINS
                if not field.exists_in(deployed_fields)]
import calendar
import datetime
import json
import logging
import os
import pprint
import random
import re
import sys
import time
import pytz
from jinja2 import Environment, Undefined, select_autoescape
from jinja2.exceptions import TemplateError, TemplateSyntaxError
from resilient_lib import readable_datetime
if sys.version_info.major < 3:
from cgi import escape as html_escape
else:
# Python 3.2 adds html.escape() and deprecates cgi.escape().
from html import escape as html_escape
if sys.version_info.major < 3:
from base64 import encodestring as b64encode
else:
# Python 3.x
from base64 import encodebytes as b64encode
if sys.version_info.major < 3:
from urllib import quote
else:
# Python 3.x
from urllib.parse import quote
LOG = logging.getLogger(__name__)
UNDEFINED_LABEL = "[undefined]"
def render(template, data):
    """
    Render data into a template, producing a string result. All the additional custom filters are available.

    :param template: Path to or a dict of the Jinja template
    :type template: str or dict
    :param data: JSON data to apply to the template
    :type data: dict
    :return: result from the rendering of the template. The template is usually a string, but can be a dict
    :rtype: str or dict

    **Examples:**

    .. code-block:: python

        >>> render("template {{value}}", {"value":"123"})
        u'template 123'

        >>> render({"template": "{{value}}"}, {"value":"123"})
        u'{"template": "123"}'

        You can escape values using the 'json' filter,
        or the 'url' or 'html' or 'ldap' filters.

        >>> render('{"template": {{value|json}} }', {"value":'1"23'})
        u'{"template": "1\\\\"23" }'

        >>> render('{"template": "{{value|js}}" }', {"value":'1"23'})
        u'{"template": "1\\\\"23" }'

        >>> render('{"template": {{value|ldap}} }', {"value":'1*23'})
        u'{"template": 1\\\\2a23 }'

        >>> render('shell "{{value|ps}}"', {"value":'$"foo"'})
        u'shell "`$`"foo`""'

        >>> render('shell "{{value|sh}}"', {"value":'$"foo"'})
        u'shell "\\\\$\\\\"foo\\\\""'

        >>> render('template={{value|timestamp}}', {"value":0})
        u'template=0'

        >>> render('template={{value|timestamp}}', {})
        u'template=null'

        >>> render('template={{value|timestamp}}', {"value":{"year":2015, "month":7, "day":15}})
        u'template=1436918400000'

        >>> render('template={{value|timestamp}}', {"value":datetime.datetime(2015, 7, 15)})
        u'template=1436918400000'
    """
    stringtemplate = template
    if isinstance(template, dict):
        # A dict template is serialised to JSON text first; sort_keys makes
        # the rendered output deterministic.
        stringtemplate = json.dumps(template, sort_keys=True)
    try:
        # ``environment()`` returns the shared module-level Jinja environment
        # carrying all the custom filters.
        jtemplate = environment().from_string(stringtemplate)
    except TemplateSyntaxError as err:
        LOG.error("Render failed: %s, with template: %s", str(err), stringtemplate)
        raise
    try:
        stringvalue = jtemplate.render(data)
    except TemplateError:
        LOG.error("Render failed, with data: %s", data)
        raise
    return stringvalue
def render_json(template, data):
    """
    Render data into a template, producing a JSON result.
    Also clean up any "really bad" control characters to avoid failure.

    :param template: Path to or a dict of the Jinja template
    :type template: str or dict
    :param data: dict to apply to the template
    :type data: dict
    :return: result from the rendering of the template as a dictionary
    :rtype: dict

    **Examples:**

    .. code-block:: python

        >>> d = {"value": "the" + chr(10) + "new" + chr(10) + "thing"}
        >>> render_json('{"result":"{{value}}"}', d)
        {u'result': u'the new thing'}

        >>> d = {"value": "the" + chr(1) + "new" + chr(9) + "thing"}
        >>> render_json('{"result":"{{value}}"}', d)
        {u'result': u'the new thing'}
    """
    result = render(template, data)
    # Strip ASCII control characters before parsing; stray control characters
    # inside rendered field values would otherwise make json.loads fail.
    result = _remove_ctl_chars(result)
    return _convert_to_json(result)
def _remove_ctl_chars(result):
# replace any control characters with spaces
for n in range(1, 32):
result = result.replace(chr(n), " ")
return result
def _convert_to_json(result):
try:
return json.loads(result)
except:
raise ValueError(u"It is expected that the rendered template is a JSON Object\nInvalid JSON result: {0}".format(result))
def make_payload_from_template(template_override, default_template, payload, return_json=True):
    """
    Convert a payload into a new format based on a specified template.

    :param template_override: Path to the specified template (*usually
        taken from the app.config file*)
    :type template_override: str
    :param default_template: Path to the default template (*usually in
        the '/util/templates' directory*)
    :type default_template: str
    :param payload: ``dict`` of payload that is passed to Jinja template
    :type payload: dict
    :param return_json: False if template should be rendered as a ``str``
        and results returned as a ``str``
    :type return_json: bool
    :return: If the Jinja template is valid JSON and ``return_json`` is
        ``True`` the result is returned as a ``dict``, else the rendered
        template is returned as a ``str``
    :rtype: str|dict
    :raises ValueError: if ``return_json`` is ``True`` and the Jinja
        template is not valid JSON
    """
    template_data = _get_template(template_override, default_template)

    # Render the template.
    if return_json:
        # render_json already strips control characters before parsing.
        # Bug fix: _remove_ctl_chars was previously applied to this dict
        # too, which raised AttributeError (dict has no ``replace``).
        rendered_payload = render_json(template_data, payload)
    else:
        rendered_payload = _remove_ctl_chars(render(template_data, payload))

    LOG.debug(rendered_payload)

    return rendered_payload
def _get_template(specified_template, default_template):
    """Return the contents of a Jinja template file.

    Falls back to the packaged default template when the caller-specified
    path is missing or not a regular file.

    Args:
        specified_template (str): customer-specified template path, or None
        default_template (str): template path relative to this module

    Returns:
        str: the contents of the chosen template file
    """
    chosen_path = specified_template
    if chosen_path and not (os.path.exists(chosen_path) and os.path.isfile(chosen_path)):
        LOG.error(u"Template file: %s doesn't exist, using default template",
                  chosen_path)
        chosen_path = None

    if not chosen_path:
        # using default template, resolved relative to this module's location
        chosen_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   default_template)

    LOG.debug(u"template file used: %s", chosen_path)
    with open(chosen_path, "r") as definition:
        return definition.read()
# C U S T O M J I N J A F I L T E R S
def soar_datetimeformat(value, date_format="%Y-%m-%dT%H:%M:%S", split_at=None):
    """
    **soar_datetimeformat**

    Convert a UTC date string to an epoch timestamp in milliseconds.

    :param value: the UTC date string; falsy values are returned unchanged
    :type value: str
    :param date_format: parsing format, default ``"%Y-%m-%dT%H:%M:%S"``
    :type date_format: str
    :param split_at: optional character; everything from its last
        occurrence onward is dropped before parsing
        (e.g. ``'.'`` drops milliseconds from "2021-10-22T20:53:53.913Z",
        ``'+'`` drops tz info from "2021-10-22T20:53:53+00:00")
    :type split_at: str
    :return: epoch value of the datetime, in milliseconds
    :rtype: int
    """
    if not value:
        return value
    text = value[:value.rfind(split_at)] if split_at else value
    parsed = time.strptime(text, date_format)
    return calendar.timegm(parsed) * 1000
def soar_substitute(value, json_str):
    """
    **soar_substitute**

    Look up *value* in a replacement table.

    :param value: original value to look up
    :type value: str
    :param json_str: JSON-encoded object of key/value lookup pairs; an
        optional "DEFAULT" key is used when no exact match is found
    :type json_str: JSON encoded str
    :return: the replacement, the DEFAULT replacement, or the original value
    :rtype: str | int
    """
    lookup = json.loads(json_str)
    try:
        return lookup[value]
    except KeyError:
        # fall back to the DEFAULT entry, or pass the value through
        return lookup.get('DEFAULT', value)
def soar_splitpart(value, index, split_chars=' - '):
    """
    **soar_splitpart**

    Split a string on *split_chars* and return the piece at *index*.

    :param value: string to split
    :type value: str
    :param index: index of the piece to return
    :type index: int
    :param split_chars: *(optional)* separator, default ``' - '``
    :type split_chars: str
    :return: the selected piece; the original ``value`` when ``index``
        is out of bounds
    :rtype: str
    """
    pieces = value.split(split_chars)
    return pieces[index] if index < len(pieces) else value
def soar_trimlist(org_list):
    """
    **soar_trimlist**

    Strip leading/trailing whitespace from every string in a list.

    :param org_list: list of strings; non-list input is returned unchanged
    :type org_list: list of strings
    :return: new list with each element stripped, or the input itself
    :rtype: list
    """
    if isinstance(org_list, list):
        return [item.strip() for item in org_list]
    return org_list
def js_filter(val):
    """
    **js**

    JSONify *val* and strip the surrounding quotes, for embedding inside
    an existing JavaScript/JSON string literal.

    :param val: the value to convert
    :type val: str
    :return: JSONified string without the outer quotes, or ``"null"``
    :rtype: str
    """
    if val is None or isinstance(val, Undefined):
        return "null"
    return json_filter(val)[1:-1]
def json_filter(val, indent=0):
    """
    **json**

    JSONify *val* with sorted keys.

    :param val: the value to convert
    :type val: str
    :param indent: indentation passed through to ``json.dumps``
    :return: JSONified string of the value, or ``"null"``
    :rtype: str
    """
    if isinstance(val, Undefined) or val is None:
        return "null"
    return json.dumps(val, sort_keys=True, indent=indent)
def html_filter(val):
    """
    **html**

    HTML-encode *val*.

    :param val: the string to encode
    :type val: str
    :return: the encoded string, or the undefined marker
    :rtype: str
    """
    return UNDEFINED_LABEL if isinstance(val, Undefined) else html_escape(val)
def url_filter(val):
    """
    **url**

    URL-encode (percent-encode) *val*.

    :param val: the value to encode
    :type val: str
    :return: the encoded string, or the undefined marker
    :rtype: str
    """
    return UNDEFINED_LABEL if isinstance(val, Undefined) else quote(str(val))
def idna_filter(val):
    """
    **idna**

    Encode *val* per RFC 3490 (IDNA).

    :param val: the string to encode
    :type val: str
    :return: the encoded string, or the undefined marker
    :rtype: str
    """
    if isinstance(val, Undefined):
        return UNDEFINED_LABEL
    encoded = val.encode("idna")
    return encoded.decode("utf-8")
def punycode_filter(val):
    """
    **punycode**

    Encode *val* per RFC 3492 (punycode).

    :param val: the string to encode
    :type val: str
    :return: the encoded string, or the undefined marker
    :rtype: str
    """
    if isinstance(val, Undefined):
        return UNDEFINED_LABEL
    encoded = val.encode("punycode")
    return encoded.decode("utf-8")
def ldap_filter(val):
    """
    **ldap**

    LDAP-escape *val*: every character outside the ``'0'..'z'`` range,
    plus backslash and the filter metacharacters ``*()``, is hex-escaped.

    :param val: the value to escape
    :type val: str
    :return: the escaped string, or the undefined marker
    :rtype: str
    """
    if isinstance(val, Undefined):
        return UNDEFINED_LABEL

    def _escape(ch):
        if ch < '0' or ch > 'z' or ch in "\\*()":
            return "\\%02x" % ord(ch)
        return ch

    return ''.join(_escape(ch) for ch in str(val))
def ps_filter(val):
    """
    **ps**

    Escape *val* for use on a PowerShell command line, using PowerShell's
    backtick escape character.

    :param val: the value to escape
    :type val: str
    :return: the escaped string, or the undefined marker
    :rtype: str
    """
    if isinstance(val, Undefined):
        return UNDEFINED_LABEL
    # Control characters that PowerShell writes with a mnemonic escape.
    mnemonics = {'\0': "`0", '\a': "`a", '\b': "`b", '\f': "`f",
                 '\n': "`n", '\r': "`r", '\t': "`t", '\v': "`v"}
    out = []
    for ch in str(val):
        if ch in "`$#'\"":
            out.append("`" + ch)
        else:
            out.append(mnemonics.get(ch, ch))
    return ''.join(out)
def sh_filter(val):
    """
    **sh**

    Escape *val* for use on a Unix shell command line: ``$``, ``#`` and
    ``"`` are backslash-escaped; non-printable and non-ASCII characters
    become octal escapes.

    :param val: the value to escape
    :type val: str
    :return: the escaped string, or the undefined marker
    :rtype: str
    """
    if isinstance(val, Undefined):
        return UNDEFINED_LABEL
    out = []
    for ch in str(val):
        code = ord(ch)
        if ch in "$#\"":
            out.append("\\" + ch)
        elif code < 32 or code > 126:
            out.append("\\%03o" % code)
        else:
            out.append(ch)
    return ''.join(out)
def pretty_filter(val, indent=2):
    """
    **pretty**

    Produces pretty-printed string of the value.

    :param val: The string to format
    :type val: str

    :param indent: Number of tabs to use when formatting
    :type indent: int

    :return: The formatted string
    :rtype: str
    """
    if isinstance(val, Undefined):
        return UNDEFINED_LABEL

    def nice_repr(obj, context, maxlevels, level):
        # On Python 2, force unicode objects to UTF-8 str so pprint does
        # not display them with the u'' prefix.
        if sys.version_info.major < 3:
            typ = type(obj)
            if typ is unicode:
                obj = obj.encode("utf-8")
        # NOTE(review): pprint._safe_repr is a private CPython helper, and
        # overriding ``printer.format`` below is undocumented API -- this
        # may break on future Python versions.
        return pprint._safe_repr(obj, context, maxlevels, level)

    printer = pprint.PrettyPrinter(indent=indent)
    printer.format = nice_repr
    return printer.pformat(val)
def iso8601(val):
    """
    **iso8601**

    Convert an epoch-milliseconds timestamp to an ISO8601 datetime string
    in UTC.

    :param val: an epoch milliseconds timestamp
    :type val: str|int
    :return: ISO8601 datetime
    :rtype: str
    """
    seconds = int(int(val) / 1000)
    naive = datetime.datetime.utcfromtimestamp(seconds)
    return pytz.UTC.localize(naive).isoformat()
def timestamp(val):
    """
    **timestamp**

    Try convert non-timestamp values to a timestamp.

    :param val: Either ``"now"`` or a dict containing year / month / day etc.
    :type val: str | dict
    :return: An epoch milliseconds timestamp
    :rtype: int

    .. code-block::

        >>> timestamp({"year": 2018, "month": 8, "day": 1, "timezoneID": "CET"})
        1533078000000
        >>> timestamp(Undefined())
        'null'
        >>> timestamp("now") > 1530000000000
        True
        >>> timestamp("now") > 2000000000000 # 2033
        False
    """
    if isinstance(val, dict):
        # Build a datetime from the individual incident date-picker fields.
        y = val.get("year", 1970)
        m = val.get("month", 1)
        d = val.get("day", 1)
        h = val.get("hour", 0)
        n = val.get("minute", 0)
        s = val.get("second", 0)
        u = val.get("milliSecond", 0)
        z = pytz.timezone(val.get("timezoneID", "UTC"))
        # NOTE(review): ``u`` holds milliseconds but is passed to datetime's
        # *microsecond* parameter; harmless for the returned value because
        # utctimetuple() drops sub-second precision, but worth confirming.
        # NOTE(review): passing a pytz timezone via tzinfo= uses the zone's
        # base offset rather than pytz's recommended localize(); the
        # doctest above relies on the current behaviour, so do not "fix"
        # this without re-checking the expected values.
        dt = datetime.datetime(y, m, d, h, n, s, u, z)
        return int(calendar.timegm(dt.utctimetuple()) * 1000)
    if isinstance(val, Undefined):
        return "null"
    if isinstance(val, datetime.datetime):
        return int(calendar.timegm(val.utctimetuple()) * 1000)
    if val == "now":
        return int(calendar.timegm(datetime.datetime.now().utctimetuple()) * 1000)
    return val
def uniq(val, key=None):
    """
    **uniq**

    Produce the unique list. If ``val`` is a dict, produce unique list of key values.

    :param val: The original list
    :type val: [str | int | obj]
    :param key: If ``val`` is a dict return a list with dicts with just that ``key``
    :type key: str
    :return: Original list of items with duplicates removed
    :rtype: list

    .. code-block::

        >>> sorted(uniq([1,2,3,2]))
        [1, 2, 3]
        >>> sorted(uniq([ {"a":1}, {"a":2}, {"a":3}, {"a":2}]))
        [{'a': 1}, {'a': 2}, {'a': 3}]
        >>> sorted(uniq([ {"a":1}, {"a":2}, {"a":3}, {"a":2}, Exception()], "a"))
        [{'a': 1}, {'a': 2}, {'a': 3}, Exception()]
    """
    if not isinstance(val, list):
        return val
    if key is None:
        try:
            return list(set(val))
        except TypeError:
            # unhashable elements; fall through to the keyed/repr path
            pass
    keys = []
    values = []
    for value in val:
        try:
            thiskey = value[key]
        # Bug fix: was a bare ``except`` (which also caught SystemExit and
        # KeyboardInterrupt); any lookup failure falls back to repr().
        except Exception:
            thiskey = repr(value)
        if thiskey not in keys:
            keys.append(thiskey)
            values.append(value)
    return values
def sample_filter(val, count=None):
    """
    **sample**

    Pick random element(s) from a sequence.

    :param val: the sequence to sample from
    :type val: list
    :param count: if ``None``, return a single element (or ``None`` for an
        empty sequence); otherwise return a list of *count* elements
        (empty list when the sequence is too small)
    :type count: int | ``None``
    :return: the random item(s)
    :rtype: str | obj | int | list
    """
    if count is None:
        # single value
        try:
            return random.sample(list(val), 1)[0]
        except ValueError:
            return None
    # list of values
    try:
        return random.sample(list(val), count)
    except ValueError:
        return []
def camel_filter(val):
    """
    **camel**

    Convert text to CamelCase: title-case the input, then drop every
    non-alphanumeric character (including underscores).

    :param val: the string to convert
    :type val: str
    :return: the CamelCased string
    :rtype: str

    .. code-block::

        This value is in camel case: {{ a#bc_def | camel }}
        >>> 'ABcDef'
    """
    return re.sub(r"[\W^_]", "", val.title())
def base64_filter(val, indent=2):
    """
    **base64**

    JSONify *val* and encode the result as base64, broken into
    fixed-width lines by the encoder.

    :param val: the value to encode
    :type val: str
    :param indent: unused; kept for backward compatibility of the
        filter signature
    :type indent: int
    :return: the base64 text, or ``""`` for undefined input
    :rtype: str
    """
    if isinstance(val, Undefined):
        return ""
    raw = json.dumps(val).encode("utf-8")
    return b64encode(raw).decode("utf-8")
# Registry of every custom filter shipped with resilient-lib. The same
# callables are installed both as Jinja *filters* ({{ x|json }}) and as
# *globals* ({{ json(x) }}) on the shared environment below.
JINJA_FILTERS = {
    "json": json_filter,
    "js": js_filter,
    "html": html_filter,
    "url": url_filter,
    "idna": idna_filter,
    "punycode": punycode_filter,
    "ldap": ldap_filter,
    "ps": ps_filter,
    "sh": sh_filter,
    "pretty": pretty_filter,
    "timestamp": timestamp,
    "iso8601": iso8601,
    "uniq": uniq,
    "sample": sample_filter,
    "camel": camel_filter,
    "base64": base64_filter,
    "soar_datetimeformat": soar_datetimeformat,
    "soar_display_datetimeformat": readable_datetime,
    "soar_substitute": soar_substitute,
    "soar_splitpart": soar_splitpart,
    "soar_trimlist": soar_trimlist
}

# Maintain one global Environment
_ENV = Environment(autoescape=select_autoescape(default_for_string=False))
_ENV.globals.update(JINJA_FILTERS)
_ENV.filters.update(JINJA_FILTERS)
def global_jinja_env():
    """
    Return the Jinja environment with our resilient-lib custom filters.
    This environment can be expanded upon to add additional custom filters.
    See `Jinja Custom Filters <https://jinja.palletsprojects.com/en/3.1.x/api/#custom-filters>`_ for more.

    Current custom filters available:

    .. parsed-literal::

        |lib_jinja_filters|

    :return: The Jinja environment
    :rtype: `jinja2.Environment <https://jinja.palletsprojects.com/en/3.1.x/api/#jinja2.Environment>`_

    **Example:**

    .. code-block:: python

        from resilient_lib import global_jinja_env

        addl_custom_filters = {
            "filter_name": method_name
        }

        env = global_jinja_env()
        env.globals.update(addl_custom_filters)
        env.filters.update(addl_custom_filters)
    """
    # The single module-level environment shared by render()/render_json().
    return _ENV

# Backwards-compatible alias: note it is the *function* itself, which
# render() invokes as environment().
environment = global_jinja_env
import re
import logging
from six import string_types
try:
from HTMLParser import HTMLParser
except:
from html.parser import HTMLParser
class MarkdownParser(HTMLParser):
"""
Convert HTML text into Markdown. A wrapper for
`html.parser.HTMLParser <https://docs.python.org/3.6/library/html.parser.html#html.parser.HTMLParser>`_
**Example:**
.. code-block:: python
from resilient_lib import MarkdownParser
data = "<div class='rte'><div><strong><u>underline and strong</u></strong></div></div>"
markdown = "*_underline and strong_*"
parser = MarkdownParser(bold="*", underline="_") # override defaults
converted = parser.convert(data)
self.assertEqual(converted, markdown)
"""
QUILL_RTE = "rte" # first <div> will have this class. This is part of quill
HTML_STYLE_COLOR = r'rgb\(([\d]+),[\s]*([\d]+),[\s]*([\d]+)\)'
SUPPORTED_TAGS = ["div", "span", "br", "strong", "em", "s", "u", "ol", "ul", "li", "a",
"h", "h1", "h2", "h3", "h4", "h5", "h6", "blockquote"]
MARKDOWN_NEWLINE = "\n"
MARKDOWN_NEWSECTION = "\n\n"
DEFAULT_LIST = "*"
def __init__(self, bold="**", italic="*", underline="__", strikeout="~~", bullets=DEFAULT_LIST, number=0, indent=4,
             monospace=None, headers=None, blockquote="```"):
    """Build a parser with customisable markdown markers.

    Args:
        bold, italic, underline, strikeout (str): inline markers.
        bullets (str or list): bullet symbol(s) for unordered lists; a
            single string is expanded to one symbol per nesting level.
        number (int): starting number for ordered lists.
        indent (int): spaces per list nesting level.
        monospace (list): opening/closing monospace markers; defaults to
            ``["{{", "}}"]``.
        headers (list): markers for h1..h6; defaults to
            ``['h1.', 'h2.', 'h3.', 'h4.', 'h5.', 'h6.']``.
        blockquote (str): blockquote marker.
    """
    HTMLParser.__init__(self)
    self.log = logging.getLogger(__name__)

    # Bug fix: ``monospace`` and ``headers`` used mutable default
    # arguments, so the same lists were shared across every
    # MarkdownParser instance; use None sentinels instead.
    if monospace is None:
        monospace = ["{{", "}}"]
    if headers is None:
        headers = ['h1.', 'h2.', 'h3.', 'h4.', 'h5.', 'h6.']

    # customizable attributes
    self.bold = bold
    self.italic = italic
    self.underscore = underline
    self.strikeout = strikeout
    self.list_bullets = bullets if isinstance(bullets, list) else [bullets] * 6
    self.list_number = number
    self.indent = indent
    self.monospace = monospace
    self.headers = headers
    self.blockquote = blockquote
def init_buffers(self):
    """Reset all per-conversion state so the parser can be reused."""
    self.buffer = []        # completed markdown output
    self.curr_tag = []      # stack of currently open tags
    self.curr_attrs = []    # stack of attributes per open tag
    self.curr_list = []     # stack of nested list markers/counters
    self.data = []          # text collected for the current tag
    self.data_pre = []      # markdown to emit before the data
    self.data_post = []     # markdown to emit after the data
    self.prev_tag = None
    self.prev_attrs = []
def convert(self, data):
    """
    Convert HTML ``data`` to markdown and return the converted string.

    :param data: html text to convert
    :type data: str
    :return: text converted to markdown; non-string or empty input is
        returned unchanged
    :rtype: str
    """
    self.init_buffers()
    if data and isinstance(data, string_types):
        self.feed(data)
        return self.toString()
    return data
def handle_starttag(self, tag, attrs):
    """
    Handler for opening tags: queues the markdown that must surround the
    tag's text (``self.data_pre``/``self.data_post``) and tracks nesting
    state for lists and tag matching.

    :param tag: lower-cased tag name
    :param attrs: list of (name, value) attribute tuples
    :return: None
    """
    # flush any data accumulated; for lists, keep pending pre/post markdown
    if tag in ('ol', 'ul'):
        self.push_data(False)
    else:
        self.push_data(True)
    # retain the hierarchy of nested command, which may be needed
    self.curr_tag.append(tag)
    self.curr_attrs.append(attrs)
    if tag == "div":
        # a div directly following a list starts a new markdown section
        if self.prev_tag in ("ol", "ul"):
            self.data_pre.append(MarkdownParser.MARKDOWN_NEWSECTION)
    if tag == "strong":
        self.data_pre.append(self.bold)
        self.data_post.insert(0, self.bold)
    elif tag == "em":
        self.data_pre.append(self.italic)
        self.data_post.insert(0, self.italic)
    elif tag == "s":
        self.data_pre.append(self.strikeout)
        self.data_post.insert(0, self.strikeout)
    elif tag == "u":
        self.data_pre.append(self.underscore)
        self.data_post.insert(0, self.underscore)
    elif tag == "ol":
        self.curr_list.append(self.list_number)  # number to be incremented with every <li>
    elif tag == "ul":
        if self.list_bullets:
            self.curr_list.append(self.list_bullets.pop(0))  # get the symbol to use
        else:
            # default list marker
            self.curr_list.append(MarkdownParser.DEFAULT_LIST)
    elif tag == "li":
        # add proper # of spaces for the current nesting depth
        self.data_pre.append(MarkdownParser.MARKDOWN_NEWLINE)
        self.data_pre.append((" " * self.indent) * len(self.curr_list))
        # an int on the list stack means we are inside an <ol>; anything
        # else is a bullet symbol from a <ul>
        if not isinstance(self.curr_list[-1], int):
            self.data_pre.append('{} '.format(self.curr_list[-1]))
        else:
            num = self.curr_list.pop()
            num = num+1
            self.curr_list.append(num)
            self.data_pre.append("{}. ".format(num))
    elif tag == "a":
        href = self.get_attr(attrs, 'href')
        self.data_pre.extend(["[{}]".format(href), '('])
        self.data_post.insert(0, ")")
    elif tag in ('h', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
        if tag == 'h':
            tag = 'h1'
        # get the number of header and use as index into the header array
        idx = int(tag[-1:])-1
        if idx < len(self.headers):
            self.data_pre.append("{} ".format(self.headers[idx]))
            self.data_post.append(MarkdownParser.MARKDOWN_NEWLINE)
    elif tag == "blockquote":
        self.data_pre.append(self.blockquote)
        self.data_post.insert(0, self.blockquote)
    elif tag not in MarkdownParser.SUPPORTED_TAGS:
        # unknown tags pass their text through, followed by a newline
        self.log.warning("Unknown html tag: {}".format(tag))
        self.data_post.insert(0, MarkdownParser.MARKDOWN_NEWLINE)
    # determine if styling is needed
    style = self.get_attr(attrs, 'style')
    if style:
        rgb = self.get_style_attr(style, 'color')
        if rgb:
            rgb_hex = self.convert_rgb(self.get_rgb(rgb))
            self.data_pre.append("{{color:{0}}}".format(rgb_hex))
            self.data_post.insert(0, "{color}")
        # format monospace data blocks
        font_family = self.get_style_attr(style, 'font-family')
        if font_family and font_family == "monospace":
            # monospace may be an [open, close] pair or a single symbol
            if isinstance(self.monospace, list):
                self.data_pre.append(self.monospace[0])
                self.data_post.insert(0, self.monospace[1])
            else:
                self.data_pre.append(self.monospace)
                self.data_post.insert(0, self.monospace)
def handle_data(self, data):
    """
    Collect the text found between tags, first stripping any leading
    newline/tab/carriage-return characters.

    :param data: raw text reported by HTMLParser
    :return: None
    """
    match = re.search(r"^[\n\t\r]*(.*)", data, re.S)
    if match:
        self.data.append(match.group(1))
def handle_endtag(self, tag):
    """
    Handler for closing tags: pops tracking state, emits any trailing
    markdown, and flushes the completed tag into the output buffer.

    :param tag: lower-cased tag name
    :raises ValueError: when the closing tag does not match the most
        recently opened tag
    :return: None
    """
    # remove existing tag from stack
    self.prev_tag = self.curr_tag.pop()
    self.prev_attrs = self.curr_attrs.pop()
    if self.prev_tag != tag:
        raise ValueError("Mismatch tag {} expecting {}".format(tag, self.prev_tag))
    if tag == "div":
        self.data_post.append(MarkdownParser.MARKDOWN_NEWLINE)
    elif tag in ("ol", "ul"):
        if len(self.curr_list) > 0:
            bullet = self.curr_list.pop()  # remove existing bullet, exposing previous bullet
            if tag == "ul":
                # return the bullet symbol so a later <ul> can reuse it
                if self.list_bullets:
                    self.list_bullets.insert(0, bullet)  # clear top item on list symbols
                else:
                    self.list_bullets = [bullet]
    elif tag == "br":
        # this is data rather than pre or post data
        self.data.append(MarkdownParser.MARKDOWN_NEWSECTION)
    self.push_data()
def push_data(self, data_only=False):
    """
    Flush buffered text (and, unless ``data_only``, its surrounding
    markdown decorations) into the output buffer, then reset the per-tag
    buffers.

    :param data_only: when True, flush only the raw text and keep the
        pending pre/post markdown for a later flush
    :return: None
    """
    if data_only:
        self.buffer.extend(self.data)
    else:
        self.buffer.extend(self.data_pre)
        self.buffer.extend(self.data)
        self.buffer.extend(self.data_post)
        self.data_pre = []
        self.data_post = []
    self.data = []
def convert_rgb(self, rgb):
    """
    Convert an iterable of decimal rgb components to a ``#rrggbb`` hex
    color string.

    :param rgb: iterable of three numeric (or numeric-string) components
    :return: hex color code, e.g. ``#ff0010``
    """
    parts = ['{:02x}'.format(int(component)) for component in rgb]
    return '#' + ''.join(parts)
def get_attr(self, attrs, key):
    """
    Look up ``key`` in a list of (name, value) attribute pairs as
    produced by HTMLParser.

    ex. <div style='font-family: monospace'>zzz</div>

    :param attrs: list of (name, value) tuples
    :param key: attribute name to find
    :return: the matching attribute value, or None when absent
    """
    for name, value in attrs:
        if name == key:
            return value
    return None
def get_style_attr(self, style, key):
    """
    Extract the value of css property ``key`` from an inline ``style``
    attribute string such as ``"color: red; font-family: monospace"``.

    :param style: inline css declarations separated by ';'
    :param key: css property name
    :return: the property value, or None when absent
    """
    for declaration in style.split(';'):
        parts = declaration.split(':')
        if parts[0].strip() == key:
            return parts[1].strip()
    return None
def get_rgb(self, str):
    """
    Parse a css color of the form ``"rgb(rrr, ggg, bbb)"``.

    NOTE(review): the parameter name shadows the ``str`` builtin —
    consider renaming (kept as-is to avoid changing the signature).

    :param str: css color string to parse
    :return: tuple of the three component strings, or None when no match
    """
    m = re.search(MarkdownParser.HTML_STYLE_COLOR, str)
    return m.group(1, 2, 3) if m else None
def __str__(self):
    """Render the markdown accumulated so far."""
    return self.toString()

def __repr__(self):
    """Same as ``__str__``: the converted markdown."""
    return self.toString()
def toString(self):
    """
    Finish the conversion: flush any pending buffers and return the full
    markdown result with trailing newline characters removed.

    :return: markdown result
    :rtype: str
    """
    self.push_data()
    return ''.join(self.buffer).rstrip('\n')
import json
from .function_metrics import FunctionMetrics
# Schema version stamped into every result payload; bump when the payload
# format changes.
PAYLOAD_VERSION = "1.0"


class ResultPayload:
    """ Class to create a standard payload for functions. The resulting payload follows this format:

    { "version": "1.0" -- used to track different versions of the payload
      "success": True|False
      "reason": str -- a string to explain if success=False
      "content": json -- the result of the function call
      "raw": str -- a string representation of content. This is sometimes needed when the result of one
             function is piped into the next
      "inputs": json -- a copy of the input parameters, useful for post-processor script use
      "metrics": json -- a set of information to capture specifics metrics about the function's runtime environment
    }
    """

    def __init__(self, pkgname, version=PAYLOAD_VERSION, **kwargs):
        """
        Build the initial payload structure and start the metrics timers.

        :param pkgname: package name, used to label the collected metrics
        :param version: payload schema version string
        :param kwargs: the function's input parameters, echoed back in the payload
        """
        self.fm = FunctionMetrics(pkgname)
        self.payload = {
            "version": version,
            "success": None,
            "reason": None,
            "content": None,
            "raw": None,
            "inputs": kwargs,
            "metrics": None
        }

    def done(self, success, content, reason=None):
        """
        Complete the function payload.

        :param success: True|False
        :param content: json result to pass back
        :param reason: comment field used when success=False
        :return: completed payload in json
        """
        self.payload['success'] = success
        self.payload['reason'] = reason
        self.payload['content'] = content
        self.payload['metrics'] = self.fm.finish()
        # pre-2.0 payloads also carry a string rendering of content, needed
        # when one function's result is piped into the next
        if float(self.payload.get("version", 2.0)) < 2.0:
            try:
                self.payload['raw'] = json.dumps(content)
            except (TypeError, ValueError):
                # content is not json-serializable; leave 'raw' as None
                # (previously a bare except, which hid unrelated errors)
                pass
        return self.payload
import sys
import json
import re
from jinja2 import Undefined
if sys.version_info.major < 3:
# Handle PY 2 specific imports
from base64 import encodestring as b64encode
else:
# Handle PY 3 specific imports
from base64 import encodebytes as b64encode
def _filter_base64(val):
    """
    Serialize ``val`` to json and return it base64 encoded.

    Jinja ``Undefined`` values render as an empty string.
    """
    if isinstance(val, Undefined):
        return ""
    serialized = json.dumps(val).encode("utf-8")
    return b64encode(serialized).decode("utf-8")
def _filter_camel(val):
"""Return CamelCase
e.g.: "a#bc_def" would convert to "ABcDef"
"""
titlecase = val.title()
return re.sub(pattern=r"[\W^_]", repl="", string=titlecase)
def _dot_py(val):
"""
Return a value with code quotes around any .py file
e.g.: sub any string "file.py" to "`file.py`"
"""
return re.sub(r"([0-9a-z_]+\-*\.py)", r"`\1`", val)
def _scrub_ansi(val):
"""
Return a value with all all ansi color codes removed
"""
return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", r"", val)
def _convert_to_code(val):
"""
Convert ' to code(`) or ''' code blocks(```) and convert ' within code blocks to "
e.g.: 'display_name' converts to `display_name`
e.g.: '''pip install -U 'resilient-circuits'''' converts to
```shell
$ pip install -U "resilient-circuits"
```
"""
return re.sub(r"'{3}(.*)'(.*)'(.*)'{3}", r'\n\n```shell\n$ \1"\2"\3\n```\n', val).replace("'", "`").replace("\t\t", "\t")
def _defaults_to_code(val):
"""
Make sure that any defaults that are surrounded by << >> are in code quotes so that they render properly.
e.g.: <<display_name>> converts to '<<display_name>>'
"""
return re.sub(r"(<{2}.*>{2})", r"`\1`", val)
def _render_diff(val):
"""
Renders any diff objects correctly. Assumes that diffs are embedded in strings
by two tabs before them (only used so far in package files issues' descriptions)
"""
return re.sub(r"(\t\t.*)(\n+)", r"```diff\n\1\n```", val, flags=re.S).replace("\t", "")
def _readable_time_from_timestamp(val):
"""Assuming val is a %Y%m%d%H%M%S timestamp, produce a readable Y-M-D H-M-S format"""
if len(val) != 14:
return val
return "{0}/{1}/{2} {3}:{4}:{5}".format(val[:4], val[4:6], val[6:8], val[8:10], val[10:12], val[12:])
# Mapping of filter name -> callable; registered on a Jinja Environment by
# add_filters_to_jinja_env() below.
JINJA_FILTERS = {
    "base64": _filter_base64,
    "camel": _filter_camel,
    "dot_py": _dot_py,
    "scrub_ansi": _scrub_ansi,
    "code": _convert_to_code,
    "defaults": _defaults_to_code,
    "is_dict": lambda x: isinstance(x, dict),
    "diff": _render_diff,
    "datetime": _readable_time_from_timestamp
}
def add_filters_to_jinja_env(env):
    """
    Register all of this module's custom filters (JINJA_FILTERS) on the
    given Jinja Environment, overwriting same-named entries.

    :param env: Jinja Environment
    """
    env.filters.update(JINJA_FILTERS)
import logging
from resilient_sdk.util import constants
from resilient_sdk.util import package_file_helpers as package_helpers
# Get the same logger object that is used in app.py
LOG = logging.getLogger(constants.LOGGER_NAME)
class SDKValidateIssue(object):
    """
    A single issue discovered while validating an app package.

    Issues compare and order by ``severity`` only (lower value = more
    severe), so a list of issues sorts most-severe-first.
    NOTE(review): defining __eq__ without __hash__ makes instances
    unhashable in Python 3 — confirm that is acceptable before adding
    issues to sets/dict keys.
    """

    SEVERITY_LEVEL_CRITICAL = 1
    SEVERITY_LEVEL_WARN = 2
    SEVERITY_LEVEL_INFO = 3
    SEVERITY_LEVEL_DEBUG = 100

    def __init__(self, name, description, severity=SEVERITY_LEVEL_CRITICAL, solution="SOLUTION UNKNOWN"):
        """
        :param name: short identifier for the issue
        :param description: human-readable description
        :param severity: one of the SEVERITY_LEVEL_* constants
        :param solution: suggested fix shown to the user
        """
        self.name = name
        self.description = description
        self.severity = severity
        self.solution = solution

    def __eq__(self, other):
        """Checks equality of two SDKValidateIssue objs (by severity only)"""
        return self.severity == other.severity

    def __lt__(self, other):
        """Checks less than for two SDKValidateIssue objs"""
        return self.severity < other.severity

    def __le__(self, other):
        """Checks less than or equal to for two SDKValidateIssue objs"""
        return self.severity <= other.severity

    def __str__(self):
        """Returns string representation of a SDKValidateIssue obj"""
        return u"'name={0}; description={1}; severity={2}; solution={3}'".format(self.name, self.description,
                                                                                self.get_logging_level(), self.solution)

    def __short_str__(self):
        """Short string representation of a SDKValidateIssue obj"""
        # BUGFIX: corrected "severtiy" typo in the rendered output
        return u"'issue={0}, severity={1}'".format(self.name, self.severity)

    def __repr__(self):
        return self.__str__()

    def as_dict(self):
        """Returns this class object as a dictionary (the live __dict__, not a copy)"""
        return self.__dict__

    def get_logging_level(self):
        """
        Returns logging level to use with CmdValidate._get_log_level

        :return: string indicating the error level that maps severity with constants.VALIDATE_LOG_LEVEL_[level]
        :rtype: str
        """
        if self.severity == SDKValidateIssue.SEVERITY_LEVEL_CRITICAL:
            return constants.VALIDATE_LOG_LEVEL_CRITICAL
        elif self.severity == SDKValidateIssue.SEVERITY_LEVEL_WARN:
            return constants.VALIDATE_LOG_LEVEL_WARNING
        elif self.severity == SDKValidateIssue.SEVERITY_LEVEL_INFO:
            return constants.VALIDATE_LOG_LEVEL_INFO
        else:
            return constants.VALIDATE_LOG_LEVEL_DEBUG

    def error_str(self):
        """Returns an error string to be output to the console"""
        return u"{0:<20} {1}\n{3:<11} {2}".format(
            package_helpers.color_output(
                self.get_logging_level() if self.get_logging_level() != constants.VALIDATE_LOG_LEVEL_DEBUG
                else "PASS",
                self.get_logging_level()),
            self.description, self.solution, ""
        )

    def severity_to_color(self):
        """Returns a string representing HTML value of the severity. For Jinja2 templating"""
        color = "red" if self.severity == SDKValidateIssue.SEVERITY_LEVEL_CRITICAL else "orange" if self.severity == SDKValidateIssue.SEVERITY_LEVEL_WARN else "teal"
        return '<span style="color:{0}">{1}</span>'.format(color, self.get_logging_level())
# Resimpy




[](https://resimpy.readthedocs.io/en/latest/?badge=latest)
[](https://pepy.tech/project/mclumi)
###### tags: `Resimpy` `read simulation` `PCR amplification` `scRNA-seq` `bulkRNA-seq`
## Overview
```angular2html
____ _ _____ _ _ _ _
| _ \ ___ ___(_)_ __ ___ _ __ _ _ |_ _|__ ___ | | | _(_) |_
| |_) / _ \/ __| | '_ ` _ \| '_ \| | | | | |/ _ \ / _ \| | |/ / | __|
| _ < __/\__ \ | | | | | | |_) | |_| | | | (_) | (_) | | <| | |_
|_| \_\___||___/_|_| |_| |_| .__/ \__, | |_|\___/ \___/|_|_|\_\_|\__|
|_| |___/
```
The **RE**ad **SIM**ulation **PY**thon program (Resimpy) provides a scalable interface for users through Python to massively simulate and generate reads of varying sequencing technologies, in order to avoid time-consuming experimental trials and other error-prone approaches. Simulated reads can have a UMI-, barcode-, primer-, or spacer-featured composition. Resimpy has been made available through the command-line interface (CLI) and Python-inline access.
## Citation
Please cite our work if you use Resimpy in your research.
## Result reproducibility
To reproduce the results used in https://www.biorxiv.org/content/10.1101/2023.04.06.535911v1, please follow the instruction below.
```angular2html
resimpy_general ...
resimpy_umi_transloc ...
```
## Documentation
The Resimpy documentation, showing its usage in different situations, is available at https://resimpy.readthedocs.io/en/latest/index.html.
## Installation
Released via https://pypi.org/project/resimpyx/
```angular2html
pip install resimpyx==0.0.2
```
## Overview
```angular2html
usage: resimpy_general [-h] --recipe recipe --read_structure read_structure
--permutation_num permutation_num
[--umi_unit_pattern umi_unit_pattern]
[--umi_unit_len_fixed umi_unit_len_fixed]
[--umi_num_fixed umi_num_fixed]
[--seq_length seq_length]
[--sim_thres_fixed sim_thres_fixed]
[--pcr_num_fixed pcr_num_fixed]
[--ampl_rate_fixed ampl_rate_fixed]
[--seq_sub_spl_rate seq_sub_spl_rate]
[--pcr_err_fixed pcr_err_fixed]
[--seq_err_fixed seq_err_fixed]
[--ampl_set_rates ampl_set_rates]
[--umi_unit_set_lens umi_unit_set_lens]
[--pcr_set_nums pcr_set_nums]
[--pcr_set_errs pcr_set_errs]
[--seq_set_errs seq_set_errs]
[--out_directory out_directory]
Welcome to the resimpy_general module
optional arguments:
-h, --help show this help message and exit
--recipe recipe, -r recipe
which condition among seq_errs, ampl_rates, pcr_errs,
pcr_nums, and umi_lens is used
--read_structure read_structure, -rs read_structure
read structure consisting of a UMI block (umi) and a
sequence block (seq), e.g., umi or umi+seq
--permutation_num permutation_num, -perm_num permutation_num
permutation test number
--umi_unit_pattern umi_unit_pattern, -umiup umi_unit_pattern
unit UMI pattern. This is to specify if UMIs consist
of monomer, dimer, trimer, or other blocks
--umi_unit_len_fixed umi_unit_len_fixed, -umiul umi_unit_len_fixed
unit UMI length fixed. This is to specify the length
of a monomer UMI. The final UMI length =
umi_unit_pattern * umi_unit_len_fixed
--umi_num_fixed umi_num_fixed, -umi_num umi_num_fixed
UMI number
--seq_length seq_length, -seq_len seq_length
genomic sequence length
--sim_thres_fixed sim_thres_fixed, -sim_thres sim_thres_fixed
edit distance-measured similarities between UMIs
--pcr_num_fixed pcr_num_fixed, -pcr_num pcr_num_fixed
Number of PCR cycles fixed
--ampl_rate_fixed ampl_rate_fixed, -ampl_rate ampl_rate_fixed
PCR amplification rate fixed
--seq_sub_spl_rate seq_sub_spl_rate, -spl_rate seq_sub_spl_rate
Subsampling rate for sequencing
--pcr_err_fixed pcr_err_fixed, -pcr_err pcr_err_fixed
PCR error fixed
--seq_err_fixed seq_err_fixed, -seq_err seq_err_fixed
Sequencing error fixed
--ampl_set_rates ampl_set_rates, -ampl_rates ampl_set_rates
a semicolon-partitioned string of a set of
amplification rates
--umi_unit_set_lens umi_unit_set_lens, -umi_lens umi_unit_set_lens
a semicolon-partitioned string of a set of unit UMI
lens
--pcr_set_nums pcr_set_nums, -pcr_nums pcr_set_nums
a semicolon-partitioned string of a set of PCR numbers
--pcr_set_errs pcr_set_errs, -pcr_errs pcr_set_errs
a semicolon-partitioned string of a set of PCR errors
--seq_set_errs seq_set_errs, -seq_errs seq_set_errs
a semicolon-partitioned string of a set of sequencing
errors
--out_directory out_directory, -out_dir out_directory
output directory
```
## Usage
### resimpy_general
#### Example 1: sequencing errors
```shell
resimpy_general \
-r seq_errs \
-rs umi+seq \
-perm_num 3 \
-umiup 1 \
-umiul 10 \
-umi_num 50 \
-seq_len 20 \
-pcr_num 8 \
-pcr_err 0.0001 \
-seq_err 0.0001 \
-ampl_rate 0.85 \
-sim_thres 3 \
-spl_rate 1 \
-seq_errs "1e-3;1e-2;0.1" \
-out_dir ./
```
```
# resimpy_general pcr_errs
resimpy_general -r pcr_errs -rs umi+seq -perm_num 3 -umiup 1 -umiul 10 -umi_num 50 -seq_len 20 -pcr_num 8 -pcr_err 0.0001 -seq_err 0.0001 -ampl_rate 0.85 -sim_thres 3 -spl_rate 1 -pcr_errs 1e-3;1e-2;0.1 -out_dir ./
# resimpy_general ampl_rates
resimpy_general -r ampl_rates -rs umi+seq -perm_num 3 -umiup 1 -umiul 10 -umi_num 50 -seq_len 20 -pcr_num 8 -pcr_err 0.0001 -seq_err 0.0001 -ampl_rate 0.85 -sim_thres 3 -spl_rate 1 -ampl_rates 0.1;0.2;0.3;0.4;0.5;0.6;0.7;0.8;0.9;1.0 -out_dir ./
# resimpy_general pcr_nums
resimpy_general -r pcr_nums -rs umi+seq -perm_num 3 -umiup 1 -umiul 10 -umi_num 50 -seq_len 20 -pcr_num 8 -pcr_err 0.0001 -seq_err 0.0001 -ampl_rate 0.85 -sim_thres 3 -spl_rate 1 -pcr_nums 6;7;8;9;10;11;12;13;14 -out_dir ./
# resimpy_general umi_lens
resimpy_general -r umi_lens -rs umi+seq -perm_num 3 -umiup 1 -umiul 10 -umi_num 50 -seq_len 20 -pcr_num 8 -pcr_err 0.0001 -seq_err 0.0001 -ampl_rate 0.85 -sim_thres 3 -spl_rate 1 -umi_lens 6;7;8;9;10;11;12 -out_dir ./
```
## Contact
Homepage: https://www.ndorms.ox.ac.uk/team/adam-cribbs
<style>
code {
white-space : pre-wrap !important;
word-break: break-word;
}
</style> | /resimpyx-0.0.2.tar.gz/resimpyx-0.0.2/README.md | 0.727879 | 0.873485 | README.md | pypi |
__version__ = "v1.0"
__copyright__ = "Copyright 2023"
__license__ = "MIT"
__lab__ = "cribbslab"
import numpy as np
from functools import wraps
class number(object):
    """
    Decorator class that augments a sampling-recipe function: after the
    wrapped function returns its recipe dict, a ``spl_num`` entry is added
    holding the number of molecules drawn from the configured distribution
    (``n`` = size of ``res['data']``, ``p`` = ``res['ampl_rate']``).
    """

    def __init__(self, *args, **kwargs):
        # kwargs must include 'type', the distribution name used by __call__
        self.args = args
        self.kwargs = kwargs

    def __call__(self, deal):
        """
        Decorate ``deal``; the wrapper adds ``spl_num`` to its result dict.

        :raises KeyError: if no 'type' keyword was given to the decorator
        :raises ValueError: if the distribution type is unsupported
        """
        dist_type = self.kwargs['type']
        # BUGFIX: the original mapped both 'binomial' and 'uniform' to
        # self.binomial (the 'uniform' branch looked like a copy-paste slip,
        # but self.uniform has an incompatible signature, so that mapping is
        # preserved); unknown types previously fell through silently and
        # crashed later with an unbound-name NameError — fail fast instead.
        if dist_type in ('binomial', 'uniform'):
            distrib = self.binomial
        else:
            raise ValueError("unsupported distribution type: {}".format(dist_type))

        @wraps(deal)
        def switch(ph, *args, **kwargs):
            print('======>numbering...')
            res = deal(ph, **kwargs)
            # draw how many reads survive amplification of the whole pool
            res['spl_num'] = distrib(
                n=len(res['data']),
                p=res['ampl_rate'],
            )
            return res
        return switch

    def binomial(self, n, p, use_seed=True, seed=1):
        """
        Draw one binomial sample.

        :param n: number of trials
        :param p: probability of success per trial
        :param use_seed: when True, a fresh RandomState seeded with ``seed``
            is used, making the draw deterministic
        :param seed: seed for the deterministic path
        """
        if use_seed:
            state = np.random.RandomState(seed)
            return state.binomial(n, p)
        return np.random.binomial(n, p)

    def nbinomial(self, n, p, use_seed=True, seed=1):
        """
        Draw one negative-binomial sample.

        :param n: the number of successes to be expected; better, n = the
            total number of trials * p
        :param p: the prob of success
        :param use_seed: when True, the draw is deterministic (see binomial)
        :param seed: seed for the deterministic path
        """
        if use_seed:
            state = np.random.RandomState(seed)
            return state.negative_binomial(n, p)
        return np.random.negative_binomial(n, p)

    def uniform(self, low, high, num, use_seed=True, seed=1):
        """
        Draw ``num`` integers uniformly from [low, high).

        :param use_seed: when True, the draw is deterministic (see binomial)
        :param seed: seed for the deterministic path
        """
        if use_seed:
            state = np.random.RandomState(seed)
            return state.randint(low=low, high=high, size=num)
        return np.random.randint(low=low, high=high, size=num)

    def choice(self, high, num, replace=False):
        """
        Sample ``num`` values from range(high), without replacement by
        default. Not seeded — results vary between calls.
        """
        from numpy.random import default_rng
        rng = default_rng()
        return rng.choice(high, size=num, replace=replace)
from .base_request import BaseRequest
from .settings import Settings
from . import exceptions
TOKEN_KEY = 'token'
class Auth(object):
    """
    This class implements all authentication functions for Resin Python SDK.
    """

    # Class-level cache of decoded user details. NOTE: assigning
    # self._user_detail_cache in the methods below creates an *instance*
    # attribute shadowing this class attribute for that instance only.
    _user_detail_cache = {}

    def __init__(self):
        self.base_request = BaseRequest()
        self.settings = Settings()

    def __get_user_data(self):
        """
        Get user details for the current token from the API (cached after
        the first successful call).

        Returns:
            dict: user details.

        Raises:
            NotLoggedIn: if there is no user logged in.
        """
        if not self._user_detail_cache:
            self._user_detail_cache = self.base_request.request(
                'user/v1/whoami', 'get',
                endpoint=self.settings.get('api_endpoint')
            )
        return self._user_detail_cache

    def __get_property(self, element):
        """
        Get a single property from the cached user details.

        Args:
            element (str): property name.

        Returns:
            str: property value.

        Raises:
            InvalidOption: If getting a non-existent property.
            NotLoggedIn: if there is no user logged in.
        """
        if element in self.__get_user_data():
            return self._user_detail_cache[element]
        else:
            raise exceptions.InvalidOption(element)

    def login(self, **credentials):
        """
        Log into Resin.io using email and password, saving the auth token
        to Settings.

        Args:
            **credentials: credentials keyword arguments.
                username (str): Resin.io email.
                password (str): Password.

        Returns:
            None: the Auth Token is saved to Settings.

        Raises:
            LoginFailed: if the email or password is invalid.

        Examples:
            >>> credentials = {'username': '<your email>', 'password': '<your password>'}
            >>> resin.auth.login(**credentials)
        """
        token = self.authenticate(**credentials).decode("utf-8")
        # drop any cached details for the previously logged-in user
        self._user_detail_cache = {}
        self.settings.set(TOKEN_KEY, token)

    def login_with_token(self, token):
        """
        Log into Resin.io using an Auth Token (found in the Preferences
        section of the Resin.io Dashboard), saving it to Settings.

        Args:
            token (str): Auth Token.

        Returns:
            None: the Auth Token is saved to Settings.

        Raises:
            MalformedToken: if token is invalid.

        Examples:
            >>> resin.auth.login_with_token('<your token>')
        """
        self._user_detail_cache = {}
        self.settings.set(TOKEN_KEY, token)

    def who_am_i(self):
        """
        Retrieve the username of the logged in user.

        Returns:
            str: username.

        Raises:
            NotLoggedIn: if there is no user logged in.

        Examples:
            >>> resin.auth.who_am_i()
            u'g_trong_nghia_nguyen'
        """
        return self.__get_property('username')

    def authenticate(self, **credentials):
        """
        Authenticate the provided credentials against the API.

        You should use Auth.login when possible, as it takes care of
        saving the Auth Token and resetting the user cache as well.

        Args:
            **credentials: credentials keyword arguments.
                username (str): Resin.io username.
                password (str): Password.

        Returns:
            Auth Token (bytes-like; login() decodes it as utf-8).

        Raises:
            LoginFailed: if the username or password is invalid.

        Examples:
            >>> resin.auth.authenticate(username='<your email>', password='<your password>')
            'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...'
        """
        return self.base_request.request(
            'login_', 'POST', data=credentials,
            endpoint=self.settings.get('api_endpoint'), auth=False
        )

    def is_logged_in(self):
        """
        Check whether a user is currently logged in.

        Returns:
            bool: True if logged in, False otherwise.

        Examples:
            >>> if resin.auth.is_logged_in():
            ...     print('You are logged in!')
            ... else:
            ...     print('You are not logged in!')
        """
        try:
            self.__get_user_data()
            return True
        except (exceptions.RequestError, exceptions.Unauthorized):
            return False

    def get_token(self):
        """
        Retrieve the saved Auth Token.

        Returns:
            str: Auth Token.

        Raises:
            InvalidOption: if not logged in and there is no token in Settings.

        Examples:
            >>> resin.auth.get_token()
            'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...'
        """
        return self.settings.get(TOKEN_KEY)

    def get_user_id(self):
        """
        Retrieve the current logged in user's id.

        Returns:
            str: user id.

        Raises:
            InvalidOption: if not logged in.

        Examples:
            >>> resin.auth.get_user_id()
            5397
        """
        return self.__get_property('id')

    def get_email(self):
        """
        Retrieve the current logged in user's email.

        Returns:
            str: user email.

        Raises:
            InvalidOption: if not logged in.

        Examples:
            >>> resin.auth.get_email()
            u'resinpythonsdktest@gmail.com'
        """
        return self.__get_property('email')

    def log_out(self):
        """
        Log out from Resin.io by clearing the user cache and removing the
        saved token.

        Returns:
            bool: True if successful, False otherwise.

        Examples:
            >>> resin.auth.log_out()
            True
        """
        self._user_detail_cache = {}
        return self.settings.remove(TOKEN_KEY)

    def register(self, **credentials):
        """
        Register a new Resin.io account.

        Args:
            **credentials: credentials keyword arguments.
                email (str): email to register.
                password (str): Password.

        Returns:
            str: Auth Token for the new account.

        Raises:
            RequestError: if error occurs during registration.

        Examples:
            >>> credentials = {'email': '<your email>', 'password': '<your password>'}
            >>> resin.auth.register(**credentials)
            'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...'
        """
        return self.base_request.request(
            'user/register', 'POST', data=credentials,
            endpoint=self.settings.get('api_endpoint'), auth=False
        )
from .resources import Message
class ResinException(Exception):
    """
    Base exception for the Resin Python SDK.

    Attributes:
        code (str): exception code (the concrete class's name).
        exit_code (int): program exit code.
    """

    def __init__(self):
        self.code = type(self).__name__
        self.exit_code = 1
class MissingOption(ResinException):
    """
    Raised when a required option is missing from settings or the auth token.

    Args:
        option (str): option name.

    Attributes:
        message (str): error message.
    """

    def __init__(self, option):
        super(MissingOption, self).__init__()
        self.message = Message.MISSING_OPTION.format(option=option)
class InvalidOption(ResinException):
    """
    Raised when an option in settings or the auth token is invalid.

    Args:
        option (str): option name.

    Attributes:
        message (str): error message.
    """

    def __init__(self, option):
        super(InvalidOption, self).__init__()
        self.message = Message.INVALID_OPTION.format(option=option)
class NonAllowedOption(ResinException):
    """
    Raised for a non-allowed option in the parameters for downloading device OS.

    Args:
        option (str): option name.

    Attributes:
        message (str): error message.
    """

    def __init__(self, option):
        super(NonAllowedOption, self).__init__()
        self.message = Message.NON_ALLOWED_OPTION.format(option=option)
class InvalidDeviceType(ResinException):
    """
    Raised when an invalid device type is requested.

    Args:
        dev_type (str): device type.

    Attributes:
        message (str): error message.
    """

    def __init__(self, dev_type):
        super(InvalidDeviceType, self).__init__()
        self.message = Message.INVALID_DEVICE_TYPE.format(dev_type=dev_type)
class MalformedToken(ResinException):
    """
    Raised when an auth token cannot be parsed.

    Args:
        token (str): token.

    Attributes:
        message (str): error message.
    """

    def __init__(self, token):
        super(MalformedToken, self).__init__()
        self.message = Message.MALFORMED_TOKEN.format(token=token)
class ApplicationNotFound(ResinException):
    """
    Raised when no application matches the given identifier.

    Args:
        application (str): application detail (application name or id).

    Attributes:
        message (str): error message.
    """

    def __init__(self, application):
        super(ApplicationNotFound, self).__init__()
        self.message = Message.APPLICATION_NOT_FOUND.format(
            application=application)
class DeviceNotFound(ResinException):
    """
    Raised when no device matches the given identifier.

    Args:
        uuid (str): device uuid.

    Attributes:
        message (str): error message.
    """

    def __init__(self, uuid):
        super(DeviceNotFound, self).__init__()
        self.message = Message.DEVICE_NOT_FOUND.format(uuid=uuid)
class KeyNotFound(ResinException):
    """
    Raised when an ssh key is not found.

    Args:
        key (str): ssh key id.

    Attributes:
        message (str): error message.
    """

    def __init__(self, key):
        super(KeyNotFound, self).__init__()
        self.message = Message.KEY_NOT_FOUND.format(key=key)
class RequestError(ResinException):
    """
    Raised when an API request fails.

    Args:
        body (str): response body.

    Attributes:
        message (str): error message.
    """

    def __init__(self, body):
        super(RequestError, self).__init__()
        self.message = Message.REQUEST_ERROR.format(body=body)
class NotLoggedIn(ResinException):
    """
    Raised when no user is logged in.

    Attributes:
        message (str): error message.
    """

    def __init__(self):
        super(NotLoggedIn, self).__init__()
        self.message = Message.NOT_LOGGED_IN
class Unauthorized(ResinException):
    """
    Raised when no user is logged in and no Resin API Key is provided.

    Attributes:
        message (str): error message.
    """

    def __init__(self):
        super(Unauthorized, self).__init__()
        self.message = Message.UNAUTHORIZED
class LoginFailed(ResinException):
    """
    Raised when a login attempt is unsuccessful.
    Attributes:
        code (str): exception code.
        exit_code (int): program exit code.
        message (str): error message.
    """
    def __init__(self):
        super(LoginFailed, self).__init__()
        self.message = Message.LOGIN_FAILED
class DeviceOffline(ResinException):
    """
    Raised when the targeted device is offline.
    Args:
        uuid (str): device uuid.
    Attributes:
        message (str): error message.
    """
    def __init__(self, uuid):
        super(DeviceOffline, self).__init__()
        template = Message.DEVICE_OFFLINE
        self.message = template.format(uuid=uuid)
class DeviceNotWebAccessible(ResinException):
    """
    Raised when the targeted device is not reachable over the web.
    Args:
        uuid (str): device uuid.
    Attributes:
        message (str): error message.
    """
    def __init__(self, uuid):
        super(DeviceNotWebAccessible, self).__init__()
        template = Message.DEVICE_NOT_WEB_ACCESSIBLE
        self.message = template.format(uuid=uuid)
class IncompatibleApplication(ResinException):
    """
    Raised when moving a device to an application with a different
    device-type.
    Args:
        application (str): application name.
    Attributes:
        message (str): error message.
    """
    def __init__(self, application):
        super(IncompatibleApplication, self).__init__()
        template = Message.INCOMPATIBLE_APPLICATION
        self.message = template.format(application=application)
class UnsupportedFunction(ResinException):
    """
    Raised when invoking a function not supported by the running
    supervisor version.
    Args:
        required_version (str): required supervisor version.
        current_version (str): current supervisor version.
    Attributes:
        message (str): error message.
    """
    def __init__(self, required_version, current_version):
        super(UnsupportedFunction, self).__init__()
        template = Message.SUPERVISOR_VERSION_ERROR
        self.message = template.format(
            req_version=required_version, cur_version=current_version
        )
class AmbiguousApplication(ResinException):
    """
    Exception type for an ambiguous application (the name or id matches
    more than one application).
    Args:
        application (str): application name.
    Attributes:
        message (str): error message.
    """
    def __init__(self, application):
        super(AmbiguousApplication, self).__init__()
        self.message = Message.AMBIGUOUS_APPLICATION.format(application=application)
class AmbiguousDevice(ResinException):
    """
    Exception type for an ambiguous device (the identifier matches more
    than one device).
    Args:
        uuid (str): device uuid.
    Attributes:
        message (str): error message.
    """
    def __init__(self, uuid):
        super(AmbiguousDevice, self).__init__()
        self.message = Message.AMBIGUOUS_DEVICE.format(uuid=uuid)
class InvalidParameter(ResinException):
    """
    Exception type for an invalid parameter value.
    Args:
        parameter (str): parameter name.
        value (str): provided value.
    Attributes:
        code (str): exception code.
        exit_code (int): program exit code.
        message (str): error message.
    """
    def __init__(self, parameter, value):
        super(InvalidParameter, self).__init__()
        self.message = Message.INVALID_PARAMETER.format(parameter=parameter, value=value)
class ImageNotFound(ResinException):
    """
    Exception type for image not found.
    Args:
        image_id (str): image id.
    Attributes:
        message (str): error message.
    """
    def __init__(self, image_id):
        super(ImageNotFound, self).__init__()
        self.message = Message.IMAGE_NOT_FOUND.format(id=image_id)
class ReleaseNotFound(ResinException):
    """
    Exception type for release not found.
    Args:
        release_id (str): release id.
    Attributes:
        message (str): error message.
    """
    def __init__(self, release_id):
        super(ReleaseNotFound, self).__init__()
        self.message = Message.RELEASE_NOT_FOUND.format(id=release_id)
class ServiceNotFound(ResinException):
    """
    Exception type for service not found.
    Args:
        service_id (str): service id.
    Attributes:
        message (str): error message.
    """
    def __init__(self, service_id):
        super(ServiceNotFound, self).__init__()
        self.message = Message.SERVICE_NOT_FOUND.format(id=service_id)
class InvalidApplicationType(ResinException):
    """
    Exception type for an invalid application type.
    Args:
        app_type (str): application type.
    Attributes:
        message (str): error message.
    """
    def __init__(self, app_type):
        super(InvalidApplicationType, self).__init__()
        self.message = Message.INVALID_APPLICATION_TYPE.format(app_type=app_type)
class UnsupportedFeature(ResinException):
    """
    Exception raised when a requested feature is not supported.
    Attributes:
        message (str): error message.
    """
    def __init__(self):
        super(UnsupportedFeature, self).__init__()
        self.message = Message.UNSUPPORTED_FEATURE | /resin-sdk-5.1.3.zip/resin-sdk-5.1.3/resin/exceptions.py | 0.805785 | 0.156169 | exceptions.py | pypi |
import sys
from ..base_request import BaseRequest
from ..settings import Settings
from .. import exceptions
class Image(object):
"""
This class implements image model for Resin Python SDK.
"""
def __init__(self):
self.base_request = BaseRequest()
self.settings = Settings()
def __get_by_option(self, key, value, include_logs=False):
"""
Private function to get a specific image using any possible key.
Args:
key (str): query field.
value (str): key's value.
include_logs (Optional[bool]): Defaults to False since build log may be very large. True if user wants to include build log in image info.
Returns:
dict: image info.
Raises:
ImageNotFound: if image couldn't be found.
"""
params = {
'filter': key,
'eq': value
}
image = self.base_request.request(
'image', 'GET', params=params,
endpoint=self.settings.get('pine_endpoint')
)
if image['d']:
if include_logs:
return image['d'][0]
else:
image['d'][0].pop('build_log', None)
return image['d'][0]
else:
raise exceptions.ImageNotFound(key)
def get(self, id):
"""
Get a specific image.
Args:
id (str): image id.
Returns:
dict: image info.
Raises:
ImageNotFound: if image couldn't be found.
"""
image = self.__get_by_option('id', id)
# Only return selected fields, build_log is not included by default since they can be very large.
selected_fields = [
'id',
'content_hash',
'dockerfile',
'project_type',
'status',
'error_message',
'image_size',
'created_at',
'push_timestamp',
'start_timestamp',
'end_timestamp'
]
return ({k: image[k] for k in selected_fields})
def get_log(self, id):
"""
Get the build log from an image.
Args:
id (str): image id.
Returns:
str: build log.
Raises:
ImageNotFound: if image couldn't be found.
"""
return self.__get_by_option('id', id, include_logs=True)['build_log'] | /resin-sdk-5.1.3.zip/resin-sdk-5.1.3/resin/models/image.py | 0.664431 | 0.180161 | image.py | pypi |
from loguru import logger
from typing import List, Dict, Any
from pathlib import Path
import pandas as pd
from resistics.time import TimeMetadata
class TimeMetadataSingle(TimeMetadata):
    """
    TimeMetadata class for a single file in a multi data file recording.
    In most cases, individual data formats may inherit from this as there
    could be other parameters that are useful to save per data file.
    """
    data_file: str
    """The name of a single data file in a multi file recording"""
class TimeMetadataMerge(TimeMetadata):
    """
    This is an extension of TimeMetadata for situations where a single
    continuous recording has been split into multiple data files.
    This is applicable to SPAM data as well as Lemi B423 for example.
    To keep track of the metadata about all the contributing files, this
    extension to TimeMetadata adds a dictionary storing file specific details
    such as first and last time, specific scalings or reading parameters etc.
    """
    data_table: Dict[str, Any]
    """The data table that will help with scaling and selecting data files"""
def validate_consistency(dir_path: Path, metadata_list: List[TimeMetadata]) -> bool:
    """
    Validate multi file metadata with each other to ensure they are consistent
    The following properties are required to match across all files:
    - Sampling frequency
    - Number of channels
    - The channels themselves
    Parameters
    ----------
    dir_path : Path
        The data path
    metadata_list : List[TimeMetadata]
        List of TimeMetadata, one for each data file in a continuous recording
    Returns
    -------
    bool
        True if validation was successful, otherwise raises an Exception
    Raises
    ------
    MetadataReadError
        If multiple values are found for sampling frequency
    MetadataReadError
        If multiple values are found for number of chans
    MetadataReadError
        If different XTR files have different channels
    """
    from resistics.errors import MetadataReadError
    set_fs = set(metadata.fs for metadata in metadata_list)
    if len(set_fs) > 1:
        raise MetadataReadError(dir_path, f"More than one fs, {set_fs}")
    set_n_chans = set(metadata.n_chans for metadata in metadata_list)
    if len(set_n_chans) > 1:
        raise MetadataReadError(
            dir_path, f"Inconsistent number of channels {set_n_chans}"
        )
    set_chans = set(", ".join(metadata.chans) for metadata in metadata_list)
    if len(set_chans) > 1:
        raise MetadataReadError(dir_path, f"Inconsistent channels {set_chans}")
    return True
def validate_continuous(
    dir_path: Path, metadata_list: List[TimeMetadataSingle]
) -> bool:
    """
    Validate that metadata is continuous
    For data formats such as SPAM and Lemi B423 which separate a single
    continuous recording into multiple data files, it needs to be validated that
    there is no missing data.
    This function validates that metadata from each individual data file does
    define a single continuous recording with no missing data.
    Parameters
    ----------
    dir_path : Path
        The directory path
    metadata_list : List[TimeMetadata]
        List of TimeMetadata with metadata from a set of data files that
        constitute a single continuous recording
    Returns
    -------
    bool
        True if recording is continuous
    Raises
    ------
    MetadataReadError
        If gaps were found
    """
    from resistics.errors import MetadataReadError
    from resistics.sampling import to_timedelta
    if len(metadata_list) == 0:
        raise MetadataReadError(dir_path, "No metadata in list")
    if len(metadata_list) == 1:
        # a single file is trivially continuous
        return True
    # the sample period; consecutive files should start exactly one sample
    # period after the previous file's last sample
    dt = to_timedelta(1 / metadata_list[0].fs)
    data = [(x.data_file, x.first_time, x.last_time) for x in metadata_list]
    df = pd.DataFrame(data=data, columns=["file", "first_time", "last_time"])
    df = df.sort_values("first_time")
    # difference between each file's first time and the previous file's last
    # time; the first row has no predecessor and is dropped
    time_chk = (df["first_time"] - df.shift(1)["last_time"]).dropna()
    # subtract one sample period; any positive remainder is a true gap
    time_chk = time_chk - dt
    gaps = time_chk[time_chk > to_timedelta(0)]
    if len(gaps.index) > 0:
        logger.error("Found gaps between files...")
        data_files = df["file"].values
        # NOTE(review): gaps.index carries the pre-sort RangeIndex labels but
        # is used to position into the post-sort values array - this assumes
        # metadata_list was already time ordered; verify for unsorted input
        info = pd.DataFrame(
            {
                "From": data_files[gaps.index - 1],
                "To": data_files[gaps.index],
                "Gap": gaps.values,
            }
        )
        logger.error(f"\n{info.to_string(index=False)}")
        raise MetadataReadError(dir_path, "Gaps found, unable to read data")
    return True
def add_cumulative_samples(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add first_sample/last_sample columns from cumulative sample counts
    Useful for multi file recordings to decide which files must be read to
    cover a requested sample range.
    Parameters
    ----------
    df : pd.DataFrame
        Data table with an n_samples column
    Returns
    -------
    pd.DataFrame
        Data table with a first_sample and last_sample column added
    """
    # each file starts where the running total of all previous files ends
    running_total = df["n_samples"].cumsum()
    first = running_total.shift(1).fillna(value=0).astype(int)
    df["first_sample"] = first
    df["last_sample"] = first + df["n_samples"] - 1
    return df
def samples_to_sources(
dir_path: Path,
df: pd.DataFrame,
from_sample: int,
to_sample: int,
) -> pd.DataFrame:
"""
Find the data sources for a sample range
This can be used for a multi-file measurement or for a measurement that is
split up into multiple records. It maps a sample range defined by
from_sample and to_sample to the sources and returns a DataFrame providing
information about the samples that need to be read from each source (file
or record) to cover the range.
Parameters
----------
dir_path : Path
The directory with the data
df : pd.DataFrame
Table of all the sources and their sample ranges
from_sample : int
Reading from sample
to_sample : int
Reading to sample
Returns
-------
pd.DataFrame
DataFrame with data files to read as indices and reading information
as columns such as number of samples to read, channel scalings etc.
Raises
------
TimeDataReadError
If somehow there's a mismatch in the total number of samples to read
per file and the expected number of samples.
"""
from resistics.errors import TimeDataReadError
df = df[~(df["first_sample"] > to_sample)]
df = df[~(df["last_sample"] < from_sample)]
# get read from samples
# correct those where the data file first sample is before the from sample
df["read_from"] = 0
adjust_from = df["first_sample"] < from_sample
df.loc[adjust_from, "read_from"] = from_sample - df["first_sample"]
# get read to samples
# correct those where the data file last sample is after the to sample
df["read_to"] = df["n_samples"] - 1
adjust_to = df["last_sample"] > to_sample
df.loc[adjust_to, "read_to"] = to_sample - df["first_sample"]
df["n_samples_read"] = df["read_to"] - df["read_from"] + 1
if df["n_samples_read"].sum() != to_sample - from_sample + 1:
sum_files = df["n_samples_read"].sum()
expected = to_sample - from_sample + 1
raise TimeDataReadError(
dir_path, f"Samples to read {sum_files} does not match expected {expected}"
)
return df | /resistics_readers-0.1.3-py3-none-any.whl/resistics_readers/multifile.py | 0.935487 | 0.709774 | multifile.py | pypi |
from loguru import logger
from typing import List, Dict, Any, Tuple
from pathlib import Path
import math
import numpy as np
import pandas as pd
from resistics.errors import CalibrationFileReadError
from resistics.time import ChanMetadata
from resistics.calibrate import SensorCalibrationReader, CalibrationData
class SensorCalibrationMetronix(SensorCalibrationReader):
"""
Metronix calibration data has the following units
- F [Hz]
- Magnitude [V/nT*Hz]
- Phase [deg]
For both chopper on and off.
Data is returned with units:
- F [Hz]
- Magnitude [mV/nT]
- Phase [radians]
Static gain is set to 1 as this is already included in the magnitude
It is recommended to do extension of the calibration data here as the
calibration data should be extended in the original units.
"""
extension: str = ".TXT"
file_str: str = "$sensor$serial$extension"
extend: bool = True
extend_low: float = 0.00001
extend_high: float = 1000000
def read_calibration_data(
self, file_path: Path, chan_metadata: ChanMetadata
) -> CalibrationData:
"""
Read data from metronix calibration file
Parameters
----------
file_path : Path
The file path of the calibration file
chan_metadata : ChanMetadata
The channel metadata for the channel to be calibrated
Returns
-------
CalibrationData
The calibration data
"""
with file_path.open("r") as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
data_dict = self._read_metadata(lines)
chopper = chan_metadata.chopper
data_dict["chopper"] = chopper
logger.debug(f"Reading calibration data for chopper = {chopper}")
df = self._read_data(file_path, lines, chopper)
data_dict["frequency"] = df.index.values.tolist()
data_dict["magnitude"] = df["magnitude"].values.tolist()
data_dict["phase"] = df["phase"].values.tolist()
data_dict["file_path"] = file_path
return CalibrationData(**data_dict)
def _read_metadata(self, lines: List[str]) -> Dict[str, Any]:
"""Get the calibration data metadata"""
sensor, serial = self._get_sensor_details(lines)
return {
"serial": serial,
"sensor": sensor,
"static_gain": 1,
"magnitude_unit": "mV/nT",
"phase_unit": "radians",
}
def _get_sensor_details(self, lines: List[str]) -> Tuple[int, str]:
"""Get sensor and serial details"""
sensor = ""
serial = 1
magnetometer = [x for x in lines if "Magnetometer" in x]
if len(magnetometer) != 1:
return serial, sensor
magnetometer_line = magnetometer[0]
try:
split1 = magnetometer_line.split(":")[1].strip()
split2 = split1.split()[0]
if "#" in split1:
tmp = split2.split("#")
sensor = tmp[0].strip()
serial = int(tmp[1].strip())
else:
serial = int(split2.strip())
except Exception:
logger.warning("Unable to read serial number from calibration file")
return sensor, serial
def _read_data(
self, file_path: Path, lines: List[str], chopper: bool
) -> pd.DataFrame:
"""Read the calibration data"""
if chopper:
data_lines = self._get_chopper_on_data(file_path, lines)
else:
data_lines = self._get_chopper_off_data(file_path, lines)
# convert lines to data frame
data = np.array([x.split() for x in data_lines], dtype=np.float32)
df = pd.DataFrame(data=data, columns=["frequency", "magnitude", "phase"])
df = df.set_index("frequency").sort_index()
if self.extend:
df = self._extend_data(df)
# unit manipulation - change V/(nT*Hz) to mV/nT
df["magnitude"] = df["magnitude"] * df.index.values * 1000
# unit manipulation - change phase to radians
df["phase"] = df["phase"] * (math.pi / 180)
return df
def _get_chopper_on_data(self, file_path: Path, lines: List[str]) -> List[str]:
"""Get chopper on data"""
chopper_on_line = None
for il, line in enumerate(lines):
if "Chopper On" in line:
chopper_on_line = il
break
if chopper_on_line is None:
raise CalibrationFileReadError(file_path, "Chopper on line not found")
return self._get_data_lines(lines, chopper_on_line + 1)
def _get_chopper_off_data(self, file_path: Path, lines: List[str]) -> List[str]:
"""Get chopper off data"""
chopper_off_line = None
for il, line in enumerate(lines):
if "Chopper Off" in line:
chopper_off_line = il
break
if chopper_off_line is None:
raise CalibrationFileReadError(file_path, "Chopper off line not found")
return self._get_data_lines(lines, chopper_off_line + 1)
def _get_data_lines(self, lines: List[str], idx: int) -> List[str]:
"""Get data lines from the calibration file"""
data_lines: List = []
while idx < len(lines) and lines[idx] != "":
data_lines.append(lines[idx])
idx += 1
return data_lines
def _extend_data(self, df: pd.DataFrame) -> pd.DataFrame:
"""Extend the calibration data before adjusting units"""
if self.extend_low < df.index.min():
df = df.append(
pd.DataFrame(
data={"magnitude": np.nan, "phase": np.nan},
index=[self.extend_low],
)
)
if self.extend_high > df.index.max():
df = df.append(
pd.DataFrame(
data={"magnitude": np.nan, "phase": np.nan},
index=[self.extend_high],
)
)
return df.sort_index().ffill().bfill() | /resistics_readers-0.1.3-py3-none-any.whl/resistics_readers/metronix/calibration.py | 0.861217 | 0.57081 | calibration.py | pypi |
from loguru import logger
from typing import List, Dict, Optional
from pathlib import Path
import struct
import numpy as np
import pandas as pd
from resistics.sampling import RSDateTime
from resistics.time import TimeMetadata, TimeData, TimeReader
from resistics_readers.multifile import validate_consistency, validate_continuous
from resistics_readers.multifile import TimeMetadataSingle, TimeMetadataMerge
B423_CHANS = ["Hx", "Hy", "Hz", "Ex", "Ey"]
B423_CHAN_TYPES = {
"E1": "electric",
"E2": "electric",
"E3": "electric",
"E4": "electric",
"Ex": "electric",
"Ey": "electric",
"Hx": "magnetic",
"Hy": "magnetic",
"Hz": "magnetic",
}
B423_RECORD_BYTES = 30
B423_HEADER_LENGTH = 1024
B423_MULT = {"Hx": "Kmx", "Hy": "Kmy", "Hz": "Kmz", "Ex": "Ke1", "Ey": "Ke2"}
B423_ADD = {"Hx": "Ax", "Hy": "Ay", "Hz": "Az", "Ex": "Ae1", "Ey": "Ae2"}
class TimeMetadataB423(TimeMetadataSingle):
    """
    Extension of TimeMetadataSingle describing a single Lemi B423 data file,
    adding the data start offset and the per-file ASCII header scalings.
    """
    data_byte_start: int
    """The byte offset from the beginning of the file to the start of the data"""
    scalings: Dict[str, float]
    """Scalings in the B423 ASCII header"""
def make_subdir_B423_metadata(
    dir_path: Path,
    fs: float,
    hx_serial: int = 0,
    hy_serial: int = 0,
    hz_serial: int = 0,
    h_gain: int = 1,
    dx: float = 1,
    dy: float = 1,
    folders: Optional[List[str]] = None,
) -> None:
    """
    Construct B423 headers for sub directories of a folder
    Parameters
    ----------
    dir_path : Path
        The path to the folder
    fs : float
        The sampling frequency, Hz
    hx_serial : str, optional
        The x direction magnetic serial, used for calibration
    hy_serial : str, optional
        The y direction magnetic serial, used for calibration
    hz_serial : str, optional
        The z direction magnetic serial, used for calibration
    h_gain : int
        Any gain on the magnetic channels which will need to be removed
    dx : float, optional
        Distance between x electrodes
    dy : float, optional
        Distance between y electrodes
    folders : List, optional
        An optional list of subfolders, by default None. If None, all the
        subfolders will be processed
    """
    from resistics.common import dir_subdirs
    # resolve the measurement directories to process
    if folders is None:
        targets = dir_subdirs(dir_path)
    else:
        targets = [dir_path / name for name in folders]
    for target in targets:
        make_B423_metadata(target, fs, hx_serial, hy_serial, hz_serial, h_gain, dx, dy)
def make_B423_metadata(
    dir_path: Path,
    fs: float,
    hx_serial: int = 0,
    hy_serial: int = 0,
    hz_serial: int = 0,
    h_gain: int = 1,
    dx: float = 1,
    dy: float = 1,
) -> None:
    """
    Read a single B423 measurement directory, construct and write out metadata
    Parameters
    ----------
    dir_path : Path
        The path to the measurement
    fs : float
        The sampling frequency, Hz
    hx_serial : str, optional
        The x direction magnetic serial, used for calibration
    hy_serial : str, optional
        The y direction magnetic serial, used for calibration
    hz_serial : str, optional
        The z direction magnetic serial, used for calibration
    h_gain : int
        Any gain on the magnetic channels which will need to be removed
    dx : float, optional
        Distance between x electrodes
    dy : float, optional
        Distance between y electrodes
    """
    # read per-file headers, then check they describe a single consistent
    # and gap-free recording before merging into one metadata document
    metadata_list = _get_B423_metadata_list(dir_path, fs)
    validate_consistency(dir_path, metadata_list)
    validate_continuous(dir_path, metadata_list)
    metadata = _merge_metadata(
        metadata_list, hx_serial, hy_serial, hz_serial, h_gain, dx, dy
    )
    logger.info(f"Writing metadata in {dir_path}")
    metadata.write(dir_path / "metadata.json")
def _get_B423_metadata_list(dir_path: Path, fs: float) -> List[TimeMetadataB423]:
    """
    Read the headers of every B423 file in a directory
    Parameters
    ----------
    dir_path : Path
        The data path
    fs : float
        The sampling frequency, Hz
    Returns
    -------
    List[TimeMetadataB423]
        List of TimeMetadata, one per data file
    """
    metadata_list: List[TimeMetadataB423] = []
    for data_path in dir_path.glob("*.B423"):
        logger.debug(f"Reading data file {data_path}")
        metadata_list.append(_read_B423_headers(data_path, fs))
    return metadata_list
def _read_B423_headers(
    data_path: Path,
    fs: float,
    data_byte_offset: Optional[int] = None,
    record_bytes: Optional[int] = None,
    chans: Optional[List[str]] = None,
) -> TimeMetadataB423:
    """
    Get metadata from single B423 file headers
    Parameters
    ----------
    data_path : Path
        The data path to the file
    fs : float
        The sampling frequency, Hz
    data_byte_offset : int, optional
        The number of bytes to the start of the data, by default None
    record_bytes : int, optional
        The size of a single record, by default None
    chans : List[str]
        The channels in the data
    Returns
    -------
    TimeMetadataB423
        Metadata for the B423 file
    Raises
    ------
    TimeDataReadError
        If number of samples is non-integer
    TimeDataReadError
        If the first/last times do not match the number of samples
    """
    from resistics.errors import TimeDataReadError
    from resistics.sampling import to_n_samples
    if data_byte_offset is None:
        data_byte_offset = B423_HEADER_LENGTH
    if record_bytes is None:
        record_bytes = B423_RECORD_BYTES
    if chans is None:
        chans = B423_CHANS
    name = data_path.name
    f_size = data_path.stat().st_size
    time_dict = {"fs": fs, "data_file": name, "data_byte_start": data_byte_offset}
    n_samples = (f_size - data_byte_offset) / record_bytes
    if not n_samples.is_integer():
        # bug fix: the exception was previously constructed but never raised
        raise TimeDataReadError(data_path, f"Non-integer number of samples {n_samples}")
    time_dict["n_samples"] = int(n_samples)
    # use a context manager so the file handle is always closed (previously
    # the handle was opened and never closed)
    with data_path.open("rb") as f:
        ascii_bytes = f.read(data_byte_offset)
        ascii_metadata = _read_B423_ascii_headers(ascii_bytes)
        time_dict["northing"] = ascii_metadata.pop("Lat")
        time_dict["easting"] = ascii_metadata.pop("Lon")
        time_dict["elevation"] = ascii_metadata.pop("Alt")
        time_dict["scalings"] = ascii_metadata
        # first record timestamp follows the ASCII header; the last record
        # timestamp starts record_bytes before the end of the file
        time_dict["first_time"] = _get_B423_time(f.read(6), fs)
        f.seek(f_size - record_bytes, 0)
        time_dict["last_time"] = _get_B423_time(f.read(6), fs)
    if n_samples != to_n_samples(time_dict["last_time"] - time_dict["first_time"], fs):
        raise TimeDataReadError(data_path, "Number of samples mismatch")
    time_dict["chans"] = chans
    time_dict["chans_metadata"] = {}
    for chan in chans:
        time_dict["chans_metadata"][chan] = {
            "name": chan,
            "data_files": [name],
            "chan_type": B423_CHAN_TYPES[chan],
        }
    return TimeMetadataB423(**time_dict)
def _read_B423_ascii_headers(metadata_bytes: bytes) -> Dict[str, float]:
"""
Parse the ASCII part of the B423 file
Parameters
----------
metadata_bytes : bytes
The bytes string with the metadata
Returns
-------
Dict[str, float]
Parsed metadata which includes scalings and location information
"""
metadata_lines = metadata_bytes.decode().split("\r\n")
metadata_lines = [x.replace("%", "") for x in metadata_lines]
# scaling information
metadata = [x.split(" = ") for x in metadata_lines if "=" in x]
metadata = {x[0].strip(): float(x[1].strip()) for x in metadata}
# loxcation information
location = [x for x in metadata_lines if ("Lat" in x or "Lon" in x or "Alt" in x)]
location_dict = {x[0:3]: x[3:].split(",")[0] for x in location}
location_dict = {k: float(v.strip()) for k, v in location_dict.items()}
metadata.update(location_dict)
return metadata
def _get_B423_time(time_bytes: bytes, fs: float) -> RSDateTime:
    """
    Parse six bytes (4-byte POSIX timestamp, 2-byte sample number) to a time
    Standard-size format codes ("=L", "=H") are used because the bare native
    codes "L"/"H" are platform dependent ("L" is 8 bytes on LP64 systems such
    as 64-bit Linux) and fail to unpack the fixed 4- and 2-byte fields. This
    also matches the "=LH..." record format used when reading data records.
    """
    from datetime import datetime
    from resistics.sampling import to_datetime
    timestamp = struct.unpack("=L", time_bytes[0:4])[0]
    n_sample = struct.unpack("=H", time_bytes[4:])[0]
    return to_datetime(datetime.utcfromtimestamp(timestamp + (n_sample / fs)))
def _merge_metadata(
    metadata_list: List[TimeMetadataB423],
    hx_serial: int = 0,
    hy_serial: int = 0,
    hz_serial: int = 0,
    h_gain: int = 1,
    dx: float = 1,
    dy: float = 1,
) -> TimeMetadata:
    """Combine per-file metadata into one TimeMetadata for the recording"""
    merged = TimeMetadata(**metadata_list[0].dict())
    merged.first_time = min(x.first_time for x in metadata_list)
    merged.last_time = max(x.last_time for x in metadata_list)
    merged.n_samples = np.sum([x.n_samples for x in metadata_list])
    # channel level details: every channel lists all contributing files;
    # magnetic channels get serial and gain, electric channels dipole lengths
    data_files = [x.data_file for x in metadata_list]
    serials = {"Hx": hx_serial, "Hy": hy_serial, "Hz": hz_serial}
    dipoles = {"Ex": dx, "Ey": dy}
    for chan in merged.chans:
        chan_metadata = merged.chans_metadata[chan]
        chan_metadata.data_files = data_files
        if chan_metadata.magnetic():
            chan_metadata.gain1 = h_gain
            chan_metadata.serial = serials[chan]
        if chan_metadata.electric():
            chan_metadata.dipole_dist = dipoles[chan]
    return merged
class TimeReaderB423(TimeReader):
"""
Data reader for Lemi B423 data
There is no separate metadata file for Lemi B423 data detailing the sampling
frequency, the number of samples, the sensors etc.. Unfortunately, such a
metadata file is a pre-requisite for resistics. There are helper methods to
make one.
In situations where a Lemi B423 dataset is recorded in multiple files, it is
required that the recording is continuous.
Other important notes about Lemi B423 files
- 1024 bytes of ASCII metadata in the data file with scaling information
- Lemi B423 raw measurement data is signed long integer format
Important points about scalings
- Raw data is integer counts for electric and magnetic channels
- Scalings in B423 files convert electric channels to uV (microvolt)
- Scalings in B423 files convert magnetic channels to millivolts
- Scaling for the magnetic channels in B423 files leaves internal gain on
- Internal gain should be specified when creating metadata
If apply_scaling is False, data will be returned in:
- microvolts for the electric channels
- millivolts for the magnetic with the gain applied
Which is equivalent to applying the scalings in the B423 headers
With apply_scaling True, the following additional scaling will be applied:
- Electric channels converted to mV
- Dipole length corrections are applied to electric channels
- Magnetic channel gains are removed
.. note::
For more information about Lemi B423 format, please see:
http://lemisensors.com/?p=485
"""
extension = ".B423"
record_bytes: int = B423_RECORD_BYTES
def read_metadata(self, dir_path: Path) -> TimeMetadataMerge:
"""
Read metadata
Parameters
----------
dir_path : Path
The data directory
Returns
-------
TimeMetadataMerge
TimeMetadata with a data table
Raises
------
MetadataReadError
If the channels are not correct for B423
TimeDataReadError
If not all data files exist
TimeDataReadError
If extensions do not match
"""
from resistics.errors import MetadataReadError, TimeDataReadError
from resistics_readers.multifile import validate_consistency
from resistics_readers.multifile import validate_continuous
metadata = TimeMetadata.parse_file(dir_path / "metadata.json")
if metadata.chans != B423_CHANS:
raise MetadataReadError(
dir_path, f"Channels {metadata.chans} != B423 chans {B423_CHANS}"
)
metadata_list = _get_B423_metadata_list(dir_path, metadata.fs)
validate_consistency(dir_path, metadata_list)
validate_continuous(dir_path, metadata_list)
data_table = self._generate_table(metadata_list)
metadata_dict = metadata.dict()
metadata_dict["data_table"] = data_table.to_dict()
metadata = TimeMetadataMerge(**metadata_dict)
if not self._check_data_files(dir_path, metadata):
raise TimeDataReadError(dir_path, "All data files do not exist")
if not self._check_extensions(dir_path, metadata):
raise TimeDataReadError(dir_path, f"Data file suffix not {self.extension}")
return metadata
def _generate_table(self, metadata_list: List[TimeMetadataB423]) -> pd.DataFrame:
"""
Generate a table mapping RAW file to first time, last time, number of
samples, data byte offsets and scalings
Parameters
----------
metadata_list : List[TimeMetadataXTR]
List of TimeMetadataXTR, one for each XTR/RAW file combination
Returns
-------
pd.DataFrame
The table mapping data file to various properties
"""
from resistics_readers.multifile import add_cumulative_samples
df = pd.DataFrame()
df["data_file"] = [x.data_file for x in metadata_list]
df["first_time"] = [x.first_time for x in metadata_list]
df["last_time"] = [x.last_time for x in metadata_list]
df["n_samples"] = [x.n_samples for x in metadata_list]
df["data_byte_start"] = [x.data_byte_start for x in metadata_list]
# save scaling information
scalings = metadata_list[0].scalings.keys()
for scaling in scalings:
df[scaling] = [x.scalings[scaling] for x in metadata_list]
df = df.sort_values("first_time")
df = df.set_index("data_file")
return add_cumulative_samples(df)
def read_data(
self, dir_path: Path, metadata: TimeMetadata, read_from: int, read_to: int
) -> TimeData:
"""
Get data from data files
Lemi B423 data always has five channels, in order Hx, Hy, Hz, Ex, Ey.
The raw data is integer counts. However, additional scalings from the
B423 files are applied to give:
- microvolts for the electric channels
- millivolts for the magnetic with the gain applied
The scalings are as follows:
- Hx = (Hx * Kmx) + Ax
- Hx = (Hy * Kmy) + Ay
- Hx = (Hz * Kmz) + Az
- Ex = (Ex * Ke1) + Ae1
- Ey = (Ey * Ke2) + Ae2
Parameters
----------
dir_path : path
The directory path to read from
metadata : TimeMetadata
Time series data metadata
read_from : int
Sample to read data from
read_to : int
Sample to read data to
Returns
-------
TimeData
Time data object
"""
from resistics_readers.multifile import samples_to_sources
dtype = np.float32
n_samples = read_to - read_from + 1
messages = [f"Reading raw data from {dir_path}"]
messages.append(f"Sampling rate {metadata.fs} Hz")
# loop over B423 files and read data
data_table = pd.DataFrame(data=metadata.data_table)
df_to_read = samples_to_sources(dir_path, data_table, read_from, read_to)
data = np.empty(shape=(metadata.n_chans, n_samples), dtype=dtype)
sample = 0
for data_file, info in df_to_read.iterrows():
file_from = info.loc["read_from"]
file_to = info.loc["read_to"]
n_samples_file = info.loc["n_samples_read"]
data_byte_start = info.loc["data_byte_start"]
mult = np.array([info.loc[B423_MULT[chan]] for chan in metadata.chans])
add = np.array([info.loc[B423_ADD[chan]] for chan in metadata.chans])
messages.append(f"{data_file}: Reading samples {file_from} to {file_to}")
logger.debug(f"{data_file}: Reading samples {file_from} to {file_to}")
byte_read_start = data_byte_start + file_from * self.record_bytes
n_bytes_to_read = n_samples_file * self.record_bytes
with (dir_path / data_file).open("rb") as f:
f.seek(byte_read_start, 0)
data_bytes = f.read(n_bytes_to_read)
data_read = self._parse_records(metadata.chans, data_bytes)
data_read = (data_read * mult[:, None]) + add[:, None]
data[:, sample : sample + n_samples_file] = data_read
sample = sample + n_samples_file
metadata = self._get_return_metadata(metadata, read_from, read_to)
messages.append(f"From sample, time: {read_from}, {str(metadata.first_time)}")
messages.append(f"To sample, time: {read_to}, {str(metadata.last_time)}")
metadata.history.add_record(self._get_record(messages))
logger.info(f"Data successfully read from {dir_path}")
return TimeData(metadata, data)
def _parse_records(self, chans: List[str], data_bytes: bytes) -> np.ndarray:
"""
Read a number of B423 records from bytes
Records are blocks that repeat like:
SECOND_TIMESTAMP, SAMPLE_NUM [0-FS], HX, HY, HZ, EX, EY, PPS, PLL
These are interpreted to have byte types
L, H, l, l, l, l, l, h, h
Parameters
----------
chans : List[str]
The channels
data_bytes : bytes
The bytes string
Returns
-------
np.ndarray
Data array of size n_chans x n_records
"""
n_chans = len(chans)
n_bytes = len(data_bytes)
record_format = f"=LH{n_chans}l2h"
record_size = struct.calcsize(record_format)
logger.debug(
f"Unpacking {n_bytes} bytes, format {record_format}, size {record_size}"
)
return np.array(
[x[2 : 2 + n_chans] for x in struct.iter_unpack(record_format, data_bytes)],
dtype=np.float32,
).T
def scale_data(self, time_data: TimeData) -> TimeData:
"""
Get data scaled to physical values
resistics uses field units, meaning physical samples will return the
following:
- Electrical channels in mV/km
- Magnetic channels in mV
- To get magnetic fields in nT, calibration needs to be performed
Notes
-----
When Lemi data is read in, scaling in the headers is applied. Therefore,
the magnetic channels is in mV with gain applied and the electric
channels are in uV (microvolts). To complete the scaling to field units,
the below additional corrections need to be applied.
Electric channels need to divided by 1000 along with dipole length
division in km (east-west spacing and north-south spacing) to return
mV/km.
Magnetic channels need to be divided by the internal gain value which
should be set in the metadata
Parameters
----------
time_data : TimeData
Input time data
Returns
-------
TimeData
Time data in field units
"""
logger.info("Applying scaling to data to give field units")
messages = ["Scaling raw data to physical units"]
for chan in time_data.metadata.chans:
chan_metadata = time_data.metadata.chans_metadata[chan]
if chan_metadata.electric():
time_data[chan] = time_data[chan] / 1000.0
messages.append(
f"Dividing chan {chan} by 1000 to convert from uV to mV"
)
dipole_dist_km = chan_metadata.dipole_dist / 1_000
time_data[chan] = time_data[chan] / dipole_dist_km
messages.append(f"Dividing {chan} by dipole length {dipole_dist_km} km")
if chan_metadata.magnetic():
gain = chan_metadata.gain1
time_data[chan] = time_data[chan] / gain
messages.append(f"Dividing chan {chan} by {gain} to remove gain")
record = self._get_record(messages)
time_data.metadata.history.add_record(record)
return time_data | /resistics_readers-0.1.3-py3-none-any.whl/resistics_readers/lemi/b423.py | 0.941801 | 0.566678 | b423.py | pypi |
from loguru import logger
from typing import List, Optional, Tuple
from pathlib import Path
import numpy as np
import pandas as pd
from resistics.time import TimeMetadata, TimeData
from resistics_readers.multifile import validate_consistency, validate_continuous
from resistics_readers.multifile import TimeMetadataMerge
from resistics_readers.lemi.b423 import TimeMetadataB423, TimeReaderB423
# default channel labels for the four B423E electric channels
B423E_CHANS = ["E1", "E2", "E3", "E4"]
# bytes per data record: timestamp, sample number, 4 channels, PPS, PLL
B423E_RECORD_BYTES = 26
# bytes of ASCII header at the start of each B423E data file
B423E_HEADER_LENGTH = 1024
# header keys holding the per-channel multiplier and offset scalings
B423E_MULT = ["Ke1", "Ke2", "Ke3", "Ke4"]
B423E_ADD = ["Ae1", "Ae2", "Ae3", "Ae4"]
def make_sudbir_B423E_metadata(
    dir_path: Path,
    fs: float,
    chans: Optional[List[str]] = None,
    dx: float = 1,
    dy: float = 1,
    folders: Optional[List[str]] = None,
) -> None:
    """
    Generate metadata for every subdirectory in a folder

    Parameters
    ----------
    dir_path : Path
        Root directory
    fs : float
        Sampling frequency, Hz
    chans : Optional[List[str]], optional
        The channels, by default None
    dx : float, optional
        Dipole distance Ex, by default 1
    dy : float, optional
        Dipole distance Ey, by default 1
    folders : Optional[List[str]], optional
        An optional list of subfolders, by default None. If None, all the
        subfolders will be processed
    """
    from resistics.common import dir_subdirs

    selected_chans = B423E_CHANS if chans is None else chans
    if folders is None:
        sub_paths = dir_subdirs(dir_path)
    else:
        sub_paths = [dir_path / name for name in folders]
    for sub_path in sub_paths:
        make_B423E_metadata(sub_path, fs, chans=selected_chans, dx=dx, dy=dy)
def make_B423E_metadata(
    dir_path: Path,
    fs: float,
    chans: Optional[List[str]] = None,
    dx: float = 1,
    dy: float = 1,
) -> None:
    """
    Read a single B423E measurement directory, construct and write out metadata

    Parameters
    ----------
    dir_path : Path
        Directory path
    fs : float
        The sampling frequency, Hz
    chans : Optional[List[str]], optional
        Optional list of chans, by default None
    dx : float, optional
        Dipole distance Ex, by default 1
    dy : float, optional
        Dipole distance Ey, by default 1
    """
    per_file_metadata = _get_B423E_metadata_list(dir_path, fs, chans)
    # the files must be mutually consistent and continuous in time
    validate_consistency(dir_path, per_file_metadata)
    validate_continuous(dir_path, per_file_metadata)
    merged = _merge_metadata(per_file_metadata, dx, dy)
    logger.info(f"Writing metadata in {dir_path}")
    merged.write(dir_path / "metadata.json")
def _get_B423E_metadata_list(
    dir_path: Path, fs: float, chans: Optional[List[str]] = None
) -> List[TimeMetadataB423]:
    """
    Get list of TimeMetadataB423, one for each data file

    Parameters
    ----------
    dir_path : Path
        The data path
    fs : float
        The sampling frequency, Hz
    chans : Optional[List[str]]
        The channels, by default None. Standard channels are E1, E2, E3 and E4,
        but these have little meaning geophysically, so it is possible to
        set different labels for the four channels

    Returns
    -------
    List[TimeMetadataB423]
        List of TimeMetadata
    """
    from resistics_readers.lemi.b423 import _read_B423_headers

    selected_chans = B423E_CHANS if chans is None else chans
    metadata_list = []
    for file_path in dir_path.glob("*.B423"):
        logger.debug(f"Reading data file {file_path}")
        metadata_list.append(
            _read_B423_headers(
                file_path,
                fs,
                data_byte_offset=B423E_HEADER_LENGTH,
                record_bytes=B423E_RECORD_BYTES,
                chans=selected_chans,
            )
        )
    return metadata_list
def _merge_metadata(
    metadata_list: List[TimeMetadataB423], dx: float = 1, dy: float = 1
) -> TimeMetadata:
    """Combine the per-file metadata into a single TimeMetadata"""
    merged = TimeMetadata(**metadata_list[0].dict())
    merged.first_time = min(entry.first_time for entry in metadata_list)
    merged.last_time = max(entry.last_time for entry in metadata_list)
    merged.n_samples = np.sum([entry.n_samples for entry in metadata_list])
    # record the contributing data files on every channel and set the dipole
    # distances on the channels labelled Ex / Ey (if present)
    data_files = [entry.data_file for entry in metadata_list]
    for chan in merged.chans:
        chan_metadata = merged.chans_metadata[chan]
        chan_metadata.data_files = data_files
        if chan == "Ex":
            chan_metadata.dipole_dist = dx
        if chan == "Ey":
            chan_metadata.dipole_dist = dy
    return merged
class TimeReaderB423E(TimeReaderB423):
    """
    Data reader for Lemi B423E data

    There is no separate metadata file for Lemi B423E data detailing the
    sampling frequency, the number of samples, the sensors etc.. Such a
    metadata file is a pre-requisite for resistics. There are helper methods to
    make one.

    In situations where a Lemi B423E dataset is recorded in multiple files, it
    is required that the recording is continuous.

    Other important notes about Lemi B423E files

    - 1024 bytes of ASCII metadata in the data file with scaling information
    - Lemi B423E raw measurement data is signed long integer format

    Important points about scalings

    - Raw data is integer counts for electric channels
    - Scalings in B423E files convert electric channels to uV (microvolt)

    If apply_scaling is False, data will be returned in:

    - microvolts for the electric channels

    Which is equivalent to applying the scalings in the B423E headers

    With apply_scaling True, the following additional scaling will be applied:

    - Electric channels converted to mV
    - Dipole length corrections are applied to electric channels

    .. note::

        For more information about Lemi B423 format, please see:
        http://lemisensors.com/?p=485
    """

    # B423E records are 26 bytes (see B423E_RECORD_BYTES)
    record_bytes: int = B423E_RECORD_BYTES
    # optionally restrict reading to a subset of the four channels
    limit_chans: Optional[List[str]] = None

    def read_metadata(self, dir_path: Path) -> TimeMetadataMerge:
        """
        Read metadata

        Parameters
        ----------
        dir_path : Path
            The data directory

        Returns
        -------
        TimeMetadataMerge
            TimeMetadata with a data table

        Raises
        ------
        MetadataReadError
            If the number of channels is incorrect
        TimeDataReadError
            If not all data files exist
        TimeDataReadError
            If extensions do not match
        """
        from resistics.errors import MetadataReadError, TimeDataReadError
        from resistics_readers.multifile import validate_consistency
        from resistics_readers.multifile import validate_continuous

        metadata = TimeMetadata.parse_file(dir_path / "metadata.json")
        if metadata.n_chans != len(B423E_CHANS):
            # report the channel count rather than the channel list
            raise MetadataReadError(
                dir_path, f"Number channels {metadata.n_chans} != {len(B423E_CHANS)}"
            )
        # re-read the per-file headers to build the sample/byte offset table
        metadata_list = _get_B423E_metadata_list(dir_path, metadata.fs)
        validate_consistency(dir_path, metadata_list)
        validate_continuous(dir_path, metadata_list)
        data_table = self._generate_table(metadata_list)
        metadata_dict = metadata.dict()
        metadata_dict["data_table"] = data_table.to_dict()
        metadata = TimeMetadataMerge(**metadata_dict)
        if not self._check_data_files(dir_path, metadata):
            raise TimeDataReadError(dir_path, "All data files do not exist")
        if not self._check_extensions(dir_path, metadata):
            raise TimeDataReadError(dir_path, f"Data file suffix not {self.extension}")
        return metadata

    def read_data(
        self, dir_path: Path, metadata: TimeMetadata, read_from: int, read_to: int
    ) -> TimeData:
        """
        Get data from data files

        Lemi B423E data always has four channels, in order E1, E2, E3, E4. When
        writing out the metadata, it is possible to relabel these channels, for
        example:

        - Ex, Ey, E3, E4

        The raw data is integer counts. However, additional scalings from the
        B423E files are applied to give:

        - microvolts for the electric channels

        The scalings are as follows:

        - Channel 1 = (Channel 1 * Ke1) + Ae1
        - Channel 2 = (Channel 2 * Ke2) + Ae2
        - Channel 3 = (Channel 3 * Ke3) + Ae3
        - Channel 4 = (Channel 4 * Ke4) + Ae4

        Unlike most other readers, the channels returned can be explicitly
        selected by setting the limit_chans attribute of the class. This is
        because in many cases, only two of the B423E channels are actually
        useful.

        Parameters
        ----------
        dir_path : Path
            The directory path to read from
        metadata : TimeMetadata
            Time series data metadata
        read_from : int
            Sample to read data from
        read_to : int
            Sample to read data to

        Returns
        -------
        TimeData
            Time data object
        """
        from resistics_readers.multifile import samples_to_sources

        dtype = np.float32
        n_samples = read_to - read_from + 1
        chans, chan_indices = self._get_chans(dir_path, metadata)
        n_chans = len(chans)
        logger.info(f"Reading channels {chans}, indices {chan_indices}")
        messages = [f"Reading raw data from {dir_path}"]
        messages.append(f"Sampling rate {metadata.fs} Hz")
        # loop over B423E files and read data
        data_table = pd.DataFrame(data=metadata.data_table)
        df_to_read = samples_to_sources(dir_path, data_table, read_from, read_to)
        data = np.empty(shape=(n_chans, n_samples), dtype=dtype)
        sample = 0
        for data_file, info in df_to_read.iterrows():
            file_from = info.loc["read_from"]
            file_to = info.loc["read_to"]
            n_samples_file = info.loc["n_samples_read"]
            data_byte_start = info.loc["data_byte_start"]
            # per-file multipliers/offsets for only the selected channels
            mult = np.array([info.loc[B423E_MULT[idx]] for idx in chan_indices])
            add = np.array([info.loc[B423E_ADD[idx]] for idx in chan_indices])
            messages.append(f"{data_file}: Reading samples {file_from} to {file_to}")
            logger.debug(f"{data_file}: Reading samples {file_from} to {file_to}")
            byte_read_start = data_byte_start + file_from * self.record_bytes
            n_bytes_to_read = n_samples_file * self.record_bytes
            with (dir_path / data_file).open("rb") as f:
                f.seek(byte_read_start, 0)
                data_bytes = f.read(n_bytes_to_read)
            data_read = self._parse_records(metadata.chans, data_bytes)
            # apply the header scalings and keep only the selected channels
            data_read = (data_read[chan_indices] * mult[:, None]) + add[:, None]
            data[:, sample : sample + n_samples_file] = data_read
            sample = sample + n_samples_file
        metadata = self._adjust_metadata_chans(metadata, chans)
        metadata = self._get_return_metadata(metadata, read_from, read_to)
        messages.append(f"From sample, time: {read_from}, {str(metadata.first_time)}")
        messages.append(f"To sample, time: {read_to}, {str(metadata.last_time)}")
        metadata.history.add_record(self._get_record(messages))
        logger.info(f"Data successfully read from {dir_path}")
        return TimeData(metadata, data)

    def _get_chans(
        self, dir_path: Path, metadata: TimeMetadataMerge
    ) -> Tuple[List[str], List[int]]:
        """
        Get the channels to read and their indices

        This is mainly used because it is likely that not all the channels are
        used or recording anything worthwhile

        Parameters
        ----------
        dir_path : Path
            The directory path
        metadata : TimeMetadataMerge
            The metadata for the recording

        Returns
        -------
        Tuple[List[str], List[int]]
            The channels and the channel indices

        Raises
        ------
        TimeDataReadError
            If any of limit_chans are not in the metadata.chans
        """
        from resistics.errors import TimeDataReadError

        if self.limit_chans is None:
            return metadata.chans, list(range(metadata.n_chans))
        chk = set(self.limit_chans) - set(metadata.chans)
        if len(chk) > 0:
            raise TimeDataReadError(
                dir_path, f"Chans {chk} not in metadata {metadata.chans}"
            )
        indices = [metadata.chans.index(x) for x in self.limit_chans]
        return self.limit_chans, indices

    def _adjust_metadata_chans(
        self, metadata: TimeMetadataMerge, chans: List[str]
    ) -> TimeMetadataMerge:
        """Restrict the metadata to the channels that were actually read"""
        metadata.chans_metadata = {c: metadata.chans_metadata[c] for c in chans}
        metadata.chans = chans
        metadata.n_chans = len(chans)
        return metadata

    def scale_data(self, time_data: TimeData) -> TimeData:
        """
        Get data scaled to physical values

        resistics uses field units, meaning electric channels are returned in
        mV/km. B423E instruments record electric channels only.

        Notes
        -----
        When Lemi B423E data is read in, the scaling in the headers is applied
        and the channels are in uV (microvolts). To complete the scaling to
        field units, each channel is divided by 1000 (uV to mV) and then by the
        dipole length in km set in the channel metadata, returning mV/km.

        Parameters
        ----------
        time_data : TimeData
            Input time data

        Returns
        -------
        TimeData
            Time data in field units
        """
        logger.info("Applying scaling to data to give field units")
        messages = ["Scaling raw data to physical units"]
        for chan in time_data.metadata.chans:
            chan_metadata = time_data.metadata.chans_metadata[chan]
            # all B423E channels are electric: uV -> mV -> mV/km
            time_data[chan] = time_data[chan] / 1000.0
            messages.append(f"Dividing chan {chan} by 1000 to convert from uV to mV")
            dipole_dist_km = chan_metadata.dipole_dist / 1_000
            time_data[chan] = time_data[chan] / dipole_dist_km
            messages.append(f"Dividing {chan} by dipole length {dipole_dist_km} km")
        record = self._get_record(messages)
        time_data.metadata.history.add_record(record)
        return time_data
from loguru import logger
from typing import Dict, List, Optional, Tuple
from pathlib import Path
import numpy as np
import pandas as pd
import obspy
from obspy.core.utcdatetime import UTCDateTime
from obspy.core.stream import Stream
from resistics.common import dir_files
from resistics.time import ChanMetadata, TimeMetadata, TimeData
from resistics.time import TimeProcess, TimeWriterNumpy
# map resistics channel names to their physical channel type
CHAN_TYPES = {
    "Ex": "electric",
    "Ey": "electric",
    "Hx": "magnetic",
    "Hy": "magnetic",
    "Hz": "magnetic",
}
class NoDataInInterval(Exception):
    """Raised when no miniseed data intersects a requested time interval"""

    def __init__(
        self,
        first_time: pd.Timestamp,
        last_time: pd.Timestamp,
        from_time: pd.Timestamp,
        to_time: pd.Timestamp,
    ):
        """Store the available data range and the requested interval"""
        self.first_time = first_time
        self.last_time = last_time
        self.from_time = from_time
        self.to_time = to_time

    def __str__(self):
        """Describe how the data range misses the requested interval"""
        return (
            f"Data between {self.first_time} to {self.last_time}"
            f" does not intersect interval {self.from_time} to {self.to_time}"
        )
def get_miniseed_stream(data_path: Path):
    """Open the miniseed file at data_path and return its obspy stream"""
    path_str = str(data_path)
    return obspy.read(path_str)
def get_streams(data_paths: List[Path]) -> Dict[Path, Stream]:
    """
    Get the stream object for each data_path

    Paths that cannot be read are logged and skipped rather than raising.

    Parameters
    ----------
    data_paths : List[Path]
        The data paths

    Returns
    -------
    Dict[Path, Stream]
        The stream objects
    """
    streams: Dict[Path, Stream] = {}
    for path in data_paths:
        try:
            logger.info(f"Attempting to read file {path}")
            streams[path] = get_miniseed_stream(path)
            found_ids = [trace.id for trace in streams[path]]
            logger.info(f"Successfully read file with traces {found_ids}")
        except Exception:
            logger.error(f"Unable to read data from {path}. Skipping.")
    return streams
def get_table(streams: Dict[Path, Stream], trace_ids: List[str]) -> pd.DataFrame:
    """
    Get table with start and ends for each trace of interest in each file

    The table additionally contains the trace index for each trace for every
    file

    Parameters
    ----------
    streams : Dict[Path, Stream]
        Dictionary of file paths to streams
    trace_ids : List[str]
        The ids of the traces that are of interest

    Returns
    -------
    pd.DataFrame
        The data table
    """
    columns = ["data_file", "trace_id", "trace_index", "first_time", "last_time"]
    rows = []
    for data_path, stream in streams.items():
        ids_in_file = [trace.id for trace in stream.traces]
        for trace_id in trace_ids:
            index = ids_in_file.index(trace_id)
            stats = stream[index].stats
            # obspy times carry nanoseconds; convert to pandas Timestamps
            start = pd.to_datetime(stats.starttime.ns, unit="ns", origin="unix")
            end = pd.to_datetime(stats.endtime.ns, unit="ns", origin="unix")
            rows.append((data_path.name, trace_id, index, start, end))
    return pd.DataFrame(data=rows, columns=columns)
def get_first_last_times(table: pd.DataFrame) -> Tuple[pd.Timestamp, pd.Timestamp]:
    """
    Get the minimum first time and maximum last time for the data

    Each trace may have different date ranges in the miniseed files. This
    function calculates the first and last times where data is present for each
    requested trace.

    Parameters
    ----------
    table : pd.DataFrame
        The information table with the details about trace duration in each data
        file

    Returns
    -------
    Tuple[pd.Timestamp, pd.Timestamp]
        The first and last time
    """
    per_file = table.groupby("data_file")
    # within a file, all traces are present from the latest trace start until
    # the earliest trace end; then combine the per-file ranges
    latest_start_per_file = per_file["first_time"].max()
    earliest_end_per_file = per_file["last_time"].min()
    return latest_start_per_file.min(), earliest_end_per_file.max()
def get_streams_to_read(
    trace_id: str,
    table: pd.DataFrame,
    from_time: pd.Timestamp,
    to_time: pd.Timestamp,
) -> pd.DataFrame:
    """
    Get the streams to read and the time intervals to read for each stream

    Note that this finds time intervals to cover from_time to to_time inclusive

    Parameters
    ----------
    trace_id : str
        The trace id
    table : pd.DataFrame
        The table with details about date ranges covered by each miniseed file
    from_time : pd.Timestamp
        The time to get data from
    to_time : pd.Timestamp
        The time to get data to

    Returns
    -------
    pd.DataFrame
        A row for each data file to read and the time range to read from it
    """
    candidates = table[table["trace_id"] == trace_id]
    # keep only files whose coverage overlaps [from_time, to_time]
    overlaps = candidates["first_time"] <= to_time
    overlaps = overlaps & (candidates["last_time"] >= from_time)
    candidates = candidates[overlaps]
    # clip the requested interval to each file's coverage
    rows = []
    for _idx, row in candidates.iterrows():
        read_from = max(row.loc["first_time"], from_time)
        read_to = min(row.loc["last_time"], to_time)
        rows.append(
            (row.loc["data_file"], row.loc["trace_index"], read_from, read_to)
        )
    return pd.DataFrame(
        data=rows,
        columns=["data_file", "trace_index", "read_from", "read_to"],
    )
def get_stream_data(
    dt: pd.Timedelta,
    stream: Stream,
    trace_index: int,
    read_from: pd.Timestamp,
    read_to: pd.Timestamp,
) -> np.ndarray:
    """
    Get data for a single trace from a stream

    Parameters
    ----------
    dt : pd.Timedelta
        The sampling period
    stream : Stream
        The miniseed file stream
    trace_index : int
        The index of the trace
    read_from : pd.Timestamp
        The time to read from
    read_to : pd.Timestamp
        The time to read to

    Returns
    -------
    np.ndarray
        The trace data from the stream

    Raises
    ------
    ValueError
        If the number of expected samples does not give an integer. This is
        currently a safety first approach until more testing is done
    ValueError
        If the number of samples expected != the number of samples returned by
        the trace in the time interval
    """
    trace = stream[trace_index]
    expected = ((read_to - read_from) / dt) + 1
    if not expected.is_integer():
        raise ValueError(f"Number of samples expected {expected} is not an integer")
    expected = int(expected)
    # slice inclusively between the two times using obspy UTCDateTime
    slice_start = UTCDateTime(read_from.timestamp())
    slice_end = UTCDateTime(read_to.timestamp())
    subtrace = trace.slice(starttime=slice_start, endtime=slice_end)
    found = subtrace.count()
    if found != expected:
        raise ValueError(f"samples expected {expected} != found {found}")
    logger.debug(f"Expecting {expected} samples, found {found}")
    return subtrace.data
def get_trace_data(
    fs: float,
    streams: Dict[Path, Stream],
    streams_to_read: pd.DataFrame,
    from_time: pd.Timestamp,
    n_samples: int,
) -> np.ndarray:
    """
    Get data for a single trace beginning at from_time and for n_samples

    Parameters
    ----------
    fs : float
        The sampling frequency
    streams : Dict[Path, Stream]
        The streams
    streams_to_read : pd.DataFrame
        The streams to read for this trace and time interval, one row per data
        file with columns data_file, trace_index, read_from, read_to
    from_time : pd.Timestamp
        The time to get the data from
    n_samples : int
        The number of samples to get

    Returns
    -------
    np.ndarray
        The data

    Raises
    ------
    ValueError
        If converting read_from date to samples does not give an integer. This
        is a safety first approach but problems could be encountered at very
        high sampling frequencies. In this case, much more testing needs to be
        done about expected behaviour
    ValueError
        If converting read_to date to samples does not give an integer. This
        is a safety first approach but problems could be encountered at very
        high sampling frequencies. In this case, much more testing needs to be
        done about expected behaviour
    """
    dt = pd.Timedelta(1 / fs, "s")
    # the table identifies files by name, so key the streams by name too
    streams_from_name = {k.name: v for k, v in streams.items()}
    # pre-fill with NaN; gaps that no data file covers stay NaN
    data = np.empty(shape=(n_samples), dtype=np.float32)
    data[:] = np.nan
    for _idx, row in streams_to_read.iterrows():
        data_file = row.loc["data_file"]
        trace_index = row.loc["trace_index"]
        read_from = row.loc["read_from"]
        read_to = row.loc["read_to"]
        # convert the time range into sample indices relative to from_time
        data_sample_from = (read_from - from_time) / dt
        data_sample_to = (read_to - from_time) / dt
        if not data_sample_from.is_integer():
            raise ValueError(f"Date sample from {data_sample_from} is not an integer")
        if not data_sample_to.is_integer():
            raise ValueError(f"Date sample to {data_sample_to} is not an integer")
        data_sample_from = int(data_sample_from)
        data_sample_to = int(data_sample_to)
        logger.debug(f"Reading range {read_from} to {read_to} from stream {data_file}")
        logger.debug(f"Data will cover samples {data_sample_from} to {data_sample_to}")
        stream = streams_from_name[data_file]
        # inclusive sample range, hence the + 1 on the upper slice bound
        data[data_sample_from : data_sample_to + 1] = get_stream_data(
            dt, stream, trace_index, read_from, read_to
        )
    return data
def get_time_data(
    fs: float,
    id_map: Dict[str, str],
    streams: Dict[Path, Stream],
    table: pd.DataFrame,
    first_time: pd.Timestamp,
    last_time: pd.Timestamp,
    from_time: pd.Timestamp,
    to_time: pd.Timestamp,
) -> TimeData:
    """
    Get time data covering from_time to to_time

    Parameters
    ----------
    fs : float
        The sampling frequency
    id_map : Dict[str, str]
        The map from trace id to channel
    streams : Dict[Path, Stream]
        The streams
    table : pd.DataFrame
        The table with information about trace ranges for each file
    first_time : pd.Timestamp
        The common first_time for all traces and streams
    last_time : pd.Timestamp
        The common last_time for all traces and streams
    from_time : pd.Timestamp
        The from time for this interval of data
    to_time : pd.Timestamp
        The to time for this intervel of data

    Returns
    -------
    TimeData
        TimeData

    Raises
    ------
    NoDataInInterval
        If there is no trace data in the interval from_time and to_time
    ValueError
        If the number of samples in the interval is not an integer. This is a
        safety first approach for now that could fail at very high sampling
        frequencies, in which case much more thorough testing would be better.
    """
    # check there is actually data in the date range
    # NOTE(review): an interval entirely after the data (from_time > last_time)
    # is not caught here and would fail later — confirm upstream guards this
    if (first_time > last_time) or (to_time < first_time):
        raise NoDataInInterval(first_time, last_time, from_time, to_time)
    # correct the times if they are earlier than first time or after last time
    if from_time < first_time:
        logger.debug(f"Adjusted from time {from_time} to first time {first_time}")
        from_time = first_time
    if to_time > last_time:
        logger.debug(f"Adjusted to time {to_time} to last time {last_time}")
        to_time = last_time
    logger.info(f"Extracting chunk between {from_time} and {to_time}")
    # inclusive interval, hence + 1 sample
    n_samples = ((to_time - from_time) / pd.Timedelta(1 / fs, "s")) + 1
    if not n_samples.is_integer():
        raise ValueError("Number of calculated samples is not an integer")
    n_samples = int(n_samples)
    logger.debug(f"Expecting {n_samples} samples between {from_time} to {to_time}")
    trace_ids = list(id_map.keys())
    chans = [id_map[trace_id] for trace_id in trace_ids]
    n_chans = len(chans)
    chans_metadata: Dict[str, ChanMetadata] = {}
    # one row per channel, filled trace by trace
    data = np.empty(shape=(n_chans, n_samples), dtype=np.float32)
    for idx, trace_id in enumerate(trace_ids):
        logger.info(f"Extracting data for trace {trace_id}, channel {chans[idx]}")
        streams_to_read = get_streams_to_read(trace_id, table, from_time, to_time)
        data[idx, :] = get_trace_data(
            fs, streams, streams_to_read, from_time, n_samples
        )
        # record which files contributed to this channel and its type
        chans_metadata[chans[idx]] = ChanMetadata(
            name=chans[idx],
            data_files=list(streams_to_read["data_file"].values.tolist()),
            chan_type=CHAN_TYPES[chans[idx]],
            chan_source=trace_id,
        )
    metadata = TimeMetadata(
        fs=fs,
        first_time=from_time,
        last_time=to_time,
        n_samples=n_samples,
        chans=chans,
        n_chans=len(chans),
        chans_metadata=chans_metadata,
    )
    return TimeData(metadata, data)
def get_processed_data(time_data: TimeData, processors: List[TimeProcess]) -> TimeData:
    """
    Apply each processor in turn to the time data

    Parameters
    ----------
    time_data : TimeData
        TimeData to process
    processors : List[TimeProcess]
        The processors to run, applied in list order

    Returns
    -------
    TimeData
        The processed TimeData
    """
    for processor in processors:
        time_data = processor.run(time_data)
    return time_data
def reformat(
    dir_path: Path,
    fs: float,
    id_map: Dict[str, str],
    chunk_time: pd.Timedelta,
    write_path: Path,
    from_time: Optional[pd.Timestamp] = None,
    to_time: Optional[pd.Timestamp] = None,
    processors: Optional[List[TimeProcess]] = None,
) -> None:
    """
    Reformat miniseed data into resistics numpy format in intervals

    Parameters
    ----------
    dir_path : Path
        The directory with the miniseed files
    fs : float
        The sampling frequency being extracted
    id_map : Dict[str, str]
        Map from trace ids to be extracted to channel names
    chunk_time : pd.Timedelta
        The intervals to extract the data in, for example 1H, 12H, 1D
    write_path : Path
        The path to write out the TimeData to
    from_time : Optional[pd.Timestamp], optional
        Optionally provide a from time, by default None. If None, the from time
        will be the earliest timestamp shared by all traces that are requested
        to be reformatted
    to_time : Optional[pd.Timestamp], optional
        Optionally provide a to time, by default None. If None, the to time
        will be the latest timestamp shared by all traces that are requested
        to be reformatted
    processors : Optional[List[TimeProcess]], optional
        Any processors to run, by default None. For example resampling of data.
    """
    logger.info(f"Reformatting miniseed data in {dir_path}")
    data_paths = dir_files(dir_path)
    trace_ids = list(id_map.keys())
    # get the streams and update data paths with just the files that were read
    streams = get_streams(data_paths)
    logger.info(f"Found {len(streams)} readable files")
    data_paths = list(streams.keys())
    table = get_table(streams, trace_ids)
    first_time, last_time = get_first_last_times(table)
    logger.info(f"Found maximum data range of {first_time} to {last_time}")
    if from_time is None:
        from_time = first_time
    if to_time is None:
        to_time = last_time
    # snap the interval boundaries onto the chunk grid
    from_time = from_time.floor(freq=chunk_time)
    to_time = to_time.ceil(freq=chunk_time)
    logger.info(f"Extracting data from {from_time} to {to_time} for ids {trace_ids}")
    starts = pd.date_range(
        start=from_time, end=to_time - pd.Timedelta(chunk_time), freq=chunk_time
    )
    ends = pd.date_range(
        start=from_time + pd.Timedelta(chunk_time), end=to_time, freq=chunk_time
    )
    # minus 1 sample from ends to avoid any double gathering of samples
    ends = ends - pd.Timedelta(1 / fs, "s")
    for date_start, date_end in zip(starts, ends):
        try:
            time_data = get_time_data(
                fs, id_map, streams, table, first_time, last_time, date_start, date_end
            )
        except NoDataInInterval:
            logger.debug(f"No data in interval {date_start} - {date_end}")
            continue
        if processors is not None:
            time_data = get_processed_data(time_data, processors)
        # one measurement folder per chunk: <write_path>/<start>_to_<end>_mseed
        first_str = time_data.metadata.first_time.strftime("%Y-%m-%d_%H-%M-%S")
        last_str = time_data.metadata.last_time.strftime("%Y-%m-%d_%H-%M-%S")
        meas_name = f"{first_str}_to_{last_str}_mseed"
        save_path = write_path / meas_name
        TimeWriterNumpy().run(save_path, time_data)
from loguru import logger
from typing import List, Dict, OrderedDict, Tuple, Any, BinaryIO, Optional
import collections
from pathlib import Path
import struct
import numpy as np
import pandas as pd
from resistics.errors import MetadataReadError, TimeDataReadError
from resistics.sampling import RSDateTime, to_datetime, to_timedelta
from resistics.common import Metadata, get_record
from resistics.time import ChanMetadata, TimeMetadata, TimeData, TimeReader, TimeProcess
from resistics.time import TimeWriterNumpy, adjust_time_metadata
from resistics_readers.phoenix.headers import phoenix_headers
from resistics_readers.multifile import samples_to_sources
# byte layout of a TBL file entry: 4-byte name within a 12-byte header,
# followed by a 13-byte value
HEADER_NAME_SIZE = 4
HEADER_SIZE = 12
HEADER_VALUE_SIZE = 13
HEADER_ENTRY_SIZE = HEADER_SIZE + HEADER_VALUE_SIZE
# each record in a .TS data file is preceded by a 32 byte tag
TAG_SIZE = 32
# three byte two's complement for the data
SAMPLE_SIZE = 3
SAMPLE_DTYPE = np.int32
# channel type mapping
CHAN_TYPES = {
    "Ex": "electric",
    "Ey": "electric",
    "Hx": "magnetic",
    "Hy": "magnetic",
    "Hz": "magnetic",
}
class TimeMetadataTS(TimeMetadata):
    """TimeMetadata for a single TS file"""
    # table of record sample ranges; excluded from JSON serialization below
    data_table: Any
    """The record sample ranges"""
    def json(self):
        """Get the JSON representation, excluding the data_table"""
        return super().json(exclude={"data_table"})
class TimeMetadataPhoenix(Metadata):
    """TimeMetadata for the Phoenix data"""
    # numbers identifying each TS file; used as keys into ts_files/ts_metadata
    ts_nums: List[int]
    """The TS file numbers"""
    ts_files: Dict[int, str]
    """The name of the TS files"""
    # the TS number of the continuously recording file
    ts_continuous: int
    """The continuous TS"""
    ts_metadata: Dict[int, TimeMetadataTS]
    """Metadata for a single TS"""
def strip_control(in_bytes: bytes) -> str:
    """Remove NUL padding from both ends of in_bytes and decode to str"""
    stripped = in_bytes.strip(b"\x00")
    return stripped.decode()
def read_table_entry(entry_bytes: bytes) -> Tuple[str, Any]:
    """
    Read a single table entry, this should return an entry name and value

    Parameters
    ----------
    entry_bytes : bytes
        The entry bytes

    Returns
    -------
    Tuple[str, Any]
        The name and value

    Raises
    ------
    ValueError
        If unable to read entry name
    KeyError
        If entry name is unknown
    TypeError
        If unable to read entry value
    """
    name_fmt = f"{HEADER_NAME_SIZE}s"
    try:
        raw_name = struct.unpack(name_fmt, entry_bytes[:HEADER_NAME_SIZE])
        name = strip_control(raw_name[0])
    except Exception:
        raise ValueError("Unable to read entry header name")
    if name not in phoenix_headers:
        logger.error(f"Unknown table name {name}")
        raise KeyError(f"Unknown table name '{name}'")
    # decode the value using the format recorded for this header name
    value_info = phoenix_headers[name]
    value_bytes = entry_bytes[HEADER_SIZE : HEADER_SIZE + value_info["vSize"]]
    try:
        unpacked = struct.unpack(value_info["typ"], value_bytes)
        if value_info["ptyp"] == "AmxPT":
            # AmxPT values keep the full unpacked tuple
            value = unpacked
        else:
            value = unpacked[0]
            if "s" in value_info["typ"]:
                value = strip_control(value)
    except Exception:
        raise TypeError(f"Unable to read value for header {name}")
    return name, value
def read_table_file(table_path: Path) -> OrderedDict[str, Any]:
    """
    Read a TBL file

    Parameters
    ----------
    table_path : Path
        Path to the table file

    Returns
    -------
    OrderedDict[str, Any]
        The table data in an ordered dictionary

    Notes
    -----
    Entries that fail to parse are logged and skipped rather than raising, so
    a single bad entry does not prevent reading the rest of the table.
    """
    # each table entry is a fixed-size header followed by a fixed-size value
    byte_inc = HEADER_SIZE + HEADER_VALUE_SIZE
    table_data = collections.OrderedDict()
    with table_path.open("rb") as f:
        table_bytes = f.read()
    # integer floor division replaces the previous math.floor on a float,
    # which is exact for integers and avoids the function-local import
    num_headers = len(table_bytes) // byte_inc
    logger.debug(f"Reading {num_headers} entries from table file")
    for ientry in range(num_headers):
        byte_start = ientry * byte_inc
        entry_bytes = table_bytes[byte_start : byte_start + byte_inc]
        try:
            name, value = read_table_entry(entry_bytes)
            table_data[name] = value
        except ValueError:
            logger.error(f"Unable to read table entry name {ientry}")
        except KeyError:
            logger.error("Unknown entry name")
        except TypeError:
            logger.error("Unable to read data value")
    return table_data
def get_date(value: Tuple[int, ...]) -> RSDateTime:
    """
    Convert unpacked tag bytes to a resistics DateTime
    Parameters
    ----------
    value : Tuple[int, ...]
        The eight tag bytes with the datetime information, already unpacked to
        integers (see read_tag, which unpacks with struct format "8b")
    Returns
    -------
    RSDateTime
        The datetime
    """
    seconds = value[0]
    minutes = value[1]
    hour = value[2]
    day = value[3]
    month = value[4]
    year = value[5]
    # index 6 is unused; the last entry holds the century
    century = value[-1]
    # e.g. century 20, year 21 gives "2021"; all fields are zero padded
    date = f"{century:02d}{year:02d}-{month:02d}-{day:02d}"
    date += f"T{hour:02d}:{minutes:02d}:{seconds:02d}.000Z"
    return to_datetime(date)
def read_tag(f: BinaryIO) -> Dict[str, Any]:
    """
    Read the tag from a .TS data file
    Tags are used to separate records in a data file. Each tag is 32 bytes long
    and contains data about the next data record
    Some notes about particular tag entries
    - units of sample rate: 0 = Hz, 1 = minute, 2 = hour, 3 = day
    - bit-wise saturation flags
    - clock error in micro seconds
    Parameters
    ----------
    f : BinaryIO
        Binary file type object
    Returns
    -------
    Dict[str, Any]
        The tag data
    """
    # NOTE(review): only n_scans and n_chans are unpacked to scalars with [0];
    # the other entries keep the 1-tuple returned by struct.unpack - confirm
    # downstream consumers expect tuples before changing this
    tag_data = {}
    tag_data["from_time"] = get_date(struct.unpack("8b", f.read(8)))
    tag_data["serial"] = struct.unpack("h", f.read(2))
    tag_data["n_scans"] = struct.unpack("h", f.read(2))[0]
    tag_data["n_chans"] = struct.unpack("b", f.read(1))[0]
    tag_data["tag_length"] = struct.unpack("b", f.read(1))
    tag_data["status_code"] = struct.unpack("b", f.read(1))
    tag_data["saturation_flag"] = struct.unpack("b", f.read(1))
    tag_data["reserved"] = struct.unpack("b", f.read(1))
    tag_data["sample_length"] = struct.unpack("b", f.read(1))
    tag_data["fs"] = struct.unpack("h", f.read(2))
    tag_data["fs_units"] = struct.unpack("b", f.read(1))
    tag_data["clock_status"] = struct.unpack("b", f.read(1))
    tag_data["clock_error"] = struct.unpack("i", f.read(4))
    # six reserved single-byte entries complete the 32 byte tag
    for res in range(6):
        key = f"res{res+1}"
        tag_data[key] = struct.unpack("b", f.read(1))
    return tag_data
def get_records(dir_path: Path, ts_file: str) -> pd.DataFrame:
    """
    Get details for all the records
    Phoenix MTU5C data files have multiple records separated by tags. Each
    record will have a number of scans. A single scan is all the channel data
    for one timestamp. The number of scans in a record is equal to the number
    of samples in the record.
    Usually, a record will be a second long, so the number of scans in the
    record is determined by the sampling frequency.
    When reading data, it commonly needs to be read from multiple scans,
    therefore this table helps find which records need to be read to get the
    data to cover a particular time range.
    Note that the time given in the tag is the start time of the next record.
    Parameters
    ----------
    dir_path : Path
        The path with the data file
    ts_file : str
        The name of the data file
    Returns
    -------
    pd.DataFrame
        A DataFrame with details about each record
    """
    data_path = dir_path / ts_file
    n_bytes = data_path.stat().st_size
    record_from_times = []
    record_first_samples = []
    record_last_samples = []
    record_scans = []
    record_byte_starts = []
    # start number of samples at 0
    sample = 0
    bytes_read = 0
    with data_path.open("rb") as f:
        # each iteration reads one tag then seeks over the record data that
        # follows it, so only the tags are actually read here
        while bytes_read < n_bytes:
            tag_data = read_tag(f)
            record_byte_starts.append(bytes_read + TAG_SIZE)
            record_scans.append(tag_data["n_scans"])
            record_first_samples.append(sample)
            record_last_samples.append(sample + tag_data["n_scans"] - 1)
            record_from_times.append(tag_data["from_time"])
            # increment the samples
            sample += tag_data["n_scans"]
            # go to start of next tag
            data_bytes = tag_data["n_scans"] * tag_data["n_chans"] * SAMPLE_SIZE
            f.seek(data_bytes, 1)
            bytes_read += TAG_SIZE + data_bytes
    return pd.DataFrame(
        data={
            "from_time": record_from_times,
            "first_sample": record_first_samples,
            "last_sample": record_last_samples,
            "n_samples": record_scans,
            "data_byte_start": record_byte_starts,
        }
    )
def get_time_dict(
    ts: int,
    table_data: OrderedDict[str, Any],
    record_df: pd.DataFrame,
) -> Dict[str, Any]:
    """
    Get the time dictionary for a single TS that will be used to initialise a
    TimeMetadata instance.
    Start and end times are provided in the metadata. However, it is only for
    the continuous sampling frequency that the data is continuous between these
    dates. For the other frequencies, whilst these may be the timestamps of the
    first and last sample, it is not necessary that all the timestamps in
    between are present.
    Parameters
    ----------
    ts : int
        The TS number
    table_data : OrderedDict[str, Any]
        The data read in from the .TBL file
    record_df : pd.DataFrame
        Information about the records. This will be primarily be used to get the
        total number of samples in the recording and the last time.
    Returns
    -------
    Dict[str, Any]
        A dictionary of data about the recording
    """
    time_dict = {}
    # sampling frequency for this TS number from the table data
    time_dict["fs"] = table_data[f"SRL{ts}"]
    time_dict["n_samples"] = record_df["n_samples"].sum()
    time_dict["first_time"] = to_datetime(record_df.loc[0, "from_time"])
    # don't add 1 because only using this to calculate the duration
    last_record_start_time = record_df.loc[record_df.index[-1], "from_time"]
    record_n_samples = record_df["last_sample"] - record_df["first_sample"]
    last_record_n_samples = record_n_samples.iloc[-1]
    # last time is the start of the last record plus its duration
    time_dict["last_time"] = last_record_start_time + to_timedelta(
        last_record_n_samples / time_dict["fs"]
    )
    time_dict["serial"] = table_data["SNUM"]
    time_dict["system"] = table_data["HW"]
    time_dict["elevation"] = table_data["ELEV"]
    # latitude/longitude table entries are comma separated, keep first part
    time_dict["wgs84_latitude"] = float(table_data["LATG"].split(",")[0])
    time_dict["wgs84_longitude"] = float(table_data["LNGG"].split(",")[0])
    return time_dict
def get_chans_metadata(
    table_data: OrderedDict[str, Any], ts_file: str
) -> Tuple[List[str], Dict[str, Any]]:
    """
    Get ChanMetadata for each channel
    Parameters
    ----------
    table_data : OrderedDict[str, Any]
        The table data
    ts_file : str
        The TS file name
    Returns
    -------
    Tuple[List[str], Dict[str, Any]]
        The channels sorted into recording order and the channel metadata for
        each channel
    """
    chans = ["Ex", "Ey", "Hx", "Hy", "Hz"]
    # sort the channels by their recorded order given in the CH* table entries
    order = [table_data[f"CH{chan.upper()}"] for chan in chans]
    _order, chans = (
        list(x) for x in zip(*sorted(zip(order, chans), key=lambda pair: pair[0]))
    )
    chans_dict = {}
    for chan in chans:
        chan_dict = {}
        chan_dict["name"] = chan
        chan_dict["data_files"] = [ts_file]
        chan_dict["chan_type"] = CHAN_TYPES[chan]
        # to convert integers to machine volts
        chan_dict["scaling"] = table_data["FSCV"] / np.power(2, 23)
        if CHAN_TYPES[chan] == "magnetic":
            # last four characters of the table entry hold the sensor serial
            chan_dict["serial"] = table_data[f"{chan.upper()}SN"][-4:]
            chan_dict["sensor"] = "Phoenix"
            chan_dict["gain1"] = table_data["HGN"] * table_data["HATT"]
            chan_dict["gain2"] = (
                (1000.0 / table_data["HNUM"]) if "HNUM" in table_data else 1
            )
        dipoles = {"Ex": "EXLN", "Ey": "EYLN"}
        if CHAN_TYPES[chan] == "electric":
            dipole_dist_key = dipoles[chan]
            chan_dict["dipole_dist"] = float(table_data[dipole_dist_key])
            chan_dict["gain1"] = table_data["EGN"]
        chans_dict[chan] = ChanMetadata(**chan_dict)
    return chans, chans_dict
def get_ts_metadata(
    dir_path: Path, ts_file: str, ts: int, table_data: Dict[str, Any]
) -> TimeMetadata:
    """
    Get TimeMetadata for a single .TS file
    Parameters
    ----------
    dir_path : Path
        The directory path with the data file
    ts_file : str
        The name of the data file
    ts : int
        The TS number
    table_data : Dict[str, Any]
        The table data
    Returns
    -------
    TimeMetadata
        TimeMetadata for the TS file, including the records data table
    """
    # scan the data file for its records, then combine with table metadata
    df = get_records(dir_path, ts_file)
    time_dict = get_time_dict(ts, table_data, df)
    chans, chans_dict = get_chans_metadata(table_data, ts_file)
    time_dict["chans"] = chans
    time_dict["n_chans"] = len(chans)
    time_dict["chans_metadata"] = chans_dict
    time_dict["data_table"] = df
    return TimeMetadataTS(**time_dict)
def read_metadata(dir_path: Path) -> TimeMetadataPhoenix:
    """
    Read metadata for Phoenix data
    For phoenix data, the metadata is in the table file and it is binary
    formatted.
    Parameters
    ----------
    dir_path : Path
        The directory path with the data
    Returns
    -------
    TimeMetadataPhoenix
        TimeMetadataPhoenix which has a dictionary of TimeMetadata for each
        .TS file
    Raises
    ------
    MetadataReadError
        If the number of .TBL files in the directory != 1
    """
    table_files = list(dir_path.glob("*.TBL"))
    if len(table_files) != 1:
        raise MetadataReadError(f"Number of table files {len(table_files)} != 1")
    table_path = table_files[0]
    logger.info(f"Reading metadata from {table_path}")
    table_data = read_table_file(table_path)
    data_files = list(dir_path.glob("*.TS*"))
    # NOTE(review): assumes single character TS suffixes (e.g. .TS2 ... .TS5);
    # a suffix like .TS10 would be truncated to 0 - confirm this cannot occur
    ts_files = {int(f.name[-1]): f.name for f in data_files}
    ts_nums = sorted(list(ts_files.keys()))
    logger.info(f"Found TS files {ts_nums}")
    ts_metadata = {}
    logger.info("Reading metadata and verifying records for TS files")
    for ts, ts_file in ts_files.items():
        metadata = get_ts_metadata(dir_path, ts_file, ts, table_data)
        ts_metadata[ts] = metadata
    # the highest TS number holds the continuous data
    return TimeMetadataPhoenix(
        ts_nums=ts_nums,
        ts_files=ts_files,
        ts_continuous=max(ts_nums),
        ts_metadata=ts_metadata,
    )
def read_record(
    data_bytes: bytes, n_chans: int, from_sample: int, to_sample: int
) -> np.ndarray:
    """
    Parse samples from the bytes of a single record

    Samples are stored as interleaved scans of three byte (24-bit),
    little-endian two's complement integers, one value per channel per scan.

    Parameters
    ----------
    data_bytes : bytes
        The bytes in the record
    n_chans : int
        The number of channels
    from_sample : int
        The first sample to read in the record
    to_sample : int
        The last sample to read in the record (inclusive)

    Returns
    -------
    np.ndarray
        Data with shape n_chans x n_samples
    """
    n_samples = to_sample - from_sample + 1
    byte_from = n_chans * from_sample * SAMPLE_SIZE
    byte_to = n_chans * (to_sample + 1) * SAMPLE_SIZE
    # int.from_bytes with signed=True performs the 24-bit two's complement
    # conversion directly, equivalent to zero padding to 32-bit, unpacking as
    # unsigned and then sign extending manually
    values = [
        int.from_bytes(data_bytes[pos : pos + SAMPLE_SIZE], "little", signed=True)
        for pos in range(byte_from, byte_to, SAMPLE_SIZE)
    ]
    return np.array(values).reshape(n_samples, n_chans).T
def read_records(
    data_path: Path, metadata: TimeMetadataTS, df_to_read: pd.DataFrame
) -> np.ndarray:
    """
    Read data records from a TS file
    Parameters
    ----------
    data_path : Path
        The path to the TS file
    metadata : TimeMetadataTS
        The metadata for the TS file
    df_to_read : pd.DataFrame
        The DataFrame with the records to read
    Returns
    -------
    np.ndarray
        The read data
    """
    n_samples = df_to_read["n_samples_read"].sum()
    data = np.empty(shape=(metadata.n_chans, n_samples), dtype=SAMPLE_DTYPE)
    with data_path.open("rb") as f:
        sample = 0
        for record, info in df_to_read.iterrows():
            record_from = info.loc["read_from"]
            record_to = info.loc["read_to"]
            n_samples_record = info.loc["n_samples_read"]
            data_byte_start = info.loc["data_byte_start"]
            # read bytes for the full record, read_record then slices out the
            # requested sample range
            n_bytes_to_read = info.loc["n_samples"] * metadata.n_chans * SAMPLE_SIZE
            f.seek(data_byte_start, 0)
            data_bytes = f.read(n_bytes_to_read)
            # parse the record bytes
            data_record = read_record(
                data_bytes, metadata.n_chans, record_from, record_to
            )
            data[:, sample : sample + n_samples_record] = data_record
            sample = sample + n_samples_record
    return data
class TimeReaderTS(TimeReader):
    """
    Phoenix time data reader only for the continuous time series data
    There is no data reader for the other TS files, these should be reformatted.
    """
    extension = ".TS"
    def read_metadata(self, dir_path: Path) -> TimeMetadataTS:
        """
        Read the metadata for the continuous data
        Parameters
        ----------
        dir_path : Path
            The directory path to the data
        Returns
        -------
        TimeMetadataTS
            Metadata for the continuous TS file
        Raises
        ------
        TimeDataReadError
            If the data files do not exist
        """
        metadata = read_metadata(dir_path)
        # only the continuous TS file is supported by this reader
        ts_metadata = metadata.ts_metadata[metadata.ts_continuous]
        if not self._check_data_files(dir_path, ts_metadata):
            raise TimeDataReadError(dir_path, "All data files do not exist")
        return ts_metadata
    def read_data(
        self, dir_path: Path, metadata: TimeMetadata, read_from: int, read_to: int
    ) -> TimeData:
        """
        Read data from the continuous time series data
        In a TS file, each sample is recorded as a scan (all channels recorded
        at the same time). To get the number of bytes to read, multiply number
        of samples by number of channels by the number of bytes for a single
        sample
        Parameters
        ----------
        dir_path : Path
            The directory path
        metadata : TimeMetadata
            The phoenix data metadata
        read_from : int
            Sample to read from
        read_to : int
            Sample to read to
        Returns
        -------
        TimeData
            The read in time data
        """
        # all channels share the same data file, use the first channel's entry
        data_file = metadata.chans_metadata[metadata.chans[0]].data_files[0]
        data_path = dir_path / data_file
        logger.info(
            f"Reading data from continuous data file {data_path} at {metadata.fs} Hz"
        )
        messages = [f"Reading raw data from {data_path}"]
        messages.append(f"Sampling rate {metadata.fs} Hz")
        # read the records
        data_table = pd.DataFrame(data=metadata.data_table)
        df_to_read = samples_to_sources(dir_path, data_table, read_from, read_to)
        logger.info(f"Reading data from {len(df_to_read.index)} records")
        messages.append(f"Reading data from {len(df_to_read.index)} records")
        data = read_records(data_path, metadata, df_to_read)
        metadata = self._get_return_metadata(metadata, read_from, read_to)
        messages.append(f"From sample, time: {read_from}, {str(metadata.first_time)}")
        messages.append(f"To sample, time: {read_to}, {str(metadata.last_time)}")
        metadata.history.add_record(self._get_record(messages))
        logger.info(f"Data successfully read from {dir_path}")
        return TimeData(metadata, data.astype(np.float32))
    def scale_data(self, time_data: TimeData) -> TimeData:
        r"""
        Get data scaled to physical values
        This information comes from:
        Instrument and Sensor Calibration: Concepts and Utility Programs
        And applies to:
        V5 System 2000, System2000.net, and MTU-net (MTU, MTU-A, V8, RXU,
        MTU-net; MTC and AMTC coils and AL-100 loop).
        The important factors for scaling the data and defined in the TBL file
        - FSCV: the full scale value in volts [V8, RXU, MTU-A, 2.45V] [MTU, 6.40V];
          full scale is 2^23 or 8,388,608 du
        - ExLN: the length of the N–S dipole (Ex) in metres
        - EyLN: the length of the E–W dipole (Ey) in metres
        - EGN: the gain used for the E channels [MTU-A x1 x4 x16] [MTU x10 x40 x160]
        - HGN: the gain used for the H channels [MTU-A x1 x4 x16] [MTU x3 x12 x48]
        - HNUM: the scale factor for coil sensors in (mV/nT) [AMTC-30 100] [MTC-50 1000]
        - HATT: interconnect board factor [MTU, MTU-A, MTU-net 0.233] [V8, RXU 1]
        du refers to the digital unit or the value out of the machine
        Note that HNUM is only applicable to AMTC-30 and MTC-50. For other
        systems, HNUM does not appear in the table file.
        These are read in and kept in the metadata
        - FSCV / 2^23 is in the scaling key for each channel
        - ExLn and EyLn are in their appropriate channel in metres
        - EGN is in gain1 for electric channels
        - HGN * HATT is in gain1 for magnetic channels
        - If HNUM is in the table file, gain2 is set to (1000/HNUM) for magnetic
          channels. If HNUM is not present, gain2 is set to 1 for magnetic
          channels.
        To scale the electric channels, apply the following:
        - E-Channel (mV/km) = du * (FSCV/2^23) * (1/EGN) * (1/E_LN) * (1000*1000)
        - Units derivation: (mV/km) = integer * (V/integer) * real * (1/m) * (mV/V * m/km)
        Given the metadata, this becomes
        .. math::
            Ex = Ex * scaling * (1/gain1) * (1/dx) * (1000*1000)
            Ey = Ey * scaling * (1/gain1) * (1/dy) * (1000*1000)
        For the magnetic channels:
        - H-Channel (nT) = du * (FSCV/2^23) * (1/HGN) * (1/HATT) * (1000/HNUM)
        - Units derivation: (nT) = integer * (V/integer) * real* real * (mV/V / mV/nT)
        With the metadata, this is:
        .. math::
            Hx = Hx * scaling * (1/gain1) * gain2
            Hy = Hy * scaling * (1/gain1) * gain2
            Hz = Hz * scaling * (1/gain1) * gain2
        Parameters
        ----------
        time_data : TimeData
            Input time data
        Returns
        -------
        TimeData
            Time data in field units
        """
        logger.info("Applying scaling to data to give field units")
        logger.warning("Phoenix scaling still requires validation")
        messages = ["Scaling raw data to physical units"]
        for chan in time_data.metadata.chans:
            chan_metadata = time_data.metadata.chans_metadata[chan]
            if chan_metadata.electric():
                mult = (
                    chan_metadata.scaling
                    * (1 / chan_metadata.gain1)
                    * (1 / chan_metadata.dipole_dist)
                    * (1_000 * 1_000)
                )
                time_data[chan] = time_data[chan] * mult
                messages.append(
                    f"Scaling {chan} by (FSCV/2^23) * (1/EGN) * (1/E_LN) * (1000*1000) = {mult:.6f}"
                )
            if chan_metadata.magnetic():
                mult = (
                    chan_metadata.scaling
                    * (1 / chan_metadata.gain1)
                    * (chan_metadata.gain2)
                )
                time_data[chan] = time_data[chan] * mult
                messages.append(
                    f"Scaling {chan} by (FSCV/2^23) * (1/HGN) * (1/HATT) [* (1000/HNUM)] = {mult:.6f}"
                )
        record = self._get_record(messages)
        time_data.metadata.history.add_record(record)
        return time_data
def read_discontinuous_data(
    dir_path: Path, ts_num: int, metadata: TimeMetadataTS
) -> TimeData:
    """
    Read data from a discontinuous TS file
    Note that all the gaps are lost and the data is essentially considered
    single continuous data beginning at the first record.
    Parameters
    ----------
    dir_path : Path
        The directory path to read from
    ts_num : int
        The TS file to read
    metadata : TimeMetadataTS
        The TimeMetadataTS for the TS file with details about all records
    Returns
    -------
    TimeData
        The read time data
    Raises
    ------
    TimeDataReadError
        If an incorrect number of files are found for the TS number
    """
    data_list = list(dir_path.glob(f"*.TS{ts_num}"))
    if len(data_list) != 1:
        raise TimeDataReadError(f"Number TS{ts_num} files in {dir_path} != 1")
    data_path = data_list[0]
    logger.info(f"Reading discontinous data from {data_path} at {metadata.fs} Hz")
    messages = [f"Reading discontinuous data from {data_path}"]
    messages.append(f"Sampling frequency {metadata.fs} Hz")
    # get the records to read; all records in the file are read
    n_samples = int(metadata.data_table["n_samples"].sum())
    df_to_read = samples_to_sources(dir_path, metadata.data_table, 0, n_samples - 1)
    logger.info(f"Reading data from {len(df_to_read.index)} records")
    messages.append(f"Reading data from {len(df_to_read.index)} records")
    data = read_records(data_path, metadata, df_to_read)
    # adjust the metadata to treat the data as a single continuous recording
    new_metadata = adjust_time_metadata(
        metadata, metadata.fs, metadata.first_time, n_samples
    )
    record = get_record(creator={"name": "read_discontinuous_data"}, messages=messages)
    new_metadata.history.add_record(record)
    logger.info(f"Data successfully read from {dir_path}")
    time_data = TimeData(new_metadata, data.astype(np.float32))
    return TimeReaderTS().scale_data(time_data)
def reformat(
    dir_path: Path,
    metadata: TimeMetadataPhoenix,
    ts_num: int,
    write_path: Path,
    processors: Optional[List[TimeProcess]] = None,
) -> None:
    """Read a TS file, optionally process it and write it out in numpy format"""
    ts_metadata = metadata.ts_metadata[ts_num]
    if ts_num == metadata.ts_continuous:
        # the continuous file has a dedicated reader
        time_data = TimeReaderTS().run(dir_path, metadata=ts_metadata)
    else:
        time_data = read_discontinuous_data(dir_path, ts_num, ts_metadata)
    for process in [] if processors is None else processors:
        time_data = process.run(time_data)
    TimeWriterNumpy().run(write_path, time_data)
from loguru import logger
from typing import List, Dict, Any
from pathlib import Path
import numpy as np
import pandas as pd
from resistics.time import TimeMetadata, TimeData, TimeReader
from resistics_readers.multifile import TimeMetadataSingle, TimeMetadataMerge
# mapping from SPAM channel names (B prefixed magnetics) to resistics names
CHAN_MAP = {"Ex": "Ex", "Ey": "Ey", "Bx": "Hx", "By": "Hy", "Bz": "Hz"}
# channel type for each SPAM channel name
CHAN_TYPES = {
    "Ex": "electric",
    "Ey": "electric",
    "Bx": "magnetic",
    "By": "magnetic",
    "Bz": "magnetic",
}
def update_xtr_data(
    xtr_data: Dict[str, Any], section: str, key: str, val: str
) -> Dict[str, Any]:
    """
    Insert a key, value pair from an XTR file into the XTR data dictionary

    Keys can appear multiple times within a section. The first occurrence is
    stored as a plain value and any repeat occurrence converts the entry to a
    list holding all the values in the order they were encountered.

    Parameters
    ----------
    xtr_data : Dict[str, Any]
        The dictionary with the existing XTR data
    section : str
        The section which has the key, value pair
    key : str
        The key
    val : str
        the value

    Returns
    -------
    Dict[str, Any]
        The updated XTR dictionary
    """
    section_data = xtr_data[section]
    if key not in section_data:
        # first time this key is seen, keep it as a plain value for now
        section_data[key] = val
    elif isinstance(section_data[key], list):
        # already a list of values, append the latest one
        section_data[key].append(val)
    else:
        # second occurrence, promote the existing value to a list
        section_data[key] = [section_data[key], val]
    return xtr_data
def read_xtr(xtr_path: Path) -> Dict[str, Any]:
    """
    Function to read an XTR file.

    XTR files are similar to INI files but with duplicate entries making them
    more annoying to read.

    Parameters
    ----------
    xtr_path : Path
        Path to the XTR file

    Returns
    -------
    Dict[str, Any]
        Data from the XTR file, keyed by section. Any entries that appear
        before the first section header are stored under the "GLOBAL" section.
    """
    with xtr_path.open("r") as f:
        lines = f.readlines()
    # remove quotes and surrounding whitespace, then drop empty lines
    lines = [x.strip().replace("'", "").strip() for x in lines]
    lines = [x for x in lines if x != ""]
    xtr_data: Dict[str, Any] = {}
    section = "GLOBAL"
    for line in lines:
        if line[0] == "[" and line[-1] == "]":
            section = line[1:-1]
            xtr_data[section] = {}
        else:
            # maxsplit of 1 allows values that themselves contain an "=" sign
            key, val = line.split("=", 1)
            # ensure the section exists; entries can legitimately appear before
            # the first section header (the implicit GLOBAL section), which
            # previously raised a KeyError in update_xtr_data
            if section not in xtr_data:
                xtr_data[section] = {}
            xtr_data = update_xtr_data(xtr_data, section, key.strip(), val.strip())
    return xtr_data
class TimeMetadataXTR(TimeMetadataSingle):
    """This is an extension of TimeMetadataSingle for a single XTR file"""
    xtr_file: str
    """The XTR metadata file"""
    data_byte_start: int
    """The byte offset from beginning of the file to the start of the data"""
    rec_chans: int
    """The number of recorded channels in the RAW file"""
class TimeReaderRAW(TimeReader):
    """
    Parent reader for reading from .RAW SPAM files. Associated metadata can come
    in XTR or XTRX (an XML style) format. Methods for reading from specific
    metadata file formats should be added in child classes inheriting from this
    one.

    The raw data for SPAM is sensor Voltage in single precision float. However,
    if there are multiple data files for a single continuous dataset, each one
    may have a different gain. Therefore, a scaling has to be calculated for
    each data file and channel. Applying these scalings will convert all
    channels to mV.

    More information about scalings can be found in readers for the various
    metadata types, where the scalars are calculated. This class simply
    implements the reading of data and not the calculation of the scalars.
    """

    extension = ".RAW"

    def _read_RAW_metadata(self, raw_path: Path) -> Dict[str, Any]:
        """
        Read metadata directly from the .RAW data files' header bytes

        First begin by reading the general metadata at the start of the file.
        This can be followed by multiple event metadata. However, multiple event
        metadata in RAW files are largely deprecated and only single event
        metadata are supported in this reader.

        Each .RAW file can have its own data byte offset

        Notes
        -----
        Open with encoding ISO-8859-1 because it has a value for all bytes
        unlike other encoding. In particular, want to find number of samples
        and the size of the metadata. The extended metadata is ignored.

        Parameters
        ----------
        raw_path : Path
            Path to RAW file

        Returns
        -------
        Dict[str, Any]
            Dictionary of metadata from RAW file

        Raises
        ------
        MetadataReadError
            If the number of event metadata read is not exactly one. Multiple
            event metadata are not currently supported
        """
        from resistics.errors import MetadataReadError

        f_size = raw_path.stat().st_size
        # use a context manager so the file handle is closed even if parsing
        # raises; previously an exception here leaked the handle
        with raw_path.open("r", encoding="ISO-8859-1") as f:
            # read general metadata - provide enough bytes to read
            raw_metadata = self._read_RAW_general_metadata(f.read(1000))
            # read event metadata
            event_metadata = []
            record = raw_metadata["first_event"]
            for _ir in range(raw_metadata["n_events"]):
                # seek to the record from the beginning of the file
                seek_pt = (record - 1) * raw_metadata["rec_length"]
                if not seek_pt > f_size:
                    f.seek(seek_pt, 0)
                    event = self._read_RAW_event_metadata(f.read(1000))
                    event_metadata.append(event)
                # NOTE(review): if the very first seek point is past the end of
                # the file, event is unbound here - confirm this cannot occur
                if event["next_event_metadata"] < raw_metadata["total_rec"]:
                    # byte location of next record
                    record = event["next_event_metadata"]
                else:
                    break
        if len(event_metadata) == 0:
            # raise a clear error rather than an IndexError further down
            raise MetadataReadError(raw_path, "No event metadata read from RAW file.")
        if len(event_metadata) > 1:
            raise MetadataReadError(
                raw_path, f"({len(event_metadata)}) > 1 events in RAW file."
            )
        raw_metadata.update(event_metadata[0])
        raw_metadata["data_byte_start"] = (
            raw_metadata["start_data"] - 1
        ) * raw_metadata["rec_length"]
        return raw_metadata

    def _read_RAW_general_metadata(self, general: str) -> Dict[str, Any]:
        """
        Parse the general metadata at the start of a RAW file

        Note that rec_chans is the number of channels recorded, not the number
        of channels connected acquiring good data. This is usually five.

        Parameters
        ----------
        general : str
            The general data as a string

        Returns
        -------
        Dict[str, Any]
            Dictionary with general metadata and values
        """
        gen_split = general.split()
        return {
            "rec_length": int(gen_split[0]),
            "file_type": gen_split[1],
            "word_length": int(gen_split[2]),
            "version": gen_split[3],
            "proc_id": gen_split[4],
            "rec_chans": int(gen_split[5]),
            "total_rec": int(gen_split[6]),
            "first_event": int(gen_split[7]),
            "n_events": int(gen_split[8]),
            "extend": int(gen_split[9]),
        }

    def _read_RAW_event_metadata(self, event: str) -> Dict[str, Any]:
        """
        Parse the event metadata

        Parameters
        ----------
        event : str
            The event data as a string

        Returns
        -------
        Dict[str, Any]
            Dictionary with event metadata and values
        """
        event_split = event.split()
        return {
            "start": int(event_split[0]),
            "startms": int(event_split[1]),
            "stop": int(event_split[2]),
            "stopms": int(event_split[3]),
            "cvalue1": float(event_split[4]),
            "cvalue2": float(event_split[5]),
            "cvalue3": float(event_split[6]),
            "event_metadata_infile": int(event_split[7]),
            "next_event_metadata": int(event_split[8]),
            "previous_event_metadata": int(event_split[9]),
            "n_samples": int(event_split[10]),
            "start_data": int(event_split[11]),
            "extended": int(event_split[12]),
        }

    def read_data(
        self, dir_path: Path, metadata: TimeMetadata, read_from: int, read_to: int
    ) -> TimeData:
        """
        Get data from data file, returned in mV

        Calling this applies scalings calculated when the metadata are read.
        When a recording consists of multiple data files, each channel of each
        data file might have a different scaling, therefore, gain removals and
        other RAW file unique scalings need to be applied before the data is
        stitched together.

        This method returns the data in mV for all channels.

        Parameters
        ----------
        dir_path : Path
            The directory path to read from
        metadata : TimeMetadata
            Time series data metadata
        read_from : int
            Sample to read data from
        read_to : int
            Sample to read data to

        Returns
        -------
        TimeData
            Time data object
        """
        from resistics_readers.multifile import samples_to_sources

        dtype = np.float32
        dtype_size = np.dtype(np.float32).itemsize
        n_samples = read_to - read_from + 1
        messages = [f"Reading raw data from {dir_path}"]
        messages.append(f"Sampling rate {metadata.fs} Hz")
        # loop over RAW files and read data
        data_table = pd.DataFrame(data=metadata.data_table)
        df_to_read = samples_to_sources(dir_path, data_table, read_from, read_to)
        data = np.empty(shape=(metadata.n_chans, n_samples), dtype=dtype)
        sample = 0
        for data_file, info in df_to_read.iterrows():
            file_from = info.loc["read_from"]
            file_to = info.loc["read_to"]
            n_samples_file = info.loc["n_samples_read"]
            rec_chans = info.loc["rec_chans"]
            data_byte_start = info.loc["data_byte_start"]
            messages.append(f"{data_file}: Reading samples {file_from} to {file_to}")
            logger.debug(f"{data_file}: Reading samples {file_from} to {file_to}")
            # samples are interleaved scans: read n_samples_file scans of
            # rec_chans values each, then de-interleave per channel below
            n_samples_read = n_samples_file * rec_chans
            byteoff = data_byte_start + (file_from * rec_chans * dtype_size)
            data_path = dir_path / str(data_file)
            data_read = np.memmap(
                data_path, dtype=dtype, mode="r", offset=byteoff, shape=(n_samples_read)
            )
            for idx, chan in enumerate(metadata.chans):
                # per-file, per-channel scaling converts to mV
                scaling = info[f"{chan} scaling"]
                data[idx, sample : sample + n_samples_file] = (
                    data_read[idx:n_samples_read:rec_chans] * scaling
                )
            sample = sample + n_samples_file
        metadata = self._get_return_metadata(metadata, read_from, read_to)
        messages.append(f"From sample, time: {read_from}, {str(metadata.first_time)}")
        messages.append(f"To sample, time: {read_to}, {str(metadata.last_time)}")
        metadata.history.add_record(self._get_record(messages))
        logger.info(f"Data successfully read from {dir_path}")
        return TimeData(metadata, data)

    def scale_data(self, time_data: TimeData) -> TimeData:
        """
        Scale data to physically meaningful units

        Resistics uses field units, meaning physical samples will return the
        following:

        - Electrical channels in mV/km
        - Magnetic channels in mV or nT depending on the sensor
        - To convert magnetic in mV to nT, calibration is required

        Notes
        -----
        Conversion to mV (gain removal) is performed on read as each RAW file in
        a dataset can have a separate scalar. Because gain is removed when
        reading the data and all channel data is in mV, the only calculation
        that has to be done is to divide by the dipole lengths (east-west
        spacing and north-south spacing).

        Parameters
        ----------
        time_data : TimeData
            TimeData read in from files

        Returns
        -------
        TimeData
            Scaled TimeData
        """
        logger.info("Applying scaling to data to give field units")
        messages = ["Scaling raw data to physical units"]
        for chan in time_data.metadata.chans:
            chan_metadata = time_data.metadata.chans_metadata[chan]
            if chan_metadata.electric():
                dipole_dist_km = chan_metadata.dipole_dist / 1_000
                time_data[chan] = time_data[chan] / dipole_dist_km
                messages.append(f"Dividing {chan} by dipole length {dipole_dist_km} km")
        record = self._get_record(messages)
        time_data.metadata.history.add_record(record)
        return time_data
class TimeReaderXTR(TimeReaderRAW):
"""Data reader for SPAM RAW data with XTR metadata"""
def read_metadata(self, dir_path: Path) -> TimeMetadataMerge:
"""
Read and merge XTR metadata files
For SPAM data, there may be more than one XTR metadata file as data can
be split up into smaller files as it is recorded and each data file will
have an associated XTR metadata file. In this case, the following steps
are taken:
- Read all XTR files
- Generate a table with metadata about each data file (times, scalings)
- Merge metadata for all the data files
Parameters
----------
dir_path : Path
Directory path with SPAM data
Returns
-------
TimeMetadataMerge
Merged TimeMetadataXTR
Raises
------
TimeDataReadError
If not all data files exist
TimeDataReadError
If the extension of the data files is incorrect
"""
from resistics.errors import MetadataReadError, TimeDataReadError
from resistics_readers.multifile import validate_consistency
from resistics_readers.multifile import validate_continuous
metadata_paths = list(dir_path.glob("*.XTR"))
if len(metadata_paths) == 0:
MetadataReadError(dir_path, "No XTR files found.")
raw_paths = list(dir_path.glob("*.RAW"))
if not len(metadata_paths) == len(raw_paths):
TimeDataReadError(
dir_path, "Mismatch between number data files and XTR files"
)
metadata_list = []
for metadata_path in metadata_paths:
time_metadata = self._read_xtr(metadata_path)
metadata_list.append(time_metadata)
validate_consistency(dir_path, metadata_list)
validate_continuous(dir_path, metadata_list)
data_table = self._generate_table(metadata_list)
metadata = self._merge_metadata(metadata_list, data_table)
if not self._check_data_files(dir_path, metadata):
raise TimeDataReadError(dir_path, "All data files do not exist")
if not self._check_extensions(dir_path, metadata):
raise TimeDataReadError(dir_path, f"Data file suffix not {self.extension}")
return metadata
    def _read_xtr(self, xtr_path: Path) -> TimeMetadataXTR:
        """
        Read an individual XTR metadata file and return TimeMetadata
        There is also metadata in RAW files associated with XTR files. To get
        the full set of metadata and ensure the data makes sense, XTR metadata
        are validated against those in the RAW file.
        Parameters
        ----------
        xtr_path : Path
            The XTR metadata path to read in
        Returns
        -------
        TimeMetadataXTR
            Metadata for the XTR file
        Raises
        ------
        MetadataReadError
            If there is a mismatch in samples between XTR file and RAW file
        """
        from resistics.errors import MetadataReadError
        xtr_data = read_xtr(xtr_path)
        xtr_time_dict = self._read_xtr_dataset_metadata(xtr_data)
        xtr_time_dict["xtr_file"] = xtr_path.name
        xtr_time_dict["chans_metadata"] = self._read_xtr_chan_metadata(xtr_data)
        xtr_time_dict["chans"] = list(xtr_time_dict["chans_metadata"].keys())
        # the associated RAW data file lives alongside the XTR file
        raw_path = xtr_path.parent / xtr_time_dict["data_file"]
        raw_metadata = self._read_RAW_metadata(raw_path)
        # check to make sure sample length in file matches calculated samples
        if xtr_time_dict["n_samples"] != raw_metadata["n_samples"]:
            raise MetadataReadError(
                xtr_path,
                f"Sample mismatch between XTR {xtr_path} and RAW in {raw_path}",
            )
        xtr_time_dict["data_byte_start"] = raw_metadata["data_byte_start"]
        xtr_time_dict["rec_chans"] = raw_metadata["rec_chans"]
        return TimeMetadataXTR(**xtr_time_dict)
def _read_xtr_dataset_metadata(self, xtr_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extract the dataset-level TimeData metadata from parsed XTR content.

    Parameters
    ----------
    xtr_data : Dict[str, Any]
        Information from the xtr file

    Returns
    -------
    Dict[str, Any]
        Metadata as a dictionary
    """
    from resistics.sampling import to_datetime

    metadata: Dict[str, Any] = {}
    # raw data file name and sampling frequency come from the FILE/NAME entry
    name_tokens = xtr_data["FILE"]["NAME"].split()
    metadata["data_file"] = name_tokens[0]
    metadata["fs"] = np.absolute(float(name_tokens[-1]))
    # first and last times are unix timestamps in microseconds, each split
    # across two whitespace-separated fields that are concatenated back
    date_tokens = xtr_data["FILE"]["DATE"].split()
    first_time = pd.to_datetime(
        int(date_tokens[0] + date_tokens[1]), unit="us", origin="unix"
    )
    last_time = pd.to_datetime(
        int(date_tokens[2] + date_tokens[3]), unit="us", origin="unix"
    )
    duration = (last_time - first_time).total_seconds()
    metadata["first_time"] = to_datetime(first_time)
    metadata["last_time"] = to_datetime(last_time)
    # both endpoints are included in the recording, hence the + 1
    metadata["n_samples"] = int((duration * metadata["fs"]) + 1)
    # number of channels
    metadata["n_chans"] = xtr_data["CHANNAME"]["ITEMS"]
    # site location information
    coord_tokens = xtr_data["SITE"]["COORDS"].split()
    metadata["wgs84_latitude"] = float(coord_tokens[1])
    metadata["wgs84_longitude"] = float(coord_tokens[2])
    metadata["elevation"] = float(coord_tokens[3])
    return metadata
def _read_xtr_chan_metadata(
    self, xtr_data: Dict[str, Any]
) -> Dict[str, Dict[str, Any]]:
    """
    Get metadata for each channel

    There are some tricky notes here regarding scalers that are extracted.
    This information was provided by Reinhard.

    For electric channels:

    - Data is in raw voltage of sensors
    - Scaling is taken from DATA section of XTR file and will be applied
    - Polarity reversal is applied (multiply by -1)
    - 1000x scaling is applied to convert Volts to mV
    - A final unknown scaling of 1000 is applied

    Mathematically this becomes:

    .. math::

        scaling = scaling extracted from DATA section of XTR file
        scaling = scaling * -1000 (polarity reversal and convert V to mV),
        scaling = scaling * 1000 (unknown 1000 scaling).

    For magnetic channels:

    - Scaling in DATA section ignored
    - This scaling is applied as the static gain during calibration
    - Polarity reversal is applied (multiply by -1)
    - A final unknown scaling of 1000

    Mathematically, this is,

    .. math::

        scaling = -1000 (Polarity reversal and unknown 1000 factor)

    .. note::

        Board LF is currently translated to chopper being True or On. Not
        sure how applicable this is, but it's in there.

    Parameters
    ----------
    xtr_data : Dict[str, Any]
        Data from the XTR file

    Returns
    -------
    Dict[str, Dict[str, Any]]
        Metadata for each channel, keyed by the mapped channel name
    """
    # CHANNAME/NAME entries look like "<index> <channel>"; take the channel
    chans = [x.split()[1] for x in xtr_data["CHANNAME"]["NAME"]]
    xtr_chans_metadata = {}
    for idx, chan in enumerate(chans):
        chan_metadata = {
            "name": CHAN_MAP[chan],
            # every channel comes from the single data file in FILE/NAME
            "data_files": xtr_data["FILE"]["NAME"].split()[0],
        }
        chan_metadata["chan_type"] = CHAN_TYPES[chan]
        chan_metadata["chan_source"] = chan
        data_split = xtr_data["DATA"]["CHAN"][idx].split()
        chan_metadata["scaling"] = self._get_xtr_chan_scaling(
            CHAN_TYPES[chan], data_split
        )
        # presumably the electrode spacing in metres — TODO confirm units
        chan_metadata["dipole_dist"] = float(data_split[3])
        # sensors
        # sensor sections are keyed 2001003, 2002003, ... one per channel;
        # assumes single-digit channel indices — TODO confirm for > 9 chans
        sensor_section = f"200{idx + 1}003"
        sensor_split = xtr_data[sensor_section]["MODULE"].split()
        chan_metadata["serial"] = sensor_split[1]
        # get the board and coil type from name of calibration file
        cal_file = sensor_split[0]
        split = cal_file.split("-")
        info = split[split.index("TYPE") + 1]
        chan_metadata["sensor"] = info.split("_")[0]
        # board "LF" is translated to chopper on (see note above)
        chan_metadata["chopper"] = "LF" in info
        # add to main dictionary
        xtr_chans_metadata[CHAN_MAP[chan]] = chan_metadata
    return xtr_chans_metadata
def _get_xtr_chan_scaling(self, chan_type: str, data_split: List[str]) -> float:
"""Get the correction required for field units"""
scaling = float(data_split[-2])
if chan_type == "electric":
scaling = -1000.0 * scaling * 1000
if chan_type == "magnetic":
scaling = -1000
return scaling
def _generate_table(self, metadata_list: List[TimeMetadataXTR]) -> pd.DataFrame:
    """
    Build a table mapping each RAW data file to its first and last time,
    number of samples, data byte offset, recorded channels and per-channel
    scalings.

    Parameters
    ----------
    metadata_list : List[TimeMetadataXTR]
        List of TimeMetadataXTR, one for each XTR/RAW file combination

    Returns
    -------
    pd.DataFrame
        The table mapping data file to various properties
    """
    from resistics_readers.multifile import add_cumulative_samples

    columns = {
        "data_file": [entry.data_file for entry in metadata_list],
        "first_time": [entry.first_time for entry in metadata_list],
        "last_time": [entry.last_time for entry in metadata_list],
        "n_samples": [entry.n_samples for entry in metadata_list],
        "data_byte_start": [entry.data_byte_start for entry in metadata_list],
        "rec_chans": [entry.rec_chans for entry in metadata_list],
    }
    # one scaling column per channel, as scalings may differ between files
    for chan in metadata_list[0].chans:
        columns[f"{chan} scaling"] = [
            entry.chans_metadata[chan].scaling for entry in metadata_list
        ]
    table = pd.DataFrame(columns).sort_values("first_time").set_index("data_file")
    return add_cumulative_samples(table)
def _merge_metadata(
    self, metadata_list: List[TimeMetadataXTR], df: pd.DataFrame
) -> TimeMetadataMerge:
    """
    Merge metadata from all the metadata files

    The following assumptions are made:

    - Assume no change in location over time (lat and long remain the same)
    - Assume no change in sensors over time
    - Assume no change in electrode spacing over time

    As each data file can have different scalings, the most common scaling
    (mode) for each channel is taken.

    .. note::

        The first entry of metadata_list is mutated in place before being
        converted to a dictionary.

    Parameters
    ----------
    metadata_list : List[TimeMetadataXTR]
        List of TimeMetadataXTR, one for each XTR/RAW file combination
    df : pd.DataFrame
        The data table with information about each raw file

    Returns
    -------
    TimeMetadataMerge
        Merged metadata
    """
    data_files = df.index.values.tolist()
    metadata = metadata_list[0]
    for chan in metadata.chans:
        metadata.chans_metadata[chan].data_files = data_files
        # Series.mode returns a Series of modal values; take the first one
        # rather than assigning the whole Series to the scalar scaling field
        scaling_modes = df[f"{chan} scaling"].mode(dropna=True)
        metadata.chans_metadata[chan].scaling = float(scaling_modes.iloc[0])
    metadata_dict = metadata.dict()
    metadata_dict["first_time"] = df["first_time"].min()
    metadata_dict["last_time"] = df["last_time"].max()
    metadata_dict["n_samples"] = df["n_samples"].sum()
    metadata_dict["data_table"] = df.to_dict()
    return TimeMetadataMerge(**metadata_dict)
from typing import List, Dict, Any, Tuple
from pathlib import Path
import re
import numpy as np
import pandas as pd
from xml.etree.ElementTree import Element # noqa: S405
import defusedxml.ElementTree as ET
from resistics.time import ChanMetadata
from resistics.calibrate import SensorCalibrationReader, CalibrationData
from resistics.spectra import SpectraMetadata
class SensorCalibration_RSP_RSPX_Base(SensorCalibrationReader):
    """Base class for RSP and RSPX calibration data readers"""

    # Template for the calibration file name; $-prefixed tokens are
    # substituted with channel-specific values in _get_path
    file_str: str = "Metronix_Coil-----TYPE-$sensor_$chopper-ID-$serial$extension"

    def _get_path(self, dir_path: Path, metadata: SpectraMetadata, chan: str) -> Path:
        """
        Get the path to the calibration file for a channel.

        Parameters
        ----------
        dir_path : Path
            The directory path to look for calibration files
        metadata : SpectraMetadata
            SpectraMetadata with data information
        chan : str
            The channel to calibrate

        Returns
        -------
        Path
            The path to the calibration file
        """
        chan_metadata = metadata.chans_metadata[chan]
        sensor_digits = re.sub("[^0-9]", "", chan_metadata.sensor)
        substitutions = {
            "$sensor": f"{int(sensor_digits):03d}",
            "$serial": f"{int(chan_metadata.serial):06d}",
            "$chopper": "LF" if chan_metadata.chopper else "HF",
            "$extension": self.extension,
        }
        file_name = self.file_str
        for token, value in substitutions.items():
            file_name = file_name.replace(token, value)
        return dir_path / file_name

    def _get_chopper(self, file_path: Path) -> bool:
        """Return True when the calibration file is for chopper on"""
        stem = file_path.stem
        return ("LF" in stem) or ("BB" in stem)
class SensorCalibrationRSP(SensorCalibration_RSP_RSPX_Base):
    """
    Reader for RSP calibration files

    RSP data is in units:

    - F [Hz]
    - Magnitude [mV/nT]
    - Phase [deg]

    Data is returned with units:

    - F [Hz]
    - Magnitude [mV/nT]
    - Phase [radians]

    The static gain for RSP files is applied to the magnitude as it is read in
    """

    # RSP calibration files use the .RSP extension
    extension: str = ".RSP"

    def read_calibration_data(
        self, file_path: Path, chan_metadata: ChanMetadata
    ) -> CalibrationData:
        """
        Read data from a RSP calibration file

        Parameters
        ----------
        file_path : Path
            The file path of the calibration file
        chan_metadata : ChanMetadata
            The channel metadata for the channel to be calibrated

        Returns
        -------
        CalibrationData
            The calibration data
        """
        with file_path.open("r") as f:
            lines = f.readlines()
        lines = [x.strip() for x in lines]
        data_dict = self._read_metadata(lines)
        data_dict["chopper"] = self._get_chopper(file_path)
        df = self._read_data(lines)
        # apply the static gain to the magnitude as the data is read in
        df["magnitude"] = df["magnitude"] * data_dict["static_gain"]
        # convert phase from degrees to radians
        df["phase"] = df["phase"] * (np.pi / 180)
        data_dict["frequency"] = df.index.values.tolist()
        data_dict["magnitude"] = df["magnitude"].values.tolist()
        data_dict["phase"] = df["phase"].values.tolist()
        data_dict["file_path"] = file_path
        return CalibrationData(**data_dict)

    def _read_metadata(self, lines: List[str]) -> Dict[str, Any]:
        """Read the calibration file metadata (serial, sensor, static gain)"""
        serial, sensor = self._get_sensor_details(lines)
        static_gain = self._get_static_gain(lines)
        return {
            "serial": serial,
            "sensor": sensor,
            "static_gain": static_gain,
            "magnitude_unit": "mV/nT",
            "phase_unit": "radians",
        }

    def _get_sensor_details(self, lines: List[str]) -> Tuple[int, str]:
        """Get sensor serial number and sensor type from the file lines"""
        # defaults in case the expected lines are not present
        serial: int = 1
        sensor: str = ""
        for line in lines:
            if "induction coil no" in line:
                # line format assumed "...: <serial>-..." — TODO confirm
                split1 = line.split(":")[1]
                serial = int(split1.split("-")[0].strip())
            if "SensorType" in line:
                sensor = line.split()[1]
        return serial, sensor

    def _get_static_gain(self, lines: List[str]) -> float:
        """Get the static gain from the first StaticGain line (1.0 if absent)"""
        static_gain: float = 1.0
        for line in lines:
            if "StaticGain" in line:
                static_gain = float(line.split()[1])
                return static_gain
        return static_gain

    def _read_data(self, lines: List[str]) -> pd.DataFrame:
        """Read data from calibration file into a frequency-indexed DataFrame"""
        read_from = self._get_read_from(lines)
        data_lines = self._get_data_lines(lines, read_from)
        data = np.array([x.split() for x in data_lines], dtype=np.float32)
        df = pd.DataFrame(data=data, columns=["frequency", "magnitude", "phase"])
        return df.set_index("frequency").sort_index()

    def _get_read_from(self, lines: List[str]) -> int:
        """Get the line number to read data from (two lines after FREQUENCY)"""
        for idx, line in enumerate(lines):
            if "FREQUENCY" in line:
                return idx + 2
        raise ValueError("Unable to determine location of data in file")

    def _get_data_lines(self, lines: List[str], idx: int) -> List[str]:
        """Collect contiguous non-empty lines starting at idx"""
        data_lines: List[str] = []
        while idx < len(lines) and lines[idx] != "":
            data_lines.append(lines[idx])
            idx += 1
        return data_lines
class SensorCalibrationRSPX(SensorCalibration_RSP_RSPX_Base):
    """
    Read data from RSPX calibration file

    RSPX data is in units:

    - F [Hz]
    - Magnitude [mV/nT]
    - Phase [deg]

    Data is returned with units:

    - F [Hz]
    - Magnitude [mV/nT]
    - Phase [radians]

    Static gain is applied to the magnitude
    """

    # RSPX calibration files use the .RSPX extension
    extension: str = ".RSPX"

    def read_calibration_data(
        self, file_path: Path, chan_metadata: ChanMetadata
    ) -> CalibrationData:
        """
        Read RSPX file

        Parameters
        ----------
        file_path : Path
            The file path of the calibration file
        chan_metadata : ChanMetadata
            The channel metadata for the channel to be calibrated

        Returns
        -------
        CalibrationData
            The calibration data
        """
        # defusedxml guards against malicious XML input
        root = ET.parse(file_path).getroot()
        data_dict = self._read_metadata(root)
        data_dict["chopper"] = self._get_chopper(file_path)
        df = self._read_data(root)
        # apply the static gain to the magnitude as the data is read in
        df["magnitude"] = df["magnitude"] * data_dict["static_gain"]
        # convert phase from degrees to radians
        df["phase"] = df["phase"] * (np.pi / 180)
        data_dict["frequency"] = df.index.values.tolist()
        data_dict["magnitude"] = df["magnitude"].values.tolist()
        data_dict["phase"] = df["phase"].values.tolist()
        data_dict["file_path"] = file_path
        return CalibrationData(**data_dict)

    def _read_metadata(self, root: Element) -> Dict[str, Any]:
        """Read the calibration file metadata (serial, sensor, static gain)"""
        serial, sensor = self._get_sensor_details(root)
        static_gain = self._get_static_gain(root)
        return {
            "serial": serial,
            "sensor": sensor,
            "static_gain": static_gain,
            "magnitude_unit": "mV/nT",
            "phase_unit": "radians",
        }

    def _get_sensor_details(self, root: Element) -> Tuple[int, str]:
        """Get sensor serial and specification, with defaults when absent"""
        serial: int = 1
        if root.find("SensorId") is not None:
            serial = int(root.find("SensorId").text)
        sensor: str = ""
        if root.find("SensorSpecification") is not None:
            sensor = root.find("SensorSpecification").text
        return serial, sensor

    def _get_static_gain(self, root: Element) -> float:
        """Get the static gain (1.0 when no StaticGain element exists)"""
        static_gain: float = 1.0
        if root.find("StaticGain") is not None:
            static_gain = float(root.find("StaticGain").text)
        return static_gain

    def _read_data(self, root: Element) -> pd.DataFrame:
        """Get ResponseData elements as a frequency-indexed DataFrame"""
        data = []
        for resp in root.findall("ResponseData"):
            data.append(
                [
                    np.float32(resp.get("Frequency")),
                    np.float32(resp.get("Magnitude")),
                    np.float32(resp.get("Phase")),
                ]
            )
        df = pd.DataFrame(data=data, columns=["frequency", "magnitude", "phase"])
        return df.set_index("frequency").sort_index()
try:
from PIL import Image
except ImportError:
import Image
def resize_and_crop(path, size, crop_origin='middle'):
    """
    Resize and crop an image to exactly fit the specified size.

    args:
        path: path for the image to resize.
        size: `(width, height)` tuple.
        crop_origin: can be 'top', 'middle' or 'bottom', depending on this
            value, the image will be cropped getting the 'top/left', 'middle'
            or 'bottom/right' of the image to fit the size.

    raises:
        Exception: if the file in path can not be opened or there are
            problems to save the image.
        ValueError: if an invalid `crop_origin` is provided.
    """
    img = Image.open(path)
    # JPEG cannot store an alpha channel, so normalise everything to RGB
    if img.mode != "RGB":
        img = img.convert("RGB")
    # Current and desired aspect ratios decide the scaling direction
    img_ratio = img.size[0] / float(img.size[1])
    ratio = size[0] / float(size[1])
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    if ratio > img_ratio:
        # Target is relatively wider: match the width, then crop vertically
        new_height = int(round(size[0] * img.size[1] / img.size[0]))
        img = img.resize((size[0], new_height), Image.LANCZOS)
        img = img.crop(_crop_box(img.size, size, crop_origin, vertical=True))
    elif ratio < img_ratio:
        # Target is relatively taller: match the height, then crop horizontally
        new_width = int(round(size[1] * img.size[0] / img.size[1]))
        img = img.resize((new_width, size[1]), Image.LANCZOS)
        img = img.crop(_crop_box(img.size, size, crop_origin, vertical=False))
    else:
        # Ratios match: a plain resize is enough, nothing to crop
        img = img.resize((size[0], size[1]), Image.LANCZOS)
    return img


def _crop_box(img_size, size, crop_origin, vertical):
    """
    Return the (left, upper, right, lower) crop box for the requested origin.

    The box is computed from a single start offset so the cropped extent is
    always exactly the target size; the previous implementation rounded the
    top and bottom of the 'middle' box independently, which could be off by
    one pixel.
    """
    full, target = (img_size[1], size[1]) if vertical else (img_size[0], size[0])
    if crop_origin == 'top':
        start = 0
    elif crop_origin == 'middle':
        start = (full - target) // 2
    elif crop_origin == 'bottom':
        start = full - target
    else:
        raise ValueError('ERROR: invalid value for crop_origin')
    if vertical:
        return (0, start, img_size[0], start + target)
    return (start, 0, start + target, img_size[1])
import io
import logging
import zipfile
import os
from PIL import Image, ImageDraw
from js import Uint8Array, File, document, window
from .spec import icon_spec
log = logging.getLogger(__name__)
usage_log = logging.getLogger("mki_usage")
class Processor:
    """
    Apply the per-icon transformation pipeline described by a spec entry.

    Each call to :meth:`process` works on a fresh copy of the source image
    and appends the rendered PNG to ``path_stream_tuples``.
    """

    def __init__(self, png_img):
        # Source image; every spec entry is rendered from a copy of it
        self.png_img = png_img
        # Accumulated (file path, PNG byte stream) results
        self.path_stream_tuples = []

    @property
    def method_map(self):
        """Map each spec key to the transformation method implementing it."""
        return {
            "should_crop_to_rounded": self.crop_to_rounded,
            "should_remove_alpha": self.remove_alpha,
            "crop_height": self.crop_height,
            "size": self.resize,
        }

    def remove_alpha(self, img, spec):
        """
        Flatten transparency onto a black background and return an RGB image.

        - Convert to RGBA; convert() returns a new image, so the result must
          be reassigned (previously it was discarded, a bug)
        - Paste onto a black canvas using the image's alpha channel as mask
        - Thumbnail to the spec size and drop the alpha channel
        """
        img = img.convert("RGBA")
        canvas = Image.new("RGBA", img.size, (0, 0, 0, 255))
        canvas.paste(img, mask=img)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
        canvas.thumbnail([spec["size"], spec["size"]], Image.LANCZOS)
        img = canvas.convert("RGB")
        return img

    def crop_height(self, img, spec):
        """Crop the image height symmetrically to the spec's crop_height."""
        crop_height_spec = spec["crop_height"]
        width, height = img.size
        top = (height - crop_height_spec) // 2
        bottom = (height + crop_height_spec) // 2
        img = img.crop((0, top, width, bottom))
        return img

    def crop_to_rounded(self, img, spec):
        """Apply a circular alpha mask to produce a rounded icon."""
        circle = Image.new("L", img.size, 0)
        draw = ImageDraw.Draw(circle)
        draw.ellipse((0, 0) + img.size, fill=255)
        alpha = Image.new("L", img.size, 255)
        alpha.paste(circle)
        img.putalpha(alpha)
        return img

    def resize(self, img, spec):
        """Resize the image to a square of the spec size."""
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
        img = img.resize((spec["size"], spec["size"]), Image.LANCZOS)
        return img

    def process(self, file_path, file_name, spec):
        """
        Render one icon described by ``spec`` into ``file_path/file_name``.

        The transformations run in a fixed order; the resulting PNG bytes
        are appended to ``path_stream_tuples``.
        """
        img = self.png_img.copy()
        try:
            # The order in this list matters
            for pipeline in [
                "should_crop_to_rounded",
                "should_remove_alpha",
                "size",
                "crop_height",
            ]:
                if pipeline in spec and bool(spec[pipeline]) is True:
                    img = self.method_map[pipeline](img, spec)
        except ValueError:
            # Best effort: still emit the partially processed image rather
            # than aborting the whole batch, but record the failure
            log.warning("Transformation failed for %s/%s", file_path, file_name)
        finally:
            # exist_ok avoids a race between an exists() check and makedirs
            os.makedirs(file_path, exist_ok=True)
            new_stream = io.BytesIO()
            img.save(new_stream, format="PNG")
            self.path_stream_tuples.append((file_path + "/" + file_name, new_stream))
async def icon(origin_image_file):
    """
    Generate the full icon set from an uploaded image and download it.

    Decodes the browser file object, renders every icon described by
    ``icon_spec`` under an ``/icons`` root, zips the results and triggers a
    browser download of the archive.

    Parameters
    ----------
    origin_image_file
        A JavaScript ``File`` object (presumably from a file input element
        on the page — TODO confirm against the page code).
    """
    # Generate the icon.
    png_img = await convert_to_png(origin_image_file)
    path_stream_tuples = walk_spec("/icons", png_img)
    zip_file = zip_images(path_stream_tuples)
    download_zip(zip_file)
    usage_log.info("Icon generated: {}".format(png_img))
def walk_spec(file_path, png_img, spec=icon_spec):
    """
    Recursively walk an icon spec tree, rendering every leaf entry.

    A dict containing a "size" key is treated as a concrete icon description
    and rendered under ``file_path``; any other dict is a directory level
    and is recursed into. Returns the collected (path, stream) tuples.
    """
    processor = Processor(png_img)
    collected = []
    for name, entry in spec.items():
        if not isinstance(entry, dict):
            continue
        if "size" in entry:
            # Leaf: a concrete icon description, render it at this level
            processor.process(file_path, name, entry)
        else:
            # Directory level: recurse with an extended path
            collected.extend(walk_spec(file_path + "/" + name, png_img, entry))
    # Icons rendered at this level come after those from deeper levels
    collected.extend(processor.path_stream_tuples)
    return collected
def zip_images(path_stream_tuples):
    """
    Pack rendered icons into an in-memory ZIP wrapped in a JS ``File``.

    Parameters
    ----------
    path_stream_tuples
        Sequence of (archive path, BytesIO with PNG bytes) tuples.

    Returns
    -------
    A JavaScript ``File`` object holding the ZIP archive, suitable for
    creating a browser download.
    """
    zip_file_bytes_io = io.BytesIO()
    with zipfile.ZipFile(zip_file_bytes_io, "w") as zip_file:
        for image_name, bytes_stream in path_stream_tuples:
            zip_file.writestr(image_name, bytes_stream.getvalue())
    # Create a ZIP file from the zip file data
    zip_file = File.new(
        [Uint8Array.new(zip_file_bytes_io.getvalue())], {"type": "application/zip"}
    )
    return zip_file
def download_zip(zip_file):
    """
    Trigger a browser download of the given JS ``File`` as ``icons.zip``.

    Creates an object URL for the file, attaches it to a synthetic anchor
    element and clicks it programmatically.
    """
    # Create a URL for the ZIP object
    zip_url = window.URL.createObjectURL(zip_file)
    # Create a download link
    download_link = document.createElement("a")
    download_link.href = zip_url
    download_link.download = "icons.zip"
    download_link.click()
async def convert_to_png(origin_image):
    """
    Decode an uploaded browser file into a PIL image.

    Parameters
    ----------
    origin_image
        A JavaScript ``File`` object exposing ``arrayBuffer()``.

    Returns
    -------
    A PIL ``Image`` decoded from the file's bytes.
    """
    # Get the data from the files arrayBuffer as an array of unsigned bytes
    array_buf = Uint8Array.new(await origin_image.arrayBuffer())
    # BytesIO wants a bytes-like object, so convert to bytearray first
    origin_bytes = io.BytesIO(bytearray(array_buf))
    img = Image.open(origin_bytes)
    # Force decoding now; Image.open is lazy and the previous implementation
    # only decoded implicitly by re-saving the image to a throwaway buffer
    img.load()
    return img
def _entry(size, remove_alpha=False, rounded=False, crop_height=None):
    """
    Build a single icon spec entry.

    Parameters
    ----------
    size : int
        Square output size in pixels (width and height).
    remove_alpha : bool
        Whether the alpha channel should be flattened (App Store artwork
        must not contain transparency).
    rounded : bool
        Whether the icon should be cropped to a circle.
    crop_height : int or None
        When set, the output is cropped vertically to this height, giving a
        rectangular size x crop_height icon (used by iMessage icons).
    """
    return {
        "size": size,
        "should_remove_alpha": remove_alpha,
        "should_crop_to_rounded": rounded,
        "crop_height": crop_height,
    }


# Launcher icon size per Android density bucket; the foreground layer of an
# adaptive icon is 2.25x the launcher size (e.g. 48 -> 108).
_ANDROID_LAUNCHER_SIZES = {
    "mdpi": 48,
    "hdpi": 72,
    "xhdpi": 96,
    "xxhdpi": 144,
    "xxxhdpi": 192,
}


# Full icon generation spec: nested dicts are directory levels, leaf dicts
# (those with a "size" key) describe one output icon each.
icon_spec = {
    "ios": {
        "AppIcon.appiconset": {
            "Icon-App-20x20@1x.png": _entry(20),
            "Icon-App-20x20@2x.png": _entry(40),
            "Icon-App-20x20@3x.png": _entry(60),
            "Icon-App-29x29@1x.png": _entry(29),
            "Icon-App-29x29@2x.png": _entry(58),
            "Icon-App-29x29@3x.png": _entry(87),
            "Icon-App-40x40@1x.png": _entry(40),
            "Icon-App-40x40@2x.png": _entry(80),
            "Icon-App-40x40@3x.png": _entry(120),
            "Icon-App-60x60@2x.png": _entry(120),
            "Icon-App-60x60@3x.png": _entry(180),
            "Icon-App-76x76@1x.png": _entry(76),
            "Icon-App-76x76@2x.png": _entry(152),
            "Icon-App-83.5x83.5@2x.png": _entry(167),
            "ItunesArtwork@2x.png": _entry(1024, remove_alpha=True),
        },
        # iTunes artwork
        "iTunesArtwork@1x.png": _entry(512, remove_alpha=True),
        "iTunesArtwork@2x.png": _entry(1024, remove_alpha=True),
        "iTunesArtwork@3x.png": _entry(1536, remove_alpha=True),
    },
    "imessenger": {
        "icon-messages-app-27x20@1x.png": _entry(27, crop_height=20),
        "icon-messages-app-27x20@2x.png": _entry(54, crop_height=40),
        "icon-messages-app-27x20@3x.png": _entry(81, crop_height=60),
        "icon-messages-app-iPadAir-67x50@2x.png": _entry(134, crop_height=100),
        "icon-messages-app-iPadAir-74x55@2x.png": _entry(148, crop_height=110),
        "icon-messages-app-iPhone-60x45@1x.png": _entry(60, crop_height=45),
        "icon-messages-app-iPhone-60x45@2x.png": _entry(120, crop_height=90),
        "icon-messages-app-iPhone-60x45@3x.png": _entry(180, crop_height=135),
        "icon-messages-settings-29x29@2x.png": _entry(58, crop_height=58),
        "icon-messages-settings-29x29@3x.png": _entry(87, crop_height=87),
        "icon-messages-transcript-32x24@1x.png": _entry(32, crop_height=24),
        "icon-messages-transcript-32x24@2x.png": _entry(64, crop_height=48),
        "icon-messages-transcript-32x24@3x.png": _entry(96, crop_height=72),
        "icon-messages-app-store-1024x1024.png": _entry(1024, crop_height=1024),
        "icon-messages-app-store-1024x768.png": _entry(1024, crop_height=768),
    },
    "watchkit": {
        "AppIcon.appiconset": {
            # WatchKit Notification Center icons
            "Icon-24@2x.png": _entry(48),
            "Icon-27.5@2x.png": _entry(55),
            # WatchKit Long-Look notification icons
            "Icon-40@2x.png": _entry(80),
            "Icon-44@2x.png": _entry(88),
            "Icon-50@2x.png": _entry(100),
            # WatchKit App icons
            "Icon-29@2x.png": _entry(58),
            "Icon-29@3x.png": _entry(87),
            # WatchKit Short-Look icons
            "Icon-86@2x.png": _entry(172),
            "Icon-98@2x.png": _entry(196),
            "Icon-108@2x.png": _entry(216),
            "ItunesArtwork@2x.png": _entry(1024),
        }
    },
    "android": {
        # TODO: Keep the launcher icons? Android docs say they are resized
        # by the system.
        **{
            f"mipmap-{dpi}": {
                "ic_launcher.png": _entry(launcher_size),
                "ic_launcher_foreground.png": _entry(launcher_size * 9 // 4),
                "ic_launcher_round.png": _entry(launcher_size, rounded=True),
            }
            for dpi, launcher_size in _ANDROID_LAUNCHER_SIZES.items()
        },
        "playstore-icon.png": _entry(512),
    },
}
# ResizeRight
This is a resizing package for images or tensors that supports both Numpy and PyTorch (**fully differentiable**) seamlessly. The main motivation for creating this is to address some **crucial incorrectness issues** (see item 3 in the list below) that exist in all other resizing packages I am aware of. As far as I know, it is the only one that performs correctly in all cases. ResizeRight is specially made for machine learning, image enhancement and restoration challenges.
The code is inspired by MATLAB's imresize function, but with crucial differences. It is specifically useful due to the following reasons:
1. ResizeRight produces results **identical to MATLAB for the simple cases** (scale_factor * in_size is integer). None of the Python packages I am aware of currently resize images with results similar to MATLAB's imresize (which is a common benchmark for image restoration tasks, especially super-resolution).
2. No other **differntiable** method I am aware of supports **AntiAliasing** as in MATLAB. Actually very few non-differentiable ones, including popular ones, do. This causes artifacts and inconsistency in downscaling. (see [this tweet](https://twitter.com/jaakkolehtinen/status/1258102168176951299) by [Jaakko Lehtinen](https://users.aalto.fi/~lehtinj7/)
for example).
3. The most important part: In the general case where scale_factor * in_size is non-integer, **no existing resizing method I am aware of (including MATLAB) performs consistently.** ResizeRight is accurate and consistent due to its ability to process **both scale-factor and output-size** provided by the user. This is a super important feature for super-resolution and learning. One must acknowledge that the same output-size can result from varying scale-factors, as output-size is usually determined by *ceil(input_size * scale_factor)*. This situation creates a dangerous lack of consistency. Best explained by example: say you have an image of size 9x9 and you resize by a scale-factor of 0.5. The result size is 5x5. Now you resize with a scale-factor of 2. You get a result sized 10x10. "no big deal", you must be thinking now, "I can resize it according to output-size 9x9", right? But then you will not get the correct scale-factor, which is calculated as output-size / input-size = 1.8.
Due to a simple observation regarding the projection of the output grid to the input grid, ResizeRight is the only one that consistently maintains the image centered, as in optical zoom while complying with the exact scale-factor and output size the user requires.
This is one of the main reasons for creating this repository. this downscale-upscale consistency is often crucial for learning based tasks (e.g. ["Zero-Shot Super-Resolution"](http://www.wisdom.weizmann.ac.il/~vision/zssr/)), and does not exist in other python packages nor in MATLAB.
4. Misalignment in resizing is a pandemic! Many existing packages actually return misaligned results. it is visually not apparent but can cause great damage to image enhancement tasks.(for example, see [how tensorflow's image resize stole 60 days of my life](https://hackernoon.com/how-tensorflows-tf-image-resize-stole-60-days-of-my-life-aba5eb093f35)). I personally also suffered from many misfortunate consequences of such missalignment before and throughout making this method.
5. Resizing supports **both Numpy and PyTorch** tensors seamlessly, just by the type of input tensor given. Results are checked to be identical in both modes, so you can safely apply to different tensor types and maintain consistency. No Numpy <-> Torch conversion takes part at any step. The process is done exclusively with one of the frameworks. No direct dependency is needed, so you can run it without having PyTorch installed at all, or without Numpy. You only need one of them.
6. In the case where scale_factor * in_size is a rational number with a denominator that is not too big (this is a parameter), the calculation is done efficiently based on convolutions (currently only PyTorch is supported). This is far more efficient for big tensors and suitable for working on large batches or high resolution. Note that this efficient calculation can be applied to certain dims that maintain the conditions while performing the regular calculation for the other dims.
7. Differently from some existing methods, including MATLAB, You can **resize N-D tensors in M-D dimensions.** for any M<=N.
8. You can specify a list of scale-factors to resize each dimension using a different scale-factor.
9. You can easily add and embed your own interpolation methods for the resizer to use (see interp_methods.py)
10. All used framework padding methods are supported (depends on numpy/PyTorch mode)
PyTorch: 'constant', 'reflect', 'replicate', 'circular'.
Numpy: ‘constant’, ‘edge’, ‘linear_ramp’, ‘maximum’, ‘mean’, ‘median’, ‘minimum’, ‘reflect’, ‘symmetric’, ‘wrap’, ‘empty’
11. Some general calculations are done more efficiently than the MATLAB version (one example is that MATLAB extends the kernel size by 2, and then searches for zero columns in the weights and cancels them. ResizeRight uses an observation that resizing is actually a continuous convolution and avoids having these redundancies ahead, see Shocher et al. ["From Discrete to Continuous Convolution Layers"](https://arxiv.org/abs/2006.11120)).
--------
### Usage:
For dynamic resize using either Numpy or PyTorch:
```
resize_right.resize(input, scale_factors=None, out_shape=None,
interp_method=interp_methods.cubic, support_sz=None,
antialiasing=True, by_convs=False, scale_tolerance=None,
max_numerator=10, pad_mode='constant'):
```
__input__ :
the input image/tensor, a Numpy or Torch tensor.
__scale_factors__:
can be specified as-
1. one scalar scale - then it will be assumed that you want to resize first two dims with this scale for Numpy or last two dims for PyTorch.
2. a list or tupple of scales - one for each dimension you want to resize. note: if length of the list is L then first L dims will be rescaled for Numpy and last L for PyTorch.
3. not specified - then it will be calculated using output_size. this is not recommended (see advantage 3 in the list above).
__out_shape__:
A list or tuple. If shorter than input.shape then only the first/last (depending on np/torch) dims are resized. If not specified, it can be calculated from scale_factor.
__interp_method__:
The type of interpolation used to calculate the weights. this is a scalar to scalar function that can be applied to tensors pointwise. The classical methods are implemented and can be found in interp_methods.py. (cubic, linear, lanczos2, lanczos3, box).
__support_sz__:
This is the support of the interpolation function, i.e. the length of the non-zero segment over its 1d input domain. This is a characteristic of the function. e.g. for bicubic 4, linear 2, lanczos2 4, lanczos3 6, box 1.
__antialiasing__:
This is an option similar to MATLAB's default. only relevant for downscaling. if true it basicly means that the kernel is stretched with 1/scale_factor to prevent aliasing (low-pass filtering)
__by_convs__:
This determines whether to allow efficient calculation using convolutions according to tolerance. This feature should be used when scale_factor is rational with a numerator low enough (or close enough to being an integer) and the tensors are big (batches or high-resolution).
__scale_tolerance__:
This is the allowed distance between the closest M/N fraction and the float scale_factor provided. If the fraction is closer than this distance, then it will be used and the efficient convolution calculation will take place.
__max_numerator__:
When by_convs is on, the scale_factor is translated to a rational frac M/N. Where M is limited by this parameter. The goal is to make the calculation more efficient. The number of convolutions used is the size of the numerator.
__pad_mode__:
This can be used according to the padding methods of each framework.
PyTorch: 'constant', 'reflect', 'replicate', 'circular'.
Numpy: ‘constant’, ‘edge’, ‘linear_ramp’, ‘maximum’, ‘mean’, ‘median’, ‘minimum’, ‘reflect’, ‘symmetric’, ‘wrap’, ‘empty’
--------
### Cite / credit
If you find our work useful in your research or publication, please cite this work:
```
@misc{ResizeRight,
author = {Shocher, Assaf},
title = {ResizeRight},
year = {2018},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/assafshocher/ResizeRight}},
}
```
| /resize_right_sdk-1.0.2.tar.gz/resize_right_sdk-1.0.2/README.md | 0.590897 | 0.9462 | README.md | pypi |
from __future__ import unicode_literals, print_function
import errno
import logging
import os
try:
from PIL import Image
from PIL import ImageOps
except ImportError:
import Image
import ImageOps
from docopt import docopt
logging.basicConfig(level=logging.INFO)
__version__ = "0.1.1"
__author__ = "Bartosz Dąbrowski"
__license__ = "MIT"
DEFAULT_HEIGHT = 1024
DEFAULT_WIDTH = 768
def resize_image(image, to, height, width):
    """
    Performs resize of given Image object which is expected to be with jpeg extension.
    Args:
        image: Image object (must have a ``filename`` attribute, i.e. opened from disk).
        to: Full directory for output results.
        height: Height of the processed images.
        width: Width of the processed images.
    Returns:
        Absolute path to new image.
    """
    # NOTE(review): PIL sizes are (width, height); passing (height, width) here
    # means the "height" argument actually becomes the output width — confirm
    # the intended order with the CLI defaults (1024x768).
    size = (height, width)
    # Image.ANTIALIAS is a deprecated alias of Image.LANCZOS in modern Pillow.
    oimage = ImageOps.fit(image, size, Image.ANTIALIAS)
    # Bug fix: split(os.extsep) raised ValueError for filenames containing extra
    # dots (or none at all); os.path.splitext handles both ("a.b.jpg" -> ("a.b", ".jpg")).
    name, extension = os.path.splitext(os.path.basename(image.filename))
    # splitext keeps the leading dot in `extension`, so no separator is re-added.
    new_name = '{}_{}x{}{}'.format(name, height, width, extension)
    new_full_path = os.path.join(to, new_name)
    oimage.save(new_full_path, 'JPEG')
    return new_full_path
def main():
    """Main entry point for the resize CLI.

    Parses docopt arguments, ensures the output directory exists, and resizes
    every jpeg found directly inside ``<path>``, logging the size change.
    """
    arguments = docopt(__doc__, version=__version__)
    print(arguments)
    output_path = arguments.get('--output-dir') or os.path.join(os.getcwd(), 'processed')
    try:
        os.makedirs(output_path)
    except OSError as error:
        # Bug fix: matching 'Errno 17' in the message text is locale- and
        # platform-fragile; compare the errno code directly instead.
        if error.errno != errno.EEXIST:
            raise
    height = arguments.get('--height') or DEFAULT_HEIGHT
    width = arguments.get('--width') or DEFAULT_WIDTH
    for node in os.listdir(arguments['<path>']):
        filename = os.path.join(arguments['<path>'], node)
        logging.info('Processing file %s', filename)
        if os.path.splitext(filename)[1].lower() not in ('.jpg', '.jpeg'):
            logging.info('%s is not jpeg - skipped', filename)
            # Bug fix: the original 'return' aborted the whole run at the first
            # non-jpeg entry; 'continue' skips just that file.
            continue
        image = Image.open(filename)
        new_full_path = resize_image(image, output_path, int(height), int(width))
        logging.info('Saved {}, Changed size: {} -> {} and {}MB to {}MB'
                     .format(new_full_path,
                             '{}x{}px'.format(image.size[0], image.size[1]),
                             '{}x{}px'.format(height, width),
                             os.stat(image.filename).st_size / 1000. / 1000.,
                             os.stat(new_full_path).st_size / 1000. / 1000.))
if __name__ == '__main__':
main() | /resize-0.1.0.tar.gz/resize-0.1.0/resize.py | 0.68637 | 0.184217 | resize.py | pypi |
import flax.linen as nn
import jax.numpy as jnp
from chex import Array
from .utils import Sequential, gelu
__all__ = [
"Affine",
"CrossPatchSublayer",
"CrossChannelSublayer",
"ResMLPLayer",
"ResMLP",
]
# ================ Helpers ===================
def full(key, shape, fill_value, dtype=None):
    """Flax-style initializer: constant array filled with ``fill_value``.

    The PRNG ``key`` is ignored — constant initialization is deterministic.
    """
    del key
    return jnp.full(shape, fill_value, dtype=dtype)
def ones(key, shape, dtype=None):
    """Flax-style initializer: array of ones (PRNG ``key`` unused)."""
    del key
    return jnp.ones(shape, dtype=dtype)
def zeros(key, shape, dtype=None):
    """Flax-style initializer: array of zeros (PRNG ``key`` unused)."""
    del key
    return jnp.zeros(shape, dtype=dtype)
# =============== Various Components ==============
class Affine(nn.Module):
    """Per-channel affine transform ``y = alpha * x + beta``.

    Attributes:
        dim: number of channels; fixes the trailing shape of ``alpha`` and ``beta``.
    """

    dim: int = 512

    def setup(self):
        # alpha starts at 1 and beta at 0, so the layer is the identity at init.
        self.alpha = self.param("alpha", ones, (1, 1, self.dim))
        self.beta = self.param("beta", zeros, (1, 1, self.dim))

    @nn.compact
    def __call__(self, x) -> Array:
        scaled = x * self.alpha
        return scaled + self.beta
class CrossPatchSublayer(nn.Module):
    """Token-mixing sublayer: Affine -> transpose -> Dense over patches ->
    transpose -> Affine, with a LayerScale-weighted residual connection.

    Attributes:
        dim: channel dimensionality, used by the Affine layers and LayerScale.
        patch_size: number of output features of the patch-mixing Dense layer.
        layerscale: initial value of the learned per-channel LayerScale vector.
    """

    dim: int = 512
    patch_size: int = 16
    layerscale: float = 0.1

    def setup(self):
        self.aff1 = Affine(dim=self.dim)
        self.linear = nn.Dense(features=self.patch_size)
        self.aff2 = Affine(dim=self.dim)
        # Bug fix: the original referenced the non-existent ``self.layer_scale``
        # (AttributeError) and rebound the ``layerscale`` hyperparameter field;
        # store the learned vector under a distinct attribute instead.
        self.scale = self.param(
            "layerscale_crosspatch", full, (self.dim,), self.layerscale
        )

    @nn.compact
    def __call__(self, x) -> Array:
        # Assumes x is (batch, patches, dim) — TODO confirm against the caller.
        transform = self.aff1(x)
        # Bug fix: jnp.transpose requires a full permutation of the axes;
        # axes=(1, 2) alone is invalid for a 3-D tensor. Swap the patch and
        # channel axes so the Dense layer mixes information across patches.
        transposed_transform = jnp.transpose(transform, axes=(0, 2, 1))
        linear_transform = self.linear(transposed_transform)
        # Swap the axes back to (batch, patches, dim).
        transposed_linear = jnp.transpose(linear_transform, axes=(0, 2, 1))
        affine_output = self.aff2(transposed_linear)
        # Residual connection scaled per channel (LayerScale).
        return x + affine_output * self.scale
class CrossChannelSublayer(nn.Module):
    """Channel-mixing sublayer: Affine -> MLP -> Affine with a
    LayerScale-weighted residual connection.

    Attributes:
        dim: dimensionality of the Affine layers and the MLP output.
        layerscale: initial value of the learned per-channel LayerScale vector.
        expansion_factor: hidden-layer widening factor of the MLP.
    """

    dim: int = 512
    layerscale: float = 0.1
    expansion_factor: int = 4

    def setup(self):
        self.aff1 = Affine(dim=self.dim)
        # Two-layer MLP with GELU in between; ``gelu`` comes from .utils and is
        # presumably a Module wrapper around the activation — TODO confirm.
        self.mlp = Sequential(
            [
                nn.Dense(features=self.expansion_factor * self.dim),
                gelu(),
                nn.Dense(features=self.dim),
            ]
        )
        self.aff2 = Affine(dim=self.dim)
        # Bug fix: the original referenced the non-existent ``self.layer_scale``
        # (AttributeError) and rebound the ``layerscale`` hyperparameter field;
        # store the learned vector under a distinct attribute instead.
        self.scale = self.param(
            "layerscale_crosschannel", full, (self.dim,), self.layerscale
        )

    @nn.compact
    def __call__(self, x) -> Array:
        transform = self.aff1(x)
        mlp_output = self.mlp(transform)
        affine_output = self.aff2(mlp_output)
        # Residual connection scaled per channel (LayerScale).
        return x + affine_output * self.scale
class ResMLPLayer(nn.Module):
    """One ResMLP block: a cross-patch (token-mixing) sublayer followed by a
    cross-channel (MLP) sublayer.

    Attributes:
        dim: width shared by both sublayers.
        depth: total number of layers in the network; selects the LayerScale init.
        patch_size: forwarded to the cross-patch sublayer's Dense layer.
    """

    dim: int = 512
    depth: int = 12
    patch_size: int = 16

    def setup(self):
        # Deeper networks start with a smaller residual-branch scale
        # (LayerScale schedule keyed on total depth).
        if self.depth <= 18:
            scale_init = 0.1
        elif self.depth <= 24:
            scale_init = 1e-5
        else:
            scale_init = 1e-6
        self.crosspatch = CrossPatchSublayer(
            dim=self.dim, patch_size=self.patch_size, layerscale=scale_init
        )
        self.crosschannel = CrossChannelSublayer(dim=self.dim, layerscale=scale_init)

    @nn.compact
    def __call__(self, x) -> Array:
        mixed_tokens = self.crosspatch(x)
        return self.crosschannel(mixed_tokens)
# ================== Model ================
class ResMLP(nn.Module):
    """
    A Flax linen Module for creating the ResMLP architecture.
    Attributes:
        dim: dimensionality for the Affine and MLP layers
        depth: Number of ResMLP layers
        patch_size: dimensionality of the patches
        num_classes: No of classes
    """
    dim: int = 512
    depth: int = 12
    patch_size: int = 16
    num_classes: int = 10
    def setup(self):
        # Patch embedding: strided convolution projecting non-overlapping
        # patches to `dim` channels.
        # NOTE(review): flax's nn.Conv normally expects kernel_size/strides as
        # sequences, e.g. (patch_size, patch_size); confirm that the int form
        # works with the flax version this project pins.
        self.patch_projector = nn.Conv(
            features=self.dim, kernel_size=self.patch_size, strides=self.patch_size
        )
        # `depth` identical residual mixing layers applied in sequence.
        self.blocks = Sequential(
            [
                ResMLPLayer(dim=self.dim, patch_size=self.patch_size, depth=self.depth)
                for _ in range(self.depth)
            ]
        )
        # Final linear classifier head.
        self.fc = nn.Dense(features=self.num_classes)
    @nn.compact
    def __call__(self, x) -> Array:
        x = self.patch_projector(x)
        x = self.blocks(x)
        # Average-pool over the token axis before classification.
        output = jnp.mean(x, axis=1)
        return self.fc(output) | /resmlp-flax-0.0.1.tar.gz/resmlp-flax-0.0.1/resmlp_flax/model.py | 0.923074 | 0.502014 | model.py | pypi |
[](https://pypi.python.org/pypi/resnest)
[](https://pypi.org/project/resnest/#history)
[](https://github.com/zhanghang1989/ResNeSt/actions)
[](http://pepy.tech/project/resnest)
[](https://opensource.org/licenses/Apache-2.0)
[](https://github.com/zhanghang1989/ResNeSt/actions)
[](https://arxiv.org/abs/2004.08955)
[](https://paperswithcode.com/sota/instance-segmentation-on-coco?p=resnest-split-attention-networks)
[](https://paperswithcode.com/sota/object-detection-on-coco?p=resnest-split-attention-networks)
[](https://paperswithcode.com/sota/panoptic-segmentation-on-coco-panoptic?p=resnest-split-attention-networks)
[](https://paperswithcode.com/sota/semantic-segmentation-on-ade20k?p=resnest-split-attention-networks)
[](https://paperswithcode.com/sota/semantic-segmentation-on-cityscapes-val?p=resnest-split-attention-networks)
[](https://paperswithcode.com/sota/semantic-segmentation-on-pascal-context?p=resnest-split-attention-networks)
# ResNeSt
Split-Attention Network, A New ResNet Variant. It significantly boosts the performance of downstream models such as Mask R-CNN, Cascade R-CNN and DeepLabV3.

### Table of Contents
0. [Pretrained Models](#pretrained-models)
0. [Transfer Learning Models](#transfer-learning-models)
0. [Verify Backbone Models](#verify-backbone-models)
0. [How to Train](#how-to-train)
0. [Reference](#reference)
### Pypi / GitHub Install
0. Install this package repo, note that you only need to choose one of the options
```bash
# using github url
pip install git+https://github.com/zhanghang1989/ResNeSt
# using pypi
pip install resnest --pre
```
## Pretrained Models
| | crop size | PyTorch | Gluon |
|-------------|-----------|---------|-------|
| ResNeSt-50 | 224 | 81.03 | 81.04 |
| ResNeSt-101 | 256 | 82.83 | 82.81 |
| ResNeSt-200 | 320 | 83.84 | 83.88 |
| ResNeSt-269 | 416 | 84.54 | 84.53 |
- **3rd party implementations** are available: [Tensorflow](https://github.com/QiaoranC/tf_ResNeSt_RegNet_model), [Caffe](https://github.com/NetEase-GameAI/ResNeSt-caffe).
- Extra ablation study models are available in [link](./ablation.md)
### PyTorch Models
- Load using Torch Hub
```python
import torch
# get list of models
torch.hub.list('zhanghang1989/ResNeSt', force_reload=True)
# load pretrained models, using ResNeSt-50 as an example
net = torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)
```
- Load using python package
```python
# using ResNeSt-50 as an example
from resnest.torch import resnest50
net = resnest50(pretrained=True)
```
### Gluon Models
- Load pretrained model:
```python
# using ResNeSt-50 as an example
from resnest.gluon import resnest50
net = resnest50(pretrained=True)
```
## Transfer Learning Models
### Detectron Models
Training code and pretrained models are released at our [Detectron2 Fork](https://github.com/zhanghang1989/detectron2-ResNeSt).
#### Object Detection on MS-COCO validation set
<table class="tg">
<tr>
<th class="tg-0pky">Method</th>
<th class="tg-0pky">Backbone</th>
<th class="tg-0pky">mAP%</th>
</tr>
<tr>
<td rowspan="4" class="tg-0pky">Faster R-CNN</td>
<td class="tg-0pky">ResNet-50</td>
<td class="tg-0pky">39.25</td>
</tr>
<tr>
<td class="tg-0lax">ResNet-101</td>
<td class="tg-0lax">41.37</td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-50 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>42.33</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-101 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>44.72</b></td>
</tr>
<tr>
<td rowspan="5" class="tg-0lax">Cascade R-CNN</td>
<td class="tg-0lax">ResNet-50</td>
<td class="tg-0lax">42.52</td>
</tr>
<tr>
<td class="tg-0lax">ResNet-101</td>
<td class="tg-0lax">44.03</td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-50 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>45.41</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-101 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>47.50</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-200 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>49.03</b></td>
</tr>
</table>
#### Instance Segmentation
<table class="tg">
<tr>
<th class="tg-0pky">Method</th>
<th class="tg-0pky">Backbone</th>
<th class="tg-0pky">bbox</th>
<th class="tg-0lax">mask</th>
</tr>
<tr>
<td rowspan="4" class="tg-0pky">Mask R-CNN</td>
<td class="tg-0pky">ResNet-50</td>
<td class="tg-0pky">39.97</td>
<td class="tg-0lax">36.05</td>
</tr>
<tr>
<td class="tg-0lax">ResNet-101</td>
<td class="tg-0lax">41.78</td>
<td class="tg-0lax">37.51</td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-50 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>42.81</b></td>
<td class="tg-0lax"><b>38.14</td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-101 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>45.75</b></td>
<td class="tg-0lax"><b>40.65</b></td>
</tr>
<tr>
<td rowspan="7" class="tg-0lax">Cascade R-CNN</td>
<td class="tg-0lax">ResNet-50</td>
<td class="tg-0lax">43.06</td>
<td class="tg-0lax">37.19</td>
</tr>
<tr>
<td class="tg-0lax">ResNet-101</td>
<td class="tg-0lax">44.79</td>
<td class="tg-0lax">38.52</td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-50 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>46.19</b></td>
<td class="tg-0lax"><b>39.55</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-101 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>48.30</b></td>
<td class="tg-0lax"><b>41.56</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-200 (w/ tricks <span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>50.54</b></td>
<td class="tg-0lax"><b>44.21</b></td>
</tr>
<tr>
<td rowspan="2" class="tg-0lax">ResNeSt-200-dcn (w/ tricks <span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>50.91</b></td>
<td class="tg-0lax"><b>44.50</b></td>
</tr>
<tr>
<td class="tg-0lax"><b>53.30*</b></td>
<td class="tg-0lax"><b>47.10*</b></td>
</tr>
</table>
All of results are reported on COCO-2017 validation dataset. The values with * demonstrate the mutli-scale testing performance on the test-dev2019.
## Panoptic Segmentation
<table class="tg">
<tr>
<th class="tg-0pky">Backbone</th>
<th class="tg-0pky">bbox</th>
<th class="tg-0lax">mask</th>
<th class="tg-0lax">PQ</th>
</tr>
<tr>
<td class="tg-0pky">ResNeSt-200</td>
<td class="tg-0pky">51.00</td>
<td class="tg-0lax">43.68</td>
<td class="tg-0lax">47.90</td>
</tr>
</table>
### Semantic Segmentation
- PyTorch models and training: Please visit [PyTorch Encoding Toolkit](https://hangzhang.org/PyTorch-Encoding/model_zoo/segmentation.html).
- Training with Gluon: Please visit [GluonCV Toolkit](https://gluon-cv.mxnet.io/model_zoo/segmentation.html#ade20k-dataset).
#### Results on ADE20K
<table class="tg">
<tr>
<th class="tg-cly1">Method</th>
<th class="tg-cly1">Backbone</th>
<th class="tg-cly1">pixAcc%</th>
<th class="tg-cly1">mIoU%</th>
</tr>
<tr>
<td rowspan="6" class="tg-cly1">Deeplab-V3<br></td>
<td class="tg-cly1">ResNet-50</td>
<td class="tg-cly1">80.39</td>
<td class="tg-cly1">42.1</td>
</tr>
<tr>
<td class="tg-cly1">ResNet-101</td>
<td class="tg-cly1">81.11</b></td>
<td class="tg-cly1">44.14</b></td>
</tr>
<tr>
<td class="tg-cly1">ResNeSt-50 (<span style="color:red">ours</span>)</td>
<td class="tg-cly1"><b>81.17</b></td>
<td class="tg-cly1"><b>45.12</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-101 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>82.07</td>
<td class="tg-0lax"><b>46.91</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-200 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>82.45</td>
<td class="tg-0lax"><b>48.36</b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-269 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax"><b>82.62</td>
<td class="tg-0lax"><b>47.60</b></td>
</tr>
</table>
#### Results on Cityscapes
<table class="tg">
<tr>
<th class="tg-cly1">Method</th>
<th class="tg-cly1">Backbone</th>
<th class="tg-cly1">Split</th>
<th class="tg-cly1">w Mapillary</th>
<th class="tg-cly1">mIoU%</th>
</tr>
<tr>
<td rowspan="3" class="tg-cly1">Deeplab-V3+<br></td>
<td class="tg-cly1">ResNeSt-200 (<span style="color:red">ours</span>)</td>
<td class="tg-cly1">Validation</td>
<td class="tg-cly1">no</td>
<td class="tg-cly1">82.7</td>
</tr>
<tr>
<td class="tg-cly1">ResNeSt-200 (<span style="color:red">ours</span>)</td>
<td class="tg-cly1">Validation</td>
<td class="tg-cly1">yes</td>
<td class="tg-cly1"><b>83.8<b></td>
</tr>
<tr>
<td class="tg-0lax">ResNeSt-200 (<span style="color:red">ours</span>)</td>
<td class="tg-0lax">Test</td>
<td class="tg-cly1">yes</td>
<td class="tg-0lax"><b>83.3<b></td>
</tr>
</table>
## Verify Backbone Models:
**Note:** the inference speed reported in the paper are tested using Gluon implementation with RecordIO data.
### Prepare ImageNet dataset:
Here we use raw image data format for simplicity, please follow [GluonCV tutorial](https://gluon-cv.mxnet.io/build/examples_datasets/recordio.html) if you would like to use RecordIO format.
```bash
cd scripts/dataset/
# assuming you have downloaded the dataset in the current folder
python prepare_imagenet.py --download-dir ./
```
### Torch Model
```bash
# use resnest50 as an example
cd scripts/torch/
python verify.py --model resnest50 --crop-size 224
```
### Gluon Model
```bash
# use resnest50 as an example
cd scripts/gluon/
python verify.py --model resnest50 --crop-size 224
```
## How to Train
### ImageNet Models
- Training with MXNet Gluon: Please visit [Gluon folder](./scripts/gluon/).
- Training with PyTorch: Please visit [PyTorch Encoding Toolkit](https://hangzhang.org/PyTorch-Encoding/model_zoo/imagenet.html) (slightly worse than Gluon implementation).
### Detectron Models
For object detection and instance segmentation models, please visit our [detectron2-ResNeSt fork](https://github.com/zhanghang1989/detectron2-ResNeSt).
### Semantic Segmentation
- Training with PyTorch: [Encoding Toolkit](https://hangzhang.org/PyTorch-Encoding/model_zoo/segmentation.html).
- Training with MXNet: [GluonCV Toolkit](https://gluon-cv.mxnet.io/model_zoo/segmentation.html#ade20k-dataset).
## Reference
**ResNeSt: Split-Attention Networks** [[arXiv](https://arxiv.org/pdf/2004.08955.pdf)]
Hang Zhang, Chongruo Wu, Zhongyue Zhang, Yi Zhu, Zhi Zhang, Haibin Lin, Yue Sun, Tong He, Jonas Muller, R. Manmatha, Mu Li and Alex Smola
```
@article{zhang2020resnest,
title={ResNeSt: Split-Attention Networks},
author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander},
journal={arXiv preprint arXiv:2004.08955},
year={2020}
}
```
### Major Contributors
- ResNeSt Backbone ([Hang Zhang](https://hangzhang.org/))
- Detectron Models ([Chongruo Wu](https://github.com/chongruo), [Zhongyue Zhang](http://zhongyuezhang.com/))
- Semantic Segmentation ([Yi Zhu](https://sites.google.com/view/yizhu/home))
- Distributed Training ([Haibin Lin](https://sites.google.com/view/haibinlin/))
| /resnest-0.0.6b20200712.tar.gz/resnest-0.0.6b20200712/README.md | 0.538498 | 0.941868 | README.md | pypi |
import os
import tensorflow as tf
import tensorflow_addons as tfa
from signal_transformation import helpers
import resnet_models.metrics as metrics
from resnet_models.settings import MAIN
from resnet_models.data_generator import DataGenerator
def get_data(path_to_files):
    """Collect .npy feature files and assign an integer label per speaker.

    Args:
        path_to_files: root directory searched recursively for .npy files.

    Returns:
        (x, y): ``x`` is a list of (index, file_path) tuples; ``y`` maps each
        index to an integer speaker label (consecutive ids starting at 0, in
        order of first appearance).
    """
    x = []
    y = {}
    labels = {}
    for idx, file_path in enumerate(helpers.find_files(path_to_files, pattern=['.npy'])):
        x.append((idx, file_path))
        # Directory layout assumed to be .../<speaker_id>/<session>/<file>.npy —
        # TODO confirm; note the '/' split is POSIX-specific.
        speaker_id = file_path.split('/')[-3]
        # setdefault assigns the next consecutive id to a first-seen speaker,
        # replacing the original manual counter (counter == len(labels) always).
        y[idx] = labels.setdefault(speaker_id, len(labels))
    return x, y
def train(model, dev_out_dir, valid_out_dir, output_dir, number_dev_files=0, number_val_files=0,
          epochs=100,
          batch_size=128):
    """Compile ``model`` and fit it on pre-extracted .npy feature files.

    Args:
        model: an uncompiled tf.keras model.
        dev_out_dir: directory tree of training .npy files (see ``get_data``).
        valid_out_dir: directory tree of validation .npy files.
        output_dir: root directory for TensorBoard logs and checkpoints.
        number_dev_files: currently unused (only referenced in commented-out code).
        number_val_files: currently unused (only referenced in commented-out code).
        epochs: number of training epochs.
        batch_size: samples per batch.

    Returns:
        The trained model (best checkpoints are also saved under ``output_dir``).
    """
    # Parameters
    params = {
        'dim': MAIN['shape'],
        'batch_size': batch_size,
        'n_classes': MAIN['n_classes'],
        'n_channels': 1,
        'shuffle': True
    }
    # Datasets
    train_files, train_labels = get_data(dev_out_dir)
    valid_files, valid_labels = get_data(valid_out_dir)
    # Generators
    training_generator = DataGenerator(train_files, train_labels, **params)
    validation_generator = DataGenerator(valid_files, valid_labels, **params)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(lr=1e-3),
        loss='categorical_crossentropy',
        metrics=['acc']
    )
    # print(models.summary())
    # tf.keras.utils.plot_model(
    #     models,
    #     to_file=os.path.join(output_dir, 'logs/resnet_models/models.png'),
    #     show_shapes=True,
    #     show_layer_names=True,
    #     rankdir='TB',
    #     expand_nested=False,
    #     dpi=96
    # )
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=os.path.join(output_dir, 'logs/resnet_models/tensorboard/')
    )
    helpers.create_dir(os.path.join(output_dir, 'logs/resnet_models/checkpoints/'))
    # Save only the model that improves validation accuracy, once per epoch.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(output_dir, 'logs/resnet_models/checkpoints/models.{epoch:02d}.tf'),
        verbose=0,
        save_weights_only=False,
        save_freq='epoch',
        save_best_only=True,
        monitor='val_acc'
    )
    # steps_per_epoch = int(number_dev_files / batch_size)
    # validation_steps = int(number_val_files / batch_size)
    print('Started train the models')
    model.fit(
        training_generator,
        validation_data=validation_generator,
        use_multiprocessing=False,
        workers=1,
        epochs=epochs,
        steps_per_epoch=len(training_generator),
        callbacks=[tensorboard_callback, cp_callback],
        validation_steps=len(validation_generator),
        verbose=1
    )
    print('Finished train the models')
    # history_eval = models.evaluate(valid_dataset, use_multiprocessing=True, verbose=0)
    # print('Eval loss:', history_eval[0])
    # print('Eval err:', history_eval[1])
    return model | /resnet_models-1.1.3-py3-none-any.whl/resnet_models/train.py | 0.437824 | 0.187728 | train.py | pypi |
import tensorflow as tf
# To prevent division by zero
epsilon = 1e-16
def metric_variable(shape, dtype, validate_shape=True, name=None):
    """Create a zero-initialized, non-trainable accumulator for streaming metrics.

    The variable aggregates by SUM across replicas, matching its use as a
    running TP/TN/FP/FN counter.
    """
    zero_init = lambda: tf.zeros(shape, dtype)
    return tf.Variable(
        zero_init,
        trainable=False,
        aggregation=tf.VariableAggregation.SUM,
        validate_shape=validate_shape,
        name=name,
    )
# computing euclidean distance matrix for embeddings
def pairwise_distance(feature, squared=False):
    """Matrix of pairwise Euclidean distances between the rows of ``feature``."""
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed for every pair at once.
    row_sq = tf.math.reduce_sum(input_tensor=tf.math.square(feature), axis=[1], keepdims=True)
    col_sq = tf.math.reduce_sum(
        input_tensor=tf.math.square(tf.linalg.matrix_transpose(feature)),
        axis=[0],
        keepdims=True)
    dist_sq = row_sq + col_sq - 2.0 * tf.matmul(feature, tf.linalg.matrix_transpose(feature))
    # Floating-point error can push squared distances slightly negative; clamp.
    dist_sq = tf.math.maximum(dist_sq, 0.0)
    # Remember which entries are exactly zero so the epsilon can be undone below.
    zero_mask = tf.math.less_equal(dist_sq, 0.0)
    if squared:
        dists = dist_sq
    else:
        # Epsilon keeps sqrt (and its gradient) finite at zero distance.
        dists = tf.math.sqrt(dist_sq + tf.cast(zero_mask, tf.float32) * epsilon)
    # Force entries whose true distance is zero back to exactly zero.
    dists = tf.math.multiply(dists, tf.cast(tf.math.logical_not(zero_mask), dtype=tf.float32))
    return dists
def pairwise_distance_eval(rows, cols, squared=False):
    """Euclidean distances between every row of ``rows`` and every row of ``cols``."""
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b for all cross pairs at once.
    rows_sq = tf.math.reduce_sum(input_tensor=tf.math.square(rows), axis=[1], keepdims=True)
    cols_sq = tf.math.reduce_sum(
        input_tensor=tf.math.square(tf.linalg.matrix_transpose(cols)),
        axis=[0],
        keepdims=True)
    dist_sq = rows_sq + cols_sq - 2.0 * tf.matmul(rows, tf.linalg.matrix_transpose(cols))
    # Floating-point error can push squared distances slightly negative; clamp.
    dist_sq = tf.math.maximum(dist_sq, 0.0)
    # Remember which entries are exactly zero so the epsilon can be undone below.
    zero_mask = tf.math.less_equal(dist_sq, 0.0)
    if squared:
        dists = dist_sq
    else:
        # Epsilon keeps sqrt (and its gradient) finite at zero distance.
        dists = tf.math.sqrt(dist_sq + tf.cast(zero_mask, tf.float32) * epsilon)
    # Force entries whose true distance is zero back to exactly zero.
    return tf.math.multiply(dists, tf.cast(tf.math.logical_not(zero_mask), dtype=tf.float32))
def upper_triangular(tensor):
    """Return the strictly-upper-triangular entries of a square matrix as a 1-D tensor."""
    ones_like = tf.ones_like(tensor)
    upper_incl = tf.linalg.band_part(ones_like, 0, -1)  # upper triangle incl. diagonal
    diag_only = tf.linalg.band_part(ones_like, 0, 0)    # diagonal only
    strict_upper = tf.cast(upper_incl - diag_only, dtype=tf.bool)
    return tf.boolean_mask(tensor=tensor, mask=strict_upper)
def eer(embeddings_labels, embeddings, evaluation=False, num_thresholds=200, name=None):
    """Streaming Equal Error Rate (EER) metric over pairwise embedding distances.

    Pairwise distances between embeddings are thresholded at ``num_thresholds``
    points in [0, 1]; TP/TN/FP/FN counts per threshold are accumulated in
    metric variables, and the EER is taken at the threshold where FAR and FRR
    are closest.

    Args:
        embeddings_labels: one integer label per embedding (equal labels mean a
            genuine pair).
        embeddings: 2-D tensor of embedding vectors.
        evaluation: if True, the batch is split in half and distances are taken
            between the two halves instead of between all unique pairs.
        num_thresholds: number of distance thresholds to sweep.
        name: optional variable-scope name.

    Returns:
        (value, update_op): the current EER from accumulated counts, and the op
        that adds this batch's counts and recomputes the EER.
    """
    with tf.compat.v1.variable_scope(name, 'eer_metric', (embeddings_labels, embeddings)):
        def compute_err(true_positive, true_negative, false_positive, false_negative):
            # FAR: impostor pairs accepted; FRR: genuine pairs rejected.
            far = false_positive / (true_negative + false_positive + epsilon)
            frr = false_negative / (true_positive + false_negative + epsilon)
            # EER is taken at the threshold where FAR and FRR are closest.
            idx = tf.math.argmin(input=tf.math.abs(far - frr))
            err = (far[idx] + frr[idx]) / 2
            return err, far, frr, idx
        # Generate thresholds
        thresholds = tf.linspace(0.0, 1.0, num_thresholds)
        if not evaluation:
            # Training mode: all unique pairs within the batch (strict upper triangle).
            distances = pairwise_distance(embeddings, squared=False)
            distances = upper_triangular(distances)
        else:
            # Evaluation mode: first half of the batch against the second half.
            rows, cols = tf.split(embeddings, num_or_size_splits=2, axis=0)
            distances = pairwise_distance_eval(rows, cols, squared=False)
            distances = tf.reshape(distances, [-1])
        # Rescale into [0, 1] so the fixed threshold grid covers the full range.
        scaling_index = tf.math.argmax(input=distances)
        scaling = distances[scaling_index]
        distances = distances / scaling
        embeddings_labels = tf.reshape(embeddings_labels, [-1, 1])
        if not evaluation:
            # A pair is genuine when its labels match; keep the strict upper
            # triangle to mirror the distance pairs above.
            labels = tf.math.equal(embeddings_labels, tf.linalg.matrix_transpose(embeddings_labels))
            labels = tf.cast(upper_triangular(tf.cast(labels, dtype=tf.uint8)), dtype=tf.bool)
        else:
            rows, cols = tf.split(embeddings_labels, num_or_size_splits=2, axis=0)
            labels = tf.math.equal(rows, tf.linalg.matrix_transpose(cols))
            labels = tf.reshape(labels, [-1])
        # Reshape predictions and labels.
        distances_2d = tf.reshape(distances, [-1, 1])
        labels_2d = tf.reshape(tf.cast(labels, dtype=tf.dtypes.bool), [1, -1])
        # Use static shape if known.
        num_predictions = distances_2d.get_shape().as_list()[0]
        # Otherwise use dynamic shape.
        if num_predictions is None:
            num_predictions = tf.shape(input=distances_2d)[0]
        # Tile thresholds: one row per threshold, one column per pair.
        thresh_tiled = tf.tile(
            tf.expand_dims(thresholds, [1]),
            tf.stack([1, num_predictions]))
        # Tile the predictions after thresholding them across different thresholds.
        # A pair is predicted "genuine" when its distance falls below the threshold.
        pred_is_pos = tf.greater(
            thresh_tiled,
            tf.tile(tf.transpose(a=distances_2d), [num_thresholds, 1]))
        pred_is_neg = tf.logical_not(pred_is_pos)
        # Tile labels by number of thresholds
        label_is_pos = tf.tile(labels_2d, [num_thresholds, 1])
        label_is_neg = tf.logical_not(label_is_pos)
        is_true_positive = tf.cast(
            tf.logical_and(label_is_pos, pred_is_pos), dtype=tf.float32)
        is_true_negative = tf.cast(
            tf.logical_and(label_is_neg, pred_is_neg), dtype=tf.float32)
        is_false_positive = tf.cast(
            tf.logical_and(label_is_neg, pred_is_pos), dtype=tf.float32)
        is_false_negative = tf.cast(
            tf.logical_and(label_is_pos, pred_is_neg), dtype=tf.float32)
        # Streaming accumulators: one count per threshold, summed across update calls.
        true_positive = metric_variable([num_thresholds], tf.float32, name='true_positive')
        true_negative = metric_variable([num_thresholds], tf.float32, name='true_negative')
        false_positive = metric_variable([num_thresholds], tf.float32, name='false_positive')
        false_negative = metric_variable([num_thresholds], tf.float32, name='false_negative')
        true_positive_op = tf.compat.v1.assign_add(true_positive,
                                                   tf.reduce_sum(input_tensor=is_true_positive,
                                                                 axis=1))
        true_negative_op = tf.compat.v1.assign_add(true_negative,
                                                   tf.reduce_sum(input_tensor=is_true_negative,
                                                                 axis=1))
        false_positive_op = tf.compat.v1.assign_add(false_positive,
                                                    tf.reduce_sum(input_tensor=is_false_positive,
                                                                  axis=1))
        false_negative_op = tf.compat.v1.assign_add(false_negative,
                                                    tf.reduce_sum(input_tensor=is_false_negative,
                                                                  axis=1))
        # Current value: computed from the accumulated counts without updating them.
        value, _, _, _ = compute_err(
            true_positive,
            true_negative,
            false_positive,
            false_negative)
        # Update op: adds this batch's counts first, then recomputes EER/FAR/FRR.
        update_op, far, frr, idx = compute_err(
            true_positive_op,
            true_negative_op,
            false_positive_op,
            false_negative_op)
        # Report metrics to tensorboard
        tf.compat.v1.summary.scalar('EER', update_op)
        tf.compat.v1.summary.scalar('FAR', far[idx])
        tf.compat.v1.summary.scalar('FRR', frr[idx])
        tf.compat.v1.summary.scalar('idx', idx)
        # 'd' is the EER threshold mapped back to the original distance units.
        tf.compat.v1.summary.scalar('d', thresholds[idx] * scaling)
        # Analyze embeddings distribution
        tf.compat.v1.summary.histogram('embeddings', embeddings)
        return value, update_op | /resnet_models-1.1.3-py3-none-any.whl/resnet_models/metrics.py | 0.900226 | 0.528959 | metrics.py | pypi |
import tensorflow as tf
from resnet_models.models import blocks
def get_model(input_shape=(257, 998, 1), embeddings_size=512, weight_decay=1e-4, n_classes=5994):
    """Build the ResNet34 classification model.

    :param input_shape: shape of the input tensor (height, width, channels)
    :param embeddings_size: width of the penultimate 'embedding' Dense layer
    :param weight_decay: L2 regularization factor applied to layer weights
    :param n_classes: number of output classes of the softmax head
    :return: a ``tf.keras.Model`` named 'ResNet34'
    """
    def _l2():
        # One fresh L2 regularizer per layer, all with the same decay factor.
        return tf.keras.regularizers.l2(weight_decay)

    input_layer = tf.keras.layers.Input(shape=input_shape, name='input')

    # Stage 1: initial 7x7 convolution, BN, ReLU and 2x2 max-pooling.
    net = tf.keras.layers.Conv2D(
        filters=64,
        kernel_size=(7, 7),
        padding='same',
        use_bias=False,
        name='conv1',
        kernel_initializer='orthogonal',
        kernel_regularizer=_l2()
    )(input_layer)
    net = tf.keras.layers.BatchNormalization(axis=3, name='bn_conv1')(net)
    net = tf.keras.layers.Activation('relu')(net)
    net = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2))(net)

    # Stages 2-5: each stage is one convolutional block ('a') followed by
    # identity blocks ('b', 'c', ...). Stage 2 keeps stride (1, 1).
    stage_configs = (
        (2, [48, 48, 96], 2, {'strides': (1, 1)}),
        (3, [96, 96, 128], 3, {}),
        (4, [128, 128, 256], 3, {}),
        (5, [256, 256, 512], 3, {}),
    )
    for stage, filters, n_blocks, conv_kwargs in stage_configs:
        net = blocks.convolutional_block(net, kernel_size=3, filters=filters,
                                         stage=stage, block='a', trainable=True,
                                         **conv_kwargs)
        for block_id in 'bcdefgh'[:n_blocks - 1]:
            net = blocks.identity_block(net, 3, filters, stage=stage,
                                        block=block_id, trainable=True)

    net = tf.keras.layers.MaxPooling2D(
        pool_size=(3, 1),
        strides=(2, 1),
        padding='same'
    )(net)

    # Flatten the feature map and project into the embedding space.
    net = tf.keras.layers.Flatten()(net)
    net = tf.keras.layers.Dense(
        embeddings_size,
        activation=None,
        kernel_initializer='orthogonal',
        use_bias=True,
        trainable=True,
        kernel_regularizer=_l2(),
        bias_regularizer=_l2(),
        name='embedding')(net)

    # Classification head: bias-free linear layer followed by softmax.
    net = tf.keras.layers.Dense(
        n_classes,
        name='fc' + str(embeddings_size),
        kernel_initializer='orthogonal',
        kernel_regularizer=_l2(),
        use_bias=False,
        trainable=True
    )(net)
    predictions = tf.keras.layers.Activation('softmax', name='predictions')(net)

    return tf.keras.models.Model(inputs=input_layer, outputs=predictions, name='ResNet34')
# ResNet-PyTorch
### Update (Feb 20, 2020)
The update is for ease of use and deployment.
* [Example: Export to ONNX](#example-export-to-onnx)
* [Example: Extract features](#example-feature-extraction)
* [Example: Visual](#example-visual)
It is also now incredibly simple to load a pretrained model with a new number of classes for transfer learning:
```python
from resnet_pytorch import ResNet
model = ResNet.from_pretrained('resnet18', num_classes=10)
```
### Update (February 2, 2020)
This update allows you to use NVIDIA's Apex tool for accelerated training. By default it uses `mixed-precision training` + `dynamic loss scaling`; if you want to learn more details about the `apex` tools, please visit https://github.com/NVIDIA/apex.
### Overview
This repository contains an op-for-op PyTorch reimplementation of [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385).
The goal of this implementation is to be simple, highly extensible, and easy to integrate into your own projects. This implementation is a work in progress -- new features are currently being implemented.
At the moment, you can easily:
* Load pretrained ResNet models
* Use ResNet models for classification or feature extraction
_Upcoming features_: In the next few days, you will be able to:
* Quickly finetune an ResNet on your own dataset
* Export ResNet models for production
### Table of contents
1. [About ResNet](#about-resnet)
2. [Installation](#installation)
3. [Usage](#usage)
* [Load pretrained models](#loading-pretrained-models)
* [Example: Classify](#example-classification)
* [Example: Extract features](#example-feature-extraction)
* [Example: Export to ONNX](#example-export-to-onnx)
* [Example: Visual](#example-visual)
4. [Contributing](#contributing)
### About ResNet
If you're new to ResNets, here is an explanation straight from the official PyTorch implementation:
Resnet models were proposed in "Deep Residual Learning for Image Recognition". Here we have the 5 versions of resnet models,
which contain 18, 34, 50, 101, 152 layers respectively. Detailed model architectures can be found in Table 1.
### Installation
Install from pypi:
```bash
$ pip3 install resnet_pytorch
```
Install from source:
```bash
$ git clone https://github.com/Lornatang/ResNet-PyTorch.git
$ cd ResNet-PyTorch
$ pip3 install -e .
```
### Usage
#### Loading pretrained models
Load an resnet18 network:
```python
from resnet_pytorch import ResNet
model = ResNet.from_name("resnet18")
```
Load a pretrained resnet18:
```python
from resnet_pytorch import ResNet
model = ResNet.from_pretrained("resnet18")
```
Their 1-crop error rates on imagenet dataset with pretrained models are listed below.
| Model structure | Top-1 error | Top-5 error |
| --------------- | ----------- | ----------- |
| resnet18 | 30.24 | 10.92 |
| resnet34 | 26.70 | 8.58 |
| resnet50 | 23.85 | 7.13 |
| resnet101 | 22.63 | 6.44 |
| resnet152 | 21.69 | 5.94 |
*Option B of resnet-18/34/50/101/152 only uses projections to increase dimensions.*
For results extending to the cifar10 dataset, see `examples/cifar`
#### Example: Classification
We assume that in your current directory, there is a `img.jpg` file and a `labels_map.txt` file (ImageNet class names). These are both included in `examples/simple`.
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB images of shape `(3 x H x W)`, where `H` and `W` are expected to be at least `224`.
The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`
and `std = [0.229, 0.224, 0.225]`.
Here's a sample execution.
```python
import json
import torch
import torchvision.transforms as transforms
from PIL import Image
from resnet_pytorch import ResNet
# Open image
input_image = Image.open("img.jpg")
# Preprocess image
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
# Load class names
labels_map = json.load(open("labels_map.txt"))
labels_map = [labels_map[str(i)] for i in range(1000)]
# Classify with ResNet18
model = ResNet.from_pretrained("resnet18")
model.eval()
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
input_batch = input_batch.to("cuda")
model.to("cuda")
with torch.no_grad():
logits = model(input_batch)
preds = torch.topk(logits, k=5).indices.squeeze(0).tolist()
print("-----")
for idx in preds:
label = labels_map[idx]
prob = torch.softmax(logits, dim=1)[0, idx].item()
print(f"{label:<75} ({prob * 100:.2f}%)")
```
#### Example: Feature Extraction
You can easily extract features with `model.extract_features`:
```python
import torch
from resnet_pytorch import ResNet
model = ResNet.from_pretrained('resnet18')
# ... image preprocessing as in the classification example ...
inputs = torch.randn(1, 3, 224, 224)
print(inputs.shape) # torch.Size([1, 3, 224, 224])
features = model.extract_features(inputs)
print(features.shape) # torch.Size([1, 512, 1, 1])
```
#### Example: Export to ONNX
Exporting to ONNX for deploying to production is now simple:
```python
import torch
from resnet_pytorch import ResNet
model = ResNet.from_pretrained('resnet18')
dummy_input = torch.randn(16, 3, 224, 224)
torch.onnx.export(model, dummy_input, "demo.onnx", verbose=True)
```
#### Example: Visual
```text
cd $REPO$/framework
sh start.sh
```
Then open the browser and type in the browser address [http://127.0.0.1:10004/](http://127.0.0.1:10004/).
Enjoy it.
#### ImageNet
See `examples/imagenet` for details about evaluating on ImageNet.
### Credit
#### Deep Residual Learning for Image Recognition
*Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun*
##### Abstract
Deeper neural networks are more difficult to train. We
present a residual learning framework to ease the training
of networks that are substantially deeper than those used
previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual
networks are easier to optimize, and can gain accuracy from
considerably increased depth. On the ImageNet dataset we
evaluate residual nets with a depth of up to 152 layers—8×
deeper than VGG nets [41] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error
on the ImageNet test set. This result won the 1st place on the
ILSVRC 2015 classification task. We also present analysis
on CIFAR-10 with 100 and 1000 layers.
The depth of representations is of central importance
for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep
residual nets are foundations of our submissions to ILSVRC
& COCO 2015 competitions1
, where we also won the 1st
places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.
[paper](http://arxiv.org/abs/1512.03385) [code](https://github.com/KaimingHe/deep-residual-networks)
```text
@article{He2015,
author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun},
title = {Deep Residual Learning for Image Recognition},
journal = {arXiv preprint arXiv:1512.03385},
year = {2015}
}
``` | /resnet_pytorch-0.2.0.tar.gz/resnet_pytorch-0.2.0/README.md | 0.782081 | 0.952794 | README.md | pypi |
import torch
import torch.nn as nn
from .utils import BasicBlock
from .utils import Bottleneck
from .utils import conv1x1
from .utils import get_model_params
from .utils import load_pretrained_weights
from .utils import resnet_params
class ResNet(nn.Module):
    """ResNet image-classification network (He et al., arXiv:1512.03385).

    Depth and width are determined by ``layers`` (number of residual blocks
    per stage) and ``global_params`` (block type, groups, width, number of
    classes, ...), normally produced by :func:`get_model_params`.
    """

    def __init__(self, layers=None, global_params=None):
        """Build the network.

        :param layers: tuple of four ints -- the number of residual blocks in
            each stage, e.g. ``(2, 2, 2, 2)`` for resnet18.
        :param global_params: architecture-wide settings with fields
            ``block``, ``norm_layer``, ``replace_stride_with_dilation``,
            ``groups``, ``width_per_group``, ``num_classes`` and
            ``zero_init_residual``.
        """
        super(ResNet, self).__init__()
        assert isinstance(layers, tuple), "layers should be a tuple"
        assert len(layers) > 0, "layers must be greater than 0"

        # Default to BatchNorm2d only when no norm layer was requested.
        # BUGFIX: previously a caller-supplied norm_layer left the local name
        # ``norm_layer`` unbound and crashed with NameError below.
        norm_layer = global_params.norm_layer
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        # Each element in the tuple indicates if we should replace the
        # 2x2 stride with a dilated convolution instead.
        # BUGFIX: same unbound-local problem as norm_layer above.
        replace_stride_with_dilation = global_params.replace_stride_with_dilation
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = global_params.groups
        self.base_width = global_params.width_per_group

        # Stem: 7x7 stride-2 convolution + norm + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Four residual stages; stages 2-4 stride (or dilate) by 2.
        self.layer1 = self._make_layer(global_params.block, 64, layers[0])
        self.layer2 = self._make_layer(global_params.block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(global_params.block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(global_params.block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * global_params.block.expansion, global_params.num_classes)

        # Kaiming init for convolutions; unit scale / zero shift for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if global_params.zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks producing ``planes * expansion``
        output channels; the first block downsamples (or dilates) if needed.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade striding for dilation to preserve spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match the residual branch's shape.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample, self.groups,
                        self.base_width, previous_dilation, norm_layer)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def extract_features(self, inputs):
        """Return the globally average-pooled output of the final
        convolutional stage, shape ``(N, C, 1, 1)``."""
        x = self.conv1(inputs)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        return x

    def forward(self, inputs):
        """Full forward pass: features -> flatten -> classifier logits."""
        # Reuse extract_features instead of duplicating the stem/stage code.
        x = self.extract_features(inputs)
        x = torch.flatten(x, 1)
        return self.fc(x)

    @classmethod
    def from_name(cls, model_name, override_params=None):
        """Instantiate a randomly initialized model from its name."""
        cls._check_model_name_is_valid(model_name)
        layers, global_params = get_model_params(model_name, override_params)
        return cls(layers, global_params)

    @classmethod
    def from_pretrained(cls, model_name, num_classes=1000):
        """Instantiate a model and load pretrained weights.

        The final FC layer's weights are only loaded when ``num_classes``
        equals the 1000 ImageNet classes; otherwise it stays freshly
        initialized (transfer-learning use case).
        """
        model = cls.from_name(model_name, override_params={"num_classes": num_classes})
        load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))
        return model

    @classmethod
    def get_image_size(cls, model_name):
        """Return the native input image resolution for ``model_name``."""
        cls._check_model_name_is_valid(model_name)
        _, res = resnet_params(model_name)
        return res

    @classmethod
    def _check_model_name_is_valid(cls, model_name):
        """Validate that ``model_name`` is one of the supported depths
        (resnet{i} for i in 18, 34, 50, 101, 152); raise ValueError otherwise.
        """
        num_models = [18, 34, 50, 101, 152]
        valid_models = ["resnet" + str(i) for i in num_models]
        if model_name not in valid_models:
            raise ValueError("model_name should be one of: " + ", ".join(valid_models))
from typing import List, Tuple
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.layers import l1_regularizer, l2_regularizer
class ResNet(object):
    """
    Deep Residual Network for CIFAR-10 image classification.

    The graph is built imperatively: every helper appends the tensor it
    creates to ``self.layers``, so ``self.last_layer`` always refers to the
    current head of the network.
    """

    # Regularizer factories selectable by name in ``create_variable``.
    regularizers = {
        'l1': l1_regularizer,
        'l2': l2_regularizer
    }

    # Numerical-stability constant used by batch normalization.
    EPSILON = 1e-12

    def __init__(self, input: tf.Tensor = None, output: tf.Tensor = None, batch: int = 256, n_label: int = 10):
        """
        :param input: Input Tensor. Use tf.placeholder. If not provided, an input placeholder for CIFAR-10 is used
        :param output: Output Tensor. Use tf.placeholder. If not provided, an output placeholder for CIFAR-10 is used
        :param batch: Batch Size
        :param n_label: The number of labels for classification
        """
        self.batch = batch
        self.n_label = n_label
        self.x_ts = tf.placeholder('float', [None, 32, 32, 3]) if input is None else input
        self.y_ts = tf.placeholder('int64', [None]) if output is None else output
        self.sess = None
        self._names = dict()  # per-prefix counters used to generate unique variable names
        self.layers = list()  # every tensor created so far, input placeholder first
        self.layers.append(self.x_ts)
        self.saver = None

    def create_variable(self, name: str, shape: tuple, dtype=tf.float32,
                        initializer=xavier_initializer(), regularizer: str = None):
        """
        Create a uniquely named variable (see ``_naming`` for the suffixing).

        :param name: base name for the variable
        :param shape: variable shape
        :param dtype: variable dtype
        :param initializer: initializer instance (Xavier by default)
        :param regularizer: optional regularizer name, 'l1' or 'l2'
        """
        if regularizer is not None:
            regularizer = regularizer.lower()
            regularizer = self.regularizers[regularizer]()
        v = tf.get_variable(self._naming(name), shape=shape, dtype=dtype,
                            initializer=initializer, regularizer=regularizer)
        return v

    def conv(self, input_layer, filter: List[int], channel: List[int],
             stride: int, padding: str = 'SAME') -> Tuple[tf.Tensor, tf.Tensor]:
        """
        2-D convolution with a freshly created filter variable.

        :param input_layer: Previous layer or tensor (NHWC)
        :param filter: [filter_height, filter_width]
        :param channel: [in_channels, out_channels]
        :param stride: spatial stride (applied to both height and width)
        :param padding: 'SAME' or 'VALID'
        :return: [conv_layer, filter]
        """
        filter_ts = self.create_variable('filter', shape=(*filter, *channel))
        conv = tf.nn.conv2d(input_layer, filter=filter_ts, strides=[1, stride, stride, 1], padding=padding)
        return conv, filter_ts

    def batch_norm(self, input_layer, dimension):
        """
        Batch normalization over the batch/height/width axes with learnable
        shift (beta) and scale (gamma) parameters of size ``dimension``.
        """
        mean, variance = tf.nn.moments(input_layer, [0, 1, 2], keep_dims=False)
        beta = self.create_variable('batch_beta', dimension, dtype=tf.float32,
                                    initializer=tf.constant_initializer(0.0, tf.float32))
        gamma = self.create_variable('batch_gamma', dimension, dtype=tf.float32,
                                     initializer=tf.constant_initializer(1.0, tf.float32))
        bn_layer = tf.nn.batch_normalization(input_layer, mean, variance, beta, gamma, self.EPSILON)
        return bn_layer

    def conv_bn(self, input_layer, filter: List[int], channel: List[int], stride: int):
        """
        Convolution immediately followed by batch normalization.

        ResNet always places BN right after each convolution:
        "We adopt batch normalization (BN) right after each convolution and before activation"

        :param filter: [filter_height, filter_width]
        :param channel: [in_channels, out_channels]
        """
        out_channel = channel[1]
        h, _filter = self.conv(input_layer, filter=filter, channel=channel, stride=stride, padding='SAME')
        h = self.batch_norm(h, out_channel)
        return h

    def init_block(self, filter: List[int] = (7, 7), channel: List[int] = (3, 16),
                   stride: int = 1, max_pool: bool = True) -> tf.Tensor:
        """
        input -> Conv -> ReLU -> [MaxPool] -> output

        :param filter: [filter_height, filter_width]
        :param channel: [in_channels, out_channels]
        :param stride: spatial stride of the initial convolution
        :param max_pool: whether to append a 2x2/stride-2 max-pooling layer
        """
        init_conv, _filter = self.conv(self.x_ts, filter=filter, channel=channel, stride=stride)
        init_conv = tf.nn.relu(init_conv)

        if max_pool:
            # MaxPooling
            # ksize: The size of the window for each dimension of the input tensor
            # strides: The stride of the sliding window for each dimension of the input tensor
            output = tf.nn.max_pool(init_conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        else:
            output = init_conv

        self.layers.append(output)
        return output

    def max_pool(self, input_layer, kernel: List[int], stride: List[int], padding: str = 'SAME') -> tf.Tensor:
        """
        :param input_layer: previous layer or tensor (NHWC)
        :param kernel: [height, width] pooling-window size
        :param stride: [height, width] stride size
        :param padding: 'SAME' or 'VALID'
        :return: pooled tensor
        """
        k_height, k_width = kernel
        stride_height, stride_width = stride
        output = tf.nn.max_pool(input_layer,
                                ksize=[1, k_height, k_width, 1],
                                strides=[1, stride_height, stride_width, 1], padding=padding)
        self.layers.append(output)
        return output

    def avg_pool(self, input_layer, kernel: List[int], stride: List[int], padding: str = 'SAME') -> tf.Tensor:
        """
        :param input_layer: previous layer or tensor (NHWC)
        :param kernel: [height, width] pooling-window size
        :param stride: [height, width] stride size
        :param padding: 'SAME' or 'VALID'
        :return: pooled tensor
        """
        k_height, k_width = kernel
        stride_height, stride_width = stride
        output = tf.nn.avg_pool(input_layer,
                                ksize=[1, k_height, k_width, 1],
                                strides=[1, stride_height, stride_width, 1], padding=padding)
        self.layers.append(output)
        return output

    def residual_block(self, input_layer, filter: List[int], channel: List[int], stride: int = 1) -> tf.Tensor:
        """
        input -> Conv -> BN -> ReLU -> Conv -> BN -> Addition -> ReLU -> output

        :param input_layer: Usually previous layer
        :param filter: (width<int>, height<int>) The size of the filter
        :param channel: [in_channels, out_channels]
        :param stride: stride of the first convolution (and of the shortcut
            projection); the second convolution always uses stride 1
        :return: output tensor of the block
        """
        input_channel, output_channel = channel
        h = self.conv_bn(input_layer, filter=filter, channel=[input_channel, output_channel], stride=stride)
        h = tf.nn.relu(h)
        # BUGFIX: the second convolution must keep stride 1. Re-applying
        # `stride` here downsampled the residual path twice, so for stride > 1
        # tf.add below failed with a shape mismatch against the shortcut.
        h = self.conv_bn(h, filter=filter, channel=[output_channel, output_channel], stride=1)

        if input_channel != output_channel:
            # Channel dimensions differ, so project the shortcut with a 1x1
            # convolution to match the residual path.
            inp, _filter = self.conv(input_layer, filter=[1, 1], channel=[input_channel, output_channel], stride=stride)
        else:
            # NOTE(review): with stride > 1 and equal channels the identity
            # shortcut is not downsampled -- callers must change channels
            # whenever they stride. Verify against call sites.
            inp = input_layer

        h = tf.add(h, inp)
        h = tf.nn.relu(h)
        self.layers.append(h)
        return h

    def fc(self, input_layer):
        """Global average pooling followed by a fully connected layer mapping
        to ``self.n_label`` logits."""
        global_pool = tf.reduce_mean(input_layer, axis=[1, 2])
        fc_w = self.create_variable(name='fc_w', shape=[global_pool.shape[-1], self.n_label])
        fc_b = self.create_variable(name='fc_b', shape=[self.n_label])
        output = tf.matmul(global_pool, fc_w) + fc_b
        self.layers.append(output)
        return output

    def loss(self):
        """Mean sparse softmax cross-entropy between ``self.last_layer``
        logits and the integer labels in ``self.y_ts``."""
        loss_f = tf.nn.sparse_softmax_cross_entropy_with_logits
        cross_entropy = loss_f(logits=self.last_layer, labels=self.y_ts, name='cross_entropy')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
        return cross_entropy_mean

    def compile(self, target=None) -> tf.Session:
        """Create a session (half of GPU memory, growth allowed), initialize
        all variables and store the session on the instance."""
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5, allow_growth=True)
        sess = tf.Session(target, config=tf.ConfigProto(gpu_options=gpu_options,
                                                        allow_soft_placement=False,
                                                        log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        self.sess = sess
        return sess

    def save(self, path='/tmp/resnet_anderson.ckpt'):
        """Save the current session's variables to a checkpoint file."""
        if self.saver is None:
            self.saver = tf.train.Saver()
        self.saver.save(self.sess, path)

    def restore(self, path='/tmp/resnet_anderson.ckpt'):
        """Restore variables into the current session from a checkpoint."""
        if self.saver is None:
            self.saver = tf.train.Saver()
        print(f'Restoring "{path}" model')
        self.saver.restore(self.sess, path)

    @property
    def last_layer(self) -> tf.Tensor:
        """The most recently created tensor of the network."""
        return self.layers[-1]

    def _naming(self, name=None):
        """Return ``name`` suffixed with a per-name counter ('conv' ->
        'conv_01', 'conv_02', ...) so variable names never collide."""
        if name is None or not name:
            name = 'variable'
        name = name.lower()
        self._names.setdefault(name, 0)
        self._names[name] += 1
        count = self._names[name]
        return f'{name}_{count:02}'
# ResNet-50 Inference with FINN on Alveo
This notebook demonstrates the functionality of a FINN-based, full dataflow ResNet-50 implemented in Alveo U250. The characteristics of the network are the following:
- residual blocks at 1-bit weights, 2/4-bit activations
- first convolution and last (fully connected) layer use 8-bit weights
- all parameters stored on-chip in BRAM/LUTRAM/URAM
- single DDR controller (DDR0) utilized for input and output
We validate the network against ImageNet. We use the PYNQ APIs for retrieving and recording power information which is then displayed in real-time.
## Set up Accelerator with PYNQ
We load the Alveo accelerator and print its memory-mapped registers:
```
import pynq
ol=pynq.Overlay("resnet50.xclbin")
accelerator=ol.resnet50_1
print(accelerator.register_map)
```
Next we create a data buffer in the Alveo PLRAM memory to hold the weights of the Fully Connected Layer:
```
import numpy as np
#allocate a buffer for FC weights, targeting the Alveo PLRAM
fcbuf = pynq.allocate((1000,2048), dtype=np.int8, target=ol.PLRAM0)
```
Load the weight from a CSV file and push them to the accelerator buffer:
```
#load Weights from file into the PYNQ buffer
fcweights = np.genfromtxt("fcweights.csv", delimiter=',', dtype=np.int8)
#csv reader erroneously adds one extra element to the end, so remove, then reshape
fcweights = fcweights[:-1].reshape(1000,2048)
fcbuf[:] = fcweights
#Move the data to the Alveo DDR
fcbuf.sync_to_device()
```
## Single Image Inference
In this example we perform inference on each of the images in a `pictures` folder and display the top predicted class overlaid onto the image. The code assumes the existence of this `pictures` folder, where you should put the images you want to classify. There is no restriction on the images that you can use.
```
import shutil
import wget
import os
import glob
from itertools import chain
import cv2
import matplotlib.pyplot as plt
image_list = list(chain.from_iterable([glob.glob('pictures/*.%s' % ext) for ext in ["jpg","gif","png","tga"]]))
#get imagenet classes from file
import pickle
classes = pickle.load(open("labels.pkl",'rb'))
def infer_once(filename):
inbuf = pynq.allocate((224,224,3), dtype=np.int8, target=ol.bank0)
outbuf = pynq.allocate((5,), dtype=np.uint32, target=ol.bank0)
#preprocess image
img = cv2.resize(cv2.imread(filename), (224,224))
#transfer to accelerator
inbuf[:] = img
inbuf.sync_to_device()
#do inference
accelerator.call(inbuf, outbuf, fcbuf, 1)
#get results
outbuf.sync_from_device()
results = np.copy(outbuf)
return results
inf_results = []
for img in image_list:
inf_output = infer_once(img)
inf_result = [classes[i] for i in inf_output]
inf_results.append(inf_result)
plt.figure(figsize=(20,10))
columns = 3
for i, image in enumerate(image_list):
plt.subplot(len(image_list) / columns + 1, columns, i + 1)
top_class = inf_results[i][0].split(',', 1)[0]
display_image = cv2.cvtColor(cv2.resize(cv2.imread(image),(224,224)), cv2.COLOR_BGR2RGB)
plt.imshow(cv2.putText(display_image, top_class, (10,20), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (255,255,255)))
```
## Plot Accelerator Board Power with PYNQ
We first set up data acquisition using PYNQ's PMBus API
```
import plotly
import plotly.graph_objs as go
import pandas as pd
from pynq import pmbus
import time
rails = pmbus.get_xrt_sysfs_rails(pynq.pl_server.Device.active_device)
#We create a recorder monitoring the three rails that have power measurement on Alveo.
#Total board power is obtained by summing together the PCI Express and Auxilliary 12V rails.
#While some current is also drawn over the PCIe 5V rail this is negligible compared to the 12V rails and isn't recorded.
#We also measure the VCC_INT power which is the primary supply to the FPGA.
recorder = pmbus.DataRecorder(rails["12v_aux"].power,
rails["12v_pex"].power,
rails["vccint"].power)
f = recorder.frame
powers = pd.DataFrame(index=f.index)
powers['board_power'] = f['12v_aux_power'] + f['12v_pex_power']
powers['fpga_power'] = f['vccint_power']
#Now we need to specify the layout for the graph. In this case it will be a simple Line/Scatter plot,
#autoranging on both axes with the Y axis having 0 at the bottom.
layout = {
'xaxis': {
'title': 'Time (s)'
},
'yaxis': {
'title': 'Power (W)',
'rangemode': 'tozero',
'autorange': True
}
}
#Plotly expects data in a specific format, namely an array of plotting objects.
#This helper function will update the data in a plot based on the recorder's frame.
#The `DataRecorder` stores the recording in a Pandas dataframe object with a time-based index.
#This makes it easy to pull out the results for a certain time range and compute a moving average.
#In this case we are going to give a 5-second moving average of the results as well as the raw input.
def update_data(frame, start, end, plot):
ranged = frame[start:end]
average_ranged = frame[start-pd.tseries.offsets.Second(5):end]
rolling = (average_ranged['12v_aux_power'] + average_ranged['12v_pex_power']).rolling(
pd.tseries.offsets.Second(5)
).mean()[ranged.index]
powers = pd.DataFrame(index=ranged.index)
powers['board_power'] = ranged['12v_aux_power'] + ranged['12v_pex_power']
powers['rolling'] = rolling
data = [
go.Scatter(x=powers.index, y=powers['board_power'], name="Board Power"),
go.Scatter(x=powers.index, y=powers['rolling'], name="5 Second Avg")
]
plot.update(data=data)
#Next we create and show the plot object; initially there will be no data to display, but this plot will be updated after we start the recording.
#Once the plot is running it is possible to right click on it to pop out the graph into a separate window.
plot = go.FigureWidget(layout=layout)
plot
```
Next we create a dynamically-updating power graph:
```
recorder.record(0.1)
#In order to continue updating the graph we need a thread running in the background.
#The following thread will call our update function twice a second to display the most recently collected minute of data.
do_update = True
def thread_func():
while do_update:
now = pd.Timestamp.fromtimestamp(time.time())
past = now - pd.tseries.offsets.Second(60)
update_data(recorder.frame, past, now, plot)
time.sleep(0.5)
from threading import Thread
t = Thread(target=thread_func)
t.start()
```
To manually stop the power graph:
```
do_update = False
recorder.stop()
```
## Synthetic Throughput Test
We execute inference of a configurable-size batch of images, without data movement. We measure the latency and throughput.
```
import ipywidgets as widgets
from IPython.display import clear_output
bs = widgets.IntSlider(
value=128,
min=1,
max=1000,
step=1,
description='Batch Size:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
fps = widgets.IntProgress(min=0, max=2500, description='FPS: ')
latency = widgets.FloatProgress(min=0, max=0.1, description='Latency (ms): ')
button = widgets.Button(description='Stop')
stop_running = False
def on_button_clicked(_):
global stop_running
stop_running = True
# linking button and function together using a button's method
button.on_click(on_button_clicked)
out_fps = widgets.Text()
out_latency = widgets.Text()
ui_top = widgets.HBox([button, bs])
ui_bottom = widgets.HBox([fps, out_fps, latency, out_latency])
ui = widgets.VBox([ui_top, ui_bottom])
display(ui)
import time
import threading
def benchmark_synthetic():
import pynq
ibuf = pynq.allocate((1000,3,224,224), dtype=np.int8, target=ol.bank0)
obuf = pynq.allocate((1000,5), dtype=np.uint32, target=ol.bank0)
while True:
if stop_running:
print("Stopping")
return
duration = time.monotonic()
accelerator.call(ibuf, obuf, fcbuf, bs.value)
duration = time.monotonic() - duration
fps.value = int(bs.value/duration)
latency.value = duration
out_fps.value = str(fps.value)
out_latency.value = '%.2f' % (duration * 1000)
t = threading.Thread(target=benchmark_synthetic)
t.start()
```
| /resnet50_pynq-1.1.tar.gz/resnet50_pynq-1.1/host/Demo.ipynb | 0.475362 | 0.951953 | Demo.ipynb | pypi |
from __future__ import annotations # Allows models to be defined in any order
import json
import shlex
import sys
from collections import defaultdict
from subprocess import CalledProcessError, check_output
from typing import TYPE_CHECKING, DefaultDict, NoReturn, Optional
import typer
if TYPE_CHECKING:
from mypy_boto3_ec2.type_defs import DescribeInstancesResultTypeDef, InstanceTypeDef
if sys.version_info >= (3, 8):
    # Modern Python: both names ship in the standard library.
    import importlib.metadata as importlib_metadata
    from shlex import join as shlex_join
else:
    # Python 3.7: use the backport package and a hand-rolled shlex join.
    import importlib_metadata

    def shlex_join(split_command: "list[str]") -> str:
        """Quote each token and join them into one shell-safe string."""
        return " ".join(shlex.quote(token) for token in split_command)
# Set version number
__version__ = importlib_metadata.version(__name__)
# Helper functions
def print_error(msg: str) -> None:
    """Write ``msg`` to stderr, colored red so it stands out as an error."""
    typer.secho(msg, fg=typer.colors.RED, err=True)
def exit_error() -> NoReturn:
    """Terminate the CLI with exit status 1, signalling failure."""
    raise typer.Exit(1)
def exit_success() -> NoReturn:
    """Terminate the CLI with exit status 0 (success)."""
    raise typer.Exit(0)
# Exceptions
class ResolutionError(RuntimeError):
    """Base error for failures to resolve a name to an instance ID."""
class NameNotFound(ResolutionError):
    """Raised when no instance carries the requested name."""
class MultipleNamesFound(ResolutionError):
    """Raised when more than one instance shares the requested name."""
# Define the CLI app; shell completion is disabled for this single-command tool.
app = typer.Typer(add_completion=False)
# Define the main CLI command
def version_callback(value: bool):
    """Print the version number and exit."""
    if not value:
        return
    typer.echo(__version__)
    exit_success()
@app.command()
def main(
    name: str,
    version: Optional[bool] = typer.Option(
        None,
        "--version",
        callback=version_callback,
        is_eager=True,
        help="Print the version number and exit.",
    ),
):
    # Resolve the name; report resolution failures in red and exit non-zero.
    try:
        instance_id = resolve_ec2_id(name)
    except ResolutionError as exc:
        print_error(str(exc))
        exit_error()
    typer.echo(instance_id)
    exit_success()
def resolve_ec2_id(name: str) -> str:
    """Given the name of an EC2 instance, return the instance ID."""
    # Fetch the "describe-instances" result, preferring boto3 and falling
    # back to the AWS CLI when boto3 is not installed.
    try:
        result = describe_instances_with_boto3()
    except ImportError:
        result = describe_instances_with_aws_cli()
    # Index every non-terminated instance by its instance ID.
    instances_by_id: dict[str, InstanceTypeDef] = {}
    for reservation in result["Reservations"]:
        for instance in reservation.get("Instances", []):
            state = instance.get("State", {"Name": "missing"}).get("Name")
            if state in ["terminated"]:
                continue
            instances_by_id[instance.get("InstanceId", "_MISSING")] = instance
    # Collect instance IDs under each "Name" tag value.
    instance_ids_by_name: DefaultDict[str, list[str]] = defaultdict(list)
    for instance in instances_by_id.values():
        for tag in instance.get("Tags", []):
            if tag["Key"] != "Name":
                continue
            instance_ids_by_name[tag.get("Value", "_MISSING")].append(
                instance.get("InstanceId", "_MISSING"),
            )
    # The given name may already be an instance ID.
    if name in instances_by_id:
        return name
    matches = instance_ids_by_name[name]
    if not matches:
        raise NameNotFound(f"Could not find instance with name '{name}'.")
    if len(matches) > 1:
        instance_str = '"' + '", "'.join(matches) + '"'
        raise MultipleNamesFound(
            f"Multiple instances with name '{name}' found: {instance_str}",
        )
    return matches[0]
def describe_instances_with_boto3() -> DescribeInstancesResultTypeDef:
    """Use boto3 to get the output of the "describe-instances" command.
    Raises ImportError if boto3 is not installed.
    """
    # Imported lazily so the CLI still works (via the AWS CLI fallback)
    # without boto3 installed.
    import boto3
    ec2_client = boto3.client("ec2")
    response: DescribeInstancesResultTypeDef = ec2_client.describe_instances()
    return response
def describe_instances_with_aws_cli() -> DescribeInstancesResultTypeDef:
    """Use AWS CLI to get the output of the "describe-instances" command.
    Exits with an error in case something goes wrong.
    """
    # List every instance as JSON via the AWS CLI in a subprocess.
    command = ["aws", "ec2", "describe-instances", "--output=json"]
    try:
        raw_command_output = check_output(command)
    except CalledProcessError:
        command_str = shlex_join(command)
        print_error(
            f"An error occurred while running command: '{command_str}'. "
            f"We tried to run this command because boto3 is not installed. "
            f"Consider installing boto3.",
        )
        exit_error()
    return json.loads(raw_command_output)
if __name__ == "__main__":
app() | /resolve_ec2_id-1.0.1-py3-none-any.whl/resolve_ec2_id.py | 0.592195 | 0.293177 | resolve_ec2_id.py | pypi |
import django_filters as filters
from rest_framework import exceptions
from resolwe.flow.filters import CollectionFilter, DataFilter, EntityFilter
from resolwe_bio.models import Sample
class BioCollectionFilter(CollectionFilter):
    """Filter the collection endpoint.
    Enable filtering collections by the entity.
    .. IMPORTANT::
        :class:`CollectionViewSet` must be patched before using it in
        urls to enable this feature:
        .. code:: python
            CollectionViewSet.filterset_class = BioCollectionFilter
    """
    # Match collections containing the given sample; samples are stored on
    # the underlying ``entity`` relation.
    sample = filters.ModelChoiceFilter(
        field_name="entity", queryset=Sample.objects.all()
    )
class BioEntityFilter(EntityFilter):
    """Filter the entity endpoint.
    Enable filtering collections by the entity.
    .. IMPORTANT::
        :class:`EntityViewSet` must be patched before using it in
        urls to enable this feature:
        .. code:: python
            EntityViewSet.filterset_class = BioEntityFilter
    """
    # Subject information: case-insensitive substring matches, except batch
    # (exact number) and group (case-insensitive exact match).
    descriptor__subject_information__sample_label__icontains = filters.CharFilter(
        field_name="descriptor__subject_information__sample_label",
        lookup_expr="icontains",
    )
    descriptor__subject_information__subject_id__icontains = filters.CharFilter(
        field_name="descriptor__subject_information__subject_id",
        lookup_expr="icontains",
    )
    descriptor__subject_information__batch__exact = filters.CharFilter(
        field_name="descriptor__subject_information__batch",
        method="filter_exact_number",
    )
    descriptor__subject_information__group__iexact = filters.CharFilter(
        field_name="descriptor__subject_information__group", lookup_expr="iexact"
    )
    # Disease information filters.
    descriptor__disease_information__disease_type__icontains = filters.CharFilter(
        field_name="descriptor__disease_information__disease_type",
        lookup_expr="icontains",
    )
    descriptor__disease_information__disease_status__iexact = filters.CharFilter(
        field_name="descriptor__disease_information__disease_status",
        lookup_expr="iexact",
    )
    # Immuno-oncology treatment filters.
    descriptor__immuno_oncology_treatment_type__io_drug__iexact = filters.CharFilter(
        field_name="descriptor__immuno_oncology_treatment_type__io_drug",
        lookup_expr="iexact",
    )
    descriptor__immuno_oncology_treatment_type__io_treatment__iexact = (
        filters.CharFilter(
            field_name="descriptor__immuno_oncology_treatment_type__io_treatment",
            lookup_expr="iexact",
        )
    )
    # Response / survival analysis filters.
    descriptor__response_and_survival_analysis__confirmed_bor__iexact = (
        filters.CharFilter(
            field_name="descriptor__response_and_survival_analysis__confirmed_bor",
            lookup_expr="iexact",
        )
    )
    descriptor__response_and_survival_analysis__pfs_event__iexact = filters.CharFilter(
        field_name="descriptor__response_and_survival_analysis__pfs_event",
        lookup_expr="iexact",
    )
    # General descriptor filters.
    descriptor__general__description__icontains = filters.CharFilter(
        field_name="descriptor__general__description", lookup_expr="icontains"
    )
    descriptor__general__biosample_source__icontains = filters.CharFilter(
        field_name="descriptor__general__biosample_source", lookup_expr="icontains"
    )
    descriptor__general__biosample_treatment__icontains = filters.CharFilter(
        field_name="descriptor__general__biosample_treatment", lookup_expr="icontains"
    )
    def filter_exact_number(self, queryset, name, value):
        """Transform value into an integer and filter by exact value."""
        try:
            value = int(value)
        except ValueError:
            raise exceptions.ParseError(f"Value of attribute {name} must be a number.")
        return queryset.filter(**{name: value})
class BioDataFilter(DataFilter):
    """Filter the data endpoint.
    Enable filtering data by the sample.
    .. IMPORTANT::
        :class:`DataViewSet` must be patched before using it in urls to
        enable this feature:
        .. code:: python
            DataViewSet.filterset_class = BioDataFilter
    """
    # Match data objects belonging to the given sample (``entity`` relation).
    sample = filters.ModelChoiceFilter(
        field_name="entity", queryset=Sample.objects.all()
) | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/filters.py | 0.760517 | 0.212559 | filters.py | pypi |
from django.db.models import Q
from django_filters import rest_framework as filters
from resolwe.composer import composer
class ExtendedCollectionFilter:
    """Collection filter extension."""
    # Species of any sample in the collection (substring, case-insensitive).
    descriptor__general__species = filters.CharFilter(
        field_name="entity__descriptor__general__species",
        lookup_expr="icontains",
        distinct=True,
    )
    tissue_type = filters.CharFilter(method="_filter_by_tissue")
    treatment = filters.CharFilter(method="_filter_by_treatment")
    outcome_is_defined = filters.CharFilter(method="_filter_by_outcome")
    def _filter_by_multiple_fields(self, queryset, fields, value):
        """Filter by any of the given fields.
        Return exactly those objects, who icontain `value` in at least one of
        `fields`.
        """
        query = Q()
        for field in fields:
            query |= Q(**{field + "__icontains": value})
        return queryset.filter(query).distinct()
    def _filter_by_tissue(self, queryset, name, value):
        """Match `value` against any tissue-related descriptor field."""
        locations = [
            "entity__descriptor__general__organ",
            "entity__descriptor__general__biosample_source",
            "entity__descriptor__disease_information__organ_part",
            "entity__descriptor__disease_information__biopsy_site",
            "entity__descriptor__pathological_information__organ_part",
            "entity__descriptor__pathological_information__biopsy_site",
        ]
        return self._filter_by_multiple_fields(queryset, locations, value)
    def _filter_by_treatment(self, queryset, name, value):
        """Match `value` against any treatment/drug descriptor field."""
        locations = [
            "entity__descriptor__general__biosample_treatment",
            "entity__descriptor__treatment_type__drug",
            "entity__descriptor__immuno_oncology_treatment_type__io_drug",
        ]
        return self._filter_by_multiple_fields(queryset, locations, value)
    def _filter_by_outcome(self, queryset, name, value):
        """Match `value` against any response/survival descriptor field."""
        locations = [
            "entity__descriptor__response_and_survival_analysis__clinical_benefit",
            "entity__descriptor__response_and_survival_analysis__confirmed_bor",
            "entity__descriptor__response_and_survival_analysis__unconfirmed_bor",
            "entity__descriptor__response_and_survival_analysis__pfs",
            "entity__descriptor__response_and_survival_analysis__os",
            "entity__descriptor__response_and_survival_analysis__dfs",
            "entity__descriptor__response_and_survival_analysis__ttp",
        ]
        return self._filter_by_multiple_fields(queryset, locations, value)
class ExtendedDataFilter:
    """Data filter extensions."""
    def filter_output(self, queryset, name, value):
        """Filter queryset by an exact match on the given ``output`` field."""
        return queryset.filter(**{"output__{}".format(name): value})
    def filter_output_icontains(self, queryset, name, value):
        """Filter queryset by a case-insensitive substring match on ``output``."""
        return queryset.filter(**{"output__{}__icontains".format(name): value})
    # These filters use custom indexes defined in migrations.
    build = filters.CharFilter(method="filter_output")
    feature_type = filters.CharFilter(method="filter_output")
    source = filters.CharFilter(method="filter_output")
    species = filters.CharFilter(method="filter_output_icontains")
class ExtendedEntityFilter:
    """Data filter extensions."""
    def filter_species(self, queryset, name, value):
        """Filter queryset by species (case-insensitive substring match)."""
        return queryset.filter(descriptor__general__species__icontains=value)
    species = filters.CharFilter(method="filter_species")
# Register the extensions so the composer merges them into the corresponding
# resolwe flow filtersets at import time.
composer.add_extension(
    "resolwe.flow.filters.CollectionFilter", ExtendedCollectionFilter
)
composer.add_extension("resolwe.flow.filters.DataFilter", ExtendedDataFilter)
composer.add_extension("resolwe.flow.filters.EntityFilter", ExtendedEntityFilter) | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/extensions.py | 0.832679 | 0.233717 | extensions.py | pypi |
from pathlib import Path
from shutil import copy
from zipfile import ZipFile
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
IntegerField,
JsonField,
Process,
StringField,
)
class Rose2(Process):
    """Run ROSE2.
    Rank Ordering of Super-Enhancers algorithm (ROSE2) takes the acetylation
    peaks called by a peak caller (MACS, MACS2...) and based on the in-between
    distances and the acetylation signal at the peaks judges whether they can
    be considered super-enhancers. The ranked values are plotted and by
    locating the inflection point in the resulting graph, super-enhancers are
    assigned. See [here](http://younglab.wi.mit.edu/super_enhancer_code.html)
    for more information.
    """
    slug = "rose2"
    name = "ROSE2"
    process_type = "data:chipseq:rose2"
    # Patch bump: fixes the "-c" flag being silently dropped when plotting
    # enhancers with a control (see run() below).
    version = "5.2.2"
    category = "ChIP-seq"
    entity = {
        "type": "sample",
        "input": "input_macs",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/s4q6j6e8/resolwebio/bamliquidator:2.2.0"
            }
        },
    }
    data_name = (
        "{{ input_macs|name|default('?') if input_macs else rankby|name|default('?') }}"
    )
    class Input:
        """Input fields to process ROSE2."""
        input_macs = DataField(
            "chipseq:callpeak",
            label="BED/narrowPeak file (MACS results)",
            required=False,
            hidden="input_upload",
        )
        input_upload = DataField(
            "bed",
            label="BED file (Upload)",
            required=False,
            hidden="input_macs || use_filtered_bam",
        )
        use_filtered_bam = BooleanField(
            label="Use Filtered BAM File",
            default=False,
            hidden="input_upload",
            description=(
                "Use filtered BAM file from a MACS2 object to rank "
                "enhancers by. Only applicable if input is MACS2."
            ),
        )
        rankby = DataField(
            "alignment:bam",
            label="BAM file",
            required=False,
            hidden="use_filtered_bam",
            description="BAM file to rank enhancers by.",
        )
        control = DataField(
            "alignment:bam",
            label="Control BAM File",
            required=False,
            hidden="use_filtered_bam",
            # NOTE(review): description duplicated from `rankby` — likely a
            # copy-paste slip; confirm before changing the UI text.
            description="BAM file to rank enhancers by.",
        )
        tss = IntegerField(
            label="TSS exclusion",
            default=0,
            description="Enter a distance from TSS to exclude. 0 = no TSS exclusion.",
        )
        stitch = IntegerField(
            label="Stitch",
            required=False,
            description=(
                "Enter a max linking distance for stitching. If not "
                "given, optimal stitching parameter will be determined"
                " automatically."
            ),
        )
        mask = DataField(
            "bed",
            label="Masking BED file",
            required=False,
            description=(
                "Mask a set of regions from analysis. Provide a BED of"
                " masking regions."
            ),
        )
    class Output:
        """Output field of the process ImportFastaNucleotide."""
        all_enhancers = FileField(label="All enhancers table")
        enhancers_with_super = FileField(label="Super enhancers table")
        plot_points = FileField(label="Plot points")
        plot_panel = FileField(label="Plot panel")
        enhancer_gene = FileField(label="Enhancer to gene")
        enhancer_top_gene = FileField(label="Enhancer to top gene")
        gene_enhancer = FileField(label="Gene to Enhancer")
        stitch_parameter = FileField(label="Stitch parameter", required=False)
        all_output = FileField(label="All output")
        scatter_plot = JsonField(label="Super-Enhancer plot")
        species = StringField(label="Species")
        build = StringField(label="Build")
    def run(self, inputs, outputs):
        """Run the analysis."""
        # --- Validate the input combination -------------------------------
        if not inputs.input_macs and not inputs.input_upload:
            self.error(
                "Peaks file missing. Please provide .bed peaks file as a file "
                "upload or a MACS output."
            )
        if inputs.input_macs and inputs.input_upload:
            self.error("Please provide only one .bed peaks file.")
        if inputs.control and not inputs.rankby:
            self.error(
                "A control BAM file cannot be provided without specifying "
                "a BAM file to rank by. If selecting Use Filtered Bam File"
                " option neither must be specified."
            )
        # --- Check species/build consistency across inputs ----------------
        if inputs.input_macs:
            if (
                inputs.input_macs.type == "data:chipseq:callpeak:macs14:"
                and inputs.use_filtered_bam
            ):
                self.error(
                    "Use Filtered Bam File option can only be used with a "
                    "MACS2 input."
                )
            if (
                not (
                    inputs.input_macs.type == "data:chipseq:callpeak:macs2:"
                    and inputs.use_filtered_bam
                )
                and not inputs.rankby
            ):
                self.error(
                    "BAM file to rank by must be used unless a filtered BAM "
                    "from a MACS2 data object is used to rank by."
                )
            species = inputs.input_macs.output.species
            build = inputs.input_macs.output.build
            if inputs.rankby:
                if species != inputs.rankby.output.species:
                    self.error(
                        f"Species of rankby bam file {inputs.rankby.output.species} "
                        f"and MACS bed file {species} do not match. Please "
                        "provide aligned reads and annotation with the same "
                        "species."
                    )
                if build != inputs.rankby.output.build:
                    self.error(
                        "Genome builds of rankby bam file "
                        f"{inputs.rankby.output.build} and MACS bed file {build} "
                        "do not match. Please provide aligned reads and "
                        "annotation with the same genome build."
                    )
        if inputs.input_upload:
            if not inputs.rankby:
                self.error(
                    "BAM file to rank by must be used unless a filtered BAM "
                    "from a MACS2 data object is used to rank by."
                )
            species = inputs.input_upload.output.species
            if species != inputs.rankby.output.species:
                self.error(
                    f"Species of rankby bam file {inputs.rankby.output.species} and "
                    f"uploaded bed file {species} do not match. Please provide"
                    " aligned reads and annotation with the same species."
                )
            build = inputs.input_upload.output.build
            if build != inputs.rankby.output.build:
                self.error(
                    f"Genome builds of rankby bam file {inputs.rankby.output.build} "
                    f"and uploaded bed file {build} do not match. Please "
                    "provide aligned reads and annotation with the same genome"
                    " build."
                )
        if inputs.control:
            if inputs.control.output.species != inputs.rankby.output.species:
                self.error(
                    f"Species of rankby bam file {inputs.rankby.output.species} and "
                    f"control bam file {inputs.control.output.species} do not match. "
                    "Please provide aligned reads with the same species."
                )
            if inputs.control.output.build != inputs.rankby.output.build:
                self.error(
                    f"Genome builds of rankby bam file {inputs.rankby.output.build} "
                    f"and control bam file {inputs.control.output.build} do not "
                    "match. Please provide aligned reads with the same genome "
                    "build."
                )
        if inputs.mask:
            if inputs.mask.output.species != species:
                self.error(
                    f"Species of the masking bed file {inputs.mask.output.species} "
                    "does not match other inputs` species. Please provide a "
                    "masking file of the same species as other inputs."
                )
            if inputs.mask.output.build != build:
                self.error(
                    f"Genome build of the masking bed file {inputs.mask.output.build}"
                    " does not match other inputs` build. Please provide a "
                    "masking file of the same genome build as other inputs."
                )
        # Genome builds supported by ROSE2.
        genome_list = [
            "hg19",
            "hg18",
            "mm10",
            "mm9",
            "mm8",
            "rn6",
            "rn4",
        ]
        if build not in genome_list:
            self.error(
                f"{build} is not a valid genome build. Accepted genome "
                f'builds are: {", ".join(genome_list)}.'
            )
        # --- Stage the peaks file as an uncompressed "<name>.bed" ---------
        if inputs.input_upload:
            bed_path = Path(inputs.input_upload.output.bed.path)
            if bed_path.name[-4:] == ".bed":
                name = bed_path.stem
                copy(str(bed_path), bed_path.name)
            else:
                # Assumes a ".bed.gz"-style suffix; decompress it.
                name = bed_path.name[:-7]
                cmd = Cmd["bgzip"]["-cd"][str(bed_path)]
                (cmd > f"{name}.bed")()
        elif inputs.input_macs.type == "data:chipseq:callpeak:macs14:":
            bed_path = Path(inputs.input_macs.output.peaks_bed.path)
            if bed_path.name[-4:] == ".bed":
                name = bed_path.stem
                copy(str(bed_path), bed_path.name)
            else:
                name = bed_path.name[:-7]
                cmd = Cmd["bgzip"]["-cd"][str(bed_path)]
                (cmd > f"{name}.bed")()
        elif inputs.input_macs.type == "data:chipseq:callpeak:macs2:":
            narrowpeak_path = Path(inputs.input_macs.output.narrow_peaks.path)
            if narrowpeak_path.name[-11:] == ".narrowPeak":
                name = narrowpeak_path.stem
                copy(str(narrowpeak_path), f"{name}.bed")
            else:
                name = narrowpeak_path.name[:-14]
                cmd = Cmd["bgzip"]["-cd"][str(narrowpeak_path)]
                (cmd > f"{name}.bed")()
        # --- Select ranking and control alignments ------------------------
        if inputs.input_macs and inputs.use_filtered_bam:
            rankby = inputs.input_macs.output.case_bam.path
        else:
            rankby = inputs.rankby.output.bam.path
        if (
            inputs.input_macs
            and inputs.input_macs.type == "data:chipseq:callpeak:macs2:"
            and inputs.input_macs.output.control_bam
            and inputs.use_filtered_bam
        ):
            control = inputs.input_macs.output.control_bam.path
        elif inputs.control:
            control = inputs.control.output.bam.path
        else:
            control = None
        # --- Run ROSE2 -----------------------------------------------------
        cmd = Cmd["rose2"]
        cmd = cmd["--genome"][build.upper()]
        cmd = cmd["-i"][f"{name}.bed"]
        cmd = cmd["--rankby"][rankby]
        if control:
            cmd = cmd["--control"][control]
        cmd = cmd["--tss"][inputs.tss]
        if inputs.stitch or inputs.stitch == 0:
            cmd = cmd["--stitch"][inputs.stitch]
        if inputs.mask:
            cmd = cmd["--mask"][inputs.mask.output.bed.path]
        cmd = cmd["--out"]["."]
        return_code, _, _ = cmd & TEE(retcode=None)
        if return_code:
            self.error("ROSE2 run failed.")
        outputs.all_enhancers = f"{name}_AllEnhancers.table.txt"
        outputs.enhancers_with_super = f"{name}_Enhancers_withSuper.bed"
        outputs.plot_points = f"{name}_Plot_points.png"
        outputs.plot_panel = f"{name}_Plot_panel.png"
        outputs.enhancer_gene = f"{name}_SuperEnhancers_ENHANCER_TO_GENE.txt"
        outputs.enhancer_top_gene = f"{name}_SuperEnhancers_ENHANCER_TO_TOP_GENE.txt"
        outputs.gene_enhancer = f"{name}_SuperEnhancers_GENE_TO_ENHANCER.txt"
        if not (inputs.stitch or inputs.stitch == 0):
            outputs.stitch_parameter = f"{name}_stitch_parameter.pdf"
        # --- Plot the super-enhancer scatter --------------------------------
        cmd = Cmd["plot_enhancers.py"]
        cmd = cmd[f"{name}_AllEnhancers.table.txt"]
        cmd = cmd["scatter_plot.json"]
        if inputs.control:
            # Fix: plumbum commands are immutable — binding an argument
            # returns a new command. The original `cmd["-c"]` discarded the
            # result, so "-c" never reached plot_enhancers.py.
            # NOTE(review): only `inputs.control` is checked here; a control
            # taken from a filtered MACS2 object is not flagged — confirm
            # whether that is intended.
            cmd = cmd["-c"]
        return_code, _, _ = cmd & TEE(retcode=None)
        if return_code:
            self.error("Plotting enhancers failed.")
        # --- Archive all outputs --------------------------------------------
        zipfile = f"{name}_output_all.zip"
        with ZipFile(zipfile, "w") as zip_file:
            for f in Path(".").glob(f"{name}_*"):
                if f.name == zipfile:
                    continue
                zip_file.write(f)
        outputs.all_output = zipfile
        outputs.scatter_plot = "scatter_plot.json"
        outputs.species = species
outputs.build = build | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/processes/chip_seq/rose2.py | 0.739516 | 0.409398 | rose2.py | pypi |
import os
import re
from pathlib import Path
import pandas as pd
from pandas.errors import EmptyDataError
from plumbum import RETCODE, TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
Persistence,
Process,
SchedulingClass,
StringField,
)
# Column names for 6-column BED records.
BED_COLUMNS = ["chromosome", "start", "end", "name", "score", "strand"]
# Column names for 10-column BEDPE (paired-end) records.
BEDPE_COLUMNS = [
    "chromosome",
    "start",
    "end",
    "chromosome_r2",
    "start_r2",
    "end_r2",
    "name",
    "score",
    "strand",
    "strand_r2",
]
# Effective (mappable) genome sizes per species, passed to the peak caller.
SPECIES_GSIZES = {
    "Homo sapiens": "2.7e9",
    "Mus musculus": "1.87e9",
    "Dictyostelium discoideum": "3.4e7",
    "Drosophila melanogaster": "1.2e8",
    "Caenorhabditis elegans": "9e7",
    "Saccharomyces cerevisiae": "12.1e6",
    "Rattus norvegicus": "2e9",
}
# Header for the spp cross-correlation QC table.
SPP_HEADER = [
    "Reads",
    "Est. Fragment Len.",
    "Corr. Est. Fragment Len.",
    "Phantom Peak",
    "Corr. Phantom Peak",
    "Argmin. Corr.",
    "Min. Corr.",
    "NSC",
    "RSC",
    "Quality Tag",
]
def test_paired_end(bam_file, error):
    """Test if a given bam file is paired end.

    Counts reads with the SAM "paired" flag (0x1) via `samtools view -c -f 1`
    and reports the file as paired-end when any are found. `error` is
    expected to abort the process.
    """
    args = ["-c", "-f", "1", bam_file]
    return_code, n_paired, stderr = Cmd["samtools"]["view"][args] & TEE(retcode=None)
    if return_code:
        print(stderr)
        error("Failed to count the number of paired end reads.")
    try:
        n_paired = int(n_paired)
    except ValueError:
        error("Could not determine if the reads are single or paired end.")
    return n_paired > 0
def merge_dict(d1, d2):
    """Return a new dict with *d1*'s entries updated by *d2*'s.

    Keys present in both take their value from *d2*; neither input is mutated.
    """
    merged = dict(d1)
    merged.update(d2)
    return merged
def filter_bam(bam_file, out_bam, min_quality, is_paired, name, error):
    """Filter bam file.

    Writes reads passing the filters from `bam_file` to `out_bam` using
    `samtools view`; `error` is expected to abort on failure.
    """
    # Remove unmapped reads, not primary alignments, duplicates.
    # 1804 = 0x4 + 0x8 + 0x100 + 0x200 + 0x400 (unmapped, mate unmapped,
    # secondary, QC-fail, duplicate).
    filter_params = [
        "-F",
        1804,
        "-b",
        "-o",
        out_bam,
    ]
    # Remove reads below minimum mapping quality.
    if min_quality > 0:
        filter_params.extend(["-q", min_quality])
    # Remove reads not mapped in proper pair.
    if is_paired:
        filter_params.extend(["-f", 2])
    filter_params.append(bam_file)
    return_code, stdout, stderr = Cmd["samtools"]["view"][filter_params] & TEE(
        retcode=None
    )
    if return_code:
        print(stdout, stderr)
        error(f"Samtools filtering failed for {name}.")
def name_sort_bam(in_bam, out_bam, error):
    """Name sort bam file.

    Sorts by read name (`samtools sort -n`), as required by downstream
    BEDPE conversion; `error` is expected to abort on failure.
    """
    sort_params = ["-n", "-o", out_bam, in_bam]
    return_code, stdout, stderr = Cmd["samtools"]["sort"][sort_params] & TEE(
        retcode=None
    )
    if return_code:
        print(stdout, stderr)
        error(f"Samtools sorting failed for {in_bam}.")
def is_empty(path):
    """Return True if *path* is a regular file with size greater than zero.

    NOTE(review): despite the name, this behaves as "is_non_empty" — it
    returns True only when the file exists AND is non-empty. Callers may
    rely on this inverted meaning, so the logic is left untouched; confirm
    against call sites before renaming or flipping it.
    """
    return Path(path).is_file() and Path(path).stat().st_size > 0
def parse_markdup(markdup_stats):
    """Parse a Picard MarkDuplicates metrics report into a flat dict.

    The trailing "## HISTOGRAM" section, when present, is skipped so only
    the single metrics row is parsed.
    """
    with open(markdup_stats, "r") as handle:
        lines = handle.readlines()
    histogram_marker = "## HISTOGRAM\tjava.lang.Double\n"
    skip_lines = None
    if histogram_marker in lines:
        skip_lines = list(range(lines.index(histogram_marker), len(lines)))
    metrics = pd.read_csv(markdup_stats, sep="\t", comment="#", skiprows=skip_lines)
    # A single-row frame squeezes to a Series keyed by column name.
    return metrics.squeeze().to_dict()
def read_bed(bed_file, col_names):
    """Load a headerless BED-like file into a DataFrame named by *col_names*."""
    read_options = {
        "sep": "\t",
        "header": None,
        "names": col_names,
        # Keep float coordinates exactly as written.
        "float_precision": "round_trip",
    }
    return pd.read_csv(bed_file, **read_options)
def drop_mt(bed):
    """Drop mitochondrial reads from a BED/BEDPE data frame.

    Removes rows whose `chromosome` is the mitochondrial genome ("chrM" or
    "MT"). For paired-end (BEDPE) frames, rows whose mate maps to the
    mitochondrial genome (`chromosome_r2`) are removed as well.
    """
    mito = ["chrM", "MT"]
    bed = bed.drop(bed[bed.chromosome.isin(mito)].index)
    if "chromosome_r2" in bed.columns:
        # Fix: filter on the mate's chromosome. The original re-checked
        # `chromosome` here (a no-op after the first drop), so pairs whose
        # mate 2 mapped to chrM/MT were never removed.
        bed = bed.drop(bed[bed.chromosome_r2.isin(mito)].index)
    return bed
def parse_flagstat(report, warning):
    """Extract total/mapped read statistics from a samtools flagstat report.

    Typical report lines of interest:
        400 + 0 in total (QC-passed reads + QC-failed reads)
        396 + 0 mapped (99.00% : N/A)
    Missing values are reported via `warning` and returned as "NA".
    """
    flagstat_regexes = {
        "TOTAL_READS": r"(\d+) \+ \d+ in total \(QC-passed reads \+ QC-failed reads\)",
        "MAPPED_READS": r"(\d+) \+ \d+ mapped \(.+:.+\)",
        "MAPPED_PERCENTAGE": r"\d+ \+ \d+ mapped \((.+):.+\)",
    }
    parsed_data = {}
    for k, pattern in flagstat_regexes.items():
        match = re.search(pattern, report, re.MULTILINE)
        if match is None:
            parsed_data[k] = "NA"
            warning(f"Failed to determine {k} based on flagstat report.")
        else:
            parsed_data[k] = match.group(1).strip()
    return parsed_data
def parse_bowtie2_report(report_path, n_mapped, warning):
    """Parse a Bowtie2 alignment report file.

    The report is structured as:
        1000000 reads; of these:
          1000000 (100.00%) were paired; of these:
          ...
        5.53% overall alignment rate
    For paired-end runs Bowtie2 counts read *pairs*, so the total is
    doubled when a numeric value is found. `n_mapped` is passed through as
    MAPPED_READS; missing values are reported via `warning`.
    """
    with open(report_path, "r") as handle:
        report = handle.read()
    stats_regexes = {
        "TOTAL_READS": r"(\d+) reads; of these:",
        "MAPPED_PERCENTAGE": r"(\d+\.\d+\%) overall alignment rate",
    }
    parsed_data = {
        "TOTAL_READS": "NA",
        "MAPPED_READS": n_mapped,
        "MAPPED_PERCENTAGE": "NA",
    }
    for k, pattern in stats_regexes.items():
        match = re.search(pattern, report, re.MULTILINE)
        if match is None:
            warning(f"Failed to determine {k} based on bowtie2 report.")
        else:
            parsed_data[k] = match.group(1).strip()
    if parsed_data["TOTAL_READS"].isdigit():
        parsed_data["TOTAL_READS"] = int(parsed_data["TOTAL_READS"]) * 2
    return parsed_data
def convert_bam(bam_file, name, out_bed, is_paired, error, first_mate=False):
    """Convert bam file to bed file.

    Uses `bedtools bamtobed`. Paired-end input is written as BEDPE
    (`-bedpe`, requires name-sorted input); `first_mate` adds `-mate1` so
    mate 1 is always reported first.
    """
    args = ["-i", bam_file]
    if is_paired:
        args.append("-bedpe")
    if first_mate:
        args.append("-mate1")
    return_code = (Cmd["bedtools"]["bamtobed"][args] > out_bed) & RETCODE
    if return_code:
        error(f"Failed to convert bam file to bed file for {name}.")
def get_pbc_metrics(bam_file, name, out_bed, is_paired, error):
    """Get PCR bottlenecking coefficient metrics.
    Metrics are the following:
    Total Reads: number of reads
    Distinct Reads: number of all genomic locations where reads mapped
    One Read: number of genomic locations where only one read maps uniquely
    Two Reads: number of genomic locations where 2 reads map uniquely
    NRF = Non-Redundant Fraction (Distinct Reads / Total Reads)
    PBC1 = PCR Bottlenecking Coefficient 1 (One Read / Distinct Reads)
    PBC2 = PCR Bottlenecking Coefficient 2 (One Read / Two Reads)
    Matching bash scripts available on the link below.
    https://github.com/ENCODE-DCC/chip-seq-pipeline/blob/master/dnanexus/filter_qc/src/filter_qc.py
    """
    # Convert the alignments to BED/BEDPE so positions can be grouped.
    convert_bam(
        bam_file=bam_file, name=name, out_bed=out_bed, is_paired=is_paired, error=error
    )
    # Columns that define a distinct mapping location (pair-aware for BEDPE).
    if is_paired:
        col_names = BEDPE_COLUMNS
        column_subset = [
            "chromosome",
            "start",
            "chromosome_r2",
            "end_r2",
            "strand",
            "strand_r2",
        ]
    else:
        col_names = BED_COLUMNS
        column_subset = ["chromosome", "start", "end", "strand"]
    bed = read_bed(out_bed, col_names=col_names)
    bed = bed.loc[:, column_subset]
    bed = drop_mt(bed)
    # Add a column with the number of duplicate entires.
    bed = bed.groupby(column_subset).size().reset_index(name="count")
    pbc = {
        "Distinct Reads": len(bed.index),
        "One Read": sum(bed["count"] == 1),
        "Two Reads": sum(bed["count"] == 2),
        "NRF": "N/A",
        "PBC1": "N/A",
        "PBC2": "N/A",
        "Total Reads": bed["count"].sum(),
    }
    pbc["NRF"] = round(pbc["Distinct Reads"] / pbc["Total Reads"], 6)
    pbc["PBC1"] = round(pbc["One Read"] / pbc["Distinct Reads"], 6)
    # PBC2 stays "N/A" when no location has exactly two reads (division by 0).
    if pbc["Two Reads"] > 0:
        pbc["PBC2"] = round(pbc["One Read"] / pbc["Two Reads"], 6)
    return pbc
def prepare_tagalign(bed_file, tagalign_file, is_paired, tn5_shift, compression=None):
    """Prepare a final tagAlign file.
    First transform deduplicated bam to tagAlign and then optionally do
    the Tn5 transposon shifting.
    """
    col_names = BED_COLUMNS
    if is_paired:
        tagalign = read_bed(bed_file, col_names=BEDPE_COLUMNS)
        r2_col_names = [
            "chromosome_r2",
            "start_r2",
            "end_r2",
            "name",
            "score",
            "strand_r2",
        ]
        name_mapper = dict(zip(r2_col_names, col_names))
        # Interleave R1 and R2 reads.
        # Trick: R1 rows keep integer indices 0..n-1 while R2 rows get
        # index + 0.1, so after sort_index() each mate 2 row follows its
        # mate 1 row directly.
        tagalign = pd.concat(
            [
                tagalign[col_names].reset_index(drop=True),
                tagalign[r2_col_names]
                .rename(columns=name_mapper)
                .set_index(tagalign.index + 0.1),
            ],
            sort=False,
        )
        tagalign = tagalign.sort_index().reset_index(drop=True)
    else:
        tagalign = read_bed(
            bed_file,
            col_names=col_names,
        )
    # Remove sequence info in the name field and set scores to the max.
    tagalign["name"] = "N"
    tagalign["score"] = 1000
    if tn5_shift:
        # Tn5 shift: +4 bp on the plus strand, -5 bp on the minus strand.
        tagalign.loc[(tagalign.strand == "+"), "start"] = tagalign.start + 4
        tagalign.loc[(tagalign.strand == "-"), "end"] = tagalign.end - 5
    tagalign.to_csv(
        tagalign_file,
        sep="\t",
        index=False,
        header=False,
        compression=compression,  # Pandas 0.23.4 compatibility
    )
def subsample_tagalign(tagalign_file, out_file, is_paired, n_sub, compression=None):
    """Randomly subsample a tagAlign file and write the result.

    Mitochondrial reads are dropped first; sampling is deterministic
    (fixed random seed).
    """
    reads = read_bed(tagalign_file, col_names=BED_COLUMNS)
    if is_paired:
        # Paired files interleave R1/R2; keep only R1 (every second row).
        reads = reads.iloc[::2, :]
    reads = drop_mt(reads)
    # Never request more rows than are available.
    sample_size = min(n_sub, len(reads.index))
    subsampled = reads.sample(n=sample_size, random_state=42)
    subsampled.to_csv(
        out_file, sep="\t", index=False, header=False, compression=compression
    )
def save_prepeak_qc(metrics, out_file):
    """Write the pre-peak QC metrics as a single-row tab-separated table.

    Only the fields below are written, in this fixed order; extra keys in
    *metrics* are ignored.
    """
    prepeak_qc_fields = [
        "TOTAL_READS",
        "MAPPED_READS",
        "MAPPED_PERCENTAGE",
        "UNPAIRED_READS_EXAMINED",
        "READ_PAIRS_EXAMINED",
        "UNPAIRED_READ_DUPLICATES",
        "PERCENT_DUPLICATION",
        "NRF",
        "PBC1",
        "PBC2",
        "NSC",
        "RSC",
    ]
    row = [metrics[field] for field in prepeak_qc_fields]
    qc_table = pd.DataFrame([row], columns=prepeak_qc_fields)
    qc_table.to_csv(out_file, sep="\t", index=False)
def correct_bed_file(in_bed, out_bed, error):
    """Correct bed file to be compatible with bedToBigBed tool.

    Rounds scores to integers capped at 1000 and replaces '?' strands with
    '.'; `error` is expected to abort when the input is empty.
    """
    try:
        bed = pd.read_csv(in_bed, delimiter="\t", header=None, dtype=str)
    except EmptyDataError:
        error(
            f"The input BED file {in_bed} is empty. Your analysis might "
            "have failed to identify regions of interest (peaks, junctions, etc.)."
        )
    scores = pd.to_numeric(bed.iloc[:, 4]).round().astype(int)
    bed.iloc[:, 4] = scores.clip(upper=1000)
    # If strand column exist replace '?' with '.'.
    if len(bed.columns) >= 6:
        bed.iloc[:, 5] = bed.iloc[:, 5].replace("?", ".")
    bed.to_csv(out_bed, sep="\t", index=False, header=False)
def clip_bed(in_bed, out_bed, chromosome_sizes, file_type, error):
    """Clip bed file to remove off-chromosome places.

    Runs `bedtools slop -b 0` (no padding is added; presumably used to
    normalize entries against the genome file — confirm) followed by
    `bedClip`, which drops/clamps intervals outside chromosome boundaries.
    """
    padded_bed = Path("padded.bed")
    slop_command = Cmd["bedtools"]["slop"][
        "-i", in_bed, "-g", chromosome_sizes, "-b", 0
    ] > str(padded_bed)
    return_code = slop_command & RETCODE
    if return_code:
        error(f"Failed to increase the size of features in {file_type} file.")
    return_code, _, stderr = Cmd["bedClip"][
        padded_bed, chromosome_sizes, out_bed
    ] & TEE(retcode=None)
    if return_code:
        print(stderr)
        error(
            "Preventing the extension of intervals beyond chromosome boundaries for "
            f"{file_type} file failed."
        )
    # Remove the intermediate file.
    padded_bed.unlink()
def create_big_bed(in_file, out_bb, chromosome_sizes, file_type, error):
    """Create BigBed for IGV and UCSC genome browsers.

    The input is first clipped to chromosome boundaries, then converted
    with `bedToBigBed`. narrowPeak input uses the narrowPeak autoSql schema.
    """
    clipped_file = f"peaks_clip_{in_file}"
    clip_bed(
        in_bed=in_file,
        out_bed=clipped_file,
        chromosome_sizes=chromosome_sizes,
        file_type=f"*{file_type}",
        error=error,
    )
    if file_type == "narrowPeak":
        # NOTE(review): bedToBigBed's autoSql flag is usually spelled
        # "-as=..."; confirm this bare "as=..." invocation works as intended.
        big_bed_params = [
            "as=/opt/kent/bedToBigBed/narrowPeak.as",
            "-type=bed6+4",
        ]
    else:
        big_bed_params = []
    big_bed_params.extend([clipped_file, chromosome_sizes, out_bb])
    return_code, stdout, stderr = Cmd["bedToBigBed"][big_bed_params] & TEE(retcode=None)
    if return_code:
        print(stdout, stderr)
        error(f"Creating BigBed from {file_type} file failed.")
def create_bigwig(in_bdg, out_bw, chromosome_sizes, file_type, error):
    """Create BigWig file from BedGraph file.

    :param in_bdg: path to the input bedGraph file
    :param out_bw: path where the bigWig file is written
    :param chromosome_sizes: path to the chromosome sizes file
    :param file_type: human-readable description used in error messages
    :param error: callable used to report a fatal error message
    """
    clipped_pileup_bdg = f"clipped_{in_bdg}"
    clip_bed(
        in_bed=in_bdg,
        out_bed=clipped_pileup_bdg,
        chromosome_sizes=chromosome_sizes,
        file_type=file_type,
        error=error,
    )
    # Sort with C collation — presumably so chromosome ordering matches
    # what bedGraphToBigWig expects (TODO confirm the command actually
    # picks up os.environ changes at execution time).
    old_locale = os.environ.get("LC_COLLATE")
    os.environ["LC_COLLATE"] = "C"
    sorted_bdg = f"sorted_{in_bdg}"
    try:
        sort_command = (
            Cmd["sort"]["-k", "1,1", "-k", "2,2n", clipped_pileup_bdg] > sorted_bdg
        )
        # Execute the sort while LC_COLLATE is still set to "C"; the
        # previous code restored the locale before running the command.
        return_code = sort_command & RETCODE
    finally:
        # Restore the original environment. The previous code leaked
        # LC_COLLATE="C" when the variable was not set beforehand.
        if old_locale is None:
            os.environ.pop("LC_COLLATE", None)
        else:
            os.environ["LC_COLLATE"] = old_locale
    if return_code:
        error(f"Sorting the {in_bdg} file failed.")
    # Collapse bookended/duplicate intervals, averaging the score column.
    merged_bdg = f"sorted_merged_{in_bdg}"
    merge_command = (
        Cmd["bedtools"]["merge"]["-d", "-1", "-c", 4, "-o", "mean", "-i", sorted_bdg]
        > merged_bdg
    )
    return_code = merge_command & RETCODE
    if return_code:
        error(f"Interval merging for {in_bdg} file failed.")
    return_code, stdout, stderr = Cmd["bedGraphToBigWig"][
        merged_bdg, chromosome_sizes, out_bw
    ] & TEE(retcode=None)
    if return_code:
        print(stdout, stderr)
        error(f"Creating bigWig from {file_type} bedGraph failed")
def count_lines(path):
    """Count the number of lines in a file."""
    total = 0
    with open(path, "r") as handle:
        for _ in handle:
            total += 1
    return total
def count_overlap(file_a, file_b, error):
    """Count overlaps between two bed-like files."""
    result_path = Path("intersection.bed")
    # Report each entry of file_a once (-u) if it overlaps anything in file_b.
    intersect = Cmd["bedtools"]["intersect"][
        "-a", file_a, "-b", file_b, "-wa", "-u"
    ] > str(result_path)
    if intersect & RETCODE:
        error(f"Failed to intersect reads for {file_a} and {file_b}.")
    n_overlaps = count_lines(result_path)
    result_path.unlink()
    return n_overlaps
def rename_tagalign(file, name, tn5_shifted):
    """Rename tagAlign if it was transposon shifted."""
    if tn5_shifted:
        shifted_name = f"{name}_tn5.tagAlign.gz"
        Path(file).rename(shifted_name)
        return shifted_name
    # Not Tn5-shifted: keep the original file name.
    return file
def process_narrow_peaks(file, cap_number):
    """Process narrow peak file for further analysis."""
    narrow_peak_columns = [
        "chromosome",
        "start",
        "end",
        "name",
        "score",
        "strand",
        "signal_value",
        "p_value",
        "q_value",
        "peak",
    ]
    peaks = read_bed(bed_file=file, col_names=narrow_peak_columns)
    # Rank peaks by the p_value column, largest first (presumably a
    # -log10 scale, so largest == most significant — confirm upstream).
    peaks = peaks.sort_values(by=["p_value"], ascending=False)
    # Re-number peak names to follow the new ranking.
    peaks["name"] = [f"Peak_{rank}" for rank in range(1, len(peaks.index) + 1)]
    # Coordinates must not be negative.
    for coordinate in ("start", "end"):
        peaks[coordinate] = peaks[coordinate].clip(lower=0)
    # A cap_number of 0 keeps all peaks.
    if cap_number:
        peaks = peaks.head(cap_number)
    peaks.to_csv(file, sep="\t", index=False, header=False)
def shift_reads(tagalign, out_name, chromosome_sizes, frag_len, error):
    """Shift reads in a tagAlign and report the number of reads.

    Reads are shifted on both strands for half of the fragment length in
    opposite directions.
    """
    half_fragment = int(frag_len / 2)
    # -s makes slop strand-aware; pulling the left edge by -half and
    # pushing the right edge by +half shifts the interval as a whole.
    slop_args = [
        "-i",
        tagalign,
        "-g",
        chromosome_sizes,
        "-s",
        "-l",
        -half_fragment,
        "-r",
        half_fragment,
    ]
    if (Cmd["bedtools"]["slop"][slop_args] > out_name) & RETCODE:
        error("Failed to shift reads in BED file.")
    reads = read_bed(bed_file=out_name, col_names=BED_COLUMNS)
    # Count only intervals that remain valid after shifting.
    valid = (
        (reads["start"] >= 0)
        & (reads["end"] >= 0)
        & (reads["start"] < reads["end"])
    )
    return int(valid.sum())
def get_frag_len(estimates):
    """Get the first fragment length estimate that is greater than 0."""
    # Returns None when no estimate is positive.
    return next((estimate for estimate in estimates if estimate > 0), None)
class Macs2(Process):
"""Call ChIP-Seq peaks with MACS 2.0.
Model-based Analysis of ChIP-Seq (MACS 2.0), is used to identify transcript
factor binding sites. MACS 2.0 captures the influence of genome complexity
to evaluate the significance of enriched ChIP regions, and MACS improves
the spatial resolution of binding sites through combining the information
of both sequencing tag position and orientation. It has also an option to
link nearby peaks together in order to call broad peaks. See
[here](https://github.com/taoliu/MACS/) for more information.
In addition to peak-calling, this process computes ChIP-Seq and
ATAC-Seq QC metrics. Process returns a QC metrics report, fragment
length estimation, and a deduplicated tagAlign file. QC report
contains ENCODE 3 proposed QC metrics --
[NRF](https://www.encodeproject.org/data-standards/terms/),
[PBC bottlenecking coefficients, NSC, and RSC](https://genome.ucsc.edu/ENCODE/qualityMetrics.html#chipSeq).
"""
slug = "macs2-callpeak"
name = "MACS 2.0"
process_type = "data:chipseq:callpeak:macs2"
version = "4.8.1"
category = "ChIP-seq"
data_name = "{{ case|name|default('?') }}"
scheduling_class = SchedulingClass.BATCH
persistence = Persistence.CACHED
entity = {"type": "sample", "input": "case"}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/chipseq:6.0.0"}
},
"resources": {
"cores": 4,
"memory": 32768,
},
}
class Input:
"""Input fields to process Macs2."""
case = DataField(
data_type="alignment:bam",
label="Case (treatment)",
)
control = DataField(
data_type="alignment:bam",
label="Control (background)",
required=False,
)
promoter = DataField(
data_type="bed",
label="Promoter regions BED file",
required=False,
description="BED file containing promoter regions (TSS+-1000bp "
"for example). Needed to get the number of peaks and reads mapped "
"to promoter regions.",
)
tagalign = BooleanField(
label="Use tagAlign files",
default=False,
description="Use filtered tagAlign files as case "
"(treatment) and control (background) samples. If extsize "
"parameter is not set, run MACS using input's estimated fragment "
"length.",
)
class PrepeakQC:
"""Pre-peak QC settings."""
q_threshold = IntegerField(label="Quality filtering threshold", default=30)
n_sub = IntegerField(label="Number of reads to subsample", default=15000000)
tn5 = BooleanField(
label="Tn5 shifting",
default=False,
description="Tn5 transposon shifting. Shift reads on '+' strand "
"by 4bp and reads on '-' strand by 5bp.",
)
shift = IntegerField(
label="User-defined cross-correlation peak strandshift",
required=False,
description="If defined, SPP tool will not try to estimate "
"fragment length but will use the given value as "
"fragment length.",
)
class Settings:
"""MACS2 settings."""
format = StringField(
label="Format of tag file",
choices=[
("BAM", "BAM"),
("BAMPE", "BAMPE"),
],
default="BAM",
hidden="tagalign",
description="This specifies the format of input files. For "
"paired-end data the format dictates how MACS2 will treat "
"mates. If the selected format is BAM, MACS2 will only keep "
"the left mate (5' end) tag. However, when format BAMPE is "
"selected, MACS2 will use actual insert sizes of pairs of "
"reads to build fragment pileup, instead of building bimodal "
"distribution plus and minus strand reads to predict fragment "
"size.",
)
duplicates = StringField(
label="Number of duplicates",
choices=[
("1", "1"),
("auto", "auto"),
("all", "all"),
],
required=False,
hidden="tagalign",
description="It controls the MACS behavior towards duplicate "
"tags at the exact same location -- the same coordination and "
"the same strand. The 'auto' option makes MACS calculate the "
"maximum tags at the exact same location based on binomial "
"distribution using 1e-5 as pvalue cutoff and the 'all' "
"option keeps all the tags. If an integer is given, at most "
"this number of tags will be kept at the same location. The "
"default is to keep one tag at the same location.",
)
duplicates_prepeak = StringField(
label="Number of duplicates",
choices=[
("1", "1"),
("auto", "auto"),
("all", "all"),
],
hidden="!tagalign",
default="all",
description="It controls the MACS behavior towards duplicate "
"tags at the exact same location -- the same coordination and "
"the same strand. The 'auto' option makes MACS calculate the "
"maximum tags at the exact same location based on binomial "
"distribution using 1e-5 as pvalue cutoff and the 'all' "
"option keeps all the tags. If an integer is given, at most "
"this number of tags will be kept at the same location. The "
"default is to keep one tag at the same location.",
)
qvalue = FloatField(
label="Q-value cutoff",
required=False,
disabled="settings.pvalue && settings.pvalue_prepeak",
description="The q-value (minimum FDR) cutoff to call "
"significant regions. Q-values are calculated from p-values "
"using Benjamini-Hochberg procedure.",
)
pvalue = FloatField(
label="P-value cutoff",
disabled="settings.qvalue",
hidden="tagalign",
required=False,
description="The p-value cutoff. If specified, MACS2 will use "
"p-value instead of q-value cutoff.",
)
pvalue_prepeak = FloatField(
label="P-value cutoff",
default=0.00001,
disabled="settings.qvalue",
hidden="!tagalign || settings.qvalue",
description="The p-value cutoff. If specified, MACS2 will use "
"p-value instead of q-value cutoff.",
)
cap_num = IntegerField(
label="Cap number of peaks by taking top N peaks",
default=500000,
disabled="settings.broad",
description="To keep all peaks set value to 0.",
)
mfold_lower = IntegerField(
label="MFOLD range (lower limit)",
required=False,
description="This parameter is used to select the regions "
"within MFOLD range of high-confidence enrichment ratio "
"against background to build model. The regions must be lower "
"than upper limit, and higher than the lower limit of fold "
"enrichment. DEFAULT:10,30 means using all regions not too "
"low (>10) and not too high (<30) to build paired-peaks "
"model. If MACS can not find more than 100 regions to build "
"model, it will use the --extsize parameter to continue the "
"peak detection ONLY if --fix-bimodal is set.",
)
mfold_upper = IntegerField(
label="MFOLD range (upper limit)",
required=False,
description="This parameter is used to select the regions "
"within MFOLD range of high-confidence enrichment ratio "
"against background to build model. The regions must be lower "
"than upper limit, and higher than the lower limit of fold "
"enrichment. DEFAULT:10,30 means using all regions not too "
"low (>10) and not too high (<30) to build paired-peaks "
"model. If MACS can not find more than 100 regions to build "
"model, it will use the --extsize parameter to continue the "
"peak detection ONLY if --fix-bimodal is set.",
)
slocal = IntegerField(
label="Small local region",
required=False,
description="Slocal and llocal parameters control which two "
"levels of regions will be checked around the peak regions to "
"calculate the maximum lambda as local lambda. By default, "
"MACS considers 1000bp for small local region (--slocal), and "
"10000bps for large local region (--llocal) which captures "
"the bias from a long range effect like an open chromatin "
"domain. You can tweak these according to your project. "
"Remember that if the region is set too small, a sharp spike "
"in the input data may kill the significant peak.",
)
llocal = IntegerField(
label="Large local region",
required=False,
description="Slocal and llocal parameters control which two "
"levels of regions will be checked around the peak regions to "
"calculate the maximum lambda as local lambda. By default, "
"MACS considers 1000bp for small local region (--slocal), and "
"10000bps for large local region (--llocal) which captures "
"the bias from a long range effect like an open chromatin "
"domain. You can tweak these according to your project. "
"Remember that if the region is set too small, a sharp spike "
"in the input data may kill the significant peak.",
)
extsize = IntegerField(
label="Extension size [--extsize]",
required=False,
description="While '--nomodel' is set, MACS uses this "
"parameter to extend reads in 5'->3' direction to fix-sized "
"fragments. For example, if the size of binding region for "
"your transcription factor is 200 bp, and you want to bypass "
"the model building by MACS, this parameter can be set as "
"200. This option is only valid when --nomodel is set or when "
"MACS fails to build model and --fix-bimodal is on.",
)
shift = IntegerField(
label="Shift",
required=False,
hidden="settings.format == 'BAMPE'",
description="Note, this is NOT the legacy --shiftsize option "
"which is replaced by --extsize! You can set an arbitrary "
"shift in bp here. Please Use discretion while setting it "
"other than default value (0). When --nomodel is set, MACS "
"will use this value to move cutting ends (5') then apply "
"--extsize from 5' to 3' direction to extend them to "
"fragments. When this value is negative, ends will be moved "
"toward 3'->5' direction, otherwise 5'->3' direction. "
"Recommended to keep it as default 0 for ChIP-Seq datasets, "
"or -1 * half of EXTSIZE together with --extsize option for "
"detecting enriched cutting loci such as certain DNAseI-Seq "
"datasets. Note, you can't set values other than 0 if format "
"is BAMPE for paired-end data. Default is 0.",
)
band_width = IntegerField(
label="Band width",
required=False,
description="The band width which is used to scan the genome "
"ONLY for model building. You can set this parameter as the "
"sonication fragment size expected from wet experiment. The "
"previous side effect on the peak detection process has been "
"removed. So this parameter only affects the model building.",
)
nolambda = BooleanField(
label="Use background lambda as local lambda",
default=False,
description="With this flag on, MACS will use the background "
"lambda as local lambda. This means MACS will not consider "
"the local bias at peak candidate regions.",
)
fix_bimodal = BooleanField(
label="Turn on the auto paired-peak model process",
default=False,
description="Turn on the auto paired-peak model process. If "
"it's set, when MACS failed to build paired model, it will "
"use the nomodel settings, the '--extsize' parameter to "
"extend each tag. If set, MACS will be terminated if "
"paired-peak model has failed.",
)
nomodel = BooleanField(
label="Bypass building the shifting model [--nomodel]",
default=False,
hidden="tagalign",
description="While on, MACS will bypass building the shifting "
"model.",
)
nomodel_prepeak = BooleanField(
label="Bypass building the shifting model [--nomodel]",
default=True,
hidden="!tagalign",
description="While on, MACS will bypass building the shifting "
"model.",
)
down_sample = BooleanField(
label="Down-sample",
default=False,
description="When set to true, random sampling method will "
"scale down the bigger sample. By default, MACS uses linear "
"scaling. This option will make the results unstable and "
"unreproducible since each time, random reads would be "
"selected, especially the numbers (pileup, pvalue, qvalue) "
"would change.",
)
bedgraph = BooleanField(
label="Save fragment pileup and control lambda",
default=True,
description="If this flag is on, MACS will store the fragment "
"pileup, control lambda, -log10pvalue and -log10qvalue scores "
"in bedGraph files. The bedGraph files will be stored in "
"current directory named NAME+'_treat_pileup.bdg' for "
"treatment data, NAME+'_control_lambda.bdg' for local lambda "
"values from control, NAME+'_treat_pvalue.bdg' for Poisson "
"pvalue scores (in -log10(pvalue) form), and "
"NAME+'_treat_qvalue.bdg' for q-value scores from "
"Benjamini-Hochberg-Yekutieli procedure.",
)
spmr = BooleanField(
label="Save fragment pileup and control lambda",
default=True,
disabled="settings.bedgraph === false",
)
call_summits = BooleanField(
label="Call summits [--call-summits]",
default=False,
description="MACS will now reanalyze the shape of signal "
"profile (p or q-score depending on cutoff setting) to "
"deconvolve subpeaks within each peak called from general "
"procedure. It's highly recommended to detect adjacent "
"binding events. While used, the output subpeaks of a big "
"peak region will have the same peak boundaries, and "
"different scores and peak summit positions.",
)
broad = BooleanField(
label="Composite broad regions [--broad]",
default=False,
disabled="settings.call_summits === true",
description="When this flag is on, MACS will try to composite "
"broad regions in BED12 (a gene-model-like format) by "
"putting nearby highly enriched regions into a broad region "
"with loose cutoff. The broad region is controlled by another "
"cutoff through --broad-cutoff. The maximum length of broad "
"region length is 4 times of d from MACS.",
)
broad_cutoff = FloatField(
label="Broad cutoff",
required=False,
disabled="settings.call_summits === true || settings.broad !== true",
description="Cutoff for broad region. This option is not "
"available unless --broad is set. If -p is set, this is a "
"p-value cutoff, otherwise, it's a q-value cutoff. DEFAULT = "
"0.1",
)
prepeakqc_settings = GroupField(PrepeakQC, label="Pre-peak QC settings")
settings = GroupField(Settings, label="MACS2 settings")
class Output:
"""Output fields to process Macs2."""
called_peaks = FileField(label="Called peaks")
narrow_peaks = FileField(label="Narrow peaks", required=False)
chip_qc = FileField(label="QC report", required=False)
case_prepeak_qc = FileField(label="Pre-peak QC report (case)")
case_tagalign = FileField(label="Filtered tagAlign (case)")
case_bam = FileField(label="Filtered BAM (case)")
case_bai = FileField(label="Filtered BAM index (case)")
control_prepeak_qc = FileField(
label="Pre-peak QC report (control)", required=False
)
control_tagalign = FileField(
label="Filtered tagAlign (control)", required=False
)
control_bam = FileField(label="Filtered BAM (control)", required=False)
control_bai = FileField(label="Filtered BAM index (control)", required=False)
narrow_peaks_bigbed_igv_ucsc = FileField(
label="Narrow peaks (BigBed)", required=False
)
summits = FileField(label="Peak summits", required=False)
summits_tbi_jbrowse = FileField(
label="Peak summits tbi index for JBrowse", required=False
)
summits_bigbed_igv_ucsc = FileField(label="Summits (bigBed)", required=False)
broad_peaks = FileField(label="Broad peaks", required=False)
gappedPeak = FileField(label="Broad peaks (bed12/gappedPeak)", required=False)
treat_pileup = FileField(label="Treatment pileup (bedGraph)", required=False)
treat_pileup_bigwig = FileField(
label="Treatment pileup (bigWig)", required=False
)
control_lambda = FileField(label="Control lambda (bedGraph)", required=False)
control_lambda_bigwig = FileField(
label="Control lambda (bigwig)", required=False
)
model = FileField(label="Model", required=False)
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run the analysis."""
TMPDIR = os.environ.get("TMPDIR")
# Allow Java to allocate 80% of the maximum memory available in
# the container because the process itself uses some memory.
java_memory = self.requirements.resources.memory // 1024 // 1.25
os.environ["_JAVA_OPTIONS"] = f"-Xms256M -Xmx{java_memory}g"
if inputs.settings.broad and inputs.settings.call_summits:
self.error(
"Composite broad regions [--broad] can't be combined with Call summits "
"[--call-summits]."
)
if inputs.settings.shift and inputs.settings.format == "BAMPE":
self.error(
"Shift values other than 0 are not supported when the format is BAMPE."
)
if (
inputs.settings.mfold_lower is not None
and inputs.settings.mfold_upper is None
):
self.error(
"MFOLD range should be set both for upper and lower limit, but only the lower "
"limit was set."
)
if (
inputs.settings.mfold_lower is None
and inputs.settings.mfold_upper is not None
):
self.error(
"MFOLD range should be set both for upper and lower limit, but only the upper "
"limit was set."
)
if inputs.control:
if inputs.case.output.species != inputs.control.output.species:
self.error(
"All input BAM files must share the same genome species information. BAM "
f"{inputs.case.name} has {inputs.case.output.species} while "
f"{inputs.control.name} has {inputs.control.output.species} species "
"information."
)
if inputs.case.output.build != inputs.control.output.build:
self.error(
"All input BAM files must share the same genome build information. BAM"
f"{inputs.case.name} has {inputs.case.output.build} while "
f"{inputs.control.name} has {inputs.control.output.build} build information."
)
if inputs.promoter:
if inputs.promoter.output.species != inputs.case.output.species:
self.error(
"All input files must share the same genome species information. Case BAM "
f"has {inputs.case.output.species} while promoter BED has "
f"{inputs.promoter.output.species} species information."
)
if inputs.promoter.output.build != inputs.case.output.build:
self.error(
"All input files must share the same genome build information. Case BAM "
f"has {inputs.case.output.build} while promoter BED has"
f"{inputs.promoter.output.build} build information."
)
try:
gsize = SPECIES_GSIZES[inputs.case.output.species]
except KeyError:
self.error(
f"Species {inputs.case.output.species} is not supported by the MACS 2.0 process. "
f"Supported species are: {', '.join(SPECIES_GSIZES.keys())}"
)
self.progress(0.1)
samples = [inputs.case, inputs.control] if inputs.control else [inputs.case]
for alignment in samples:
bam_path = Path(alignment.output.bam.path)
is_control = alignment == inputs.control if inputs.control else False
is_paired = test_paired_end(bam_file=bam_path, error=self.error)
if not is_paired and inputs.settings.format == "BAMPE":
self.error(
"No paired-end reads were detected but BAMPE format was selected."
)
name = f"{bam_path.stem}_background" if is_control else f"{bam_path.stem}"
# Set case name and layout for post-qc analysis.
if not is_control:
case_name = name
case_paired = is_paired
if alignment.type.startswith("data:alignment:bam:bwaaln"):
with open(alignment.output.stats.path, "r") as handle:
report = handle.read()
metrics = parse_flagstat(report=report, warning=self.warning)
elif alignment.type.startswith("data:alignment:bam:bowtie2"):
return_code, n_mapped, _ = Cmd["samtools"][
"view", "-c", str(bam_path)
] & TEE(retcode=None)
if return_code:
n_mapped = "NA"
self.warning(
"Failed to determine number of mapped reads based on Bowtie2 alignment."
)
else:
n_mapped = int(n_mapped.strip())
metrics = parse_bowtie2_report(
report_path=alignment.output.stats.path,
n_mapped=n_mapped,
warning=self.warning,
)
else:
return_code, report, _ = Cmd["samtools"]["flagstat"][
str(bam_path)
] & TEE(retcode=None)
if return_code:
self.error(f"Samtools flagstat failed for {name}")
metrics = parse_flagstat(report=report, warning=self.warning)
# Remove unmapped reads, not primary alignments, and reads below minimum mapping quality
filtered_bam = f"{name}_filtered.bam"
filter_bam(
bam_file=str(bam_path),
out_bam=filtered_bam,
min_quality=inputs.prepeakqc_settings.q_threshold,
is_paired=is_paired,
name=name,
error=self.error,
)
if is_paired:
temp_bam = f"{name}_tmp.bam"
name_sort_bam(in_bam=filtered_bam, out_bam=temp_bam, error=self.error)
fixmate_bam = f"{case_name}_fixmate.bam"
fixmate_params = ["-r", temp_bam, fixmate_bam]
return_code, stdout, stderr = Cmd["samtools"]["fixmate"][
fixmate_params
] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error(f"Samtools fixmate failed for {alignment.name}.")
filter_bam(
bam_file=fixmate_bam,
out_bam=temp_bam,
min_quality=0,
is_paired=is_paired,
name=name,
error=self.error,
)
# Sort by position
sort_params = ["-o", filtered_bam, temp_bam]
return_code, stdout, stderr = Cmd["samtools"]["sort"][
sort_params
] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error(f"Samtools sorting failed for {alignment.name}.")
markdup_bam = f"{name}_markdup.bam"
markdup_stats = f"{name}_duplicates_metrics.txt"
markdup_params = [
f"INPUT={filtered_bam}",
f"OUTPUT={markdup_bam}",
f"METRICS_FILE={markdup_stats}",
"VALIDATION_STRINGENCY=LENIENT",
"ASSUME_SORTED=true",
"REMOVE_DUPLICATES=false",
f"TMP_DIR={TMPDIR}",
]
return_code, stdout, stderr = Cmd["java"]["-jar"][
"/opt/broadinstitute/picard-tools/picard.jar"
]["MarkDuplicates"][markdup_params] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error(
f"Picard-tools MarkDuplicates processing failed for {alignment.name}."
)
metrics = merge_dict(
metrics,
parse_markdup(markdup_stats),
)
# Name sort for PBC bottlenecking calculation
if is_paired:
name_sort_bam(in_bam=markdup_bam, out_bam=temp_bam, error=self.error)
alignment_bed = f"{name}.bed"
metrics = merge_dict(
metrics,
get_pbc_metrics(
bam_file=temp_bam if is_paired else markdup_bam,
name=name,
out_bed=alignment_bed,
is_paired=is_paired,
error=self.error,
),
)
# Remove unmapped reads, not primary alignments, and duplicate reads
filter_bam(
bam_file=markdup_bam,
out_bam=filtered_bam,
min_quality=0,
is_paired=is_paired,
name=name,
error=self.error,
)
# Remove redundant bam with marked duplicates.
Path(markdup_bam).unlink()
if is_paired:
name_sort_bam(in_bam=filtered_bam, out_bam=temp_bam, error=self.error)
convert_bam(
bam_file=temp_bam if is_paired else filtered_bam,
name=name,
out_bed=alignment_bed,
is_paired=is_paired,
error=self.error,
first_mate=True,
)
# Remove redundant temporary bam.
if is_paired:
Path(temp_bam).unlink()
tagalign = f"{name}.tagAlign.gz"
prepare_tagalign(
bed_file=alignment_bed,
tagalign_file=tagalign,
is_paired=is_paired,
tn5_shift=inputs.prepeakqc_settings.tn5,
compression="gzip",
)
subsampled_tagalign = f"{name}_subsampled.tagAlign.gz"
subsample_tagalign(
tagalign_file=tagalign,
out_file=subsampled_tagalign,
is_paired=is_paired,
n_sub=inputs.prepeakqc_settings.n_sub,
compression="gzip",
)
cross_correlation_report = f"{name}_cc_score.txt"
spp_params = [
f"-c={subsampled_tagalign}",
f"-p={self.requirements.resources.cores}",
"-filtchr=chrM",
f"-out={cross_correlation_report}",
]
if inputs.prepeakqc_settings.shift is not None:
spp_params.append(f"-speak={inputs.prepeakqc_settings.shift}")
return_code, stdout, stderr = Cmd["spp"][spp_params] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error(f"SPP processing failed for {alignment.name}.")
cc_report = pd.read_csv(
cross_correlation_report,
sep="\t",
index_col=0,
names=SPP_HEADER,
dtype="str",
)
# Some columns have top 3 predictions and only the first one is needed.
cc_metrics = cc_report.apply(lambda col: col.str.split(",").str[0])
# Use case's estimated fragment length only.
if not is_control:
fraglen_estimates = cc_report.loc[
subsampled_tagalign, "Est. Fragment Len."
]
estimate_list = [int(x) for x in fraglen_estimates.split(",")]
frag_len = get_frag_len(estimates=estimate_list)
if inputs.tagalign and inputs.settings.extsize is None:
if not frag_len:
self.error(
"Failed to estimate fragment length. No estimates were larger than "
f"zero. The top estimates were: {fraglen_estimates}. Please manually "
"specify the Extension size [--extsize] parameter."
)
elif frag_len != estimate_list[0]:
self.warning(
"SPP estimated negative fragment length which can not be used by "
f"MACS2. Using {frag_len} from the top estimates "
f"({fraglen_estimates}) as the estimate of extension size [--extsize] "
"for MACS2."
)
# When not using tagAlign as an input we need to keep the first
# estimate for post-peak QC steps.
if (
not inputs.tagalign
and inputs.settings.extsize is None
and frag_len is None
):
frag_len = estimate_list[0]
self.warning(
"No fragment length estimate was greater than 0. Using the first "
f"estimate: {frag_len} for read shifting in post-peak QC."
)
metrics = merge_dict(metrics, cc_metrics.loc[subsampled_tagalign].to_dict())
prepeak_qc_report = f"{name}_prepeak_qc_report.txt"
save_prepeak_qc(metrics=metrics, out_file=prepeak_qc_report)
filtered_bai = f"{filtered_bam}.bai"
return_code, stdout, stderr = Cmd["samtools"]["index"][
filtered_bam, filtered_bai
] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error(f"Samtools index failed for {filtered_bam}.")
if is_control:
outputs.control_prepeak_qc = prepeak_qc_report
outputs.control_bam = filtered_bam
outputs.control_bai = filtered_bai
control_tagalign = rename_tagalign(
file=tagalign, name=name, tn5_shifted=inputs.prepeakqc_settings.tn5
)
outputs.control_tagalign = control_tagalign
else:
outputs.case_prepeak_qc = prepeak_qc_report
outputs.case_bam = filtered_bam
outputs.case_bai = filtered_bai
case_tagalign = rename_tagalign(
file=tagalign, name=name, tn5_shifted=inputs.prepeakqc_settings.tn5
)
outputs.case_tagalign = case_tagalign
self.progress(0.6)
callpeak_params = [
"-t",
case_tagalign if inputs.tagalign else inputs.case.output.bam.path,
"-f",
"BED" if inputs.tagalign else inputs.settings.format,
"-n",
case_name,
"--gsize",
gsize,
"--verbose",
3,
"--outdir",
"./",
]
if inputs.control:
callpeak_params.extend(
[
"-c",
control_tagalign
if inputs.tagalign
else inputs.control.output.bam.path,
]
)
if inputs.settings.duplicates or inputs.tagalign:
callpeak_params.extend(
[
"--keep-dup",
inputs.settings.duplicates or inputs.settings.duplicates_prepeak,
]
)
if inputs.settings.qvalue is not None and inputs.settings.pvalue is None:
callpeak_params.extend(["-q", inputs.settings.qvalue])
if inputs.settings.pvalue is not None and inputs.settings.qvalue is None:
callpeak_params.extend(["-p", inputs.settings.pvalue])
elif inputs.tagalign and inputs.settings.qvalue is None:
callpeak_params.extend(["-p", inputs.settings.pvalue_prepeak])
if inputs.settings.mfold_lower is not None:
callpeak_params.extend(
[
"-m",
inputs.settings.mfold_lower,
inputs.settings.mfold_upper,
]
)
if inputs.settings.nolambda:
callpeak_params.append("--nolambda")
if inputs.settings.slocal is not None:
callpeak_params.extend(["--slocal", inputs.settings.slocal])
if inputs.settings.llocal is not None:
callpeak_params.extend(["--llocal", inputs.settings.llocal])
if inputs.settings.fix_bimodal:
callpeak_params.append("--fix-bimodal")
if inputs.settings.nomodel or (
inputs.settings.nomodel_prepeak and inputs.tagalign
):
callpeak_params.append("--nomodel")
if inputs.settings.extsize is not None:
callpeak_params.extend(["--extsize", inputs.settings.extsize])
elif inputs.tagalign:
callpeak_params.extend(["--extsize", frag_len])
if inputs.settings.shift is not None:
callpeak_params.extend(["--shift", inputs.settings.shift])
if inputs.settings.band_width is not None:
callpeak_params.extend(["--bw", inputs.settings.band_width])
if inputs.settings.broad:
callpeak_params.append("--broad")
if inputs.settings.broad_cutoff is not None:
callpeak_params.extend(["--broad-cutoff", inputs.settings.broad_cutoff])
if inputs.settings.down_sample:
callpeak_params.append("--down-sample")
if inputs.settings.bedgraph:
callpeak_params.append("-B")
if inputs.settings.spmr:
callpeak_params.append("--SPMR")
if inputs.settings.call_summits:
callpeak_params.append("--call-summits")
return_code, stdout, stderr = Cmd["macs2"]["callpeak"][callpeak_params] & TEE(
retcode=None
)
if return_code:
print(stdout, stderr)
self.error("MACS2 processing failed.")
print(stderr)
warning_lines = [line for line in stderr.split("\n") if "WARNING" in line]
if warning_lines:
macs2_warning = ", ".join(warning_lines[:3]) + (
", ..." if len(warning_lines) > 3 else ""
)
self.warning(f"MACS2 reported warnings: {macs2_warning}")
self.progress(0.7)
model_script = f"{case_name}_model.r"
if Path(model_script).is_file():
return_code, _, _ = Cmd["Rscript"][model_script] & TEE(retcode=None)
if return_code:
self.error(f"Running R script {model_script} failed.")
outputs.model = f"{case_name}_model.pdf"
self.progress(0.8)
outputs.called_peaks = f"{case_name}_peaks.xls"
# Get chromosome sizes file for bed to BigBed transformation.
return_code, idxstats, stderr = Cmd["samtools"]["idxstats"][
inputs.case.output.bam.path
] & TEE(retcode=None)
if return_code:
print(stderr)
self.error("Samtools idxstats failed")
chromosome_sizes = "chrom.sizes"
with open(chromosome_sizes, "w") as sizes_file:
sizes_file.write("\n".join(idxstats.split("\n")[:-1]))
if inputs.settings.broad:
outputs.broad_peaks = f"{case_name}_peaks.broadPeak"
outputs.gappedPeak = f"{case_name}_peaks.gappedPeak"
else:
# Maximize 5th column of narrowPeak and summits files to 1000.
narrow_peak = f"{case_name}_peaks.narrowPeak"
corrected_narrow_peak = f"corrected_{narrow_peak}"
correct_bed_file(
in_bed=narrow_peak, out_bed=corrected_narrow_peak, error=self.error
)
summits = f"{case_name}_summits.bed"
corrected_summits = f"corrected_{summits}"
correct_bed_file(
in_bed=summits, out_bed=corrected_summits, error=self.error
)
narrow_peak_bb = f"{case_name}_peaks_narrowPeak.bb"
create_big_bed(
in_file=corrected_narrow_peak,
out_bb=narrow_peak_bb,
chromosome_sizes=chromosome_sizes,
file_type="narrowPeak",
error=self.error,
)
summits_bb = f"{case_name}_summits.bb"
create_big_bed(
in_file=corrected_summits,
out_bb=summits_bb,
chromosome_sizes=chromosome_sizes,
file_type="summits.bed",
error=self.error,
)
# Create tabix index for summits.bed file for JBrowse.
summits_gz = f"{summits}.gz"
(Cmd["bgzip"]["-c", summits] > summits_gz)()
return_code, stdout, stderr = Cmd["tabix"]["-p", "bed", summits_gz] & TEE(
retcode=None
)
if return_code:
print(stdout, stderr)
self.error("Summits.bed tabix processing for JBrowse failed.")
outputs.narrow_peaks_bigbed_igv_ucsc = narrow_peak_bb
outputs.summits = summits_gz
outputs.summits_tbi_jbrowse = f"{summits_gz}.tbi"
outputs.summits_bigbed_igv_ucsc = summits_bb
if inputs.settings.bedgraph:
pileup_bdg = f"{case_name}_treat_pileup.bdg"
outputs.treat_pileup = pileup_bdg
pileup_bw = f"{case_name}_treat_pileup.bw"
create_bigwig(
in_bdg=pileup_bdg,
out_bw=pileup_bw,
chromosome_sizes=chromosome_sizes,
file_type="treat_pileup.bgd bedGraph",
error=self.error,
)
outputs.treat_pileup_bigwig = pileup_bw
control_lambda_bdg = f"{case_name}_control_lambda.bdg"
outputs.control_lambda = control_lambda_bdg
control_lambda_bw = f"{case_name}_control_lambda.bw"
create_bigwig(
in_bdg=control_lambda_bdg,
out_bw=control_lambda_bw,
chromosome_sizes=chromosome_sizes,
file_type="control_lambda.bgd bedGraph",
error=self.error,
)
outputs.control_lambda_bigwig = control_lambda_bw
if not inputs.settings.broad:
process_narrow_peaks(file=narrow_peak, cap_number=inputs.settings.cap_num)
# Use either filtered tagAlign file or compute regular tagAlign file for the case sample.
if not inputs.tagalign:
if case_paired:
temp_case_bam = f"{case_name}_tmp.bam"
name_sort_bam(
in_bam=inputs.case.output.bam.path,
out_bam=temp_case_bam,
error=self.error,
)
fixmate_bam = f"{case_name}_fixmate.bam"
fixmate_params = ["-r", temp_case_bam, fixmate_bam]
return_code, stdout, stderr = Cmd["samtools"]["fixmate"][
fixmate_params
] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error(f"Samtools fixmate failed for {case_name}.")
args = ["-f", "2", "-o", temp_case_bam, fixmate_bam]
return_code, stdout, stderr = Cmd["samtools"]["view"][args] & TEE(
retcode=None
)
if return_code:
print(stdout, stderr)
self.error(f"Samtools view failed for {case_name}.")
temp_case_bed = f"{case_name}_tmp.bed"
bam_to_convert = (
temp_case_bam if is_paired else inputs.case.output.bam.path
)
convert_bam(
bam_file=bam_to_convert,
out_bed=temp_case_bed,
name=case_name,
is_paired=case_paired,
error=self.error,
)
case_tagalign = f"{case_name}_nonfiltered.tagAlign"
prepare_tagalign(
bed_file=temp_case_bed,
tagalign_file=case_tagalign,
is_paired=is_paired,
tn5_shift=False,
)
# Shift reads on both strands for half of the fragment length in opposite directions.
shifted_tagaling = f"{case_name}_shifted.tagAlign"
n_reads = shift_reads(
tagalign=case_tagalign,
out_name=shifted_tagaling,
chromosome_sizes=chromosome_sizes,
frag_len=inputs.settings.extsize or frag_len,
error=self.error,
)
# Calculate post-peakcall QC metrics
post_peak_qc = {
"FRiP": "N/A",
"NUMBER_OF_PEAKS": count_lines(narrow_peak),
"NUMBER_OF_READS_IN_PROMOTERS": "N/A",
"FRACTION_OF_READS_IN_PROMOTERS": "N/A",
"NUMBER_OF_PEAKS_IN_PROMOTERS": "N/A",
"FRACTION_OF_PEAKS_IN_PROMOTERS": "N/A",
}
n_reads_peaks = count_overlap(
file_a=shifted_tagaling, file_b=narrow_peak, error=self.error
)
post_peak_qc["FRiP"] = round(n_reads_peaks / n_reads, 3)
if inputs.promoter:
post_peak_qc["NUMBER_OF_READS_IN_PROMOTERS"] = count_overlap(
file_a=shifted_tagaling,
file_b=inputs.promoter.output.bed.path,
error=self.error,
)
post_peak_qc["FRACTION_OF_READS_IN_PROMOTERS"] = round(
post_peak_qc["NUMBER_OF_READS_IN_PROMOTERS"] / n_reads, 3
)
post_peak_qc["NUMBER_OF_PEAKS_IN_PROMOTERS"] = count_overlap(
file_a=narrow_peak,
file_b=inputs.promoter.output.bed.path,
error=self.error,
)
post_peak_qc["FRACTION_OF_PEAKS_IN_PROMOTERS"] = round(
post_peak_qc["NUMBER_OF_PEAKS_IN_PROMOTERS"]
/ post_peak_qc["NUMBER_OF_PEAKS"],
3,
)
qc_file = f"{case_name}_postpeak_qc_report.txt"
pd.Series(post_peak_qc, dtype="object").to_frame().T.to_csv(
qc_file, sep="\t", index=False
)
return_code, _, _ = Cmd["gzip"][narrow_peak] & TEE(retcode=None)
if return_code:
self.error("Compression of narrowPeaks file failed.")
outputs.narrow_peaks = f"{narrow_peak}.gz"
outputs.chip_qc = qc_file
outputs.build = inputs.case.output.build
outputs.species = inputs.case.output.species | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/processes/chip_seq/macs2.py | 0.585338 | 0.382833 | macs2.py | pypi |
from pathlib import Path
from shutil import copyfile
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
DirField,
FileField,
GroupField,
IntegerField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class ChipQC(Process):
    """Calculate quality control metrics for ChIP-seq samples.

    The analysis is based on ChIPQC package which computes a variety of
    quality control metrics and statistics, and provides plots and
    a report for assessment of experimental data for further analysis.
    """

    slug = "chipqc"
    name = "ChipQC"
    process_type = "data:chipqc"
    # Bumped from 1.4.2: guard against unsupported peak types and fix
    # user-facing typos.
    version = "1.4.3"
    category = "ChIP-seq"
    data_name = "{{ alignment|name|default('?') }}"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/chipseq:6.1.0"}
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }

    class Input:
        """Input fields to process ChipQC."""

        alignment = DataField(
            data_type="alignment:bam",
            label="Aligned reads",
        )
        peaks = DataField(
            data_type="chipseq:callpeak",
            label="Called peaks",
        )
        blacklist = DataField(
            data_type="bed",
            label="Blacklist regions",
            description="BED file containing genomic regions that should be "
            "excluded from the analysis.",
            required=False,
        )
        calculate_enrichment = BooleanField(
            label="Calculate enrichment",
            description="Calculate enrichment of signal in known genomic "
            "annotation. By default annotation is provided from "
            "the TranscriptDB package specified by genome build "
            "which should match one of the supported annotations "
            "(hg19, hg38, hg18, mm10, mm9, rn4, ce6, dm3). If "
            "annotation is not supported the analysis is skipped.",
            default=False,
        )

        class Advanced:
            """Add advanced list of options."""

            quality_threshold = IntegerField(
                label="Mapping quality threshold",
                description="Only reads with mapping quality scores above "
                "this threshold will be used for some statistics.",
                default=15,
            )
            profile_window = IntegerField(
                label="Window size",
                description="An integer indicating the width of the window "
                "used for peak profiles. Peaks will be centered "
                "on their summits and include half of the window "
                "size upstream and half downstream of this point.",
                default=400,
            )
            shift_size = StringField(
                label="Shift size",
                description="Vector of values to try when computing optimal "
                "shift sizes. It should be specified as "
                "consecutive numbers vector with start:end",
                default="1:300",
            )

        advanced = GroupField(
            Advanced,
            label="Advanced parameters",
        )

    class Output:
        """Output fields to process ChipQC."""

        report_folder = DirField(label="ChipQC report folder")
        ccplot = FileField(label="Cross coverage score plot")
        coverage_histogram = FileField(label="SSD metric plot")
        peak_profile = FileField(label="Peak profile plot")
        peaks_barplot = FileField(label="Barplot of reads in peaks")
        peaks_density_plot = FileField(label="Density plot of reads in peaks")
        enrichment_heatmap = FileField(
            label="Heatmap of reads in genomic features", required=False
        )
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Aligned reads and called peaks must come from the same sample.
        if inputs.alignment.entity_name != inputs.peaks.entity_name:
            self.error(
                "Sample names of aligned reads and called peaks do not "
                f"match. Alignment has {inputs.alignment.entity_name}, "
                f"while called peaks have {inputs.peaks.entity_name}."
            )

        basename = Path(inputs.alignment.output.bam.path).name
        assert basename.endswith(".bam")
        name = basename[:-4]
        report_folder = f"{name}_ChIPQCreport"

        # MACS 1.4 stores peaks as a gzipped BED file which must be
        # decompressed first; MACS2 provides the called peaks file directly.
        if inputs.peaks.type == "data:chipseq:callpeak:macs14:":
            peaks_basename = Path(inputs.peaks.output.peaks_bed.path).name
            assert peaks_basename.endswith(".bed.gz")
            peaks_name = peaks_basename[:-7]
            peaks_file = f"{peaks_name}.bed"
            (Cmd["pigz"]["-cd", inputs.peaks.output.peaks_bed.path] > peaks_file)()
        elif inputs.peaks.type == "data:chipseq:callpeak:macs2:":
            peaks_file = inputs.peaks.output.called_peaks.path
        else:
            # Previously an unsupported type fell through and left
            # ``peaks_file`` undefined, raising NameError later on.
            self.error(
                f"Called peaks of type {inputs.peaks.type} are not supported."
            )

        if inputs.alignment.output.build != inputs.peaks.output.build:
            self.error(
                "All input files must share the same build. "
                f"Alignment is based on {inputs.alignment.output.build} while "
                f"called peaks are based on {inputs.peaks.output.build}."
            )

        build = inputs.alignment.output.build
        genome_list = ["hg19", "hg38", "hg18", "mm10", "mm9", "rn4", "ce6", "dm3"]

        # Enrichment annotation is available only for builds shipped with
        # ChIPQC; fall back to NULL (skip) otherwise.
        annotation = "NULL"
        if inputs.calculate_enrichment:
            if build in genome_list:
                annotation = f'"{build}"'
            else:
                self.warning(
                    f"Annotation for {build} is not supported. "
                    f'Supported builds are: {", ".join(genome_list)}.'
                )

        if inputs.blacklist:
            blacklist = f'"{inputs.blacklist.output.bed.path}"'
        else:
            blacklist = "NULL"

        # Assemble the R expression evaluated by Rscript -e.
        r_input = (
            'library("ChIPQC"); '
            "sample <- ChIPQCsample("
            f'"{inputs.alignment.output.bam.path}",'
            f'peaks="{peaks_file}",'
            f"annotation={annotation},"
            f"mapQCth={inputs.advanced.quality_threshold},"
            f"blacklist={blacklist},"
            f"profileWin={inputs.advanced.profile_window},"
            f"shifts={inputs.advanced.shift_size});"
            "ChIPQCreport("
            "sample,"
            f'reportName="{name}_ChIPQC",'
            f'reportFolder="{report_folder}")'
        )

        return_code, _, _ = Cmd["Rscript"]["-e"][r_input] & TEE(retcode=None)
        if return_code:
            self.error("Error while running ChIPQC.")

        # Copy report plots with the ``_mqc`` suffix so MultiQC picks them up.
        plots = Path(report_folder).glob("*.png")
        for plot_path in plots:
            copyfile(plot_path, f"{plot_path.stem}_mqc.png")

        outputs.ccplot = "CCPlot_mqc.png"
        outputs.coverage_histogram = "CoverageHistogramPlot_mqc.png"
        outputs.peak_profile = "PeakProfile_mqc.png"
        outputs.peaks_barplot = "Rip_mqc.png"
        outputs.peaks_density_plot = "Rap_mqc.png"
        outputs.report_folder = report_folder
        outputs.species = inputs.alignment.output.species
        outputs.build = build
        # The enrichment heatmap is produced only for supported annotations.
        if Path("GenomicFeatureEnrichment_mqc.png").is_file():
            outputs.enrichment_heatmap = "GenomicFeatureEnrichment_mqc.png"
import gzip
import io
import json
import os
import pandas as pd
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
DirField,
FileField,
FloatField,
GroupField,
IntegerField,
JsonField,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
def parse_transcript_exp(infile, outfile):
    """Parse transcript-level expressions from Salmon output.

    Read ``Name`` and ``TPM`` columns from a Salmon ``quant.sf`` file and
    write them as a gzipped two-column (Transcript/Expression) table.
    """
    # ``squeeze=True`` was removed (deprecated in pandas 1.4, gone in 2.0);
    # a one-column frame serializes identically with the supplied header.
    exp = pd.read_csv(
        infile,
        sep="\t",
        usecols=["Name", "TPM"],
        index_col="Name",
        dtype={
            "Name": str,
            "TPM": float,
        },
    )
    return exp.to_csv(
        outfile,
        index_label="Transcript",
        header=["Expression"],
        sep="\t",
        compression="gzip",
    )
def expression_to_storage(exp_input, exp_output):
    """Convert a gzipped two-column expression table to JSON format.

    Rows that do not have exactly two tab-separated fields, or whose second
    field is not numeric, are silently skipped.
    """

    def _as_float(token):
        """Return the float value of *token*, or None when not numeric."""
        try:
            return float(token)
        except ValueError:
            return None

    genes = {}
    with gzip.open(exp_input, mode="rt") as handle:
        for line in handle:
            fields = line.split("\t")
            if len(fields) != 2:
                continue
            value = _as_float(fields[1])
            if value is not None:
                genes[fields[0]] = value

    with open(file=exp_output, mode="wt") as handle:
        json.dump({"genes": genes}, handle)
    return exp_output
def rename_cols(infile, outfile, abundance_unit):
    """Rename columns in expression file.

    Read a tab-separated file with ``Gene`` and ``Expression`` columns and
    write it back with a ``FEATURE_ID`` index label and *abundance_unit*
    (e.g. TPM or CPM) as the expression column header.
    """
    # ``squeeze=True`` was removed (deprecated in pandas 1.4, gone in 2.0)
    # and ``skip_blank_lines=True`` is the pandas default; output is
    # unchanged.
    exp = pd.read_csv(
        infile,
        sep="\t",
        usecols=["Gene", "Expression"],
        index_col="Gene",
        dtype={
            "Gene": str,
            "Expression": float,
        },
    )
    return exp.to_csv(
        outfile,
        index_label="FEATURE_ID",
        header=[abundance_unit],
        sep="\t",
    )
def prepare_expression_set(infile, abundance_unit, feature_dict, outfile_name):
    """Prepare expression set output data.

    Read the expression table from ``infile``, attach gene symbols from
    ``feature_dict`` and write the result to ``<outfile_name>.txt.gz`` and
    ``<outfile_name>.json``.
    """
    exp = pd.read_csv(infile, sep="\t", float_precision="round_trip")
    exp["FEATURE_ID"] = exp["FEATURE_ID"].astype("str")
    exp["GENE_SYMBOL"] = exp["FEATURE_ID"].map(feature_dict)
    input_features = exp["FEATURE_ID"].tolist()
    # Check if all of the input feature IDs could be mapped to the gene symbols
    if not all(f_id in feature_dict for f_id in input_features):
        print(
            f"{sum(exp.isnull().values.ravel())} feature(s) "
            f"could not be mapped to the associated feature symbols."
        )
    # Reorder columns and replace missing gene symbols with empty strings.
    # Selecting into a new frame (instead of fillna(inplace=True) on a
    # column slice) avoids pandas chained-assignment warnings.
    columns = ["FEATURE_ID", "GENE_SYMBOL", abundance_unit]
    exp_set = exp[columns].fillna("")
    # Write to file
    exp_set.to_csv(
        outfile_name + ".txt.gz",
        header=True,
        index=False,
        sep="\t",
        compression="gzip",
    )
    # Write to JSON
    df_dict = exp_set.set_index("FEATURE_ID").to_dict(orient="index")
    with open(outfile_name + ".json", "w") as f:
        json.dump({"genes": df_dict}, f, allow_nan=False)
class SalmonQuant(ProcessBio):
    """Perform mapping-based estimation of transcript abundance from RNA-seq reads.

    Final abundance estimates are summarized to the gene-level using
    [tximport](https://bioconductor.org/packages/release/bioc/html/tximport.html).
    """

    slug = "salmon-quant"
    name = "Salmon Quant"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0",
            },
        },
        "resources": {
            "cores": 4,
            "memory": 32768,
            "network": True,
        },
    }
    data_name = "{{ reads|name|default('?') }}"
    # Bumped from 2.7.1: fixed field label typos, report-file handle leak
    # and error message wording.
    version = "2.7.2"
    process_type = "data:expression:salmon"
    category = "Quantify"
    entity = {
        "type": "sample",
    }
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields."""

        reads = DataField("reads:fastq", label="Input sample(s)")
        salmon_index = DataField("index:salmon", label="Salmon index")
        annotation = DataField("annotation:gtf", label="GTF annotation")

        class Options:
            """Options."""

            stranded = StringField(
                label="Assay type",
                default="A",
                choices=[
                    ("A", "Detect automatically"),
                    ("U", "Strand non-specific (U)"),
                    ("SF", "Strand-specific forward (SF)"),
                    ("SR", "Strand-specific reverse (SR)"),
                    ("IU", "Strand non-specific (paired-end IU)"),
                    ("ISF", "Strand-specific forward (paired-end ISF)"),
                    ("ISR", "Strand-specific reverse (paired-end ISR)"),
                ],
            )
            seq_bias = BooleanField(
                label="--seqBias",
                default=False,
                description="Perform sequence-specific bias correction.",
            )
            gc_bias = BooleanField(
                label="--gcBias",
                default=False,
                description="[beta for single-end reads] Perform fragment GC bias correction.",
            )
            discard_orphans_quasi = BooleanField(
                label="--discardOrphansQuasi",
                default=False,
                description="Discard orphan mappings in quasi-mapping mode. "
                "If this flag is passed then only paired "
                "mappings will be considered toward "
                "quantification estimates. The default "
                "behavior is to consider orphan mappings "
                "if no valid paired mappings exist.",
            )
            no_length_correction = BooleanField(
                label="--noLengthCorrection",
                default=False,
                description="[Experimental] Entirely disables "
                "length correction when estimating the "
                "abundance of transcripts. The abundance "
                "estimates are reported in CPM (counts per "
                "million) unit. This option can be used "
                "with protocols where one expects that "
                "fragments derive from their underlying "
                "targets without regard to that target's "
                "length (e.g. QuantSeq).",
            )
            consensus_slack = FloatField(
                label="--consensusSlack",
                required=False,
                description="The amount of slack allowed in the quasi-mapping "
                "consensus mechanism. Normally, a transcript must "
                "cover all hits to be considered for mapping. "
                "If this is set to a fraction, X, greater than 0 "
                "(and in [0,1)), then a transcript can fail "
                "to cover up to (100 * X)% of the hits before it "
                "is discounted as a mapping candidate. The default "
                "value of this option is 0.2 in selective alignment mode "
                "and 0 otherwise.",
            )
            min_score_fraction = FloatField(
                label="--minScoreFraction",
                default=0.65,
                description="The fraction of the optimal possible alignment "
                "score that a mapping must achieve in order to be "
                "considered valid - should be in (0,1]",
            )
            incompat_prior = FloatField(
                label="--incompatPrior",
                default=0,
                description="This option sets the prior probability "
                "that an alignment that disagrees with "
                "the specified library type (--libType) "
                "results from the true fragment origin. "
                "Setting this to 0 specifies that "
                "alignments that disagree with the "
                "library type should be impossible, "
                "while setting it to 1 says that "
                "alignments that disagree with the "
                "library type are no less likely than "
                "those that do.",
            )
            range_factorization_bins = IntegerField(
                label="--rangeFactorizationBins",
                default=4,
                description="Factorizes the likelihood used in "
                "quantification by adopting a new notion "
                "of equivalence classes based on the "
                "conditional probabilities with which "
                "fragments are generated from different "
                "transcripts. This is a more "
                "fine-grained factorization than the "
                "normal rich equivalence classes. The "
                "default value (4) corresponds to the "
                "default used in Zakeri et al. 2017 "
                "and larger values imply a more "
                "fine-grained factorization. If range "
                "factorization is enabled, a common "
                "value to select for this parameter is "
                "4. A value of 0 signifies the use of "
                "basic rich equivalence classes.",
            )
            min_assigned_frag = IntegerField(
                label="--minAssignedFrags",
                default=10,
                description="The minimum number of fragments that "
                "must be assigned to the transcriptome "
                "for quantification to proceed.",
            )
            num_bootstraps = IntegerField(
                label="--numBootstraps",
                description="Salmon has the ability to optionally "
                "compute bootstrapped abundance estimates. This is "
                "done by resampling (with replacement) from the counts "
                "assigned to the fragment equivalence classes, and then "
                "re-running the optimization procedure, either the EM or VBEM, "
                "for each such sample. The values of these different bootstraps "
                "allows us to assess technical variance in the main abundance "
                "estimates we produce. Such estimates can be useful for downstream "
                "(e.g. differential expression) tools that can make use of such "
                "uncertainty estimates. This option takes a positive integer that "
                "dictates the number of bootstrap samples to compute. The more samples "
                "computed, the better the estimates of variance, but the more "
                "computation (and time) required.",
                disabled="options.num_gibbs_samples",
                required=False,
            )
            num_gibbs_samples = IntegerField(
                label="--numGibbsSamples",
                description="Just as with the bootstrap procedure above, this option "
                "produces samples that allow us to estimate the variance in abundance "
                "estimates. However, in this case the samples are generated using posterior "
                "Gibbs sampling over the fragment equivalence classes rather than "
                "bootstrapping. We are currently analyzing these different approaches to "
                "assess the potential trade-offs in time / accuracy. The --numBootstraps "
                "and --numGibbsSamples options are mutually exclusive (i.e. in a given run, "
                "you must set at most one of these options to a positive integer.)",
                disabled="options.num_bootstraps",
                required=False,
            )

        options = GroupField(Options, label="Options")

    class Output:
        """Output fields."""

        exp = FileField(label="Normalized expression")
        exp_json = JsonField(label="Expression (json)")
        exp_type = StringField(label="Expression type")
        rc = FileField(label="Gene-level estimated counts")
        exp_set = FileField(label="Expressions")
        exp_set_json = JsonField(label="Expressions (json)")
        variance = FileField(label="Variance of inferential replicates", required=False)
        quant = FileField(label="Salmon quant file")
        transcripts = FileField(label="Transcript-level expressions")
        salmon_output = DirField(label="Salmon output")
        txdb = FileField(label="Transcript to gene mapping")
        strandedness = StringField(label="Strandedness code")
        strandedness_report = FileField(label="Strandedness report file")
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # The index and the annotation must describe the same genome.
        if inputs.salmon_index.output.species != inputs.annotation.output.species:
            self.error(
                "Salmon index file species ({}) must match GTF annotation "
                "file species ({})".format(
                    inputs.salmon_index.output.species, inputs.annotation.output.species
                )
            )
        if inputs.salmon_index.output.build != inputs.annotation.output.build:
            self.error(
                "Salmon index file build ({}) must match GTF annotation "
                "file build ({})".format(
                    inputs.salmon_index.output.build, inputs.annotation.output.build
                )
            )
        if inputs.salmon_index.output.source != inputs.annotation.output.source:
            self.error(
                "Salmon index file source ({}) must match GTF annotation "
                "file source ({})".format(
                    inputs.salmon_index.output.source, inputs.annotation.output.source
                )
            )

        # Without length correction, abundances are reported as CPM.
        if inputs.options.no_length_correction:
            abundance_unit = "CPM"
            output_suffix = "_cpm.txt.gz"
        else:
            abundance_unit = "TPM"
            output_suffix = "_tpm.txt.gz"

        args = [
            "-i",
            inputs.salmon_index.output.index.path,
            "-l",
            inputs.options.stranded,
            "--incompatPrior",
            inputs.options.incompat_prior,
            "--minAssignedFrags",
            inputs.options.min_assigned_frag,
            "--rangeFactorizationBins",
            inputs.options.range_factorization_bins,
            "-p",
            self.requirements.resources.cores,
            "-o",
            "salmon_output",
        ]

        # Prepare .FASTQ file inputs based on the reads input type
        if inputs.reads.type.startswith("data:reads:fastq:single:"):
            args.extend(["-r"] + [lane.path for lane in inputs.reads.output.fastq])
        else:
            args.extend(["-1"] + [lane.path for lane in inputs.reads.output.fastq])
            args.extend(["-2"] + [lane.path for lane in inputs.reads.output.fastq2])

        # Prepare optional inputs. Bias correction relies on modifying
        # effective lengths, so it is incompatible with --noLengthCorrection.
        if inputs.options.seq_bias and not inputs.options.no_length_correction:
            args.append("--seqBias")
        elif inputs.options.seq_bias and inputs.options.no_length_correction:
            self.warning(
                "Since bias correction relies on modifying effective lengths, "
                "you cannot enable bias correction simultaneously with the "
                "--noLengthCorrection option. Skipping --seqBias option."
            )

        if inputs.options.gc_bias and not inputs.options.no_length_correction:
            args.append("--gcBias")
        elif inputs.options.gc_bias and inputs.options.no_length_correction:
            self.warning(
                "Since bias correction relies on modifying effective lengths, "
                "you cannot enable bias correction simultaneously with the "
                "--noLengthCorrection option. Skipping --gcBias option."
            )

        if inputs.options.discard_orphans_quasi:
            args.append("--discardOrphansQuasi")

        if inputs.options.no_length_correction:
            args.append("--noLengthCorrection")

        if inputs.options.min_score_fraction > 0:
            args.extend(["--minScoreFraction", inputs.options.min_score_fraction])

        if inputs.options.consensus_slack is not None:
            args.extend(["--consensusSlack", inputs.options.consensus_slack])

        # --numBootstraps and --numGibbsSamples are mutually exclusive.
        if inputs.options.num_bootstraps:
            args.extend(["--numBootstraps", inputs.options.num_bootstraps])
        elif inputs.options.num_gibbs_samples:
            args.extend(["--numGibbsSamples", inputs.options.num_gibbs_samples])

        # Run Salmon Quant
        return_code, _, _ = Cmd["salmon"]["quant"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error while running Salmon Quant.")

        # Use tximport to produce gene-level TPM values
        reads_basename = os.path.basename(inputs.reads.output.fastq[0].path)
        assert reads_basename.endswith(".fastq.gz")
        reads_name = reads_basename[:-9]

        annot_basename = os.path.basename(inputs.annotation.output.annot.path)
        assert annot_basename.endswith(".gtf")
        annot_name = annot_basename[:-4]

        tx2gene = f"tx2gene_{annot_name}.txt"
        counts = f"{reads_name}_counts.txt"
        counts_gz = counts + ".gz"

        if os.path.exists("salmon_output/quant.sf"):
            # tximport_summarize.R writes the gene-level abundance table to a
            # file named ``reads_name`` (and optionally ``variance_<name>``)
            # -- presumably; verify against the R script if it changes.
            tximport_args = [
                "salmon_output/quant.sf",
                inputs.annotation.output.annot.path,
                reads_name,
                counts,
                tx2gene,
            ]
            # Strip feature_id version for non-UCSC annotation source type
            # UCSC annotation type (mm10) contains features with dot in gene names
            if inputs.annotation.output.source != "UCSC":
                tximport_args.append("--ignoreTxVersion")

            if inputs.options.num_bootstraps or inputs.options.num_gibbs_samples:
                tximport_args.append("--variance")
                variance = f"variance_{reads_name}"
                variance_gz = variance + ".txt.gz"

            return_code, _, _ = Cmd["tximport_summarize.R"][tximport_args] & TEE(
                retcode=None
            )
            if return_code:
                self.error("Error while running tximport.")

            # Prepare transcript-level expression file
            transcript_out_file = f"{reads_name}_transcripts{output_suffix}"
            parse_transcript_exp("salmon_output/quant.sf", transcript_out_file)
        else:
            self.error("Salmon Quant results file quant.sf does not exist.")

        # Zip the gene-level abundance estimates
        (Cmd["gzip"]["-c", reads_name] > reads_name + output_suffix)()

        # Zip the gene-level count estimates
        (Cmd["gzip"]["-c", counts] > counts_gz)()

        if inputs.options.num_bootstraps or inputs.options.num_gibbs_samples:
            (Cmd["gzip"]["-c", variance] > variance_gz)()
            outputs.variance = variance_gz

        # Save the abundance estimates to JSON storage
        json_output = "json.txt"
        expression_to_storage(
            exp_input=(reads_name + output_suffix), exp_output=json_output
        )

        # Rename columns of the expression file
        reads_name_renamed = f"{reads_name}_renamed"
        rename_cols(
            infile=reads_name, outfile=reads_name_renamed, abundance_unit=abundance_unit
        )

        # Prepare the expression set outputs
        feature_ids = pd.read_csv(
            reads_name_renamed, sep="\t", index_col="FEATURE_ID"
        ).index.tolist()

        feature_filters = {
            "source": inputs.annotation.output.source,
            "species": inputs.annotation.output.species,
            "feature_id__in": feature_ids,
        }

        feature_ids_to_names = {
            f.feature_id: f.name for f in self.feature.filter(**feature_filters)
        }

        prepare_expression_set(
            infile=reads_name_renamed,
            abundance_unit=abundance_unit,
            feature_dict=feature_ids_to_names,
            outfile_name=f"{reads_name}_expressions",
        )

        Cmd["ln"]["-s", "salmon_output/quant.sf", reads_name + ".sf"]()

        # Read the library-format report with a context manager so the file
        # handle is closed (was a leaked ``open`` call).
        lib_type_report = "salmon_output/lib_format_counts.json"
        with open(lib_type_report) as handle:
            strandedness = json.load(handle).get("expected_format", "")

        # Save all the outputs
        outputs.salmon_output = "salmon_output"
        outputs.quant = reads_name + ".sf"
        outputs.transcripts = transcript_out_file
        outputs.txdb = tx2gene
        outputs.rc = counts_gz
        outputs.exp = reads_name + output_suffix
        outputs.exp_json = json_output
        outputs.exp_set = reads_name + "_expressions.txt.gz"
        outputs.exp_set_json = reads_name + "_expressions.json"
        outputs.strandedness = strandedness
        outputs.strandedness_report = lib_type_report
        outputs.exp_type = abundance_unit
        outputs.feature_type = "gene"
        outputs.source = inputs.salmon_index.output.source
        outputs.species = inputs.salmon_index.output.species
        outputs.build = inputs.salmon_index.output.build
import gzip
import io
import json
import shutil
from pathlib import Path
import pandas as pd
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
JsonField,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
def get_gene_counts(infile, outfile, sample_name):
    """Extract gene counts for a sample from featureCounts output.

    The first line of featureCounts output is a comment, so the header is
    read from the second line (``header=1``). When the sample was sequenced
    in multiple lanes (several count columns share the ``sample_name``
    prefix) the per-lane counts are written to ``per_lane_rc.txt`` and
    summed into a single expression column.
    """
    # ``squeeze=True`` was removed: it was a no-op on this multi-column
    # frame and the argument is gone in pandas 2.0.
    exp = pd.read_csv(
        infile,
        sep="\t",
        skip_blank_lines=True,
        header=1,
        index_col="Geneid",
        dtype={
            "Geneid": str,
        },
    )
    filter_col = [col for col in exp if col.startswith(sample_name)]
    if len(filter_col) > 1:
        # Multi-lane sample: keep the per-lane raw counts and sum them up.
        per_lane_raw_counts = "per_lane_rc.txt"
        exp[filter_col] = exp[filter_col].astype(int)
        exp[filter_col].to_csv(
            per_lane_raw_counts,
            index_label="FEATURE_ID",
            sep="\t",
        )
        return_columns = "sum_count"
        exp[return_columns] = exp[filter_col].sum(axis=1)
    else:
        return_columns = sample_name
        exp = exp.astype({return_columns: int})
    exp[[return_columns]].to_csv(
        outfile,
        index_label="FEATURE_ID",
        header=["EXPRESSION"],
        sep="\t",
    )
def get_gene_lenghts(infile, outfile):
    """Extract feature lengths from featureCounts output.

    NOTE: the function name keeps the historical 'lenghts' misspelling for
    backward compatibility with existing callers.
    """
    # ``squeeze=True`` was removed (gone in pandas 2.0); a one-column frame
    # serializes identically with the supplied header.
    exp = pd.read_csv(
        infile,
        sep="\t",
        skip_blank_lines=True,
        header=1,
        usecols=["Geneid", "Length"],
        index_col="Geneid",
        dtype={
            "Geneid": str,
            "Length": int,
        },
    )
    exp.to_csv(
        outfile,
        index_label="FEATURE_ID",
        header=["GENE_LENGTHS"],
        sep="\t",
    )
def rename_columns_and_compress(infile, outfile):
    """Rename columns and compress the expression file.

    Read a FEATURE_ID/EXPRESSION table and write it as a gzipped
    Gene/Expression table.
    """
    # ``squeeze=True`` was removed (gone in pandas 2.0); a one-column frame
    # serializes identically with the supplied header.
    exp = pd.read_csv(
        infile,
        sep="\t",
        usecols=["FEATURE_ID", "EXPRESSION"],
        index_col="FEATURE_ID",
        float_precision="round_trip",
    )
    exp.to_csv(
        outfile, index_label="Gene", header=["Expression"], sep="\t", compression="gzip"
    )
def compress_outputs(input_file, output_file):
    """Gzip-compress ``input_file`` into ``output_file``."""
    chunk_size = 64 * 1024
    with open(input_file, "rb") as raw, gzip.open(output_file, "wb") as packed:
        # Stream in fixed-size chunks to keep memory use bounded.
        while chunk := raw.read(chunk_size):
            packed.write(chunk)
def exp_to_df(exp_file, exp_type):
    """Load an expression table and label its value column with *exp_type*.

    The ``EXPRESSION`` column is renamed to ``exp_type`` so tables from
    different normalizations can be merged on ``FEATURE_ID``.
    """
    frame = pd.read_csv(exp_file, sep="\t", float_precision="round_trip")
    frame = frame.rename(columns={"EXPRESSION": exp_type})
    # Keep index labels as strings (matches the original rename(index=str)).
    frame.index = frame.index.map(str)
    # Cast FEATURE_ID column to string
    frame["FEATURE_ID"] = frame["FEATURE_ID"].astype(str)
    return frame
def prepare_expression_set(rc, tpm, cpm, feature_dict, outfile_name):
    """Prepare expression set output data.

    Merge raw-count, TPM and CPM tables on ``FEATURE_ID``, attach gene
    symbols from ``feature_dict`` and write the merged table to
    ``<outfile_name>.txt.gz`` and ``<outfile_name>.json``.
    """
    rc_exp = exp_to_df(rc, "RAW_COUNT")
    tpm_exp = exp_to_df(tpm, "TPM")
    cpm_exp = exp_to_df(cpm, "CPM")
    rc_exp["GENE_SYMBOL"] = rc_exp["FEATURE_ID"].map(feature_dict)
    input_features = rc_exp["FEATURE_ID"].tolist()
    # Check if all of the input feature IDs could be mapped to the gene symbols
    if not all(f_id in feature_dict for f_id in input_features):
        print(
            f"{sum(rc_exp.isnull().values.ravel())} feature(s) "
            f"could not be mapped to the associated feature symbols."
        )
    # Merge with normalized expression values
    exp_set = rc_exp.merge(tpm_exp, on="FEATURE_ID")
    exp_set = exp_set.merge(cpm_exp, on="FEATURE_ID")
    # Reorder columns and replace missing gene symbols with empty strings.
    # Selecting into a new frame (instead of fillna(inplace=True) on a
    # column slice) avoids pandas chained-assignment warnings.
    columns = ["FEATURE_ID", "GENE_SYMBOL", "RAW_COUNT", "TPM", "CPM"]
    exp_set = exp_set[columns].fillna("")
    # Write to file
    exp_set.to_csv(
        outfile_name + ".txt.gz",
        header=True,
        index=False,
        sep="\t",
        compression="gzip",
    )
    # Write to JSON
    df_dict = exp_set.set_index("FEATURE_ID").to_dict(orient="index")
    with open(outfile_name + ".json", "w") as f:
        json.dump({"genes": df_dict}, f, allow_nan=False)
def expression_to_storage(rc_input, rc_output):
    """Convert a gzipped two-column expressions file to JSON format.

    Rows that do not have exactly two tab-separated fields, or whose second
    field is not numeric, are silently skipped.
    """
    genes = {}
    with gzip.open(rc_input, mode="rt") as handle:
        for row in handle:
            parts = row.split("\t")
            if len(parts) != 2:
                continue
            try:
                genes[parts[0]] = float(parts[1])
            except ValueError:
                # Second field is not a number (e.g. a header line) -- skip.
                continue

    with open(file=rc_output, mode="wt") as handle:
        json.dump({"genes": genes}, handle)
    return rc_output
class FeatureCounts(ProcessBio):
    """Quantify sequencing reads aligned to genomic features.

    featureCounts is a highly efficient general-purpose read summarization
    program that counts aligned reads on genomic features such as genes, exons,
    promoter, gene bodies, genomic bins and chromosomal locations. It can be
    used to count both RNA-seq and genomic DNA-seq reads. See the
    [official website](http://bioinf.wehi.edu.au/featureCounts/) and the
    [introductory paper](https://academic.oup.com/bioinformatics/article/30/7/923/232889)
    for more information.

    featureCounts output includes raw counts and normalized (TPM/CPM) expression values.
    Normalized expression values are computed using rnanorm Python package under
    union-exon gene length model.
    """

    slug = "feature_counts"
    name = "featureCounts"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0",
            },
        },
        "resources": {
            "cores": 4,
            "memory": 32768,
            "network": True,
        },
    }
    data_name = "{{ aligned_reads|name|default('?') }}"
    # Bumped because the process definition changed (corrected featureCounts
    # option names and error messages).
    version = "6.1.1"
    process_type = "data:expression:featurecounts"
    category = "Quantify"
    entity = {
        "type": "sample",
    }
    scheduling_class = SchedulingClass.BATCH

    class Input:
        """Input fields."""

        aligned_reads = DataField("alignment:bam", label="Aligned reads")
        annotation = DataField(
            "annotation",
            label="Annotation",
            description="GTF and GFF3 annotation formats are supported",
        )
        feature_class = StringField(
            label="Feature class",
            default="exon",
            description="Feature class (3rd column in GTF/GFF3 file) to be used. All other features will be ignored.",
        )
        id_attribute = StringField(
            label="ID attribute",
            allow_custom_choice=True,
            default="gene_id",
            choices=[
                ("gene_id", "gene_id"),
                ("transcript_id", "transcript_id"),
                ("ID", "ID"),
                ("geneid", "geneid"),
                ("Name", "Name"),
            ],
            description="GTF/GFF3 attribute to be used as feature ID. Several GTF/GFF3 lines "
            "with the same feature ID will be considered as parts of the same "
            "feature. The feature ID is used to identify the counts in the "
            "output table. In GTF files this is usually 'gene_id', in GFF3 files "
            "this is often 'ID', and 'transcript_id' is frequently a valid "
            "choice for both annotation formats.",
        )
        feature_type = StringField(
            label="Feature type",
            default="gene",
            choices=[
                ("gene", "gene"),
                ("transcript", "transcript"),
            ],
            description="The type of feature the quantification program summarizes over "
            "(e.g. gene or transcript-level analysis). The value of this "
            "parameter needs to be chosen in line with ID attribute input choice.",
        )
        normalization_type = StringField(
            label="Normalization type",
            default="TPM",
            choices=[
                ("TPM", "TPM"),
                ("CPM", "CPM"),
            ],
            description="The default expression normalization type.",
        )
        assay_type = StringField(
            label="Assay type",
            default="non_specific",
            choices=[
                ("non_specific", "Strand non-specific"),
                ("forward", "Strand-specific forward"),
                ("reverse", "Strand-specific reverse"),
                ("auto", "Detect automatically"),
            ],
            description="Indicate if strand-specific read counting should be performed. "
            "For paired-end reads, strand of the first read is taken as the strand "
            "of the whole fragment. FLAG field is used to tell if a read is "
            "first or second read in a pair. Automated strand detection is enabled "
            "using the [Salmon](https://salmon.readthedocs.io/en/latest/library_type.html) "
            "tool's build-in functionality. To use this option, cDNA (transcriptome) "
            "index file created using the Salmon indexing tool must be provided",
        )
        cdna_index = DataField(
            "index:salmon",
            label="Salmon index file",
            required=False,
            hidden="assay_type != 'auto'",
            description="Transcriptome index file created using the Salmon indexing tool. "
            "cDNA (transcriptome) sequences used for index file creation must be "
            "derived from the same species as the input sequencing reads to "
            "obtain the reliable analysis results.",
        )
        n_reads = IntegerField(
            label="Number of reads in subsampled alignment file",
            default=5000000,
            hidden="assay_type != 'auto'",
            description="Alignment (.bam) file subsample size to detect "
            "strandedness. Increase the number of reads to make automatic "
            "detection more reliable. Decrease the number of reads to "
            "make automatic detection run faster.",
        )

        class General:
            """General options."""

            count_features = BooleanField(
                label="Perform read counting at feature level",
                description="Count reads for exons rather than genes.",
                default=False,
            )
            by_read_group = BooleanField(
                label="Assign reads by read group",
                description="RG tag is required to be present in the input BAM files.",
                default=True,
            )
            count_long_reads = BooleanField(
                label="Count long reads such as Nanopore and PacBio reads",
                default=False,
            )
            count_multi_mapping_reads = BooleanField(
                label="Count multi-mapping reads",
                description="For a multi-mapping read, all its reported alignments will be "
                "counted. The 'NH' tag in BAM input is used to detect multi-mapping reads.",
                default=False,
            )
            fraction = BooleanField(
                label="Assign fractional counts to features",
                description="This option must be used together with 'Count multi-mapping "
                "reads' or 'Assign reads to all their overlapping features or "
                "meta-features' or both. When 'Count multi-mapping reads' is "
                "checked, each reported alignment from a multi-mapping read "
                "(identified via 'NH' tag) will carry a count of 1 / x, instead "
                "of 1 (one), where x is the total number of alignments reported "
                "for the same read. When 'Assign reads to all their overlapping "
                "features or meta-features' is checked, each overlapping "
                "feature will receive a count of 1 / y, where y is the total "
                "number of features overlapping with the read. When both 'Count "
                "multi-mapping reads' and 'Assign reads to all their overlapping "
                "features or meta-features' are specified, each alignment will "
                "carry a count of 1 / (x * y).",
                default=False,
                disabled="!(general.count_multi_mapping_reads || overlap.allow_multi_overlap)",
            )

        class Overlap:
            """Overlap options."""

            allow_multi_overlap = BooleanField(
                label="Assign reads to all their overlapping features or meta-features",
                default=False,
            )
            min_overlap = IntegerField(
                label="Minimum number of overlapping bases in a read that is required for read assignment",
                default=1,
                description="Number of overlapping bases is counted from both reads if "
                "paired-end. If a negative value is provided, then a gap of up "
                "to specified size will be allowed between read and the feature "
                "that the read is assigned to.",
            )
            frac_overlap = FloatField(
                label="Minimum fraction of overlapping bases in a read that is required for read assignment",
                default=0.0,
                description="Value should be within range [0, 1]. Number of overlapping bases "
                "is counted from both reads if paired end. Both this "
                "option and 'Minimum number of overlapping bases in a read "
                "that is required for read assignment' need to be satisfied "
                "for read assignment.",
            )
            frac_overlap_feature = FloatField(
                label="Minimum fraction of overlapping bases included in a feature that is "
                "required for overlapping with a read or a read pair",
                default=0.0,
                description="Value should be within range [0, 1].",
            )
            largest_overlap = BooleanField(
                label="Assign reads to a feature or meta-feature that has the largest "
                "number of overlapping bases",
                default=False,
            )
            read_extension_5 = IntegerField(
                label="Number of bases to extend reads upstream by from their 5' end",
                default=0,
            )
            read_extension_3 = IntegerField(
                label="Number of bases to extend reads upstream by from their 3' end",
                default=0,
            )
            read_to_pos = IntegerField(
                label="Reduce reads to their 5'-most or 3'-most base",
                required=False,
                description="Read counting is performed based on the single base the read "
                "is reduced to.",
            )

        class ReadFiltering:
            """Read filtering."""

            min_mqs = IntegerField(
                label="Minimum mapping quality score",
                default=0,
                description="The minimum mapping quality score a read must satisfy in order "
                "to be counted. For paired-end reads, at least one end should "
                "satisfy this criterion.",
            )
            split_only = BooleanField(
                label="Count only split alignments",
                default=False,
            )
            non_split_only = BooleanField(
                label="Count only non-split alignments",
                default=False,
            )
            primary = BooleanField(
                label="Count only primary alignments",
                default=False,
                description="Primary alignments are identified using bit 0x100 in BAM "
                "FLAG field.",
            )
            ignore_dup = BooleanField(
                label="Ignore duplicate reads in read counting",
                default=False,
                description="Duplicate reads are identified using bit 0x400 in BAM FLAG "
                "field. The whole read pair is ignored if one of the reads is a "
                "duplicate read for paired-end data.",
            )

        class ExonExonJunctions:
            """Exon-exon junctions."""

            junc_counts = BooleanField(
                label="Count the number of reads supporting each exon-exon junction",
                default=False,
                description="Junctions are identified from those exon-spanning reads in "
                "input (containing 'N' in CIGAR string).",
            )
            genome = DataField(
                "seq:nucleotide",
                label="Genome",
                required=False,
                disabled="!exon_exon_junctions.junc_counts",
                description="Reference sequences used in read mapping that produced the "
                "provided BAM files. This optional argument can be used to improve read "
                "counting for junctions.",
            )

        class PairedEnd:
            """Parameters specific to paired-end reads."""

            is_paired_end = BooleanField(
                label="Count fragments (or templates) instead of reads",
                default=True,
            )
            require_both_ends_mapped = BooleanField(
                label="Count only read pairs that have both ends aligned",
                default=False,
            )
            check_frag_length = BooleanField(
                label="Check fragment length when assigning fragments to meta-features "
                "or features",
                default=False,
                description="Use minimum and maximum fragment/template length to set thresholds.",
            )
            min_frag_length = IntegerField(
                label="Minimum fragment/template length",
                default=50,
                disabled="!paired_end.check_frag_length",
            )
            max_frag_length = IntegerField(
                label="Maximum fragment/template length",
                default=600,
                disabled="!paired_end.check_frag_length",
            )
            do_not_count_chimeric_fragments = BooleanField(
                label="Do not count chimeric fragments",
                default=False,
                description="Do not count read pairs that have their two ends mapped to "
                "different chromosomes or mapped to same chromosome but on different strands.",
            )
            do_not_sort = BooleanField(
                label="Do not sort reads in BAM input",
                default=False,
            )

        class Miscellaneous:
            """Miscellaneous."""

            report_reads = BooleanField(
                label="Output detailed assignment results for each read or read pair",
                default=False,
            )
            max_mop = IntegerField(
                label="Maximum number of 'M' operations allowed in a CIGAR string",
                default=10,
                description="Both 'X' and '=' are treated as 'M' and adjacent 'M' operations "
                "are merged in the CIGAR string.",
            )
            verbose = BooleanField(
                label="Output verbose information",
                default=False,
                description="Output verbose information for debugging, such as unmatched "
                "chromosome / contig names.",
            )

        general = GroupField(General, label="General options")
        overlap = GroupField(Overlap, label="Overlap between reads and features")
        read_filtering = GroupField(ReadFiltering, label="Read filtering")
        exon_exon_junctions = GroupField(ExonExonJunctions, label="Exon-exon junctions")
        paired_end = GroupField(
            PairedEnd, label="Parameters specific to paired-end reads"
        )
        miscellaneous = GroupField(Miscellaneous, label="Miscellaneous")

    class Output:
        """Output fields."""

        rc = FileField(label="Read counts")
        per_lane_rc = FileField(label="Per lane read counts", required=False)
        tpm = FileField(label="TPM")
        cpm = FileField(label="CPM")
        exp = FileField(label="Normalized expression")
        exp_json = JsonField(label="Expression (json)")
        exp_type = StringField(label="Expression type")
        exp_set = FileField(label="Expressions")
        exp_set_json = JsonField(label="Expressions (json)")
        feature_counts_output = FileField(label="featureCounts output")
        counts_summary = FileField(label="Counts summary")
        read_assignments = FileField(
            label="Read assignments",
            required=False,
            description="Read assignment results for each read (or fragment if paired end).",
        )
        strandedness_report = FileField(
            label="Strandedness report file",
            required=False,
        )
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Map Salmon library-type codes and manual assay-type choices to the
        # strandedness codes used by featureCounts (-s): 0 = unstranded,
        # 1 = forward-stranded, 2 = reverse-stranded.
        STRANDEDNESS_CODES = {
            "IU": 0,
            "U": 0,
            "non_specific": 0,
            "ISF": 1,
            "OSF": 1,
            "SF": 1,
            "forward": 1,
            "ISR": 2,
            "OSR": 2,
            "SR": 2,
            "reverse": 2,
        }
        if inputs.aligned_reads.output.species != inputs.annotation.output.species:
            self.error(
                f"Species of aligned reads {inputs.aligned_reads.output.species} "
                f"and annotation {inputs.annotation.output.species} do not match. Please provide "
                f"aligned reads and annotation with the same species."
            )
        if inputs.aligned_reads.output.build != inputs.annotation.output.build:
            # Report the mismatched builds (not species) in the message.
            self.error(
                f"Builds of aligned reads {inputs.aligned_reads.output.build} "
                f"and annotation {inputs.annotation.output.build} do not match. Please provide "
                f"aligned reads and annotation with the same build."
            )
        if inputs.assay_type == "auto" and not inputs.cdna_index:
            self.error(
                "cDNA sequence index must be provided to automatically detect strandedness."
            )
        if (
            inputs.cdna_index
            and inputs.aligned_reads.output.species != inputs.cdna_index.output.species
        ):
            # Report the cDNA index species (not the annotation species).
            self.error(
                f"Species of aligned reads {inputs.aligned_reads.output.species} "
                f"and cDNA index {inputs.cdna_index.output.species} do not match. Please provide "
                f"aligned reads and cDNA index with the same species."
            )
        # Avoid reporting the full path to the alignment (.bam) file in the counts summary file.
        # This is to prevent the FeatureCounts results to be reported as a separate sample in the MultiQC report
        bam_file = Path(inputs.aligned_reads.output.bam.path).name
        Path(bam_file).symlink_to(inputs.aligned_reads.output.bam.path)
        # Set output file names
        assert bam_file.endswith(".bam")
        name = bam_file[:-4]
        exp_output = f"{name}_{inputs.normalization_type.lower()}.tab.gz"
        # check if aligned reads are single or paired-end
        paired_end = True
        if int(Cmd["samtools"]["view"]["-c", "-f", "1", bam_file]().strip()) == 0:
            paired_end = False
        self.progress(0.05)
        # set strandedness
        if inputs.assay_type == "auto":
            all_reads = int(Cmd["samtools"]["view"]["-c", bam_file]().strip())
            sampling_rate = min(inputs.n_reads / all_reads, 1)
            # subsample the BAM file
            if sampling_rate < 1:
                strand_check_bam = "subsampled_sorted.bam"
                (
                    Cmd["samtools"]["view"][
                        f"-@ {self.requirements.resources.cores}",
                        "-h",
                        f"-s {sampling_rate}",
                        bam_file,
                    ]
                    | Cmd["samtools"]["sort"][
                        f"-@ {self.requirements.resources.cores}", "-n", "-"
                    ]
                    > strand_check_bam
                )()
            else:
                strand_check_bam = "sorted.bam"
                sort_args = [
                    f"-@ {self.requirements.resources.cores}",
                    "-n",
                    "-o",
                    strand_check_bam,
                ]
                return_code, _, _ = Cmd["samtools"]["sort"][sort_args][bam_file] & TEE(
                    retcode=None
                )
                if return_code:
                    self.error("Error while running Samtools sort.")
            # Consider only proper paired-end reads for strandedness detection (-0, -s to /dev/null).
            # Failure to do so will result in improper strandedness detection, which
            # will directly impact expressions from featureCounts.
            fastq_args = [f"-@ {self.requirements.resources.cores}", "-N"]
            if paired_end:
                reads_input = ["-1", "mate1.fastq", "-2", "mate2.fastq"]
                fastq_args.extend(["-0", "/dev/null", "-s", "/dev/null"])
            else:
                reads_input = ["-0", "reads.fastq"]
            fastq_args.extend(reads_input)
            return_code, _, _ = Cmd["samtools"]["fastq"][fastq_args][
                strand_check_bam
            ] & TEE(retcode=None)
            if return_code:
                self.error("Samtools fastq command failed.")
            salmon_out_folder = "salmon_output"
            # Run Salmon Quant
            salmon_args = [
                "-i",
                inputs.cdna_index.output.index.path,
                "-l",
                "A",
                reads_input if paired_end else ["-r", "reads.fastq"],
                "-o",
                salmon_out_folder,
                "-p",
                self.requirements.resources.cores,
                "--minAssignedFrags",
                1,
            ]
            return_code, _, _ = Cmd["salmon"]["quant"][salmon_args] & TEE(retcode=None)
            if return_code:
                self.error("Error while running Salmon Quant.")
            # Extract the strandedness code from the JSON report produced by the Salmon tool
            lib_type_report = f"{salmon_out_folder}/lib_format_counts.json"
            outputs.strandedness_report = lib_type_report
            with open(lib_type_report) as report_handle:
                strand_code = json.load(report_handle).get("expected_format", "")
            if strand_code:
                try:
                    strandedness = STRANDEDNESS_CODES[strand_code]
                except KeyError:
                    self.error(
                        f"Unsupported strand code detected: {strand_code} "
                        f"Please re-run analysis in user-selected strandedness mode."
                    )
            else:
                self.error(
                    "Automated detection of strandedness failed. "
                    "Re-run analysis in user-selected strandedness mode."
                )
        else:
            strandedness = STRANDEDNESS_CODES[inputs.assay_type]
        self.progress(0.1)
        # Replace empty gene_id entries in annotation file if source is UCSC
        annotation_file = inputs.annotation.output.annot.path
        if (
            inputs.annotation.output.source == "UCSC"
            and inputs.annotation.type.startswith("data:annotation:gtf")
        ):
            with open(annotation_file, "r") as infile:
                filedata = infile.read()
            # Replace the missing gene_ids
            annot_data = filedata.replace('gene_id "";', 'gene_id "unknown";')
            # Write the output file
            annotation_file = "annotation_modified.gtf"
            with open(annotation_file, "w") as outfile:
                outfile.write(annot_data)
        fc_output = "featureCounts_rc.txt"
        # Prepare featureCounts inputs
        args = [
            "-a",
            annotation_file,
            "-o",
            fc_output,
            "-F",
            "GTF",
            "-t",
            inputs.feature_class,
            "-g",
            inputs.id_attribute,
            "--minOverlap",
            inputs.overlap.min_overlap,
            "--fracOverlap",
            inputs.overlap.frac_overlap,
            "--fracOverlapFeature",
            inputs.overlap.frac_overlap_feature,
            "--readExtension5",
            inputs.overlap.read_extension_5,
            "--readExtension3",
            inputs.overlap.read_extension_3,
            "-Q",
            inputs.read_filtering.min_mqs,
            "--maxMOp",
            inputs.miscellaneous.max_mop,
            "-s",
            strandedness,
            "-T",
            self.requirements.resources.cores,
        ]
        if inputs.general.count_features:
            args.append("-f")
        if inputs.overlap.allow_multi_overlap:
            args.append("-O")
        if inputs.overlap.largest_overlap:
            args.append("--largestOverlap")
        if inputs.overlap.read_to_pos:
            args.extend(["--read2pos", inputs.overlap.read_to_pos])
        if inputs.general.count_multi_mapping_reads:
            args.append("-M")
        if inputs.general.fraction:
            args.append("--fraction")
        if inputs.read_filtering.split_only:
            # The option name was previously truncated
            # ("--countSplitAlignmentsOnl"), which featureCounts rejects.
            args.append("--countSplitAlignmentsOnly")
        if inputs.read_filtering.non_split_only:
            args.append("--countNonSplitAlignmentsOnly")
        if inputs.read_filtering.primary:
            args.append("--primary")
        if inputs.read_filtering.ignore_dup:
            args.append("--ignoreDup")
        if inputs.exon_exon_junctions.junc_counts:
            args.append("-J")
        if inputs.exon_exon_junctions.junc_counts and inputs.exon_exon_junctions.genome:
            args.extend(["-G", inputs.exon_exon_junctions.genome.path])
        if inputs.general.by_read_group:
            # Check if @RG is in header of the BAM file
            return_code, _, stderr = Cmd["samtools"]["view", "-Ho", "read_groups.txt"][
                bam_file
            ] & TEE(retcode=None)
            if return_code:
                self.error("An error occurred with Samtools view. ", stderr)
            read_groups = []
            with open("read_groups.txt") as file_in:
                for line in file_in:
                    if line.startswith("@RG"):
                        read_groups.append(line.split(sep="\t")[1].split(sep=":")[-1])
            if len(read_groups) > 0:
                args.append("--byReadGroup")
                self.info(f"Read groups {', '.join(read_groups)} detected.")
            else:
                self.info(
                    f"BAM file {bam_file} does not have any read groups assigned."
                )
        if inputs.general.count_long_reads:
            args.append("-L")
        if inputs.miscellaneous.report_reads:
            # Pass the option and its value as separate argv tokens so that
            # featureCounts receives "-R" "CORE" instead of one "-R CORE".
            args.extend(["-R", "CORE"])
        if inputs.miscellaneous.verbose:
            args.append("--verbose")
        # List of options for paired-end reads
        if paired_end and inputs.paired_end.is_paired_end:
            args.append("-p")
        if paired_end and inputs.paired_end.require_both_ends_mapped:
            args.append("-B")
        if paired_end and inputs.paired_end.check_frag_length:
            # -P enables fragment-length checking; -d/-D set the thresholds.
            args.append("-P")
            args.extend(["-d", inputs.paired_end.min_frag_length])
            args.extend(["-D", inputs.paired_end.max_frag_length])
        if paired_end and inputs.paired_end.do_not_count_chimeric_fragments:
            args.append("-C")
        if paired_end and inputs.paired_end.do_not_sort:
            args.append("--donotsort")
        # Run featureCounts
        return_code, _, _ = Cmd["featureCounts"][args][bam_file] & TEE(retcode=None)
        if return_code:
            self.error("Error while running featureCounts.")
        self.progress(0.8)
        raw_counts = "rc.txt"
        tpm = "tpm.txt"
        cpm = "cpm.txt"
        # parse featureCounts output
        get_gene_counts(
            infile=fc_output,
            outfile=raw_counts,
            sample_name=bam_file,
        )
        get_gene_lenghts(infile=fc_output, outfile="gene_lengths.txt")
        # Normalize counts
        rnanorm_args = [
            raw_counts,
            "--gene-lengths",
            "gene_lengths.txt",
            "--tpm-output",
            tpm,
            "--cpm-output",
            cpm,
        ]
        return_code, _, _ = Cmd["rnanorm"][rnanorm_args] & TEE(retcode=None)
        if return_code:
            self.error("Error while normalizing counts using rnanorm.")
        self.progress(0.9)
        # prepare the expression set outputs
        feature_ids = pd.read_csv(
            raw_counts, sep="\t", index_col="FEATURE_ID"
        ).index.to_list()
        feature_filters = {
            "source": inputs.annotation.output.source,
            "species": inputs.aligned_reads.output.species,
            "feature_id__in": feature_ids,
        }
        feature_ids_to_names = {
            f.feature_id: f.name for f in self.feature.filter(**feature_filters)
        }
        prepare_expression_set(
            rc=raw_counts,
            tpm=tpm,
            cpm=cpm,
            feature_dict=feature_ids_to_names,
            outfile_name=f"{name}_expressions",
        )
        self.progress(0.95)
        # rename and compress the expression files
        rename_columns_and_compress(raw_counts, f"{name}_rc.tab.gz")
        rename_columns_and_compress(tpm, f"{name}_tpm.tab.gz")
        rename_columns_and_compress(cpm, f"{name}_cpm.tab.gz")
        # Compress the featureCounts output file
        compress_outputs(
            input_file="featureCounts_rc.txt",
            output_file=f"{name}_featureCounts_rc.txt.gz",
        )
        if Path("per_lane_rc.txt").exists():
            compress_outputs(
                input_file="per_lane_rc.txt", output_file="per_lane_rc.txt.gz"
            )
            outputs.per_lane_rc = "per_lane_rc.txt.gz"
        # Save the abundance estimates to JSON storage
        json_output = "json.txt"
        expression_to_storage(rc_input=exp_output, rc_output=json_output)
        # Save the outputs
        outputs.feature_counts_output = f"{name}_featureCounts_rc.txt.gz"
        outputs.counts_summary = "featureCounts_rc.txt.summary"
        outputs.rc = f"{name}_rc.tab.gz"
        outputs.tpm = f"{name}_tpm.tab.gz"
        outputs.cpm = f"{name}_cpm.tab.gz"
        outputs.exp = exp_output
        outputs.exp_json = json_output
        outputs.exp_set = f"{name}_expressions.txt.gz"
        outputs.exp_set_json = f"{name}_expressions.json"
        outputs.exp_type = inputs.normalization_type
        outputs.source = inputs.annotation.output.source
        outputs.species = inputs.aligned_reads.output.species
        outputs.build = inputs.aligned_reads.output.build
        outputs.feature_type = inputs.feature_type
        if inputs.miscellaneous.report_reads:
            outputs.read_assignments = f"{name}.bam.featureCounts"
import gzip
import io
import json
import shutil
from pathlib import Path
import pandas as pd
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
IntegerField,
JsonField,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
# Mapping of strandedness designations to the column index used in the STAR
# per-gene counts file: 0 = unstranded, 1 = forward-stranded,
# 2 = reverse-stranded. Keys cover both Salmon library-type codes reported in
# lib_format_counts.json (e.g. "ISR", "U") and this process's manual
# assay-type choices ("non_specific", "forward", "reverse").
STRANDEDNESS_CODES = {
    "IU": 0,
    "U": 0,
    "non_specific": 0,
    "ISF": 1,
    "OSF": 1,
    "SF": 1,
    "forward": 1,
    "ISR": 2,
    "OSR": 2,
    "SR": 2,
    "reverse": 2,
}
def prepare_gene_counts(infile, outfile, summary, strandedness):
    """Extract gene counts from STAR input.

    Parse a STAR ``ReadsPerGene.out.tab``-style file, write the counts of the
    selected strandedness column to ``outfile`` and a summary table (the four
    leading QC rows plus the number of assigned reads) to ``summary``.

    :param infile: path to the STAR per-gene counts file
    :param outfile: path of the output counts file (FEATURE_ID/EXPRESSION)
    :param summary: path of the output counts summary file
    :param strandedness: count column to use (0 = unstranded,
        1 = forward-stranded, 2 = reverse-stranded)
    :return: list of gene IDs present in the counts file
    """
    exp = pd.read_csv(
        infile,
        sep="\t",
        names=["Geneid", 0, 1, 2],
        index_col="Geneid",
        dtype={"Geneid": str, 0: int, 1: int, 2: int},
    )
    # The first four rows are per-file summary statistics (in STAR output:
    # N_unmapped, N_multimapping, N_noFeature, N_ambiguous); genes follow.
    gene_rc_df = exp.iloc[4:][[strandedness]]
    gene_rc_df.to_csv(
        outfile,
        index_label="FEATURE_ID",
        header=["EXPRESSION"],
        sep="\t",
    )
    # Sum the selected column directly; int(Series.values) relied on the
    # deprecated single-element ndarray-to-int conversion.
    assigned_reads = int(gene_rc_df[strandedness].sum())
    # Copy explicitly so that appending the N_assigned row writes into an
    # independent frame rather than a slice view of ``exp``.
    summary_df = exp.iloc[:4][[strandedness]].copy()
    summary_df.loc["N_assigned"] = assigned_reads
    summary_df.to_csv(
        summary,
        index_label="Status",
        header=["Read count"],
        sep="\t",
    )
    return exp.iloc[4:].index.to_list()
def rename_columns_and_compress(infile, outfile):
    """Rename columns and compress the expression files.

    Read a FEATURE_ID/EXPRESSION table and write it gzip-compressed with the
    columns renamed to Gene/Expression.

    :param infile: path to the tab-separated expression file
    :param outfile: path of the gzipped output file
    """
    # Select the single column explicitly instead of read_csv(squeeze=True);
    # the ``squeeze`` argument was deprecated in pandas 1.4 and removed in 2.0.
    exp = pd.read_csv(
        infile,
        sep="\t",
        usecols=["FEATURE_ID", "EXPRESSION"],
        index_col="FEATURE_ID",
        float_precision="round_trip",
    )["EXPRESSION"]
    exp.to_csv(
        outfile, index_label="Gene", header=["Expression"], sep="\t", compression="gzip"
    )
def compress_outputs(input_file, output_file):
    """Compress outputs."""
    # Stream the input file into a gzip archive chunk by chunk instead of
    # loading it into memory.
    with open(file=input_file, mode="rb") as source, gzip.open(
        filename=output_file, mode="wb"
    ) as target:
        shutil.copyfileobj(source, target)
def exp_to_df(exp_file, exp_type):
    """Prepare expression file for gene sets merging."""
    with open(exp_file) as handle:
        frame = pd.read_csv(handle, sep="\t", float_precision="round_trip")
    # Rename the generic EXPRESSION column to the concrete expression type
    # (e.g. RAW_COUNT, TPM or CPM) and make sure feature IDs are strings.
    frame = frame.rename(index=str, columns={"EXPRESSION": exp_type})
    frame["FEATURE_ID"] = frame["FEATURE_ID"].astype("str")
    return frame
def prepare_expression_set(rc, tpm, cpm, feature_dict, outfile_name):
    """Prepare expression set output data.

    Merge raw counts with TPM/CPM-normalized expression values and the gene
    symbols resolved through ``feature_dict``, then write the combined table
    both as a gzipped tab-separated file and as JSON.

    :param rc: path to the raw-counts expression file
    :param tpm: path to the TPM-normalized expression file
    :param cpm: path to the CPM-normalized expression file
    :param feature_dict: mapping of feature IDs to gene symbols
    :param outfile_name: output file name prefix (without extension)
    """
    rc_exp = exp_to_df(rc, "RAW_COUNT")
    tpm_exp = exp_to_df(tpm, "TPM")
    cpm_exp = exp_to_df(cpm, "CPM")
    rc_exp["GENE_SYMBOL"] = rc_exp["FEATURE_ID"].map(feature_dict)
    # Count features that could not be mapped to a gene symbol. Count the
    # GENE_SYMBOL column directly instead of scanning the whole frame for
    # NaNs, so the reported number stays correct even if any other column
    # ever contains missing values.
    unmapped = int(rc_exp["GENE_SYMBOL"].isna().sum())
    if unmapped:
        print(
            f"{unmapped} feature(s) "
            f"could not be mapped to the associated feature symbols."
        )
    # Merge with normalized expression values
    exp_set = rc_exp.merge(tpm_exp, on="FEATURE_ID")
    exp_set = exp_set.merge(cpm_exp, on="FEATURE_ID")
    # Reorder columns
    columns = ["FEATURE_ID", "GENE_SYMBOL", "RAW_COUNT", "TPM", "CPM"]
    exp_set = exp_set[columns]
    # Replace NaN values with empty string
    exp_set.fillna("", inplace=True)
    # Write to file
    exp_set.to_csv(
        outfile_name + ".txt.gz",
        header=True,
        index=False,
        sep="\t",
        compression="gzip",
    )
    # Write to JSON
    df_dict = exp_set.set_index("FEATURE_ID").to_dict(orient="index")
    with open(outfile_name + ".json", "w") as f:
        json.dump({"genes": df_dict}, f, allow_nan=False)
def expression_to_storage(rc_input, rc_output):
    """Convert expressions file to JSON format."""

    def _is_number(token):
        """Return True if ``token`` parses as a float."""
        try:
            float(token)
        except ValueError:
            return False
        return True

    genes = {}
    # Read the gzipped tab-separated expression table line by line and keep
    # only rows that have exactly two fields with a numeric second column.
    with io.TextIOWrapper(io.BufferedReader(gzip.open(rc_input))) as handle:
        for line in handle:
            fields = line.split("\t")
            if len(fields) == 2 and _is_number(fields[1]):
                genes[fields[0]] = float(fields[1])
    with open(file=rc_output, mode="wt") as handle:
        json.dump({"genes": genes}, handle)
    return rc_output
class NormalizeSTARGeneQuantification(ProcessBio):
"""Normalize STAR quantification results.
This process is based on the output of STAR aligner 'gene counts'.
Strandedness is detected with Salmon or it can be specified manually.
Finally, for normalization of gene counts TPM and CPM are used.
"""
slug = "star-quantification"
name = "STAR gene quantification"
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {
"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.2.0",
},
},
"resources": {
"cores": 2,
"memory": 16384,
"network": True,
},
}
data_name = "{{ aligned_reads|name|default('?') }}"
version = "1.2.0"
process_type = "data:expression:star"
category = "Quantify"
entity = {
"type": "sample",
}
scheduling_class = SchedulingClass.BATCH
class Input:
"""Input fields."""
aligned_reads = DataField(
"alignment:bam:star",
label="Aligned reads",
description="Make sure aligned object from STAR also "
"includes gene counts otherwise the process will fail.",
)
annotation = DataField(
"annotation",
label="Annotation",
description="GTF and GFF3 annotation formats are supported.",
)
normalization_type = StringField(
label="Normalization type",
default="TPM",
choices=[
("TPM", "TPM"),
("CPM", "CPM"),
],
description="The expression normalization type.",
)
assay_type = StringField(
label="Assay type",
default="non_specific",
choices=[
("non_specific", "Strand non-specific"),
("forward", "Strand-specific forward"),
("reverse", "Strand-specific reverse"),
("auto", "Detect automatically"),
],
description="Indicate if strand-specific read counting should be performed. "
"For paired-end reads, strand of the first read is taken as the strand "
"of the whole fragment. FLAG field is used to tell if a read is "
"first or second read in a pair. Automated strand detection is enabled "
"using the [Salmon](https://salmon.readthedocs.io/en/latest/library_type.html) "
"tool's build-in functionality. To use this option, cDNA (transcriptome) "
"index file created using the Salmon indexing tool must be provided",
)
cdna_index = DataField(
"index:salmon",
label="Salmon index file",
required=False,
hidden="assay_type != 'auto'",
description="Transcriptome index file created using the Salmon indexing tool. "
"cDNA (transcriptome) sequences used for index file creation must be "
"derived from the same species as the input sequencing reads to "
"obtain the reliable analysis results.",
)
n_reads = IntegerField(
label="Number of reads in subsampled alignment file",
default=5000000,
hidden="assay_type != 'auto'",
description="Alignment (.bam) file subsample size to detect "
"strandedness. Increase the number of reads to make automatic "
"detection more reliable. Decrease the number of reads to "
"make automatic detection run faster.",
)
class Output:
"""Output fields."""
rc = FileField(label="Read counts")
tpm = FileField(label="TPM")
cpm = FileField(label="CPM")
exp = FileField(label="Normalized expression")
exp_json = JsonField(label="Expression (json)")
exp_type = StringField(label="Expression type")
exp_set = FileField(label="Expressions")
exp_set_json = JsonField(label="Expressions (json)")
counts_summary = FileField(label="Counts summary")
strandedness_report = FileField(
label="Strandedness report file",
required=False,
)
source = StringField(label="Gene ID source")
species = StringField(label="Species")
build = StringField(label="Build")
feature_type = StringField(label="Feature type")
def run(self, inputs, outputs):
    """Quantify gene expression from STAR gene counts.

    Validates that aligned reads, annotation and (optionally) the cDNA
    index are compatible, determines strandedness (either user-selected
    or auto-detected with Salmon on a name-sorted BAM subsample),
    extracts the matching count column and normalizes it with rnanorm.
    """
    if not inputs.aligned_reads.output.gene_counts:
        self.error(
            "Aligned reads should contain gene count information, but do not."
        )
    if inputs.aligned_reads.output.species != inputs.annotation.output.species:
        self.error(
            f"Species of aligned reads {inputs.aligned_reads.output.species} "
            f"and annotation {inputs.annotation.output.species} do not match. Please provide "
            "aligned reads and annotation with the same species."
        )
    if inputs.aligned_reads.output.build != inputs.annotation.output.build:
        self.error(
            # BUGFIX: interpolate the builds (previously reported species).
            f"Builds of aligned reads {inputs.aligned_reads.output.build} "
            f"and annotation {inputs.annotation.output.build} do not match. Please provide "
            "aligned reads and annotation with the same build."
        )
    if inputs.assay_type == "auto" and not inputs.cdna_index:
        self.error(
            "cDNA sequence index must be provided to automatically detect strandedness."
        )
    if (
        inputs.cdna_index
        and inputs.aligned_reads.output.species != inputs.cdna_index.output.species
    ):
        self.error(
            # BUGFIX: interpolate the cDNA index species (previously
            # reported the annotation species).
            f"Species of aligned reads {inputs.aligned_reads.output.species} "
            f"and cDNA index {inputs.cdna_index.output.species} do not match. Please provide "
            "aligned reads and cDNA index with the same species."
        )

    bam_file = Path(inputs.aligned_reads.output.bam.path)
    # Set output file names
    assert bam_file.name.endswith(".bam")
    name = bam_file.name[:-4]

    # Check if aligned reads are single or paired-end by counting reads
    # carrying the "read paired" FLAG bit (-f 1).
    paired_end = True
    if int(Cmd["samtools"]["view"]["-c", "-f", "1", bam_file]().strip()) == 0:
        paired_end = False

    # Set strandedness.
    if inputs.assay_type == "auto":
        all_reads = int(Cmd["samtools"]["view"]["-c", bam_file]().strip())
        sampling_rate = min(inputs.n_reads / all_reads, 1)
        # Subsample the BAM file (if needed) and sort it by read name
        # (-n), as required by samtools fastq below.
        if sampling_rate < 1:
            strand_check_bam = "subsampled_sorted.bam"
            (
                Cmd["samtools"]["view"][
                    f"-@ {self.requirements.resources.cores}",
                    "-h",
                    f"-s {sampling_rate}",
                    bam_file,
                ]
                | Cmd["samtools"]["sort"][
                    f"-@ {self.requirements.resources.cores}", "-n", "-"
                ]
                > strand_check_bam
            )()
        else:
            strand_check_bam = "sorted.bam"
            sort_args = [
                f"-@ {self.requirements.resources.cores}",
                "-n",
                "-o",
                strand_check_bam,
            ]
            return_code, _, _ = Cmd["samtools"]["sort"][sort_args][bam_file] & TEE(
                retcode=None
            )
            if return_code:
                self.error("Error while running Samtools sort.")

        # Consider only proper paired-end reads for strandedness detection (-0, -s to /dev/null).
        # Failure to do so will result in improper strandedness detection.
        fastq_args = [f"-@ {self.requirements.resources.cores}", "-N"]
        if paired_end:
            reads_input = ["-1", "mate1.fastq", "-2", "mate2.fastq"]
            fastq_args.extend(["-0", "/dev/null", "-s", "/dev/null"])
        else:
            reads_input = ["-0", "reads.fastq"]
            fastq_args.extend(reads_input)
        return_code, _, _ = Cmd["samtools"]["fastq"][fastq_args][
            strand_check_bam
        ] & TEE(retcode=None)
        if return_code:
            self.error("Samtools fastq command failed.")

        salmon_out_folder = "salmon_output"
        # Run Salmon Quant in automatic library-type detection mode (-l A).
        salmon_args = [
            "-i",
            inputs.cdna_index.output.index.path,
            "-l",
            "A",
            reads_input if paired_end else ["-r", "reads.fastq"],
            "-o",
            salmon_out_folder,
            "-p",
            self.requirements.resources.cores,
            "--minAssignedFrags",
            1,
        ]
        return_code, _, _ = Cmd["salmon"]["quant"][salmon_args] & TEE(retcode=None)
        if return_code:
            self.error("Error while running Salmon Quant.")

        # Extract the strandedness code from the JSON report produced by the Salmon tool
        lib_type_report = f"{salmon_out_folder}/lib_format_counts.json"
        outputs.strandedness_report = lib_type_report
        # Use a context manager so the report handle is not leaked.
        with open(lib_type_report) as report_handle:
            strand_code = json.load(report_handle).get("expected_format", "")
        if strand_code:
            try:
                strandedness = STRANDEDNESS_CODES[strand_code]
            except KeyError:
                self.error(
                    f"Unsupported strand code detected: {strand_code} "
                    "Please re-run analysis in user-selected strandedness mode "
                    "or try increasing the subsample size."
                )
        else:
            self.error(
                "Automated detection of strandedness failed. "
                "Re-run analysis in user-selected strandedness mode."
            )
    else:
        strandedness = STRANDEDNESS_CODES[inputs.assay_type]

    raw_counts = "rc.txt"
    tpm = "tpm.txt"
    cpm = "cpm.txt"
    # prepare_gene_counts() has a side effect of creating csv files
    # used in 'rnanorm'.
    features = prepare_gene_counts(
        infile=inputs.aligned_reads.output.gene_counts.path,
        outfile=raw_counts,
        summary="summary.txt",
        strandedness=strandedness,
    )

    annotation_file = inputs.annotation.output.annot.path
    if (
        inputs.annotation.output.source == "UCSC"
        and inputs.annotation.type.startswith("data:annotation:gtf")
    ):
        with open(annotation_file, "r") as infile:
            filedata = infile.read()
        # Replace the missing gene_ids
        annot_data = filedata.replace('gene_id "";', 'gene_id "unknown";')
        # Write the output file
        annotation_file = "annotation_modified.gtf"
        with open(annotation_file, "w") as outfile:
            outfile.write(annot_data)

    # Normalize counts
    rnanorm_args = [
        raw_counts,
        "--tpm-output",
        tpm,
        "--cpm-output",
        cpm,
        "--annotation",
        annotation_file,
    ]
    return_code, _, _ = Cmd["rnanorm"][rnanorm_args] & TEE(retcode=None)
    if return_code:
        self.error("Error while normalizing counts using rnanorm.")

    # Map knowledge-base feature IDs to gene names for the expression set.
    feature_filters = {
        "source": inputs.annotation.output.source,
        "species": inputs.aligned_reads.output.species,
        "feature_id__in": features,
    }
    feature_ids_to_names = {
        f.feature_id: f.name for f in self.feature.filter(**feature_filters)
    }
    prepare_expression_set(
        rc=raw_counts,
        tpm=tpm,
        cpm=cpm,
        feature_dict=feature_ids_to_names,
        outfile_name=f"{name}_expressions",
    )

    # rename and compress the expression files
    rename_columns_and_compress(raw_counts, f"{name}_rc.tab.gz")
    rename_columns_and_compress(tpm, f"{name}_tpm.tab.gz")
    rename_columns_and_compress(cpm, f"{name}_cpm.tab.gz")
    exp_output = f"{name}_{inputs.normalization_type.lower()}.tab.gz"

    # Save the abundance estimates to JSON storage
    json_output = "json.txt"
    expression_to_storage(rc_input=exp_output, rc_output=json_output)

    # Save the outputs
    outputs.counts_summary = "summary.txt"
    outputs.rc = f"{name}_rc.tab.gz"
    outputs.tpm = f"{name}_tpm.tab.gz"
    outputs.cpm = f"{name}_cpm.tab.gz"
    outputs.exp = exp_output
    outputs.exp_json = json_output
    outputs.exp_set = f"{name}_expressions.txt.gz"
    outputs.exp_set_json = f"{name}_expressions.json"
    outputs.exp_type = inputs.normalization_type
    outputs.source = inputs.annotation.output.source
    outputs.species = inputs.aligned_reads.output.species
    outputs.build = inputs.aligned_reads.output.build
    outputs.feature_type = "gene"
import os
from shutil import copy2
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
Process,
SchedulingClass,
StringField,
)
class Bamclipper(Process):
    """Remove primer sequence from BAM alignments by soft-clipping.

    This process is a wrapper for bamclipper which can be found at
    https://github.com/tommyau/bamclipper.
    """

    slug = "bamclipper"
    name = "Bamclipper"
    process_type = "data:alignment:bam:bamclipped:"
    version = "1.5.1"
    category = "BAM processing"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = "{{ alignment|name|default('?') }}"

    class Input:
        """Input fields to process Bamclipper."""

        alignment = DataField("alignment:bam", label="Alignment BAM file")
        # Optional because the clipping step may be skipped; run() checks
        # its presence explicitly when clipping is requested.
        bedpe = DataField("bedpe", label="BEDPE file", required=False)
        skip = BooleanField(
            label="Skip Bamclipper step",
            description="Use this option to skip Bamclipper step.",
            default=False,
        )

    class Output:
        """Output fields to process Bamclipper."""

        bam = FileField(label="Clipped BAM file")
        bai = FileField(label="Index of clipped BAM file")
        stats = FileField(label="Alignment statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run the analysis."""
        bam_build = inputs.alignment.output.build
        bam_species = inputs.alignment.output.species
        name = os.path.splitext(os.path.basename(inputs.alignment.output.bam.path))[0]

        # If so specified, skip bamclipper step. Prepare outputs to match those of as if bamclipping proceeded.
        if inputs.skip:
            bam = f"{name}.bamclipper_skipped.bam"
            bai = f"{bam}.bai"
            copy2(inputs.alignment.output.bam.path, bam)
            self.info("Skipping bamclipper step.")
        else:
            # BUGFIX: bedpe is an optional input; fail with a clear message
            # instead of an AttributeError when it is missing.
            if not inputs.bedpe:
                self.error(
                    "BEDPE file is required when Bamclipper step is not skipped."
                )
            bedpe_build = inputs.bedpe.output.build
            bedpe_species = inputs.bedpe.output.species
            if bam_build != bedpe_build:
                self.error(
                    f"Builds of the genome {bam_build} and annotation "
                    f"{bedpe_build} do not match. Please provide genome and "
                    f"annotation with the same build."
                )
            if bam_species != bedpe_species:
                self.error(
                    f"Species of BAM ({bam_species}) and BEDPE ({bedpe_species}) "
                    "files do not match."
                )

            # Output of bamclipper.sh is a file that is appended a primerclipped just before file
            # extension, e.g. "file.bam" now becomes "file.primerclipped.bam".
            bc_inputs = [
                "-b",
                f"{inputs.alignment.output.bam.path}",
                "-p",
                f"{inputs.bedpe.output.bedpe.path}",
            ]
            Cmd["bamclipper.sh"](bc_inputs)
            bam = f"{name}.primerclipped.bam"
            bai = f"{bam}.bai"
        self.progress(0.5)

        # Index the (possibly clipped) BAM and calculate BAM statistics.
        stderr_file = "stderr.txt"
        (Cmd["samtools"]["index"][f"{bam}"] > stderr_file)()
        if not os.path.exists(f"{bai}"):
            self.error(f"Indexing of {bam} failed.")
        self.progress(0.7)

        # Print to console if errors have been generated.
        if os.path.exists(stderr_file):
            with open(stderr_file, "r") as f:
                all_lines = f.readlines()
                if len(all_lines) > 0:
                    for line in all_lines:
                        print(line)

        stats = f"{bam}_stats.txt"
        (Cmd["samtools"]["flagstat"][f"{bam}"] > stats)()
        self.progress(0.9)

        outputs.bam = bam
        outputs.bai = bai
        outputs.stats = stats
        outputs.species = bam_species
        outputs.build = bam_build
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
FileHtmlField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
)
class Cutadapt3Prime(Process):
    """Process 3' mRNA-seq datasets using Cutadapt tool."""

    slug = "cutadapt-3prime-single"
    name = "Cutadapt (3' mRNA-seq, single-end)"
    process_type = "data:reads:fastq:single:cutadapt:"
    version = "1.4.2"
    category = "FASTQ processing"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    data_name = "{{ reads|name|default('?') }}"

    class Input:
        """Input fields."""

        reads = DataField("reads:fastq:single", label="Select sample(s)")

        class Options:
            """Options."""

            nextseq_trim = IntegerField(
                label="NextSeq/NovaSeq trim",
                description="NextSeq/NovaSeq-specific quality trimming. Trims also dark "
                "cycles appearing as high-quality G bases. This option is mutually "
                "exclusive with the use of standard quality-cutoff trimming and is "
                "suitable for the use with data generated by the recent Illumina "
                "machines that utilize two-color chemistry to encode the four bases.",
                default=10,
            )
            quality_cutoff = IntegerField(
                label="Quality cutoff",
                description="Trim low-quality bases from 3' end of each read before adapter "
                "removal. The use of this option will override the use of "
                "NextSeq/NovaSeq trim option.",
                required=False,
            )
            min_len = IntegerField(
                label="Discard reads shorter than specified minimum length.",
                default=20,
            )
            min_overlap = IntegerField(
                # BUGFIX: label typo ("Mimimum" -> "Minimum").
                label="Minimum overlap",
                description="Minimum overlap between adapter and read for an adapter to be found.",
                default=20,
            )
            times = IntegerField(
                label="Remove up to a specified number of adapters from each read.",
                default=2,
            )

        options = GroupField(Options, label="Options")

    class Output:
        """Output fields."""

        fastq = ListField(FileField(), label="Reads file.")
        report = FileField(label="Cutadapt report")
        fastqc_url = ListField(FileHtmlField(), label="Quality control with FastQC.")
        fastqc_archive = ListField(FileField(), label="Download FastQC archive.")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Get input reads file name (for the first of the possible multiple lanes)
        reads_path = os.path.basename(inputs.reads.output.fastq[0].path)
        assert reads_path.endswith(".fastq.gz")
        name = reads_path[:-9]

        # Concatenate multi-lane read files
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
            > "input_reads.fastq.gz"
        )()

        # An explicit quality cutoff overrides the NextSeq/NovaSeq mode.
        if inputs.options.quality_cutoff is not None:
            read_trim_cutoff = f"--quality-cutoff={inputs.options.quality_cutoff}"
        else:
            read_trim_cutoff = f"--nextseq-trim={inputs.options.nextseq_trim}"

        # First pass: remove polyA tails and runs of high-quality G bases
        # (dark cycles).
        first_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            inputs.options.min_overlap,
            "-n",
            inputs.options.times,
            "-a",
            "polyA=A{20}",
            "-a",
            "QUALITY=G{20}",
            "-j",
            self.requirements.resources.cores,
            "input_reads.fastq.gz",
        ]
        # Second pass: quality trimming plus removal of polyA/TruSeq
        # adapter read-through.
        second_pass_input = [
            "-m",
            inputs.options.min_len,
            read_trim_cutoff,
            "-a",
            "truseq=A{18}AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC",
            "-j",
            self.requirements.resources.cores,
            "-",
        ]
        # Third pass: discard reads that still start with the adapter.
        third_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            inputs.options.min_overlap,
            "-g",
            "truseq=A{18}AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC",
            "--discard-trimmed",
            "-j",
            self.requirements.resources.cores,
            "-o",
            f"{name}_trimmed.fastq.gz",
            "-",
        ]

        # Run Cutadapt, write analysis reports into a report file
        (
            Cmd["cutadapt"][first_pass_input]
            | Cmd["cutadapt"][second_pass_input]
            | Cmd["cutadapt"][third_pass_input]
            > "cutadapt_report.txt"
        )()

        # Prepare final FASTQC report
        fastqc_args = [
            f"{name}_trimmed.fastq.gz",
            "fastqc",
            "fastqc_archive",
            "fastqc_url",
            "--nogroup",
        ]
        return_code, _, _ = Cmd["fastqc.sh"][fastqc_args] & TEE(retcode=None)
        if return_code:
            self.error("Error while preparing FASTQC report.")

        # Save the outputs
        outputs.fastq = [f"{name}_trimmed.fastq.gz"]
        outputs.report = "cutadapt_report.txt"
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class GatkSplitNCigarReads(Process):
    """Splits reads that contain Ns in their cigar string.

    Identifies all N cigar elements and creates k+1 new reads (where k is the number
    of N cigar elements). The first read includes the bases that are to the left of
    the first N element, while the part of the read that is to the right of the N
    (including the Ns) is hard clipped and so on for the rest of the new reads. Used
    for post-processing RNA reads aligned against the full reference.
    """

    slug = "gatk-split-ncigar"
    name = "GATK SplitNCigarReads"
    category = "GATK"
    process_type = "data:alignment:bam:splitncigar"
    version = "1.2.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 2,
            "memory": 16384,
            "storage": 200,
        },
    }
    entity = {"type": "sample"}
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields for GatkSplitNCigarReads."""

        bam = DataField(
            data_type="alignment:bam",
            label="Alignment BAM file",
        )
        ref_seq = DataField(
            data_type="seq:nucleotide",
            label="Reference sequence FASTA file",
        )

        class Advanced:
            """Advanced options."""

            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields for GatkSplitNCigarReads."""

        bam = FileField(label="BAM file with reads split at N CIGAR elements")
        bai = FileField(label="Index of BAM file")
        stats = FileField(label="Alignment statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run analysis."""
        tmp_dir = os.environ.get("TMPDIR")

        # Strip the trailing ".bam" from the input file name and derive
        # the names of the split BAM and its index.
        stem = Path(inputs.bam.output.bam.path).name[:-4]
        split_bam = f"{stem}.splitNcigar.bam"
        split_bai = f"{stem}.splitNcigar.bai"

        # Never request more GC threads than cores available to the process.
        parallel_gc = min(
            inputs.advanced.java_gc_threads, self.requirements.resources.cores
        )
        java_options = (
            f"-XX:ParallelGCThreads={parallel_gc} "
            f"-Xmx{inputs.advanced.max_heap_size}g"
        )

        gatk_args = ["--java-options", java_options]
        gatk_args += ["-R", inputs.ref_seq.output.fasta.path]
        gatk_args += ["-I", inputs.bam.output.bam.path]
        gatk_args += ["-O", split_bam]
        gatk_args += ["--tmp-dir", tmp_dir]

        return_code, stdout, stderr = Cmd["gatk"]["SplitNCigarReads"][gatk_args] & TEE(
            retcode=None
        )
        if return_code:
            # Surface the tool output before aborting the process.
            print(stdout, stderr)
            self.error(
                "GATK SplitNCigarReads failed. Check standard output for more details."
            )

        # Collect alignment statistics for the split BAM.
        stats_file = f"{split_bam}_stats.txt"
        (Cmd["samtools"]["flagstat"][split_bam] > stats_file)()

        outputs.bam = split_bam
        outputs.bai = split_bai
        outputs.stats = stats_file
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
import os
import shutil
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
Process,
SchedulingClass,
StringField,
)
class MarkDuplicates(Process):
    """Remove duplicate reads from BAM file.

    Tool from Picard, wrapped by GATK4. See GATK MarkDuplicates for more information.
    """

    slug = "markduplicates"
    name = "MarkDuplicates"
    process_type = "data:alignment:bam:markduplicate:"
    version = "1.7.0"
    category = "BAM processing"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields to process MarkDuplicates."""

        bam = DataField("alignment:bam", label="Alignment BAM file")
        skip = BooleanField(
            label="Skip MarkDuplicates step",
            description="MarkDuplicates step can be skipped.",
            default=False,
        )
        remove_duplicates = BooleanField(
            label="Remove duplicates",
            description="If true do not write duplicates to the output file "
            "instead of writing them with appropriate flags set.",
            default=False,
        )
        validation_stringency = StringField(
            label="Validation stringency",
            description="Validation stringency for all SAM files read by this "
            "program. Setting stringency to SILENT can improve "
            "performance when processing a BAM file in which "
            "variable-length data (read, qualities, tags) do not "
            "otherwise need to be decoded. Default is STRICT.",
            choices=[
                ("STRICT", "STRICT"),
                ("LENIENT", "LENIENT"),
                ("SILENT", "SILENT"),
            ],
            default="STRICT",
        )
        assume_sort_order = StringField(
            label="Assume sort order",
            description="If not null (default), assume that the input file "
            "has this order even if the header says otherwise."
            "Possible values are unsorted, queryname, coordinate "
            "and unknown.",
            choices=[
                ("", "as in BAM header (default)"),
                ("unsorted", "unsorted"),
                ("queryname", "queryname"),
                ("coordinate", "coordinate"),
                ("duplicate", "duplicate"),
                ("unknown", "unknown"),
            ],
            default="",
        )

        class Advanced:
            """Advanced options."""

            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields to process MarkDuplicates."""

        bam = FileField(label="Marked duplicates BAM file")
        bai = FileField(label="Index of marked duplicates BAM file")
        stats = FileField(label="Alignment statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")
        metrics_file = FileField(label="Metrics from MarkDuplicate process")

    def run(self, inputs, outputs):
        """Run analysis.

        Note that this process can have output as two different filenames. If the process
        is skipped, there will be no modification of the filename, whereas if
        markduplication goes through, it will append 'markduplicates'.
        """
        TMPDIR = os.environ.get("TMPDIR")
        # Prepare output file names.
        file_name = os.path.splitext(os.path.basename(inputs.bam.output.bam.path))[0]
        metrics_file = f"{file_name}_metrics.txt"
        # We do not append anything particular to this object (like we did with e.g.
        # _metrics.txt" for metrics file), because this step can be skipped and the
        # name (e.g. "file_markduplicated.bam") would only confuse the matter.
        species = inputs.bam.output.species
        build = inputs.bam.output.build
        # Never request more GC threads than cores available to the process.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced.java_gc_threads
        )

        if not inputs.skip:
            bam = file_name + ".markduplicates.bam"
            md_inputs = [
                "--java-options",
                f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
                "--INPUT",
                f"{inputs.bam.output.bam.path}",
                "--VALIDATION_STRINGENCY",
                f"{inputs.validation_stringency}",
                "--OUTPUT",
                f"{bam}",
                "--METRICS_FILE",
                f"{metrics_file}",
                "--TMP_DIR",
                TMPDIR,
            ]
            if inputs.remove_duplicates:
                # The flag is only emitted when duplicates should be dropped,
                # so its value is always "true" (previously computed from a
                # redundant if/else on the same boolean).
                md_inputs.extend(["--REMOVE_DUPLICATES", "true"])
            if inputs.assume_sort_order:
                md_inputs.extend(["--ASSUME_SORT_ORDER", f"{inputs.assume_sort_order}"])
            Cmd["gatk"]["MarkDuplicates"](md_inputs)
        else:
            # Process skipped, output filename matches input.
            bam = os.path.basename(inputs.bam.output.bam.path)
            shutil.copy2(inputs.bam.output.bam.path, bam)
            with open(metrics_file, "w") as f:
                f.write("MarkDuplicate process skipped.")

        if os.path.exists(metrics_file):
            print(f"{metrics_file} created.")
        self.progress(0.5)

        # Index the resulting BAM; indexing errors go to stderr.txt.
        stderr_file = "stderr.txt"
        (Cmd["samtools"]["index"][f"{bam}"] > stderr_file)()
        if not os.path.exists(f"{bam}.bai"):
            self.error(f"Indexing of {bam} failed.")
        self.progress(0.7)

        # Print to console if errors have been generated.
        if os.path.exists(stderr_file):
            with open(stderr_file, "r") as f:
                all_lines = f.readlines()
                if len(all_lines) > 0:
                    for line in all_lines:
                        print(line)

        stats = f"{bam}_stats.txt"
        (Cmd["samtools"]["flagstat"][f"{bam}"] > stats)()
        self.progress(0.9)

        outputs.bam = bam
        outputs.bai = bam + ".bai"
        outputs.stats = stats
        outputs.species = species
        outputs.build = build
        outputs.metrics_file = metrics_file
import os
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FileHtmlField,
FloatField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
class TrimGalorePaired(Process):
"""Process paired-end sequencing reads with Trim Galore.
Trim Galore is a wrapper script that makes use of the publicly
available adapter trimming tool Cutadapt and FastQC for quality
control once the trimming process has completed.
Low-quality ends are trimmed from reads in addition to adapter
removal in a single pass. If no sequence was supplied, Trim Galore
will attempt to auto-detect the adapter which has been used. For
this it will analyse the first 1 million sequences of the first
specified file and attempt to find the first 12 or 13bp of the
following standard adapters: Illumina: AGATCGGAAGAGC, Small RNA:
TGGAATTCTCGG, Nextera: CTGTCTCTTATA.
If no adapter contamination can be detected within the first 1
million sequences, or in case of a tie between several different
adapters, Trim Galore defaults to illumina adapters.
For additional information see official
[user guide](https://github.com/FelixKrueger/TrimGalore/blob/master/Docs/Trim_Galore_User_Guide.md).
"""
slug = "trimgalore-paired"
name = "Trim Galore (paired-end)"
process_type = "data:reads:fastq:paired:trimgalore"
version = "1.3.2"
category = "FASTQ processing"
scheduling_class = SchedulingClass.BATCH
entity = {"type": "sample"}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
},
"resources": {
"cores": 4,
"memory": 16384,
},
}
data_name = "{{ reads|name|default('?') }}"
class Input:
"""Input fields of trimGalorePaired."""
reads = DataField("reads:fastq:paired", label="Select paired-end reads")
class QualityTrimming:
"""Quality trimming options."""
quality = IntegerField(
label="Quality cutoff",
description="Trim low-quality ends from reads based on phred score.",
default=20,
)
nextseq = IntegerField(
label="NextSeq/NovaSeq trim cutoff",
description="NextSeq/NovaSeq-specific quality "
"trimming. Trims also dark cycles appearing as "
"high-quality G bases. This will set a specific "
"quality cutoff, but qualities of G bases are ignored. "
"This can not be used with Quality cutoff and will "
"override it.",
required=False,
)
phred = StringField(
label="Phred score encoding",
description="Use either ASCII+33 quality scores as "
"Phred scores (Sanger/Illumina 1.9+ encoding) or "
"ASCII+64 quality scores (Illumina 1.5 encoding) for "
"quality trimming",
choices=[
("--phred33", "ASCII+33"),
("--phred64", "ASCII+64"),
],
default="--phred33",
)
min_length = IntegerField(
label="Minimum length after trimming",
description="Discard reads that became shorter than "
"selected length because of either quality or adapter "
"trimming. Both reads of a read-pair need to be longer "
"than specified length to be printed out to validated "
"paired-end files. If only one read became too short "
"there is the possibility of keeping such unpaired "
"single-end reads with Retain unpaired. A value of 0 "
"disables filtering based on length.",
default=20,
)
max_n = IntegerField(
label="Maximum number of Ns",
description="Read exceeding this limit will result in "
"the entire pair being removed from the trimmed output "
"files.",
required=False,
)
retain_unpaired = BooleanField(
label="Retain unpaired reads after trimming",
description="If only one of the two paired-end reads "
"became too short, the longer read will be written.",
default=False,
)
unpaired_len_1 = IntegerField(
label="Unpaired read length cutoff for mate 1",
default=35,
hidden="!quality_trim.retain_unpaired",
)
unpaired_len_2 = IntegerField(
label="Unpaired read length cutoff for mate 2",
default=35,
hidden="!quality_trim.retain_unpaired",
)
clip_r1 = IntegerField(
label="Trim bases from 5' end of read 1",
description="This may be useful if the qualities were "
"very poor, or if there is some sort of unwanted bias "
"at the 5' end.",
required=False,
)
clip_r2 = IntegerField(
label="Trim bases from 5' end of read 2",
description="This may be useful if the qualities were "
"very poor, or if there is some sort of unwanted bias "
"at the 5' end. For paired-end bisulfite sequencing, "
"it is recommended to remove the first few bp because "
"the end-repair reaction may introduce a bias towards "
"low methylation.",
required=False,
)
three_prime_r1 = IntegerField(
label="Trim bases from 3' end of read 1",
description="Remove bases from the 3' end of read 1 "
"after adapter/quality trimming has been performed. "
"This may remove some unwanted bias from the 3' end "
"that is not directly related to adapter sequence or "
"basecall quality.",
required=False,
)
three_prime_r2 = IntegerField(
label="Trim bases from 3' end of read 2",
description="Remove bases from the 3' end of read 2 "
"after adapter/quality trimming has been performed. "
"This may remove some unwanted bias from the 3' end "
"that is not directly related to adapter sequence or "
"basecall quality.",
required=False,
)
class AdapterTrimming:
"""Adapter trimming options."""
adapter = ListField(
StringField(),
label="Read 1 adapter sequence",
description="Adapter sequences to be trimmed. "
"Also see universal adapters field for predefined "
"adapters. This is mutually exclusive with read 1 "
"adapters file and universal adapters.",
required=False,
default=[],
)
adapter_2 = ListField(
StringField(),
label="Read 2 adapter sequence",
description="Optional adapter sequence to be trimmed "
"off read 2 of paired-end files. This is mutually "
"exclusive with read 2 adapters file and universal "
"adapters.",
required=False,
default=[],
)
adapter_file_1 = DataField(
"seq:nucleotide",
label="Read 1 adapters file",
description="This is mutually exclusive with read 1 "
"adapters and universal adapters.",
required=False,
)
adapter_file_2 = DataField(
"seq:nucleotide",
label="Read 2 adapters file",
description="This is mutually exclusive with read 2 "
"adapters and universal adapters.",
required=False,
)
universal_adapter = StringField(
label="Universal adapters",
description="Instead of default detection use specific "
"adapters. Use 13bp of the Illumina universal adapter, "
"12bp of the Nextera adapter or 12bp of the Illumina "
"Small RNA 3' Adapter. Selecting to trim smallRNA "
"adapters will also lower the length value to 18bp. "
"If the smallRNA libraries are paired-end then read 2 "
"adapter will be set to the Illumina small RNA 5' "
"adapter automatically (GATCGTCGGACT) unless defined "
"explicitly. This is mutually exclusive with manually "
"defined adapters and adapter files.",
choices=[
("--illumina", "Illumina"),
("--nextera", "Nextera"),
("--small_rna", "Illumina small RNA"),
],
required=False,
)
stringency = IntegerField(
label="Overlap with adapter sequence required to trim",
description="Defaults to a very stringent setting of "
"1, i.e. even a single base pair of overlapping "
"sequence will be trimmed of the 3' end of any read.",
default=1,
)
error_rate = FloatField(
label="Maximum allowed error rate",
description="Number of errors divided by the length of "
"the matching region",
default=0.1,
)
class HardTrimming:
"""Hard trim options."""
trim_5 = IntegerField(
label="Hard trim sequences from 3' end",
description="Instead of performing adapter-/quality "
"trimming, this option will simply hard-trim sequences "
"to bp from the 3' end. This is incompatible with "
"other hard trimming options.",
required=False,
)
trim_3 = IntegerField(
label="Hard trim sequences from 5' end",
description="Instead of performing adapter-/quality "
"trimming, this option will simply hard-trim sequences "
"to bp from the 5' end. This is incompatible with "
"other hard trimming options.",
required=False,
)
adapter_trim = GroupField(AdapterTrimming, label="Adapter trimming")
quality_trim = GroupField(QualityTrimming, label="Quality trimming")
hard_trim = GroupField(HardTrimming, label="Hard trimming")
class Output:
"""Output fields."""
fastq = ListField(FileField(), label="Remaining mate 1 reads")
fastq2 = ListField(FileField(), label="Remaining mate 2 reads")
report = FileField(label="Trim galore report", required=False)
fastqc_url = ListField(
FileHtmlField(), label="Mate 1 quality control with FastQC"
)
fastqc_url2 = ListField(
FileHtmlField(), label="Mate 2 quality control with FastQC"
)
fastqc_archive = ListField(FileField(), label="Download mate 1 FastQC archive")
fastqc_archive2 = ListField(FileField(), label="Download mate 2 FastQC archive")
def run(self, inputs, outputs):
"""Run analysis."""
if inputs.adapter_trim.adapter and inputs.adapter_trim.adapter_file_1:
self.error(
"Mate 1 adapters should be either a sequence or a file, but not both."
)
if inputs.adapter_trim.adapter_2 and inputs.adapter_trim.adapter_file_2:
self.error(
"Mate 2 adapters should be either a sequence or a file, but not both."
)
if inputs.hard_trim.trim_5 and inputs.hard_trim.trim_3:
self.error("Only one type of hard trimming can be performed at once.")
mate1_path = os.path.basename(inputs.reads.output.fastq[0].path)
assert mate1_path.endswith(".fastq.gz")
name_mate1 = mate1_path[:-9]
mate2_path = os.path.basename(inputs.reads.output.fastq2[0].path)
assert mate2_path.endswith(".fastq.gz")
name_mate2 = mate2_path[:-9]
merged_r1 = "input_reads_mate1.fastq.gz"
merged_r1_name = merged_r1[:-9]
merged_r2 = "input_reads_mate2.fastq.gz"
merged_r2_name = merged_r2[:-9]
(Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]] > merged_r1)()
(Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq2]] > merged_r2)()
params = [
"--paired",
"--cores",
2, # Actual core usage is 9, for more details see Trim Galore user guide
inputs.quality_trim.phred,
"--length",
inputs.quality_trim.min_length,
"--stringency",
inputs.adapter_trim.stringency,
"-e",
inputs.adapter_trim.error_rate,
]
if inputs.quality_trim.nextseq:
params.extend(["--nextseq", inputs.quality_trim.nextseq])
else:
params.extend(["--quality", inputs.quality_trim.quality])
if inputs.quality_trim.max_n:
params.extend(["--max_n", inputs.quality_trim.max_n])
if inputs.quality_trim.retain_unpaired:
params.extend(
[
"--retain_unpaired",
"--length_1",
inputs.quality_trim.unpaired_len_1,
"--length_2",
inputs.quality_trim.unpaired_len_2,
]
)
if inputs.quality_trim.clip_r1:
params.extend(["--clip_R1", inputs.quality_trim.clip_r1])
if inputs.quality_trim.clip_r2:
params.extend(["--clip_R2", inputs.quality_trim.clip_r2])
if inputs.quality_trim.three_prime_r1:
params.extend(["--three_prime_clip_R1", inputs.quality_trim.three_prime_r1])
if inputs.quality_trim.three_prime_r2:
params.extend(["--three_prime_clip_R2", inputs.quality_trim.three_prime_r2])
if inputs.adapter_trim.adapter:
for adapter in inputs.adapter_trim.adapter:
params.extend(["--adapter", adapter])
if inputs.adapter_trim.adapter_2:
for adapter in inputs.adapter_trim.adapter_2:
params.extend(["--adapter2", adapter])
if inputs.adapter_trim.adapter_file_1:
params.extend(
[
"--adapter",
f"file:{inputs.adapter_trim.adapter_file_1.output.fasta.path}",
]
)
if inputs.adapter_trim.adapter_file_2:
params.extend(
[
"--adapter2",
f"file:{inputs.adapter_trim.adapter_file_2.output.fasta.path}",
]
)
if inputs.adapter_trim.universal_adapter:
params.append(inputs.adapter_trim.universal_adapter)
if any(
[
inputs.adapter_trim.adapter,
inputs.adapter_trim.adapter_2,
inputs.adapter_trim.adapter_file_1,
inputs.adapter_trim.adapter_file_2,
]
):
self.error(
"You can not supply custom adapter sequence and use "
"the universal adapter sequence."
)
if inputs.hard_trim.trim_5 or inputs.hard_trim.trim_3:
self.info(
"Only hard trimming was performed. Skipped quality and adapter trimming."
)
if inputs.hard_trim.trim_5:
params.extend(["--hardtrim5", inputs.hard_trim.trim_5])
if inputs.hard_trim.trim_3:
params.extend(["--hardtrim3", inputs.hard_trim.trim_3])
return_code, _, _ = Cmd["trim_galore"][params][merged_r1, merged_r2] & TEE(
retcode=None
)
if return_code:
self.error("Error while trimming reads.")
self.progress(0.7)
trimmed_r1 = f"{name_mate1}_trim.fastq.gz"
trimmed_r2 = f"{name_mate2}_trim.fastq.gz"
if inputs.hard_trim.trim_5:
os.rename(
f"{merged_r1_name}.{inputs.hard_trim.trim_5}bp_5prime.fq.gz", trimmed_r1
)
os.rename(
f"{merged_r2_name}.{inputs.hard_trim.trim_5}bp_5prime.fq.gz", trimmed_r2
)
elif inputs.hard_trim.trim_3:
os.rename(
f"{merged_r1_name}.{inputs.hard_trim.trim_3}bp_3prime.fq.gz", trimmed_r1
)
os.rename(
f"{merged_r2_name}.{inputs.hard_trim.trim_3}bp_3prime.fq.gz", trimmed_r2
)
else:
os.rename(f"{merged_r1_name}_val_1.fq.gz", trimmed_r1)
os.rename(f"{merged_r2_name}_val_2.fq.gz", trimmed_r2)
trim_report = "trim_galore_report.txt"
(
Cmd["cat"][
f"{merged_r1}_trimming_report.txt",
f"{merged_r2}_trimming_report.txt",
]
> trim_report
)()
outputs.report = trim_report
fastqc_args = [
trimmed_r1,
"fastqc",
"fastqc_archive",
"fastqc_url",
]
self.progress(0.8)
return_code, _, _ = Cmd["fastqc.sh"][fastqc_args] & TEE(retcode=None)
if return_code:
self.error("Error while preparing FASTQC report.")
fastqc_args = [
trimmed_r2,
"fastqc",
"fastqc_archive2",
"fastqc_url2",
]
return_code, _, _ = Cmd["fastqc.sh"][fastqc_args] & TEE(retcode=None)
if return_code:
self.error("Error while preparing FASTQC report.")
self.progress(0.9)
outputs.fastq = [trimmed_r1]
outputs.fastq2 = [trimmed_r2] | /resolwe_bio-53.1.0a2-py3-none-any.whl/resolwe_bio/processes/reads_processing/trimgalore.py | 0.823825 | 0.427456 | trimgalore.py | pypi |
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
FileHtmlField,
ListField,
Process,
SchedulingClass,
)
class BamToFastqPaired(Process):
    """Convert aligned reads in BAM format to paired-end FASTQ files format.

    The BAM file is name-sorted first so that mate pairs stay together,
    then ``samtools fastq`` splits the reads into gzipped mate 1/2 FASTQ
    files, and FastQC reports are produced for both mates.
    """

    slug = "bamtofastq-paired"
    name = "Samtools fastq (paired-end)"
    category = "Samtools"
    process_type = "data:reads:fastq:paired:bamtofastq"
    version = "1.3.1"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
            "storage": 600,
        },
    }
    entity = {"type": "sample"}
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields for BamToFastqPaired."""

        bam = DataField("alignment:bam", label="BAM file")

    class Output:
        """Output fields for BamToFastqPaired."""

        fastq = ListField(FileField(), label="Remaining mate1 reads")
        fastq2 = ListField(FileField(), label="Remaining mate2 reads")
        fastqc_url = ListField(
            FileHtmlField(), label="Mate1 quality control with FastQC"
        )
        fastqc_url2 = ListField(
            FileHtmlField(), label="Mate2 quality control with FastQC"
        )
        fastqc_archive = ListField(FileField(), label="Download mate1 FastQC archive")
        fastqc_archive2 = ListField(FileField(), label="Download mate2 FastQC archive")

    def run(self, inputs, outputs):
        """Run analysis."""
        base_name = Path(inputs.bam.output.bam.path).stem
        name_sorted_bam = f"{base_name}_sorted.bam"
        fastq_mate1 = f"{base_name}_mate1.fastq.gz"
        fastq_mate2 = f"{base_name}_mate2.fastq.gz"

        # For extracted paired-end reads to match, the BAM file needs to
        # be name-sorted first.
        sort_params = [
            "-@",
            self.requirements.resources.cores,
            "-n",
            "-o",
            name_sorted_bam,
            inputs.bam.output.bam.path,
        ]
        exit_code, _, _ = Cmd["samtools"]["sort"][sort_params] & TEE(retcode=None)
        if exit_code:
            self.error("Samtools sort command failed.")
        self.progress(0.3)

        # Convert aligned reads into gzip-compressed (-c 9) paired-end
        # FASTQ files, appending /1 and /2 to read names (-N).
        fastq_params = [
            "-@",
            self.requirements.resources.cores,
            "-c",
            "9",
            "-N",
            "-1",
            fastq_mate1,
            "-2",
            fastq_mate2,
            name_sorted_bam,
        ]
        exit_code, _, _ = Cmd["samtools"]["fastq"][fastq_params] & TEE(retcode=None)
        if exit_code:
            self.error("Samtools fastq command failed.")
        self.progress(0.8)

        def _fastqc_report(fastq_file, archive_field, url_field):
            """Run FastQC on one mate and fail the process on error."""
            code, _, _ = Cmd["fastqc.sh"][
                [fastq_file, "fastqc", archive_field, url_field]
            ] & TEE(retcode=None)
            if code:
                self.error("Error while preparing FASTQC report.")

        _fastqc_report(fastq_mate1, "fastqc_archive", "fastqc_url")
        self.progress(0.95)
        _fastqc_report(fastq_mate2, "fastqc_archive2", "fastqc_url2")

        # Save the outputs.
        outputs.fastq = [fastq_mate1]
        outputs.fastq2 = [fastq_mate2]
import os
from pathlib import Path
import pandas as pd
from joblib import Parallel, delayed, wrap_non_picklable_objects
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
def prepare_read_groups(read_groups, error):
    """Prepare input read groups for GATK AddOrReplaceReadGroups.

    ``read_groups`` is a string of ``-TAG=value`` pairs delimited by ``;``.
    ``error`` is called with a message for every validation failure.
    Returns a flat list alternating tag names and values.
    """
    flat_args = []
    tags = []
    for pair in read_groups.split(";"):
        parts = pair.split("=")
        flat_args.extend(parts)
        tags.append(parts[0])

    # Make sure all arguments to read_group are valid.
    valid_tags = {
        "-LB",
        "-PL",
        "-PU",
        "-SM",
        "-CN",
        "-DS",
        "-DT",
        "-FO",
        "-ID",
        "-KS",
        "-PG",
        "-PI",
        "-PM",
        "-SO",
    }
    unique_tags = set(tags)
    if not unique_tags.issubset(valid_tags):
        error(
            "One or more read_group argument(s) improperly formatted."
            f"{unique_tags}"
        )
    # Check that there are no double entries of arguments to read_group.
    if len(unique_tags) != len(tags):
        error("You have duplicate tags in read_group argument.")
    # Check that all mandatory arguments to read_group are present.
    if not {"-LB", "-PL", "-PU", "-SM"}.issubset(unique_tags):
        error(
            "Missing mandatory read_group argument(s) (-PL, -LB, -PU and -SM are mandatory)."
        )
    return flat_args
def prepare_chromosome_sizes(fai_path, bed_path):
    """Prepare a BED file with chromosome sizes.

    Reads a samtools FASTA index (``.fai``) and writes a tab-separated
    BED file with one ``chrom<TAB>0<TAB>length`` line per chromosome.
    """
    columns = ["chr", "length", "offset", "line_bases", "line_width"]
    index = pd.read_csv(fai_path, sep="\t", header=None, names=columns)
    # Only the name and length columns are needed; start is always 0.
    sizes = index[["chr", "length"]].copy()
    sizes.insert(loc=1, column="start", value=0)
    sizes.to_csv(bed_path, sep="\t", header=False, index=False)
def prepare_scattered_inputs(results_dir, pattern="*"):
    """Build an ``["-I", <file>, ...]`` argument list from scattered outputs.

    Files in ``results_dir`` are expected to follow the four-digit interval
    naming used by GATK SplitIntervals (e.g. ``0001-scattered``); sorting
    by name therefore yields the correct concatenation order.
    """
    args = []
    for scattered_file in sorted(results_dir.glob(pattern)):
        args += ["-I", scattered_file]
    return args
# joblib decorators: ``delayed`` turns a call into a lazy task that
# ``Parallel`` can schedule; ``wrap_non_picklable_objects`` makes the
# function safe to ship to worker processes.
@delayed
@wrap_non_picklable_objects
def run_split_ncigar_reads(
    input_bam, interval_path, ref_seq_path, tmp, parent_dir, memory
):
    """Run SplitNCigarReads on a specified interval.

    Writes the per-interval output BAM to
    ``<parent_dir>/<interval stem>.bam`` and returns the gatk exit code
    (zero on success); on failure the captured output is printed first.
    """
    splitncigar_interval_bam = f"{parent_dir.name}/{interval_path.stem}.bam"
    # ``memory`` is the per-worker Java heap limit in MB (-Xmx<memory>m).
    apply_splitncigar_inputs = [
        "--java-options",
        f"-Xmx{memory}m",
        "-R",
        ref_seq_path,
        "-I",
        input_bam,
        "-O",
        splitncigar_interval_bam,
        "-L",
        interval_path,
        "--tmp-dir",
        tmp,
    ]
    return_code, stdout, stderr = Cmd["gatk"]["SplitNCigarReads"][
        apply_splitncigar_inputs
    ] & TEE(retcode=None)
    if return_code:
        # Workers cannot call self.error; the caller aggregates exit codes.
        print(f"Error in {interval_path.stem} interval.", stdout, stderr)
    return return_code
class RNASeqVC_Preprocess(Process):
    """Prepare BAM file from STAR aligner for HaplotypeCaller.

    This process includes steps MarkDuplicates, SplitNCigarReads,
    read-group assignment and base quality recalibration (BQSR).
    """

    slug = "rnaseq-vc-preprocess"
    name = "RNA-seq variant calling preprocess"
    category = "GATK"
    process_type = "data:alignment:bam:rnaseqvc"
    version = "1.3.0"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
        "resources": {
            "cores": 4,
            "memory": 65536,
            "storage": 600,
        },
    }
    entity = {"type": "sample"}
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields for RNASeqVC_Preprocess."""

        bam = DataField(
            data_type="alignment:bam",
            label="Alignment BAM file from STAR alignment",
        )
        ref_seq = DataField(
            data_type="seq:nucleotide",
            label="Reference sequence FASTA file",
        )
        known_sites = ListField(
            DataField(
                data_type="variants:vcf",
            ),
            label="List of known sites of variation",
            description="One or more databases of known polymorphic sites used to exclude regions around known "
            "polymorphisms from analysis.",
        )
        read_group = StringField(
            label="Replace read groups in BAM",
            description="Replace read groups in a BAM file. This argument enables the user to replace all read "
            "groups in the INPUT file with a single new read group and assign all reads to this read group in "
            "the OUTPUT BAM file. Addition or replacement is performed using GATK "
            "AddOrReplaceReadGroups tool. Input should take the form of -name=value delimited by a "
            '";", e.g. "-ID=1;-LB=GENIALIS;-PL=ILLUMINA;-PU=BARCODE;-SM=SAMPLENAME1". See tool\'s '
            "documentation for more information on tag names. Note that PL, LB, PU and SM are require "
            "fields. See caveats of rewriting read groups in the documentation.",
            default="-ID=1;-LB=GENIALIS;-PL=ILLUMINA;-PU=BARCODE;-SM=SAMPLENAME1",
        )

        class Advanced:
            """Advanced options."""

            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields for RNASeqVC_Preprocess."""

        bam = FileField(label="Preprocessed BAM file")
        bai = FileField(label="Index of BAM file")
        stats = FileField(label="Alignment statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")
        metrics_file = FileField(label="Metrics from MarkDuplicate process")

    def run(self, inputs, outputs):
        """Run analysis."""
        TMPDIR = os.environ.get("TMPDIR")

        # Intermediate file names; each intermediate is deleted as soon as
        # the next pipeline step has consumed it.
        marked_dups = "marked_duplicates.bam"
        marked_dups_index = "marked_duplicates.bai"
        read_groups_file = "read_groups.bam"
        read_groups_index = "read_groups.bai"
        splitncigar = "splitNcigar.bam"

        file_name = Path(inputs.bam.output.bam.path).stem
        bam = f"{file_name}.bam"
        bai = f"{file_name}.bai"
        metrics_file = f"{file_name}_markduplicates_metrics.txt"

        read_groups = prepare_read_groups(
            read_groups=inputs.read_group, error=self.error
        )
        # Never request more GC threads than there are cores available.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced.java_gc_threads
        )

        # Step 1: mark duplicate reads.
        md_inputs = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--INPUT",
            inputs.bam.output.bam.path,
            "--VALIDATION_STRINGENCY",
            "STRICT",
            "--OUTPUT",
            marked_dups,
            "--METRICS_FILE",
            metrics_file,
            "--TMP_DIR",
            TMPDIR,
            "--CREATE_INDEX",
            "true",
        ]
        return_code, stdout, stderr = Cmd["gatk"]["MarkDuplicates"][md_inputs] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error("MarkDuplicates analysis failed.")

        # Prepare files for scattering over chromosomes.
        intervals_path = Path("intervals_folder")
        intervals_path.mkdir(exist_ok=True)
        # NOTE(review): max() means java_gc_threads above the core count
        # oversubscribes the CPU -- confirm this is intended.
        n_jobs = max(inputs.advanced.java_gc_threads, self.requirements.resources.cores)
        chromosome_sizes = "chromosome_sizes.bed"
        prepare_chromosome_sizes(
            fai_path=inputs.ref_seq.output.fai.path, bed_path=chromosome_sizes
        )
        split_intervals_inputs = [
            "-R",
            inputs.ref_seq.output.fasta.path,
            "-L",
            chromosome_sizes,
            "--scatter-count",
            n_jobs,
            "--subdivision-mode",
            "BALANCING_WITHOUT_INTERVAL_SUBDIVISION_WITH_OVERFLOW",
            "-O",
            intervals_path,
        ]
        return_code, stdout, stderr = Cmd["gatk"]["SplitIntervals"][
            split_intervals_inputs
        ] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("SplitIntervals tool failed.")

        # Step 2: run SplitNCigarReads per interval in parallel; each worker
        # gets a proportional share (90%) of the process memory budget.
        output_bams = Path("output_bams")
        output_bams.mkdir()
        memory = int(0.9 * (self.requirements.resources.memory / n_jobs))
        intervals = [path for path in intervals_path.glob("*.interval_list")]
        return_codes = Parallel(n_jobs=n_jobs)(
            run_split_ncigar_reads(
                input_bam=marked_dups,
                interval_path=interval_path,
                ref_seq_path=inputs.ref_seq.output.fasta.path,
                tmp=TMPDIR,
                parent_dir=output_bams,
                memory=memory,
            )
            for interval_path in intervals
        )
        if any(return_codes):
            self.error("GATK SplitNCigarReads tool failed.")
        Path(marked_dups).unlink(missing_ok=True)
        Path(marked_dups_index).unlink(missing_ok=True)

        # Merge per-interval BAM files back into a single BAM file.
        input_lists = prepare_scattered_inputs(results_dir=output_bams, pattern="*.bam")
        gather_bam_inputs = [
            "-O",
            splitncigar,
            input_lists,
            "--TMP_DIR",
            TMPDIR,
        ]
        return_code, stdout, stderr = Cmd["gatk"]["GatherBamFiles"][
            gather_bam_inputs
        ] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("GatherBamFiles tool failed.")
        # Delete all files produced by SplitNCigarReads.
        for bam_file in output_bams.glob("*"):
            Path(bam_file).unlink(missing_ok=True)
        Path(output_bams).rmdir()

        # Step 3: assign/replace read groups.
        arg_rg = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--INPUT",
            splitncigar,
            "--VALIDATION_STRINGENCY",
            "STRICT",
            "--OUTPUT",
            read_groups_file,
            "--TMP_DIR",
            TMPDIR,
            "--CREATE_INDEX",
            "true",
        ]
        arg_rg.extend(read_groups)
        return_code, stdout, stderr = Cmd["gatk"]["AddOrReplaceReadGroups"][
            arg_rg
        ] & TEE(retcode=None)
        if return_code:
            print(stdout, stderr)
            self.error("AddOrReplaceReadGroups failed.")
        # Delete merged BAM file produced by GatherBamFiles.
        Path(splitncigar).unlink(missing_ok=True)

        # Step 4: base quality score recalibration (BQSR).
        recal_table = "recalibration.table"
        br_inputs = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--input",
            read_groups_file,
            "--output",
            recal_table,
            "--reference",
            inputs.ref_seq.output.fasta.path,
            "--read-validation-stringency",
            "STRICT",
            "--use-original-qualities",
            "--tmp-dir",
            TMPDIR,
        ]
        # Add known sites to the input parameters of BaseRecalibrator.
        for site in inputs.known_sites:
            br_inputs.extend(["--known-sites", f"{site.output.vcf.path}"])
        return_code, stdout, stderr = Cmd["gatk"]["BaseRecalibrator"][br_inputs] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error("BaseRecalibrator failed.")

        # Apply base recalibration.
        ab_inputs = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--input",
            read_groups_file,
            "--output",
            bam,
            "--reference",
            inputs.ref_seq.output.fasta.path,
            "--bqsr-recal-file",
            recal_table,
            "--read-validation-stringency",
            "STRICT",
            "--use-original-qualities",
            "--tmp-dir",
            TMPDIR,
        ]
        return_code, stdout, stderr = Cmd["gatk"]["ApplyBQSR"][ab_inputs] & TEE(
            retcode=None
        )
        if return_code:
            print(stdout, stderr)
            self.error("ApplyBQSR failed.")
        # Delete BAM file produced by AddOrReplaceReadGroups.
        Path(read_groups_file).unlink(missing_ok=True)
        Path(read_groups_index).unlink(missing_ok=True)

        stats = f"{bam}_stats.txt"
        (Cmd["samtools"]["flagstat"][bam] > stats)()

        outputs.bam = bam
        outputs.bai = bai
        outputs.stats = stats
        outputs.species = inputs.bam.output.species
        outputs.build = inputs.bam.output.build
        outputs.metrics_file = metrics_file
import gzip
import shutil
from pathlib import Path
from joblib import Parallel, delayed, wrap_non_picklable_objects
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FileHtmlField,
FloatField,
GroupField,
IntegerField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
def prepare_inputs(mate1, mate2=None):
    """Prepare list with input, output and stats files.

    Builds one dict per sequencing lane mapping the (renamed) input
    FASTQ path(s) to the preprocessed output, statistics and original
    file names. ``mate2`` triggers the paired-end layout.
    """
    lanes = []
    if mate2:
        for first, second in zip(mate1, mate2):
            name_1, path_1, base_1 = rename_input_files(fastq=first)
            name_2, path_2, base_2 = rename_input_files(fastq=second)
            lanes.append(
                {
                    "input_fastq": path_1,
                    "input_fastq2": path_2,
                    "output_fastq": f"{name_1}_preprocessed.fastq.gz",
                    "output_fastq2": f"{name_2}_preprocessed.fastq.gz",
                    "stats": f"{name_1}_statistics.txt",
                    "original_name": f"{base_1}_preprocessed.fastq.gz",
                    "original_name2": f"{base_2}_preprocessed.fastq.gz",
                }
            )
    else:
        for first in mate1:
            name_1, path_1, base_1 = rename_input_files(fastq=first)
            lanes.append(
                {
                    "input_fastq": path_1,
                    "output_fastq": f"{name_1}_preprocessed.fastq.gz",
                    "stats": f"{name_1}_statistics.txt",
                    "original_name": f"{base_1}_preprocessed.fastq.gz",
                }
            )
    return lanes
def rename_input_files(fastq):
    """Copy a FASTQ file into the working directory under a sanitized name.

    Spaces in the file name are replaced with underscores. Returns a tuple
    ``(sanitized name without suffix, new Path, original name without
    suffix)``; the ``.fastq.gz`` suffix is required.
    """
    source = Path(fastq.path)
    original_name = source.name
    # Work on a local copy so the original input stays untouched.
    shutil.copy(source, Path.cwd())
    sanitized = Path(original_name.replace(" ", "_"))
    Path(original_name).rename(sanitized)
    assert sanitized.name.endswith(".fastq.gz")
    suffix_len = len(".fastq.gz")
    return (
        sanitized.name[:-suffix_len],
        sanitized,
        original_name[:-suffix_len],
    )
def rename_preprocessed_files(input_files, paired_end=None):
    """Rename preprocessed files back to the original name.

    ``input_files`` is the lane-dict list from ``prepare_inputs``; for
    paired-end data the mate 2 files are renamed as well.
    """
    key_pairs = [("output_fastq", "original_name")]
    if paired_end:
        key_pairs.append(("output_fastq2", "original_name2"))
    for lane in input_files:
        for src_key, dst_key in key_pairs:
            Path(lane[src_key]).rename(lane[dst_key])
def prepare_fastqc(fastqgz, error):
    """Prepare FastQC data for output.

    For every FASTQ file, verify that the FastQC ZIP archive exists in the
    working directory (calling ``error`` otherwise) and return two lists:
    archive paths and report-link dicts for the HTML output fields.
    """
    archives = []
    report_links = []
    for fastq_file in fastqgz:
        sample = Path(fastq_file).name.replace(".fastq.gz", "")
        zip_file = Path(f"{sample}_fastqc.zip")
        if not zip_file.is_file():
            error(f"FastQC failed to produce {zip_file} file.")
        report_dir = Path("fastqc") / f"{sample}_fastqc"
        archives.append(str(zip_file))
        report_links.append(
            {
                "file": str(report_dir / "fastqc_report.html"),
                "refs": [str(report_dir)],
            }
        )
    return archives, report_links
# joblib decorators: ``delayed`` turns a call into a lazy task that
# ``Parallel`` can schedule; ``wrap_non_picklable_objects`` makes the
# function safe to ship to worker processes.
@delayed
@wrap_non_picklable_objects
def run_bbduk(input_reads, bbduk_inputs, paired_end=False):
    """Run BBDuk on separate lanes.

    ``input_reads`` is one lane dict produced by ``prepare_inputs``;
    ``bbduk_inputs`` holds the remaining BBDuk command-line arguments.
    Returns a ``(return_code, stderr)`` tuple from the bbduk.sh call.
    """
    # Per-lane in/out/stats arguments are prepended to the shared list.
    if paired_end:
        input_file = [
            f"in='{input_reads['input_fastq']}'",
            f"in2='{input_reads['input_fastq2']}'",
            f"out='{input_reads['output_fastq']}'",
            f"out2='{input_reads['output_fastq2']}'",
            f"stats='{input_reads['stats']}'",
        ]
    else:
        input_file = [
            f"in='{input_reads['input_fastq']}'",
            f"out='{input_reads['output_fastq']}'",
            f"stats='{input_reads['stats']}'",
        ]
    bbduk_inputs = input_file + bbduk_inputs
    return_code, stdout, stderr = Cmd["bbduk.sh"][bbduk_inputs] & TEE(retcode=None)
    if return_code:
        # Workers cannot call self.error; the caller inspects the result.
        print(stderr, stdout)
    return return_code, stderr
class BBDukSingle(Process):
"""Run BBDuk on single-end reads.
BBDuk combines the most common data-quality-related trimming, filtering,
and masking operations into a single high-performance tool. It is capable
of quality-trimming and filtering, adapter-trimming, contaminant-filtering
via kmer matching, sequence masking, GC-filtering, length filtering,
entropy-filtering, format conversion, histogram generation, subsampling,
quality-score recalibration, kmer cardinality estimation, and various
other operations in a single pass. See
[here](https://jgi.doe.gov/data-and-tools/bbtools/bb-tools-user-guide/bbduk-guide/)
for more information.
"""
slug = "bbduk-single"
name = "BBDuk (single-end)"
process_type = "data:reads:fastq:single:bbduk"
version = "3.1.2"
category = "FASTQ processing"
data_name = "{{ reads|name|default('?') }}"
scheduling_class = SchedulingClass.BATCH
persistence = Persistence.CACHED
entity = {
"type": "sample",
}
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
},
"resources": {
"cores": 4,
"memory": 8192,
},
}
class Input:
"""Input fields to process BBDukSingle."""
reads = DataField("reads:fastq:single", label="Reads")
min_length = IntegerField(
label="Minimum length",
default=10,
description="Reads shorter than the minimum length will be discarded after trimming.",
)
class Reference:
"""Reference."""
sequences = ListField(
DataField("seq:nucleotide"),
label="Sequences",
required=False,
description="Reference sequences include adapters, contaminants, and "
"degenerate sequences. They can be provided in a multi-sequence FASTA "
"file or as a set of literal sequences below.",
)
literal_sequences = ListField(
StringField(),
label="Literal sequences",
required=False,
default=[],
description="Literal sequences can be specified by inputting them one by "
"one and pressing Enter after each sequence.",
)
class Processing:
"""Processing parameters."""
kmer_length = IntegerField(
label="Kmer length",
default=27,
description="Kmer length used for finding contaminants. "
"Contaminants shorter than kmer length will not be found. "
"Kmer length must be at least 1.",
)
check_reverse_complements = BooleanField(
label="Check reverse complements",
description="Look for reverse complements of kmers in addition to forward kmers",
default=True,
)
mask_middle_base = BooleanField(
label="Mask the middle base of a kmer",
description="Treat the middle base of a kmer as a wildcard to increase "
"sensitivity in the presence of errors.",
default=True,
)
min_kmer_hits = IntegerField(
label="Minimum number of kmer hits",
default=1,
description="Reads need at least this many matching kmers to be considered "
"matching the reference.",
)
min_kmer_fraction = FloatField(
label="Minimum kmer fraction",
default=0.0,
description="A read needs at least this fraction of its total kmers "
"to hit a reference in order to be considered a match. If this and "
"'Minimum number of kmer hits' are set, the greater is used.",
)
min_coverage_fraction = FloatField(
label="Minimum coverage fraction",
default=0.0,
description="A read needs at least this fraction of its total bases to be "
"covered by reference kmers to be considered a match. If specified, "
"'Minimum coverage fraction' overrides 'Minimum number of kmer hits' and "
"'Minimum kmer fraction'.",
)
hamming_distance = IntegerField(
label="Maximum Hamming distance for kmers (substitutions only)",
description="Hamming distance i.e. the number of mismatches allowed in the kmer.",
default=0,
)
query_hamming_distance = IntegerField(
label="Hamming distance for query kmers",
description="Set a hamming distance for query kmers instead of the read kmers. "
"This makes the read processing much slower, but does not use additional memory.",
default=0,
)
edit_distance = IntegerField(
label="Maximum edit distance from reference kmers "
"(substitutions and indels)",
default=0,
)
hamming_distance2 = IntegerField(
label="Hamming distance for short kmers when looking for shorter kmers",
default=0,
)
query_hamming_distance2 = IntegerField(
label="Hamming distance for short query kmers when looking for shorter kmers",
default=0,
)
edit_distance2 = IntegerField(
label="Maximum edit distance from short reference kmers "
"(substitutions and indels) when looking for shorter kmers",
default=0,
)
forbid_N = BooleanField(
label="Forbid matching of read kmers containing N",
default=False,
description="By default, these will match a reference 'A' if"
"'Maximum Hamming distance for kmers' > 0 or "
"'Maximum edit distance from reference kmers' > 0, to increase sensitivity.",
)
find_best_match = BooleanField(
label="Find best match",
description="If multiple matches, associate read with sequence sharing most kmers.",
default=True,
)
class Operations:
"""Trimming, filtering and masking parameters."""
k_trim = StringField(
label="Trimming protocol to remove bases matching reference kmers from reads",
choices=[
("f", "Don't trim"),
("r", "Trim to the right"),
("l", "Trim to the left"),
],
default="f",
)
k_mask = StringField(
label="Symbol to replace bases matching reference kmers",
default="f",
description="Allows any non-whitespace character other than t or f. "
"Processes short kmers on both ends.",
)
mask_fully_covered = BooleanField(
label="Only mask bases that are fully covered by kmers",
default=False,
)
min_k = IntegerField(
label="Look for shorter kmers at read tips down to this length when k-trimming "
"or masking",
default=-1,
description="-1 means disabled. Enabling this will disable treating the middle "
"base of a kmer as a wildcard to increase sensitivity in the presence of errors.",
)
quality_trim = StringField(
label="Trimming protocol to remove bases with quality below "
"the minimum average region quality from read ends",
choices=[
("f", "Trim neither end"),
("rl", "Trim both ends"),
("r", "Trim only right end"),
("l", "Trim only left end"),
("w", "Use sliding window"),
],
default="f",
description="Performed after looking for kmers. If enabled, set also "
"'Average quality below which to trim region'.",
)
trim_quality = IntegerField(
label="Average quality below which to trim region",
default=6,
disabled="operations.quality_trim === 'f'",
description="Set trimming protocol to enable this parameter.",
)
quality_encoding_offset = StringField(
label="Quality encoding offset",
choices=[
("33", "Sanger / Illumina 1.8+ (33)"),
("64", "Illumina up to 1.3+, 1.5+ (64)"),
("auto", "Auto"),
],
default="auto",
description="Quality encoding offset for input FASTQ files.",
)
ignore_bad_quality = BooleanField(
label="Don't crash if quality values appear to be incorrect",
default=False,
)
trim_poly_A = IntegerField(
label="Minimum length of poly-A or poly-T tails to trim on either end of reads",
default=0,
)
min_length_fraction = FloatField(
label="Minimum length fraction",
default=0.0,
description="Reads shorter than this fraction of original length after "
"trimming will be discarded.",
)
max_length = IntegerField(
label="Maximum length",
required=False,
description="Reads longer than this after trimming will be discarded.",
)
min_average_quality = IntegerField(
label="Minimum average quality",
default=0,
description="Reads with average quality (after trimming) below this will be discarded.",
)
min_average_quality_bases = IntegerField(
label="Number of initial bases to calculate minimum average quality from",
default=0,
description="If positive, calculate minimum average quality "
"from this many initial bases",
)
min_base_quality = IntegerField(
label="Minimum base quality below which reads are discarded after trimming",
default=0,
)
min_consecutive_bases = IntegerField(
label="Minimum number of consecutive called bases",
default=0,
)
trim_pad = IntegerField(
label="Number of bases to trim around matching kmers",
default=0,
)
min_overlap = IntegerField(
label="Minum number of overlapping bases",
default=14,
description="Require this many bases of overlap for detection.",
)
min_insert = IntegerField(
label="Minimum insert size",
default=40,
description="Require insert size of at least this for overlap. "
"Should be reduced to 16 for small RNA sequencing.",
)
force_trim_left = IntegerField(
label="Position from which to trim bases to the left",
default=0,
)
force_trim_right = IntegerField(
label="Position from which to trim bases to the right",
default=0,
)
force_trim_right2 = IntegerField(
label="Number of bases to trim from the right end",
default=0,
)
force_trim_mod = IntegerField(
label="Modulo to right-trim reads",
default=0,
description="Trim reads to the largest multiple of modulo.",
)
restrict_left = IntegerField(
label="Number of leftmost bases to look in for kmer matches",
default=0,
)
restrict_right = IntegerField(
label="Number of rightmost bases to look in for kmer matches",
default=0,
)
min_GC = FloatField(
label="Minimum GC content",
default=0.0,
description="Discard reads with lower GC content.",
)
max_GC = FloatField(
label="Maximum GC content",
default=1.0,
description="Discard reads with higher GC content.",
)
maxns = IntegerField(
label="Max Ns after trimming",
default=-1,
description="If non-negative, reads with more Ns than this "
"(after trimming) will be discarded.",
)
toss_junk = BooleanField(
label="Discard reads with invalid characters as bases",
default=False,
)
class HeaderParsing:
"""Header-parsing parameters."""
chastity_filter = BooleanField(
label="Discard reads that fail Illumina chastity filtering",
default=False,
description="Discard reads with id containing ' 1:Y:' or ' 2:Y:'.",
)
barcode_filter = BooleanField(
label="Remove reads with unexpected barcodes",
default=False,
description="Remove reads with unexpected barcodes if barcodes are set, "
"or barcodes containing 'N' otherwise. A barcode must be the last part "
"of the read header.",
)
barcode_files = ListField(
DataField("seq:nucleotide"),
label="Barcode sequences",
description="FASTA file(s) with barcode sequences.",
required=False,
)
barcode_sequences = ListField(
StringField(),
label="Literal barcode sequences",
required=False,
default=[],
description="Literal barcode sequences can be specified by inputting "
"them one by one and pressing Enter after each sequence.",
)
x_min = IntegerField(
label="Minimum X coordinate",
default=-1,
description="If positive, discard reads with a smaller X coordinate.",
)
y_min = IntegerField(
label="Minimum Y coordinate",
default=-1,
description="If positive, discard reads with a smaller Y coordinate.",
)
x_max = IntegerField(
label="Maximum X coordinate",
default=-1,
description="If positive, discard reads with a larger X coordinate.",
)
y_max = IntegerField(
label="Maximum Y coordinate",
default=-1,
description="If positive, discard reads with a larger Y coordinate.",
)
class Complexity:
    """Complexity parameters.

    Entropy-based filtering/masking of low-complexity reads.
    """

    entropy = FloatField(
        label="Minimum entropy",
        default=-1.0,
        description="Set between 0 and 1 to filter reads with entropy below that value. "
        "Higher is more stringent.",
    )
    entropy_window = IntegerField(
        label="Length of sliding window used to calculate entropy",
        default=50,
        description="To use the sliding window set minimum entropy in range between 0.0 and 1.0.",
    )
    entropy_k = IntegerField(
        # Typo fixed in the label ("calcuate" -> "calculate").
        label="Length of kmers used to calculate entropy",
        default=5,
    )
    entropy_mask = BooleanField(
        label="Mask low-entropy parts of sequences with N instead of discarding",
        default=False,
    )
    # NOTE(review): BBDuk's minbasefrequency is documented as a fraction
    # between 0 and 1; IntegerField may have been intended as FloatField --
    # confirm before changing the field type.
    min_base_frequency = IntegerField(
        label="Minimum base frequency",
        default=0,
    )
class Fastqc:
    """FastQC parameters."""

    # Maps to FastQC's base-grouping flag for the post-trimming QC report.
    nogroup = BooleanField(
        label="Disable grouping of bases for reads >50bp",
        default=False,
        description="All reports will show data for every base in the read. Using this option "
        "will cause fastqc to crash and burn if you use it on really long reads.",
    )
# Expose the parameter groups defined above as grouped process inputs.
reference = GroupField(Reference, label="Reference")
processing = GroupField(Processing, label="Processing parameters")
operations = GroupField(
    Operations, label="Trimming, filtering and masking parameters."
)
header_parsing = GroupField(HeaderParsing, label="Header-parsing parameters")
complexity = GroupField(Complexity, label="Complexity parameters")
fastqc = GroupField(Fastqc, label="FastQC parameters")
class Output:
    """Output fields."""

    # Filtered reads plus per-lane statistics and FastQC reports.
    fastq = ListField(FileField(), label="Remaining reads")
    statistics = ListField(FileField(), label="Statistics")
    fastqc_url = ListField(FileHtmlField(), label="Quality control with FastQC")
    fastqc_archive = ListField(FileField(), label="Download FastQC archive")
def run(self, inputs, outputs):
    """Run the analysis.

    Concatenates reference/barcode FASTA inputs, runs BBDuk once per
    sequencing lane in parallel, gzips the produced statistics, and
    finally runs FastQC on the filtered reads.
    """
    input_references = "input_references.fasta.gz"
    input_barcodes = "input_barcodes.fasta.gz"
    num_of_lanes = len(inputs.reads.output.fastq)

    input_reads = prepare_inputs(mate1=inputs.reads.output.fastq)

    # Merge all reference FASTA files into a single gzipped FASTA for BBDuk.
    if inputs.reference.sequences:
        with gzip.open(input_references, "wb") as outfile:
            for sequences in inputs.reference.sequences:
                with gzip.open(sequences.output.fastagz.path, "rb") as infile:
                    shutil.copyfileobj(infile, outfile)

    # Barcodes may come from FASTA files, literal sequences, or both.
    if inputs.header_parsing.barcode_files:
        with gzip.open(input_barcodes, "wb") as outfile:
            for barcode_file in inputs.header_parsing.barcode_files:
                with gzip.open(barcode_file.output.fastagz.path, "rb") as infile:
                    shutil.copyfileobj(infile, outfile)
        barcodes = [input_barcodes] + inputs.header_parsing.barcode_sequences
    else:
        barcodes = inputs.header_parsing.barcode_sequences

    self.progress(0.1)

    args = [
        "statscolumns=5",
        f"k={inputs.processing.kmer_length}",
        f"rcomp={inputs.processing.check_reverse_complements}",
        f"maskmiddle={inputs.processing.mask_middle_base}",
        f"minkmerhits={inputs.processing.min_kmer_hits}",
        f"minkmerfraction={inputs.processing.min_kmer_fraction}",
        f"mincovfraction={inputs.processing.min_coverage_fraction}",
        f"hammingdistance={inputs.processing.hamming_distance}",
        f"qhdist={inputs.processing.query_hamming_distance}",
        f"editdistance={inputs.processing.edit_distance}",
        f"hammingdistance2={inputs.processing.hamming_distance2}",
        f"qhdist2={inputs.processing.query_hamming_distance2}",
        f"editdistance2={inputs.processing.edit_distance2}",
        f"forbidn={inputs.processing.forbid_N}",
        f"findbestmatch={inputs.processing.find_best_match}",
        f"maskfullycovered={inputs.operations.mask_fully_covered}",
        f"mink={inputs.operations.min_k}",
        f"trimq={inputs.operations.trim_quality}",
        f"qin={inputs.operations.quality_encoding_offset}",
        f"trimpolya={inputs.operations.trim_poly_A}",
        f"minlength={inputs.min_length}",
        f"minlengthfraction={inputs.operations.min_length_fraction}",
        f"minavgquality={inputs.operations.min_average_quality}",
        f"maqb={inputs.operations.min_average_quality_bases}",
        f"minbasequality={inputs.operations.min_base_quality}",
        f"minconsecutivebases={inputs.operations.min_consecutive_bases}",
        f"trimpad={inputs.operations.trim_pad}",
        f"minoverlap={inputs.operations.min_overlap}",
        f"mininsert={inputs.operations.min_insert}",
        f"forcetrimleft={inputs.operations.force_trim_left}",
        f"forcetrimright={inputs.operations.force_trim_right}",
        f"forcetrimright2={inputs.operations.force_trim_right2}",
        f"forcetrimmod={inputs.operations.force_trim_mod}",
        f"restrictleft={inputs.operations.restrict_left}",
        f"restrictright={inputs.operations.restrict_right}",
        f"mingc={inputs.operations.min_GC}",
        f"maxgc={inputs.operations.max_GC}",
        f"maxns={inputs.operations.maxns}",
        f"tossjunk={inputs.operations.toss_junk}",
        f"chastityfilter={inputs.header_parsing.chastity_filter}",
        f"barcodefilter={inputs.header_parsing.barcode_filter}",
        f"xmin={inputs.header_parsing.x_min}",
        f"ymin={inputs.header_parsing.y_min}",
        f"xmax={inputs.header_parsing.x_max}",
        f"ymax={inputs.header_parsing.y_max}",
        f"entropy={inputs.complexity.entropy}",
        f"entropywindow={inputs.complexity.entropy_window}",
        f"entropyk={inputs.complexity.entropy_k}",
        f"minbasefrequency={inputs.complexity.min_base_frequency}",
        f"entropymask={inputs.complexity.entropy_mask}",
        # Split the JVM heap across concurrent per-lane jobs, with headroom.
        f"-Xmx{int(0.85*(self.requirements.resources.memory/num_of_lanes))}m",
    ]

    # One BBDuk job per lane; divide the available cores evenly when possible.
    if self.requirements.resources.cores >= num_of_lanes:
        args.append(
            f"threads={int(self.requirements.resources.cores//num_of_lanes)}"
        )
        n_jobs = num_of_lanes
    else:
        self.warning(
            f"There are more sequencing lanes ({num_of_lanes}) than there are "
            f"available cores ({self.requirements.resources.cores}). For the "
            "most optimal performance, use at least the same number of lanes "
            "and cores."
        )
        args.append("threads=1")
        n_jobs = self.requirements.resources.cores

    if inputs.reference.sequences:
        args.append(f"ref={input_references}")
    if inputs.reference.literal_sequences:
        literal_sequences_joined = ",".join(inputs.reference.literal_sequences)
        args.append(f"literal={literal_sequences_joined}")
    if inputs.operations.k_trim != "f":
        args.append(f"ktrim={inputs.operations.k_trim}")
    if inputs.operations.k_mask != "f" and inputs.operations.k_mask != "t":
        args.append(f"kmask={inputs.operations.k_mask}")
    if inputs.operations.quality_trim != "f":
        args.append(f"qtrim={inputs.operations.quality_trim}")
    if inputs.operations.ignore_bad_quality:
        args.append("ignorebadquality")
    if inputs.operations.max_length:
        args.append(f"maxlength={inputs.operations.max_length}")
    if barcodes:
        barcodes = ",".join(barcodes)
        args.append(f"barcodes={barcodes}")

    # NOTE(review): run_bbduk is defined elsewhere in this module and is
    # assumed to yield (returncode, message) results compatible with
    # joblib's Parallel -- confirm against its definition.
    process_outputs = Parallel(n_jobs=n_jobs)(
        run_bbduk(input_reads=input_set, bbduk_inputs=args)
        for input_set in input_reads
    )
    for output in process_outputs:
        if output[0]:
            self.error("BBDuk failed.", output[1])
    self.progress(0.7)

    # Gzip each per-lane statistics file for the output field.
    statistics = []
    for stats in input_reads:
        with open(stats["stats"], "rb") as orig_file:
            with gzip.open(f"{stats['stats']}.gz", "wb") as zipped_file:
                zipped_file.writelines(orig_file)
        statistics.append(f"{stats['stats']}.gz")

    output_path = Path("./fastqc")
    output_path.mkdir(exist_ok=True)

    rename_preprocessed_files(input_files=input_reads)
    fastqgz = [fastq["original_name"] for fastq in input_reads]
    fastqc_inputs = fastqgz + ["--extract", f"--outdir={str(output_path)}"]
    if inputs.fastqc.nogroup:
        # Fixed: FastQC's command-line flag is spelled "--nogroup";
        # "--no-group" is not recognized by FastQC.
        fastqc_inputs.append("--nogroup")

    return_code, _, stderr = Cmd["fastqc"][fastqc_inputs] & TEE(retcode=None)
    if return_code:
        self.error("FastQC failed. ", stderr)

    for fastqc_zip in output_path.glob("*_fastqc.zip"):
        shutil.move(str(fastqc_zip), ".")

    fastqc, fastqc_url = prepare_fastqc(fastqgz=fastqgz, error=self.error)

    # Warn when the configuration amounts to length filtering only.
    if (
        inputs.operations.k_trim == "f"
        and inputs.operations.quality_trim == "f"
        and not inputs.reference.sequences
        and not inputs.reference.literal_sequences
        and inputs.operations.min_average_quality <= 0
    ):
        self.warning(
            "Reference sequences, trimming mode, and minimum average quality are "
            "unspecified. Only filtering of reads by length is applied."
        )

    outputs.fastq = fastqgz
    outputs.statistics = statistics
    outputs.fastqc_url = fastqc_url
    outputs.fastqc_archive = fastqc
class BBDukPaired(Process):
    """Run BBDuk on paired-end reads.

    BBDuk combines the most common data-quality-related trimming, filtering,
    and masking operations into a single high-performance tool. It is capable
    of quality-trimming and filtering, adapter-trimming, contaminant-filtering
    via kmer matching, sequence masking, GC-filtering, length filtering,
    entropy-filtering, format conversion, histogram generation, subsampling,
    quality-score recalibration, kmer cardinality estimation, and various
    other operations in a single pass. See
    [here](https://jgi.doe.gov/data-and-tools/bbtools/bb-tools-user-guide/bbduk-guide/)
    for more information.
    """

    slug = "bbduk-paired"
    name = "BBDuk (paired-end)"
    process_type = "data:reads:fastq:paired:bbduk"
    version = "3.1.2"
    category = "FASTQ processing"
    data_name = "{{ reads|name|default('?') }}"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    # Each run is attached to a single sample.
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {
            "cores": 4,
            "memory": 8192,
        },
    }

    class Input:
        """Input fields to process BBDukPaired."""

        reads = DataField("reads:fastq:paired", label="Reads")
        min_length = IntegerField(
            label="Minimum length",
            default=10,
            description="Reads shorter than the minimum length will be discarded after trimming.",
        )
class Reference:
    """Reference.

    Adapter/contaminant sequences to match against, given as FASTA
    files and/or literal strings.
    """

    sequences = ListField(
        DataField("seq:nucleotide"),
        label="Sequences",
        required=False,
        description="Reference sequences include adapters, contaminants, and "
        "degenerate sequences. They can be provided in a multi-sequence FASTA "
        "file or as a set of literal sequences below.",
    )
    literal_sequences = ListField(
        StringField(),
        label="Literal sequences",
        required=False,
        default=[],
        description="Literal sequences can be specified by inputting them one by "
        "one and pressing Enter after each sequence.",
    )
class Processing:
    """Processing parameters.

    Kmer-matching parameters used when comparing reads against the
    reference sequences.
    """

    kmer_length = IntegerField(
        label="Kmer length",
        default=27,
        description="Kmer length used for finding contaminants. "
        "Contaminants shorter than kmer length will not be found. "
        "Kmer length must be at least 1.",
    )
    check_reverse_complements = BooleanField(
        label="Check reverse complements",
        description="Look for reverse complements of kmers in addition to forward kmers.",
        default=True,
    )
    mask_middle_base = BooleanField(
        label="Mask the middle base of a kmer",
        description="Treat the middle base of a kmer as a wildcard to increase sensitivity "
        "in the presence of errors.",
        default=True,
    )
    min_kmer_hits = IntegerField(
        label="Minimum number of kmer hits",
        default=1,
        description="Reads need at least this many matching kmers to be considered "
        "matching the reference.",
    )
    min_kmer_fraction = FloatField(
        label="Minimum kmer fraction",
        default=0.0,
        description="A read needs at least this fraction of its total kmers "
        "to hit a reference in order to be considered a match. If this and "
        "'Minimum number of kmer hits' are set, the greater is used.",
    )
    min_coverage_fraction = FloatField(
        # Label fixed: it previously duplicated the min_kmer_fraction label
        # ("Minimum kmer fraction") although the description refers to the
        # coverage fraction.
        label="Minimum coverage fraction",
        default=0.0,
        description="A read needs at least this fraction of its total bases to be "
        "covered by reference kmers to be considered a match. If specified, "
        "'Minimum coverage fraction' overrides 'Minimum number of kmer hits' and "
        "'Minimum kmer fraction'.",
    )
    hamming_distance = IntegerField(
        label="Maximum Hamming distance for kmers (substitutions only)",
        description="Hamming distance i.e. the number of mismatches allowed in the kmer.",
        default=0,
    )
    query_hamming_distance = IntegerField(
        label="Hamming distance for query kmers",
        description="Set a hamming distance for query kmers instead of the read kmers. "
        "This makes the read processing much slower, but does not use additional memory.",
        default=0,
    )
    edit_distance = IntegerField(
        label="Maximum edit distance from reference kmers "
        "(substitutions and indels)",
        default=0,
    )
    hamming_distance2 = IntegerField(
        label="Hamming distance for short kmers when looking for shorter kmers",
        default=0,
    )
    query_hamming_distance2 = IntegerField(
        label="Hamming distance for short query kmers when looking for shorter kmers",
        default=0,
    )
    edit_distance2 = IntegerField(
        label="Maximum edit distance from short reference kmers "
        "(substitutions and indels) when looking for shorter kmers",
        default=0,
    )
    forbid_N = BooleanField(
        label="Forbid matching of read kmers containing N",
        default=False,
        description="By default, these will match a reference 'A' if"
        "'Maximum Hamming distance for kmers' > 0 or "
        "'Maximum edit distance from reference kmers' > 0, to increase sensitivity.",
    )
    find_best_match = BooleanField(
        label="Find best match",
        description="If multiple matches, associate read with sequence sharing most kmers.",
        default=True,
    )
    # Paired-end specific options below.
    remove_if_either_bad = BooleanField(
        label="Remove both sequences of a paired-end read, if either of them is to be removed",
        default=True,
    )
    perform_error_correction = BooleanField(
        label="Perform error correction with BBMerge prior to kmer operations",
        default=False,
    )
class Operations:
    """Trimming, filtering and masking parameters."""

    # --- kmer-based trimming / masking ---
    k_trim = StringField(
        label="Trimming protocol to remove bases matching reference kmers from reads",
        choices=[
            ("f", "Don't trim"),
            ("r", "Trim to the right"),
            ("l", "Trim to the left"),
        ],
        default="f",
    )
    k_mask = StringField(
        label="Symbol to replace bases matching reference kmers",
        default="f",
        description="Allows any non-whitespace character other than t or f. "
        "Processes short kmers on both ends.",
    )
    mask_fully_covered = BooleanField(
        label="Only mask bases that are fully covered by kmers",
        default=False,
    )
    min_k = IntegerField(
        label="Look for shorter kmers at read tips down to this length when k-trimming "
        "or masking",
        default=-1,
        description="-1 means disabled. Enabling this will disable treating the middle "
        "base of a kmer as a wildcard to increase sensitivity in the presence of errors.",
    )
    # --- quality trimming ---
    quality_trim = StringField(
        label="Trimming protocol to remove bases with quality below "
        "the minimum average region quality from read ends",
        choices=[
            ("f", "Trim neither end"),
            ("rl", "Trim both ends"),
            ("r", "Trim only right end"),
            ("l", "Trim only left end"),
            ("w", "Use sliding window"),
        ],
        default="f",
        description="Performed after looking for kmers. If enabled, set also "
        "'Average quality below which to trim region'.",
    )
    trim_quality = IntegerField(
        label="Average quality below which to trim region",
        default=6,
        disabled="operations.quality_trim === 'f'",
        description="Set trimming protocol to enable this parameter.",
    )
    quality_encoding_offset = StringField(
        label="Quality encoding offset",
        choices=[
            ("33", "Sanger / Illumina 1.8+ (33)"),
            ("64", "Illumina up to 1.3+, 1.5+ (64)"),
            ("auto", "Auto"),
        ],
        default="auto",
        description="Quality encoding offset for input FASTQ files.",
    )
    ignore_bad_quality = BooleanField(
        label="Don't crash if quality values appear to be incorrect",
        default=False,
    )
    trim_poly_A = IntegerField(
        label="Minimum length of poly-A or poly-T tails to trim on either end of reads",
        default=0,
    )
    # --- length / quality filters ---
    min_length_fraction = FloatField(
        label="Minimum length fraction",
        default=0.0,
        description="Reads shorter than this fraction of original length after "
        "trimming will be discarded.",
    )
    max_length = IntegerField(
        label="Maximum length",
        required=False,
        description="Reads longer than this after trimming will be discarded.",
    )
    min_average_quality = IntegerField(
        label="Minimum average quality",
        default=0,
        description="Reads with average quality (after trimming) below this will be discarded.",
    )
    min_average_quality_bases = IntegerField(
        label="Number of initial bases to calculate minimum average quality from",
        default=0,
        description="If positive, calculate minimum average quality "
        "from this many initial bases",
    )
    min_base_quality = IntegerField(
        label="Minimum base quality below which reads are discarded after trimming",
        default=0,
    )
    min_consecutive_bases = IntegerField(
        label="Minimum number of consecutive called bases",
        default=0,
    )
    trim_pad = IntegerField(
        label="Number of bases to trim around matching kmers",
        default=0,
    )
    # --- paired-end overlap handling ---
    trim_by_overlap = BooleanField(
        label="Trim adapters based on where paired-end reads overlap",
        default=False,
    )
    # NOTE(review): strict_overlap is declared here but is not passed to
    # BBDuk in run(); confirm whether "strictoverlap=..." should be added
    # to the argument list.
    strict_overlap = BooleanField(
        label="Adjust sensitivity in "
        "'Trim adapters based on where paired-end reads overlap' mode",
        default=True,
    )
    min_overlap = IntegerField(
        # Typo fixed in the label ("Minum" -> "Minimum").
        label="Minimum number of overlapping bases",
        default=14,
        description="Require this many bases of overlap for detection.",
    )
    min_insert = IntegerField(
        label="Minimum insert size",
        default=40,
        description="Require insert size of at least this for overlap. "
        "Should be reduced to 16 for small RNA sequencing.",
    )
    trim_pairs_evenly = BooleanField(
        label="Trim both sequences of paired-end reads to the minimum "
        "length of either sequence",
        default=False,
    )
    # --- forced trimming and positional restrictions ---
    force_trim_left = IntegerField(
        label="Position from which to trim bases to the left",
        default=0,
    )
    force_trim_right = IntegerField(
        label="Position from which to trim bases to the right",
        default=0,
    )
    force_trim_right2 = IntegerField(
        label="Number of bases to trim from the right end",
        default=0,
    )
    force_trim_mod = IntegerField(
        label="Modulo to right-trim reads",
        default=0,
        description="Trim reads to the largest multiple of modulo.",
    )
    restrict_left = IntegerField(
        label="Number of leftmost bases to look in for kmer matches",
        default=0,
    )
    restrict_right = IntegerField(
        label="Number of rightmost bases to look in for kmer matches",
        default=0,
    )
    # --- GC-content and junk filters ---
    min_GC = FloatField(
        label="Minimum GC content",
        default=0.0,
        description="Discard reads with lower GC content.",
    )
    max_GC = FloatField(
        label="Maximum GC content",
        default=1.0,
        description="Discard reads with higher GC content.",
    )
    maxns = IntegerField(
        label="Max Ns after trimming",
        default=-1,
        description="If non-negative, reads with more Ns than this "
        "(after trimming) will be discarded.",
    )
    toss_junk = BooleanField(
        label="Discard reads with invalid characters as bases",
        default=False,
    )
class HeaderParsing:
    """Header-parsing parameters.

    Filters driven by information encoded in Illumina read headers
    (chastity flag, barcodes, flowcell coordinates).
    """

    chastity_filter = BooleanField(
        label="Discard reads that fail Illumina chastity filtering",
        default=False,
        description="Discard reads with id containing ' 1:Y:' or ' 2:Y:'.",
    )
    barcode_filter = BooleanField(
        label="Remove reads with unexpected barcodes",
        default=False,
        description="Remove reads with unexpected barcodes if barcodes are set, "
        "or barcodes containing 'N' otherwise. A barcode must be the last part "
        "of the read header.",
    )
    # Barcodes may be provided as FASTA files and/or literal strings below.
    barcode_files = ListField(
        DataField("seq:nucleotide"),
        label="Barcode sequences",
        description="FASTA file(s) with barcode sequences.",
        required=False,
    )
    barcode_sequences = ListField(
        StringField(),
        label="Literal barcode sequences",
        required=False,
        default=[],
        description="Literal barcode sequences can be specified by inputting "
        "them one by one and pressing Enter after each sequence.",
    )
    # Flowcell-coordinate filters; -1 disables each bound.
    x_min = IntegerField(
        label="Minimum X coordinate",
        default=-1,
        description="If positive, discard reads with a smaller X coordinate.",
    )
    y_min = IntegerField(
        label="Minimum Y coordinate",
        default=-1,
        description="If positive, discard reads with a smaller Y coordinate.",
    )
    x_max = IntegerField(
        label="Maximum X coordinate",
        default=-1,
        description="If positive, discard reads with a larger X coordinate.",
    )
    y_max = IntegerField(
        label="Maximum Y coordinate",
        default=-1,
        description="If positive, discard reads with a larger Y coordinate.",
    )
class Complexity:
    """Complexity parameters.

    Entropy-based filtering/masking of low-complexity reads.
    """

    entropy = FloatField(
        label="Minimum entropy",
        default=-1.0,
        description="Set between 0 and 1 to filter reads with entropy below that value. "
        "Higher is more stringent.",
    )
    entropy_window = IntegerField(
        label="Length of sliding window used to calculate entropy",
        default=50,
        description="To use the sliding window set minimum entropy in range between 0.0 and 1.0.",
    )
    entropy_k = IntegerField(
        # Typo fixed in the label ("calcuate" -> "calculate").
        label="Length of kmers used to calculate entropy",
        default=5,
    )
    entropy_mask = BooleanField(
        label="Mask low-entropy parts of sequences with N instead of discarding",
        default=False,
    )
    # NOTE(review): BBDuk's minbasefrequency is documented as a fraction
    # between 0 and 1; IntegerField may have been intended as FloatField --
    # confirm before changing the field type.
    min_base_frequency = IntegerField(
        label="Minimum base frequency",
        default=0,
    )
class Fastqc:
    """FastQC parameters."""

    # Maps to FastQC's base-grouping flag for the post-trimming QC report.
    nogroup = BooleanField(
        label="Disable grouping of bases for reads >50bp",
        default=False,
        description="All reports will show data for every base in the read. Using this option "
        "will cause fastqc to crash and burn if you use it on really long reads.",
    )
# Expose the parameter groups defined above as grouped process inputs.
reference = GroupField(Reference, label="Reference")
processing = GroupField(Processing, label="Processing parameters")
operations = GroupField(
    Operations, label="Trimming, filtering and masking parameters."
)
header_parsing = GroupField(HeaderParsing, label="Header-parsing parameters")
complexity = GroupField(Complexity, label="Complexity parameters")
fastqc = GroupField(Fastqc, label="FastQC parameters")
class Output:
    """Output fields."""

    # Mate-1 ("upstream") and mate-2 ("downstream") results are reported
    # in separate fields.
    fastq = ListField(FileField(), label="Remaining upstream reads")
    fastq2 = ListField(FileField(), label="Remaining downstream reads")
    statistics = ListField(FileField(), label="Statistics")
    fastqc_url = ListField(
        FileHtmlField(), label="Upstream quality control with FastQC"
    )
    fastqc_url2 = ListField(
        FileHtmlField(), label="Downstream quality control with FastQC"
    )
    fastqc_archive = ListField(
        FileField(), label="Download upstream FastQC archive"
    )
    fastqc_archive2 = ListField(
        FileField(), label="Download downstream FastQC archive"
    )
def run(self, inputs, outputs):
    """Run the analysis.

    Concatenates reference/barcode FASTA inputs, runs BBDuk once per
    sequencing lane in parallel on both mates, gzips the produced
    statistics, and finally runs FastQC on each mate's filtered reads.
    """
    input_references = "input_references.fasta.gz"
    input_barcodes = "input_barcodes.fasta.gz"
    num_of_lanes = len(inputs.reads.output.fastq)

    input_reads = prepare_inputs(
        mate1=inputs.reads.output.fastq, mate2=inputs.reads.output.fastq2
    )

    # Merge all reference FASTA files into a single gzipped FASTA for BBDuk.
    if inputs.reference.sequences:
        with gzip.open(input_references, "wb") as outfile:
            for sequences in inputs.reference.sequences:
                with gzip.open(sequences.output.fastagz.path, "rb") as infile:
                    shutil.copyfileobj(infile, outfile)

    # Barcodes may come from FASTA files, literal sequences, or both.
    if inputs.header_parsing.barcode_files:
        with gzip.open(input_barcodes, "wb") as outfile:
            for barcode_file in inputs.header_parsing.barcode_files:
                with gzip.open(barcode_file.output.fastagz.path, "rb") as infile:
                    shutil.copyfileobj(infile, outfile)
        barcodes = [input_barcodes] + inputs.header_parsing.barcode_sequences
    else:
        barcodes = inputs.header_parsing.barcode_sequences

    self.progress(0.1)

    args = [
        "statscolumns=5",
        f"k={inputs.processing.kmer_length}",
        f"rcomp={inputs.processing.check_reverse_complements}",
        f"maskmiddle={inputs.processing.mask_middle_base}",
        f"minkmerhits={inputs.processing.min_kmer_hits}",
        f"minkmerfraction={inputs.processing.min_kmer_fraction}",
        f"mincovfraction={inputs.processing.min_coverage_fraction}",
        f"hammingdistance={inputs.processing.hamming_distance}",
        f"qhdist={inputs.processing.query_hamming_distance}",
        f"editdistance={inputs.processing.edit_distance}",
        f"hammingdistance2={inputs.processing.hamming_distance2}",
        f"qhdist2={inputs.processing.query_hamming_distance2}",
        f"editdistance2={inputs.processing.edit_distance2}",
        f"forbidn={inputs.processing.forbid_N}",
        f"removeifeitherbad={inputs.processing.remove_if_either_bad}",
        f"findbestmatch={inputs.processing.find_best_match}",
        f"ecco={inputs.processing.perform_error_correction}",
        f"maskfullycovered={inputs.operations.mask_fully_covered}",
        f"mink={inputs.operations.min_k}",
        f"trimq={inputs.operations.trim_quality}",
        f"qin={inputs.operations.quality_encoding_offset}",
        f"trimpolya={inputs.operations.trim_poly_A}",
        f"minlength={inputs.min_length}",
        f"minlengthfraction={inputs.operations.min_length_fraction}",
        f"minavgquality={inputs.operations.min_average_quality}",
        f"maqb={inputs.operations.min_average_quality_bases}",
        f"minbasequality={inputs.operations.min_base_quality}",
        f"minconsecutivebases={inputs.operations.min_consecutive_bases}",
        f"trimpad={inputs.operations.trim_pad}",
        f"trimbyoverlap={inputs.operations.trim_by_overlap}",
        f"trimpairsevenly={inputs.operations.trim_pairs_evenly}",
        f"minoverlap={inputs.operations.min_overlap}",
        f"mininsert={inputs.operations.min_insert}",
        f"forcetrimleft={inputs.operations.force_trim_left}",
        f"forcetrimright={inputs.operations.force_trim_right}",
        f"forcetrimright2={inputs.operations.force_trim_right2}",
        f"forcetrimmod={inputs.operations.force_trim_mod}",
        f"restrictleft={inputs.operations.restrict_left}",
        f"restrictright={inputs.operations.restrict_right}",
        f"mingc={inputs.operations.min_GC}",
        f"maxgc={inputs.operations.max_GC}",
        f"maxns={inputs.operations.maxns}",
        f"tossjunk={inputs.operations.toss_junk}",
        f"chastityfilter={inputs.header_parsing.chastity_filter}",
        f"barcodefilter={inputs.header_parsing.barcode_filter}",
        f"xmin={inputs.header_parsing.x_min}",
        f"ymin={inputs.header_parsing.y_min}",
        f"xmax={inputs.header_parsing.x_max}",
        f"ymax={inputs.header_parsing.y_max}",
        f"entropy={inputs.complexity.entropy}",
        f"entropywindow={inputs.complexity.entropy_window}",
        f"entropyk={inputs.complexity.entropy_k}",
        f"minbasefrequency={inputs.complexity.min_base_frequency}",
        f"entropymask={inputs.complexity.entropy_mask}",
        # Split the JVM heap across concurrent per-lane jobs, with headroom.
        f"-Xmx{int(0.85*(self.requirements.resources.memory/num_of_lanes))}m",
    ]

    # One BBDuk job per lane; divide the available cores evenly when possible.
    if self.requirements.resources.cores >= num_of_lanes:
        args.append(
            f"threads={int(self.requirements.resources.cores//num_of_lanes)}"
        )
        n_jobs = num_of_lanes
    else:
        self.warning(
            f"There are more sequencing lanes ({num_of_lanes}) than there are "
            f"available cores ({self.requirements.resources.cores}). For the "
            "most optimal performance, use at least the same number of lanes "
            "and cores."
        )
        args.append("threads=1")
        n_jobs = self.requirements.resources.cores

    if inputs.reference.sequences:
        args.append(f"ref={input_references}")
    if inputs.reference.literal_sequences:
        literal_sequences_joined = ",".join(inputs.reference.literal_sequences)
        args.append(f"literal={literal_sequences_joined}")
    if inputs.operations.k_trim != "f":
        args.append(f"ktrim={inputs.operations.k_trim}")
    if inputs.operations.k_mask != "f" and inputs.operations.k_mask != "t":
        args.append(f"kmask={inputs.operations.k_mask}")
    if inputs.operations.quality_trim != "f":
        args.append(f"qtrim={inputs.operations.quality_trim}")
    if inputs.operations.ignore_bad_quality:
        args.append("ignorebadquality")
    if inputs.operations.max_length:
        # Fixed: BBDuk's option is "maxlength" (as used in the single-end
        # process); "masklength" is not a BBDuk parameter, so the maximum
        # length filter previously had no effect.
        args.append(f"maxlength={inputs.operations.max_length}")
    # Fixed: check the combined barcode list (as the single-end process
    # does) so that literal barcode sequences are passed to BBDuk even
    # when no barcode FASTA files are provided.
    if barcodes:
        barcodes = ",".join(barcodes)
        args.append(f"barcodes={barcodes}")

    # NOTE(review): run_bbduk is defined elsewhere in this module and is
    # assumed to yield (returncode, message) results compatible with
    # joblib's Parallel -- confirm against its definition.
    process_outputs = Parallel(n_jobs=n_jobs)(
        run_bbduk(input_reads=input_set, bbduk_inputs=args, paired_end=True)
        for input_set in input_reads
    )
    for output in process_outputs:
        if output[0]:
            self.error("BBDuk failed.", output[1])
    self.progress(0.7)

    # Gzip each per-lane statistics file for the output field.
    statistics = []
    for stats in input_reads:
        with open(stats["stats"], "rb") as orig_file:
            with gzip.open(f"{stats['stats']}.gz", "wb") as zipped_file:
                zipped_file.writelines(orig_file)
        statistics.append(f"{stats['stats']}.gz")

    output_path = Path("./fastqc")
    output_path.mkdir(exist_ok=True)

    rename_preprocessed_files(input_files=input_reads, paired_end=True)
    fastqgz = [fastq["original_name"] for fastq in input_reads]
    fastqgz2 = [fastq["original_name2"] for fastq in input_reads]
    fastqc_inputs = fastqgz + ["--extract", f"--outdir={str(output_path)}"]
    fastqc2_inputs = fastqgz2 + ["--extract", f"--outdir={str(output_path)}"]
    if inputs.fastqc.nogroup:
        # Fixed: FastQC's command-line flag is spelled "--nogroup";
        # "--no-group" is not recognized by FastQC.
        fastqc_inputs.append("--nogroup")
        fastqc2_inputs.append("--nogroup")

    return_code, _, stderr = Cmd["fastqc"][fastqc_inputs] & TEE(retcode=None)
    if return_code:
        self.error("FastQC failed. ", stderr)
    return_code, _, stderr = Cmd["fastqc"][fastqc2_inputs] & TEE(retcode=None)
    if return_code:
        self.error("FastQC failed. ", stderr)

    for fastqc_zip in output_path.glob("*_fastqc.zip"):
        shutil.move(str(fastqc_zip), ".")

    mate1_fastqc, mate1_fastqc_url = prepare_fastqc(
        fastqgz=fastqgz,
        error=self.error,
    )
    mate2_fastqc, mate2_fastqc_url = prepare_fastqc(
        fastqgz=fastqgz2,
        error=self.error,
    )

    # Warn when the configuration amounts to length filtering only.
    if (
        inputs.operations.k_trim == "f"
        and inputs.operations.quality_trim == "f"
        and not inputs.reference.sequences
        and not inputs.reference.literal_sequences
        and inputs.operations.min_average_quality <= 0
    ):
        self.warning(
            "Reference sequences, trimming mode, and minimum average quality are "
            "unspecified. Only filtering of reads by length is applied."
        )

    outputs.fastq = fastqgz
    outputs.fastq2 = fastqgz2
    outputs.fastqc_url = mate1_fastqc_url
    outputs.fastqc_url2 = mate2_fastqc_url
    outputs.fastqc_archive = mate1_fastqc
    outputs.fastqc_archive2 = mate2_fastqc
    outputs.statistics = statistics
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
IntegerField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class AlignmentSieve(Process):
    """Filter alignments of BAM files according to specified parameters.

    Program is bundled with deeptools. See [documentation](
    https://deeptools.readthedocs.io/en/develop/content/tools/alignmentSieve.html)
    for more details.
    """

    slug = "alignmentsieve"
    name = "alignmentSieve"
    process_type = "data:alignment:bam:sieve"
    version = "1.5.2"
    category = "BAM processing"
    data_name = "{{ alignment|name|default('?') }}"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    # Each run is attached to a single sample.
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }

    class Input:
        """Input fields to process AlignmentSieve."""

        alignment = DataField(data_type="alignment:bam", label="Alignment BAM file")
        # Fragment-length bounds; useful e.g. for nucleosome-fraction
        # selection in ATAC-seq. 0 means no bound.
        min_fragment_length = IntegerField(
            label="--minFragmentLength",
            description="The minimum fragment length needed for "
            "read/pair inclusion. This option is primarily useful in "
            "ATACseq experiments, for filtering mono- or di-nucleosome "
            "fragments. (Default: 0)",
            default=0,
        )
        max_fragment_length = IntegerField(
            label="--maxFragmentLength",
            description="The maximum fragment length needed for "
            "read/pair inclusion. A value of 0 indicates "
            "no limit. (Default: 0)",
            default=0,
        )

    class Output:
        """Output fields to process AlignmentSieve."""

        bam = FileField(label="Sieved BAM file")
        bai = FileField(label="Index of sieved BAM file")
        stats = FileField(label="Alignment statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")
def run(self, inputs, outputs):
    """Run code of AlignmentSieve process.

    Sieves the input BAM with deeptools' alignmentSieve, indexes the
    result and produces samtools flagstat statistics.
    """
    basename = Path(inputs.alignment.output.bam.path).name
    # Report malformed input through the process machinery instead of a
    # bare assert, which would be silently stripped under `python -O`.
    if not basename.endswith(".bam"):
        self.error(f"Invalid input file name {basename}. A .bam file is expected.")
    name = basename[:-4]
    bam_species = inputs.alignment.output.species

    output_bam_file = f"{name}_filtered.bam"
    filter_metrics = f"{name}_metrics.txt"

    params = [
        "--verbose",
        "--bam",
        inputs.alignment.output.bam.path,
        "--outFile",
        output_bam_file,
        "--filterMetrics",
        filter_metrics,
        "--maxFragmentLength",
        inputs.max_fragment_length,
        "--minFragmentLength",
        inputs.min_fragment_length,
    ]
    return_code, _, _ = Cmd["alignmentSieve"][params] & TEE(retcode=None)
    if return_code:
        self.error("Error sieving the BAM file.")
    if not os.path.exists(output_bam_file):
        self.error(f"File {output_bam_file} not created.")
    outputs.bam = output_bam_file
    self.progress(0.3)

    return_code, stdout, stderr = Cmd["samtools"]["index"][output_bam_file] & TEE(
        retcode=None
    )
    if return_code:
        print(stderr)
        self.error(f"Failed to index {output_bam_file}.")
    outputs.bai = f"{output_bam_file}.bai"
    self.progress(0.5)

    stats = f"{name}_stats.txt"
    # Redirect flagstat output into the stats file.
    (Cmd["samtools"]["flagstat"][f"{output_bam_file}"] > stats)()
    outputs.stats = stats
    self.progress(0.75)

    outputs.species = bam_species
    outputs.build = inputs.alignment.output.build
import gzip
import re
import shutil
from math import ceil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
# Hash-function specification handed to xengsort when building the index.
HASH_FUNCTION = "linear945:linear9123641:linear349341847"
# Read categories that xengsort classification distinguishes.
READ_TYPES = ["host", "graft", "both", "neither", "ambiguous"]
def concatenate_files(filenames, out_fname, decompress=False):
    """Write the contents of ``filenames``, in order, into ``out_fname``.

    When ``decompress`` is True the inputs are treated as gzip archives
    and their decompressed contents are concatenated instead.
    """
    opener = gzip.open if decompress else open
    with open(out_fname, "wb") as destination:
        for source_name in filenames:
            with opener(source_name, "rb") as source:
                # Copy in 10 MiB chunks to bound memory usage.
                shutil.copyfileobj(source, destination, 1024 * 1024 * 10)
def create_filename(basename, suffix):
    """Return ``basename.suffix`` with ``basename`` reduced to word characters.

    Every character that is not alphanumeric or an underscore is replaced
    with an underscore, and runs of underscores are collapsed into one.
    """
    sanitized = re.sub(r"\W", "_", basename)
    sanitized = re.sub(r"_{2,}", "_", sanitized)
    return f"{sanitized}.{suffix}"
def concatenate_reads(filenames, out_fasta, error):
    """Concatenate gzipped FASTQ files into a single decompressed file.

    :param filenames: paths of the gzipped FASTQ files to merge, in order.
    :param out_fasta: destination path (``str`` or ``Path``) of the merged,
        decompressed reads.
    :param error: callable used to report a failure (e.g. ``self.error``).
    """
    try:
        concatenate_files(filenames=filenames, out_fname=out_fasta, decompress=True)
    except Exception as e:
        # ``out_fasta`` may be a plain string (callers pass e.g.
        # "mate_1.fastq"); a bare ``out_fasta.name`` would raise
        # AttributeError here and mask the original error, so normalize
        # through ``Path`` first.
        error(
            f"Failed to concatenate FASTQ files for {Path(out_fasta).name}. "
            f"The error was: {repr(e)}"
        )
class XengsortIndex(Process):
    """Build an index for sorting xenograft reads with Xengsort.

    Xengsort is an alignment free method for sorting reads from
    xenograft experiments. Description of the method and evaluation on
    several datasets is provided in the
    [article](https://doi.org/10.1186/s13015-021-00181-w).
    """

    slug = "xengsort-index"
    name = "Xengsort index"
    process_type = "data:xengsort:index"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/xengsort:1.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 32768,
        },
    }
    category = "Xenograft processing"
    data_name = "Xengsort index"
    # Version bumped: fixed crash in the k-mer extraction error handler.
    version = "1.0.2"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED

    class Input:
        """Input fields to XengsortIndex process."""

        graft_refs = ListField(
            DataField("seq:nucleotide"),
            label="Graft reference sequences (nucleotide FASTA)",
        )
        host_refs = ListField(
            DataField("seq:nucleotide"),
            label="Host reference sequences (nucleotide FASTA)",
        )
        n_kmer = IntegerField(
            label="Number of distinct k-mers [--nobjects]",
            required=False,
            description="The number of k-mers that will be stored in the hash table. This depends "
            "on the used reference genomes and must be estimated beforehand. If the number of "
            "distinct k-mers is known beforehand it should be specified. For all 25-mers in the "
            "human and mouse genome and transcriptome, this number is roughly 4,500,000,000. "
            "If this is not set, the number is estimated with ntCard tool and increased by two "
            "percent to account for errors.",
        )

        class Advanced:
            """Advanced options."""

            kmer_size = IntegerField(label="k-mer size [--kmersize]", default=25)
            aligned_cache = BooleanField(
                label="Use power-of-two aligned pages [--aligned]",
                default=False,
                description="Indicates whether each bucket should consume a number of bits that "
                "is a power of 2. Using --aligned ensures that each bucket stays within the same "
                "cache line, but may waste space (padding bits), yielding faster speed but "
                "larger space requirements. By default no bits are used for padding and buckets "
                "may cross cache line boundaries [--unaligned]. This is slightly slower, but may "
                "save a little or a lot of space.",
            )
            fixed_hashing = BooleanField(
                label="Use fixed hash function [--hashfunctions]",
                default=True,
                description="Hash function used to store the key-value pairs is defined by "
                "--hashfunction parameter. With this option selected a fixed hash function "
                "(linear945:linear9123641:linear349341847) is used. When this is not selected a "
                "different random functions are chosen each time. It is recommended to have them "
                "chosen randomly unless you need strictly reproducible behavior.",
            )
            page_size = IntegerField(
                label="Number of elements stored in one bucket (or page) [--pagesize]",
                default=4,
            )
            fill = FloatField(
                label="Fill rate of the hash table [--fill]",
                range=[0.0, 1.0],
                default=0.88,
                description="This determines the desired fill rate or load factor of the hash "
                "table. It should be set between 0.0 and 1.0. It is beneficial to leave part of "
                "the hash table empty for faster lookups. Together with the number of distinct "
                "k-mers [--nobjects], the number of slots in the table is calculated as "
                "ceil(nobjects/fill).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields to process XengsortIndex."""

        index = FileField(label="Xengsort index")
        stats = FileField(label="Xengsort statistics")
        graft_species = StringField(label="Graft species")
        graft_build = StringField(label="Graft build")
        host_species = StringField(label="Host species")
        host_build = StringField(label="Host build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # All graft references (and, separately, all host references) must
        # agree on species and build, otherwise the index is meaningless.
        graft_species = inputs.graft_refs[0].output.species
        graft_build = inputs.graft_refs[0].output.build
        if any(ref.output.species != graft_species for ref in inputs.graft_refs):
            self.error(
                "All input graft reference sequences must be from the same species."
            )
        if any(ref.output.build != graft_build for ref in inputs.graft_refs):
            self.error("All input graft reference sequences must share the same build.")

        host_species = inputs.host_refs[0].output.species
        host_build = inputs.host_refs[0].output.build
        if any(ref.output.species != host_species for ref in inputs.host_refs):
            self.error(
                "All input host reference sequences must be from the same species."
            )
        if any(ref.output.build != host_build for ref in inputs.host_refs):
            self.error("All input host reference sequences must share the same build.")

        outputs.graft_species = graft_species
        outputs.graft_build = graft_build
        outputs.host_species = host_species
        outputs.host_build = host_build

        # Merge the (possibly multiple) reference FASTA files per organism.
        graft_fasta = Path("graft.fasta")
        concatenate_files(
            filenames=[ref.output.fasta.path for ref in inputs.graft_refs],
            out_fname=graft_fasta,
        )
        host_fasta = Path("host.fasta")
        concatenate_files(
            filenames=[ref.output.fasta.path for ref in inputs.host_refs],
            out_fname=host_fasta,
        )

        if inputs.n_kmer:
            n_kmer = inputs.n_kmer
        else:
            # Estimate the number of distinct k-mers with ntCard's nthll.
            nthll_params = [
                f"--threads={self.requirements.resources.cores}",
                f"--kmer={inputs.advanced.kmer_size}",
                graft_fasta,
                host_fasta,
            ]
            return_code, stdout, stderr = Cmd["nthll"][nthll_params] & TEE(retcode=None)
            if return_code:
                print(stderr)
                self.error("Failed to estimate the number of distinct k-mers.")

            # Example standard output is "F0, Exp# of distnt kmers(k=25): 47270"
            stdout_search = re.search(
                pattern=r"^F0, Exp# of distnt kmers\(k=\d+\): (\d+)$",
                string=stdout,
                flags=re.MULTILINE,
            )
            try:
                n_kmer = int(stdout_search.group(1))
            except (AttributeError, ValueError, IndexError) as e:
                # Python 3 exceptions have no ``message`` attribute;
                # ``print(e.message)`` would raise AttributeError here and
                # mask the real failure, so print the exception itself.
                print(e)
                self.error("Failed to extract the number of distinct k-mers.")

            # Account for possible errors in estimation by increasing
            # the number of distinct k-mers by 2 percent.
            n_kmer = ceil(n_kmer * 1.02)

        index_file = create_filename(
            basename="_".join([graft_species, graft_build, host_species, host_build]),
            suffix="h5",
        )
        index_params = [
            index_file,
            "--host",
            host_fasta,
            "--graft",
            graft_fasta,
            "--kmersize",
            inputs.advanced.kmer_size,
            "--shortcutbits",
            0,
            "--nobjects",
            n_kmer,
            "--type",
            "3FCVbb",
            "--aligned" if inputs.advanced.aligned_cache else "--unaligned",
            "--hashfunctions",
            HASH_FUNCTION if inputs.advanced.fixed_hashing else "random",
            "--pagesize",
            inputs.advanced.page_size,
            "--fill",
            inputs.advanced.fill,
            "--threads",
            self.requirements.resources.cores,
        ]
        return_code, stdout, stderr = Cmd["xengsort"]["index"][index_params] & TEE(
            retcode=None
        )
        if return_code:
            print(stderr)
            self.error("Failed to calculate Xengsort index.")

        # ``stdout`` is a single string; write (not writelines) it out as
        # the statistics report.
        stats_file = "index_stats.txt"
        with open(stats_file, "w") as stats_handle:
            stats_handle.write(stdout)

        outputs.index = index_file
        outputs.stats = stats_file
class XengsortClassify(Process):
    """Classify xenograft reads with Xengsort.

    Xengsort is an alignment free method for sorting reads from
    xenograft experiments. It classifies sequencing reads into five
    categories based on their origin: host, graft, both, neither, and
    ambiguous. Categories “host” and “graft” are for reads that can be
    clearly assigned to one of the species. Category “both” is for reads
    that match equally well to both references. Category “neither” is
    for reads that contain many k-mers that cannot be found in the
    key-value store; these could point to technical problems (primer
    dimers) or contamination of the sample with other species. Finally,
    category “ambiguous” is for reads that provide conflicting
    information. Such reads should not usually be seen; they could
    result from PCR hybrids between host and graft during library
    preparation.

    Description of the method and evaluation on several
    datasets is provided in the
    [article](https://doi.org/10.1186/s13015-021-00181-w).
    """

    slug = "xengsort-classify"
    name = "Xengsort classify"
    process_type = "data:xengsort:classification"
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/xengsort:1.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    entity = {
        "type": "sample",
    }
    category = "Xenograft processing"
    data_name = "{{ reads|name|default('?') }}"
    # Version bumped: fixed description typo and stats file writing.
    version = "1.0.1"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED

    class Input:
        """Input fields to XengsortClassify process."""

        reads = DataField("reads:fastq", label="Reads")
        index = DataField("xengsort:index", label="Xengsort genome index")
        upload_reads = StringField(
            label="Select reads to upload",
            description="All read categories are returned in this process but only the ones "
            "selected are uploaded as separate FASTQ files. This should be used for categories "
            "of reads that will be used in further analyses.",
            choices=[
                ("none", "none"),
                ("all", "all"),
                ("graft", "graft"),
                ("graft, both", "graft, both"),
                ("graft, host", "graft, host"),
                ("graft, host, both", "graft, host, both"),
            ],
            default="none",
        )
        merge_both = BooleanField(
            label="Upload merged graft and both reads",
            description="Merge graft reads with the reads that can originate from both genomes "
            "and upload it as graft reads. In any workflow, the latter reads, classified as both "
            "may pose problems, because one may not be able to decide on the species of origin "
            "due to ultra-conserved regions between species.",
            hidden="upload_reads == 'none'",
            default=False,
        )

        class Advanced:
            """Advanced options."""

            chunksize = FloatField(
                label="Chunk size in MB [--chunksize]",
                default=16.0,
                # Fixed typo in the description ("Controll" -> "Control").
                description="Control the memory usage by setting chunk size per thread.",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields to XengsortClassify process."""

        stats = FileField(label="Xengsort classification statistics")
        host1 = FileField(label="Host reads (mate 1)")
        host2 = FileField(label="Host reads (mate 2)", required=False)
        graft1 = FileField(label="Graft reads (mate 1)")
        graft2 = FileField(label="Graft reads (mate 2)", required=False)
        both1 = FileField(label="Both reads (mate 1)")
        both2 = FileField(label="Both reads (mate 2)", required=False)
        neither1 = FileField(label="Neither reads (mate 1)")
        neither2 = FileField(label="Neither reads (mate 2)", required=False)
        ambiguous1 = FileField(label="Ambiguous reads (mate 1)")
        ambiguous2 = FileField(label="Ambiguous reads (mate 2)", required=False)
        graft_species = StringField(label="Graft species")
        graft_build = StringField(label="Graft build")
        host_species = StringField(label="Host species")
        host_build = StringField(label="Host build")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Merge multi-lane FASTQ files into one decompressed file per mate.
        concatenated_r1 = "mate_1.fastq"
        concatenate_reads(
            filenames=[fastq.path for fastq in inputs.reads.output.fastq],
            out_fasta=concatenated_r1,
            error=self.error,
        )
        is_paired = inputs.reads.type.startswith("data:reads:fastq:paired:")
        if is_paired:
            concatenated_r2 = "mate_2.fastq"
            concatenate_reads(
                filenames=[fastq.path for fastq in inputs.reads.output.fastq2],
                out_fasta=concatenated_r2,
                error=self.error,
            )

        if is_paired:
            classify_params = ["--fastq", concatenated_r1, "--pairs", concatenated_r2]
        else:
            classify_params = ["--fastq", concatenated_r1]

        # Derive the output-file prefix from the first input lane.
        fastq_file = Path(inputs.reads.output.fastq[0].path).name
        assert fastq_file.endswith(".fastq.gz")
        name = fastq_file[:-9]

        classify_params.extend(
            [
                "--index",
                inputs.index.output.index.path,
                "--threads",
                self.requirements.resources.cores,
                "--prefix",
                name,
                "--prefetchlevel",
                0,
                "--chunksize",
                inputs.advanced.chunksize,
            ]
        )
        return_code, stdout, stderr = Cmd["xengsort"]["classify"][
            classify_params
        ] & TEE(retcode=None)
        if return_code:
            print(stderr)
            self.error("Failed to classify reads with Xengsort.")

        # ``stdout`` is a single string; write (not writelines) it out as
        # the statistics report.
        stats_file = "classification_stats.txt"
        with open(stats_file, "w") as stats_handle:
            stats_handle.write(stdout)
        outputs.stats = stats_file

        # Rename and compress the classified reads, remembering the produced
        # file names so they can be attached to outputs and re-uploaded.
        output_files = {}
        for read_type in READ_TYPES:
            mates = [1, 2] if is_paired else [1]
            for mate in mates:
                if is_paired:
                    output_file = Path(f"{name}-{read_type}.{mate}.fq")
                else:
                    output_file = Path(f"{name}-{read_type}.fq")
                # Only files Xengsort actually produced are compressed and
                # registered.
                if output_file.is_file():
                    output_file = output_file.rename(output_file.with_suffix(".fastq"))
                    return_code, _, _ = Cmd["pigz"][output_file] & TEE(retcode=None)
                    if return_code:
                        self.error(f"Compression of {read_type} reads failed.")
                    output_files[f"{read_type}{mate}"] = f"{output_file}.gz"

        for output_key, output_file in output_files.items():
            setattr(outputs, output_key, output_file)

        outputs.graft_species = inputs.index.output.graft_species
        outputs.graft_build = inputs.index.output.graft_build
        outputs.host_species = inputs.index.output.host_species
        outputs.host_build = inputs.index.output.host_build

        # Optionally merge graft and both reads; the merged files replace
        # the graft entries only for the upload step below (process outputs
        # keep pointing to the original graft files).
        if inputs.merge_both:
            merged1 = f"{name}-graft-both{'.1' if is_paired else ''}.fastq.gz"
            concatenate_files(
                filenames=[output_files["graft1"], output_files["both1"]],
                out_fname=merged1,
            )
            output_files["graft1"] = merged1
            if is_paired:
                merged2 = f"{name}-graft-both.2.fastq.gz"
                concatenate_files(
                    filenames=[output_files["graft2"], output_files["both2"]],
                    out_fname=merged2,
                )
                output_files["graft2"] = merged2

        if inputs.upload_reads == "all":
            upload_list = [
                (f"{read_type}1", f"{read_type}2") if is_paired else (f"{read_type}1",)
                for read_type in READ_TYPES
            ]
        elif inputs.upload_reads != "none":
            upload_list = [
                (f"{read_type}1", f"{read_type}2") if is_paired else (f"{read_type}1",)
                for read_type in inputs.upload_reads.split(", ")
            ]
        else:
            upload_list = []

        for output_type in upload_list:
            if is_paired:
                upload_slug = "upload-fastq-paired"
                upload_inputs = {
                    "src1": [str(output_files[output_type[0]])],
                    "src2": [str(output_files[output_type[1]])],
                }
            else:
                upload_slug = "upload-fastq-single"
                upload_inputs = {"src": [str(output_files[output_type[0]])]}
            self.run_process(
                slug=upload_slug,
                inputs=upload_inputs,
            )
import os
import shutil
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
class BQSR(Process):
    """A two pass process of BaseRecalibrator and ApplyBQSR from GATK.

    See GATK website for more information on BaseRecalibrator.

    It is possible to modify read group using GATK's AddOrReplaceGroups through Replace read groups in BAM
    (``read_group``) input field.
    """

    slug = "bqsr"
    name = "BaseQualityScoreRecalibrator"
    process_type = "data:alignment:bam:bqsr:"
    version = "2.5.1"
    category = "GATK"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = "{{ bam|name|default('?') }}"

    class Input:
        """Input fields to perform Base quality score recalibration."""

        bam = DataField("alignment:bam", label="BAM file containing reads")
        reference = DataField("seq:nucleotide", label="Reference genome file")
        known_sites = ListField(
            DataField(
                data_type="variants:vcf",
                description="One or more databases of known polymorphic sites used to exclude regions around known "
                "polymorphisms from analysis.",
            ),
            label="List of known sites of variation",
        )
        intervals = DataField(
            data_type="bed",
            required=False,
            label="One or more genomic intervals over which to operate.",
            description="This field is optional, but it can speed up the process by restricting calculations to "
            "specific genome regions.",
        )
        read_group = StringField(
            label="Replace read groups in BAM",
            description="Replace read groups in a BAM file.This argument enables the user to replace all read groups "
            "in the INPUT file with a single new read group and assign all reads to this read group in "
            "the OUTPUT BAM file. Addition or replacement is performed using Picard's "
            "AddOrReplaceReadGroups tool. Input should take the form of -name=value delimited by a "
            '";", e.g. "-ID=1;-LB=GENIALIS;-PL=ILLUMINA;-PU=BARCODE;-SM=SAMPLENAME1". See tool\'s '
            "documentation for more information on tag names. Note that PL, LB, PU and SM are require "
            "fields. See caveats of rewriting read groups in the documentation.",
            default="",
        )
        validation_stringency = StringField(
            label="Validation stringency",
            description="Validation stringency for all SAM files read by this program. Setting stringency to SILENT "
            "can improve performance when processing a BAM file in which variable-length data (read, "
            "qualities, tags) do not otherwise need to be decoded. Default is STRICT. This setting is "
            "used in BaseRecalibrator and ApplyBQSR processes.",
            choices=[
                ("STRICT", "STRICT"),
                ("LENIENT", "LENIENT"),
                ("SILENT", "SILENT"),
            ],
            default="STRICT",
        )

        class Advanced:
            """Advanced options."""

            use_original_qualities = BooleanField(
                label="Use the base quality scores from the OQ tag",
                description="This flag tells GATK to use the original base qualities "
                "(that were in the data before BQSR/recalibration) which are stored in the OQ tag, if they are "
                "present, rather than use the post-recalibration quality scores. If no OQ tag is present for a "
                "read, the standard qual score will be used.",
                default=False,
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields to BaseQualityScoreRecalibrator."""

        bam = FileField(label="Base quality score recalibrated BAM file")
        bai = FileField(label="Index of base quality score recalibrated BAM file")
        stats = FileField(label="Alignment statistics")
        species = StringField(label="Species")
        build = StringField(label="Build")
        # NOTE(review): label typo "tabled" -> "table"; left unchanged here
        # because fixing a user-facing label is a code change that requires
        # a process version bump.
        recal_table = FileField(label="Recalibration tabled")

    def run(self, inputs, outputs):
        """Run the analysis.

        Pipeline: optionally rewrite read groups with Picard
        AddOrReplaceReadGroups, index the BAM, build a recalibration table
        with GATK BaseRecalibrator, apply it with ApplyBQSR and collect
        samtools flagstat statistics.
        """
        # NOTE(review): returns None when TMPDIR is unset — presumably the
        # execution environment always sets it; confirm, otherwise a None
        # ends up in the GATK argument lists below.
        TMPDIR = os.environ.get("TMPDIR")

        # Prepare output file names.
        bam = os.path.basename(inputs.bam.output.bam.path)
        file_name = os.path.splitext(os.path.basename(inputs.bam.output.bam.path))[0]
        bam_rg = f"{file_name}_RG.bam"

        species = inputs.bam.output.species

        # Never use more GC threads than there are cores available.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced.java_gc_threads
        )

        # Parse read_group argument from a string, delimited by a ; and =
        # into a form that will be accepted by AddOrReplaceReadGroups tool.
        # E.g. '-LB=DAB;-PL=Illumina;-PU=barcode;-SM=sample1' should become
        # ['-LB', 'DAB', '-PL', 'Illumina', '-PU', 'barcode', '-SM', 'sample1']
        # prepended by INPUT and OUTPUT.
        if inputs.read_group:
            arrg = [
                "--java-options",
                f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
                "--INPUT",
                f"{inputs.bam.output.bam.path}",
                "--VALIDATION_STRINGENCY",
                f"{inputs.validation_stringency}",
                "--OUTPUT",
                f"{bam_rg}",
                "--TMP_DIR",
                TMPDIR,
            ]

            present_tags = []
            for x in inputs.read_group.split(";"):
                split_tag = x.split("=")
                arrg.extend(split_tag)
                present_tags.append(split_tag[0])

            # Make sure all arguments to read_group are valid.
            all_tags = {
                "-LB",
                "-PL",
                "-PU",
                "-SM",
                "-CN",
                "-DS",
                "-DT",
                "-FO",
                "-ID",
                "-KS",
                "-PG",
                "-PI",
                "-PM",
                "-SO",
            }

            present_tag_set = set(present_tags)

            check_all_tags = present_tag_set.issubset(all_tags)
            if not check_all_tags:
                self.error("One or more read_group argument(s) improperly formatted.")

            # Check that there are no double entries of arguments to read_group.
            if len(present_tag_set) != len(present_tags):
                self.error("You have duplicate tags in read_group argument.")

            # Check that all mandatory arguments to read_group are present.
            mandatory_tags = {"-LB", "-PL", "-PU", "-SM"}
            check_tags = mandatory_tags.issubset(present_tag_set)
            if not check_tags:
                self.error(
                    "Missing mandatory read_group argument(s) (-PL, -LB, -PU and -SM are mandatory)."
                )

            Cmd["gatk"]["AddOrReplaceReadGroups"](arrg)
        else:
            # No read-group rewrite requested; work on a copy of the input.
            shutil.copy2(inputs.bam.output.bam.path, bam_rg)

        # Make sure the file is indexed.
        Cmd["samtools"]["index"](bam_rg)

        # First pass: compute the recalibration table.
        recal_table = f"{file_name}_recalibration.table"
        br_inputs = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--input",
            f"{bam_rg}",
            "--output",
            f"{recal_table}",
            "--reference",
            f"{inputs.reference.output.fasta.path}",
            "--read-validation-stringency",
            f"{inputs.validation_stringency}",
            "--tmp-dir",
            TMPDIR,
        ]
        if inputs.intervals:
            br_inputs.extend(["--intervals", f"{inputs.intervals.output.bed.path}"])
        if inputs.advanced.use_original_qualities:
            br_inputs.append("--use-original-qualities")

        # Add known sites to the input parameters of BaseRecalibrator.
        for site in inputs.known_sites:
            br_inputs.extend(["--known-sites", f"{site.output.vcf.path}"])

        # Prepare bqsr recalibration file.
        Cmd["gatk"]["BaseRecalibrator"](br_inputs)
        self.progress(0.5)

        # Apply base recalibration.
        ab_inputs = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--input",
            f"{bam_rg}",
            "--output",
            f"{bam}",
            "--reference",
            f"{inputs.reference.output.fasta.path}",
            "--bqsr-recal-file",
            f"{recal_table}",
            "--read-validation-stringency",
            f"{inputs.validation_stringency}",
            "--tmp-dir",
            TMPDIR,
        ]
        if inputs.advanced.use_original_qualities:
            ab_inputs.append("--use-original-qualities")
        Cmd["gatk"]["ApplyBQSR"](ab_inputs)

        # Collect alignment statistics for the recalibrated BAM.
        stats = f"{bam}_stats.txt"
        (Cmd["samtools"]["flagstat"][f"{bam}"] > stats)()
        self.progress(0.9)

        outputs.bam = bam
        # ApplyBQSR writes the index next to the output BAM as <name>.bai.
        outputs.bai = file_name + ".bai"
        outputs.stats = stats
        outputs.species = species
        outputs.build = inputs.bam.output.build
        outputs.recal_table = recal_table
import os
from plumbum import TEE
from resolwe.process import (
Cmd,
DataField,
FileField,
FileHtmlField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
)
class CutadaptCorallSingle(Process):
    """Pre-process reads obtained using CORALL Total RNA-Seq Library Prep Kit.

    Trim UMI-tags from input reads and use Cutadapt to remove adapters and run QC filtering steps.
    """

    slug = "cutadapt-corall-single"
    name = "Cutadapt (Corall RNA-Seq, single-end)"
    process_type = "data:reads:fastq:single:cutadapt:"
    # Version bumped: fixed "Mimimum overlap" label typo.
    version = "1.4.3"
    category = "FASTQ processing"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    data_name = "{{ reads|name|default('?') }}"

    class Input:
        """Input fields."""

        reads = DataField("reads:fastq:single", label="Select sample(s)")

        class Options:
            """Options."""

            nextseq_trim = IntegerField(
                label="NextSeq/NovaSeq trim",
                description="NextSeq/NovaSeq-specific quality trimming. Trims also dark "
                "cycles appearing as high-quality G bases. This option is mutually "
                "exclusive with the use of standard quality-cutoff trimming and is "
                "suitable for the use with data generated by the recent Illumina "
                "machines that utilize two-color chemistry to encode the four bases.",
                default=10,
            )
            quality_cutoff = IntegerField(
                label="Quality cutoff",
                description="Trim low-quality bases from 3' end of each read before adapter "
                "removal. The use of this option will override the use of "
                "NextSeq/NovaSeq trim option.",
                required=False,
            )
            min_len = IntegerField(
                label="Minimum read length",
                default=20,
            )
            min_overlap = IntegerField(
                # Fixed label typo ("Mimimum" -> "Minimum").
                label="Minimum overlap",
                description="Minimum overlap between adapter and read for an adapter to be found.",
                default=20,
            )

        options = GroupField(Options, label="Options")

    class Output:
        """Output fields."""

        fastq = ListField(FileField(), label="Reads file")
        report = FileField(label="Cutadapt report")
        fastqc_url = ListField(FileHtmlField(), label="Quality control with FastQC")
        fastqc_archive = ListField(FileField(), label="Download FastQC archive")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Get input reads file name (for the first of the possible multiple lanes)
        reads_path = os.path.basename(inputs.reads.output.fastq[0].path)
        assert reads_path.endswith(".fastq.gz")
        name = reads_path[:-9]

        # Concatenate multi-lane read files
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
            > "input_reads.fastq.gz"
        )()

        # Extract UMI sequences
        Cmd["extract_umi.sh"]([10, 13, "input_reads.fastq.gz"])

        # Prepare Cutadapt inputs; an explicit quality cutoff overrides the
        # NextSeq/NovaSeq-specific trimming.
        if inputs.options.quality_cutoff is not None:
            read_trim_cutoff = f"--quality-cutoff={inputs.options.quality_cutoff}"
        else:
            read_trim_cutoff = f"--nextseq-trim={inputs.options.nextseq_trim}"

        rd1_adapter = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC"

        # Four chained Cutadapt passes: drop poly-G artifacts, trim 3'
        # adapter with quality trimming, trim poly-A, and finally discard
        # reads that still carry the adapter at the 5' end.
        first_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            inputs.options.min_overlap,
            "-a",
            "QUALITY=G{20}",
            "-j",
            self.requirements.resources.cores,
            "input_reads_umi.fastq.gz",
        ]
        second_pass_input = [
            "-m",
            inputs.options.min_len,
            read_trim_cutoff,
            "-a",
            rd1_adapter,
            "-j",
            self.requirements.resources.cores,
            "-",
        ]
        third_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            3,
            "-a",
            "r1polyA=A{18}",
            "-j",
            self.requirements.resources.cores,
            "-",
        ]
        fourth_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            inputs.options.min_overlap,
            "-g",
            rd1_adapter,
            "--discard-trimmed",
            "-j",
            self.requirements.resources.cores,
            "-o",
            f"{name}_trimmed.fastq.gz",
            "-",
        ]

        # Run Cutadapt, write analysis reports into a report file
        (
            Cmd["cutadapt"][first_pass_input]
            | Cmd["cutadapt"][second_pass_input]
            | Cmd["cutadapt"][third_pass_input]
            | Cmd["cutadapt"][fourth_pass_input]
            > "cutadapt_report.txt"
        )()

        # Prepare final FASTQC report
        fastqc_args = [
            f"{name}_trimmed.fastq.gz",
            "fastqc",
            "fastqc_archive",
            "fastqc_url",
            "--nogroup",
        ]
        return_code, _, _ = Cmd["fastqc.sh"][fastqc_args] & TEE(retcode=None)
        if return_code:
            self.error("Error while preparing FASTQC report.")

        # Save the outputs
        outputs.fastq = [f"{name}_trimmed.fastq.gz"]
        outputs.report = "cutadapt_report.txt"
class CutadaptCorallPaired(Process):
    """Pre-process reads obtained using CORALL Total RNA-Seq Library Prep Kit.

    Trim UMI-tags from input reads and use Cutadapt to remove adapters and run QC filtering steps.
    """

    slug = "cutadapt-corall-paired"
    name = "Cutadapt (Corall RNA-Seq, paired-end)"
    process_type = "data:reads:fastq:paired:cutadapt:"
    # Version bumped: fixed "Mimimum overlap" label typo.
    version = "1.3.3"
    category = "FASTQ processing"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"},
        },
        "resources": {
            "cores": 4,
            "memory": 16384,
        },
    }
    data_name = "{{ reads|name|default('?') }}"

    class Input:
        """Input fields."""

        reads = DataField("reads:fastq:paired", label="Select sample(s)")

        class Options:
            """Options."""

            nextseq_trim = IntegerField(
                label="NextSeq/NovaSeq trim",
                description="NextSeq/NovaSeq-specific quality trimming. Trims also dark "
                "cycles appearing as high-quality G bases. This option is mutually "
                "exclusive with the use of standard quality-cutoff trimming and is "
                "suitable for the use with data generated by the recent Illumina "
                "machines that utilize two-color chemistry to encode the four bases.",
                default=10,
            )
            quality_cutoff = IntegerField(
                label="Quality cutoff",
                description="Trim low-quality bases from 3' end of each read before adapter "
                "removal. The use of this option will override the use of "
                "NextSeq/NovaSeq trim option.",
                required=False,
            )
            min_len = IntegerField(
                label="Minimum read length",
                default=20,
            )
            min_overlap = IntegerField(
                # Fixed label typo ("Mimimum" -> "Minimum").
                label="Minimum overlap",
                description="Minimum overlap between adapter and read for an adapter to be found.",
                default=20,
            )

        options = GroupField(Options, label="Options")

    class Output:
        """Output fields."""

        fastq = ListField(FileField(), label="Remaining mate1 reads")
        fastq2 = ListField(FileField(), label="Remaining mate2 reads")
        report = FileField(label="Cutadapt report")
        fastqc_url = ListField(
            FileHtmlField(), label="Mate1 quality control with FastQC"
        )
        fastqc_url2 = ListField(
            FileHtmlField(), label="Mate2 quality control with FastQC"
        )
        fastqc_archive = ListField(FileField(), label="Download mate1 FastQC archive")
        fastqc_archive2 = ListField(FileField(), label="Download mate2 FastQC archive")

    def run(self, inputs, outputs):
        """Run analysis."""
        # Get input reads file name (for the first of the possible multiple lanes)
        mate1_path = os.path.basename(inputs.reads.output.fastq[0].path)
        assert mate1_path.endswith(".fastq.gz")
        name_mate1 = mate1_path[:-9]
        mate2_path = os.path.basename(inputs.reads.output.fastq2[0].path)
        assert mate2_path.endswith(".fastq.gz")
        name_mate2 = mate2_path[:-9]

        # Concatenate multi-lane read files
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]]
            > "input_reads_mate1.fastq.gz"
        )()
        (
            Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq2]]
            > "input_reads_mate2.fastq.gz"
        )()

        # Extract UMI sequences
        Cmd["extract_umi.sh"](
            [10, 13, "input_reads_mate1.fastq.gz", "input_reads_mate2.fastq.gz"]
        )

        # Prepare Cutadapt inputs; an explicit quality cutoff overrides the
        # NextSeq/NovaSeq-specific trimming.
        if inputs.options.quality_cutoff is not None:
            read_trim_cutoff = f"--quality-cutoff={inputs.options.quality_cutoff}"
        else:
            read_trim_cutoff = f"--nextseq-trim={inputs.options.nextseq_trim}"

        rd1_adapter = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC"
        rd2_adapter = "AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT"

        # Four chained Cutadapt passes in interleaved mode: drop poly-G
        # artifacts, trim 3' adapters with quality trimming, trim poly-A,
        # and finally discard read pairs still carrying a 5' adapter.
        first_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            inputs.options.min_overlap,
            "--interleaved",
            "-n",
            2,
            "-a",
            "QUALITY=G{20}",
            "-A",
            "QUALITY=G{20}",
            "-j",
            self.requirements.resources.cores,
            "input_reads_mate1_umi.fastq.gz",
            "input_reads_mate2_umi.fastq.gz",
        ]
        second_pass_input = [
            "-m",
            inputs.options.min_len,
            "--interleaved",
            "-n",
            3,
            read_trim_cutoff,
            "-a",
            rd1_adapter,
            "-A",
            rd2_adapter,
            "-G",
            "XT{18}",
            "-j",
            self.requirements.resources.cores,
            "-",
        ]
        third_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            3,
            "--interleaved",
            "-n",
            1,
            "-a",
            "r1polyA=A{18}",
            "-j",
            self.requirements.resources.cores,
            "-",
        ]
        fourth_pass_input = [
            "-m",
            inputs.options.min_len,
            "-O",
            inputs.options.min_overlap,
            "--interleaved",
            "-g",
            rd1_adapter,
            "-G",
            rd2_adapter,
            "--discard-trimmed",
            "-j",
            self.requirements.resources.cores,
            "-o",
            f"{name_mate1}_trimmed.fastq.gz",
            "-p",
            f"{name_mate2}_trimmed.fastq.gz",
            "-",
        ]

        # Run Cutadapt, write analysis reports into a report file
        (
            Cmd["cutadapt"][first_pass_input]
            | Cmd["cutadapt"][second_pass_input]
            | Cmd["cutadapt"][third_pass_input]
            | Cmd["cutadapt"][fourth_pass_input]
            > "cutadapt_report.txt"
        )()

        # Prepare final FASTQC report
        fastqc_args = [
            f"{name_mate1}_trimmed.fastq.gz",
            "fastqc",
            "fastqc_archive",
            "fastqc_url",
        ]
        return_code, _, _ = Cmd["fastqc.sh"][fastqc_args] & TEE(retcode=None)
        if return_code:
            self.error("Error while preparing FASTQC report.")

        fastqc_args = [
            f"{name_mate2}_trimmed.fastq.gz",
            "fastqc",
            "fastqc_archive2",
            "fastqc_url2",
        ]
        return_code, _, _ = Cmd["fastqc.sh"][fastqc_args] & TEE(retcode=None)
        if return_code:
            self.error("Error while preparing FASTQC report.")

        # Save the outputs
        outputs.fastq = [f"{name_mate1}_trimmed.fastq.gz"]
        outputs.fastq2 = [f"{name_mate2}_trimmed.fastq.gz"]
        outputs.report = "cutadapt_report.txt"
import collections
import glob
import gzip
import shutil
from pathlib import Path
import dnaio
from dnaio.exceptions import FastqFormatError, FileFormatError
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FileHtmlField,
ListField,
Persistence,
Process,
SchedulingClass,
)
# File name extensions accepted for uploaded FASTQ files. Matched
# case-insensitively against the lower-cased file name in check_file().
SUPPORTED_EXTENSIONS = (
    ".fastq",
    ".fastq.gz",
    ".fq",
    ".fq.gz",
)
def check_file(infile):
    """Check that the input file exists and has a supported extension.

    Returns a human-readable status message; callers detect success by
    testing for the literal substring "Correct input file.".
    """
    fq_file = Path(infile)
    # Guard clauses: report the first problem found and bail out early.
    if not fq_file.is_file():
        return "Input file {} does not exist".format(fq_file.name)
    if not fq_file.name.lower().endswith(SUPPORTED_EXTENSIONS):
        return (
            "Unrecognized file name extension in file {}. "
            "Supported file name extensions are {}.".format(
                fq_file.name, SUPPORTED_EXTENSIONS
            )
        )
    return "Correct input file."
def validate_fastq(fq, fq2=None):
    """Validate FASTQ files.

    Checks that input file names are unique, that mate files come in
    matching numbers, and that every file (or mate-pair) parses as FASTQ
    and contains at least one read. Returns a human-readable status
    message; callers detect success by testing for the substrings
    "Successfully validated mate-pair" / "Successfully validated reads".

    :param list fq: gzipped FASTQ file paths (mate 1 for paired data)
    :param list fq2: optional gzipped FASTQ file paths for mate 2
    """
    input_fastq = fq
    if fq2:
        input_fastq = fq + fq2
    # Reduce the probability of uploading the FASTQ files with the same
    # content multiple times (as multiple lanes or mates).
    if len(set(input_fastq)) != len(input_fastq):
        seen_files = [
            item
            for item, count in collections.Counter(input_fastq).items()
            if count > 1
        ]
        return "Non-unique input file names detected: {}.".format(seen_files)
    if fq2 and len(fq) != len(fq2):
        return (
            "The number of mate-pair files in split-lane samples must match. "
            "{} and {} input files were given for the -fq and -fq2 inputs, "
            "respectively.".format(len(fq), len(fq2))
        )
    if fq2:
        # BUGFIX: the previous implementation returned a success message
        # inside the loop, so only the FIRST mate-pair was ever validated.
        # Now every pair is fully parsed before success is reported.
        message = ""
        for mate1_path, mate2_path in zip(fq, fq2):
            try:
                with gzip.open(mate1_path) as mate1, gzip.open(mate2_path) as mate2:
                    paired_reads = dnaio.open(mate1, file2=mate2, fileformat="fastq")
                    if not any(paired_reads):
                        return "Mate-pair files {} and {} contain no read sequences.".format(
                            mate1.name, mate2.name
                        )
                    # Iterate to the end so format errors anywhere in the
                    # files (not just the first record) are detected.
                    for _ in paired_reads:
                        continue
                    message = "Successfully validated mate-pair files {} and {}.".format(
                        mate1.name, mate2.name
                    )
            except (FastqFormatError, FileFormatError) as dnaio_error:
                return "Format error in mate-pairs {} and {}. {}".format(
                    Path(mate1_path).name, Path(mate2_path).name, str(dnaio_error)
                )
        return message
    # Single-end branch: validate every file, not just the first one.
    message = ""
    for fq_path in fq:
        fq_path = Path(fq_path)
        try:
            with gzip.open(fq_path) as read:
                reads = dnaio.open(read, fileformat="fastq")
                if not any(reads):
                    return "Input file {} contains no read sequences.".format(
                        fq_path.name
                    )
                for _ in reads:
                    continue
                message = "Successfully validated reads file {}.".format(
                    fq_path.name
                )
        except (FastqFormatError, FileFormatError) as dnaio_error:
            return "Error in file {}. {}".format(fq_path.name, str(dnaio_error))
    return message
def run_fastqc(fastqs, output_dir):
    """Run fastQC on given FASTQs.

    :param list fastqs: List of fastqs
    :param str output_dir: Output directory
    """
    out_path = Path(output_dir)
    out_path.mkdir(exist_ok=True)
    # Build the command incrementally: one positional argument per FASTQ,
    # then the extraction and output-directory flags.
    fastqc_cmd = Cmd["fastqc"]
    for fq in fastqs:
        fastqc_cmd = fastqc_cmd[fq]
    fastqc_cmd = fastqc_cmd["--extract"][f"--outdir={str(out_path)}"]
    _, _, stderr = fastqc_cmd & TEE
    return stderr
def parse_encoding_type(report_file):
    """Parse encoding type.

    Scans a fastqc_data.txt report for the tab-separated "Encoding" row
    and returns its value, or "Unknown" if no (non-empty) value is found.
    """
    found = ""
    with open(report_file) as report:
        for line in report:
            if line.startswith("Encoding"):
                found = line.strip().split("\t")[1]
                break
    return found if found != "" else "Unknown"
def replace_extension(infile):
    """Replace extensions of file.

    Strips the final (up to two) file name extensions from *infile* and
    appends ``.fastq.gz`` instead, e.g. ``reads.fq.gz`` -> ``reads.fastq.gz``.
    """
    path_str = str(infile)
    # Join the last two suffixes of the final path component, e.g. ".fq.gz".
    extensions = "".join(Path(path_str).suffixes[-2:])
    new_ext = ".fastq.gz"
    # BUGFIX: replace only at the END of the path. The previous
    # str.replace()-based approach replaced the first occurrence anywhere
    # in the path (wrong when a directory name contained the extension)
    # and, for an extensionless name, replace("", ext) inserted the new
    # extension between every character.
    if extensions:
        return path_str[: -len(extensions)] + new_ext
    return path_str + new_ext
class UploadFastqSingle(Process):
    """Import single-end reads in FASTQ format.

    Import single-end reads in FASTQ format, which is a text-based format for
    storing both a biological sequence (usually nucleotide sequence) and its
    corresponding quality scores.
    """

    slug = "upload-fastq-single"
    name = "FASTQ file (single-end)"
    process_type = "data:reads:fastq:single"
    version = "2.6.0"
    category = "Import"
    data_name = '{{ src.0.file|default("?") }}'
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {
            "cores": 1,
            "network": True,
        },
    }

    class Input:
        """Input fields to process UploadFastqSingle."""

        src = ListField(
            FileField(),
            label="Reads",
            description="Sequencing reads in FASTQ format. "
            "Supported extensions: .fastq.gz (preferred), .fq.* or .fastq.*",
        )
        merge_lanes = BooleanField(
            label="Merge lanes",
            default=False,
            description="Merge sample data split into multiple sequencing "
            "lanes into a single FASTQ file.",
        )

    class Output:
        """Output fields to process UploadFastqSingle."""

        fastq = ListField(FileField(), label="Reads file")
        fastqc_url = ListField(FileHtmlField(), label="Quality control with FastQC")
        fastqc_archive = ListField(FileField(), label="Download FastQC archive")

    def run(self, inputs, outputs):
        """Run upload."""
        # Import each uploaded file, sanity-check it and normalize its name
        # to the canonical .fastq.gz extension.
        fastqgz = []
        for read in inputs.src:
            read_imported = read.import_file(imported_format="compressed")
            stderr = check_file(infile=read_imported)
            if "Correct input file." not in stderr:
                self.error(stderr)
            renamed_reads = replace_extension(infile=read_imported)
            Path(read_imported).rename(renamed_reads)
            fastqgz.append(renamed_reads)
        # Validate FASTQ content (uniqueness, parseability, non-empty files).
        stderr = validate_fastq(fq=fastqgz)
        if "Successfully validated reads" not in stderr:
            self.error(stderr)
        if inputs.merge_lanes:
            # Decompress each lane and re-compress into one merged gzip file.
            first_read = fastqgz[0][:-9]
            fastqz = f"{first_read}_merged.fastq.gz"
            with gzip.open(fastqz, "wb") as outfile:
                for read in fastqgz:
                    with gzip.open(read, "rb") as infile:
                        shutil.copyfileobj(infile, outfile)
            fastqgz = [fastqz]
        # NOTE(review): `fastqgz` is already a list; wrapping it in another
        # list relies on plumbum expanding nested list arguments — confirm
        # and consider passing `fastqgz` directly (as the paired variant does).
        stderr = run_fastqc([fastqgz], "./fastqc")
        if "Failed to process" in stderr or "Skipping" in stderr:
            self.error("Failed while processing with FastQC.")
        for fastqc_zip in glob.glob("fastqc/*_fastqc.zip"):
            shutil.move(fastqc_zip, ".")
        # Collect per-file FastQC outputs and check the quality encoding.
        fastqc = []
        fastqc_url = []
        for fq in fastqgz:
            reads_name = Path(fq).name.replace(".fastq.gz", "")
            report_dir = Path("fastqc") / Path(f"{reads_name}_fastqc")
            if not report_dir.is_dir():
                continue
            fastqc_zip = Path(f"{reads_name}_fastqc.zip")
            if not fastqc_zip.is_file():
                self.error(f"FastQC failed to produce {fastqc_zip} file.")
            fastqc.append(str(fastqc_zip))
            fastqc_url.append(
                {
                    "file": str(report_dir / "fastqc_report.html"),
                    "refs": [str(report_dir)],
                }
            )
            encoding_file = report_dir / "fastqc_data.txt"
            encoding = parse_encoding_type(report_file=encoding_file)
            if encoding == "Illumina 1.5" or encoding == "Illumina 1.3":
                # Phred64-encoded reads are recoded to Phred33 in place via
                # Trimmomatic's TOPHRED33 step.
                self.info(
                    "Recoding input reads from Phred64 encoding to Phred33 encoding."
                )
                Path(f"{reads_name}.fastq.gz").rename("input_reads.fastq.gz")
                return_code, _, stderr = Cmd["TrimmomaticSE"][
                    "-phred64",
                    "input_reads.fastq.gz",
                    "reformated.fastq.gz",
                    "TOPHRED33",
                ] & TEE(retcode=None)
                if return_code:
                    print(stderr)
                    self.error("Error while running TrimmomaticSE.")
                Path("reformated.fastq.gz").rename(f"{reads_name}.fastq.gz")
            elif encoding != "Sanger / Illumina 1.9":
                # NOTE(review): "lllumina" typo in the user-facing message below.
                self.error(
                    "Only Sanger / Illumina 1.9 / lllumina 1.5 / Illumina 1.3 encoding is supported."
                )
        outputs.fastq = fastqgz
        outputs.fastqc_url = fastqc_url
        outputs.fastqc_archive = fastqc
class UploadFastqPaired(Process):
    """Import paired-end reads in FASTQ format.

    Import paired-end reads in FASTQ format, which is a text-based format for
    storing both a biological sequence (usually nucleotide sequence) and its
    corresponding quality scores.
    """

    slug = "upload-fastq-paired"
    name = "FASTQ file (paired-end)"
    process_type = "data:reads:fastq:paired"
    version = "2.6.0"
    category = "Import"
    data_name = '{{ src1.0.file|default("?") }}'
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {
            "cores": 1,
            "network": True,
        },
    }

    class Input:
        """Input fields to process UploadFastqPaired."""

        src1 = ListField(
            FileField(),
            label="Mate1",
            description="Sequencing reads in FASTQ format. "
            "Supported extensions: .fastq.gz (preferred), .fq.* or .fastq.*",
        )
        src2 = ListField(
            FileField(),
            label="Mate2",
            description="Sequencing reads in FASTQ format. "
            "Supported extensions: .fastq.gz (preferred), .fq.* or .fastq.*",
        )
        merge_lanes = BooleanField(
            label="Merge lanes",
            default=False,
            description="Merge sample data split into multiple sequencing "
            "lanes into a single FASTQ file.",
        )

    class Output:
        """Output fields to process UploadFastqPaired."""

        fastq = ListField(FileField(), label="Reads file (mate 1)")
        fastq2 = ListField(FileField(), label="Reads file (mate 2)")
        fastqc_url = ListField(
            FileHtmlField(), label="Quality control with FastQC (Upstream)"
        )
        fastqc_url2 = ListField(
            FileHtmlField(), label="Quality control with FastQC (Downstream)"
        )
        fastqc_archive = ListField(
            FileField(), label="Download FastQC archive (Upstream)"
        )
        fastqc_archive2 = ListField(
            FileField(), label="Download FastQC archive (Downstream)"
        )

    def run(self, inputs, outputs):
        """Run upload."""
        mate1_fastqgz = []
        mate1_fastqc = []
        mate1_fastqc_url = []
        mate2_fastqgz = []
        mate2_fastqc = []
        mate2_fastqc_url = []
        rep_dir = Path("fastqc")
        # Import mate-1 files: sanity-check, normalize names to .fastq.gz and
        # pre-compute the expected FastQC output locations.
        for read in inputs.src1:
            read_imported = read.import_file(imported_format="compressed")
            stderr = check_file(infile=read_imported)
            if "Correct input file." not in stderr:
                self.error(stderr)
            renamed_reads = replace_extension(infile=read_imported)
            Path(read_imported).rename(renamed_reads)
            # Base name without the trailing ".fastq.gz" (9 characters).
            name_mate1 = renamed_reads[:-9]
            mate1_fastqgz.append(renamed_reads)
            mate1_fastqc.append(f"{name_mate1}_fastqc.zip")
            mate1_fastqc_url.append(
                {
                    "file": str(
                        rep_dir / f"{name_mate1}_fastqc" / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{name_mate1}_fastqc")],
                }
            )
        # Same staging for mate-2 files.
        for read in inputs.src2:
            read_imported = read.import_file(imported_format="compressed")
            stderr = check_file(infile=read_imported)
            if "Correct input file." not in stderr:
                self.error(stderr)
            renamed_reads = replace_extension(infile=read_imported)
            Path(read_imported).rename(renamed_reads)
            name_mate2 = renamed_reads[:-9]
            mate2_fastqgz.append(renamed_reads)
            mate2_fastqc.append(f"{name_mate2}_fastqc.zip")
            mate2_fastqc_url.append(
                {
                    "file": str(
                        rep_dir / f"{name_mate2}_fastqc" / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{name_mate2}_fastqc")],
                }
            )
        # Validate both mates together (uniqueness, pairing, parseability).
        stderr = validate_fastq(fq=mate1_fastqgz, fq2=mate2_fastqgz)
        if "Successfully validated mate-pair" not in stderr:
            self.error(stderr)
        if inputs.merge_lanes:
            # Decompress each lane and re-compress into one merged gzip file,
            # separately per mate.
            mate1_first_lane = mate1_fastqgz[0][:-9]
            fastqz_1 = f"{mate1_first_lane}_merged.fastq.gz"
            with gzip.open(fastqz_1, "wb") as outfile:
                for read in mate1_fastqgz:
                    with gzip.open(read, "rb") as infile:
                        shutil.copyfileobj(infile, outfile)
            mate1_fastqgz = [f"{mate1_first_lane}_merged.fastq.gz"]
            mate1_fastqc = [f"{mate1_first_lane}_merged_fastqc.zip"]
            mate1_fastqc_url = [
                {
                    "file": str(
                        rep_dir
                        / f"{mate1_first_lane}_merged_fastqc"
                        / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{mate1_first_lane}_merged_fastqc")],
                }
            ]
            mate2_first_lane = mate2_fastqgz[0][:-9]
            fastqz_2 = f"{mate2_first_lane}_merged.fastq.gz"
            with gzip.open(fastqz_2, "wb") as outfile:
                for read in mate2_fastqgz:
                    with gzip.open(read, "rb") as infile:
                        shutil.copyfileobj(infile, outfile)
            mate2_fastqgz = [f"{mate2_first_lane}_merged.fastq.gz"]
            mate2_fastqc = [f"{mate2_first_lane}_merged_fastqc.zip"]
            mate2_fastqc_url = [
                {
                    "file": str(
                        rep_dir
                        / f"{mate2_first_lane}_merged_fastqc"
                        / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{mate2_first_lane}_merged_fastqc")],
                }
            ]
        stderr = run_fastqc(mate1_fastqgz + mate2_fastqgz, "./fastqc")
        if "Failed to process" in stderr or "Skipping" in stderr:
            self.error("Failed while processing with FastQC.")
        for fastqc_zip in glob.glob("fastqc/*_fastqc.zip"):
            shutil.move(fastqc_zip, ".")
        # Check the quality encoding of every produced FastQC report and
        # recode Phred64 reads to Phred33 in place where needed.
        for report_dir in Path("fastqc").iterdir():
            if not report_dir.is_dir():
                continue
            reads_name = report_dir.name.replace("_fastqc", "")
            encoding_file = report_dir / "fastqc_data.txt"
            encoding = parse_encoding_type(report_file=encoding_file)
            if encoding == "Illumina 1.5" or encoding == "Illumina 1.3":
                print("Recoding input reads from Phred64 encoding to Phred33 encoding.")
                Path(f"{reads_name}.fastq.gz").rename("input_reads.fastq.gz")
                return_code, _, stderr = Cmd["TrimmomaticSE"][
                    "-phred64",
                    "input_reads.fastq.gz",
                    "reformated.fastq.gz",
                    "TOPHRED33",
                ] & TEE(retcode=None)
                if return_code:
                    print(stderr)
                    self.error("Recoding of input reads failed.")
                Path("reformated.fastq.gz").rename(f"{reads_name}.fastq.gz")
            elif encoding != "Sanger / Illumina 1.9":
                # NOTE(review): "lllumina" typo in the user-facing message below.
                self.error(
                    "Only Sanger / Illumina 1.9 / lllumina 1.5 / Illumina 1.3 encoding is "
                    "supported."
                )
        outputs.fastq = mate1_fastqgz
        outputs.fastq2 = mate2_fastqgz
        outputs.fastqc_url = mate1_fastqc_url
        outputs.fastqc_url2 = mate2_fastqc_url
        outputs.fastqc_archive = mate1_fastqc
        outputs.fastqc_archive2 = mate2_fastqc
class FilesToFastqSingle(Process):
    """Convert FASTQ files to single-end reads."""

    slug = "files-to-fastq-single"
    name = "Convert files to reads (single-end)"
    process_type = "data:reads:fastq:single"
    version = "1.6.0"
    category = "Import"
    data_name = "Files to FASTQ single-end ({{ (src|first).file.file }})"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
    }

    class Input:
        """Input fields to process FilesToFastqSingle."""

        src = ListField(
            DataField("file"),
            label="Reads",
            description="Sequencing reads in FASTQ format",
        )
        merge_lanes = BooleanField(
            label="Merge lanes",
            default=False,
            description="Merge sample data split into multiple sequencing "
            "lanes into a single FASTQ file.",
        )

    class Output:
        """Output fields to process FilesToFastqSingle."""

        fastq = ListField(FileField(), label="Reads file")
        fastqc_url = ListField(FileHtmlField(), label="Quality control with FastQC")
        fastqc_archive = ListField(FileField(), label="Download FastQC archive")

    def run(self, inputs, outputs):
        """Run upload."""
        # Copy each referenced file locally, compress uncompressed inputs
        # with pigz and normalize names to the canonical .fastq.gz extension.
        fastqgz = []
        for read in inputs.src:
            read_name = Path(read.output.file.path).name
            shutil.copy(read.output.file.path, ".")
            stderr = check_file(infile=read_name)
            if "Correct input file." not in stderr:
                self.error(stderr)
            if not read_name.endswith(".gz"):
                Cmd["pigz"][read_name]()
                renamed_reads = replace_extension(infile=f"{read_name}.gz")
                Path(f"{read_name}.gz").rename(renamed_reads)
                fastqgz.append(renamed_reads)
            else:
                renamed_reads = replace_extension(infile=read_name)
                Path(read_name).rename(renamed_reads)
                fastqgz.append(renamed_reads)
        stderr = validate_fastq(fq=fastqgz)
        if "Successfully validated reads" not in stderr:
            self.error(stderr)
        if inputs.merge_lanes:
            # Concatenates the gzip files byte-wise (plain open, no
            # recompression); the result is a valid multi-member gzip stream.
            first_read = fastqgz[0][:-9]
            fastqz = f"{first_read}_merged.fastq.gz"
            with open(fastqz, "wb") as outfile:
                for read in fastqgz:
                    with open(read, "rb") as infile:
                        shutil.copyfileobj(infile, outfile)
            fastqgz = [fastqz]
        # NOTE(review): `fastqgz` is already a list; wrapping it in another
        # list relies on plumbum expanding nested list arguments — confirm.
        stderr = run_fastqc([fastqgz], "./fastqc")
        if "Failed to process" in stderr or "Skipping" in stderr:
            self.error("Failed while processing with FastQC.")
        for fastqc_zip in glob.glob("fastqc/*_fastqc.zip"):
            shutil.move(fastqc_zip, ".")
        # Collect per-file FastQC outputs and check the quality encoding.
        fastqc = []
        fastqc_url = []
        for fq in fastqgz:
            reads_name = Path(fq).name.replace(".fastq.gz", "")
            report_dir = Path("fastqc") / Path(f"{reads_name}_fastqc")
            if not report_dir.is_dir():
                continue
            fastqc_zip = Path(f"{reads_name}_fastqc.zip")
            if not fastqc_zip.is_file():
                self.error(f"FastQC failed to produce {fastqc_zip} file.")
            fastqc.append(str(fastqc_zip))
            fastqc_url.append(
                {
                    "file": str(report_dir / "fastqc_report.html"),
                    "refs": [str(report_dir)],
                }
            )
            encoding_file = report_dir / "fastqc_data.txt"
            encoding = parse_encoding_type(report_file=encoding_file)
            # NOTE(review): logs the raw encoding string — looks like
            # leftover debug output; consider removing.
            self.info(encoding)
            if encoding == "Illumina 1.5" or encoding == "Illumina 1.3":
                self.info(
                    "Recoding input reads from Phred64 encoding to Phred33 encoding."
                )
                Path(f"{reads_name}.fastq.gz").rename("input_reads.fastq.gz")
                return_code, _, stderr = Cmd["TrimmomaticSE"][
                    "-phred64",
                    "input_reads.fastq.gz",
                    "reformated.fastq.gz",
                    "TOPHRED33",
                ] & TEE(retcode=None)
                if return_code:
                    print(stderr)
                    self.error("Error while running TrimmomaticSE.")
                Path("reformated.fastq.gz").rename(f"{reads_name}.fastq.gz")
            elif encoding != "Sanger / Illumina 1.9":
                # NOTE(review): "lllumina" typo in the user-facing message below.
                self.error(
                    "Only Sanger / Illumina 1.9 / lllumina 1.5 / Illumina 1.3 encoding is supported."
                )
        outputs.fastq = fastqgz
        outputs.fastqc_url = fastqc_url
        outputs.fastqc_archive = fastqc
class FilesToFastqPaired(Process):
    """Convert FASTQ files to paired-end reads."""

    slug = "files-to-fastq-paired"
    name = "Convert files to reads (paired-end)"
    process_type = "data:reads:fastq:paired"
    version = "1.6.0"
    category = "Import"
    data_name = "Files to FASTQ paired-end ({{ (src1|first).file.file }}, {{(src2|first).file.file}})"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
    }

    class Input:
        """Input fields to process FilesToFastqPaired."""

        src1 = ListField(
            DataField("file"),
            label="Mate1",
        )
        src2 = ListField(
            DataField("file"),
            label="Mate2",
        )
        merge_lanes = BooleanField(
            label="Merge lanes",
            default=False,
            description="Merge sample data split into multiple sequencing "
            "lanes into a single FASTQ file.",
        )

    class Output:
        """Output fields to process FilesToFastqPaired."""

        fastq = ListField(FileField(), label="Reads file (mate 1)")
        fastq2 = ListField(FileField(), label="Reads file (mate 2)")
        fastqc_url = ListField(
            FileHtmlField(), label="Quality control with FastQC (Upstream)"
        )
        fastqc_url2 = ListField(
            FileHtmlField(), label="Quality control with FastQC (Downstream)"
        )
        # Label typos fixed ("FasQC" -> "FastQC") for consistency with
        # UploadFastqPaired.
        fastqc_archive = ListField(
            FileField(), label="Download FastQC archive (Upstream)"
        )
        fastqc_archive2 = ListField(
            FileField(), label="Download FastQC archive (Downstream)"
        )

    def run(self, inputs, outputs):
        """Run upload."""
        mate1_fastqgz = []
        mate1_fastqc = []
        mate1_fastqc_url = []
        mate2_fastqgz = []
        mate2_fastqc = []
        mate2_fastqc_url = []
        rep_dir = Path("fastqc")
        # Stage mate-1 files: copy locally, sanity-check, compress when
        # needed and normalize names to the canonical .fastq.gz extension.
        for read in inputs.src1:
            read_name = Path(read.output.file.path).name
            shutil.copy(read.output.file.path, ".")
            stderr = check_file(infile=read_name)
            if "Correct input file." not in stderr:
                self.error(stderr)
            if not read_name.endswith(".gz"):
                Cmd["pigz"][read_name]()
                renamed_reads = replace_extension(infile=f"{read_name}.gz")
                Path(f"{read_name}.gz").rename(renamed_reads)
            else:
                renamed_reads = replace_extension(infile=read_name)
                Path(read_name).rename(renamed_reads)
            # BUGFIX: derive the base name from the RENAMED file (which
            # always ends in ".fastq.gz", 9 characters). The previous code
            # sliced ``read_name[:-9]``, which produced truncated or empty
            # base names for any input not already ending in ".fastq.gz"
            # (e.g. ".fq.gz" or uncompressed inputs). UploadFastqPaired
            # already used the renamed file.
            name_mate1 = renamed_reads[:-9]
            mate1_fastqgz.append(f"{name_mate1}.fastq.gz")
            mate1_fastqc.append(f"{name_mate1}_fastqc.zip")
            mate1_fastqc_url.append(
                {
                    "file": str(
                        rep_dir / f"{name_mate1}_fastqc" / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{name_mate1}_fastqc")],
                }
            )
        # Same staging for mate-2 files.
        for read in inputs.src2:
            read_name = Path(read.output.file.path).name
            shutil.copy(read.output.file.path, ".")
            stderr = check_file(infile=read_name)
            if "Correct input file." not in stderr:
                self.error(stderr)
            if not read_name.endswith(".gz"):
                Cmd["pigz"][read_name]()
                renamed_reads = replace_extension(infile=f"{read_name}.gz")
                Path(f"{read_name}.gz").rename(renamed_reads)
            else:
                renamed_reads = replace_extension(infile=read_name)
                Path(read_name).rename(renamed_reads)
            # BUGFIX: same base-name fix as for mate 1 above.
            name_mate2 = renamed_reads[:-9]
            mate2_fastqgz.append(f"{name_mate2}.fastq.gz")
            mate2_fastqc.append(f"{name_mate2}_fastqc.zip")
            mate2_fastqc_url.append(
                {
                    "file": str(
                        rep_dir / f"{name_mate2}_fastqc" / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{name_mate2}_fastqc")],
                }
            )
        # Validate both mates together (uniqueness, pairing, parseability).
        stderr = validate_fastq(fq=mate1_fastqgz, fq2=mate2_fastqgz)
        if "Successfully validated mate-pair" not in stderr:
            self.error(stderr)
        if inputs.merge_lanes:
            # Decompress each lane and re-compress into one merged gzip file,
            # separately per mate.
            mate1_first_lane = mate1_fastqgz[0][:-9]
            fastqz_1 = f"{mate1_first_lane}_merged.fastq.gz"
            with gzip.open(fastqz_1, "wb") as outfile:
                for read in mate1_fastqgz:
                    with gzip.open(read, "rb") as infile:
                        shutil.copyfileobj(infile, outfile)
            mate1_fastqgz = [f"{mate1_first_lane}_merged.fastq.gz"]
            mate1_fastqc = [f"{mate1_first_lane}_merged_fastqc.zip"]
            mate1_fastqc_url = [
                {
                    "file": str(
                        rep_dir
                        / f"{mate1_first_lane}_merged_fastqc"
                        / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{mate1_first_lane}_merged_fastqc")],
                }
            ]
            mate2_first_lane = mate2_fastqgz[0][:-9]
            fastqz_2 = f"{mate2_first_lane}_merged.fastq.gz"
            with gzip.open(fastqz_2, "wb") as outfile:
                for read in mate2_fastqgz:
                    with gzip.open(read, "rb") as infile:
                        shutil.copyfileobj(infile, outfile)
            mate2_fastqgz = [f"{mate2_first_lane}_merged.fastq.gz"]
            mate2_fastqc = [f"{mate2_first_lane}_merged_fastqc.zip"]
            mate2_fastqc_url = [
                {
                    "file": str(
                        rep_dir
                        / f"{mate2_first_lane}_merged_fastqc"
                        / "fastqc_report.html"
                    ),
                    "refs": [str(rep_dir / f"{mate2_first_lane}_merged_fastqc")],
                }
            ]
        stderr = run_fastqc(mate1_fastqgz + mate2_fastqgz, "./fastqc")
        if "Failed to process" in stderr or "Skipping" in stderr:
            self.error("Failed while processing with FastQC.")
        for fastqc_zip in glob.glob("fastqc/*_fastqc.zip"):
            shutil.move(fastqc_zip, ".")
        # Check the quality encoding of every produced FastQC report and
        # recode Phred64 reads to Phred33 in place where needed.
        for report_dir in Path("fastqc").iterdir():
            if not report_dir.is_dir():
                continue
            reads_name = report_dir.name.replace("_fastqc", "")
            encoding_file = report_dir / "fastqc_data.txt"
            encoding = parse_encoding_type(report_file=encoding_file)
            if encoding == "Illumina 1.5" or encoding == "Illumina 1.3":
                print("Recoding input reads from Phred64 encoding to Phred33 encoding.")
                Path(f"{reads_name}.fastq.gz").rename("input_reads.fastq.gz")
                return_code, _, stderr = Cmd["TrimmomaticSE"][
                    "-phred64",
                    "input_reads.fastq.gz",
                    "reformated.fastq.gz",
                    "TOPHRED33",
                ] & TEE(retcode=None)
                if return_code:
                    print(stderr)
                    self.error("Recoding of input reads failed.")
                Path("reformated.fastq.gz").rename(f"{reads_name}.fastq.gz")
            elif encoding != "Sanger / Illumina 1.9":
                self.error(
                    "Only Sanger / Illumina 1.9 / lllumina 1.5 / Illumina 1.3 encoding is "
                    "supported."
                )
        outputs.fastq = mate1_fastqgz
        outputs.fastq2 = mate2_fastqgz
        outputs.fastqc_url = mate1_fastqc_url
        outputs.fastqc_url2 = mate2_fastqc_url
        outputs.fastqc_archive = mate1_fastqc
        outputs.fastqc_archive2 = mate2_fastqc
import shutil
from pathlib import Path
from resolwe.process import (
Cmd,
DirField,
FileField,
Process,
SchedulingClass,
StringField,
)
class ImportBWA2Index(Process):
    """Import BWA-MEM2 index files.

    Import a pre-built BWA-MEM2 genome index archive together with the
    reference sequence it was built from.
    """

    slug = "upload-bwamem2-index"
    name = "BWA-MEM2 index files"
    process_type = "data:index:bwamem2"
    # BUGFIX: a duplicate ``version = "1.0.0"`` assignment further down the
    # class body silently overrode this value; the stale duplicate has been
    # removed so the declared version takes effect.
    version = "1.1.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/dnaseq:6.3.1"},
        },
    }
    data_name = '{{ ref_seq.file|default("?") }}'

    class Input:
        """Input fields to process Import BWA-MEM2 index."""

        ref_seq = FileField(label="Reference sequence (nucleotide FASTA)")
        index_name = FileField(label="BWA-MEM2 index files")
        species = StringField(
            label="Species",
            description="Select a species name from the dropdown menu "
            "or write a custom species name in the species "
            "field. For sequences that are not related to "
            "any particular species (e.g. adapters file), "
            "you can select the value Other.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Macaca mulatta", "Macaca mulatta"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
                ("Other", "Other"),
            ],
        )
        build = StringField(
            label="Genome build",
        )

    class Output:
        """Output fields to process BWAMEM2Index."""

        index = DirField(label="BWA-MEM2 index")
        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Unpack the uploaded index archive into a dedicated directory.
        index_dir = Path("BWAMEM2_index")
        if not index_dir.exists():
            index_dir.mkdir()
        index_file = inputs.index_name.import_file(imported_format="compressed")
        shutil.unpack_archive(index_file, index_dir)
        # Import the reference FASTA and enforce a supported extension.
        fasta_path = inputs.ref_seq.import_file(imported_format="extracted")
        supported_extensions = (".fa", ".fasta", ".faa", ".fna", ".ffn", ".frn")
        if not fasta_path.endswith(supported_extensions):
            self.error(
                f"The imported file has unsupported file name extension. "
                f"The supported extensions are {supported_extensions}."
            )
        # Normalize the file name to a .fasta suffix, then produce the
        # compressed copy (pigz -k keeps the original) and the samtools
        # .fai index.
        fasta_path = Path(fasta_path)
        output_fasta = fasta_path.with_suffix(".fasta")
        fasta_path.rename(output_fasta)
        Cmd["pigz"]["-k", output_fasta]()
        Cmd["samtools"]["faidx", output_fasta]()
        outputs.index = index_dir.name
        outputs.fasta = output_fasta.name
        outputs.fastagz = f"{output_fasta}.gz"
        outputs.fai = f"{output_fasta}.fai"
        outputs.species = inputs.species
        outputs.build = inputs.build
from pathlib import Path
import dnaio
from dnaio.exceptions import FastaFormatError
from resolwe.process import (
Cmd,
FileField,
IntegerField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class ImportFastaNucleotide(Process):
    """Import nucleotide sequence file in FASTA format.

    FASTA file is a text-based format for representing nucleotide sequences, in which nucleotides
    are represented using single-letter codes. The uploaded FASTA file can hold multiple nucleotide
    sequences.
    """

    slug = "upload-fasta-nucl"
    name = "FASTA file"
    process_type = "data:seq:nucleotide"
    version = "3.2.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {
            "cores": 2,
            "memory": 8192,
            "network": True,
        },
    }
    data_name = '{{ src.file|default("?") }}'

    class Input:
        """Input fields to process ImportFastaNucleotide."""

        src = FileField(label="Sequence file (FASTA)")
        species = StringField(
            label="Species",
            description="Select a species name from the dropdown menu "
            "or write a custom species name in the species "
            "field. For sequences that are not related to "
            "any particular species (e.g. adapters file), "
            "you can select the value Other.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Macaca mulatta", "Macaca mulatta"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
                ("Other", "Other"),
            ],
        )
        build = StringField(
            label="Genome build",
            description="Enter a genome build information associated "
            "with the uploaded sequence(s).",
        )

    class Output:
        """Output field of the process ImportFastaNucleotide."""

        fastagz = FileField(label="FASTA file (compressed)")
        fasta = FileField(label="FASTA file")
        fai = FileField(label="FASTA file index")
        fasta_dict = FileField(label="FASTA dictionary")
        num_seqs = IntegerField(label="Number of sequences")
        species = StringField(label="Species")
        build = StringField(label="Build")

    def validate_fasta(self, infile):
        """Validate FASTA file format.

        Reports a process error when the file is empty or malformed;
        otherwise logs a success message.
        """
        try:
            with dnaio.open(infile, fileformat="fasta") as fasta_file:
                # any() consumes at most one record, which is enough to
                # distinguish an empty file from one with sequence data.
                if not any(fasta_file):
                    self.error(
                        f"The uploaded .FASTA file {infile} contains no sequence data."
                    )
                else:
                    self.info(f"Successfully validated the input file {infile}.")
        except FastaFormatError as dnaio_error:
            self.error(f"Format error in the uploaded file {infile}. {dnaio_error}")

    def run(self, inputs, outputs):
        """Run the analysis."""
        fasta_path = inputs.src.import_file(
            imported_format="extracted", progress_to=0.2
        )
        supported_extensions = (".fa", ".fasta")
        if not fasta_path.endswith(supported_extensions):
            self.error(
                f"The imported file has unsupported file name extension. "
                f"The supported extensions are {supported_extensions}."
            )
        # validate the format of the uploaded fasta file
        self.validate_fasta(fasta_path)
        self.progress(0.3)
        # ensure the .fasta suffix
        fasta = Path(fasta_path)
        output_fasta = fasta.with_suffix(".fasta")
        fasta.rename(output_fasta)
        if output_fasta.is_file():
            # compress the input file (pigz -k keeps the uncompressed copy)
            Cmd["pigz"]["-k", "-f", output_fasta]()
            self.progress(0.4)
            # create a .fai index file
            Cmd["samtools"]["faidx", output_fasta]()
            self.progress(0.6)
            # Create fasta dictionary file (required by GATK/Picard tools)
            fasta_dict = f"{output_fasta.stem}.dict"
            Cmd["java"]["-jar", "/opt/broadinstitute/picard-tools/picard.jar"][
                "CreateSequenceDictionary", f"R={output_fasta.name}", f"O={fasta_dict}"
            ]()
            self.progress(0.8)
            # check the number of sequences in the .fasta file by counting
            # header lines; grep returns the count as a string
            seq_number = Cmd["grep"]["-c", "^>", output_fasta]().strip()
        else:
            self.error(
                f"The imported file {fasta} could not be successfully renamed to {output_fasta}"
            )
        # save the outputs
        outputs.fasta = output_fasta.name
        outputs.fastagz = f"{output_fasta}.gz"
        outputs.fai = f"{output_fasta}.fai"
        outputs.fasta_dict = fasta_dict
        outputs.num_seqs = seq_number
        outputs.species = inputs.species
        outputs.build = inputs.build
import gzip
import io
import json
from pathlib import Path
import pandas as pd
from resolwe.process import (
DataField,
FileField,
JsonField,
Persistence,
SchedulingClass,
StringField,
)
from resolwe_bio.process.runtime import ProcessBio
def parse_expression_file(exp_file, exp_type):
    """Parse expression file to a Pandas dataframe.

    Reads a gzipped, tab-separated expression table and standardizes the
    column names: "Gene" -> "FEATURE_ID", "Expression" -> *exp_type*.
    """
    with gzip.open(exp_file) as handle:
        frame = pd.read_csv(handle, sep="\t", float_precision="round_trip")
    frame = frame.rename(
        index=str,
        columns={
            "Gene": "FEATURE_ID",
            "Expression": exp_type,
        },
    )
    # Cast FEATURE_ID column to string
    frame["FEATURE_ID"] = frame["FEATURE_ID"].astype("str")
    # Remove any possible empty rows from the input file
    frame = frame.dropna()
    return frame
def prepare_expression_set(exp, exp_type, feature_dict, outfile_name, rc=None):
    """Prepare expression set output data.

    Maps feature IDs to gene symbols, optionally merges raw counts, and
    writes the resulting table both as gzipped TSV and as JSON.
    """
    exp_df = parse_expression_file(exp_file=exp, exp_type=exp_type)
    exp_df["GENE_SYMBOL"] = exp_df["FEATURE_ID"].map(feature_dict)
    # Warn (via stdout) when some feature IDs have no associated gene symbol.
    if not all(f_id in feature_dict for f_id in exp_df["FEATURE_ID"].tolist()):
        print(
            f"{sum(exp_df.isnull().values.ravel())} feature(s) "
            f"could not be mapped to the associated feature symbols."
        )
    # Merge expression values and reorder columns
    if rc:
        counts = parse_expression_file(exp_file=rc, exp_type="RAW_COUNT")
        exp_set = exp_df.merge(counts, on="FEATURE_ID")
        columns = ["FEATURE_ID", "GENE_SYMBOL", "RAW_COUNT", exp_type]
    else:
        exp_set = exp_df
        columns = ["FEATURE_ID", "GENE_SYMBOL", exp_type]
    exp_set = exp_set[columns]
    # Replace NaN values with empty string
    exp_set = exp_set.fillna("")
    # Write to file
    exp_set.to_csv(
        outfile_name + ".txt.gz",
        header=True,
        index=False,
        sep="\t",
        compression="gzip",
    )
    # Write to JSON
    df_dict = exp_set.set_index("FEATURE_ID").to_dict(orient="index")
    with open(outfile_name + ".json", "w") as handle:
        json.dump({"genes": df_dict}, handle, allow_nan=False)
def expression_to_storage(infile, outfile):
    """Convert expressions file to JSON format.

    Reads a gzipped tab-separated file and keeps only two-column rows
    whose second column parses as a float (header lines are skipped).
    """
    genes = {}
    with io.TextIOWrapper(io.BufferedReader(gzip.open(infile))) as handle:
        for line in handle:
            fields = line.split("\t")
            if len(fields) != 2:
                continue
            try:
                value = float(fields[1])
            except ValueError:
                # Non-numeric second column (e.g. the header row) — skip.
                continue
            genes[fields[0]] = value
    with open(file=outfile, mode="wt") as handle:
        json.dump({"genes": genes}, handle)
    return outfile
def replace_extension(infile):
    """Replace extensions of file.

    Strips the final (up to two) file name extensions from *infile* and
    appends ``.tab.gz`` instead, e.g. ``exp.txt.gz`` -> ``exp.tab.gz``.
    """
    path_str = str(infile)
    # Join the last two suffixes of the final path component, e.g. ".txt.gz".
    extensions = "".join(Path(path_str).suffixes[-2:])
    new_ext = ".tab.gz"
    # BUGFIX: replace only at the END of the path. The previous
    # str.replace()-based approach replaced the first occurrence anywhere
    # in the path (wrong when a directory name contained the extension)
    # and, for an extensionless name, replace("", ext) inserted the new
    # extension between every character.
    if extensions:
        return path_str[: -len(extensions)] + new_ext
    return path_str + new_ext
class UploadExpression(ProcessBio):
    """Upload expression data.
    Upload expression data by providing raw expression data (read counts)
    and/or normalized expression data together with the associated data
    normalization type.
    """
    slug = "upload-expression"
    name = "Expression data"
    process_type = "data:expression"
    version = "2.6.0"
    category = "Import"
    data_name = "{{ exp_name }}"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 1024,
            "network": True,
        },
    }
    class Input:
        """Input fields to process UploadExpression."""
        rc = FileField(
            label="Read counts (raw expression)",
            description="Reads mapped to genomic features (raw count data). "
            "Supported extensions: .txt.gz (preferred), .tab.*, .txt.* or .tsv.*",
            required=False,
        )
        exp = FileField(
            label="Normalized expression",
            description="Normalized expression data. Supported extensions: .tab.gz "
            "(preferred), .tab.*, .txt.* or .tsv.*",
            required=False,
        )
        exp_name = StringField(
            label="Expression name",
        )
        exp_type = StringField(
            label="Normalization type",
            description="Normalization type",
            required=False,
        )
        source = StringField(
            label="Gene ID source",
            allow_custom_choice=True,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        species = StringField(
            label="Species",
            description="Species latin name.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
            ],
        )
        build = StringField(
            label="Build", description="Genome build or annotation version."
        )
        feature_type = StringField(
            label="Feature type",
            allow_custom_choice=True,
            default="gene",
            choices=[
                ("gene", "gene"),
                ("transcript", "transcript"),
                ("exon", "exon"),
            ],
        )
    class Output:
        """Output fields to process UploadExpression."""
        exp = FileField(label="Normalized expression")
        rc = FileField(
            label="Read counts",
            required=False,
            description="Reads mapped to genomic features.",
        )
        exp_json = JsonField(label="Expression (json)")
        exp_type = StringField(label="Expression type")
        exp_set = FileField(label="Expressions")
        exp_set_json = JsonField(label="Expressions (json)")
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")
    def run(self, inputs, outputs):
        """Run analysis."""
        supported_extensions = (".txt", ".tab", ".tsv")
        # Validate the input combination: at least one of rc/exp is
        # required, and exp_type must accompany normalized expressions.
        if not inputs.exp and not inputs.rc:
            self.error("Please provide raw or/and normalized expression files.")
        elif inputs.exp and not inputs.exp_type:
            self.error(
                "Please provide normalization type together with normalized expressions."
            )
        elif not inputs.exp and inputs.exp_type and inputs.rc:
            self.error("Please provide raw or/and normalized expression files.")
        elif inputs.rc and not inputs.exp and not inputs.exp_type:
            # Raw counts only: the counts file doubles as the expression
            # file, with the implicit RAW_COUNT normalization type.
            rc = inputs.rc.import_file(imported_format="compressed")
            exp = inputs.rc.import_file(imported_format="compressed")
            exp_type = "RAW_COUNT"
            stem = Path(rc).stem
        elif inputs.exp and inputs.exp_type and not inputs.rc:
            # Normalized expressions only.
            exp = inputs.exp.import_file(imported_format="compressed")
            stem = Path(exp).stem
            exp_type = inputs.exp_type
        else:
            # Both raw counts and normalized expressions were provided.
            rc = inputs.rc.import_file(imported_format="compressed")
            exp = inputs.exp.import_file(imported_format="compressed")
            stem = Path(rc).stem
            stem_exp = Path(exp).stem
            if not stem_exp.endswith(supported_extensions):
                self.error(
                    f"The imported file has unsupported file name extension. "
                    f"The supported extensions are {supported_extensions}."
                )
            exp_type = inputs.exp_type
        if not stem.endswith(supported_extensions):
            self.error(
                "The imported file has unsupported file name extension. "
                f"The supported extensions are {supported_extensions}."
            )
        # All supported extensions are 4 characters long, so stripping 4
        # characters from the stem yields the base name.
        name = stem[:-4]
        # Save the abundance estimates to JSON storage
        expression_to_storage(infile=exp, outfile="json.txt")
        # Prepare the expression set outputs
        feature_ids = pd.read_csv(exp, sep="\t", index_col="Gene").index.tolist()
        feature_filters = {
            "source": inputs.source,
            "species": inputs.species,
            "feature_id__in": feature_ids,
        }
        # Map feature IDs to gene symbols via the knowledge base.
        feature_ids_to_names = {
            f.feature_id: f.name for f in self.feature.filter(**feature_filters)
        }
        if inputs.rc and inputs.exp:
            prepare_expression_set(
                exp=exp,
                exp_type=exp_type,
                feature_dict=feature_ids_to_names,
                outfile_name=f"{name}_expressions",
                rc=rc,
            )
        else:
            prepare_expression_set(
                exp=exp,
                exp_type=exp_type,
                feature_dict=feature_ids_to_names,
                outfile_name=f"{name}_expressions",
            )
        # Change suffixes of exp file
        exp_final = replace_extension(infile=exp)
        Path(exp).rename(exp_final)
        exp = Path(exp_final).name
        if inputs.rc and inputs.exp:
            # Change suffixes of rc file
            rc_final = replace_extension(infile=rc)
            Path(rc).rename(rc_final)
            rc = Path(rc_final).name
            outputs.rc = rc
        elif inputs.rc and not inputs.exp:
            # Raw-counts-only upload: expression and count outputs are
            # the same (renamed) file.
            rc = exp
            outputs.rc = rc
        outputs.exp_type = exp_type
        outputs.exp = exp
        outputs.exp_json = "json.txt"
        outputs.exp_set = f"{name}_expressions.txt.gz"
        outputs.exp_set_json = f"{name}_expressions.json"
        outputs.source = inputs.source
        outputs.species = inputs.species
        outputs.build = inputs.build
        outputs.feature_type = inputs.feature_type
class UploadExpressionCuffnorm(ProcessBio):
    """Upload expression data by providing Cuffnorm results."""
    slug = "upload-expression-cuffnorm"
    name = "Expression data (Cuffnorm)"
    process_type = "data:expression"
    version = "1.8.0"
    category = "Import"
    data_name = '{{ exp.file|default("?") }}'
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.RAW
    entity = {
        "type": "sample",
    }
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 1024,
            "network": True,
        },
    }
    class Input:
        """Input fields for UploadExpressionCuffnorm."""
        exp = FileField(label="Normalized expression")
        cxb = DataField(
            "cufflinks:cuffquant",
            label="Cuffquant analysis",
            description="Cuffquant analysis.",
        )
        exp_type = StringField(
            label="Normalization type",
            default="Cuffnorm",
        )
    class Output:
        """Output fields for UploadExpressionCuffnorm."""
        exp = FileField(
            label="Normalized expression",
            description="Normalized expression",
        )
        exp_json = JsonField(
            label="Expression (json)",
        )
        exp_type = StringField(
            label="Expression type",
        )
        exp_set = FileField(
            label="Expressions",
        )
        exp_set_json = JsonField(
            label="Expressions (json)",
        )
        source = StringField(
            label="Gene ID source",
        )
        species = StringField(label="Species")
        build = StringField(
            label="Build",
        )
        feature_type = StringField(label="Feature type")
    def run(self, inputs, outputs):
        """Run analysis."""
        # NOTE(review): if neither branch below matches, run() returns
        # without setting outputs — confirm whether that is intended.
        if inputs.exp and not inputs.exp_type:
            self.error(
                "Please provide normalization type together with normalized expressions."
            )
        elif inputs.exp and inputs.exp_type and inputs.cxb:
            exp = inputs.exp.import_file(imported_format="compressed")
            stem = Path(exp).stem
            # Drop the 4-character inner extension (e.g. ".tab") from the stem.
            name = stem[:-4]
            # Save the abundance estimates to JSON storage
            expression_to_storage(infile=exp, outfile="json.txt")
            # Prepare the expression set outputs
            feature_ids = pd.read_csv(exp, sep="\t", index_col="Gene").index.tolist()
            # Species/source metadata is inherited from the Cuffquant input.
            feature_filters = {
                "source": inputs.cxb.output.source,
                "species": inputs.cxb.output.species,
                "feature_id__in": feature_ids,
            }
            # Map feature IDs to gene symbols via the knowledge base.
            feature_ids_to_names = {
                f.feature_id: f.name for f in self.feature.filter(**feature_filters)
            }
            prepare_expression_set(
                exp=exp,
                exp_type=inputs.exp_type,
                feature_dict=feature_ids_to_names,
                outfile_name=f"{name}_expressions",
            )
            outputs.exp_type = inputs.exp_type
            outputs.exp = exp
            outputs.exp_json = "json.txt"
            outputs.exp_set = f"{name}_expressions.txt.gz"
            outputs.exp_set_json = f"{name}_expressions.json"
            outputs.source = inputs.cxb.output.source
            outputs.species = inputs.cxb.output.species
            outputs.build = inputs.cxb.output.build
            outputs.feature_type = "gene"
from resolwe.process import (
Cmd,
DataField,
FileField,
Process,
SchedulingClass,
StringField,
)
class ImportScBam(Process):
    """Import scSeq BAM file and index."""
    slug = "upload-bam-scseq-indexed"
    name = "Single cell BAM file and index"
    process_type = "data:alignment:bam:scseq"
    version = "1.4.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
    }
    data_name = "{{ reads|name|default('?') }}"
    class Input:
        """Input fields to process Import ScBam."""
        src = FileField(
            description="A mapping file in BAM format.",
            label="Mapping (BAM)",
        )
        src2 = FileField(
            description="An index file of a BAM mapping file (ending with bam.bai).",
            label="BAM index (*.bam.bai file)",
        )
        reads = DataField(
            data_type="screads:",
            label="Single cell fastq reads",
        )
        species = StringField(
            label="Species",
            description="Species latin name.",
        )
        build = StringField(
            label="Build",
        )
    class Output:
        """Output fields to process Import ScBam."""
        bam = FileField(label="Uploaded BAM")
        bai = FileField(label="Index BAI")
        stats = FileField(label="Alignment statistics")
        build = StringField(label="Build")
        species = StringField(label="Species")
    def run(self, inputs, outputs):
        """Run the analysis."""
        bam_path = inputs.src.import_file(imported_format="extracted")
        bai_path = inputs.src2.import_file(imported_format="extracted")
        assert bam_path.endswith(".bam")
        assert bai_path.endswith(".bam.bai")
        # Strip ".bam" / ".bam.bai" so the base names can be compared.
        bam_name = bam_path[:-4]
        bai_name = bai_path[:-8]
        if bam_name != bai_name:
            self.error("BAM and BAI files should have the same name.")
        stats = "{}_stats.txt".format(bam_name)
        # Compute alignment statistics with samtools flagstat,
        # redirecting stdout to the stats file.
        (Cmd["samtools"]["flagstat"][bam_path] > stats)()
        outputs.bam = bam_path
        outputs.bai = bai_path
        outputs.stats = stats
        outputs.species = inputs.species
        outputs.build = inputs.build
import re
from pathlib import Path
import pandas as pd
from resolwe.process import FileField, Process, SchedulingClass, StringField
def change_suffix(path):
    """Change suffix of a file to lowercase."""
    # Rename the file on disk so its extension is lower-cased, and
    # return the new path.
    lowered = path.with_suffix(path.suffix.lower())
    path.replace(lowered)
    return lowered
def prepare_filename(fname):
    """Return a sanitized string that can be used as a file name."""
    # Spaces become underscores; any remaining character that is not
    # alphanumeric, underscore, dash or dot is dropped.
    cleaned = str(fname).strip().replace(" ", "_")
    return re.sub(r"(?u)[^-\w.]", "", cleaned)
class UploadProteomicsData(Process):
    """Upload a mass spectrometry proteomics sample data file.
    The input 5-column tab-delimited file with the .txt suffix is
    expected to contain a header line with the following meta-data
    column names: "Uniprot ID", "Gene symbol", "Protein name" and
    "Number of peptides". The fifth column contains the sample data.
    """
    slug = "upload-proteomics-sample"
    name = "Upload proteomics sample"
    process_type = "data:proteomics:massspectrometry"
    version = "1.2.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {"cores": 1, "memory": 2048},
    }
    entity = {
        "type": "sample",
        "descriptor_schema": "sample",
    }
    data_name = '{{ src.file|default("?") }}'
    class Input:
        """Input field to process UploadProteomicsData."""
        src = FileField(label="Table containing mass spectrometry data (.txt)")
        species = StringField(
            label="Species",
            description="Select a species name from the dropdown menu "
            "or write a custom species name in the species field.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
            ],
        )
        source = StringField(
            label="Protein ID database source",
            default="UniProtKB",
            choices=[("UniProtKB", "UniProtKB")],
        )
    class Output:
        """Output field of the process UploadProteomicsData."""
        table = FileField(label="Uploaded table")
        species = StringField(label="Species")
        source = StringField(label="Source")
    def run(self, inputs, outputs):
        """Run the analysis."""
        table_path = inputs.src.import_file(imported_format="extracted")
        extensions = [".txt"]
        path = Path(table_path)
        # Normalize an upper-case suffix (e.g. ".TXT") to lower case.
        if path.suffix in [e.upper() for e in extensions]:
            path = change_suffix(path)
            self.info(
                "File extension of the table was replaced with a lower case version."
            )
        if path.suffix not in extensions:
            self.error(
                "Unsupported file name extension. Supported extensions "
                f"are {', '.join(extensions)}."
            )
        required_columns = [
            "Uniprot ID",
            "Gene symbol",
            "Protein name",
            "Number of peptides",
        ]
        table = pd.read_csv(path, sep="\t")
        header = list(table)
        if not set(required_columns).issubset(header):
            self.error(
                f"The input file must contain all of the required columns: {required_columns}."
            )
        # Exactly one sample-data column is allowed next to the four
        # required metadata columns.
        if not len(header) == 5:
            self.error(
                f"The input file must contain the required metadata columns: {required_columns} "
                f"and exactly one sample-data column. The provided input file contains columns: "
                f"{header}."
            )
        outputs.table = str(path)
        outputs.species = inputs.species
        outputs.source = inputs.source
class UploadProteomicsDataSet(Process):
    """Upload a mass spectrometry proteomics sample set file.
    The input multi-sample tab-delimited file with the .txt suffix is
    expected to contain a header line with the following meta-data
    column names: "Uniprot ID", "Gene symbol", "Protein name" and
    "Number of peptides". Each additional column in the input file
    should contain data for a single sample.
    """
    slug = "upload-proteomics-sample-set"
    name = "Upload proteomics sample set"
    process_type = "data:proteomics:sampleset"
    version = "1.2.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {"cores": 1, "memory": 2048},
    }
    data_name = '{{ src.file|default("?") }}'
    class Input:
        """Input field to process UploadProteomicsDataSet."""
        src = FileField(label="Table containing mass spectrometry data (.txt)")
        species = StringField(
            label="Species",
            description="Select a species name from the dropdown menu "
            "or write a custom species name in the species field.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
            ],
        )
        source = StringField(
            label="Protein ID database source",
            default="UniProtKB",
            choices=[("UniProtKB", "UniProtKB")],
        )
    class Output:
        """Output field of the process UploadProteomicsDataSet."""
        table = FileField(label="Uploaded table")
        species = StringField(label="Species")
        source = StringField(label="Source")
    def run(self, inputs, outputs):
        """Run the analysis."""
        table_path = inputs.src.import_file(imported_format="extracted")
        extensions = [".txt"]
        path = Path(table_path)
        # Normalize an upper-case suffix (e.g. ".TXT") to lower case.
        if path.suffix in [e.upper() for e in extensions]:
            path = change_suffix(path)
            self.info(
                "File extension of the table was replaced with a lower case version."
            )
        if path.suffix not in extensions:
            self.error(
                "Unsupported file name extension. Supported extensions "
                f"are {', '.join(extensions)}."
            )
        outputs.table = str(path)
        outputs.species = inputs.species
        outputs.source = inputs.source
        # spawn individual samples from the input sample set file
        required_columns = [
            "Gene symbol",
            "Protein name",
            "Number of peptides",
        ]
        sample_set = pd.read_csv(path, sep="\t", index_col="Uniprot ID")
        header = list(sample_set)
        if not set(required_columns).issubset(header):
            self.error(
                f"The input file must contain all of the required columns: {required_columns}."
            )
        # Every non-metadata column is treated as one sample; each is
        # written to its own single-sample file and uploaded separately.
        for sample_column in header:
            if sample_column not in required_columns:
                sample_data = sample_set[required_columns + [sample_column]]
                sample_data_name = prepare_filename(sample_column) + ".txt"
                sample_data.to_csv(sample_data_name, index_label="Uniprot ID", sep="\t")
                # spawn a new sample as an individual object
                process_inputs = {
                    "src": sample_data_name,
                    "species": inputs.species,
                    "source": inputs.source,
                }
                self.run_process("upload-proteomics-sample", process_inputs)
from pathlib import Path
import pandas as pd
from resolwe.process import FileField, IntegerField, Process, SchedulingClass
from resolwe.process.models import Collection, Entity
# Maps the accepted metadata-table column headers to the corresponding
# attribute name on the resolwe Entity (sample) model.
SAMPLE_COLUMNS = {
    "Sample ID": "id",
    "Sample slug": "slug",
    "Sample name": "name",
}
def lower_suffix(path, info):
    """Change suffix of a file to lowercase."""
    # Already lower-case: nothing to do.
    if path.suffix.islower():
        return path
    # Rename the file on disk and report the change via the info callback.
    lowered = path.with_suffix(path.suffix.lower())
    path.replace(lowered)
    info("File extension of the table was replaced with a lower case version.")
    return lowered
def read_tabular_data(path, sample_columns, error):
    """Convert the uploaded file to Pandas data frame."""
    extensions = [".csv", ".tab", ".tsv", ".xlsx", ".xls"]
    if path.suffix not in extensions:
        error(
            "Unsupported file name extension. Supported extensions "
            f"are {', '.join(extensions)}."
        )
    # Dispatch to the appropriate pandas reader based on the suffix.
    readers = {
        ".xls": lambda p: pd.read_excel(p, engine="xlrd"),
        ".xlsx": lambda p: pd.read_excel(p, engine="openpyxl"),
        ".tab": lambda p: pd.read_csv(p, sep="\t"),
        ".tsv": lambda p: pd.read_csv(p, sep="\t"),
        ".csv": pd.read_csv,
    }
    try:
        reader = readers.get(path.suffix)
        df = reader(path) if reader else pd.DataFrame()
    except Exception as err:
        error(f"It was not possible to read the provided data table. {err}")
    # Exactly one sample-identifying column must be present.
    if len(df.columns.intersection(sample_columns)) != 1:
        error(
            f"The uploaded metadata table needs to contain "
            f"exactly one of the following columns: "
            f"{sorted(sample_columns.keys())}."
        )
    if len(df) < 1:
        error("The uploaded table contains no samples.")
    return df
class UploadMetadataUnique(Process):
    """Upload metadata file where each row corresponds to a single sample.
    The uploaded metadata table represents one-to-one (1:1) relation to
    samples in the working collection. Metadata table must contain a column
    with one of the following headers: "Sample ID", "Sample name" or "Sample slug".
    """
    slug = "upload-metadata-unique"
    name = "Metadata table (one-to-one)"
    process_type = "data:metadata:unique"
    version = "1.1.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.1.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 8192,
            "storage": 10,
        },
    }
    data_name = '{{ src.file|default("?") }}'
    class Input:
        """Input field to process UploadMetadataUnique."""
        src = FileField(
            label="Table with metadata",
            description="The metadata table should use one of the following "
            "extensions: .csv, .tab, .tsv, .xlsx, .xls",
        )
    class Output:
        """Output field of the process UploadMetadataUnique."""
        table = FileField(label="Uploaded table")
        n_samples = IntegerField(label="Number of samples")
    def run(self, inputs, outputs):
        """Run the analysis."""
        collections = Collection.filter(data__id=self.data.id)
        if not collections:
            self.error(
                "Metadata table was not uploaded to a Collection. "
                "Matching of metadata entries to Sample objects is not possible."
            )
        samples = Entity.filter(collection_id=collections[0].id)
        path = Path(inputs.src.import_file(imported_format="extracted"))
        # change the file suffix if it is either upper or mixed case
        path = lower_suffix(path, info=self.info)
        df_data = read_tabular_data(path, SAMPLE_COLUMNS, error=self.error)
        # The single matched column header determines which sample
        # attribute (id/slug/name) is used for matching.
        sample_header = df_data.columns.intersection(SAMPLE_COLUMNS)[0]
        col_samples = {
            getattr(sample, SAMPLE_COLUMNS[sample_header]) for sample in samples
        }
        df_samples = df_data[sample_header]
        intersection = col_samples.intersection(df_samples.values)
        if not intersection:
            self.warning(
                "None of the samples listed in the uploaded Sample metadata table "
                "match the Samples in the working Collection."
            )
        # One-to-one relation: duplicated sample entries are not allowed.
        dup_samples = df_samples[df_samples.duplicated()]
        if not dup_samples.empty:
            self.error(
                f"Duplicated metadata entries {dup_samples.tolist()} were found. "
                f"Please use the metadata upload process that "
                f"allows for one-to-many relations instead."
            )
        outputs.table = str(path)
        outputs.n_samples = len(df_samples.unique())
class UploadMetadata(Process):
    """Upload metadata file where more than one row can match to a single sample.
    The uploaded metadata table represents one-to-many (1:n) relation to
    samples in the working collection. Metadata table must contain a column
    with one of the following headers: "Sample ID", "Sample name" or "Sample slug".
    """
    slug = "upload-metadata"
    name = "Metadata table"
    process_type = "data:metadata"
    version = "1.1.0"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.1.0"}
        },
        "resources": {
            "cores": 1,
            "memory": 8192,
            "storage": 10,
        },
    }
    data_name = '{{ src.file|default("?") }}'
    class Input:
        """Input field to process UploadMetadata."""
        src = FileField(
            label="Table with metadata",
            description="The metadata table should use one of the following "
            "extensions: .csv, .tab, .tsv, .xlsx, .xls",
        )
    class Output:
        """Output field of the process UploadMetadata."""
        table = FileField(label="Uploaded table")
        n_samples = IntegerField(label="Number of samples")
    def run(self, inputs, outputs):
        """Run the analysis."""
        collections = Collection.filter(data__id=self.data.id)
        if not collections:
            self.error(
                "Metadata table was not uploaded to a Collection. "
                "Matching of metadata entries to Sample objects is not possible."
            )
        samples = Entity.filter(collection_id=collections[0].id)
        path = Path(inputs.src.import_file(imported_format="extracted"))
        # change the file suffix if it is either upper or mixed case
        path = lower_suffix(path, info=self.info)
        df_data = read_tabular_data(path, SAMPLE_COLUMNS, error=self.error)
        # The single matched column header determines which sample
        # attribute (id/slug/name) is used for matching. Unlike the
        # one-to-one process, duplicated sample entries are allowed here.
        sample_header = df_data.columns.intersection(SAMPLE_COLUMNS)[0]
        col_samples = {
            getattr(sample, SAMPLE_COLUMNS[sample_header]) for sample in samples
        }
        df_samples = df_data[sample_header]
        intersection = col_samples.intersection(df_samples.values)
        if not intersection:
            self.warning(
                "None of the samples listed in the uploaded Sample metadata table "
                "match the Samples in the working Collection."
            )
        outputs.table = str(path)
        outputs.n_samples = len(df_samples.unique())
import json
from collections import Counter
from pathlib import Path
import pandas as pd
from resolwe.process import FileField, JsonField, SchedulingClass, StringField
from resolwe.process.models import Collection, DescriptorSchema, Entity
from resolwe_bio.process.runtime import ProcessBio
class ReferenceSpace(ProcessBio):
    """
    Define the reference space of ML-ready data sets.
    Use a descriptive name that uniquely defines the reference space, e.g.:
    - ComBat space of all TCGA v001
    - Quantile transform to uniform distribution v042.
    """
    slug = "reference-space"
    name = "Reference space"
    process_type = "data:ml:space"
    version = "1.0.1"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/common:3.0.1"}
        },
        "resources": {
            "cores": 1,
            "memory": 8192,
            "storage": 10,
        },
    }
    data_name = "{{ name }}"
    class Input:
        """Inputs."""
        name = StringField(label="Reference space name")
        description = StringField(label="Reference space description")
        source = StringField(
            label="Feature source",
            allow_custom_choice=True,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        species = StringField(
            label="Species",
            description="Species latin name.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
            ],
        )
        training_data = FileField(
            label="Traning data",
            description="A TAB separated file containing expression values "
            "used to create the preprocessor. The data should have Sample "
            "object ID for index (first column with label sample_id) and "
            "Ensembl gene IDs (recommended but not required) for the "
            "column names.",
        )
        preprocessor = FileField(
            label="Pickled preprocessor",
            description="Serialized (pickled) preprocessor used to transform "
            "data to the reference space.",
            required=False,
        )
    class Output:
        """Outputs."""
        features = JsonField(label="List of features")
        source = StringField(label="Feature ID source")
        species = StringField(label="Species")
        training_data = FileField(label="Traning data")
        preprocessor = FileField(label="Pickled preprocessor")
    def run(self, inputs, outputs):
        """Run the analysis."""
        # Ensure the object is uploaded to the general collection
        reference_spaces_collection_slug = "reference-spaces"
        collections = Collection.filter(data__id=self.data.id)
        if not collections:
            self.warning("Reference space table was not uploaded to a Collection.")
        elif collections[0].slug != reference_spaces_collection_slug:
            self.warning(
                f"Reference space table was not uploaded to Collection {reference_spaces_collection_slug}."
            )
        # Parse file:
        training_data_file = inputs.training_data.import_file(
            imported_format="extracted"
        )
        try:
            df = pd.read_csv(
                training_data_file,
                sep="\t",
                float_precision="round_trip",
                index_col=0,
            )
        except Exception as err:
            self.error(f"It was not possible to read the provided table. {err}")
        # Ensure that index contains actual ids from expressions
        sample_ids_df = df.index.astype(int).tolist()
        samples = Entity.filter(id__in=sample_ids_df)
        sample_ids_server = [s.id for s in samples]
        missing = set(sample_ids_df) - set(sample_ids_server)
        if missing:
            # Show at most five missing IDs in the error message.
            missing_str = ", ".join(map(str, list(missing)[:5])) + (
                "..." if len(missing) > 5 else ""
            )
            self.error(
                f"There are {len(missing)} samples in uploaded table that "
                f"are missing on server: {missing_str}"
            )
        features_df = sorted(df.columns.tolist())
        # If source is in KB, ensure that columns contain features from provided source
        if not self.feature.exists(source=inputs.source):
            self.warning(f"There are no features with source={inputs.source} in KB.")
        else:
            features_kb = self.feature.filter(
                feature_id__in=features_df,
                species=inputs.species,
            )
            source_counter = Counter([f.source for f in features_kb])
            sources = list(source_counter.keys())
            # NOTE(review): if no KB features match the filter, ``sources``
            # is empty and ``sources[0]`` below raises IndexError — confirm
            # whether upstream guarantees at least one match.
            if len(sources) > 1:
                sources = ", ".join(sources)
                self.error(
                    f"Features in training data are from different sources: {sources}"
                )
            elif sources[0] != inputs.source:
                self.error(
                    "Features in training data are not from the source specified in inputs."
                )
            elif source_counter[inputs.source] != len(features_df):
                self.error("Some features in training data are not in KB")
        # Sort columns and index and save to file
        df = df.sort_index(axis=0).sort_index(axis=1)
        training_data_file = Path(training_data_file).name
        df.to_csv(training_data_file, sep="\t")
        # Extract feature/genes from expressions
        features_json_name = "features.json"
        with open(features_json_name, "wt") as handle:
            json.dump({"features": features_df}, handle)
        # Get preprocessor pickle file
        preprocessor_file = inputs.preprocessor.import_file(imported_format="extracted")
        # Set the descriptor schema to geneset and attach description
        ds = DescriptorSchema.get_latest(slug="geneset")
        self.data.descriptor_schema = ds.id
        self.data.descriptor = {"description": inputs.description}
        # Set outputs
        outputs.source = inputs.source
        outputs.species = inputs.species
        outputs.features = features_json_name
        outputs.preprocessor = preprocessor_file
        outputs.training_data = training_data_file
from pathlib import Path
import pandas as pd
from resolwe.process import DataField, FileField, Process, SchedulingClass, StringField
from resolwe.process.models import Entity
class UploadMLExpression(Process):
    """Upload ML-ready expression matrix."""
    slug = "upload-ml-expression"
    name = "ML-ready expression"
    process_type = "data:ml:table:expressions"
    version = "1.0.1"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/common:3.0.1"}
        },
        "resources": {
            "cores": 1,
            "memory": 8192,
            "storage": 10,
        },
    }
    data_name = "{{ reference_space|name }}"
    class Input:
        """Inputs."""
        exp = FileField(
            label="Transformed expressions",
            description="A TAB separated file containing transformed "
            "expression values with sample IDs for index (first column "
            "with label sample_id) and ENSEMBL IDs (recommended but not "
            "required) for the column names.",
        )
        source = StringField(
            label="Feature source",
            allow_custom_choice=True,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        species = StringField(
            label="Species",
            description="Species latin name.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
            ],
        )
        reference_space = DataField(
            "ml:space", label="Reference space of ML-ready data"
        )
    class Output:
        """Outputs."""
        exp = FileField(label="Transformed expressions")
        source = StringField(label="Feature source")
        species = StringField(label="Species")
    def run(self, inputs, outputs):
        """Run the analysis."""
        # Parse exp file:
        exp_path = inputs.exp.import_file(imported_format="extracted")
        try:
            df = pd.read_csv(
                exp_path,
                sep="\t",
                index_col=0,
                float_precision="round_trip",
            )
        except Exception as err:
            self.error(f"It was not possible to read the provided table. {err}")
        # Sort columns in alphabetical order
        df.sort_index(axis=1, inplace=True)
        # Ensure that index contains actual ids from expressions
        sample_ids_df = df.index.astype(int).tolist()
        samples = Entity.filter(id__in=sample_ids_df)
        sample_ids_server = [s.id for s in samples]
        missing = set(sample_ids_df) - set(sample_ids_server)
        if missing:
            # Show at most five missing IDs in the error message.
            missing_str = ", ".join(map(str, list(missing)[:5])) + (
                "..." if len(missing) > 5 else ""
            )
            self.error(
                f"There are {len(missing)} samples in uploaded matrix that "
                f"are inaccessible or missing on server: {missing_str}"
            )
        # Ensure that sources of this object and reference space are equal:
        if inputs.source != inputs.reference_space.output.source:
            self.error(
                "Source of expression matrix must match the source of reference space."
            )
        # Ensure that species of this object and reference space are equal:
        if inputs.species != inputs.reference_space.output.species:
            self.error(
                "Species of expression matrix must match the species of reference space."
            )
        # Ensure that expression has the same features as the reference space
        features_ref = inputs.reference_space.output.features.json["features"]
        # Both should be sorted by now, so one can directly compare:
        if features_ref != df.columns.tolist():
            self.error(
                "Uploaded expression matrix does not have the same features as the reference space."
            )
        # Save
        final_exp_name = Path(exp_path).name
        df.to_csv(final_exp_name, sep="\t")
        # Set outputs
        outputs.exp = final_exp_name
        outputs.source = inputs.source
        outputs.species = inputs.species
from resolwe.process import FileField, Process, SchedulingClass, StringField
def validate_filename_suffix(filename, suffix, resolwe_process=Process):
    """Report a process error if the file name has an unexpected suffix.

    :param filename: file name to check
    :param suffix: required file name ending, e.g. ``"_Red.idat"``
    :param resolwe_process: object whose ``error`` method reports the
        failure (defaults to the ``Process`` class)
    """
    # Use an explicit check instead of ``assert`` so the validation is
    # not stripped away when Python runs with optimizations (-O). Also
    # include the offending file name in the message (the original text
    # contained a "(unknown)" placeholder where the name belongs).
    if not filename.endswith(suffix):
        resolwe_process.error(
            f"Unsupported file name extension. A file {filename} "
            f"should end with {suffix}."
        )
class UploadIdatData(Process):
    """Upload Illumina methylation array raw IDAT data.

    This import process accepts Illumina methylation array BeadChip raw
    files in IDAT format. Two input files, one for each of the Green and
    Red signal channels, are expected. The uploads of human (HM27, HM450,
    EPIC) and mouse (MM285) array types are supported.
    """

    slug = "upload-idat"
    name = "IDAT file"
    process_type = "data:methylationarray:idat"
    # Version bumped: fixed platform field label and error reporting path.
    version = "1.1.1"
    category = "Import"
    scheduling_class = SchedulingClass.BATCH
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        "resources": {"cores": 1, "memory": 2048},
    }
    entity = {
        "type": "sample",
        "descriptor_schema": "sample",
    }
    data_name = "{{ red_channel.file|default('?') }}"

    class Input:
        """Input fields of the process UploadIdatData."""

        red_channel = FileField(label="Red channel IDAT file (*_Red.idat)")
        green_channel = FileField(label="Green channel IDAT file (*_Grn.idat)")
        species = StringField(
            label="Species",
            description="Select a species name from the dropdown menu.",
            default="Homo sapiens",
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
            ],
        )
        platform = StringField(
            # Label fixed: "Protein ID database source" was a copy-paste
            # leftover from a proteomics process.
            label="Methylation array platform",
            description="Select a methylation array platform for human "
            "(HM450, HM27, EPIC) or mouse (MM285) samples.",
            default="HM450",
            choices=[
                ("HM450", "HM450"),
                ("HM27", "HM27"),
                ("EPIC", "EPIC"),
                ("MM285", "MM285"),
            ],
        )

    class Output:
        """Output fields of the process UploadIdatData."""

        red_channel = FileField(label="Red channel IDAT file")
        green_channel = FileField(label="Green channel IDAT file")
        species = StringField(label="Species")
        platform = StringField(label="Platform")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # MM285 is the only supported mouse platform; all others are human.
        if inputs.species == "Mus musculus" and inputs.platform != "MM285":
            self.error(
                f"Platform type {inputs.platform} does not match the selected species {inputs.species}."
            )

        red = inputs.red_channel.import_file(imported_format="compressed")
        grn = inputs.green_channel.import_file(imported_format="compressed")

        # Imported files are gzip-compressed, hence the ".gz" suffixes.
        red_suffix = "_Red.idat.gz"
        grn_suffix = "_Grn.idat.gz"
        # Pass self so a failed validation is reported through this running
        # process instead of crashing on the Process class default.
        validate_filename_suffix(red, red_suffix, self)
        validate_filename_suffix(grn, grn_suffix, self)

        # Derive the sample name by stripping the channel-specific suffix
        # (replaces the previous hard-coded slice red[:-12]).
        sample_name_red = red[: -len(red_suffix)]
        sample_name_grn = grn[: -len(grn_suffix)]
        if sample_name_red != sample_name_grn:
            self.error(
                "The input IDAT files don't have a matching filename prefix. "
                "The sample data might be mismatched."
            )

        outputs.red_channel = red
        outputs.green_channel = grn
        outputs.species = inputs.species
        outputs.platform = inputs.platform
import gzip
import json
from pathlib import Path
from resolwe.process import (
FileField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
from resolwe.process.models import DescriptorSchema
def parse_geneset_file(geneset_file, warning):
    """Parse a gene-set file into a sorted list of unique gene IDs.

    :param geneset_file: path to a text file with one gene ID per line
    :param warning: callable used to report that duplicated genes were
        removed (e.g. ``self.warning`` of a running process)
    :return: sorted list of unique, stripped gene IDs
    """
    # Mode "rU" was removed in Python 3.11; universal newlines are the
    # default in text mode anyway, so plain "r" is equivalent.
    with open(geneset_file, "r") as handle:
        # Skip empty and whitespace-only lines.
        genes = [line.strip() for line in handle if line.strip()]
    geneset = sorted(set(genes))
    if len(genes) != len(geneset):
        warning("Removed duplicated genes.")
    return geneset
def save_geneset_to_json(geneset, output_json):
    """Write the gene set to ``output_json`` as compact ``{"genes": [...]}``."""
    payload = {"genes": geneset}
    with open(output_json, "w") as out:
        # Compact separators keep the file small; NaN values are rejected.
        json.dump(payload, out, separators=(",", ":"), allow_nan=False)
def save_geneset_to_file(geneset, output_file):
    """Write the gene set, one gene per line, to a gzip-compressed file."""
    content = "\n".join(geneset).encode("utf-8")
    with gzip.open(output_file, "w") as compressed:
        compressed.write(content)
class UploadGeneset(Process):
    """
    Upload a set of genes.

    Provide one gene ID per line in a .tab, .tab.gz, or .txt file format.
    """

    slug = "upload-geneset"
    name = "Gene set"
    process_type = "data:geneset"
    version = "1.3.1"
    category = "Import"
    scheduling_class = SchedulingClass.INTERACTIVE
    persistence = Persistence.RAW
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/s4q6j6e8/resolwebio/base:ubuntu-20.04-03042021"
            }
        },
        "resources": {
            "cores": 1,
            "memory": 1024,
            "storage": 10,
        },
    }
    data_name = '{{ src.file|default("?") }}'

    class Input:
        """Input fields."""

        src = FileField(
            label="Gene set",
            description="List of genes (.tab/.txt extension), one gene ID per line.",
        )
        source = StringField(
            label="Gene ID source",
            allow_custom_choice=True,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        species = StringField(
            label="Species",
            description="Species latin name.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
                ("Odocoileus virginianus texanus", "Odocoileus virginianus texanus"),
                ("Solanum tuberosum", "Solanum tuberosum"),
            ],
        )

    class Output:
        """Output fields."""

        geneset = FileField(label="Gene set")
        geneset_json = JsonField(label="Gene set (JSON)")
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")

    def run(self, inputs, outputs):
        """Run the analysis."""
        geneset_file = inputs.src.import_file(imported_format="extracted")
        geneset = parse_geneset_file(geneset_file, self.warning)

        # Name the compressed output after the uploaded file's stem.
        geneset_name = f"{Path(geneset_file).stem}.tab.gz"
        save_geneset_to_file(geneset, geneset_name)
        save_geneset_to_json(geneset, "geneset.json")

        outputs.geneset = geneset_name
        outputs.geneset_json = "geneset.json"
        outputs.source = inputs.source
        outputs.species = inputs.species

        # Tag this data object with the dedicated "geneset" descriptor schema.
        schema = DescriptorSchema.get_latest(slug="geneset")
        self.data.descriptor_schema = schema.id
class CreateGeneset(Process):
    """Create a gene set from a list of genes."""

    slug = "create-geneset"
    name = "Gene set (create)"
    process_type = "data:geneset"
    version = "1.3.1"
    category = "Import"
    scheduling_class = SchedulingClass.INTERACTIVE
    persistence = Persistence.RAW
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/s4q6j6e8/resolwebio/base:ubuntu-20.04-03042021"
            }
        },
        "resources": {
            "cores": 1,
            "memory": 1024,
            "storage": 10,
        },
    }
    data_name = "Gene set"

    class Input:
        """Input fields."""

        genes = ListField(
            StringField(),
            label="Genes",
            description="List of genes.",
        )
        source = StringField(
            label="Gene ID source",
            allow_custom_choice=True,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        species = StringField(
            label="Species",
            description="Species latin name.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
                ("Odocoileus virginianus texanus", "Odocoileus virginianus texanus"),
                ("Solanum tuberosum", "Solanum tuberosum"),
            ],
        )

    class Output:
        """Output fields."""

        geneset = FileField(label="Gene set")
        geneset_json = JsonField(label="Gene set (JSON)")
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Deduplicate and sort the provided gene IDs.
        geneset = sorted(set(inputs.genes))
        if len(geneset) != len(inputs.genes):
            self.warning("Removed duplicated genes.")

        save_geneset_to_file(geneset, "geneset.tab.gz")
        outputs.geneset = "geneset.tab.gz"

        save_geneset_to_json(geneset, "geneset.json")
        outputs.geneset_json = "geneset.json"

        outputs.source = inputs.source
        outputs.species = inputs.species

        # Tag this data object with the dedicated "geneset" descriptor schema.
        schema = DescriptorSchema.get_latest(slug="geneset")
        self.data.descriptor_schema = schema.id
class CreateGenesetVenn(Process):
    """Create a gene set from a Venn diagram."""

    slug = "create-geneset-venn"
    name = "Gene set (create from Venn diagram)"
    process_type = "data:geneset:venn"
    version = "1.3.1"
    category = "Import"
    scheduling_class = SchedulingClass.INTERACTIVE
    persistence = Persistence.RAW
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {
                "image": "public.ecr.aws/s4q6j6e8/resolwebio/base:ubuntu-20.04-03042021"
            }
        },
        "resources": {
            "cores": 1,
            "memory": 1024,
            "storage": 10,
        },
    }
    data_name = "Gene set (Venn)"

    class Input:
        """Input fields."""

        genes = ListField(
            StringField(),
            label="Genes",
            description="List of genes.",
        )
        source = StringField(
            label="Gene ID source",
            allow_custom_choice=True,
            choices=[
                ("AFFY", "AFFY"),
                ("DICTYBASE", "DICTYBASE"),
                ("ENSEMBL", "ENSEMBL"),
                ("NCBI", "NCBI"),
                ("UCSC", "UCSC"),
            ],
        )
        species = StringField(
            label="Species",
            description="Species latin name.",
            allow_custom_choice=True,
            choices=[
                ("Homo sapiens", "Homo sapiens"),
                ("Mus musculus", "Mus musculus"),
                ("Rattus norvegicus", "Rattus norvegicus"),
                ("Dictyostelium discoideum", "Dictyostelium discoideum"),
                ("Odocoileus virginianus texanus", "Odocoileus virginianus texanus"),
                ("Solanum tuberosum", "Solanum tuberosum"),
            ],
        )
        venn = FileField(
            label="Venn diagram",
            description="JSON file of Venn diagram.",
        )

    class Output:
        """Output fields."""

        geneset = FileField(label="Gene set")
        geneset_json = JsonField(label="Gene set (JSON)")
        source = StringField(label="Gene ID source")
        species = StringField(label="Species")
        venn = JsonField(label="Venn diagram")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Deduplicate and sort the provided gene IDs.
        geneset = sorted(set(inputs.genes))
        if len(geneset) != len(inputs.genes):
            self.warning("Removed duplicated genes.")

        save_geneset_to_file(geneset, "geneset.tab.gz")
        outputs.geneset = "geneset.tab.gz"

        save_geneset_to_json(geneset, "geneset.json")
        outputs.geneset_json = "geneset.json"

        outputs.source = inputs.source
        outputs.species = inputs.species

        # Import the Venn diagram JSON and expose it as an output.
        outputs.venn = inputs.venn.import_file(imported_format="extracted")

        # Tag this data object with the dedicated "geneset" descriptor schema.
        schema = DescriptorSchema.get_latest(slug="geneset")
        self.data.descriptor_schema = schema.id
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.