code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import re
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from .utils import load_state_dict_from_url
__all__ = ["DenseNet", "densenet121", "densenet169", "densenet201", "densenet161"]
# torchvision-hosted ImageNet checkpoint URLs, keyed by architecture name.
model_urls = {
    "densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth",
    "densenet169": "https://download.pytorch.org/models/densenet169-b2777c0a.pth",
    "densenet201": "https://download.pytorch.org/models/densenet201-c1103571.pth",
    "densenet161": "https://download.pytorch.org/models/densenet161-8d451a50.pth",
}
def _bn_function_factory(norm, relu, conv):
def bn_function(*inputs):
concated_features = torch.cat(inputs, 1)
bottleneck_output = conv(relu(norm(concated_features)))
return bottleneck_output
return bn_function
class _DenseLayer(nn.Sequential):
    """Single DenseNet-BC layer: BN-ReLU-Conv1x1 bottleneck then BN-ReLU-Conv3x3.

    The forward pass takes any number of previous feature maps, concatenates
    them along the channel axis, and produces ``growth_rate`` new channels.
    """

    def __init__(
        self,
        num_input_features,
        growth_rate,
        bn_size,
        drop_rate,
        memory_efficient=False,
    ):
        super(_DenseLayer, self).__init__()
        # Fixed: the original had stray trailing commas after each add_module
        # call, turning each statement into a throwaway 1-tuple.
        self.add_module("norm1", nn.BatchNorm2d(num_input_features))
        self.add_module("relu1", nn.ReLU(inplace=True))
        self.add_module(
            "conv1",
            nn.Conv2d(
                num_input_features,
                bn_size * growth_rate,
                kernel_size=1,
                stride=1,
                bias=False,
            ),
        )
        self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module("relu2", nn.ReLU(inplace=True))
        self.add_module(
            "conv2",
            nn.Conv2d(
                bn_size * growth_rate,
                growth_rate,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
        )
        self.drop_rate = drop_rate
        self.memory_efficient = memory_efficient

    def forward(self, *prev_features):
        """Concatenate prev_features and return growth_rate new feature channels."""
        bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
        if self.memory_efficient and any(
            prev_feature.requires_grad for prev_feature in prev_features
        ):
            # Trade compute for memory: recompute the bottleneck on backward.
            bottleneck_output = cp.checkpoint(bn_function, *prev_features)
        else:
            bottleneck_output = bn_function(*prev_features)
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(
                new_features, p=self.drop_rate, training=self.training
            )
        return new_features
class _DenseBlock(nn.Module):
    """A stack of ``num_layers`` _DenseLayer modules with dense connectivity.

    Every layer receives the feature maps of the block input plus all previous
    layers and contributes ``growth_rate`` new channels, so the block output
    has ``num_input_features + num_layers * growth_rate`` channels.
    """

    def __init__(
        self,
        num_layers,
        num_input_features,
        bn_size,
        growth_rate,
        drop_rate,
        memory_efficient=False,
    ):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            # Layer i consumes the block input plus the i earlier layers' outputs.
            layer = _DenseLayer(
                num_input_features + i * growth_rate,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.add_module("denselayer%d" % (i + 1), layer)

    def forward(self, init_features):
        # Accumulate every layer's output and concatenate along channels.
        features = [init_features]
        for name, layer in self.named_children():
            new_features = layer(*features)
            features.append(new_features)
        return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module("norm", nn.BatchNorm2d(num_input_features))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module(
"conv",
nn.Conv2d(
num_input_features,
num_output_features,
kernel_size=1,
stride=1,
bias=False,
),
)
self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
    """

    def __init__(
        self,
        growth_rate=32,
        block_config=(6, 12, 24, 16),
        num_init_features=64,
        bn_size=4,
        drop_rate=0,
        num_classes=1000,
        memory_efficient=False,
        in_channels=3,
    ):
        super(DenseNet, self).__init__()
        # First convolution
        # NOTE(review): in_channels is accepted but unused — conv0 is
        # hard-coded to 3 input channels so ImageNet weights can load;
        # confirm callers always feed 3-channel input.
        self.features = nn.Sequential(
            OrderedDict(
                [
                    (
                        "conv0",
                        nn.Conv2d(
                            3,
                            num_init_features,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False,
                        ),
                    ),
                    ("norm0", nn.BatchNorm2d(num_init_features)),
                    ("relu0", nn.ReLU(inplace=True)),
                    ("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
                ]
            )
        )
        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.features.add_module("denseblock%d" % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                # A transition between blocks halves channels and spatial size.
                trans = _Transition(
                    num_input_features=num_features,
                    num_output_features=num_features // 2,
                )
                self.features.add_module("transition%d" % (i + 1), trans)
                num_features = num_features // 2
        # Final batch norm
        self.features.add_module("norm5", nn.BatchNorm2d(num_features))
        # Linear layer
        # NOTE: strictly set to 1000 to load pretrained model
        # self.classifier = nn.Linear(num_features, num_classes)
        self.classifier = nn.Linear(num_features, 1000)
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return classification logits for a batch of images."""
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        return out
def _load_state_dict(model, model_url, progress):
    """Download a DenseNet checkpoint and load it into ``model``.

    Old checkpoints use dotted sub-module names ('norm.1', 'conv.2', ...);
    those keys are rewritten to the current dot-free form before loading.
    """
    # '.'s are no longer allowed in module names, but previous _DenseLayer
    # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
    # They are also in the checkpoints in model_urls. This pattern is used
    # to find such keys.
    pattern = re.compile(
        r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
    )
    state_dict = load_state_dict_from_url(model_url, progress=progress)
    for key in list(state_dict.keys()):
        res = pattern.match(key)
        if res:
            # e.g. 'denselayer1.norm.1.weight' -> 'denselayer1.norm1.weight'
            new_key = res.group(1) + res.group(2)
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
    model.load_state_dict(state_dict)
def _densenet(
    arch, growth_rate, block_config, num_init_features, pretrained, progress, **kwargs
):
    """Build a DenseNet; optionally load ImageNet weights then attach a 7-way head.

    NOTE(review): the classifier is replaced by ``nn.Linear(1024, 7)`` only on
    the ``pretrained`` path, and 1024 matches densenet121's final feature
    width only — other configs would need a different input size; confirm.
    """
    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    if pretrained:
        _load_state_dict(model, model_urls[arch], progress)
        model.classifier = nn.Linear(1024, 7)
    return model
def densenet121(pretrained=False, progress=True, **kwargs):
    r"""Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
    """
    # DenseNet-121: growth_rate=32, blocks (6, 12, 24, 16), 64 initial features.
    return _densenet(
        "densenet121", 32, (6, 12, 24, 16), 64, pretrained, progress, **kwargs
    )
def densenet161(pretrained=False, progress=True, **kwargs):
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
    """
    # DenseNet-161: growth_rate=48, blocks (6, 12, 36, 24), 96 initial features.
    return _densenet(
        "densenet161", 48, (6, 12, 36, 24), 96, pretrained, progress, **kwargs
    )
def densenet169(pretrained=False, progress=True, **kwargs):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
    """
    # DenseNet-169 architecture constants.
    growth_rate = 32
    block_config = (6, 12, 32, 32)
    num_init_features = 64
    return _densenet(
        "densenet169",
        growth_rate,
        block_config,
        num_init_features,
        pretrained,
        progress,
        **kwargs
    )
def densenet201(pretrained=False, progress=True, **kwargs):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
    """
    # Fixed: dataset-export metadata residue ("| /rmn-.../densenet.py | pypi")
    # was fused onto the closing line, making the module unparseable.
    return _densenet(
        "densenet201", 32, (6, 12, 48, 32), 64, pretrained, progress, **kwargs
    )
import os
import requests
import torch
from requests.adapters import HTTPAdapter
from torch import nn
from torch.nn import functional as F
class BasicConv2d(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block with a bias-free convolution."""

    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
        super().__init__()
        # Bias is redundant because BatchNorm immediately follows the conv.
        self.conv = nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
        )
        self.bn = nn.BatchNorm2d(
            out_planes,
            eps=0.001,  # value found in tensorflow
            momentum=0.1,  # default pytorch value
            affine=True,
        )
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class Block35(nn.Module):
    """Inception-ResNet-A residual block for 256-channel feature maps.

    Three parallel branches are concatenated, projected back to 256 channels,
    scaled by ``scale`` and added to the input before a final ReLU.
    """

    def __init__(self, scale=1.0):
        super().__init__()
        self.scale = scale
        self.branch0 = BasicConv2d(256, 32, kernel_size=1, stride=1)
        self.branch1 = nn.Sequential(
            BasicConv2d(256, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
        )
        self.branch2 = nn.Sequential(
            BasicConv2d(256, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
        )
        # Projects the 96 concatenated branch channels back to 256.
        self.conv2d = nn.Conv2d(96, 256, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        out = torch.cat((x0, x1, x2), 1)
        out = self.conv2d(out)
        # Scaled residual connection.
        out = out * self.scale + x
        out = self.relu(out)
        return out
class Block17(nn.Module):
    """Inception-ResNet-B residual block for 896-channel feature maps.

    Two parallel branches are concatenated, projected back to 896 channels,
    scaled by ``scale`` and added to the block input before a final ReLU.
    """

    def __init__(self, scale=1.0):
        super().__init__()
        self.scale = scale
        self.branch0 = BasicConv2d(896, 128, kernel_size=1, stride=1)
        branch1_stages = (
            BasicConv2d(896, 128, kernel_size=1, stride=1),
            BasicConv2d(128, 128, kernel_size=(1, 7), stride=1, padding=(0, 3)),
            BasicConv2d(128, 128, kernel_size=(7, 1), stride=1, padding=(3, 0)),
        )
        self.branch1 = nn.Sequential(*branch1_stages)
        # Projects the 256 concatenated branch channels back to 896.
        self.conv2d = nn.Conv2d(256, 896, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        branches = torch.cat((self.branch0(x), self.branch1(x)), 1)
        residual = self.conv2d(branches)
        return self.relu(residual * self.scale + x)
class Block8(nn.Module):
    """Inception-ResNet-C residual block for 1792-channel feature maps.

    With ``noReLU=True`` the final activation is omitted (used for the last
    block before pooling).
    """

    def __init__(self, scale=1.0, noReLU=False):
        super().__init__()
        self.scale = scale
        self.noReLU = noReLU
        self.branch0 = BasicConv2d(1792, 192, kernel_size=1, stride=1)
        self.branch1 = nn.Sequential(
            BasicConv2d(1792, 192, kernel_size=1, stride=1),
            BasicConv2d(192, 192, kernel_size=(1, 3), stride=1, padding=(0, 1)),
            BasicConv2d(192, 192, kernel_size=(3, 1), stride=1, padding=(1, 0)),
        )
        # Projects the 384 concatenated branch channels back to 1792.
        self.conv2d = nn.Conv2d(384, 1792, kernel_size=1, stride=1)
        if not self.noReLU:
            self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        out = torch.cat((x0, x1), 1)
        out = self.conv2d(out)
        # Scaled residual connection.
        out = out * self.scale + x
        if not self.noReLU:
            out = self.relu(out)
        return out
class Mixed_6a(nn.Module):
    """Reduction block: 256 -> 896 channels while halving spatial size (stride 2)."""

    def __init__(self):
        super().__init__()
        self.branch0 = BasicConv2d(256, 384, kernel_size=3, stride=2)
        self.branch1 = nn.Sequential(
            BasicConv2d(256, 192, kernel_size=1, stride=1),
            BasicConv2d(192, 192, kernel_size=3, stride=1, padding=1),
            BasicConv2d(192, 256, kernel_size=3, stride=2),
        )
        self.branch2 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        # 384 + 256 + 256 = 896 output channels.
        out = torch.cat((x0, x1, x2), 1)
        return out
class Mixed_7a(nn.Module):
    """Reduction block: 896 -> 1792 channels while halving spatial size (stride 2)."""

    def __init__(self):
        super().__init__()
        self.branch0 = nn.Sequential(
            BasicConv2d(896, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2),
        )
        self.branch1 = nn.Sequential(
            BasicConv2d(896, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 256, kernel_size=3, stride=2),
        )
        self.branch2 = nn.Sequential(
            BasicConv2d(896, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
            BasicConv2d(256, 256, kernel_size=3, stride=2),
        )
        self.branch3 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)
        # 384 + 256 + 256 + 896 = 1792 output channels.
        out = torch.cat((x0, x1, x2, x3), 1)
        return out
class InceptionResnetV1(nn.Module):
    """Inception Resnet V1 model with optional loading of pretrained weights.
    Model parameters can be loaded based on pretraining on the VGGFace2 or CASIA-Webface
    datasets. Pretrained state_dicts are automatically downloaded on model instantiation if
    requested and cached in the torch cache. Subsequent instantiations use the cache rather than
    redownloading.
    Keyword Arguments:
        pretrained {str} -- Optional pretraining dataset. Either 'vggface2' or 'casia-webface'.
            (default: {None})
        classify {bool} -- Whether the model should output classification probabilities or feature
            embeddings. (default: {False})
        num_classes {int} -- Number of output classes. If 'pretrained' is set and num_classes not
            equal to that used for the pretrained model, the final linear layer will be randomly
            initialized. (default: {None})
        dropout_prob {float} -- Dropout probability. (default: {0.6})
    """

    def __init__(
        self,
        pretrained=None,
        classify=False,
        num_classes=None,
        dropout_prob=0.6,
        device=None,
    ):
        super().__init__()
        # Set simple attributes
        self.pretrained = pretrained
        self.classify = classify
        self.num_classes = num_classes
        # Size the logits layer to match the pretrained checkpoint so its
        # weights can load; re-created below if num_classes differs.
        if pretrained == "vggface2":
            tmp_classes = 8631
        elif pretrained == "casia-webface":
            tmp_classes = 10575
        elif pretrained is None and self.num_classes is None:
            raise Exception(
                'At least one of "pretrained" or "num_classes" must be specified'
            )
        else:
            tmp_classes = self.num_classes
        # Define layers
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.conv2d_4b = BasicConv2d(192, 256, kernel_size=3, stride=2)
        # 5x Inception-ResNet-A blocks (256 channels).
        self.repeat_1 = nn.Sequential(
            Block35(scale=0.17),
            Block35(scale=0.17),
            Block35(scale=0.17),
            Block35(scale=0.17),
            Block35(scale=0.17),
        )
        self.mixed_6a = Mixed_6a()
        # 10x Inception-ResNet-B blocks (896 channels).
        self.repeat_2 = nn.Sequential(
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
            Block17(scale=0.10),
        )
        self.mixed_7a = Mixed_7a()
        # 5x Inception-ResNet-C blocks (1792 channels).
        self.repeat_3 = nn.Sequential(
            Block8(scale=0.20),
            Block8(scale=0.20),
            Block8(scale=0.20),
            Block8(scale=0.20),
            Block8(scale=0.20),
        )
        self.block8 = Block8(noReLU=True)
        self.avgpool_1a = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout(dropout_prob)
        # 512-dimensional face embedding head.
        self.last_linear = nn.Linear(1792, 512, bias=False)
        self.last_bn = nn.BatchNorm1d(512, eps=0.001, momentum=0.1, affine=True)
        self.logits = nn.Linear(512, tmp_classes)
        if pretrained is not None:
            load_weights(self, pretrained)
        if self.num_classes is not None:
            # Fresh classification head when the caller's class count differs.
            self.logits = nn.Linear(512, self.num_classes)
        self.device = torch.device("cpu")
        if device is not None:
            self.device = device
            self.to(device)

    def forward(self, x):
        """Calculate embeddings or probabilities given a batch of input image tensors.
        Arguments:
            x {torch.tensor} -- Batch of image tensors representing faces.
        Returns:
            torch.tensor -- Batch of embeddings or softmax probabilities.
        """
        x = self.conv2d_1a(x)
        x = self.conv2d_2a(x)
        x = self.conv2d_2b(x)
        x = self.maxpool_3a(x)
        x = self.conv2d_3b(x)
        x = self.conv2d_4a(x)
        x = self.conv2d_4b(x)
        x = self.repeat_1(x)
        x = self.mixed_6a(x)
        x = self.repeat_2(x)
        x = self.mixed_7a(x)
        x = self.repeat_3(x)
        x = self.block8(x)
        x = self.avgpool_1a(x)
        x = self.dropout(x)
        x = self.last_linear(x.view(x.shape[0], -1))
        x = self.last_bn(x)
        # L2-normalized embedding.
        x = F.normalize(x, p=2, dim=1)
        if self.classify:
            x = self.logits(x)
        return x
def load_weights(mdl, name):
    """Download pretrained state_dict and load into model.
    Arguments:
        mdl {torch.nn.Module} -- Pytorch model.
        name {str} -- Name of dataset that was used to generate pretrained state_dict.
    Raises:
        ValueError: If 'pretrained' not equal to 'vggface2' or 'casia-webface'.
    """
    if name == "vggface2":
        features_path = "https://drive.google.com/uc?export=download&id=1cWLH_hPns8kSfMz9kKl9PsG5aNV2VSMn"
        logits_path = "https://drive.google.com/uc?export=download&id=1mAie3nzZeno9UIzFXvmVZrDG3kwML46X"
    elif name == "casia-webface":
        features_path = "https://drive.google.com/uc?export=download&id=1LSHHee_IQj5W3vjBcRyVaALv4py1XaGy"
        logits_path = "https://drive.google.com/uc?export=download&id=1QrhPgn1bGlDxAil2uc07ctunCQoDnCzT"
    else:
        raise ValueError(
            'Pretrained models only exist for "vggface2" and "casia-webface"'
        )
    # Cache downloads under the torch checkpoints directory.
    model_dir = os.path.join(get_torch_home(), "checkpoints")
    os.makedirs(model_dir, exist_ok=True)
    state_dict = {}
    for i, path in enumerate([features_path, logits_path]):
        # Cache filename combines the dataset name with the tail of the URL.
        cached_file = os.path.join(model_dir, "{}_{}.pt".format(name, path[-10:]))
        if not os.path.exists(cached_file):
            print("Downloading parameters ({}/2)".format(i + 1))
            s = requests.Session()
            # Retry transient network failures up to 10 times.
            s.mount("https://", HTTPAdapter(max_retries=10))
            r = s.get(path, allow_redirects=True)
            with open(cached_file, "wb") as f:
                f.write(r.content)
        state_dict.update(torch.load(cached_file))
    mdl.load_state_dict(state_dict)
def inception_resnet_v1(pretrained=True, progress=True, **kwargs):
    """Factory returning a 7-class InceptionResnetV1 classifier.

    NOTE(review): ``pretrained``, ``progress`` and ``**kwargs`` are accepted
    for API symmetry with the other model factories but are ignored — the
    model is always built with classify=True, pretrained="vggface2" and
    num_classes=7; confirm this is intentional.
    """
    return InceptionResnetV1(classify=True, pretrained="vggface2", num_classes=7)
def get_torch_home():
    """Return the torch cache directory, honouring TORCH_HOME and XDG_CACHE_HOME.

    Defaults to ``~/.cache/torch`` (with ``~`` expanded) when neither
    environment variable is set.
    """
    # Fixed: dataset-export metadata residue ("| /rmn-.../inception_resnet_v1.py
    # | pypi") was fused onto the return line, making the module unparseable.
    torch_home = os.path.expanduser(
        os.getenv(
            "TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
        )
    )
    return torch_home
import torch
import torch.nn as nn
from .densenet import densenet121
from .googlenet import googlenet
from .resnet import resnet18
# torchvision ResNet checkpoint URLs.
# NOTE(review): this table appears unused in this module (the backbones are
# built via the local resnet/densenet/googlenet factories) — confirm before removing.
model_urls = {
    "resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
    "resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
    "resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
    "resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
    "resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
    "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
    "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
    "wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
    "wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
class ResDenseGle(nn.Module):
    """Ensemble of ResNet-18, DenseNet-121 and GoogLeNet backbones.

    Each backbone's classification head is replaced by ``nn.Identity`` so it
    emits features; the three feature vectors are concatenated and mapped to
    7 classes by a single linear layer (``_fc``).
    """

    def __init__(self, in_channels=3, num_classes=7):
        super(ResDenseGle, self).__init__()
        self.resnet = resnet18(in_channels, num_classes)
        # self.densenet = densenet121(in_channels, num_classes, pretrained=False)
        self.densenet = densenet121(in_channels, num_classes)
        # self.googlenet = googlenet(in_channels, num_classes, pretrained=False)
        self.googlenet = googlenet(in_channels, num_classes)
        # change fc to identity
        self.resnet.fc = nn.Identity()
        # self.densenet.fc = nn.Identity()
        self.densenet.classifier = nn.Identity()
        self.googlenet.fc = nn.Identity()
        # create new fc
        # self.fc = nn.Linear(512 * 3, 7)
        # avoid change fc inside trainer
        # NOTE(review): _fc expects 2536 concatenated features — verify this
        # matches the three backbones' actual output widths.
        self._fc = nn.Linear(2536, 7)
        # self._fc = nn.Linear(2560, 7)
        # another options for fc
        # NOTE(review): fc1/fc2 are defined but unused in forward — confirm
        # whether this two-layer head was meant to replace _fc.
        self.fc1 = nn.Linear(2536, 512)
        self.fc2 = nn.Linear(512, 7)

    def forward(self, x):
        # Run the same input through all three backbones and fuse features.
        x1 = self.resnet(x)
        x2 = self.densenet(x)
        x3 = self.googlenet(x)
        x = torch.cat([x1, x2, x3], dim=1)
        x = self._fc(x)
        # x = self.fc1(x)
        # x = self.fc2(x)
        return x
def rdg(pretrained=False, progress=True, **kwargs):
    """Build a ResDenseGle ensemble model.

    Args:
        pretrained (bool): accepted for API symmetry with the other model
            factories but currently ignored.
        progress (bool): ignored.
        **kwargs: must contain ``in_channels`` and ``num_classes``.

    Raises:
        KeyError: if ``in_channels`` or ``num_classes`` is missing from kwargs.
    """
    # Fixed: dataset-export metadata residue ("| /rmn-.../res_dense_gle.py |
    # pypi") was fused onto the return line, making the module unparseable.
    model = ResDenseGle(kwargs["in_channels"], kwargs["num_classes"])
    return model
import torch
import torch.nn as nn
from .utils import load_state_dict_from_url
__all__ = [
    "ResNet",
    "resnet18",
    "resnet34",
    "resnet50",
    "resnet101",
    "resnet152",
    "resnext50_32x4d",
    "resnext101_32x8d",
    "wide_resnet50_2",
    "wide_resnet101_2",
]
# torchvision-hosted ImageNet checkpoint URLs, keyed by architecture name.
model_urls = {
    "resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
    "resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
    "resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
    "resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
    "resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
    "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
    "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
    "wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
    "wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Return a bias-free 3x3 convolution whose padding equals its dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        groups=groups,
        dilation=dilation,
        padding=dilation,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=1, stride=stride, bias=False
    )
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions with a shortcut add."""

    expansion = 1  # output channels = planes * expansion
    __constants__ = ["downsample"]

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
    ):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the shortcut so its shape matches the residual output.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4), shortcut add."""

    expansion = 4  # output channels = planes * expansion
    __constants__ = ["downsample"]

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
    ):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Internal width scales with base_width and groups (ResNeXt / wide variants).
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project the shortcut so its shape matches the residual output.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Generic ResNet backbone (torchvision-style).

    NOTE(review): conv1 is hard-coded to 3 input channels (the ``in_channels``
    parameter is accepted but unused) and ``fc`` is fixed at 1000 outputs so
    that ImageNet checkpoints load; the factory functions below replace ``fc``.
    """

    def __init__(
        self,
        block,
        layers,
        num_classes=1000,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        in_channels=3,
    ):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        # NOTE: strictly set the in_channels = 3 to load the pretrained model
        self.conv1 = nn.Conv2d(
            3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
        )
        # self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(
            block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
        )
        self.layer3 = self._make_layer(
            block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
        )
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
        )
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # NOTE: strictly set the num_classes = 1000 to load the pretrained model
        self.fc = nn.Linear(512 * block.expansion, 1000)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage of ``blocks`` residual blocks, downsampling first if needed."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Replace striding with dilation to preserve spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match the residual output's shape.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample,
                self.groups,
                self.base_width,
                previous_dilation,
                norm_layer,
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return 1000-way classification logits for a batch of images."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet of the given block type/config, optionally loading weights."""
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = _resnet(
        "resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs
    )
    # model.fc = nn.Linear(512, kwargs['num_classes'])
    # Replace the 1000-way ImageNet head with a fixed 7-class head
    # (after any pretrained weights have been loaded).
    model.fc = nn.Linear(512, 7)
    return model
def resnet34(pretrained=True, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    NOTE(review): unlike the sibling factories, ``pretrained`` defaults to
    True here, and ``kwargs`` must contain ``num_classes`` (KeyError
    otherwise) — confirm this asymmetry is intentional.
    """
    model = _resnet(
        "resnet34", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs
    )
    model.fc = nn.Linear(512, kwargs["num_classes"])
    return model
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    Requires ``num_classes`` in kwargs (KeyError otherwise); the 1000-way fc
    is replaced after any pretrained weights are loaded.
    """
    model = _resnet(
        "resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
    )
    model.fc = nn.Linear(2048, kwargs["num_classes"])
    return model
def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    Requires ``num_classes`` in kwargs (KeyError otherwise); the 1000-way fc
    is replaced after any pretrained weights are loaded.
    """
    model = _resnet(
        "resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
    )
    model.fc = nn.Linear(2048, kwargs["num_classes"])
    return model
def resnet152(pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    Requires ``num_classes`` in kwargs (KeyError otherwise); the 1000-way fc
    is replaced after any pretrained weights are loaded.
    """
    model = _resnet(
        "resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs
    )
    model.fc = nn.Linear(2048, kwargs["num_classes"])
    return model
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 cardinality groups, 4 channels per group in each bottleneck.
    kwargs.update(groups=32, width_per_group=4)
    return _resnet(
        "resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
    )
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 cardinality groups, 8 channels per group in each bottleneck.
    kwargs["groups"] = 32
    kwargs["width_per_group"] = 8
    return _resnet(
        "resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
    )
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling base width doubles every bottleneck's internal channel count.
    kwargs["width_per_group"] = 64 * 2
    return _resnet(
        "wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
    )
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    Same topology as ResNet-101, but every bottleneck's inner 3x3 conv is
    twice as wide; the outer 1x1 widths are unchanged.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet(
        "wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
    )
import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import load_state_dict_from_url
__all__ = ["GoogLeNet", "googlenet"]
# Download location of the torchvision pretrained GoogLeNet weights.
model_urls = {
    # GoogLeNet ported from TensorFlow
    "googlenet": "https://download.pytorch.org/models/googlenet-1378be20.pth",
}
# Training-mode output triple: main classifier logits plus the two
# auxiliary-head logits (aux2 is the deeper head; see GoogLeNet.forward).
_GoogLeNetOutputs = namedtuple(
    "GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"]
)
def googlenet(pretrained=True, progress=True, **kwargs):
    r"""GoogLeNet (Inception v1) model architecture from
    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
            Default: *False* when pretrained is True otherwise *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if not pretrained:
        return GoogLeNet(**kwargs)

    kwargs.setdefault("transform_input", True)
    kwargs.setdefault("aux_logits", False)
    if kwargs["aux_logits"]:
        warnings.warn(
            "auxiliary heads in the pretrained googlenet model are NOT pretrained, "
            "so make sure to train them"
        )
    keep_aux = kwargs["aux_logits"]
    # The published checkpoint contains the aux heads, so build with them and
    # strip them afterwards if the caller did not ask for them.
    kwargs["aux_logits"] = True
    kwargs["init_weights"] = False
    model = GoogLeNet(**kwargs)
    model.load_state_dict(
        load_state_dict_from_url(model_urls["googlenet"], progress=progress)
    )
    if not keep_aux:
        model.aux_logits = False
        del model.aux1, model.aux2
    # Re-head the classifier for the 7 facial-expression classes.
    model.fc = nn.Linear(1024, 7)
    return model
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) backbone.

    ``num_classes`` is forcibly overridden to 1000 inside ``__init__`` so the
    ImageNet checkpoint loads unchanged; the module-level ``googlenet()``
    factory re-heads ``fc`` for the target task afterwards.

    NOTE(review): ``in_channels`` is accepted but ignored — ``conv1`` is
    hard-wired to 3 input channels. Confirm no caller relies on it.
    """

    def __init__(
        self,
        num_classes=1000,
        aux_logits=True,
        transform_input=False,
        init_weights=True,
        in_channels=3,
    ):
        super(GoogLeNet, self).__init__()
        # strict set to 1000
        num_classes = 1000
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        # Stem: 224x224x3 input -> 28x28x192 feature map.
        self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = BasicConv2d(64, 64, kernel_size=1)
        self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        # Inception stages; positional args are the per-branch channel widths
        # (1x1, 3x3-reduce, 3x3, 5x5-reduce, 5x5, pool-proj).
        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
        # Auxiliary classifiers tap the 4a and 4d outputs (training only).
        if aux_logits:
            self.aux1 = InceptionAux(512, num_classes)
            self.aux2 = InceptionAux(528, num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.2)
        # strict set to 1000
        self.fc = nn.Linear(1024, num_classes)
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        """Truncated-normal init for conv/linear weights (matching the
        TensorFlow port); BatchNorm starts as identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                # Local import keeps scipy optional unless init is requested.
                import scipy.stats as stats
                X = stats.truncnorm(-2, 2, scale=0.01)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Forward pass. In training mode with aux_logits enabled, returns the
        (logits, aux_logits2, aux_logits1) namedtuple; otherwise just logits."""
        if self.transform_input:
            # Re-scale from ImageNet mean/std normalization to the [-1, 1]
            # scaling the TF checkpoint was trained with, channel by channel.
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        # N x 3 x 224 x 224
        x = self.conv1(x)
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        # N x 64 x 56 x 56
        x = self.conv2(x)
        # N x 64 x 56 x 56
        x = self.conv3(x)
        # N x 192 x 56 x 56
        x = self.maxpool2(x)
        # N x 192 x 28 x 28
        x = self.inception3a(x)
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        # N x 512 x 14 x 14
        if self.training and self.aux_logits:
            aux1 = self.aux1(x)
        x = self.inception4b(x)
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        # N x 528 x 14 x 14
        if self.training and self.aux_logits:
            aux2 = self.aux2(x)
        x = self.inception4e(x)
        # N x 832 x 14 x 14
        x = self.maxpool4(x)
        # N x 832 x 7 x 7
        x = self.inception5a(x)
        # N x 832 x 7 x 7
        x = self.inception5b(x)
        # N x 1024 x 7 x 7
        x = self.avgpool(x)
        # N x 1024 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)
        if self.training and self.aux_logits:
            return _GoogLeNetOutputs(x, aux2, aux1)
        return x
class Inception(nn.Module):
    """GoogLeNet inception block: four parallel branches concatenated on the
    channel axis.

    Note: branch3 uses a 3x3 conv (not the paper's 5x5), matching the
    TensorFlow port this model was converted from.
    """

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1),
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            BasicConv2d(in_channels, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        # Run every branch on the same input and stack along channels.
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return torch.cat([branch(x) for branch in branches], 1)
class InceptionAux(nn.Module):
    """Auxiliary classifier head tapped off an intermediate GoogLeNet stage."""

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv = BasicConv2d(in_channels, 128, kernel_size=1)
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)

    def forward(self, x):
        # Pool the incoming feature map to a fixed 4x4 so that, after the
        # 128-channel 1x1 conv, fc1 always sees 128 * 4 * 4 = 2048 inputs.
        x = self.conv(F.adaptive_avg_pool2d(x, (4, 4)))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x), inplace=True)
        x = F.dropout(x, 0.7, training=self.training)
        return self.fc2(x)
class BasicConv2d(nn.Module):
    """Conv2d -> BatchNorm -> ReLU, the basic unit used throughout GoogLeNet."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        # Bias is redundant: BatchNorm immediately re-centers the output.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)
import traceback
import torch
import torch.nn as nn
from .resnet import BasicBlock, Bottleneck, conv1x1
def transpose(in_channels, out_channels, kernel_size=2, stride=2):
    """Learned upsampling stage: ConvTranspose2d -> BatchNorm -> ReLU."""
    upconv = nn.ConvTranspose2d(
        in_channels, out_channels, kernel_size=kernel_size, stride=stride
    )
    return nn.Sequential(upconv, nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
def downsample(in_channels, out_channels):
    """1x1 conv + BN + ReLU used to change a feature map's channel count.

    Bug fix: the BatchNorm layer was built as
    ``nn.BatchNorm2d(num_features(out_channels))``, *calling* an undefined
    ``num_features`` name and raising ``NameError`` the first time this
    helper ran. It is now passed as the keyword argument it was meant to be.
    """
    return nn.Sequential(
        conv1x1(in_channels, out_channels),
        nn.BatchNorm2d(num_features=out_channels),
        nn.ReLU(inplace=True),
    )
class Attention0(nn.Module):
    """Residual attention module with a depth-0 (no downsampling) mask branch.

    Output is ``(1 + sigmoid(mask(x))) * trunk(x)``.
    """

    def __init__(self, channels, block):
        super().__init__()
        self._trunk1 = block(channels, channels)
        self._trunk2 = block(channels, channels)
        self._enc = block(channels, channels)
        self._dec = block(channels, channels)
        self._conv1x1 = nn.Sequential(
            conv1x1(2 * channels, channels),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(inplace=True),
        )
        self._mp = nn.MaxPool2d(3, 2, 1)
        self._relu = nn.ReLU(inplace=True)

    def enc(self, x):
        return self._enc(x)

    def dec(self, x):
        return self._dec(x)

    def trunking(self, x):
        return self._trunk2(self._trunk1(x))

    def masking(self, x):
        return torch.sigmoid(self.dec(self.enc(x)))

    def forward(self, x):
        trunk_out = self.trunking(x)
        soft_mask = self.masking(x)
        return (1 + soft_mask) * trunk_out
class Attention1(nn.Module):
    """Residual attention module whose mask branch downsamples once
    (U-Net of depth 1). Output is ``(1 + sigmoid(mask(x))) * trunk(x)``.
    """

    def __init__(self, channels, block):
        super().__init__()
        self._trunk1 = block(channels, channels)
        self._trunk2 = block(channels, channels)
        self._enc1 = block(channels, channels)
        self._enc2 = block(channels, channels)
        self._dec = block(channels, channels)
        self._conv1x1 = nn.Sequential(
            conv1x1(2 * channels, channels),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(inplace=True),
        )
        self._trans = nn.Sequential(
            nn.ConvTranspose2d(channels, channels, kernel_size=2, stride=2),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(inplace=True),
        )
        self._mp = nn.MaxPool2d(3, 2, 1)
        self._relu = nn.ReLU(inplace=True)

    def enc(self, x):
        # Keep the full-resolution feature for the skip connection.
        full_res = self._enc1(x)
        half_res = self._enc2(self._mp(full_res))
        return [full_res, half_res]

    def dec(self, x):
        skip, bottom = x
        upsampled = self._trans(bottom)
        fused = self._conv1x1(torch.cat([skip, upsampled], dim=1))
        return self._dec(fused)

    def trunking(self, x):
        return self._trunk2(self._trunk1(x))

    def masking(self, x):
        return torch.sigmoid(self.dec(self.enc(x)))

    def forward(self, x):
        trunk_out = self.trunking(x)
        soft_mask = self.masking(x)
        return (1 + soft_mask) * trunk_out
class Attention2(nn.Module):
    """Residual attention module whose mask branch downsamples twice
    (U-Net of depth 2). Output is ``(1 + sigmoid(mask(x))) * trunk(x)``.

    Bug fix: ``dec`` previously applied ``self._dec1`` to *both* decoder
    stages, leaving ``self._dec2`` as dead (never-used, never-trained)
    parameters. The second stage now uses ``self._dec2`` as constructed.
    """

    def __init__(self, channels, block):
        super().__init__()
        self._trunk1 = block(channels, channels)
        self._trunk2 = block(channels, channels)
        self._enc1 = block(channels, channels)
        self._enc2 = block(channels, channels)
        self._enc3 = nn.Sequential(block(channels, channels), block(channels, channels))
        self._dec1 = nn.Sequential(
            conv1x1(2 * channels, channels),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(inplace=True),
            block(channels, channels),
        )
        self._dec2 = nn.Sequential(
            conv1x1(2 * channels, channels),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(inplace=True),
            block(channels, channels),
        )
        self._trans = nn.Sequential(
            nn.ConvTranspose2d(channels, channels, kernel_size=2, stride=2),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(inplace=True),
        )
        self._mp = nn.MaxPool2d(3, 2, 1)
        self._relu = nn.ReLU(inplace=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last BN of each residual block so every block starts
        # as an identity mapping (https://arxiv.org/abs/1706.02677).
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)

    def enc(self, x):
        x1 = self._enc1(x)
        x2 = self._enc2(self._mp(x1))
        x3 = self._enc3(self._mp(x2))
        return [x1, x2, x3]

    def dec(self, x):
        x1, x2, x3 = x
        x2 = torch.cat([x2, self._trans(x3)], dim=1)
        x2 = self._dec1(x2)
        x3 = torch.cat([x1, self._trans(x2)], dim=1)
        # Fixed: was self._dec1 again, which left self._dec2 unused.
        x3 = self._dec2(x3)
        return x3

    def trunking(self, x):
        return self._trunk2(self._trunk1(x))

    def masking(self, x):
        x = self.enc(x)
        x = self.dec(x)
        return torch.sigmoid(x)

    def forward(self, x):
        trunk = self.trunking(x)
        mask = self.masking(x)
        return (1 + mask) * trunk
def attention(channels, block=BasicBlock, depth=-1):
    """Factory for the residual-attention modules.

    Args:
        channels: number of input/output channels of the attention module.
        block: residual block class used inside the module.
        depth: mask-branch depth; must be 0, 1 or 2 (no usable default).

    Raises:
        ValueError: if ``depth`` is not one of the supported values.

    Fix: the error path previously called ``traceback.print_exc()`` with no
    active exception, which just printed "NoneType: None" to stderr.
    """
    factories = {0: Attention0, 1: Attention1, 2: Attention2}
    if depth not in factories:
        raise ValueError("depth must be 0, 1 or 2, got {!r}".format(depth))
    return factories[depth](channels, block)
import torch
import torch.nn as nn
from .utils import load_state_dict_from_url
__all__ = ["AlexNet", "alexnet"]
# Download location of the torchvision pretrained AlexNet weights.
model_urls = {
    "alexnet": "https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth",
}
class AlexNet(nn.Module):
    """AlexNet ("one weird trick" variant): conv feature extractor + FC head.

    The final Linear layer is fixed at 1000 outputs so the ImageNet
    checkpoint loads cleanly; the module-level ``alexnet()`` factory
    re-heads it afterwards. NOTE(review): ``num_classes`` is currently
    unused for the same reason — confirm callers don't rely on it.
    """

    def __init__(self, in_channels=3, num_classes=1000):
        super(AlexNet, self).__init__()
        # (in, out, kernel, stride, padding, max-pool after?)
        conv_cfg = [
            (in_channels, 64, 11, 4, 2, True),
            (64, 192, 5, 1, 2, True),
            (192, 384, 3, 1, 1, False),
            (384, 256, 3, 1, 1, False),
            (256, 256, 3, 1, 1, True),
        ]
        feature_layers = []
        for c_in, c_out, k, s, p, pool_after in conv_cfg:
            feature_layers.append(
                nn.Conv2d(c_in, c_out, kernel_size=k, stride=s, padding=p)
            )
            feature_layers.append(nn.ReLU(inplace=True))
            if pool_after:
                feature_layers.append(nn.MaxPool2d(kernel_size=3, stride=2))
        self.features = nn.Sequential(*feature_layers)
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            # Kept at 1000 so pretrained weights load; see alexnet().
            nn.Linear(4096, 1000),
        )

    def forward(self, x):
        x = self.avgpool(self.features(x))
        x = torch.flatten(x, 1)
        return self.classifier(x)
def alexnet(pretrained=True, progress=True, **kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = AlexNet(**kwargs)
    if not pretrained:
        return model
    model.load_state_dict(
        load_state_dict_from_url(model_urls["alexnet"], progress=progress)
    )
    # Swap the ImageNet head for the 7 facial-expression classes.
    model.classifier[-1] = nn.Linear(4096, 7)
    return model
from pytorchcv.model_provider import get_model as ptcv_get_model
from .alexnet import *
from .brain_humor import *
from .centerloss_resnet import resnet18_centerloss
from .densenet import *
from .fer2013_models import *
from .googlenet import *
from .inception import *
from .inception_resnet_v1 import *
from .masking import masking
from .res_dense_gle import *
from .resatt import *
from .residual_attention_network import *
from .resmasking import (
resmasking,
resmasking50_dropout1,
resmasking_dropout1,
resmasking_dropout2,
)
from .resmasking_naive import resmasking_naive_dropout1
from .resnet import *
from .resnet50_scratch_dims_2048 import resnet50_pretrained_vgg
from .resnet112 import resnet18x112
from .runet import *
from .vgg import *
def resattnet56(in_channels, num_classes, pretrained=True):
    """ResAttNet-56 backbone re-headed for the 7 emotion classes.

    NOTE(review): unlike the sibling factories below, this always passes
    pretrained=False to ptcv, regardless of the ``pretrained`` argument.
    """
    net = ptcv_get_model("resattnet56", pretrained=False)
    net.output = nn.Linear(2048, 7)
    return net
def cbam_resnet50(in_channels, num_classes, pretrained=True):
    """CBAM-ResNet-50 (ImageNet weights) re-headed for the 7 emotion classes."""
    net = ptcv_get_model("cbam_resnet50", pretrained=True)
    net.output = nn.Linear(2048, 7)
    return net
def bam_resnet50(in_channels, num_classes, pretrained=True):
    """BAM-ResNet-50 (ImageNet weights) re-headed for the 7 emotion classes."""
    net = ptcv_get_model("bam_resnet50", pretrained=True)
    net.output = nn.Linear(2048, 7)
    return net
def efficientnet_b7b(in_channels, num_classes, pretrained=True):
    """EfficientNet-B7b (ImageNet weights) re-headed for the 7 emotion classes."""
    net = ptcv_get_model("efficientnet_b7b", pretrained=True)
    net.output = nn.Sequential(nn.Dropout(p=0.5, inplace=False), nn.Linear(2560, 7))
    return net
def efficientnet_b3b(in_channels, num_classes, pretrained=True):
    """EfficientNet-B3b (ImageNet weights) re-headed for the 7 emotion classes."""
    net = ptcv_get_model("efficientnet_b3b", pretrained=True)
    net.output = nn.Sequential(nn.Dropout(p=0.3, inplace=False), nn.Linear(1536, 7))
    return net
def efficientnet_b2b(in_channels, num_classes, pretrained=True):
    """EfficientNet-B2b (ImageNet weights) re-headed for the 7 emotion classes."""
    net = ptcv_get_model("efficientnet_b2b", pretrained=True)
    net.output = nn.Sequential(
        nn.Dropout(p=0.3, inplace=False), nn.Linear(1408, 7, bias=True)
    )
    return net
def efficientnet_b1b(in_channels, num_classes, pretrained=True):
    """EfficientNet-B1b (ImageNet weights) re-headed for the 7 emotion classes.

    Fix: removed a leftover debug ``print(model)`` that dumped the entire
    module tree to stdout every time the model was constructed.
    """
    model = ptcv_get_model("efficientnet_b1b", pretrained=True)
    model.output = nn.Sequential(
        nn.Dropout(p=0.3, inplace=False), nn.Linear(1280, 7, bias=True)
    )
    return model
import torch
import torch.nn as nn
from .utils import load_state_dict_from_url
# Public API of this module.
__all__ = [
    "VGG",
    "vgg11",
    "vgg11_bn",
    "vgg13",
    "vgg13_bn",
    "vgg16",
    "vgg16_bn",
    "vgg19_bn",
    "vgg19",
]
# Download locations of the torchvision ImageNet checkpoints per variant.
model_urls = {
    "vgg11": "https://download.pytorch.org/models/vgg11-bbd30ac9.pth",
    "vgg13": "https://download.pytorch.org/models/vgg13-c768596a.pth",
    "vgg16": "https://download.pytorch.org/models/vgg16-397923af.pth",
    "vgg19": "https://download.pytorch.org/models/vgg19-dcbb9e9d.pth",
    "vgg11_bn": "https://download.pytorch.org/models/vgg11_bn-6002323d.pth",
    "vgg13_bn": "https://download.pytorch.org/models/vgg13_bn-abd245e5.pth",
    "vgg16_bn": "https://download.pytorch.org/models/vgg16_bn-6c64b313.pth",
    "vgg19_bn": "https://download.pytorch.org/models/vgg19_bn-c79401a0.pth",
}
class VGG(nn.Module):
    """Generic VGG: a conv feature extractor followed by the FC classifier.

    The last Linear is fixed at 1000 outputs so ImageNet checkpoints load;
    the vgg19/vgg19_bn factories re-head it for 7 classes afterwards.
    NOTE(review): ``in_channels`` and ``num_classes`` are accepted but not
    used by this constructor — confirm callers don't rely on them.
    """

    def __init__(self, features, in_channels=3, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 1000),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.avgpool(self.features(x))
        return self.classifier(torch.flatten(x, 1))

    def _initialize_weights(self):
        # Kaiming for convs, N(0, 0.01) for linears, identity for BatchNorm.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(
                    module.weight, mode="fan_out", nonlinearity="relu"
                )
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def make_layers(cfg, batch_norm=False, **kwargs):
    """Build the VGG feature extractor from a config list.

    Args:
        cfg: sequence of ints (conv output channels) and "M" (2x2 max-pool).
        batch_norm: insert BatchNorm2d after every conv when True.
        **kwargs: may carry ``in_channels`` for the first conv (default 3).

    Returns:
        nn.Sequential of the conv/BN/ReLU/pool layers.

    Robustness fix: ``kwargs["in_channels"]`` raised KeyError whenever the
    caller did not pass it (none of the vgg1x factories do); the standard
    3-channel default is now used instead.
    """
    layers = []
    in_channels = kwargs.get("in_channels", 3)
    for v in cfg:
        if v == "M":
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(nn.ReLU(inplace=True))
        in_channels = v
    return nn.Sequential(*layers)
# Per-architecture layer configurations: ints are conv output widths,
# "M" marks a 2x2 max-pool. A = VGG11, B = VGG13, D = VGG16, E = VGG19.
cfgs = {
    "A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
    "B": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
    "D": [64, 64, "M", 128, 128, "M", 256, 256, 256, "M",
          512, 512, 512, "M", 512, 512, 512, "M"],
    "E": [64, 64, "M", 128, 128, "M", 256, 256, 256, 256, "M",
          512, 512, 512, 512, "M", 512, 512, 512, 512, "M"],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    """Instantiate a VGG variant, optionally loading its ImageNet weights."""
    if pretrained:
        # Random init is pointless (and slow) when weights are loaded anyway.
        kwargs["init_weights"] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm, **kwargs), **kwargs)
    if pretrained:
        model.load_state_dict(
            load_state_dict_from_url(model_urls[arch], progress=progress)
        )
    return model
def vgg11(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg(
        "vgg11", "A", False, pretrained=pretrained, progress=progress, **kwargs
    )
def vgg11_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg(
        "vgg11_bn", "A", True, pretrained=pretrained, progress=progress, **kwargs
    )
def vgg13(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg(
        "vgg13", "B", False, pretrained=pretrained, progress=progress, **kwargs
    )
def vgg13_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg(
        "vgg13_bn", "B", True, pretrained=pretrained, progress=progress, **kwargs
    )
def vgg16(pretrained=False, progress=True, **kwargs):
    r"""VGG 16-layer model (configuration "D")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg(
        "vgg16", "D", False, pretrained=pretrained, progress=progress, **kwargs
    )
def vgg16_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 16-layer model (configuration "D") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg(
        "vgg16_bn", "D", True, pretrained=pretrained, progress=progress, **kwargs
    )
def vgg19(pretrained=True, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration "E")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = _vgg("vgg19", "E", False, pretrained, progress, **kwargs)
    # Replace the ImageNet classifier with a fresh 7-class head.
    head = [
        nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(),
        nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),
        nn.Linear(4096, 7),
    ]
    model.classifier = nn.Sequential(*head)
    return model
def vgg19_bn(pretrained=True, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration "E") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = _vgg("vgg19_bn", "E", True, pretrained, progress, **kwargs)
    # Replace the ImageNet classifier with a fresh 7-class head.
    head = [
        nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(),
        nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),
        nn.Linear(4096, 7),
    ]
    model.classifier = nn.Sequential(*head)
    return model
from collections import OrderedDict
from torch import nn
class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper that returns intermediate layers from a model.

    It strongly assumes the child modules were registered on ``model`` in
    the same order they are used in forward, and that each is used exactly
    once. Only direct children of ``model`` can be queried (``model.feature1``
    works; ``model.feature1.layer2`` does not). Children after the last
    requested layer are dropped entirely.

    Arguments:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): maps a child-module name to the
            key under which its activation is returned.

    Examples::
        >>> m = torchvision.models.resnet18(pretrained=True)
        >>> new_m = IntermediateLayerGetter(m, {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = new_m(torch.rand(1, 3, 224, 224))
        >>> [(k, v.shape) for k, v in out.items()]
        [('feat1', torch.Size([1, 64, 56, 56])), ('feat2', torch.Size([1, 256, 14, 14]))]
    """

    def __init__(self, model, return_layers):
        children = dict(model.named_children())
        if not set(return_layers).issubset(children):
            raise ValueError("return_layers are not present in model")
        # Work on a copy so the caller's dict is untouched.
        remaining = dict(return_layers)
        kept = OrderedDict()
        for name, module in children.items():
            kept[name] = module
            remaining.pop(name, None)
            if not remaining:
                break
        super(IntermediateLayerGetter, self).__init__(kept)
        self.return_layers = return_layers

    def forward(self, x):
        out = OrderedDict()
        for name, module in self.named_children():
            x = module(x)
            if name in self.return_layers:
                out[self.return_layers[name]] = x
        return out
import traceback
import torch
import torch.nn as nn
from .resnet import BasicBlock, Bottleneck, conv1x1
def up_pooling(in_channels, out_channels, kernel_size=2, stride=2):
    """Learned upsampling stage: ConvTranspose2d -> BatchNorm -> ReLU."""
    upconv = nn.ConvTranspose2d(
        in_channels, out_channels, kernel_size=kernel_size, stride=stride
    )
    return nn.Sequential(upconv, nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
class Masking4(nn.Module):
    """Depth-4 U-Net producing a per-pixel channel-softmax mask.

    The encoder widens channels x2 per stage over four stages; the decoder
    fuses skip connections (channel concat) with transposed-conv upsampling
    and narrows back to the input width. The output has the same shape as
    the input, so ``in_channels`` must equal ``out_channels``.

    Change: removed two triple-quoted strings that held dead, commented-out
    construction code — they were evaluated (and discarded) as expression
    statements on every init and only obscured the real wiring. Behavior and
    state_dict layout are unchanged.
    """

    def __init__(self, in_channels, out_channels, block=BasicBlock):
        assert in_channels == out_channels
        super(Masking4, self).__init__()
        # Channel widths per depth: c, 2c, 4c, 8c, 16c.
        filters = [
            in_channels,
            in_channels * 2,
            in_channels * 4,
            in_channels * 8,
            in_channels * 16,
        ]
        # Encoder-side 1x1+BN projections, used as the residual `downsample`
        # path of each block where the channel count changes.
        self.downsample1 = nn.Sequential(
            conv1x1(filters[0], filters[1], 1),
            nn.BatchNorm2d(filters[1]),
        )
        self.downsample2 = nn.Sequential(
            conv1x1(filters[1], filters[2], 1),
            nn.BatchNorm2d(filters[2]),
        )
        self.downsample3 = nn.Sequential(
            conv1x1(filters[2], filters[3], 1),
            nn.BatchNorm2d(filters[3]),
        )
        self.downsample4 = nn.Sequential(
            conv1x1(filters[3], filters[4], 1),
            nn.BatchNorm2d(filters[4]),
        )
        self.conv1 = block(filters[0], filters[1], downsample=self.downsample1)
        self.conv2 = block(filters[1], filters[2], downsample=self.downsample2)
        self.conv3 = block(filters[2], filters[3], downsample=self.downsample3)
        self.conv4 = block(filters[3], filters[4], downsample=self.downsample4)
        self.down_pooling = nn.MaxPool2d(kernel_size=2)
        # Decoder-side projections; each decoder block's input width is
        # doubled by the skip-connection concat, hence filters[i] -> narrower.
        self.downsample5 = nn.Sequential(
            conv1x1(filters[4], filters[3], 1),
            nn.BatchNorm2d(filters[3]),
        )
        self.downsample6 = nn.Sequential(
            conv1x1(filters[3], filters[2], 1),
            nn.BatchNorm2d(filters[2]),
        )
        self.downsample7 = nn.Sequential(
            conv1x1(filters[2], filters[1], 1),
            nn.BatchNorm2d(filters[1]),
        )
        self.downsample8 = nn.Sequential(
            conv1x1(filters[1], filters[0], 1),
            nn.BatchNorm2d(filters[0]),
        )
        self.up_pool5 = up_pooling(filters[4], filters[3])
        self.conv5 = block(filters[4], filters[3], downsample=self.downsample5)
        self.up_pool6 = up_pooling(filters[3], filters[2])
        self.conv6 = block(filters[3], filters[2], downsample=self.downsample6)
        self.up_pool7 = up_pooling(filters[2], filters[1])
        self.conv7 = block(filters[2], filters[1], downsample=self.downsample7)
        self.conv8 = block(filters[1], filters[0], downsample=self.downsample8)
        # init weight
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)

    def forward(self, x):
        # Encoder with max-pool downsampling between stages.
        x1 = self.conv1(x)
        p1 = self.down_pooling(x1)
        x2 = self.conv2(p1)
        p2 = self.down_pooling(x2)
        x3 = self.conv3(p2)
        p3 = self.down_pooling(x3)
        x4 = self.conv4(p3)
        # Decoder: upsample, concat the matching encoder feature, then fuse.
        x5 = self.up_pool5(x4)
        x5 = torch.cat([x5, x3], dim=1)
        x5 = self.conv5(x5)
        x6 = self.up_pool6(x5)
        x6 = torch.cat([x6, x2], dim=1)
        x6 = self.conv6(x6)
        x7 = self.up_pool7(x6)
        x7 = torch.cat([x7, x1], dim=1)
        x7 = self.conv7(x7)
        x8 = self.conv8(x7)
        # Softmax across channels yields the per-pixel soft mask.
        output = torch.softmax(x8, dim=1)
        return output
class Masking3(nn.Module):
def __init__(self, in_channels, out_channels, block=BasicBlock):
assert in_channels == out_channels
super(Masking3, self).__init__()
filters = [in_channels, in_channels * 2, in_channels * 4, in_channels * 8]
self.downsample1 = nn.Sequential(
conv1x1(filters[0], filters[1], 1),
nn.BatchNorm2d(filters[1]),
)
self.downsample2 = nn.Sequential(
conv1x1(filters[1], filters[2], 1),
nn.BatchNorm2d(filters[2]),
)
self.downsample3 = nn.Sequential(
conv1x1(filters[2], filters[3], 1),
nn.BatchNorm2d(filters[3]),
)
"""
self.conv1 = block(filters[0], filters[1], downsample=conv1x1(filters[0], filters[1], 1))
self.conv2 = block(filters[1], filters[2], downsample=conv1x1(filters[1], filters[2], 1))
self.conv3 = block(filters[2], filters[3], downsample=conv1x1(filters[2], filters[3], 1))
"""
self.conv1 = block(filters[0], filters[1], downsample=self.downsample1)
self.conv2 = block(filters[1], filters[2], downsample=self.downsample2)
self.conv3 = block(filters[2], filters[3], downsample=self.downsample3)
self.down_pooling = nn.MaxPool2d(kernel_size=2)
self.downsample4 = nn.Sequential(
conv1x1(filters[3], filters[2], 1),
nn.BatchNorm2d(filters[2]),
)
self.downsample5 = nn.Sequential(
conv1x1(filters[2], filters[1], 1),
nn.BatchNorm2d(filters[1]),
)
self.downsample6 = nn.Sequential(
conv1x1(filters[1], filters[0], 1),
nn.BatchNorm2d(filters[0]),
)
"""
self.up_pool4 = up_pooling(filters[3], filters[2])
self.conv4 = block(filters[3], filters[2], downsample=conv1x1(filters[3], filters[2], 1))
self.up_pool5 = up_pooling(filters[2], filters[1])
self.conv5 = block(filters[2], filters[1], downsample=conv1x1(filters[2], filters[1], 1))
self.conv6 = block(filters[1], filters[0], downsample=conv1x1(filters[1], filters[0], 1))
"""
self.up_pool4 = up_pooling(filters[3], filters[2])
self.conv4 = block(filters[3], filters[2], downsample=self.downsample4)
self.up_pool5 = up_pooling(filters[2], filters[1])
self.conv5 = block(filters[2], filters[1], downsample=self.downsample5)
self.conv6 = block(filters[1], filters[0], downsample=self.downsample6)
# init weight
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def forward(self, x):
    """U-Net-style pass: two down-sampling stages, two up-sampling stages
    with skip connections, then a channel-wise softmax.

    :param x: input feature map of shape (B, C, H, W) — presumably C equals
        the in_channels this module was built with; confirm against __init__.
    :return: soft mask of the same spatial size, softmax-normalized over dim 1.
    """
    # encoder: conv block, then 2x max-pool, twice
    x1 = self.conv1(x)
    p1 = self.down_pooling(x1)
    x2 = self.conv2(p1)
    p2 = self.down_pooling(x2)
    x3 = self.conv3(p2)
    # decoder: upsample and concatenate the matching encoder output (skip)
    x4 = self.up_pool4(x3)
    x4 = torch.cat([x4, x2], dim=1)
    x4 = self.conv4(x4)
    x5 = self.up_pool5(x4)
    x5 = torch.cat([x5, x1], dim=1)
    x5 = self.conv5(x5)
    x6 = self.conv6(x5)
    # Softmax over channels turns the feature map into a soft attention mask.
    output = torch.softmax(x6, dim=1)
    # output = torch.sigmoid(x6)
    return output
class Masking2(nn.Module):
    """Residual masking module with a depth-2 U-Net topology.

    Built from `block` (a residual block class, e.g. BasicBlock) whose
    channel-changing instances get a 1x1-conv + BatchNorm shortcut
    (`downsampleN`) so the residual addition is shape-compatible.

    NOTE: submodule creation order is deliberately left unchanged — it
    determines parameter ordering and the state_dict layout of any
    existing checkpoints.
    """

    def __init__(self, in_channels, out_channels, block=BasicBlock):
        # The produced mask multiplies the input features, so channel
        # counts must match.
        assert in_channels == out_channels
        super(Masking2, self).__init__()
        # Channel progression: C, 2C, 4C (filters[3] is unused at depth 2).
        filters = [in_channels, in_channels * 2, in_channels * 4, in_channels * 8]
        # 1x1-conv + BN shortcut branches for the encoder blocks below.
        self.downsample1 = nn.Sequential(
            conv1x1(filters[0], filters[1], 1),
            nn.BatchNorm2d(filters[1]),
        )
        self.downsample2 = nn.Sequential(
            conv1x1(filters[1], filters[2], 1),
            nn.BatchNorm2d(filters[2]),
        )
        """
        self.conv1 = block(filters[0], filters[1], downsample=conv1x1(filters[0], filters[1], 1))
        self.conv2 = block(filters[1], filters[2], downsample=conv1x1(filters[1], filters[2], 1))
        """
        # encoder blocks
        self.conv1 = block(filters[0], filters[1], downsample=self.downsample1)
        self.conv2 = block(filters[1], filters[2], downsample=self.downsample2)
        self.down_pooling = nn.MaxPool2d(kernel_size=2)
        # shortcut branches for the decoder blocks
        self.downsample3 = nn.Sequential(
            conv1x1(filters[2], filters[1], 1),
            nn.BatchNorm2d(filters[1]),
        )
        self.downsample4 = nn.Sequential(
            conv1x1(filters[1], filters[0], 1),
            nn.BatchNorm2d(filters[0]),
        )
        """
        self.up_pool3 = up_pooling(filters[2], filters[1])
        self.conv3 = block(filters[2], filters[1], downsample=conv1x1(filters[2], filters[1], 1))
        self.conv4 = block(filters[1], filters[0], downsample=conv1x1(filters[1], filters[0], 1))
        """
        # decoder: learned upsampling followed by residual blocks
        self.up_pool3 = up_pooling(filters[2], filters[1])
        self.conv3 = block(filters[2], filters[1], downsample=self.downsample3)
        self.conv4 = block(filters[1], filters[0], downsample=self.downsample4)
        # init weight
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)

    def forward(self, x):
        """Encode once (conv + 2x max-pool), decode once with a skip
        connection, and return a channel-wise softmax mask."""
        x1 = self.conv1(x)
        p1 = self.down_pooling(x1)
        x2 = self.conv2(p1)
        # upsample and concatenate the matching encoder output
        x3 = self.up_pool3(x2)
        x3 = torch.cat([x3, x1], dim=1)
        x3 = self.conv3(x3)
        x4 = self.conv4(x3)
        output = torch.softmax(x4, dim=1)
        # output = torch.sigmoid(x4)
        return output
class Masking1(nn.Module):
    """Residual masking module of depth 1: two residual blocks (expand to
    2C channels, then back to C) with no pooling, ending in a channel-wise
    softmax. Shares the weight-init scheme of the deeper Masking variants.

    NOTE: submodule creation order is kept unchanged — it fixes the
    state_dict layout of existing checkpoints.
    """

    def __init__(self, in_channels, out_channels, block=BasicBlock):
        # The mask multiplies the input features, so channels must match.
        assert in_channels == out_channels
        super(Masking1, self).__init__()
        # Only filters[0] and filters[1] are used at depth 1.
        filters = [in_channels, in_channels * 2, in_channels * 4, in_channels * 8]
        # 1x1-conv + BN shortcut so the residual addition is shape-compatible.
        self.downsample1 = nn.Sequential(
            conv1x1(filters[0], filters[1], 1),
            nn.BatchNorm2d(filters[1]),
        )
        self.conv1 = block(filters[0], filters[1], downsample=self.downsample1)
        self.downsample2 = nn.Sequential(
            conv1x1(filters[1], filters[0], 1),
            nn.BatchNorm2d(filters[0]),
        )
        self.conv2 = block(filters[1], filters[0], downsample=self.downsample2)
        # init weight
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)

    def forward(self, x):
        """Apply the two residual blocks and return a softmax mask."""
        x1 = self.conv1(x)
        x2 = self.conv2(x1)
        output = torch.softmax(x2, dim=1)
        # output = torch.sigmoid(x2)
        return output
def masking(in_channels, out_channels, depth, block=BasicBlock):
if depth == 1:
return Masking1(in_channels, out_channels, block)
elif depth == 2:
return Masking2(in_channels, out_channels, block)
elif depth == 3:
return Masking3(in_channels, out_channels, block)
elif depth == 4:
return Masking4(in_channels, out_channels, block)
else:
traceback.print_exc()
raise Exception("depth need to be from 0-3") | /rmn-3.1.1-py3-none-any.whl/models/masking.py | 0.926003 | 0.630486 | masking.py | pypi |
import torch
import torch.nn as nn
def block(in_channels, out_channels, kernel_size=3, stride=1, padding=1):
    """Two stacked Conv-BN-ReLU stages.

    The first convolution maps in_channels -> out_channels, the second keeps
    out_channels; with the defaults (k=3, s=1, p=1) spatial size is preserved.
    """

    def conv_bn_relu(c_in, c_out):
        # One convolutional stage: conv, batch-norm, in-place ReLU.
        return [
            nn.Conv2d(
                c_in,
                c_out,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
            ),
            nn.BatchNorm2d(num_features=c_out),
            nn.ReLU(inplace=True),
        ]

    layers = conv_bn_relu(in_channels, out_channels)
    layers += conv_bn_relu(out_channels, out_channels)
    return nn.Sequential(*layers)
def up_pooling(in_channels, out_channels, kernel_size=2, stride=2):
    """Learned upsampling stage: transposed conv, BN, in-place ReLU.

    With the defaults (k=2, s=2) the spatial size is doubled.
    """
    upsample = nn.ConvTranspose2d(
        in_channels, out_channels, kernel_size=kernel_size, stride=stride
    )
    return nn.Sequential(
        upsample,
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    )
class Unet(nn.Module):
    """Classic U-Net encoder/decoder for semantic segmentation.

    Four 2x max-pool stages on the way down, four learned-upsampling stages
    on the way up; each decoder stage concatenates the matching encoder
    feature map (skip connection) before its conv block. The output is a
    per-pixel softmax over ``num_classes``.
    """

    def __init__(self, in_channels, num_classes):
        super().__init__()
        filters = [32, 64, 128, 256, 512]
        # encoder
        self.conv1 = block(in_channels, filters[0])
        self.conv2 = block(filters[0], filters[1])
        self.conv3 = block(filters[1], filters[2])
        self.conv4 = block(filters[2], filters[3])
        self.conv5 = block(filters[3], filters[4])
        self.down_pooling = nn.MaxPool2d(2)
        # decoder: each conv block sees up-pooled + skip channels concatenated,
        # hence its in_channels is one filter size above its out_channels.
        self.up_pool6 = up_pooling(filters[4], filters[3])
        self.conv6 = block(filters[4], filters[3])
        self.up_pool7 = up_pooling(filters[3], filters[2])
        self.conv7 = block(filters[3], filters[2])
        self.up_pool8 = up_pooling(filters[2], filters[1])
        self.conv8 = block(filters[2], filters[1])
        self.up_pool9 = up_pooling(filters[1], filters[0])
        self.conv9 = block(filters[1], filters[0])
        self.conv10 = nn.Conv2d(filters[0], num_classes, 1)
        # default xavier init
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                # Fix: `nn.init.xavier_uniform` is deprecated; use the
                # in-place `xavier_uniform_` (same initialization, no warning).
                nn.init.xavier_uniform_(m.weight)

    def forward(self, x):
        """Return (B, num_classes, H, W) softmax probabilities.

        NOTE(review): skip connections assume H and W survive four halvings
        cleanly — presumably multiples of 16; confirm for odd input sizes.
        """
        x1 = self.conv1(x)
        p1 = self.down_pooling(x1)
        x2 = self.conv2(p1)
        p2 = self.down_pooling(x2)
        x3 = self.conv3(p2)
        p3 = self.down_pooling(x3)
        x4 = self.conv4(p3)
        p4 = self.down_pooling(x4)
        x5 = self.conv5(p4)
        # go up
        p6 = self.up_pool6(x5)
        x6 = torch.cat([p6, x4], dim=1)
        x6 = self.conv6(x6)
        p7 = self.up_pool7(x6)
        x7 = torch.cat([p7, x3], dim=1)
        x7 = self.conv7(x7)
        p8 = self.up_pool8(x7)
        x8 = torch.cat([p8, x2], dim=1)
        x8 = self.conv8(x8)
        p9 = self.up_pool9(x8)
        x9 = torch.cat([p9, x1], dim=1)
        x9 = self.conv9(x9)
        output = self.conv10(x9)
        output = torch.softmax(output, dim=1)
        return output
def basic_unet(in_channels, num_classes):
    """Factory wrapper so all segmentation models share the same call style."""
    return Unet(in_channels, num_classes) | /rmn-3.1.1-py3-none-any.whl/models/segmentation/unet_basic.py | 0.940871 | 0.453322 | unet_basic.py | pypi |
import torch
from torch import nn
from torch.nn import functional as F
from ._utils import _SimpleSegmentationModel
__all__ = ["DeepLabV3"]
class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    The class body is empty: the whole forward/aux behaviour is inherited
    from :class:`_SimpleSegmentationModel`; this subclass only provides the
    name and documentation.

    Arguments:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """
class DeepLabHead(nn.Sequential):
    """DeepLabV3 prediction head: ASPP, a 3x3 conv-BN-ReLU refinement, and a
    1x1 classifier producing ``num_classes`` channels."""

    def __init__(self, in_channels, num_classes):
        layers = [
            ASPP(in_channels, [12, 24, 36]),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, num_classes, 1),
        ]
        super(DeepLabHead, self).__init__(*layers)
class ASPPConv(nn.Sequential):
    """One atrous branch of ASPP: 3x3 dilated conv + BN + ReLU.

    ``padding`` equals ``dilation``, so the spatial size is preserved.
    """

    def __init__(self, in_channels, out_channels, dilation):
        atrous_conv = nn.Conv2d(
            in_channels,
            out_channels,
            3,
            padding=dilation,
            dilation=dilation,
            bias=False,
        )
        super(ASPPConv, self).__init__(
            atrous_conv,
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
class ASPPPooling(nn.Sequential):
    """Image-level branch of ASPP: global average pool, 1x1 conv + BN + ReLU,
    then bilinear upsampling back to the input's spatial size."""

    def __init__(self, in_channels, out_channels):
        stages = [
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        ]
        super(ASPPPooling, self).__init__(*stages)

    def forward(self, x):
        # Remember the input resolution so the pooled 1x1 map can be
        # stretched back to it.
        spatial_size = x.shape[-2:]
        pooled = super(ASPPPooling, self).forward(x)
        return F.interpolate(
            pooled, size=spatial_size, mode="bilinear", align_corners=False
        )
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling.

    Runs five parallel branches over the input — a 1x1 conv, three 3x3
    atrous convs (one per rate in ``atrous_rates``), and a global-pooling
    branch — concatenates them, and projects back to 256 channels.
    """

    def __init__(self, in_channels, atrous_rates):
        super(ASPP, self).__init__()
        out_channels = 256
        modules = []
        # branch 1: plain 1x1 convolution
        modules.append(
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(),
            )
        )
        # branches 2-4: atrous 3x3 convolutions (exactly three rates expected)
        rate1, rate2, rate3 = tuple(atrous_rates)
        modules.append(ASPPConv(in_channels, out_channels, rate1))
        modules.append(ASPPConv(in_channels, out_channels, rate2))
        modules.append(ASPPConv(in_channels, out_channels, rate3))
        # branch 5: image-level pooling
        modules.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(modules)
        # fuse the 5 concatenated branches back down to out_channels
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
        )

    def forward(self, x):
        """Apply all branches to `x`, concatenate on the channel dim, project."""
        res = []
        for conv in self.convs:
            res.append(conv(x))
        res = torch.cat(res, dim=1)
return self.project(res) | /rmn-3.1.1-py3-none-any.whl/models/segmentation/deeplabv3.py | 0.953134 | 0.545346 | deeplabv3.py | pypi |
from .. import resnet
from .._utils import IntermediateLayerGetter
from ..utils import load_state_dict_from_url
from .deeplabv3 import DeepLabHead, DeepLabV3
from .fcn import FCN, FCNHead
__all__ = ["fcn_resnet50", "fcn_resnet101", "deeplabv3_resnet50", "deeplabv3_resnet101"]
model_urls = {
"fcn_resnet50_coco": None,
"fcn_resnet101_coco": "https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth",
"deeplabv3_resnet50_coco": None,
"deeplabv3_resnet101_coco": "https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth",
}
def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):
    """Assemble a segmentation model from a dilated ResNet backbone.

    :param name: "fcn" or "deeplabv3" — selects head and wrapper classes.
    :param backbone_name: a constructor name in :mod:`resnet` (e.g. "resnet50").
    :param num_classes: number of output segmentation classes.
    :param aux: if truthy, attach an auxiliary FCNHead on layer3.
    :param pretrained_backbone: load pretrained backbone weights.
    """
    backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained_backbone,
        replace_stride_with_dilation=[False, True, True],
    )
    # Expose layer4 as "out" (and layer3 as "aux" when requested).
    wanted_layers = {"layer4": "out"}
    if aux:
        wanted_layers["layer3"] = "aux"
    backbone = IntermediateLayerGetter(backbone, return_layers=wanted_layers)
    # layer3 carries 1024 channels, layer4 carries 2048.
    aux_classifier = FCNHead(1024, num_classes) if aux else None
    head_cls, model_cls = {
        "deeplabv3": (DeepLabHead, DeepLabV3),
        "fcn": (FCNHead, FCN),
    }[name]
    classifier = head_cls(2048, num_classes)
    return model_cls(backbone, classifier, aux_classifier)
def _load_model(
    arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs
):
    """Build a segmentation model and optionally load COCO-pretrained weights.

    The published checkpoints were trained with the auxiliary head attached,
    so ``aux_loss`` is forced on whenever ``pretrained`` is requested.
    """
    if pretrained:
        aux_loss = True
    model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)
    if not pretrained:
        return model
    arch = "_".join([arch_type, backbone, "coco"])
    model_url = model_urls[arch]
    if model_url is None:
        raise NotImplementedError(
            "pretrained {} is not supported as of now".format(arch)
        )
    state_dict = load_state_dict_from_url(model_url, progress=progress)
    model.load_state_dict(state_dict)
    return model
def fcn_resnet50(
    pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs
):
    """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.
    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): number of output segmentation classes (default 21)
        aux_loss (bool, optional): attach an auxiliary FCN head on layer3;
            forced to True when ``pretrained`` is set
        **kwargs: forwarded to the builder (e.g. ``pretrained_backbone``)
    """
    return _load_model(
        "fcn", "resnet50", pretrained, progress, num_classes, aux_loss, **kwargs
    )
def fcn_resnet101(
    pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs
):
    """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.
    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): number of output segmentation classes (default 21)
        aux_loss (bool, optional): attach an auxiliary FCN head on layer3;
            forced to True when ``pretrained`` is set
        **kwargs: forwarded to the builder (e.g. ``pretrained_backbone``)
    """
    return _load_model(
        "fcn", "resnet101", pretrained, progress, num_classes, aux_loss, **kwargs
    )
def deeplabv3_resnet50(
    in_channels=3,
    pretrained=False,
    progress=True,
    num_classes=21,
    aux_loss=None,
    **kwargs
):
    """Constructs a DeepLabV3 model with a ResNet-50 backbone.
    Args:
        in_channels (int): NOTE(review) — accepted but never used: it is not
            forwarded to ``_load_model``, so the backbone is presumably always
            built for 3-channel input. Kept for API compatibility; confirm
            before relying on it.
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): number of output segmentation classes (default 21)
        aux_loss (bool, optional): attach an auxiliary FCN head on layer3;
            forced to True when ``pretrained`` is set
        **kwargs: forwarded to the builder (e.g. ``pretrained_backbone``)
    """
    return _load_model(
        "deeplabv3", "resnet50", pretrained, progress, num_classes, aux_loss, **kwargs
    )
def deeplabv3_resnet101(
    pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs
):
    """Constructs a DeepLabV3 model with a ResNet-101 backbone.
    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int): number of output segmentation classes (default 21)
        aux_loss (bool, optional): attach an auxiliary FCN head on layer3;
            forced to True when ``pretrained`` is set
        **kwargs: forwarded to the builder (e.g. ``pretrained_backbone``)
    """
    return _load_model(
        "deeplabv3", "resnet101", pretrained, progress, num_classes, aux_loss, **kwargs
) | /rmn-3.1.1-py3-none-any.whl/models/segmentation/segmentation.py | 0.791257 | 0.33273 | segmentation.py | pypi |
import os
import cv2
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from utils.augmenters.augment import seg
EMOTION_DICT = {
0: "angry",
1: "disgust",
2: "fear",
3: "happy",
4: "sad",
5: "surprise",
6: "neutral",
}
class FER2013(Dataset):
    """FER2013 facial-expression dataset.

    Reads ``{stage}.csv`` from ``configs["data_path"]``; each row holds a
    space-separated 48x48 grayscale pixel string and an emotion label
    (see EMOTION_DICT). Images are resized to ``configs["image_size"]`` and
    stacked to 3 channels. During training a random augmentation (``seg``)
    is applied; at test time with TTA enabled, ``tta_size`` augmented
    copies are returned instead of a single image.
    """

    def __init__(self, stage, configs, tta=False, tta_size=48):
        self._stage = stage
        self._configs = configs
        self._tta = tta
        self._tta_size = tta_size
        self._image_size = (configs["image_size"], configs["image_size"])
        self._data = pd.read_csv(
            os.path.join(configs["data_path"], "{}.csv".format(stage))
        )
        self._pixels = self._data["pixels"].tolist()
        # One-hot frame; idxmax() below recovers the integer class label.
        self._emotions = pd.get_dummies(self._data["emotion"])
        self._transform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.ToTensor(),
            ]
        )

    def is_tta(self):
        """Return True when test-time augmentation is enabled."""
        # Fix: was `self._tta == True` — return a plain bool instead.
        return bool(self._tta)

    def __len__(self):
        return len(self._pixels)

    def __getitem__(self, idx):
        """Return (image, target) — or (list-of-images, target) in TTA mode."""
        pixels = list(map(int, self._pixels[idx].split(" ")))
        image = np.asarray(pixels).reshape(48, 48).astype(np.uint8)
        image = cv2.resize(image, self._image_size)
        image = np.dstack([image] * 3)  # grayscale -> 3 identical channels
        if self._stage == "train":
            image = seg(image=image)
        # Target is independent of the branch below; compute it once.
        target = self._emotions.iloc[idx].idxmax()
        if self._stage == "test" and self._tta:
            images = [seg(image=image) for _ in range(self._tta_size)]
            images = list(map(self._transform, images))
            return images, target
        return self._transform(image), target
def fer2013(stage, configs=None, tta=False, tta_size=48):
    """Factory wrapper giving FER2013 the same call style as other datasets."""
    return FER2013(stage, configs=configs, tta=tta, tta_size=tta_size)
# Quick manual smoke test: dump the first ~200 training images to debug/.
if __name__ == "__main__":
    data = FER2013(
        "train",
        {
            "data_path": "/home/z/research/tee/saved/data/fer2013/",
            "image_size": 224,
            "in_channels": 3,
        },
    )
    import cv2

    # NOTE(review): `targets` is never filled or used.
    targets = []
    for i in range(len(data)):
        image, target = data[i]
        # NOTE(review): __getitem__ applies self._transform, so `image` is a
        # torch tensor here; cv2.imwrite expects a numpy array — confirm this
        # script still works as intended.
        cv2.imwrite("debug/{}.png".format(i), image)
        if i == 200:
break | /rmn-3.1.1-py3-none-any.whl/utils/datasets/fer2013dataset.py | 0.581897 | 0.325574 | fer2013dataset.py | pypi |
import torch
EPS = 1e-10
def nanmean(x):
    """Mean of `x` ignoring NaN entries (NaN != NaN, so they are dropped)."""
    finite = x[~torch.isnan(x)]
    return torch.mean(finite)
def _fast_hist(true, pred, num_classes):
mask = (true >= 0) & (true < num_classes)
hist = (
torch.bincount(
num_classes * true[mask] + pred[mask],
minlength=num_classes**2,
)
.reshape(num_classes, num_classes)
.float()
)
return hist
def overall_pixel_accuracy(hist):
    """Computes the total pixel accuracy.

    The overall pixel accuracy approximates the qualitative perception of a
    label's overall shape, but not its details: a model can score well by
    getting the dominant classes right.

    Args:
        hist: confusion matrix.
    Returns:
        overall_acc: the overall pixel accuracy.
    """
    hits = torch.diag(hist).sum()
    total = hist.sum()
    return hits / (total + EPS)
def per_class_pixel_accuracy(hist):
    """Computes the average per-class pixel accuracy.

    A finer-grained version of the overall pixel accuracy: a model that only
    predicts dominant labels correctly still scores low here, because every
    class contributes equally to the average.

    Args:
        hist: confusion matrix.
    Returns:
        avg_per_class_acc: the average per-class pixel accuracy.
    """
    hits_per_class = torch.diag(hist)
    support_per_class = hist.sum(dim=1)
    per_class_acc = hits_per_class / (support_per_class + EPS)
    # Classes absent from the ground truth yield 0/EPS, not NaN; nanmean
    # guards against any NaN that does appear.
    return nanmean(per_class_acc)
def jaccard_index(hist):
    """Computes the Jaccard index, a.k.a the Intersection over Union (IoU).

    Args:
        hist: confusion matrix.
    Returns:
        avg_jacc: the average per-class jaccard index.
    """
    intersection = torch.diag(hist)
    # union = |true| + |pred| - |true ∩ pred|
    union = hist.sum(dim=1) + hist.sum(dim=0) - intersection
    per_class_iou = intersection / (union + EPS)
    return nanmean(per_class_iou)
def dice_coefficient(hist):
    """Computes the Sørensen–Dice coefficient, a.k.a the F1 score.

    Args:
        hist: confusion matrix.
    Returns:
        avg_dice: the average per-class dice coefficient.
    """
    intersection = torch.diag(hist)
    size_sum = hist.sum(dim=1) + hist.sum(dim=0)
    per_class_dice = (2 * intersection) / (size_sum + EPS)
    return nanmean(per_class_dice)
def eval_metrics(true, pred, num_classes):
    """Computes various segmentation metrics on 2D feature maps.

    Args:
        true: ground-truth labels, a tensor of shape [B, H, W] (or [B, 1, H, W]).
        pred: raw per-class scores of shape [B, C, H, W]; the class is taken
            as argmax over dim 1. (The old docstring claimed pred had the
            same shape as `true`, which contradicted the argmax below.)
        num_classes: the number of classes to segment. This number
            should be less than the ID of the ignored class.
    Returns:
        overall_acc: the overall pixel accuracy.
        avg_per_class_acc: the average per-class pixel accuracy.
        avg_jacc: the jaccard index.
        avg_dice: the dice coefficient.
    """
    pred = torch.argmax(pred, dim=1)
    # Fix: accumulate on the predictions' device instead of hard-coding
    # .cuda(), so the metrics also run on CPU-only machines; GPU behaviour
    # is unchanged.
    hist = torch.zeros((num_classes, num_classes), device=pred.device)
    for t, p in zip(true, pred):
        hist += _fast_hist(t.flatten(), p.flatten(), num_classes)
    overall_acc = overall_pixel_accuracy(hist)
    avg_per_class_acc = per_class_pixel_accuracy(hist)
    avg_jacc = jaccard_index(hist)
    avg_dice = dice_coefficient(hist)
return overall_acc, avg_per_class_acc, avg_jacc, avg_dice | /rmn-3.1.1-py3-none-any.whl/utils/metrics/segment_metrics.py | 0.929216 | 0.811452 | segment_metrics.py | pypi |
from . import DATA_DIR
import csv
REMIND_TO_ECOINVENT_EMISSION_FILEPATH = (DATA_DIR / "ecoinvent_to_gains_emission_mappping.csv")
class InventorySet:
"""
Hosts different filter sets to for ecoinvent activities and exchanges.
It stores:
* material_filters: filters for activities related to materials.
* powerplant_filters: filters for activities related to power generation technologies.
* emissions_map: REMIND emission labels as keys, ecoinvent emission labels as values
The functions :func:`generate_material_map` and :func:`generate_powerplant_map` can
be used to extract the actual activity objects as dictionaries.
These functions return the result of applying :func:`act_fltr` to the filter dictionaries.
"""
material_filters = {
"steel, primary": {"fltr": "steel production, converter", "mask": "hot rolled"},
"steel, secondary": {"fltr": "steel production, electric", "mask": "hot rolled"},
"concrete": {"fltr": "market for concrete,"},
"copper": {"fltr": "market for copper", "filter_exact": True},
"aluminium": {
"fltr": ["market for aluminium, primary", "market for aluminium alloy,"]
},
"electricity": {"fltr": "market for electricity"},
"gas": {"fltr": "market for natural gas,", "mask": ["network", "burned"]},
"diesel": {"fltr": "market for diesel", "mask": ["burned", "electric"]},
"petrol": {"fltr": "market for petrol,", "mask": "burned"},
"freight": {"fltr": "market for transport, freight"},
"cement": {"fltr": "market for cement,"},
"heat": {"fltr": "market for heat,"},
}
fuel_filters = {
"gas": {"fltr": "market for natural gas,", "mask": ["network", "burned"]},
"diesel": {"fltr": "market for diesel", "mask": ["burned", "electric"]},
"petrol": {"fltr": "market for petrol,", "mask": "burned"},
"hard coal": {"fltr": 'market for hard coal', 'mask': ['factory', 'plant', 'briquettes', 'ash']},
"lignite": {"fltr": 'market for lignite', 'mask': ['factory', 'plant', 'briquettes', 'ash']},
"petroleum coke": {"fltr": 'market for petroleum coke'},
"wood pellet": {"fltr": 'market for wood pellet', 'mask': ['factory']},
"natural gas, high pressure": {"fltr": 'market for natural gas, high pressure'},
"natural gas, low pressure": {"fltr": 'market for natural gas, low pressure'},
"heavy fuel oil": {"fltr": 'market for heavy fuel oil', 'mask': ['burned']},
"light fuel oil": {"fltr": 'market for light fuel oil'},
"biogas": {"fltr": 'biogas', 'mask': ['burned']},
"waste": {"fltr": {'reference product': ['waste plastic, mixture']},
'mask': ['market for', 'treatment', 'market group']},
"syngas": {"fltr": 'methane, from electrochemical methanation'},
"synfuel": {"fltr": 'Diesel production, Fischer Tropsch process'},
"hydrogen": {"fltr": 'Hydrogen, gaseous'},
"bioethanol": {"fltr": 'Ethanol from'},
"liquified petroleum gas": {"fltr": 'Liquefied petroleum gas production, from methanol-to-gas process'}
}
powerplant_filters = {
"Biomass IGCC CCS": {
"fltr": [
"electricity production, from CC plant, 100% SNG, truck 25km, post, pipeline 200km, storage 1000m",
"electricity production, at wood burning power plant 20 MW, truck 25km, post, pipeline 200km, storage 1000m",
"electricity production, at BIGCC power plant 450MW, pre, pipeline 200km, storage 1000m",
]
},
"Biomass IGCC": {
"fltr": "electricity production, at BIGCC power plant 450MW, no CCS"
},
"Coal IGCC": {
"fltr": [
"electricity production, at power plant/hard coal, IGCC, no CCS",
"electricity production, at power plant/lignite, IGCC, no CCS",
]
},
"Coal IGCC CCS": {
"fltr": [
"electricity production, at power plant/hard coal, pre, pipeline 200km, storage 1000m",
"electricity production, at power plant/lignite, pre, pipeline 200km, storage 1000m",
]
},
"Coal PC CCS": {
"fltr": [
"electricity production, at power plant/hard coal, post, pipeline 200km, storage 1000m",
"electricity production, at power plant/lignite, post, pipeline 200km, storage 1000m",
]
},
"Gas CCS": {
"fltr": [
"electricity production, at power plant/natural gas, pre, pipeline 200km, storage 1000m",
"electricity production, at power plant/natural gas, post, pipeline 200km, storage 1000m",
]
},
"Biomass CHP": {
"fltr": [
"heat and power co-generation, wood chips",
"heat and power co-generation, biogas",
],
"mask":{"reference product": "heat"}
},
"Coal PC": {
"fltr": [
"electricity production, hard coal",
"electricity production, lignite",
],
"mask": "mine",
},
"Coal CHP": {
"fltr": [
"heat and power co-generation, hard coal",
"heat and power co-generation, lignite",
],
"mask":{"reference product":"heat"}
},
"Gas OC": {
"fltr": "electricity production, natural gas, conventional power plant"
},
"Gas CC": {
"fltr": "electricity production, natural gas, combined cycle power plant"
},
"Gas CHP": {
"fltr": [
"heat and power co-generation, natural gas, combined cycle power plant, 400MW electrical",
"heat and power co-generation, natural gas, conventional power plant, 100MW electrical",
],
"mask":{"reference product":"heat"}
},
"Geothermal": {"fltr": "electricity production, deep geothermal"},
"Hydro": {
"fltr": [
"electricity production, hydro, reservoir",
"electricity production, hydro, run-of-river",
]
},
"Nuclear": {"fltr": "electricity production, nuclear", "mask": "aluminium"},
"Oil": {
"fltr": [
"electricity production, oil",
"heat and power co-generation, oil",
],
"mask": {"name":"aluminium", "reference product":"heat"}
},
"Solar CSP": {
"fltr": [
"electricity production, solar thermal parabolic trough, 50 MW",
"electricity production, solar tower power plant, 20 MW",
]
},
"Solar PV": {"fltr": "electricity production, photovoltaic"},
"Wind": {"fltr": "electricity production, wind"},
}
def __init__(self, db):
    """
    :param db: wurst inventory database (list of activity data sets) that
        the filter sets will be applied to.
    """
    self.db = db
def generate_material_map(self):
    """
    Filter ecoinvent processes related to different material demands,
    using the filter specifications in :attr:`material_filters`.

    :return: dictionary with materials as keys (see :attr:`material_filters`)
        and sets of related ecoinvent activity names as values.
    :rtype: dict
    """
    return self.generate_sets_from_filters(self.material_filters)
def generate_powerplant_map(self):
    """
    Filter ecoinvent processes related to electricity production,
    using the filter specifications in :attr:`powerplant_filters`.

    :return: dictionary with electricity production technologies as keys
        (see :attr:`powerplant_filters`) and sets of related ecoinvent
        activity names as values.
    :rtype: dict
    """
    return self.generate_sets_from_filters(self.powerplant_filters)
def generate_fuel_map(self):
    """
    Filter ecoinvent processes related to fuel supply,
    using the filter specifications in :attr:`fuel_filters`.

    :return: dictionary with fuel names as keys (see :attr:`fuel_filters`)
        and sets of related ecoinvent activity names as values.
    :rtype: dict
    """
    return self.generate_sets_from_filters(self.fuel_filters)
@staticmethod
def get_remind_to_ecoinvent_emissions():
    """
    Retrieve the correspondence between REMIND and ecoinvent emission labels
    from the semicolon-separated mapping file shipped with the package.

    :return: REMIND emission labels as keys and ecoinvent emission labels as values
    :rtype: dict
    :raises FileNotFoundError: if the mapping file is missing.
    """
    if not REMIND_TO_ECOINVENT_EMISSION_FILEPATH.is_file():
        raise FileNotFoundError(
            "The dictionary of emission labels correspondences could not be found."
        )
    with open(REMIND_TO_ECOINVENT_EMISSION_FILEPATH) as f:
        reader = csv.reader(f, delimiter=";")
        return {row[0]: row[1] for row in reader}
@staticmethod
def act_fltr(db, fltr=None, mask=None, filter_exact=False, mask_exact=False):
    """Filter `db` for activities matching field contents given by `fltr` excluding strings in `mask`.

    `fltr`: string, list of strings or dictionary.
        If a string is provided, it is used to match the name field from the start (*startswith*).
        If a list is provided, all strings in the lists are used and results are joined (*or*).
        A dict can be given in the form <fieldname>: <str> to filter for <str> in <fieldname>.
    `mask`: used in the same way as `fltr`, but filters add up with each other (*and*).
    `filter_exact` and `mask_exact`: boolean, set `True` to only allow for exact matches.

    :param db: A life cycle inventory database
    :type db: brightway2 database object
    :param fltr: value(s) to filter with.
    :type fltr: Union[str, lst, dict]
    :param mask: value(s) to filter with.
    :type mask: Union[str, lst, dict]
    :param filter_exact: requires exact match when true.
    :type filter_exact: bool
    :param mask_exact: requires exact match when true.
    :type mask_exact: bool
    :return: list of activity data sets
    :rtype: list
    """
    if fltr is None:
        fltr = {}
    if mask is None:
        mask = {}
    # Bare strings/lists are shorthand for filtering on the "name" field.
    # Fix: use isinstance() instead of `type(x) == list/str` comparisons.
    if isinstance(fltr, (list, str)):
        fltr = {"name": fltr}
    if isinstance(mask, (list, str)):
        mask = {"name": mask}

    def like(a, b):
        # Exact equality or prefix match, depending on `filter_exact`.
        return a == b if filter_exact else a.startswith(b)

    def notlike(a, b):
        # Exact inequality or substring exclusion, depending on `mask_exact`.
        return a != b if mask_exact else b not in a

    assert len(fltr) > 0, "Filter dict must not be empty."
    result = []
    for field, condition in fltr.items():
        # A list of conditions on one field is OR-ed together; normalizing
        # to a list collapses the old duplicated single/list branches.
        conditions = condition if isinstance(condition, list) else [condition]
        for el in conditions:
            result.extend(act for act in db if like(act[field], el))
    for field, condition in mask.items():
        conditions = condition if isinstance(condition, list) else [condition]
        for el in conditions:
            # Mask conditions are AND-ed: each one further narrows `result`.
            result = [act for act in result if notlike(act[field], el)]
    return result
def generate_sets_from_filters(self, filtr):
    """
    Generate a dictionary with sets of activity names for
    technologies from the filter specifications.

    :param filtr: dictionary mapping a technology name to the keyword
        arguments understood by :func:`activity_maps.InventorySet.act_fltr`.
    :return: dictionary with the same keys as provided in filter
        and a set of activity data set names as values.
    :rtype: dict
    """
    techs = {tech: self.act_fltr(self.db, **fltr) for tech, fltr in filtr.items()}
    return {
        tech: set([act["name"] for act in actlst]) for tech, actlst in techs.items()
} | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/activity_maps.py | 0.69987 | 0.569015 | activity_maps.py | pypi |
from . import DATA_DIR
from wurst import searching as ws
import csv
import pprint
import wurst
import bw2io
from bw2data.database import DatabaseChooser
FILEPATH_FIX_NAMES = (DATA_DIR / "fix_names.csv")
FILEPATH_BIOSPHERE_FLOWS = (DATA_DIR / "dict_biosphere.txt")
class DatabaseCleaner:
"""
Class that cleans the datasets contained in the inventory database for further processing.
:ivar destination_db: name of the source database
:vartype destination_db: str
"""
def __init__(self, source_db, source_type, source_file_path):
    """
    :param source_db: name of the source database
    :param source_type: "brightway" or "ecospold"
    :param source_file_path: path to the ecospold data (used only when
        ``source_type == "ecospold"``)
    :raises NameError: if the selected brightway database is empty.
    :raises ValueError: if ``source_type`` is not recognized.
    """
    if source_type == 'brightway':
        # Check that database exists
        if len(DatabaseChooser(source_db)) == 0:
            raise NameError('The database selected is empty. Make sure the name is correct')
        self.db = wurst.extract_brightway2_databases(source_db)
    elif source_type == 'ecospold':
        # The ecospold data needs to be formatted
        ei = bw2io.SingleOutputEcospold2Importer(source_file_path, source_db)
        ei.apply_strategies()
        self.db = ei.data
    else:
        # Fix: previously an unknown source_type silently produced an object
        # without `self.db`, which only failed later with an AttributeError.
        raise ValueError(
            "source_type must be 'brightway' or 'ecospold', got {!r}".format(source_type)
        )
    # Location field is added to exchanges
    self.add_location_field_to_exchanges()
    # Product field is added to exchanges
    self.add_product_field_to_exchanges()
    # Parameter field is converted from a list to a dictionary
    self.transform_parameter_field()
def add_negative_CO2_flows_for_biomass_ccs(self):
    """
    Rescale the amount of all exchanges of carbon dioxide, non-fossil by a factor -9 (.9/-.1),
    to account for sequestered CO2.
    All CO2 capture and storage in the Carma datasets is assumed to be 90% efficient.
    Thus, we can simply find out what the new CO2 emission is and then we know how much gets stored in the ground.
    It's very important that we ONLY do this for biomass CCS plants, as only they will have negative emissions!

    Modifies in place (does not return anything).
    """
    # Only "storage" datasets from the "Carma CCS" database are touched,
    # which is how the biomass-CCS-only restriction above is enforced.
    for ds in ws.get_many(self.db, ws.contains('name', 'storage'), ws.equals('database', 'Carma CCS')):
        for exc in ws.biosphere(ds, ws.equals('name', 'Carbon dioxide, non-fossil')):
            # 0.9 / -0.1 == -9: flips the 10% residual emission into the
            # 90% stored (negative) amount.
            wurst.rescale_exchange(exc, (0.9 / -0.1), remove_uncertainty=True)
@staticmethod
def get_fix_names_dict():
    """
    Loads a csv file into a dictionary. This dictionary contains a few location names
    that need correction in the wurst inventory database.

    :return: dictionary that contains names equivalence
    :rtype: dict
    """
    with open(FILEPATH_FIX_NAMES) as f:
        rows = csv.reader(f, delimiter=";")
        # Skip empty rows (same effect as the old filter(None, ...)).
        return dict(row for row in rows if row)
def get_rev_fix_names_dict(self):
    """
    Reverse the fix_names dictionary (values become keys and vice versa).

    :return: dictionary that contains names equivalence
    :rtype: dict
    """
    forward = self.get_fix_names_dict()
    return {value: key for key, value in forward.items()}
@staticmethod
def remove_nones(db):
    """
    Remove fields whose value is None from every exchange in the datasets
    of the wurst inventory database.
    Modifies in place (does not return anything).

    :param db: wurst inventory database
    :type db: list
    """
    for ds in db:
        # Fix: replaced the `exists = lambda x: ...` assignment (discouraged
        # by PEP 8 / E731) with a direct dict comprehension.
        ds["exchanges"] = [
            {k: v for k, v in exc.items() if v is not None}
            for exc in ds["exchanges"]
        ]
def find_product_given_lookup_dict(self, lookup_dict):
    """
    Return a list of product names, given the filtering conditions given in `lookup_dict`.
    It is, for example, used to return a list of product names based on the name and the unit of a dataset.
    (The previous docstring wrongly said "location names" — the code reads
    the "product" field.)

    :param lookup_dict: a dictionary with filtering conditions
    :return: a list of product names
    :rtype: list
    """
    return [
        x["product"]
        for x in wurst.searching.get_many(
            self.db, *[ws.equals(k, v) for k, v in lookup_dict.items()]
        )
    ]
def find_location_given_lookup_dict(self, lookup_dict):
    """
    Return a list of location names for the datasets in :attr:`db` that
    match the filtering conditions in `lookup_dict`
    (field name -> required value).
    :param lookup_dict: a dictionary with filtering conditions
    :type lookup_dict: dict
    :return: a list of location names
    :rtype: list
    """
    filters = [ws.equals(field, value) for field, value in lookup_dict.items()]
    matching = wurst.searching.get_many(self.db, *filters)
    return [dataset["location"] for dataset in matching]
def add_location_field_to_exchanges(self):
    """
    Add a `location` key to every technosphere exchange in :attr:`db`,
    copied from the activity the exchange's `input` points to.
    :raises KeyError: if an exchange refers to an activity absent from :attr:`db`.
    """
    location_by_key = {}
    for activity in self.db:
        location_by_key[(activity["database"], activity["code"])] = activity["location"]
    for activity in self.db:
        for exchange in activity["exchanges"]:
            if exchange["type"] == "technosphere":
                exchange["location"] = location_by_key[exchange["input"]]
def add_product_field_to_exchanges(self):
    """Add the `product` key to the production and
    technosphere exchanges in :attr:`db`.
    For production exchanges, use the value of the `reference product` field
    of the enclosing dataset.
    For technosphere exchanges, look up the activity in :attr:`db` by its
    code and use its reference product and name.
    :raises KeyError: if no corresponding activity (and reference product) can be found.
    """
    # Create a dictionary that contains the 'code' field as key and the 'product' field as value
    # NOTE(review): keyed by code only, not (database, code) — codes shared
    # across databases would overwrite each other; confirm codes are unique.
    d_product = {a['code']: (a['reference product'], a['name']) for a in self.db}
    # Add a `product` field to the production exchange
    for x in self.db:
        for y in x["exchanges"]:
            if y["type"] == "production":
                if "product" not in y:
                    y["product"] = x["reference product"]
                # Keep the production exchange name aligned with the dataset name.
                if y["name"] != x["name"]:
                    y["name"] = x["name"]
    # Add a `product` field to technosphere exchanges
    for x in self.db:
        for y in x["exchanges"]:
            if y["type"] == "technosphere":
                # Check if the field 'product' is present
                if not 'product' in y:
                    y['product'] = d_product[y['input'][1]][0]
                # If a 'reference product' field is present, we make sure it matches with the new 'product' field
                if 'reference product' in y:
                    try:
                        assert y['product'] == y['reference product']
                    except AssertionError:
                        y['product'] = d_product[y['input'][1]][0]
                # Ensure the name is correct
                y['name'] = d_product[y['input'][1]][1]
def transform_parameter_field(self):
    """
    Convert each dataset's `parameters` list (as imported from ecospold
    files) into a {name: amount} dictionary, in place.
    """
    for dataset in self.db:
        as_dict = {}
        for parameter in dataset["parameters"]:
            as_dict[parameter["name"]] = parameter["amount"]
        dataset["parameters"] = as_dict
# Functions to clean up Wurst import and additional technologies
def fix_unset_technosphere_and_production_exchange_locations(
    self, matching_fields=("name", "unit")
):
    """
    Give all production and technosphere exchanges with a missing location
    the location of the dataset they belong to (production exchanges) or of
    the unique matching dataset found in :attr:`db` (technosphere exchanges).
    Modifies in place (does not return anything).
    :param matching_fields: exchange fields used to look up candidate datasets
    :type matching_fields: tuple
    """
    for ds in self.db:
        # Production exchanges without a location inherit the location
        # of their own dataset.
        for exc in wurst.production(ds):
            if "location" not in exc:
                exc["location"] = ds["location"]
        for exc in wurst.technosphere(ds):
            if "location" not in exc:
                # BUGFIX: `find_location_given_lookup_dict` is a method that
                # already reads `self.db`; passing the database as an extra
                # positional argument raised a TypeError.
                locs = self.find_location_given_lookup_dict(
                    {k: exc.get(k) for k in matching_fields}
                )
                if len(locs) == 1:
                    exc["location"] = locs[0]
                else:
                    # Ambiguous or no match: report instead of guessing.
                    print(
                        "No unique location found for exchange:\n{}\nFound: {}".format(
                            pprint.pformat(exc), locs
                        )
                    )
def prepare_datasets(self):
    """
    Clean datasets for all databases listed in scenarios: fix location names, remove
    None-valued exchange fields, etc.
    :return: the cleaned wurst database
    :rtype: list
    """
    # Set missing locations to ```GLO``` for datasets in ``database``
    print("Set missing location of datasets to global scope.")
    wurst.default_global_location(self.db)
    # Set missing locations to ```GLO``` for exchanges in ``datasets``
    print("Set missing location of production exchanges to scope of dataset.")
    print("Correct missing location of technosphere exchanges.")
    self.fix_unset_technosphere_and_production_exchange_locations()
    # Remove empty exchanges
    print("Remove empty exchanges.")
    self.remove_nones(self.db)
    return self.db | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/clean_datasets.py | 0.696887 | 0.345519 | clean_datasets.py | pypi |
from wurst.geo import geomatcher
from rmnd_lca import DATA_DIR
REGION_MAPPING_FILEPATH = (DATA_DIR / "regionmappingH12.csv")
class Geomap:
    """
    Map ecoinvent locations to REMIND regions and vice-versa.
    """
    def __init__(self):
        # Geomatcher topology extended with REMIND region definitions.
        self.geo = self.get_REMIND_geomatcher()
@staticmethod
def get_REMIND_geomatcher():
    """
    Load the geomatcher object from the `constructive_geometries` library and
    add REMIND region definitions to it.
    It is used to find correspondences between REMIND and ecoinvent region names.
    :return: geomatcher object
    :rtype: wurst.geo.geomatcher
    """
    with open(REGION_MAPPING_FILEPATH) as f:
        f.readline()  # skip the header row
        csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
    iso_region_pairs = [(x[1], x[2]) for x in csv_list]
    # Countries present in the mapping file but unknown to the geomatcher
    countries_not_found = ["CC", "CX", "GG", "JE", "BL"]
    rmnd_to_iso = {}
    iso_to_rmnd = {}
    # Build a dictionary that maps region names (used by REMIND) to ISO country codes
    # And a reverse dictionary that maps ISO country codes to region names
    for ISO, region in iso_region_pairs:
        if ISO not in countries_not_found:
            rmnd_to_iso.setdefault(region, []).append(ISO)
            # BUGFIX: the reverse map used to be keyed by region
            # (`iso_to_rmnd[region] = ISO`), which overwrote entries and
            # did not map ISO codes to regions at all.
            iso_to_rmnd[ISO] = region
    geo = geomatcher
    geo.add_definitions(rmnd_to_iso, "REMIND")
    return geo
def remind_to_ecoinvent_location(self, location, contained=False):
    """
    Find the corresponding ecoinvent region(s) given a REMIND region.
    :param location: name of a REMIND region
    :type location: str
    :param contained: if True, return only regions fully contained in
        `location`; otherwise return all intersecting regions.
    :type contained: bool
    :return: list of ecoinvent region names (["GLO"] for "World");
        implicitly returns None if the location is unknown to the geomatcher.
    :rtype: list
    """
    if location != "World":
        location = ("REMIND", location)
        ecoinvent_locations = []
        try:
            searchfunc = (self.geo.contained
                          if contained else self.geo.intersects)
            for r in searchfunc(location):
                # Plain strings are ecoinvent locations; tuples are
                # (topology, name) pairs from other topologies.
                if not isinstance(r, tuple):
                    ecoinvent_locations.append(r)
                else:
                    if r[0] != "REMIND":
                        ecoinvent_locations.append(r[1])
            # TODO: Dirty trick. In the future, "CA" should be removed from "RNA". Also, "GLO" should not appear.
            if location == ("REMIND", "USA"):
                ecoinvent_locations = [e for e in ecoinvent_locations if "CA" not in e]
            # Current behaviour of `intersects` is to include "GLO" in all REMIND regions.
            if location != ("REMIND", "World"):
                ecoinvent_locations = [e for e in ecoinvent_locations if e != "GLO"]
            return ecoinvent_locations
        except KeyError:
            # NOTE(review): falls through and implicitly returns None here;
            # callers may expect a list — confirm whether [] was intended.
            print("Can't find location {} using the geomatcher.".format(location))
    else:
        return ["GLO"]
def ecoinvent_to_remind_location(self, location):
    """
    Return a REMIND region name for a 2-digit ISO country code given.
    Set rules in case two REMIND regions are within the ecoinvent region.
    :param location: 2-digit ISO country code
    :type location: str
    :return: REMIND region name; implicitly returns None when no region is found
    :rtype: str
    """
    # Hard-coded shortcuts for locations the geomatcher cannot resolve.
    mapping = {"GLO": "World", "RoW": "CAZ", "IAI Area, Russia & RER w/o EU27 & EFTA": "REF"}
    if location in mapping:
        return mapping[location]
    remind_location = [
        r[1]
        for r in self.geo.within(location)
        if r[0] == "REMIND" and r[1] != "World"
    ]
    # Tie-break rules: for each ambiguous pair (key), the region to drop (value).
    mapping = {
        ("AFR", "MEA"): "AFR",
        ("AFR", "SSA"): "AFR",
        ("EUR", "NEU"): "EUR",
        ("EUR", "REF"): "EUR",
        ("OAS", "CHA"): "OAS",
        ("OAS", "EUR"): "OAS",
        ("OAS", "IND"): "OAS",
        ("OAS", "JPN"): "OAS",
        ("OAS", "MEA"): "OAS",
        ("OAS", "REF"): "OAS",
        ("USA", "CAZ"): "USA",
    }
    # If we have more than one REMIND region
    if len(remind_location) > 1:
        # TODO: find a more elegant way to do that
        for key, value in mapping.items():
            # We need to find the most specific REMIND region
            if len(set(remind_location).intersection(set(key))) == 2:
                remind_location.remove(value)
        return remind_location[0]
    elif len(remind_location) == 0:
        # NOTE(review): prints and implicitly returns None — confirm callers handle this.
        print("no location for {}".format(location))
    else:
        return remind_location[0] | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/geomap.py | 0.579638 | 0.411584 | geomap.py | pypi |
import os
from . import DATA_DIR
import csv
FILEPATH_BIOSPHERE_FLOWS = (DATA_DIR / "flows_biosphere.csv")
class Export:
    """
    Class that exports the transformed data into matrices:
    * A matrix: contains products exchanges
    * B matrix: contains exchanges activities and the biosphere
    The A and B matrices are exported as csv files in a sparse representation (only non-zero values are listed), like so:
    - index row, index column, value of exchange
    Dictionaries to map row numbers to activities and products names are also exported.
    :ivar db: transformed database
    :vartype db: dict
    :ivar scenario: name of a Remind scenario
    :vartype scenario: str
    :ivar year: year of a Remind scenario
    :vartype year: int
    """
    def __init__(self, db, scenario, year):
        self.db = db
        self.scenario = scenario
        self.year = year
def export_db_to_matrices(self):
    """
    Write the A and B matrices (sparse triplet CSV files) and their
    row/column index dictionaries to ``DATA_DIR / "matrices"``.
    Technosphere and biosphere amounts are written with a negated sign.
    """
    index_A = self.create_index_of_A_matrix()
    filepath = DATA_DIR / "matrices"
    # exist_ok avoids a race between the existence check and creation.
    os.makedirs(filepath, exist_ok=True)
    # Export A matrix
    with open(filepath / 'A_matrix.csv', 'w') as f:
        writer = csv.writer(f, delimiter=';', lineterminator='\n', )
        writer.writerow(['index of activity', 'index of product', 'value'])
        for ds in self.db:
            for exc in ds['exchanges']:
                if exc['type'] == 'production':
                    row = [index_A[(ds['name'], ds['reference product'], ds['unit'], ds['location'])],
                           index_A[(exc['name'], exc['product'], exc['unit'], exc['location'])],
                           exc['amount']]
                    writer.writerow(row)
                if exc['type'] == 'technosphere':
                    row = [index_A[(ds['name'], ds['reference product'], ds['unit'], ds['location'])],
                           index_A[(exc['name'], exc['product'], exc['unit'], exc['location'])],
                           exc['amount'] * -1]
                    writer.writerow(row)
    # Export A index
    with open(filepath / 'A_matrix_index.csv', 'w') as f:
        writer = csv.writer(f, delimiter=';', lineterminator='\n', )
        for d in index_A:
            writer.writerow([d, index_A[d]])
    index_B = self.create_index_of_B_matrix()
    rev_index_B = self.create_rev_index_of_B_matrix()
    # Export B matrix
    with open(filepath / 'B_matrix.csv', 'w') as f:
        writer = csv.writer(f, delimiter=';', lineterminator='\n', )
        writer.writerow(['index of activity', 'index of biosphere flow', 'value'])
        for ds in self.db:
            for exc in ds['exchanges']:
                if exc['type'] == 'biosphere':
                    try:
                        row = [
                            index_A[(ds['name'], ds['reference product'], ds['unit'], ds['location'])],
                            index_B[rev_index_B[exc['input'][1]]],
                            exc['amount'] * -1
                        ]
                    except KeyError:
                        # BUGFIX: previously the row from the *previous*
                        # iteration was written again here (NameError on the
                        # first miss); now the unmapped flow is reported and
                        # skipped.
                        print(exc)
                    else:
                        writer.writerow(row)
    # Export B index
    with open(filepath / 'B_matrix_index.csv', 'w') as f:
        writer = csv.writer(f, delimiter=';', lineterminator='\n', )
        for d in index_B:
            writer.writerow([d, index_B[d]])
    print("Matrices saved in {}.".format(filepath))
def create_index_of_A_matrix(self):
    """
    Create a dictionary mapping (activity name, reference product, unit,
    location) tuples to their row/column index in the A matrix.
    :return: a dictionary to map activities to indices
    :rtype: dict
    """
    # enumerate() replaces the error-prone range(len(...)) indexing idiom.
    return {
        (ds["name"], ds["reference product"], ds["unit"], ds["location"]): i
        for i, ds in enumerate(self.db)
    }
@staticmethod
def create_index_of_B_matrix():
    """
    Map biosphere flow identifiers (second CSV column) to their row index
    in the B matrix.
    :return: dictionary {flow identifier: row index}
    :rtype: dict
    :raises FileNotFoundError: if the biosphere flow file is missing.
    """
    if not FILEPATH_BIOSPHERE_FLOWS.is_file():
        raise FileNotFoundError(
            "The dictionary of biosphere flows could not be found."
        )
    csv_dict = {}
    with open(FILEPATH_BIOSPHERE_FLOWS) as f:
        # enumerate() replaces the manual counter of the original version.
        for i, row in enumerate(csv.reader(f, delimiter=";")):
            csv_dict[row[1]] = i
    return csv_dict
@staticmethod
def create_rev_index_of_B_matrix():
    """
    Map biosphere flow names (first CSV column) to their identifiers
    (second CSV column).
    :return: dictionary {flow name: flow identifier}
    :rtype: dict
    :raises FileNotFoundError: if the biosphere flow file is missing.
    """
    if not FILEPATH_BIOSPHERE_FLOWS.is_file():
        raise FileNotFoundError(
            "The dictionary of biosphere flows could not be found."
        )
    csv_dict = {}
    with open(FILEPATH_BIOSPHERE_FLOWS) as f:
        input_dict = csv.reader(f, delimiter=";")
        for row in input_dict:
            csv_dict[row[0]] = row[1]
    return csv_dict | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/export.py | 0.560974 | 0.561335 | export.py | pypi |
from . import DATA_DIR
import pandas as pd
import xarray as xr
from pathlib import Path
import csv
import numpy as np
REMIND_ELEC_MARKETS = (DATA_DIR / "electricity" / "remind_electricity_markets.csv")
REMIND_ELEC_EFFICIENCIES = (DATA_DIR / "electricity" / "remind_electricity_efficiencies.csv")
REMIND_ELEC_EMISSIONS = (DATA_DIR / "electricity" / "remind_electricity_emissions.csv")
GAINS_TO_REMIND_FILEPATH = (DATA_DIR / "GAINStoREMINDtechmap.csv")
GNR_DATA = (DATA_DIR / "cement" / "additional_data_GNR.csv")
class RemindDataCollection:
    """
    Class that extracts data from REMIND output files.
    :ivar scenario: name of a Remind scenario
    :vartype scenario: str
    :ivar year: year of the Remind scenario to consider
    :vartype year: int
    :ivar filepath_remind_files: directory containing the REMIND .mif files
    :vartype filepath_remind_files: str
    """
    def __init__(self, scenario, year, filepath_remind_files):
        self.scenario = scenario
        self.year = year
        self.filepath_remind_files = filepath_remind_files
        # Raw REMIND, GAINS and GNR data arrays.
        self.data = self.get_remind_data()
        self.gains_data = self.get_gains_data()
        self.gnr_data = self.get_gnr_data()
        # Label dictionaries (REMIND variable names <-> short labels), plus reverses.
        self.electricity_market_labels = self.get_remind_electricity_market_labels()
        self.electricity_efficiency_labels = (
            self.get_remind_electricity_efficiency_labels()
        )
        self.electricity_emission_labels = self.get_remind_electricity_emission_labels()
        self.rev_electricity_market_labels = self.get_rev_electricity_market_labels()
        self.rev_electricity_efficiency_labels = (
            self.get_rev_electricity_efficiency_labels()
        )
        # Year-interpolated views used by the transformation steps.
        self.electricity_markets = self.get_remind_electricity_markets()
        self.electricity_efficiencies = self.get_remind_electricity_efficiencies()
        self.electricity_emissions = self.get_gains_electricity_emissions()
        self.cement_emissions = self.get_gains_cement_emissions()
        self.steel_emissions = self.get_gains_steel_emissions()
@staticmethod
def get_remind_electricity_emission_labels():
    """
    Load the csv file of REMIND electricity-emission labels into a dict.
    :return: dictionary that contains emission names equivalence
    :rtype: dict
    """
    with open(REMIND_ELEC_EMISSIONS) as csv_file:
        rows = csv.reader(csv_file, delimiter=";")
        return dict(row for row in rows if row)
@staticmethod
def get_remind_electricity_market_labels():
    """
    Load the csv file of REMIND electricity-market labels into a dict.
    :return: dictionary that contains market names equivalence
    :rtype: dict
    """
    with open(REMIND_ELEC_MARKETS) as csv_file:
        rows = csv.reader(csv_file, delimiter=";")
        return dict(row for row in rows if row)
@staticmethod
def get_remind_electricity_efficiency_labels():
    """
    Load the csv file of REMIND electricity-efficiency labels into a dict.
    :return: dictionary that contains technology names equivalence
    :rtype: dict
    """
    with open(REMIND_ELEC_EFFICIENCIES) as csv_file:
        rows = csv.reader(csv_file, delimiter=";")
        return dict(row for row in rows if row)
def get_rev_electricity_market_labels(self):
    """Return the electricity-market label mapping with keys and values swapped."""
    reversed_map = {}
    for key, value in self.electricity_market_labels.items():
        reversed_map[value] = key
    return reversed_map
def get_rev_electricity_efficiency_labels(self):
    """Return the electricity-efficiency label mapping with keys and values swapped."""
    reversed_map = {}
    for key, value in self.electricity_efficiency_labels.items():
        reversed_map[value] = key
    return reversed_map
def get_remind_data(self):
    """
    Read the REMIND .mif result file and return an `xarray` with dimensions:
    * region
    * variables
    * year
    :return: a multi-dimensional array with Remind data
    :rtype: xarray.core.dataarray.DataArray
    """
    filename = self.scenario + ".mif"
    filepath = Path(self.filepath_remind_files) / filename
    df = pd.read_csv(
        filepath, sep=";", index_col=["Region", "Variable", "Unit"]
    ).drop(columns=["Model", "Scenario"])
    # A trailing ';' on each line produces one extra, empty column.
    # BUGFIX: the original test was `len(df.columns == 20)`, i.e. the length
    # of a boolean array — always truthy — so the last (year) column was
    # dropped unconditionally. The intended check is on the column count.
    if len(df.columns) == 20:
        df.drop(columns=df.columns[-1], inplace=True)
    df.columns = df.columns.astype(int)
    df = df.reset_index()
    # Keep only the variable groups used downstream.
    list_var = (
        "SE",
        "Tech",
        "FE",
        "Production",
        "Emi|CCO2",
        "Emi|CO2"
    )
    df = df.loc[
        df["Variable"].str.startswith(list_var)
    ]
    df = df.rename(columns={"Region": "region", "Variable": "variables", "Unit": "unit"})
    # Melt years into a column, then build the (region, variables, year) array.
    array = df.melt(id_vars=["region", "variables", "unit"],
                    var_name="year",
                    value_name="value")[["region", "variables", "year", "value"]] \
        .groupby(["region", "variables", "year"])["value"].mean().to_xarray()
    return array
def get_gains_data(self):
    """
    Read the GAINS emissions csv file and return an `xarray` with dimensions:
    * region
    * pollutant
    * sector
    * year
    Values are converted from Mt/TWa to Mt/TWh on return.
    :return: a multi-dimensional array with GAINS emissions data
    :rtype: xarray.core.dataarray.DataArray
    """
    filename = "GAINS emission factors.csv"
    filepath = (DATA_DIR / "remind_output_files" / filename)
    gains_emi = pd.read_csv(
        filepath,
        skiprows=4,
        names=["year", "region", "GAINS", "pollutant", "scenario", "factor"],
    )
    gains_emi["unit"] = "Mt/TWa"
    # Keep only the SSP2 scenario rows.
    gains_emi = gains_emi[gains_emi.scenario == "SSP2"]
    sector_mapping = pd.read_csv(GAINS_TO_REMIND_FILEPATH).drop(
        ["noef", "elasticity"], axis=1
    )
    # Attach the GAINS->REMIND sector mapping, drop unmapped rows,
    # and pivot years into columns.
    gains_emi = (
        gains_emi.join(sector_mapping.set_index("GAINS"), on="GAINS")
        .dropna()
        .drop(["scenario", "REMIND"], axis=1)
        .pivot_table(
            index=["region", "GAINS", "pollutant", "unit"],
            values="factor",
            columns="year",
        )
    )
    gains_emi = gains_emi.reset_index()
    # Melt back to long form and rename the sector column.
    gains_emi = gains_emi.melt(id_vars=["region", "pollutant", "unit", 'GAINS'],
                               var_name="year",
                               value_name="value")[["region", "pollutant", 'GAINS', 'year', 'value']]
    gains_emi = gains_emi.rename(columns={'GAINS': 'sector'})
    array = gains_emi.groupby(["region", "pollutant", 'year', 'sector'])["value"].mean().to_xarray()
    return array / 8760  # per TWha --> per TWh
def get_gnr_data(self):
    """
    Read the GNR csv file on cement production and return an `xarray` with dimensions:
    * region
    * year
    * variables
    Missing values are linearly inter-/extrapolated along `year`, the array is
    interpolated at :attr:`year`, and any remaining NaN is set to 0.
    :return: a multi-dimensional array with GNR data
    :rtype: xarray.core.dataarray.DataArray
    """
    df = pd.read_csv(
        GNR_DATA)
    df = df[["region", "year", "variables", "value"]]
    gnr_array = df.groupby(["region", "year", "variables"]).mean()["value"].to_xarray()
    gnr_array = gnr_array.interpolate_na(dim='year', method='linear', fill_value='extrapolate')
    gnr_array = gnr_array.interp(year=self.year)
    gnr_array = gnr_array.fillna(0)
    return gnr_array
def get_remind_electricity_markets(self, drop_hydrogen=True):
    """
    This method retrieves the market share for each electricity-producing technology, for a specified year,
    for each region provided by REMIND. Shares are normalized per region so they sum to 1.
    Electricity production from hydrogen can be removed from the mix (unless specified, it is removed).
    :param drop_hydrogen: removes hydrogen from the region-specific electricity mix if `True`.
    :type drop_hydrogen: bool
    :return: a multi-dimensional array with electricity technologies market share for a given year, for all regions.
    :rtype: xarray.core.dataarray.DataArray
    :raises KeyError: if :attr:`year` lies outside the REMIND time range.
    """
    # If hydrogen is not to be considered, it is removed from the technologies labels list
    if drop_hydrogen:
        list_technologies = [
            l
            for l in list(self.electricity_market_labels.values())
            if "Hydrogen" not in l
        ]
    else:
        list_technologies = list(self.electricity_market_labels.values())
    # If the year specified is not contained within the range of years given by REMIND
    if (
        self.year < self.data.year.values.min()
        or self.year > self.data.year.values.max()
    ):
        raise KeyError("year not valid, must be between 2005 and 2150")
    # Finally, if the specified year falls in between two periods provided by REMIND
    else:
        # Normalize each technology by the regional total, then interpolate.
        data_to_interp_from = self.data.loc[
            :, list_technologies, :
        ] / self.data.loc[:, list_technologies, :].groupby("region").sum(dim="variables")
        return data_to_interp_from.interp(year=self.year)
def get_remind_fuel_mix_for_ldvs(self):
    """
    This method retrieves the fuel production mix
    used in the transport sector for LDVs,
    for a specified year, for each region provided by REMIND.
    Note that synthetic fuels are preferred by LDVs so the share is much larger
    compared to the general blend supplied to the transport sector.
    :return: a multi-dimensional array with market share for a given year, for all regions.
    :rtype: xarray.core.dataarray.DataArray
    :raises KeyError: if :attr:`year` lies outside the REMIND time range.
    """
    # add fossil fuel entry
    data = xr.concat([
        self.data,
        (self.data.loc[:, "FE|Transport|Liquids|Coal", :] +
         self.data.loc[:, "FE|Transport|Liquids|Oil", :]).expand_dims({
            "variables": ["FE|Transport|Liquids|Fossil"]
        })], dim="variables")
    hydro_techs = [
        "SE|Liquids|Hydrogen",
        "FE|Transport|Pass|Road|LDV|Liquids",
    ]
    hydro = data.loc[:, hydro_techs, :]
    # all synthetic liquids to LDVs
    hydro = (hydro.loc[:, "SE|Liquids|Hydrogen"]
             /data.loc[:, "FE|Transport|Pass|Road|LDV|Liquids"])
    # Cap the hydrogen share at 1 (synthetic supply can exceed LDV demand).
    hydro = hydro.where(hydro < 1, 1)
    other_techs = [
        "FE|Transport|Liquids|Biomass",
        "FE|Transport|Liquids|Fossil"
    ]
    others = data.loc[:, other_techs]
    # Strip the "FE|Transport|Liquids|" prefix (21 characters) from labels.
    others.coords["variables"] = others.coords["variables"]\
        .str[21:]
    others = others / others.sum(dim="variables")
    # concat: hydrogen share plus the remainder split between biomass/fossil
    full = xr.concat([
        hydro.expand_dims({"variables": ["Hydrogen"]}),
        (1-hydro) * others], "variables")
    # shares all sum to 1?
    assert(np.allclose(full.sum(dim="variables"), 1))
    # If the year specified is not contained within
    # the range of years given by REMIND
    if (
        self.year < self.data.year.values.min()
        or self.year > self.data.year.values.max()
    ):
        raise KeyError("year not valid, must be between 2005 and 2150")
    # Finally, if the specified year falls in
    # between two periods provided by REMIND
    else:
        # Interpolation between two periods
        return full.interp(year=self.year).transpose()
def get_remind_electricity_efficiencies(self, drop_hydrogen=True):
    """
    This method retrieves efficiency values for electricity-producing technology, for a specified year,
    for each region provided by REMIND. Values are returned as ratios (percent / 100).
    Electricity production from hydrogen can be removed from the mix (unless specified, it is removed).
    :param drop_hydrogen: removes hydrogen from the region-specific electricity mix if `True`.
    :type drop_hydrogen: bool
    :return: a multi-dimensional array with electricity technologies efficiencies for a given year, for all regions.
    :rtype: xarray.core.dataarray.DataArray
    :raises KeyError: if :attr:`year` lies outside the REMIND time range.
    """
    # If hydrogen is not to be considered, it is removed from the technologies labels list
    if drop_hydrogen:
        list_technologies = [
            l
            for l in list(self.electricity_efficiency_labels.values())
            if "Hydrogen" not in l
        ]
    else:
        list_technologies = list(self.electricity_efficiency_labels.values())
    # If the year specified is not contained within the range of years given by REMIND
    if (
        self.year < self.data.year.values.min()
        or self.year > self.data.year.values.max()
    ):
        raise KeyError("year not valid, must be between 2005 and 2150")
    # Finally, if the specified year falls in between two periods provided by REMIND
    else:
        # Interpolation between two periods
        data_to_interp_from = self.data.loc[:, list_technologies, :]
        return (
            data_to_interp_from.interp(year=self.year) / 100
        )  # Percentage to ratio
def get_gains_electricity_emissions(self):
    """
    This method retrieves emission values for electricity-producing technology, for a specified year,
    for each region provided by GAINS.
    :return: a multi-dimensional array with emissions for different technologies for a given year, for all regions.
    :rtype: xarray.core.dataarray.DataArray
    :raises KeyError: if :attr:`year` lies outside the GAINS time range.
    """
    # If the year specified is not contained within the range of years given by REMIND
    if (
        self.year < self.gains_data.year.values.min()
        or self.year > self.gains_data.year.values.max()
    ):
        raise KeyError("year not valid, must be between 2005 and 2150")
    # Finally, if the specified year falls in between two periods provided by REMIND
    else:
        # Keep only the electricity sectors, interpolated at the scenario year.
        return self.gains_data.sel(sector=[v for v in self.electricity_emission_labels.values()]) \
            .interp(year=self.year)
def get_gains_cement_emissions(self):
    """
    Retrieve GAINS emission factors for cement production ("CEMENT" sector),
    interpolated at :attr:`year`, for all regions.
    :return: a multi-dimensional array with emissions for a given year, for all regions.
    :rtype: xarray.core.dataarray.DataArray
    :raises KeyError: if :attr:`year` lies outside the GAINS time range.
    """
    years = self.gains_data.year.values
    if not years.min() <= self.year <= years.max():
        raise KeyError("year not valid, must be between 2005 and 2150")
    # Interpolate the CEMENT sector at the scenario year.
    return self.gains_data.sel(sector='CEMENT').interp(year=self.year)
def get_gains_steel_emissions(self):
    """
    This method retrieves emission values for steel production, for a specified year,
    for each region provided by GAINS.
    :return: a multi-dimensional array with emissions for a given year, for all regions.
    :rtype: xarray.core.dataarray.DataArray
    :raises KeyError: if :attr:`year` lies outside the GAINS time range.
    """
    # If the year specified is not contained within the range of years given by REMIND
    if (
        self.year < self.gains_data.year.values.min()
        or self.year > self.gains_data.year.values.max()
    ):
        raise KeyError("year not valid, must be between 2005 and 2150")
    # Finally, if the specified year falls in between two periods provided by REMIND
    else:
        # Interpolation between two periods
        return self.gains_data.sel(sector='STEEL').interp(year=self.year) | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/data_collection.py | 0.596316 | 0.317439 | data_collection.py | pypi |
from . import DATA_DIR
import csv
import pandas as pd
CO2_FUELS = DATA_DIR / "fuel_co2_emission_factor.txt"
LHV_FUELS = DATA_DIR / "fuels_lower_heating_value.txt"
CLINKER_RATIO_ECOINVENT_36 = DATA_DIR / "cement" / "clinker_ratio_ecoinvent_36.csv"
CLINKER_RATIO_ECOINVENT_35 = DATA_DIR / "cement" / "clinker_ratio_ecoinvent_35.csv"
CLINKER_RATIO_REMIND = DATA_DIR / "cement" / "clinker_ratios.csv"
REMIND_TO_FUELS = DATA_DIR / "steel" / "remind_fuels_correspondance.txt"
def eidb_label(scenario, year):
    """Return the database label "ecoinvent_<scenario>_<year>".

    :param scenario: name of a REMIND scenario
    :type scenario: str
    :param year: scenario year
    :type year: int
    :rtype: str
    """
    return "ecoinvent_{}_{}".format(scenario, year)
def get_correspondance_remind_to_fuels():
    """
    Return a dictionary with REMIND fuels as keys and ecoinvent activity names
    and reference products as values.
    :return: dict
    :rtype: dict
    """
    correspondance = {}
    with open(REMIND_TO_FUELS) as f:
        for row in csv.reader(f, delimiter=";"):
            correspondance[row[0]] = {
                "fuel name": row[1],
                "activity name": row[2],
                "reference product": row[3],
            }
    return correspondance
def get_fuel_co2_emission_factors():
    """
    Return a dictionary with fuel names as keys and, as values:
    * CO_2 emission factor, in kg CO2 per MJ of lower heating value
    * share of biogenic CO2
    Source: https://www.plateformeco2.ch/portal/documents/10279/16917/IPCC+(2006),%20Guidelines+for+National+Greenhouse+Gas+Inventories.pdf/a3838a98-5ad6-4da5-82f3-c9430007a158
    :return: dict
    """
    with open(CO2_FUELS) as f:
        return {
            row[0]: {"co2": float(row[1]), "bio_share": float(row[2])}
            for row in csv.reader(f, delimiter=";")
        }
def get_lower_heating_values():
    """
    Load the lower-heating-value CSV into a {fuel name: float} dictionary.
    Mostly taken from: https://www.engineeringtoolbox.com/fuels-higher-calorific-values-d_169.html
    :return: dictionary that contains lower heating values
    :rtype: dict
    """
    with open(LHV_FUELS) as f:
        heating_values = {}
        for row in csv.reader(f, delimiter=";"):
            if row:
                fuel, value = row
                heating_values[fuel] = float(value)
        return heating_values
def get_clinker_ratio_ecoinvent(version):
    """
    Return a dictionary with (cement name, location) tuples as keys and
    clinker-to-cement ratios as values, as found in ecoinvent.
    :param version: ecoinvent version (3.5 selects the 3.5 file, anything else the 3.6 file)
    :return: dict
    """
    fp = CLINKER_RATIO_ECOINVENT_35 if version == 3.5 else CLINKER_RATIO_ECOINVENT_36
    with open(fp) as f:
        return {(row[0], row[1]): float(row[2]) for row in csv.reader(f)}
def get_clinker_ratio_remind(year):
    """
    Return an array with the average clinker-to-cement ratio per region,
    as given by REMIND, interpolated at `year`.
    :param year: year to interpolate at
    :type year: int
    :return: xarray with a `region` dimension
    """
    df = pd.read_csv(
        CLINKER_RATIO_REMIND)
    return df.groupby(["region", "year"]) \
        .mean()["value"] \
        .to_xarray() \
        .interp(year=year) | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/utils.py | 0.589953 | 0.244397 | utils.py | pypi |
import wurst
from wurst import searching as ws
import itertools
from .geomap import Geomap
from .activity_maps import InventorySet
from .utils import *
import uuid
import copy
class Steel:
    """
    Class that modifies steel markets in ecoinvent based on REMIND output data.
    :ivar db: wurst inventory database
    :vartype db: list
    :ivar rmd: REMIND data collection
    :vartype rmd: RemindDataCollection
    :ivar year: year of the Remind scenario
    :vartype year: int
    """
    def __init__(self, db, rmd, year):
        self.db = db
        self.rmd = rmd
        self.year = year
        # REMIND data interpolated at the scenario year.
        self.steel_data = self.rmd.data.interp(year=self.year)
        self.fuels_lhv = get_lower_heating_values()
        self.fuels_co2 = get_fuel_co2_emission_factors()
        self.remind_fuels = get_correspondance_remind_to_fuels()
        self.geo = Geomap()
        # Maps between REMIND labels and ecoinvent dataset/emission names.
        mapping = InventorySet(self.db)
        self.emissions_map = mapping.get_remind_to_ecoinvent_emissions()
        self.fuel_map = mapping.generate_fuel_map()
        self.material_map = mapping.generate_material_map()
def fetch_proxies(self, name):
    """
    Fetch dataset proxies, given a dataset `name`.
    Store a copy for each REMIND region.
    If a REMIND region does not find a fitting ecoinvent location,
    fetch a dataset with a "RoW" location ("GLO" for markets).
    Delete original datasets from the database and log them to a CSV file.
    :param name: name of the dataset to duplicate per REMIND region
    :type name: str
    :return: dictionary {REMIND region: copied dataset}
    :rtype: dict
    """
    # NOTE(review): if several ecoinvent locations map to the same REMIND
    # region, later entries overwrite earlier ones here — confirm intended.
    d_map = {
        self.geo.ecoinvent_to_remind_location(d['location']): d['location']
        for d in ws.get_many(
            self.db,
            ws.equals("name", name)
        )
    }
    list_remind_regions = [
        c[1] for c in self.geo.geo.keys()
        if type(c) == tuple and c[0] == "REMIND"
    ]
    # Markets fall back to "GLO", other datasets to "RoW".
    if 'market' in name:
        d_remind_to_eco = {r: d_map.get(r, "GLO") for r in list_remind_regions}
    else:
        d_remind_to_eco = {r: d_map.get(r, "RoW") for r in list_remind_regions}
    d_act = {}
    for d in d_remind_to_eco:
        try:
            ds = ws.get_one(
                self.db,
                ws.equals("name", name),
                ws.equals("location", d_remind_to_eco[d]),
            )
            # Deep-copy so each region gets an independent dataset with a
            # fresh code and the REMIND region as location.
            d_act[d] = copy.deepcopy(ds)
            d_act[d]["location"] = d
            d_act[d]["code"] = str(uuid.uuid4().hex)
        except ws.NoResults:
            print('No dataset {} found for the REMIND region {}'.format(name, d))
            continue
        for prod in ws.production(d_act[d]):
            prod['location'] = d
    # Log the datasets that are about to be removed.
    deleted_markets = [
        (act['name'], act['reference product'], act['location']) for act in self.db
        if act["name"] == name
    ]
    with open(DATA_DIR / "logs/log deleted steel datasets.csv", "a") as csv_file:
        writer = csv.writer(csv_file,
                            delimiter=';',
                            lineterminator='\n')
        for line in deleted_markets:
            writer.writerow(line)
    # Remove old datasets
    self.db = [act for act in self.db
               if act["name"] != name]
    return d_act
@staticmethod
def remove_exchanges(d, list_exc):
keep = lambda x: {
k: v
for k, v in x.items()
if not any(ele in x["name"] for ele in list_exc)
}
for r in d:
d[r]["exchanges"] = [keep(exc) for exc in d[r]["exchanges"]]
d[r]["exchanges"] = [v for v in d[r]["exchanges"] if v]
return d
@staticmethod
def get_shares_from_production_volume(ds):
    """
    Return shares of supply based on production volumes
    :param ds: list of datasets
    :return: dictionary with (dataset name, location, reference product, unit)
        as keys, shares as values. Shares total 1.
    :rtype: dict
    """
    dict_act = {}
    total_production_volume = 0
    for act in ds:
        for exc in ws.production(act):
            # Default to 1 when the production volume is missing.
            dict_act[(act["name"], act["location"], act["reference product"], act["unit"])] = float(
                exc.get("production volume", 1)
            )
            total_production_volume += float(exc.get("production volume", 1))
    for d in dict_act:
        # NOTE(review): raises ZeroDivisionError if all production volumes
        # are zero — confirm upstream data guarantees a positive total.
        dict_act[d] /= total_production_volume
    return dict_act
def get_suppliers_of_a_region(
    self, remind_regions, ecoinvent_technologies, reference_product
):
    """
    Return datasets whose location, name and reference product match the
    given REMIND regions, dataset names and reference product, respectively.
    :param remind_regions: list of REMIND regions
    :type remind_regions: list
    :param ecoinvent_technologies: list of names of ecoinvent datasets
    :type ecoinvent_technologies: list
    :param reference_product: reference product
    :type reference_product: str
    :return: generator of wurst datasets
    :rtype: generator
    """
    # Expand every REMIND region into its ecoinvent locations, then flatten.
    list_regions = [self.geo.remind_to_ecoinvent_location(region)
                    for region in remind_regions]
    list_regions = [x for y in list_regions for x in y]
    return ws.get_many(
        self.db,
        *[
            ws.either(
                *[
                    ws.equals("name", supplier)
                    for supplier in ecoinvent_technologies
                ]
            ),
            ws.either(
                *[
                    ws.equals("location", loc)
                    for loc in list_regions
                ]
            ),
            ws.equals("reference product", reference_product),
        ]
    )
def relink_datasets(self, name, ref_product):
    """
    For technosphere exchanges in :attr:`db` that refer to the dataset
    (`name`, `ref_product`), change their location to the REMIND location
    of the consuming dataset, to effectively link the newly built dataset(s).
    :param name: dataset name
    :type name: str
    :param ref_product: reference product of the dataset
    :type ref_product: str
    """
    list_remind_regions = [
        c[1] for c in self.geo.geo.keys() if type(c) == tuple and c[0] == "REMIND"
    ]
    for act in self.db:
        for exc in act['exchanges']:
            # BUGFIX: the previous bare try/except printed a malformed
            # exchange and then crashed with a KeyError on the very next
            # line; report it and skip it instead.
            if "name" not in exc:
                print(exc)
                continue
            if (exc['name'], exc.get('product')) == (name, ref_product) and exc['type'] == 'technosphere':
                if act['location'] not in list_remind_regions:
                    # "North America without Quebec" has no REMIND mapping.
                    if act['location'] == "North America without Quebec":
                        exc['location'] = 'USA'
                    else:
                        exc['location'] = self.geo.ecoinvent_to_remind_location(act['location'])
                else:
                    exc['location'] = act['location']
    def update_pollutant_emissions(self, ds):
        """
        Update pollutant emissions based on GAINS data.

        Rescales the biosphere exchanges of `ds` whose names appear in
        :attr:`emissions_map` to the GAINS emission value for the dataset's
        region.

        :param ds: a wurst dataset
        :type ds: dict
        :return: the same dataset, with rescaled biosphere exchanges
        :rtype: dict
        """
        # Update biosphere exchanges according to GAINS emission values
        for exc in ws.biosphere(
            ds, ws.either(*[ws.contains("name", x) for x in self.emissions_map])
        ):
            remind_emission_label = self.emissions_map[exc["name"]]

            try:
                remind_emission = self.rmd.steel_emissions.loc[
                    dict(
                        region=ds["location"],
                        pollutant=remind_emission_label
                    )
                ].values.item(0)
            except KeyError:
                # TODO: fix this.
                # GAINS does not have a 'World' region, hence we use China as a temporary fix
                remind_emission = self.rmd.steel_emissions.loc[
                    dict(
                        region='CHA',
                        pollutant=remind_emission_label
                    )
                ].values.item(0)

            if exc["amount"] == 0:
                # NOTE(review): rescaling a zero amount by `remind_emission / 1`
                # still yields zero if `rescale_exchange` multiplies the amount —
                # presumably the intent was to *set* the exchange to the GAINS
                # value; confirm against wurst's rescale_exchange semantics.
                wurst.rescale_exchange(
                    exc, remind_emission / 1, remove_uncertainty=True
                )
            else:
                # Scale the existing amount so that it equals the GAINS value.
                wurst.rescale_exchange(exc, remind_emission / exc["amount"])

        return ds
    def adjust_recycled_steel_share(self, dict_act):
        """
        Adjust the supply shares of primary and secondary steel, based on REMIND data.

        Removes the existing 'steel production' inputs, then appends one
        input of converter-based (primary) steel and one of electric-furnace
        (secondary) steel, with shares derived from REMIND production volumes.

        :param dict_act: dictionary with REMIND region as keys and datasets as values.
        :type dict_act: dict
        :return: same dictionary, with modified exchanges
        :rtype: dict
        """
        dict_act = self.remove_exchanges(dict_act, ['steel production'])

        for d, act in dict_act.items():
            remind_region = d

            # Primary share = primary production / total production; the
            # secondary share is the complement.
            total_production_volume = self.steel_data.sel(region=remind_region, variables='Production|Industry|Steel')
            primary_share = (self.steel_data.sel(region=remind_region, variables='Production|Industry|Steel|Primary') / total_production_volume).values
            secondary_share = 1 - primary_share

            # Primary steel supplier: converter-based steel production
            # (RoW dataset used as proxy; the exchange location is set to
            # the REMIND region below).
            ds = ws.get_one(self.db,
                            ws.equals('reference product', act['reference product']),
                            ws.contains('name', 'steel production'),
                            ws.contains('name', 'converter'),
                            ws.contains('location', 'RoW'))

            act['exchanges'].append(
                {
                    "uncertainty type": 0,
                    "loc": 1,
                    "amount": primary_share,
                    "type": "technosphere",
                    "production volume": 1,
                    "product": ds['reference product'],
                    "name": ds['name'],
                    "unit": ds['unit'],
                    "location": remind_region,
                }
            )

            # Secondary steel supplier: electric-furnace steel production
            # (same RoW-proxy approach).
            ds = ws.get_one(self.db,
                            ws.equals('reference product', act['reference product']),
                            ws.contains('name', 'steel production'),
                            ws.contains('name', 'electric'),
                            ws.contains('location', 'RoW'))

            act['exchanges'].append(
                {
                    "uncertainty type": 0,
                    "loc": 1,
                    "amount": secondary_share,
                    "type": "technosphere",
                    "production volume": 1,
                    "product": ds['reference product'],
                    "name": ds['name'],
                    "unit": ds['unit'],
                    "location": remind_region,
                }
            )

        return dict_act
    def generate_activities(self):
        """
        This function generates new activities for primary and secondary steel production and add them to the ecoinvent db.

        :return: NOTHING. Returns a modified database with newly added steel activities for the corresponding year
        """
        print("The validity of the datasets produced from the integration of the steel sector is not yet fully tested. Consider the results with caution.")

        print('Log of deleted cement datasets saved in {}'.format(DATA_DIR / 'logs'))
        print('Log of created cement datasets saved in {}'.format(DATA_DIR / 'logs'))

        # Start fresh log files (header row only); created datasets are
        # appended at the end of this method.
        with open(DATA_DIR / "logs/log deleted steel datasets.csv", "w") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=';',
                                lineterminator='\n')
            writer.writerow(['dataset name', 'reference product', 'location'])

        with open(DATA_DIR / "logs/log created steel datasets.csv", "w") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=';',
                                lineterminator='\n')
            writer.writerow(['dataset name', 'reference product', 'location'])

        print('Create steel markets for differention regions')
        print('Adjust primary and secondary steel supply shares in steel markets')

        created_datasets = list()

        # Markets whose primary/secondary supply shares are adjusted
        # according to REMIND production volumes.
        for i in (
            ("market for steel, low-alloyed", "steel, low-alloyed"),
            ("market for steel, chromium steel 18/8", "steel, chromium steel 18/8")
        ):
            act_steel = self.fetch_proxies(i[0])
            act_steel = self.adjust_recycled_steel_share(act_steel)
            self.db.extend([v for v in act_steel.values()])

            created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                     for act in act_steel.values()])

            self.relink_datasets(i[0], i[1])

        # Markets that are only regionalized (no share adjustment).
        for i in (
            ("market for steel, unalloyed", "steel, unalloyed"),
            ("market for steel, chromium steel 18/8, hot rolled", "steel, chromium steel 18/8, hot rolled"),
            ("market for steel, low-alloyed, hot rolled", "steel, low-alloyed, hot rolled")
        ):
            act_steel = self.fetch_proxies(i[0])
            self.db.extend([v for v in act_steel.values()])

            created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                     for act in act_steel.values()])

            self.relink_datasets(i[0], i[1])

        print('Relink new steel markets to steel-consuming activities')

        # Determine all steel activities in the db. Delete old datasets.
        print('Create new steel production datasets and delete old datasets')
        d_act_primary_steel = {mat: self.fetch_proxies(mat) for mat in self.material_map['steel, primary']}
        d_act_secondary_steel = {mat: self.fetch_proxies(mat) for mat in self.material_map['steel, secondary']}
        d_act_steel = {**d_act_primary_steel, **d_act_secondary_steel}

        # Delete fuel exchanges and delete empty exchanges. Fuel exchanges to remove:
        list_fuels = [
            "diesel",
            "coal",
            "lignite",
            "coke",
            "fuel",
            "meat",
            "gas",
            "oil",
            "electricity",
        ]
        d_act_steel = {k: self.remove_exchanges(v, list_fuels) for k, v in d_act_steel.items()}

        # List final energy carriers used in steel production
        l_FE = [v.split('|') for v in self.steel_data.coords['variables'].values
                if "FE" in v and "steel" in v.lower()
                and 'electricity' not in v.lower()]

        # List second energy carriers
        l_SE = [v.split('|') for v in self.steel_data.coords['variables'].values
                if "SE" in v
                and 'electricity' not in v.lower()
                and 'fossil' not in v.lower()]

        # Filter second energy carriers used in steel production,
        # grouped by carrier (the second '|'-separated field).
        # TODO: for now, we ignore CCS
        list_second_fuels = sorted(list(set(['|'.join(x) for x in l_SE if len(x) == 3 for y in l_FE if y[2] in x])))
        list_second_fuels = [list(g) for _, g in itertools.groupby(list_second_fuels, lambda x: x.split('|')[1])]

        # Loop through primary steel technologies
        for d in d_act_steel:
            # Loop through REMIND regions
            for k in d_act_steel[d]:
                fuel_fossil_co2, fuel_biogenic_co2 = 0, 0

                # Get amount of fuel per fuel type
                # NOTE(review): only 'Primary' FE variables are iterated here,
                # also for secondary steel datasets — confirm that secondary
                # steel is meant to receive only the electricity input added
                # further below.
                for count, fuel_type in enumerate(['|'.join(y) for y in l_FE if 'Primary' in y]):

                    # Amount of specific fuel, for a specific region, split
                    # across the secondary-energy carriers of the same group.
                    fuel_amount = self.steel_data.sel(variables=fuel_type, region=k)\
                        * (self.steel_data.sel(variables=list_second_fuels[count], region=k)\
                        / self.steel_data.sel(variables=list_second_fuels[count], region=k).sum(dim='variables'))

                    # Divide the amount of fuel by steel production, to get unitary efficiency
                    fuel_amount /= self.steel_data.sel(region=k, variables='Production|Industry|Steel|Primary')

                    # Convert from EJ per Mt steel to MJ per kg steel
                    fuel_amount *= 1000

                    for c, i in enumerate(fuel_amount):
                        if i > 0:
                            fuel_name, activity_name, fuel_ref_prod = self.remind_fuels[list_second_fuels[count][c]].values()
                            fuel_lhv = self.fuels_lhv[fuel_name]

                            # Energy (MJ) -> fuel quantity via lower heating value.
                            fuel_qty = i.values.item(0) / fuel_lhv

                            # Split resulting CO2 into fossil and biogenic parts.
                            fuel_fossil_co2 += fuel_qty * self.fuels_co2[fuel_name]["co2"] * (1 - self.fuels_co2[fuel_name]["bio_share"])
                            fuel_biogenic_co2 += fuel_qty * self.fuels_co2[fuel_name]["co2"] * self.fuels_co2[fuel_name]["bio_share"]

                            # Fetch respective shares based on production volumes
                            fuel_suppliers = self.get_shares_from_production_volume(
                                self.get_suppliers_of_a_region([k],
                                                               [activity_name],
                                                               fuel_ref_prod))
                            # Fall back to World/EUR suppliers when the region
                            # has no matching supplier.
                            if len(fuel_suppliers) == 0:
                                fuel_suppliers = self.get_shares_from_production_volume(
                                    self.get_suppliers_of_a_region(['World', 'EUR'],
                                                                   [activity_name],
                                                                   fuel_ref_prod))
                            new_exchanges = []

                            for supplier in fuel_suppliers:
                                new_exchanges.append({
                                    "uncertainty type": 0,
                                    "loc": 1,
                                    "amount": fuel_suppliers[supplier] * fuel_qty,
                                    "type": "technosphere",
                                    "production volume": 1,
                                    "product": supplier[2],
                                    "name": supplier[0],
                                    "unit": supplier[3],
                                    "location": supplier[1],
                                })

                            d_act_steel[d][k]['exchanges'].extend(new_exchanges)

                # Update fossil CO2 exchange
                try:
                    fossil_co2_exc = [e for e in d_act_steel[d][k]['exchanges'] if e['name'] == 'Carbon dioxide, fossil'][0]
                    fossil_co2_exc['amount'] = fuel_fossil_co2
                    fossil_co2_exc['uncertainty type'] = 0
                except IndexError:
                    # There isn't a fossil CO2 emissions exchange (e.g., electric furnace)
                    # NOTE(review): this fallback stores the *fossil* CO2 amount
                    # but uses the non-fossil flow name and biosphere uuid —
                    # looks like a copy-paste from the biogenic branch below;
                    # confirm whether it should reference 'Carbon dioxide, fossil'.
                    fossil_co2_exc = {
                        "uncertainty type": 0,
                        "loc": 1,
                        "amount": fuel_fossil_co2,
                        "type": "biosphere",
                        "production volume": 0,
                        "name": "Carbon dioxide, non-fossil",
                        "unit": "kilogram",
                        "input": ('biosphere3', 'eba59fd6-f37e-41dc-9ca3-c7ea22d602c7'),
                        "categories": ('air',),
                    }
                    d_act_steel[d][k]['exchanges'].append(fossil_co2_exc)

                try:
                    # Update biogenic CO2 exchange, minus CO2 captured
                    biogenic_co2_exc = [e for e in d_act_steel[d][k]['exchanges'] if e['name'] == 'Carbon dioxide, non-fossil'][0]
                    biogenic_co2_exc['amount'] = fuel_biogenic_co2
                    biogenic_co2_exc['uncertainty type'] = 0
                except IndexError:
                    # There isn't a biogenic CO2 emissions exchange
                    biogenic_co2_exc = {
                        "uncertainty type": 0,
                        "loc": 1,
                        "amount": fuel_biogenic_co2,
                        "type": "biosphere",
                        "production volume": 0,
                        "name": "Carbon dioxide, non-fossil",
                        "unit": "kilogram",
                        "input": ('biosphere3', 'eba59fd6-f37e-41dc-9ca3-c7ea22d602c7'),
                        "categories": ('air',),
                    }
                    d_act_steel[d][k]['exchanges'].append(biogenic_co2_exc)

                # Electricity consumption per kg of steel
                # Electricity, in EJ per year, divided by steel production, in Mt per year
                # Convert to obtain kWh/kg steel
                if d in self.material_map['steel, primary']:
                    electricity = (self.steel_data.sel(region=k, variables = 'FE|Industry|Electricity|Steel|Primary').values\
                        / self.steel_data.sel(region=k,
                                              variables='Production|Industry|Steel|Primary').values)\
                        * 1000 / 3.6
                else:
                    electricity = (self.steel_data.sel(region=k, variables = 'FE|Industry|Electricity|Steel|Secondary').values\
                        / self.steel_data.sel(region=k,
                                              variables='Production|Industry|Steel|Secondary').values)\
                        * 1000 / 3.6

                # Add electricity exchange
                d_act_steel[d][k]['exchanges'].append({
                    "uncertainty type": 0,
                    "loc": 1,
                    "amount": electricity,
                    "type": "technosphere",
                    "production volume": 0,
                    "product": 'electricity, medium voltage',
                    "name": 'market group for electricity, medium voltage',
                    "unit": 'kilowatt hour',
                    "location": k,
                })

                # Relink all activities to the newly created activities
                # (values captured on the last region iteration; presumably
                # name/reference product are identical across regions —
                # TODO confirm)
                name = d_act_steel[d][k]['name']
                ref_prod = d_act_steel[d][k]['reference product']

            # Update non fuel-related emissions according to GAINS
            d_act_steel[d] = {k: self.update_pollutant_emissions(v) for k, v in d_act_steel[d].items()}

            self.db.extend([v for v in d_act_steel[d].values()])

            # Relink new steel activities to steel-consuming activities
            self.relink_datasets(name, ref_prod)

            created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                     for act in d_act_steel[d].values()])

        print('Relink new steel production activities to specialty steel markets and other steel-consuming activities ')

        # Append every created dataset to the log opened above.
        with open(DATA_DIR / "logs/log created steel datasets.csv", "a") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=';',
                                lineterminator='\n')
            for line in created_datasets:
                writer.writerow(line)
        return self.db
from . import DATA_DIR
import wurst
from prettytable import PrettyTable
from wurst import searching as ws
from bw2io import ExcelImporter, Migration
from bw2io.importers.base_lci import LCIImporter
from carculator import (
CarInputParameters,
fill_xarray_from_input_parameters,
CarModel,
InventoryCalculation,
create_fleet_composition_from_REMIND_file,
extract_electricity_mix_from_REMIND_file,
extract_biofuel_shares_from_REMIND,
)
from pathlib import Path
import csv
import uuid
import numpy as np
FILEPATH_BIOSPHERE_FLOWS = DATA_DIR / "dict_biosphere.txt"
class BaseInventoryImport:
    """
    Base class for inventories that are to be merged with the ecoinvent database.

    :ivar db: the target database for the import (the Ecoinvent database),
              unpacked to a list of dicts
    :vartype db: list
    :ivar version: the target Ecoinvent database version
    :vartype version: float
    :ivar import_db: the database to be merged with ecoinvent
    :vartype import_db: LCIImporter
    """

    def __init__(self, database, version, path):
        """Create a :class:`BaseInventoryImport` instance.

        :param list database: the target database for the import (the Ecoinvent database),
                              unpacked to a list of dicts
        :param float version: the version of the target database
        :param path: Path to the imported inventory.
        :type path: str or Path
        :raises FileNotFoundError: if no file exists at `path`
        """
        self.db = database
        # Pre-computed lookup lists used by `check_for_duplicates`.
        self.db_code = [x["code"] for x in self.db]
        self.db_names = [
            (x["name"], x["reference product"], x["location"]) for x in self.db
        ]
        self.version = version
        self.biosphere_dict = self.get_biosphere_code()

        path = Path(path)
        if not path.is_file():
            raise FileNotFoundError(
                "The inventory file {} could not be found.".format(path)
            )
        self.load_inventory(path)

    def load_inventory(self, path):
        """Load an inventory from a specified path.

        Sets the :attr:`import_db` attribute. Subclasses override this.

        :param str path: Path to the inventory file
        :returns: Nothing.
        """
        pass

    def prepare_inventory(self):
        """Prepare the inventory for the merger with Ecoinvent.

        Modifies :attr:`import_db` in-place. Subclasses override this.

        :returns: Nothing
        """
        pass

    def check_for_duplicates(self):
        """
        Check whether the inventories to be imported are not
        already in the source database.

        Datasets matching an existing `code` or an existing
        (name, reference product, location) triple are reported
        and removed from :attr:`import_db`.
        """
        # print if we find datasets that already exist
        already_exist = [
            (x["name"], x["reference product"], x["location"])
            for x in self.import_db.data
            if x["code"] in self.db_code
        ]
        already_exist.extend(
            [
                (x["name"], x["reference product"], x["location"])
                for x in self.import_db.data
                if (x["name"], x["reference product"], x["location"]) in self.db_names
            ]
        )

        if len(already_exist) > 0:
            print(
                "The following datasets to import already exist in the source database. They will not be imported"
            )
            t = PrettyTable(["Name", "Reference product", "Location"])
            for ds in already_exist:
                # Truncate long fields so the table stays readable.
                t.add_row([ds[0][:40], ds[1][:30], ds[2]])
            print(t)

        self.import_db.data = [
            x for x in self.import_db.data if x["code"] not in self.db_code
        ]
        self.import_db.data = [
            x
            for x in self.import_db.data
            if (x["name"], x["reference product"], x["location"]) not in self.db_names
        ]

    def merge_inventory(self):
        """Prepare :attr:`import_db` and merge the inventory to the ecoinvent :attr:`db`.

        Calls :meth:`prepare_inventory`. Changes the :attr:`db` attribute.

        :returns: Nothing
        """
        self.prepare_inventory()
        # Extend with the list of datasets, not with the LCIImporter object
        # itself (the importer is not the dataset list — its `data` attribute is).
        self.db.extend(self.import_db.data)

    def search_exchanges(self, srchdict):
        """Search :attr:`import_db` by field values.

        :param dict srchdict: dict with the name of the fields and the values.
        :returns: the activities with the exchanges that match the search
            (one entry per matching exchange).
        :rtype: list
        """
        results = []
        for act in self.import_db.data:
            for ex in act["exchanges"]:
                # The exchange matches when every (field, value) pair of
                # `srchdict` is present in it.
                if len(srchdict.items() - ex.items()) == 0:
                    results.append(act)
        return results

    def search_missing_field(self, field):
        """Find exchanges and activities that do not contain a specific field
        in :attr:`import_db`

        :param str field: label of the field to search for.
        :returns: a list of dictionaries, activities and exchanges
        :rtype: list
        """
        results = []
        for act in self.import_db.data:
            if field not in act:
                results.append(act)
            for ex in act["exchanges"]:
                if ex["type"] == "technosphere" and field not in ex:
                    results.append(ex)
        return results

    @staticmethod
    def get_biosphere_code():
        """
        Retrieve a dictionary with biosphere flow names and uuid codes.

        Reads the semicolon-delimited biosphere flow file shipped with the
        package (name; top category; sub category; unit; uuid).

        :returns: dictionary with biosphere flow names as keys and uuid code as values
        :rtype: dict
        :raises FileNotFoundError: if the biosphere flow file is missing
        """
        if not FILEPATH_BIOSPHERE_FLOWS.is_file():
            raise FileNotFoundError(
                "The dictionary of biosphere flows could not be found."
            )

        csv_dict = {}

        with open(FILEPATH_BIOSPHERE_FLOWS) as f:
            input_dict = csv.reader(f, delimiter=";")
            for row in input_dict:
                csv_dict[(row[0], row[1], row[2], row[3])] = row[4]

        return csv_dict

    def add_product_field_to_exchanges(self):
        """Add the `product` key to the production and
        technosphere exchanges in :attr:`import_db`.

        Also add `code` field if missing.

        For production exchanges, use the value of the `reference_product` field.
        For technosphere exchanges, search the activities in :attr:`import_db` and
        use the reference product. If none is found, search the Ecoinvent :attr:`db`.

        Modifies the :attr:`import_db` attribute in place.

        :raises IndexError: if no corresponding activity (and reference product) can be found.
        """
        # Add a `product` field to the production exchange
        for x in self.import_db.data:
            for y in x["exchanges"]:
                if y["type"] == "production":
                    if "product" not in y:
                        y["product"] = x["reference product"]

                    # Keep the production exchange name aligned with the
                    # activity name.
                    if y["name"] != x["name"]:
                        y["name"] = x["name"]

        # Add a `product` field to technosphere exchanges
        for x in self.import_db.data:
            for y in x["exchanges"]:
                if y["type"] == "technosphere":
                    # Check if the field 'product' is present
                    if "product" not in y:
                        y["product"] = self.correct_product_field(y)

                    # If a 'reference product' field is present, make sure
                    # it matches the 'product' field (explicit comparison
                    # instead of assert, which is stripped under `python -O`).
                    if "reference product" in y and y["product"] != y["reference product"]:
                        y["product"] = self.correct_product_field(y)

        # Add a `code` field if missing
        for x in self.import_db.data:
            if "code" not in x:
                x["code"] = uuid.uuid4().hex

    def correct_product_field(self, exc):
        """
        Find the correct name for the `product` field of the exchange

        :param exc: a dataset exchange
        :return: name of the product field of the exchange
        :rtype: str
        :raises IndexError: if no activity matches the exchange's
            name/location/unit in either database
        """
        # Look first in the imported inventories
        possibles = [
            a["reference product"]
            for a in self.import_db.data
            if a["name"] == exc["name"]
            and a["location"] == exc["location"]
            and a["unit"] == exc["unit"]
        ]

        # If not, look in the ecoinvent inventories
        if len(possibles) == 0:
            possibles = [
                a["reference product"]
                for a in self.db
                if a["name"] == exc["name"]
                and a["location"] == exc["location"]
                and a["unit"] == exc["unit"]
            ]
        if len(possibles) > 0:
            return possibles[0]
        else:
            raise IndexError(
                "An inventory exchange in {} cannot be linked to the biosphere or the ecoinvent database: {}".format(
                    self.import_db.db_name, exc
                )
            )

    def add_biosphere_links(self, delete_missing=False):
        """Add links for biosphere exchanges to :attr:`import_db`

        Modifies the :attr:`import_db` attribute in place.

        :param bool delete_missing: if True, biosphere exchanges that cannot
            be found in the biosphere dictionary are dropped instead of
            raising a :exc:`KeyError`.
        """
        for x in self.import_db.data:
            for y in x["exchanges"]:
                if y["type"] == "biosphere":
                    if isinstance(y["categories"], str):
                        y["categories"] = tuple(y["categories"].split("::"))

                    # The biosphere dictionary is keyed by
                    # (name, category, sub-category, unit); a missing
                    # sub-category defaults to "unspecified".
                    if len(y["categories"]) > 1:
                        sub_category = y["categories"][1]
                    else:
                        sub_category = "unspecified"

                    try:
                        y["input"] = (
                            "biosphere3",
                            self.biosphere_dict[
                                (
                                    y["name"],
                                    y["categories"][0],
                                    sub_category,
                                    y["unit"],
                                )
                            ],
                        )
                    except KeyError:
                        if delete_missing:
                            y["flag_deletion"] = True
                        else:
                            raise
            x["exchanges"] = [ex for ex in x["exchanges"] if "flag_deletion" not in ex]

    def remove_ds_and_modifiy_exchanges(self, name, ex_data):
        """
        Remove an activity dataset from :attr:`import_db` and replace the corresponding
        technosphere exchanges by what is given as second argument.

        (Method name kept as-is — including the typo — for backward compatibility.)

        :param str name: name of activity to be removed
        :param dict ex_data: data to replace the corresponding exchanges
        :returns: Nothing
        """
        self.import_db.data = [
            act for act in self.import_db.data if not act["name"] == name
        ]

        for act in self.import_db.data:
            for ex in act["exchanges"]:
                if ex["type"] == "technosphere" and ex["name"] == name:
                    ex.update(ex_data)
                    # make sure there is no existing link
                    if "input" in ex:
                        del ex["input"]
class CarmaCCSInventory(BaseInventoryImport):
    """Carbon capture and storage (CCS) power plant datasets from the Carma project."""

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        if self.version == 3.6:
            # apply some updates to comply with ei 3.6
            new_technosphere_data = {
                "fields": ["name", "reference product", "location"],
                "data": [
                    (
                        ("market for water, decarbonised, at user", (), "GLO"),
                        {
                            "name": "market for water, decarbonised",
                            "reference product": "water, decarbonised",
                            "location": "DE",
                        },
                    ),
                    (
                        (
                            "market for water, completely softened, from decarbonised water, at user",
                            (),
                            "GLO",
                        ),
                        {
                            "name": "market for water, completely softened",
                            "reference product": "water, completely softened",
                            "location": "RER",
                        },
                    ),
                    (
                        ("market for steam, in chemical industry", (), "GLO"),
                        {
                            "location": "RER",
                            "reference product": "steam, in chemical industry",
                        },
                    ),
                    (
                        ("market for steam, in chemical industry", (), "RER"),
                        {"reference product": "steam, in chemical industry",},
                    ),
                    (
                        ("zinc-lead mine operation", ("zinc concentrate",), "GLO"),
                        {
                            "name": "zinc mine operation",
                            "reference product": "bulk lead-zinc concentrate",
                        },
                    ),
                    (
                        ("market for aluminium oxide", ("aluminium oxide",), "GLO"),
                        {
                            "name": "market for aluminium oxide, non-metallurgical",
                            "reference product": "aluminium oxide, non-metallurgical",
                            "location": "IAI Area, EU27 & EFTA",
                        },
                    ),
                    (
                        (
                            "platinum group metal mine operation, ore with high rhodium content",
                            ("nickel, 99.5%",),
                            "ZA",
                        ),
                        {
                            "name": "platinum group metal, extraction and refinery operations",
                        },
                    ),
                ],
            }

            Migration("migration_36").write(
                new_technosphere_data,
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("migration_36")

        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Add carbon storage for CCS technologies
        print("Add fossil carbon dioxide storage for CCS technologies.")
        self.add_negative_CO2_flows_for_biomass_CCS()

        # Check for duplicates
        self.check_for_duplicates()

    def add_negative_CO2_flows_for_biomass_CCS(self):
        """
        Rescale the amount of all exchanges of carbon dioxide, non-fossil by a factor -9 (.9/-.1),
        to account for sequestered CO2.

        All CO2 capture and storage in the Carma datasets is assumed to be 90% efficient.
        Thus, we can simply find out what the new CO2 emission is and then we know how much gets stored in the ground.
        It's very important that we ONLY do this for biomass CCS plants, as only they will have negative emissions!

        We also rename the emission to 'Carbon dioxide, from soil or biomass stock' so that it is properly
        characterized by IPCC's GWP100a method.
        NOTE(review): the renaming described above is NOT performed by the code
        below (it only rescales) — confirm whether this is still intended.

        Modifies in place (does not return anything).
        """
        for ds in ws.get_many(
            self.db, ws.contains("name", "storage"), ws.equals("database", "Carma CCS")
        ):
            for exc in ws.biosphere(
                ds, ws.equals("name", "Carbon dioxide, non-fossil")
            ):
                wurst.rescale_exchange(exc, (0.9 / -0.1), remove_uncertainty=True)
class BiofuelInventory(BaseInventoryImport):
    """
    Biofuel datasets from the master thesis of Francesco Cozzolino (2018).
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # Migrations for 3.6
        # Rename datasets whose names changed between ecoinvent 3.5 and 3.6.
        if self.version == 3.6:
            migrations = {
                "fields": ["name", "reference product", "location"],
                "data": [
                    (
                        (
                            "market for transport, freight, sea, transoceanic tanker",
                            ("transport, freight, sea, transoceanic tanker",),
                            "GLO",
                        ),
                        {
                            "name": (
                                "market for transport, freight, sea, tanker for liquid goods other than petroleum and liquefied natural gas"
                            ),
                            "reference product": (
                                "transport, freight, sea, tanker for liquid goods other than petroleum and liquefied natural gas"
                            ),
                        },
                    ),
                    (
                        (
                            "market for water, decarbonised, at user",
                            ("water, decarbonised, at user",),
                            "GLO",
                        ),
                        {
                            "name": ("market for water, decarbonised"),
                            "reference product": ("water, decarbonised"),
                            "location": ("DE"),
                        },
                    ),
                    (
                        (
                            "market for water, completely softened, from decarbonised water, at user",
                            (
                                "water, completely softened, from decarbonised water, at user",
                            ),
                            "GLO",
                        ),
                        {
                            "name": ("market for water, completely softened"),
                            "reference product": ("water, completely softened"),
                            "location": ("RER"),
                        },
                    ),
                    (
                        ("market for concrete block", ("concrete block",), "GLO"),
                        {"location": ("DE"),},
                    ),
                ],
            }

            Migration("biofuels_ecoinvent_36").write(
                migrations,
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("biofuels_ecoinvent_36")

        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class HydrogenInventory(BaseInventoryImport):
    """
    Hydrogen datasets from the ELEGANCY project (2019).
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # Migrations for 3.5
        # Map 3.6-style dataset names back to their 3.5 equivalents when the
        # target database version is 3.5.
        if self.version == 3.5:
            migrations = {
                "fields": ["name", "reference product", "location"],
                "data": [
                    (
                        (
                            "market for water, deionised",
                            ("water, deionised",),
                            "Europe without Switzerland",
                        ),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        ("market for water, deionised", ("water, deionised",), "RoW"),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        (
                            "market for aluminium oxide, metallurgical",
                            ("aluminium oxide, metallurgical",),
                            "IAI Area, EU27 & EFTA",
                        ),
                        {
                            "name": ("market for aluminium oxide"),
                            "reference product": ("aluminium oxide"),
                            "location": ("GLO"),
                        },
                    ),
                    (
                        (
                            "market for flat glass, coated",
                            ("flat glass, coated",),
                            "RER",
                        ),
                        {"location": ("GLO"),},
                    ),
                ],
            }

            Migration("hydrogen_ecoinvent_35").write(
                migrations,
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("hydrogen_ecoinvent_35")

        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class HydrogenBiogasInventory(BaseInventoryImport):
    """
    Hydrogen datasets from the ELEGANCY project (2019).

    NOTE(review): `prepare_inventory` duplicates HydrogenInventory's migration
    verbatim (same migration name "hydrogen_ecoinvent_35") — consider sharing
    the migration definition between the two classes.
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # Migrations for 3.5
        if self.version == 3.5:
            migrations = {
                "fields": ["name", "reference product", "location"],
                "data": [
                    (
                        (
                            "market for water, deionised",
                            ("water, deionised",),
                            "Europe without Switzerland",
                        ),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        ("market for water, deionised", ("water, deionised",), "RoW"),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        (
                            "market for aluminium oxide, metallurgical",
                            ("aluminium oxide, metallurgical",),
                            "IAI Area, EU27 & EFTA",
                        ),
                        {
                            "name": ("market for aluminium oxide"),
                            "reference product": ("aluminium oxide"),
                            "location": ("GLO"),
                        },
                    ),
                    (
                        (
                            "market for flat glass, coated",
                            ("flat glass, coated",),
                            "RER",
                        ),
                        {"location": ("GLO"),},
                    ),
                ],
            }

            Migration("hydrogen_ecoinvent_35").write(
                migrations,
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("hydrogen_ecoinvent_35")

        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class HydrogenWoodyInventory(BaseInventoryImport):
    """
    Hydrogen datasets from the ELEGANCY project (2019).

    NOTE(review): `prepare_inventory` duplicates HydrogenInventory's migration
    verbatim (same migration name "hydrogen_ecoinvent_35") — consider sharing
    the migration definition between the two classes.
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # Migrations for 3.5
        if self.version == 3.5:
            migrations = {
                "fields": ["name", "reference product", "location"],
                "data": [
                    (
                        (
                            "market for water, deionised",
                            ("water, deionised",),
                            "Europe without Switzerland",
                        ),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        ("market for water, deionised", ("water, deionised",), "RoW"),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        (
                            "market for aluminium oxide, metallurgical",
                            ("aluminium oxide, metallurgical",),
                            "IAI Area, EU27 & EFTA",
                        ),
                        {
                            "name": ("market for aluminium oxide"),
                            "reference product": ("aluminium oxide"),
                            "location": ("GLO"),
                        },
                    ),
                    (
                        (
                            "market for flat glass, coated",
                            ("flat glass, coated",),
                            "RER",
                        ),
                        {"location": ("GLO"),},
                    ),
                ],
            }

            Migration("hydrogen_ecoinvent_35").write(
                migrations,
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("hydrogen_ecoinvent_35")

        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class BiogasInventory(BaseInventoryImport):
    """
    Biogas datasets from the SCCER project (2019).
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # Migrations for 3.5
        # Map 3.6-style water dataset names back to their 3.5 equivalents.
        if self.version == 3.5:
            migrations = {
                "fields": ["name", "reference product", "location"],
                "data": [
                    (
                        ("market for water, deionised", ("water, deionised",), "CH"),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        (
                            "market for water, deionised",
                            ("water, deionised",),
                            "Europe without Switzerland",
                        ),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        ("market for water, deionised", ("water, deionised",), "RoW"),
                        {
                            "name": (
                                "market for water, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                ],
            }

            Migration("biogas_ecoinvent_35").write(
                migrations,
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("biogas_ecoinvent_35")

        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class SyngasInventory(BaseInventoryImport):
    """
    Synthetic fuel datasets from the PSI project (2019).
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates — every other inventory importer in this
        # module performs this check; it was missing here.
        self.check_for_duplicates()
class SynfuelInventory(BaseInventoryImport):
    """
    Synthetic fuel datasets from the PSI project (2019).
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # No migrations needed: link biosphere flows, complete the
        # `product`/`code` fields, then drop datasets already present.
        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class HydrogenCoalInventory(BaseInventoryImport):
    """
    Hydrogen production from coal gasification from Wokaun A, Wilhelm E, Schenler W, Simons A, Bauer C, Bond S, et al.
    Transition to hydrogen - pathways toward clean transportation. New York: Cambridge University Press; 2011.
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # Migrations for 3.5
        # Map 3.6-style dataset names/locations to their 3.5 equivalents.
        if self.version == 3.5:
            migrations = {
                "fields": ["name", "reference product", "location"],
                "data": [
                    (
                        ("water production, deionised", ("water, deionised",), "RoW"),
                        {
                            "name": (
                                "water production, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        (
                            "water production, deionised",
                            ("water, deionised",),
                            "Europe without Switzerland",
                        ),
                        {
                            "name": (
                                "water production, deionised, from tap water, at user"
                            ),
                            "reference product": (
                                "water, deionised, from tap water, at user"
                            ),
                        },
                    ),
                    (
                        (
                            "market for transport, freight train",
                            ("transport, freight train",),
                            "ZA",
                        ),
                        {"location": ("RoW")},
                    ),
                    (
                        (
                            "market for transport, freight train",
                            ("transport, freight train",),
                            "IN",
                        ),
                        {"location": ("RoW")},
                    ),
                ],
            }

            Migration("hydrogen_coal_ecoinvent_35").write(
                migrations,
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("hydrogen_coal_ecoinvent_35")

        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class GeothermalInventory(BaseInventoryImport):
    """
    Geothermal heat production, adapted from geothermal power production dataset from ecoinvent 3.6.
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # No migrations needed: link biosphere flows, complete the
        # `product`/`code` fields, then drop datasets already present.
        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class SyngasCoalInventory(BaseInventoryImport):
    """
    Synthetic fuel datasets from the PSI project (2019), with hydrogen from coal gasification.
    """

    def load_inventory(self, path):
        self.import_db = ExcelImporter(path)

    def prepare_inventory(self):
        # No migrations needed: link biosphere flows, complete the
        # `product`/`code` fields, then drop datasets already present.
        self.add_biosphere_links()
        self.add_product_field_to_exchanges()

        # Check for duplicates
        self.check_for_duplicates()
class SynfuelCoalInventory(BaseInventoryImport):
    """
    Synthetic fuel datasets from the PSI project (2019), with hydrogen from coal gasification.
    """
    def load_inventory(self, path):
        """Read the synfuel-from-coal inventory spreadsheet into an ExcelImporter."""
        self.import_db = ExcelImporter(path)
    def prepare_inventory(self):
        """Link biosphere flows, add product fields to exchanges, and validate."""
        self.add_biosphere_links()
        self.add_product_field_to_exchanges()
        # Check for duplicates
        self.check_for_duplicates()
class LPGInventory(BaseInventoryImport):
    """
    Liquified Petroleum Gas (LPG) from methanol distillation, the PSI project (2020), with hydrogen from electrolysis.
    """
    def load_inventory(self, path):
        """Read the LPG inventory spreadsheet into an ExcelImporter."""
        self.import_db = ExcelImporter(path)
    def prepare_inventory(self):
        """Apply ecoinvent 3.5 name migrations if needed, then link and validate the import."""
        if self.version == 3.5:
            # Renames required by the 3.5 -> 3.6 transition,
            # keyed by (name, (reference product,), location).
            migration_data = [
                (
                    (
                        "market for aluminium oxide, metallurgical",
                        ("aluminium oxide, metallurgical",),
                        "IAI Area, EU27 & EFTA",
                    ),
                    {
                        "name": "market for aluminium oxide",
                        "reference product": "aluminium oxide",
                        "location": "GLO",
                    },
                ),
                (
                    (
                        "market for flat glass, uncoated",
                        ("flat glass, uncoated",),
                        "RER",
                    ),
                    {"location": "GLO"},
                ),
            ]
            Migration("LPG_ecoinvent_35").write(
                {
                    "fields": ["name", "reference product", "location"],
                    "data": migration_data,
                },
                description="Change technosphere names due to change from 3.5 to 3.6",
            )
            self.import_db.migrate("LPG_ecoinvent_35")
        self.add_biosphere_links()
        self.add_product_field_to_exchanges()
        # Reject inventories holding duplicate (name, product, location) entries.
        self.check_for_duplicates()
class CarculatorInventory(BaseInventoryImport):
    """
    Car models from the carculator project, https://github.com/romainsacchi/carculator
    """
    def __init__(self, database, year, vehicles=None, scenario="SSP2-Base"):
        """Create a :class:`CarculatorInventory` instance.

        Unlike the file-based importers, the inventory is generated on the fly
        by `carculator`, so this does not call the parent constructor.

        :param list database: the target database for the import (the Ecoinvent database),
            unpacked to a list of dicts
        :param int year: year for which fleet-average inventories are built
        :param dict vehicles: optional settings; recognized keys are
            "fleet file", "region" and "source file". Defaults to an empty dict.
        :param str scenario: REMIND scenario name, used to locate the ``.mif`` file
        """
        # Avoid the mutable-default-argument pitfall: substitute a fresh dict.
        if vehicles is None:
            vehicles = {}
        self.db = database
        self.db_code = [x["code"] for x in self.db]
        self.db_names = [
            (x["name"], x["reference product"], x["location"]) for x in self.db
        ]
        self.biosphere_dict = self.get_biosphere_code()
        self.db_year = year
        # Optional REMIND fleet composition file; None means single-year scope.
        self.fleet_file = (
            Path(vehicles["fleet file"]) if "fleet file" in vehicles else None
        )
        self.region = vehicles.get("region", ["EUR"])
        # REMIND output file for electricity mixes and biofuel shares.
        self.source_file = (
            Path(vehicles["source file"]) / (scenario + ".mif")
            if "source file" in vehicles
            else DATA_DIR / "remind_output_files" / (scenario + ".mif")
        )
        self.import_db = []
        self.load_inventory()
    def load_inventory(self):
        """Create `carculator` fleet average inventories for a given range of years.

        Builds car models for 2010..db_year, then for each configured REMIND
        region computes inventories (fleet-averaged when a fleet file is given),
        relinks the fuel-preparation electricity market to ecoinvent, and
        accumulates the result in ``self.import_db``.
        """
        cip = CarInputParameters()
        cip.static()
        _, array = fill_xarray_from_input_parameters(cip)
        # Extrapolate vehicle parameters up to the target year.
        array = array.interp(
            year=np.arange(2010, self.db_year + 1), kwargs={"fill_value": "extrapolate"}
        )
        cm = CarModel(array, cycle="WLTC")
        cm.set_all()
        for r, region in enumerate(self.region):
            if self.fleet_file:
                # Fleet-average scope derived from the REMIND fleet composition file.
                fleet_array = create_fleet_composition_from_REMIND_file(
                    self.fleet_file, region, fleet_year=self.db_year
                )
                scope = {
                    "powertrain": fleet_array.powertrain.values,
                    "size": fleet_array.coords["size"].values,
                    "year": fleet_array.coords["vintage_year"].values,
                    "fu": {"fleet": fleet_array, "unit": "vkm"},
                }
            else:
                scope = {"year": [self.db_year]}
            mix = extract_electricity_mix_from_REMIND_file(
                fp=self.source_file, remind_region=region, years=scope["year"]
            )
            fuel_shares = extract_biofuel_shares_from_REMIND(
                fp=self.source_file, remind_region=region, years=scope["year"]
            )
            # Background configuration: regional electricity mix and fuel blends.
            # NOTE(review): when a fuel type is absent, the fallback share is [1]
            # for both primary and secondary liquid fuels - verify that shares
            # are meant to sum to 1 in that case.
            bc = {
                "custom electricity mix": mix,
                "country": region,
                "fuel blend": {
                    "petrol": {
                        "primary fuel": {
                            "type": "petrol",
                            "share": fuel_shares.sel(fuel_type="liquid - fossil").values
                            if "liquid - fossil" in fuel_shares.fuel_type.values
                            else [1],
                        },
                        "secondary fuel": {
                            "type": "bioethanol - wheat straw",
                            "share": fuel_shares.sel(
                                fuel_type="liquid - biomass"
                            ).values
                            if "liquid - biomass" in fuel_shares.fuel_type.values
                            else [1],
                        },
                    },
                    "diesel": {
                        "primary fuel": {
                            "type": "diesel",
                            "share": fuel_shares.sel(fuel_type="liquid - fossil").values
                            if "liquid - fossil" in fuel_shares.fuel_type.values
                            else [1],
                        },
                        "secondary fuel": {
                            "type": "biodiesel - cooking oil",
                            "share": fuel_shares.sel(
                                fuel_type="liquid - biomass"
                            ).values
                            if "liquid - biomass" in fuel_shares.fuel_type.values
                            else [1],
                        },
                    },
                    "cng": {
                        "primary fuel": {
                            "type": "cng",
                            "share": fuel_shares.sel(fuel_type="gas - fossil").values
                            if "gas - fossil" in fuel_shares.fuel_type.values
                            else [1],
                        },
                        "secondary fuel": {
                            "type": "biogas - biowaste",
                            "share": fuel_shares.sel(fuel_type="gas - biomass").values
                            if "gas - biomass" in fuel_shares.fuel_type.values
                            else [0],
                        },
                    },
                    "hydrogen": {
                        "primary fuel": {
                            "type": "electrolysis",
                            "share": np.ones_like(scope["year"]),
                        }
                    },
                },
            }
            ic = InventoryCalculation(
                cm.array, scope=scope, background_configuration=bc
            )
            # filter out regular cars, to keep only fleet averages
            fa = [
                i[0] for i in ic.inputs if any(x for x in ic.scope["size"] if x in i[0])
            ]
            if self.fleet_file:
                i = ic.export_lci_to_bw(presamples=False, forbidden_activities=fa)
            else:
                i = ic.export_lci_to_bw(presamples=False)
            # remove duplicate items if iterating over several regions
            # NOTE(review): for r > 0 this iterates the importer object stored in
            # self.import_db, not its .data list - verify that this yields datasets.
            i.data = [
                x
                for x in i.data
                if (x["name"], x["location"])
                not in [(z["name"], z["location"]) for z in self.import_db]
            ]
            # remove electricity mix made by `carculator` and
            # link instead to ecoinvent
            # so that it will be replaced by new electricity markets made by :class:`.Electricity`
            map_region = {
                "LAM": "BR",
                "OAS": "RAS",
                "SSA": "RAF",
                "EUR": "RER",
                "NEU": "RER",
                "MEA": "RME",
                "REF": "RU",
                "CAZ": "CA",
                "CHA": "CN",
                "IND": "IN",
                "JPN": "JP",
                "USA": "US",
            }
            for d in i.data:
                if "electricity market for fuel preparation" in d["name"]:
                    # Keep only the production exchange, then point the input at
                    # the corresponding ecoinvent low-voltage market.
                    d["exchanges"] = [
                        e for e in d["exchanges"] if e["type"] == "production"
                    ]
                    d["exchanges"].append(
                        {
                            "amount": 1.0,
                            "database": "ecoinvent",
                            "location": map_region[region],
                            "name": "market group for electricity, low voltage"
                            if region not in ["REF", "JPN"]
                            else "market for electricity, low voltage",
                            "reference product": "electricity, low voltage",
                            "tag": "energy chain",
                            "type": "technosphere",
                            "uncertainty_type": 1,
                            "unit": "kilowatt hour",
                        }
                    )
            if r == 0:
                self.import_db = i
            else:
                self.import_db.data.extend(i.data)
    def prepare_inventory(self):
        """Link biosphere flows (dropping unmatched ones), add product fields, and validate."""
        self.add_biosphere_links(delete_missing=True)
        self.add_product_field_to_exchanges()
        # Check for duplicates
        self.check_for_duplicates() | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/inventory_imports.py | 0.698329 | 0.305069 | inventory_imports.py | pypi |
import copy
import uuid
import numpy as np
import wurst
from wurst import searching as ws
from .activity_maps import InventorySet
from .geomap import Geomap
from .utils import *
class Cement:
    """
    Class that modifies clinker and cement production datasets in ecoinvent based on REMIND and WBCSD's GNR data.

    :ivar db: ecoinvent database as a list of dataset dictionaries (wurst format)
    :ivar rmd: REMIND output data accessor
    :ivar year: year of the REMIND scenario applied
    :ivar version: ecoinvent version number (e.g. 3.5, 3.6)
    :ivar scenario: name of a Remind scenario
    :vartype scenario: str
    """
    def __init__(self, db, rmd, year, version):
        self.db = db
        self.rmd = rmd
        # REMIND <-> ecoinvent location translation helper.
        self.geo = Geomap()
        self.year = year
        self.version = version
        # Clinker-to-cement ratios: current (ecoinvent) and target (REMIND).
        self.clinker_ratio_eco = get_clinker_ratio_ecoinvent(version)
        self.clinker_ratio_remind = get_clinker_ratio_remind(self.year)
        # Fuel properties used for energy and CO2 accounting.
        self.fuels_lhv = get_lower_heating_values()
        self.fuels_co2 = get_fuel_co2_emission_factors()
        mapping = InventorySet(self.db)
        # Maps between REMIND emission labels / fuel names and ecoinvent flows.
        self.emissions_map = mapping.get_remind_to_ecoinvent_emissions()
        self.fuel_map = mapping.generate_fuel_map()
    def fetch_proxies(self, name, ref_prod):
        """
        Fetch dataset proxies, given a dataset `name` and `reference product`.
        Store a copy for each REMIND region.
        If a REMIND region does not find a fitting ecoinvent location,
        fetch a dataset with a "RoW" location.
        Delete original datasets from the database.

        :param name: name of the ecoinvent activity to regionalize
        :type name: str
        :param ref_prod: reference product of the activity
        :type ref_prod: str
        :return: dictionary with REMIND regions as keys and copied datasets as values
        :rtype: dict
        """
        # Map each REMIND region to an existing ecoinvent location for this activity.
        d_map = {
            self.geo.ecoinvent_to_remind_location(d['location']): d['location']
            for d in ws.get_many(
                self.db,
                ws.equals("name", name),
                ws.equals("reference product", ref_prod)
            )
        }
        list_remind_regions = [
            c[1] for c in self.geo.geo.keys()
            if type(c) == tuple and c[0] == "REMIND"
        ]
        # Regions without a match fall back to the "RoW" dataset.
        d_remind_to_eco = {r: d_map.get(r, "RoW") for r in list_remind_regions}
        d_act = {}
        for d in d_remind_to_eco:
            try:
                ds = ws.get_one(
                    self.db,
                    ws.equals("name", name),
                    ws.equals("reference product", ref_prod),
                    ws.equals("location", d_remind_to_eco[d]),
                )
                d_act[d] = copy.deepcopy(ds)
                d_act[d]["location"] = d
                d_act[d]["code"] = str(uuid.uuid4().hex)
            except ws.NoResults:
                print('No dataset {} found for the REMIND region {}'.format(name, d))
                continue
            for prod in ws.production(d_act[d]):
                prod['location'] = d
        # Log the datasets that are about to be removed.
        deleted_markets = [
            (act['name'], act['reference product'], act['location']) for act in self.db
            if (act["name"], act['reference product']) == (name, ref_prod)
        ]
        with open(DATA_DIR / "logs/log deleted cement datasets.csv", "a") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=';',
                                lineterminator='\n')
            for line in deleted_markets:
                writer.writerow(line)
        # Remove old datasets
        self.db = [act for act in self.db
                   if (act["name"], act['reference product']) != (name, ref_prod)]
        return d_act
@staticmethod
def remove_exchanges(dict, list_exc):
keep = lambda x: {
k: v
for k, v in x.items()
if not any(ele in x["name"] for ele in list_exc)
}
for r in dict:
dict[r]["exchanges"] = [keep(exc) for exc in dict[r]["exchanges"]]
return dict
def get_suppliers_of_a_region(
self, remind_region, ecoinvent_technologies, reference_product
):
"""
Return a list of datasets which location and name correspond to the region, name and reference product given,
respectively.
:param remind_region: a REMIND region
:type remind_region: str
:param ecoinvent_technologies: list of names of ecoinvent dataset
:type ecoinvent_technologies: list
:param reference_product: reference product
:type reference_product: str
:return: list of wurst datasets
:rtype: list
"""
return ws.get_many(
self.db,
*[
ws.either(
*[
ws.equals("name", supplier)
for supplier in ecoinvent_technologies
]
),
ws.either(
*[
ws.equals("location", loc)
for loc in self.geo.remind_to_ecoinvent_location(remind_region)
]
),
ws.equals("unit", "kilogram"),
ws.equals("reference product", reference_product),
]
)
@staticmethod
def get_shares_from_production_volume(ds):
"""
Return shares of supply based on production volumes
:param ds: list of datasets
:return: dictionary with (dataset name, dataset location) as keys, shares as values. Shares total 1.
:rtype: dict
"""
dict_act = {}
total_production_volume = 0
for act in ds:
for exc in ws.production(act):
dict_act[(act["name"], act["location"], act["reference product"], act["unit"])] = float(
exc["production volume"]
)
total_production_volume += float(exc["production volume"])
for d in dict_act:
dict_act[d] /= total_production_volume
return dict_act
    def update_pollutant_emissions(self, ds):
        """
        Update pollutant emissions based on GAINS data.

        :param ds: a wurst dataset; its biosphere exchanges are rescaled in place
        :return: the same dataset
        """
        # Update biosphere exchanges according to GAINS emission values
        for exc in ws.biosphere(
            ds, ws.either(*[ws.contains("name", x) for x in self.emissions_map])
        ):
            remind_emission_label = self.emissions_map[exc["name"]]
            try:
                remind_emission = self.rmd.cement_emissions.loc[
                    dict(
                        region=ds["location"],
                        pollutant=remind_emission_label
                    )
                ].values.item(0)
            except KeyError:
                # TODO: fix this.
                # GAINS does not have a 'World' region, hence we use China as a temporary fix
                remind_emission = self.rmd.cement_emissions.loc[
                    dict(
                        region='CHA',
                        pollutant=remind_emission_label
                    )
                ].values.item(0)
            if exc["amount"] == 0:
                # NOTE(review): rescaling a zero amount by any factor leaves it at
                # zero; presumably `rescale_exchange` should set the amount here -
                # verify against wurst's implementation.
                wurst.rescale_exchange(
                    exc, remind_emission / 1, remove_uncertainty=True
                )
            else:
                wurst.rescale_exchange(exc, remind_emission / exc["amount"])
        return ds
    def build_clinker_market_datasets(self):
        """Regionalize 'market for clinker' datasets, one copy per REMIND region.

        :return: dictionary with REMIND regions as keys and clinker market datasets as values
        :rtype: dict
        """
        # Fetch clinker market activities and store them in a dictionary
        return self.fetch_proxies('market for clinker', 'clinker')
    def build_clinker_production_datasets(self):
        """
        Builds clinker production datasets for each REMIND region.
        Add CO2 capture and Storage if needed.
        Source for Co2 capture and compression: https://www.sciencedirect.com/science/article/pii/S1750583613001230?via%3Dihub#fn0040

        :return: a dictionary with REMIND regions as keys and clinker production datasets as values.
        :rtype: dict
        """
        # Fetch clinker production activities and store them in a dictionary
        d_act_clinker = self.fetch_proxies('clinker production', 'clinker')
        # Fuel exchanges to remove
        list_fuels = ["diesel", "coal", "lignite", "coke", "fuel", "meat", "gas", "oil", "electricity"]
        # Remove fuel and electricity exchanges in each activity
        d_act_clinker = self.remove_exchanges(d_act_clinker, list_fuels)
        for k, v in d_act_clinker.items():
            # Production volume by kiln type
            energy_input_per_kiln_type = self.rmd.gnr_data.sel(
                region=k,
                variables=[
                    v
                    for v in self.rmd.gnr_data.variables.values
                    if "Production volume share" in v
                ]
            ).clip(0, 1)
            # Energy input per ton of clinker, in MJ, per kiln type
            energy_input_per_kiln_type /= energy_input_per_kiln_type.sum(axis=0)
            energy_eff_per_kiln_type = self.rmd.gnr_data.sel(
                region=k,
                variables=[
                    v
                    for v in self.rmd.gnr_data.variables.values
                    if "Thermal energy consumption" in v
                ]
            )
            # Weighted average energy input per ton clinker, in MJ
            energy_input_per_ton_clinker = (
                energy_input_per_kiln_type.values * energy_eff_per_kiln_type.values
            )
            # Fuel mix (waste, biomass, fossil)
            fuel_mix = self.rmd.gnr_data.sel(
                variables=[
                    "Share waste fuel",
                    "Share biomass fuel",
                    "Share fossil fuel",
                ],
                region=k
            ).clip(0, 1)
            fuel_mix /= fuel_mix.sum(axis=0)
            # Calculate quantities (in kg) of fuel, per type of fuel, per ton of clinker
            # MJ per ton of clinker * fuel mix * (1 / lower heating value)
            fuel_qty_per_type = (
                energy_input_per_ton_clinker.sum()
                * fuel_mix
                * 1
                / np.array(
                    [
                        float(self.fuels_lhv["waste"]),
                        float(self.fuels_lhv["wood pellet"]),
                        float(self.fuels_lhv["hard coal"]),
                    ]
                )
            )
            # Fossil CO2 per fuel type: emission factor scaled by the non-biogenic share.
            fuel_fossil_co2_per_type = (
                energy_input_per_ton_clinker.sum()
                * fuel_mix
                * np.array(
                    [
                        (
                            self.fuels_co2["waste"]["co2"]
                            * (1 - self.fuels_co2["waste"]["bio_share"])
                        ),
                        (
                            self.fuels_co2["wood pellet"]["co2"]
                            * (1 - self.fuels_co2["wood pellet"]["bio_share"])
                        ),
                        (
                            self.fuels_co2["hard coal"]["co2"]
                            * (1 - self.fuels_co2["hard coal"]["bio_share"])
                        ),
                    ]
                )
            )
            # Biogenic CO2 per fuel type: emission factor scaled by the biogenic share.
            fuel_biogenic_co2_per_type = (
                energy_input_per_ton_clinker.sum()
                * fuel_mix
                * np.array(
                    [
                        (
                            self.fuels_co2["waste"]["co2"]
                            * (self.fuels_co2["waste"]["bio_share"])
                        ),
                        (
                            self.fuels_co2["wood pellet"]["co2"]
                            * (self.fuels_co2["wood pellet"]["bio_share"])
                        ),
                        (
                            self.fuels_co2["hard coal"]["co2"]
                            * (self.fuels_co2["hard coal"]["bio_share"])
                        ),
                    ]
                )
            )
            # Each tuple: (fuel map key, reference product, fallback region).
            for fuel in [('waste', 'waste plastic, mixture', 'EUR'),
                         ('wood pellet', 'wood pellet, measured as dry mass', 'EUR'),
                         ('hard coal', 'hard coal', 'REF')]:
                # Select waste fuel providers, fitting the REMIND region
                # Fetch respective shares based on production volumes
                fuel_suppliers = self.get_shares_from_production_volume(
                    self.get_suppliers_of_a_region(k,
                                                   self.fuel_map[fuel[0]],
                                                   fuel[1]))
                if len(fuel_suppliers) == 0:
                    # Fall back to the default region when no supplier matches.
                    fuel_suppliers = self.get_shares_from_production_volume(
                        self.get_suppliers_of_a_region(fuel[2],
                                                       self.fuel_map[fuel[0]],
                                                       fuel[1]))
                # Append it to the dataset exchanges
                new_exchanges = []
                for supplier in fuel_suppliers:
                    new_exchanges.append(
                        {
                            "uncertainty type": 0,
                            "loc": 1,
                            # NOTE(review): index [2] (hard coal) is used for every
                            # fuel in this loop - verify it should not vary with `fuel`.
                            "amount": (fuel_suppliers[supplier] * fuel_qty_per_type[2].values) / 1000,
                            "type": "technosphere",
                            "production volume": 0,
                            "product": supplier[2],
                            "name": supplier[0],
                            "unit": supplier[3],
                            "location": supplier[1],
                        }
                    )
                v["exchanges"].extend(new_exchanges)
            v['exchanges'] = [v for v in v["exchanges"] if v]
            # Add carbon capture-related energy exchanges
            # Carbon capture rate: share of total CO2 captured
            carbon_capture_rate = (self.rmd.data.sel(
                variables='Emi|CCO2|FFaI|Industry|Cement',
                region=k
            ).interp(year=self.year) / self.rmd.data.sel(
                variables='Emi|CO2|FFaI|Industry|Cement',
                region=k
            ).interp(year=self.year)).values
            if carbon_capture_rate > 0:
                # CO2 effectively captured per kg of clinker
                carbon_capture_abs = carbon_capture_rate * ((fuel_biogenic_co2_per_type.sum().values
                                                             + fuel_fossil_co2_per_type.sum().values + 525)
                                                            / 1000)
                # Electricity: 0.024 kWh/kg CO2 for capture, 0.146 kWh/kg CO2 for compression
                carbon_capture_electricity = carbon_capture_abs * (0.146 + 0.024)
                new_exchanges = []
                new_exchanges.append(
                    {
                        "uncertainty type": 0,
                        "loc": 1,
                        "amount": carbon_capture_electricity,
                        "type": "technosphere",
                        "production volume": 0,
                        "product": 'electricity, medium voltage',
                        "name": 'market group for electricity, medium voltage',
                        "unit": 'kilowatt hour',
                        "location": k,
                    }
                )
                # Heat, as steam: 3.48 MJ/kg CO2 captured, minus excess heat generated on site
                excess_heat_generation = self.rmd.gnr_data.sel(
                    variables='Share of recovered energy, per ton clinker',
                    region=k
                ).values * energy_input_per_ton_clinker.sum()
                carbon_capture_heat = (carbon_capture_abs * 3.48) - (excess_heat_generation / 1000)
                new_exchanges.append(
                    {
                        "uncertainty type": 0,
                        "loc": 1,
                        "amount": carbon_capture_heat,
                        "type": "technosphere",
                        "production volume": 0,
                        "product": 'heat, from steam, in chemical industry',
                        "name": 'steam production, as energy carrier, in chemical industry',
                        "unit": 'megajoule',
                        "location": 'RoW',
                    }
                )
                v["exchanges"].extend(new_exchanges)
            else:
                carbon_capture_rate = 0
            # Update fossil CO2 exchange, add 525 kg of fossil CO_2 from calcination, minus CO2 captured
            fossil_co2_exc = [e for e in v["exchanges"] if e['name'] == 'Carbon dioxide, fossil'][0]
            fossil_co2_exc['amount'] = ((fuel_fossil_co2_per_type.sum().values + 525) / 1000) * (1 - carbon_capture_rate)
            fossil_co2_exc['uncertainty type'] = 0
            try:
                # Update biogenic CO2 exchange, minus CO2 captured
                biogenic_co2_exc = [e for e in v["exchanges"] if e['name'] == 'Carbon dioxide, non-fossil'][0]
                biogenic_co2_exc['amount'] = (fuel_biogenic_co2_per_type.sum().values / 1000) * (1 - carbon_capture_rate)
                biogenic_co2_exc['uncertainty type'] = 0
            except IndexError:
                # There isn't a biogenic CO2 emissions exchange
                biogenic_co2_exc = {
                    "uncertainty type": 0,
                    "loc": 1,
                    "amount": (fuel_biogenic_co2_per_type.sum().values / 1000) * (1 - carbon_capture_rate),
                    "type": "biosphere",
                    "production volume": 0,
                    "name": "Carbon dioxide, non-fossil",
                    "unit": "kilogram",
                    "input": ('biosphere3', 'eba59fd6-f37e-41dc-9ca3-c7ea22d602c7'),
                    "categories": ('air',),
                }
                v["exchanges"].append(biogenic_co2_exc)
            v['exchanges'] = [v for v in v["exchanges"] if v]
        d_act_clinker = {k:self.update_pollutant_emissions(v) for k,v in d_act_clinker.items()}
        return d_act_clinker
def relink_datasets(self, name, ref_product):
"""
For a given dataset name, change its location to a REMIND location,
to effectively link the newly built dataset(s).
:param ref_product:
:param name: dataset name
:type name: str
"""
list_remind_regions = [
c[1] for c in self.geo.geo.keys() if type(c) == tuple and c[0] == "REMIND"
]
for act in self.db:
for exc in act['exchanges']:
try:
exc["name"]
except:
print(exc)
if (exc['name'], exc.get('product')) == (name, ref_product) and exc['type'] == 'technosphere':
if act['location'] not in list_remind_regions:
if act['location'] == "North America without Quebec":
exc['location'] = 'USA'
else:
exc['location'] = self.geo.ecoinvent_to_remind_location(act['location'])
else:
exc['location'] = act['location']
    def adjust_clinker_ratio(self, d_act):
        """ Adjust the cement suppliers composition for "cement, unspecified", in order to reach
        the average clinker-to-cement ratio given by REMIND.
        The supply of the cement with the highest clinker-to-cement ratio is decreased by 1% to the favor of
        the supply of the cement with the lowest clinker-to-cement ratio, and the average clinker-to-cement ratio
        is calculated.
        This operation is repeated until the average clinker-to-cement ratio aligns with that given by REMIND.
        When the supply of the cement with the highest clinker-to-cement ratio goes below 1%,
        the cement with the second highest clinker-to-cement ratio becomes affected and so forth.

        :param d_act: dictionary of "cement, unspecified" datasets, keyed by REMIND region
        :return: the same dictionary, with supplier amounts adjusted in place
        """
        for d in d_act:
            ratio_to_reach = self.clinker_ratio_remind.sel(dict(
                region=d
            )).values
            # Collect the supply shares and clinker ratios of the cement inputs.
            share = []
            ratio = []
            for exc in d_act[d]['exchanges']:
                if 'cement' in exc['product'] and exc['type'] == "technosphere":
                    share.append(exc['amount'])
                    ratio.append(self.clinker_ratio_eco[(exc['name'], exc['location'])])
            share = np.array(share)
            ratio = np.array(ratio)
            average_ratio = (share * ratio).sum()
            # Iteratively shift 1% of supply from the highest-ratio cement to the
            # lowest-ratio one; exhausted suppliers (share < 0.1%) are masked as NaN.
            # NOTE(review): if the REMIND target is below what any supplier mix can
            # reach, this loop may not terminate - verify upstream data guarantees.
            while average_ratio > ratio_to_reach:
                share[share == 0] = np.nan
                ratio = np.where(share >= 0.001, ratio, np.nan)
                highest_ratio = np.nanargmax(ratio)
                lowest_ratio = np.nanargmin(ratio)
                share[highest_ratio] -= .01
                share[lowest_ratio] += .01
                average_ratio = (np.nan_to_num(ratio) * np.nan_to_num(share)).sum()
                share = np.nan_to_num(share)
            # Write the adjusted shares back onto the exchanges, in order.
            count = 0
            for exc in d_act[d]['exchanges']:
                if 'cement' in exc['product'] and exc['type'] == "technosphere":
                    exc['amount'] = share[count]
                    count += 1
        return d_act
def update_cement_production_datasets(self, name, ref_prod):
"""
Update electricity use (mainly for grinding).
Update clinker-to-cement ratio.
Update use of cementitious supplementary materials.
:return:
"""
# Fetch proxies
# Delete old datasets
d_act_cement = self.fetch_proxies(name, ref_prod)
# Update electricity use
d_act_cement = self.update_electricity_exchanges(d_act_cement)
return d_act_cement
def update_electricity_exchanges(self, d_act):
"""
Update electricity exchanges in cement production datasets.
Electricity consumption equals electricity use minus on-site electricity generation from excess heat recovery.
:return:
"""
d_act = self.remove_exchanges(d_act, ['electricity'])
for act in d_act:
new_exchanges = []
new_exchanges.append(
{
"uncertainty type": 0,
"loc": 1,
"amount": (self.rmd.gnr_data.loc[dict(
variables='Power consumption',
region=act
)].values - self.rmd.gnr_data.loc[dict(
variables='Power generation',
region=act
)].values) / 1000,
"type": "technosphere",
"production volume": 0,
"product": 'electricity, medium voltage',
"name": 'market group for electricity, medium voltage',
"unit": 'kilowatt hour',
"location": act,
}
)
d_act[act]["exchanges"].extend(new_exchanges)
d_act[act]['exchanges'] = [v for v in d_act[act]["exchanges"] if v]
return d_act
    def add_datasets_to_database(self):
        """
        Run the full cement-sector update: regionalize cement and clinker
        datasets, adjust clinker ratios and electricity use, log deletions and
        creations, relink consumers, and return the modified database.

        :return: the updated ecoinvent database (list of dataset dicts)
        :rtype: list
        """
        print("The validity of the datasets produced from the integration of the cement sector is not yet fully tested. Consider the results with caution.")
        print('Log of deleted cement datasets saved in {}'.format(DATA_DIR / 'logs'))
        print('Log of created cement datasets saved in {}'.format(DATA_DIR / 'logs'))
        # (Re)create both log files with their header rows.
        with open(DATA_DIR / "logs/log deleted cement datasets.csv", "w") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=';',
                                lineterminator='\n')
            writer.writerow(['dataset name', 'reference product', 'location'])
        with open(DATA_DIR / "logs/log created cement datasets.csv", "w") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=';',
                                lineterminator='\n')
            writer.writerow(['dataset name', 'reference product', 'location'])
        created_datasets = list()
        print('Adjust clinker-to-cement ratio in "unspecified cement" datasets')
        # The generic cement market activity is named differently in ecoinvent 3.5.
        if self.version == 3.5:
            name = 'market for cement, unspecified'
            ref_prod = 'cement, unspecified'
        else:
            name = 'cement, all types to generic market for cement, unspecified'
            ref_prod = 'cement, unspecified'
        act_cement_unspecified = self.fetch_proxies(name, ref_prod)
        act_cement_unspecified = self.adjust_clinker_ratio(act_cement_unspecified)
        self.db.extend([v for v in act_cement_unspecified.values()])
        created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                 for act in act_cement_unspecified.values()])
        print('Create new cement production datasets and adjust electricity consumption')
        if self.version == 3.5:
            for i in (
                ("cement production, alternative constituents 21-35%","cement, alternative constituents 21-35%"),
                ("cement production, alternative constituents 6-20%","cement, alternative constituents 6-20%"),
                ("cement production, blast furnace slag 18-30% and 18-30% other alternative constituents",
                 "cement, blast furnace slag 18-30% and 18-30% other alternative constituents"),
                ("cement production, blast furnace slag 25-70%, US only","cement, blast furnace slag 25-70%, US only"),
                ("cement production, blast furnace slag 31-50% and 31-50% other alternative constituents",
                 "cement, blast furnace slag 31-50% and 31-50% other alternative constituents"),
                ("cement production, blast furnace slag 36-65%, non-US","cement, blast furnace slag 36-65%, non-US"),
                ("cement production, blast furnace slag 5-25%, US only","cement, blast furnace slag 5-25%, US only"),
                ("cement production, blast furnace slag 70-100%, non-US","cement, blast furnace slag 70-100%, non-US"),
                ("cement production, blast furnace slag 70-100%, US only","cement, blast furnace slag 70-100%, US only"),
                ("cement production, blast furnace slag 81-95%, non-US","cement, blast furnace slag 81-95%, non-US"),
                ("cement production, blast furnace slag, 66-80%, non-US","cement, blast furnace slag, 66-80%, non-US"),
                ("cement production, Portland","cement, Portland"),
                ("cement production, pozzolana and fly ash 11-35%, non-US","cement, pozzolana and fly ash 11-35%, non-US"),
                ("cement production, pozzolana and fly ash 15-40%, US only","cement, pozzolana and fly ash 15-40%, US only"),
                ("cement production, pozzolana and fly ash 36-55%,non-US","cement, pozzolana and fly ash 36-55%,non-US"),
                ("cement production, pozzolana and fly ash 5-15%, US only","cement, pozzolana and fly ash 5-15%, US only")
            ):
                act_cement = self.update_cement_production_datasets(i[0], i[1])
                self.db.extend([v for v in act_cement.values()])
                created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                         for act in act_cement.values()])
                self.relink_datasets(i[0], i[1])
            print('Create new cement market datasets')
            for i in (("market for cement, alternative constituents 21-35%","cement, alternative constituents 21-35%"),
                      ("market for cement, alternative constituents 6-20%","cement, alternative constituents 6-20%"),
                      ("market for cement, blast furnace slag 18-30% and 18-30% other alternative constituents",
                       "cement, blast furnace slag 18-30% and 18-30% other alternative constituents"),
                      ("market for cement, blast furnace slag 25-70%, US only","cement, blast furnace slag 25-70%, US only"),
                      ("market for cement, blast furnace slag 31-50% and 31-50% other alternative constituents",
                       "cement, blast furnace slag 31-50% and 31-50% other alternative constituents"),
                      ("market for cement, blast furnace slag 36-65%, non-US","cement, blast furnace slag 36-65%, non-US"),
                      ("market for cement, blast furnace slag 5-25%, US only","cement, blast furnace slag 5-25%, US only"),
                      ("market for cement, blast furnace slag 70-100%, non-US","cement, blast furnace slag 70-100%, non-US"),
                      ("market for cement, blast furnace slag 70-100%, US only","cement, blast furnace slag 70-100%, US only"),
                      ("market for cement, blast furnace slag 81-95%, non-US","cement, blast furnace slag 81-95%, non-US"),
                      ("market for cement, blast furnace slag, 66-80%, non-US","cement, blast furnace slag, 66-80%, non-US"),
                      ("market for cement, Portland","cement, Portland"),
                      ("market for cement, pozzolana and fly ash 11-35%, non-US","cement, pozzolana and fly ash 11-35%, non-US"),
                      ("market for cement, pozzolana and fly ash 15-40%, US only","cement, pozzolana and fly ash 15-40%, US only"),
                      ("market for cement, pozzolana and fly ash 36-55%,non-US","cement, pozzolana and fly ash 36-55%,non-US"),
                      ("market for cement, pozzolana and fly ash 5-15%, US only","cement, pozzolana and fly ash 5-15%, US only")
                      ):
                act_cement = self.fetch_proxies(i[0], i[1])
                self.db.extend([v for v in act_cement.values()])
                created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                         for act in act_cement.values()])
                self.relink_datasets(i[0], i[1])
        else:
            for i in (
                ("cement production, Portland", "cement, Portland"),
                ("cement production, blast furnace slag 35-70%", "cement, blast furnace slag 35-70%"),
                ("cement production, blast furnace slag 6-34%", "cement, blast furnace slag 6-34%"),
                ("cement production, limestone 6-10%", "cement, limestone 6-10%"),
                ("cement production, pozzolana and fly ash 15-50%", "cement, pozzolana and fly ash 15-50%"),
                ("cement production, pozzolana and fly ash 6-14%", "cement, pozzolana and fly ash 6-14%"),
                ("cement production, alternative constituents 6-20%", "cement, alternative constituents 6-20%"),
                ("cement production, alternative constituents 21-35%", "cement, alternative constituents 21-35%"),
                ("cement production, blast furnace slag 18-30% and 18-30% other alternative constituents",
                 "cement, blast furnace slag 18-30% and 18-30% other alternative constituents"),
                ("cement production, blast furnace slag 31-50% and 31-50% other alternative constituents",
                 "cement, blast furnace slag 31-50% and 31-50% other alternative constituents"),
                ("cement production, blast furnace slag 36-65%", "cement, blast furnace slag 36-65%"),
                ("cement production, blast furnace slag 66-80%", "cement, blast furnace slag, 66-80%"),
                ("cement production, blast furnace slag 81-95%", "cement, blast furnace slag 81-95%"),
                ("cement production, pozzolana and fly ash 11-35%", "cement, pozzolana and fly ash 11-35%"),
                ("cement production, pozzolana and fly ash 36-55%", "cement, pozzolana and fly ash 36-55%"),
                ("cement production, alternative constituents 45%", "cement, alternative constituents 45%"),
                ("cement production, blast furnace slag 40-70%", "cement, blast furnace 40-70%"),
                ("cement production, pozzolana and fly ash 25-35%", "cement, pozzolana and fly ash 25-35%"),
                ("cement production, limestone 21-35%", "cement, limestone 21-35%"),
                ("cement production, blast furnace slag 21-35%", "cement, blast furnace slag 21-35%"),
                ("cement production, blast furnace slag 25-70%", "cement, blast furnace slag 25-70%"),
                ("cement production, blast furnace slag 5-25%", "cement, blast furnace slag 5-25%"),
                ("cement production, blast furnace slag 6-20%", "cement, blast furnace slag 6-20%"),
                ("cement production, blast furnace slag 70-100%", "cement, blast furnace slag 70-100%"),
                ("cement production, pozzolana and fly ash 15-40%", "cement, pozzolana and fly ash 15-40%"),
                ("cement production, pozzolana and fly ash 5-15%", "cement, pozzolana and fly ash 5-15%"),
            ):
                act_cement = self.update_cement_production_datasets(i[0], i[1])
                self.db.extend([v for v in act_cement.values()])
                created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                         for act in act_cement.values()])
                self.relink_datasets(i[0], i[1])
            print('Create new cement market datasets')
            for i in (("market for cement, Portland", "cement, Portland"),
                      ("market for cement, blast furnace slag 35-70%", "cement, blast furnace slag 35-70%"),
                      ("market for cement, blast furnace slag 6-34%", "cement, blast furnace slag 6-34%"),
                      ("market for cement, limestone 6-10%", "cement, limestone 6-10%"),
                      ("market for cement, pozzolana and fly ash 15-50%", "cement, pozzolana and fly ash 15-50%"),
                      ("market for cement, pozzolana and fly ash 6-14%", "cement, pozzolana and fly ash 6-14%"),
                      ("market for cement, alternative constituents 6-20%", "cement, alternative constituents 6-20%"),
                      ("market for cement, alternative constituents 21-35%", "cement, alternative constituents 21-35%"),
                      ("market for cement, blast furnace slag 18-30% and 18-30% other alternative constituents",
                       "cement, blast furnace slag 18-30% and 18-30% other alternative constituents"),
                      ("market for cement, blast furnace slag 31-50% and 31-50% other alternative constituents",
                       "cement, blast furnace slag 31-50% and 31-50% other alternative constituents"),
                      ("market for cement, blast furnace slag 36-65%", "cement, blast furnace slag 36-65%"),
                      ("market for cement, blast furnace slag 66-80%", "cement, blast furnace slag, 66-80%"),
                      ("market for cement, blast furnace slag 81-95%", "cement, blast furnace slag 81-95%"),
                      ("market for cement, pozzolana and fly ash 11-35%", "cement, pozzolana and fly ash 11-35%"),
                      ("market for cement, pozzolana and fly ash 36-55%", "cement, pozzolana and fly ash 36-55%"),
                      ("market for cement, alternative constituents 45%", "cement, alternative constituents 45%"),
                      ("market for cement, blast furnace slag 40-70%", "cement, blast furnace 40-70%"),
                      ("market for cement, pozzolana and fly ash 25-35%", "cement, pozzolana and fly ash 25-35%"),
                      ("market for cement, limestone 21-35%", "cement, limestone 21-35%"),
                      ("market for cement, blast furnace slag 21-35%", "cement, blast furnace slag 21-35%"),
                      ("market for cement, blast furnace slag 25-70%", "cement, blast furnace slag 25-70%"),
                      ("market for cement, blast furnace slag 5-25%", "cement, blast furnace slag 5-25%"),
                      ("market for cement, blast furnace slag 6-20%", "cement, blast furnace slag 6-20%"),
                      ("market for cement, blast furnace slag 70-100%", "cement, blast furnace slag 70-100%"),
                      ("market for cement, pozzolana and fly ash 15-40%", "cement, pozzolana and fly ash 15-40%"),
                      ("market for cement, pozzolana and fly ash 5-15%", "cement, pozzolana and fly ash 5-15%"),
                      ):
                act_cement = self.fetch_proxies(i[0], i[1])
                self.db.extend([v for v in act_cement.values()])
                created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                         for act in act_cement.values()])
                self.relink_datasets(i[0], i[1])
        print('Create new clinker production datasets and delete old datasets')
        clinker_prod_datasets = [d for d in self.build_clinker_production_datasets().values()]
        self.db.extend(clinker_prod_datasets)
        created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                 for act in clinker_prod_datasets])
        print('Create new clinker market datasets and delete old datasets')
        clinker_market_datasets = [d for d in self.build_clinker_market_datasets().values()]
        self.db.extend(clinker_market_datasets)
        created_datasets.extend([(act['name'], act['reference product'], act['location'])
                                 for act in clinker_market_datasets])
        # Append every created dataset to the creation log.
        with open(DATA_DIR / "logs/log created cement datasets.csv", "a") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=';',
                                lineterminator='\n')
            for line in created_datasets:
                writer.writerow(line)
        print('Relink cement market datasets to new cement production datasets')
        self.relink_datasets('market for cement', 'cement')
        print('Relink activities to new cement datasets')
        self.relink_datasets('cement, all types to generic market for cement, unspecified',
                             'cement, unspecified')
        print('Relink cement production datasets to new clinker market datasets')
        self.relink_datasets('market for clinker', 'clinker')
        print('Relink clinker market datasets to new clinker production datasets')
        self.relink_datasets('clinker production', 'clinker')
        return self.db | /rmnd_lca-0.1.6-py3-none-any.whl/rmnd_lca/cement.py | 0.587943 | 0.305807 | cement.py | pypi |
from .geomap import Geomap
import wurst
import wurst.searching as ws
import pandas as pd
import uuid
import copy
from .geomap import REGION_MAPPING_FILEPATH
class Cars():
    """
    Class that modifies carculator inventories in ecoinvent
    based on REMIND output data.

    :ivar db: ecoinvent database in list-of-dict format
    :ivar rmd: REMIND output data
    :ivar scenario: REMIND scenario identifier
    :ivar year: year for the current analysis
    """

    def __init__(self, db, rmd, scenario, year):
        self.db = db
        self.rmd = rmd
        self.geo = Geomap()

        self.scenario = scenario
        self.year = year

        # All REMIND region codes, without the aggregate "World" region.
        self.remind_regions = list(pd.read_csv(
            REGION_MAPPING_FILEPATH, sep=";").RegionCode.unique())
        self.remind_regions.remove("World")

    def _create_local_copy(self, old_act, region):
        """
        Create a local copy of an activity.
        Update also the production exchange.

        :param old_act: activity to copy
        :type old_act: dict
        :param region: REMIND region code for the copy
        :type region: str
        :return: a deep copy relocated to `region` with a fresh code
        :rtype: dict
        :raises ValueError: if the activity does not have exactly one
            production exchange matching its own name
        """
        act = copy.deepcopy(old_act)
        act.update({
            "location": region,
            "code": str(uuid.uuid4().hex)
        })

        # update production exchange
        prods = list(ws.production(
            act, ws.equals("name", act["name"])))
        if len(prods) == 1:
            prods[0]["location"] = region
        else:
            raise ValueError(
                "Multiple or no Production Exchanges found for {}."
                .format(old_act["name"]))
        return act

    def _delete_non_global(self, acts, proxy_region="RER"):
        """
        Interactively delete from `acts` (in place) any activity whose
        location differs from `proxy_region`. The user is asked once;
        the same answer applies to all matches.
        """
        ans = None
        # reverse loop, to allow for deletion while iterating
        for idx in range(len(acts) - 1, -1, -1):
            if acts[idx]["location"] != proxy_region:
                print("Found non-global EV activities: {} in {}"
                      .format(acts[idx]["name"], acts[idx]["location"]))
                if ans is None:
                    ans = input("Delete existing non-{} activities? (y/n)"
                                .format(proxy_region))
                if ans == "y":
                    del acts[idx]

    def create_local_evs(self):
        """Create LDV activities for REMIND regions and relink
        existing electricity exchanges for BEVs and PHEVs
        to REMIND-compatible (regional) market groups.
        """
        print("Creating local BEV and PHEV activities")
        bevs = list(ws.get_many(
            self.db,
            ws.either(
                ws.contains("name", "BEV,"),
                ws.contains("name", "PHEV"))))
        self._delete_non_global(bevs)

        old_supply = ws.get_one(
            self.db,
            ws.startswith(
                "name", "electricity supply for electric vehicles"))

        for region in self.remind_regions:
            # create a regional electricity supply dataset
            supply = self._create_local_copy(old_supply, region)
            # replace the electricity input with the regional market group
            for sup in ws.technosphere(
                    supply, ws.equals("product", "electricity, low voltage")):
                sup.update({
                    "name": "market group for electricity, low voltage",
                    "location": region
                })

            print("Relinking electricity markets for BEVs in {}".format(region))
            for bev in bevs:
                new_bev = self._create_local_copy(bev, region)
                # update fuel market
                oldex = list(ws.technosphere(
                    new_bev,
                    ws.startswith(
                        "name",
                        "electricity supply for electric vehicles")))
                # should only be one
                if len(oldex) != 1:
                    raise ValueError(
                        "Zero or more than one electricity "
                        "markets for fuel production found for {} in {}"
                        .format(new_bev["name"], new_bev["location"]))
                # reference the new regional supply
                oldex[0].update({
                    "location": region
                })
                self.db.append(new_bev)
            self.db.append(supply)

    def create_local_fcevs(self):
        """Create LDV activities for REMIND regions and relink
        existing electricity exchanges for FCEVs
        to REMIND-compatible (regional) market groups.
        """
        print("Creating local FCEV activities")
        fcevs = list(ws.get_many(
            self.db,
            ws.contains("name", "FCEV,")))
        self._delete_non_global(fcevs)

        old_supply = ws.get_one(
            self.db,
            ws.startswith(
                "name", "fuel supply for hydrogen vehicles"))

        for region in self.remind_regions:
            print("Relinking hydrogen markets for FCEVs in {}".format(region))
            # create local hydrogen supply
            supply = self._create_local_copy(old_supply, region)
            # remove explicit electricity input: zero its amount and
            # let wurst drop the zero-amount exchange
            elmark = next(ws.technosphere(supply, ws.startswith(
                "name", "electricity market for fuel preparation")))
            elmark["amount"] = 0
            wurst.delete_zero_amount_exchanges([supply])

            # find a hydrogen supplier in or near the region
            h2sups = ws.technosphere(
                supply,
                ws.startswith("product", "Hydrogen"))
            for h2sup in h2sups:
                prod = self._find_local_supplier(region, h2sup["name"])
                h2sup["location"] = prod["location"]
                h2sup["name"] = prod["name"]

            # create local FCEVs linked to the regional fuel supply
            for fcev in fcevs:
                local_fcev = self._create_local_copy(fcev, region)
                # link correct market
                fuel_ex = next(ws.technosphere(
                    local_fcev,
                    ws.startswith("name", "fuel supply for hydrogen vehicles")))
                fuel_ex["location"] = region
                self.db.append(local_fcev)
            self.db.append(supply)

    def _find_local_supplier(self, region, name):
        """
        Use geomatcher to find a supplier with `name` first strictly
        within the region, then in an intersecting region and
        eventually *any* activity with this name.

        :raises ValueError: if no activity with this name exists at all
        """
        def producer_in_locations(locs):
            # Return the first producer found among `locs`, or None.
            prod = None
            producers = list(ws.get_many(
                self.db,
                ws.equals("name", name),
                ws.either(*[
                    ws.equals("location", loc) for loc in locs
                ])))
            if len(producers) >= 1:
                prod = producers[0]
                if len(producers) > 1:
                    print(("Multiple producers for {} found in {}, "
                           "using activity from {}").format(
                               name, region, prod["location"]))
            return prod

        ei_locs = self.geo.remind_to_ecoinvent_location(region, contained=True)
        prod = producer_in_locations(ei_locs)

        if prod is None:
            # widen the search to intersecting ecoinvent locations
            ei_locs = self.geo.remind_to_ecoinvent_location(region)
            prod = producer_in_locations(ei_locs)

            if prod is None:
                # let's use "any" dataset
                producers = list(ws.get_many(
                    self.db,
                    ws.equals("name", name)))
                if len(producers) == 0:
                    # Bug fix: the original raised the unformatted
                    # template "No producers found for {}." verbatim.
                    raise ValueError(
                        "No producers found for {}.".format(name))
                prod = producers[0]
                # we can leave things as they are since the existing
                # supply is the default supply
                print(("No producers for {} found in {}\n"
                       "Using activity from {}")
                      .format(name, region, prod["location"]))
        return prod

    def create_local_icevs(self):
        """
        Use REMIND fuel markets to update the mix of bio-, syn-
        and fossil liquids in gasoline and diesel.
        """
        print("Creating local ICEV activities")
        icevs = list(ws.get_many(
            self.db,
            ws.either(
                ws.contains("name", "ICEV-"),
                ws.contains("name", "HEV-"))
        ))

        old_suppliers = {
            fuel: ws.get_one(
                self.db,
                ws.startswith(
                    "name", "fuel supply for {} vehicles".format(fuel)))
            for fuel in ["diesel", "gasoline"]}

        new_producers = {
            "diesel": {
                # biodiesel is only from cooking oil from RER,
                # as this is not the focus for now
                # to be improved!
                "Biomass": ws.get_one(
                    self.db,
                    ws.equals("name", "Biodiesel from cooking oil"))
            },
            "gasoline": {
                # only ethanol from European wheat straw as biofuel
                "Biomass": ws.get_one(
                    self.db,
                    ws.equals("name", "Ethanol from wheat straw pellets"),
                    ws.equals("location", "RER"))
            }
        }

        data = self.rmd.get_remind_fuel_mix_for_ldvs()

        for region in self.remind_regions:
            # two supply regions for fossil gasoline and diesel:
            # European datasets for EUR, global ones elsewhere
            if region == "EUR":
                new_producers["gasoline"]["Fossil"] = ws.get_one(
                    self.db,
                    ws.equals("name", "market for petrol, low-sulfur"),
                    ws.equals("location", "Europe without Switzerland"))

                new_producers["diesel"]["Fossil"] = ws.get_one(
                    self.db,
                    ws.equals("name", "market group for diesel"),
                    ws.equals("location", "RER"))
            else:
                new_producers["gasoline"]["Fossil"] = ws.get_one(
                    self.db,
                    ws.equals("name", "market for petrol, low-sulfur"),
                    ws.equals("location", "RoW"))

                new_producers["diesel"]["Fossil"] = ws.get_one(
                    self.db,
                    ws.equals("name", "market group for diesel"),
                    ws.equals("location", "GLO"))

            # local synfuels from hydrogen
            new_producers["diesel"]["Hydrogen"] = self._find_local_supplier(
                region, "Diesel production, synthetic, Fischer Tropsch process")

            new_producers["gasoline"]["Hydrogen"] = self._find_local_supplier(
                region, "Gasoline production, synthetic, from methanol")

            print("Relinking fuel markets for ICEVs in {}".format(region))
            for ftype in new_producers:
                new_supp = self._create_local_copy(
                    old_suppliers[ftype], region)

                # rebuild the exchanges from the REMIND fuel mix shares
                new_supp["exchanges"] = [{
                    "amount": data.loc[region, suptype].values.item(),
                    "name": new_producers[ftype][suptype]["name"],
                    "location": new_producers[ftype][suptype]["location"],
                    "unit": "kilogram",
                    "type": "technosphere",
                    "reference product": new_producers[ftype][suptype]["reference product"],
                    "product": new_producers[ftype][suptype]["reference product"]
                } for suptype in new_producers[ftype]]

                new_supp["exchanges"].append({
                    "amount": 1,
                    "name": new_supp["name"],
                    "location": region,
                    "unit": "kilogram",
                    "type": "production",
                    "reference product": "fuel",
                    "product": "fuel"
                })

                self.db.append(new_supp)

            shortcuts = {
                "diesel": "EV-d",
                "gasoline": "EV-p"
            }

            for ftype in shortcuts:
                cars = list(ws.get_many(
                    icevs, ws.contains("name", shortcuts[ftype])))
                for car in cars:
                    # some local activities might already exist
                    local_dcar = self._get_local_act_or_copy(
                        cars, car, region)

                    # replace fuel supplier with the regional one
                    fuel_ex = next(ws.technosphere(
                        local_dcar,
                        ws.startswith(
                            "name",
                            "fuel supply for {} vehicles".format(ftype))))
                    fuel_ex["location"] = region

    def _get_local_act_or_copy(self, db, act, region):
        """
        Find and return a local activity. If it is not found,
        create a local copy, append it to the database and return it.
        If multiple results are found, throw a ValueError.
        """
        local_acts = list(ws.get_many(
            db,
            ws.equals("name", act["name"]),
            ws.equals("location", region)))
        if len(local_acts) == 1:
            return local_acts[0]
        elif len(local_acts) == 0:
            new_act = self._create_local_copy(act, region)
            self.db.append(new_act)
            return new_act
        else:
            raise ValueError("Multiple activities found for {} in {}"
                             .format(act["name"], region))

    def update_cars(self):
        """Run all three localization steps in sequence."""
        self.create_local_evs()
        self.create_local_fcevs()
        self.create_local_icevs()
import os
from . import DATA_DIR
from .activity_maps import InventorySet
from .geomap import Geomap
from wurst import searching as ws
from wurst.ecoinvent import filters
import csv
import numpy as np
import uuid
import wurst
from datetime import date
PRODUCTION_PER_TECH = (
DATA_DIR / "electricity" / "electricity_production_volumes_per_tech.csv"
)
LOSS_PER_COUNTRY = DATA_DIR / "electricity" / "losses_per_country.csv"
LHV_FUELS = DATA_DIR / "fuels_lower_heating_value.txt"
class Electricity:
"""
Class that modifies electricity markets in ecoinvent based on REMIND output data.
:ivar scenario: name of a Remind scenario
:vartype scenario: str
"""
    def __init__(self, db, rmd, scenario, year):
        """
        :param db: ecoinvent database in list-of-dict (wurst) format
        :param rmd: REMIND output data wrapper
        :param scenario: name of a REMIND scenario
        :param year: year for the current analysis
        """
        self.db = db
        self.rmd = rmd
        self.geo = Geomap()
        # (technology name, country) -> production volume, from a static data file
        self.production_per_tech = self.get_production_per_tech_dict()
        # ISO country code -> dict of loss ratios and production volume
        self.losses = self.get_losses_per_country_dict()
        self.scenario = scenario
        self.year = year
        # fuel name -> lower heating value, from a static data file
        self.fuels_lhv = self.get_lower_heating_values()
        # mapping must be built after self.db is set
        mapping = InventorySet(self.db)
        self.emissions_map = mapping.get_remind_to_ecoinvent_emissions()
        self.powerplant_map = mapping.generate_powerplant_map()
@staticmethod
def get_lower_heating_values():
"""
Loads a csv file into a dictionary. This dictionary contains lower heating values for a number of fuel types.
Taken from: https://www.engineeringtoolbox.com/fuels-higher-calorific-values-d_169.html
:return: dictionary that contains lower heating values
:rtype: dict
"""
with open(LHV_FUELS) as f:
return dict(filter(None, csv.reader(f, delimiter=";")))
def get_suppliers_of_a_region(self, ecoinvent_regions, ecoinvent_technologies):
"""
Return a list of electricity-producing datasets which location and name correspond to the region and name given,
respectively.
:param ecoinvent_regions: an ecoinvent region
:type ecoinvent_regions: list
:param ecoinvent_technologies: name of ecoinvent dataset
:type ecoinvent_technologies: str
:return: list of wurst datasets
:rtype: list
"""
return ws.get_many(
self.db,
*[
ws.either(
*[
ws.equals("name", supplier)
for supplier in ecoinvent_technologies
]
),
ws.either(*[ws.equals("location", loc) for loc in ecoinvent_regions]),
ws.equals("unit", "kilowatt hour"),
]
)
@staticmethod
def get_losses_per_country_dict():
"""
Create a dictionary with ISO country codes as keys and loss ratios as values.
:return: ISO country code to loss ratio dictionary
:rtype: dict
"""
if not LOSS_PER_COUNTRY.is_file():
raise FileNotFoundError(
"The production per country dictionary file could not be found."
)
with open(LOSS_PER_COUNTRY) as f:
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
(_, *header), *data = csv_list
csv_dict = {}
for row in data:
key, *values = row
csv_dict[key] = {key: float(value) for key, value in zip(header, values)}
return csv_dict
@staticmethod
def get_production_per_tech_dict():
"""
Create a dictionary with tuples (technology, country) as keys and production volumes as values.
:return: technology to production volume dictionary
:rtype: dict
"""
if not PRODUCTION_PER_TECH.is_file():
raise FileNotFoundError(
"The production per technology dictionary file could not be found."
)
csv_dict = {}
with open(PRODUCTION_PER_TECH) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
csv_dict[(row[0], row[1])] = row[2]
return csv_dict
def get_production_weighted_share(self, supplier, suppliers):
"""
Return the share of production of an electricity-producing dataset in a specific location,
relative to the summed production of similar technologies in locations contained in the same REMIND region.
:param supplier: electricity-producing dataset
:type supplier: wurst dataset
:param suppliers: list of electricity-producing datasets
:type suppliers: list of wurst datasets
:return: share of production relative to the total population
:rtype: float
"""
# Fetch the production volume of the supplier
loc_production = float(
self.production_per_tech.get((supplier["name"], supplier["location"]), 0)
)
# Fetch the total production volume of similar technologies in other locations
# contained within the REMIND region.
total_production = 0
for loc in suppliers:
total_production += float(
self.production_per_tech.get((loc["name"], loc["location"]), 0)
)
# If a corresponding production volume is found.
if total_production != 0:
return loc_production / total_production
else:
# If not, we allocate an equal share of supply
return 1 / len(suppliers)
def get_production_weighted_losses(self, voltage, remind_region):
"""
Return the transformation, transmission and distribution losses at a given voltage level for a given location.
A weighted average is made of the locations contained in the REMIND region.
:param voltage: voltage level (high, medium or low)
:type voltage: str
:param remind_region: Remind region
:type remind_region: str
:return: tuple that contains transformation and distribution losses
:rtype: tuple
"""
# Fetch locations contained in REMIND region
locations = self.geo.remind_to_ecoinvent_location(remind_region)
if voltage == "high":
cumul_prod, transf_loss = 0, 0
for loc in locations:
dict_loss = self.losses.get(
loc,
{"Transformation loss, high voltage": 0, "Production volume": 0},
)
transf_loss += (
dict_loss["Transformation loss, high voltage"]
* dict_loss["Production volume"]
)
cumul_prod += dict_loss["Production volume"]
transf_loss /= cumul_prod
return transf_loss
if voltage == "medium":
cumul_prod, transf_loss, distr_loss = 0, 0, 0
for loc in locations:
dict_loss = self.losses.get(
loc,
{
"Transformation loss, medium voltage": 0,
"Transmission loss to medium voltage": 0,
"Production volume": 0,
},
)
transf_loss += (
dict_loss["Transformation loss, medium voltage"]
* dict_loss["Production volume"]
)
distr_loss += (
dict_loss["Transmission loss to medium voltage"]
* dict_loss["Production volume"]
)
cumul_prod += dict_loss["Production volume"]
transf_loss /= cumul_prod
distr_loss /= cumul_prod
return transf_loss, distr_loss
if voltage == "low":
cumul_prod, transf_loss, distr_loss = 0, 0, 0
for loc in locations:
dict_loss = self.losses.get(
loc,
{
"Transformation loss, low voltage": 0,
"Transmission loss to low voltage": 0,
"Production volume": 0,
},
)
transf_loss += (
dict_loss["Transformation loss, low voltage"]
* dict_loss["Production volume"]
)
distr_loss += (
dict_loss["Transmission loss to low voltage"]
* dict_loss["Production volume"]
)
cumul_prod += dict_loss["Production volume"]
transf_loss /= cumul_prod
distr_loss /= cumul_prod
return transf_loss, distr_loss
    def create_new_markets_low_voltage(self):
        """
        Create low voltage market groups for electricity, by receiving medium voltage market groups as inputs
        and adding transformation and distribution losses.
        Contribution from solar power is added here as well.
        Does not return anything. Modifies the database in place.
        """
        # Loop through REMIND regions
        for region in self.rmd.electricity_markets.coords["region"].values:
            # log rows are collected per region and appended to the log
            # file at the end of each iteration
            created_markets = []
            # Create an empty dataset
            new_dataset = {
                "location": region,
                "name": ("market group for electricity, low voltage"),
                "reference product": "electricity, low voltage",
                "unit": "kilowatt hour",
                "database": self.db[1]["database"],
                "code": str(uuid.uuid4().hex),
                "comment": "Dataset produced from REMIND scenario output results",
            }

            # Static exchanges: the reference product, a sulfur hexafluoride
            # input compensating the transformer's leakage together with the
            # corresponding air emission, and the distribution network.
            new_exchanges = [
                {
                    "uncertainty type": 0,
                    "loc": 1,
                    "amount": 1,
                    "type": "production",
                    "production volume": 0,
                    "product": "electricity, low voltage",
                    "name": "market group for electricity, low voltage",
                    "unit": "kilowatt hour",
                    "location": region,
                },
                {
                    "uncertainty type": 0,
                    "loc": 2.99e-9,
                    "amount": 2.99e-9,
                    "type": "technosphere",
                    "production volume": 0,
                    "product": "sulfur hexafluoride, liquid",
                    "name": "market for sulfur hexafluoride, liquid",
                    "unit": "kilogram",
                    "location": "RoW",
                },
                {
                    "uncertainty type": 0,
                    "loc": 2.99e-9,
                    "amount": 2.99e-9,
                    "type": "biosphere",
                    "input": ("biosphere3", "35d1dff5-b535-4628-9826-4a8fce08a1f2"),
                    "name": "Sulfur hexafluoride",
                    "unit": "kilogram",
                    "categories": ("air", "non-urban air or from high stacks"),
                },
                {
                    "uncertainty type": 0,
                    "loc": 8.74e-8,
                    "amount": 8.74e-8,
                    "type": "technosphere",
                    "production volume": 0,
                    "product": "distribution network, electricity, low voltage",
                    "name": "distribution network construction, electricity, low voltage",
                    "unit": "kilometer",
                    "location": "RoW",
                },
            ]

            # Add the contribution of solar power; solar feeds in at the
            # low-voltage level, unlike the other technologies, which are
            # handled in the high-voltage market.
            solar_amount = 0

            gen_tech = list(
                (
                    tech
                    for tech in self.rmd.electricity_markets.coords["variables"].values
                    if "Solar" in tech
                )
            )
            for technology in gen_tech:
                # If the solar power technology contributes to the mix
                if self.rmd.electricity_markets.loc[region, technology] != 0.0:
                    # Fetch ecoinvent regions contained in the REMIND region
                    ecoinvent_regions = self.geo.remind_to_ecoinvent_location(region)

                    # Contribution in supply
                    amount = self.rmd.electricity_markets.loc[region, technology].values
                    solar_amount += amount

                    # Get the possible names of ecoinvent datasets
                    ecoinvent_technologies = self.powerplant_map[
                        self.rmd.rev_electricity_market_labels[technology]
                    ]

                    # Fetch electricity-producing technologies contained in the REMIND region
                    suppliers = list(
                        self.get_suppliers_of_a_region(
                            ecoinvent_regions, ecoinvent_technologies
                        )
                    )
                    suppliers = self.check_for_production_volume(suppliers)

                    # If no technology is available for the REMIND region
                    if len(suppliers) == 0:
                        # We fetch European technologies instead
                        suppliers = list(
                            self.get_suppliers_of_a_region(
                                ["RER"], ecoinvent_technologies
                            )
                        )
                        suppliers = self.check_for_production_volume(suppliers)

                    # If, after looking for European technologies, no technology is available
                    if len(suppliers) == 0:
                        # We fetch RoW technologies instead
                        suppliers = list(
                            self.get_suppliers_of_a_region(
                                ["RoW"], ecoinvent_technologies
                            )
                        )
                        suppliers = self.check_for_production_volume(suppliers)

                    # Split the solar contribution between suppliers
                    # proportionally to their production volumes.
                    for supplier in suppliers:
                        share = self.get_production_weighted_share(supplier, suppliers)

                        new_exchanges.append(
                            {
                                "uncertainty type": 0,
                                "loc": (amount * share),
                                "amount": (amount * share),
                                "type": "technosphere",
                                "production volume": 0,
                                "product": supplier["reference product"],
                                "name": supplier["name"],
                                "unit": supplier["unit"],
                                "location": supplier["location"],
                            }
                        )

                        created_markets.append(
                            [
                                "low voltage, " + self.scenario + ", " + str(self.year),
                                "n/a",
                                region,
                                0,
                                0,
                                supplier["name"],
                                supplier["location"],
                                share,
                                (share * amount),
                            ]
                        )

            # Add:
            # * an input from the medium voltage market minus solar contribution, including distribution loss
            # * a self-consuming input for transformation loss
            transf_loss, distr_loss = self.get_production_weighted_losses("low", region)
            new_exchanges.append(
                {
                    "uncertainty type": 0,
                    "loc": 0,
                    "amount": (1 - solar_amount) * (1 + distr_loss),
                    "type": "technosphere",
                    "production volume": 0,
                    "product": "electricity, medium voltage",
                    "name": "market group for electricity, medium voltage",
                    "unit": "kilowatt hour",
                    "location": region,
                }
            )
            new_exchanges.append(
                {
                    "uncertainty type": 0,
                    "loc": 0,
                    "amount": transf_loss,
                    "type": "technosphere",
                    "production volume": 0,
                    "product": "electricity, low voltage",
                    "name": "market group for electricity, low voltage",
                    "unit": "kilowatt hour",
                    "location": region,
                }
            )

            created_markets.append(
                [
                    "low voltage, " + self.scenario + ", " + str(self.year),
                    "n/a",
                    region,
                    transf_loss,
                    distr_loss,
                    "low voltage, " + self.scenario + ", " + str(self.year),
                    region,
                    1,
                    (1 - solar_amount) * (1 + distr_loss),
                ]
            )

            # Append this region's log rows; mode "a" assumes the header was
            # written earlier by create_new_markets_high_voltage (mode "w").
            with open(
                DATA_DIR
                / "logs/log created markets {} {}-{}.csv".format(
                    self.scenario, self.year, date.today()
                ),
                "a",
            ) as csv_file:
                writer = csv.writer(csv_file, delimiter=";", lineterminator="\n")
                for line in created_markets:
                    writer.writerow(line)

            new_dataset["exchanges"] = new_exchanges
            self.db.append(new_dataset)
def create_new_markets_medium_voltage(self):
"""
Create medium voltage market groups for electricity, by receiving high voltage market groups as inputs
and adding transformation and distribution losses.
Contribution from solar power is added in low voltage market groups.
Does not return anything. Modifies the database in place.
"""
# Loop through REMIND regions
gen_region = (
region for region in self.rmd.electricity_markets.coords["region"].values
)
created_markets = []
for region in gen_region:
# Create an empty dataset
new_dataset = {
"location": region,
"name": ("market group for electricity, medium voltage"),
"reference product": "electricity, medium voltage",
"unit": "kilowatt hour",
"database": self.db[1]["database"],
"code": str(uuid.uuid1().hex),
"comment": "Dataset produced from REMIND scenario output results",
}
# First, add the reference product exchange
new_exchanges = [
{
"uncertainty type": 0,
"loc": 1,
"amount": 1,
"type": "production",
"production volume": 0,
"product": "electricity, medium voltage",
"name": "market group for electricity, medium voltage",
"unit": "kilowatt hour",
"location": region,
}
]
# Second, add:
# * an input from the high voltage market, including transmission loss
# * an self-consuming input for transformation loss
transf_loss, distr_loss = self.get_production_weighted_losses(
"medium", region
)
new_exchanges.append(
{
"uncertainty type": 0,
"loc": 0,
"amount": 1 + distr_loss,
"type": "technosphere",
"production volume": 0,
"product": "electricity, high voltage",
"name": "market group for electricity, high voltage",
"unit": "kilowatt hour",
"location": region,
}
)
new_exchanges.append(
{
"uncertainty type": 0,
"loc": 0,
"amount": transf_loss,
"type": "technosphere",
"production volume": 0,
"product": "electricity, medium voltage",
"name": "market group for electricity, medium voltage",
"unit": "kilowatt hour",
"location": region,
}
)
# Third, add an input to of sulfur hexafluoride emission to compensate the transformer's leakage
# And an emission of a corresponding amount
new_exchanges.append(
{
"uncertainty type": 0,
"loc": 5.4e-8,
"amount": 5.4e-8,
"type": "technosphere",
"production volume": 0,
"product": "sulfur hexafluoride, liquid",
"name": "market for sulfur hexafluoride, liquid",
"unit": "kilogram",
"location": "RoW",
}
)
new_exchanges.append(
{
"uncertainty type": 0,
"loc": 5.4e-8,
"amount": 5.4e-8,
"type": "biosphere",
"input": ("biosphere3", "35d1dff5-b535-4628-9826-4a8fce08a1f2"),
"name": "Sulfur hexafluoride",
"unit": "kilogram",
"categories": ("air", "non-urban air or from high stacks"),
}
)
# Fourth, transmission line
new_exchanges.append(
{
"uncertainty type": 0,
"loc": 1.8628e-8,
"amount": 1.8628e-8,
"type": "technosphere",
"production volume": 0,
"product": "transmission network, electricity, medium voltage",
"name": "transmission network construction, electricity, medium voltage",
"unit": "kilometer",
"location": "RoW",
}
)
new_dataset["exchanges"] = new_exchanges
created_markets.append(
[
"medium voltage, " + self.scenario + ", " + str(self.year),
"n/a",
region,
transf_loss,
distr_loss,
"medium voltage, " + self.scenario + ", " + str(self.year),
region,
1,
1 + distr_loss,
]
)
self.db.append(new_dataset)
with open(
DATA_DIR
/ "logs/log created markets {} {}-{}.csv".format(
self.scenario, self.year, date.today()
),
"a",
) as csv_file:
writer = csv.writer(csv_file, delimiter=";", lineterminator="\n")
for line in created_markets:
writer.writerow(line)
    def create_new_markets_high_voltage(self):
        """
        Create high voltage market groups for electricity, based on electricity mixes given by REMIND.
        Contribution from solar power is added in low voltage market groups.
        Does not return anything. Modifies the database in place.
        """
        # Loop through REMIND regions
        gen_region = (
            region for region in self.rmd.electricity_markets.coords["region"].values
        )
        # All non-solar technologies; solar feeds the low-voltage market.
        gen_tech = list(
            (
                tech
                for tech in self.rmd.electricity_markets.coords["variables"].values
                if "Solar" not in tech
            )
        )

        created_markets = []
        for region in gen_region:

            # Fetch ecoinvent regions contained in the REMIND region
            ecoinvent_regions = self.geo.remind_to_ecoinvent_location(region)

            # Create an empty dataset
            new_dataset = {
                "location": region,
                "name": ("market group for electricity, high voltage"),
                "reference product": "electricity, high voltage",
                "unit": "kilowatt hour",
                "database": self.db[1]["database"],
                "code": str(uuid.uuid4().hex),
                "comment": "Dataset produced from REMIND scenario output results",
            }

            # First, add the reference product exchange
            new_exchanges = [
                {
                    "uncertainty type": 0,
                    "loc": 1,
                    "amount": 1,
                    "type": "production",
                    "production volume": 0,
                    "product": "electricity, high voltage",
                    "name": "market group for electricity, high voltage",
                    "unit": "kilowatt hour",
                    "location": region,
                }
            ]

            # Second, add a self-consuming input for transformation loss
            transf_loss = self.get_production_weighted_losses("high", region)
            new_exchanges.append(
                {
                    "uncertainty type": 0,
                    "loc": 1,
                    "amount": transf_loss,
                    "type": "technosphere",
                    "production volume": 0,
                    "product": "electricity, high voltage",
                    "name": "market group for electricity, high voltage",
                    "unit": "kilowatt hour",
                    "location": region,
                }
            )

            # Fetch solar contribution in the mix, to subtract it
            # as solar energy is an input of low-voltage markets
            index_solar = [
                ind
                for ind in self.rmd.rev_electricity_market_labels
                if "solar" in ind.lower()
            ]
            solar_amount = self.rmd.electricity_markets.loc[
                region, index_solar
            ].values.sum()

            # Loop through the REMIND technologies
            for technology in gen_tech:

                # If the given technology contributes to the mix
                if self.rmd.electricity_markets.loc[region, technology] != 0.0:

                    # Contribution in supply
                    amount = self.rmd.electricity_markets.loc[region, technology].values

                    # Get the possible names of ecoinvent datasets
                    ecoinvent_technologies = self.powerplant_map[
                        self.rmd.rev_electricity_market_labels[technology]
                    ]

                    # Fetch electricity-producing technologies contained in the REMIND region
                    suppliers = list(
                        self.get_suppliers_of_a_region(
                            ecoinvent_regions, ecoinvent_technologies
                        )
                    )
                    suppliers = self.check_for_production_volume(suppliers)

                    # If no technology is available for the REMIND region
                    if len(suppliers) == 0:
                        # We fetch European technologies instead
                        suppliers = list(
                            self.get_suppliers_of_a_region(
                                ["RER"], ecoinvent_technologies
                            )
                        )
                        suppliers = self.check_for_production_volume(suppliers)

                    # If, after looking for European technologies, no technology is available
                    if len(suppliers) == 0:
                        # We fetch RoW technologies instead
                        suppliers = list(
                            self.get_suppliers_of_a_region(
                                ["RoW"], ecoinvent_technologies
                            )
                        )
                        suppliers = self.check_for_production_volume(suppliers)

                    # Nothing found anywhere: warn and skip this technology.
                    if len(suppliers) == 0:
                        print(
                            "no suppliers for {} in {} with ecoinvent names {}".format(
                                technology, region, ecoinvent_technologies
                            )
                        )

                    # Split the contribution between suppliers proportionally
                    # to their production volumes; the mix is renormalized by
                    # (1 - solar_amount) since solar is excluded here.
                    for supplier in suppliers:
                        share = self.get_production_weighted_share(supplier, suppliers)

                        new_exchanges.append(
                            {
                                "uncertainty type": 0,
                                "loc": (amount * share) / (1 - solar_amount),
                                "amount": (amount * share) / (1 - solar_amount),
                                "type": "technosphere",
                                "production volume": 0,
                                "product": supplier["reference product"],
                                "name": supplier["name"],
                                "unit": supplier["unit"],
                                "location": supplier["location"],
                            }
                        )

                        created_markets.append(
                            [
                                "high voltage, "
                                + self.scenario
                                + ", "
                                + str(self.year),
                                technology,
                                region,
                                transf_loss,
                                0.0,
                                supplier["name"],
                                supplier["location"],
                                share,
                                (amount * share) / (1 - solar_amount),
                            ]
                        )

            new_dataset["exchanges"] = new_exchanges
            self.db.append(new_dataset)

        # Writing log of created markets
        # Mode "w" truncates and writes the header; the medium- and
        # low-voltage builders later append ("a") to this same file.
        with open(
            DATA_DIR
            / "logs/log created markets {} {}-{}.csv".format(
                self.scenario, self.year, date.today()
            ),
            "w",
        ) as csv_file:
            writer = csv.writer(csv_file, delimiter=";", lineterminator="\n")
            writer.writerow(
                [
                    "dataset name",
                    "energy type",
                    "REMIND location",
                    "Transformation loss",
                    "Distr./Transmission loss",
                    "Supplier name",
                    "Supplier location",
                    "Contribution within energy type",
                    "Final contribution",
                ]
            )
            for line in created_markets:
                writer.writerow(line)
def check_for_production_volume(self, suppliers):
# Remove suppliers that do not have a production volume
return [
supplier
for supplier in suppliers
if self.get_production_weighted_share(supplier, suppliers) != 0
]
    def relink_activities_to_new_markets(self):
        """
        Links electricity input exchanges to new datasets with the appropriate REMIND location:
        * "market for electricity, high voltage" --> "market group for electricity, high voltage"
        * "market for electricity, medium voltage" --> "market group for electricity, medium voltage"
        * "market for electricity, low voltage" --> "market group for electricity, low voltage"
        Does not return anything. Modifies exchanges in place.
        """
        # Loop over all activities except the new market groups themselves,
        # and relink their electricity inputs at every voltage level.
        for ds in ws.get_many(
            self.db, ws.exclude(ws.contains("name", "market group for electricity"))
        ):
            for exc in ws.get_many(
                ds["exchanges"],
                *[
                    ws.either(
                        *[
                            ws.contains("name", "market for electricity"),
                            ws.contains("name", "electricity voltage transformation"),
                            ws.contains("name", "market group for electricity"),
                        ]
                    )
                ]
            ):
                # Only rewrite electricity inputs, never the production exchange.
                if exc["type"] != "production" and exc["unit"] == "kilowatt hour":
                    # Voltage level is inferred from the product name; the
                    # location is translated to the matching REMIND region.
                    if "high" in exc["product"]:
                        exc["name"] = "market group for electricity, high voltage"
                        exc["product"] = "electricity, high voltage"
                        exc["location"] = self.geo.ecoinvent_to_remind_location(
                            exc["location"]
                        )
                    if "medium" in exc["product"]:
                        exc["name"] = "market group for electricity, medium voltage"
                        exc["product"] = "electricity, medium voltage"
                        exc["location"] = self.geo.ecoinvent_to_remind_location(
                            exc["location"]
                        )
                    if "low" in exc["product"]:
                        exc["name"] = "market group for electricity, low voltage"
                        exc["product"] = "electricity, low voltage"
                        exc["location"] = self.geo.ecoinvent_to_remind_location(
                            exc["location"]
                        )
                    # Drop the stale "input" key so the link is re-resolved
                    # against the new name/location.
                    if "input" in exc:
                        exc.pop("input")
def find_ecoinvent_fuel_efficiency(self, ds, fuel_filters):
"""
This method calculates the efficiency value set initially, in case it is not specified in the parameter
field of the dataset. In Carma datasets, fuel inputs are expressed in megajoules instead of kilograms.
:param ds: a wurst dataset of an electricity-producing technology
:param fuel_filters: wurst filter to to filter fule input exchanges
:return: the efficiency value set by ecoinvent
"""
def calculate_input_energy(fuel_name, fuel_amount, fuel_unit):
if fuel_unit == 'kilogram' or fuel_unit == 'cubic meter':
lhv = [self.fuels_lhv[k] for k in self.fuels_lhv if k in fuel_name.lower()][
0
]
return float(lhv) * fuel_amount / 3.6
if fuel_unit == 'megajoule':
return fuel_amount / 3.6
not_allowed = ["thermal"]
key = list()
if "parameters" in ds:
key = list(
key
for key in ds["parameters"]
if "efficiency" in key and not any(item in key for item in not_allowed)
)
if len(key) > 0:
return ds["parameters"][key[0]]
else:
energy_input = np.sum(
np.sum(
np.asarray(
[
calculate_input_energy(exc["name"], exc["amount"], exc['unit'])
for exc in ws.technosphere(ds, *fuel_filters)
]
)
)
)
current_efficiency = (
float(ws.reference_product(ds)["amount"]) / energy_input
)
if "paramters" in ds:
ds["parameters"]["efficiency"] = current_efficiency
else:
ds["parameters"] = {"efficiency": current_efficiency}
return current_efficiency
def find_fuel_efficiency_scaling_factor(self, ds, fuel_filters, technology):
    """
    Compute the factor that rescales the ecoinvent efficiency of *ds* to the
    efficiency projected by REMIND for *technology*.

    :param ds: wurst dataset of an electricity-producing technology
    :param fuel_filters: wurst filters selecting the fuel input exchanges
    :param technology: label of an electricity-producing technology
    :return: rescale factor to go from ecoinvent efficiency to REMIND efficiency
    :rtype: float
    """
    eff_ecoinvent = self.find_ecoinvent_fuel_efficiency(ds, fuel_filters)
    # An efficiency above 110% signals a data problem; skip the dataset.
    if eff_ecoinvent > 1.1:
        print(
            "The current efficiency factor for the dataset {} has not been found. "
            "Its current efficiency will remain".format(ds["name"])
        )
        return 1
    regions = self.geo.ecoinvent_to_remind_location(ds["location"])
    eff_remind = (
        self.rmd.electricity_efficiencies.loc[
            dict(
                variables=self.rmd.electricity_efficiency_labels[technology],
                region=regions,
            )
        ]
        .mean()
        .values
    )
    # Append the change to this scenario/year's efficiency log.
    log_path = DATA_DIR / "logs/log efficiencies change {} {}-{}.csv".format(
        self.scenario, self.year, date.today()
    )
    with open(log_path, "a") as csv_file:
        csv.writer(csv_file, delimiter=";", lineterminator="\n").writerow(
            [ds["name"], ds["location"], eff_ecoinvent, eff_remind]
        )
    return eff_ecoinvent / eff_remind
@staticmethod
def update_ecoinvent_efficiency_parameter(ds, scaling_factor):
"""
Update the old efficiency value in the ecoinvent dataset by the newly calculated one.
:param ds: dataset
:type ds: dict
:param scaling_factor: scaling factor (new efficiency / old efficiency)
:type scaling_factor: float
"""
parameters = ds["parameters"]
possibles = ["efficiency", "efficiency_oil_country", "efficiency_electrical"]
for key in possibles:
if key in parameters:
ds["parameters"][key] /= scaling_factor
def get_remind_mapping(self):
    """
    Define filter functions that decide which wurst datasets to modify.

    Each entry maps a REMIND technology label to:
    - 'eff_func': function computing the efficiency scaling factor
    - 'technology filters': wurst filters selecting the datasets to rescale
    - 'fuel filters': wurst filters selecting the fuel input exchanges
    - 'technosphere excludes': exchanges to leave untouched when rescaling

    :return: dictionary that contains filters and functions
    :rtype: dict
    """
    # Filters shared by several technologies: keep markets, treatment
    # activities and similar datasets out of the rescaling.
    generic_excludes = [
        ws.exclude(ws.contains("name", "aluminium industry")),
        ws.exclude(ws.contains("name", "carbon capture and storage")),
        ws.exclude(ws.contains("name", "market")),
        ws.exclude(ws.contains("name", "treatment")),
    ]
    no_imports = [ws.exclude(ws.contains("name", "import"))]
    # Per-technology dataset selectors.
    gas_open_cycle_electricity = [
        ws.equals(
            "name", "electricity production, natural gas, conventional power plant"
        )
    ]
    biomass_chp_electricity = [
        ws.either(ws.contains("name", " wood"), ws.contains("name", "bio")),
        ws.equals("unit", "kilowatt hour"),
        ws.contains("name", "heat and power co-generation"),
    ]
    coal_IGCC = [
        ws.either(ws.contains("name", "coal"), ws.contains("name", "lignite")),
        ws.contains("name", "IGCC"),
        ws.contains("name", "no CCS"),
        ws.equals("unit", "kilowatt hour"),
    ]
    coal_IGCC_CCS = [
        ws.either(ws.contains("name", "coal"), ws.contains("name", "lignite")),
        ws.contains("name", "storage"),
        ws.contains("name", "pre"),
        ws.equals("unit", "kilowatt hour"),
    ]
    coal_PC_CCS = [
        ws.either(ws.contains("name", "coal"), ws.contains("name", "lignite")),
        ws.contains("name", "storage"),
        ws.equals("unit", "kilowatt hour"),
    ]
    coal_PC = [
        ws.either(ws.contains("name", "coal"), ws.contains("name", "lignite")),
        ws.exclude(ws.contains("name", "storage")),
        ws.exclude(ws.contains("name", "heat")),
        ws.exclude(ws.contains("name", "IGCC")),
        ws.equals("unit", "kilowatt hour"),
    ]
    gas_CCS = [
        ws.contains("name", "natural gas"),
        ws.either(ws.contains("name", "post"), ws.contains("name", "pre")),
        ws.contains("name", "storage"),
        ws.equals("unit", "kilowatt hour"),
    ]
    biomass_IGCC_CCS = [
        ws.either(
            ws.contains("name", "SNG"),
            ws.contains("name", "wood"),
            ws.contains("name", "BIGCC"),
        ),
        ws.contains("name", "storage"),
        ws.equals("unit", "kilowatt hour"),
    ]
    biomass_IGCC = [
        ws.contains("name", "BIGCC"),
        ws.contains("name", "no CCS"),
        ws.equals("unit", "kilowatt hour"),
    ]
    # Map each REMIND technology label to its filters and scaling function.
    return {
        "Coal IGCC": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": coal_IGCC,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "Hard coal"), ws.contains("name", "Lignite")
                ),
                ws.equals("unit", "megajoule"),
            ],
            "technosphere excludes": [],
        },
        "Coal IGCC CCS": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": coal_IGCC_CCS,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "Hard coal"), ws.contains("name", "Lignite")
                ),
                ws.equals("unit", "megajoule"),
            ],
            "technosphere excludes": [],
        },
        "Coal PC": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": coal_PC + generic_excludes,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "hard coal"),
                    ws.contains("name", "Hard coal"),
                    ws.contains("name", "lignite"),
                    ws.contains("name", "Lignite")
                ),
                ws.doesnt_contain_any("name", ("ash", "SOx")),
                ws.either(ws.equals("unit", "kilogram"),ws.equals("unit", "megajoule")),
            ],
            "technosphere excludes": [],
        },
        "Coal PC CCS": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": coal_PC_CCS,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "Hard coal"), ws.contains("name", "Lignite")
                ),
                ws.equals("unit", "megajoule"),
            ],
            "technosphere excludes": [],
        },
        "Coal CHP": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": filters.coal_chp_electricity + generic_excludes,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "hard coal"), ws.contains("name", "lignite")
                ),
                ws.doesnt_contain_any("name", ("ash", "SOx")),
                ws.equals("unit", "kilogram"),
            ],
            "technosphere excludes": [],
        },
        "Gas OC": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": gas_open_cycle_electricity
            + generic_excludes
            + no_imports,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "natural gas, low pressure"),
                    ws.contains("name", "natural gas, high pressure"),
                ),
                ws.equals("unit", "cubic meter"),
            ],
            "technosphere excludes": [],
        },
        "Gas CC": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": filters.gas_combined_cycle_electricity
            + generic_excludes
            + no_imports,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "natural gas, low pressure"),
                    ws.contains("name", "natural gas, high pressure"),
                ),
                ws.equals("unit", "cubic meter"),
            ],
            "technosphere excludes": [],
        },
        "Gas CHP": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": filters.gas_chp_electricity
            + generic_excludes
            + no_imports,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "natural gas, low pressure"),
                    ws.contains("name", "natural gas, high pressure"),
                ),
                ws.equals("unit", "cubic meter"),
            ],
            "technosphere excludes": [],
        },
        "Gas CCS": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": gas_CCS,
            "fuel filters": [
                ws.contains("name", "Natural gas"),
                ws.equals("unit", "megajoule"),
            ],
            "technosphere excludes": [],
        },
        "Oil": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": (
                filters.oil_open_cycle_electricity
                + generic_excludes
                + [ws.exclude(ws.contains("name", "nuclear"))]
            ),
            "fuel filters": [
                ws.contains("name", "heavy fuel oil"),
                ws.equals("unit", "kilogram"),
            ],
            "technosphere excludes": [],
        },
        "Biomass CHP": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": biomass_chp_electricity + generic_excludes,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "wood pellet"),
                    ws.contains("name", "biogas"),
                ),
                ws.either(
                    ws.equals("unit", "kilogram"), ws.equals("unit", "cubic meter")
                ),
            ],
            "technosphere excludes": [],
        },
        "Biomass IGCC CCS": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": biomass_IGCC_CCS,
            "fuel filters": [
                ws.either(
                    ws.contains("name", "100% SNG, burned in CC plant"),
                    ws.contains("name", "Wood chips"),
                    ws.contains("name", "Hydrogen"),
                ),
                ws.either(
                    ws.equals("unit", "megajoule"), ws.equals("unit", "kilogram"),
                ),
            ],
            "technosphere excludes": [],
        },
        "Biomass IGCC": {
            "eff_func": self.find_fuel_efficiency_scaling_factor,
            "technology filters": biomass_IGCC,
            "fuel filters": [
                ws.contains("name", "Hydrogen"),
                ws.either(
                    ws.equals("unit", "kilogram"),
                    ws.equals("unit", "megajoule"),
                ),
            ],
            "technosphere excludes": [],
        },
    }
def update_electricity_efficiency(self):
    """
    Rescale each ecoinvent coal, gas, oil and biomass power-generation
    dataset so that its efficiency and emissions match the REMIND model.

    :return: a wurst database, with rescaled electricity-producing datasets.
    :rtype: list
    """
    technologies_map = self.get_remind_mapping()

    # Start a fresh log file for this scenario/year combination.
    if not os.path.exists(DATA_DIR / "logs"):
        os.makedirs(DATA_DIR / "logs")
    log_path = DATA_DIR / "logs/log efficiencies change {} {}-{}.csv".format(
        self.scenario, self.year, date.today()
    )
    with open(log_path, "w") as csv_file:
        csv.writer(csv_file, delimiter=";", lineterminator="\n").writerow(
            ["dataset name", "location", "original efficiency", "new efficiency"]
        )
    print(
        "Log of changes in power plants efficiencies saved in {}".format(
            DATA_DIR / "logs"
        )
    )

    for technology, spec in technologies_map.items():
        print("Rescale inventories and emissions for", technology)
        datasets = list(ws.get_many(self.db, *spec["technology filters"]))
        # no activities found? Check filters!
        assert len(datasets) > 0, "No dataset found for {}".format(technology)
        for dataset in datasets:
            # Rescale fuel use according to the REMIND efficiency value.
            scaling_factor = spec["eff_func"](
                dataset, spec["fuel filters"], technology
            )
            self.update_ecoinvent_efficiency_parameter(dataset, scaling_factor)
            # Rescale all technosphere exchanges, leaving the mapped
            # pollutant flows to be overwritten below.
            wurst.change_exchanges_by_constant_factor(
                dataset,
                float(scaling_factor),
                spec["technosphere excludes"],
                [ws.doesnt_contain_any("name", self.emissions_map)],
            )
            # Update biosphere exchanges according to GAINS emission values.
            for exc in ws.biosphere(
                dataset,
                ws.either(*[ws.contains("name", x) for x in self.emissions_map]),
            ):
                pollutant = self.emissions_map[exc["name"]]
                remind_emission = self.rmd.electricity_emissions.loc[
                    dict(
                        region=self.geo.ecoinvent_to_remind_location(
                            dataset["location"]
                        ),
                        pollutant=pollutant,
                        sector=self.rmd.electricity_emission_labels[technology],
                    )
                ].values.item(0)
                if exc["amount"] == 0:
                    # NOTE(review): zero amount — the factor passed is the
                    # REMIND value itself; confirm rescale_exchange semantics.
                    wurst.rescale_exchange(
                        exc, remind_emission / 1, remove_uncertainty=True
                    )
                else:
                    wurst.rescale_exchange(exc, remind_emission / exc["amount"])
    return self.db
def update_electricity_markets(self):
    """
    Replace the ecoinvent electricity markets with REMIND-based market groups.

    Deletes the existing electricity market datasets, creates high, medium
    and low voltage market groups, and relinks every electricity-consuming
    dataset to the newly created markets.

    :return: a wurst database with new market groups for electricity
    :rtype: list
    """
    # We first need to delete 'market for electricity' and 'market group for electricity' datasets
    print("Remove old electricity datasets")
    obsolete_names = [
        "market group for electricity, high voltage",
        "market group for electricity, medium voltage",
        "market group for electricity, low voltage",
        "market for electricity, high voltage",
        "market for electricity, medium voltage",
        "market for electricity, low voltage",
        "electricity, high voltage, import",
        "electricity, high voltage, production mix",
    ]

    def is_obsolete(dataset):
        return any(token in dataset["name"] for token in obsolete_names)

    # Write a log of the markets about to be deleted.
    deleted_markets = [
        [dataset["name"], dataset["location"]]
        for dataset in self.db
        if is_obsolete(dataset)
    ]
    if not os.path.exists(DATA_DIR / "logs"):
        os.makedirs(DATA_DIR / "logs")
    log_path = DATA_DIR / "logs/log deleted markets {} {}-{}.csv".format(
        self.scenario, self.year, date.today()
    )
    with open(log_path, "w") as csv_file:
        writer = csv.writer(csv_file, delimiter=";", lineterminator="\n")
        writer.writerow(["dataset name", "location"])
        for row in deleted_markets:
            writer.writerow(row)

    self.db = [dataset for dataset in self.db if not is_obsolete(dataset)]

    # We then need to create the REMIND market groups at each voltage level.
    print("Create high voltage markets.")
    self.create_new_markets_high_voltage()
    print("Create medium voltage markets.")
    self.create_new_markets_medium_voltage()
    print("Create low voltage markets.")
    self.create_new_markets_low_voltage()

    # Finally, we need to relink all electricity-consuming activities to the new electricity markets
    print("Link activities to new electricity markets.")
    self.relink_activities_to_new_markets()
    print(
        "Log of deleted electricity markets saved in {}".format(DATA_DIR / "logs")
    )
    print(
        "Log of created electricity markets saved in {}".format(DATA_DIR / "logs")
    )
    return self.db
<a href="https://ascl.net/2204.008"><img src="https://img.shields.io/badge/ascl-2204.008-blue.svg?colorB=262255" alt="ascl:2204.008" /></a>
[](https://pypi.python.org/pypi/rmnest)
[](https://pypi.python.org/pypi/rmnest)
[](https://github.com/mlower/rmnest/blob/main/LICENSE)
# RMNest
*RMNest* is an open source python package for estimating both standard and generalised
rotation measures via direct fits to Stokes *Q*, *U* and *V* spectra.
## Installation
The latest release of *RMNest* can be installed from [PyPi](https://pypi.python.org/pypi/rmnest) by running
the following
```bash
pip install rmnest
```
Note that while a working installation of the PSRCHIVE Python-3 bindings is
not necessary for using *RMNest*, it is strongly recommended.
## Requirements
The following packages are required to running *RMNest*.
- numpy: Array manipulation
- matplotlib: Modules for plotting
- bilby: Inference calculations framework
- dynesty: Modules for nested sampling
## Usage
*RMNest* can be run directly from the command line within using `rmnest`.
As an example, the below command would run a standard rotation-measure fit on the provided test data after frequency-averaging to 128 channels
within a [pulse] phase window between phase = 0.45 to 0.55
```bash
rmnest archive test/2020-03-16-18\:12\:00.calib.ST -o test/output/ -l testrun --window 0.45:0.55 -f 128
```
Alternatively, fitting for the generalised form of Faraday rotation, sometimes referred to as Faraday conversion
(see e.g. [Kennett & Melrose 1998](https://ui.adsabs.harvard.edu/abs/1998PASA...15..211K/abstract)), can be performed
by adding the ``--gfr`` and ``--free_alpha`` flags as
```bash
rmnest <archive>.ar -o <outdir> -l testrun --window 0.45:0.55 --gfr --free_alpha
```
Omitting the `--free_alpha` flag will result in the spectral exponent being fixed to 3. Details of the underlying phenomenological model can be
found in a technical document by [Lower (2021)](https://ui.adsabs.harvard.edu/abs/2021arXiv210809429L).
The likelihood and Faraday rotation models, as well as the general `RMFit` class in `fit_RM.py`, can also be imported like any other API.
## Issues and Contributing
If you encounter any issues with *RMNest*, or have in mind a feature that
currently does not exist, then you can contribute by opening a
[Github Issue](https://github.com/mlower/rmnest/issues) and outlining the feature.
## Referencing RMNest
If you make use of *RMNest* in your research, we would greatly appreciate it if you
cite both the ASCL entry ([Lower et al. 2022](https://ui.adsabs.harvard.edu/abs/2022ascl.soft04008L))
and the papers behind its development.
```
@software{2022ascl.soft04008L,
author = {{Lower}, Marcus E. and {Kumar}, Pravir and {Shannon}, Ryan M.},
title = "{RMNest: Bayesian approach to measuring Faraday rotation and conversion in radio signals}",
keywords = {Software},
howpublished = {Astrophysics Source Code Library, record ascl:2204.008},
year = 2022,
month = apr,
eid = {ascl:2204.008},
pages = {ascl:2204.008},
archivePrefix = {ascl},
eprint = {2204.008},
adsurl = {https://ui.adsabs.harvard.edu/abs/2022ascl.soft04008L},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
For standard rotation measure fitting, then
please cite [Bannister et al. (2019)](https://ui.adsabs.harvard.edu/abs/2019Sci...365..565B).
```
@ARTICLE{2019Sci...365..565B,
author = {{Bannister}, K.~W. and {Deller}, A.~T. and {Phillips}, C. and {Macquart}, J. -P. and {Prochaska}, J.~X. and {Tejos}, N. and {Ryder}, S.~D. and {Sadler}, E.~M. and {Shannon}, R.~M. and {Simha}, S. and {Day}, C.~K. and {McQuinn}, M. and {North-Hickey}, F.~O. and {Bhandari}, S. and {Arcus}, W.~R. and {Bennert}, V.~N. and {Burchett}, J. and {Bouwhuis}, M. and {Dodson}, R. and {Ekers}, R.~D. and {Farah}, W. and {Flynn}, C. and {James}, C.~W. and {Kerr}, M. and {Lenc}, E. and {Mahony}, E.~K. and {O'Meara}, J. and {Os{\l}owski}, S. and {Qiu}, H. and {Treu}, T. and {U}, V. and {Bateman}, T.~J. and {Bock}, D.~C. -J. and {Bolton}, R.~J. and {Brown}, A. and {Bunton}, J.~D. and {Chippendale}, A.~P. and {Cooray}, F.~R. and {Cornwell}, T. and {Gupta}, N. and {Hayman}, D.~B. and {Kesteven}, M. and {Koribalski}, B.~S. and {MacLeod}, A. and {McClure-Griffiths}, N.~M. and {Neuhold}, S. and {Norris}, R.~P. and {Pilawa}, M.~A. and {Qiao}, R. -Y. and {Reynolds}, J. and {Roxby}, D.~N. and {Shimwell}, T.~W. and {Voronkov}, M.~A. and {Wilson}, C.~D.},
title = "{A single fast radio burst localized to a massive galaxy at cosmological distance}",
journal = {Science},
keywords = {ASTRONOMY, Astrophysics - High Energy Astrophysical Phenomena, Astrophysics - Cosmology and Nongalactic Astrophysics},
year = 2019,
month = aug,
volume = {365},
number = {6453},
pages = {565-570},
doi = {10.1126/science.aaw5903},
archivePrefix = {arXiv},
eprint = {1906.11476},
primaryClass = {astro-ph.HE},
adsurl = {https://ui.adsabs.harvard.edu/abs/2019Sci...365..565B},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
If you used *RMNest* for generalised Faraday rotation measure fitting, please include
a citation to [Lower (2021)](https://ui.adsabs.harvard.edu/abs/2021arXiv210809429L).
```
@ARTICLE{2021arXiv210809429L,
author = {{Lower}, Marcus E.},
title = "{A phenomenological model for measuring generalised Faraday rotation}",
journal = {arXiv e-prints},
keywords = {Astrophysics - High Energy Astrophysical Phenomena},
year = 2021,
month = aug,
eid = {arXiv:2108.09429},
pages = {arXiv:2108.09429},
archivePrefix = {arXiv},
eprint = {2108.09429},
primaryClass = {astro-ph.HE},
adsurl = {https://ui.adsabs.harvard.edu/abs/2021arXiv210809429L},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
| /rmnest-0.2.0.tar.gz/rmnest-0.2.0/README.md | 0.669637 | 0.986031 | README.md | pypi |
import pytoml as toml
from pathlib import Path
from .utils import slugify
# Lesson-type identifiers stored in each lesson's `.rmotr` metadata file.
ASSIGNMENT = 'assignment'
READING = 'reading'
class BaseTrackObject(object):
    """Behaviour shared by Course, Unit and Lesson model objects."""

    def __str__(self):
        return "({}) - {} - {}".format(
            self.__class__.__name__, self.name, self.uuid
        )

    __unicode__ = __str__
    __repr__ = __str__

    def _slugify_with_order(self, prefix, order, name):
        # e.g. ('unit', 2, 'Intro to Python') -> 'unit-2-intro-to-python';
        # an empty name drops the slug suffix entirely.
        suffix = ('-' + slugify(name)) if name else ''
        return '{prefix}-{order}{slug}'.format(
            prefix=prefix, order=order, slug=suffix
        )

    @property
    def directory_path(self):
        return self._directory_path

    @directory_path.setter
    def directory_path(self, path):
        # Accept both str and Path; always store a Path instance.
        self._directory_path = path if isinstance(path, Path) else Path(path)
class Course(BaseTrackObject):
    """Top-level container: an ordered collection of units within a track."""

    def __init__(self, directory_path, uuid, name, track):
        self._directory_path = directory_path
        self.uuid = uuid
        self.name = name
        self.track = track
        self._units = []

    def add_unit(self, unit):
        self._units.append(unit)

    def unit_count(self):
        return len(self._units)

    def iter_units(self):
        # Yield units sorted by their explicit order attribute.
        for unit in sorted(self._units, key=lambda item: item.order):
            yield unit

    def iter_children(self):
        # For a course, children are its units.
        for unit in self.iter_units():
            yield unit

    @property
    def last_unit(self):
        # Highest-order unit, or None when the course has no units yet.
        if not self._units:
            return None
        return sorted(self._units, key=lambda item: item.order)[-1]

    @property
    def last_child_object(self):
        return self.last_unit
class Unit(BaseTrackObject):
    """A unit groups an ordered list of lessons inside a course."""

    def __init__(self, course, uuid, name, order, directory_path=None):
        self.course = course
        self._directory_path = directory_path
        self.slug = self._slugify_with_order('unit', order, name)
        self.uuid = uuid
        self.name = name
        self.order = order
        self._lessons = []

    def add_lesson(self, lesson):
        self._lessons.append(lesson)

    def get_dot_rmotr_as_toml(self):
        # Serialized content of the unit's `.rmotr` metadata file.
        return toml.dumps({'uuid': self.uuid, 'name': self.name})

    def lesson_count(self):
        return len(self._lessons)

    def iter_lessons(self):
        # Yield lessons sorted by their explicit order attribute.
        for lesson in sorted(self._lessons, key=lambda item: item.order):
            yield lesson

    def iter_children(self):
        # For a unit, children are its lessons.
        for lesson in self.iter_lessons():
            yield lesson

    @property
    def parent(self):
        return self.course

    @property
    def last_lesson(self):
        # Highest-order lesson, or None when the unit has no lessons yet.
        if not self._lessons:
            return None
        return sorted(self._lessons, key=lambda item: item.order)[-1]

    @property
    def last_child_object(self):
        return self.last_lesson
class Lesson(BaseTrackObject):
    """A single lesson (reading or assignment) belonging to a unit."""

    def __init__(self, unit, uuid, name, order,
                 directory_path=None, readme_path=None, readme_content=None):
        self.unit = unit
        self._directory_path = directory_path
        self.slug = self._slugify_with_order('lesson', order, name)
        self.uuid = uuid
        self.name = name
        self.order = order
        self.readme_path = readme_path
        self.readme_content = readme_content

    def get_dot_rmotr_as_toml(self):
        # `self.type` is set by the concrete subclass (reading / assignment).
        return toml.dumps({
            'uuid': self.uuid,
            'name': self.name,
            'type': self.type
        })

    @property
    def parent(self):
        return self.unit
class ReadingLesson(Lesson):
    """Lesson consisting only of reading material."""

    def __init__(self, *args, **kwargs):
        super(ReadingLesson, self).__init__(*args, **kwargs)
        self.type = READING
class AssignmentLesson(Lesson):
    """Lesson carrying a coding assignment (main file, tests and solutions)."""

    def __init__(self, *args, **kwargs):
        super(AssignmentLesson, self).__init__(*args, **kwargs)
        self.type = ASSIGNMENT
from __future__ import unicode_literals
from pathlib import Path
import pytoml as toml
from .models import *
from . import utils
from . import exceptions
# Glob patterns used to discover unit/lesson directories on disk.
UNIT_GLOB = 'unit-*'
LESSON_GLOB = 'lesson-*'
# Well-known file and directory names inside a course tree.
DOT_RMOTR_FILE_NAME = '.rmotr'
README_FILE_NAME = 'README.md'
MAIN_PY_NAME = 'main.py'
TESTS_DIR_NAME = 'tests'
SOLUTIONS_DIR_NAME = 'solutions'
FILES_DIR_NAME = 'files'
# Skeleton file names created for assignment lessons.
TEST_PY_NAME = 'test_.py'
EMPTY_SOLUTION_NAME = 'solution_.py'
def read_dot_rmotr_file(path):
    """Parse the TOML `.rmotr` metadata file in *path* and return it as a dict."""
    with (path / DOT_RMOTR_FILE_NAME).open('r') as fp:
        return toml.loads(fp.read())
def get_lesson_class_from_type(_type):
    """Map a lesson-type string to its model class; raise for unknown types."""
    lesson_class = {READING: ReadingLesson, ASSIGNMENT: AssignmentLesson}.get(_type)
    if lesson_class is None:
        raise exceptions.InvalidLessonTypeException(
            '{} is not a valid lesson type'.format(_type))
    return lesson_class
def read_lesson(unit, lesson_path):
    """Build a Lesson object (reading or assignment) from a lesson directory."""
    order = utils.get_order_from_numbered_object_directory_name(lesson_path.name)
    dot_rmotr = read_dot_rmotr_file(lesson_path)
    lesson_class = get_lesson_class_from_type(dot_rmotr['type'])
    readme_path = lesson_path / README_FILE_NAME
    with readme_path.open(mode='r') as fp:
        readme_content = fp.read()
    return lesson_class(
        unit=unit,
        directory_path=lesson_path,
        uuid=dot_rmotr['uuid'],
        name=dot_rmotr['name'],
        order=order,
        readme_path=readme_path,
        readme_content=readme_content,
    )
def read_lessons(unit):
    """Load every lesson-* directory of *unit* as a Lesson instance."""
    return [
        read_lesson(unit, path)
        for path in unit.directory_path.glob(LESSON_GLOB)
    ]
def read_unit(course, unit_path):
    """Build a Unit (including its lessons) from a unit directory on disk."""
    dot_rmotr = read_dot_rmotr_file(unit_path)
    unit = Unit(
        course=course,
        directory_path=unit_path,
        uuid=dot_rmotr['uuid'],
        name=dot_rmotr['name'],
        order=utils.get_order_from_numbered_object_directory_name(unit_path.name),
    )
    unit._lessons = read_lessons(unit)
    return unit
def read_units(course):
    """Load every unit-* directory of *course* as a Unit instance."""
    return [
        read_unit(course, path)
        for path in course.directory_path.glob(UNIT_GLOB)
    ]
def read_course_from_path(course_directory_path):
    """Load a full Course object (units and lessons included) from disk."""
    if not isinstance(course_directory_path, Path):
        course_directory_path = Path(course_directory_path)
    dot_rmotr = read_dot_rmotr_file(course_directory_path)
    course = Course(
        directory_path=course_directory_path,
        uuid=dot_rmotr['uuid'],
        name=dot_rmotr['name'],
        track=dot_rmotr['track'],
    )
    course._units = read_units(course)
    return course
def read_unit_from_path(unit_directory_path):
    """Load the Unit stored at *unit_directory_path* via its parent course."""
    target_uuid = read_dot_rmotr_file(unit_directory_path)['uuid']
    course = read_course_from_path(unit_directory_path.parent)
    # None when no unit of the course carries the requested uuid.
    return next(
        (unit for unit in course.iter_units() if unit.uuid == target_uuid),
        None,
    )
def read_lesson_from_path(lesson_directory_path):
    """Load the Lesson stored at *lesson_directory_path* via its parent unit."""
    unit = read_unit_from_path(lesson_directory_path.parent)
    target_uuid = read_dot_rmotr_file(lesson_directory_path)['uuid']
    # None when no lesson of the unit carries the requested uuid.
    return next(
        (lesson for lesson in unit.iter_lessons() if lesson.uuid == target_uuid),
        None,
    )
def _create_assignment_files(lesson_directory_path):
    """Create the skeleton of an assignment lesson: main file, tests, solutions."""
    tests_dir = lesson_directory_path / TESTS_DIR_NAME
    solutions_dir = lesson_directory_path / SOLUTIONS_DIR_NAME
    tests_dir.mkdir()
    solutions_dir.mkdir()
    skeleton_files = (
        lesson_directory_path / MAIN_PY_NAME,
        tests_dir / TEST_PY_NAME,
        solutions_dir / EMPTY_SOLUTION_NAME,
    )
    for file_path in skeleton_files:
        with file_path.open(mode='w') as fp:
            fp.write('# empty')
def create_unit(directory_path, name, order):
    """Create a unit directory (with .rmotr and README files); return its path."""
    unit_dir = directory_path / utils.generate_unit_directory_name(name, order)
    unit_dir.mkdir()
    with (unit_dir / DOT_RMOTR_FILE_NAME).open(mode='w') as fp:
        fp.write(utils.generate_unit_dot_rmotr_file(name=name))
    with (unit_dir / README_FILE_NAME).open(mode='w') as fp:
        fp.write('# {}\n'.format(name))
    return unit_dir
def create_lesson(directory_path, name, order, attrs):
    """Create a lesson directory of the type in attrs['type']; return its path."""
    _type = attrs['type']
    lesson_dir = directory_path / utils.generate_lesson_directory_name(name, order)
    lesson_dir.mkdir()
    # Every lesson ships with an (initially empty) files/ directory.
    (lesson_dir / FILES_DIR_NAME).mkdir()
    with (lesson_dir / DOT_RMOTR_FILE_NAME).open(mode='w') as fp:
        fp.write(utils.generate_lesson_dot_rmotr_file(name=name, _type=_type))
    with (lesson_dir / README_FILE_NAME).open(mode='w') as fp:
        # Only assignments get a pre-filled README title; readings start blank.
        if _type == ASSIGNMENT:
            fp.write('# {}\n'.format(name))
    if _type == ASSIGNMENT:
        _create_assignment_files(lesson_dir)
    return lesson_dir
def rename_child_object_incrementing_order(model_obj, _type):
    """Rename model_obj's directory on disk so its order number grows by one."""
    bumped_name = utils.generate_model_object_directory_name(
        model_obj.name, model_obj.order + 1, _type)
    model_obj.directory_path.rename(model_obj.parent.directory_path / bumped_name)
    # NOTE(review): this returns the pre-rename path and does not update
    # model_obj itself — confirm callers do not rely on the new location.
    return model_obj.directory_path
def rename_child_object_decrementing_order(model_obj, _type):
    """Rename model_obj's directory on disk so its order number shrinks by one."""
    lowered_name = utils.generate_model_object_directory_name(
        model_obj.name, model_obj.order - 1, _type)
    model_obj.directory_path.rename(model_obj.parent.directory_path / lowered_name)
    # NOTE(review): this returns the pre-rename path and does not update
    # model_obj itself — confirm callers do not rely on the new location.
    return model_obj.directory_path
def make_space_between_child_objects(model_obj, order):
    """Shift every child at position >= *order* one slot up to free that slot."""
    if isinstance(model_obj, Course):
        child_type = 'unit'
    elif isinstance(model_obj, Unit):
        child_type = 'lesson'
    else:
        raise AttributeError("Can't identify object %s" % model_obj)
    affected = (child for child in model_obj.iter_children() if child.order >= order)
    for child in affected:
        rename_child_object_incrementing_order(child, child_type)
def _rename_other_children_after_deleting_order(model_obj, order):
    """Shift every child past *order* one slot down to close the deleted gap."""
    if isinstance(model_obj, Course):
        child_type = 'unit'
    elif isinstance(model_obj, Unit):
        child_type = 'lesson'
    else:
        raise AttributeError("Can't identify object %s" % model_obj)
    affected = (child for child in model_obj.iter_children() if child.order > order)
    for child in affected:
        rename_child_object_decrementing_order(child, child_type)
def _add_object_to_parent(directory_path, name, creation_callback,
                          get_model_callback,
                          order=None,
                          creation_attributes=None):
    """Insert a new child (unit or lesson) under the parent at *directory_path*.

    When *order* falls inside the existing sequence, later siblings are first
    renamed one slot up to make room. With no *order*, the child is appended.
    """
    if not isinstance(directory_path, Path):
        directory_path = Path(directory_path)
    parent = get_model_callback(directory_path)
    last_child = parent.last_child_object
    last_order = (last_child and last_child.order) or 0
    if order is None:
        order = last_order + 1
    if order <= last_order:
        make_space_between_child_objects(parent, order)
    creation_kwargs = {
        'directory_path': directory_path,
        'name': name,
        'order': order,
    }
    if creation_attributes:
        creation_kwargs['attrs'] = creation_attributes
    return creation_callback(**creation_kwargs)
def add_unit_to_course(course_directory_path, name, order=None):
    """Create a new unit named *name* inside the course, optionally at *order*."""
    return _add_object_to_parent(
        course_directory_path, name, create_unit,
        read_course_from_path, order)
def add_lesson_to_unit(unit_directory_path, name, _type, order=None):
    """Create a new lesson of *_type* inside the unit, optionally at *order*."""
    return _add_object_to_parent(
        unit_directory_path, name, create_lesson,
        read_unit_from_path,
        order, {'type': _type})
def _remove_child_from_directory(directory_path, get_model_callback):
    """Delete the unit/lesson stored at *directory_path* and close the order gap.

    :param directory_path: path of the child object's directory (str or Path)
    :param get_model_callback: loader returning the model object for that path
    """
    # Hoisted from mid-function: imports belong at the top of their scope.
    import shutil

    if not isinstance(directory_path, Path):
        directory_path = Path(directory_path)
    model_obj = get_model_callback(directory_path)
    parent = model_obj.parent
    # If the deleted child is not the last one, renumber the later siblings
    # so that the remaining orders stay contiguous.
    if parent.last_child_object.order != model_obj.order:
        _rename_other_children_after_deleting_order(parent, model_obj.order)
    shutil.rmtree(str(model_obj.directory_path.absolute()))
def remove_unit_from_directory(directory_path):
    """Delete the unit at *directory_path*, renumbering the remaining units."""
    return _remove_child_from_directory(directory_path, read_unit_from_path)
def remove_lesson_from_directory(directory_path):
    """Delete the lesson at *directory_path*, renumbering the remaining lessons."""
    return _remove_child_from_directory(directory_path, read_lesson_from_path)
# Risk Management Python (rmpy) Package
The `rmpy` package is a comprehensive and powerful tool designed for risk management and quantitative finance in Python. It provides a suite of functionalities to perform essential risk assessments, calculations, and analyses on financial assets and portfolios. This package streamlines the process of evaluating the risk and performance of various assets, allowing users to make informed decisions for managing their investments.
---
## Installation
```python
pip install rmpy
```
---
## Git Repository
## https://github.com/GianMarcoOddo/rmpy
---
## Key Features
- Parametric and Non-Parametric Value at Risk (pVaR) calculations for single assets and portfolios.
- Historical pVaR and npVaR calculation using Yahoo Finance data for single assets and portfolios.
- Confidence level pVaR and npVaR calculations for single assets and portfolios.
- Support for daily and weekly frequency data and customizable intervals.
- Calculation of marginal, component, and relative component Value at Risk (VaR) for portfolios.
- A user-friendly interface with the option to display or suppress output as needed.
## Who is it for?
The `rmpy` package is ideal for quantitative analysts, portfolio managers, risk managers, and anyone interested in financial risk management. The package is easy to use, well-documented, and compatible with various data sources and formats. It is a powerful addition to the Python ecosystem for risk management and a valuable resource for those looking to enhance their understanding of financial risk.
Get started with `rmpy` today and take control of your financial risk management!
# 1. NpVaR Module
This module calculates the Non-parametric Value-at-Risk (NpVaR) and associated functions for single assets or portfolios using historical returns.
The functions with the `yf_` prefix use data from the Yahoo Finance API, whereas the others use the provided returns. The `_single` functions are for individual assets, and the `_port` functions are for portfolios. The `_conflevel_` functions calculate the NpVaR with a specified confidence level. The `_summary_` functions provide summaries for NpVaR calculations. The `_marg_NpVaRs_scale_factor` functions calculate the marginal NpVaRs given a scaling factor.
Please refer to the code below for the syntax and examples on how to use each function. The input parameters and their usage are described within the comments.
It contains the following functions:
`1. yf_npVaR_single`
- yf_npVaR_single(ticker, position, start_date, end_date, freq="daily", alpha=0.01, kind="abs", display=True)
This function yf_npVaR_single calculates the quantile non-parametric value at risk (VaR) for a SINGLE asset position using historical prices obtained from Yahoo Finance (yfinance).
#### Args:
- ticker: the asset symbol or ticker of the company for the npVaR will be calculated
- position: the size of the position (value).
- start_date: the starting date for which we want to obtain historical prices. This can be a string in the format "YYYY-MM-DD" or a datetime object.
- end_date: the ending date for which we want to obtain historical prices. This can be a string in the format "YYYY-MM-DD" or a datetime object. By default, this is set to "today"
which will use the current date as the ending date.
- freq: the frequency of the historical prices. This can be set to "daily", "weekly", or "monthly". By default, this is set to "daily".
- alpha: the confidence level for the VaR calculation. This is a value between 0 and 1, representing the probability of loss exceeding the VaR. By default, this is set to 0.01 (1%).
- kind: the type of VaR calculation to use. This can be set to "abs" for absolute VaR or "rel" for relative VaR. By default, this is set to "abs".
- display: a boolean value or string representing whether to display the calculated VaR. This can be set to True or False. By default, this is set to True.
### Example:
```python
from rmpy.NpVaR import yf_npVaR_single
ticker = ['AAPL']
position = [-1000]
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
alpha = 0.01
kind = "abs"
display = True
yf_npVaR_single(ticker, position, start_date, end_date, freq=freq, alpha=alpha,kind = kind, display=display)
# OR
VaR = yf_npVaR_single(ticker, position, start_date, end_date, freq=freq, alpha=alpha,kind = kind, display=False)
print(VaR)
```
This example calculates the daily non-parametric VaR for a short position of 1000 in Apple Inc, with a confidence level of 99%.
`2. yf_npVaR_port`
- yf_npVaR_port(tickers, positions, start_date, end_date, freq="daily", alpha=0.01, kind="abs", display=True)
This function calculates the quantile non-parametric Value at Risk (VaR) for a portfolio of assets using historical prices obtained from Yahoo Finance (yfinance).
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portfolio. #### note that all the TICKERS provided should be part of the portfolio whose VaR is being calculated ####
- positions: A list of integers or floats representing the positions of the assets in the portfolio. The length of this list should be the same as the 'tickers' list.
- start_date: A string representing the start date for the historical price data in the format 'YYYY-MM-DD'.
- end_date: A string representing the end date for the historical price data in the format 'YYYY-MM-DD'. By default, it is set to "today".
- freq: A string representing the frequency of the price data. By default, it is set to "daily".
- alpha: A float representing the confidence level for the VaR calculation. By default, it is set to 0.01.
- kind: A string representing the type of VaR calculation to perform. It can be either "abs" for absolute VaR or "rel" for relative VaR. By default, it is set to "abs".
- display: A boolean indicating whether to print the VaR results or not. By default, it is set to True.
### Example:
```python
from rmpy.NpVaR import yf_npVaR_port
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
alpha = 0.01
kind = "abs"
display = True
yf_npVaR_port(tickers, positions, start_date, end_date, freq=freq, alpha=alpha, kind=kind, display=display)
# OR
VaR = yf_npVaR_port(tickers, positions, start_date, end_date, freq=freq, alpha=alpha, kind=kind, display=False)
print(VaR)
```
This example calculates the daily non-parametric VaR for a Portfolio with short position of 1000 in Apple Inc and a long position of 5000 in Microsoft Corp.
`3. npVaR_single`
- VaR = npVaR_single(returns, position, alpha=0.01, kind = "abs")
This function npVaR_single calculates the quantile non-parametric value at risk (VaR) for a SINGLE asset using historical returns data (you can use every type of assets e.g stock,options,bonds, ecc.)
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the asset.
- position: the size of the position in units of the asset.
- alpha: the confidence level for the VaR calculation. This is a value between 0 and 1, representing the probability of loss exceeding the VaR. By default, this is set to 0.01 (1%).
- kind: the type of VaR calculation to use. This can be set to "abs" for absolute VaR or "rel" for relative VaR. By default, this is set to "abs".
### Example:
```python
from rmpy.NpVaR import npVaR_single
import numpy as np
returns = np.array([-0.02, 0.03, -0.01, 0.015, -0.002, 0.001, 0.008, 0.002, -0.006, 0.009]) # Replace this with actual returns
position = -1000
alpha = 0.01
kind = "abs"
VaR = npVaR_single(returns, position, alpha=alpha, kind=kind)
print(VaR)
```
This example calculates the absolute non-parametric VaR consisting of a single short position of 1000 for the given returns.
`4. npVaR_port`
- VaR = npVaR_port(returns, position, alpha=0.01, kind = "abs")
This function npVaR_port calculates the quantile non-parametric value at risk (VaR) for a PORTFOLIO of assets using historical returns data (you can use every type of assets e.g stock,options,bonds, ecc.)
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the portfolio. #### note that all the RETURNS provided should be part of the portfolio whose VaR is being calculated ####
- positions: the size of the positionS in units of the portfolio. This can be a single value or an array of values corresponding to each element in the returns argument.
- alpha: the confidence level for the VaR calculation. This is a value between 0 and 1, representing the probability of loss exceeding the VaR. By default, this is set to 0.01 (1%).
- kind: the type of VaR calculation to use. This can be set to "abs" for absolute VaR or "rel" for relative VaR. By default, this is set to "abs".
### Example:
```python
from rmpy.NpVaR import npVaR_port
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 2500, 7000]
alpha = 0.01
kind = "abs"
VaR = npVaR_port(returns, positions, alpha=alpha, kind=kind)
print(VaR)
```
This example calculates the absolute non-parametric VaR for a portfolio with a short position of 1000 in the first asset, a long position of 2500 in the second asset, and a long position of 7000 in the third asset.
`5. yf_conflevel_npVaR_single`
- yf_conflevel_npVaR_single(ticker, position, start_date, end_date, freq="daily", alpha= 0.01,confidence_level = 0.95, display=True)
This function yf_conflevel_npVaR_single calculates the quantile non-parametric value at risk (VaR) for a SINGLE asset using Yahoo Finance data. It first downloads historical price data from Yahoo Finance, calculates the returns of the asset, and then calculates the VaR and its confidence interval (lower and upper bound).
#### Args:
- ticker: the symbol of the asset to calculate VaR for.
- position: the size of the position in units of the asset.
- start_date: the start date of the historical data to download. This should be a string in the format "YYYY-MM-DD".
- end_date: the end date of the historical data to download. This should be a string in the format "YYYY-MM-DD". By default, this is set to "today".
- freq: the frequency of the data to download. This can be set to "daily", "weekly", or "monthly". By default, this is set to "daily".
- alpha: the level of significance for the VaR calculation. This is a value between 0 and 1, representing the probability of loss exceeding the VaR. By default, this is set to 0.01 (1%).
- confidence_level: the confidence level for the VaR calculation. This is a value between 0 and 1, representing the probability that the true VaR is within the calculated VaR interval. By default, this is set to 0.95 (95%).
- display: a boolean value indicating whether to display the VaR calculation result. By default, this is set to True.
### Example:
```python
from rmpy.NpVaR import yf_conflevel_npVaR_single
ticker = ['AAPL']
position = [2500]
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
alpha = 0.01
confidence_level = 0.95
display = True
yf_conflevel_npVaR_single(ticker, position, start_date, end_date, freq=freq, alpha=alpha, confidence_level=confidence_level, display=display)
# OR
VaR = yf_conflevel_npVaR_single(ticker, position, start_date, end_date, freq=freq, alpha=alpha, confidence_level=confidence_level, display=False)
print(VaR)
```
This example calculates the absolute non-parametric VaR with its lower and upper bound (95% confidence) for a position of 2500 in "AAPL".
`6. yf_conflevel_npVaR_port`
- yf_conflevel_npVaR_port(tickers, positions, start_date, end_date, freq="daily", alpha=0.01, confidence_level=0.95, display=True)
This function yf_conflevel_npVaR_port calculates the quantile non-parametric value at risk (VaR) for a PORTFOLIO of assets using Yahoo Finance data. It first downloads historical price data from Yahoo Finance for each asset in the portfolio, calculates the returns of each asset, and then calculates the portfolio VaR, the lower and the upper bound at a specified confidence level and alpha level.
#### Args:
- tickers: a list of symbols of the assets in the portfolio to calculate VaR for.
- positions: a list or array containing the sizes of the positions for each asset in the portfolio. These sizes are in units of the assets and can be positive or negative.
- start_date: the start date of the historical data to download. This should be a string in the format "YYYY-MM-DD".
- end_date: the end date of the historical data to download. This should be a string in the format "YYYY-MM-DD". By default, this is set to "today".
- freq: the frequency of the data to download. This can be set to "daily", "weekly", or "monthly". By default, this is set to "daily".
- alpha: the level of significance for the VaR calculation. This is a value between 0 and 1, representing the probability of loss exceeding the VaR. By default, this is set to 0.01 (1%).
- confidence_level: the confidence level for the VaR calculation. This is a value between 0 and 1, representing the probability that the true VaR is within the calculated VaR interval. By default, this is set to 0.95.
- display : a boolean value indicating whether to display the VaR calculation result. By default, this is set to True.
### Example:
```python
from rmpy.NpVaR import yf_conflevel_npVaR_port
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
alpha = 0.01
confidence_level = 0.95
display = True
yf_conflevel_npVaR_port(tickers, positions, start_date, end_date, freq=freq, alpha=alpha, confidence_level=confidence_level, display=display)
# OR
VaR = yf_conflevel_npVaR_port(tickers, positions, start_date, end_date, freq=freq, alpha=alpha, confidence_level=confidence_level, display=False)
print(VaR)
```
This example calculates the daily non-parametric VaR with its lower and upper bound (95% confidence) for a Portfolio with short position of 1000 in Apple Inc and 5000 in Microsoft Corp.
`7. conflevel_npVaR_single`
- VaR = conflevel_npVaR_single(returns, position,confidence_level= 0.95, alpha=0.01)
This function conflevel_npVaR_single calculates the quantile non-parametric value at risk (VaR) for a SINGLE asset using historical returns data (you can use every type of asset, e.g. stocks, options, bonds, etc.) with a specific confidence interval and alpha value. It also calculates the lower and upper bound for the non-parametric VaR.
#### Args:
- returns: A NumPy array or pd.Series of historical returns of a single asset.
- position: The size of the position in the asset. If position is greater than 0, the function assumes the position is long, and if position is less than or equal to 0, the function assumes the position is short.
- confidence_level: The confidence level for the VaR calculation (default value is 0.95).
- alpha: The level of significance for the VaR calculation (default value is 0.01).
### Example:
```python
from rmpy.NpVaR import conflevel_npVaR_single
import numpy as np
returns = np.random.uniform(-0.03, 0.03, size=1000) # Replace this with actual returns
position = -1000
confidence_level = 0.95
alpha = 0.01
VaR = conflevel_npVaR_single(returns, position, confidence_level=confidence_level, alpha=alpha)
print(VaR)
# OR, without intermediate variables for the keyword arguments
VaR = conflevel_npVaR_single(returns, position, confidence_level=0.95, alpha=0.01)
print(VaR)
```
This example calculates the non-parametric VaR and its lower and upper bound (95% confidence interval) consisting of a position of -1000 in the given asset.
`8. conflevel_npVaR_port`
- VaR = conflevel_npVaR_port(returns, positions ,confidence_level= 0.95, alpha=0.01)
This function conflevel_npVaR_port calculates the quantile non-parametric value at risk (VaR) for a PORTFOLIO of assets using historical returns data (you can use every type of assets e.g stock,options,bonds, ecc.) with a specific confidence interval and alpha value. It also calculates the lower and the upper bound of this estimation.
#### Args:
- returns: A NumPy array of historical returns for the portfolio.
- positions: A NumPy array of positions for each asset in the portfolio.
- confidence_level: The confidence level for the VaR calculation (default value is 0.95).
- alpha: The level of significance for the VaR calculation (default value is 0.01).
### Example:
```python
from rmpy.NpVaR import conflevel_npVaR_port
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 2000, -3000]
confidence_level = 0.95
alpha = 0.01
VaR = conflevel_npVaR_port(returns, positions, confidence_level=confidence_level, alpha=alpha)
print(VaR)
```
This example calculates the non-parametric VaR and its lower and upper bound (95% CI) for a portfolio consisting of a position of -1000 in the first asset, 2000 in the second asset and -3000 in the third one.
`9. yf_npVaR_summary_single`
- yf_npVaR_summary_single(ticker, position, start_date, end_date, freq="daily", alpha=0.01, display=True)
This function yf_npVaR_summary_single calculates the quantile non-parametric value at risk (VaR) for a SINGLE stock position using historical prices obtained from Yahoo Finance (yfinance). This function is useful to visualize some risk metrics of the single asset npVaR.
#### Args:
- ticker: the stock symbol or ticker of the company for which we want to calculate the VaR. This can be a string (for a single ticker) or a list or array of tickers (for multiple tickers).
- position: the size of the position in shares or currency. This can be a single value or an array of values corresponding to each ticker in the ticker argument.
- start_date: the starting date for which we want to obtain historical prices. This can be a string in the format "YYYY-MM-DD" or a datetime object.
- end_date: the ending date for which we want to obtain historical prices. This can be a string in the format "YYYY-MM-DD" or a datetime object. By default, this is set to "today" which will use the current date as the ending date.
- freq: the frequency of the historical prices. This can be set to "daily", "weekly", or "monthly". By default, this is set to "daily".
- alpha: the confidence level for the VaR calculation. This is a value between 0 and 1, representing the probability of loss exceeding the VaR. By default, this is set to 0.01 (1%).
- display: a boolean value or string representing whether to display the calculated VaR. This can be set to True, "True", "T", False, "False", or "F". By default, this is set to True.
### Example:
```python
from rmpy.NpVaR import yf_npVaR_summary_single
ticker = 'AAPL'
position = -1000
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
alpha = 0.01
display = True
yf_npVaR_summary_single(ticker, position, start_date, end_date, freq=freq, alpha=alpha, display=display)
# OR
VaR = yf_npVaR_summary_single(ticker, position, start_date, end_date, freq=freq, alpha=alpha, display=False)
```
This example calculates several metrics for the non-parametric VaR summary for a short position of 1000 shares in Apple Inc with a confidence level of 99% using historical daily prices from January 1, 2020, to December 31, 2021, obtained from Yahoo Finance.
`10. yf_npVaR_summary_port`
- yf_npVaR_summary_port(tickers, positions, start_date, end_date, freq="daily", alpha=0.01, display=True)
This function 'yf_npVaR_summary_port' is designed to calculate the quantile non-parametric Value at Risk (VaR) for a PORTFOLIO of assets using historical prices obtained from Yahoo Finance (yfinance). This function is useful to visualize several risk metrics of the Portfolio npVaR.
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portfolio. #### note that all the TICKERS provided should be part of the portfolio whose VaR is being calculated ####
- positions: A list of integers or floats representing the positions of the assets in the portfolio. The length of this list should be the same as the 'tickers' list.
- start_date: A string representing the start date for the historical price data in the format 'YYYY-MM-DD'.
- end_date: A string representing the end date for the historical price data in the format 'YYYY-MM-DD'. By default, it is set to "today".
- freq: A string representing the frequency of the price data. By default, it is set to "daily".
- alpha: A float representing the confidence level for the VaR calculation. By default, it is set to 0.01.
- kind: A string representing the type of VaR calculation to perform. It can be either "abs" for absolute VaR or "rel" for relative VaR. By default, it is set to "abs".
- display: A boolean indicating whether to print the VaR results or not. By default, it is set to True.
### Example:
```python
from rmpy.NpVaR import yf_npVaR_summary_port
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
alpha = 0.01
display = True
yf_npVaR_summary_port(tickers, positions, start_date, end_date, freq=freq, alpha=alpha, display=display)
# OR
VaR = yf_npVaR_summary_port(tickers, positions, start_date, end_date, freq=freq, alpha=alpha, display=False)
```
This example calculates several metrics for the non-parametric VaR for a portfolio consisting of a short position of 1000 in Apple Inc and a long position of 5000 in Microsoft using historical daily prices from January 1, 2020, to December 31, 2021, obtained from Yahoo Finance.
`11. npVaR_summary_single`
- summary = npVaR_summary_single(returns, position, alpha = 0.01)
This function called npVaR_summary_single that calculates the quantile Non-Parametric Value at Risk (VaR) and several risk metrics for a SINGLE asset and returns a summary of the VaR calculation.
#### Args:
- returns: A numpy array or pandas series of historical returns for the asset.
- position: The size of the position in the asset.
- alpha: The level of significance for the VaR calculation. Default value is 0.01.
### Example:
```python
import numpy as np
from rmpy.NpVaR import npVaR_summary_single
returns = np.array([-0.02, 0.03, -0.01, 0.015, -0.002, 0.001, 0.008, 0.002, -0.006, 0.009])
position = 10000
alpha = 0.01
summary = npVaR_summary_single(returns, position, alpha=alpha)
print(summary)
```
This example calculates all the metrics for the non-parametric VaR for a single asset with a given set of historical returns and position.
`12. npVaR_summary_port`
- summary = npVaR_summary_port(returns, positions, alpha=0.01)
The function npVaR_summary_port calculates the quantile non-parametric value at risk (VaR) and several risk metrics for a npVaR of a portfolio.
#### Args:
- returns: A numpy array or pandas series of historical returns for the portfolio.
- positions: a pandas DataFrame or numpy array containing the value held for each asset in the portfolio.
- alpha: which is the confidence level for the VaR calculation (default value is 0.01).
### Example:
```python
import numpy as np
from rmpy.NpVaR import npVaR_summary_port
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [1000, 500, -1500]
alpha = 0.01
summary = npVaR_summary_port(returns, positions, alpha=alpha)
print(summary)
```
This example shows how to use the npVaR_summary_port function with a 10x3 numpy array of historical returns and a list of positions for 3 assets in the portfolio. The function calculates the non-parametric value at risk (VaR) and other risk measures, returning them in a dictionary.
`13. yf_marg_NpVaRs_scale_factor`
- yf_marg_NpVaRs_scale_factor(tickers, positions, start_date, end_date, scale_factor=0.1, freq="daily", alpha=0.01, display=True)
The function yf_marg_NpVaRs_scale_factor calculates the Marginal Non-Parametric VaRs and their changes for a PORTFOLIO of assets over a given period of time, using a specified scale factor, using historical prices obtained from Yahoo Finance (yfinance). New position = old one + (old one * scale_factor) in the same direction (- if short and + if long)
#### Args:
- tickers: a list of tickers for the assets in the portfolio.
- positions: a list of positions for each asset in the portfolio.
- start_date: the start date of the analysis period.
- end_date: the end date of the analysis period (optional, default is "today").
- scale_factor: the scale factor used to calculate the Marginal Non-Parametric VaRs (optional, default is 0.1).
- freq: the frequency of the data (optional, default is "daily").
- alpha: the confidence level used in calculating the VaRs (optional, default is 0.01).
- display: a boolean indicating whether to print the resulting VaR changes (optional, default is True).
### Example:
```python
from rmpy.NpVaR import yf_marg_NpVaRs_scale_factor
tickers = ["AAPL", "MSFT", "GOOG"]
positions = [1000, 1500, 2000]
start_date = "2021-01-01"
end_date = "2021-12-31"
scale_factor = 0.1
freq = "daily"
alpha = 0.01
display = True
yf_marg_NpVaRs_scale_factor(tickers, positions, start_date, end_date, scale_factor=scale_factor, freq=freq, alpha=alpha, display=display)
# OR
NpVaR_changes = yf_marg_NpVaRs_scale_factor(tickers, positions, start_date, end_date, scale_factor=scale_factor, freq=freq, alpha=alpha, display=False)
print(NpVaR_changes)
```
This example demonstrates how to use the yf_marg_NpVaRs_scale_factor function with a list of three tickers, a list of positions for each asset, a start date, and an end date for the analysis period. The function calculates the Marginal Non-Parametric VaRs and their changes for the given portfolio using historical prices from Yahoo Finance.
`14. marg_NpVaRs_scale_factor`
- NpVaR_changes = marg_NpVaRs_scale_factor(returns, positions, scale_factor=0.1, alpha=0.01)
The function marg_NpVaRs_scale_factor calculates the marginal changes in the non-parametric value at risk (NpVaR) of a PORTFOLIO based on a specified scale factor. New position = old one + (old one * scale_factor) in the same direction (- if short and + if long)
#### Args:
- returns: A numpy array or pandas series of historical returns for the portfolio.
- positions: a list or array containing the current positions of the assets in the portfolio.
- scale_factor: a float representing the scale factor to be used for calculating the marginal changes in NpVaR.
- alpha: a float representing the significance level for calculating the NpVaR.
### Example:
```python
import numpy as np
from rmpy.NpVaR import marg_NpVaRs_scale_factor
returns = np.random.uniform(-0.05, 0.05, size=(10, 5)) # Replace this with actual returns
positions = [1000, -5000, 2000, -1000, 1500]
scale_factor = 0.2
alpha = 0.01
NpVaR_changes = marg_NpVaRs_scale_factor(returns, positions, scale_factor=scale_factor, alpha=alpha)
print(NpVaR_changes)
```
In this example, we have calculated the marginal changes in the non-parametric value at risk (NpVaR) for a portfolio consisting of 5 assets. The function takes in historical returns data for the assets, the current positions of the assets, a scale factor, and a significance level (alpha).The result is a list of the marginal changes in NpVaR for each asset, which can be useful for understanding the risk contributions of individual positions within the portfolio and designing risk management strategies to mitigate potential losses.
# 2. PaVaR Module
This module calculates Portfolio Value-at-Risk (pVaR) and associated functions for single assets and multi-asset portfolios using historical returns. It provides different Value-at-Risk (VaR) calculations, including marginal, component, and relative component VaRs.
The functions with the `yf_` prefix use data from the Yahoo Finance API, whereas the others use the provided returns. The `_single` functions are for individual assets, and the `_port` functions are for portfolios. The `_conflevel_` functions calculate the pVaR with a specified confidence level. The `_und_vs_` functions calculate the undiversified pVaR, while the `_marginal_VaRs`, `_componets_VaRs`, and `_relcomponets_VaRs` functions calculate the marginal, component, and relative component VaRs, respectively.
Each function has its own set of input parameters, such as asset tickers, positions, start and end dates, frequency, and confidence levels. Please refer to the code below for the syntax and examples on how to use each function. The input parameters and their usage are described within the comments.
The following functions are included:
`1. yf_pVaR_single`
- yf_pVaR_single(ticker, position, start_date, end_date,interval = 1, freq="daily", alpha=0.01, display=True)
The function 'yf_pVaR_single' enables the calculation of parametric VaR for a SINGLE POSITION by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- ticker: The stock symbol or identifier for the financial instrument in question (e.g. "AAPL" for Apple Inc.).
- position: The value held. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'monthly'" and "interval = 1", the function computes 1-month VaR).
- freq: The frequency at which returns will be downloaded.
- alpha: The significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- display: A boolean value that determines whether the function should display the results of the VaR calculation. By default, this is set to True, which means that the results will be displayed.
### Example:
```python
from rmpy.PaVaR import yf_pVaR_single
ticker = ['AAPL']
position = [-1000]
start_date = '2020-01-01'
end_date = '2021-12-31'
interval = 1
freq = "daily"
alpha = 0.01
display = True
yf_pVaR_single(ticker, position, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=display)
# OR
VaR = yf_pVaR_single(ticker, position, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=False)
print(VaR)
```
This example calculates the daily parametric VaR for a short position of 1000 in Apple Inc, with a confidence level of 99%.
`2. yf_pVaR_port`
- yf_pVaR_port(tickers, positions, start_date, end_date,interval = 1, freq="daily", alpha=0.01, display=True)
The function 'yf_pVaR_port' enables the calculation of parametric VaR for a PORTFOLIO by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portfolio. #### Note that the tickers provided should be part of the portfolio ####
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'monthly'" and "interval = 1", the function computes 1-month VaR).
- "freq": The frequency at which returns will be downloaded.
- alpha": The significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- display": A boolean value that determines whether the function should display the results of the VaR calculation. By default, this is set to True, which means that the results will be displayed.
### Example:
```python
from rmpy.PaVaR import yf_pVaR_port
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
interval = 5
freq = "daily"
alpha = 0.01
display = True
yf_pVaR_port(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=display)
# OR
VaR = yf_pVaR_port(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=False)
print(VaR)
```
This example calculates the 5-day parametric VaR for a Portfolio with short position of 1000 in Apple Inc and a long position of 5000 in Microsoft Corp.
`3. pVaR_single`
- VaR = pVaR_single(returns, position, interval = 1, alpha=0.01)
The function 'pVaR_single' enables the calculation of parametric VaR for a SINGLE POSITION based on a set of returns.
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the asset or portfolio.
- position: the size of the position in units of the asset.
- interval": The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha the confidence level for the VaR calculation. This is a value between 0 and 1, representing the probability of loss exceeding the VaR.By default, this is set to 0.01 (1%).
### Example:
```python
from rmpy.PaVaR import pVaR_single
import numpy as np
returns = np.array([-0.02, 0.03, -0.01, 0.015, -0.002, 0.001, 0.008, 0.002, -0.006, 0.009]) # Replace this with actual returns
position = [-1000]
interval = 1
alpha = 0.01
VaR = pVaR_single(returns, position, interval=interval, alpha=alpha)
print(VaR)
```
This example calculates the parametric VaR consisting of a single short position of 1000 for the given returns.
`4. pVaR_port`
- VaR = pVaR_port(returns, position, interval = 1, alpha=0.01)
The function 'pVaR_port' enables the calculation of parametric VaR for a PORTFOLIO based on a set of returns.
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the portfolio.
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- interval": The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha": The level of confidence for the VaR calculation. By default is set to 0.01, which represents a 99% confidence level.
### Example:
```python
from rmpy.PaVaR import pVaR_port
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 2500, 7000]
interval = 1
alpha = 0.01
VaR = pVaR_port(returns, positions, interval=interval, alpha=alpha)
print(VaR)
```
This example calculates the parametric VaR consisting of a portfolio with short positions of 1000 in the first asset, long positions of 2500 in the second asset,
and long positions of 7000 in the third asset.
`5. yf_conflevel_pVaR_single`
- yf_conflevel_pVaR_single(ticker, position, start_date, end_date, freq="daily",interval =1, alpha=0.01 ,confidence_level = 0.95, display=True)
The function 'yf_conflevel_pVaR_single' enables the calculation of the confidence level of parametric VaR for a SINGLE POSITION by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- ticker: The stock symbol or identifier for the financial instrument in question (e.g. "AAPL" for Apple Inc.).
- position: The number of shares or units held. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- freq: The frequency at which returns will be downloaded.
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'monthly'" and "interval = 1", the function computes 1-month VaR).
- alpha: a float specifying the significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- confidence_level: a float specifying the confidence level for the VaR calculation. By default, it is set to 0.95.
- display: a boolean or string value indicating whether or not to display the results. The default value is set to True.
### Example:
```python
from rmpy.PaVaR import yf_conflevel_pVaR_single
ticker = ['AAPL']
position = [2500]
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
interval = 1
alpha = 0.01
confidence_level = 0.95
display = True
yf_conflevel_pVaR_single(ticker, position, start_date, end_date, freq=freq, interval=interval, alpha=alpha, confidence_level=confidence_level, display=display)
# OR
VaR = yf_conflevel_pVaR_single(ticker, position, start_date, end_date, freq=freq, interval=interval, alpha=alpha, confidence_level=confidence_level, display=False)
print(VaR)
```
This example calculates parametric VaR with its lower and upper bound (95% confidence) for a position of 2500 in "AAPL".
`6. yf_conflevel_pVaR_port`
- yf_conflevel_pVaR_port(tickers, positions, start_date, end_date, freq="daily",interval =1, alpha=0.01 ,confidence_level = 0.95, display=True)
The function 'yf_conflevel_pVaR_port' enables the calculation of the confidence level of parametric VaR for a PORTFOLIO by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portoflio. #### Note that the tickers provided should be part of the portoflio ####
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- freq: The frequency at which returns will be downloaded.
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: a float specifying the significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- confidence_level: a float specifying the confidence level for the VaR calculation. By default, it is set to 0.95.
- display: a boolean or string value indicating whether or not to display the results. The default value is set to True.
### Example:
```python
from rmpy.PaVaR import yf_conflevel_pVaR_port
tickers = ['AAPL', 'MSFT']
positions = [2500, -1000]
start_date = '2020-01-01'
end_date = '2021-12-31'
freq = "daily"
interval = 1
alpha = 0.01
confidence_level = 0.95
display = True
yf_conflevel_pVaR_port(tickers, positions, start_date, end_date, freq=freq, interval=interval, alpha=alpha, confidence_level=confidence_level, display=display)
# OR
VaR = yf_conflevel_pVaR_port(tickers, positions, start_date, end_date, freq=freq, interval=interval, alpha=alpha, confidence_level=confidence_level, display=False)
print(VaR)
```
This example calculates parametric VaR with its lower and upper bound (95% confidence) for a Portfolio with a position of 2500 in "AAPL" and -1000 in "MSFT".
`7. conflevel_pVaR_single`
- VaR = conflevel_pVaR_single(returns, position,interval =1, alpha=0.01 ,confidence_level = 0.95)
The function 'conflevel_pVaR_single' enables the calculation of the confidence level of parametric VaR for a SINGLE POSITION based on a set of returns.
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the asset.
- position: The size of the position in the asset. This can be a positive or negative number, depending if you are long or short.
- interval: The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- confidence_level: a float specifying the confidence level for the VaR calculation. By default, it is set to 0.95.
- alpha: The level of confidence for the VaR calculation. By default is set to 0.01, which represents a 99% confidence level.
### Example:
```python
from rmpy.PaVaR import conflevel_pVaR_single
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=1000) # Replace this with your actual returns
position = [-1000]
interval = 1
confidence_level = 0.95
alpha = 0.01
VaR = conflevel_pVaR_single(returns, position, interval=interval, confidence_level=confidence_level, alpha=alpha)
print(VaR)
```
This example calculates the parametric VaR and its lower and upper bound (95% confidence interval) consisting of a position of -1000 in the given asset.
`8. conflevel_pVaR_port`
- VaR = conflevel_pVaR_port(returns, positions ,interval =1, alpha=0.01 ,confidence_level = 0.95)
The function 'conflevel_pVaR_port' enables the calculation of the confidence level of parametric VaR for a PORTFOLIO based on a set of returns.
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the portfolio.
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- interval: The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- confidence_level: a float specifying the confidence level for the VaR calculation. By default, it is set to 0.95.
- alpha : The level of confidence for the VaR calculation. By default is set to 0.01, which represents a 99% confidence level.
### Example:
```python
from rmpy.PaVaR import conflevel_pVaR_port
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 5000, -1500]
interval = 1
confidence_level = 0.95
alpha = 0.01
VaR = conflevel_pVaR_port(returns, positions, interval=interval, confidence_level=confidence_level, alpha=alpha)
print(VaR)
```
This example calculates the parametric VaR for a Portfolio and its lower and upper bound (95% confidence interval) consisting of a position of -1000 in the first asset, 5000 in the second and -1500 in the third one.
`9. yf_und_vs_pVaR_port`
- yf_und_vs_pVaR_port(tickers, positions, start_date, end_date,interval = 1, freq="daily", alpha=0.01, display=True)
The function 'yf_und_vs_pVaR_port' enables the calculation of the undiversified VaR and the parametric VaR for a PORTFOLIO by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portoflio. #### Note that the tickers provided should be part of the portoflio ####
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- freq: The frequency at which returns will be downloaded.
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: a float specifying the significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- display : a boolean or string value indicating whether or not to display the results. The default value is set to True.
### Example:
```python
from rmpy.PaVaR import yf_und_vs_pVaR_port
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
interval = 5
freq = "daily"
alpha = 0.01
display = True
yf_und_vs_pVaR_port(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=display)
# OR
VaR = yf_und_vs_pVaR_port(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=False)
print(VaR)
```
This example calculates the 5-day undiversified and parametric VaR for a Portfolio with short position of 1000 in Apple Inc and a long position of 5000 in Microsoft Corp.
`10. und_vs_pVaR_port`
- VaR = und_vs_pVaR_port(returns, position, interval = 1, alpha=0.01)
The function 'und_vs_pVaR_port' enables the calculation of the undiversified VaR and the parametric VaR for a PORTFOLIO based on a set of returns.
#### Args:
- returns: A list or array of historical returns for a portfolio.
- positions : A list or array of current positions for the assets in the portfolio. This can be a positive or negative number, depending if you are long or short.
- interval: The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha : The level of confidence for the VaR calculation. By default is set to 0.01, which represents a 99% confidence level.
### Example:
```python
from rmpy.PaVaR import und_vs_pVaR_port
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 2500, 7000]
interval = 1
alpha = 0.01
VaR = und_vs_pVaR_port(returns, positions, interval=interval, alpha=alpha)
print(VaR)
```
This example calculates the 1-day undiversified and parametric VaR for a Portfolio with a short position of 1000 in the first asset, 2500 in the second one and 7000 in the third one.
`11. yf_marginal_VaRs`
- yf_marginal_VaRs(tickers, positions, start_date, end_date,interval = 1, freq="daily", alpha=0.01, display=display)
The function 'yf_marginal_VaRs' enables the calculation of the Marginal VaRs for a PORTFOLIO by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portoflio. #### Note that the tickers provided should be part of the portoflio ####
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- freq: The frequency at which returns will be downloaded.
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: a float specifying the significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- display": a boolean or string value indicating whether or not to display the results. The default value is set to True.
### Example:
```python
from rmpy.PaVaR import yf_marginal_VaRs
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
interval = 5
freq = "daily"
alpha = 0.01
display = True
yf_marginal_VaRs(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=display)
# OR
mVars = yf_marginal_VaRs(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=False)
print(mVars)
```
This example calculates the 5-day marginal parametrics VaR for a Portfolio with short position of 1000 in Apple Inc and a long position of 5000 in Microsoft Corp.
`12. marginal_VaRs`
- mVars = marginal_VaRs(returns, positions, interval=1, alpha=0.01)
The function 'marginal_VaRs' enables the calculation of the marginal VaRs of a portfolio, given a set of returns and positions
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the portfolio.
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- interval: The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: The level of confidence for the VaR calculation. By default is set to 0.01, which represents a 99% confidence level.
### Example:
```python
from rmpy.PaVaR import marginal_VaRs
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 2500, 7000]
interval = 1
alpha = 0.01
mVars = marginal_VaRs(returns, positions, interval=interval, alpha=alpha)
print(mVars)
```
This example calculates the marginal VaRs of a Portfolio consisting of a short positions of 1000 in the first asset, long positions of 2500 in the second asset,
and long positions of 7000 in the third asset.
`13. yf_componets_VaRs`
- yf_componets_VaRs(tickers, positions, start_date, end_date,interval = 1, freq="daily", alpha=0.01, display=True)
The function 'yf_componets_VaRs' enables the calculation of the Component VaRs for a PORTFOLIO by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portoflio. #### Note that the tickers provided should be part of the portoflio ####
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- freq: The frequency at which returns will be downloaded.
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: a float specifying the significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- display: a boolean or string value indicating whether or not to display the results. The default value is set to True.
### Example:
```python
from rmpy.PaVaR import yf_componets_VaRs
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
interval = 5
freq = "daily"
alpha = 0.01
display = True
yf_componets_VaRs(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=display)
# OR
cVars = yf_componets_VaRs(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=False)
print(cVars)
```
This example calculates the 5-day component parametrics VaRs for a Portfolio with short position of 1000 in Apple Inc and a long position of 5000 in Microsoft Corp.
`14. componets_VaRs`
- cVars = componets_VaRs(returns, positions, interval=1, alpha=0.01)
The function 'componets_VaRs' enables the calculation of the componets VaRs of a portfolio, given a set of returns
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the portfolio.
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- interval: The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: The level of confidence for the VaR calculation. By default is set to 0.01, which represents a 99% confidence level.
### Example:
```python
from rmpy.PaVaR import componets_VaRs
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 2500, 7000]
interval = 1
alpha = 0.01
cVars = componets_VaRs(returns, positions, interval=interval, alpha=alpha)
print(cVars)
```
This example calculates the component VaRs of a Portfolio consisting of a short position of 1000 in the first asset, a long position of 2500 in the second asset, and a long position of 7000 in the third asset.
`15. yf_relcomponets_VaRs`
- yf_relcomponets_VaRs(tickers, positions, start_date, end_date,interval = 1, freq="daily", alpha=0.01, display=True)
The function 'yf_relative_componets_VaRs' enables the calculation of the Relative Component VaRs for a PORTFOLIO by utilizing data obtained from Yahoo Finance (yFinance).
#### Args:
- tickers: A list of strings representing the tickers of the assets in the portoflio. #### Note that the tickers provided should be part of the portoflio ####
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- start_date: A string representing the starting date for the historical data used in the VaR calculation. This should be in the format "YYYY-MM-DD".
- end_date: A string representing the ending date for the historical data used in the VaR calculation. This should also be in the format "YYYY-MM-DD".By default, this is set to "today".
- freq: The frequency at which returns will be downloaded.
- interval: The time horizon of VaR. It is related to frequency (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: a float specifying the significance level for the VaR calculation. By default, this is set to 0.01, which corresponds to a 99% confidence level.
- display: a boolean or string value indicating whether or not to display the results. The default value is set to True.
### Example:
```python
from rmpy.PaVaR import yf_relcomponets_VaRs
tickers = ['AAPL', 'MSFT']
positions = [-1000, 5000]
start_date = '2020-01-01'
end_date = '2021-12-31'
interval = 5
freq = "daily"
alpha = 0.01
display = True
yf_relcomponets_VaRs(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=display)
# OR
rcVars = yf_relcomponets_VaRs(tickers, positions, start_date, end_date, interval=interval, freq=freq, alpha=alpha, display=False)
print(rcVars)
```
This example calculates the 5-day relative component parametrics VaRs for a Portfolio with short position of 1000 in Apple Inc and a long position of 5000 in Microsoft Corp.
`16. relcomponets_VaRs`
- rcVars = relcomponets_VaRs(returns, positions, interval=1, alpha=0.01)
The function 'relcomponets_VaRs' enables the calculation of the relative componets VaRs of a portfolio, given its returns.
#### Args:
- returns: a pandas Series or NumPy array containing the historical returns of the portfolio.
- positions: A list of integers or floats representing each position in the portfolio. This can be a positive or negative number, depending if you are long or short.
- interval: The time horizon of VaR. It is related to frequency of data used (e.g. if "freq = 'montly' " and "interval = 1", the function compute 1-month VaR).
- alpha: The level of confidence for the VaR calculation. By default is set to 0.01, which represents a 99% confidence level.
### Example:
```python
from rmpy.PaVaR import relcomponets_VaRs
import numpy as np
returns = np.random.uniform(-0.05, 0.05, size=(10, 3)) # Replace this with actual returns
positions = [-1000, 2500, 7000]
interval = 1
alpha = 0.01
rcVars = relcomponets_VaRs(returns, positions, interval=interval, alpha=alpha)
print(rcVars)
```
This example calculates the relative component VaRs of a Portfolio consisting of a short position of 1000 in the first asset, a long position of 2500 in the second asset, and a long position of 7000 in the third asset.
| /rmpy-1.1.8.tar.gz/rmpy-1.1.8/README.md | 0.465145 | 0.98829 | README.md | pypi |
import argparse
import json
import sys
import requests
def dict_list_key(item):
    """Provide the value to sort a definitions list entry on.

    Composite keys are checked first (most specific to least specific),
    then single-key fallbacks, and finally the item itself is returned
    as its own sort key.

    :param dict item: The item to sort
    :rtype: mixed

    """
    if 'vhost' in item and 'name' in item:
        return item['vhost'], item['name']
    elif 'user' in item and 'vhost' in item:
        return item['user'], item['vhost']
    elif 'vhost' in item and 'source' in item and 'destination' in item:
        return item['vhost'], item['source'], item['destination']
    # Iterate a tuple, not a set: set iteration order varies with hash
    # randomization, which made the fallback key (and therefore the
    # "deterministic" output) change between runs.
    for key in ('name', 'user'):
        if key in item:
            return item[key]
    return item
def get_definitions(args):
    """Fetch the definitions document from the RabbitMQ Management API.

    :param argparse.namespace args: CLI argument values
    :rtype: dict
    :raises: RequestException

    """
    url = '{}/api/definitions'.format(args.url.rstrip('/'))
    credentials = (args.username, args.password)
    try:
        response = requests.get(url, auth=credentials)
    except requests.RequestException as error:
        # Surface transport-level failures under the application exception
        raise RequestException(error.__class__.__name__)
    if response.ok:
        return response.json()
    payload = response.json()
    raise RequestException(payload.get('reason', payload.get('error')))
def nested_sort(value):
    """Perform a recursive sort on the provided value.

    Dictionary values are sorted recursively (key ordering itself is
    handled later by ``json.dumps(..., sort_keys=True)``), and lists of
    dictionaries are ordered by :func:`dict_list_key`. List entries are
    now recursed into as well, so lists nested inside those dictionaries
    are also sorted (previously they were left in arbitrary order).

    :param mixed value: The value to sort
    :rtype: mixed
    :raises: ValueError

    """
    if isinstance(value, dict):
        return {k: nested_sort(v) for k, v in value.items()}
    elif isinstance(value, list):
        if all(isinstance(i, dict) for i in value):
            # Sort each entry's own contents first, then order the entries
            return sorted((nested_sort(i) for i in value),
                          key=dict_list_key)
        raise ValueError('Unexpected list with mismatched data types')
    return value
def parse_cli_arguments(cli_args=None):  # pragma: nocover
    """Return the parsed CLI arguments for the application invocation.

    :param list cli_args: CLI args to parse instead of from command line
    :rtype: argparse.namespace

    """
    parser = argparse.ArgumentParser(
        'rmq-sorted-definitions',
        # Fixed user-facing typo: "Deterministicly" -> "Deterministically"
        description='Deterministically sort and format RabbitMQ definition '
                    'backups')
    parser.add_argument(
        '--url', default='http://localhost:15672',
        help='The RabbitMQ Management API base URL. Default: %(default)s')
    parser.add_argument(
        '-u', '--username', default='guest',
        help='The RabbitMQ Management API username. Default: %(default)s')
    parser.add_argument(
        '-p', '--password', default='guest',
        help='The RabbitMQ Management API password. Default: %(default)s')
    parser.add_argument(
        '-f', '--from-file', type=argparse.FileType('r'),
        help='Read definitions from a file instead of the Management API')
    parser.add_argument(
        'file', default=sys.stdout, type=argparse.FileType('w'), nargs='?',
        metavar='DESTINATION',
        help='Location to write the definitions to. Default: STDOUT')
    return parser.parse_args(cli_args)
def write_definitions(handle, definitions):
    """Write the sorted definitions as pretty-printed JSON.

    :param file handle: The open file handle
    :param dict definitions: The parsed definitions from RabbitMQ

    """
    sorted_definitions = nested_sort(definitions)
    serialized = json.dumps(sorted_definitions, sort_keys=True, indent=2)
    handle.write(serialized + '\n')
def main():
    """Application Entrypoint"""
    args = parse_cli_arguments()
    try:
        # Prefer the on-disk definitions when -f/--from-file was given
        definitions = (json.load(args.from_file) if args.from_file
                       else get_definitions(args))
    except (RequestException, ValueError) as error:
        sys.stderr.write('ERROR: {}\n'.format(error))
        sys.exit(1)
    with args.file as handle:
        write_definitions(handle, definitions)
class RequestException(Exception):
    """Raised when the definitions can not be downloaded from RabbitMQ"""
if __name__ == '__main__':  # pragma: nocover
    main()
from websocketdatamanager.rmq_engine import RMQEngine
from tasktools.taskloop import TaskLoop
import asyncio
class ReadMQBroker:
    """Bridge that consumes data from RabbitMQ queues and relays it onward.

    One :class:`RMQEngine` connection is maintained per channel; the
    ``cycle_read_from_rmq`` coroutine is scheduled repeatedly by a
    :class:`TaskLoop` to (re)connect pending channels and drain their
    queues.
    """

    def __init__(self, queue_set, engine_opts, step=0.1, *args, **kwargs):
        """
        :param queue_set: mapping of channel name -> destination queue
        :param engine_opts: mapping of channel name -> RMQEngine kwargs
        :param float step: delay in seconds between read cycles
        """
        self.step = step
        self.queue_set = queue_set
        self.engine_opts = engine_opts
        self.engine = False

    async def cycle_read_from_rmq(self, *args, **kwargs):
        """Run one iteration of the RMQ read loop (driven by TaskLoop).

        ``args`` is ``(engine_map, phase, pending_channels)``: phase 0
        means "connect the pending channels", any other value means
        "consume from every connected channel". Returns the next
        ``(args, kwargs)`` pair that TaskLoop feeds back in.
        """
        engine = args[0]
        v = args[1]
        channels = args[2]
        if v == 0:
            # Connection phase: try to connect every pending channel.
            connected = set()
            for channel in channels:
                rmq = engine.get(channel)
                try:
                    rmq.connect()
                except Exception as e:
                    print(f'''Error on connection to dj es -> channel {channel},
                    error {e}''')
                    # Stay in phase 0 so the whole pending set is retried.
                    v = 0
                    return [engine, v, channels], {"error": "No se puedo conectar"}
                connected.add(channel)
            # Drop successfully connected channels from the pending set.
            for channel in connected:
                channels.remove(channel)
            v += 1
            await asyncio.sleep(self.step)
            return [engine, v, channels], {}
        else:
            # Consume phase: drain every channel's exchange queue.
            for channel, rmq in engine.items():
                try:
                    queue = self.queue_set.get(channel)
                    queue, active = await rmq.amqp.consume_exchange_mq(
                        queue, True)
                except Exception as e:
                    print("Error en cycle send to dj es->%s" % e)
                    # Failed channel goes back to the pending set and the
                    # loop returns to the connection phase next cycle.
                    v = 0
                    channels.add(channel)
            await asyncio.sleep(self.step)
            return [engine, v, channels], {}

    def task_cycle(self):
        """Build one RMQEngine per channel and start the read TaskLoop."""
        loop = asyncio.get_event_loop()
        # One engine per configured channel
        rmq_engine = {channel: RMQEngine(**value_dict)
                      for channel, value_dict in self.engine_opts.items()}
        # Plain loop for the side effect (was a throwaway set comprehension)
        for rmq in rmq_engine.values():
            rmq.active_queue_switch()
        # Every channel starts out pending a connection (phase 0)
        channels = set(self.engine_opts.keys())
        cycle_rmq_args = [rmq_engine, 0, channels]
        task = TaskLoop(self.cycle_read_from_rmq,
                        cycle_rmq_args, {},
                        **{"name": "read_from_rabbitmq"})
        task.create()
        if not loop.is_running():
            loop.run_forever()
import functools
import sys
import traceback

import pika
import pika.spec
def consumer_function(function):
    """Wrap a free function as a pika consumer callback.

    The wrapped function receives only ``(body, properties)``; the
    message is acked after processing and, when the function returns a
    value, that value is published back to the ``reply_to`` queue.

    :param function: Message processing function to wrap. Should take only
        body and properties parameters.
    """
    def process(channel, method, header, body):
        outcome = function(body, header)
        channel.basic_ack(delivery_tag=method.delivery_tag)
        if outcome is None:
            return
        channel.basic_publish(
            exchange='', routing_key=header.reply_to, body=outcome)
    return process
def class_consumer(function):
    """Wrap a bound method as a pika consumer callback.

    Same contract as :func:`consumer_function`, but for methods: the
    wrapped method receives ``(self, body, properties)``; the message is
    acked after processing and a non-``None`` result is published back to
    the ``reply_to`` queue.

    :param function: Message processing class method to wrap. Should take
        only self, body and properties parameters.
    """
    def process(self, channel, method, header, body):
        outcome = function(self, body, header)
        channel.basic_ack(delivery_tag=method.delivery_tag)
        if outcome is None:
            return
        channel.basic_publish(
            exchange='', routing_key=header.reply_to, body=outcome)
    return process
def noexcept(function):
    """Decorate a RabbitMQInterface method so AMQP failures trigger reconnect.

    If the wrapped call raises, the traceback is printed to stderr, the
    interface (``args[0]``, i.e. ``self``) is reconnected via
    ``connect()``, and the call returns ``None`` instead of propagating.
    """
    @functools.wraps(function)
    def process(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception:
            # Catch Exception, not a bare ``except:``, so KeyboardInterrupt
            # and SystemExit still propagate to the caller.
            print(traceback.format_exc(), file=sys.stderr)
            args[0].connect()
    return process
class RabbitMQInterface:
    """Thin blocking wrapper around pika for publish/fetch/listen patterns.

    Connects eagerly in ``__init__``; methods decorated with ``@noexcept``
    reconnect and return ``None`` when the underlying AMQP call fails.
    """

    def __init__(self, user=None, password=None, host='localhost', port=5672, url_parameters=None):
        """
        :param user: username for RabbitMQ
        :param password: password for RabbitMQ
        :param host: RabbitMQ host. Default - localhost.
        :param port: RabbitMQ port. Default - 5672.
        :param url_parameters: (Optional) RabbitMQ credentials in a single line. Example: 'amqp://guest:guest@localhost:5672'
        """
        if url_parameters is None and user is None:
            raise Exception('Either url parameters or user and password must be set')
        self.user = user
        self.password = password
        self.url_parameters = url_parameters
        self.host = host
        self.port = port
        # Eager connect: constructing the interface opens the connection
        self.connect()

    def connect(self):
        """
        Connects to RabbitMQ, using the credentials specified in init.
        """
        self.connection, self.channel = self.__connect(self.user, self.password, self.host, self.port,
                                                       self.url_parameters)

    @staticmethod
    def __connect(user, passw, host, port, url_parameters):
        # URL parameters win over discrete credentials when both are given.
        # heartbeat=0 disables the AMQP heartbeat for long-running consumers.
        if url_parameters is not None:
            parameters = pika.URLParameters(url_parameters)
        else:
            credentials = pika.PlainCredentials(user, passw)
            parameters = pika.ConnectionParameters(host, port, credentials=credentials, heartbeat=0)
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        return connection, channel

    @noexcept
    def create_queue(self, name=None, exchnage_to_bind=None, binding_routing_key=''):
        """Declare a queue (anonymous auto-delete when no name is given).

        NOTE(review): the parameter name ``exchnage_to_bind`` is misspelled
        but is part of the public keyword interface, so it is kept as-is.

        :param name: (Optional) queue name; None declares a broker-named queue
        :param exchnage_to_bind: (Optional) exchange to bind the queue to
        :param binding_routing_key: routing key used for the binding
        :return: the declared queue's name
        """
        if name is not None:
            result = self.channel.queue_declare(name)
        else:
            result = self.channel.queue_declare('', auto_delete=True)
        queue_name = result.method.queue
        if exchnage_to_bind is not None:
            self.channel.queue_bind(exchange=exchnage_to_bind, queue=queue_name, routing_key=binding_routing_key)
        return queue_name

    @noexcept
    def publish(self, routing_key, body, exchange='amq.topic', reply_to=None):
        """
        Publishes message to queue without waiting for response.
        :param routing_key: Queue to publish name
        :param body: Message text
        :param exchange: (Optional) Exchange to publish message. Default - 'amq.topic'
        :param reply_to: (Optional) Queue to response name
        """
        properties = pika.spec.BasicProperties(reply_to=reply_to)
        self.channel.basic_publish(exchange=exchange, routing_key=routing_key, body=body, properties=properties)

    @noexcept
    def fetch(self, routing_key, body, exchange='amq.topic'):
        """
        Publishes message and waits for response. To convey the response creates anonymous queue.
        :param routing_key: Queue to publish name
        :param body: Message text
        :param exchange: (Optional) Exchange to publish message. Default - 'amq.topic'
        """
        reply_to = self.create_queue()
        self.publish(exchange=exchange, routing_key=routing_key, body=body, reply_to=reply_to)
        # Blocks until the first message arrives on the reply queue, then
        # cancels the consumer and returns that message's body.
        for method_frame, properties, body in self.channel.consume(reply_to):
            self.channel.cancel()
            return body

    @noexcept
    def listen(self, queue, func):
        """
        Listens for all messages in specified queue.
        :param queue: Queue to listen
        :param func: Function processing messages. Must be decorated with either @rmq_interface.consumer_function
        or @rmq.interface.class_consumer
        """
        # prefetch_count=1: deliver one unacked message at a time
        self.channel.basic_qos(prefetch_count=1)
        self.channel.basic_consume(queue, func)
        self.channel.start_consuming()
# Rectangular Micro QR Code (rMQR Code) Generator

The rMQR Code is a rectangular two-dimensional barcode. This is easy to print in narrow space compared to conventional QR Code. This package can generate an rMQR Code image. This is implemented based on [ISO/IEC 23941: Rectangular Micro QR Code (rMQR) bar code symbology specification](https://www.iso.org/standard/77404.html).
[](https://github.com/OUDON/rmqrcode-python/actions/workflows/python-app.yml)



## 🎮 Online Demo Site
You can try this online: https://rmqr.oudon.xyz .
## 📌 Notice
- Please verify an image generated by this software whether it can decode correctly before use.
- Because this is in early stage, QR Code readers may have not been supported rMQR Code yet.
## 🚀 Installation
```
pip install rmqrcode
```
## 📕 Basic Usage
### CLI
Generate an rMQR Code image from your command line, use `rmqr` command:
```sh
rmqr 'Text data' 'my_qr.png'
```
See the help to list the options:
```sh
➜ rmqr -h
usage: rmqr [-h] [--ecc {M,H}] [--version VERSION] [--fit-strategy {min_width,min_height,balanced}]
DATA OUTPUT
positional arguments:
DATA Data to encode.
OUTPUT Output file path
optional arguments:
-h, --help show this help message and exit
--ecc {M,H} Error correction level. (default: M)
--version VERSION rMQR Code version like 'R11x139'.
--fit-strategy {min_width,min_height,balanced}
Strategy how to determine rMQR Code size.
```
### Generate rMQR Code in scripts
Alternatively, you can also use in python scripts:
```py
from rmqrcode import rMQR
import rmqrcode
data = "https://oudon.xyz"
qr = rMQR.fit(
data,
ecc=rmqrcode.ErrorCorrectionLevel.M,
fit_strategy=rmqrcode.FitStrategy.MINIMIZE_WIDTH
)
```
The `ecc` parameter is an enum value of `rmqrcode.ErrorCorrectionLevel` to select error correction level. The following values are available:
- **`ErrorCorrectionLevel.M`**: Approx. 15% Recovery Capacity.
- **`ErrorCorrectionLevel.H`**: Approx. 30% Recovery Capacity.
The `fit_strategy` parameter is enum value of `rmqrcode.FitStrategy` to specify how to determine size of rMQR Code. The following values are available:
- **`FitStrategy.MINIMIZE_WIDTH`**: Try to minimize width.
- **`FitStrategy.MINIMIZE_HEIGHT`**: Try to minimize height.
- **`FitStrategy.BALANCED`**: Try to keep balance of width and height.
Here is an example of images generated by each fit strategies for data `Test test test`:

### Save as image
```py
from rmqrcode import QRImage
image = QRImage(qr, module_size=8)
image.show()
image.save("my_qr.png")
```
## 📙 Advanced Usage
### Select rMQR Code size manually
To select rMQR Code size manually, use `rMQR()` constructor.
```py
from rmqrcode import rMQR, ErrorCorrectionLevel
qr = rMQR('R11x139', ErrorCorrectionLevel.H)
```
`R11x139` means 11 rows and 139 columns. The following table shows available combinations.
| |27|43|59|77|99|139|
|-|:-:|:-:|:-:|:-:|:-:|:-:|
|R7|❌|✅|✅|✅|✅|✅|
|R9|❌|✅|✅|✅|✅|✅|
|R11|✅|✅|✅|✅|✅|✅|
|R13|✅|✅|✅|✅|✅|✅|
|R15|❌|✅|✅|✅|✅|✅|
|R17|❌|✅|✅|✅|✅|✅|
### Encoding Modes and Segments
The rMQR Code has the four encoding modes Numeric, Alphanumeric, Byte and Kanji to convert data efficiently. We can select encoding mode for each data segment separately.
The following example shows how to encode data "123Abc". The first segment is for "123" in the Numeric mode. The second segment is for "Abc" in the Byte mode.
We can select an encoding mode by passing the `encoder_class` argument to the `rMQR#add_segment` method. In this example, the length of bits after encoding is 47 in the case combined with the Numeric mode and the Byte mode, which is shorter than 56 in the Byte mode only.
```py
from rmqrcode import rMQR, ErrorCorrectionLevel, encoder
qr = rMQR('R7x43', ErrorCorrectionLevel.M)
qr.add_segment("123", encoder_class=encoder.NumericEncoder)
qr.add_segment("Abc", encoder_class=encoder.ByteEncoder)
qr.make()
```
The value for `encoder_class` is listed in the below table.
|Mode|Value of encoder_class|Characters|
|-|-|-|
|Numeric|NumericEncoder|0-9|
|Alphanumeric|AlphanumericEncoder|0-9 A-Z \s $ % * + - . / :|
|Byte|ByteEncoder|Any|
|Kanji|KanjiEncoder|from 0x8140 to 0x9FFC, from 0xE040 to 0xEBBF in Shift JIS value|
### Optimal Segmentation
The `rMQR.fit` method mentioned above computes the optimal segmentation.
For example, the data "123Abc" is divided into the following two segments.
|Segment No.|Data|Encoding Mode|
|-|-|-|
|Segment1|123|Numeric|
|Segment2|Abc|Byte|
In the case of other segmentation like "123A bc", the length of the bit string after
encoding will be longer than the above optimal case.
## 🤝 Contributing
Any suggestions are welcome! If you are interested in contributing, please read [CONTRIBUTING](https://github.com/OUDON/rmqrcode-python/blob/develop/CONTRIBUTING.md).
## 📚 References
- [Rectangular Micro QR Code (rMQR) bar code symbology specification: ISO/IEC 23941](https://www.iso.org/standard/77404.html)
- [rMQR Code | QRcode.com | DENSO WAVE](https://www.qrcode.com/en/codes/rmqr.html)
- [Creating a QR Code step by step](https://www.nayuki.io/page/creating-a-qr-code-step-by-step)
----
The word "QR Code" is registered trademark of DENSO WAVE Incorporated.<br>
http://www.denso-wave.com/qrcode/faqpatent-e.html
| /rmqrcode-0.3.0.tar.gz/rmqrcode-0.3.0/README.md | 0.650134 | 0.864768 | README.md | pypi |
import warnings
from django.contrib.postgres import fields
from django.db import models
class DateRangeField(fields.DateRangeField):
    """Deprecated alias of ``django.contrib.postgres.fields.DateRangeField``."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'DateRangeField is deprecated and will be removed in '
            'rmr-django 2.0, use '
            'django.contrib.postgres.fields.DateRangeField instead',
            DeprecationWarning,
            # Point the warning at the caller's code, not at this shim.
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class DateTimeRangeField(fields.DateTimeRangeField):
    """Deprecated alias of ``django.contrib.postgres.fields.DateTimeRangeField``."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'DateTimeRangeField is deprecated and will be removed in '
            'rmr-django 2.0, use '
            'django.contrib.postgres.fields.DateTimeRangeField instead',
            DeprecationWarning,
            # Point the warning at the caller's code, not at this shim.
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class FloatRangeField(fields.FloatRangeField):
    """Deprecated alias of ``django.contrib.postgres.fields.FloatRangeField``."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'FloatRangeField is deprecated and will be removed in '
            'rmr-django 2.0, use '
            'django.contrib.postgres.fields.FloatRangeField instead',
            DeprecationWarning,
            # Point the warning at the caller's code, not at this shim.
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class BigIntegerRangeField(fields.BigIntegerRangeField):
    """Deprecated alias of ``django.contrib.postgres.fields.BigIntegerRangeField``."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'BigIntegerRangeField is deprecated and will be removed in '
            'rmr-django 2.0, use '
            'django.contrib.postgres.fields.BigIntegerRangeField instead',
            DeprecationWarning,
            # Point the warning at the caller's code, not at this shim.
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class IntegerRangeField(fields.IntegerRangeField):
    """Deprecated alias of ``django.contrib.postgres.fields.IntegerRangeField``."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'IntegerRangeField is deprecated and will be removed in '
            'rmr-django 2.0, use '
            'django.contrib.postgres.fields.IntegerRangeField instead',
            DeprecationWarning,
            # Point the warning at the caller's code, not at this shim.
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class Lower(models.Transform):
    """SQL ``lower()`` transform: extracts the lower bound of a range value."""

    lookup_name = 'lower'

    def as_sql(self, compiler, connection, *args, **kwargs):
        compiled_lhs, lhs_params = compiler.compile(self.lhs)
        return 'lower({lhs})'.format(lhs=compiled_lhs), lhs_params
class Upper(models.Transform):
    """SQL ``upper()`` transform: extracts the upper bound of a range value."""

    lookup_name = 'upper'

    def as_sql(self, compiler, connection, *args, **kwargs):
        compiled_lhs, lhs_params = compiler.compile(self.lhs)
        return 'upper({lhs})'.format(lhs=compiled_lhs), lhs_params
@DateRangeField.register_lookup
class DateRangeLowerTransform(Lower):
output_field = models.DateField()
@DateRangeField.register_lookup
class DateRangeUpperTransform(Upper):
output_field = models.DateField()
@DateTimeRangeField.register_lookup
class DateRangeLowerTransform(Lower):
output_field = models.DateTimeField()
@DateTimeRangeField.register_lookup
class DateRangeUpperTransform(Upper):
output_field = models.DateTimeField()
@FloatRangeField.register_lookup
class DateRangeLowerTransform(Lower):
output_field = models.FloatField()
@FloatRangeField.register_lookup
class DateRangeUpperTransform(Upper):
output_field = models.FloatField()
@BigIntegerRangeField.register_lookup
class DateRangeLowerTransform(Lower):
output_field = models.BigIntegerField()
@BigIntegerRangeField.register_lookup
class DateRangeUpperTransform(Upper):
output_field = models.BigIntegerField()
@IntegerRangeField.register_lookup
class DateRangeLowerTransform(Lower):
output_field = models.IntegerField()
@IntegerRangeField.register_lookup
class DateRangeUpperTransform(Upper):
output_field = models.IntegerField() | /rmr-django-1.1.5.tar.gz/rmr-django-1.1.5/rmr/models/fields/range.py | 0.677581 | 0.249556 | range.py | pypi |
def read_command_line(objectstring='requested'):
    """Build the argument parser shared by the dipole post-processing tools.

    Parameters
    ----------
    objectstring : str
        short description of the spectrum, interpolated into help texts
    """
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument(
        'files', nargs='*', default=['.'],
        help='optional list of directories containing rmt calculations')
    # One boolean flag per dipole component, declared identically.
    for axis in ('x', 'y', 'z'):
        parser.add_argument(
            f'-{axis}', f'--{axis}', action='store_true', default=False,
            help=f'transform the {axis} component of the dipole')
    parser.add_argument(
        '-p', '--plot', action='store_true', default=False,
        help=f'show a plot of {objectstring} spectrum')
    parser.add_argument(
        '-o', '--output', action='store_true', default=False,
        help=f'output the {objectstring} spectra to text files')
    parser.add_argument(
        '--pad_factor', type=int, default=8,
        help='number of zeros to pad data with during calculations '
             'involving Fourier transforms i.e. pad signal up to length 2^n)')
    parser.add_argument(
        '-s', '--solutions_list', default=None,
        help='list of solutions on which to operate, defaults to all solutions')
    parser.add_argument(
        '-u', '--units', default='eV',
        help='units to use on the x-axis, "eV" or "au"')
    return parser
def get_command_line(objectstring='requested'):
    """Parse the dipole-tool command line and apply fallback defaults."""
    args = read_command_line(objectstring).parse_args()
    if not any((args.x, args.y, args.z)):
        # No dipole component requested: default to the z component.
        args.z = True
    if not (args.plot or args.output):
        # Neither plot nor file output requested: default to file output.
        args.output = True
return args | /rmt_utilities-1.0-py3-none-any.whl/rmt_utilities/dipole_cli.py | 0.750553 | 0.218024 | dipole_cli.py | pypi |
from rmt_utilities.dataobjects import DataFile
from rmt_utilities.atomicunits import eV, c
from pathlib import Path
from itertools import zip_longest
import numpy as np
class RMTCalc:
    """
    Primary data structure: holds all metadata for a given rmt calculation
    and provides methods ``.HHG()`` and ``.ATAS()`` for computing high harmonic spectra
    and attosecond transient absorption spectra respectively.

    Parameters
    -----------
    path : path or str
        path to the rmt calculation directory (default = ".")
    target : str
        name of atomic/molecular target for calculation (default = None)
    description : str
        description of the rmt calculation (default = "RMT calculation")
    template : path or str
        path to the template directory containing rmt input to use for setup
    rmtexec : path or str
        path to the rmt executable to be used

    Attributes
    ----------
    path : path
        path to root directory of calculation
    conffile : path
        path to input.conf file used to drive RMT calculation
    config : dict
        dictionary containing input variables read from conffile
    datalist : list
        list of all files in the /data/ directory
    statelist : list
        list of all files in the /state/ directory
    expec_z : DataFile
        time-dependent dipole length
    expec_v : DataFile
        time-dependent dipole velocity
    field : DataFile
        time-dependent electric field
    length : Observable
        Harmonic spectrum computed from the dipole length
    velocity: Observable
        Harmonic spectrum computed from the dipole velocity
    """
    def __init__(self, path=".", target=None, description="RMT calculation",
                 template=None, rmtexec=None):
        self.path = Path(path).absolute()
        if template:
            # Templated setup: link input files/executable and create the
            # ground/state/data directories before anything else is read.
            self._buildFromTemplate(template, rmtexec)
        self.target = target
        self.description = description
        conffile = self.path / "input.conf"
        if conffile.is_file():
            self.conffile = conffile
            self.config = self._getConfig()
            self._suffix = self._getSuffix()
        else:
            # Without input.conf the calculation metadata is unavailable.
            self.conffile, self.config, self._suffix = None, {}, None
        self._buildFileLists()
        # Dipole/field data are loaded lazily by _initExpecFiles().
        self.expec_z = None
        self.expec_v = None
        self.field = None
    def _buildFromTemplate(self, template, rmtexec):
        """Set up an RMT calculation with the necessary input files, executable
        and directories. The input is linked from the template directory, the
        executable linked from the provided rmtexec

        Parameters
        ==========
        template : str or pathlib.Path
            path to template directory containing input files
        rmtexec : str or pathlib.Path
            path to rmt executable

        Returns
        =======
        bool
            True when a usable (executable) rmt binary was linked in
        """
        import os
        templatepath = Path(template).absolute()
        if not self.path.is_dir():
            os.mkdir(self.path)
        filestolink = list(templatepath.glob('*'))
        noexecutable = True
        if rmtexec:
            rmtexec = Path(rmtexec).absolute()
            if os.access(rmtexec, os.X_OK):
                # Prefer the supplied executable over any rmt.x shipped with
                # the template.
                if templatepath/'rmt.x' in filestolink:
                    filestolink.remove(templatepath/'rmt.x')
                filestolink.append(rmtexec)
                noexecutable = False
        else:
            # No executable supplied: fall back to the template's rmt.x if it
            # exists and is executable.
            if templatepath/'rmt.x' in filestolink:
                if os.access(templatepath/'rmt.x', os.X_OK):
                    noexecutable = False
        if noexecutable:
            print("no suitable rmt executable provided, templated calculation may be incomplete")
        for file in filestolink:
            if file.is_file():
                # Replace any stale symlink/file before re-linking.
                dest = self.path/file.parts[-1]
                dest.unlink(missing_ok=True)
                os.symlink(file, dest)
        # RMT expects these output directories to exist before it runs.
        filestomake = [self.path/f for f in ['ground', 'state', 'data']]
        for dest in filestomake:
            if dest.exists():
                if not dest.is_dir():
                    # A plain file is in the way: replace it with a directory.
                    dest.unlink()
                    os.mkdir(dest)
            else:
                os.mkdir(dest)
        return not noexecutable
def _buildFileLists(self):
"""populate the rootfiles, datalist and statelist attributes with lists
of output files from the RMT calculation"""
self.rootfiles = list(self.path.glob("pop*"))
self.rootfiles += list(self.path.glob("expec*"))
self.rootfiles += list(self.path.glob("EField*"))
self.rootfiles.sort()
if (self.path / "data").is_dir():
self.datalist = list(self.path.glob("data/p*"))
self.datalist.sort()
else:
self.datalist = []
if (self.path / "state").is_dir():
self.statelist = list(self.path.glob("state/p*"))
self.statelist.sort()
else:
self.statelist = []
return
def __eq__(self, other):
for fA, fB in zip_longest(self.rootfiles, other.rootfiles):
try:
fileA = DataFile(fA)
fileB = DataFile(fB)
if (fileA != fileB):
return False
except Exception:
return False
return True
def __ne__(self, other):
return (not self == other)
    def _samefiles(self, other):
        """Check that this calculation and ``other`` produced the same set of
        output file names, raising if they differ.

        Raises
        ------
        FileNotFoundError
            one calculation has more files than the other (zip_longest pads
            the shorter list with None, which has no ``.parts``)
        IOError
            file names at the same position differ
        """
        for fA, fB in zip_longest(self.rootfiles + self.datalist, other.rootfiles + other.datalist):
            try:
                # NOTE(review): this relies on `assert`, which is stripped
                # under `python -O`; the name check would then be skipped.
                assert fA.parts[-1] == fB.parts[-1]
            except AttributeError:
                raise FileNotFoundError(f"Mismatch in files: {fA}, vs. {fB}")
            except AssertionError:
                raise IOError(f"Mismatch in files: {fA}, vs. {fB}")
    def agreesWith(self, other, tolerance=9):
        """
        Compare two RMT calculations to each other to ensure the computed data
        agrees to within `tolerance` significant figures. A.agreesWith(B,
        tolerance=9) will check that all pop and expec files in the root
        directory, and all pop files in the data directory agree to within 9
        decimal places. Information on the extent of agreement for each file
        is returned as part of the regression report.

        Parameters
        ==========
        other : RMTCalc
            the other RMTCalc object against which to compare this one
        tolerance : int (optional)
            number of decimal places to which to enforce agreement

        Returns
        =======
        regrep : regression_report
            information on the agreement between the output files in the calculation
        """
        from rmt_utilities.regress import regress_report
        self._samefiles(other)
        passList = []
        failList = []
        for fA, fB in zip_longest(self.rootfiles + self.datalist, other.rootfiles + other.datalist):
            try:
                fileA = DataFile(fA)
                fileB = DataFile(fB)
                fail = False
                # Tighten the comparison one decimal place at a time until the
                # files disagree; dp-1 is then the last precision that agreed.
                for dp in range(min([2 * tolerance, 16])):
                    fileA.tolerance = dp
                    if fileA != fileB:
                        if dp <= tolerance:
                            # Disagreement within the required tolerance.
                            failList.append((fA.parts[-1], dp-1))
                        fail = True
                        break
                if not fail:
                    # Agreed at every precision tried (dp is the loop's last value).
                    passList.append((fA.parts[-1], dp-1))
            except Exception:
                raise IOError
        return regress_report(failList, passList, self.path)
def _getConfig(self):
import f90nml
config = f90nml.read(self.conffile)["inputdata"]
for item in config:
f = config[item]
config[item] = f
try:
num_out_pts = config['x_last_master'] + \
config['x_last_others'] * (config['no_of_pes_to_use_outer'] - 1)
config['num_out_pts'] = num_out_pts
except KeyError:
pass
return config
def _getSuffix(self):
f = self.config["intensity"]
if hasattr(f, "__len__"):
inten = f[0]
else:
inten = f
return self.config["version_root"] + str(int(1000 * (inten))).zfill(8)
    def _initExpecFiles(self, sols):
        """Load the dipole-velocity, dipole-length and field files into the
        expec_v, expec_z and field attributes (as DataFile objects).

        Parameters
        ----------
        sols : list of str or None
            solutions (column headings) to select; passed through to DataFile
        """
        varnames = ['expec_v', 'expec_z', 'field']
        if (self._suffix):
            # Suffix known from input.conf: build the exact file names.
            filnames = [self.path / (f + self._suffix)
                        for f in ["expec_v_all.", "expec_z_all.", "EField."]]
        else:
            # No suffix available: fall back to the first matching root file
            # of each kind (None when absent).
            filnames = []
            for f in ["expec_v_all", "expec_z_all", "EField"]:
                allfiles = [x for x in self.rootfiles if f in x.stem]
                if allfiles:
                    filnames.append(allfiles[0])
                else:
                    filnames.append(None)
        for var, fil in zip(varnames, filnames):
            # Only set the attribute when the file actually exists in rootfiles;
            # otherwise the attribute keeps its previous value (None initially).
            if fil in self.rootfiles:
                setattr(self, var, DataFile(fil, sols=sols))
def _FFT(self, data, cutoff, pad):
"""Fourier transform and remove high energies"""
df = data.FFT(pad=pad)
lastindex = (min(df[df["Freq"] > cutoff].index.tolist()))
df = df.truncate(after=lastindex - 1)
return (df)
    def HHG(self, sols=None, cutoff=200 * eV, pad=1, phase=False):
        """Compute the high harmonic spectrum given by [a(w)]**2 where a is
        the Fourier transform of the dipole acceleration. The dipole data is
        read from the expec_z and expec_v files and the harmonic spectra in both
        length and velocity form is returned.

        Parameters
        ----------
        sols : list of str, optional
            list of which solutions (column headings) should be selected from
            the dipole files for processing
        cutoff : float, optional
            highest energy retained in the HHG spectra in atomic units. Default
            is 200eV (7.34 a.u)
        pad : int, optional
            pad factor used to improve resolution in Fourier Transform.
            Increases the length of the signal by factor ``pad`` and then rounds
            up to the nearest power of two. Default is 1.
        phase: bool, optional
            if True compute the harmonic phase, rather than the amplitude.

        Returns
        -------
        length : DataFrame holding the High Harmonic Spectrum computed from the
            dipole length
        velocity : DataFrame holding the High Harmonic Spectrum computed from the
            dipole velocity

        Each DataFrame has a ``Freq`` column, containing the frequency axis, and
        then amplitudes (or phases) in columns matching those in the source data files
        (expec_z_all.<> and expec_v_all.<>) or a subset as selected with the
        ``sols`` parameter.
        """
        self._initExpecFiles(sols)
        if not (getattr(self, "expec_z") or getattr(self, "expec_v")):
            print(f"Failed to read expec_z or expec_v file from {self.path}")
            return None, None
        for name, key in zip(["length", "velocity"], ["expec_z", "expec_v"]):
            if getattr(self, key):
                data = getattr(self, key)
                df = self._FFT(data, cutoff=cutoff, pad=pad)
                for col in df.columns[1:]:
                    if phase:
                        # Harmonic phase: argument of the (phase-corrected)
                        # complex spectrum.
                        Phase = np.angle(data.phasefactor * df[col])
                        df[col] = Phase
                    else:
                        # Harmonic intensity: |d(w)|^2 scaled by w**scalefactor
                        # (scalefactor converts length/velocity form to
                        # acceleration form -- defined on the DataFile).
                        amplitude = np.real(np.abs(df[col])**2)
                        amplitude = amplitude * df["Freq"]**data.scalefactor
                        df[col] = amplitude
                df.root = self.path
                setattr(self, name, df)
            else:
                setattr(self, name, None)
        return self.length, self.velocity
def ATAS(self, sols=None, cutoff=200 * eV, pad=1):
"""Compute the transient absorption spectrum. which is proportional to
the imaginary part of d(w)/E(w) where d(w) and E(w) are the Fourier
transformed dipole and electric field data respectively. Data is read
from the expec_z_all.<> and EField.<> files, and the absorption spectrum
computed for each solution therein.
Returns
-------
Observable
DataFrame containing column "Freq" holding the frequencies, and then one
column for each corresponding solution the expec_z_all file, giving
the optical density as a function of frequency.
"""
self._initExpecFiles(sols)
if not (getattr(self, "expec_z") and getattr(self, "field")):
print(f"Failed to read expec_z and field files from {self.path}")
return None
else:
ddat = self._FFT(self.expec_z, cutoff=80 * eV, pad=8)
Edat = self._FFT(self.field, cutoff=80 * eV, pad=8)
for col in ddat.columns[1:]:
rat = np.imag(ddat[col] / Edat[col])
ddat[col] = 4 * np.pi * ddat["Freq"] * rat / c
setattr(self, "TAS", ddat)
self.TAS.root = self.path
return (self.TAS)
    def _effective_cycles(self, sol_id=1, nphotons=1):
        """Calculate the total number of effective peak cycles by scaling the
        ramp cycles (assuming a sin^2 field).

        Parameters
        ----------
        sol_id : int
            solution id used to select pulse parameters from the config
        nphotons : int
            number of photons in the ionisation pathway (1 or 2 supported)
        """
        ramp = self._select_params(sol_id, 'periods_of_ramp_on')
        peak = self._select_params(sol_id, 'periods_of_pulse')
        # 'periods_of_pulse' counts ramp-on + flat-top + ramp-off; subtract
        # both ramps to leave only the flat-top cycles.
        peak -= 2*ramp
        # Each ramp contributes a reduced number of "effective" cycles.  The
        # weights are consistent with the time-average of the n-photon rate
        # over a sin^2 field envelope (rate ~ I^n ~ sin^(4n)):
        # mean(sin^4) = 3/8 = 0.375 and mean(sin^8) = 35/128 = 0.2734375.
        effective_ratio = {1: 0.3750, 2: 0.27343750}
        eff_rat = effective_ratio[nphotons]
        return peak + 2.0 * ramp * eff_rat
def _select_params(self, sol=1, param="intensity"):
"""Select a particular value for a given parameter from the calculation
configuration. For calculations with multiple solutions (i.e. several
different field configs in the same run), we can select the value for a
particular solution from the list of values read from the config file
Parameters
==========
sol : int
solution id
param : str
dictionary key for the parameter of interest
Returns
=======
pvalue : int or float
selected value from self.config[param]
"""
pvalue = self.config[param]
if hasattr(pvalue, '__len__'):
pvalue = pvalue[sol-1]
return pvalue
    def cross_sec(self, channel_list, sols=None, nphotons=1):
        """ Read the final populations for specific ionisation channels, and
        compute using information from the conffile the ionisation cross
        section.

        Parameters
        ==========
        channel_list : list of ints or str
            list of which channels should be included in the calculation
        sols : list of ints or str (optional)
            which RMTcalc solutions (field configurations) for which to compute
            the cross section
        nphotons : int
            number of photons for the ionisation pathway under investigation

        Returns
        =======
        cs : dict
            dictionary with a key for each RMTcalc solution and the computed
            cross section for that solution
        """
        from rmt_utilities.dataobjects import popdata
        # Prefer the per-channel population file when it exists; otherwise
        # hand popdata the whole data directory.
        if Path(f"{self.path}/data/popchn.{self._suffix}").absolute() in self.datalist:
            pop_path = self.path/"data"/f"popchn.{self._suffix}"
        else:
            pop_path = self.path/"data"
        allpops = popdata(pop_path)
        # Channel ids are zero-padded strings in the population files.
        channel_list = [str(x).zfill(allpops.chandigits) for x in channel_list]
        if not sols:
            # Default: every solution present in the population data.
            sols = [x for x in range(1, allpops.numsol+1)]
        else:
            sols = [int(x) for x in sols]
        solstrs = [str(x).zfill(4) for x in sols]
        cs = {}
        for sol, solstr in zip(sols, solstrs):
            intensity = self._select_params(sol, 'intensity')
            frequency = self._select_params(sol, 'frequency')
            cycles = self._effective_cycles(sol, nphotons=nphotons)
            cs[solstr] = allpops.cross_sec(solstr, channel_list, frequency,
                                           intensity, cycles, nphotons)
        return cs
def execute(self, mpirun, rmtexec="./rmt.x", logfile="./log.out",
taskopt="-n", mpiopts=None):
"""execute the RMT calculation using the provided input
Parameters
==========
mpirun : str or path
path to mpirun executable
rmtexec : str or path
path to rmt executable, default ./rmt.x
logfile : str or path
path to logfile for stdout from RMT, default ./log.out
taskopt : str
the command line option/flag for specifying the number of mpi tasks
e.g. for mpirun taskopt = "-n", for srun, taskopt = "--ntasks"
mpiopts : str
any additional options to be passed to mpirun
Returns
=======
Success : bool
True if calculation executed successfully
"""
from subprocess import run, PIPE
from os import environ
mpiTasks = self.config["no_of_pes_to_use_inner"] \
+ self.config["no_of_pes_to_use_outer"]
mpiTasks = str(mpiTasks)
environ["disk_path"] = str(self.path)+"/"
success = False
cmd = [mpirun, taskopt, mpiTasks]
if mpiopts:
for opt in mpiopts.split(" "):
cmd.append(opt)
cmd.append(str(rmtexec))
try:
rmtrun = run(cmd, stdout=PIPE, universal_newlines=True)
success = (rmtrun.returncode == 0)
except Exception:
print("RMT execution Failed")
if success:
with open(self.path/logfile, 'w') as f:
for line in rmtrun.stdout:
f.write(line)
self._buildFileLists()
return success
# def _attachMetaData(self, df):
# """use the configuration file to associate calculation parameters with
# specific columns in an output data structure"""
# attlist = ["intensity",
# "frequency"] # expand later with more attributes
# for col in df.columns[1:]:
# transforms "0001_z" into 0 for instance
# attr_index = int(col[:4]) - 1
# for att in attlist:
# f = self.config[att]
# f = f if isinstance(f, list) else [f]
# setattr(df[col], att, f[attr_index]) | /rmt_utilities-1.0-py3-none-any.whl/rmt_utilities/rmtutil.py | 0.804444 | 0.486636 | rmtutil.py | pypi |
def read_command_line(objectstring='requested distribution'):
    """Build the argument parser shared by the reform plotting tools.

    Parameters
    ----------
    objectstring : str
        short description of the distribution, interpolated into help texts
    """
    from argparse import ArgumentParser, FileType

    description = (
        f"Plot the {objectstring} from the RMT-produced files. Note that if "
        "an input.conf file cannot be found in either the default directory "
        "(../) or the directory supplied by the -d option then the "
        f"{objectstring} cannot be computed")
    parser = ArgumentParser(description=description)
    parser.add_argument('file', type=FileType('r'),
                        help='OuterWave_momentum/density file generated by reform')
    parser.add_argument('-d', '--dir', default="../",
                        help='RMT calculation root directory')
    parser.add_argument('-o', '--output', type=str, default=None,
                        help=f'Output the {objectstring} to named file')
    parser.add_argument('--plot', '-p', action='store_true',
                        help='display plot')
    parser.add_argument('--normalise_bar', '-nb', action='store_true',
                        help='normalise the colour bar scale')
    parser.add_argument('--log_scale', '-l', action='store_true',
                        help='plot on a log (base 10) scale')
    parser.add_argument('--rmax', type=float, default=None,
                        help='Set maximum radial value for plot')
    return parser
def momentum_command_line():
    """Parser for the momentum-distribution tool: adds the --rskip option."""
    parser = read_command_line('momentum distribution')
    parser.add_argument('--rskip', '-r', type=int, default=200,
                        help='(int) size of rskip to use in a.u.')
    return parser
def density_command_line():
    """Parser for the electron-density tool: adds the --rmatr option."""
    parser = read_command_line('electron density')
    parser.add_argument('--rmatr', '-r', type=float, default=20.0,
                        help='(float) size of rmatrix boundary in a.u.')
    return parser
def select_command_line(objectstring):
    """Return the parser matching ``objectstring``, falling back to the
    generic parser for unrecognised object names."""
    builders = {
        'momentum distribution': momentum_command_line,
        'electron density': density_command_line,
    }
    try:
        return builders[objectstring]()
    except KeyError:
        return read_command_line(objectstring)
def set_default_output_file(args, objectstring):
    """When neither --plot nor --output was requested, default ``output`` to a
    PNG named after the object (spaces replaced by underscores)."""
    if not (args.plot or args.output):
        args.output = objectstring.replace(' ', '_') + ".png"
    return args
def get_command_line(objectstring='requested distribution'):
    """Parse the command line for the tool named by ``objectstring``.

    Returns
    -------
    argparse.Namespace
        parsed arguments with ``output`` defaulted to
        '<objectstring>.png' when neither --plot nor --output was given
    """
    args = select_command_line(objectstring).parse_args()
    # BUGFIX: set_default_output_file requires (args, objectstring); it was
    # previously called with only `args`, raising TypeError on every run.
    return set_default_output_file(args, objectstring)
return set_default_output_file(args) | /rmt_utilities-1.0-py3-none-any.whl/rmt_utilities/reform_cli.py | 0.782288 | 0.187188 | reform_cli.py | pypi |
from rmt_utilities.rmtutil import RMTCalc
from pathlib import Path
class regress_report:
    """Report on the file-by-file agreement between two RMT calculations."""

    def __init__(self, failList=None, passList=None, location=None):
        """
        Parameters
        ----------
        failList : list of tuples, optional
            (filename, decimal places of agreement) for each pair of files
            that does NOT agree to the required tolerance
        passList : list of tuples, optional
            (filename, decimal places of agreement) for each pair of files
            that agrees to the required tolerance
        location : path or str, optional
            location (directory) of the RMT calculation
        """
        # BUGFIX: the defaults used to be the shared mutable lists `[]`, so
        # every report built without arguments aliased the same list objects.
        self.fails = [] if failList is None else failList
        self.passes = [] if passList is None else passList
        self.dir = location

    def __bool__(self):
        """True when no comparison failed."""
        return len(self.fails) == 0

    def __str__(self):
        """Full text of the report."""
        return self.compileReport()

    def compileCalcs(self, listOfCalcs, agreeString):
        """Expand (filename, decimal places) tuples into report lines.

        Parameters
        ----------
        listOfCalcs : list of tuples
            information on comparison of pairs of files
        agreeString : str
            either "Failing" or "Passing"
        """
        msg = [f"RMT calculation in: {str(self.dir)}",
               f"{agreeString} comparisons:",
               "=========="]
        for fname, dp in listOfCalcs:
            msg.append(f"{fname} agrees to {dp} decimal places")
        msg.append("")
        return msg

    def compileReport(self):
        """Assemble the failing and passing sections into one report."""
        msg = []
        if (self.fails):
            msg += self.compileCalcs(self.fails, "Failing")
        if (self.passes):
            msg += self.compileCalcs(self.passes, "Passing")
        return "\n".join(msg)
class regressError(Exception):
    """Raised when two RMT calculations do not agree to the desired
    level of tolerance."""

    def __init__(self, report):
        failing_lines = report.compileCalcs(report.fails, "Failing")
        self.message = "\n".join(failing_lines)
        super().__init__(self.message)
class executionError(Exception):
    """Raised when an RMT calculation does not execute as expected."""

    def __init__(self, directory):
        self.message = (
            f"RMT calculation in {directory} failed to execute as\nexpected"
        )
        super().__init__(self.message)
class mpirunError(Exception):
    """Raised when no usable mpirun executable can be found on the host."""

    def __init__(self):
        self.message = (
            "RMT regression testing requires a working mpi installation. "
            "Please provide the path to mpirun when setting up the tests"
        )
        super().__init__(self.message)
class testcase():
    """
    Individual RMT regression test calculation

    Attributes
    ----------
    testdir : pathlib.Path
        root directory for RMT test calculation
        The directory structure should be
        testdir
        ├── inputs        # directory containing the input files
        ├── regress_run   # directory containing RMT output for comparison
    exec : str or pathlib.Path
        rmt executable
    mpirun : str or pathlib.Path
        mpirun executable
    taskopt : str
        the command line option/flag for specifying the number of mpi tasks
        e.g. for mpirun taskopt = "-n", for srun, taskopt = "--ntasks"
    mpiopts : str
        any additional options to be passed to mpirun
    """
    def __init__(self, testdir, rmtexec, mpiRun, taskopt="-n", mpiopts=None):
        import os
        import errno
        # Validate the test directory.
        if Path(testdir).is_dir():
            self.testdir = Path(testdir)
        else:
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), Path(testdir))
        # The rmt binary must be executable.
        if os.access(rmtexec, os.X_OK):
            self.exec = rmtexec
        else:
            raise PermissionError(
                errno.EPERM, os.strerror(errno.EPERM), Path(rmtexec))
        # Fall back to whichever mpirun is on PATH when none (or a
        # non-executable one) is supplied.
        if not mpiRun or not os.access(mpiRun, os.X_OK):
            from shutil import which
            self.mpirun = which('mpirun')
        else:
            self.mpirun = mpiRun
        if not self.mpirun:
            raise mpirunError
        # Both the reference output and the input template must exist.
        for required_directory in ['regress_run', 'inputs']:
            if not (self.testdir / required_directory).is_dir():
                raise FileNotFoundError(
                    errno.ENOENT, os.strerror(errno.ENOENT),
                    self.testdir / required_directory)
        self.target_results = RMTCalc(path=self.testdir/'regress_run')
        self.template = self.testdir / 'inputs'
        # Each test runs in a uniquely named directory.
        self.rundir = self.testdir / f'run_{self._UID()}'
        self.taskopt = taskopt
        self.mpiopts = mpiopts
        return
def _UID(self):
"""generate a unique 10 digit ID for directory naming"""
import random
import string
digits = string.digits
return (''.join(random.choice(digits) for i in range(10)))
def runTest(self, result_dict={}, key=None, tolerance=9):
""" Run an RMT calculation and test against the expected output
Parameters
----------
result_dict : dictionary
dictionary to contain the result of the test (needed for
multiprocessing)
key : str
dictionary key for storing the result of the test. I.E the result of
the test will be stored in result_dict[key]
Returns
-------
Success : bool
whether or not the calculation agrees with the expected output
"""
calc = RMTCalc(path=self.rundir, template=self.template, rmtexec=self.exec)
calcruns = calc.execute(rmtexec=self.exec, mpirun=self.mpirun,
taskopt=self.taskopt, mpiopts=self.mpiopts)
if calcruns:
agreement = calc.agreesWith(self.target_results, tolerance=tolerance)
result_dict[key] = agreement
if not agreement:
raise regressError(agreement)
else:
return agreement
else:
result_dict[key] = "Calculation did not execute correctly"
raise executionError(self.rundir)
def cleanupdir(self):
from subprocess import call
call(["rm", "-rf", self.rundir])
    def mvdir(self, newname):
        """Move self.rundir to self.testdir/newname, replacing any existing
        directory of that name, and update self.rundir accordingly.

        Parameters
        ----------
        newname : str
            new directory name, created under self.testdir
        """
        from subprocess import call
        if self.rundir.is_dir():
            target_dir = self.testdir/newname
            if target_dir.exists():
                # Clobber a stale copy so `mv` lands at the expected path
                # instead of moving the run directory inside it.
                call(["rm", "-rf", target_dir])
            call(["mv", self.rundir, target_dir])
            self.rundir = self.testdir/newname
class RMT_regression_tests:
    """
    Class for building RMT calculations and comparing with expected outputs.
    Reads a yaml file with information on each test case, builds the test
    directories, executes the calculations and provides as its main output a
    True or False statement of whether or not the tests agree within a
    specified tolerance. Additionally, can provide a summary report of the
    level of agreement in each test calculation.

    TODO:
    [ ] possible: autogenerate yaml file based on available test calculations?
    [ ] re-engineer runTest so that it can be used with Assert as part of pytest
    [ ] handle the report differently (it can't be used by pytest) custom Error?

    Parameters
    ----------
    taskopt : str
        the command line option/flag for specifying the number of mpi tasks
        e.g. for mpirun taskopt = "-n", for srun, taskopt = "--ntasks"
    mpiopts : str
        any additional options to be passed to mpirun
    """
    def __init__(self, testlist, rmtexec=None, mpiRun=None, taskopt="-n",
                 mpiopts=None, tolerance=9):
        # NOTE(review): the `tolerance` parameter is accepted but never used
        # here or stored; runTest's own default applies -- confirm intent.
        import yaml
        try:
            with open(testlist, 'r') as stream:
                tests = (yaml.safe_load(stream))
        except FileNotFoundError:
            # Missing test list: leave the object without a tests attribute.
            print(f'{testlist} cannot be found')
            return
        self.tests = []
        # The yaml maps test names to test directories.
        for test in tests:
            self.tests.append(testcase(tests[test], rmtexec, mpiRun, taskopt,
                                       mpiopts))
        return
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self.tests):
result = self.tests[self.n]
self.n += 1
return result
else:
raise StopIteration
    def runAllTests(self):
        """Run every RMT test calculation in self.tests.

        Each test runs in its own process; results are collected in a
        manager-backed dict keyed by the test's directory.
        """
        import multiprocessing as mp
        allprocs = []
        manager = mp.Manager()
        # Shared mapping so child processes can report their outcomes.
        results = manager.dict()
        for calc in self:
            try:
                p = mp.Process(target=calc.runTest, args=(results,
                                                          calc.testdir))
                p.start()
                allprocs.append(p)
            except Exception:
                # NOTE(review): failures to *start* a test are silently
                # dropped (no entry in `results`) -- confirm this best-effort
                # behaviour is intended.
                pass
        # Wait for every launched test to finish.
        for p in allprocs:
            p.join()
        return results
def cleanup(self):
"""clean up (remove) all test directories"""
for calc in self.tests:
calc.cleanupdir() | /rmt_utilities-1.0-py3-none-any.whl/rmt_utilities/regress.py | 0.807499 | 0.516656 | regress.py | pypi |
from typing import Sequence, Tuple, Union
import miniball as mnbl
import numpy as nmpy
from scipy.spatial.distance import pdist as PairwiseDistances
array_t = nmpy.ndarray
def Simplex(
    dimension: int,
    /,
    *,
    centered: bool = False,
    around: Union[array_t, None] = None,
    with_a_margin: Union[float, None] = None,
    with_m_margin: Union[float, None] = None,
) -> array_t:
    """Return the (dimension+1, dimension) vertex matrix of a regular simplex.

    By default the simplex is built from the canonical basis vectors plus one
    extra vertex. Options (mutually shaping the result):
    - centered: translate so the centroid is at the origin.
    - around: a point set of shape (n_points, dimension); the simplex is
      centered, then scaled and translated so that it circumscribes the
      minimal bounding ball of that set (computed by miniball).
    - with_a_margin: additive margin (>= 0) added to the bounding-ball radius
      (only meaningful together with `around`).
    - with_m_margin: multiplicative margin applied to the bounding-ball
      radius; values below 1 are interpreted as "1 + margin"
      (only meaningful together with `around`).
    """
    output = nmpy.empty((dimension + 1, dimension), dtype=nmpy.float64)
    # See: https://codegolf.stackexchange.com/questions/152774/create-an-n-dimensional-simplex-tetrahedron
    # Answer by PattuX on Jan 8 '18 at 3:54
    output[:dimension, :] = nmpy.eye(dimension)
    output[dimension, :] = (1.0 + nmpy.sqrt(dimension + 1.0)) / dimension
    if around is not None:
        if around.shape[1] != dimension:
            raise ValueError(f"{around.shape[1]}: Invalid dimension of point set \"around\"; Expected={dimension}")
        # Center the simplex on its own centroid before rescaling.
        nmpy.subtract(output, nmpy.mean(output, axis=0, keepdims=True), out=output)
        # Mean vertex distance to the centroid ~ circumradius of the simplex.
        radius = nmpy.mean(nmpy.linalg.norm(output, axis=1))
        target_center, target_squared_radius = mnbl.get_bounding_ball(around)
        target_radius = nmpy.sqrt(target_squared_radius)
        if with_a_margin is not None:
            if with_a_margin < 0.0:
                raise ValueError(f"{with_a_margin}: Negative additive margin; Expected to be >= 0")
            radius_scaling = (with_a_margin + target_radius) / radius
        elif with_m_margin is not None:
            # Margins below 1 are treated as fractions on top of the radius.
            if with_m_margin < 1.0:
                with_m_margin += 1.0
            radius_scaling = with_m_margin * target_radius / radius
        else:
            radius_scaling = target_radius / radius
        nmpy.multiply(output, radius_scaling, out=output)
        # Move the scaled simplex onto the bounding-ball center.
        nmpy.add(output, target_center.reshape(1, dimension), out=output)
    elif centered:
        nmpy.subtract(output, nmpy.mean(output, axis=0, keepdims=True), out=output)
    return output
def IsARegularSimplex(
    check: nmpy.ndarray,
    /,
    *,
    tolerance: float = nmpy.finfo(nmpy.float64).eps,
    return_issues: bool = False,
) -> Union[bool, Tuple[bool, Sequence[str]]]:
    """Check whether `check` is the vertex matrix of a regular simplex.

    A valid matrix has shape (n+1, n) and all pairwise vertex distances
    equal within `tolerance`.

    Parameters
    ----------
    check : ndarray
        candidate vertex matrix, one vertex per row
    tolerance : float
        maximum allowed spread (max - min) of pairwise distances
    return_issues : bool
        when True AND the matrix is invalid, return (False, issues);
        otherwise return a plain bool (this asymmetry is preserved for
        backward compatibility).
    """
    valid = True
    issues = []
    shape = check.shape
    # A simplex in n dimensions has n+1 vertices, hence shape (n+1, n).
    if shape[1] != shape[0] - 1:
        valid = False
        issues.append(
            f"{shape}: Invalid vertex matrix dimension; "
            f"Expected={shape[0]+1}x{shape[0]} or {shape[1]}x{shape[1]-1}"
        )
    # FIX: the default previously used nmpy.float, an alias removed in
    # NumPy 1.24; nmpy.float64 is the explicit equivalent.
    distances = PairwiseDistances(check)
    min_distance = nmpy.min(distances)
    max_distance = nmpy.max(distances)
    if max_distance - min_distance > tolerance:
        valid = False
        issues.append(
            f"[{min_distance},{max_distance}]: "
            f"Interval of pairwise distances larger than tolerance ({tolerance})"
        )
    if return_issues and not valid:
        return False, issues
    return valid
from functools import partial
from multiprocessing import Pool
from pathlib import Path
from typing import Iterator, Tuple, Callable, List, Any
import numpy as np
from pysam import AlignmentFile
from .utils import echo
def chop_contig(size: int, chunksize: int) -> Iterator[Tuple[int, int]]:
    """Yield 0-based (start, end) windows of at most `chunksize` bases that
    tile a contig of length `size`; the last window may be shorter.

    Raises ValueError (lazily, on first iteration) when either argument
    is below 1.
    """
    if size < 1:
        raise ValueError("Size must be at least 1.")
    if chunksize < 1:
        raise ValueError("Chunksize must be at least 1.")
    for window_start in range(0, size, chunksize):
        yield window_start, min(window_start + chunksize, size)
def softclip_bases(reader: AlignmentFile, contig: str,
                   region: Tuple[int, int]) -> int:
    """Sum the soft-clipped base count over all reads in `region` of `contig`."""
    start, end = region
    total = 0
    for aln in reader.fetch(contig=contig, start=start, stop=end):
        cig = aln.cigartuples
        if cig is None:
            continue
        # cigartuples is a list of (operation, length); operation 4 == softclip
        total += sum(length for operation, length in cig if operation == 4)
    return total
def coverage(reader: AlignmentFile, contig: str, region: Tuple[int, int],
             method: Callable = np.mean) -> float:
    """Summarize per-base depth over `region` with `method` (mean by default)."""
    start, end = region
    # count_coverage returns one array per base (A, C, G, T); collapse them.
    per_base = np.sum(reader.count_coverage(contig=contig, start=start, stop=end),
                      axis=0)
    return method(per_base)
def process_bam(path: Path, chunksize: int = 100,
                contig: str = "chrM") -> np.ndarray:
    """
    Process bam file to an ndarray of per-bin features.

    For every `chunksize` window of `contig`, three features are collected:
    read count, coverage (mean per-base depth) and softclipped base count.
    The flat feature vector is normalized by the total read count on the
    contig before being returned.

    :param path: path to the BAM/CRAM file
    :param chunksize: bin width in bases
    :param contig: contig to profile
    :raises ValueError: when `contig` is absent from the BAM header
    :returns: numpy ndarray of shape (n_features,)
    """
    echo("Calculating features for {0}".format(path.name))
    reader = AlignmentFile(str(path))
    try:
        contig_idx = reader.references.index(contig)
    except ValueError:
        # Re-raise with a clearer, domain-specific message.
        raise ValueError("Contig {0} does not exist in BAM file".format(
            contig
        ))
    contig_size = reader.lengths[contig_idx]
    full_array = []
    tot_reads = 0
    for region in chop_contig(contig_size, chunksize):
        block = []
        start, end = region
        n_reads = reader.count(contig=contig, start=start, stop=end)
        tot_reads += n_reads
        cov = coverage(reader, contig, region)
        softclip = softclip_bases(reader, contig, region)
        # Feature layout per bin: [read count, coverage, softclip bases]
        block += [n_reads, cov, softclip]
        full_array += block
    # add normalization step
    # NOTE(review): if the contig holds no reads, tot_reads is 0 and this
    # division produces inf/nan with a runtime warning — confirm inputs.
    normalized = np.array(full_array) / tot_reads
    echo("Done calculating features for {0}".format(path.name))
    return normalized
def make_array_set(bam_files: List[Path], labels: List[Any],
                   chunksize: int = 100,
                   contig: str = "chrM",
                   cores: int = 1) -> Tuple[np.ndarray, np.ndarray]:
    """
    Make set of numpy arrays corresponding to data and labels.
    I.e. train/testX and train/testY in scikit-learn parlance.

    :param bam_files: List of paths to bam files
    :param labels: list of labels.
    :param chunksize: bin width (bases) forwarded to process_bam
    :param contig: contig name forwarded to process_bam
    :param cores: number of cores to use for processing
    :raises ValueError: when cores < 1
    :return: tuple of X and Y numpy arrays. X has shape (n_files, n_features).
    Y has shape (n_files,).
    """
    if cores < 1:
        raise ValueError("Number of cores must be at least 1.")
    proc_func = partial(process_bam, chunksize=chunksize, contig=contig)
    # FIX: the pool was previously created without ever being closed/joined,
    # leaking worker processes; the context manager guarantees teardown.
    with Pool(cores) as pool:
        # this returns a list of ndarrays.
        arr_X = pool.map(proc_func, bam_files)
    return np.array(arr_X), np.array(labels)
import datetime
from pathlib import Path
from typing import List, Any
import joblib
import click
import io
import base64
import json
import pkg_resources
def echo(msg: str):
    """Print `msg` to stderr in green, prefixed with the current UTC time."""
    stamp = str(datetime.datetime.utcnow())
    line = "[ {0} ] {1}".format(stamp, msg)
    click.secho(line, fg="green", err=True)
def load_list_file(path: Path) -> List[Path]:
    """Read a text file with one path per line and return them as Path objects."""
    lines = path.read_text().splitlines()
    return [Path(line.strip()) for line in lines]
def dir_to_bam_list(path: Path) -> List[Path]:
    """Return the .bam and .cram files directly inside `path` (non-recursive).

    :param path: directory to scan
    :return: list of matching paths in iterdir() order (unsorted)
    """
    # str.endswith accepts a tuple of suffixes, replacing the boolean chain.
    return [entry for entry in path.iterdir()
            if entry.name.endswith((".bam", ".cram"))]
def get_rna_cd_version():
    """Return the installed rna_cd distribution's version string.

    NOTE(review): pkg_resources is deprecated; importlib.metadata.version
    is the stdlib replacement — confirm minimum Python before switching.
    """
    return pkg_resources.get_distribution("rna_cd").version


def get_sklearn_version():
    """Return the installed scikit-learn distribution's version string."""
    return pkg_resources.get_distribution("scikit-learn").version
def save_sklearn_object_to_disk(obj: Any, path: Path):
    """Serialize `obj` with joblib, base64-encode the bytes, and write a JSON
    envelope to `path` together with rna_cd/sklearn version metadata."""
    buffer = io.BytesIO()
    envelope = {
        "rna_cd_version": get_rna_cd_version(),
        "sklearn_version": get_sklearn_version(),
        "datetime_stored": str(datetime.datetime.utcnow())
    }
    joblib.dump(obj, buffer, compress=True)
    # JSON cannot hold raw bytes, hence the base64 text encoding.
    envelope['obj'] = base64.b64encode(buffer.getvalue()).decode('utf-8')
    with path.open("w") as handle:
        json.dump(envelope, handle)
def load_sklearn_object_from_disk(path: Path) -> Any:
    """Load a JSON-serialized object written by save_sklearn_object_to_disk.

    :param path: path to the JSON envelope
    :raises ValueError: when the stored sklearn version is below 0.20.0
    :return: the deserialized object

    NOTE(review): joblib.load ultimately unpickles — only load files from
    trusted sources.
    """
    with path.open("r") as handle:
        d = json.load(handle)
    # Objects stored with very old sklearn versions are refused outright;
    # a missing version key is treated as "0.0.0" and therefore rejected.
    if pkg_resources.parse_version(
        d.get("sklearn_version", "0.0.0")
    ) < pkg_resources.parse_version("0.20.0"):
        raise ValueError("We do not support loading objects with sklearn "
                         "versions below 0.20.0")
    blob = base64.b64decode(d.get("obj", ""))
    file_like_obj = io.BytesIO(blob)
    loaded = joblib.load(file_like_obj)
    return loaded | /rna_cd-0.2.0-py3-none-any.whl/rna_cd/utils.py | 0.631594 | 0.253309 | utils.py | pypi |
import enum
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from .bam_process import make_array_set
from .utils import echo
class PredClass(enum.Enum):
    """Possible contamination calls for a sample."""
    positive = "pos"
    negative = "neg"
    unknown = "unknown"


class Prediction(object):
    """Immutable holder for one sample's classification outcome.

    This class looks a bit java like due to trying to support typed
    attributes on Python 3.5, which lacks typed class attributes; hence
    the private fields plus read-only properties.
    """

    def __init__(self, prediction, most_likely_prob, pos_prob, neg_prob):
        self._prediction = prediction
        self._most_likely_prob = most_likely_prob
        self._pos_prob = pos_prob
        self._neg_prob = neg_prob

    @property
    def prediction(self) -> PredClass:
        """The predicted class (positive/negative/unknown)."""
        return self._prediction

    @property
    def most_likely_prob(self) -> float:
        """Probability of the most likely class."""
        return self._most_likely_prob

    @property
    def pos_prob(self) -> float:
        """Probability of the 'pos' class.

        FIX: previously returned self.pos_prob (the property itself),
        causing infinite recursion on access.
        """
        return self._pos_prob

    @property
    def neg_prob(self) -> float:
        """Probability of the 'neg' class."""
        return self._neg_prob

    @classmethod
    def from_model_proba(cls, model, proba: Tuple[float],
                         unknown_threshold: float) -> 'Prediction':
        """Build a Prediction from one row of model.predict_proba output.

        When the best class probability is below `unknown_threshold`, the
        call is downgraded to PredClass.unknown.
        """
        likely = max(proba)
        if likely < unknown_threshold:
            pred_class = PredClass.unknown
        else:
            # Map the winning probability back to the model's class label.
            class_name = model.classes_[np.where(proba == likely)][0]
            pred_class = PredClass(class_name)
        pos_prob = proba[np.where(model.classes_ == "pos")][0]
        neg_prob = proba[np.where(model.classes_ == "neg")][0]
        return cls(pred_class, likely, pos_prob, neg_prob)
def train_svm_model(positive_bams: List[Path], negative_bams: List[Path],
                    chunksize: int = 100, contig: str = "chrM",
                    cross_validations: int = 3, verbosity: int = 1,
                    cores: int = 1,
                    plot_out: Optional[Path] = None) -> GridSearchCV:
    """
    Run SVM training on a list of positive BAM files
    (i.e. _with_ contamination) and a list of negative BAM files
    (i.e. _without_ contamination).

    For all bam files features are collected over one contig. This contig is
    binned, and for each bin two different metrics of coverage are collected,
    in addition to the softclip rate.

    These features are then fed to a sklearn pipeline with three steps:
    1. A scaling step using StandardScaler
    2. A dimensional reduction step using PCA.
    3. A classification step using an SVM.

    Hyperparameters are tuned using a grid search with cross validations.
    Optionally saves a plot of the top two PCA components with the training
    samples.

    :param positive_bams: List of BAM files with contaminations
    :param negative_bams: List of BAM files without contaminations.
    :param chunksize: The size in bases for each chunk (bin)
    :param contig: The name of the contig.
    :param cross_validations: The amount of cross validations
    :param verbosity: Verbosity parameter of sklearn. Increase to see more
    messages.
    :param cores: Amount of cores to use for both metric collection and
    training.
    :param plot_out: Optional path for PCA plot.
    :raises ValueError: when either BAM list is empty or the lists overlap
    :returns: GridSearchCV object containing tuned pipeline.
    """
    if len(positive_bams) < 1:
        raise ValueError("The list of positive BAM files may not be empty.")
    if len(negative_bams) < 1:
        raise ValueError("The list of negative BAM files may not be empty.")
    # sets must be disjoint (i.e. no common elements)
    if not set(positive_bams).isdisjoint(set(negative_bams)):
        raise ValueError("An overlap exists between the lists of positive "
                         "and negative bam files.")
    labels = ["pos"]*len(positive_bams) + ["neg"]*len(negative_bams)
    arr_X, arr_Y = make_array_set(positive_bams+negative_bams, labels,
                                  chunksize, contig, cores)
    estimators = [
        ("scale", StandardScaler()),
        ("reduce_dim", PCA()),
        ("svm", SVC())
    ]
    echo("Setting up processing pipeline for SVM model")
    # components MUST fall between 0 ... min(n_samples, n_features)
    # cross-validation additionally reduces amount of samples
    n_samples = int(arr_X.shape[0] * (1 - (1/cross_validations)))
    max_components = min(n_samples, arr_X.shape[1])
    # NOTE(review): with very few samples max_components can be <= 2, which
    # makes this range empty and GridSearchCV fail — confirm minimum data size.
    components_params = list(range(2, max_components))
    param_grid = {
        "reduce_dim__n_components": components_params,
        "reduce_dim__whiten": [False, True],
        "svm__gamma": [0.1, 0.01, 0.001, 0.0001,
                       1, 10, 100, 1000],
        "svm__shrinking": [True, False],
        # predict_proba downstream requires probability estimates.
        "svm__probability": [True]
    }
    pipeline = Pipeline(estimators)
    searcher = GridSearchCV(pipeline, cv=cross_validations,
                            param_grid=param_grid,
                            scoring="accuracy", verbose=verbosity,
                            pre_dispatch=1, n_jobs=cores)
    echo("Starting grid search for SVC model with {0} "
         "cross validations".format(cross_validations))
    searcher.fit(arr_X, arr_Y)
    # FIX: message previously read "Finished gid search".
    echo("Finished grid search with best score: {0}.".format(
        searcher.best_score_)
    )
    echo("Best parameters: {0}".format(searcher.best_params_))
    if plot_out is not None:
        echo("Plotting training samples onto top 2 PCA components.")
        plot_pca(searcher, arr_X, arr_Y, plot_out)
    echo("Finished training.")
    return searcher
def plot_pca(searcher: GridSearchCV, arr_X: np.ndarray, arr_Y: np.ndarray,
             img_out: Path) -> None:
    """Project the training samples on the two leading PCA components of the
    tuned pipeline and save the scatter plot as a PNG at `img_out`."""
    pca = searcher.best_estimator_.named_steps['reduce_dim']
    fig = plt.figure(figsize=(6, 11))
    axis = fig.add_subplot(111)
    # Positives first (red), then negatives (blue) — same draw order and
    # legend labels as before.
    for label, colour, legend in (("pos", "red", "Train positives"),
                                  ("neg", "blue", "Train negatives")):
        projected = pca.transform(arr_X[arr_Y == label])
        axis.scatter(projected[:, 0], projected[:, 1],
                     color=colour, label=legend)
    axis.set_xlabel("1st component")
    axis.set_ylabel("2nd component")
    axis.legend()
    fig.savefig(str(img_out), format="png", dpi=300)
def predict_labels_and_prob(model, bam_files: List[Path],
                            chunksize: int = 100, contig: str = "chrM",
                            cores: int = 1,
                            unknown_threshold: float = 0.75) -> List[Prediction]:  # noqa
    """
    Predict labels and probabilities for a list of bam files.

    :param unknown_threshold: The probability threshold below which samples
    are considered to be 'unknown'. Must be between 0.5 and 1.0
    :returns: list of Prediction classes
    """
    if not 0.5 < unknown_threshold < 1.0:
        raise ValueError("unknown_threshold must be between 0.5 and 1.0")
    features, _ = make_array_set(bam_files, [], chunksize, contig, cores)
    probabilities = model.predict_proba(features)
    # One Prediction per sample row of the probability matrix.
    return [Prediction.from_model_proba(model, row, unknown_threshold)
            for row in probabilities]
import argparse # Argument parsing
import logging # Logging behaviour
import pandas # Handle large datasets
import pytest
import yaml # Handle Yaml IO
import os.path as op # Path and file system manipulation
import pandas # Deal with TSV files (design)
from itertools import chain # Chain iterators
from pathlib import Path # Easily handle paths
from typing import Any, Dict, List, Optional, Union # Type hints
# Building custom class for help formatter
class CustomFormatter(
    argparse.RawDescriptionHelpFormatter,
    argparse.ArgumentDefaultsHelpFormatter
):
    """
    This class is used only to allow line breaks in the documentation,
    without breaking the classic argument formatting (defaults still shown).
    It combines the two stock argparse formatters and adds no behaviour.
    """
def write_yaml(output_yaml: Path, data: Dict[str, Any]) -> None:
    """
    Save the given dictionary as a block-style YAML text file.
    """
    with output_yaml.open("w") as handle:
        yaml.dump(data, handle, default_flow_style=False)
def read_aggregation_table(count: str) -> pandas.DataFrame:
    """
    Load a tab-separated aggregation table (first column as index, first
    row as header) and return it as a DataFrame.
    """
    table_path = Path(count)
    data = pandas.read_csv(table_path, sep="\t", index_col=0, header=0)
    logging.debug("Head of the count data:")
    logging.debug(data.head())
    return data
def fq_link(design: pandas.DataFrame) -> Dict[str, str]:
    """
    Map each fastq basename in the design to its resolved absolute path.

    Handles both pair-ended designs (Upstream_file + Downstream_file
    columns) and single-ended ones (Upstream_file only).
    """
    if "Downstream_file" in design.columns:
        # Paired-ended case: both mates contribute entries.
        fastqs = chain(design["Upstream_file"], design["Downstream_file"])
    else:
        # Single-ended case
        fastqs = design["Upstream_file"]
    return {op.basename(fq): op.realpath(fq) for fq in fastqs}
@pytest.mark.parametrize(
    "design, expected",
    [
        (
            pandas.DataFrame(
                {
                    "S1": {
                        "Upstream_file": "/path/to/S1_R1.fq",
                        "Downstream_file": "/other/to/S1_R2.fq",
                    }
                }
            ).T,
            {"S1_R1.fq": "/path/to/S1_R1.fq", "S1_R2.fq": "/other/to/S1_R2.fq"},
        ),
        (
            pandas.DataFrame({"S1": {"Upstream_file": "/path/to/S1_R1.fq"}}).T,
            {"S1_R1.fq": "/path/to/S1_R1.fq"},
        ),
        (
            # Relative input: realpath of a nonexistent path equals abspath.
            pandas.DataFrame({"S1": {"Upstream_file": "path/to/S1_R1.fq"}}).T,
            {"S1_R1.fq": op.abspath("path/to/S1_R1.fq")},
        ),
    ],
)
def test_fq_link(
    design: pandas.DataFrame,
    expected: Optional[Dict[str, str]]
) -> None:
    """
    Test the function fq_link against paired, single and relative-path designs.
    """
    assert fq_link(design) == expected
def fq_root(design: pandas.DataFrame) -> Dict[str, str]:
    """
    Map each fastq root name (basename minus its fastq extension) to the
    path of its link under raw_data/.

    Raises ValueError when a file carries none of the known extensions.
    """
    # For now, bz2 compression is not taken into account.
    known_exts = ("fq", "fastq", "fq.gz", "fastq.gz")
    if "Downstream_file" in design.columns:
        # Paired-ended case
        fastqs = chain(design["Upstream_file"], design["Downstream_file"])
    else:
        # Single-ended case
        fastqs = design["Upstream_file"]
    result = {}
    for fq_file in fastqs:
        base_name = op.basename(fq_file)
        # First matching extension wins, in the same order as before.
        matching = next((ext for ext in known_exts if fq_file.endswith(ext)),
                        None)
        if matching is None:
            raise ValueError(f"Could not remove ext: {fq_file}")
        root = base_name[: -(len(matching) + 1)]
        result[root] = f"raw_data/{base_name}"
    return result
@pytest.mark.parametrize(
    "design, expected",
    [
        (
            pandas.DataFrame(
                {
                    "S1": {
                        "Upstream_file": "/path/to/S1_R1.fq.gz",
                        "Downstream_file": "/other/to/S1_R2.fq.gz",
                    }
                }
            ).T,
            {"S1_R1": "raw_data/S1_R1.fq.gz", "S1_R2": "raw_data/S1_R2.fq.gz"},
        ),
        (
            pandas.DataFrame(
                {
                    "S1": {
                        "Upstream_file": "/path/to/S1_R1.fastq.gz",
                        "Downstream_file": "/other/to/S1_R2.fastq.gz",
                    }
                }
            ).T,
            {
                "S1_R1": "raw_data/S1_R1.fastq.gz",
                "S1_R2": "raw_data/S1_R2.fastq.gz",
            },
        ),
        (
            pandas.DataFrame(
                {"S1": {"Upstream_file": "/path/to/S1_R1.fastq.gz"}}
            ).T,
            {"S1_R1": "raw_data/S1_R1.fastq.gz"},
        ),
        (
            pandas.DataFrame(
                {"S1": {"Upstream_file": "/path/to/S1_R1.fastq"}}
            ).T,
            {"S1_R1": "raw_data/S1_R1.fastq"},
        ),
    ],
)
def test_fq_root(design: pandas.DataFrame, expected: Dict[str, str]) -> None:
    """
    Test the function fq_root against every supported fastq extension.
    """
    assert fq_root(design) == expected
def ref_link(config: Dict[str, Any]) -> Dict[str, str]:
    """
    Map the reference file basenames (fasta and gtf from config["ref"])
    to their resolved absolute paths.
    """
    references = (config["ref"]["fasta"], config["ref"]["gtf"])
    return {op.basename(ref): op.realpath(ref) for ref in references}
@pytest.mark.parametrize(
    "config, expected",
    [
        (
            {"ref": {"fasta": "/path/to/fasta.fa", "gtf": "/path/to/gtf.gtf"}},
            {"fasta.fa": "/path/to/fasta.fa", "gtf.gtf": "/path/to/gtf.gtf"},
        ),
        (
            # Relative paths: realpath of a nonexistent path equals abspath.
            {"ref": {"fasta": "path/to/fasta.fa", "gtf": "path/to/gtf.gtf"}},
            {
                "fasta.fa": op.abspath("path/to/fasta.fa"),
                "gtf.gtf": op.abspath("path/to/gtf.gtf"),
            },
        ),
    ],
)
def test_ref_link(config: Dict[str, Any], expected: Dict[str, str]) -> None:
    """
    Test the function ref_link with absolute and relative reference paths.
    """
    assert ref_link(config) == expected
def fq_pairs(design: pandas.DataFrame) -> Dict[str, str]:
    """
    Map each sample id to its fastq link path(s) under raw_data/:
    keys "r1"/"r2" for pair-ended designs, key "r" for single-ended ones.
    """
    if "Downstream_file" in design.columns:
        # Paired-end case
        return {
            name: {
                "r1": f"raw_data/{op.basename(upstream)}",
                "r2": f"raw_data/{op.basename(downstream)}",
            }
            for name, upstream, downstream in zip(design["Sample_id"],
                                                  design["Upstream_file"],
                                                  design["Downstream_file"])
        }
    # Single-end case
    return {
        name: {"r": f"raw_data/{op.basename(upstream)}"}
        for name, upstream in zip(design["Sample_id"],
                                  design["Upstream_file"])
    }
@pytest.mark.parametrize(
    "design, expected",
    [
        (
            pandas.DataFrame(
                {
                    "S1": {
                        "Sample_id": "S1",
                        "Upstream_file": "/path/to/S1_R1.fq.gz",
                        "Downstream_file": "/other/to/S1_R2.fq.gz",
                    }
                }
            ).T,
            {
                "S1": {
                    "r1": "raw_data/S1_R1.fq.gz",
                    "r2": "raw_data/S1_R2.fq.gz",
                }
            },
        ),
        (
            pandas.DataFrame(
                {
                    "S1": {
                        "Sample_id": "S1",
                        "Upstream_file": "/path/to/S1_R1.fq.gz",
                    }
                }
            ).T,
            {"S1": {"r": "raw_data/S1_R1.fq.gz"}},
        ),
        (
            pandas.DataFrame(
                {
                    "S1": {
                        "Sample_id": "S1",
                        "Upstream_file": "/path/to/S1_R1.fq.gz",
                        "Downstream_file": "/other/to/S1_R2.fq.gz",
                    },
                    "S2": {
                        "Sample_id": "S2",
                        "Upstream_file": "/path/to/S2_R1.fq.gz",
                        "Downstream_file": "/other/to/S2_R2.fq.gz",
                    },
                }
            ).T,
            {
                "S1": {
                    "r1": "raw_data/S1_R1.fq.gz",
                    "r2": "raw_data/S1_R2.fq.gz",
                },
                "S2": {
                    "r1": "raw_data/S2_R1.fq.gz",
                    "r2": "raw_data/S2_R2.fq.gz",
                },
            },
        ),
    ],
)
def test_fq_pairs(design: pandas.DataFrame, expected: Dict[str, Any]) -> None:
    """
    Test the function fq_pairs with paired, single-ended and multi-sample designs.
    """
    assert fq_pairs(design) == expected
def refs_pack(config: Dict[str, Any]) -> Dict[str, str]:
    """
    Return the genomes/-relative paths of the fasta and gtf references.
    """
    pack = {}
    for key in ("fasta", "gtf"):
        pack[key] = f"genomes/{op.basename(config['ref'][key])}"
    return pack
@pytest.mark.parametrize(
    "config, expected",
    [
        (
            {"ref": {"fasta": "/path/to/fasta.fa", "gtf": "/path/to/gtf.gtf"}},
            {"fasta": "genomes/fasta.fa", "gtf": "genomes/gtf.gtf"},
        )
    ],
)
def test_ref_pack(config: Dict[str, Any], expected: Dict[str, str]) -> None:
    """
    Test that refs_pack rebases both references under the genomes/ directory.
    """
    assert refs_pack(config) == expected
def salmon_quant_extra(config: Dict[str, Any]) -> str:
    """
    Return the user-supplied salmon quant extra parameters with the
    mandatory --geneMap flag (pointing at the linked GTF) appended.
    """
    user_extra = config["params"].get("salmon_quant_extra", "")
    gtf_name = op.basename(config["ref"]["gtf"])
    return f"{user_extra} --geneMap genomes/{gtf_name}"
@pytest.mark.parametrize(
    "config, expected",
    [
        (
            # Empty params: result is just the --geneMap flag (leading space kept).
            {"params": {}, "ref": {"gtf": "genomes/gtf.gtf"}},
            " --geneMap genomes/gtf.gtf",
        ),
    ],
)
def test_salmon_quant_extra(config: Dict[str, Any], expected: str) -> None:
    """
    Test the salmon_quant_extra function with multiple arguments
    """
    assert salmon_quant_extra(config) == expected
def salmon_quant_output(config: Dict[str, Any]) -> Dict[str, str]:
    """
    Return the salmon quant output targets, adding the optional
    quant.genes target when a GTF annotation is configured.

    (FIX: return annotation corrected — this returns a dict, not a str.)
    """
    base = {"quant": "pseudo_mapping/{sample}/quant.sf"}
    try:
        # Case there is a GTF passed in the config file
        if config["ref"]["gtf"] != "" and config["ref"]["gtf"] is not None:
            base["quant_genes"] = "pseudo_mapping/{sample}/quant.genes.sf"
    except KeyError:
        # No "gtf" key at all: transcript-level quantification only.
        pass
    return base
@pytest.mark.parametrize(
    "config, expected",
    [
        (
            {"ref": {"gtf": "genomes/annot.gtf"}},
            {
                "quant": "pseudo_mapping/{sample}/quant.sf",
                "quant_genes": "pseudo_mapping/{sample}/quant.genes.sf",
            },
        ),
        ({"ref": {}}, {"quant": "pseudo_mapping/{sample}/quant.sf"}),
        ({"ref": {"gtf": None}}, {"quant": "pseudo_mapping/{sample}/quant.sf"}),
    ],
)
def test_salmon_quant_output(config: Dict[str, Any],
                             expected: Dict[str, str]) -> None:
    """
    Test salmon_quant_output with a GTF present, absent, and set to None.
    """
    assert salmon_quant_output(config) == expected
def sample_id(design: pandas.DataFrame) -> List[str]:
    """
    Return the sample identifiers from the design table as a plain list.
    """
    identifiers: List[str] = design["Sample_id"].tolist()
    return identifiers
import argparse # Parse command line
import logging # Traces and loggings
import os # OS related activities
import pandas as pd # Parse TSV files
import pytest # Unit testing
import shlex # Lexical analysis
import sys # System related methods
from pathlib import Path # Paths related methods
from snakemake.utils import makedirs # Easily build directories
from typing import Dict, Generator, List, Any # Type hints
try:
from scripts.common_script_rna_count_salmon import *
except ModuleNotFoundError:
from common_script_rna_count_salmon import *
# Processing functions
# Looking for fastq files
def search_fq(
    fq_dir: Path, recursive: bool = False
) -> Generator[str, str, None]:
    """
    Yield the fastq files found in a directory.

    Parameters:
        fq_dir     Path  directory in which to search
        recursive  bool  whether to descend into sub-directories (True)
                         or not (False)

    Yields:
        Path objects for entries whose name ends in
        .fq / .fq.gz / .fastq / .fastq.gz
    """
    for path in fq_dir.iterdir():
        if path.is_dir():
            if recursive is True:
                yield from search_fq(path, recursive)
            # FIX: always skip directories here. Previously, when recursing,
            # control fell through to the suffix test and a *directory*
            # named e.g. "foo.fq" was yielded as if it were a fastq file.
            continue
        if path.name.endswith((".fq", ".fq.gz", ".fastq", ".fastq.gz")):
            yield path
# Testing search_fq
def test_search_fq():
    """
    This function tests the ability of the function "search_fq" to find the
    fastq files in the given directory (tests/reads must hold A/B R1/R2).

    Example:
    pytest -v prepare_design.py -k test_search_fq
    """
    path = Path("tests/reads/")
    # Expected: the four fixture files A_R1, A_R2, B_R1, B_R2 (order-insensitive).
    expected = list(
        path / "{}_R{}.fastq".format(sample, stream)
        for sample in ["A", "B"]
        for stream in [1, 2]
    )
    assert sorted(list(search_fq(path))) == sorted(expected)
# Turning the FQ list into a dictionnary
def classify_fq(fq_files: List[Path], paired: bool = True) -> Dict[str, Path]:
    """
    Build a dictionary describing each sample's fastq file(s).

    Parameters:
        fq_files  List[Path]  fastq paths; for paired data the mates must be
                              adjacent in the list (R1 then R2)
        paired    bool        True for pair-ended data, False for single-ended

    Return:
        Dict keyed by the (first) fastq basename; each value holds the
        Sample_id (basename without extension), the absolute Upstream_file
        and, when paired, the absolute Downstream_file.
    """
    fq_dict = {}
    if paired is not True:
        # Case single fastq per sample
        logging.debug("Sorting fastq files as single-ended")
        for fq in fq_files:
            fq_dict[fq.name] = {
                "Sample_id": fq.stem,
                "Upstream_file": fq.absolute(),
            }
    else:
        # Case pairs of fastq are used: stride-2 slices pair neighbours.
        logging.debug("Sorting fastq files as pair-ended")
        for upstream, downstream in zip(fq_files[0::2], fq_files[1::2]):
            fq_dict[upstream.name] = {
                "Sample_id": upstream.stem,
                "Upstream_file": upstream.absolute(),
                "Downstream_file": downstream.absolute(),
            }
    logging.debug(fq_dict)
    return fq_dict
def test_classify_fq():
    """
    This function tests the classify_fq function against the paired
    fixture files under tests/reads.

    Example:
    pytest -v ./prepare_design.py -k test_classify_fq
    """
    # Fixtures live two levels above this script (repo root).
    prefix = Path(__file__).parent.parent
    fq_list = sorted(list(search_fq(prefix / "tests" / "reads")))
    expected = {
        "A_R1.fastq": {
            "Sample_id": "A_R1",
            "Upstream_file": prefix / "tests" / "reads" / "A_R1.fastq",
            "Downstream_file": prefix / "tests" / "reads" / "A_R2.fastq",
        },
        "B_R1.fastq": {
            "Sample_id": "B_R1",
            "Upstream_file": prefix / "tests" / "reads" / "B_R1.fastq",
            "Downstream_file": prefix / "tests" / "reads" / "B_R2.fastq",
        },
    }
    assert classify_fq(fq_list) == expected
# Parsing command line arguments
# This function won't be tested
def parser() -> argparse.ArgumentParser:
    """
    Build and return the command line parser for this script.

    Return:
        ArgumentParser  fully configured parser (one positional path,
        search/output options, and mutually exclusive verbosity flags)
    """
    cli_parser = argparse.ArgumentParser(
        description=sys.modules[__name__].__doc__,
        formatter_class=CustomFormatter,
        epilog="This script does not perform any magic. Check the result.",
    )
    # Positional argument: where the fastq files live.
    cli_parser.add_argument(
        "path",
        help="Path to the directory containing fastq files",
        type=str
    )
    # Options controlling the search and the output location.
    cli_parser.add_argument(
        "-s",
        "--single",
        help="The samples are single ended rnaseq reads, not pair ended",
        action="store_true",
    )
    cli_parser.add_argument(
        "-r",
        "--recursive",
        help="Recursively search in sub-directories for fastq files",
        action="store_true",
    )
    cli_parser.add_argument(
        "-o",
        "--output",
        help="Path to output file (default: %(default)s)",
        type=str,
        default="design.tsv",
    )
    # Verbosity switches: debug and quiet are mutually exclusive.
    log_group = cli_parser.add_mutually_exclusive_group()
    log_group.add_argument(
        "-d",
        "--debug",
        help="Set logging in debug mode",
        default=False,
        action="store_true",
    )
    log_group.add_argument(
        "-q",
        "--quiet",
        help="Turn off logging behaviour",
        default=False,
        action="store_true",
    )
    return cli_parser
def parse_args(args: Any = None) -> argparse.Namespace:
    """
    Parse command line arguments.

    Parameters:
        args Any Command line tokens; defaults to sys.argv[1:], read at
                 call time. (FIX: the original default bound sys.argv[1:]
                 once at import time, so later argv changes were ignored.
                 The return annotation is also corrected: parse_args
                 returns a Namespace, not an ArgumentParser.)

    Return:
        Namespace Parsed command line object

    Example:
    >>> parse_args(shlex.split("/path/to/fasta --single"))
    Namespace(debug=False, output='design.tsv', path='/path/to/fasta',
    quiet=False, recursive=False, single=True)
    """
    if args is None:
        args = sys.argv[1:]
    # Parsing command lines
    return parser().parse_args(args)
def test_parse_args() -> None:
    """
    This function tests the command line parsing with only the mandatory
    positional argument, expecting every option at its default.

    Example:
    >>> pytest -v prepare_config.py -k test_parse_args
    """
    options = parse_args(shlex.split("/path/to/fastq/dir/"))
    expected = argparse.Namespace(
        debug=False,
        output="design.tsv",
        path="/path/to/fastq/dir/",
        quiet=False,
        recursive=False,
        single=False,
    )
    assert options == expected
# Main function, the core of this script
def main(args: argparse.ArgumentParser) -> None:
    """
    Run the whole design-preparation sequence: locate the fastq files,
    pair them per sample, and write the resulting design TSV.

    Parameters:
        args ArgumentParser The parsed command line
    """
    # Fastq files are sorted so that R1/R2 mates end up adjacent.
    fastq_paths = sorted(search_fq(Path(args.path), args.recursive))
    logging.debug("Head of alphabeticaly sorted list of fastq files:")
    logging.debug([str(i) for i in fastq_paths[0:5]])
    sample_dict = classify_fq(fastq_paths)
    # Pandas handles the TSV serialization of the per-sample mapping.
    design_frame = pd.DataFrame(sample_dict).T
    logging.debug("\n{}".format(design_frame.head()))
    logging.debug("Saving results to {}".format(args.output))
    design_frame.to_csv(args.output, sep="\t", index=False)
# Running programm if not imported
if __name__ == "__main__":
    # Parsing command line
    args = parse_args(sys.argv[1:])
    # Log directory must exist before logging.basicConfig opens the file.
    makedirs("logs/prepare")
    # Build logging object and behaviour
    logging.basicConfig(
        filename="logs/prepare/design.log", filemode="w", level=logging.DEBUG
    )
    try:
        logging.debug("Preparing design")
        main(args)
    except Exception as e:
        # Record the full traceback in the log, then propagate the error.
        logging.exception("%s", e)
        raise | /rna_count_salmon-1.9-py3-none-any.whl/scripts/prepare_design.py | 0.660829 | 0.381508 | prepare_design.py | pypi |
import argparse # Parse command line
import logging # Traces and loggings
import os # OS related activities
import pytest # Unit testing
import shlex # Lexical analysis
import sys # System related methods
import yaml # Parse Yaml files
from pathlib import Path # Paths related methods
from snakemake.utils import makedirs # Easily build directories
from typing import Dict, Any # Typing hints
try:
from scripts.common_script_rna_count_salmon import *
except ModuleNotFoundError:
from common_script_rna_count_salmon import *
def parser() -> argparse.ArgumentParser:
    """
    Build the argument parser object.

    Covers two positional arguments (fasta, gtf), the optional pipeline
    settings (paths, threads, container image, salmon extras), and a
    mutually exclusive pair of logging flags (--debug / --quiet).

    Return:
        argparse.ArgumentParser  The fully configured parser
    """
    main_parser = argparse.ArgumentParser(
        description=sys.modules[__name__].__doc__,
        formatter_class=CustomFormatter,
        epilog="This script does not make any magic. Please check the prepared"
        " configuration file!",
    )
    # Parsing positional argument
    main_parser.add_argument(
        "fasta",
        help="Path to the fasta-formatted transcriptome sequence",
        type=str,
    )
    main_parser.add_argument(
        "gtf",
        help="Path to GTF-formatted genome annotation",
        type=str
    )
    # Parsing optional arguments
    main_parser.add_argument(
        "--design",
        help="Path to design file (default: %(default)s)",
        type=str,
        metavar="PATH",
        default="design.tsv",
    )
    main_parser.add_argument(
        "--workdir",
        help="Path to working directory (default: %(default)s)",
        type=str,
        metavar="PATH",
        default=".",
    )
    main_parser.add_argument(
        "--threads",
        help="Maximum number of threads used (default: %(default)s)",
        type=int,
        default=1,
    )
    main_parser.add_argument(
        "--singularity",
        help="Docker/Singularity image (default: %(default)s)",
        type=str,
        default="docker://continuumio/miniconda3:4.4.10",
    )
    main_parser.add_argument(
        "--cold-storage",
        help="Space separated list of absolute path to "
        "cold storage mount points (default: %(default)s)",
        nargs="+",
        type=str,
        default=[" "],
    )
    # Workflow step toggles (stored as negative flags, inverted later
    # by args_to_dict into the "workflow" section).
    main_parser.add_argument(
        "--no-fastqc",
        help="Do not perform any fastqc",
        action="store_true"
    )
    main_parser.add_argument(
        "--no-multiqc",
        help="Do not perform final multiqc",
        action="store_true"
    )
    main_parser.add_argument(
        "--aggregate",
        help="Perform sample count aggregation",
        action="store_true",
    )
    # Extra command-line snippets forwarded verbatim to salmon.
    main_parser.add_argument(
        "--salmon-index-extra",
        help="Extra parameters for salmon index step (default: %(default)s)",
        type=str,
        default="--keepDuplicates --gencode --perfectHash",
    )
    main_parser.add_argument(
        "--salmon-quant-extra",
        help="Extra parameters for salmon quantification step "
        "(default: %(default)s)",
        type=str,
        default="--numBootstraps 100 --validateMappings --gcBias --seqBias",
    )
    main_parser.add_argument(
        "--libType",
        help="The salmon library type (default: %(default)s)",
        type=str,
        default="A",
    )
    # Logging options
    log = main_parser.add_mutually_exclusive_group()
    log.add_argument(
        "-d",
        "--debug",
        help="Set logging in debug mode",
        default=False,
        action="store_true",
    )
    log.add_argument(
        "-q",
        "--quiet",
        help="Turn off logging behaviour",
        default=False,
        action="store_true",
    )
    return main_parser
# Argument parsing functions
def parse_args(args: Any) -> argparse.ArgumentParser:
    """
    Parse the given command line arguments.

    Parameters
        args Any All command line arguments

    Return
        The populated argparse Namespace

    Example:
    >>> parse_args(shlex.split("/path/to/fasta --no-fastqc"))
    Namespace(aggregate=False, cold_storage=[' '], debug=False,
    design='design.tsv', fasta='/path/to/fasta', gtf=None, libType='A',
    no_fastqc=False, no_multiqc=False, quiet=False, salmon_index_extra='
    --keepDuplicates --gencode --perfectHash', salmon_quant_extra='
    --numBootstraps 100 --validateMappings --gcBias --seqBias',
    singularity='docker://continuumio/miniconda3:4.4.10',
    threads=1, workdir='.')
    """
    cli = parser()
    return cli.parse_args(args)
def test_parse_args() -> None:
    """
    Check command line parsing against a reference Namespace.

    Example:
    >>> pytest -v prepare_config.py -k test_parse_args
    """
    parsed = parse_args(shlex.split("/path/to/fasta /path/to/gtf"))
    reference = argparse.Namespace(
        aggregate=False,
        cold_storage=[" "],
        debug=False,
        design="design.tsv",
        fasta="/path/to/fasta",
        gtf="/path/to/gtf",
        libType="A",
        no_fastqc=False,
        no_multiqc=False,
        quiet=False,
        salmon_index_extra="--keepDuplicates --gencode --perfectHash",
        salmon_quant_extra="--numBootstraps 100 --validateMappings --gcBias --seqBias",
        singularity="docker://continuumio/miniconda3:4.4.10",
        threads=1,
        workdir=".",
    )
    assert parsed == reference
# Building pipeline configuration from command line
def args_to_dict(args: argparse.ArgumentParser) -> Dict[str, Any]:
    """
    Parse command line arguments and return a dictionnary ready to be
    dumped into yaml

    Parameters:
        args ArgumentParser Parsed arguments from command line

    Return:
        Dict[str, Any] A dictionnary containing the parameters
                       for the pipeline

    Notes:
        * Every path (design, config, workdir, fasta, gtf) is converted to
          an absolute path.
        * The "config" entry always points to "<workdir>/config.yaml".
        * The --no-fastqc / --no-multiqc flags are inverted into positive
          "workflow" booleans.
    """
    result_dict = {
        "design": os.path.abspath(args.design),
        # The configuration file itself lives in the working directory.
        "config": os.path.abspath(os.path.join(args.workdir, "config.yaml")),
        "workdir": os.path.abspath(args.workdir),
        "threads": args.threads,
        "singularity_docker_image": args.singularity,
        "cold_storage": args.cold_storage,
        "ref": {
            "fasta": os.path.abspath(args.fasta),
            # gtf is optional; keep None rather than abspath-ing it.
            "gtf": (
                os.path.abspath(args.gtf) if args.gtf is not None else None
            ),
        },
        "workflow": {
            "fastqc": not args.no_fastqc,
            "multiqc": not args.no_multiqc,
            "aggregate": args.aggregate,
        },
        "params": {
            "salmon_index_extra": args.salmon_index_extra,
            "salmon_quant_extra": args.salmon_quant_extra,
            "libType": args.libType,
        },
    }
    logging.debug(result_dict)
    return result_dict
def test_args_to_dict() -> None:
    """
    Check args_to_dict against a hand-built reference dictionary.

    Example:
    >>> pytest -v prepare_config.py -k test_args_to_dict
    """
    parsed = parse_args(
        shlex.split(
            "/path/to/fasta "
            " /path/to/gtf "
            "--design /path/to/design "
            "--workdir /path/to/workdir "
            "--threads 100 "
            "--singularity singularity_image "
            "--cold-storage /path/cold/one /path/cold/two "
            "--no-fastqc "
            "--aggregate "
            "--salmon-index-extra ' --index-arg 1 ' "
            "--salmon-quant-extra ' --quant-arg ok ' "
            "--debug "
        )
    )
    reference = {
        "design": "/path/to/design",
        "config": "/path/to/workdir/config.yaml",
        "workdir": "/path/to/workdir",
        "threads": 100,
        "singularity_docker_image": "singularity_image",
        "cold_storage": ["/path/cold/one", "/path/cold/two"],
        "ref": {"fasta": "/path/to/fasta", "gtf": "/path/to/gtf"},
        "workflow": {"fastqc": False, "multiqc": True, "aggregate": True},
        "params": {
            "salmon_index_extra": " --index-arg 1 ",
            "salmon_quant_extra": " --quant-arg ok ",
            "libType": "A",
        },
    }
    assert args_to_dict(parsed) == reference
# Yaml formatting
def dict_to_yaml(indict: Dict[str, Any]) -> str:
    """
    Render a configuration dictionary as YAML-formatted text.

    Parameters:
        indict Dict[str, Any] The dictionnary containing the pipeline
                              parameters, extracted from command line

    Return:
        str The yaml formatted string, directly built
            from the input dictionnary

    Examples:
    >>> example_dict = {
        "bar": "bar-value",
        "foo": ["foo-list-1", "foo-list-2"]
    }
    >>> dict_to_yaml(example_dict)
    'bar: bar-value\nfoo:\n- foo-list-1\n- foo-list-2\n'
    """
    yaml_text = yaml.dump(indict, default_flow_style=False)
    return yaml_text
def test_dict_to_yaml() -> None:
    """
    Unit test for dict_to_yaml with pytest.

    Example:
    >>> pytest -v prepare_config.py -k test_dict_to_yaml
    """
    sample = {"bar": "bar-value", "foo": ["foo-list-1", "foo-list-2"]}
    awaited = "bar: bar-value\nfoo:\n- foo-list-1\n- foo-list-2\n"
    assert dict_to_yaml(sample) == awaited
# Core of this script
def main(args: argparse.ArgumentParser) -> None:
    """
    Drive the whole configuration sequence.

    Parameters:
        args ArgumentParser The parsed command line

    Example:
    >>> main(parse_args(shlex.split("/path/to/fasta")))
    """
    logging.debug("Building configuration file:")
    pipeline_conf = args_to_dict(args)
    output_path = Path(args.workdir) / "config.yaml"
    # Serialize the configuration as YAML inside the working directory.
    with output_path.open("w") as config_yaml:
        logging.debug(f"Saving results to {str(output_path)}")
        config_yaml.write(dict_to_yaml(pipeline_conf))
# Running program if not imported
if __name__ == "__main__":
    # Parsing command line
    args = parse_args(sys.argv[1:])
    # The log directory must exist before logging.basicConfig opens the file.
    makedirs("logs/prepare")
    # Build logging object and behaviour
    logging.basicConfig(
        filename="logs/prepare/config.log", filemode="w", level=logging.DEBUG
    )
    try:
        logging.debug("Preparing configuration")
        main(args)
    except Exception as e:
        # Log the full traceback, then re-raise so the exit status is non-zero.
        logging.exception("%s", e)
        raise
# RNA-FM
This repository contains codes and pre-trained models for **RNA foundation model (RNA-FM)**.
**RNA-FM outperforms all tested single-sequence RNA language models across a variety of structure prediction tasks as well as several function-related tasks.**
You can find more details about **RNA-FM** in our paper, ["Interpretable RNA Foundation Model from Unannotated Data for Highly Accurate RNA Structure and Function Predictions" (Chen et al., 2022).](https://arxiv.org/abs/2204.00300)

<details><summary>Citation</summary>
```bibtex
@article{chen2022interpretable,
title={Interpretable rna foundation model from unannotated data for highly accurate rna structure and function predictions},
author={Chen, Jiayang and Hu, Zhihang and Sun, Siqi and Tan, Qingxiong and Wang, Yixuan and Yu, Qinze and Zong, Licheng and Hong, Liang and Xiao, Jin and King, Irwin and others},
journal={arXiv preprint arXiv:2204.00300},
year={2022}
}
```
</details>
<details><summary>Table of contents</summary>
- [Setup Environment](#Setup_Environment)
- [Pre-trained Models](#Available_Pretrained_Models)
- [Usage](#usage)
- [RNA-FM Embedding Generation](#RNA-FM_Embedding_Generation)
- [RNA Secondary Structure Prediction](#RNA_Secondary_Structure_Prediction)
- [Server](#Server)
- [Quick Start](#Quick_Start)
- [Citations](#citations)
- [License](#license)
</details>
## Create Environment with Conda <a name="Setup_Environment"></a>
First, download the repository and create the environment.
```
git clone https://github.com/mydkzgj/RNA-FM.git
cd ./RNA-FM
conda env create -f environment.yml
```
Then, activate the "RNA-FM" environment and enter into the workspace.
```
conda activate RNA-FM
cd ./redevelop
```
## Access pre-trained models. <a name="Available_Pretrained_Models"></a>
Download pre-trained models from [this gdrive link](https://drive.google.com/drive/folders/1VGye74GnNXbUMKx6QYYectZrY7G2pQ_J?usp=share_link) and place the pth files into the `pretrained` folder.
## Apply RNA-FM with Existing Scripts. <a name="Usage"></a>
### 1. Embedding Extraction. <a name="RNA-FM_Embedding_Generation"></a>
```
python launch/predict.py --config="pretrained/extract_embedding.yml" \
--data_path="./data/examples/example.fasta" --save_dir="./results" \
--save_frequency 1 --save_embeddings
```
RNA-FM embeddings with shape of (L,640) will be saved in the `$save_dir/representations`.
### 2. Downstream Prediction - RNA secondary structure. <a name="RNA_Secondary_Structure_Prediction"></a>
```
python launch/predict.py --config="pretrained/ss_prediction.yml" \
--data_path="./data/examples/example.fasta" --save_dir="./results" \
--save_frequency 1
```
The predicted probability maps will be saved in form of `.npy` files, and the post-processed binary predictions will be saved in form of `.ct` files. You can find them in the `$save_dir/r-ss`.
### 3. Online Version - RNA-FM server. <a name="Server"></a>
If you have any trouble with the deployment of the local version of RNA-FM, you can access its online version from this link, [RNA-FM server](https://proj.cse.cuhk.edu.hk/rnafm/#/).
You can easily submit jobs on the server and download results from it afterwards, without setting up an environment or occupying any computational resources.
## Quick Start for Further Development. <a name="Quick_Start"></a>
PyTorch is the prerequisite package which you must have installed to use this repository.
You can install `rna-fm` in your own environment with the following pip command if you just want to
use the pre-trained language model.
You can either install `rna-fm` from PyPI:
```
pip install rna-fm
```
or install `rna-fm` from github:
```
cd ./RNA-FM
pip install .
```
After installation, you can load the RNA-FM and extract its embeddings with the following code:
```
import torch
import fm
# Load RNA-FM model
model, alphabet = fm.pretrained.rna_fm_t12()
batch_converter = alphabet.get_batch_converter()
model.eval() # disables dropout for deterministic results
# Prepare data
data = [
("RNA1", "GGGUGCGAUCAUACCAGCACUAAUGCCCUCCUGGGAAGUCCUCGUGUUGCACCCCU"),
("RNA2", "GGGUGUCGCUCAGUUGGUAGAGUGCUUGCCUGGCAUGCAAGAAACCUUGGUUCAAUCCCCAGCACUGCA"),
("RNA3", "CGAUUCNCGUUCCC--CCGCCUCCA"),
]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
# Extract embeddings (on CPU)
with torch.no_grad():
results = model(batch_tokens, repr_layers=[12])
token_embeddings = results["representations"][12]
```
More tutorials can be found from [https://ml4bio.github.io/RNA-FM/](https://ml4bio.github.io/RNA-FM/). The related notebooks are stored in the `tutorials` folder.
## Citations <a name="citations"></a>
If you find the models useful in your research, we ask that you cite the relevant paper:
For RNA-FM:
```bibtex
@article{chen2022interpretable,
title={Interpretable rna foundation model from unannotated data for highly accurate rna structure and function predictions},
author={Chen, Jiayang and Hu, Zhihang and Sun, Siqi and Tan, Qingxiong and Wang, Yixuan and Yu, Qinze and Zong, Licheng and Hong, Liang and Xiao, Jin and King, Irwin and others},
journal={arXiv preprint arXiv:2204.00300},
year={2022}
}
```
The model of this code builds on the [esm](https://github.com/facebookresearch/esm) sequence modeling framework.
And we use [fairseq](https://github.com/pytorch/fairseq) sequence modeling framework to train our RNA language modeling.
We greatly appreciate these two excellent works!
## License <a name="license"></a>
This source code is licensed under the MIT license found in the `LICENSE` file
in the root directory of this source tree.
| /rna-fm-0.1.2.tar.gz/rna-fm-0.1.2/README.md | 0.71423 | 0.974043 | README.md | pypi |
import os
from typing import Sequence, Tuple, List, Union
import pickle
import re
import shutil
import torch
from pathlib import Path
from .constants import proteinseq_toks, rnaseq_toks
RawMSA = Sequence[Tuple[str, str]]
class FastaBatchedDataset(object):
    """In-memory dataset of (label, sequence) pairs parsed from a fasta file."""

    def __init__(self, sequence_labels, sequence_strs):
        self.sequence_labels = list(sequence_labels)
        self.sequence_strs = list(sequence_strs)

    @classmethod
    def from_file(cls, fasta_file):
        """Parse a fasta file; records without a header name get a synthetic
        ``seqnum<line>`` label. Labels must be unique."""
        labels, seqs = [], []
        pending_label = None
        pending_chunks = []

        def _emit():
            # Close out the record currently being accumulated, if any.
            nonlocal pending_label, pending_chunks
            if pending_label is None:
                return
            labels.append(pending_label)
            seqs.append("".join(pending_chunks))
            pending_label = None
            pending_chunks = []

        with open(fasta_file, "r") as handle:
            for line_idx, line in enumerate(handle):
                if line.startswith(">"):  # header line starts a new record
                    _emit()
                    header = line[1:].strip()
                    pending_label = header if len(header) > 0 else f"seqnum{line_idx:09d}"
                else:  # sequence line (possibly wrapped over several lines)
                    pending_chunks.append(line.strip())
        _emit()

        assert len(set(labels)) == len(labels)
        return cls(labels, seqs)

    def __len__(self):
        return len(self.sequence_labels)

    def __getitem__(self, idx):
        return self.sequence_labels[idx], self.sequence_strs[idx]

    def get_batch_indices(self, toks_per_batch, extra_toks_per_seq=0):
        """Group sequence indices (shortest first) into batches whose padded
        token count stays within ``toks_per_batch``."""
        order = sorted((len(s), i) for i, s in enumerate(self.sequence_strs))
        batches = []
        current = []
        widest = 0
        for length, idx in order:
            length += extra_toks_per_seq
            # Flush when adding this sequence would overflow the budget.
            if current and max(length, widest) * (len(current) + 1) > toks_per_batch:
                batches.append(current)
                current = []
                widest = 0
            widest = max(widest, length)
            current.append(idx)
        if current:
            batches.append(current)
        return batches
class Alphabet(object):
    """Token vocabulary mapping sequence characters to integer indices.

    The full vocabulary is ``prepend_toks`` + ``standard_toks`` (padded with
    ``<null_i>`` entries up to a multiple of 8) + ``append_toks``. Unknown
    tokens resolve to the ``<unk>`` index.
    """

    def __init__(
        self,
        standard_toks: Sequence[str],
        prepend_toks: Sequence[str] = ("<null_0>", "<pad>", "<eos>", "<unk>"),
        append_toks: Sequence[str] = ("<cls>", "<mask>", "<sep>"),
        prepend_bos: bool = True,
        append_eos: bool = False,
        use_msa: bool = False,
    ):
        self.standard_toks = list(standard_toks)
        self.prepend_toks = list(prepend_toks)
        self.append_toks = list(append_toks)
        self.prepend_bos = prepend_bos
        self.append_eos = append_eos
        self.use_msa = use_msa
        self.all_toks = list(self.prepend_toks)
        self.all_toks.extend(self.standard_toks)
        # Pad the vocabulary with <null_i> tokens up to a multiple of 8
        # (hardware-friendly embedding-table sizes).
        for i in range((8 - (len(self.all_toks) % 8)) % 8):
            self.all_toks.append(f"<null_{i + 1}>")
        self.all_toks.extend(self.append_toks)
        self.tok_to_idx = {tok: i for i, tok in enumerate(self.all_toks)}
        self.unk_idx = self.tok_to_idx["<unk>"]
        self.padding_idx = self.get_idx("<pad>")
        self.cls_idx = self.get_idx("<cls>")
        self.mask_idx = self.get_idx("<mask>")
        self.eos_idx = self.get_idx("<eos>")

    def __len__(self):
        return len(self.all_toks)

    def get_idx(self, tok):
        # Unknown tokens fall back to <unk>.
        return self.tok_to_idx.get(tok, self.unk_idx)

    def get_tok(self, ind):
        return self.all_toks[ind]

    def to_dict(self):
        # BUG FIX: previously returned {"toks": self.toks}, but no ``toks``
        # attribute is ever set, so this raised AttributeError. Return the
        # standard tokens so Alphabet.from_dict(alphabet.to_dict())
        # round-trips.
        return {"toks": self.standard_toks}

    def get_batch_converter(self):
        if self.use_msa:
            return MSABatchConverter(self)
        else:
            return BatchConverter(self)

    @classmethod
    def from_dict(cls, d, **kwargs):
        return cls(standard_toks=d["toks"], **kwargs)

    @classmethod
    def from_architecture(cls, name: str, theme="protein") -> "Alphabet":
        """Build the alphabet matching a known pretrained architecture.

        ``theme`` selects between the protein and RNA character sets.
        Raises ValueError for unrecognized architecture names.
        """
        if name in ("ESM-1", "protein_bert_base"):
            standard_toks = proteinseq_toks["toks"] if theme == "protein" else rnaseq_toks["toks"]
            prepend_toks: Tuple[str, ...] = ("<null_0>", "<pad>", "<eos>", "<unk>")
            append_toks: Tuple[str, ...] = ("<cls>", "<mask>", "<sep>")
            prepend_bos = True
            append_eos = False
            use_msa = False
        elif name in ("ESM-1b", "roberta_large"):
            standard_toks = proteinseq_toks["toks"] if theme == "protein" else rnaseq_toks["toks"]
            prepend_toks = ("<cls>", "<pad>", "<eos>", "<unk>")
            append_toks = ("<mask>",)
            prepend_bos = True
            append_eos = True
            use_msa = False
        elif name in ("MSA Transformer", "msa_transformer"):
            standard_toks = proteinseq_toks["toks"] if theme == "protein" else rnaseq_toks["toks"]
            prepend_toks = ("<cls>", "<pad>", "<eos>", "<unk>")
            append_toks = ("<mask>",)
            prepend_bos = True
            append_eos = False
            use_msa = True
        else:
            raise ValueError("Unknown architecture selected")
        return cls(
            standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa
        )
class BatchConverter(object):
    """Callable that turns an unprocessed (label, sequence-string) batch into
    a processed (labels, strings, padded token tensor) batch."""

    def __init__(self, alphabet):
        self.alphabet = alphabet

    def __call__(self, raw_batch: Sequence[Tuple[str, str]]):
        # RoBERTa uses an eos token, while ESM-1 does not.
        alpha = self.alphabet
        bos = int(alpha.prepend_bos)
        eos = int(alpha.append_eos)
        longest = max(len(seq_str) for _, seq_str in raw_batch)
        # Start from an all-padding tensor; shorter sequences keep the pad index.
        tokens = torch.full(
            (len(raw_batch), longest + bos + eos),
            alpha.padding_idx,
            dtype=torch.int64,
        )
        labels = []
        strs = []
        for row, (label, seq_str) in enumerate(raw_batch):
            labels.append(label)
            strs.append(seq_str)
            if alpha.prepend_bos:
                tokens[row, 0] = alpha.cls_idx
            encoded = torch.tensor(
                [alpha.get_idx(ch) for ch in seq_str], dtype=torch.int64
            )
            tokens[row, bos : bos + len(seq_str)] = encoded
            if alpha.append_eos:
                tokens[row, bos + len(seq_str)] = alpha.eos_idx
        return labels, strs, tokens
class MSABatchConverter(BatchConverter):
    """Batch converter for multiple sequence alignments (MSAs).

    Accepts either a single MSA (a sequence of (label, sequence) pairs) or a
    batch of MSAs, and produces a token tensor of shape
    (batch, alignments, tokens), padded with the alphabet's padding index.
    """

    def __call__(self, inputs: Union[Sequence[RawMSA], RawMSA]):
        if isinstance(inputs[0][0], str):
            # Input is a single MSA
            raw_batch: Sequence[RawMSA] = [inputs]  # type: ignore
        else:
            raw_batch = inputs  # type: ignore

        batch_size = len(raw_batch)
        max_alignments = max(len(msa) for msa in raw_batch)
        # All rows of one MSA have equal length, so the first row is enough.
        max_seqlen = max(len(msa[0][1]) for msa in raw_batch)

        tokens = torch.empty(
            (
                batch_size,
                max_alignments,
                max_seqlen
                + int(self.alphabet.prepend_bos)
                + int(self.alphabet.append_eos),
            ),
            dtype=torch.int64,
        )
        # Pre-fill with padding; smaller MSAs keep the pad index.
        tokens.fill_(self.alphabet.padding_idx)
        labels = []
        strs = []

        for i, msa in enumerate(raw_batch):
            msa_seqlens = set(len(seq) for _, seq in msa)
            if not len(msa_seqlens) == 1:
                raise RuntimeError(
                    "Received unaligned sequences for input to MSA, all sequence "
                    "lengths must be equal."
                )
            # Delegate per-MSA tokenization to the 2D BatchConverter.
            msa_labels, msa_strs, msa_tokens = super().__call__(msa)
            labels.append(msa_labels)
            strs.append(msa_strs)
            tokens[i, : msa_tokens.size(0), : msa_tokens.size(1)] = msa_tokens

        return labels, strs, tokens
def read_fasta(
    path,
    keep_gaps=True,
    keep_insertions=True,
    to_upper=False,
):
    """Yield (description, sequence) pairs parsed from a fasta file on disk.

    Parsing and optional gap/insertion filtering are delegated to
    read_alignment_lines.
    """
    with open(path, "r") as handle:
        yield from read_alignment_lines(
            handle,
            keep_gaps=keep_gaps,
            keep_insertions=keep_insertions,
            to_upper=to_upper,
        )
def read_alignment_lines(
    lines,
    keep_gaps=True,
    keep_insertions=True,
    to_upper=False,
):
    """Yield (description, sequence) pairs from an iterable of fasta lines.

    Gap characters ('-') and insertion characters (lowercase letters) can be
    stripped, and the sequence optionally upper-cased. The input must contain
    at least one record.
    """
    description = None
    sequence = None

    def _clean(raw):
        if not keep_gaps:
            raw = re.sub("-", "", raw)
        if not keep_insertions:
            raw = re.sub("[a-z]", "", raw)
        return raw.upper() if to_upper else raw

    for line in lines:
        # Line may be empty if seq % file_line_width == 0
        if len(line) > 0 and line[0] == ">":
            if sequence is not None:
                yield description, _clean(sequence)
            description = line.strip()
            sequence = ""
        else:
            assert isinstance(sequence, str)
            sequence += line.strip()
    # Emit the final record.
    assert isinstance(sequence, str) and isinstance(description, str)
    yield description, _clean(sequence)
class ESMStructuralSplitDataset(torch.utils.data.Dataset):
    """
    Structural Split Dataset as described in section A.10 of the supplement of our paper.
    https://doi.org/10.1101/622803

    We use the full version of SCOPe 2.07, clustered at 90% sequence identity,
    generated on January 23, 2020.

    For each SCOPe domain:
        - We extract the sequence from the corresponding PDB file
        - We extract the 3D coordinates of the Carbon beta atoms, aligning them
          to the sequence. We put NaN where Cb atoms are missing.
        - From the 3D coordinates, we calculate a pairwise distance map, based
          on L2 distance
        - We use DSSP to generate secondary structure labels for the corresponding
          PDB file. This is also aligned to the sequence. We put - where SSP
          labels are missing.

    For each SCOPe classification level of family/superfamily/fold (in order of difficulty),
    we have split the data into 5 partitions for cross validation. These are provided
    in a downloaded splits folder, in the format:
        splits/{split_level}/{cv_partition}/{train|valid}.txt
    where train is the partition and valid is the concatenation of the remaining 4.

    For each SCOPe domain, we provide a pkl dump that contains:
        - seq    : The domain sequence, stored as an L-length string
        - ssp    : The secondary structure labels, stored as an L-length string
        - dist   : The distance map, stored as an LxL numpy array
        - coords : The 3D coordinates, stored as an Lx3 numpy array
    """

    base_folder = "structural-data"
    # (url, tar filename, extracted folder name, MD5 hash) per archive.
    file_list = [
        # url tar filename filename MD5 Hash
        (
            "https://dl.fbaipublicfiles.com/fair-esm/structural-data/splits.tar.gz",
            "splits.tar.gz",
            "splits",
            "456fe1c7f22c9d3d8dfe9735da52411d",
        ),
        (
            "https://dl.fbaipublicfiles.com/fair-esm/structural-data/pkl.tar.gz",
            "pkl.tar.gz",
            "pkl",
            "644ea91e56066c750cd50101d390f5db",
        ),
    ]

    def __init__(
        self,
        split_level,
        cv_partition,
        split,
        root_path=os.path.expanduser('~/.cache/torch/data/fm'),
        download=False,
    ):
        """
        Parameters:
            split_level: "family", "superfamily" or "fold" (per the class doc)
            cv_partition: cross-validation partition name used in the splits path
            split: "train" or "valid"
            root_path: local cache directory for the downloaded data
            download: fetch and unpack the archives when not already present
        """
        super().__init__()
        assert split in [
            'train',
            'valid',
        ], "train_valid must be \'train\' or \'valid\'"
        self.root_path = root_path
        self.base_path = os.path.join(self.root_path, self.base_folder)

        # check if root path has what you need or else download it
        if download:
            self.download()

        self.split_file = os.path.join(
            self.base_path, 'splits', split_level, cv_partition, f'{split}.txt'
        )
        self.pkl_dir = os.path.join(self.base_path, 'pkl')
        self.names = []
        with open(self.split_file) as f:
            self.names = f.read().splitlines()

    def __len__(self):
        return len(self.names)

    def _check_exists(self) -> bool:
        # True only when every extracted archive folder is present.
        for (_, _, filename, _) in self.file_list:
            fpath = os.path.join(self.base_path, filename)
            if not os.path.exists(fpath) or not os.path.isdir(fpath):
                return False
        return True

    def download(self):
        """Download and unpack both archives (no-op when already present)."""
        if self._check_exists():
            print('Files already downloaded and verified')
            return

        # torchvision is only needed for downloads, hence the local import.
        from torchvision.datasets.utils import download_url

        for url, tar_filename, filename, md5_hash in self.file_list:
            download_path = os.path.join(self.base_path, tar_filename)
            download_url(
                url=url, root=self.base_path, filename=tar_filename, md5=md5_hash
            )
            shutil.unpack_archive(download_path, self.base_path)

    def __getitem__(self, idx):
        """
        Returns a dict with the following entries
            - seq    : Str (domain sequence)
            - ssp    : Str (SSP labels)
            - dist   : np.array (distance map)
            - coords : np.array (3D coordinates)
        """
        name = self.names[idx]
        # Pickles are sharded into subfolders by characters 1:3 of the name.
        pkl_fname = os.path.join(self.pkl_dir, name[1:3], f'{name}.pkl')
        with open(pkl_fname, 'rb') as f:
            obj = pickle.load(f)
        return obj
import fm
import torch
from argparse import Namespace
import warnings
import urllib
from pathlib import Path
import os
def load_model_and_alphabet(model_name):
    """Dispatch by name: a value ending in ``.pt`` is treated as a local
    checkpoint path, anything else as a hub model name."""
    if not model_name.endswith(".pt"):
        return load_model_and_alphabet_hub(model_name)
    return load_model_and_alphabet_local(model_name)
def load_hub_workaround(url, download_name=None):
    """
    Download a checkpoint through torch.hub, falling back to loading the
    already-downloaded file when torch.hub raises.

    Parameters:
        url: remote location of the checkpoint
        download_name: optional local filename override; defaults to the
            final path component of ``url``

    Returns:
        the deserialized checkpoint, mapped to CPU
    """
    try:
        data = torch.hub.load_state_dict_from_url(
            url, progress=True, map_location='cpu', file_name=download_name
        )
    except RuntimeError:
        # Pytorch version issue - see https://github.com/pytorch/pytorch/issues/43106
        # (fixed `== None` to the idiomatic `is None`)
        fn = Path(url).name if download_name is None else download_name
        data = torch.load(
            f"{torch.hub.get_dir()}/checkpoints/{fn}",
            map_location="cpu",
        )
    return data
def load_regression_hub(model_name):
    """Fetch the contact-regression weights paired with a pretrained model."""
    regression_url = (
        "https://dl.fbaipublicfiles.com/fair-esm/regression/"
        f"{model_name}-contact-regression.pt"
    )
    return load_hub_workaround(regression_url)
def load_model_and_alphabet_hub(model_name, theme="protein"):
    """Download model and regression weights from the FAIR hub, then build
    the model/alphabet pair for the given theme."""
    weights = load_hub_workaround(
        f"https://dl.fbaipublicfiles.com/fair-esm/models/{model_name}.pt"
    )
    regression = load_regression_hub(model_name)
    return load_model_and_alphabet_core(weights, regression, theme)
def load_model_and_alphabet_local(model_location, theme="protein"):
    """Load a checkpoint from a local path; the optional contact-regression
    weights are expected next to it as ``<stem>-contact-regression.pt``."""
    model_data = torch.load(model_location, map_location="cpu")
    regression_data = None
    try:
        regression_data = torch.load(
            model_location[:-3] + "-contact-regression.pt", map_location="cpu"
        )
    except FileNotFoundError:
        # Missing regression weights are tolerated (handled downstream).
        pass
    return load_model_and_alphabet_core(model_data, regression_data, theme)
def load_model_and_alphabet_core(model_data, regression_data=None, theme="protein"):
    """Build a model and its alphabet from deserialized checkpoint data.

    Parameters:
        model_data: checkpoint dict with "args" (training namespace) and
            "model" (state dict) entries
        regression_data: optional contact-regression checkpoint whose state
            dict is merged in; when absent, contact prediction will not
            produce correct results (a warning is emitted)
        theme: "protein" or "rna", selects the token vocabulary
    """
    if regression_data is not None:
        model_data["model"].update(regression_data["model"])

    alphabet = fm.Alphabet.from_architecture(model_data["args"].arch, theme=theme)

    if model_data["args"].arch == 'roberta_large':
        # upgrade state dict: strip fairseq's "encoder."/"sentence_encoder."
        # prefixes from arg and parameter names so keys match this package.
        pra = lambda s: ''.join(s.split('encoder_')[1:] if 'encoder' in s else s)
        prs1 = lambda s: ''.join(s.split('encoder.')[1:] if 'encoder' in s else s)
        prs2 = lambda s: ''.join(s.split('sentence_encoder.')[1:] if 'sentence_encoder' in s else s)
        model_args = {pra(arg[0]): arg[1] for arg in vars(model_data["args"]).items()}
        model_state = {prs1(prs2(arg[0])): arg[1] for arg in model_data["model"].items()}
        # Zero the mask-token embedding row (for token drop).
        model_state["embed_tokens.weight"][alphabet.mask_idx].zero_()
        model_type = fm.RNABertModel
    else:
        raise ValueError("Unknown architecture selected")

    model = model_type(
        Namespace(**model_args), alphabet,
    )

    expected_keys = set(model.state_dict().keys())
    found_keys = set(model_state.keys())

    if regression_data is None:
        # The regression head only exists in the separate regression
        # checkpoint, so its absence from the state dict is expected here.
        expected_missing = {"contact_head.regression.weight", "contact_head.regression.bias"}
        error_msgs = []
        missing = (expected_keys - found_keys) - expected_missing
        if missing:
            error_msgs.append(f"Missing key(s) in state_dict: {missing}.")
        unexpected = found_keys - expected_keys
        if unexpected:
            error_msgs.append(f"Unexpected key(s) in state_dict: {unexpected}.")

        if error_msgs:
            raise RuntimeError("Error(s) in loading state_dict for {}:\n\t{}".format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        if expected_missing - found_keys:
            warnings.warn("Regression weights not found, predicting contacts will not produce correct results.")

    # strict only when the regression weights were merged in above.
    model.load_state_dict(model_state, strict=regression_data is not None)

    return model, alphabet
def rna_fm_t12(model_location=None):
    """Load the 12-layer RNA-FM model: from ``model_location`` when that
    path exists locally, otherwise from the RNA-FM download server."""
    use_local = model_location is not None and os.path.exists(model_location)
    if use_local:
        # e.g. "./pretrained/RNA-FM_pretrained.pth"
        return load_model_and_alphabet_local(model_location, theme="rna")
    return load_rnafm_model_and_alphabet_hub("rna_fm_t12", theme="rna")
def load_rnafm_model_and_alphabet_hub(model_name, theme="rna"):
    """
    Download a named RNA-FM checkpoint and build the model + alphabet.

    Raises:
        ValueError: when ``model_name`` is not a known RNA-FM model.
        (Previously a bare ``Exception``; ValueError is more precise and
        remains catchable by existing ``except Exception`` handlers.)
    """
    if model_name != "rna_fm_t12":
        raise ValueError("Unknown model name: {}".format(model_name))
    url = "https://proj.cse.cuhk.edu.hk/rnafm/api/download?filename=RNA-FM_pretrained.pth"
    model_data = load_hub_workaround(url, download_name="RNA-FM_pretrained.pth")
    # The secondary-structure head checkpoint can be fetched the same way:
    # url = "https://proj.cse.cuhk.edu.hk/rnafm/api/download?filename=RNA-FM_SS-ResNet.pth"
    # model_data = load_hub_workaround(url, download_name="RNA-FM_SS-ResNet.pth")
    regression_data = None
    return load_model_and_alphabet_core(model_data, regression_data, theme)
import math
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from .multihead_attention import MultiheadAttention # noqa
from .axial_attention import ColumnSelfAttention, RowSelfAttention
def gelu(x):
    """Exact (erf-based) GELU activation.

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    # x * Phi(x), where Phi is the standard normal CDF.
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def symmetrize(x):
    """Symmetrize a tensor over its final two dimensions (used for contact
    prediction)."""
    transposed = x.transpose(-1, -2)
    return x + transposed
def apc(x):
    """Apply the average product correction over the last two dimensions
    (used for contact prediction)."""
    row_sums = x.sum(-1, keepdims=True)
    col_sums = x.sum(-2, keepdims=True)
    total = x.sum((-1, -2), keepdims=True)
    # Expected value under independence of the row and column marginals.
    background = (row_sums * col_sums) / total
    return x - background
class ESM1LayerNorm(nn.Module):
    """Plain-PyTorch layer normalization over trailing dimensions."""

    def __init__(self, hidden_size, eps=1e-12, affine=True):
        """Construct a layernorm layer in the TF style (eps inside the sqrt)."""
        super().__init__()
        if isinstance(hidden_size, int):
            self.hidden_size = (hidden_size,)
        else:
            self.hidden_size = tuple(hidden_size)
        self.eps = eps
        self.affine = bool(affine)
        if self.affine:
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
        else:
            self.weight, self.bias = None, None

    def forward(self, x):
        # Normalize over the trailing len(hidden_size) dimensions.
        norm_dims = tuple(-(i + 1) for i in range(len(self.hidden_size)))
        centered = x - x.mean(norm_dims, keepdim=True)
        variance = centered.pow(2).mean(norm_dims, keepdim=True)
        out = centered / torch.sqrt(variance + self.eps)
        if self.affine:
            out = (self.weight * out) + self.bias
        return out
# Prefer NVIDIA apex's fused layer norm when available; otherwise fall back
# to the stock torch.nn.LayerNorm under the same name.
try:
    from apex.normalization import FusedLayerNorm as _FusedLayerNorm

    class ESM1bLayerNorm(_FusedLayerNorm):
        @torch.jit.unused
        def forward(self, x):
            # The fused kernel only applies on CUDA tensors; pin the device
            # context so the kernel runs on the tensor's own device.
            if not x.is_cuda:
                return super().forward(x)
            else:
                with torch.cuda.device(x.device):
                    return super().forward(x)

except ImportError:
    from torch.nn import LayerNorm as ESM1bLayerNorm
class TransformerLayer(nn.Module):
    """Transformer layer block (pre-LayerNorm self-attention + GELU FFN)."""

    def __init__(self, embed_dim, ffn_embed_dim, attention_heads, add_bias_kv=True, use_esm1b_layer_norm=False):
        """
        Parameters:
            embed_dim: width of the residual stream
            ffn_embed_dim: hidden width of the feed-forward sublayer
            attention_heads: number of attention heads
            add_bias_kv: forwarded to MultiheadAttention
            use_esm1b_layer_norm: select ESM1bLayerNorm (fused when apex is
                available) instead of the TF-style ESM1LayerNorm
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.ffn_embed_dim = ffn_embed_dim
        self.attention_heads = attention_heads
        self._init_submodules(add_bias_kv, use_esm1b_layer_norm)

    def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm):
        # Build attention, FFN and their respective pre-norms.
        BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm

        self.self_attn = MultiheadAttention(
            self.embed_dim, self.attention_heads, add_bias_kv=add_bias_kv, add_zero_attn=False,
        )
        self.self_attn_layer_norm = BertLayerNorm(self.embed_dim)

        self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim)
        self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim)

        self.final_layer_norm = BertLayerNorm(self.embed_dim)

    def forward(self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False):
        """Apply pre-LN self-attention then the feed-forward block, each with
        a residual connection; returns (x, attn)."""
        # Attention sublayer with residual connection.
        residual = x
        x = self.self_attn_layer_norm(x)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            need_weights=True,
            need_head_weights=need_head_weights,
            attn_mask=self_attn_mask,
        )
        x = residual + x

        # Feed-forward sublayer with residual connection.
        residual = x
        x = self.final_layer_norm(x)
        x = gelu(self.fc1(x))
        x = self.fc2(x)
        x = residual + x

        return x, attn
class AxialTransformerLayer(nn.Module):
    """One axial MSA Transformer block: row self-attention, column
    self-attention, then a feed-forward network, each wrapped in a pre-norm
    residual connection.
    """

    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        max_tokens_per_msa: int = 2 ** 14,
    ) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.dropout_prob = dropout
        # NOTE(review): attention_dropout is accepted but never used below;
        # the attention modules receive the generic `dropout` instead —
        # confirm whether that is intentional.
        self.row_self_attention = self.build_residual(
            RowSelfAttention(
                embedding_dim,
                num_attention_heads,
                dropout=dropout,
                max_tokens_per_msa=max_tokens_per_msa,
            )
        )
        self.column_self_attention = self.build_residual(
            ColumnSelfAttention(
                embedding_dim,
                num_attention_heads,
                dropout=dropout,
                max_tokens_per_msa=max_tokens_per_msa,
            )
        )
        self.feed_forward_layer = self.build_residual(
            FeedForwardNetwork(
                embedding_dim,
                ffn_embedding_dim,
                activation_dropout=activation_dropout,
                max_tokens_per_msa=max_tokens_per_msa,
            )
        )

    def build_residual(self, layer: nn.Module):
        """Wrap ``layer`` in a pre-norm residual block with dropout."""
        return NormalizedResidualBlock(
            layer,
            self.embedding_dim,
            self.dropout_prob,
        )

    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
        need_head_weights: bool = False,
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
        """
        x, row_attn = self.row_self_attention(
            x,
            self_attn_mask=self_attn_mask,
            self_attn_padding_mask=self_attn_padding_mask,
        )
        x, column_attn = self.column_self_attention(
            x,
            self_attn_mask=self_attn_mask,
            self_attn_padding_mask=self_attn_padding_mask,
        )
        x = self.feed_forward_layer(x)
        if need_head_weights:
            return x, column_attn, row_attn
        return x
class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.

    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        # Reserve extra rows so real positions can be offset past the padding
        # index (position ids start at padding_idx + 1).
        if padding_idx is not None:
            total_embeddings = num_embeddings + padding_idx + 1
        else:
            total_embeddings = num_embeddings
        super().__init__(total_embeddings, embedding_dim, padding_idx)
        self.max_positions = num_embeddings

    def forward(self, input: torch.Tensor):
        """Input is expected to be of size [bsz x seqlen]."""
        # Non-pad tokens get consecutive position ids offset by padding_idx;
        # pad tokens map onto padding_idx itself (a zero embedding row).
        nonpad = input.ne(self.padding_idx).int()
        positions = (torch.cumsum(nonpad, dim=1).type_as(nonpad) * nonpad).long() + self.padding_idx
        return F.embedding(
            positions,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
class SinusoidalPositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional embeddings.

    The embedding table is built lazily and regrown on demand when longer
    sequences arrive; pad positions receive an all-zero embedding.
    """

    def __init__(self, embed_dim, padding_idx, learned=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.padding_idx = padding_idx
        # Dummy buffer whose dtype/device tracks the module (used via type_as).
        self.register_buffer("_float_tensor", torch.FloatTensor(1))
        self.weights = None  # lazily built [num_positions, embed_dim] table

    def forward(self, x):
        bsz, seq_len = x.shape
        needed = self.padding_idx + 1 + seq_len
        if self.weights is None or needed > self.weights.size(0):
            self.weights = self.get_embedding(needed)
        self.weights = self.weights.type_as(self._float_tensor)
        positions = self.make_positions(x)
        flat = self.weights.index_select(0, positions.view(-1))
        return flat.view(bsz, seq_len, -1).detach()

    def make_positions(self, x):
        # Real tokens get consecutive ids starting at padding_idx + 1;
        # pad tokens are pinned to padding_idx.
        is_real = x.ne(self.padding_idx).long()
        ids = torch.arange(x.size(1), device=x.device).expand_as(x) + self.padding_idx + 1
        return ids * is_real + self.padding_idx * (1 - is_real)

    def get_embedding(self, num_embeddings):
        half_dim = self.embed_dim // 2
        scale = math.log(10000) / (half_dim - 1)
        inv_freq = torch.exp(torch.arange(half_dim, dtype=torch.float) * -scale)
        angles = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * inv_freq.unsqueeze(0)
        table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1).view(num_embeddings, -1)
        if self.embed_dim % 2 == 1:
            # zero pad the odd final dimension
            table = torch.cat([table, torch.zeros(num_embeddings, 1)], dim=1)
        if self.padding_idx is not None:
            table[self.padding_idx, :] = 0
        return table
# CJY (2021-10-20): added `masked_tokens` support to the LM head below.
class RobertaLMHead(nn.Module):
    """Head for masked language modeling."""

    def __init__(self, embed_dim, output_dim, weight):
        super().__init__()
        self.dense = nn.Linear(embed_dim, embed_dim)
        self.layer_norm = ESM1bLayerNorm(embed_dim)
        # Tied output projection: `weight` is shared with the token embedding.
        self.weight = weight
        self.bias = nn.Parameter(torch.zeros(output_dim))

    def forward(self, features, masked_tokens=None):
        # Only project the masked positions while training; this saves both
        # memory and computation.
        if masked_tokens is not None:
            features = features[masked_tokens, :]
        hidden = self.layer_norm(gelu(self.dense(features)))
        # Project back to vocabulary size with the (tied) weight plus bias.
        return F.linear(hidden, self.weight) + self.bias
class ContactPredictionHead(nn.Module):
    """Symmetrizes attention maps, applies APC, and runs a logistic
    regression over the per-head attention features to predict contacts."""

    def __init__(
        self,
        in_features: int,
        prepend_bos: bool,
        append_eos: bool,
        bias=True,
        eos_idx: Optional[int] = None,
    ):
        super().__init__()
        self.in_features = in_features
        self.prepend_bos = prepend_bos
        self.append_eos = append_eos
        if append_eos and eos_idx is None:
            raise ValueError(
                "Using an alphabet with eos token, but no eos token was passed in."
            )
        self.eos_idx = eos_idx
        self.regression = nn.Linear(in_features, 1, bias)
        self.activation = nn.Sigmoid()

    def forward(self, tokens, attentions):
        if self.append_eos:
            # Mask attention to/from eos, then drop the eos row and column.
            eos_mask = tokens.ne(self.eos_idx).to(attentions)
            pair_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
            attentions = attentions * pair_mask[:, None, None, :, :]
            attentions = attentions[..., :-1, :-1]
        if self.prepend_bos:
            # Drop the cls/bos row and column.
            attentions = attentions[..., 1:, 1:]
        batch_size, layers, heads, seqlen, _ = attentions.size()
        # Collapse (layer, head) into one channel axis: B x C x T x T.
        attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
        # attentions always float32, may need to convert to float16
        attentions = attentions.to(next(self.parameters()))
        features = apc(symmetrize(attentions)).permute(0, 2, 3, 1)
        return self.activation(self.regression(features).squeeze(3))
class NormalizedResidualBlock(nn.Module):
    """Wraps a sub-layer with pre-layer-norm, dropout, and a residual add."""

    def __init__(
        self,
        layer: nn.Module,
        embedding_dim: int,
        dropout: float = 0.1,
    ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.layer = layer
        self.dropout_module = nn.Dropout(dropout)
        self.layer_norm = ESM1bLayerNorm(self.embedding_dim)

    def forward(self, x, *args, **kwargs):
        residual = x
        outputs = self.layer(self.layer_norm(x), *args, **kwargs)
        # Sub-layers may return either a tensor or (tensor, extras...).
        if isinstance(outputs, tuple):
            primary, *extras = outputs
        else:
            primary, extras = outputs, None
        primary = residual + self.dropout_module(primary)
        if extras is not None:
            return (primary,) + tuple(extras)
        return primary
class FeedForwardNetwork(nn.Module):
    """Position-wise feed-forward network: Linear -> GELU -> Dropout -> Linear.

    Args:
        embedding_dim: input/output feature size.
        ffn_embedding_dim: hidden feature size.
        activation_dropout: dropout applied after the activation.
        max_tokens_per_msa: stored for callers that chunk large MSAs; it is
            not used inside this module.
    """

    def __init__(
        self,
        embedding_dim: int,
        ffn_embedding_dim: int,
        activation_dropout: float = 0.1,
        max_tokens_per_msa: int = 2 ** 14,
    ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.ffn_embedding_dim = ffn_embedding_dim
        self.max_tokens_per_msa = max_tokens_per_msa
        self.activation_fn = nn.GELU()
        self.activation_dropout_module = nn.Dropout(activation_dropout)
        self.fc1 = nn.Linear(embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, embedding_dim)

    def forward(self, x):
        """Apply the two-layer MLP position-wise."""
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        return x
from torch import nn
import torch
from functools import wraps
class DownStreamModule(nn.Module):
    """
    Base module for downstream predictors built on a pretrained backbone.

    Holds the backbone alphabet's bookkeeping (bos/eos/pad indices), declares
    which backbone outputs the head consumes, and provides helpers to strip
    prepended/appended special tokens and to reduce the MSA depth axis.
    """

    def __init__(self, backbone_args, backbone_alphabet, depth_reduction="none",
                 need_token=False, need_attention=None, need_embedding=None,
                 need_extrafeat=None):
        """
        :param backbone_args: object exposing ``embed_dim`` and ``attention_heads``
        :param backbone_alphabet: object exposing ``prepend_bos``, ``append_eos``,
            ``cls_idx``, ``eos_idx`` and ``padding_idx``
        :param depth_reduction: how to collapse the MSA depth axis
            ("none", "first", "mean" or "attention")
        :param need_token/need_attention/need_embedding/need_extrafeat:
            which backbone outputs this head consumes
        """
        super().__init__()
        self.backbone_args = backbone_args
        self.backbone_alphabet = backbone_alphabet
        self.prepend_bos = self.backbone_alphabet.prepend_bos
        self.append_eos = self.backbone_alphabet.append_eos
        self.bos_idx = self.backbone_alphabet.cls_idx
        self.eos_idx = self.backbone_alphabet.eos_idx
        if self.append_eos and self.eos_idx is None:
            raise ValueError("Using an alphabet with eos token, but no eos token was passed in.")
        self.pad_idx = self.backbone_alphabet.padding_idx
        self.embed_dim = self.backbone_args.embed_dim
        self.attention_heads = self.backbone_args.attention_heads
        self.depth_reduction = depth_reduction
        if self.depth_reduction == "attention":
            # Learned depth attention: project the first (query) sequence and
            # all sequences (keys) into a reduced space.
            self.msa_embed_dim_in = self.embed_dim
            self.msa_embed_dim_out = self.embed_dim // self.attention_heads
            self.msa_q_proj = nn.Linear(self.msa_embed_dim_in, self.msa_embed_dim_out)
            self.msa_k_proj = nn.Linear(self.msa_embed_dim_in, self.msa_embed_dim_out)
        # Mutable defaults are created per instance here (instead of `[]` in
        # the signature) to avoid shared mutable-default-argument bugs.
        self.input_type = {
            "token": need_token,
            "attention": [] if need_attention is None else need_attention,
            "embedding": [12] if need_embedding is None else need_embedding,
            "extra-feat": [] if need_extrafeat is None else need_extrafeat,
        }

    def remove_pend_tokens_1d(self, tokens, seqs):
        """
        Strip prepended bos / appended eos positions from per-token features.

        :param tokens: token ids aligned with the last-but-one axis of ``seqs``
        :param seqs: must be shape of [B, ..., L, E] # seq: [B, L, E]; msa: [B, D, L, E]
        :return: (trimmed seqs, padding mask or None)
        """
        padding_masks = tokens.ne(self.pad_idx)
        # remove eos token (suffix first)
        if self.append_eos:  # default is right
            eos_masks = tokens.ne(self.eos_idx)
            eos_pad_masks = (eos_masks & padding_masks).to(seqs)
            # zero the eos (and pad) positions before slicing them off
            seqs = seqs * eos_pad_masks.unsqueeze(-1)
            seqs = seqs[:, ..., :-1, :]
            padding_masks = padding_masks[:, ..., :-1]
        # remove bos token
        if self.prepend_bos:  # default is left
            seqs = seqs[:, ..., 1:, :]
            padding_masks = padding_masks[:, ..., 1:]
        # NOTE(review): True in this mask means "real token", so the mask is
        # dropped only when *everything* is padding; confirm `.all()` was not
        # intended here (drop the mask when nothing is padded).
        if not padding_masks.any():
            padding_masks = None
        return seqs, padding_masks

    def remove_pend_tokens_2d(self, tokens, maps):
        """
        Strip prepended bos / appended eos rows and columns from pair maps.

        :param tokens: token ids aligned with the map axes
        :param maps: shape of [B, L, L, ...]
        :return: (trimmed maps, padding mask or None)
        """
        padding_masks = tokens.ne(self.pad_idx)
        # remove eos token (suffix first)
        if self.append_eos:  # default is right
            eos_masks = tokens.ne(self.eos_idx)
            eos_pad_masks = (eos_masks & padding_masks).to(maps)
            # outer product masks both the eos row and the eos column
            eos_pad_masks = eos_pad_masks.unsqueeze(1) * eos_pad_masks.unsqueeze(2)
            maps = maps * eos_pad_masks.unsqueeze(-1)
            maps = maps[:, :-1, :-1, ...]
            padding_masks = padding_masks[:, :-1, ...]
        # remove bos token
        if self.prepend_bos:  # default is left
            maps = maps[:, 1:, 1:, ...]
            padding_masks = padding_masks[:, 1:, ...]
        if not padding_masks.any():
            padding_masks = None
        return maps, padding_masks

    def msa_depth_reduction(self, embeddings, padding_masks):
        """
        Collapse the MSA depth axis of ``embeddings`` ([B, D, L, E] -> [B, L, E]).

        :param embeddings: per-sequence embeddings, [B, D, L, E]
        :param padding_masks: optional mask used by the "attention" reduction
        :raises ValueError: if ``self.depth_reduction`` is not a known mode
        """
        if self.depth_reduction == "first":
            embeddings = embeddings[:, 0, :, :]
        elif self.depth_reduction == "mean":
            embeddings = torch.mean(embeddings, dim=1)
        elif self.depth_reduction == "attention":
            msa_q = self.msa_q_proj(embeddings[:, 0, :, :])  # first query
            msa_k = self.msa_k_proj(embeddings)  # all keys
            if padding_masks is not None:
                # Zero out any padded aligned positions - this is important since
                # we take a sum across the alignment axis.
                # NOTE(review): here True in the mask appears to mean "padded",
                # the opposite of remove_pend_tokens_* — verify the convention.
                msa_q = msa_q * (1 - padding_masks[:, 0, :].unsqueeze(-1).type_as(msa_q))
            depth_attn_weights = torch.einsum("bld,bjld->bj", msa_q, msa_k)
            depth_attn_weights = torch.softmax(depth_attn_weights, dim=1)
            embeddings = torch.sum(embeddings * depth_attn_weights.unsqueeze(-1).unsqueeze(-1), dim=1)
        else:
            # ValueError (a subclass of Exception) keeps old handlers working.
            raise ValueError("Wrong Depth Reduction Type")
        return embeddings
from typing import Union
from numpy import arange, argmax, delete, einsum, log2, ndarray, std, sum, unique
from pandas import DataFrame, Series
def _m_numpy(gene_expression: ndarray) -> ndarray:
"""Internal control gene-stability measure `M`.
Computes Eq. (4) in Ref. [1].
[1]: Vandesompele, Jo, et al. "Accurate normalization of real-time quantitative
RT-PCR data by geometric averaging of multiple internal control genes." Genome
biology 3.7 (2002): 1-12.
"""
if not (gene_expression > 0).all():
raise ValueError(
'Expression domain error: not all expression data are strictly positive!'
)
a = gene_expression
# Eq. (2): A_{jk}^{(i)} = log_2 (a_{ij} / a_{ik})
A = log2(einsum('ij,ik->ijk', a, 1/a))
# Eq. (3)
V = std(A, axis=0)
# Eq. (4) N.B., Since V_{j=k} is zero, we can simply ignore it since it does not
# contribute to calculation.
n = V.shape[1]
return sum(V, axis=1) / (n-1)
def m_measure(gene_expression: Union[ndarray, DataFrame]) -> Union[ndarray, Series]:
    """Internal control gene-stability measure `M` as described in Ref. [1].

    [1]: Vandesompele, Jo, et al. "Accurate normalization of real-time quantitative
    RT-PCR data by geometric averaging of multiple internal control genes." Genome
    biology 3.7 (2002): 1-12.

    Args:
        gene_expression: Gene expression counts of `m` samples (rows) and `n` internal
            control genes (columns). Expression must be strictly positive.

    Raises:
        ValueError: Expression not strictly positive.
    """
    # DataFrame input gets a labelled Series back; arrays stay arrays.
    if isinstance(gene_expression, DataFrame):
        values = _m_numpy(gene_expression.to_numpy())
        return Series(values, index=gene_expression.columns)
    return _m_numpy(gene_expression)
def _genorm_numpy(gene_expression: ndarray, n_stop: int = 20, verbose: bool = False
                  ) -> tuple[list, ndarray]:
    """Backward elimination of genes by M value.

    Returns:
        First element are the selected genes, and the second are the corresponding `M`
        values.

    Raises:
        ValueError: Expression not strictly positive, or `n_stop` equal or larger than
            number of genes (columns).
    """
    if not (gene_expression > 0).all():
        raise ValueError(
            'Expression domain error: not all expression data are strictly positive!'
        )
    elif gene_expression.shape[1] <= n_stop:
        raise ValueError('Nothing to select, since `n_stop` >= number of genes.')

    n_genes = gene_expression.shape[1]
    eliminated: list[int] = []
    for _ in range(n_genes - n_stop):
        # Map indices of the surviving subset back onto `gene_expression`.
        remaining = delete(arange(n_genes), eliminated)
        subset = delete(gene_expression, eliminated, axis=1)
        # Drop the least stable gene (largest M) from the remaining set.
        worst: int = remaining[argmax(_m_numpy(subset))]
        if verbose:
            print('.', end='')  # progress dot per elimination round
        eliminated.append(worst)
        # Sanity check: the same gene must never be eliminated twice.
        assert len(unique(eliminated)) == len(eliminated)
    final_subset = delete(gene_expression, eliminated, axis=1)
    selected = [i for i in range(n_genes) if i not in eliminated]
    if verbose:
        print('')  # end-of-progress newline
    return selected, _m_numpy(final_subset)
def genorm(gene_expression: Union[ndarray, DataFrame], n_stop: int = 20, verbose: bool = False,
           ) -> tuple[list, Union[ndarray, Series]]:
    """geNorm performs recursive backward selection of genes with low `M`-value.

    Args:
        gene_expression: Gene expression counts of `m` samples (rows) and `n` internal
            control genes (columns). Expression must be strictly positive.
        n_stop: Stopping criterion: stop after selecting this many genes.
        verbose: Print a progress dot per elimination round.

    Returns:
        First element are the selected genes (column labels for DataFrame input,
        column indices otherwise), and the second are the corresponding `M` values.

    Raises:
        ValueError: Expression not strictly positive.
    """
    # DataFrame input: run on the raw array, then re-attach column labels.
    if isinstance(gene_expression, DataFrame):
        selected_idx, m_values = _genorm_numpy(gene_expression.to_numpy(), n_stop, verbose)
        selected_genes = gene_expression.columns[selected_idx]
        m_series = Series(m_values, index=selected_genes)
        return selected_genes.tolist(), m_series
    return _genorm_numpy(gene_expression, n_stop, verbose)
import yaml
import json
import jsonschema
from jsonschema import Draft4Validator, validators
from pathlib import Path
from dataclasses import dataclass
from rna_map import settings, logger
from rna_map.settings import get_py_path
log = logger.get_logger("PARAMETERS")
@dataclass(frozen=True, order=True)
class Inputs:
    """
    Input parameters
    """

    fasta: str       # reference sequences
    fastq1: str      # single-end reads, or R1 of a paired-end run
    fastq2: str = ""  # R2 of a paired-end run (empty = single end)
    csv: str = ""     # optional csv file

    def is_paired(self):
        """Whether both R1 and R2 fastq files were supplied."""
        return self.fastq2 != ""

    def supplied_csv(self):
        """Whether the user supplied a csv file."""
        return self.csv != ""

    def fastq1_name(self):
        """Basename (without extension) of the fastq1 file."""
        return Path(self.fastq1).stem

    def fastq2_name(self):
        """Basename (without extension) of the fastq2 file."""
        return Path(self.fastq2).stem
def extend_with_default(validator_class):
    """Return a jsonschema validator class that also fills in ``default``
    values from the schema while validating.

    Stock jsonschema validators only check an instance; this wrapper makes
    the "properties" validator insert each subschema's default before
    validating, so the defaulted values are themselves validated.
    """
    validate_properties = validator_class.VALIDATORS["properties"]

    def set_defaults(validator, properties, instance, schema):
        # Fill in defaults first (dicts only — lists have no named keys).
        for property_, subschema in properties.items():
            if "default" in subschema and not isinstance(instance, list):
                instance.setdefault(property_, subschema["default"])
        # Then delegate to the original "properties" validation.
        for error in validate_properties(
            validator,
            properties,
            instance,
            schema,
        ):
            yield error

    return validators.extend(
        validator_class,
        {"properties": set_defaults},
    )
def validate_parameters(params):
    """Validate ``params`` against the bundled JSON schema.

    Missing keys are filled in place with the schema defaults.

    :param params: parameter dict (mutated: defaults are inserted)
    :raises ValueError: if the parameters do not conform to the schema
    """
    path = get_py_path() + "/resources/params_schema.json"
    with open(path) as f:
        schema = json.load(f)
    # Validate the params against the schema, filling defaults as we go.
    FillDefaultValidatingDraft4Validator = extend_with_default(Draft4Validator)
    try:
        FillDefaultValidatingDraft4Validator(schema).validate(params)
    except jsonschema.exceptions.ValidationError as e:
        # Re-raise as ValueError, chaining the original error so the full
        # schema context is preserved for debugging.
        raise ValueError(e.message) from e
def parse_parameters_from_file(param_file):
    """Load a YAML parameter file, validate it against the schema, and
    return the (default-filled) parameter dict.
    """
    with open(param_file) as f:
        params = yaml.safe_load(f)
    # An empty YAML file parses to None; treat it as "all defaults".
    if params is None:
        params = {}
    validate_parameters(params)
    return params
def get_default_params():
    """Return the packaged default parameters (resources/default.yml)."""
    return parse_parameters_from_file(get_py_path() + "/resources/default.yml")
# abbreviations
# tg -> trim galore
# bt2 -> bowtie 2
import yaml
import cloup
from cloup import option_group, option
from rna_map.logger import get_logger
log = get_logger('CLI_OPTS')
def main_options():
    """Build the cloup option group for the core CLI arguments: the
    reference fasta, the fastq read files, and optional structure /
    parameter inputs."""
    return option_group(
        "Main arguments",
        "These are the main arguments for the command line interface",
        option(
            "-fa",
            "--fasta",
            type=cloup.Path(exists=True),
            required=True,
            help="The fasta file containing the reference sequences",
        ),
        option(
            "-fq1",
            "--fastq1",
            type=cloup.Path(exists=True),
            required=True,
            help="The fastq file containing the single end reads or the first pair of paired end reads",
        ),
        option(
            "-fq2",
            "--fastq2",
            type=str,
            default="",
            help="The fastq file containing the second pair of paired end reads",
        ),
        option(
            "--dot-bracket",
            type=str,
            default="",
            help="The directory containing the input files",
        ),
        option(
            "-pf",
            "--param-file",
            type=str,
            default=None,
            help="A yml formatted file to specify parameters, see rna_map/resources/default.yml for an example",
        ),
        option(
            "-pp",
            "--param-preset",
            type=str,
            default=None,
            help="run a set of parameters for specific uses like 'barcoded-libraries'",
        ),
    )
def docker_options():
    """Build the cloup option group controlling containerized execution."""
    return option_group(
        "Docker options",
        "These are the options for running the command line interface in a docker container",
        option(
            "--docker",
            is_flag=True,
            help="Run the program in a docker container",
        ),
        option(
            "--docker-image",
            type=str,
            default="rna-map",
            help="The docker image to use",
        ),
        option(
            "--docker-platform",
            type=str,
            default="",
            help="The platform to use for the docker image",
        ),
    )
def mapping_options():
    """Build the cloup option group for read QC/trimming and bowtie2
    alignment settings."""
    return option_group(
        "Mapping options",
        "These are the options for pre processing of fastq files and alignment to reference sequences",
        option(
            "--skip-fastqc",
            is_flag=True,
            help="do not run fastqc for quality control of sequence data",
        ),
        option(
            "--skip-trim-galore",
            is_flag=True,
            help="do not run trim galore for quality control of sequence data",
        ),
        option(
            "--tg-q-cutoff",
            type=int,
            default=20,
            help="the quality cutoff for trim galore",
        ),
        option(
            "--bt2-alignment-args",
            help="the arguments to pass to bowtie2 for alignment seperated by commas",
        ),
        option(
            "--save-unaligned",
            is_flag=True,
            help="the path to save unaligned reads to",
        ),
    )
def bit_vector_options():
    """Build the cloup option group for bit-vector generation, plotting,
    and filtering cutoffs."""
    return option_group(
        "Bit vector options",
        "These are the options for the bit vector step",
        option(
            "--skip-bit-vector",
            is_flag=True,
            help="do not run the bit vector step",
        ),
        option(
            "--summary-output-only",
            is_flag=True,
            help="do not generate bit vector files or plots recommended when there are thousands of reference sequences",
        ),
        option(
            "--plot-sequence",
            is_flag=True,
            help="plot sequence and structure is supplied under the population average plots",
        ),
        option(
            "--map-score-cutoff",
            type=int,
            default=15,
            help="reject any bit vector where the mapping score for bowtie2 alignment is less than this value",
        ),
        option(
            "--qscore-cutoff",
            type=int,
            default=25,
            help="quality score of read nucleotide, sets to ambigious if under this val",
        ),
        option(
            "--mutation-count-cutoff",
            type=int,
            default=5,
            help="maximum number of mutations allowed in a bit vector will be discarded if higher",
        ),
        option(
            "--percent-length-cutoff",
            type=float,
            default=0.1,
            help="minium percent of the length of the reference sequence allowed in a bit vector will be discarded if lower",
        ),
        option(
            "--min-mut-distance",
            type=int,
            default=5,
            help="minimum distance between mutations in a bit vector will be discarded if lower",
        ),
    )
def misc_options():
    """Build the cloup option group for miscellaneous run behavior
    (overwrite, legacy mode, strict constraints, debugging)."""
    return option_group(
        "Misc options",
        "These are the options for the misc stage",
        option(
            "--overwrite",
            is_flag=True,
            help="overwrite the output directory if it exists",
        ),
        option(
            "--restore-org-behavior",
            is_flag=True,
            help="restore the original behavior of the rna_map",
        ),
        option(
            "--stricter-bv-constraints",
            is_flag=True,
            help="use stricter constraints for bit vector generation, use at your own risk!",
        ),
        option(
            "--debug",
            is_flag=True,
            help="enable debug mode",
        ),
    )
def parse_cli_args(params, args):
    """Apply command-line overrides from ``args`` onto the ``params`` dict.

    Only options that differ from their CLI defaults are applied, so values
    coming from a parameter file survive unless explicitly overridden.

    :param params: parameter dict (mutated in place)
    :param args: dict of parsed command-line options
    """
    # main options and docker options are consumed elsewhere
    # mapping options
    if args["skip_fastqc"]:
        log.info("skipping fastqc for quality control only do this if you are confident in the quality of your data")
        params["map"]["skip_fastqc"] = args["skip_fastqc"]
    if args["skip_trim_galore"]:
        log.info("skipping trim galore for quality control not recommended")
        params["map"]["skip_trim_galore"] = args["skip_trim_galore"]
    if args["tg_q_cutoff"] != 20:
        log.info(f"trim galore quality cutoff set to {args['tg_q_cutoff']}")
        params["map"]["tg_q_cutoff"] = args["tg_q_cutoff"]
    if args["bt2_alignment_args"] is not None:
        log.info(f"bowtie2 alignment arguments set to {args['bt2_alignment_args']}")
        params["map"]["bt2_alignment_args"] = args["bt2_alignment_args"]
    if args["save_unaligned"]:
        log.info(f"saving unaligned reads to {args['save_unaligned']}")
        params["map"]["save_unaligned"] = args["save_unaligned"]
    # bit_vector options
    if args["skip_bit_vector"]:
        log.info("skipping bit vector step")
        params["bit_vector"]["skip"] = args["skip_bit_vector"]
    if args["summary_output_only"]:
        log.info("only outputting summary files")
        params["bit_vector"]["summary_output_only"] = args["summary_output_only"]
    if args["plot_sequence"]:
        log.info("plotting sequence/structure on bit vector plots")
        params["bit_vector"]["plot_sequence"] = args["plot_sequence"]
    if args["map_score_cutoff"] != 15:
        log.info(f"mapping score cutoff set to {args['map_score_cutoff']}")
        params["bit_vector"]["map_score_cutoff"] = args["map_score_cutoff"]
    if args["qscore_cutoff"] != 25:
        log.info(f"qscore cutoff set to {args['qscore_cutoff']}")
        params["bit_vector"]["qscore_cutoff"] = args["qscore_cutoff"]
    # these three only take effect when --stricter-bv-constraints is set
    if args["mutation_count_cutoff"] != 5:
        log.info(f"mutation count cutoff set to {args['mutation_count_cutoff']} this will only run if --stricter-bv-constraints is set")
        params["bit_vector"]["stricter_constraints"]["mutation_count_cutoff"] = args["mutation_count_cutoff"]
    if args["percent_length_cutoff"] != 0.1:
        log.info(f"percent length cutoff set to {args['percent_length_cutoff']} this will only run if --stricter-bv-constraints is set")
        params["bit_vector"]["stricter_constraints"]["percent_length_cutoff"] = args["percent_length_cutoff"]
    if args["min_mut_distance"] != 5:
        log.info(f"minimum mutation distance set to {args['min_mut_distance']} this will only run if --stricter-bv-constraints is set")
        params["bit_vector"]["stricter_constraints"]["min_mut_distance"] = args["min_mut_distance"]
    # misc options
    if args["overwrite"]:
        log.info("will overwrite all existing files")
        params["overwrite"] = args["overwrite"]
    if args["restore_org_behavior"]:
        log.info("restoring original behavior of rna_map publications")
        params["restore_org_behavior"] = args["restore_org_behavior"]
    if args["stricter_bv_constraints"]:
        log.info("stricter bit vector constraints are active please use at your own risk")
        params["stricter_bv_constraints"] = args["stricter_bv_constraints"]
import os
import shutil
import subprocess
from typing import Optional
from pathlib import Path
from dataclasses import dataclass
import pandas as pd
from rna_map.settings import get_py_path
from rna_map.logger import get_logger
from rna_map.exception import DREEMInputException, DREEMExternalProgramException
log = get_logger("EXTERNAL_CMD")
@dataclass(frozen=True, order=True)
class ProgOutput:
    """
    Class to store the output of an external program
    """

    # decoded stdout of the command, or None if it failed
    output: Optional[str]
    # decoded error output, or None if the command succeeded
    error: Optional[str]
def does_program_exist(prog_name: str) -> bool:
    """
    Check if a program exists on the PATH.

    :prog_name: name of the program
    """
    return shutil.which(prog_name) is not None
def get_bowtie2_version() -> str:
    """
    Get the version of bowtie2

    :return: version of bowtie2
    :raises DREEMExternalProgramException: if bowtie2 is not on the PATH
    """
    if not does_program_exist("bowtie2"):
        raise DREEMExternalProgramException(
            "cannot get bowtie2 version, cannot find the exe"
        )
    output = subprocess.check_output("bowtie2 --version", shell=True).decode(
        "utf8"
    )
    # first line looks like ".../bowtie2-align-s version 2.4.5"; the version
    # is the last whitespace-separated token
    lines = output.split("\n")
    l_spl = lines[0].split()
    return l_spl[-1]
def get_fastqc_version() -> str:
    """
    Get the version of fastqc

    :return: version of fastqc
    :raises DREEMExternalProgramException: if fastqc is not on the PATH
    :raises ValueError: if the version output cannot be parsed
    """
    if not does_program_exist("fastqc"):
        raise DREEMExternalProgramException(
            "cannot get fastqc version, cannot find the exe"
        )
    out = run_command("fastqc --version")
    lines = out.output.split("\n")
    # expected first line: "FastQC v0.11.9" -> ["FastQC", "v0.11.9"]
    l_spl = lines[0].split()
    # bug fix: str.split("\n") always yields at least one element, so the old
    # `len(lines) < 1` guard could never fire and malformed output crashed
    # with IndexError instead of the intended ValueError.
    if len(l_spl) < 2:
        raise ValueError(
            "cannot get fastqc version, output is not valid: {}".format(
                out.output
            )
        )
    return l_spl[1]
def get_trim_galore_version():
    """
    Get the version of trim_galore.

    :return: version string, or "" if no "version" line was found
    :raises DREEMExternalProgramException: if trim_galore is not on the PATH
    :raises ValueError: if the version output is too short to parse
    """
    if not does_program_exist("trim_galore"):
        raise DREEMExternalProgramException(
            "cannot get trim_galore version, cannot find the exe"
        )
    output = subprocess.check_output(
        "trim_galore --version", shell=True
    ).decode("utf8")
    lines = output.split("\n")
    if len(lines) < 4:
        # bug fix: this message previously referred to fastqc
        raise ValueError(
            "cannot get trim_galore version, output is not valid: {}".format(output)
        )
    # the banner contains a line such as "version 0.6.7"
    for line in lines:
        if line.find("version") != -1:
            return line.split()[-1]
    return ""
def get_cutadapt_version():
    """Return the installed cutadapt version string (whitespace-trimmed)."""
    if not does_program_exist("cutadapt"):
        raise DREEMExternalProgramException(
            "cannot get cutadapt version, cannot find the exe"
        )
    output = subprocess.check_output("cutadapt --version", shell=True).decode(
        "utf8"
    )
    # cutadapt prints just the version number, e.g. "4.1\n"
    return output.strip()
def run_command(cmd: str) -> ProgOutput:
    """
    Run a shell command, capturing either its stdout or its error output.

    :cmd: command to run
    """
    try:
        stdout = subprocess.check_output(
            cmd, shell=True, stderr=subprocess.STDOUT
        ).decode("utf8")
        return ProgOutput(stdout, None)
    except subprocess.CalledProcessError as exc:
        # failed run: expose the combined output as the error message
        return ProgOutput(None, exc.output.decode("utf8"))
def run_named_command(method_name: str, cmd: str) -> ProgOutput:
    """
    Run an external command, logging progress and raising on failure.

    :method_name: human-readable name used in log messages
    :cmd: command to run
    :return: output of the command
    """
    log.info(f"running {method_name}")
    log.debug(cmd)
    out = run_command(cmd)
    if out.error is None:
        log.info(f"{method_name} ran without errors")
        return out
    log.error(f"error running command: {method_name}")
    raise DREEMExternalProgramException(out.error)
def run_fastqc(fastq1: str, fastq2: str, out_dir: str) -> ProgOutput:
    """
    Run the fastqc application on fastq files.

    :fastq1: path to fastq1 file
    :fastq2: path to fastq2 file
    :out_dir: path to output directory (a fastqc/ subdirectory is created)
    """
    fastqc_dir = os.path.join(out_dir, "fastqc")
    os.makedirs(fastqc_dir, exist_ok=True)
    return run_named_command("fastqc", f"fastqc {fastq1} {fastq2} -o {fastqc_dir}")
def run_trim_glore(fastq1: str, fastq2: str, out_dir: str) -> ProgOutput:
    """
    Run trim galore on fastq files.

    NOTE: the function name misspells "galore"; it is kept as-is for
    backward compatibility with existing callers.

    :fastq1: path to fastq1 file
    :fastq2: path to fastq2 file (empty string for single-end data)
    :out_dir: path to output directory
    """
    if fastq2 != "":
        cmd = f"trim_galore --fastqc --paired {fastq1} {fastq2} -o {out_dir}"
    else:
        cmd = f"trim_galore --fastqc {fastq1} -o {out_dir}"
    return run_named_command("trim_galore", cmd)
def run_bowtie_build(fasta: str, input_dir: str) -> ProgOutput:
    """
    Build a bowtie2 index from a fasta file.

    :fasta: path to fasta file
    :input_dir: directory in which the index files are written
    """
    index_prefix = Path(fasta).stem
    cmd = f'bowtie2-build "{fasta}" {input_dir}/{index_prefix}'
    return run_named_command("bowtie2-build", cmd)
def validate_bowtie2_args(args: str) -> bool:
    """
    Validate the bowtie2 arguments against the bundled argument table.

    :args: arguments to validate, separated by ","
    :return: True if all arguments are valid
    :raises DREEMInputException: if an argument or its value type is invalid
    """

    def check_type(arg):
        """Classify an argument value as 'int', 'float' or 'str'."""
        if arg.isdigit():
            return "int"
        try:
            float(arg)
            return "float"
        except ValueError:
            return "str"

    df = pd.read_csv(get_py_path() + "resources/bowtie2_args.csv")
    valid_bt2_args = {row["param"]: row["vtype"] for _, row in df.iterrows()}
    if len(args) == 0:
        log.warning("no bowtie2 arguments supplied thats probably wrong")
    for full_arg in args.strip().split(","):
        if len(full_arg) == 0:
            continue
        # flag-style argument without a value, e.g. "--local"
        if full_arg in valid_bt2_args:
            log.debug(f"{full_arg} is a valid bt2 argument")
            continue
        spl = full_arg.split()
        if len(spl) == 1:
            raise DREEMInputException(
                f"{full_arg} is not a valid bowtie2 argument. "
                f"Please check the documentation for valid arguments"
            )
        arg, arg_val = spl[0], spl[1]
        if arg in valid_bt2_args:
            log.debug(f"{arg} is a valid bt2 argument")
        else:
            raise DREEMInputException(f"{full_arg} is an invalid bt2 argument")
        if check_type(arg_val) != valid_bt2_args[arg]:
            raise DREEMInputException(
                f"{arg} must be of type {valid_bt2_args[arg]}"
            )
    log.debug("all bt2 arguments are valid")
    # bug fix: the signature promises a bool, but the function previously
    # fell off the end and implicitly returned None
    return True
def run_bowtie_alignment(
        fasta: str, fastq1: str, fastq2: str, in_dir: str, out_dir: str, args: str,
        **kwargs) -> ProgOutput:
    """
    Run bowtie2 alignment against a pre-built index.

    :fasta: path to fasta file (its stem names the index in ``in_dir``)
    :fastq1: path to fastq1 file
    :fastq2: path to fastq2 file (empty string for single-end data)
    :in_dir: path to bowtie2 index directory
    :out_dir: directory in which aligned.sam is written
    :args: comma-separated bowtie2 arguments (validated before running)
    :kwargs: pass ``save_unaligned`` to keep unaligned reads
    """
    # check to make sure bt2 args are valid
    validate_bowtie2_args(args)
    bt2_index = in_dir + "/" + Path(fasta).stem
    bt2_args = " ".join(args.split(","))
    sam_file = out_dir + "/aligned.sam"
    cmd = f"bowtie2 {bt2_args} -x {bt2_index} -S {sam_file} "
    if fastq2 != "":
        cmd += f"-1 {fastq1} -2 {fastq2} "
    else:
        cmd += f"-U {fastq1}"
    if "save_unaligned" in kwargs:
        cmd += " --un-conc unaligned.fastq"
    out = run_named_command("bowtie2 alignment", cmd)
    # keep only the alignment summary lines (drop per-read "U..." lines)
    keep = [l for l in out.output.split("\n") if len(l) > 0 and l[0] != "U"]
    log.info("results for bowtie alignment: \n" + "\n".join(keep))
    return out
def tpm(counts, lengths):
    """
    Performs TPM (transcripts-per-million) normalization on raw count data.
    Args:
        counts (pandas.DataFrame): raw count data, genes as rows, samples as columns
        lengths (pandas.Series): gene lengths, indexed like the rows of ``counts``
    Returns:
        pandas.DataFrame: TPM-normalized expression values
    """
    # length-normalize each gene's counts
    per_length = counts.divide(lengths, axis=0)
    # per-sample totals of the length-normalized counts
    sample_totals = per_length.sum(axis=0)
    # scale each sample so its values sum to one million
    return per_length.divide(sample_totals, axis=1) * 1e6
def cpm_norm(counts):
    """
    Performs CPM (counts-per-million) normalization on raw count data.
    Args:
        counts (pandas.DataFrame): raw count data, genes as rows, samples as columns
    Returns:
        pandas.DataFrame: CPM-normalized expression values
    """
    # library size of each sample (column sums)
    library_sizes = counts.sum(axis=0)
    # scale each sample to one million total counts
    return counts.divide(library_sizes, axis=1) * 1e6
def rpkm(raw_data, lengths):
    """
    Performs RPKM normalization on a pandas DataFrame of count data
    Args:
        raw_data (pandas.DataFrame): DataFrame containing raw count data for each gene in each sample
        lengths (pandas.Series): Series containing gene lengths
    Returns:
        pandas.DataFrame: DataFrame containing RPKM-normalized expression values for each gene in each sample
    """
    # reads per unit gene length
    norm_counts = raw_data.divide(lengths, axis=0)
    # divide by each sample's library size (direct division instead of the
    # former multiply-by-reciprocal, which was needlessly indirect)
    per_sample = norm_counts.divide(raw_data.sum(axis=0), axis=1)
    # 1e9 = "per kilobase, per million reads" scaling combined
    return per_sample * 1e9
def tpm_from_rpkm(rpkm):
    """
    Converts RPKM-normalized values to TPM.
    Args:
        rpkm (pandas.DataFrame): RPKM-normalized expression values, genes as rows
    Returns:
        pandas.DataFrame: TPM-normalized expression values
    """
    # per-sample totals of the RPKM values; rescale each sample to 1e6
    totals = rpkm.sum(axis=0)
    return (rpkm / totals) * 1e6
import pandas as pd
import numpy as np
import editdistance
import vienna
from seq_tools import sequence, extinction_coeff
def add(df: pd.DataFrame, p5_seq: str, p3_seq: str) -> pd.DataFrame:
    """
    Return a copy of *df* with *p5_seq* prepended and *p3_seq* appended to
    every sequence; sequences are re-folded when a structure column exists.
    :param df: dataframe
    :param p5_seq: 5' sequence
    :param p3_seq: 3' sequence
    :return: new dataframe with extended sequences
    """
    result = df.copy()
    result["sequence"] = [p5_seq + s + p3_seq for s in result["sequence"]]
    if "structure" in result.columns:
        result = fold(result)
    return result
def calc_edit_distance(df: pd.DataFrame) -> float:
    """
    Average, over all sequences, of each sequence's edit distance to its
    nearest neighbour in the dataframe (0 for a single-sequence library).
    :param df: dataframe with a ``sequence`` column
    :return: the mean nearest-neighbour edit distance
    """
    if len(df) == 1:
        return 0
    seqs = list(df["sequence"])
    # 100 acts as a "no neighbour seen yet" sentinel upper bound
    nearest = [100] * len(seqs)
    for i in range(len(seqs)):
        for j in range(i + 1, len(seqs)):
            dist = editdistance.eval(seqs[i], seqs[j])
            nearest[i] = min(nearest[i], dist)
            nearest[j] = min(nearest[j], dist)
    return np.mean(nearest)
def determine_ntype(df: pd.DataFrame) -> str:
    """
    Guess whether the sequences in *df* are RNA or DNA (T implies DNA,
    U implies RNA). Mixed calls on long sequences (mean length > 10) raise;
    with no evidence of RNA the answer defaults to DNA.
    :param df: dataframe with a ``sequence`` column
    :return: "RNA" or "DNA"
    """
    calls = []
    for seq in df["sequence"]:
        if seq.count("T") > 0:
            calls.append("DNA")
        elif seq.count("U") > 0:
            calls.append("RNA")
        else:
            calls.append("UNCERTAIN")
    if df["sequence"].str.len().mean() > 10:
        # only long sequences carry enough signal to call a conflict an error
        if "DNA" in calls and "RNA" in calls:
            raise ValueError("Cannot determine nucleotide type")
    if "RNA" in calls:
        return "RNA"
    return "DNA"
def fold(df: pd.DataFrame) -> pd.DataFrame:
    """
    Fold every sequence with ViennaRNA and record the dot-bracket structure,
    minimum free energy and ensemble defect.
    :param df: dataframe with a ``sequence`` column
    :return: copy of *df* with structure / mfe / ens_defect columns
    """
    def _vienna_fold(seq):
        res = vienna.fold(seq)
        return pd.Series(
            {
                "structure": res.dot_bracket,
                "mfe": res.mfe,
                "ens_defect": res.ens_defect,
            }
        )

    folded = df.copy()
    folded[["structure", "mfe", "ens_defect"]] = folded["sequence"].apply(_vienna_fold)
    return folded
def get_extinction_coeff(
    df: pd.DataFrame, ntype: str, double_stranded: bool
) -> pd.DataFrame:
    """
    Compute the extinction coefficient of every sequence. For RNA with a
    known secondary structure the structure-aware formula is used.
    :param df: dataframe with a ``sequence`` column
    :param ntype: nucleotide type, RNA or DNA
    :param double_stranded: is double stranded?
    :return: copy of *df* with an ``extinction_coeff`` column
    """
    result = df.copy()
    if ntype == "RNA" and "structure" in result.columns:
        # structure-aware path: pass the dot-bracket along with the sequence
        result["extinction_coeff"] = result.apply(
            lambda row: extinction_coeff.get_extinction_coeff(
                row["sequence"], ntype, double_stranded, row["structure"]
            ),
            axis=1,
        )
    else:
        result["extinction_coeff"] = [
            extinction_coeff.get_extinction_coeff(s, ntype, double_stranded)
            for s in result["sequence"]
        ]
    return result
def get_length(df: pd.DataFrame) -> pd.DataFrame:
    """
    Record the length of every sequence.
    :param df: dataframe with a ``sequence`` column
    :return: copy of *df* with a ``length`` column
    """
    result = df.copy()
    result["length"] = result["sequence"].str.len()
    return result
def get_molecular_weight(
    df: pd.DataFrame, ntype: str, double_stranded: bool
) -> pd.DataFrame:
    """
    Compute the molecular weight of every sequence.
    :param df: pandas data frame with a ``sequence`` column
    :param ntype: nucleotide type, RNA or DNA
    :param double_stranded: is double stranded?
    :return: copy of *df* with an ``mw`` column
    """
    result = df.copy()
    result["mw"] = [
        sequence.get_molecular_weight(s, ntype, double_stranded)
        for s in result["sequence"]
    ]
    return result
def get_default_names(df: pd.DataFrame) -> pd.DataFrame:
    """
    Assign default names ("seq_<index>") to a dataframe that has none.
    :param df: dataframe
    :return: copy of *df* with a ``name`` column
    :raises ValueError: if a ``name`` column already exists
    """
    if "name" in df.columns:
        raise ValueError("Dataframe already has names")
    named = df.copy()
    named["name"] = [f"seq_{i}" for i in named.index]
    return named
def get_reverse_complement(df: pd.DataFrame, ntype: str) -> pd.DataFrame:
    """
    Compute the reverse complement of every sequence.
    :param df: dataframe with a ``sequence`` column
    :param ntype: nucleotide type, RNA or DNA
    :return: copy of *df* with a ``rev_comp`` column
    """
    result = df.copy()
    result["rev_comp"] = [
        sequence.get_reverse_complement(s, ntype) for s in result["sequence"]
    ]
    return result
def has_5p_sequence(df: pd.DataFrame, p5_seq: str) -> bool:
    """
    True when every sequence in *df* begins with *p5_seq*.
    :param df: dataframe with a ``sequence`` column
    :param p5_seq: expected 5' prefix
    :return: True if the 5' sequence is present in all rows
    """
    starts = df["sequence"].str.startswith(p5_seq)
    return starts.all()
def has_3p_sequence(df: pd.DataFrame, p3_seq: str) -> bool:
    """
    True when every sequence in *df* ends with *p3_seq*.
    :param df: dataframe with a ``sequence`` column
    :param p3_seq: expected 3' suffix
    :return: True if the 3' sequence is present in all rows
    """
    ends = df["sequence"].str.endswith(p3_seq)
    return ends.all()
def has_sequence(df: pd.DataFrame, seq: str) -> bool:
    """
    True when every sequence in *df* contains *seq* (regex semantics, as
    with pandas ``str.contains``).
    :param df: dataframe with a ``sequence`` column
    :param seq: subsequence / pattern to look for
    :return: True if the sequence is present in all rows
    """
    hits = df["sequence"].str.contains(seq)
    return hits.all()
def has_t7_promoter(df: pd.DataFrame) -> bool:
    """
    True when every sequence in *df* starts with the T7 promoter.
    :param df: dataframe with a ``sequence`` column
    :return: True if all rows carry the promoter prefix
    """
    n_with_promoter = int(
        df["sequence"].str.startswith("TTCTAATACGACTCACTATA").sum()
    )
    return n_with_promoter == len(df)
def to_dna(df: pd.DataFrame) -> pd.DataFrame:
    """
    Convert every sequence in *df* to DNA; an existing structure column is
    dropped because it no longer describes the converted sequences.
    :param df: dataframe with a ``sequence`` column
    :return: copy of *df* with DNA sequences
    """
    converted = df.copy()
    converted["sequence"] = converted["sequence"].map(sequence.to_dna)
    if "structure" in converted.columns:
        converted = converted.drop(columns=["structure"])
    return converted
def to_dna_template(df: pd.DataFrame) -> pd.DataFrame:
    """
    Convert every sequence in *df* to a DNA template (T7 promoter + DNA);
    an existing structure column is dropped.
    :param df: dataframe with a ``sequence`` column
    :return: copy of *df* with DNA template sequences
    """
    converted = df.copy()
    converted["sequence"] = converted["sequence"].map(sequence.to_dna_template)
    if "structure" in converted.columns:
        converted = converted.drop(columns=["structure"])
    return converted
def to_fasta(df: pd.DataFrame, filename: str) -> None:
    """
    Write the sequences in *df* to a FASTA file (">name" then sequence per
    record).
    :param df: dataframe with ``name`` and ``sequence`` columns
    :param filename: fasta file path
    :return: None
    """
    records = [f">{row['name']}\n{row['sequence']}\n" for _, row in df.iterrows()]
    with open(filename, "w", encoding="utf-8") as f:
        f.write("".join(records))
def to_opool(df: pd.DataFrame, name: str, filename: str) -> None:
    """
    writes the sequences in the dataframe to an opool (Excel) file
    :param df: dataframe with a ``sequence`` column
    :param name: opool name, applied to every row
    :param filename: opool file path (.xlsx)
    :return: None
    """
    df = df.copy()
    df["name"] = name
    df = df[["name", "sequence"]]
    # BUG FIX: pandas has no DataFrame.to_xlsx method; the Excel writer is
    # DataFrame.to_excel, so the old call raised AttributeError
    df.to_excel(filename, index=False)
def to_rna(df: pd.DataFrame) -> pd.DataFrame:
    """
    Convert every sequence in *df* to RNA.
    :param df: dataframe with a ``sequence`` column
    :return: copy of *df* with RNA sequences
    """
    converted = df.copy()
    converted["sequence"] = converted["sequence"].map(sequence.to_rna)
    return converted
def trim(df, p5_length, p3_length) -> pd.DataFrame:
    """
    Trim characters off both ends of every sequence (and structure, when a
    structure column exists). A length of 0 means "do not trim that side".
    :param df: dataframe
    :param p5_length: number of characters to trim from the 5' end
    :param p3_length: number of characters to trim from the 3' end
    :return: copy of *df* with trimmed sequences
    """
    trimmed = df.copy()
    # None disables slicing on that side; -p3_length slices from the end
    start = p5_length if p5_length != 0 else None
    stop = -p3_length if p3_length != 0 else None
    trimmed["sequence"] = trimmed["sequence"].str.slice(start, stop)
    if "structure" in trimmed.columns:
        trimmed["structure"] = trimmed["structure"].str.slice(start, stop)
    return trimmed
def transcribe(df: pd.DataFrame, ignore_missing_t7=False) -> pd.DataFrame:
    """
    Transcribe DNA templates to RNA: strip the 20-nt T7 promoter, convert to
    RNA and fold the result.
    :param df: dataframe with DNA template sequences
    :param ignore_missing_t7: skip the promoter check and promoter trimming
    :return: dataframe with folded RNA sequences
    """
    if not ignore_missing_t7:
        if not has_t7_promoter(df):
            raise ValueError("not all sequences start with T7 promoter")
        # remove the 20-nt T7 promoter from the 5' end
        df = trim(df, 20, 0)
    return fold(to_rna(df))
from seq_tools import dot_bracket, sequence
def get_extinction_coeff(seq, ntype, double_stranded=False, structure=None):
    """
    get the extinction coefficient for a sequence
    :param seq: sequence
    :param ntype: DNA or RNA
    :param double_stranded: is double stranded? (DNA only; ignored for RNA)
    :param structure: structure of the sequence in dot bracket notation
        (RNA only; enables the hypochromicity correction)
    :return: float
    """
    # nearest-neighbour dinucleotide extinction coefficients for DNA
    dna_di = {
        "AA": 27400,
        "AC": 21200,
        "AG": 25000,
        "AT": 22800,
        "CA": 21200,
        "CC": 14600,
        "CG": 18000,
        "CT": 15200,
        "GA": 25200,
        "GC": 17600,
        "GG": 21600,
        "GT": 20000,
        "TA": 23400,
        "TC": 16200,
        "TG": 19000,
        "TT": 16800,
    }
    # mononucleotide extinction coefficients for DNA
    dna_mono = {"A": 15400, "C": 7400, "G": 11500, "T": 8700}
    # nearest-neighbour dinucleotide extinction coefficients for RNA
    rna_di = {
        "AA": 27400,
        "AC": 21200,
        "AG": 25000,
        "AU": 24000,
        "CA": 21200,
        "CC": 14600,
        "CG": 18000,
        "CU": 16200,
        "GA": 25200,
        "GC": 17600,
        "GG": 21600,
        "GU": 21200,
        "UA": 24600,
        "UC": 17200,
        "UG": 20000,
        "UU": 19600,
    }
    # mononucleotide extinction coefficients for RNA
    rna_mono = {"A": 15400, "C": 7400, "G": 11500, "U": 9900}
    def get_mono_contribution(seq, ntype) -> float:
        """
        get the contribution of the mononucleotides to the extinction coefficient
        (only interior nucleotides, seq[1:-1], per the nearest-neighbour model)
        :param seq: sequence
        :param ntype: type of nucleic acid (DNA or RNA)
        :return: float
        """
        total = 0
        for nuc in seq[1:-1]:
            if ntype == "RNA":
                total += rna_mono[nuc]
            else:
                total += dna_mono[nuc]
        return total
    def get_di_contribution(seq, ntype) -> float:
        """
        get the contribution of the dinucleotides to the extinction coefficient
        (every overlapping pair along the strand)
        :param seq: sequence
        :param ntype: DNA or RNA
        :return: float
        """
        total = 0
        for i in range(0, len(seq) - 1):
            distep = seq[i] + seq[i + 1]
            if ntype == "RNA":
                total += rna_di[distep]
            else:
                total += dna_di[distep]
        return total
    def get_hypochromicity_dna(seq) -> float:
        """
        get the hypochromicity of a DNA sequence, weighted by its AT fraction
        :param seq: sequence
        :return: float
        """
        frac_at = 0
        for nuc in seq:
            if nuc in ("A", "T"):
                frac_at += 1
        frac_at /= len(seq)
        return frac_at * 0.287 + (1 - frac_at) * 0.059
    def get_hypochromicity_rna(seq, secstruct) -> float:
        """
        get the hypochromicity of an RNA sequence from its paired AU/GC
        fractions
        :param seq: sequence
        :param secstruct: secondary structure in dot-bracket notation
        :return: float
        """
        # NOTE(review): assumes dotbracket_to_pairtable returns 0-indexed
        # partner positions with -1 for unpaired bases — confirm against the
        # dot_bracket module; each pair contributes from both of its ends
        pairtable = dot_bracket.dotbracket_to_pairtable(secstruct)
        frac_au = 0
        frac_gc = 0
        for cur_pos in pairtable:
            if cur_pos == -1:
                continue
            pos = cur_pos
            pos2 = pairtable[pos]
            name = seq[pos] + seq[pos2]
            if name in ("AU", "UA"):
                frac_au += 1
            if name in ("GC", "CG"):
                frac_gc += 1
        frac_au /= len(seq)
        frac_gc /= len(seq)
        return frac_au * 0.26 + frac_gc * 0.059
    def get_coefficient_dna(seq, double_stranded=False) -> float:
        """
        get the extinction coefficient for a DNA sequence; for duplexes both
        strands are summed and reduced by the hypochromicity factor
        :param seq:
        :param double_stranded:
        :return: float
        """
        mono_con = get_mono_contribution(seq, "DNA")
        di_con = get_di_contribution(seq, "DNA")
        strand1 = di_con - mono_con
        if not double_stranded:
            return strand1
        rev_comp = sequence.get_reverse_complement(seq, "DNA")
        strand2 = get_di_contribution(rev_comp, "DNA") - get_mono_contribution(
            rev_comp, "DNA"
        )
        hc_val = get_hypochromicity_dna(seq)
        final = round((1 - hc_val) * (strand1 + strand2))
        return final
    def get_coefficient_rna(seq, secstruct=None):
        # single-strand RNA coefficient; a supplied structure applies the
        # hypochromicity correction for the paired bases
        mono_cont = get_mono_contribution(seq, "RNA")
        di_cont = get_di_contribution(seq, "RNA")
        if secstruct is not None:
            hc_val = get_hypochromicity_rna(seq, secstruct)
            return round((1 - hc_val) * (di_cont - mono_cont))
        return di_cont - mono_cont
    if ntype == "RNA":
        return get_coefficient_rna(seq, structure)
    return get_coefficient_dna(seq, double_stranded)
import re
import itertools
from dataclasses import dataclass
@dataclass(frozen=True, order=True)
class SequenceStructure:
    """
    A class to hold the parameters for a structure: a sequence and its
    secondary structure in dot-bracket notation, always of equal length.
    Multi-strand structures separate strands with "&" in both fields.
    """

    # nucleotide sequence (strands separated by "&")
    sequence: str
    # dot-bracket secondary structure, aligned 1:1 with `sequence`
    structure: str
    def __post_init__(self):
        """
        check that the sequence and structure are the same length
        """
        if len(self.sequence) != len(self.structure):
            raise ValueError(
                f"sequence and structure are not the same length:"
                f" {self.sequence} {self.structure}"
            )
    def __add__(self, other):
        """
        add two StructParams objects by concatenating both fields
        """
        return SequenceStructure(
            self.sequence + other.sequence, self.structure + other.structure
        )
    def __len__(self):
        """
        return the length of the sequence
        """
        return len(self.sequence)
    def __getitem__(self, item):
        """
        return the attribute of the sequence; slicing produces a new
        SequenceStructure over the same span of both fields
        """
        return SequenceStructure(self.sequence[item], self.structure[item])
    def to_dict(self):
        """
        return a dictionary representation of the object
        """
        return {"sequence": self.sequence, "structure": self.structure}
    def to_comma_deliminted(self):
        """
        return a csv representation of the object
        (NOTE: method name misspells "delimited" but is kept for API
        compatibility)
        """
        return f"{self.sequence},{self.structure}"
    def split_strands(self):
        """
        split both sequence and structure over `&` and return a list of
        of StructParams objects
        :return: list of strands
        """
        seqs = self.sequence.split("&")
        structs = self.structure.split("&")
        return [SequenceStructure(s, st) for s, st in zip(seqs, structs)]
    def insert(self, pos, other):
        """
        insert a StructParams object at a given position, returning a new
        object (this class is frozen)
        """
        seq = self.sequence[:pos] + other.sequence + self.sequence[pos:]
        struct = self.structure[:pos] + other.structure + self.structure[pos:]
        return SequenceStructure(seq, struct)
    def join(self, other):
        """
        join two StructParams objects with a "&" seperating each strand
        """
        return SequenceStructure(
            self.sequence + "&" + other.sequence,
            self.structure + "&" + other.structure,
        )
    def replace(self, other, pos):
        """
        Replaces the sequence and structure of this object with the sequence and
        structure in the supplied SequenceStructure object at the specified position.
        Returns a new object; the overwritten span has the length of *other*.
        :param other: The SequenceStructure object containing the new sequence and
            structure.
        :type other: SequenceStructure
        :param pos: The position at which to replace the sequence and structure.
        :type pos: int
        """
        if pos < 0 or pos > len(self.sequence):
            raise ValueError(f"Invalid position: {pos}")
        sequence = (
            self.sequence[:pos]
            + other.sequence
            + self.sequence[pos + len(other.sequence) :]
        )
        structure = (
            self.structure[:pos]
            + other.structure
            + self.structure[pos + len(other.structure) :]
        )
        return SequenceStructure(sequence, structure)
def find(struct: SequenceStructure, sub: SequenceStructure, start=None, end=None):
    """
    find the position of a substructure in a structure
    :param struct: the structure to search
    :param sub: the substructure to search for (strands separated by "&";
        "N" in the sequence matches any non-whitespace character)
    :param start: the start position to search from
    :param end: the end position to search to
    :return: list of candidate matches, one [begin, end) range per strand
    """
    if start is None:
        start = 0
    if end is None:
        end = len(struct)
    struct = struct[start:end]
    strands = sub.split_strands()
    strand_matches = []
    for strand in strands:
        # lookahead patterns "(?=(...))" produce zero-width matches, which
        # allows overlapping occurrences to be found
        pattern_seq = re.compile(
            (r"(?=(" + strand.sequence.replace("N", r"\S") + r"))")
        )
        pattern_ss = re.compile(
            (
                r"(?=("
                + strand.structure.replace("(", r"\(")
                .replace(")", r"\)")
                .replace(".", r"\.")
                + r"))"
            )
        )
        # for a pure lookahead m.end() == m.start(), so m.end() +
        # len(m.group(1)) is the exclusive end of the captured text
        matches_seq = [
            str(m.start() + start) + "-" + str(m.end() + len(m.group(1)) + start)
            for m in pattern_seq.finditer(struct.sequence)
        ]
        matches_ss = [
            str(m.start() + start) + "-" + str(m.end() + len(m.group(1)) + start)
            for m in pattern_ss.finditer(struct.structure)
        ]
        # keep only the spans where sequence AND structure both matched
        matches = list(set(matches_seq).intersection(set(matches_ss)))
        # split each string in inter into a list of ints
        matches = [list(map(int, i.split("-"))) for i in matches]
        strand_matches.append(matches)
    # every combination of one candidate span per strand
    all_matches = list(itertools.product(*strand_matches))
    return all_matches
def get_max_stretch(seq) -> int:
    """
    computes max stretch of the same letter in string

    :param seq: string to scan
    :return: length of the longest run of identical characters (0 for an
        empty string); annotation corrected from float — the function has
        always returned an int
    """
    from itertools import groupby

    # groupby yields one group per run of equal characters; default=0
    # covers the empty-input case
    return max((sum(1 for _ in run) for _, run in groupby(seq)), default=0)
def get_molecular_weight(seq, ntype="DNA", double_stranded=False) -> float:
    """
    returns the molecular weight of a sequence
    :param seq: the sequence
    :param ntype: type of sequence (DNA or RNA)
    :param double_stranded: is the sequence double stranded?
    :return: float
    """
    rna_mw = {"A": 347.2, "C": 323.2, "G": 363.2, "U": 324.2}
    dna_mw = {"A": 331.2, "C": 307.2, "G": 347.2, "T": 322.2}

    def compute_mw(seq, ntype):
        # sum the per-nucleotide weights for the given nucleic acid type
        molecular_weight = 0
        for nuc in seq:
            if ntype == "RNA":
                molecular_weight += rna_mw[nuc]
            else:
                molecular_weight += dna_mw[nuc]
        return molecular_weight

    # enforce RNA or DNA typing
    if ntype == "RNA":
        seq = to_rna(seq)
    else:
        seq = to_dna(seq)
    molecular_weight = compute_mw(seq, ntype)
    if double_stranded:
        # BUG FIX: previously passed the builtin `type` instead of `ntype`,
        # which made the complement a DNA string and crashed with a KeyError
        # (rna_mw["T"]) for double-stranded RNA
        rev_comp = get_reverse_complement(seq, ntype)
        molecular_weight += compute_mw(rev_comp, ntype)
    return molecular_weight
def get_reverse_complement(seq, ntype="DNA") -> str:
    """
    returns the reverse complement of a sequence
    :param seq: sequence to reverse complement
    :param ntype: type of sequence (DNA or RNA)
    :return: reverse complement of sequence
    """
    # normalize the alphabet first, then pick the matching pairing table
    if ntype == "RNA":
        seq = seq.replace("T", "U")
        pairing = {"A": "U", "U": "A", "G": "C", "C": "G"}
    else:
        seq = seq.replace("U", "T")
        pairing = {"A": "T", "T": "A", "G": "C", "C": "G"}
    # complement each base while walking the sequence backwards
    return "".join(pairing[nuc] for nuc in reversed(seq))
def to_dna(seq) -> str:
    """
    Convert an RNA sequence to DNA by swapping U for T.
    :param seq: RNA sequence
    :return: DNA sequence
    """
    return "".join("T" if nuc == "U" else nuc for nuc in seq)
def to_dna_template(seq) -> str:
    """
    Convert an RNA sequence to a DNA template with a leading T7 promoter.
    :param seq: RNA sequence
    :return: DNA template sequence
    """
    # T7 promoter prefix followed by the DNA form of the sequence
    return "TTCTAATACGACTCACTATA" + seq.replace("U", "T")
def to_rna(seq) -> str:
    """
    Convert a DNA sequence to RNA by swapping T for U.
    :param seq: DNA sequence
    :return: RNA sequence
    """
    return "".join("U" if nuc == "T" else nuc for nuc in seq)
import os
import click
import tabulate
import pandas as pd
from seq_tools import sequence, dataframe
from seq_tools.logger import setup_applevel_logger, get_logger
# show full (untruncated) sequence strings when dataframes are logged
pd.set_option("display.max_colwidth", None)
def validate_dataframe(df) -> None:
    """
    Ensure *df* has a ``sequence`` column; when no ``name`` column exists,
    default names ("seq_<i>") are added in place.
    :param df: dataframe with sequences
    :return: None
    :raises ValueError: if the ``sequence`` column is missing
    """
    cols = set(df.columns)
    if "sequence" not in cols:
        raise ValueError("sequence column not found")
    if "name" not in cols:
        df["name"] = ["seq_" + str(i) for i in range(len(df))]
def get_input_dataframe(data) -> pd.DataFrame:
    """
    Build a dataframe from *data*, which is either a csv path or a literal
    sequence string.
    :param data: can be a seqeunce or a file
    :return: pd.DataFrame with name/sequence columns
    """
    log = get_logger("get_input_dataframe")
    if not os.path.isfile(data):
        log.info(f"reading sequence {data}")
        frame = pd.DataFrame([["seq", data]], columns=["name", "sequence"])
    else:
        log.info(f"reading file {data}")
        frame = pd.read_csv(data)
        log.info(f"csv file contains {len(frame)} sequences")
    validate_dataframe(frame)
    return frame
def get_ntype(df, ntype) -> str:
    """
    Determine (or force) the nucleotide type of *df*'s sequences.
    :param df: dataframe with sequences; mutated in place when forcing
    :param ntype: requested nucleotide type, or None to auto-detect
    :return: str, "RNA" or "DNA"
    """
    log = get_logger("handle_ntype")
    df_ntype = dataframe.determine_ntype(df)
    log.info(f"determining nucleic acid type: {df_ntype}")
    # enforce ntype
    # BUG FIX: dataframe.to_dna / to_rna return a copy; the previous code
    # discarded the result, so the "forcing" below was a silent no-op. The
    # converted sequences are now written back into the caller's dataframe.
    if ntype == "DNA":
        log.info("forcing sequences to be DNA")
        df["sequence"] = dataframe.to_dna(df)["sequence"]
        return ntype
    if ntype == "RNA":
        log.info("forcing sequences to be RNA")
        df["sequence"] = dataframe.to_rna(df)["sequence"]
        return ntype
    return df_ntype
def handle_output(df, output) -> None:
    """
    Log the result; single-row results are only logged, multi-row results
    are also tabulated (first 100 rows) and written to *output* as csv.
    :param df: dataframe with sequences
    :param output: output file
    :return: None
    """
    log = get_logger("handle_output")
    if len(df) == 1:
        log.info(f"output->\n{df.iloc[0]}")
        return
    log.info(f"output csv: {output}")
    preview = df[0:100] if len(df) > 100 else df
    log.info("\n" + tabulate.tabulate(preview, headers="keys", tablefmt="simple"))
    df.to_csv(output, index=False)
@click.group()
def cli():
    """
    a set of scripts to manipulate sequences in csv files
    """
@cli.command(help="add a sequence to 5' and/or 3'")
@click.argument("data")
@click.option("-p5", "--p5-seq", default="")
@click.option("-p3", "--p3-seq", default="")
@click.option("-o", "--output", help="output file", default="output.csv")
def add(data, p5_seq, p3_seq, output):
    """
    Appends *p5_seq* / *p3_seq* to every sequence and writes the result.
    :param data: can be a sequence or a file
    :param p5_seq: sequence to add to 5'
    :param p3_seq: sequence to add to 3'
    :param output: output file
    """
    setup_applevel_logger()
    handle_output(dataframe.add(get_input_dataframe(data), p5_seq, p3_seq), output)
@cli.command(help="calculate the edit distance of a library")
@click.argument("data", type=click.Path(exists=True))
def edit_distance(data):
    """
    Reports the average nearest-neighbour edit distance of a library.
    :param data: can be a sequence or a file
    """
    setup_applevel_logger()
    library = pd.read_csv(data)
    log = get_logger("edit_distance")
    log.info(f"edit distance: {dataframe.calc_edit_distance(library)}")
@cli.command(help="calculate the extinction coefficient for each sequence")
@click.argument("data")
@click.option(
    "-nt",
    "--ntype",
    default=None,
    type=click.Choice([None, "RNA", "DNA"]),
    help="type of nucleic acid",
)
@click.option("-ds", "--double-stranded", is_flag=True)
@click.option("-o", "--output", help="output file", default="output.csv")
def ec(data, ntype, double_stranded, output):
    """
    Calculates the extinction coefficient for each sequence.
    :param data: can be a sequence or a file
    :param ntype: type of nucleic acid
    :param double_stranded: if the sequence is double stranded
    :param output: output file
    """
    setup_applevel_logger()
    log = get_logger("extinction_coeff")
    frame = get_input_dataframe(data)
    frame = dataframe.get_extinction_coeff(frame, get_ntype(frame, ntype), double_stranded)
    handle_output(frame, output)
    if len(frame) != 1:
        log.info("avg extinction coefficient: " + str(frame["extinction_coeff"].mean()))
@cli.command(help="calculate the molecular weight for each sequence")
@click.argument("data")
@click.option(
    "-nt",
    "--ntype",
    default=None,
    type=click.Choice([None, "RNA", "DNA"]),
    help="type of nucleic acid",
)
@click.option("-ds", "--double-stranded", is_flag=True)
@click.option("-o", "--output", help="output file", default="output.csv")
def mw(data, ntype, double_stranded, output):
    """
    calculates the molecular weight for each sequence
    :param data: can be a sequence or a file
    :param ntype: type of nucleic acid
    :param double_stranded: if the sequence is double stranded
    :param output: output file
    :return: None
    """
    setup_applevel_logger()
    df = get_input_dataframe(data)
    ntype = get_ntype(df, ntype)
    df = dataframe.get_molecular_weight(df, ntype, double_stranded)
    handle_output(df, output)
    log = get_logger("molecular_weight")
    if len(df) != 1:
        # BUG FIX: dataframe.get_molecular_weight stores its result in the
        # "mw" column, not "molecular_weight" — the old lookup raised KeyError
        log.info("avg molecular weight: " + str(df["mw"].mean()))
@cli.command(help="calculate reverse complement for each sequence")
@click.argument("data")
@click.option(
    "-nt",
    "--ntype",
    default=None,
    type=click.Choice([None, "RNA", "DNA"]),
    help="type of nucleic acid",
)
@click.option("-o", "--output", help="output file", default="output.csv")
def rc(data, ntype, output):
    """
    Calculates the reverse complement for each sequence.
    :param data: can be a sequence or a file
    :param output: output file
    """
    setup_applevel_logger()
    frame = get_input_dataframe(data)
    handle_output(dataframe.get_reverse_complement(frame, get_ntype(frame, ntype)), output)
@cli.command(help="fold rna sequences")
@click.argument("data")
@click.option("-o", "--output", help="output file", default="output.csv")
def fold(data, output):
    """
    Folds RNA sequences with ViennaRNA.
    :param data: can be a sequence or a file
    :param output: output file
    """
    setup_applevel_logger()
    handle_output(dataframe.fold(get_input_dataframe(data)), output)
@cli.command(help="checks to see if p5 is present in all sequences")
@click.argument("data")
@click.option("-p5", "--p5-seq", help="p5 sequence", required=True)
@click.option(
    "-nt",
    "--ntype",
    default=None,
    type=click.Choice([None, "RNA", "DNA"]),
    help="type of nucleic acid",
)
def has_p5(data, p5_seq, ntype):
    """
    Checks whether every sequence starts with the supplied p5 sequence.
    :param data: can be a sequence or a file
    :param p5_seq: p5 sequence
    :param ntype: type of nucleic acid
    """
    setup_applevel_logger()
    frame = get_input_dataframe(data)
    get_ntype(frame, ntype)
    log = get_logger("has_p5")
    if dataframe.has_5p_sequence(frame, p5_seq):
        log.info("p5 sequence is present in all sequences")
    else:
        log.info("p5 sequence is not present in all sequences")
@cli.command(help="checks to see if p3 is present in all sequences")
@click.argument("data")
@click.option("-p3", "--p3-seq", help="p3 sequence", required=True)
@click.option(
    "-nt",
    "--ntype",
    default=None,
    type=click.Choice([None, "RNA", "DNA"]),
    help="type of nucleic acid",
)
def has_p3(data, p3_seq, ntype):
    """
    checks if a sequence has a p3 sequence
    :param data: can be a sequence or a file
    :param p3_seq: p3 sequence
    :param ntype: type of nucleic acid
    """
    setup_applevel_logger()
    df = get_input_dataframe(data)
    get_ntype(df, ntype)
    # BUG FIX: this command checked the 5' end (has_5p_sequence); a p3
    # sequence must be checked against the 3' end with has_3p_sequence
    has_p3_seq = dataframe.has_3p_sequence(df, p3_seq)
    log = get_logger("has_p3")
    if has_p3_seq:
        log.info("p3 sequence is present in all sequences")
    else:
        log.info("p3 sequence is not present in all sequences")
@cli.command(help="convert rna sequence(s) to dna")
@click.argument("data")
@click.option("-o", "--output", help="output file", default="output.csv")
def to_dna(data, output):
    """
    Converts RNA sequence(s) to DNA.
    :param data: can be a sequence or a file
    :param output: output file
    """
    setup_applevel_logger()
    frame = get_input_dataframe(data)[["name", "sequence"]]
    handle_output(dataframe.to_dna(frame), output)
@cli.command(help="convert rna sequence(s) to dna template, includes T7 promoter")
@click.argument("data")
@click.option("-o", "--output", help="output file", default="output.csv")
def to_dna_template(data, output):
    """
    Converts RNA sequence(s) to a DNA template with a T7 promoter.
    :param data: can be a sequence or a file
    :param output: output file
    """
    setup_applevel_logger()
    frame = get_input_dataframe(data)[["name", "sequence"]]
    handle_output(dataframe.to_dna_template(frame), output)
@cli.command(help="generate fasta file from csv")
@click.argument("data")
@click.option("-o", "--output", help="output file", default="test.fasta")
def to_fasta(data, output):
    """
    Generates a fasta file from a csv.
    :param data: can be a sequence or a file
    :param output: output file
    """
    setup_applevel_logger()
    dataframe.to_fasta(get_input_dataframe(data), output)
@cli.command(help="generate oligo pool file from csv")
@click.argument("data")
@click.option("-n", "--name", help="name of the opool file", default="opool")
@click.option("-o", "--output", help="output file", default="opool.xlsx")
def to_opool(data, name, output):
    """
    Generates an opool file from a csv.
    :param data: can be a sequence or a file
    :param name: name applied to every opool row
    :param output: output file
    """
    setup_applevel_logger()
    dataframe.to_opool(get_input_dataframe(data), name, output)
@cli.command(help="convert dna sequence(s) to rna")
@click.argument("data")
@click.option("-o", "--output", help="output file", default="output.csv")
def to_rna(data, output):
    """
    Convert DNA sequence to RNA
    :param data: can be a sequence or a file
    :param output: output file
    """
    setup_applevel_logger()
    df = get_input_dataframe(data)
    df = df[["name", "sequence"]]
    # consistency: use dataframe.to_rna like the sibling to_dna command
    # (same behavior — it applies sequence.to_rna to the sequence column);
    # the help text is also corrected: this command converts DNA -> RNA
    df = dataframe.to_rna(df)
    handle_output(df, output)
@cli.command(help="trim 5'/3' ends of sequences")
@click.argument("data")
@click.option("-p5", "--p5-cut", default=0)
@click.option("-p3", "--p3-cut", default=0)
@click.option("-o", "--output", help="output file", default="output.csv")
def trim(data, p5_cut, p3_cut, output):
    """
    Trims the 5'/3' ends of sequences.
    :param data: can be a sequence or a file
    :param p5_cut: trim off 5' end
    :param p3_cut: trim off 3' end
    :param output: output file
    """
    setup_applevel_logger()
    handle_output(dataframe.trim(get_input_dataframe(data), p5_cut, p3_cut), output)
@cli.command(help="convert dna sequence(s) to rna")
@click.argument("data")
@click.option("-o", "--output", help="output file", default="output.csv")
def transcribe(data, output):
    """
    Transcribes DNA template sequence(s) to RNA (strips the T7 promoter).
    :param data: can be a sequence or a file
    :param output: output file
    """
    setup_applevel_logger()
    frame = get_input_dataframe(data)[["name", "sequence"]]
    handle_output(dataframe.transcribe(frame), output)
# pylint: disable=no-value-for-parameter
if __name__ == "__main__":
    # click reads the command-line arguments when invoked as a script
    cli()
"""Secondary structure analysis"""
import os
import tempfile
import shutil
import subprocess
from rna_tools.rna_tools_config import VARNA_JAR_NAME, VARNA_PATH
class ExceptionOpenPairsProblem(Exception):
    """Raised when a dot-bracket string contains unmatched opening brackets."""
    pass
def draw_ss(title, seq, ss, img_out, resolution=4, verbose=False):
    """Draw Secondary Structure using VARNA (you need correct configuration for this).

    If everything is OK, return None, if an error (=exception) return stderr.

    Usage::

        >>> seq = 'GGAAACC'
        >>> ss = '((...))'
        >>> img_out = 'output/demo.png'
        >>> draw_ss('rna', seq, ss, img_out)
        >>> print('Made %s' % img_out)
        Made output/demo.png

    .. image:: ../../rna_tools/output/demo.png
       :scale: 25 %

    Can be used with http://geekbook.readthedocs.io/en/latest/rna.html"""
    curr = os.getcwd()
    os.chdir(VARNA_PATH)  # VARNAv3-93-src')
    if verbose:
        print(VARNA_PATH)
    t = tempfile.NamedTemporaryFile(delete=False)
    t.close()  # only a unique path is needed; VARNA writes the file itself
    png = t.name + '.png'
    cmd = 'java -cp ' + VARNA_JAR_NAME + ' fr.orsay.lri.varna.applications.VARNAcmd -sequenceDBN ' + seq + \
        " -structureDBN '" + ss + "' -o " + png + " -title '" + \
        title + "' -resolution '" + str(resolution) + "'"
    if verbose:
        print(cmd)
    try:
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() drains both pipes; the old wait()-then-read pattern
        # can deadlock when the child fills a pipe buffer
        _, stderr = p.communicate()
        out = stderr.decode().strip()
    finally:
        # always restore the caller's working directory, even on error
        os.chdir(curr)
    if out.find('Exception') > -1:
        return out
    if verbose:
        print(png)
    shutil.move(png, img_out)
def parse_vienna_to_pairs(ss, remove_gaps_in_ss=False):
    """Parse Vienna (dot-bracket notation) to get pairs.

    Args:
       ss (str): secondary structure in Vienna (dot-bracket notation) notation
       remove_gaps_in_ss (bool): remove - from ss or not, designed for DCA (tpp case
                                 ``ss = "(((((((((.((((.(((.....))))))......------)....."``

    works with pk of the first level, ``[[]]``

    Raises:
        ExceptionOpenPairsProblem: on unbalanced brackets (both too many
        open and -- unlike the original, which died with IndexError --
        too many closing brackets).

    Returns:
        list of two lists: (pairs, pairs_pk)

    Examples::

        >>> parse_vienna_to_pairs('((..))')
        ([[1, 6], [2, 5]], [])

        >>> parse_vienna_to_pairs('(([[))]]')
        ([[1, 6], [2, 5]], [[3, 8], [4, 7]])

        >>> parse_vienna_to_pairs('((--))')
        ([[1, 6], [2, 5]], [])

        >>> parse_vienna_to_pairs('((--))', remove_gaps_in_ss=True)
        ([[1, 4], [2, 3]], [])

        >>> parse_vienna_to_pairs('((((......')
        Traceback (most recent call last):
          ...
        ExceptionOpenPairsProblem: Too many open pairs (()) in structure
    """
    if remove_gaps_in_ss:
        ss = ss.replace('-', '')
    stack = []        # positions of unmatched '('
    pairs = []
    stack_pk = []     # positions of unmatched '[' (first-level pseudoknots)
    pairs_pk = []
    for i, char in enumerate(ss):
        if char == '(':
            stack.append(i + 1)  # 1-based numbering
        elif char == ')':
            if not stack:
                raise ExceptionOpenPairsProblem('Too many closed pairs (()) in structure')
            pairs.append([stack.pop(), i + 1])
        elif char == '[':
            stack_pk.append(i + 1)
        elif char == ']':
            if not stack_pk:
                raise ExceptionOpenPairsProblem('Too many closed pairs [[]] in structure')
            pairs_pk.append([stack_pk.pop(), i + 1])
    if stack:
        raise ExceptionOpenPairsProblem('Too many open pairs (()) in structure')
    if stack_pk:
        raise ExceptionOpenPairsProblem('Too many open pairs [[]] in structure')
    pairs.sort()
    pairs_pk.sort()
    return pairs, pairs_pk
# main
if __name__ == '__main__':
import doctest
doctest.testmod() | /rna_tools-3.13.7-py3-none-any.whl/rna_tools/SecondaryStructure.py | 0.546738 | 0.350588 | SecondaryStructure.py | pypi |
r"""rna_rosetta_run.py - prepare & run ROSETTA simulations
Based on C. Y. Cheng, F. C. Chou, and R. Das, Modeling complex RNA tertiary folds with Rosetta, 1st ed., vol. 553. Elsevier Inc., 2015.
http: // www.sciencedirect.com / science / article / pii / S0076687914000524
The script makes (1) a folder for your job, with seq.fa and ss.fa; the input file is copied as input.fa into the folder, (2) makes helices, (3) prepares rosetta input files, (4) sends jobs to the cluster.
The header is taken from the fasta file (``>header``), not from the filename of your Fasta file.
I discovered this::
qstat -xml | tr '\n' ' ' | sed 's#<job_list[^>]*>#\n#g' \
> | sed 's#<[^>]*>##g' | grep " " | column -t
(https://stackoverflow.com/questions/26104116/qstat-and-long-job-names) so there is now need to shorted my job ids.
Helix
-------------------------------------------------------
Run::
rna_rosetta_run.py -i -e -r -g -c 200 cp20.fa
`-i`::
# prepare a folder for a run
>cp20
AUUAUCAAGAAUCUCAAAGAGAGAUAGCAACCUGCAAUAACGAGCAAGGUGCUAAAAUAGAUAAGCCAAAUUCAAUUGGAAAAAAUGUUAA
.(((((....(((((.....)))))(((..(((((..[[[[..)).))).)))......))))).((((......)))).......]]]].
[peyote2] ~ rna_rosetta_run.py -i cp20.fa
run rosetta for:
cp20
AUUAUCAAGAAUCUCAAAGAGAGAUAGCAACCUGCAAUAACGAGCAAGGUGCUAAAAUAGAUAAGCCAAAUUCAAUUGGAAAAAAUGUUAA
.(((((....(((((.....)))))(((..(((((..[[[[..)).))).)))......))))).((((......)))).......]]]].
/home / magnus // cp20 / created
Seq & ss created
Troubleshooting.
If one of the helices is missing you will get::
IOError: [Errno 2] No such file or directory: 'helix1.out'
rosetta_submit.py README_FARFAR o 500 100 taf
Could not find: README_FARFAR
and the problem was a1 and g8 pairing::
outputting command line to: helix0.RUN # previous helix #0
Sequence: AUGG CCGG
Secstruc: (((())))
Not complementary at positions a1 and g8!
Sequence: GUGGG CCCAU
Secstruc: ((((()))))
Writing to fasta file: helix2.fasta # next helix #2
My case with a modeling of rp12
Sequence: cc gc
Secstruc: (())
Not complementary at positions 1 and 4!
edit the secondary structure, run the program with -i(init, to overwrite seq.fa, ss.fa) and then it works.
Notes::
rp17hc 6 characters
"""
from __future__ import print_function
import argparse
import textwrap
import os
import glob
import subprocess
import shutil
import math
import os
import sys
# RNA_ROSETTA_RUN_ROOT_DIR_MODELING comes from the user's local config; fall
# back to '' (main() checks for the empty value and aborts with a hint).
try:
    from rna_tools.rna_tools_config import RNA_ROSETTA_RUN_ROOT_DIR_MODELING
except Exception:  # FIX: was bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    RNA_ROSETTA_RUN_ROOT_DIR_MODELING = ''
    print('Set up rna_rosetta_run_root_dir_for_modeling in rpt_config_local.py')
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
    """argparse formatter that keeps the module docstring's raw layout and
    still appends argument defaults to the help text."""
    pass
def get_parser():
    """Build the command-line parser (the long module docstring is the help text)."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=CustomFormatter)
    parser.add_argument('-i', '--init', help='prepare _folder with seq and ss',
                        action='store_true')
    parser.add_argument('-e', '--helices', help='produce h(E)lices',
                        action='store_true')
    parser.add_argument('-r', '--rosetta', help='prepare rosetta files (still you need `go` to send jobs to a cluster)',
                        action='store_true')
    # help text is taken from go()'s docstring, so keep that docstring stable
    parser.add_argument('-g', '--go', help=go.__doc__,
                        action='store_true')
    parser.add_argument('-m', '--motif', help="include a motif file, e.g. -s E-loop_1q9a_mutated_no_flanks_renumber.pdb")
    parser.add_argument('-n', '--nstruc', help="# of structures you want to get",
                        default=10000, type=int)
    parser.add_argument('-c', '--cpus', help='# of cpus to be used', default=200,
                        type=int)
    parser.add_argument('--sandbox', help="where to run it (default: RNA_ROSETTA_RUN_ROOT_DIR_MODELING",
                        default=RNA_ROSETTA_RUN_ROOT_DIR_MODELING)
    parser.add_argument('file', help=textwrap.dedent(
        """file: \n > a04\nUAUAACAUAUAAUUUUGACAAUAUGGGUCAUAAGUUUCUACCGGAAUACCGUAAAUAUUCUGACUAUGUAUA\n((((.((((...((.((((.......)))).))........(.(((((.......))))).)..))))))))"""))
    return parser
def prepare_folder(args, header, seq, ss, path):
    """Make a folder for the job, with seq.fa and ss.fa; the input file is
    copied as input.fa into the folder.

    Args:
        args: parsed command-line arguments; args.file is copied as input.fa.
        header (str): job name (fasta header without '>').
        seq (str): RNA sequence.
        ss (str): secondary structure in dot-bracket notation; written
            lowercased, which is required when motifs are used (otherwise
            rna_denovo_setup.py rejects the motif PDB with
            "The sequence in ... does not match input sequence!!").
        path (str): target directory; must end with a path separator.
    """
    d = path
    try:
        os.mkdir(d)
        print(d, 'created')
    except OSError:
        # FIX: the original printed the garbled message
        # "<dir> created is already created"
        print(d, 'already exists')
    with open(d + "seq.fa", "w") as f:
        f.write(header + '\n')
        f.write(seq)
    with open(d + "ss.fa", "w") as f:
        f.write(ss.lower())  # lowercase needed for motif runs, see docstring
    print('Seq & ss created')
    shutil.copyfile(args.file, d + 'input.fa')
def prepare_helices():
    """Make helices (wrapper around 'helix_preassemble_setup.py').

    Must be run inside the job folder (expects seq.fa and ss.fa there).
    Collects all generated helix*.RUN commands into one HRUNS script and
    executes it through a shell.

    .. warning:: I think multiprocessing of helixX.run does not work."""
    # generate helixN.RUN command files from the sequence/structure
    cmd = 'helix_preassemble_setup.py -secstruct ss.fa -fasta seq.fa'
    os.system(cmd)
    # concatenate every *RUN file into HRUNS; each command is backgrounded with '&'
    helix_runs = glob.glob('*RUN')
    print(helix_runs)
    f = open('HRUNS', 'w')
    for h in helix_runs:
        f.write(open(h).read().strip() + ' & \n')
    f.close()
    # Running ./CMDLINES directly does not work ("source: not found"), and
    # os.system() would return before the backgrounded jobs finish.  Popen +
    # wait() is used instead; note the '&' in HRUNS still means helix jobs may
    # outlive this call -- do NOT combine -e with -r in a single invocation,
    # or rosetta setup runs before the helices exist.
    if False:
        os.system('bash HRUNS')
    else:
        p = subprocess.Popen(open('HRUNS').read(),
                             shell=True, stderr=subprocess.PIPE)
        p.wait()
        stderr = p.stderr.read().strip()
        if stderr:
            print(stderr)
def prepare_rosetta(header, cpus, motif, nstruc):
    """Prepare ROSETTA input using rna_denovo_setup.py and rosetta_submit.py.

    Args:
        header (str): job name, used as the cluster job-id prefix.
        cpus (int): number of cluster jobs; nstruc is divided by it to get the
            per-job structure count (e.g. 10k structures / 200 cpus = 50 per job).
        motif: optional motif PDB file passed to rna_denovo_setup.py via -s,
            e.g. E-loop_1q9a_mutated_no_flanks_renumber.pdb; falsy to skip.
        nstruc (int): total number of structures wanted for the full run.
    """
    # the last line of CMDLINES (written by helix setup) lists the helix
    # silent files; strip the leading '#' so it can be appended to the command
    helices = open('CMDLINES').readlines()[-1].replace('#', '')
    njobs = cpus  # 500
    nstruct = int(math.floor(nstruc / cpus))  # 20000/500 -> 40
    if motif:
        cmd_motif = ' -s ' + motif
    else:
        cmd_motif = ''
    cmd = 'rna_denovo_setup.py -fasta seq.fa -secstruct_file ss.fa -cycles 20000 -no_minimize -nstruct ' + \
        str(nstruct) + ' ' + cmd_motif + ' ' + helices
    print(cmd)
    os.system(cmd)
    # rosetta_submit.py <rosetta command file> <outdir> <# jobs> <# hours> <job name>
    # don't change the 100 (hours): values like 99999 may not run on peyote2
    cmd = 'rosetta_submit.py README_FARFAR o ' + \
        str(njobs) + ' 100 ' + header + '_'
    print(cmd)
    os.system(cmd)
def go():
    """send jobs to a cluster(run qsubMINI)"""
    # make the submission script executable, then execute it
    for shell_cmd in ('chmod +x ./qsubMINI', './qsubMINI'):
        os.system(shell_cmd)
def main():
    """Pipeline for modeling RNA"""
    args = get_parser().parse_args()
    if args.file:
        f = open(args.file)
        # job name = fasta header, with '>' and spaces stripped
        header = f.readline().replace('>', '').replace(' ', '').strip()
        seq = f.readline().strip()
        ss = f.readline().strip()
        cpus = int(args.cpus)
        print('run rosetta for:')
        print(header)
        print(seq)
        print(ss)
        if RNA_ROSETTA_RUN_ROOT_DIR_MODELING.strip() == '':
            print('Set RNA_ROSETTA_RUN_ROOT_DIR_MODELING in your rpt_config file.')
            return
        # each job gets its own folder: <sandbox>/<header>/
        path = args.sandbox + os.sep + header + \
            os.sep  # RNA_ROSETTA_RUN_ROOT_DIR_MODELING
        curr = os.getcwd()
        if args.init:
            prepare_folder(args, header, seq, ss, path)
        # the remaining steps all run inside the job folder
        try:
            os.chdir(path)
        except OSError:
            print('You have to make a folder first! use --init')
            sys.exit(1)
        if args.helices:
            prepare_helices()
        if args.rosetta:
            prepare_rosetta(header, cpus, args.motif, args.nstruc)
        if args.go:
            go()
        # restore the caller's working directory
        os.chdir(curr)
# main
if __name__ == '__main__':
main() | /rna_tools-3.13.7-py3-none-any.whl/rna_tools/tools/rna_rosetta/rna_rosetta_run.py | 0.547222 | 0.426979 | rna_rosetta_run.py | pypi |
from __future__ import print_function
import logging
from rna_tools.rna_tools_logging import logger
from rna_tools.tools.rna_calc_rmsd.lib.rmsd.calculate_rmsd import get_coordinates
from rna_tools.tools.extra_functions.select_fragment import select_pdb_fragment_pymol_style, select_pdb_fragment
from rna_tools.tools.simrna_trajectory.simrna_trajectory import SimRNATrajectory
import argparse
import re
import numpy as np
logger.setLevel(logging.DEBUG)
logger.propagate = False
class RNAFilterErrorInRestraints(Exception):
    """Raised when a restraints file yields no parsable restraint lines."""
    pass
def parse_logic(restraints_fn, verbose):
    """Parse logic of restraints.

    Args:
        restraints_fn (string): path to a file with restraints in the right
            format (see below)
        verbose (bool): be verbose?

    Format::

        # ignore comments
        (d:A1-A2 < 10.0 1)|(d:A2-A1 <= 10 1)

    Returns:
        list: restraints as tuples, e.g.
        [('A9', 'A41', '<', '10.0', '1'), ('A10', 'A16', '<=', '10', '1')]
    """
    # concatenate all non-comment lines (stripped) into one string
    with open(restraints_fn) as handle:
        text = ''.join(line.strip() for line in handle
                       if not line.startswith('#'))
    if verbose:
        logger.info(text)
    return re.findall(
        '\(d:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\<|\<\=)\s*(?P<distance>[\d\.]+)\s+(?P<weight>.+?)\)', text)
def parse_logic_newlines(restraints_fn, offset=0, verbose=False):
    """Parse logic of restraints, one restraint per line.

    Args:
        restraints_fn (string): path to a file with restraints (format below)
        offset (int): added to every residue number, to adjust the restraints
            to the numbering used in the PDB files
        verbose (bool): be verbose?

    Format::

        # ignore comments
        d:Y23-Y69 < 25.0 1
        d:Y22-Y69 < 25.0 1
        # d:<chain><resi_A>-<chain><resi_B> <operator> <distance> <weight>
        # NOTE: the trailing weight is required by the regex; lines without
        # it are silently skipped.

    Raises:
        RNAFilterErrorInRestraints: when no line could be parsed.

    Returns:
        list: one 6-element list per restraint:
        [start_with_offset, end_with_offset, raw_end, operator, distance, weight]
        e.g. [['Y23', 'Y69', 'Y69', '<', 25.0, 1.0], ...]
        NOTE(review): element [2] repeats the un-offset end id and looks like
        it was meant to be the operator; downstream code (calc_dists_for_pdbs)
        indexes [0], [1], [4] and [5] against this exact layout, so it is kept.
    """
    restraints = []
    with open(restraints_fn) as f:
        for l in f:
            if l.strip():
                if not l.startswith('#'):
                    if verbose:
                        logger.info(l)
                    restraint = re.findall(
                        'd:(?P<start>.+?)-(?P<end>.+?)\s*(?P<operator>\>\=|\=|\<|\<\=)\s*(?P<distance>[\d\.]+)\s+(?P<weight>.+?)', l)
                    if restraint:
                        # restraint[0] is (start, end, operator, distance, weight);
                        # apply the offset to the numeric part of the residue ids
                        # (assumes a single-character chain id, e.g. 'Y23')
                        start = restraint[0][0][0] + str(int(restraint[0][0][1:]) + offset)
                        end = restraint[0][1][0] + str(int(restraint[0][1][1:]) + offset)
                        restraints.append([start, end, restraint[0][1], restraint[0][2],
                                           float(restraint[0][3]), float(restraint[0][4])])
    if len(restraints) == 0:
        raise RNAFilterErrorInRestraints('Please check the format of your restraints!')
    return restraints
def get_distance(a, b):
    """Return the Euclidean distance between two coordinate vectors."""
    return np.linalg.norm(a - b)
def parse_pdb(pdb_fn, selection):
    """Extract coordinates of selected residues from a PDB file.

    Args:
        pdb_fn (str): path to a PDB file.
        selection (dict): mapping chain id -> container of residue numbers,
            e.g. {'A': [9, 41]} (as produced by select_pdb_fragment).
            A falsy selection yields an empty result.

    Returns:
        dict: {'<chain><resi>': {'<atom name>': np.array([x, y, z]), ...}, ...}
        e.g. {'A9': {'P': array([53.57, 23.268, 39.971]), "C1'": ...}}
    """
    V = {}
    with open(pdb_fn) as f:
        for line in f:
            if not line.startswith("ATOM"):
                continue
            # fixed-column PDB format: chain, residue number, atom name
            chain_id = line[21]
            resi = int(line[22:26])
            atom_name = line[12:16].strip()
            if not selection or chain_id not in selection or resi not in selection[chain_id]:
                continue
            xyz = np.asarray([line[30:38], line[38:46], line[46:54]], dtype=float)
            # setdefault replaces the original duplicated if/else insert branches
            V.setdefault(chain_id + str(resi), {})[atom_name] = xyz
    return V
def check_condition(condition, wight):
    """return True/False, score

    NOTE(review): unimplemented stub -- the body is `pass`, so it always
    returns None; `wight` looks like a typo for `weight` (kept as-is for
    interface compatibility).
    """
    pass
def get_residues(pdb_fn, restraints, verbose):
    """Collect coordinates of every residue mentioned in the restraints and
    add an 'mb' pseudo-atom (middle of the base) to each.

    Args:
        pdb_fn (str): path to a PDB file.
        restraints (list): restraint records whose [0] and [1] elements are
            residue ids like 'A9' (single-character chain id assumed).
        verbose (bool): log the computed 'mb' coordinates.

    Returns:
        dict: residue id -> {atom name: coordinates, ..., 'mb': midpoint}
    """
    residues = set()
    for h in restraints:
        a = h[0]
        b = h[1]
        a = a[0] + ':' + a[1:]  # 'A19' -> 'A:19' for select_pdb_fragment
        residues.add(a)  # A19
        b = b[0] + ':' + b[1:]
        residues.add(b)
    # set(['A:41', 'A:9', 'A:10', 'A:16'])
    selection = ','.join(residues)
    selection_parsed = select_pdb_fragment(selection, separator=",", splitting="[,:;]")
    residues = parse_pdb(pdb_fn, selection_parsed)
    # 'mb' = midpoint of the base, used as the distance reference point
    for r in residues:
        if 'N9' in residues[r]:  # purines (A, G): midpoint of N9..C6
            residues[r]['mb'] = residues[r]['N9'] - ((residues[r]['N9'] - residues[r]['C6']) / 2)
        else:  # pyrimidines (C, U): midpoint of N1..C4 (original comment wrongly said "A,G")
            residues[r]['mb'] = residues[r]['N1'] - ((residues[r]['N1'] - residues[r]['C4']) / 2)
    for r in residues:
        if verbose:
            logger.info(' '.join(['mb for ', str(r), str(residues[r]['mb'])]))
    return residues
def get_parser():
    """Build the command-line parser for restraint-based distance scoring."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-r', "--restraints_fn",
                        dest="restraints_fn",
                        required=True,
                        help="""restraints_fn:
  Format:
  (d:A9-A41 < 10.0 1)|(d:A41-A9 <= 10 1)
""")
    parser.add_argument("-v", "--verbose",
                        action="store_true", help="be verbose")
    parser.add_argument('-s', dest="structures", help='structures',
                        nargs='+')  # , type=string)
    parser.add_argument(
        '--offset', help='use offset to adjust your restraints to numbering in PDB files, ade (1y26)'
        'pdb starts with 13, so offset is -12)', default=0, type=int)
    # NOTE(review): -t is parsed but not used anywhere in this script
    parser.add_argument('-t', dest="trajectory", help="SimRNA trajectory")
    return parser
def calc_dists_for_pdbs(pdb_files, pairs, verbose):
    """Score each PDB file against a set of distance restraints.

    Args:
        pdb_files (list): paths of PDB files to evaluate.
        pairs (list): restraints as produced by parse_logic_newlines(), i.e.
            6-element lists where [0]/[1] are residue ids ('A9'), [4] is the
            distance threshold and [5] the weight.
        verbose (bool): be verbose.

    Prints one '[x]'/'[ ]' line per restraint and a per-file summary:
    <file> <weighted score> <n satisfied> out of <n restraints>.
    """
    # h = ('A1', 'A2', '<', '10.0', '1')
    for pdb_fn in pdb_files:
        score = 0
        # BUG FIX: the original referenced an undefined global `restraints`
        # here and below, which raised NameError; use the `pairs` parameter.
        residues = get_residues(pdb_fn, pairs, verbose)
        good_dists = 0
        for h in pairs:
            dist = get_distance(residues[h[0]]['mb'], residues[h[1]]['mb'])
            ok = '[ ]'
            if dist < h[4]:
                score += h[5]
                ok = '[x]'
                good_dists += 1
            print(' '.join([' d:' + h[0] + '-' + h[1] + ' ' + str(h[4]), 'measured:', str(dist), ok]))
        print(pdb_fn, score / float(len(pairs)), good_dists, 'out of', len(pairs))
# main
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    # BUG FIX: the original read `args.pairs`, which the parser never defines
    # (it defines -r/--restraints_fn), and passed it through eval(), which is
    # unsafe.  Parse the restraints file with the module's own parser instead.
    pairs = parse_logic_newlines(args.restraints_fn, offset=args.offset,
                                 verbose=args.verbose)
    calc_dists_for_pdbs(args.structures, pairs, args.verbose)
# RNA_DCA (DeCoupling Analysis)
https://marks.hms.harvard.edu/ev_rna/
A set of scripts to perform DCA analysis, authors Marcin Magnus & Gokhan Gokturk (under supervision of MM).
> Non-coding RNAs are ubiquitous, but the discovery of new RNA gene sequences far outpaces the research on the structure and functional interactions of these RNA gene sequences. We mine the evolutionary sequence record to derive precise information about the function and structure of RNAs and RNA-protein complexes. As in protein structure prediction, we use maximum entropy global probability models of sequence co-variation to infer evolutionarily constrained nucleotide-nucleotide interactions within RNA molecules and nucleotide-amino acid interactions in RNA-protein complexes. The predicted contacts allow all-atom blinded 3D structure prediction at good accuracy for several known RNA structures and RNA-protein complexes. For unknown structures, we predict contacts in 160 non-coding RNA families. Beyond 3D structure prediction, evolutionary couplings help identify important functional interactions-e.g., at switch points in riboswitches and at a complex nucleation site in HIV. Aided by increasing sequence accumulation, evolutionary coupling analysis can accelerate the discovery of functional interactions and 3D structures involving RNA. The evolutionary signals for RNA 3D structure and interactions are detected by applying new algorithms to the analysis of thousands of genomes. This approach pinpoints an important interaction for HIV genome transport and predicts 3D contacts for hundreds of RNAs with unknown structure.
>
> [1] C. Weinreb, A. J. J. Riesselman, J. B. B. Ingraham, T. Gross, C. Sander, and D. S. S. Marks, “3D RNA and Functional Interactions from Evolutionary Couplings,” Cell, pp. 1–13, 2015.
- <http://www.sciencedirect.com/science/article/pii/S0092867416303282>
- <https://marks.hms.harvard.edu/ev_rna/>
- <https://github.com/debbiemarkslab/plmc>
Input (if you download a gapped alignment from RFAM you have to replace - with . !):
$ head RF00005.afa.txt
>AB003409.1/96.167
GGGCCCAU.A.GCUCAGU...GGU...AGAGUG.C.CUCCU.UUGCAAGGAG.GAU....
....................GC..CCUG.GGU.UCG.AA..UCCCA.G.UGGGUCC.A
>AB009835.1/1.71
CAUUAGAU.G.ACUGAA....AG....CAAGUA.C.UGGUC.UCUUAAACCA.UUU....
....................AA..UAGUAAAU.UAG.CA..CUUAC.U.UCUAAUG.A
>AB013372.1/8.81
GCGCCCGU.A.GCUCAAUU..GGAU..AGAGCG.U.UUGAC.UACGGAUCAA.AAG....
....................GU..UAGG.GGU.UCG.AC..UCCUC.U.CGGGCGC.G
>AB013373.1/3754.3825
Run plmc:
➜ examples git:(master) ✗ plmc -c rf5.scores -a .ACGU RF00005.afa.txt
953 valid sequences out of 954
118 sites
Effective number of samples: 245.7 (80% identical neighborhood = 1.000 samples)
iter time cond fx -loglk ||h|| ||e||
1 0.5 41.97 21624.0 21490.3 40.4 1.1
2 0.8 80.17 20073.2 17825.3 40.5 4.7
3 1.0 41.41 19113.0 16541.6 40.5 5.1
.. which creates a file with coupling scores:
➜ examples git:(master) ✗ head rf5.scores
1 - 2 - 0 0.028189
1 - 3 - 0 -0.028578
1 - 4 - 0 0.012220
1 - 5 - 0 0.016589
1 - 6 - 0 -0.023366
where `resi_i, focus_i, resi_j, focus_j, 0, ec_score`.
Select top-scored interactions:
(pandas required, install $ pip install pandas)
➜ rna_dca git:(master) ✗ python rna_dca_select_interactions.py examples/rf5.scores
L 59
i j scores
3675 38 51 0.700352
789 7 110 0.673785
3596 37 52 0.657058
[...]
5093 58 79 0.114991
5082 58 68 0.114672
Output file created:examples/rf5.scores_scored.csv
.. and map the selected interactions using a gapped seq of your target:
GCCCGGAUAGCUCAGUCGGUAGAGCAUCAGACUUUUAAUCUGAGGGUCCAGGGUUCAAGUCCCUGUUCGGGCGCC # >1FIR:A|PDBID|CHAIN|SEQUENCE
GCCCGGAUAGCUCAGUCGGUAGAGCAUCAGACUUUUAAUCUGAGGGUCCAGGGUUCAAGUCCCUGUUCGGGCG # >AP0004426/20221950
GCCCGGAU.A.GCUCAGUC..GGU...AGAGCA.U.CAGAC.UUUUAAUCUG.AGG........................GU..CCAG.GGU.UCA.AG..UCCCU.G.UUCGGGC.G # >AP000442.6/2022.1950
.. and run the script:
➜ rna_dca git:(master) ✗ python rna_dca_mapping.py examples/1fir_gapped.fa examples/rf5.scores_parsed.csv
> 1fir
GCCCGGAU.A.GCUCAGUC..GGU...AGAGCA.U.CAGAC.UUUUAAUCUG.AGG........................GU..CCAG.GGU.UCA.AG..UCCCU.G.UUCGGGC.G
input: examples/1fir_gapped.fa
interactions:
[(38, 51), (7, 110), (37, 52), (88, 105), (87, 106), (2, 115), (5, 112), (3, 114), (40, 49), (4, 113), (6, 111), (14, 30), (90, 104), (39, 50), (86, 108), (35, 54), (13, 31), (1, 116), (12, 32), (17, 85), (91, 103), (23, 95), (15, 29), (59, 79), (46, 47), (22, 23), (29, 81), (60, 78), (62, 77), (58, 80), (45, 118), (41, 48), (10, 15), (23, 24), (22, 24), (92, 102), (10, 30), (44, 45), (67, 68), (66, 67), (63, 76), (19, 21), (66, 68), (46, 48), (91, 92), (24, 95), (10, 14), (33, 55), (22, 94), (92, 103), (58, 59), (41, 46), (79, 80), (64, 75), (78, 80), (58, 67), (67, 80), (58, 79), (58, 68)]
A U
(38, 51) -> 29 41
./simrna_get_restraints.py couplingsfileSaveRF00005_parsed_mapped
Install PyMOL plugin to view the interactions with PyMOL:
run <path>rna-pdb-tools/utils/rna_dca/pymol_dca_dists.py
<pre>
>1fir
GCCCGgAUAgCUCAGuCGGuAGAGCAuCAGACUuUUaAuCUGAGGguccAGGGuuCAaGUCCCUGUUCGGGCGCCA
(((((((..((((.....[..))))..(((.........)))......(((((..]....))))))))))))....
</pre>

**Figure**. `get_dists(<output of the script, python list>)`
To get interactions:
PyMOL>get_dists([[29, 41], [7, 66], [28, 42], [51, 63], [50, 64], [2, 71], [5, 68], [3, 70], [31, 39], [4, 69], [6, 67], [12, 23], [52, 62], [30, 40], [49, 65], [27, 43], [11, 24], [1, 72], [10, 25], [15, 48], [53, 61], [19, 56], [13, 22], [36, 37], [18, 19], [22, 46], [35, 73], [32, 38], [9, 13], [19, 20], [18, 20], [54, 60], [9, 23], [34, 35], [36, 38], [53, 54], [20, 56], [9, 12], [26, 44], [18, 55], [54, 61], [32, 36]])`
| /rna_tools-3.13.7-py3-none-any.whl/rna_tools/tools/rna_filter/README.md | 0.751101 | 0.832373 | README.md | pypi |
class Renumerator(object):
    """
    Generic renumerator class. Provides methods for changing the ID numbering
    in all ModernaStructure-based objects.

    Workflow: mutate self.ids with apply_alignment()/insert_gap()/renumber(),
    then call apply() to write the new numbering back to the structure.
    """
    def __init__(self, struct):
        """
        :Arguments:
            * struct - the structure to be renumbered (descendant of ModernaStructure)
        """
        #struct = validate_structure(struct)
        self.struct = struct
        # original residue numbers, in chain order
        self.original_ids = [i.id[1] for i in self.struct]
        # working copy of the numbering; mutated by the methods below
        self.ids = self.original_ids

    def apply_alignment(self, alignment):
        """
        Inserts gaps basing on the input alignment.
        Please note that the SECOND sequence is used.
        """
        seq = str(alignment.aligned_sequences[1])
        seq += "\xff"  # a little hack to avoid "open" rightmost gaps
        gaps = []          # collected as (position, length) tuples
        gap_length = 0
        shift = 0  # remember that each insertion shifts the numbering!
        res_num = None
        res_iter = self.ids.__iter__()
        for s in seq:
            if s == "-":
                # inside a gap; remember the residue id just before it
                if gap_length == 0:
                    start_pos = res_num
                gap_length += 1
            elif s != "_":
                # any non-gap, non-break symbol consumes one residue id
                if s != "\xff":
                    res_num = next(res_iter)
                if gap_length > 0:
                    # a gap just ended; record it, shifted by earlier gaps
                    gaps.append((start_pos + shift, gap_length))
                    shift += gap_length
                    gap_length = 0
        for gap in gaps:
            self.insert_gap(gap[0], gap[1])

    def insert_gap(self, position, length):
        """
        Inserts a gap of a given length at a specified position
        (position is an index into self.ids; ids after it are shifted up).
        """
        if position >= len(self.ids):
            # gap past the end of the chain: nothing to renumber
            return
        elif position == 0:
            gap_id = 1
            new_ids = []
        else:
            gap_id = self.ids[position - 1] + 1
            new_ids = self.ids[:position]
        shift = length
        for i in self.ids[position:]:
            if i > gap_id:
                # the existing numbering already leaves room here,
                # so part (or all) of the shift is absorbed
                shift -= i - gap_id
                gap_id = i
                if shift < 0:
                    shift = 0
            new_ids.append(i + shift)
            gap_id += 1
        self.ids = new_ids

    def renumber(self, count_from):
        """
        Provides simple renumeration of the entire structure,
        starting from count_from with an increment of 1.
        """
        id_gen = self.id_generator(count_from)
        self.ids = [next(id_gen) for i in self.ids]

    def id_generator(self, first_id):
        """
        Dummy ID generator, needs to be replaced with a working code
        (see NumberRenumerator for a concrete implementation).
        """
        pass

    def apply(self):
        """
        Applies all changes to the input structure.
        """
        resi_list = list(self.struct)
        # remove every residue, then re-add each under its new identifier
        for r in self.struct:
            self.struct.remove_residue(r.identifier)
        for r, i in zip(resi_list, self.ids):
            self.struct.add_residue(r, str(i))
class NumberRenumerator(Renumerator):
    """
    Renumerator variant that produces plain consecutive integer IDs.
    """
    def id_generator(self, first_id):
        """Yield consecutive integers, starting at first_id."""
        current = first_id
        while True:
            yield current
            current += 1
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
import re,os
from Bio.PDB.Atom import Atom
from rna_tools.tools.mini_moderna3.moderna.ModernaStructure import ModernaStructure
from rna_tools.tools.mini_moderna3.moderna.builder.PhosphateBuilder import TerminalPhosphateBuilder
from rna_tools.tools.mini_moderna3.moderna.analyze.ChainConnectivity import are_residues_connected, is_chain_continuous
from rna_tools.tools.mini_moderna3.moderna.Constants import MISSING_RESIDUE, UNKNOWN_RESIDUE_SHORT, PHOSPHATE_GROUP, RIBOSE, BACKBONE_ATOMS, AA_ATOMS, \
BACKBONE_RIBOSE_ATOMS, PHOSPHORYLATED_NUCLEOTIDES
from rna_tools.tools.mini_moderna3.moderna.util.LogFile import log
# what about aa
HEADER = """________________________________________________________________
ModeRNA (C) 2009 by Magdalena Musielak & Kristian Rother,
Tomasz Puton, Janusz M. Bujnicki
Check PDB file for any features that may prevent the file from being a ModeRNA template.
support: lenam@amu.edu.pl
________________________________________________________________
\n"""
MESSAGE = """OVERALL INFORMATION:
The most common reason that causes problems while modeling RNA with ModeRNA
is that a template is not compatible with the alignment. Possible reasons include:
1. Backbone discontinuity.
Symptom:
There are underscore symbols in the sequence.
e.g. structure.get_sequence() ---> 'A_AAAA'
Explanation:
It means that a structures sequence is not continous and can be caused by
some missing backbone atoms, nonproper bond lengths etc. It may also mean
that residues are simply not connected, e.g. when there are two blunt ends of a helix.
You should include the _ in your alignment explicitly e.g.:
A_AAAA
A-AGAA
ModeRNA can try to fix broken backbone with ModeRNA, which helps in many cases.
See the fix_backbone() function for more details.
2. Single ligands, ions or water molecules.
Symptom:
There are single underscore-dot symbols at the end of the sequence.
e.g. structure.get_sequence() ---> 'AAAA_.'
Explanation:
It means there is one unidentified residue (ion) at the and of the structure,
that is not connected with the rest of the structure.
This should be included in the alignment (see below) or the ions should be removed from
the template structure using the clean_structure() function.
AAAA_.
AAAA--
(see 3. if there are more such symbols).
3. Multiple water or ion molecules.
Symptom:
There are multiple underscore-dot symbols at the end of the sequence.
e.g. structure.get_sequence() ---> 'AAAA_.'
e.g. structure.get_sequence() ---> 'AAAA_._._._._._._._._._.' (10 water molecules).
Solution:
The water chain should be removed with the clean_structure() function or included in the alignment:
AAAA_._._._._._._._._._.
AAAA--------------------
4. Protein chain.
Symptom:
The entire chain has a sequence like ._._._. etc.
Solution:
Use a different chain. RNA can't be modeled from proteins yet.
However, the clean_structure() function will remove them.
5. Missing or broken bases.
Symptom:
There are single dots within the sequence.
e.g. structure.get_sequence() ---> 'AA.AAAAA'
Explanation:
It means that one residue is not complete: a whole base or its part is missing
and consequently the residue is marked in the alignment as an unknown residue.
It is not possible to model another base in this place in the automatic mode.
Regardless of used alignment the residue will stay unidentified in the model
(only backbone atoms will be copied to the model).
To be on the safe side, the according residue should be removed manually.
All described above features need to be included in the alignment or the template structure need to be modified.
One may check whether a PDB file (or alignment) needs to be cleaned up.
PDB file modification can be done with the clean_structure() function.
"""
class PdbController:
"""
Finds structure irregularities and fixes them.
"""
def __init__(self, ms):
    """Run all checks on the given chain.

    :Arguments:
        * ms - the chain (ModernaStructure-like) to examine
    """
    self.ms = ms
    # residues are sorted into these problem categories by the checks below
    self.water = []
    self.ions = []
    self.unidentified = []
    self.unidentified_rna = []
    self.rna_ligand = []
    self.phosphate = []
    self.P_trace = []
    self.P_missing = []
    self.aa = []
    self.disconnected_residues = []
    self.disordered = []
    self.standard = []
    #self.problematic_residues = [] # all residues from above categories
    # run every check up front; results are stored on the instance
    self.check_unidentified_residues()
    self.continuous = self.check_backbone_continuity()
    self.check_disconnected_residues()
    self.stars = self.check_stars()
    self.OP = self.check_OP()
    self.check_missing_P()
def __str__(self):
    """Return a multi-line report of all detected problems,
    or 'Chain OK' when none were found."""
    if not self.has_problems():
        return 'Chain OK'
    else:
        # one bullet line per non-empty problem category
        pdb_string = 'The chain contains some features that may cause problems during modeling:\n'
        if self.water: pdb_string += '- Water residues present in the chain: %s\n' % ', '.join([x.identifier for x in self.water])
        if self.ions: pdb_string += '- Ion residues present in the chain: %s\n' % ', '.join([x.identifier for x in self.ions])
        if self.aa: pdb_string += '- Aminoacids present in the chain: %s\n' % ', '.join([x.identifier for x in self.aa])
        if self.unidentified_rna: pdb_string += '- Unidentified RNA residues present in the chain: %s\n' % ', '.join([x.identifier for x in self.unidentified_rna])
        if self.P_trace: pdb_string += '- P trace present in the chain: %s\n' % ', '.join([x.identifier for x in self.P_trace])
        if self.P_missing: pdb_string += '- Atom P missing in residue: %s\n' % ', '.join([x.identifier for x in self.P_missing])
        if self.unidentified: pdb_string += '- Unidentified residues present in the chain: %s\n' % ', '.join([x.identifier for x in self.unidentified])
        if self.rna_ligand: pdb_string += '- RNA ligand present in the chain: %s\n' % ', '.join([x.identifier for x in self.rna_ligand])
        if self.phosphate: pdb_string += '- Phosphate present in the chain: %s\n' % ', '.join([x.identifier for x in self.phosphate])
        if self.stars: pdb_string += '- Some residues have * in ribose atom names\n'
        if self.OP: pdb_string += '- Some residues have O1P or O2P instead of OP1 and OP2.\n'
        if not self.continuous: pdb_string += '- Chain is discontinuous.\n'
        if self.disconnected_residues: pdb_string += '- Chain contains disconnected residues: %s.\n' % ', '.join([x.identifier for x in self.disconnected_residues])
        if self.disordered: pdb_string += '- Atoms of some residues may have alternative location (only the first coordinates set taken): %s\n' % ', '.join([x for x in self.disordered])
        return pdb_string
def __repr__(self):
    """Use the same report as __str__ for interactive display."""
    return str(self)
def has_problems(self):
    """
    Checks whether the chain checker found any problems in the structure.
    Returns True or False.
    The categories tested here must stay in sync with those reported
    by __str__ (the original omitted disconnected_residues).
    """
    problem_lists = (
        self.water, self.ions, self.unidentified, self.unidentified_rna,
        self.rna_ligand, self.phosphate, self.P_trace, self.P_missing,
        self.aa, self.disconnected_residues, self.disordered,
    )
    for problems in problem_lists:
        if problems:
            return True
    if self.stars or self.OP:
        return True
    if not self.continuous:
        return True
    return False
def check_all_bb_missing(self, resi):
    """
    Returns True when none of the backbone and ribose atoms
    (BACKBONE_RIBOSE_ATOMS) is present in the residue.
    """
    #TODO: resi.is_backbone_complete ??
    return not any(resi.has_id(atom_name) for atom_name in BACKBONE_RIBOSE_ATOMS)
def is_rna_ligand(self, resi):
    """
    Returns True when the residue is an RNA ligand (e.g. ATP, a free base).
    """
    if resi.new_abbrev == 'ligand':
        return True
    if resi.alphabet_entry.category == 'standard RNA':
        return False
    if self.check_all_bb_missing(resi):
        return True
    return resi.long_abbrev in PHOSPHORYLATED_NUCLEOTIDES
def is_phosphate(self, resi):
    """
    Tells whether resi looks like a lone phosphate group:
    at most 5 atoms, containing P and both phosphate oxygens
    (under either the OP1/OP2 or the O1P/O2P naming).
    """
    if len(resi.child_list) > 5:
        return False
    has_op1 = resi.has_id('OP1') or resi.has_id('O1P')
    has_op2 = resi.has_id('O2P') or resi.has_id('OP2')
    return bool(resi.has_id('P') and has_op1 and has_op2)
def check_unidentified_residues(self):
    """
    Finds unidentified residues in the structure.
    Sorts them into water, ions, unidentified.
    """
    # Classification order matters: each residue lands in exactly the
    # first matching category.
    for resi in self.ms:
        if resi.long_abbrev == UNKNOWN_RESIDUE_SHORT:
            # single O atom --> water; single P atom --> phosphate trace
            if resi.child_list[0].name == 'O' and len(resi.child_list)==1:
                self.water.append(resi)
            elif resi.child_list[0].name == 'P' and len(resi.child_list)==1:
                self.P_trace.append(resi)
            else:
                # all backbone atoms present --> unidentified RNA residue
                rna = True
                for at in BACKBONE_ATOMS:
                    if not resi.has_id(at): rna = False
                if rna: self.unidentified_rna.append(resi)
                else:
                    # all amino-acid atoms present --> amino acid
                    aa = True
                    for at in AA_ATOMS:
                        if not resi.has_id(at): aa = False
                    if aa: self.aa.append(resi)
                    elif self.is_phosphate(resi): self.phosphate.append(resi)
                    # <7 atoms is used as a heuristic for ions here
                    elif len(resi.child_list) < 7: self.ions.append(resi)
                    else: self.unidentified.append(resi)
        elif self.is_rna_ligand(resi): self.rna_ligand.append(resi)
        else:
            # regular residue; remember identifiers of residues with
            # alternative atom locations separately
            if resi.has_double_coord: self.disordered.append(resi.identifier)
            self.standard.append(resi)
def check_stars(self):
    """
    Checks whether any ribose atom name contains a star ('*').
    Returns True or False.
    """
    # plain substring test instead of re.findall('\*'), which used a
    # non-raw escape sequence (a SyntaxWarning on modern Python)
    for resi in self.standard:
        if any('*' in at.name for at in resi.child_list):
            return True
    return False
def check_OP(self):
    """
    Checks whether the structure contains irregular names for the oxygens
    bonded to phosphate (O1P/O2P/O3P instead of OP1/OP2/OP3).
    Returns True or False.
    """
    old_names = ('O1P', 'O2P', 'O3P')
    for resi in self.standard:
        if self.is_typical_category_residue(resi):
            # exact atom-name match; the original searched for the substring
            # in the concatenation of all names, which could fire on
            # characters spanning two adjacent atom names
            if any(at.name in old_names for at in resi.child_list):
                return True
    return False
def check_missing_P(self):
    """
    Collects typical residues without a P atom in self.P_missing.
    Returns True when at least one such residue was found, False otherwise.
    """
    found_missing = False
    for residue in self.standard:
        if not self.is_typical_category_residue(residue):
            continue
        if not residue.has_id('P'):
            self.P_missing.append(residue)
            found_missing = True
    return found_missing
def check_backbone_continuity(self):
    """
    Checks whether the chain is continuous; returns True or False.
    Only regular residues are taken into account
    (water, ions and unidentified residues are excluded).
    """
    regular_chain = ModernaStructure('residues', self.standard + self.unidentified_rna)
    return is_chain_continuous(regular_chain)
def is_typical_category_residue(self, res):
    """
    Returns True when the residue belongs to the 'standard RNA',
    'standard DNA' or 'modified' category; False for everything else
    (unknown, ligand, synthetic, stereoisomer, insertion, missing, ' ').
    """
    return res.category in ('standard RNA', 'standard DNA', 'modified')
def check_disconnected_residues(self):
    """
    Looks for residues that belong to the chain, are not connected to the chain
    and represents synthetic residues, ligands or stereoisomers.
    Appends them to self.disconnected_residues.
    """
    temp = []
    st = ModernaStructure('residues', self.standard+self.unidentified_rna)
    all_resi = [x for x in st]
    # a single residue counts as disconnected by definition
    if len(all_resi) == 1: temp.append(all_resi[0].identifier)
    elif not self.continuous:
        # scan every consecutive pair for missing backbone connections
        for x in range(len(all_resi)-1):
            if not are_residues_connected(all_resi[x], all_resi[x+1]):
                # two-residue chain: both residues are disconnected
                if len(all_resi) == 2: temp += [y.identifier for y in all_resi]
                # break right before the last residue: the last one is loose
                elif x+1==len(all_resi)-1: temp.append(all_resi[x+1].identifier)
                # residue x+1 is disconnected on both sides
                elif not are_residues_connected(all_resi[x+1], all_resi[x+2]): temp.append(all_resi[x+1].identifier)
    # checking whether disconnected residues are not standard or modified ones
    for resi in temp:
        if not self.is_typical_category_residue(st[resi]):
            self.disconnected_residues.append(st[resi])
# def check_problematic_residues(self):
# """
# Goes through all lists (self.water, self.ions ...) and
# makes one list with all resides reported as problematic.
# It can be run only after other checking functions.
# """
# resi_lists = [self.water, self.ions, self.unidentified, self.unidentified_rna, self.rna_ligand, self.phosphate, \
# self.P_trace, self.P_missing, self.aa, self.disconnected_residues, self.disordered]
# for l in resi_lists:
# for resi in
def write_log(self, file_name='structure.log'):
    """
    Writes information about structure irregularities to the given file.
    """
    #TODO: this could be returned as a string, so we could print it in 'moderna.py -t <filename>'
    # 'with' closes the handle; the original leaked the open file
    with open(file_name, 'w') as handle:
        handle.write(HEADER)
        handle.write(str(self))
def remove_property(self, property_list, mode='residues'):
    """
    Removes the given residues from the structure
    (e.g. water, ions, unidentified residues).
    Arguments:
    - property_list - items to remove
    - mode - type of data in property_list: 'residues' or 'identifiers'
    Returns an empty list so callers can reset the corresponding attribute.
    """
    ms_id = [x.identifier for x in self.ms]
    removed_ids = []
    for item in property_list:
        if mode == 'residues':
            if item.identifier in ms_id:
                self.ms.remove_residue(item.identifier)
                removed_ids.append(item.identifier)
        elif mode == 'identifiers':
            if item in ms_id:
                self.ms.remove_residue(item)
                removed_ids.append(item)
    # drop the removed residues from the bookkeeping list as well.
    # BUGFIX: the original compared residue objects against identifier
    # strings ('if x in st_id'), which emptied self.standard on every call.
    kept_ids = set(x.identifier for x in self.standard) - set(removed_ids)
    self.standard = [x for x in self.standard if x.identifier in kept_ids]
    return []
def add_P_atom(self):
    """
    Adds missing P atoms to the residues collected in self.P_missing
    (only when C4', C5' and O5' are already there).
    Residues that still lack a P afterwards remain in self.P_missing;
    residues no longer present in the structure are dropped from it.
    """
    ms_id = [x.identifier for x in self.ms]
    new_P_missing = []
    for resi in self.P_missing:
        if resi.identifier not in ms_id:
            continue
        add_P = True
        for at in ["C4'", "C5'", "O5'"]:
            if not resi.has_id(at):
                add_P = False
                log.write_message("Could not add P atom to residue %s. %s atom missing." % (resi.identifier, at))
        if add_P:
            try:
                tp = TerminalPhosphateBuilder(resi, self.ms)
                tp.build_phosphate_group()
                log.write_message("Phosphate group constructed on residue %s." % (resi.identifier))
            except Exception:
                # narrowed from a bare 'except:'; the original also dropped
                # the format argument, logging a literal '%s'
                log.write_message("Could not add P atom to residue %s." % resi.identifier)
                new_P_missing.append(resi)
        else:
            new_P_missing.append(resi)
    self.P_missing = new_P_missing
def change_at_name(self, resi, old_name, new_name):
    """
    Renames a single atom of resi by replacing it with a copy
    that carries new_name.
    """
    old_atom = resi[old_name]
    # element guess from the first character; default to carbon
    element = new_name[0] if new_name[0] in "CHONSP" else 'C'
    replacement = Atom(new_name, old_atom.coord, old_atom.bfactor,
                       old_atom.occupancy, old_atom.altloc, ' ' + new_name,
                       old_atom.serial_number, element=element)
    resi.detach_child(old_name)
    resi.add(replacement)
def fix_at_names(self, stars=True, OP=True):
    """
    Normalizes atom names in all residues:
    - * ---> '
    - O1P/O2P/O3P ---> OP1/OP2/OP3
    The 'stars' and 'OP' flags select which of the two fixes to apply.
    """
    renamed_oxygens = (('O1P', 'OP1'), ('O2P', 'OP2'), ('O3P', 'OP3'))
    for resi in self.ms:
        if OP:
            for old_name, new_name in renamed_oxygens:
                if resi.has_id(old_name):
                    self.change_at_name(resi, old_name, new_name)
            self.OP = False
        if stars:
            for at in list(resi):
                if '*' in at.name:
                    self.change_at_name(resi, at.name, at.name.replace('*', "'"))
            self.stars = False
def clean_structure(self, rm_water=True, rm_ions=True, rm_aa=True, exchange_stars=True, exchange_OP=True, rm_unidentified=True, add_P=True, rm_P_trace=True, rm_ligand=True, rm_phosphate=True, rm_disconnected_resi=True, rm_unidentified_rna=False):
    """
    Fixes features that may cause problems during modeling
    and returns the cleaned-up structure.
    Each rm_* flag removes one category of problematic residues;
    exchange_* flags normalize atom names; add_P builds missing P atoms.
    """
    # (flag, attribute) pairs in the original removal order
    removals = (
        (rm_water, 'water'),
        (rm_ions, 'ions'),
        (rm_unidentified, 'unidentified'),
        (rm_unidentified_rna, 'unidentified_rna'),
        (rm_ligand, 'rna_ligand'),
        (rm_phosphate, 'phosphate'),
        (rm_P_trace, 'P_trace'),
        (rm_aa, 'aa'),
    )
    for requested, attr_name in removals:
        if requested:
            setattr(self, attr_name, self.remove_property(getattr(self, attr_name)))
    if rm_disconnected_resi:
        self.disconnected_residues = self.remove_property(self.disconnected_residues)
        self.continuous = self.check_backbone_continuity()
    if exchange_stars and exchange_OP:
        self.fix_at_names()
    elif exchange_OP:
        self.fix_at_names(False, True)
    elif exchange_stars:
        self.fix_at_names(True, False)
    if add_P:
        self.add_P_atom()
    return self.ms
def get_structure(self):
    """
    Returns the wrapped (possibly cleaned) structure, self.ms.
    """
    return self.ms
def write_structure(self, file_name='fixed_structure.pdb'):
    """
    Writes the structure to a PDB file.
    (The last line of this method was corrupted by appended metadata
    in the source; restored here.)
    """
    self.ms.write_pdb_file(file_name)
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
from Bio.PDB import Superimposer
from rna_tools.tools.mini_moderna3.moderna.util.Errors import ModernaSuperimposerError
class ModernaSuperimposer:
    """
    Wrapper for Bio.PDB.Superimposer, that can handle
    extracting atoms with predefined names.
    Allows to rotate and translate atoms in space.
    """
    def __init__(self, fixed=None, moved=None, moved_atoms=None):
        """
        fixed - list of atoms the moved atoms will be superimposed upon.
        moved - list of atoms that will be superimposed on the fixed ones.
        moved_atoms - all atoms whose coordinates should be transformed.
        If all three parameters are given, the superposition
        will be carried out immediately.
        """
        self.fixed = fixed
        self.moved = moved
        self.moved_atoms = moved_atoms
        self.rmsd = None  # filled in by superimpose()
        if fixed and moved and moved_atoms:
            self.superimpose()

    def superimpose(self):
        """
        Performs the superimposition and applies the resulting rotation
        and translation to self.moved_atoms.
        Returns the RMSD.
        """
        if not self.fixed or not self.moved:
            raise ModernaSuperimposerError('There are no fixed or moved atoms. Can not calculate rotation and translation matrix.')
        if not self.moved_atoms:
            # fixed typo in the original message ('applay')
            raise ModernaSuperimposerError('There are no atoms for superimposition given. Can not apply rotation and translation matrix')
        sup = Superimposer()
        sup.set_atoms(self.fixed, self.moved)
        sup.apply(self.moved_atoms)
        self.rmsd = sup.rms
        return self.rmsd

    def get_atoms(self, resi_list, atom_names_list, mode='fixed'):
        """
        Retrieves all atoms with given names from the given list of residues.
        Returns a list of PDB.Atom objects.
        Sets superimposition atoms - fixed or moved - depending on the given mode.
        Arguments:
        - resi_list - list of PDB.Residue objects
        - atom_names_list - list of atom names
        - mode - 'fixed' or 'moved' depending on which superposition atoms will be given
        """
        atoms_list = []
        for resi in resi_list:
            for name in atom_names_list:
                try:
                    atoms_list.append(resi[name])
                except KeyError:
                    raise ModernaSuperimposerError("Atom %s not available for superposition." % name)
        if mode == 'fixed':
            self.fixed = atoms_list
        elif mode == 'moved':
            self.moved = atoms_list
        return atoms_list

    def set_moved_atoms(self, atoms_list):
        """Setter for the atoms that superimpose() will transform."""
        self.moved_atoms = atoms_list
        #TODO: remove this method (callers can assign .moved_atoms directly)
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
from rna_tools.tools.mini_moderna3.moderna.ModernaStructure import ModernaStructure
from rna_tools.tools.mini_moderna3.moderna.ModernaFragment import ModernaFragment53, ModernaFragment2D, \
ModernaFragment553, ModernaFragment533
from rna_tools.tools.mini_moderna3.moderna.FragmentInsertion import FragmentInserter
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaSequence import Sequence
from rna_tools.tools.mini_moderna3.moderna.Renumerator import renumber_section
from rna_tools.tools.mini_moderna3.moderna.Constants import HELIX, HELIX_SUPERPOSITION, WC_BASE_PAIRS
from rna_tools.tools.mini_moderna3.moderna.util.Errors import ModernaFragmentError
from rna_tools.tools.mini_moderna3.moderna.analyze.ChainConnectivity import are_residues_connected
class Helix(ModernaStructure):
    """
    A-type RNA helix.
    The 5' strand is the first half of the chain, the 3' strand the second.
    """
    @property
    def strand5(self):
        """Return residues on the 5' strand."""
        # // : under Python 3 'len(self)/2' is a float and breaks slicing
        return list(self)[:len(self) // 2]

    @property
    def strand3(self):
        """Return residues on the 3' strand."""
        return list(self)[len(self) // 2:]
class HelixBuilder(object):
    """
    Creates A-type RNA helices of a given length.
    """
    def __init__(self, data_type='file', data=HELIX, chain_name='A'):
        """Stores the source of the helix template (by default the HELIX file)."""
        self.data_type = data_type
        self.data = data
        self.chain_name = chain_name

    def _get_helix(self):
        """Creates a helix structure from the template data."""
        helix = Helix(self.data_type, self.data, self.chain_name)
        if len(helix) % 2:
            raise ModernaFragmentError('Helix must have even length.')
        self.renumber_helix(helix)
        return helix

    def renumber_helix(self, helix):
        """
        Renumbers 5' strand starting from 1, and 3' strand starting from 401.
        """
        helix.renumber_chain()
        strand3 = helix.strand3
        first = int(strand3[0].identifier)
        last = int(strand3[-1].identifier)
        renumber_section(helix, first, last, 401)

    def make_helix_shorter(self, helix, dest_length):
        """Deletes base pairs from the middle of a helix until dest_length residues remain."""
        # integer division: under Python 3 'dest_length/2' is a float
        # and cannot be used as a slice index
        half = dest_length // 2
        resis = list(helix)[half:-half]
        for resi in resis:
            helix.remove_residue(resi.identifier)
        self.renumber_helix(helix)

    def make_helix_longer(self, helix):
        """Adds base pairs to a helix by inserting another template helix."""
        helix2 = self._get_helix()
        helix2 = ModernaFragment53(helix2, anchor5=helix.strand5[-1], \
            anchor3=helix.strand3[0], strict=False)
        finsert = FragmentInserter()
        finsert.insert_fragment(helix2, helix)
        self.renumber_helix(helix)

    def build_helix(self, seq):
        """
        Returns a helix with the given sequence as a ModernaStructure object.
        seq - Sequence like 'AAAA_GGGG' (both strands separated by '_',
        odd overall character count).
        """
        if not len(seq) % 2:
            raise ModernaFragmentError('Expects odd number of characters \
in the sequence (FirstStrand_SecondStrand) e.g. "AAAA_GGGG"')
        helix = self._get_helix()
        dest_length = len(seq) - 1
        # grow in template-sized steps, then trim down to the exact length
        while len(helix) < dest_length:
            self.make_helix_longer(helix)
        if len(helix) > dest_length:
            self.make_helix_shorter(helix, dest_length)
        if not len(helix) == dest_length:
            raise ModernaFragmentError('Helix construction failed. \
Length %i; should be %i' % (len(helix), dest_length))
        helix.change_sequence(seq.seq_without_breaks)
        return helix
class HelixFragmentBuilder(object):
    """
    Prepares helix fragments.
    Finds out which anchors on the upper section are present
    and determines the according fragment type.
    """
    def _identify_upper_anchors(self, model, anchor5, anchor3):
        """Find residues connecting to anchors if any."""
        upper5, upper3 = None, None
        if model:
            upper = model.find_residues_in_range(anchor5.identifier, anchor3.identifier)
            if len(upper) > 0:
                if are_residues_connected(anchor5, model[upper[0]]):
                    upper5 = model[upper[0]]
                if are_residues_connected(model[upper[-1]], anchor3):
                    upper3 = model[upper[-1]]
        return upper5, upper3

    def _add_anchors_to_seq(self, seq, upper5, upper3):
        """
        Adds lower anchor letters (always) and upper anchor letters
        (when upper anchor residues are present) to the sequence.
        A missing upper anchor is complemented via WC_BASE_PAIRS.
        """
        # add lower anchors in any case
        seq = 'G' + str(seq) + 'C'
        # convert anchor residues to one-letter abbreviations first.
        # BUGFIX: the original complemented upper3 from upper5 *before*
        # the .short_abbrev conversion, so the 'only upper5' case called
        # .short_abbrev on a plain string and crashed.
        if upper5:
            upper5 = upper5.short_abbrev
        if upper3:
            upper3 = upper3.short_abbrev
        if upper5 and not upper3:
            upper3 = WC_BASE_PAIRS[upper5]
        elif upper3 and not upper5:
            upper5 = WC_BASE_PAIRS[upper3]
        # add upper anchors
        if upper5 and upper3:
            strand_len = len(seq) // 2
            seq = seq[:strand_len] \
                + upper5 + '_' + upper3 \
                + seq[-strand_len:]
        return Sequence(seq)

    def _create_frag(self, helix, seq, anchor5, anchor3, upper5, upper3, model):
        """Returns a ModernaFragment object for the given anchor combination."""
        seq = seq.seq_without_breaks
        if upper5 and upper3:
            return ModernaFragment2D(helix, \
                anchor5=anchor5, anchor3=anchor3, \
                anchor5_upper=upper5, \
                anchor3_upper=upper3, \
                frag5_upper=helix.strand5[-1], \
                frag3_upper=helix.strand3[0], \
                new_sequence=seq, \
                superposition_atoms=HELIX_SUPERPOSITION,
                model=model
                )
        elif upper5:
            # NOTE(review): returns the class, not an instance - looks
            # unfinished; left as-is to avoid guessing constructor args.
            return ModernaFragment553
        elif upper3:
            # NOTE(review): same as above - class instead of instance.
            return ModernaFragment533
        else:
            return ModernaFragment53(helix, \
                anchor5=anchor5, anchor3=anchor3, \
                new_sequence=seq, \
                sup_r5=HELIX_SUPERPOSITION, \
                sup_r3=HELIX_SUPERPOSITION, \
                strict=False
                )

    def build_fragment(self, anchor5=None, anchor3=None, \
                        sequence=None, model=None):
        """Builds a Helix fragment between the two anchor residues."""
        upper5, upper3 = self._identify_upper_anchors(model, anchor5, anchor3)
        anchor_seq = self._add_anchors_to_seq(sequence, upper5, upper3)
        helix_builder = HelixBuilder()
        helix = helix_builder.build_helix(anchor_seq)
        return self._create_frag(helix, sequence, anchor5, anchor3, upper5, upper3, model)
import re
from Bio.PDB.Residue import Residue
from Bio.PDB.Atom import Atom
from numpy import array
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaAlphabet import alphabet
from rna_tools.tools.mini_moderna3.moderna.analyze.BaseRecognizer import BaseRecognizer, BaseRecognitionError
from rna_tools.tools.mini_moderna3.moderna.analyze.BasePairCalculator import base_pair_calc
from rna_tools.tools.mini_moderna3.moderna.builder.CoordBuilder import build_coord
from rna_tools.tools.mini_moderna3.moderna.util.Errors import RNAResidueError
from rna_tools.tools.mini_moderna3.moderna.Constants import STANDARD_BASES, ANY_RESIDUE
from rna_tools.tools.mini_moderna3.moderna.Constants import PURINE_NEIGHBOR_TABLE, PYRIMIDINE_NEIGHBOR_TABLE
from rna_tools.tools.mini_moderna3.moderna.Constants import DONORS, ACCEPTORS
from rna_tools.tools.mini_moderna3.moderna.Constants import H_GENERATE_STEP, H_COVALENT_BOND
from rna_tools.tools.mini_moderna3.moderna.Constants import H_ANGLE_ONE, H_ANGLE_TWO
# Torsion angles (degrees) sampled when generating hypothetical hydrogen
# positions around a donor atom that has only one neighbor.
H_GENERATE_TORSIONS = list(range(0, 360, H_GENERATE_STEP))
class RNAResidue(Residue):
    """
    Supplements Bio.PDB.Residue object with functions to
    manage RNA-specific features.
    """
    # shared recognizer instance; identifying a residue with it is slow
    br = BaseRecognizer()

    def __init__(self, pdb_residue, alphabet_entry=None, new_atoms=True):
        """
        Arguments:
        - residue as a Bio.PDB.Residue instance
        - optional: AlphabetEntry instance, if it is not given, the residue
          is identified using BaseRecognizer (slow)
        - new_atoms: copy the atoms (True) or reuse the originals (False, faster)
        """
        Residue.__init__(self, pdb_residue.id, pdb_residue.resname, ' ')
        self.number = pdb_residue.id[1]
        self.disordered = pdb_residue.disordered
        self.has_double_coord = self.__check_double_coord(pdb_residue)
        self.modified = None
        self.long_abbrev = None
        if alphabet_entry:
            abbrev = alphabet_entry.long_abbrev
        else:
            try:
                abbrev = self.br.identify_resi(pdb_residue)
            except BaseRecognitionError:
                abbrev = alphabet.get_short_original(ANY_RESIDUE)
        self.change_name(abbrev)
        self.identifier = str(self.id[1]).strip() + self.id[2].strip()
        self.__create_atoms(pdb_residue, new_atoms)
        # caches for H-bond calculation --> faster
        self._donors = None
        self._acceptors = None
        self._donor_hydrogens = {}

    def __check_double_coord(self, resi):
        """
        Checks whether any atoms in residue
        have alternative coordinates given in the pdb file.
        """
        if not self.disordered:
            return False
        for atom in resi:
            if atom.is_disordered():
                if len(atom.disordered_get_list()) > 1:
                    return True
        return False

    def __create_atoms(self, pdb_residue, new_atoms):
        """Copies (or reuses) the atoms of the parsed PDB residue."""
        if new_atoms:
            # copy all atoms, in case the original is manipulated.
            for atom in pdb_residue.child_list:
                if not atom.name[0] in '*H123':  # skip hydrogens and star names
                    # guess the element from the name.
                    # BUGFIX: the original indexed before the 'or', so an
                    # all-digit/whitespace name raised IndexError instead
                    # of falling back to carbon.
                    element = (re.sub(r'[\s\d]', '', atom.name) or 'C')[0]
                    new_at = Atom(atom.name, atom.coord, atom.bfactor,
                                  atom.occupancy, atom.altloc, atom.fullname,
                                  atom.serial_number, element=element)
                    self.add(new_at)
        else:
            # use the old atoms (saves time)
            [self.add(atom) for atom in pdb_residue.child_list]

    def __len__(self):
        """Returns number of atoms."""
        return len(self.child_list)

    def __repr__(self):
        """Returns string representation"""
        return '<Residue %s %s>' % (self.identifier, self.long_abbrev)

    def __getitem__(self, name):
        """
        Returns an atom like PDB.Residue,
        but interprets N* as the glycosidic N.
        """
        # KR: added to take care of the N1/N9 locally (important for LIR)
        # C,U ---> N1; A,G ---> N9; pseudouridine ---> C5;
        if name == 'N*':
            if self.long_abbrev in ['Y', 'm1acp3Y', 'Ym', 'm3Y']:
                return Residue.__getitem__(self, 'C5')
            if self.pyrimidine:
                return Residue.__getitem__(self, 'N1')
            elif self.purine:
                return Residue.__getitem__(self, 'N9')
            elif self.original_base == 'X':
                if 'N9' in self.child_dict:
                    return Residue.__getitem__(self, 'N9')
                elif 'N1' in self.child_dict:
                    return Residue.__getitem__(self, 'N1')
                else:
                    raise RNAResidueError('Cannot decide which atom to use for glycosidic N in residue %s' % self)
            elif 'N1' in self.child_dict:
                return Residue.__getitem__(self, 'N1')
            elif 'N9' in self.child_dict:
                return Residue.__getitem__(self, 'N9')
            else:
                raise RNAResidueError('Cannot decide which atom to use for glycosidic N in residue %s' % self)
        else:
            return Residue.__getitem__(self, name)

    def change_number(self, new_number):
        """Changes a residues number to the given string (e.g. '5' or '10A')."""
        try:
            num = int(new_number.strip())
            self.id = (self.id[0], num, ' ')
        except (ValueError, AttributeError):  # narrowed from bare 'except:'
            # not a plain number: try number plus trailing insertion code
            try:
                letter = new_number.strip()[-1]
                num = int(new_number.strip()[:-1])
                self.id = (self.id[0], num, letter)
            except ValueError:
                raise RNAResidueError('Invalid residue number: %s' % new_number)
        self.number = num
        self.identifier = new_number.strip()

    def change_name(self, new_name):
        """
        Changes the residues name.
        to a new name (as a long abbreviation if modified)
        """
        if new_name not in alphabet:
            # unknown names collapse to the generic 'any residue' entry
            new_name = alphabet.get_short_original(ANY_RESIDUE).long_abbrev
        aentry = alphabet[new_name]
        self.resname = aentry.pdb_abbrev
        self.long_abbrev = aentry.long_abbrev
        if new_name in STANDARD_BASES:
            # standard base
            self.modified = False
            self.id = (' ', self.id[1], self.id[2])
        elif aentry.original_base.upper() in "X":
            # unknown residues --> water, ions.
            self.modified = False
        else:
            # modified base: hetero flag carries the PDB abbreviation
            self.modified = True
            self.id = ('H_' + aentry.pdb_abbrev, self.id[1], self.id[2])
            if aentry.pdb_abbrev == 'UNK':
                # pad the Modomics abbreviation with zeros to three characters
                abbrev = '0' * (3 - len(aentry.new_abbrev)) + aentry.new_abbrev
                self.resname = abbrev
                self.id = ('H_' + abbrev, self.id[1], self.id[2])
        self._clear_caches()

    def _clear_caches(self):
        """Delete internally saved shortcuts"""
        self._donors = None
        self._acceptors = None
        self._donor_hydrogens = {}

    @property
    def alphabet_entry(self):
        """Returns an alphabet entry for this residue."""
        return alphabet[self.long_abbrev]

    @property
    def purine(self):
        """Returns True if the residue is a purine (parent base A or G)."""
        return self.original_base in ("G", "A")

    @property
    def pyrimidine(self):
        """Returns True if the residue is a pyrimidine (parent base C or U)."""
        return self.original_base in ("C", "U")

    @property
    def original_base(self):
        """Returns the unmodified base abbreviation."""
        return self.alphabet_entry.original_base

    @property
    def short_abbrev(self):
        """Returns a one-letter abbreviation of the residue."""
        return self.alphabet_entry.short_abbrev

    @property
    def new_abbrev(self):
        """Returns the Modomics nomenclature abbreviation."""
        return self.alphabet_entry.new_abbrev

    @property
    def pdb_abbrev(self):
        """Returns a three-letter PDB abbreviation."""
        return self.alphabet_entry.pdb_abbrev

    @property
    def full_name(self):
        """Returns the full name of the nucleotide."""
        return self.alphabet_entry.full_name

    @property
    def category(self):
        """Returns the category of the nucleotide."""
        return self.alphabet_entry.category

    # ------------------- helper functions to work with atoms ---------------
    def get_atom_vector(self, name):
        """returns a vector for the given atom. N* encodes the glyco-N"""
        try:
            return self[name].get_vector()
        except KeyError:
            raise RNAResidueError('There is no atom %s in residue %s'
                                  % (name, self.identifier))

    def get_atoms_by_names(self, names, strict=False):
        """Generates atoms from the given list of names."""
        for name in names:
            try:
                yield self[name]
            except KeyError:
                if strict:
                    raise KeyError("Atom %s not found" % name)

    def check_atoms(self, names):
        """Returns True if all atom names in the given list exist."""
        try:
            [atom for atom in self.get_atoms_by_names(names, strict=True)]
            return True
        except KeyError:
            return False

    def get_bp(self, resi2):
        """returns an interaction type between two residues
        or None if there is no interaction"""
        return base_pair_calc(self, resi2)

    # ------------ helper methods for h-bond calculation ----------------------
    def get_hbond_donors(self):
        """Generates atoms that are H-bond donors of this residue."""
        if self._donors:
            return self._donors
        key = self.original_base.strip()
        result = list(self.get_atoms_by_names(DONORS.get(key, [])))
        self._donors = result
        return result

    def get_hbond_acceptors(self):
        """Generates atoms that are H-bond acceptors of this residue."""
        if self._acceptors:
            return self._acceptors
        key = self.original_base.strip()
        result = list(self.get_atoms_by_names(ACCEPTORS.get(key, [])))
        self._acceptors = result
        return result

    def get_neighbors(self, atom):
        """Returns a list of atoms in the same residue connected by bonds."""
        result = []
        # pick the bond table by base type (purines contain N9)
        if 'N9' in self.child_dict:
            nb_dict = PURINE_NEIGHBOR_TABLE
        else:
            nb_dict = PYRIMIDINE_NEIGHBOR_TABLE
        for name in nb_dict.get(atom.fullname):
            child = self.child_dict.get(name)
            if child:
                result.append(child)
        return result

    def get_donor_hydrogens(self, donor):
        """
        Returns a list of coord records of hypothetical hydrogen positions.
        If the donor has two neighbors, this will be a single position, if
        it has only one, a rotation will be performed in 10 degree steps.
        Atoms with 3 or more neighbors will be rejected.
        """
        # TODO: refactor this out.
        if donor.name in self._donor_hydrogens:
            return self._donor_hydrogens[donor.name]
        hydrogens = []
        neighbors = self.get_neighbors(donor)
        don_vec = donor.get_vector()
        sup_vec1 = None  # coordinates next to donor
        sup_vec2 = None  # coordinates to calc torsion
        if len(neighbors) == 1:
            sup_vec1 = neighbors[0].get_vector()
            neighbors2 = self.get_neighbors(neighbors[0])
            sup_vec2 = None
            while neighbors2 and sup_vec2 is None:
                candidate = neighbors2.pop()  # renamed from 'next' (shadowed the builtin)
                if candidate != donor:
                    sup_vec2 = candidate.get_vector()
            # bad case: no neighbors to construct 2nd support vec
            if sup_vec2 is None:
                sup_vec2 = (don_vec ** sup_vec1)
            angle = H_ANGLE_ONE
            torsions = H_GENERATE_TORSIONS
        elif len(neighbors) == 2:
            sup_vec1 = neighbors[0].get_vector()
            sup_vec2 = neighbors[1].get_vector()
            angle = H_ANGLE_TWO
            torsions = [180.0]
        if sup_vec1 is not None and sup_vec2 is not None:
            # create hydrogen positions
            for torsion in torsions:
                h_pos = build_coord(sup_vec2, sup_vec1, don_vec,
                                    H_COVALENT_BOND, angle, torsion)
                h_pos = array([h_pos[0], h_pos[1], h_pos[2]])
                hydrogens.append(h_pos)
        self._donor_hydrogens[donor.name] = hydrogens
        return hydrogens
        # TODO: what if there are 0 neighbors (water)?
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
from rna_tools.tools.mini_moderna3.moderna.util.Errors import RenumeratorError
class Renumerator:
    """
    Class for generating numbers for residue renumbering.
    Useful in fragments of all kinds.
    """
    def __init__(self, residues_list, identifiers_list):
        """
        residues_list - residues corresponding 1:1 to identifiers_list.
        identifiers_list - known identifiers plus None placeholders
        for the positions that need new identifiers.
        """
        if len(residues_list) != len(identifiers_list):
            raise RenumeratorError('Cannot generate numbers - residue list and number list have different size.')
        self.residues_list = residues_list
        #KR: if this is the only place residue_list is used, we maybe won't need it.
        self.identifiers_list = identifiers_list

    def letter_generator(self, first_num):
        """Yields first_num + 'A', first_num + 'B', ... (insertion codes)."""
        # NOTE(review): 'V' is absent from this alphabet - confirm whether intentional
        CHARACTERS = 'ABCDEFGHIJKLMNOPQRSTUWXYZ'
        for character in CHARACTERS:
            yield first_num + character

    def number_generator(self, first_num):
        """Yields str(first_num + 1), str(first_num + 2), ... endlessly."""
        try:
            first_num = int(first_num)
        except (TypeError, ValueError):
            # BUGFIX: the original built the error object without raising it
            # (bare 'except: RenumeratorError(...)')
            raise RenumeratorError('Number can not be generated, first number cannot be converted into int')
        while 1:
            first_num += 1
            yield str(first_num)

    def divide_identifiers(self):
        """
        Divides the stored identifier list into runs of known identifiers
        and runs of None placeholders, e.g.
        [None, None, None, '2', '3'] ---> [[None, None, None], ['2', '3']]
        ['5', '6', None, None, None, '7', '8'] ---> [['5', '6'], [None, None, None], ['7', '8']]
        """
        if not self.identifiers_list:
            return []
        divided_list = []
        current_kind = self.identifiers_list[0]
        current_run = []
        for identifier in self.identifiers_list:
            if (identifier and current_kind) or (not identifier and not current_kind):
                current_run.append(identifier)
            else:
                divided_list.append(current_run)
                current_run = [identifier]
                current_kind = identifier
        if current_run:
            divided_list.append(current_run)
        return divided_list

    def find_first_number(self, before, middle, after):
        """
        Returns the identifier the new numbering is based on.
        before/after - identifier runs surrounding the gap (or None/empty).
        """
        #KR: what when the first number has already a letter?
        # Should it raise a NumberingError, maybe? - TODO
        if not before and not after:
            return '1'
        elif before:
            return before[-1]
        # from here on: before is empty and after is non-empty.
        # (the original had an additional 'after and before' branch here,
        # which was unreachable because 'elif before' already returned)
        elif len(middle) >= 26:
            first = int(after[0])
            if first > len(middle):
                return str(int(after[0]) - len(middle) - 1)
            else:
                raise RenumeratorError('Cannot generate numbers - not enough room for %i numbers before %s.' % (len(middle), after[0]))
        else:
            return str(int(after[0]) - 1)

    def get_generator(self, first_id, before, middle, after):
        """
        Returns an identifier generator:
        letters for short gaps (< 26 residues), numbers otherwise.
        For numbers between two anchors, checks that there is enough room.
        """
        if len(middle) < 26:
            return self.letter_generator(first_id)
        if before and after:
            if int(after[0]) - int(before[-1]) > len(middle):
                return self.number_generator(first_id)
            raise RenumeratorError('Cannot generate numbers - not enough room for %i numbers between %s and %s.' % (len(middle), before[-1], after[0]))
        # only one side (or neither) is anchored - numbering is unconstrained
        return self.number_generator(first_id)

    def prepare_identifiers(self, before, middle, after):
        """
        Generates new identifiers for one gap.
        middle - the run of None placeholders
        before/after - surrounding identifier runs (or None)
        """
        first = self.find_first_number(before, middle, after)
        id_generator = self.get_generator(first, before, middle, after)
        return [next(id_generator) for _ in middle]

    def get_identifiers_list(self):
        """
        Returns the complete new identifier list for the stored input, e.g.
        [None, None, None] ---> ['1A', '1B', '1C']
        [None, None, None, '2', '3'] ---> ['1A', '1B', '1C', '2', '3']
        ['5', '6', None, None, None, '7', '8'] ---> ['5', '6', '6A', '6B', '6C', '7', '8']
        """
        identifiers_list = []
        divided_list = self.divide_identifiers()
        for index, group in enumerate(divided_list):
            if group[0] is None:
                before = divided_list[index - 1] if index > 0 else None
                after = divided_list[index + 1] if index < len(divided_list) - 1 else None
                identifiers_list += self.prepare_identifiers(before, group, after)
            else:
                identifiers_list += group
        return identifiers_list
def renumber_section(struc, start, end, new_start):
    """
    Renumbers the residues start..end of a structure,
    giving them consecutive numbers beginning at new_start.
    """
    for offset in range(end - start + 1):
        struc.renumber_residue(str(start + offset), str(new_start + offset))
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
from rna_tools.tools.mini_moderna3.moderna.ModernaStructure import ModernaStructure
from rna_tools.tools.mini_moderna3.moderna.ModernaFragment import ModernaFragment
import os, tempfile
class StructureLibraryError(Exception): pass
class StructureLibrary:
    """
    A class that produces ModernaStructure objects.
    If the same PDB file is loaded twice, the structure is cached.
    The usage of StructureLibrary requires that the PDB files do not change
    on disk at runtime.
    (Implements the Factory Pattern for ModernaStructure)
    """
    def __init__(self, path):
        """path - directory prefix prepended to every PDB file name."""
        self.path = path
        self.structures = {}  # (name, chain) -> cached ModernaStructure
        self.sequences = {}   # (name, chain) -> cached sequence

    def get_structure(self, name, chain='A'):
        """Returns a ModernaStructure object from a PDB file."""
        key = (name, chain)
        if key in self.structures:  # fixed: dict.has_key() was removed in Python 3
            # Cache hit: re-read the file but reuse the known sequence,
            # which skips the sequence recalculation.
            seq = self.sequences[key]
            struc = ModernaStructure('file', self.path+name, chain, seq, new_atoms=False)
            # KR: the following lines are SLOWER
            #struc = self.structures[key]
            #struc = ModernaStructure('residues', [r for r in struc], seq)
        else:
            seq = self.sequences.get(key, None)
            struc = ModernaStructure('file', self.path+name, chain, seq, new_atoms=False)
            seq = struc.get_sequence()
            self.structures[key] = struc
            self.sequences[key] = seq
            struc = ModernaStructure('residues', [r for r in self.structures[key]], seq)
        return struc

    def find_resi_in_lines(self, lines, resi):
        """
        Performs binary tree search for the residue and returns first and last position.

        lines - ATOM/HETATM records of one chain in ascending residue order
        resi  - residue identifier, optionally with a letter suffix ('5', '5A')
        Returns (begin, end) so that lines[begin:end] covers the residue.
        """
        # convert residue number to number/letter pair
        if resi[-1] in '0123456789':
            res_int = int(resi)
            res_char = ' '
        else:
            res_int = int(resi[:-1])
            res_char = resi[-1]
        # NOTE(review): res_char (the insertion code) is parsed but never used
        # below -- residues are matched by number only; confirm intent.
        # perform the tree search
        found = False
        step = len(lines) // 2  # fixed: '/' gives a float index on Python 3
        i = step
        while not found and step > 0:
            step = step // 2
            # compare residue numbers (PDB columns 23-26)
            probe = int(lines[i][22:26].strip())
            if probe == res_int:
                found = True
            elif probe < res_int:
                i += step
            else:
                i -= step
        if not found:
            raise StructureLibraryError("Could not find residue '%s'" % resi)
        # found a position, now determine begin and end
        begin, end = i, i
        while begin > 0 and int(lines[begin-1][22:26].strip()) == res_int:
            begin -= 1
        while end < len(lines) and int(lines[end][22:26].strip()) == res_int:
            end += 1
        return begin, end

    def create_pdb_sniplet_file(self, name, chain, resi_from, resi_to, sequence=None):
        """Returns filename of excerpt from PDB file containing only certain residues."""
        # fixed: close the input file (the handle was leaked before)
        with open(self.path+name) as infile:
            chainlines = [line for line in infile if len(line) > 21 and line[21] == chain]
        chainlines = [line for line in chainlines
                      if line.startswith('ATOM') or line.startswith('HETATM')]
        start, _ = self.find_resi_in_lines(chainlines, resi_from)
        _, end = self.find_resi_in_lines(chainlines, resi_to)
        out = chainlines[start:end]
        # fixed: tempfile.mktemp() is deprecated and race-prone - use mkstemp()
        handle, tmpname = tempfile.mkstemp()
        with os.fdopen(handle, 'w') as outfile:
            outfile.writelines(out)
        return tmpname

    def get_structure_part(self, name, chain, resi_from, resi_to, sequence=None):
        """Reads only certain residues from a PDB file to a structure object."""
        tmpname = self.create_pdb_sniplet_file(name, chain, resi_from, resi_to, sequence)
        struc = ModernaStructure('file', tmpname, chain, seq=sequence)
        os.remove(tmpname)
        return struc

    def get_fragment_part(self, name, chain, resi_from, resi_to,
                          anchor5, anchor3, sequence, keep, seq):
        """Returns a ModernaFragment53 from the library."""
        # NOTE(review): ModernaFragment53 is not imported at the top of this
        # module (only ModernaFragment is), so this call raises NameError --
        # confirm the intended class/import.
        struc = self.get_structure_part(name, chain, resi_from, resi_to, seq)
        return ModernaFragment53(struc, anchor5, anchor3, sequence, keep=keep)
library = StructureLibrary('') | /rna_tools-3.13.7-py3-none-any.whl/rna_tools/tools/mini_moderna3/moderna/fragment_library/StructureLibrary.py | 0.684264 | 0.238151 | StructureLibrary.py | pypi |
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
import sys, re, os, os.path
from rna_tools.tools.mini_moderna3.moderna.ModernaStructure import ModernaStructure
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaSequence import Sequence
from LIR import Lir, LirRecord
from rna_tools.tools.mini_moderna3.moderna.Constants import PATH_TO_LIR_STRUCTURES, PATH_TO_LIR_LIST, ANY_RESIDUE, \
UNKNOWN_RESIDUE_SHORT, MAX_FRAGMENT_LENGTH
from rna_tools.tools.mini_moderna3.moderna.util.LogFile import log
# constants to create LIR test DB.
#PATH_TO_LIR_STRUCTURES = '../test/test_data/lir_test_files/'
#PATH_TO_LIR_LIST = '../test/test_data/lir_chains.txt'
class ResidueList:
    """
    Manages a list of residues from which linkers can be generated.
    If the chain is discontinuous or contains unknown residues it is cut.
    Eg. ACUAXGAG_UUC ---> [[<A><C><U><A>],[<G><A><G>],[<U><U><C>]]
    """
    #KR: introduced this class to have secstruc connected to the residue list.
    def __init__(self, struc):
        """Creates a residue list from an RNA structure."""
        # remove unknown resis
        # NOTE(review): residues are removed while iterating over struc --
        # this relies on ModernaStructure tolerating in-place removal; verify.
        for res in struc:
            if res.long_abbrev == UNKNOWN_RESIDUE_SHORT:
                struc.remove_residue(res.identifier)
        # identify separate portions of the chain
        seq = struc.get_sequence().seq_with_modifications
        secstruc = struc.get_secstruc()
        resi_list = [res for res in struc]
        if '_' not in seq:
            # continuous chain: a single (residues, secstruc) portion
            self.divided_chain = [(resi_list, secstruc)]
        else:
            # cut chain into small pieces (each piece is a list of residues).
            self.divided_chain = []
            x=0
            for fr_seq, fr_secstruc in self._split_seq(seq, secstruc):
                self.divided_chain.append( (resi_list[x:x+len(fr_seq)], fr_secstruc) )
                x += len(fr_seq)
    def _split_seq(self, seq, secstruc):
        """Yields (sequence, secstruc) pairs for each '_'-separated chain piece.

        Keeps two cursors because '_' occupies a position in seq but not in
        secstruc.
        """
        i_seq = 0
        i_secstruc = 0
        seq_start = 0
        secstruc_start = 0
        while i_seq < len(seq):
            if seq[i_seq] == '_':
                # generate fragment
                fr_seq = seq[seq_start:i_seq]
                fr_secstruc = secstruc[secstruc_start:i_secstruc]
                assert len(fr_seq) == len(fr_secstruc)
                yield fr_seq, fr_secstruc
                # start new fragment
                i_seq += 1
                seq_start = i_seq
                secstruc_start = i_secstruc
            i_seq += 1
            i_secstruc += 1
        # generate last fragment
        fr_seq = seq[seq_start:i_seq]
        fr_secstruc = secstruc[secstruc_start:i_secstruc]
        assert len(fr_seq) == len(fr_secstruc)
        yield fr_seq, fr_secstruc
    def divide_chain_fragment(self, chain_fragment, length):
        """
        Returns list of small lists extracted from given residues list.
        Small lists are overlapping.
        Eg. [<1>,<2>,<3>,<4>,<5>,<6>] ---> [[<1>,<2>,<3>],[<2>,<3>,<4>],[<3>,<4>,<5>][<4>,<5>,<6>]]
        Arguments:
        - length of small list (in the example above length = 3)
        """
        # sliding window over one continuous chain piece
        for x in range(len(chain_fragment[0])-(length-1)):
            resi_list = chain_fragment[0][x:x+length]
            secstruc = chain_fragment[1][x:x+length]
            if len(resi_list)>1:
                yield resi_list, secstruc
    def get_fragments(self, length):
        """Generates ([resi],"secstruc") tuples."""
        for chain_fragment in self.divided_chain:
            for small_list in self.divide_chain_fragment(chain_fragment, length):
                yield small_list
class MakeLirFile:
    """
    Makes file with Lir values for all structures from given directory.
    Arguments:
    - path to file with pdb structures
    """
    def __init__(self, path=PATH_TO_LIR_STRUCTURES, list_path=PATH_TO_LIR_LIST, separator='\t'):
        """Reads the structure/chain list and prepares empty containers."""
        self.input_data = self.get_input_list(list_path, separator)
        self.path = path
        # ensure the path ends with a separator (guard against empty path)
        if self.path and self.path[-1] != os.sep:
            self.path += os.sep
        self.all_records = []  # all LirRecord objects collected so far
        self.info = {}  # {pdb_file: {chain: {'record_number': n, 'record_errors': n}}}

    def get_input_list(self, input_file, separator='\t'):
        """Creates dict that contains names of all structures and their chains that should be included in the LIR db."""
        input_dict = {}
        # fixed: close the list file (the handle was leaked before)
        with open(input_file) as infile:
            for line in infile:
                fields = line.strip().split(separator)
                input_dict[fields[0]] = fields[1:]
        return input_dict

    def get_residue_list(self, pdb_file, chain_name):
        """
        Returns a ResidueList object.
        Arguments:
        - pdb file name
        - chain name
        """
        st = ModernaStructure('file', self.path+pdb_file, chain_name)
        return ResidueList(st)

    def get_lir_record(self, resi_list, pdb_filename=None, chain_name=None, secstruc=None):
        """
        Generates record with all Lir values.
        Such record is incomplete and should be completed
        with structure name and chain name.
        """
        seq = Sequence([x.alphabet_entry for x in resi_list])
        if not secstruc:
            # fall back to an unstructured annotation of matching length
            secstruc = '.'*len(seq)
        l = Lir(resi_list[0], resi_list[-1])
        # the first and last residues act as anchors; fr_length counts
        # only the residues between them
        r = LirRecord(fr_length=len(resi_list)-2,
                      structure=pdb_filename,
                      chain=chain_name,
                      preceding_resi=resi_list[0].identifier,
                      following_resi=resi_list[-1].identifier,
                      sequence=Sequence(seq[1:-1]),
                      sequence_anchor=seq,
                      secstruc=secstruc,
                      x=l.x,
                      y=l.y,
                      dist_anchor=l.dist_anchor,
                      beta=l.beta,
                      gamma=l.gamma,
                      omega5=l.omega5,
                      omega3=l.omega3,
                      P_dist=l.P_dist,
                      O5p_dist=l.O5p_dist,
                      C5p_dist=l.C5p_dist,
                      C4p_dist=l.C4p_dist,
                      C3p_dist=l.C3p_dist,
                      O3p_dist=l.O3p_dist,
                      O2p_dist=l.O2p_dist,
                      C1p_dist=l.C1p_dist,
                      N_dist=l.N_dist)
        return r

    def get_records_from_one_chain(self, pdb_file, chain_name):
        """Prepares all possible LirRecords from one chain"""
        stats = {'record_number': 0, 'record_errors': 0}
        self.info[pdb_file][chain_name] = stats
        one_chain_records_list = []
        chain_resi_list = self.get_residue_list(pdb_file, chain_name)
        for fragment_size in range(2, MAX_FRAGMENT_LENGTH):
            for small_resi_list, secstruc in chain_resi_list.get_fragments(fragment_size):
                try:
                    new_record = self.get_lir_record(small_resi_list, pdb_file, chain_name, secstruc)
                    if 'x' not in new_record.sequence_anchor.seq_with_modifications:
                        one_chain_records_list.append(new_record)
                        stats['record_number'] += 1
                    else:
                        stats['record_errors'] += 1
                # fixed: bare 'except:' also trapped SystemExit/KeyboardInterrupt
                except Exception:
                    stats['record_errors'] += 1
        return one_chain_records_list

    def get_records_from_one_structure(self, pdb_file):
        """
        Prepares all possible LirRecords for one structure.
        Returns dict: key - chain_name, value - list of LirRecords
        """
        self.info[pdb_file] = {}
        one_structure_records_list = []
        chains = self.input_data[pdb_file]
        for ch in chains:
            one_structure_records_list += self.get_records_from_one_chain(pdb_file, ch)
        return one_structure_records_list

    def generate_lir_db(self):
        """
        Generates dict with all possible records for structures from given directory
        { pdb_code : {chain_name : [residues_list] } }
        """
        for pdbfile in self.input_data:
            log.write_message(pdbfile)
            self.all_records += self.get_records_from_one_structure(pdbfile)

    def get_lir_statistics(self):
        """Returns a dict with summary counts over all collected records."""
        result = {}
        result['Number of records:'] = len(self.all_records)
        result['Number of structures:'] = len(self.info)
        errors = 0
        chains = 0
        for pdb in self.info:
            chains += len(self.info[pdb])
            for ch in self.info[pdb]:
                errors += self.info[pdb][ch]['record_errors']
        result['Number of chains:'] = chains
        result['Number of errors:'] = errors
        # histogram of fragment lengths (keys are strings for the report)
        fr_length = {}
        for x in range(0, MAX_FRAGMENT_LENGTH):
            fr_length[str(x)] = 0
        for r in self.all_records:
            fr_length[str(r.fr_length)] += 1
        for x in range(0, MAX_FRAGMENT_LENGTH):
            result['Number of %2i residues long fragments:' % x] = fr_length[str(x)]
        return result

    def write_lir_db_to_file(self, db_filename='LIRdb_new_version', log_filename='LIRdb_new_version.log'):
        """Writes all records to db_filename and a statistics report to log_filename."""
        with open(db_filename, 'w') as f:
            f.write('fragment length\tstructure\tchain\tpreceding residue\tfollowing residue\tfragment sequence\tsequence with anchor\tP dist\tO5p dist\tC5p dist\tC4p dist\tC3p dist\tO3p dist\tO2p dist\tC1p dist\tN* dist\n')
            for record in self.all_records:
                f.write(record.get_txt('\t')+'\n')
        with open(log_filename, 'w') as g:
            g.write('GENERAL FRAGMENT STATISTICS\n\n')
            statistics_dict = self.get_lir_statistics()
            # fixed: dict.keys() has no .sort() on Python 3 - use sorted()
            for key in sorted(statistics_dict):
                g.write(key+str(statistics_dict[key])+'\n')
            g.write('\n'+50*'_'+'\n'+'DETAILED FRAGMENT STATISTICS\n\n')
            for pdb in self.info:
                for ch in self.info[pdb]:
                    g.write('Structure: %s\tChain: %s\tRecords: %s\tErrors: %s\n' %(pdb, ch, str(self.info[pdb][ch]['record_number']), str(self.info[pdb][ch]['record_errors'])))
if __name__ == '__main__':
    # Build the complete LIR fragment database and write it to disk.
    log.print_all = True
    lir_builder = MakeLirFile()
    lir_builder.generate_lir_db()
    lir_builder.write_lir_db_to_file()
__author__ = "Kristian Rother, Magdalena Rother, Tomasz Puton"
__copyright__ = "Copyright 2008, The Moderna Project"
__license__ = "GPL"
__credits__ = ["Janusz Bujnicki"]
__maintainer__ = "Kristian Rother"
__email__ = "krother@rubor.de"
__status__ = "Production"
from math import sqrt
from numpy import array, dot, zeros, transpose, eye
from numpy.linalg import linalg
# Reflection matrix: identity with a flipped z axis. Used in
# FCCDLoopCloser.optimize_vector() to turn an improper rotation
# (negative determinant from the SVD) into a proper one.
S = eye(3)
S[2, 2] = -1
class FCCDError(Exception): pass
class FCCDLoopCloser:
    """
    Class for running the FCCD loop closure algorithm
    on a list of fixed and a set of moved atoms.
    """
    def __init__(self, moving, fixed):
        """
        moving - list of 6+ atoms that are moved.
        fixed - list of three atoms that stay the same.
        All atoms should be Bio.PDB.Atom objects.
        """
        if len(moving) < 6:
            raise FCCDError("""Moving should have at least length 6
            (3 atoms at start, plus 3 to match with fixed.""")
        if len(fixed) != 3:
            # NOTE(review): the '""""' below makes the message start with a
            # stray '"' character; probably '"""' was intended.
            raise FCCDError(""""Fixed should have length 3
            (atoms at the end to be closed)""")
        self.moving_atoms = moving
        # work on Bio.PDB Vector copies; the atom objects are only written
        # back in copy_vectors_to_atoms()
        self.moving = [m.get_vector() for m in moving]
        self.fixed = [f.get_vector() for f in fixed]
        # Coordinates along COLUMNS
        self.moving_coords = zeros((3, 3), 'd')
        self.fixed_coords = zeros((3, 3), 'd')
    def calc_rmsd(self):
        """Returns RMSD fit of last 3 moving vectors to fixed vectors."""
        rmsd = 0.0
        for i_vec in range(1, 4):
            # compare the k-th last moving vector with the k-th last fixed one
            dist = self.moving[-i_vec] - self.fixed[-i_vec]
            rmsd += dist.norm()**2
        return sqrt(rmsd/3.0)
    def copy_vectors_to_atoms(self):
        """Copies the current coordinates to atom objects."""
        # the last three moving atoms are excluded (they overlap the fixed ones)
        for vec, atom in zip(self.moving, self.moving_atoms[:-3]):
            atom.coord = array([vec[0], vec[1], vec[2]])
    def get_moving_coords(self, center):
        """
        move to pivot origin
        Prepares arrays with the shifted coordinates
        of the three last atoms of both chains.
        """
        for i_vec in range(0, 3):
            # last three moving vectors, shifted so 'center' is the origin
            index = -(3 - i_vec)
            vec = self.moving[index] - center
            self.moving_coords[:, i_vec] = vec.get_array()
        return self.moving_coords
    def get_fixed_coords(self, center):
        """
        move to pivot origin
        Prepares arrays with the shifted coordinates
        of the three last atoms of both chains.
        """
        for i_vec in range(0, 3):
            vec = self.fixed[i_vec] - center
            self.fixed_coords[:, i_vec] = vec.get_array()
        return self.fixed_coords
    def accept_rotation(self):
        """
        Abstract method that could be used to apply
        angle constraints or Metropolis criterion.
        """
        return True
    def apply_rotation(self, rot_matrix, i_vec, center):
        """Adjusts the coordinates"""
        # rotate every vector downstream of the pivot atom i_vec
        for j_vec in range(i_vec+1, len(self.moving)):
            vec = self.moving[j_vec] - center
            vec = vec.left_multiply(rot_matrix)
            vec = vec + center
            self.moving[j_vec] = vec
    def optimize_vector(self, i_vec):
        """
        Optimizes torsion and bond angles by rotation around
        one atom i.
        1. moves coordinate origin to that atom.
        2. generate a rotation matrix from a singular value decompositon
        3. rotate all atoms after i.
        """
        center = self.moving[i_vec]
        moving_coords = self.get_moving_coords(center)
        fixed_coords = self.get_fixed_coords(center)
        # Do singular value decomposition
        a = dot(fixed_coords, transpose(moving_coords))
        u, d, vt = linalg.svd(a)
        # Check reflection
        # S (module-level) flips the z axis so an improper rotation
        # (det < 0) becomes a proper one
        if (linalg.det(u) * linalg.det(vt))<0:
            u = dot(u, S)
        # Calculate rotation
        rot_matrix = dot(u, vt)
        # Apply rotation
        if self.accept_rotation():
            self.apply_rotation(rot_matrix, i_vec, center)
    def run_fccd(self, threshold=0.2, maxit=100):
        """
        The moving vectors are changed until its last three elements
        overlap with the fixed ones with a RMSD smaller than threshold.
        """
        # NOTE(review): if maxit <= 0 the loop never runs and 'rmsd' is
        # referenced before assignment in the final return -- verify callers.
        n_it = 0
        while n_it < maxit:
            # sweep over all pivots except the three anchor atoms at each end
            for i_vec in range(2, len(self.moving) - 2):
                self.optimize_vector(i_vec)
            rmsd = self.calc_rmsd()
            if rmsd < threshold:
                return "RMSD threshold reached", rmsd, n_it
            n_it += 1
        return "Stop - max iterations reached", rmsd, n_it
__author__ = "Pawel Skiba, Magdalena Rother, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Pawel Skiba"
__email__ = "pw.skiba@gmail.com"
__status__ = "Prototype"
from rna_tools.tools.mini_moderna3.moderna.Constants import DATA_PATH
from rna_tools.tools.mini_moderna3.moderna.util.LogFile import log
class IsostericityMatrices:
    """
    Isostericity matrices implementation in Python.
    Loads the matrix file once and answers isostericity queries from memory.
    """
    def __init__(self):
        self.source = open(DATA_PATH+"IsostericityMatrices.txt", "r")
        self.matrices = self.import_matrices()
        self.source.close()

    def import_matrices(self):
        """
        Reads data from the source txt file and prepares the dictionary:
        {base_pair: {interaction_type: {base_pair: value_or_None}}}
        """
        matrices = {}
        for line in self.source:
            if line.startswith("Type: "):
                # header line introduces a new (base pair, interaction) matrix
                master_key = line.split(" ")[1]
                type_key = line.split(" ")[2].strip()
                if master_key not in matrices:  # fixed: no need for .keys()
                    matrices[master_key] = {}
                matrices[master_key][type_key] = {}
            elif line.startswith("\n"):
                continue
            else:
                # data line: '<base pair>: <value>' (empty value means None)
                data = line.split(": ")
                if data[1] == '\n':
                    matrices[master_key][type_key][data[0]] = None
                else:
                    matrices[master_key][type_key][data[0]] = float(data[1].strip())
        return matrices

    def check_isostericity(self, bp1, bp2, interact_type, max_value=1.0):
        """
        Returns True if basepair1 is isosteric to basepair2 when interaction type is interact_type
        """
        try:
            result = self.matrices[bp1][interact_type][bp2]
        except KeyError:  # fixed: bare 'except:' masked unrelated errors
            log.write_message("No information in IsostericityMatrices about: "+bp1+"->"+bp2+" ("+interact_type+")")
            return False
        log.write_message(bp1+"->"+bp2+" ("+interact_type+")")
        # fixed: test for None BEFORE comparing; 'None <= float'
        # raises TypeError on Python 3
        return result is not None and result <= max_value

    def show_isosteric_bp(self, bp1, interact_type, max_value=1.0):
        """
        Returns a tuple with all base pairs isosteric to bp1 when interaction type is interact_type
        """
        pairs = self.matrices[bp1][interact_type]
        # fixed: None test first (same Python 3 TypeError issue as above)
        result = [bp for bp in pairs if pairs[bp] is not None and pairs[bp] <= max_value]
        return tuple(result)
##########################################################################
if __name__ == '__main__':
    # Smoke test; requires the isostericity matrix data file to be present.
    # fixed: Python 2 'print' statements were a SyntaxError under Python 3
    test = IsostericityMatrices()
    print(test.check_isostericity('AC', 'GU', 'cWW'))  # Is isosteric
    print(test.check_isostericity('AA', 'GG', 'cWW'))  # Is None
    print(test.check_isostericity('AC', 'UU', 'cWW'))  # Is not isosteric
    print(test.show_isosteric_bp('AC', 'cWW'))
__author__ = "Magdalena Musielak, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Musielak"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
# Maps a ModeRNA command name to a usage-example snippet (shown by help).
# fixed: the keys 'analyze_geometry', 'find_fragment' and 'find_modifications'
# each appeared twice; a later duplicate silently overwrote the first value,
# so the dead first literals were removed and the surviving values kept at
# the original key positions.
COMMAND_EXAMPLES = {
    'add_modification':
    '''# add a 2-methyl-Guanosine in position 63.
m = load_model('1QF6_B_tRNA.pdb', 'B')
add_modification(m['63'], 'm2G')''',
    'add_all_modifications': """add_all_modifications(t,a,m)""",
    'add_pair_to_base': """add_pair_to_base(m, '2', '20', 'C')""",
    'analyze_geometry':
    '''t = load_template('1QF6_B_tRNA.pdb', 'B')
analyze_geometry(t)''',
    'apply_indel':
    '''# prepare a model
t = load_template('1QF6_B_tRNA.pdb', 'B')
m = create_model()
copy_some_residues(t['31':'35']+t['38':'42'],m)
# Find the best fitting fragment for the missing two
apply_indel(m, '35', '38', 'CA')''',
    'apply_alignment': """apply_alignment(t,a,m)""",
    'apply_all_indels': """apply_all_indels(a,m)""",
    'apply_missing_ends': """apply_missing_ends(a, m)""",
    'change_sequence': """change_sequence(m, 'AGCUAGCU')""",
    'clean_structure':
    '''# cleaning up a loaded template:
# removes water, ions, amino acids, and unknown residues
# replaces O1P and O2P in atom names by OP1 and OP2
# replaces * in atom names by '
clean_structure(t)''',
    'create_model': """m = create_model()
m = create_model(model_chain_name='K')
m = create_model(t,a)
m = create_model(t,a,model_chain_name='K')""",
    'copy_identical_residues': """copy_identical_residues(t,a,m)
copy_identical_residues(t,a,m,strict=0)
copy_identical_residues(t,a,m,modifications=0)""",
    'copy_single_residue': """copy_single_residue(t['3'],m)
copy_single_residue(t['5A'],m)
copy_single_residue(t['3'],m,'15')
copy_single_residue(m['5A'],m,'100')
copy_single_residue(t['3'],m,'3B',strict=0)""",
    'copy_some_residues': """copy_some_residue(t['3':'5'],m)
copy_some_residue(t['3':'5A'],m)
copy_some_residue([t['3'],t['7'],t['8']],m)
copy_some_residue([t['3'],t['7']],m,new_numbers=['100','101'])
copy_some_residue(t['3':'12'],m,strict=0)""",
    'create_fragment': """f = create_fragment('single_strand.pdb',anchor5=m['20'])
f = create_fragment('single_strand.pdb', chain_name='A', anchor3=m['1'])
f = create_fragment('hairpin.pdb', anchor5=m['12'], anchor3=m['15'])
f = create_fragment('hairpin.pdb', anchor5=m['12'], anchor3=m['15'], sequence='AG')""",
    'delete_residue': """delete_residue('6',m)""",
    'examine_structure': '''
# examine a loaded template for irregularities:
examine_structure(t)
examine_structure(t,'logfile.log')''',
    'exchange_single_base': """exchange_single_base(m['3'],'C')
exchange_single_base(t['3'],'G',m)
exchange_single_base(t['3'],'G',m,new_number='5A')
exchange_single_base(t['3'],'G') # modifies the template!""",
    'exchange_some_bases': """exchange_some_bases(m['3':'5'],['C','U','G'])
exchange_some_bases(t['3':'5'],['C','U','G'],m)
exchange_some_bases([t['3'],t['10']],['A','G'],m,['103','106'])
exchange_some_bases(t['3':'5'],['C','U','G']) # modifies the template!""",
    'exchange_mismatches': """exchange_mismatches(t,a,m)""",
    'extend_helix': """extend_helix(m, '2', '37', 'CGAA_UUCG')""",
    'find_fragment':
    '''candidates = find_fragment(m, '7', '12', 'AGCU', 20)
insert_fragment(m, candidates[0])''',
    'find_modifications':
    '''# find modifications in any structure file
t = load_template('1QF6_B_tRNA.pdb', 'B')
print find_modifications(t)''',
    'find_clashes': """find_clashes(m)
find_clashes([m['5'], m['6'], m['7']])
pairs = find_clashes(m)""",
    'fix_backbone': '''
m = load_model('broken_bb.pdb','A')
print m.get_sequence()
# check and fix the entire model
fix_backbone(m)
print m.get_sequence()
# check and fix the connection between residues 4 and 5
fix_backbone(m, '4', '5')''',
    'get_base_pairs': """bp = get_base_pairs(struc)
print bp""",
    'get_sequence': """get_sequence(t)
seq = get_sequence(m)""",
    'get_secstruc': """get_secstruc(t)
ss = get_secstruc(m)""",
    'get_stacking': """stacking = get_stacking(struc)
print stacking""",
    'insert_fragment': """insert_fragment(m,f)""",
    'insert_two_strand_fragment': """insert_two_strand_fragment(m, '2', '37', '5', '34',\
'101', '120', '107', '114', 'my_fragment.pdb', 'A')
# fragment candidates returned by find_fragment go as well
insert_fragment(m, candidates[2])""",
    'load_template': """t = load_template('1F1T.pdb')
t = load_template('1F1T.pdb','A')""",
    'load_alignment': """a = load_alignment('alignment_1F1T.fasta')""",
    'load_model':
    '''m = load_model('1F1T.pdb')
m = load_model('1F1T.pdb','A')
m = load_model(biopy_struc, data_type='structure')
m = load_model(biopy_struc[0].child_list[0], data_type='chain')''',
    'match_alignment_with_model': """match_alignment_with_model(a,m)
boolean = match_alignment_with_model(a,m)""",
    'match_template_with_alignment': """match_template_with_alignment(t,a)
boolean = match_template_with_alignment(t,a)""",
    'renumber_chain': """renumber_chain(m,'1')""",
    'remove_modification': """remove_modification(m['5'])
remove_modification(t['5'], m)
remove_modification(t['5'], m, '5A')""",
    'remove_all_modifications': """remove_all_modifications(m)""",
    'remove_mismatching_modifications': """remove_mismatching_modifications(t,a,m)""",
    'rotate_chi': """rotate_chi(m['5'], 90)""",
    'shrink_helix': """shrink_helix(m, '2', '37', '5', '34')""",
    'write_logfile': """write_logfile()
write_logfile('log.txt')""",
    'write_model': """write_model(m)
write_model(m, 'output.pdb')
write_model(m, 'output.pdb', 'log.txt')""",
    'write_fragment_candidates':
    '''candidates = find_fragment(m, '35', '38', 'CA', 20)
write_fragment_candidates(candidates, 'my_candidates')''',
    'write_secstruc': """m = load_model('1F1T.pdb','A')
write_secstruc(m, '1F1T_secondary_structure.vienna')"""
}
# Alphabetically sorted list of all command names.
COMMANDS = sorted(COMMAND_EXAMPLES)
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
"""
A procedure for calculating stacking of RNA nucleotides.
The definition of base stacking from Major & Lemieux
MC-Annotate paper (JMB 2001, 308, p.919ff):
"Stacking between two nitrogen bases is considered
if the distance between their rings is less
than 5.5 Ang., the angle between the two normals to
the base planes is inferior to 30 deg., and the angle
between the normal of one base plane and the vector
between the center of the rings from the two
bases is less than 40 deg."
There are two classes defined here:
- ResidueVector
- StackingCalculator
The latter class should be used for calculating stacking. There are two
public methods inside StackingCalculator class that can be used
for calculating stacking:
- process_pdbfile(file_name, chain_id='A') - which runs StackingCalculator
on the RNA from the 'file_name'.
The second parameter is optional and has to be set, if the chain ID
of RNA from PDB file is different than 'A'.
"""
import sys
from numpy import array, add, cross, sqrt, arccos
from rna_tools.tools.mini_moderna3.moderna import *
from rna_tools.tools.mini_moderna3.moderna.Constants import NORMAL_SUPPORT, ARCPI
# Maps (normals_point_same_way, partner_is_on_top) to a stacking symbol;
# consumed by ResidueVector.get_stacking() below.
STACKINGS = {
    (True, True): '>>',
    (True, False): '<<',
    (False, False): '<>',
    (False, True): '><',
    }
# vector placeholder functions
# code snatched from Scientific.Geometry
def angle(vec_a, vec_b):
    """Return the angle between two vectors, scaled by ARCPI
    (degrees if ARCPI == 180/pi -- defined in Constants)."""
    dot_ab = add.reduce(vec_a*vec_b)
    norm_product = sqrt(add.reduce(vec_a*vec_a) * add.reduce(vec_b*vec_b))
    # clamp the cosine into [-1, 1] against floating point drift
    cosine = min(1., max(-1., dot_ab / norm_product))
    return arccos(cosine) * ARCPI
class StackingInteraction(object):
    """Holds one stacking contact between two residues."""

    def __init__(self, resi1, resi2, stack_type):
        """Creates a stacking object."""
        self.resi1 = resi1
        self.resi2 = resi2
        self.type = stack_type

    def __repr__(self):
        return f"{self.resi1.identifier} {self.type} {self.resi2.identifier}"
class ResidueVector(object):
    """
    Residue class with center vector and normal vector for stacking calculation.
    """
    def __init__(self, residue):
        """
        Creates a dictionary of coordinate arrays for each atom from a ModernaResidue.
        """
        self.residue = residue
        self.atoms = {}
        for atom in residue.get_list():
            atom_name = atom.get_fullname().strip().upper()
            self.atoms[atom_name] = residue[atom_name].coord
        # names of the six ring atoms spanning the base plane
        # (None for bases without an entry in NORMAL_SUPPORT)
        self.normal_set = NORMAL_SUPPORT.get(residue.original_base)
        self.normal = None  # unit normal of the base plane (set by calculate_vectors)
        self.center = None  # ring center (set by calculate_vectors)
    def is_valid(self):
        """Checks that the base is supported and all necessary atoms are present."""
        # fixed: an unsupported base (normal_set is None) used to pass this
        # check and later crashed calculate_vectors(); reject it here instead.
        if not self.normal_set:
            return False
        for name in self.normal_set:
            if name not in self.atoms:
                return False
        return True
    def calculate_vectors(self):
        """
        Constructs the normal vectors for nucleotide bases.
        Returns a tuple of vectors, the first pointing
        from O to the center of the six-ring of the according base,
        and the second being the normal
        vector according to the definition of Major & Thibault 2006.
        Assumes the residue has a complete set of atoms.
        """
        # sum all six atom vectors up to get center point.
        asum = array([0.0, 0.0, 0.0])
        for atomname in self.normal_set:
            asum += self.atoms[atomname]
        self.center = asum / 6.0
        # get two pairs of atoms spanning a plane
        # and calculate the normal vector
        atoma = self.atoms[self.normal_set[1]] - self.atoms[self.normal_set[0]]
        atomb = self.atoms[self.normal_set[3]] - self.atoms[self.normal_set[2]]
        self.normal = cross(atoma, atomb)
        self.normal = self.normal/sqrt(add.reduce(self.normal*self.normal))
    def calc_angles(self, rvec):
        """
        Calculates whether the distance and angles between the vectors are OK.
        Returns a tuple of (dist,nn_angle,n1cc_angle,n2cc_angle) or
        (None, None, None, None) if the residues cannot stack.
        """
        # calculate the distance between the two ring centers
        ccvec = rvec.center - self.center
        dist = sqrt(add.reduce(ccvec*ccvec))  # vector length
        # check whether the distance is small enough to allow stacking
        if 0.0 < dist < 5.5:
            # check whether the angles are in the allowed range
            nn_angle = angle(self.normal, rvec.normal)
            if (nn_angle < 30 or nn_angle > 150):
                n1cc_angle = angle(self.normal, ccvec)
                n2cc_angle = angle(rvec.normal, ccvec)
                return (dist, nn_angle, n1cc_angle, n2cc_angle)
        return (None, None, None, None)
    def get_stacking(self, rvec):
        """
        Returns a StackingInteraction with one of the types
        (<<, >>, <>, ><) for the two residues.
        Or None, if they are not stacked.
        """
        distance, nn_ang, n1cc_ang, n2cc_ang = self.calc_angles(rvec)
        if distance and (n1cc_ang < 40 or n1cc_ang > 140
                         or n2cc_ang < 40 or n2cc_ang > 140):
            # find out whether the normals are opposed or straight
            # (pointing in the same direction).
            if nn_ang < 30:
                straight = True
            elif nn_ang > 150:
                straight = False
            else:
                return None  # invalid normal angle
            # find out whether base2 is on top of base1:
            # does following the normal of base1 bring us closer to base2?
            n1c2 = rvec.center - self.center - self.normal
            n1c2dist = sqrt(add.reduce(n1c2*n1c2))  # vector length
            is_up = n1c2dist < distance
            stacktype = STACKINGS[(straight, is_up)]
            return StackingInteraction(self.residue, rvec.residue, stacktype)
class StackingCalculator:
    """
    Calculates stacking of nucleotide bases according
    to the definition of Major & Thibault 2006.
    Input are residues as parsed by Bio.PDB or Moderna.
    Output are the two residue objects and
    >> << <> >< stacking codes.
    """
    def get_stacking(self, moderna_struct):
        """
        Loops through all the residues in a ModernaStructure object,
        calls the stacking calculation procedure for all of them.
        The method returns list of tuples. Each tuple contains:
        - Residue Object one
        - Residue Object two
        - stacking type of these residues (>>, <<, <> or ><)
        """
        result = []
        rvectors = self.calc_residue_vectors(moderna_struct)
        for record in self.calc_stacking(rvectors):
            # NOTE(review): StackingInteraction defines no __eq__, so this
            # membership test compares by identity and cannot filter
            # duplicates that are distinct objects -- confirm intent.
            if record not in result:
                result.append(record)
        return result
    def calc_residue_vectors(self, moderna_struct):
        """
        Precalculates vectors on each residue to make calculations faster.
        """
        # residues failing is_valid() (missing ring atoms) are skipped
        rvectors = []
        for residue in moderna_struct:
            rv = ResidueVector(residue)
            if rv.is_valid():
                rv.calculate_vectors()
                rvectors.append(rv)
        return rvectors
    def calc_stacking(self, rvectors):
        """
        Calculates stacking for all residues.
        Generates tuples of (residue1,residue2,stacking_type).
        """
        # test every unordered pair of residues exactly once
        n_residues = len(rvectors)
        for i in range(n_residues-1):
            resvec1 = rvectors[i]
            for j in range(i+1, n_residues):
                resvec2 = rvectors[j]
                stacking = resvec1.get_stacking(resvec2)
                if stacking:
                    yield stacking
__author__ = "Kristian Rother"
__copyright__ = "Copyright 2008, Kristian Rother"
__credits__ = ["Sabrina Hofmann"]
__license__ = "GPL"
__maintainer__ = "Kristian Rother"
__email__ = "krother@rubor.de"
__status__ = "Production"
from .MolParameters import *
import re
class Bond:
    """Connection between two atoms, carrying a valence."""
    def __init__(self, atom1, atom2, valence, special=(0, 0, 0)):
        # NOTE(review): 'special' is accepted for interface compatibility
        # but never stored, exactly as in the original implementation.
        self.atom1 = atom1
        self.atom2 = atom2
        self.valence = valence


class Atom(dict):
    """Atom that has several default and lots of optional properties."""
    bond_class = Bond

    def __init__(self, element):
        """
        element - string indicating an element from the periodic system.
        """
        dict.__init__(self)
        self.element = element
        self.bonds = []
        self.n_bonds = 0
        self.added_valences = 0
        self['atom_name'] = element
        self['attributes'] = []
        self._is_fully_bonded = False  # cached flag (speedup)

    def add_bond(self, atom, valence, special=(0, 0, 0)):
        """Registers a bond from this atom to another one."""
        self.bonds.append(self.bond_class(self, atom, valence, special))
        self.added_valences += valence
        self.n_bonds += 1

    def add_attribute(self, att):
        """Adds an attribute tag unless it is already present."""
        attributes = self['attributes']
        if att not in attributes:
            attributes.append(att)

    def is_fully_bonded(self):
        """Returns True once the atom cannot take further bonds."""
        if not self._is_fully_bonded:
            n_bonds = len(self.bonds)
            # valence thresholds are doubled to account for aromatic bonds
            if self.element == 'O':
                self._is_fully_bonded = n_bonds >= 2 or self.added_valences >= 4
            elif self.element in ('C', 'N'):
                self._is_fully_bonded = n_bonds >= 4 or self.added_valences >= 6
        return self._is_fully_bonded

    def __str__(self):
        return self.get('bondschema', self.element)

    def detect_bonding_schema(self):
        """
        creates a string that summarizes number and element of neighbors
        e.g. 'CC1H111'
        and the ProtOr atomtype according to Tsai 1999
        """
        valences_by_element = {}
        for bond in self.bonds:
            valences_by_element.setdefault(bond.atom2.element, []).append(str(bond.valence))
        atomtype = self.element
        for elem in sorted(valences_by_element):
            # make a string of the form CC1O12
            atomtype += elem + ''.join(sorted(valences_by_element[elem]))
        self['bondschema'] = atomtype
        # NOTE(review): the ProtOr type below is computed but never stored,
        # matching the original code; confirm whether it should be kept.
        protortype = self.element
        if atomtype[0] != 'H':
            bonded_h = len(valences_by_element.get('H', []))
            protortype += "%iH%i" % (len(self.bonds), bonded_h)

    def get_molstring(self, taboo_list, depth=-1):
        """
        Recursive procedure that builds a string from bond schema strings.
        The entire sub-molecule appears there,
        e.g. C1111(C1111(H1,H1,H1),H1,H1) for an ethyl group.
        Requires the 'bondschema' key to be set for full information.
        Branch strings are sorted alphabetically.
        """
        if depth == 0:
            return ""
        # mark all direct neighbors as visited before descending
        new_taboo = taboo_list[:]
        new_taboo.extend(b.atom2 for b in self.bonds if b.atom2 not in taboo_list)
        branches = sorted(
            b.atom2.get_molstring(new_taboo, depth - 1)
            for b in self.bonds if b.atom2 not in taboo_list
        )
        text = str(self)
        if branches:
            text += "(" + ','.join(branches) + ")"
        return text
class Molecule(list):
    """
    Contains a data structure with atoms and bonds that can be created from
    a .mol file, a .mol2 file or from a Bio.PDB.Residue object.
    The molecule itself is a list of Atom objects.
    """
    atom_class = Atom

    def __init__(self):
        list.__init__(self)
        self.conjugated_systems = []

    def convert_pdb_atom(self, atom):
        """Returns an Atom object from a PDB.Atom object (None for hydrogens)."""
        element = atom.id[0]
        if element in "1234567890":
            element = atom.id[1]
        if len(atom.id) > 1 and element == 'S' and atom.id[1].upper() == 'E':
            element = 'Se'
        at = self.atom_class(element)
        if element == 'H':
            return None
        at['atom_name'] = atom.id
        return at

    def parse_resi(self, resi):
        """
        Creates a molecule object from a Bio.PDB.Residue object.
        Crude solution that has to be checked for better distance constraints.
        Do not use it for other things as nucleotide bases.
        Hydrogens are skipped.
        """
        atoms = [atom for atom in resi if atom.name.strip()[0] != 'H']
        # create atoms
        for atom in atoms:
            at = self.convert_pdb_atom(atom)
            if at:
                self.append(at)
        # create bonds: classify each close pair by which distance window
        # (single/double/triple) the interatomic distance falls into
        for i, atom1 in enumerate(self):
            if atom1.is_fully_bonded():
                continue
            for j in range(i + 1, len(self)):
                atom2 = self[j]
                if atom2.is_fully_bonded():
                    continue
                distance = atoms[i] - atoms[j]
                if distance < MAX_BOND_DISTANCE:
                    min_sgl, max_sgl = SINGLE_BOND_DISTANCES.get(atom1.element, {}).get(atom2.element, DEFAULT_BOND_LENGTHS[0])
                    min_dbl, max_dbl = DOUBLE_BOND_DISTANCES.get(atom1.element, {}).get(atom2.element, DEFAULT_BOND_LENGTHS[1])
                    min_tri, max_tri = TRIPLE_BOND_DISTANCES.get(atom1.element, {}).get(atom2.element, DEFAULT_BOND_LENGTHS[2])
                    valence = 0
                    if min_sgl < distance < max_sgl:
                        valence = 1  # single bond
                    elif min_dbl < distance < max_dbl:
                        valence = 2  # double bond
                    elif min_tri < distance < max_tri:
                        valence = 3  # triple bond
                    if valence:
                        atom1.add_bond(atom2, valence, (0, 0, distance))
                        atom2.add_bond(atom1, valence, (0, 0, distance))

    def parse_molfile(self, filename):
        """
        Reads a .mol file and parses the contents.
        """
        with open(filename) as mol_file:
            for l in mol_file:
                if re.search(r"^\s+\-*\d+\.\d+", l):
                    # parse line with atom coordinates
                    t = re.sub(r"\s+", "\t", l[:-1]).split('\t')
                    atom = self.atom_class(element=t[4])
                    self.append(atom)
                    atom['coordinates'] = (float(t[1]), float(t[2]), float(t[3]))
                    atom['rdf_index'] = int(l[60:63])  # atom mapping from BioPath RDF files
                elif re.search(r"^\s*\d+\s*\d+\s+\d+\s+\d+\s+\d+\s+\d+(\s+\d+)*\s*\n*\Z", l):
                    # parse bonds line
                    atom1 = self[int(l[0:3]) - 1]
                    atom2 = self[int(l[3:6]) - 1]
                    valence = int(l[6:9])
                    special = (int(l[9:12]), int(l[12:15]), int(l[15:18]))
                    # the special features indicate the orientation of the sugar hydroxyl groups.
                    atom1.add_bond(atom2, valence, special)
                    atom2.add_bond(atom1, valence, special)
                elif re.search(r'^...END', l):
                    return

    def parse_mol2(self, filename):
        """
        Reads a .mol2 file and parses the contents.
        """
        with open(filename) as mol2_file:
            for l in mol2_file:
                if re.search(r"^[\s\d]{7}\s[^\s]+\d+", l):
                    # parse coordinates line
                    # BUGFIX: the original called string.split(), which no
                    # longer exists in Python 3; use the str method instead.
                    t = re.sub(r"\s+", "\t", l[:-1]).split('\t')
                    atom = self.atom_class(element=re.sub(r'\d', '', t[2]))
                    self.append(atom)
                    atom['coordinates'] = (float(l[17:26]), float(l[27:36]), float(l[37:46]))
                    atom['rdf_index'] = int(l[:7])  # atom mapping from BioPath RDF files
                elif re.search(r"^\s*\d+\s+\d+\s+\d+\s+[^\s]+\s*\n*\Z", l):
                    # parse bonds line
                    atom1 = self[int(l[6:12]) - 1]
                    atom2 = self[int(l[12:18]) - 1]
                    valence = l[21:23]
                    if valence == 'ar':
                        valence = 2
                        atom1['attributes'].append('aromatic')
                        atom2['attributes'].append('aromatic')
                    elif valence == 'am':
                        valence = 2
                    else:
                        valence = int(valence)
                    atom1.add_bond(atom2, valence)
                    # BUGFIX: the original read 'valencel' here, a typo that
                    # raised NameError whenever a bond line was parsed.
                    atom2.add_bond(atom1, valence)
__author__ = "Kristian Rother"
__copyright__ = "Copyright 2008, Genesilico"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Kristian Rother"
__email__ = "krother@rubor.de"
__status__ = "beta"
class GeometryStandards:
    """Defines allowed and disallowed geometry values.

    Each dictionary below maps an atom descriptor to a list of
    (lower, upper) intervals of allowed values: bond lengths in
    Angstrom, angles and dihedrals in degrees.
    Descriptor syntax: "X:<atom>" refers to the current residue,
    "X+1:<atom>" to the following residue, and "X[A]:" etc. restricts
    the rule to a given base type (A, G, C or U).
    """
    # allowed bond-length intervals (Angstrom)
    bonds = {
        "X:P,X:OP1":[( 1.40762, 1.72316)],
        "X:P,X:OP2":[( 1.38361, 1.70171)],
        "X:P,X:O5'":[( 1.50116, 1.8041)], # 2.08041
        "X:O3',X+1:P":[( 1.50116, 1.8041)], # 2.08041
        "X:O5',X:C5'":[( 1.28865, 1.62451)],
        "X:C5',X:C4'":[( 1.38598, 1.60948)],
        "X:C4',X:O4'":[( 1.37066, 1.54065)],
        "X:C4',X:C3'":[( 1.41850, 1.66191)],
        "X:O4',X:C1'":[( 1.34674, 1.57400)],
        "X:C3',X:O3'":[( 1.24104, 1.62451)],
        "X:C3',X:C2'":[( 1.36441, 1.61662)],
        "X:C2',X:O2'":[( 1.27818, 1.68238)],
        "X:C2',X:C1'":[( 1.41341, 1.63283)],
        "X[A]:C1',X[A]:N9":[( 1.39697, 1.53180)],
        "X[A]:N9,X[A]:C8":[( 1.30771, 1.47345)],
        "X[A]:C8,X[A]:N7":[( 1.24335, 1.47325)],
        "X[A]:N7,X[A]:C5":[( 1.27894, 1.42486)],
        "X[A]:C6,X[A]:N6":[( 1.28686, 1.38031)],
        "X[A]:C6,X[A]:C5":[( 1.34945, 1.47031)],
        "X[A]:C5,X[A]:C4":[( 1.30790, 1.45810)],
        "X[A]:C4,X[A]:N9":[( 1.31125, 1.41176)],
        "X[A]:C6,X[A]:N1":[( 1.28960, 1.40506)],
        "X[A]:N1,X[A]:C2":[( 1.29281, 1.38839)],
        "X[A]:C2,X[A]:N3":[( 1.28138, 1.51326)],
        "X[A]:N3,X[A]:C4":[( 1.28027, 1.37878)],
        "X[G]:C1',X[G]:N9":[( 1.27969, 1.60490)],
        "X[G]:N9,X[G]:C8":[( 1.29044, 1.47864)],
        "X[G]:C8,X[G]:N7":[( 1.27993, 1.49210)],
        "X[G]:N7,X[G]:C5":[( 1.34130, 1.49071)],
        "X[G]:C6,X[G]:O6":[( 1.18068, 1.29281)],
        "X[G]:C6,X[G]:C5":[( 1.35673, 1.57448)],
        "X[G]:C5,X[G]:C4":[( 1.32139, 1.46771)],
        "X[G]:C4,X[G]:N9":[( 1.31662, 1.47820)],
        "X[G]:C6,X[G]:N1":[( 1.27060, 1.43139)],
        "X[G]:N1,X[G]:C2":[( 1.32242, 1.42653)],
        "X[G]:C2,X[G]:N3":[( 1.29541, 1.41449)],
        "X[G]:N3,X[G]:C4":[( 1.30063, 1.39016)],
        "X[G]:C2,X[G]:N2":[( 1.22763, 1.43022)],
        "X[C]:C6,X[C]:C5":[( 1.26580, 1.45753)],
        "X[C]:C2,X[C]:O2":[( 1.16101, 1.30727)],
        "X[C]:C4,X[C]:N4":[( 1.27346, 1.40568)],
        "X[C]:C5,X[C]:C4":[( 1.31144, 1.52704)],
        "X[C]:C6,X[C]:N1":[( 1.22361, 1.43148)],
        "X[C]:N3,X[C]:C4":[( 1.25618, 1.41959)],
        "X[C]:N1,X[C]:C2":[( 1.29249, 1.47254)],
        "X[C]:C2,X[C]:N3":[( 1.30341, 1.43501)],
        "X[U]:C6,X[U]:C5":[( 1.30756, 1.40029)],
        "X[U]:C2,X[U]:O2":[( 1.17294, 1.30953)],
        "X[U]:C4,X[U]:O4":[( 1.17314, 1.30663)],
        "X[U]:C5,X[U]:C4":[( 1.38859, 1.53447)],
        "X[U]:C6,X[U]:N1":[( 1.32523, 1.44609)],
        "X[U]:N3,X[U]:C4":[( 1.32431, 1.45215)],
        "X[U]:N1,X[U]:C2":[( 1.33628, 1.46292)],
        "X[U]:C2,X[U]:N3":[( 1.33838, 1.40959)],
        }
    # allowed bond-angle intervals (degrees)
    angles = {
        "X:P,X:O5',X:C5'":[(99.59706,139.93934)],
        "X:O5',X:C5',X:C4'":[(89.73461,131.30779)],
        "X:C5',X:C4',X:C3'":[(98.52296,131.88271)],
        "X:C4',X:C3',X:O3'":[(88.86713,127.18103)],
        "X:C3',X:O3',X+1:P":[( 100.0,140.0)],
        "X:O3',X+1:P,X+1:O5'":[(90.0,120.0)],
        "X:C5',X:C4',X:O4'":[(97.85665,146.68981)],
        "X:C4',X:O4',X:C1'":[(84.21729,115.71136)],
        "X:O4',X:C4',X:C3'":[(94.63959,110.53526)],
        "X:C4',X:C3',X:C2'":[(94.17150,112.81071)],
        "X:C3',X:C2',X:O2'":[(80.03621,126.84823)],
        "X:O3',X:C3',X:C2'":[(77.00673,138.55925)],
        "X:C3',X:C2',X:C1'":[(80.81583,108.24105)],
        "X:O2',X:C2',X:C1'":[(97.87503,136.10792)],
        "X:C2',X:C1',X:O4'":[(99.24436,131.37196)],
        "X[A]:C1',X[A]:N9,X[A]:C8":[(121.29182,134.87297)],
        "X[A]:N9,X[A]:C8,X[A]:N7":[(100.11655,118.57393)],
        "X[A]:C8,X[A]:N7,X[A]:C5":[(101.58735,107.94879)],
        "X[A]:N7,X[A]:C5,X[A]:C6":[(126.96407,137.17149)],
        "X[A]:N7,X[A]:C5,X[A]:C4":[(107.82678,114.80066)],
        "X[A]:C5,X[A]:C4,X[A]:N9":[(102.28972,108.88976)],
        "X[A]:C5,X[A]:C4,X[A]:N3":[(121.52363,132.73857)],
        "X[A]:C5,X[A]:C6,X[A]:N1":[(112.41227,122.64358)],
        "X[A]:C6,X[A]:N1,X[A]:C2":[(115.13842,125.84871)],
        "X[A]:N1,X[A]:C2,X[A]:N3":[(122.06095,132.72824)],
        "X[A]:C2,X[A]:N3,X[A]:C4":[(103.78598,116.86889)],
        "X[A]:N3,X[A]:C4,X[A]:N9":[(122.36966,130.87604)],
        "X[A]:N6,X[A]:C6,X[A]:C5":[(116.58830,128.68632)],
        "X[A]:N6,X[A]:C6,X[A]:N1":[(114.43765,126.64537)],
        "X[G]:C1',X[G]:N9,X[G]:C8":[(112.01297,135.87611)],
        "X[G]:N9,X[G]:C8,X[G]:N7":[(97.88571,117.97305)],
        "X[G]:C8,X[G]:N7,X[G]:C5":[(97.52309,109.50209)],
        "X[G]:N7,X[G]:C5,X[G]:C6":[(126.78408,137.42652)],
        "X[G]:N7,X[G]:C5,X[G]:C4":[(108.17958,116.01254)],
        "X[G]:C5,X[G]:C4,X[G]:N9":[(98.24771,107.90632)],
        "X[G]:C5,X[G]:C4,X[G]:N3":[(123.79833,132.56276)],
        "X[G]:C5,X[G]:C6,X[G]:N1":[(108.46936,117.11570)],
        "X[G]:C6,X[G]:N1,X[G]:C2":[(119.75726,129.35773)],
        "X[G]:N1,X[G]:C2,X[G]:N3":[(119.36242,126.99662)],
        "X[G]:C2,X[G]:N3,X[G]:C4":[(108.84509,116.49279)],
        "X[G]:N3,X[G]:C4,X[G]:N9":[(122.86395,131.90886)],
        "X[G]:O6,X[G]:C6,X[G]:C5":[(117.66282,134.75018)],
        "X[G]:O6,X[G]:C6,X[G]:N1":[(111.26501,128.96772)],
        "X[G]:N2,X[G]:C2,X[G]:N1":[(108.22873,123.31994)],
        "X[G]:N2,X[G]:C2,X[G]:N3":[(115.24447,125.40315)],
        "X[C]:N1,X[C]:C2,X[C]:N3":[(115.02983,122.99670)],
        "X[C]:C2,X[C]:N3,X[C]:C4":[(116.62450,124.32591)],
        "X[C]:N3,X[C]:C4,X[C]:N4":[(109.43859,124.31410)],
        "X[C]:C5,X[C]:C4,X[C]:N3":[(110.19781,127.90766)],
        "X[C]:C5,X[C]:C6,X[C]:N1":[(116.83415,127.04146)],
        "X[C]:C6,X[C]:N1,X[C]:C2":[(110.72263,125.76954)],
        "X[C]:C4,X[C]:C5,X[C]:C6":[(108.37411,121.48183)],
        "X[C]:N4,X[C]:C4,X[C]:C5":[(114.39426,127.70705)],
        "X[C]:O2,X[C]:C2,X[C]:N1":[(112.80935,126.39081)],
        "X[C]:O2,X[C]:C2,X[C]:N3":[(115.54160,128.95443)],
        "X[U]:N1,X[U]:C2,X[U]:N3":[(109.42133,121.19854)],
        "X[U]:C2,X[U]:N3,X[U]:C4":[(118.83532,133.97496)],
        "X[U]:N3,X[U]:C4,X[U]:O4":[(110.82094,125.12576)],
        "X[U]:C5,X[U]:C4,X[U]:N3":[(111.98415,121.92467)],
        "X[U]:C5,X[U]:C6,X[U]:N1":[(118.66714,127.00344)],
        "X[U]:C6,X[U]:N1,X[U]:C2":[(117.54900,125.67797)],
        "X[U]:C4,X[U]:C5,X[U]:C6":[(115.23890,123.50324)],
        "X[U]:O4,X[U]:C4,X[U]:C5":[(118.57411,135.90212)],
        "X[U]:O2,X[U]:C2,X[U]:N1":[(111.88691,129.45480)],
        "X[U]:O2,X[U]:C2,X[U]:N3":[(113.84482,138.65998)],
        }
    # allowed torsion intervals (degrees); several backbone torsions
    # allow more than one interval
    dihedrals = {
        # values from Richardson 2008
        "X:P,X:O5',X:C5',X:C4'":[(50.0, 290.0)], # beta
        "X:O5',X:C5',X:C4',X:C3'":[( 20.0, 95.0), (140.0, 215.0), (260.0, 335.0)], # gamma
        "X:C5',X:C4',X:C3',X:O3'":[( 55.0, 110.0), (120.0, 175.0)], # delta
        "X:C4',X:C3',X:O3',X+1:P":[( 155.0, 310.0)], # epsilon
        "X:C3',X:O3',X+1:P,X+1:O5'":[( 25.0, 335.0)], # zeta
        "X:O3',X+1:P,X+1:O5',X+1:C5'":[( 25.0, 335.0)], # alpha
        # O2' angles to make sure chirality is right
        "X:O3',X:C3',X:C2',X:O2'":[(0.0, 70.0), (290.0, 360)],
        "X:O4',X:C1',X:C2',X:O2'":[(60.0, 200.0)],
        # chi angle: everything is allowed
        #"X[A]:C8,X[A]:N9,X[A]:C1',X[A]:C2'":[( 2.77662,359.86181)],
        #"X[G]:C8,X[G]:N9,X[G]:C1',X[G]:C2'":[( 3.01550,358.96777)],
        #"X[C]:C2,X[C]:N1,X[C]:C1',X[C]:C2'":[(31.27292,352.95195)],
        #"X[U]:C2,X[U]:N1,X[U]:C1',X[U]:C2'":[(34.95466,356.89562)],
        # should be redundant by flat angle constraints
        #"X:C5',X:C4',X:C3',X:C2'":[(158.80652,294.73936)],
        #"X:O3',X:C3',X:C2',X:C1'":[(63.47648,267.40341)],
        #"X:C5',X:C4',X:O4',X:C1'":[(81.03942,196.47102)],
        }
    def __init__(self):
        """Merges bonds, angles and dihedrals into one lookup table."""
        self.values = {}
        for b in self.bonds: self.values[b] = self.bonds[b]
        for a in self.angles: self.values[a] = self.angles[a]
        for d in self.dihedrals: self.values[d] = self.dihedrals[d]
    def is_outlier(self, descriptor, value):
        """Returns True if value lies outside every allowed interval.

        Unknown descriptors are reported as outliers as well.
        """
        if descriptor not in self.values:
            return True
        for lower, upper in self.values[descriptor]:
            if lower <= value <= upper:
                return False
        return True
    def get_standard(self, descriptor):
        """Returns value between lower and upper limit."""
        # midpoint of the first (primary) allowed interval
        lower, upper = self.values[descriptor][0]
        return (lower+upper) / 2.0
# Convenience lookup tables exposing the (min, max) bond-length windows
# for the sugar-phosphate backbone and the phosphate oxygens.
BACKBONE_DIST_MATRIX = {
    ("P", "O5'"): GeometryStandards.bonds["X:P,X:O5'"][0],
    ("O5'", "C5'"): GeometryStandards.bonds["X:O5',X:C5'"][0],
    ("C5'", "C4'"): GeometryStandards.bonds["X:C5',X:C4'"][0],
    ("C4'", "C3'"): GeometryStandards.bonds["X:C4',X:C3'"][0],
    ("C3'", "O3'"): GeometryStandards.bonds["X:C3',X:O3'"][0],
}
PHOSPHATE_DIST_MATRIX = {
    ("P", "OP1"): GeometryStandards.bonds["X:P,X:OP1"][0],
    ("P", "OP2"): GeometryStandards.bonds["X:P,X:OP2"][0],
}
# allowed O3'(i) -> P(i+1) distance window for connected residues
O3_P_DIST_LOW, O3_P_DIST_HI = GeometryStandards.bonds["X:O3',X+1:P"][0]
__author__ = "Tomasz Osinski"
__copyright__ = "Genesilico 2008"
__credits__ = ["Kristian Rother", "Raphael Bauer", "Marcin Domagalski", \
"Magdalena Rother", "Janusz Bujnicki", "Marie Curie"]
__license__ = "GPL"
__status__ = "Production"
from Bio.PDB.Vector import calc_dihedral
from math import pi, sin, atan, degrees
# ring atoms of the ribose furanose used for the torsion calculation
FURANOSEATOMS = ["O4'", "C1'", "C2'", "C3'", "C4'"]
# (lower, upper, name) bins of the pseudorotation phase angle in degrees
# NOTE(review): the 252-298 bin is wider than the other 36-degree bins
# (one would expect 252-288) -- confirm this boundary is intended.
THETA_RANGE = (
    ( 72,108, "C3'-endo"),
    (108,144, "C4'-exo"),
    (144,180, "O4'-endo"),
    (180,216, "C1'-exo"),
    (216,252, "C2'-endo"),
    (252,298, "C3'-exo"),
    (298,324, "C4'-endo"),
    (324,360, "O4'-exo"),
    ( 0, 36, "C1'-endo"),
    ( 36, 72, "C2'-exo"),
    )
class PuckerError(Exception):
    """Raised when a ribose pucker cannot be determined."""
    pass

def check_positive(angle):
    """Maps a negative angle into the positive range by adding 360."""
    return angle + 360.0 if angle < 0 else angle
class PuckerCalculator:
    """Class that parses ribose puckers out of PDB.Residue objects."""

    def __init__(self):
        """Initializes the calculator with empty residue/angle state."""
        self.resi = None          # PDB.Residue currently being analyzed
        self.vectors = {}         # atom name -> Vector for FURANOSEATOMS
        self.angles = [0.0] * 5   # the five ring torsions nu0..nu4
        self.pucker_angle = 0.0   # pseudorotation phase angle (degrees)

    def __repr__(self):
        """Returns recently calculated dihedrals plus the pucker angle."""
        return "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f" % \
            (self.angles[0], self.angles[1], self.angles[2],
             self.angles[3], self.angles[4], self.pucker_angle)

    def _calc_vectors(self):
        """Creates a set of Vector objects from a residue object."""
        self.vectors = {}
        for name in FURANOSEATOMS:
            self.vectors[name] = self.resi[name].get_vector()

    def _dihedral(self, vec1, vec2, vec3, vec4):
        """Calculates torsion and makes sure it is between -180 and +180.

        NOTE(review): this assumes calc_dihedral returns degrees; the
        Bio.PDB implementation returns radians in (-pi, pi] -- confirm
        which calc_dihedral is actually imported here.
        """
        torsion = calc_dihedral(vec1, vec2, vec3, vec4)
        if torsion > 180:
            return -(360 - torsion)
        else:
            return torsion

    def _calc_torsions(self):
        """Calculates the five furanose ring torsion angles."""
        rv1 = self.vectors
        self.angles = (
            self._dihedral(rv1["C4'"], rv1["O4'"], rv1["C1'"], rv1["C2'"]),
            self._dihedral(rv1["O4'"], rv1["C1'"], rv1["C2'"], rv1["C3'"]),
            self._dihedral(rv1["C1'"], rv1["C2'"], rv1["C3'"], rv1["C4'"]),
            self._dihedral(rv1["C2'"], rv1["C3'"], rv1["C4'"], rv1["O4'"]),
            self._dihedral(rv1["C3'"], rv1["C4'"], rv1["O4'"], rv1["C1'"]),
        )

    def _calc_pucker_angle(self):
        """Determines the pseudorotation phase angle from the torsions."""
        self.pucker_angle = degrees(atan(((self.angles[2]+self.angles[4]) \
                                          - (self.angles[1]+self.angles[3])) \
                                         /(2*self.angles[0]*(sin(36*pi/180.0)\
                                                             +sin(72*pi/180.0)))))
        # atan only covers half a turn; shift into [0, 360)
        if self.angles[0] < 0:
            self.pucker_angle += 180
        if self.pucker_angle < 0:
            self.pucker_angle += 360

    def find_pucker(self):
        """Determines the pucker type; returns None if no bin matches."""
        for min_ft, max_ft, pucker in THETA_RANGE:
            if min_ft < self.pucker_angle <= max_ft:
                return pucker

    def get_pucker(self, resi):
        """Returns the pucker name for a PDB.Residue object."""
        self.resi = resi
        self._calc_vectors()
        self._calc_torsions()
        self._calc_pucker_angle()
        return self.find_pucker()
from rna_tools.tools.mini_moderna3.moderna.analyze.GeometryParameters import BACKBONE_DIST_MATRIX, \
PHOSPHATE_DIST_MATRIX, O3_P_DIST_HI
from rna_tools.tools.mini_moderna3.moderna.Constants import BACKBONE_ATOMS, \
BACKBONE_RIBOSE_ATOMS_WITHOUT_O2
# tolerance factor applied to all upper distance limits
DIST_TOLERANCE = 1.05
# distance for intra-residue backbone clashes
CONGESTION_DISTANCE = 1.5
# backbone atoms on the 5' / 3' side of a residue
ATOMS_5P = ["P", "O5'", "C5'", "C4'"]
ATOMS_3P = ["C3'", "O3'"]
# atom sets examined by the congestion check
BB_SET_5P = ["P", "OP1", "OP2", "C5'", "O5'"]
BB_SET_3P = ["O3'"]
BB_SET = BB_SET_5P + BB_SET_3P
# covalent neighbors of each backbone atom (excluded from clash checks)
BONDS = {
    "P":["OP1", "OP2", "O5'"],
    "OP1":["P"],
    "OP2":["P"],
    "C5'":["C4'", "O5'"],
    "O3'":["C3'"],
    "O5'":["C5'", "P"],
    }
def are_residues_connected(res1, res2):
    """
    Checks whether two residues are connected.
    Distances on the backbone are within norm + tolerance;
    missing atoms count as not connected.
    Arguments:
    * upstream residue as RNAResidue object
    * downstream residue as RNAResidue object
    """
    try:
        # the O3'(i) -> P(i+1) bridge must be within the allowed range
        if res1["O3'"] - res2["P"] > O3_P_DIST_HI * DIST_TOLERANCE:
            return False
        # bonds checked on the upstream residue, then the downstream one
        checks = (
            (res1, ("C3'", "O3'")),
            (res1, ("C4'", "C3'")),
            (res2, ("P", "O5'")),
            (res2, ("O5'", "C5'")),
            (res2, ("C5'", "C4'")),
        )
        for resi, pair in checks:
            upper = BACKBONE_DIST_MATRIX[pair][1]
            if resi[pair[0]] - resi[pair[1]] > upper * DIST_TOLERANCE:
                return False
        return True
    except KeyError:
        # missing atoms
        return False
def is_chain_continuous(chain):
    """
    Checks whether a chain is continuous, i.e. every pair of
    subsequent residues is connected.
    Arguments:
    chain - RNAChain object
    """
    identifiers = [resi.identifier for resi in chain]
    for prev_id, next_id in zip(identifiers, identifiers[1:]):
        if not are_residues_connected(chain[prev_id], chain[next_id]):
            return False
    return True
# ------------- METHODS FOR CHECKING RESIDUE INTEGRITY --------------------
# NOTE(review): the functions below take 'self' although they are
# module-level -- apparently they are meant to be bound as methods of
# residue objects (which supply check_atoms); confirm against callers.
def is_backbone_complete(self):
    """Returns True if all backbone atoms are present."""
    return self.check_atoms(BACKBONE_ATOMS)
def is_ribose_complete(self):
    """Returns True if all ribose atoms are present."""
    return self.check_atoms(BACKBONE_RIBOSE_ATOMS_WITHOUT_O2)
def is_backbone_intact(self, tolerance=1.0, mode=None):
    """
    Checks whether all backbone bond lengths in the residue are within
    the allowed windows. Returns False on any bad distance or on
    missing atoms. With mode "5'" or "3'" only bonds touching that
    side of the residue are examined.
    """
    if mode == "5'":
        atoms = ATOMS_5P
    elif mode == "3'":
        atoms = ATOMS_3P
    try:
        for pair in BACKBONE_DIST_MATRIX:
            # with a mode set, skip bonds not touching the selected side
            if mode and pair[0] not in atoms and pair[1] not in atoms:
                continue
            low_dist, hi_dist = BACKBONE_DIST_MATRIX[pair]
            observed = self[pair[0]] - self[pair[1]]
            if not (low_dist <= observed <= hi_dist * tolerance):
                return False
        return True
    except KeyError:
        # missing atoms
        return False
def is_phosphate_intact(self, tolerance=1.0):
    """Checks whether P-OP1 and P-OP2 distances are OK.

    Returns False on bad distances or missing atoms.
    """
    try:
        for (name1, name2), (low_dist, hi_dist) in PHOSPHATE_DIST_MATRIX.items():
            if not (low_dist <= self[name1] - self[name2] <= hi_dist * tolerance):
                return False
    except KeyError:
        # missing atoms
        return False
    return True
def is_backbone_congested(self, congestion_dist=CONGESTION_DISTANCE,
                          mode=None):
    """Checks whether backbone atoms clash into other atoms.

    Returns True on the first clash, False otherwise (the original
    implicitly returned None here; False is equivalent and explicit).
    With mode "5'" or "3'" only that side's backbone set is examined.
    """
    atoms = BB_SET
    if mode == "5'":
        atoms = BB_SET_5P
    elif mode == "3'":
        atoms = BB_SET_3P
    for bb_name in atoms:
        try:
            atom1 = self[bb_name]
            for atom2 in self:
                # other backbone atoms and covalent neighbors cannot clash
                if atom2.name in atoms:
                    continue
                if atom2.name in BONDS[bb_name]:
                    continue
                if atom2 - atom1 < congestion_dist:
                    return True
        except KeyError:
            pass  # skip missing atoms.
    return False
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
# Suite angles from Richardson to use as fragments
# (only main clusters, no satellites or wannabees)
# taken from Richardson 2008 paper
# Each tuple holds six consecutive suite torsions, read column-wise
# below as (epsilon, zeta, alpha, beta, gamma, delta3).
TORSIONS = [
    (212.250, 288.831, 294.967, 173.990, 53.550, 81.035),
    (217.400, 222.006, 302.856, 160.719, 49.097, 82.444),
    (216.324, 173.276, 289.320, 164.132, 45.876, 84.956),
    (210.347, 121.474, 288.568, 157.268, 49.347, 81.047),
    (218.636, 290.735, 167.447, 159.565, 51.326, 85.213),
    (238.750, 256.875, 69.562, 170.200, 52.800, 85.287),
    (244.085, 203.815, 65.880, 181.130, 54.680, 86.035),
    (202.471, 63.064, 68.164, 143.450, 49.664, 82.757),
    (200.545, 280.510, 249.314, 82.662, 167.890, 85.507),
    (196.591, 291.299, 153.060, 194.379, 179.061, 83.648),
    (223.558, 80.175, 66.667, 109.150, 176.475, 83.833),
    (215.014, 288.672, 300.420, 177.476, 58.307, 144.841),
    (226.400, 168.336, 292.771, 177.629, 48.629, 147.950),
    (206.042, 277.567, 195.700, 161.600, 50.750, 145.258),
    (206.440, 52.524, 163.669, 148.421, 50.176, 147.590),
    (236.600, 220.400, 68.300, 200.122, 53.693, 145.730),
    (199.243, 288.986, 180.286, 194.743, 178.200, 147.386),
    (204.933, 69.483, 63.417, 115.233, 176.283, 145.733),
    (216.508, 287.192, 297.254, 225.154, 293.738, 150.677),
    (232.856, 248.125, 63.269, 181.975, 295.744, 149.744),
    (260.339, 288.756, 288.444, 192.733, 53.097, 84.067),
    (223.159, 139.421, 284.559, 158.107, 47.900, 84.424),
    (256.922, 165.194, 204.961, 165.194, 49.383, 82.983),
    (262.869, 79.588, 203.863, 189.688, 58.000, 84.900),
    (270.596, 240.892, 62.225, 176.271, 53.600, 87.262),
    (249.956, 187.678, 80.433, 198.133, 61.000, 89.378),
    (241.222, 88.894, 59.344, 160.683, 52.333, 83.417),
    (258.383, 286.517, 178.267, 165.217, 48.350, 84.783),
    (260.712, 290.424, 296.200, 177.282, 175.594, 86.565),
    (227.256, 203.789, 73.856, 216.733, 194.444, 80.911),
    (268.383, 84.972, 63.811, 191.483, 176.644, 85.600),
    (259.402, 291.275, 291.982, 210.048, 54.412, 147.760),
    (244.622, 162.822, 294.159, 171.630, 45.900, 145.804),
    (248.421, 112.086, 274.943, 164.764, 56.843, 146.264),
    (260.246, 213.785, 71.900, 207.638, 56.715, 148.131),
    (257.831, 89.597, 67.923, 173.051, 55.513, 147.623),
    (247.562, 170.488, 277.938, 84.425, 176.413, 148.087),
    (256.475, 295.508, 287.408, 194.525, 293.725, 150.458),
]
# per-angle columns of the table above
#DELTA5 = [t[0] for t in TORSIONS]
EPSILON = [t[0] for t in TORSIONS]
ZETA = [t[1] for t in TORSIONS]
ALPHA = [t[2] for t in TORSIONS]
BETA = [t[3] for t in TORSIONS]
GAMMA = [t[4] for t in TORSIONS]
DELTA3 = [t[5] for t in TORSIONS]
# first (largest) cluster used as the fallback conformation
DEFAULT_TORSIONS = TORSIONS[0]
__author__ = "Kristian Rother, Raphael Bauer"
__credits__ = ["Marcin Domagalski","Magdalena Musielak", "Janusz Bujnicki", "Marie Curie"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Kristian Rother"
__email__ = "krother@rubor.de"
__status__ = "Production"
from PDB.PDBParser import PDBParser
from math import pi,cos
from dihedral import dihedral_from_vectors
import os
# atom names required from the first / second residue of a suite
# (old-style PDB names with '*'; get_vectors also tries the "'" variants)
FIRST_SUITEATOMS = ["C5*","C4*","C3*","O3*"]
SECOND_SUITEATOMS = ['P',"O5*","C5*","C4*","C3*","O3*"]
# placeholder values for the seven suite dihedrals
EMPTY_ANGLES = [-1.0]*7
# exponent of the hyperellipsoid distance metric
POWER = 3.0
# exception hierarchy for suite dihedral calculation
class SuiteError(Exception): pass
# base class for residue-related suite problems
class SuiteResidueError(SuiteError): pass
# raised when a required atom is missing from a residue
class SuiteIncompleteResidueError(SuiteResidueError): pass
# raised when an atom's B-factor exceeds the limit
class SuiteBfactorError(SuiteResidueError): pass
# raised when consecutive backbone atoms are too far apart
class SuiteDistanceError(SuiteResidueError): pass
class SuiteAngles:
    """
    A class that stores the seven consecutive dihedral angles
    a suite consists of and allows basic calculations with them.
    """
    def __init__(self, angles):
        """
        angles is a list of seven floats:
        [deltam epsilon zeta alpha beta gamma delta]
        """
        self.set_angles(angles)

    def __getitem__(self, i):
        return self.angles[i]

    def check_positive(self, angle):
        """Maps a negative angle into the positive range by adding 360."""
        if angle < 0:
            angle += 360.0
        return angle

    def set_angles(self, angles):
        """angles is a list of seven floats.

        BUGFIX: the original assigned the raw map() iterator to
        self.angles; in Python 3 the tuple unpacking below exhausts that
        iterator, leaving self.angles unusable. A list is stored instead.
        """
        angles = [self.check_positive(angle) for angle in angles]
        self.deltam, self.epsilon, self.zeta, self.alpha, \
            self.beta, self.gamma, self.delta = angles
        self.angles = angles

    def get_7d_diff(self, suite_angles):
        """
        Returns SuiteAngles with the differences
        between both suites angles.
        """
        angles = [suite_angles[i] - self[i] for i in range(7)]
        return SuiteAngles(angles)

    def dotproduct(self, suite_angles, nang):
        """
        Returns dot product of both suite angles.
        If nang=4 is set, only epsilon, zeta, alpha, beta will be used,
        otherwise all seven.
        """
        indices = nang == 4 and range(1, 5) or range(7)
        products = [self[i] * suite_angles[i] for i in indices]
        return sum(products)

    def get_hyperellipsoid_dist(self, suite_angles, nang, weights):
        """
        Calculates distance to hyperellipsoid.
        suite_angles contains the center point of the ellipsoid,
        weights the ellipsoid radii.
        nang can be 4 or 7 to choose distance in 4D or 7D.
        Returns float
        -> Piet Hein superellipse, hyperellipsoids.
            0      1       2    3     4    5     6
            deltam epsilon zeta alpha beta gamma delta
            X                                   X     X
            X not used in 4 angle distance calc
        """
        indices = (nang == 4) and range(1, 5) or range(7)
        # normalize, diff < 1 inside the ellipsoid
        diffs = [abs(self[i] - suite_angles[i]) / weights[i] for i in indices]
        powers = [d ** POWER for d in diffs]
        return sum(powers) ** (1.0 / POWER)  # root

    def __str__(self):
        return "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f" % \
            (self.deltam, self.epsilon, self.zeta, self.alpha,
             self.beta, self.gamma, self.delta)
class Suite(SuiteAngles):
    """Class that reads dihedral angles out of PDB.Residue objects."""
    def __init__(self):
        SuiteAngles.__init__(self, EMPTY_ANGLES)
        self.resi1 = None  # upstream residue (i)
        self.resi2 = None  # downstream residue (i+1)
        self.vecs1 = None  # FIRST_SUITEATOMS vectors of resi1
        self.vecs2 = None  # SECOND_SUITEATOMS vectors of resi2

    def set_residues(self, resi1, resi2):
        """
        Takes two PDB.Residue objects and calculates suite dihedrals
        from them. If the residues do not apply, a SuiteError is raised.
        """
        rv1 = self.get_vectors(resi1, FIRST_SUITEATOMS)
        rv2 = self.get_vectors(resi2, SECOND_SUITEATOMS)
        self.resi1 = resi1
        self.resi2 = resi2
        self.vecs1 = rv1
        self.vecs2 = rv2
        # calculate the seven consecutive torsion angles of the suite
        deltam = dihedral_from_vectors(rv1["C5*"],rv1["C4*"],rv1["C3*"],rv1["O3*"])
        epsilon = dihedral_from_vectors(rv1["C4*"],rv1["C3*"],rv1["O3*"],rv2["P"])
        zeta = dihedral_from_vectors(rv1["C3*"],rv1["O3*"],rv2["P"],rv2["O5*"])
        alpha = dihedral_from_vectors(rv1["O3*"],rv2["P"],rv2["O5*"],rv2["C5*"])
        beta = dihedral_from_vectors(rv2["P"],rv2["O5*"],rv2["C5*"],rv2["C4*"])
        gamma = dihedral_from_vectors(rv2["O5*"],rv2["C5*"],rv2["C4*"],rv2["C3*"])
        delta = dihedral_from_vectors(rv2["C5*"],rv2["C4*"],rv2["C3*"],rv2["O3*"])
        angles = [deltam, epsilon, zeta, alpha, beta, gamma, delta]
        SuiteAngles.set_angles(self, angles)

    def confirm_distances(self):
        """
        Raises an exception if any interatomic distances are above 2.2.
        """
        # build list of vectors in right order
        vectors = [self.vecs1[name] for name in FIRST_SUITEATOMS]
        vectors += [self.vecs2[name] for name in SECOND_SUITEATOMS]
        # check distances between consecutive backbone atoms
        for i in range(len(vectors)-1):
            vec1 = vectors[i]
            vec2 = vectors[i+1]
            dist = vec2-vec1
            if dist.norm() > 2.2:
                raise SuiteDistanceError("interatomic distance of %5.2f too big"%dist.norm())

    def confirm_bfactors(self):
        """
        Raises a SuiteBfactorError if any of the atoms in
        the list of atom names has a bfactor too high.
        """
        # NOTE(review): BFACTOR_LIMIT is not defined in this module --
        # confirm it is provided elsewhere, otherwise calling this
        # method raises NameError.
        rn = ((self.resi1,FIRST_SUITEATOMS),(self.resi2,SECOND_SUITEATOMS))
        for resi, atom_names in rn:
            for name in atom_names:
                if resi[name].bfactor >= BFACTOR_LIMIT:
                    raise SuiteBfactorError('too big B-factor: %6.3f'%resi[name].bfactor)

    def confirm_angles(self):
        """checks whether all angles are in the valid range."""
        # NOTE(review): SuiteAngleError is not defined in this module
        # either -- confirm the import situation.
        for a in self.angles:
            if not (0 <= a <= 360):
                raise SuiteAngleError('tangled: invalid angle: %6.3f'%a)

    def confirm(self, check_angles, check_bfactors, check_distances):
        """Runs the selected subset of validity checks."""
        if check_angles: self.confirm_angles()
        if check_bfactors: self.confirm_bfactors()
        if check_distances: self.confirm_distances()

    def get_vectors(self, resi, atom_names):
        """Creates a set of Vector objects from a residue object."""
        vectors = {}
        for name in atom_names:
            # new PDB files have ' instead of *
            quotename = name.replace('*',"'")
            if resi.has_id(name):
                vectors[name] = resi[name].get_vector()
            elif resi.has_id(quotename):
                vectors[name] = resi[quotename].get_vector()
            else:
                raise SuiteIncompleteResidueError('atom %s does not exist.'%name)
        return vectors
__author__ = "Kristian Rother, Raphael Bauer"
__credits__ = ["Raphael Bauer","Markus Weber","Marcin Domagalski","Magdalena Musielak", "Janusz Bujnicki", "Marie Curie"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Kristian Rother"
__email__ = "krother@rubor.de"
__status__ = "Production"
from math import pi,cos
# placeholder values for the seven suite dihedrals
EMPTY_ANGLES = [-1.0]*7
# exponent of the hyperellipsoid distance metric
POWER = 3.0
# default per-angle ellipsoid radii (deltam..delta)
NORMAL_WEIGHTS = [28.0, 60.0, 55.0, 50.0, 70.0, 35.0, 28.0]
SATELLITE_WEIGHTS = [28.0, 50.0, 50.0, 45.0, 60.0, 35.0, 28.0]
# adjusted radii for satellite-dominant comparisons.
# satnam:((1 s2 s3 4 s5 6 7),(1 d2 d3 4 d5 6 7 ,domnam))
DOM_SAT_EMPIRICAL = {
    "1m":((0.0, 0.0, 0.0,0.0,32.0,0.0,0.0),(0.0, 0.0, 0.0,0.0,64.0,0.0,0.0 ,"1a")),
    "1L":((0.0,18.0, 0.0,0.0,18.0,0.0,0.0),(0.0,70.0, 0.0,0.0,70.0,0.0,0.0 ,"1a")),
    "&a":((0.0,20.0,20.0,0.0, 0.0,0.0,0.0),(0.0,60.0,60.0,0.0, 0.0,0.0,0.0 ,"1a")),
    "1f":((0.0, 0.0, 0.0,0.0,47.0,0.0,0.0),(0.0, 0.0, 0.0,0.0,65.0,0.0,0.0 ,"1c")),
    "1[":((0.0, 0.0, 0.0,0.0,34.0,0.0,0.0),(0.0, 0.0, 0.0,0.0,56.0,0.0,0.0 ,"1b")),
    "4a":((0.0,40.0,40.0,0.0, 0.0,0.0,0.0),(0.0,50.0,50.0,0.0, 0.0,0.0,0.0 ,"0a")),
    "#a":((0.0,26.0,26.0,0.0, 0.0,0.0,0.0),(0.0,36.0,36.0,0.0, 0.0,0.0,0.0 ,"0a")),
    "0i":((0.0, 0.0, 0.0,0.0,60.0,0.0,0.0),(0.0, 0.0, 0.0,0.0,60.0,0.0,0.0 ,"6n")),
    "6j":((0.0, 0.0, 0.0,0.0,60.0,0.0,0.0),(0.0, 0.0, 0.0,0.0,60.0,0.0,0.0 ,"6n"))
    }
# raised when a suite dihedral lies outside [0, 360]
class SuiteAngleError(Exception): pass
class SuiteAngles:
    """
    A class that stores the seven consecutive dihedral angles
    a suite consists of and allows basic calculations with them.
    """
    def __init__(self, angles):
        """
        angles is a list of seven floats:
        [deltam epsilon zeta alpha beta gamma delta]
        """
        self.set_angles(angles) # replace by property

    def __getitem__(self, i):
        return self.angles[i]

    def check_positive(self, angle):
        """Maps a negative angle into the positive range by adding 360."""
        if angle < 0:
            angle += 360.0
        return angle

    def set_angles(self, angles):
        """angles is a list of seven floats.

        BUGFIX: the original assigned the raw map() iterator to
        self.angles; in Python 3 the tuple unpacking below exhausts that
        iterator, leaving self.angles unusable. A list is stored instead.
        """
        angles = [self.check_positive(angle) for angle in angles]
        self.deltam, self.epsilon, self.zeta, self.alpha, \
            self.beta, self.gamma, self.delta = angles
        self.angles = angles

    def confirm_angles(self):
        """checks whether all angles are in the valid range."""
        for a in self.angles:
            if not (0 <= a <= 360):
                raise SuiteAngleError('tangled: invalid angle: %6.3f'%a)

    def get_7d_diff(self, suite_angles):
        """
        Returns SuiteAngles with the differences
        between both suites angles.
        """
        angles = [suite_angles[i] - self[i] for i in range(7)]
        return SuiteAngles(angles)

    def dotproduct(self, suite_angles, nang):
        """
        Returns dot product of both suite angles.
        If nang=4 is set, only epsilon, zeta, alpha, beta will be used,
        otherwise all seven.
        """
        indices = nang == 4 and range(1, 5) or range(7)
        result = sum([self[i] * suite_angles[i] for i in indices])
        # NOTE(review): reducing the dot product below 360 by repeated
        # subtraction is unusual; preserved exactly as in the original.
        while result > 360:
            result -= 360.0
        return result

    def get_hyperellipsoid_dist(self, suite_angles, nang, weights):
        """
        Calculates distance to hyperellipsoid.
        suite_angles contains the center point of the ellipsoid,
        weights the ellipsoid radii.
        nang can be 4 or 7 to choose distance in 4D or 7D.
        Returns float
        -> Piet Hein superellipse, hyperellipsoids.
            0      1       2    3     4    5     6
            deltam epsilon zeta alpha beta gamma delta
            X                                   X     X
            X not used in 4 angle distance calc
        """
        indices = (nang == 4) and range(1, 5) or range(7)
        # normalize, diff < 1 inside the ellipsoid
        diffs = [abs(self[i] - suite_angles[i]) / weights[i] for i in indices]
        powers = [d ** POWER for d in diffs]
        return sum(powers) ** (1.0 / POWER)  # root

    def __str__(self):
        return "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f" % \
            (self.deltam, self.epsilon, self.zeta, self.alpha,
             self.beta, self.gamma, self.delta)
class SuiteCluster(SuiteAngles):
    """
    Stores one of the Richardson suite clusters
    (a 7D ellipsoid).
    """
    def __init__(self, puckers, gammaname, name,
                 dominant, satellite, wannabe, angles):
        SuiteAngles.__init__(self, angles)
        self.puckers = puckers       # 33, 32, 23, 22
        self.gammaname = gammaname   # p, t, or m
        self.name = name
        self.dominant = dominant     # boolean
        self.satellite = satellite   # boolean
        self.wannabe = wannabe       # boolean

    def __str__(self):
        wannabe = 'wannabe' if self.wannabe else 'certain'
        status = 'ord'
        if self.dominant: status = 'dom'
        if self.satellite: status = 'sat'
        result = "%s\t%s%s\t%s\t%s\t"%\
            (self.name, self.puckers,self.gammaname, status, wannabe)
        result += SuiteAngles.__str__(self)
        return result

    def get_ellipsoid_widths(self, weigh_satellites=True):
        """
        Returns a 7D list of weights for the ellipsoid distance calculation.
        The weights are ellipsoid-half-widths in the given dimension.
        For: [deltam, epsilon, zeta, alpha, beta, gamma, delta]
        """
        # Return copies: get_empirical_weights() assigns into these lists,
        # and returning the module-level constants by reference used to
        # corrupt NORMAL_WEIGHTS/SATELLITE_WEIGHTS for all later calls.
        if self.satellite and weigh_satellites:
            return list(SATELLITE_WEIGHTS)
        else:
            return list(NORMAL_WEIGHTS)

    def get_empirical_weights(self, domw, satw):
        """
        Returns a (domw, satw) pair of 7D weight lists with empirical
        overrides applied, for calculating distances between dominant
        and satellite clusters. Valid only for satellites.
        For: [deltam, epsilon, zeta, alpha, beta, gamma, delta]
        Note: domw and satw are modified in place.
        """
        # empirical weight correction for dom-sat pairs to edit
        satvalues, domvalues = DOM_SAT_EMPIRICAL[self.name]
        for i in range(7):
            # zero entries mean "keep the default width"
            satw[i] = satvalues[i] or satw[i]
            domw[i] = domvalues[i] or domw[i]
        return domw, satw

    def get_dom_sat_dist(self, dominant, suite_angles, weigh_satellites):
        """
        Recalc distances to dominant cluster and to satellite cluster
        considering different weights for points between both clusters.
        Valid only for satellite clusters.
        Returns a dist_dom, dist_sat tuple of floats.
        """
        domw = dominant.get_ellipsoid_widths(False)
        satw = self.get_ellipsoid_widths(weigh_satellites)
        domw, satw = self.get_empirical_weights(domw, satw)
        disttodom = dominant.get_hyperellipsoid_dist(suite_angles,4,False,domw)
        disttosat = self.get_hyperellipsoid_dist(suite_angles,4,False,satw)
        return disttodom, disttosat

    def get_hyperellipsoid_dist(self, suite_angles, nang,
                                weigh_satellites=True, coordw=None):
        """Like SuiteAngles.get_hyperellipsoid_dist, but supplies this
        cluster's ellipsoid widths when no explicit coordw is given."""
        if not coordw:
            coordw = self.get_ellipsoid_widths(weigh_satellites)
        return SuiteAngles.get_hyperellipsoid_dist(self,suite_angles,nang,coordw)

    def is_between_dom_sat(self, dominant, suite_angles):
        """
        Called for a satellite cluster. Determines whether
        a point is in between a dominant and satellite cluster.
        If the 4D dotproducts are both positive, then it is in between.
        """
        dom_suite = dominant.get_7d_diff(suite_angles)
        sat_suite = self.get_7d_diff(suite_angles)
        dom_sat = dominant.get_7d_diff(self)
        sat_dom = self.get_7d_diff(dominant)
        if (dom_suite.dotproduct(dom_sat,4) > 0) \
        and (sat_suite.dotproduct(sat_dom,4) > 0):
            return True
        return False
class ClusterSet(dict):
    """
    Specialized dictionary that reads the file with torsion angles.
    Keys are bin names (pucker pair + gamma name), values are lists
    of SuiteCluster objects.
    """
    def __init__(self, filename):
        '''Reads file with torsion angles from the given path.'''
        dict.__init__(self)
        # 'with' closes the file deterministically (the old code leaked
        # the handle).
        with open(filename) as infile:
            for line in infile:
                if line.startswith('#'):
                    continue
                # parse the line
                t = line.strip().split()
                puckers, gammaname, name, unused, wannabe, color, status = t[:7]
                wannabe = wannabe == 'wannabe'
                dominant = status[:3] == 'dom'
                satellite = status[:3] == 'sat'
                angles = [float(a) for a in t[7:]]
                # create clusters
                cluster = SuiteCluster(puckers, gammaname, name,
                                       dominant, satellite, wannabe, angles)
                # assign clusters to bins
                bin_name = puckers + gammaname
                self.setdefault(bin_name, []).append(cluster)

    def get_bins(self):
        """Returns the bin names in sorted order."""
        # dict.keys() returns a view in Python 3 and has no .sort();
        # sorted() over the dict works in both worlds.
        return sorted(self)
if __name__ == '__main__':
    # Smoke test: read the cluster file and print every cluster by bin.
    # (print is a function in Python 3; the old statement form was a
    # SyntaxError that made this module unimportable.)
    cl = ClusterSet('../data/suite_clusters.txt')
    for bin_name in cl.get_bins():
        for angles in cl[bin_name]:
            print(angles)
            #print(angles.beta)
# Authorship metadata for the suite-dihedral module.
__author__ = "Kristian Rother, Raphael Bauer"
__credits__ = ["Marcin Domagalski","Magdalena Musielak", "Janusz Bujnicki", "Marie Curie"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Kristian Rother"
__email__ = "krother@rubor.de"
__status__ = "Production"
from rna_tools.tools.mini_moderna3.moderna.PDB.PDBParser import PDBParser
from rna_tools.tools.mini_moderna3.moderna.dihedral import dihedral
# Atom names (old PDB notation: '*' instead of the prime character) whose
# positions define the dihedrals of the 5' and 3' halves of a suite.
FIRST_SUITEATOMS = ["C5*","C4*","C3*","O3*"]
SECOND_SUITEATOMS = ['P',"O5*","C5*","C4*","C3*","O3*"]
class SuiteError(Exception):
    """Base class for all suite-related errors."""

class SuiteResidueError(SuiteError):
    """Base class for errors caused by a particular residue."""

class SuiteIncompleteResidueError(SuiteResidueError):
    """Raised when a residue lacks one of the required atoms."""

class SuiteBfactorError(SuiteResidueError):
    """Raised when an atom's B-factor exceeds the allowed limit."""

class SuiteDistanceError(SuiteResidueError):
    """Raised when consecutive atoms are too far apart."""
class Suite(SuiteAngles):
    """
    Class that reads dihedral angles out of Bio.PDB.Residue objects.
    Does the validation for suites.

    NOTE(review): EMPTY_ANGLES, ALLOWED_ANGLES, BFACTOR_LIMIT and
    SuiteTriageError are referenced here but defined elsewhere in this
    module (outside this excerpt) - confirm against the full file.
    """
    def __init__(self):
        SuiteAngles.__init__(self,EMPTY_ANGLES)
        self.resi1 = None       # 5' Bio.PDB.Residue of the suite
        self.resi2 = None       # 3' Bio.PDB.Residue of the suite
        self.vecs1 = None       # atom name -> Vector dict for resi1
        self.vecs2 = None       # atom name -> Vector dict for resi2
        self.puckerdm = '' # pucker of 5' half-suite
        self.puckerd = '' # pucker of 3' half-suite
        self.gammaname = ''
    def set_residues(self, resi1, resi2):
        """
        Takes two PDB.Residue objects and calculates suite dihedrals
        from them. If the residues do not apply, a SuiteError is raised.
        """
        rv1 = self.get_vectors(resi1, FIRST_SUITEATOMS)
        rv2 = self.get_vectors(resi2, SECOND_SUITEATOMS)
        self.resi1, self.resi2 = resi1, resi2
        self.vecs1, self.vecs2 = rv1, rv2
        # calculate torsion angles along the backbone
        deltam  = dihedral(rv1["C5*"],rv1["C4*"],rv1["C3*"],rv1["O3*"])
        epsilon = dihedral(rv1["C4*"],rv1["C3*"],rv1["O3*"],rv2["P"])
        zeta    = dihedral(rv1["C3*"],rv1["O3*"],rv2["P"],rv2["O5*"])
        alpha   = dihedral(rv1["O3*"],rv2["P"],rv2["O5*"],rv2["C5*"])
        beta    = dihedral(rv2["P"],rv2["O5*"],rv2["C5*"],rv2["C4*"])
        gamma   = dihedral(rv2["O5*"],rv2["C5*"],rv2["C4*"],rv2["C3*"])
        delta   = dihedral(rv2["C5*"],rv2["C4*"],rv2["C3*"],rv2["O3*"])
        angles = [deltam, epsilon, zeta, alpha, beta, gamma, delta]
        SuiteAngles.set_angles(self,angles)
    def confirm_distances(self):
        """
        Raises an exception if any interatomic distances are above 2.2.
        """
        # build list of vectors in right order
        vectors = [self.vecs1[name] for name in FIRST_SUITEATOMS]
        vectors += [self.vecs2[name] for name in SECOND_SUITEATOMS]
        # check distances between consecutive backbone atoms
        for i in range(len(vectors)-1):
            dist = vectors[i+1] - vectors[i]
            if dist.norm() > 2.2:
                raise SuiteDistanceError("interatomic distance of %5.2f too big"%dist.norm())
    def confirm_bfactors(self):
        """
        Raises a SuiteBfactorError if any of the atoms in
        the list of atom names has a bfactor too high.
        """
        rn = ((self.resi1,FIRST_SUITEATOMS),(self.resi2,SECOND_SUITEATOMS))
        for resi, atom_names in rn:
            for name in atom_names:
                if resi[name].bfactor >= BFACTOR_LIMIT:
                    raise SuiteBfactorError('too big B-factor: %6.3f'%resi[name].bfactor)
    def confirm(self,check_angles,check_bfactors,check_distances):
        """Runs the selected validation checks; each may raise a SuiteError."""
        if check_angles: self.confirm_angles()
        if check_bfactors: self.confirm_bfactors()
        if check_distances: self.confirm_distances()
    def get_pucker(self, delta):
        """Returns '3' or '2' for a C3'/C2'-endo pucker, or None if
        delta falls outside both allowed ranges."""
        if ALLOWED_ANGLES['delta3min'] <= delta <= ALLOWED_ANGLES['delta3max']:
            return '3'
        elif ALLOWED_ANGLES['delta2min'] <= delta <= ALLOWED_ANGLES['delta2max']:
            return '2'
    def filter_epsilon(self):
        """Checks whether epsilon is in the allowed range."""
        if self.epsilon < ALLOWED_ANGLES['epsilonmin'] or self.epsilon > ALLOWED_ANGLES['epsilonmax']:
            raise SuiteTriageError("epsilon outlier. ")
    def filter_deltam(self):
        """Checks whether delta-1 is in the allowed range."""
        self.puckerdm = self.get_pucker(self.deltam)
        if not self.puckerdm:
            raise SuiteTriageError("bad deltam. ")
    def filter_delta(self):
        """Checks whether delta is in the allowed range."""
        self.puckerd = self.get_pucker(self.delta)
        if not self.puckerd:
            raise SuiteTriageError("bad delta. ")
    def filter_gamma(self):
        """Checks whether gamma is in the allowed range."""
        if(ALLOWED_ANGLES['gammapmin'] <= self.gamma <= ALLOWED_ANGLES['gammapmax']):
            self.gammaname = 'p'
        elif(ALLOWED_ANGLES['gammatmin'] <= self.gamma <= ALLOWED_ANGLES['gammatmax']):
            self.gammaname = 't'
        elif(ALLOWED_ANGLES['gammammin'] <= self.gamma <= ALLOWED_ANGLES['gammammax']):
            self.gammaname = 'm'
        else:
            raise SuiteTriageError("gamma outlier. ")
    def filter_alpha(self):
        """Checks whether alpha is in the allowed range."""
        if(ALLOWED_ANGLES['alphamin'] > self.alpha or self.alpha > ALLOWED_ANGLES['alphamax']):
            raise SuiteTriageError("alpha outlier. ")
    def filter_beta(self):
        """Checks whether beta is in the allowed range."""
        if(ALLOWED_ANGLES['betamin'] > self.beta or self.beta > ALLOWED_ANGLES['betamax']):
            raise SuiteTriageError("beta outlier. ")
    def filter_zeta(self):
        """Checks whether zeta is in the allowed range."""
        if(ALLOWED_ANGLES['zetamin'] > self.zeta or self.zeta > ALLOWED_ANGLES['zetamax']):
            raise SuiteTriageError("zeta outlier. ")
    def filter_angles(self):
        """Runs all angle filters; any outlier raises SuiteTriageError."""
        self.filter_epsilon()
        self.filter_deltam()
        self.filter_delta()
        self.filter_gamma()
        self.filter_alpha()
        self.filter_beta()
        self.filter_zeta()
    def get_vectors(self,resi, atom_names):
        """Creates a dict of Vector objects from a residue object."""
        vectors = {}
        for name in atom_names:
            if resi.has_id(name):
                vectors[name] = resi[name].get_vector()
            else:
                raise SuiteIncompleteResidueError('atom %s does not exist.'%name)
        return vectors
from rna_tools.tools.mini_moderna3.moderna.modifications.ResidueEditor import ResidueEditor
from rna_tools.tools.mini_moderna3.moderna.util.Errors import RemoveModificationError
from rna_tools.tools.mini_moderna3.moderna.util.LogFile import log
from rna_tools.tools.mini_moderna3.moderna.Constants import BASE_PATH, BACKBONE_RIBOSE_ATOMS, B_FACTOR_REMOVE_MODIF
class ModificationRemover(ResidueEditor):
    """Replaces a modified base by its unmodified parent base."""

    def get_remove_rule(self, resi):
        """
        Prepares a rule for removing modification from a residue.
        Returns a dict with a rule:
        {
        'fixed': [link atom names from an original residue],
        'moved': [link atom names from a new (standard) base]
        }
        """
        if resi.long_abbrev in ['Y', 'Ym', 'm3Y', 'm1Y', 'm1acp3Y']:
            # pseudouridines attach via C5, so the triplets differ
            return {'fixed':['C5', 'C4', 'C6'], 'moved':['N1', 'C2', 'C6']}
        elif resi.purine:
            return {'fixed':['N9', 'C8', 'C4'], 'moved':['N9', 'C8', 'C4']}
        elif resi.pyrimidine:
            return {'fixed':['N1', 'C2', 'C6'], 'moved':['N1', 'C2', 'C6']}
        else:
            raise RemoveModificationError('Residue %s: could not get a removing rule.' % resi.identifier)

    def remove_modification(self, resi):
        """
        Removes a modification from this residue.
        It removes all unnecessary atoms and adds a standard base,
        corresponding to the originating base of the modified one.
        according to removing modification rule (prepared by get_remove_rule()).
        """
        if not resi.modified:
            raise RemoveModificationError('Residue %s: the residue does not have any modification. Could not remove modification.' % resi.identifier)
        elif resi.long_abbrev in ['X', 'Xm']:
            raise RemoveModificationError('Residue %s: unidentified residue. Could not remove modification.' % resi.identifier)
        elif resi.long_abbrev in ['dA', 'dG', 'dC', 'dT']:
            # deoxyribonucleotides: rebuild the ribose fragment instead
            rule = {
                'modification_name': resi.long_abbrev,
                'original_base': resi.original_base,
                'remove': ''
            }
            if resi.long_abbrev == 'dT':
                # dict.has_key() was removed in Python 3; use 'in' instead.
                if 'C7' in resi.child_dict:
                    rule['remove'] = 'C7'
                elif 'C5M' in resi.child_dict:
                    rule['remove'] = 'C5M'
            rule['moved_link_atoms'] = ["C3'", "C2'", "C1'"]
            rule['fixed_link_atoms'] = ["C3'", "C2'", "C1'"]
            rule['fragment_file_name'] = 'C1pC2pC3p_O2p.pdb'
            rule['pdb_abbrev'] = 'D' + resi.long_abbrev[1]
            self.add_single_fragment(resi, rule)
        else:
            # superimpose the unmodified parent base onto the old base
            struc = self.parse.get_structure(resi.original_base, BASE_PATH + resi.original_base + '.ent')
            new_base = struc[0]['C'][(' ', 54, ' ')]
            triplet_names = self.get_remove_rule(resi)
            self.superimpose.get_atoms([resi], triplet_names['fixed'], 'fixed')
            self.superimpose.get_atoms([new_base], triplet_names['moved'], 'moved')
            self.superimpose.moved_atoms = new_base.child_list
            self.superimpose.superimpose()
            try:
                # drop the old base atoms, keep backbone + ribose
                for atom in resi.child_list[:]:
                    if atom.id not in BACKBONE_RIBOSE_ATOMS:
                        resi.detach_child(atom.id)
                for atom in new_base:
                    resi.add(atom)
            except Exception as exc:
                # was a bare 'except:'; keep the message, preserve the cause
                # and let SystemExit/KeyboardInterrupt pass through.
                raise RemoveModificationError('Residue %s: could not remove unnecessary and add proper atoms' % resi.identifier) from exc
        resi.change_name(resi.original_base)
        resi.modified = False
        self.set_bfactor(resi, B_FACTOR_REMOVE_MODIF)
def remove_modification(resi):
    """Removes the modification from a residue and logs the change."""
    previous_name = resi.long_abbrev
    remover = ModificationRemover()
    remover.remove_modification(resi)
    log.write_message('Residue %s: modification removed (%s ---> %s).' % (resi.id, previous_name, resi.long_abbrev))
def remove_all_modifications(struc):
    """Removes all modifications from a structure (iterates its residues
    and strips each one flagged as modified)."""
    for resi in struc:
        if resi.modified:
            remove_modification(resi)
from rna_tools.tools.mini_moderna3.moderna.modifications.ResidueEditor import ResidueEditor
from rna_tools.tools.mini_moderna3.moderna.modifications.BaseExchanger import BaseExchanger
from rna_tools.tools.mini_moderna3.moderna.modifications.ModificationRemover import ModificationRemover
from rna_tools.tools.mini_moderna3.moderna.util.Errors import AddModificationError
from rna_tools.tools.mini_moderna3.moderna.util.LogFile import log
from rna_tools.tools.mini_moderna3.moderna.Constants import ANY_RESIDUE, MISSING_RESIDUE, \
UNKNOWN_RESIDUE_SHORT, B_FACTOR_ADD_MODIF, \
ADDING_MODIFICATION_RULES_PATH
def parse_modification_rules(separator=' | '):
    """
    Prepares a rule for adding a modification.
    Rules describe which fragments add and how to do this
    to obtain a residue with given modification.
    Returns dict of list of dicts with rules for adding a single fragment.
    Keys in each rule dict: ['modification_name', 'original_base', 'remove',
    'moved_link_atoms', 'fixed_link_atoms', 'fragment_file_name', 'pdb_abbrev']
    """
    rules = {}
    try:
        infile = open(ADDING_MODIFICATION_RULES_PATH)
    except IOError:
        log.write_message('File does not exist: %s ' % ADDING_MODIFICATION_RULES_PATH)
        return {}
    # 'with' guarantees the file is closed (the old code leaked the handle)
    with infile:
        for line in infile:
            line = line.strip().split(separator)
            if len(line) >= 7:
                mod_name = line[0].strip()
                rules.setdefault(mod_name, [])
                rule = {}
                rule['modification_name'] = line[0]
                rule['original_base'] = line[1]
                rule['remove'] = line[2]
                rule['moved_link_atoms'] = line[3].split(',')
                rule['fixed_link_atoms'] = line[4].split(',')
                rule['fragment_file_name'] = line[5]
                rule['pdb_abbrev'] = line[6]
                rules[mod_name].append(rule)
    return rules
MODIFICATION_RULES = parse_modification_rules()
class ModificationAdder(ResidueEditor):
    """Attaches a modification to a residue by gluing on fragments."""

    def add_modification(self, resi, modification_name):
        """
        Adds a modification to a residue.
        It adds single fragments (add_single_fragment)
        according to adding modification rules (get_modification_rules).
        Arguments:
        - modification name (as a long abbreviation)
        """
        try:
            # reject placeholder abbreviations right away
            if modification_name in [ANY_RESIDUE, MISSING_RESIDUE]:
                raise AddModificationError('Residue %s: expected a modification name, instead got missing/any residue abbreviation "%s"'\
                    % (resi.identifier, modification_name))
            # bring the residue into a clean, unmodified state first
            if resi.long_abbrev == UNKNOWN_RESIDUE_SHORT:
                self.mutate_unknown_residue(resi)
            if resi.modified:
                ModificationRemover().remove_modification(resi)
            rules = MODIFICATION_RULES.get(modification_name, [])
            if not rules:
                raise AddModificationError('Residue %s: there is no rule for adding this modification. Check modification name "%s".' \
                    %(resi.identifier, modification_name))
            # exchange the parent base if the rule demands a different one
            if rules[0]['original_base'] != resi.original_base:
                BaseExchanger().exchange_base(resi, rules[0]['original_base'])
            for rule in rules:
                self.add_single_fragment(resi, rule)
            resi.change_name(modification_name)
            self.set_bfactor(resi, B_FACTOR_ADD_MODIF)
        except IOError:
            raise AddModificationError('Residue %s: could not add modification.' % resi.identifier)
def add_modification(resi, long_abbrev):
    """Adds the modification with the given long abbreviation to a residue
    and logs the change."""
    old_name = resi.long_abbrev
    add = ModificationAdder()
    add.add_modification(resi, long_abbrev)
    log.write_message('Residue %s: modification added (%s ---> %s).' %(resi.identifier, old_name, long_abbrev))
from rna_tools.tools.mini_moderna3.moderna.modifications.ResidueEditor import ResidueEditor
from rna_tools.tools.mini_moderna3.moderna.modifications.ModificationRemover import remove_modification
from rna_tools.tools.mini_moderna3.moderna.util.Errors import ExchangeBaseError
from rna_tools.tools.mini_moderna3. moderna.util.LogFile import log
from rna_tools.tools.mini_moderna3.moderna.Constants import UNKNOWN_RESIDUE_SHORT, B_FACTOR_EXCHANGE, BASE_PATH
class BaseExchanger(ResidueEditor):
    """Exchanges one standard base for another in a residue."""

    def get_exchange_rule(self, resi, new_name):
        """
        Prepares a rule for exchanging a base.
        Returns a dict with the rule.
        {
        'fixed':[link atom names from original residue],
        'moved':[link atom names from new base],
        'remove':[atom names that must be removed from the original residue (old base atoms)]
        }
        """
        rule = {}
        if resi.purine:
            rule['fixed'] = ['N9', 'C4', 'C8']
            rule['remove'] = ['N9', 'C8', 'C4', 'N1', 'C6', 'C5', 'N7', 'C8', 'N3', 'O6', 'N2', 'N6', 'C2']
        elif resi.pyrimidine:
            rule['fixed'] = ['N1', 'C2', 'C6']
            rule['remove'] = ['N1', 'C2', 'O2', 'N3', 'C4', 'O4', 'N4', 'C5', 'C6']
        else:
            # the format arguments must be a tuple; without the parentheses
            # the old code passed new_name as a second exception argument and
            # the single-argument %-format raised a TypeError instead.
            raise ExchangeBaseError('Residue %s: could not get exchange rule for name %s' % (resi.identifier, new_name))
        if new_name in ['A', 'G']:
            rule['moved'] = ['N9', 'C4', 'C8']
        elif new_name in ['C', 'U']:
            rule['moved'] = ['N1', 'C2', 'C6']
        else:
            raise ExchangeBaseError('Residue %s: could not get exchange rule for name %s' % (resi.identifier, new_name))
        return rule

    def exchange_base(self, resi, new_name):
        """
        Exchanges standard bases in a residue.
        Arguments:
        - a new base name ('A','G','C' or 'U')
        """
        if resi.long_abbrev == UNKNOWN_RESIDUE_SHORT:
            # the residue argument was missing here (cf. ModificationAdder)
            self.mutate_unknown_residue(resi)
        if resi.modified:
            remove_modification(resi)
        rule = self.get_exchange_rule(resi, new_name)
        new_base = self.parse.get_structure(resi.original_base, BASE_PATH+new_name+'.ent')[0]['C'][(' ', 54, ' ')]
        self.superimpose.get_atoms([resi], rule['fixed'], 'fixed')
        self.superimpose.get_atoms([new_base], rule['moved'], 'moved')
        self.superimpose.moved_atoms = new_base.child_list
        self.superimpose.superimpose()
        for atom in rule['remove']:
            # membership test directly on the dict (keys() call is redundant)
            if atom in resi.child_dict:
                resi.detach_child(atom)
        for atom in new_base:
            resi.add(atom)
        resi.change_name(new_name)
        self.set_bfactor(resi, B_FACTOR_EXCHANGE)
def exchange_base(resi, new_name):
    """
    Exchanges base in given residue and logs the change.
    Arguments:
    - residue
    - new residue name (A, G, C or U)
    """
    old_name = resi.long_abbrev
    bex = BaseExchanger()
    bex.exchange_base(resi, new_name)
    log.write_message('Residue %s: base exchanged (%s ---> %s), residue added to model.' %(resi.identifier, old_name, new_name))
from rna_tools.tools.mini_moderna3.moderna.util.decorators import toplevel_function
from rna_tools.tools.mini_moderna3.moderna.util.validators import validate_alignment, validate_seq, \
    validate_filename, validate_path, \
    validate_alphabet, validate_alphabet_list
# NOTE(review): the next two module paths were garbled ("moderna.lphabet",
# "moderna.equence"); restored to the spelling used by the sibling modules
# of this package - confirm against the actual package layout.
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaAlphabet import alphabet
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaSequence import Sequence
@toplevel_function
def load_alignment(file_path):
    """*load_alignment(file_path)*

    Loads a sequence alignment from a FASTA file.
    Produces an Alignment object that can be saved in a variable.

    ModeRNA expects, that the alignment file contains exactly two sequences.
    The first is for the target for which the model is to be built,
    the second for the structural template.

    Standard RNA bases should be written in upper case (ACGU).
    Standard DNA bases should be written in lower case (acgt).
    For modified bases, see the 'concepts' section of the manual.

    :Arguments:
        * path+filename of a FASTA file
    """
    checked_path = validate_filename(file_path)
    return read_alignment(checked_path)
@toplevel_function
def match_target_with_alignment(alignment, model):
    """*match_target_with_alignment(alignment, model)*

    Checks, if the sequence of a model structure is equal to the first sequence in the alignment.
    Writes an according message and returns True or False.

    Both sequences also count as equal if one has modified nucleotides, and
    the other the corresponding unmodified nucleotides in the same position,
    or if one of the sequences contains the wildcard symbol '.'.
    Thus, the sequence "AGU" is equal to both "A7Y" and "A.U".

    :Arguments:
        * Alignment object
        * RnaModel object
    """
    alignment = validate_alignment(alignment)
    model = validate_model(model)
    log.write_message('\nChecking whether alignment matches with model.')
    am = AlignmentMatcher(alignment)
    seq = model.get_sequence()
    result = am.is_target_identical(seq)
    # is_target_identical() returns True when the sequences agree, so the
    # success message belongs to the True branch (the old test was inverted;
    # compare match_template_with_alignment below).
    if result:
        log.write_message("alignment and model match.\n")
    else:
        log.write_message("ALIGNMENT AND MODEL SEQUENCES DO NOT MATCH YET !!!\n")
    return result
@toplevel_function
def match_template_with_alignment(template, alignment):
    """*match_template_with_alignment(template, alignment)*

    Checks, if the sequence of the template structure is equal
    to the second sequence in the alignment. Writes an according message and returns True or False.
    Small inconsistencies between both sequences, e.g. backbone breaks, or missing modification symbols
    are corrected automatically in the alignment, and changes are reported in the logfile.

    Both sequences also count as equal if one has modified nucleotides, and
    the other the corresponding unmodified nucleotides in the same position,
    or if one of the sequences contains the wildcard symbol '.'.
    Thus, the sequence "AGU" is equal to both "A7Y" and "A.U".

    :Arguments:
        * Template object
        * Alignment object
    """
    template = validate_template(template)
    alignment = validate_alignment(alignment)
    log.write_message('Checking whether template matches with alignment.')
    am = AlignmentMatcher(alignment)
    seq = template.get_sequence()
    am.fix_template_seq(seq)
    result = am.is_template_identical(seq)
    if result:
        log.write_message("template and alignment match.\n")
    else:
        log.write_message("TEMPLATE AND ALIGNMENT DO NOT MATCH!\n")
    return result
# Authorship metadata for the alignment-matching module.
__author__ = "Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Kristian Rother"
__email__ = "krother@genesilico.pl"
__status__ = "Production"
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaSequence import Sequence
from rna_tools.tools.mini_moderna3.moderna.util.LogFile import log
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaAlphabet import alphabet
from rna_tools.tools.mini_moderna3.moderna.Constants import ANY_RESIDUE
from rna_tools.tools.mini_moderna3.moderna.util.Errors import AlignmentError
# Frequently used alphabet entries: chain break ('_') and gap ('-').
BREAK = alphabet['_']
GAP = alphabet['-']
class PairQueue(object):
    """
    Double queue algorithm that produces pairs of
    AlignmentPosition and AlphabetEntry.
    """
    def __init__(self, align, seq):
        # queues (reversed so pop() takes from the front)
        self.ap_queue = list(align)
        self.guide_queue = list(seq)
        self.ap_queue.reverse()
        self.guide_queue.reverse()
        # counters of consumed elements
        self.i_guide = 0
        self.i_ali = 0

    def has_more(self):
        """True as long as the alignment queue has elements.
        (The guide queue may run out earlier; pair() then yields GAP.)"""
        # return an explicit bool instead of True/None
        return bool(self.ap_queue)

    @property
    def pair(self):
        """Return current pair of elements (guide falls back to GAP)."""
        guide = self.guide_queue[-1] if len(self.guide_queue) > 0 else GAP
        return guide, self.ap_queue[-1]

    def next_ap(self):
        """Moves the alignment queue forward."""
        self.ap_queue.pop()
        self.i_ali += 1

    def next_guide(self):
        """Moves the guide-sequence queue forward."""
        self.guide_queue.pop()
        self.i_guide += 1

    def next_both(self):
        """Moves both queues forward."""
        self.next_guide()
        self.next_ap()
class AlignmentMatcher(object):
    """
    Compares a sequence to a sequence in an alignment,
    and fixes small differences by editing the alignment.
    """
    def __init__(self, alignment):
        self.align = alignment

    def is_template_identical(self, seq):
        """Returns boolean: does seq match the aligned template (ignoring breaks)?"""
        seq_t = seq.seq_without_breaks
        seq_a = self.align.template_seq.seq_without_breaks
        return seq_t == seq_a

    def is_target_identical(self, seq):
        """Returns boolean: does seq match the aligned target?"""
        return seq == self.align.target_seq

    def is_seq_fixable(self, seq):
        """Returns True if the guide sequence can be used for fixing."""
        if self.is_template_identical(seq):
            return False
        tseq = self.align.template_seq
        if len(seq.seq_without_breaks) != len(tseq.seq_without_breaks):
            log.write_message("\nTemplate and alignment sequences differ in length - please check them manually.\n")
            return False
        return True

    def set_aligned_sequences(self, char_tuples):
        """Resets the sequences in the RNAAlignment object."""
        # list() is required: in Python 3, map() returns a one-shot
        # iterator that cannot be indexed with [0]/[1].
        transposed = list(map(list, zip(*char_tuples)))
        target = Sequence(transposed[0])
        template = Sequence(transposed[1])
        if len(target) != len(template):
            # fixed the typo ("lenghts") and the broken separator ("\%s")
            # in the error message
            raise AlignmentError("Error correcting alignment; lengths differ:\n%s\n%s"%(str(target), str(template)))
        self.align.set_aligned_sequences(target, template)

    def check_breaks(self, guide, apos, dqueue, result):
        """Reacts on underscores (chain breaks) in either of the sequences."""
        temp, targ = apos.template_letter, apos.target_letter
        if guide == BREAK and temp == BREAK:
            # break present in both - keep it
            result.append((targ, temp))
            dqueue.next_both()
        elif guide == BREAK:
            log.write_message(".. break in template in position %i added to alignment."%(dqueue.i_guide+1))
            result.append((GAP, guide))
            dqueue.next_guide()
        else:
            log.write_message(".. break in alignment in position %i is not in template - ignored."%(dqueue.i_ali+1))
            dqueue.next_ap()

    def check_gaps(self, guide, apos, dqueue, result):
        """Reacts on gaps in the alignment."""
        if apos.has_template_gap():
            result.append((apos.target_letter, GAP))
        elif apos.has_target_gap():
            result.append((GAP, guide))
            dqueue.next_guide()
        dqueue.next_ap()

    def check_matches(self, guide, apos, dqueue, result):
        """Reacts on matches and mismatches between guide and alignment."""
        temp, targ = apos.template_letter, apos.target_letter
        if temp.short_abbrev == ANY_RESIDUE:
            log.write_message(".. incomplete template residue in alignment position %i (%s/%s) - alignment edited." \
                              % (dqueue.i_ali+1, guide, temp))
            result.append((targ, guide))
        elif guide.short_abbrev == ANY_RESIDUE:
            log.write_message(".. unknown residue in alignment position %i (%s/%s) - alignment edited." \
                              % (dqueue.i_ali+1, guide, temp))
            result.append((targ, guide))
        elif guide.original_base != temp.original_base:
            log.write_message(".. different nucleobase in alignment position %i (%s/%s) - please check manually." \
                              % (dqueue.i_ali+1, guide, temp))
            result.append((targ, temp))
        elif guide != temp and guide.original_base == temp.original_base:
            log.write_message(".. different modified base found in alignment position %i (%s/%s) - alignment edited." \
                              % (dqueue.i_ali+1, guide, temp))
            result.append((targ, guide))
        elif guide == temp:
            result.append((targ, guide))
        else:
            # there may be cases not covered - report and ignore them
            log.write_message(".. don't know what to do about alignment position %i (%s/%s) - ignored." \
                              % (dqueue.i_ali+1, guide, temp))
            result.append((targ, temp))
        dqueue.next_both()

    def fix_template_seq(self, seq):
        """Adjusts the template sequence in the alignment to the given guide sequence."""
        # validate input seq
        if not self.is_seq_fixable(seq):
            return
        log.write_message("\nTemplate and alignment sequences differ - trying to fix small differences.\n")
        log.write_message("template             : %s"%seq)
        log.write_message("alignment (before) : %s\n"%\
                          self.align.aligned_template_seq)
        # iterate through positions
        dqueue = PairQueue(self.align, seq)
        result = []
        while dqueue.has_more():
            guide, apos = dqueue.pair
            if apos.has_gap():
                self.check_gaps(guide, apos, dqueue, result)
            elif guide == BREAK or apos.template_letter == BREAK:
                self.check_breaks(guide, apos, dqueue, result)
            else:
                self.check_matches(guide, apos, dqueue, result)
        self.set_aligned_sequences(result)
        log.write_message("\ntemplate             : %s"%seq)
        log.write_message("alignment (after) : %s\n"%str(self.align))
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
import re
from rna_tools.tools.mini_moderna3.moderna.sequence.ModernaAlphabet import alphabet
from rna_tools.tools.mini_moderna3.moderna.Constants import STANDARD_BASES
from rna_tools.tools.mini_moderna3.moderna.util.Errors import SequenceError
RE_NEW_NOMENCLATURE = re.compile('\d\d\d[A,C,U,G,a,c,t,g,X,N,H,<,;]')
SPECIAL_ABBREVS = ['a', 'c', 't', 'g', 'X', 'N', 'H', '<', ';', 'Q']
class Sequence(object):
    """
    Represents RNA sequences.

    Attributes:
    - seq_with_modifications ---> an original string given by a user
    - seq_without_modifications ---> a string in which all modified bases
      are replaced by original ones
    - has_modification
        * False if all characters in given sequence are
          A, C, G, U, ANY_RESIDUE or MISSING_RESIDUE
        * True if there are characters that represent modified residues.
    """
    def __init__(self, seq):
        """
        Arguments:
        - RNA sequence as a string (or a ready list of AlphabetEntries)
        """
        # Lazily-built caches for the three string renderings; filled on
        # first access by the corresponding properties below.
        self._seq_without_modifications = ''
        self._seq_with_modifications = ''
        self._seq_new_notation = ''
        # Canonical representation: one AlphabetEntry per residue.
        self.seq_alphabet_list = []
        # NOTE(review): never updated after construction in the code shown
        # here -- confirm whether set_sequence was meant to flip it.
        self.has_modification = False
        self.set_sequence(seq)

    def __str__(self):
        return self.seq_with_modifications

    def __repr__(self):
        return self.seq_with_modifications

    def __len__(self):
        return len(self.seq_alphabet_list)

    def __getitem__(self, args):
        """
        Allows to gain a character or characters from Sequence.
        Returns AlpabetEntity object/list.
        The starting point for counting is 0.
        """
        # int -> single AlphabetEntry; slice -> plain list (slice step is
        # ignored).  NOTE(review): any other key type silently returns None.
        if type(args) == int:
            try:
                return self.seq_alphabet_list[args]
            except IndexError:
                raise SequenceError('Sequence index out of range.')
        elif type(args) == slice:
            try:
                return self.seq_alphabet_list[args.start:args.stop]
            except IndexError:
                raise SequenceError('Sequence index out of range.')

    def __iter__(self):
        return self.seq_alphabet_list.__iter__()

    def is_equal(self, seq1, seq2):
        """
        Checks if two RNA sequence objects are equal.
        If ANY_RESIDUE or MISSING_RESIDUE occurs in any of sequences
        then in this position the sequence is equal to any other character.
        """
        # NOTE(review): the code below is a plain character-by-character
        # comparison; the wildcard behaviour described above is not
        # implemented here -- confirm against the docstring.
        if len(seq1) != len(seq2):
            return False
        for char1, char2 in zip(seq1, seq2):
            if char1 != char2:
                return False
        return True

    def __eq__(self, other):
        """
        Checks if two sequences are identical. Equal means that sequences length
        is the same and the same bases on the same positions
        however ANY_RESIDUE or MISSING_RESIDUE in one sequence is equal with any other character.

        Arguments:
        - second sequence as a Sequence instance
        """
        # Two empty sequences compare equal; otherwise compare the rendered
        # strings including modifications.
        if not self and not other:
            return True
        elif self and other:
            if self.is_equal(self.seq_with_modifications, \
                    other.seq_with_modifications):
                return True
        return False

    def similar_to(self, other):
        """Checks if two sequences match, ignoring modifications."""
        if not self and not other:
            return True
        return self.seq_without_modifications == other.seq_without_modifications

    @property
    def seq_with_modifications(self):
        """Returns a string of the sequence with modified bases."""
        if not self._seq_with_modifications:
            self._seq_with_modifications = ''.join([e.short_abbrev for e in self])
        return self._seq_with_modifications

    @property
    def seq_without_modifications(self):
        """Prepares a sequence without modifications."""
        if not self._seq_without_modifications:
            self._seq_without_modifications = ''.join([e.original_base for e in self])
        return self._seq_without_modifications

    @property
    def seq_new_notation(self):
        """Returns a string with the sequence in JMB's new nomenclature."""
        if not self._seq_new_notation:
            result = ''
            for entry in self:
                if entry.new_abbrev in STANDARD_BASES:
                    result += entry.new_abbrev
                else:
                    # Left-pad with zeros so every modified token is exactly
                    # 4 characters long (matches RE_NEW_NOMENCLATURE).
                    result += '0' * (4-len(entry.new_abbrev)) + entry.new_abbrev
            self._seq_new_notation = result
        return self._seq_new_notation

    @property
    def seq_without_breaks(self):
        """Returns sequence without break symbols."""
        return Sequence(str(self).replace('_', ''))

    @property
    def seq_without_gaps(self):
        """Returns sequence without gap symbols."""
        return Sequence(str(self).replace('-', ''))

    def set_sequence(self, seq):
        """
        Parses the sequence from a string
        and produces a list of AlphabetEntries.
        """
        if type(seq) == str:
            x = 0
            while x < len(seq):
                if seq[x] == ' ':
                    raise SequenceError('Sequence %s should not contain whitespace characters' % seq)
                elif seq[x] == '0':
                    # '0' introduces a 4-character "new nomenclature" token.
                    if RE_NEW_NOMENCLATURE.match(seq[x:(x+4)]):
                        if seq[x+3] in SPECIAL_ABBREVS:
                            new_abbrev = seq[x+3]
                        elif seq[x+1] == '0':
                            new_abbrev = seq[x+2:(x+4)]
                        else:
                            new_abbrev = seq[x+1:(x+4)]
                        self.seq_alphabet_list.append(alphabet.get_new_original(new_abbrev))
                        x += 4
                    else:
                        raise SequenceError('Some irregularities in sequence: %s' % seq[x:(x+4)])
                else:
                    # Plain one-letter abbreviation.
                    self.seq_alphabet_list.append(alphabet.get_short_original(seq[x]))
                    x += 1
        elif type(seq) == list:
            # Assume the caller passes ready AlphabetEntry objects.
            self.seq_alphabet_list = seq
        else:
            raise SequenceError('Bad argument type. Sequence instance takes string or list of alphabet_entities')
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
"""
The exception model of Moderna contains
one separate exception class for each class
in the Moderna program. This allows to trace
back easily where something went wrong.
There are a few additional classes for important
functionalities of Moderna, in particular all things
written on the SocBin2008 poster.
In general, each time an exception derived from
ModernaError occurs, this could mean that something is
wrong with input. If some other Python exception occurs,
it is always the fault of the developers.
"""
class ModernaError(Exception): pass
# KR: we could __init__ have write a message to log.
# this would make the logfile calls in moderna.py obsolete.
class AlignmentError(ModernaError): pass
class AlphabetError(ModernaError): pass
class IsostericityError(ModernaError): pass
class LirError(ModernaError): pass
class LirRecordError(ModernaError): pass
class SearchLirError(ModernaError): pass
class LirCandidatesError(ModernaError): pass
class RNAResidueError(ModernaError): pass
class ModernaResidueError(RNAResidueError): pass
class RNAChainError(ModernaError): pass
class ModernaStructureError(RNAChainError): pass
class ModernaSuperimposerError(ModernaError): pass
class ProcessPDBError(ModernaError): pass
class SequenceError(ModernaError): pass
class RenumeratorError(ModernaError): pass
class AddModificationError(ModernaResidueError): pass
class ExchangeBaseError(ModernaResidueError): pass
class RemoveModificationError(ModernaResidueError): pass
class ModernaFragmentError(ModernaStructureError): pass
class RnaModelError(ModernaStructureError): pass
class TemplateError(ModernaStructureError): pass
class ModernaAdunModelError(ModernaStructureError): pass
class CopyResidueError(RnaModelError): pass
class InsertLoopError(RnaModelError): pass
class BaseRecognitionError(RnaModelError): pass
class ParameterError(ModernaError): pass
import sys, re, html.entities, getopt, io, codecs, datetime
from functools import reduce
try:
from simplediff import diff, string_diff
except ImportError:
sys.stderr.write("info: simplediff module not found, only linediff is available\n")
sys.stderr.write("info: it can be downloaded at https://github.com/paulgb/simplediff\n")
# minimum line size, we add a zero-sized breakable space every
# LINESIZE characters
linesize = 20
tabsize = 8
show_CR = False           # when True, render a visible marker for newlines
encoding = "utf-8"        # byte encoding used for all HTML output
lang = "en"               # HTML lang attribute of the generated page
algorithm = 0             # intra-line diff algorithm, see usage()
desc = "File comparison"  # page description metadata
dtnow = datetime.datetime.now()
# NOTE(review): the UTC offset is hard-coded to +01:00 regardless of the
# local timezone -- confirm whether that is intentional.
modified_date = "%s+01:00"%dtnow.isoformat()
html_hdr = """<!DOCTYPE html>
<html lang="{5}" dir="ltr"
xmlns:dc="http://purl.org/dc/terms/">
<head>
<meta charset="{1}" />
<meta name="generator" content="diff2html.py (http://git.droids-corp.org/gitweb/?p=diff2html)" />
<!--meta name="author" content="Fill in" /-->
<title>HTML Diff{0}</title>
<link rel="shortcut icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEXAAAAAgAD///+K/HwIAAAAJUlEQVQI12NYBQQM2IgGBQ4mCIEQW7oyK4phampkGIQAc1G1AQCRxCNbyW92oQAAAABJRU5ErkJggg==" type="image/png" />
<meta property="dc:language" content="{5}" />
<!--meta property="dc:date" content="{3}" /-->
<meta property="dc:modified" content="{4}" />
<meta name="description" content="{2}" />
<meta property="dc:abstract" content="{2}" />
<style>
table {{ border:0px; border-collapse:collapse; width: 100%; font-size:0.75em; font-family: Lucida Console, monospace }}
td.line {{ color:#8080a0 }}
th {{ background: black; color: white }}
tr.diffunmodified td {{ background: #D0D0E0 }}
tr.diffhunk td {{ background: #A0A0A0 }}
tr.diffadded td {{ background: #CCFFCC }}
tr.diffdeleted td {{ background: #FFCCCC }}
tr.diffchanged td {{ background: #FFFFA0 }}
span.diffchanged2 {{ background: #E0C880 }}
span.diffponct {{ color: #B08080 }}
tr.diffmisc td {{}}
tr.diffseparator td {{}}
</style>
</head>
<body>
"""
html_footer = """
<footer>
<p>Modified at {1}. HTML formatting created by <a href="http://git.droids-corp.org/gitweb/?p=diff2html;a=summary">diff2html</a>. </p>
</footer>
</body></html>
"""
table_hdr = """
<table class="diff">
"""
table_footer = """
</table>
"""
# Sentinel characters marking the start/end of an intra-line change;
# convert() turns them into <span class="diffchanged2"> tags.
DIFFON = "\x01"
DIFFOFF = "\x02"

# Shared parser state -- this module keeps its state in globals:
# buf holds pending (old, new) line pairs, the counters track pending
# additions/deletions and the current line/hunk positions.
buf = []
add_cpt, del_cpt = 0, 0
line1, line2 = 0, 0
hunk_off1, hunk_size1, hunk_off2, hunk_size2 = 0, 0, 0, 0

# Characters we're willing to word wrap on
WORDBREAK = " \t;.,/):-"
def sane(x):
    """Replace non-printable control characters (except TAB and LF) in *x*
    with '.' and return the cleaned string."""
    cleaned = []
    for ch in x:
        is_ctrl = ch not in ['\t', '\n'] and ord(ch) < 32
        cleaned.append('.' if is_ctrl else ch)
    return "".join(cleaned)
def linediff(s, t):
    '''
    Original line diff algorithm of diff2html. It's character based.

    Returns the pair (s, t) with every differing character wrapped in the
    DIFFON/DIFFOFF sentinels.
    '''
    # Sanitize control characters before comparing.
    if len(s):
        s = str(reduce(lambda x, y:x+y, [ sane(c) for c in s ]))
    if len(t):
        t = str(reduce(lambda x, y:x+y, [ sane(c) for c in t ]))

    # Classic Levenshtein dynamic programme with predecessor links:
    # d[i][j] = (edit distance of s[:i] vs t[:j], coordinate it came from).
    m, n = len(s), len(t)
    d = [[(0, 0) for i in range(n+1)] for i in range(m+1)]

    d[0][0] = (0, (0, 0))
    for i in range(m+1)[1:]:
        d[i][0] = (i,(i-1, 0))
    for j in range(n+1)[1:]:
        d[0][j] = (j,(0, j-1))

    for i in range(m+1)[1:]:
        for j in range(n+1)[1:]:
            if s[i-1] == t[j-1]:
                cost = 0
            else:
                cost = 1
            # min() of tuples compares the distance first, so ties are
            # broken by the predecessor coordinate.
            d[i][j] = min((d[i-1][j][0] + 1, (i-1, j)),
                          (d[i][j-1][0] + 1, (i, j-1)),
                          (d[i-1][j-1][0] + cost, (i-1, j-1)))

    # Backtrack from (m, n) to (0, 0) to recover the alignment path.
    l = []
    coord = (m, n)
    while coord != (0, 0):
        l.insert(0, coord)
        x, y = coord
        coord = d[x][y][1]

    # Rebuild both strings along the path, wrapping insertions, deletions
    # and substitutions in the DIFFON/DIFFOFF markers.
    l1 = []
    l2 = []
    for coord in l:
        cx, cy = coord
        child_val = d[cx][cy][0]
        father_coord = d[cx][cy][1]
        fx, fy = father_coord
        father_val = d[fx][fy][0]
        diff = (cx-fx, cy-fy)
        if diff == (0, 1):
            # Insertion into t.
            l1.append("")
            l2.append(DIFFON + t[fy] + DIFFOFF)
        elif diff == (1, 0):
            # Deletion from s.
            l1.append(DIFFON + s[fx] + DIFFOFF)
            l2.append("")
        elif child_val-father_val == 1:
            # Substitution (diagonal step that cost 1).
            l1.append(DIFFON + s[fx] + DIFFOFF)
            l2.append(DIFFON + t[fy] + DIFFOFF)
        else:
            # Unchanged character.
            l1.append(s[fx])
            l2.append(t[fy])

    r1, r2 = (reduce(lambda x, y:x+y, l1), reduce(lambda x, y:x+y, l2))
    return r1, r2
def diff_changed(old, new):
    '''
    Returns the differences based on characters between two strings
    wrapped with DIFFON and DIFFOFF using `diff` (from simplediff).
    '''
    # '=' unchanged -> kept as-is, '+' insertion -> wrapped in markers,
    # '-' deletion -> dropped (only the "new" side is rendered).
    con = {'=': (lambda x: x),
           '+': (lambda x: DIFFON + x + DIFFOFF),
           '-': (lambda x: '')}
    return "".join([(con[a])("".join(b)) for a, b in diff(old, new)])
def diff_changed_ts(old, new):
    '''
    Returns a tuple for a two sided comparison based on characters, see `diff_changed`.
    '''
    # Left side highlights what was removed, right side what was added.
    return (diff_changed(new, old), diff_changed(old, new))
def word_diff(old, new):
    '''
    Returns the difference between the old and new strings based on words. Punctuation is not part of the word.

    Params:
        old the old string
        new the new string

    Returns:
        the output of `diff` on the two strings after splitting them
        on whitespace (a list of change instructions; see the docstring
        of `diff`)
    '''
    # Raw string avoids the invalid-escape-sequence warning for '\W' on
    # modern Python; the capturing group keeps the separators as tokens.
    separator_pattern = r'(\W+)'
    return diff(re.split(separator_pattern, old, flags=re.UNICODE),
                re.split(separator_pattern, new, flags=re.UNICODE))
def diff_changed_words(old, new):
    '''
    Returns the difference between two strings based on words (see `word_diff`)
    wrapped with DIFFON and DIFFOFF.

    Returns:
        the output of the diff expressed delimited with DIFFON and DIFFOFF.
    '''
    # Same rendering rules as diff_changed, applied to word tokens.
    con = {'=': (lambda x: x),
           '+': (lambda x: DIFFON + x + DIFFOFF),
           '-': (lambda x: '')}
    return "".join([(con[a])("".join(b)) for a, b in word_diff(old, new)])
def diff_changed_words_ts(old, new):
    '''
    Returns a tuple for a two sided comparison based on words, see `diff_changed_words`.
    '''
    # Left side highlights removals, right side additions.
    return (diff_changed_words(new, old), diff_changed_words(old, new))
def convert(s, linesize=0, ponct=0):
    """Escape *s* for HTML output.

    With linesize > 0, zero-width breakable spaces are inserted so very
    long tokens can wrap; with ponct=1, tabs/spaces/newlines are rendered
    with visible punctuation markers.  DIFFON/DIFFOFF sentinels become
    <span> tags.
    """
    i = 0
    t = ""
    for c in s:
        # used by diffs
        if c == DIFFON:
            t += '<span class="diffchanged2">'
        elif c == DIFFOFF:
            t += "</span>"
        # special html chars
        elif ord(c) in html.entities.codepoint2name:
            t += "&%s;" % (html.entities.codepoint2name[ord(c)])
            i += 1
        # special highlighted chars
        elif c == "\t" and ponct == 1:
            # Expand to the next tab stop, marking the tab itself.
            n = tabsize-(i%tabsize)
            if n == 0:
                n = tabsize
            t += ('<span class="diffponct">»</span>'+' '*(n-1))
        elif c == " " and ponct == 1:
            t += '<span class="diffponct">·</span>'
        elif c == "\n" and ponct == 1:
            if show_CR:
                # BUG FIX: the original literal was '\<', an invalid escape
                # sequence (a warning on modern Python); '\\' produces the
                # same single backslash explicitly.
                t += '<span class="diffponct">\\</span>'
        else:
            t += c
            i += 1
        # Allow the browser to wrap after break characters, and force a
        # break opportunity every `linesize` characters otherwise
        # (\u200b is the zero-width space).
        if linesize and (WORDBREAK.count(c) == 1):
            t += '\u200b'
            i = 0
        if linesize and i > linesize:
            i = 0
            t += "\u200b"
    return t
def add_comment(s, output_file):
    # Miscellaneous diff lines (e.g. "diff --git", "index ...") are shown
    # as one full-width table row.
    output_file.write(('<tr class="diffmisc"><td colspan="4">%s</td></tr>\n'%convert(s)).encode(encoding))
def add_filename(f1, f2, output_file):
    # Header row: old filename spans the left half, new filename the right.
    output_file.write(("<tr><th colspan='2'>%s</th>"%convert(f1, linesize=linesize)).encode(encoding))
    output_file.write(("<th colspan='2'>%s</th></tr>\n"%convert(f2, linesize=linesize)).encode(encoding))
def add_hunk(output_file, show_hunk_infos):
    # Emits the row separating two hunks: either the hunk offsets/sizes or
    # a vertical-ellipsis placeholder.
    # NOTE(review): unlike most writers in this module these strings are
    # not .encode()d, mixing str and bytes writes on the same stream --
    # confirm which stream type is expected here.
    if show_hunk_infos:
        output_file.write('<tr class="diffhunk"><td colspan="2">Offset %d, %d lines modified</td>'%(hunk_off1, hunk_size1))
        output_file.write('<td colspan="2">Offset %d, %d lines modified</td></tr>\n'%(hunk_off2, hunk_size2))
    else:
        # ⋮ - vertical ellipsis
        output_file.write('<tr class="diffhunk"><td colspan="2">⋮</td><td colspan="2">⋮</td></tr>')
def add_line(s1, s2, output_file):
    """Write one side-by-side table row for the line pair (s1, s2).

    s1/s2 are the old/new line texts; None (or "") means the line does not
    exist on that side.  Uses and advances the global line counters.
    """
    global line1
    global line2

    orig1 = s1
    orig2 = s2

    if s1 == None and s2 == None:
        type_name = "unmodified"
    elif s1 == None or s1 == "":
        type_name = "added"
    # BUG FIX: the original tested ``s1 == ""`` here (copy-paste error),
    # which is unreachable after the branch above -- the intent is to
    # classify lines missing on the *new* side as deleted.
    elif s2 == None or s2 == "":
        type_name = "deleted"
    elif s1 == s2:
        type_name = "unmodified"
    else:
        type_name = "changed"
        # Only genuinely changed pairs need an intra-line diff; running it
        # for added/deleted rows would pass None into the diff algorithms.
        if algorithm == 1:
            s1, s2 = diff_changed_words_ts(orig1, orig2)
        elif algorithm == 2:
            s1, s2 = diff_changed_ts(orig1, orig2)
        else: # default
            s1, s2 = linediff(orig1, orig2)

    output_file.write(('<tr class="diff%s">' % type_name).encode(encoding))
    # Left (old) cell pair: line number + content, or an empty filler.
    if s1 != None and s1 != "":
        output_file.write(('<td class="diffline">%d </td>' % line1).encode(encoding))
        output_file.write('<td class="diffpresent">'.encode(encoding))
        output_file.write(convert(s1, linesize=linesize, ponct=1).encode(encoding))
        output_file.write('</td>'.encode(encoding))
    else:
        s1 = ""
        output_file.write('<td colspan="2"> </td>'.encode(encoding))
    # Right (new) cell pair, same layout.
    if s2 != None and s2 != "":
        output_file.write(('<td class="diffline">%d </td>'%line2).encode(encoding))
        output_file.write('<td class="diffpresent">'.encode(encoding))
        output_file.write(convert(s2, linesize=linesize, ponct=1).encode(encoding))
        output_file.write('</td>'.encode(encoding))
    else:
        s2 = ""
        output_file.write('<td colspan="2"></td>'.encode(encoding))
    output_file.write('</tr>\n'.encode(encoding))

    # Advance the counters only for sides that actually had a line.
    if s1 != "":
        line1 += 1
    if s2 != "":
        line2 += 1
def empty_buffer(output_file):
    """Flush the pending (old, new) line pairs in `buf` to the output.

    When a hunk mixes deletions and additions the two sides are re-paired
    positionally so changed lines appear next to each other.
    """
    global buf
    global add_cpt
    global del_cpt

    if del_cpt == 0 or add_cpt == 0:
        # Pure additions, pure deletions, or context lines: emit as-is.
        for l in buf:
            add_line(l[0], l[1], output_file)
    elif del_cpt != 0 and add_cpt != 0:
        # Mixed hunk: line up deleted lines against added lines.
        l0, l1 = [], []
        for l in buf:
            if l[0] != None:
                l0.append(l[0])
            if l[1] != None:
                l1.append(l[1])
        max_len = (len(l0) > len(l1)) and len(l0) or len(l1)
        for i in range(max_len):
            # Pad the shorter side with empty strings.
            s0, s1 = "", ""
            if i < len(l0):
                s0 = l0[i]
            if i < len(l1):
                s1 = l1[i]
            add_line(s0, s1, output_file)

    # Reset the shared hunk state.
    add_cpt, del_cpt = 0, 0
    buf = []
def parse_input(input_file, output_file, input_file_name, output_file_name,
                exclude_headers, show_hunk_infos):
    """Read a unified diff from input_file and write side-by-side HTML
    (encoded bytes) to output_file.

    exclude_headers suppresses the <html>/<body> wrapper; show_hunk_infos
    prints hunk offsets instead of an ellipsis row.
    """
    global add_cpt, del_cpt
    global line1, line2
    global hunk_off1, hunk_size1, hunk_off2, hunk_size2

    if not exclude_headers:
        title_suffix = ' ' + input_file_name
        output_file.write(html_hdr.format(title_suffix, encoding, desc, "", modified_date, lang).encode(encoding))
    output_file.write(table_hdr.encode(encoding))

    # All patterns are raw strings -- the original non-raw literals
    # triggered invalid-escape-sequence warnings on modern Python.
    while True:
        l = input_file.readline()
        if l == "":
            break

        # "--- oldfile" starts a new file pair; scan forward for "+++".
        m = re.match(r'^--- ([^\s]*)', l)
        if m:
            empty_buffer(output_file)
            file1 = m.groups()[0]
            while True:
                l = input_file.readline()
                m = re.match(r'^\+\+\+ ([^\s]*)', l)
                if m:
                    file2 = m.groups()[0]
                    break
            add_filename(file1, file2, output_file)
            hunk_off1, hunk_size1, hunk_off2, hunk_size2 = 0, 0, 0, 0
            continue

        # "@@ -a,b +c,d @@" starts a hunk; a missing count means 1.
        m = re.match(r"@@ -(\d+),?(\d*) \+(\d+),?(\d*)", l)
        if m:
            empty_buffer(output_file)
            hunk_data = [x=="" and 1 or int(x) for x in m.groups()]
            hunk_off1, hunk_size1, hunk_off2, hunk_size2 = hunk_data
            line1, line2 = hunk_off1, hunk_off2
            add_hunk(output_file, show_hunk_infos)
            continue

        # Outside any hunk: treat the line as a comment row.
        if hunk_size1 == 0 and hunk_size2 == 0:
            empty_buffer(output_file)
            add_comment(l, output_file)
            continue

        if re.match(r"^\+", l):
            add_cpt += 1
            hunk_size2 -= 1
            buf.append((None, l[1:]))
            continue

        if re.match(r"^\-", l):
            del_cpt += 1
            hunk_size1 -= 1
            buf.append((l[1:], None))
            continue

        # Context line (present on both sides).
        if re.match(r"^\ ", l) and hunk_size1 and hunk_size2:
            empty_buffer(output_file)
            hunk_size1 -= 1
            hunk_size2 -= 1
            buf.append((l[1:], l[1:]))
            continue

        empty_buffer(output_file)
        add_comment(l, output_file)

    empty_buffer(output_file)
    output_file.write(table_footer.encode(encoding))
    if not exclude_headers:
        output_file.write(html_footer.format("", dtnow.strftime("%d.%m.%Y")).encode(encoding))
def usage():
    """Print command-line help for diff2html.py to stdout."""
    print('''
diff2html.py [-e encoding] [-i file] [-o file] [-x]
diff2html.py -h

Transform a unified diff from stdin to a colored side-by-side HTML
page on stdout.
stdout may not work with UTF-8, instead use -o option.

   -i file     set input file, else use stdin
   -e encoding set file encoding (default utf-8)
   -o file     set output file, else use stdout
   -x          exclude html header and footer
   -t tabsize  set tab size (default 8)
   -l linesize set maximum line size is there is no word break (default 20)
   -r          show \\r characters
   -k          show hunk infos
   -a algo     line diff algorithm (0: linediff characters, 1: word, 2: simplediff characters) (default 0)
   -h          show help and exit
''')
def main():
    """Parse command-line options and run the diff-to-HTML conversion."""
    # Options mutate the module-level configuration globals.
    global linesize, tabsize
    global show_CR
    global encoding
    global algorithm

    input_file_name = ''
    output_file_name = ''
    exclude_headers = False
    show_hunk_infos = False

    try:
        opts, args = getopt.getopt(sys.argv[1:], "he:i:o:xt:l:rka:",
                                   ["help", "encoding=", "input=", "output=",
                                    "exclude-html-headers", "tabsize=",
                                    "linesize=", "show-cr", "show-hunk-infos", "algorithm="])
    except getopt.GetoptError as err:
        print((str(err))) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    verbose = False
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-e", "--encoding"):
            encoding = a
        elif o in ("-i", "--input"):
            input_file = codecs.open(a, "r", encoding)
            input_file_name = a
        elif o in ("-o", "--output"):
            # NOTE(review): no encoding is passed here, unlike the input
            # file -- confirm the output should use the platform default.
            output_file = codecs.open(a, "w")
            output_file_name = a
        elif o in ("-x", "--exclude-html-headers"):
            exclude_headers = True
        elif o in ("-t", "--tabsize"):
            tabsize = int(a)
        elif o in ("-l", "--linesize"):
            linesize = int(a)
        elif o in ("-r", "--show-cr"):
            show_CR = True
        elif o in ("-k", "--show-hunk-infos"):
            show_hunk_infos = True
        elif o in ("-a", "--algorithm"):
            algorithm = int(a)
        else:
            assert False, "unhandled option"

    # Use stdin if not input file is set
    # (the locals() check tests whether -i/-o bound the variables above).
    if not ('input_file' in locals()):
        input_file = codecs.getreader(encoding)(sys.stdin)

    # Use stdout if not output file is set
    if not ('output_file' in locals()):
        output_file = codecs.getwriter(encoding)(sys.stdout)

    parse_input(input_file, output_file, input_file_name, output_file_name,
                exclude_headers, show_hunk_infos)
def parse_from_memory(txt, exclude_headers, show_hunk_infos):
    " Parses diff from memory and returns a string with html "
    # NOTE(review): parse_input writes .encode()d bytes, while StringIO
    # accepts only str -- confirm this entry point still works on Python 3.
    input_stream = io.StringIO(txt)
    output_stream = io.StringIO()
    parse_input(input_stream, output_stream, '', '', exclude_headers, show_hunk_infos)
    return output_stream.getvalue()
# Script entry point.
if __name__ == "__main__":
    main()
from pymol import cmd, stored
import re
try:
from collections import OrderedDict
_orderedDict = True
except ImportError:
_orderedDict = False
# PyMOL 1.7.4 introduces support for multi-letter chains, so we can afford to
# use a smaller alphabet. In earlier versions, use lower-case letters if needed
# (requires running `set ignore_case, 0`)
_long_chains = cmd.get_version()[1] >= 1.74
# 36 = A-Z plus 0-9; 62 additionally uses a-z (see ChainSet._int_to_chain).
_default_base = 36 if _long_chains else 62
class OutOfChainsError(Exception):
    """Raised when no unused chain identifier is available."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return str(self.msg)
class ChainSet(object):
    """
    Base class for various methods to rename chains

    Contains _chains, which maps from the renamed chain to a tuple with the
    original (object,state,chain). All dict-like accessors work on ChainSets,
    e.g.
        chain_set["A"] -> ("obj",1,"A")
    """
    def __init__(self):
        # Use an OrderedDict in Python >= 1.7 for better printing
        if _orderedDict:
            self._chains = OrderedDict()
        else:
            self._chains = dict()

    def map_chain(self, obj, state, origChain ):
        """
        map_chain(string obj,int state, string chain]]) -> string

        Maps a chain letter to a unique chainID. Results are unique within each
        instance, and can be used as keys on this chain set.
        """
        raise NotImplementedError("Base class")

    # delegate most methods to _chains
    def __getattr__(self,at):
        # Mutating dict methods are deliberately blocked so callers cannot
        # bypass map_chain; everything else is forwarded to _chains.
        if at in "pop popitem update setdefault".split():
            raise AttributeError("type object '%s' has no attribute '%s'"%(type(self),at))
        return getattr(self._chains,at)

    # Comparison and container protocols delegate to the underlying dict.
    # (__cmp__ is a Python 2 relic; harmless under Python 3.)
    def __cmp__(self,other): return self._chains.__cmp__(other)
    def __eq__(self,other): return self._chains.__eq__(other)
    def __ge__(self,other): return self._chains.__ge__(other)
    def __gt__(self,other): return self._chains.__gt__(other)
    def __le__(self,other): return self._chains.__le__(other)
    def __lt__(self,other): return self._chains.__lt__(other)
    def __ne__(self,other): return self._chains.__ne__(other)
    def __len__(self): return self._chains.__len__()
    def __contains__(self,key): return self._chains.__contains__(key)
    def __getitem__(self,key): return self._chains.__getitem__(key)
    def __iter__(self): return self._chains.__iter__()
    def __str__(self): return str(self._chains)

    @staticmethod
    def _int_to_chain(i,base=_default_base):
        """
        _int_to_chain(int,int) -> str

        Converts a positive integer to a chain ID. Chain IDs include uppercase
        characters, numbers, and optionally lowercase letters.

        i = a positive integer to convert
        base = the alphabet size to include. Typically 36 or 62.
        """
        if i < 0:
            raise ValueError("positive integers only")
        if base < 0 or 62 < base:
            raise ValueError("Invalid base")

        # Bijective base-N conversion: A-Z, then 0-9, then a-z.
        quot = int(i)//base
        rem = i%base
        if rem < 26:
            letter = chr( ord("A") + rem)
        elif rem < 36:
            letter = str( rem-26)
        else:
            letter = chr( ord("a") + rem - 36)
        if quot == 0:
            return letter
        else:
            # Recurse for multi-character IDs (quot-1 makes it bijective).
            return ChainSet._int_to_chain(quot-1,base) + letter
class DefaultChainSet(ChainSet):
    """
    Avoids relettering chains if possible. If a chain has been used, uses the
    next available chain letter. Note that this can potentially lead to
    cascading renames, e.g. if chains are sorted alphabetically rather than by
    object.

    Used for rename = 0.
    """
    def __init__(self):
        super(DefaultChainSet,self).__init__()
        # Index of the next fallback chain ID to try (fed to _int_to_chain).
        self._next_chain = 0

    def map_chain(self, obj, state, origChain ):
        # Keep _next_chain up-to-date
        while ChainSet._int_to_chain(self._next_chain) in self:
            self._next_chain += 1
        # Map this chain
        if origChain in self:
            # Rename: the original ID is taken, assign the next free one.
            next_chain = ChainSet._int_to_chain(self._next_chain)
            self._next_chain += 1
        else:
            next_chain = origChain
        self._chains[next_chain] = (obj,state,origChain)
        return next_chain
class SequentialChainSet(ChainSet):
    """
    Renumbers all chains starting at A, continuing through the capital letters
    and numbers, and then adding additional letters through 9999 (the last
    valid chain for mmCIF) and beyond.

    Used for rename=1
    """

    def __init__(self):
        super(SequentialChainSet, self).__init__()
        # Counter feeding _int_to_chain; advances once per mapped chain.
        self._next_chain = 0

    def map_chain(self, obj, state, origChain):
        """Assign the next sequential chain ID to (obj, state, origChain)."""
        assigned = ChainSet._int_to_chain(self._next_chain)
        self._next_chain += 1
        self._chains[assigned] = (obj, state, origChain)
        return assigned
class LongChainSet(ChainSet):
    """
    Uses long strings for the chain names. Chains are renamed like
    "%s_%s_%04d"%(original_chainid,objectname,state).

    Used for rename=2
    """

    def map_chain(self, obj, state, origChain):
        """Build a descriptive chain label from the source chain/object/state."""
        label = "%s_%s_%04d" % (origChain, obj, state)
        if label in self:
            raise ValueError("Duplicate chain %s" % label)
        self._chains[label] = (obj, state, origChain)
        return label
def flatten_obj(name="",selection="",state=0,rename=0,quiet=1,chain_map=""):
    """
    DESCRIPTION

    "flatten_obj" combines multiple objects or states into a single object,
    renaming chains where required

    USAGE

    flatten_obj name, selection[, state[, rename[, quiet[, chain_map]]]]

    ARGUMENTS

    name = a unique name for the flattened object {default: flat}

    selection = the set of objects to include in the flattening. The selection
        will be expanded to include all atoms of objects. {default: all}

    state = the source state to select. Use 0 or -1 to flatten all states {default: 0}

    rename = The scheme to use for renaming chains: {default: 0}
        (0) preserve chains IDs where possible, rename other chains
            alphabetically
        (1) rename all chains alphabetically
        (2) rename chains using the original chain letter, object name, and state

    quiet = If set to 0, print some additional information about progress and
        chain renaming {default: 1}

    chain_map = An attribute name for the 'stored' scratch object. If
        specified, `stored.<chain_map>` will be populated with a dictionary
        mapping the new chain names to a tuple giving the originated object,
        state, and chainID. {default: ""}

    NOTES

    Like the select command, if name is omitted then the default object name
    ("flat") is used as the name argument.

    Chain renaming is tricky. PDB files originally limited chains to single
    letter identifiers containing [A-Za-z0-9]. When this was found to be
    limiting, multi-letter chains (ideally < 4 chars) were allowed. This is
    supported as of PyMOL 1.7. Earlier versions do not accept rename=2, and
    will raise an exception when flattening a structure with more than 62
    chains.

    EXAMPLES

    flatten_obj flat, nmrObj
    flatten_obj ( obj1 or obj2 )

    SEE ALSO

    split_states
    """

    # arguments
    # Single argument; treat as selection
    if name and not selection:
        selection = name
        name = ""
    # default name and selection
    if not name:
        name = "flat"
    if not selection:
        selection = "(all)"

    # PyMOL passes all command arguments as strings; normalize them.
    state = int(state)
    rename = int(rename)
    quiet = int(quiet)

    # Wrap in extra parantheses for get_object_list
    selection = "( %s )" % selection

    # Pick the chain-renaming strategy (see the ChainSet subclasses).
    if rename == 0:
        chainSet = DefaultChainSet()
    elif rename == 1:
        chainSet = SequentialChainSet()
    elif rename == 2:
        chainSet = LongChainSet()
    else:
        raise ValueError("Unrecognized rename option (Valid: 0,1,2)")

    metaprefix = "temp" #TODO unique prefix

    # store original value of retain_order, which causes weird interleaving of
    # structures if enabled.
    retain_order = cmd.get("retain_order")
    try:
        cmd.set("retain_order",0)

        # create new object for each state
        for obj in cmd.get_object_list(selection):
            if state <= 0:
                # all states
                prefix = "%s_%s_"%(metaprefix,obj)
                cmd.split_states(obj,prefix=prefix)
            else:
                prefix = "%s_%s_%04d"%(metaprefix,obj,state)
                cmd.create(prefix, obj, state, 1)

        # renumber all states
        statere = re.compile("^%s_(.*)_(\d+)$" % metaprefix) # matches split object names

        warn_lowercase = False

        # Iterate over all objects with metaprefix
        try:
            for obj in cmd.get_object_list("(%s_*)"%(metaprefix) ):
                m = statere.match(obj)
                if m is None:
                    print(("Failed to match object %s" %obj))
                    continue
                origobj = m.group(1)
                statenum = int(m.group(2))

                chains = cmd.get_chains(obj)

                rev_chain_map = {} #old -> new, for this obj only
                # Sorting by (length, value) keeps "A" < "B" < ... < "AA".
                for chain in sorted(chains,key=lambda x:(len(x),x)):
                    new_chain = chainSet.map_chain(origobj,statenum,chain)
                    rev_chain_map[chain] = new_chain
                    if not quiet:
                        print((" %s state %d chain %s -> %s"%(origobj,statenum,chain, new_chain) ))
                    if not _long_chains:
                        if len(new_chain) > 1:
                            raise OutOfChainsError("No additional chains available (max 62).")

                space = {'rev_chain_map':rev_chain_map}
                cmd.alter(obj,"chain = rev_chain_map[chain]",space=space)

            print(("Creating object from %s_*"%metaprefix))
            # Recombine into a single object
            cmd.create(name,"%s_*"%metaprefix)

            # Set chain_map
            if chain_map:
                setattr(stored,chain_map,chainSet)

            # Warn if lowercase chains were generated
            if cmd.get("ignore_case") == "on" and any([c.upper() != c for c in list(chainSet.keys())]):
                print("Warning: using lower-case chain IDs. Consider running the "
                        "following command:\n set ignore_case, 0" )

        finally:
            # Clean up
            print("Cleaning up intermediates")
            cmd.delete("%s_*"%metaprefix)
    finally:
        # restore original parameters
        print("Resetting variables")
        cmd.set("retain_order",retain_order)
# Register flatten_obj as a PyMOL command.
cmd.extend('flatten_obj', flatten_obj)

# tab-completion of arguments
cmd.auto_arg[0]['flatten_obj'] = [ cmd.object_sc, 'name or selection', '']
cmd.auto_arg[1]['flatten_obj'] = [ cmd.object_sc, 'selection', '']
import logging
import argparse
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.PDB import PDBParser
from Bio.PDB import PDBIO
from Bio.PDB.Atom import PDBConstructionWarning
import warnings
warnings.simplefilter('ignore', PDBConstructionWarning)
# logger
# Root logger with a plain stderr handler; -v raises the level to INFO
# in the __main__ section below.
logger = logging.getLogger()
handler = logging.StreamHandler()
logger.addHandler(handler)
def get_seq(alignfn, seqid):
    """Get seq from an alignment with gaps.

    Args:
       alignfn (str): a path to an alignment
       seqid   (str): seq id in an alignment

    Usage::

        >>> get_seq('test_data/ALN_OBJ1_OBJ2.fa', 'obj1').seq
        Seq('GUUCAG-------------------UGAC-', SingleLetterAlphabet())

    Returns:
       SeqRecord
    """
    alignment = SeqIO.index(alignfn, 'fasta')
    # BUG FIX: ``SeqIO.index`` already yields SeqRecord objects; the
    # original wrapped the record in ``SeqRecord(...)`` again, producing a
    # nested record whose ``.seq`` was itself a SeqRecord (visible in the
    # old doctest output).
    return alignment[seqid]
def open_pdb(pdbfn):
    """Open pdb with Biopython.

    Args:
       pdbfn (str): a path to a pdb structure

    Returns:
       PDB Biopython object: with a pdb structure
    """
    return PDBParser().get_structure('struc', pdbfn)
def renumber(seq_with_gaps, struc, residue_index_start):
    """Renumber a pdb file.

    Args:
       seq_with_gaps (str): a target sequence extracted from the alignment
       struc (pdb): a structure
       residue_index_start (int): starting number

    Returns:
       BioPython Structure object
    """
    # Build the new residue numbers: one consecutive number per non-gap
    # character of the aligned sequence.
    new_numbering = []
    for nt in seq_with_gaps:
        if nt != '-':
            nt_num_a = [residue_index_start, nt]
            new_numbering.append(residue_index_start)
            logger.info(nt_num_a)
            residue_index_start = residue_index_start + 1
    logger.info(new_numbering)

    # works only for single chain
    # BUG FIX: the original iterated over the module-level ``pdb`` variable
    # (shadowing its own ``struc`` parameter) and returned the last model;
    # iterate the argument instead and return the full structure.
    for model in struc:
        for chain in model:
            for residue, resi in zip(chain, new_numbering):
                residue.id = (residue.id[0], resi, residue.id[2])
    return struc
def write_struc(struc, outfn):
    """Write renumbered pdb with Biopython.

    Args:
       struc (pdb): a renumbered structure
       outfn (str): a path to a new, renumbered pdb file

    Returns:
       none: writes to a file
    """
    pdb_io = PDBIO()
    pdb_io.set_structure(struc)
    pdb_io.save(outfn)
    logger.info('Structure written to %s' % outfn)
def get_parser():
    """Build the command-line argument parser for this script."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Optional flags.
    p.add_argument("-v", "--verbose", action="store_true",
                   help="increase output verbosity")
    p.add_argument("--residue_index_start", type=int, default=1,
                   help="renumber starting number (default: 1)")
    p.add_argument("--outfn",
                   help="output pdb file (default: pdbfn .pdb -> _out.pdb)")
    # Required positional arguments (order matters).
    p.add_argument("seqid", help="seq id in the alignemnt")
    p.add_argument("alignfn", help="alignemnt in the Fasta format")
    p.add_argument("pdbfn", help="pdb file")
    return p
# main
if __name__ == '__main__':
    args = get_parser().parse_args()

    if args.verbose:
        logger.setLevel(logging.INFO)

    # Default output name: foo.pdb -> foo_out.pdb
    if not args.outfn:
        args.outfn = args.pdbfn.replace('.pdb', '_out.pdb')

    seq_with_gaps = get_seq(args.alignfn, args.seqid)
    pdb = open_pdb(args.pdbfn)
    struc = renumber(seq_with_gaps, pdb, args.residue_index_start)
    write_struc(struc, args.outfn)
from rna_tools.tools.pdb_formatix.SingleLineUtils import get_res_code, get_res_num, get_atom_code, \
set_atom_code, set_line_bfactor
import re
class PDBFile(object):
    """Class for holding data from a PDB file and modifying it.
    """
    # find 'ATOM' lines in a PDB file
    ATOM_LINE_PATTERN = re.compile('^ATOM')
    # dictionary for changing residue names from 3 letters to 1 letter
    # (covers plain, R-prefixed, and 5'/3'-terminal naming variants)
    RES3TO1 = {
        'GUA': 'G',
        'URI': 'U',
        'CYT': 'C',
        'ADE': 'A',
        'RG': 'G',
        'RU': 'U',
        'RC': 'C',
        'RA': 'A',
        'R3G': 'G',
        'R3U': 'U',
        'R3C': 'C',
        'R3A': 'A',
        'R5G': 'G',
        'R5U': 'U',
        'R5C': 'C',
        'R5A': 'A',
        'RG3': 'G',
        'RU3': 'U',
        'RC3': 'C',
        'RA3': 'A',
        'RG5': 'G',
        'RU5': 'U',
        'RC5': 'C',
        'RA5': 'A',
        'RGU': 'G',
        'URA': 'U',
        'RCY': 'C',
        'RAD': 'A',
        # just in case
        'G': 'G',
        'U': 'U',
        'C': 'C',
        'A': 'A',
    }
    # Three-letter amino-acid codes (both capitalizations), used by
    # detect_proteins() to spot protein content in a PDB file.
    AMINOACID_CODES = ("Ala", "Arg", "Asn", "Asp", "Cys", "Glu", "Gln", "Gly",
                       "His", "Ile", "Leu", "Lys", "Met", "Phe", "Pro", "Ser", "Thr",
                       "Trp", "Tyr", "Val",
                       "ALA", "ARG", "ASN", "ASP", "CYS", "GLU", "GLN", "GLY",
                       "HIS", "ILE", "LEU", "LYS", "MET", "PHE", "PRO", "SER", "THR",
                       "TRP", "TYR", "VAL"
                       )
    def __init__(self, pdb_string=None, pdb_path=None, pdb_handle=None, verbose=False):
        """Constructor, should get exactly one of the arguments:

        * pdb_string = PDB as a string
        * pdb_path = string with path to a PDB file
        * pdb_handle = handle to a PDB file
        """
        self.verbose = verbose
        # At most one of the three input sources may be given.
        if len([x for x in [pdb_string, pdb_path, pdb_handle] if x is not None]) > 1:
            print('You should provide at most one source for PDB file')
            raise Exception
        input_string = ''
        if pdb_string is not None:
            input_string = pdb_string
        elif pdb_path is not None:
            with open(pdb_path) as f:
                input_string = f.read()
        elif pdb_handle is not None:
            input_string = pdb_handle.read()
        self.pdb_string = input_string
        self.pdb_lines = self.pdb_string.split('\n')
        # Human-readable names of fixes applied so far (see _apply_fix).
        self.fixes = []
def save(self, file_path):
"""Save current PDB to disk
Arguments:
* file_path = path where it will be saved
"""
with open(file_path, 'w') as f:
f.write(self.pdb_string)
def _apply_fix(self, fix_name, result_string, result_lines=None):
"""Helper function for applying fixes and saving information about them.
Arguments:
* fix_name = string that will be added to self.pdb_fixes, unless
result_string is the same as pdb_string
* result_string = string after applying this fix
* result_lines = optional, list of lines, use this argument if you
already have such list and don't want to lose time on splitting
result_string
"""
if self.pdb_string != result_string:
self.fixes.append(fix_name)
self.pdb_string = result_string
if result_lines is not None:
self.pdb_lines = result_lines
else:
self.pdb_lines = self.pdb_string.split('\n')
def set_string(self, pdb_string):
"""Change PDB string stored in this instance of the class
Arguments:
* pdb_string = new PDB string
"""
self.pdb_string = pdb_string
self.pdb_lines = pdb_string.split('\n')
def _get_atom_lines(self):
"""Get only lines with ATOM information
"""
return [l for l in self.pdb_lines if re.match(self.ATOM_LINE_PATTERN, l)]
def validate_pdb(self):
"""Check if file is a PDB structure
Output:
* True if it is a PDB, False otherwise
"""
atom_lines = self._get_atom_lines()
if not len(atom_lines):
return False
return True
def detect_proteins(self):
"""Check if there are any aminoacid fragments in the file
Output:
* True if there are some, False otherwise
"""
for l in self.pdb_lines:
if get_res_code(l) in self.AMINOACID_CODES:
return True
return False
def seq_from_pdb(self):
"""Extract sequence from a PDB and return it as a string.
Output:
* sequence, returned as a string
"""
atoms = [l.split() for l in self._get_atom_lines()]
if atoms[0][3][0] != 'r': # it is 'r' if it's a ROSETTA PDB
seq = [self.RES3TO1[atoms[0][3]]]
else:
seq = [atoms[0][3][1]]
for a in range(1, len(atoms)):
# atom number is different than previous one
if atoms[a][5] != atoms[a - 1][5]:
if atoms[a][3][0] != 'r': # check for ROSETTA PDB
seq.append(self.RES3TO1[atoms[a][3]])
else:
seq.append(atoms[a][3][1])
return ''.join(seq)
def seq_from_amber_like_pdb(self):
"""Extract sequence from a PDB and return it as a string - use it for amber-like files.
Output:
* sequence, returned as a string, such as: RG5 RC RU RG RG RG RC RG RC RA RG RG3 RC5 RC RU RG RA RC RG RG RU RA RC RA RG RC3
"""
atoms = [l.split() for l in self._get_atom_lines()]
seq = []
seq.append(atoms[0][3])
for a in range(1, len(atoms)):
# atom number is different than previous one
if self.verbose:
print((atoms[a][5], atoms[a - 1][5]))
if atoms[a][5] != atoms[a - 1][5]:
seq.append(atoms[a][3])
return ' '.join(seq)
def get_fasta(self, name='seq', lowercase=False):
"""Format sequence in FASTA format, with a header and lines split
at 80 characters.
Arguments:
* name = name of the sequence (it's put in the header line)
Output:
* FASTA returned as a string
"""
seq = self.seq_from_pdb()
if lowercase:
seq = seq.lower()
result = ['>' + name]
result.extend([seq[i:i + 80] for i in range(0, len(seq), 80)])
result.append('')
return '\n'.join(result)
def remove_non_atoms(self):
"""Remove all lines that are not ATOMs
"""
result = self._get_atom_lines()
self._apply_fix('Removed non-atom lines', '\n'.join(result), result)
def _check_resname_3(self):
"""Check if PDB uses 3 or 1 letter residue names.
Output:
* bool, True if 3 letters, False otherwise
"""
for l in self.pdb_lines:
if l.startswith('ATOM'):
return 3 == len(l.split()[3])
def _resname_3to1(self):
"""Convert residue names in PDB file from 3 letters to 1 letter.
"""
result = []
for l in self.pdb_lines:
if l.startswith('ATOM'):
long_name = l.split()[3]
try:
short_name = self.RES3TO1[long_name]
except KeyError:
short_name = 'X'
result.append(l[:17] + ' ' + short_name + l[20:])
else:
result.append(l)
self._apply_fix('resname_3to1', '\n'.join(result), result)
def resname_check_and_3to1(self):
"""Check if resnames are 3 letter long and if so convert them to 1 letter.
"""
if self._check_resname_3():
try:
self._resname_3to1()
except:
print('Conversion to 1-letter residue names failed')
def terminate_chains(self):
"""Add 'TER' at the end of chain if none 'TER's are found in PDB.
"""
for l in self.pdb_lines:
if l.startswith('TER'):
return
else:
result = []
chain_started = False
ter_added = False
for l in self.pdb_lines:
if 'ATOM' in l:
chain_started = True
else:
if chain_started:
result.append('TER')
ter_added = True
chain_started = False
result.append(l)
if not ter_added:
result.append('TER')
self._apply_fix('terminate_chains', '\n'.join(result), result)
def _split_by_ters(self, separator='TER\n'):
"""Split a PDB string by TER lines or other separator
Arguments:
* separator = optional, separator dividing structures, default is 'TER\n'
"""
return [i + separator for i in self.pdb_string.split(separator)]
def remove_short_chains(self, threshold=9):
"""Remove chains that have chains with a small number of atoms.
Arguments:
* threshold = if chain has more atoms, it stays in the result
"""
chains = self.pdb_string.split('TER\n')
good_chains = [c for c in chains if len(c.split('\nATOM')) > threshold]
self._apply_fix('remove_short_chains', '\n'.join(good_chains))
def check_and_add_P_at_start(self):
residues = {}
for l in self.pdb_lines:
res_num = get_res_num(l)
if res_num in residues:
residues[res_num].append(l)
else:
residues[res_num] = [l]
first_residue = residues[min(residues.keys())]
# check P
has_p = False
for l in first_residue:
if get_atom_code(l) == 'P':
has_p = True
if not has_p:
# add P
corrected_residue = []
for l in first_residue:
if get_atom_code(l) == 'O5\'' or get_atom_code(l) == 'O5*':
corrected_residue.append(set_atom_code(l, 'P'))
else:
corrected_residue.append(l)
residues[min(residues.keys())] = corrected_residue
residues = ['\n'.join(residues[r]) for r in residues]
self._apply_fix('add_P_at_start', '\n'.join(residues))
def set_residues_bfactor(self, bfactors):
"""Set B-factor to any value you want
Arguments:
* bfactors = list of B-factors that will be set, should be of same
length as nucleotide sequence
"""
ans = []
for l in self.pdb_lines:
if l.startswith('ATOM'):
res_num = get_res_num(l)
try:
ans.append(set_line_bfactor(l, bfactors[res_num - 1]))
except IndexError:
pass
else:
ans.append(l)
self.pdb_lines = ans
self.pdb_string = '\n'.join(ans)
def pedantic_pdb(self):
"""Do everything that's possible to fix PDB: 3-to-1, no HETATMs, TERs etc.
"""
self.resname_check_and_3to1()
self.terminate_chains()
self.remove_short_chains()
def count_models(self):
"""Count models in the PDB file
Output:
* number of models as an int
"""
model_num = 0
for l in self.pdb_lines:
if l.startswith('MODEL'):
model_num += 1
return model_num
def get_model(self, model_num):
"""Get n-th model from the file
Arguments:
* model_num = number of model to get, starts with 0
"""
model_borders = list(zip(
[i[0] for i in enumerate(self.pdb_lines) if i[1].startswith('MODEL')],
[i[0] for i in enumerate(self.pdb_lines) if i[1].startswith('ENDMDL')]
))
result = self.pdb_lines[:model_borders[0][0]]
result.extend(self.pdb_lines[model_borders[model_num][0]:model_borders[model_num][1] + 1])
result.extend(self.pdb_lines[model_borders[-1][1] + 1:])
self._apply_fix('get_model', '\n'.join(result), result)
def check_and_get_first_model(self):
"""Check if there are more than one models and get only the first one
"""
if self.count_models() >= 2:
self.get_model(0) | /rna_tools-3.13.7-py3-none-any.whl/rna_tools/tools/pdb_formatix/PDBFile.py | 0.617167 | 0.342957 | PDBFile.py | pypi |
import math
def draw_circle(x, y, z, r=8.0, cr=1.0, cg=0.4, cb=0.8, w=2.0):
    """
    Create a CGO circle

    PARAMS
        x, y, z
          X, Y and Z coordinates of the origin

        r
          Radius of the circle

        cr, cg, cb
          Color triplet, [r,g,b] where r,g,b are all [0.0,1.0].

        w
          Line width of the circle

    RETURNS
        the CGO object (it also loads it into PyMOL, too).
    """
    # coerce everything to float so the CGO list contains only numbers;
    # radius and colour components are forced non-negative
    x = float(x)
    y = float(y)
    z = float(z)
    r = abs(float(r))
    cr = abs(float(cr))
    cg = abs(float(cg))
    cb = abs(float(cb))
    w = float(w)

    # BEGIN/LINES/COLOR/VERTEX/END come from `from pymol.cgo import *`,
    # executed at the bottom of this module
    obj = [BEGIN, LINES, COLOR, cr, cg, cb]
    # NOTE(review): the angle advances in 1-radian steps while each segment
    # spans only 0.1 radian, so this emits 180 short dashes that wrap around
    # the circle many times; kept as in the original pymolwiki recipe --
    # confirm the visual result before "fixing" it.
    for i in range(180):
        obj.append(VERTEX)
        obj.append(r * math.cos(i) + x)
        obj.append(r * math.sin(i) + y)
        obj.append(z)
        obj.append(VERTEX)
        obj.append(r * math.cos(i + 0.1) + x)
        obj.append(r * math.sin(i + 0.1) + y)
        obj.append(z)
    obj.append(END)

    # register the CGO under a unique name and apply the requested line width
    cName = cmd.get_unused_name("circle_")
    cmd.load_cgo(obj, cName)
    cmd.set("cgo_line_width", w, cName)
    return obj
def draw_circle_selection(selName, r=None, cr=1.0, cg=0.4, cb=0.8, w=2.0):
    """Draw a CGO circle around a given selection or object.

    PARAMS
        selName
          Name of the thing to encircle.

        r
          Radius of circle.
          DEFAULT: the radius is derived automatically from the extent of
          the selection.  If you select one atom and the resultant circle
          is too small, you can override the calculation and specify your
          own radius.

        cr, cg, cb
          red, green and blue coloring, each a value in the range [0.0, 1.0]

        w
          Line width of the circle.

    RETURNS
        The circle object.
    """
    ((minX, minY, minZ), (maxX, maxY, maxZ)) = cmd.get_extent(selName)

    if r is None:  # was `r == None`; identity test is the idiomatic form
        # largest dimension of the selection's bounding box
        r = max([maxX - minX, maxY - minY, maxZ - minZ])

    # collect all atom coordinates of the selection and average them to
    # find the circle's centre (`stored` comes from the pymol import)
    stored.coords = []
    cmd.iterate_state(1, selName, "stored.coords.append([x,y,z])")
    l = len(stored.coords)
    centerX = sum([x[0] for x in stored.coords]) / l
    centerY = sum([x[1] for x in stored.coords]) / l
    centerZ = sum([x[2] for x in stored.coords]) / l

    # BUG FIX: this used to call cgoCircle() -- the function's name in the
    # original pymolwiki recipe -- which does not exist in this module and
    # raised NameError; the helper is named draw_circle() here.
    return draw_circle(centerX, centerY, centerZ, r, cr, cg, cb, w)
def draw_dist(x1, y1, z1, x2, y2, z2):
    """Draw a PyMOL distance measurement between two points in space.

    Two helper pseudoatoms ('pt1', 'pt2') are placed at the given
    coordinates and a distance object named 'pt1-pt2' is drawn between them.

    Example::

        draw_dist(54.729, 28.9375, 41.421, 55.342, 35.3605, 42.745)

    https://sourceforge.net/p/pymol/mailman/message/25795427/
    """
    for label, position in (('pt1', [x1, y1, z1]), ('pt2', [x2, y2, z2])):
        cmd.pseudoatom(label, pos=position)
    cmd.distance('pt1-pt2', 'pt1', 'pt2')
def draw_dists(interactions):
    """Draw C2-C2 distances for a list of residue pairs, e.g. ([1, 2], [3, 4]).

    For every pair a distance object named 'd<first>-<second>' is created
    between the C2 atoms of the two residues, and the pair plus the measured
    distance are printed.
    """
    for first, second in interactions:
        sel_a = "(" + "////" + str(first) + "/C2" + ")"
        sel_b = "(" + "////" + str(second) + "/C2" + ")"
        name = 'd' + str(first) + '-' + str(second)
        print(first, second, cmd.distance(name, sel_a, sel_b))
def draw_vector(x1, y1, z1, x2, y2, z2):
    """Draw a blue-to-red CGO cylinder from (x1, y1, z1) to (x2, y2, z2).

    Based on https://pymolwiki.org/index.php/CGOCylinder
    """
    # CGO opcode 9.0 = CYLINDER: two end points, radius, then an RGB
    # triple for each end.
    cylinder = [9.0,
                x1, y1, z1,
                x2, y2, z2,
                0.1,       # radius
                0, 0, 1,   # start colour (blue)
                1, 0, 0]   # end colour (red)
    cmd.load_cgo(cylinder, "vector")
try:
    import pymol
    from pymol.cgo import *  # BEGIN, LINES, COLOR, VERTEX, END, cmd, stored, ...
except ImportError:  # was a bare except; only a missing PyMOL is expected here
    # running outside PyMOL: the drawing helpers cannot be registered
    # (message had an unbalanced parenthesis: 'PyMOL (Python library is missing')
    print('PyMOL (Python library) is missing')
else:
    # expose the helpers as PyMOL commands
    cmd.extend("draw_vector", draw_vector)
    cmd.extend("draw_dist", draw_dist)
    cmd.extend("draw_circle", draw_circle)
    cmd.extend("draw_circle_selection", draw_circle_selection)
    cmd.extend('draw_dists', draw_dists)
r"""rna_plot_boxplotlike.py - generate a box plot with the points overlaid

Don't open Excel or Jupyter.  Simply plot one column of a CSV file grouped by
another column as a box plot with a swarm of the individual data points drawn
on top, and save the figure to a file.

Example::

    $ rna_plot_boxplotlike.py rmsds.csv growthb rmsd_all

(The previous docstring described rna_plot_density.py / rna_plot_hist.py and
did not match this script; this text is also shown as the --help description.)
"""
from __future__ import print_function

import argparse
import sys

import pandas as pd
import matplotlib.pyplot as plt

plt.style.use('ggplot')
plt.rc('figure', figsize=(10, 6))
def get_parser():
    """Build the command-line argument parser.

    Returns:
        argparse.ArgumentParser: parser with positional ``file``, ``x`` and
        ``y`` arguments plus optional ``--sep`` and ``-o/--output``.
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('file', help="data file, e.g. rmsds.csv")
    parser.add_argument('x', help="column of file to group by (x axis)")
    parser.add_argument('y', help="column of file to plot (y axis)")
    # the default really is ',' -- the old help text wrongly claimed '\t'
    parser.add_argument('--sep', help="column separator, ',' by default", default=",")
    parser.add_argument('-o', '--output',
                        help="output image path; derived from the input file name if omitted")
    return parser
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()

    df = pd.read_csv(args.file, sep=args.sep)
    print(df.head())

    import seaborn as sns

    plt.figure(figsize=(6, 4))
    sns.set(style="white")
    # BUG FIX: the columns were hardcoded to "growthb"/"rmsd_all" although
    # the user-supplied x/y arguments were parsed; use the arguments.
    ax = sns.boxplot(data=df, x=args.x, y=args.y, color="black")
    # overlay every individual data point on top of the boxes
    ax = sns.swarmplot(data=df, x=args.x, y=args.y, color="orange", ax=ax)
    plt.tight_layout()

    # BUG FIX: a user-supplied --output path was parsed but ignored and the
    # figure was only saved when --output was *not* given; honour it now.
    if args.output:
        outfn = args.output
    else:
        outfn = args.file.replace('.txt', '').replace('.csv', '') + '_bx.png'
    print('Save plot %s' % outfn)
    plt.savefig(outfn, dpi=100)

    import os
    os.system('open %s' % outfn)  # preview the file (macOS 'open')
from __future__ import print_function
__docformat__ = 'reStructuredText'
import os
import Bio.PDB.PDBParser
import Bio.PDB.Superimposer
from Bio.PDB.PDBIO import Select
from Bio.PDB import PDBIO
from Bio.SVDSuperimposer import SVDSuperimposer
from numpy import sqrt, array, asarray
class RNAmodel:
    """RNAmodel

    :Example:

    >>> rna = RNAmodel("test_data/rp14/rp14_5ddp_bound_clean_ligand.pdb", [1], False, None)
    >>> rna.get_report()
    "File: rp14_5ddp_bound_clean_ligand.pdb # of atoms: 1 \\nresi: 1 atom: <Atom C3'> \\n"

    :param fpath: file path, string
    :param residues: list of residues to use (and since we take only 1 atom, C3', this equals to number of atoms.
    :param save: boolean, save to output_dir or not
    :param output_dir: string, if save, save segments to this folder
    """
    #:returns: None
    #:rtype: None
    #"""

    def __init__(self, fpath, residues, save=False, output_dir=""):
        # parser 1-5 -> 1 2 3 4 5
        self.struc = Bio.PDB.PDBParser().get_structure('', fpath)
        self.fpath = fpath
        self.fn = os.path.basename(fpath)
        self.residues = residues  # self.__parser_residues(residues)
        # pick one C3' atom per requested residue (fills self.atoms)
        self.__get_atoms()
        # self.atoms = []
        if save:
            self.save(output_dir)  # @save

    def __parser_residues(self, residues):
        """Get string and parse it

        '1 4 5 10-15' -> [1, 4, 5, 10, 11, 12, 13, 14, 15]"""
        # NOTE(review): parse_num_list is neither defined nor imported in
        # this module, so calling this helper would raise NameError; it looks
        # like dead code kept for reference (see the commented-out call in
        # __init__) -- confirm before reviving it.
        rs = []
        for r in residues.split():
            l = parse_num_list(r)
            for i in l:
                if i in rs:
                    raise Exception('You have this resi already in your list! See', residues)
            rs.extend(l)
        return rs

    def __get_atoms(self):
        # collect the C3' atom of every selected residue, in structure order
        self.atoms = []
        for res in self.struc.get_residues():
            if res.id[1] in self.residues:
                self.atoms.append(res["C3'"])
                # print res.id
        # ref_atoms.extend(, ref_res['P'])
        # ref_atoms.append(ref_res.get_list())
        if len(self.atoms) <= 0:
            raise Exception('problem: none atoms were selected!: %s' % self.fn)
        return self.atoms

    def __str__(self):
        return self.fn  # + ' # beads' + str(len(self.residues))

    def __repr__(self):
        return self.fn  # + ' # beads' + str(len(self.residues))

    def get_report(self):
        """Str a short report about rna model"""
        t = ' '.join(['File: ', self.fn, ' # of atoms:', str(len(self.atoms)), '\n'])
        for r, a in zip(self.residues, self.atoms):
            t += ' '.join(['resi: ', str(r), ' atom: ', str(a), '\n'])
        return t

    def get_rmsd_to(self, other_rnamodel, output='', dont_move=False):
        """Calc rmsd P-atom based rmsd to other rna model

        With dont_move=True the RMSD is computed on the coordinates as they
        are (no superposition); otherwise the models are superimposed first.
        """
        sup = Bio.PDB.Superimposer()
        if dont_move:
            # fix http://biopython.org/DIST/docs/api/Bio.PDB.Vector%27.Vector-class.html
            coords = array([a.get_vector().get_array() for a in self.atoms])
            other_coords = array([a.get_vector().get_array() for a in other_rnamodel.atoms])
            s = SVDSuperimposer()
            s.set(coords, other_coords)
            # RMSD before any rotation/translation is applied
            return s.get_init_rms()
        try:
            sup.set_atoms(self.atoms, other_rnamodel.atoms)
        except:
            # diagnostic dump when the two atom lists cannot be paired
            # (e.g. different lengths); NOTE(review): execution continues,
            # so the sup.rms access below will then fail anyway
            print(self.fn, len(self.atoms), other_rnamodel.fn, len(other_rnamodel.atoms))
            for a, b in zip(self.atoms, other_rnamodel.atoms):
                print(a.parent, b.parent)  # a.get_full_id(), b.get_full_id())
        rms = round(sup.rms, 3)
        if output:
            # write both superposed structures to disk for inspection
            io = Bio.PDB.PDBIO()
            sup.apply(self.struc.get_atoms())
            io.set_structure(self.struc)
            io.save("aligned.pdb")
            io = Bio.PDB.PDBIO()
            sup.apply(other_rnamodel.struc.get_atoms())
            io.set_structure(other_rnamodel.struc)
            io.save("aligned2.pdb")
        return rms

    def save(self, output_dir, verbose=True):
        """Save structures and motifs """
        folder_to_save = output_dir + os.sep  # ugly hack 'rp14/'
        # create <output_dir>/, <output_dir>/structures and
        # <output_dir>/motifs, ignoring "already exists" errors
        try:
            os.makedirs(folder_to_save)
        except OSError:
            pass
        try:
            os.mkdir(folder_to_save + 'structures')
        except OSError:
            pass
        try:
            os.mkdir(folder_to_save + 'motifs')
        except OSError:
            pass
        RESI = self.residues
        if not self.struc:
            raise Exception('self.struct was not defined! Can not save a pdb!')

        class BpSelect(Select):
            # Bio.PDB selector: keep only the residues picked for this model
            def accept_residue(self, residue):
                if residue.get_id()[1] in RESI:
                    return 1
                else:
                    return 0

        # full structure -> structures/
        io = PDBIO()
        io.set_structure(self.struc)
        fn = folder_to_save + 'structures' + os.sep + self.fn  # + '.pdb'
        io.save(fn)
        if verbose:
            print(' saved to struc: %s ' % fn)
        # only the selected residues -> motifs/
        io = PDBIO()
        io.set_structure(self.struc)
        fn = folder_to_save + 'motifs/' + os.sep + self.fn  # + self.fn.replace('.pdb', '_motif.pdb')# #+ '.pdb'
        io.save(fn, BpSelect())
        if verbose:
            print(' saved to motifs: %s ' % fn)
# main
if __name__ == '__main__':
    # run the doctest embedded in the RNAmodel class docstring
    import doctest
    doctest.testmod()

    # rna = RNAmodel("test_data/rp14/rp14_5ddp_bound_clean_ligand.pdb", [1], False, None)
    # print(rna.get_report())

    # smoke test: RMSD between two 3-residue fragments, with and without
    # superposition (trailing dataset-extraction garbage that broke the
    # syntax of the last line has been removed)
    a = RNAmodel("test_data/GGC.pdb", [46, 47, 48])
    b = RNAmodel("test_data/GUC.pdb", [31, 32, 33])
    print(a.get_rmsd_to(b))
    print(a.get_rmsd_to(b, dont_move=True))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.