code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Testing for Echo State Network module (pyrcn.echo_state_network)
"""
import scipy
import numpy as np
import pytest
from sklearn.datasets import load_iris, load_digits
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from pyrcn.base import InputToNode, FeedbackNodeToNode
from pyrcn.linear_model import IncrementalRegression
from pyrcn.echo_state_network import ESNFeedbackRegressor
# Iris dataset shared by the test functions below (X: features, y: class labels).
X_iris, y_iris = load_iris(return_X_y=True)
def test_esn_fb_regressor_jobs():
    """Fit a feedback ESN on a sine/cosine regression task with two jobs.

    Checks that the predictions on a small held-out split match the true
    targets to within 1% relative tolerance.
    """
    print('\ntest_esn_regressor_sine():')
    X = np.linspace(0, 10, 2000)
    # Two-column target: sin(x) and cos(x).
    targets = np.hstack((np.sin(X).reshape(-1, 1), np.cos(X).reshape(-1, 1)))
    X_train, X_test, y_train, y_test = train_test_split(
        X, targets, test_size=10, random_state=42)
    esn = ESNFeedbackRegressor(
        input_to_node=InputToNode(bias_scaling=10.),
        node_to_node=FeedbackNodeToNode(spectral_radius=0.0),
        regressor=Ridge(alpha=.0001),
        random_state=42)
    esn.fit(X_train.reshape(-1, 1), y_train, n_jobs=2)
    y_esn = esn.predict(X_test.reshape(-1, 1))
    print("tests: {0} train: {1}".format(y_test, y_esn))
    print(esn.get_params())
    np.testing.assert_allclose(y_test, y_esn, rtol=1e-2)
def test_iris_ensemble_iterative_regression():
    """Incrementally fit an ESN classifier ensemble on Iris in 5 chunks and
    require at least 80% accuracy on the held-out samples."""
    # NOTE(review): ESNFeedbackClassifier is not among this module's visible
    # imports (only ESNFeedbackRegressor is) -- confirm it is imported
    # elsewhere, otherwise this test fails with a NameError.
    print('\ntest_iris_ensemble_iterative_regression():')
    X_train, X_test, y_train, y_test = train_test_split(X_iris, y_iris, test_size=5, random_state=42)
    cls = ESNFeedbackClassifier(
        input_to_nodes=[
            ('tanh', InputToNode(hidden_layer_size=10, random_state=42, activation='tanh')),
            ('bounded_relu', InputToNode(hidden_layer_size=10, random_state=42, activation='bounded_relu'))],
        nodes_to_nodes=[('default', FeedbackNodeToNode(hidden_layer_size=20, spectral_radius=0.0))],
        regressor=IncrementalRegression(alpha=.01),
        random_state=42)
    # Feed the training set in 5 equally sized chunks to exercise partial_fit.
    for samples in np.split(np.arange(0, X_train.shape[0]), 5):
        cls.partial_fit(X_train[samples, :], y_train[samples])
    y_predicted = cls.predict(X_test)
    for record in range(len(y_test)):
        print('predicted: {0} \ttrue: {1}'.format(y_predicted[record], y_test[record]))
    print('score: %f' % cls.score(X_test, y_test))
    assert cls.score(X_test, y_test) >= 4./5.
| [
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"numpy.testing.assert_allclose",
"sklearn.linear_model.Ridge",
"pyrcn.base.InputToNode",
"pyrcn.linear_model.IncrementalRegression",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"pyrcn.base.FeedbackNodeToNode",
"numpy.arang... | [((500, 526), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (509, 526), False, 'from sklearn.datasets import load_iris, load_digits\n'), ((613, 637), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(2000)'], {}), '(0, 10, 2000)\n', (624, 637), True, 'import numpy as np\n'), ((749, 802), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(10)', 'random_state': '(42)'}), '(X, y, test_size=10, random_state=42)\n', (765, 802), False, 'from sklearn.model_selection import train_test_split\n'), ((1204, 1256), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_test', 'y_esn'], {'rtol': '(0.01)'}), '(y_test, y_esn, rtol=0.01)\n', (1230, 1256), True, 'import numpy as np\n'), ((1403, 1465), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_iris', 'y_iris'], {'test_size': '(5)', 'random_state': '(42)'}), '(X_iris, y_iris, test_size=5, random_state=42)\n', (1419, 1465), False, 'from sklearn.model_selection import train_test_split\n'), ((1934, 1964), 'numpy.arange', 'np.arange', (['(0)', 'X_train.shape[0]'], {}), '(0, X_train.shape[0])\n', (1943, 1964), True, 'import numpy as np\n'), ((857, 887), 'pyrcn.base.InputToNode', 'InputToNode', ([], {'bias_scaling': '(10.0)'}), '(bias_scaling=10.0)\n', (868, 887), False, 'from pyrcn.base import InputToNode, FeedbackNodeToNode\n'), ((909, 948), 'pyrcn.base.FeedbackNodeToNode', 'FeedbackNodeToNode', ([], {'spectral_radius': '(0.0)'}), '(spectral_radius=0.0)\n', (927, 948), False, 'from pyrcn.base import InputToNode, FeedbackNodeToNode\n'), ((968, 987), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(0.0001)'}), '(alpha=0.0001)\n', (973, 987), False, 'from sklearn.linear_model import Ridge\n'), ((1846, 1879), 'pyrcn.linear_model.IncrementalRegression', 'IncrementalRegression', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (1867, 1879), False, 'from pyrcn.linear_model import 
IncrementalRegression\n'), ((657, 666), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (663, 666), True, 'import numpy as np\n'), ((683, 692), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (689, 692), True, 'import numpy as np\n'), ((1545, 1614), 'pyrcn.base.InputToNode', 'InputToNode', ([], {'hidden_layer_size': '(10)', 'random_state': '(42)', 'activation': '"""tanh"""'}), "(hidden_layer_size=10, random_state=42, activation='tanh')\n", (1556, 1614), False, 'from pyrcn.base import InputToNode, FeedbackNodeToNode\n'), ((1646, 1723), 'pyrcn.base.InputToNode', 'InputToNode', ([], {'hidden_layer_size': '(10)', 'random_state': '(42)', 'activation': '"""bounded_relu"""'}), "(hidden_layer_size=10, random_state=42, activation='bounded_relu')\n", (1657, 1723), False, 'from pyrcn.base import InputToNode, FeedbackNodeToNode\n'), ((1763, 1824), 'pyrcn.base.FeedbackNodeToNode', 'FeedbackNodeToNode', ([], {'hidden_layer_size': '(20)', 'spectral_radius': '(0.0)'}), '(hidden_layer_size=20, spectral_radius=0.0)\n', (1781, 1824), False, 'from pyrcn.base import InputToNode, FeedbackNodeToNode\n')] |
# https://github.com/tonylins/pytorch-mobilenet-v2
import torch.nn as nn
import math
import numpy as np
import torch.utils.model_zoo as model_zoo
# Momentum used by the BatchNorm layers of the deconvolutional head.
BN_MOMENTUM = 0.1
def conv_bn(inp, oup, stride):
    """3x3 convolution (given stride, padding 1) -> BatchNorm -> ReLU6."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1, stride 1) convolution -> BatchNorm -> ReLU6."""
    pointwise = nn.Conv2d(inp, oup, 1, 1, 0, bias=False)
    return nn.Sequential(pointwise,
                         nn.BatchNorm2d(oup),
                         nn.ReLU6(inplace=True))
def make_divisible(x, divisible_by=8):
    """Round `x` up to the nearest multiple of `divisible_by`."""
    n_chunks = np.ceil(x * 1. / divisible_by)
    return int(n_chunks * divisible_by)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    When `expand_ratio` == 1 the initial pointwise expansion is skipped.
    A residual (skip) connection is used only when stride is 1 and the
    input/output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(inp * expand_ratio)
        # Skip connection only when spatial size and channels are unchanged.
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pw (expansion)
            layers += [nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                       nn.BatchNorm2d(hidden_dim),
                       nn.ReLU6(inplace=True)]
        # dw
        layers += [nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,
                             groups=hidden_dim, bias=False),
                   nn.BatchNorm2d(hidden_dim),
                   nn.ReLU6(inplace=True),
                   # pw-linear (projection, no activation)
                   nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                   nn.BatchNorm2d(oup)]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = x + out
        return out
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone with a deconvolutional upsampling head and one
    small output head per requested task.

    Args:
        heads: dict mapping head name -> number of output channels; one
            conv head is attached per entry (sorted by name).
        head_conv: channel count of the intermediate 3x3 conv in each head;
            if <= 0 a single 1x1 conv is used instead.
        input_size: input resolution; must be a multiple of 32.
        width_mult: channel-width multiplier for the backbone.
    """

    def __init__(self, heads, head_conv, input_size=224, width_mult=1.):
        super(MobileNetV2, self).__init__()
        self.heads = heads
        self.deconv_with_bias = False
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # Each row: t (expansion ratio), c (output channels), n (repeats),
        # s (stride of the first repeat of the stage).
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        # building first layer
        assert input_size % 32 == 0
        # input_channel = make_divisible(input_channel * width_mult) # first channel is always 32!
        self.last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.inplanes = last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            output_channel = make_divisible(c * width_mult) if t > 1 else c
            for i in range(n):
                # Only the first block of each stage uses stride s.
                if i == 0:
                    self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
                else:
                    self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building classifier
        # self.classifier = nn.Linear(self.last_channel, n_class)
        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 256, 256],
            [4, 4, 4],
        )
        # self.final_layer = []
        # One output head per task; sorted for a deterministic module order.
        for head in sorted(self.heads):
            num_output = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                    nn.Conv2d(256, head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, num_output,
                              kernel_size=1, stride=1, padding=0))
            else:
                fc = nn.Conv2d(
                    in_channels=256,
                    out_channels=num_output,
                    kernel_size=1,
                    stride=1,
                    padding=0
                )
            self.__setattr__(head, fc)
        # self._initialize_weights()

    def forward(self, x):
        """Backbone -> deconv stack -> dict of head outputs (in a 1-list)."""
        x = self.features(x)
        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) for one deconv stage.

        NOTE(review): an unsupported kernel size (not 2, 3 or 4) leaves
        `padding`/`output_padding` unbound and raises UnboundLocalError.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Stack `num_layers` (ConvTranspose2d -> BN -> ReLU) stages; each
        stage upsamples by 2 (stride=2). Updates `self.inplanes` as it goes."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)

    def initialize_weights(self, pretrained):
        """Initialize deconv/head weights; optionally load a pretrained
        backbone.

        When `pretrained` is True, backbone weights are downloaded from a
        fixed URL (network access required) and loaded with strict=False;
        otherwise all modules get a hand-rolled He-style initialization.
        """
        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.001)
                if self.deconv_with_bias:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        for head in self.heads:
            final_layer = self.__getattr__(head)
            for i, m in enumerate(final_layer.modules()):
                if isinstance(m, nn.Conv2d):
                    # Only touch the final conv of the head (output channels
                    # match the head's requested count).
                    if m.weight.shape[0] == self.heads[head]:
                        # Heatmap ('hm') heads get a negative bias so the
                        # initial (post-sigmoid) output is a low probability.
                        if 'hm' in head:
                            nn.init.constant_(m.bias, -2.19)
                        else:
                            nn.init.normal_(m.weight, std=0.001)
                            nn.init.constant_(m.bias, 0)
        if pretrained:
            url = 'https://www.dropbox.com/s/47tyzpofuuyyv1b/mobilenetv2_1.0-f2a8633.pth.tar?dl=1'
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    n = m.weight.size(1)
                    m.weight.data.normal_(0, 0.01)
                    m.bias.data.zero_()
# def mobilenet_v2(pretrained=True):
# model = MobileNetV2(width_mult=1)
#
# if pretrained:
# try:
# from torch.hub import load_state_dict_from_url
# except ImportError:
# from torch.utils.model_zoo import load_url as load_state_dict_from_url
# state_dict = load_state_dict_from_url(
# 'https://www.dropbox.com/s/47tyzpofuuyyv1b/mobilenetv2_1.0-f2a8633.pth.tar?dl=1', progress=True)
# model.load_state_dict(state_dict)
# return model
def get_mobilenet_v2_pose_net(num_layers, heads, head_conv):
    """Build a MobileNetV2 pose network and load pretrained backbone weights.

    `num_layers` is accepted for factory-interface compatibility but unused.
    """
    network = MobileNetV2(heads, head_conv=head_conv, input_size=384,
                          width_mult=1)
    network.initialize_weights(pretrained=True)
    return network
# if __name__ == '__main__':
# net = mobilenet_v2(True)
| [
"torch.nn.BatchNorm2d",
"numpy.ceil",
"torch.nn.ReLU",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.utils.model_zoo.load_url",
"math.sqrt",
"torch.nn.Conv2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU6",
"torch.nn.init.normal_"
] | [((232, 277), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'oup', '(3)', 'stride', '(1)'], {'bias': '(False)'}), '(inp, oup, 3, stride, 1, bias=False)\n', (241, 277), True, 'import torch.nn as nn\n'), ((287, 306), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (301, 306), True, 'import torch.nn as nn\n'), ((316, 338), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (324, 338), True, 'import torch.nn as nn\n'), ((408, 448), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, oup, 1, 1, 0, bias=False)\n', (417, 448), True, 'import torch.nn as nn\n'), ((458, 477), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (472, 477), True, 'import torch.nn as nn\n'), ((487, 509), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (495, 509), True, 'import torch.nn as nn\n'), ((3665, 3694), 'torch.nn.Sequential', 'nn.Sequential', (['*self.features'], {}), '(*self.features)\n', (3678, 3694), True, 'import torch.nn as nn\n'), ((6366, 6388), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (6379, 6388), True, 'import torch.nn as nn\n'), ((572, 603), 'numpy.ceil', 'np.ceil', (['(x * 1.0 / divisible_by)'], {}), '(x * 1.0 / divisible_by)\n', (579, 603), True, 'import numpy as np\n'), ((7484, 7507), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (['url'], {}), '(url)\n', (7502, 7507), True, 'import torch.utils.model_zoo as model_zoo\n'), ((1038, 1116), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'hidden_dim', '(3)', 'stride', '(1)'], {'groups': 'hidden_dim', 'bias': '(False)'}), '(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)\n', (1047, 1116), True, 'import torch.nn as nn\n'), ((1134, 1160), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (1148, 1160), True, 'import torch.nn as nn\n'), ((1178, 1200), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': 
'(True)'}), '(inplace=True)\n', (1186, 1200), True, 'import torch.nn as nn\n'), ((1246, 1293), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(hidden_dim, oup, 1, 1, 0, bias=False)\n', (1255, 1293), True, 'import torch.nn as nn\n'), ((1311, 1330), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (1325, 1330), True, 'import torch.nn as nn\n'), ((1436, 1483), 'torch.nn.Conv2d', 'nn.Conv2d', (['inp', 'hidden_dim', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, hidden_dim, 1, 1, 0, bias=False)\n', (1445, 1483), True, 'import torch.nn as nn\n'), ((1501, 1527), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (1515, 1527), True, 'import torch.nn as nn\n'), ((1545, 1567), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1553, 1567), True, 'import torch.nn as nn\n'), ((1606, 1684), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'hidden_dim', '(3)', 'stride', '(1)'], {'groups': 'hidden_dim', 'bias': '(False)'}), '(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)\n', (1615, 1684), True, 'import torch.nn as nn\n'), ((1702, 1728), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['hidden_dim'], {}), '(hidden_dim)\n', (1716, 1728), True, 'import torch.nn as nn\n'), ((1746, 1768), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1754, 1768), True, 'import torch.nn as nn\n'), ((1814, 1861), 'torch.nn.Conv2d', 'nn.Conv2d', (['hidden_dim', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(hidden_dim, oup, 1, 1, 0, bias=False)\n', (1823, 1861), True, 'import torch.nn as nn\n'), ((1879, 1898), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['oup'], {}), '(oup)\n', (1893, 1898), True, 'import torch.nn as nn\n'), ((4452, 4543), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(256)', 'out_channels': 'num_output', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=256, 
out_channels=num_output, kernel_size=1, stride=1,\n padding=0)\n', (4461, 4543), True, 'import torch.nn as nn\n'), ((5879, 6060), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'self.inplanes', 'out_channels': 'planes', 'kernel_size': 'kernel', 'stride': '(2)', 'padding': 'padding', 'output_padding': 'output_padding', 'bias': 'self.deconv_with_bias'}), '(in_channels=self.inplanes, out_channels=planes,\n kernel_size=kernel, stride=2, padding=padding, output_padding=\n output_padding, bias=self.deconv_with_bias)\n', (5897, 6060), True, 'import torch.nn as nn\n'), ((6220, 6264), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': 'BN_MOMENTUM'}), '(planes, momentum=BN_MOMENTUM)\n', (6234, 6264), True, 'import torch.nn as nn\n'), ((6292, 6313), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6299, 6313), True, 'import torch.nn as nn\n'), ((6558, 6594), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'std': '(0.001)'}), '(m.weight, std=0.001)\n', (6573, 6594), True, 'import torch.nn as nn\n'), ((4156, 4218), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'head_conv'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(256, head_conv, kernel_size=3, padding=1, bias=True)\n', (4165, 4218), True, 'import torch.nn as nn\n'), ((4270, 4291), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4277, 4291), True, 'import torch.nn as nn\n'), ((4313, 4381), 'torch.nn.Conv2d', 'nn.Conv2d', (['head_conv', 'num_output'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(head_conv, num_output, kernel_size=1, stride=1, padding=0)\n', (4322, 4381), True, 'import torch.nn as nn\n'), ((6657, 6685), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6674, 6685), True, 'import torch.nn as nn\n'), ((6750, 6780), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (6767, 6780), True, 
'import torch.nn as nn\n'), ((6797, 6825), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6814, 6825), True, 'import torch.nn as nn\n'), ((7860, 7878), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (7869, 7878), False, 'import math\n'), ((7141, 7173), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(-2.19)'], {}), '(m.bias, -2.19)\n', (7158, 7173), True, 'import torch.nn as nn\n'), ((7232, 7268), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'std': '(0.001)'}), '(m.weight, std=0.001)\n', (7247, 7268), True, 'import torch.nn as nn\n'), ((7297, 7325), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (7314, 7325), True, 'import torch.nn as nn\n')] |
# Copyright 2020 <NAME>, Pôle OFB-INRAE ECLA, UR RECOVER
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the ProductBuilder class.
The builder pattern organizes object construction into a set of steps
(extract_data, apply_algos, get_products, etc). To create an object,
you execute a series of these steps on a builder object (here,
a ProductBuilder instance). The important part is that you don’t need
to call all of the steps. You can call only the steps that are necessary
for producing a particular configuration of an object.
If the client code needs to assemble a special, fine-tuned L3
or L4 product, it can work with the builder directly.
Otherwise, the user can delegate the assembly to the generate method
(or directly one of the recipes), which knows how to use a builder
to construct several of the most standard products (e.g., L3AlgoProduct,
L3MaskProduct, TimeSeries, Matchup, etc).
"""
import copy
from collections import namedtuple
from datetime import date
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import xarray as xr
from pyproj import CRS
from sisppeo._version import __version__
from sisppeo.catalogs import algo_catalog, mask_catalog, reader_catalog
from sisppeo.products import mask_product, L3AlgoProduct, L3MaskProduct
from sisppeo.utils.algos import producttype_to_sat
from sisppeo.utils.builders import get_variables
from sisppeo.utils.config import (land_algo_config, mask_config,
user_algo_config, user_mask_config,
wc_algo_config)
from sisppeo.utils.exceptions import InputError
from sisppeo.utils.readers import resample_band_array
# Merge built-in and user-provided configurations; later entries override
# earlier ones on key collisions. Note that `mask_config` deliberately
# rebinds (shadows) the imported name with the merged dict.
algo_config = {**land_algo_config, **wc_algo_config, **user_algo_config}
mask_config = {**mask_config, **user_mask_config}
class ProductBuilder:
"""The builder used to create L3 and L4 objects.
It specifies methods for creating the different parts (or
building steps) of the product objects, and provides
their implementations.
"""
__slots__ = ('_algos', '_masks', '_product_type', '_requested_bands',
'_out_resolution', '_extracted_ds', '_results', '_products')
def __init__(self):
self._algos = None
self._masks = None
self._product_type = None
self._requested_bands = None
self._out_resolution = None
self._extracted_ds = None
self._results = None
self._products = None
def set_algos(self,
lst_algo: List[str],
product_type: str,
lst_band: Optional[List[str]] = None,
lst_calib: Optional[List[Union[str, Path]]] = None,
lst_design: Optional[List[str]] = None) -> None:
"""Creates and inits algo objects.
Args:
lst_algo: A list of algorithms to use.
product_type: The type of the input satellite product (e.g.
S2_ESA_L2A or L8_USGS_L1GT).
lst_band: A list of "requested_band" args (a param used
by some algorithms).
lst_calib: A list of "calibration" args (a param used
by some algorithms).
lst_design: A list of "design" args (a param used
by some algorithms).
"""
algos = []
requested_bands = set()
for i, algo_name in enumerate(lst_algo):
config = {}
if lst_band is not None and lst_band[i] is not None:
config['requested_band'] = lst_band[i]
if lst_calib is not None and lst_calib[i] is not None:
config['calibration'] = lst_calib[i]
if lst_design is not None and lst_design[i] is not None:
config['design'] = lst_design[i]
algo = algo_catalog[algo_name](product_type=product_type, **config)
algos.append(algo)
requested_bands = requested_bands.union(algo.requested_bands)
self._algos = tuple(algos)
self._requested_bands = tuple(requested_bands)
def set_masks(self, lst_masks: List[str], product_type: str) -> None:
"""Creates mask objects.
Args:
lst_masks: A list of masks to use.
product_type: The type of the input satellite product
(e.g. S2_ESA_L1C).
"""
masks = []
requested_bands = set()
for mask_name in lst_masks:
mask_func = mask_catalog[mask_name]
masks.append((mask_name, mask_func))
requested_bands = requested_bands.union(
mask_config[mask_name][producttype_to_sat(product_type)])
self._masks = tuple(masks)
self._product_type = product_type
self._requested_bands = tuple(requested_bands)
@staticmethod
def _set_resolution(product_type: str,
out_resolution: Optional[int] = None,
processing_resolution: Optional[int] = None
) -> Tuple[Optional[int], Optional[int]]:
if 'S2_ESA' in product_type:
authorized_res = (None, 10, 20, 60)
if out_resolution not in authorized_res:
msg = ('"out_resolution" must either be set to None, 10, 20 '
'or 60.')
raise InputError(msg)
if processing_resolution not in authorized_res:
msg = ('"processing_resolution" must either be set to None, '
'10, 20 or 60.')
raise InputError(msg)
if out_resolution is None:
out_resolution = processing_resolution
else:
if (processing_resolution is None or
processing_resolution < out_resolution):
print(
f'"processing_resolution" must be >= {out_resolution}'
'm ("out_resolution"); here, "processing_resolution"='
f'{processing_resolution}m. Therefore, it will be '
f'ignored.'
)
processing_resolution = out_resolution
elif product_type == 'S2_THEIA':
authorized_res = (None, 10, 20)
if out_resolution not in authorized_res:
msg = '"out_resolution" must either be set to None, 10 or 20.'
raise InputError(msg)
if processing_resolution not in authorized_res:
msg = ('"processing_resolution" must either be set to None, '
'10 or 20.')
raise InputError(msg)
if out_resolution is None:
out_resolution = processing_resolution
else:
if (processing_resolution is None or
processing_resolution < out_resolution):
print(
f'"processing_resolution" must be >= {out_resolution}'
'm ("out_resolution"); here, "processing_resolution"='
f'{processing_resolution}m. Therefore, it will be '
f'ignored.'
)
processing_resolution = out_resolution
else:
if out_resolution is not None or processing_resolution is not None:
print('Both "out_resolution" and "processing_resolution" '
'parameters can only be used with S2_ESA and S2_THEIA '
'products. Therefore, they will be ignored.')
return out_resolution, processing_resolution
def extract_data(self, product_type: str, input_product: Path,
geom: Optional[dict] = None, **kwargs) -> None:
"""Extracts (meta)data from the input product.
Selects the right Reader and use it to extract the needed bands
and metadata. Then, creates and returns a xr.Dataset containing
these information.
Args:
product_type: The type of the input satellite product
(e.g. "S2_ESA_L2A" or "L8_USGS_L1").
input_product: The path of the input product (multispectral
spaceborne imagery).
geom: Optional; A dict containing geographical information
that define the ROI.
4 keys: geom (a shapely.geom object), shp (a path to
an ESRI shapefile), wkt (a path to a wkt file) and srid
(an EPSG code).
kwargs: Args specific to the selected Reader.
"""
if self._requested_bands is not None:
requested_bands = self._requested_bands
else:
requested_bands = kwargs.pop('requested_bands')
o_res, p_res = self._set_resolution(
product_type, kwargs.pop('out_resolution', None),
kwargs.pop('processing_resolution', None)
)
self._out_resolution = o_res
reader = reader_catalog[product_type](input_product=input_product,
product_type=product_type,
requested_bands=requested_bands,
geom=geom,
out_resolution=p_res,
**kwargs)
reader.extract_bands()
reader.create_ds()
self._extracted_ds = reader.dataset
@staticmethod
def _compute_algo(algo,
input_dataarrays: List[xr.DataArray],
data_type: str,
epsg_code: int) -> xr.Dataset:
output = algo(*input_dataarrays, data_type=data_type,
epsg_code=epsg_code)
variables, long_names = get_variables(algo_config, algo.name)
if len(variables) == 1:
output = [output]
out_dataarrays = {}
for out_dataarray, variable, long_name in zip(output, variables,
long_names):
np.nan_to_num(out_dataarray, False, np.nan, np.nan, np.nan)
out_dataarray.attrs.update({
'grid_mapping': 'crs',
'long_name': long_name,
**algo.meta
})
out_dataarray.name = variable
out_dataarrays[variable] = out_dataarray
return xr.Dataset(out_dataarrays)
def compute_algos(self) -> None:
"""Runs every algorithms using extracted data and stores the results."""
out_algos = {}
data_type = self._extracted_ds.attrs['data_type']
epsg_code = CRS.from_cf(self._extracted_ds.crs.attrs).to_epsg()
for algo in self._algos:
input_dataarrays = [self._extracted_ds[band].copy()
for band in algo.requested_bands]
out_algos[algo.name] = self._compute_algo(algo, input_dataarrays,
data_type, epsg_code)
self._results = out_algos
@staticmethod
def _compute_mask(mask_func,
input_dataarrays: List[xr.DataArray],
in_res: Optional[int] = None,
out_res: Optional[int] = None
) -> Tuple[np.ndarray, dict]:
out_ndarray, params = mask_func(input_dataarrays)
if out_res is not None and out_res != in_res:
arr = resample_band_array(out_ndarray[0], in_res, out_res, False)
out_ndarray = arr.reshape((1, *arr.shape))
return out_ndarray, params
def compute_masks(self) -> None:
"""Runs every masks using extracted data and stores the results."""
ds_res = (self._extracted_ds.x.values[1]
- self._extracted_ds.x.values[0])
out_masks = []
for mask_name, mask_func in self._masks:
input_dataarrays = [
copy.deepcopy(self._extracted_ds[band]) for band
in mask_config[mask_name][producttype_to_sat(self._product_type)]
]
out_masks.append(self._compute_mask(mask_func, input_dataarrays,
ds_res, self._out_resolution))
datasets = {}
for (mask_name, _), (out_ndarray, params) in zip(self._masks,
out_masks):
if self._out_resolution is None or self._out_resolution == ds_res:
out_dataarray = self._extracted_ds[self._requested_bands[0]].copy(data=out_ndarray)
else:
offset = (self._out_resolution - ds_res) / 2
x = np.arange(self._extracted_ds.x.values[0] + offset,
self._extracted_ds.x.values[-1] - offset + 1,
self._out_resolution)
y = np.arange(self._extracted_ds.y.values[0] - offset,
self._extracted_ds.y.values[-1] + offset - 1,
-self._out_resolution)
out_dataarray = xr.DataArray(
out_ndarray,
coords=[self._extracted_ds.time, y, x],
dims=['time', 'y', 'x']
)
out_dataarray.x.attrs = copy.copy(self._extracted_ds.x.attrs)
out_dataarray.y.attrs = copy.copy(self._extracted_ds.y.attrs)
out_dataarray.time.attrs = copy.copy(self._extracted_ds.time.attrs)
out_dataarray.attrs.update({
'grid_mapping': 'crs',
'long_name': mask_config[mask_name]['long_name']
})
out_dataarray.attrs.update(params)
if self._out_resolution is not None and ds_res != self._out_resolution:
out_dataarray.attrs['processing_resolution'] = f'{int(ds_res)}m'
out_dataarray.name = mask_name
datasets[mask_name] = xr.Dataset({mask_name: out_dataarray})
self._results = datasets
def create_l3products(self, product_type: str) -> None:
"""Creates the wanted products and stores them.
Args:
product_type: The type of the input satellite product
(e.g. "S2_ESA_L2A" or "L8_USGS_L1").
"""
class Products(namedtuple('Products', (key.replace('-', '_')
for key in self._results))):
__slots__ = ()
def __repr__(self):
tmp = (f'{_}=<{str(self[i].__class__.mro()[0])[8:-2]}>'
for i, _ in enumerate(self._fields))
return f'Products({", ".join(tmp)})'
if self._algos is not None:
product = L3AlgoProduct
elif self._masks is not None:
product = L3MaskProduct
else:
msg = 'You need to provide at least one algo or mask to use.'
raise InputError(msg)
products = []
for algo in self._results:
dataset = self._results[algo]
for key in ('crs', 'product_metadata'):
dataset[key] = self._extracted_ds[key]
dataset.attrs = {
'Convention': 'CF-1.8',
'title': f'{algo} from {product_type}',
'history': f'created with SISPPEO (v{__version__}) on '
+ date.today().isoformat()
}
dataset.attrs.update(self._extracted_ds.attrs)
dataset.attrs.pop('data_type', None)
products.append(product(dataset))
self._products = Products(*products)
def mask_l3algosproduct(self,
masks_types: List[str],
masks: Optional[List[L3MaskProduct]] = None,
masks_paths: Optional[List[Path]] = None) -> None:
"""Masks the previously generated products.
Args:
masks_types: The list of the type of the masks to use.
Values can either be "IN" (area to include) or "OUT"
(area to exclude).
masks: Optional; A list of masks to use.
masks_paths: Optional; A list of paths (of L3MaskProducts)
to use.
"""
if masks_paths is not None:
masks = [L3MaskProduct.from_file(path) for path in masks_paths]
for l3_algo in self._products:
mask_product(l3_algo, masks, masks_types, True)
def get_products(self) -> namedtuple:
    """Returns products and resets itself."""
    products = self._products
    # Clear every piece of builder state so the instance can be reused
    # from scratch for the next run.
    for attr in ('_algos', '_masks', '_product_type', '_requested_bands',
                 '_out_resolution', '_extracted_ds', '_results', '_products'):
        setattr(self, attr, None)
    return products
| [
"sisppeo.utils.exceptions.InputError",
"copy.deepcopy",
"numpy.arange",
"sisppeo.products.mask_product",
"sisppeo.utils.builders.get_variables",
"datetime.date.today",
"xarray.Dataset",
"copy.copy",
"sisppeo.utils.algos.producttype_to_sat",
"xarray.DataArray",
"sisppeo.utils.readers.resample_ban... | [((10301, 10338), 'sisppeo.utils.builders.get_variables', 'get_variables', (['algo_config', 'algo.name'], {}), '(algo_config, algo.name)\n', (10314, 10338), False, 'from sisppeo.utils.builders import get_variables\n'), ((10914, 10940), 'xarray.Dataset', 'xr.Dataset', (['out_dataarrays'], {}), '(out_dataarrays)\n', (10924, 10940), True, 'import xarray as xr\n'), ((10581, 10640), 'numpy.nan_to_num', 'np.nan_to_num', (['out_dataarray', '(False)', 'np.nan', 'np.nan', 'np.nan'], {}), '(out_dataarray, False, np.nan, np.nan, np.nan)\n', (10594, 10640), True, 'import numpy as np\n'), ((11962, 12021), 'sisppeo.utils.readers.resample_band_array', 'resample_band_array', (['out_ndarray[0]', 'in_res', 'out_res', '(False)'], {}), '(out_ndarray[0], in_res, out_res, False)\n', (11981, 12021), False, 'from sisppeo.utils.readers import resample_band_array\n'), ((14457, 14495), 'xarray.Dataset', 'xr.Dataset', (['{mask_name: out_dataarray}'], {}), '({mask_name: out_dataarray})\n', (14467, 14495), True, 'import xarray as xr\n'), ((16938, 16985), 'sisppeo.products.mask_product', 'mask_product', (['l3_algo', 'masks', 'masks_types', '(True)'], {}), '(l3_algo, masks, masks_types, True)\n', (16950, 16985), False, 'from sisppeo.products import mask_product, L3AlgoProduct, L3MaskProduct\n'), ((5835, 5850), 'sisppeo.utils.exceptions.InputError', 'InputError', (['msg'], {}), '(msg)\n', (5845, 5850), False, 'from sisppeo.utils.exceptions import InputError\n'), ((6051, 6066), 'sisppeo.utils.exceptions.InputError', 'InputError', (['msg'], {}), '(msg)\n', (6061, 6066), False, 'from sisppeo.utils.exceptions import InputError\n'), ((11161, 11202), 'pyproj.CRS.from_cf', 'CRS.from_cf', (['self._extracted_ds.crs.attrs'], {}), '(self._extracted_ds.crs.attrs)\n', (11172, 11202), False, 'from pyproj import CRS\n'), ((12448, 12487), 'copy.deepcopy', 'copy.deepcopy', (['self._extracted_ds[band]'], {}), '(self._extracted_ds[band])\n', (12461, 12487), False, 'import 
copy\n'), ((13188, 13311), 'numpy.arange', 'np.arange', (['(self._extracted_ds.x.values[0] + offset)', '(self._extracted_ds.x.values[-1] - offset + 1)', 'self._out_resolution'], {}), '(self._extracted_ds.x.values[0] + offset, self._extracted_ds.x.\n values[-1] - offset + 1, self._out_resolution)\n', (13197, 13311), True, 'import numpy as np\n'), ((13387, 13511), 'numpy.arange', 'np.arange', (['(self._extracted_ds.y.values[0] - offset)', '(self._extracted_ds.y.values[-1] + offset - 1)', '(-self._out_resolution)'], {}), '(self._extracted_ds.y.values[0] - offset, self._extracted_ds.y.\n values[-1] + offset - 1, -self._out_resolution)\n', (13396, 13511), True, 'import numpy as np\n'), ((13599, 13694), 'xarray.DataArray', 'xr.DataArray', (['out_ndarray'], {'coords': '[self._extracted_ds.time, y, x]', 'dims': "['time', 'y', 'x']"}), "(out_ndarray, coords=[self._extracted_ds.time, y, x], dims=[\n 'time', 'y', 'x'])\n", (13611, 13694), True, 'import xarray as xr\n'), ((13808, 13845), 'copy.copy', 'copy.copy', (['self._extracted_ds.x.attrs'], {}), '(self._extracted_ds.x.attrs)\n', (13817, 13845), False, 'import copy\n'), ((13886, 13923), 'copy.copy', 'copy.copy', (['self._extracted_ds.y.attrs'], {}), '(self._extracted_ds.y.attrs)\n', (13895, 13923), False, 'import copy\n'), ((13967, 14007), 'copy.copy', 'copy.copy', (['self._extracted_ds.time.attrs'], {}), '(self._extracted_ds.time.attrs)\n', (13976, 14007), False, 'import copy\n'), ((15451, 15466), 'sisppeo.utils.exceptions.InputError', 'InputError', (['msg'], {}), '(msg)\n', (15461, 15466), False, 'from sisppeo.utils.exceptions import InputError\n'), ((16832, 16861), 'sisppeo.products.L3MaskProduct.from_file', 'L3MaskProduct.from_file', (['path'], {}), '(path)\n', (16855, 16861), False, 'from sisppeo.products import mask_product, L3AlgoProduct, L3MaskProduct\n'), ((5139, 5171), 'sisppeo.utils.algos.producttype_to_sat', 'producttype_to_sat', (['product_type'], {}), '(product_type)\n', (5157, 5171), False, 'from 
sisppeo.utils.algos import producttype_to_sat\n'), ((6914, 6929), 'sisppeo.utils.exceptions.InputError', 'InputError', (['msg'], {}), '(msg)\n', (6924, 6929), False, 'from sisppeo.utils.exceptions import InputError\n'), ((7126, 7141), 'sisppeo.utils.exceptions.InputError', 'InputError', (['msg'], {}), '(msg)\n', (7136, 7141), False, 'from sisppeo.utils.exceptions import InputError\n'), ((12539, 12577), 'sisppeo.utils.algos.producttype_to_sat', 'producttype_to_sat', (['self._product_type'], {}), '(self._product_type)\n', (12557, 12577), False, 'from sisppeo.utils.algos import producttype_to_sat\n'), ((15900, 15912), 'datetime.date.today', 'date.today', ([], {}), '()\n', (15910, 15912), False, 'from datetime import date\n')] |
from eugene.src.auxiliary.probability import *
import numpy as np
# Input CSV files (one duration value per line):
# `a` has ~70 sites removed from the sample, `b` is the unfiltered set.
a = './durations_70_removed_sample.csv'
b = './durations_no_filter.csv'
def test_SAIDI_site_removal():
    """Check that removing sites yields a distribution that differs from the
    unfiltered one by energy distance, but not significantly so."""
    def convertio(line):
        #print(line)
        try:
            return float(line)
        except:
            print('this is not a number', line)

    def load_column(path):
        # One duration per non-empty line, shaped as an (n, 1) column vector.
        with open(path, 'r') as thefile:
            values = [convertio(entry)
                      for entry in thefile.read().split('\n') if entry != '']
        return (np.asarray(values)[np.newaxis]).T

    someFewer = load_column(a)
    print(someFewer)
    noFewer = load_column(b)

    someno = EnergyDistance(someFewer, noFewer)
    somesome = EnergyDistance(someFewer, someFewer)
    nono = EnergyDistance(noFewer, noFewer)
    # Cross-sample distance should exceed both self-distances.
    assert someno > somesome
    assert someno > nono
    # Self-comparisons must never come out as significant.
    assert not significant(someFewer, someFewer, D=somesome, n=50)
    assert not significant(noFewer, noFewer, D=nono, n=50)
    # are noFewer and someFewer distributions significantly different?
    assert not significant(noFewer, someFewer, D=someno, n=50)
    # Good news!
# Good news!
| [
"numpy.asarray"
] | [((483, 504), 'numpy.asarray', 'np.asarray', (['someFewer'], {}), '(someFewer)\n', (493, 504), True, 'import numpy as np\n'), ((679, 698), 'numpy.asarray', 'np.asarray', (['noFewer'], {}), '(noFewer)\n', (689, 698), True, 'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3.8.8 64-bit ('cam')
# language: python
# name: python388jvsc74a57bd0acafb728b15233fa3654ff8b422c21865df0ca42ea3b74670e1f2f098ebd61ca
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/python-logo-notext.svg"
# style="display:block;margin:auto;width:10%"/>
# <h1 style="text-align:center;">Python: Pandas Series</h1>
# <h2 style="text-align:center;">Coding Akademie München GmbH</h2>
# <br/>
# <div style="text-align:center;">Dr. <NAME></div>
# <div style="text-align:center;"><NAME></div>
# %% [markdown] slideshow={"slide_type": "slide"}
#
# # Der Typ `Series`
#
# Der Pandas Typ `Series` repräsentiert eine Folge von Werten, die ähnlich wie eine Python Liste numerisch indiziert werden kann, gleichzeitig aber auch einen semantisch sinnvollerern Index haben kann, z.B. Daten für Zeitreihen.
#
# Intern wird ein `Series`-Objekt durch ein NumPy Array realisiert, daher sind die meisten Operationen von NumPy Arrays auch auf Pandas-`Series`-Objekte anwendbar.
# %%
import numpy as np
import pandas as pd
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Erzeugen von Serien
#
# ### Aus Listen
# %%
pd.Series(data=[10, 20, 30, 40])
# %%
pd.Series(['a', 'b', 'c'])
# %% [markdown] slideshow={"slide_type": "subslide"}
# ### From lists with an index
# %%
pd.Series(data=[1, 2, 3, 4], index=['w', 'x', 'y', 'z'])
# %% [markdown] slideshow={"slide_type": "subslide"}
# ### From NumPy arrays
# %%
arr = np.arange(5)
indices = 'a b c d e'.split()
# %%
pd.Series(data=arr)
# %%
pd.Series(arr, index=indices)
# %%
rng = np.random.default_rng(42)
data_vec = rng.normal(size=1000)
data = pd.Series(data=data_vec)
# %%
data
# %%
data.head()
# %%
data.tail()
# %% [markdown] slideshow={"slide_type": "subslide"}
# ### From a dictionary
# %%
pd.Series(data={'Ice Cream':2.49, 'Cake': 4.99, 'Fudge': 7.99})
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Indices and operations
# %%
food1 = pd.Series({'Ice Cream':2.49, 'Cake': 4.99, 'Fudge': 7.99})
food2 = pd.Series({'Cake': 4.99, 'Ice Cream':3.99, 'Pie': 3.49, 'Cheese': 1.99})
# %%
food1
# %% slideshow={"slide_type": "subslide"}
food1.index
# %%
food1.size
# %%
food1.sum()
# %%
food1.mean()
# %% slideshow={"slide_type": "subslide"}
food1.name
# %%
food1.name = 'Deserts'
# %% slideshow={"slide_type": "-"}
food1.name
# %%
food1
# %% slideshow={"slide_type": "subslide"}
food1.plot.bar(legend=True);
# %%
data.plot.hist(legend=True);
# %% slideshow={"slide_type": "subslide"}
food1['Cake']
# %%
food1.loc['Cake']
# %%
# Error!
# food1['Pie']
# %%
food1.argmin()
# %%
food1[0]
# %%
food1.iloc[0]
# %%
# With an integer index, plain [] indexing becomes ambiguous: [0] hits the
# index *label* 0, not the first position — use .loc/.iloc to be explicit.
confusing = pd.Series(data=np.linspace(0, 5, 11), index=np.arange(-5, 6))
confusing
# %%
confusing[0]
# %%
confusing.loc[0]
# %%
confusing.iloc[0]
# %% slideshow={"slide_type": "subslide"}
# Arithmetic aligns on index labels; labels present on only one side give NaN.
food_sum = food1 + food2
food_sum
# %% slideshow={"slide_type": "subslide"}
food1 + 0.5
# %%
food1
# %% slideshow={"slide_type": "subslide"}
def discount(price):
    return price * 0.9

food1.apply(discount)
# %%
food1
# %% slideshow={"slide_type": "subslide"}
food1.apply(lambda price: price * 0.9)
# %% slideshow={"slide_type": "subslide"}
# NOTE(review): Series.append is deprecated (removed in pandas 2.0);
# pd.concat([...]) is the modern replacement — notebook targets older pandas.
food1.append(pd.Series({'Chocolate': 3.99}))
# %%
food1
# %% slideshow={"slide_type": "subslide"}
all_food = food1.append(food2)
# %%
all_food
# %% [markdown]
# ### Index values occurring multiple times
# %% slideshow={"slide_type": "subslide"}
all_food.index
# %%
all_food.is_unique
# %%
food1.is_unique
# %% slideshow={"slide_type": "subslide"}
all_food['Cake']
# %%
type(all_food['Cake'])
# %% slideshow={"slide_type": "subslide"}
all_food['Pie']
# %%
type(all_food['Pie'])
# %% slideshow={"slide_type": "subslide"}
all_food.groupby(all_food.index).max()
# %% [markdown]
# ### Sorted and unsorted indices
# %%
all_food.index.is_monotonic_increasing
# %% slideshow={"slide_type": "subslide"}
sorted_food = all_food.sort_index()
# %%
sorted_food
# %%
sorted_food.index.is_monotonic_increasing
# %% slideshow={"slide_type": "subslide"}
all_food.sort_values()
# %%
all_food.sort_values().is_monotonic_increasing
# %% slideshow={"slide_type": "subslide"}
all_food[['Pie', 'Cake']]
# %% slideshow={"slide_type": "subslide"}
all_food
# %%
all_food[1:3]
# %% slideshow={"slide_type": "subslide"}
# Label-based slicing requires a sorted (monotonic) index — this raises:
# all_food['Cake':'Fudge']
# %% slideshow={"slide_type": "-"}
sorted_food['Cake':'Fudge']
# %% [markdown]
# **Note:** The upper slice bound `'Fudge'` is included in the result!
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Missing values
# %%
food = food1 + food2
# %%
food.isna()
# %%
food.isna().sum()
# %%
food.dropna()
| [
"pandas.Series",
"numpy.linspace",
"numpy.random.default_rng",
"numpy.arange"
] | [((1358, 1390), 'pandas.Series', 'pd.Series', ([], {'data': '[10, 20, 30, 40]'}), '(data=[10, 20, 30, 40])\n', (1367, 1390), True, 'import pandas as pd\n'), ((1397, 1423), 'pandas.Series', 'pd.Series', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1406, 1423), True, 'import pandas as pd\n'), ((1511, 1567), 'pandas.Series', 'pd.Series', ([], {'data': '[1, 2, 3, 4]', 'index': "['w', 'x', 'y', 'z']"}), "(data=[1, 2, 3, 4], index=['w', 'x', 'y', 'z'])\n", (1520, 1567), True, 'import pandas as pd\n'), ((1657, 1669), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1666, 1669), True, 'import numpy as np\n'), ((1706, 1725), 'pandas.Series', 'pd.Series', ([], {'data': 'arr'}), '(data=arr)\n', (1715, 1725), True, 'import pandas as pd\n'), ((1732, 1761), 'pandas.Series', 'pd.Series', (['arr'], {'index': 'indices'}), '(arr, index=indices)\n', (1741, 1761), True, 'import pandas as pd\n'), ((1774, 1799), 'numpy.random.default_rng', 'np.random.default_rng', (['(42)'], {}), '(42)\n', (1795, 1799), True, 'import numpy as np\n'), ((1840, 1864), 'pandas.Series', 'pd.Series', ([], {'data': 'data_vec'}), '(data=data_vec)\n', (1849, 1864), True, 'import pandas as pd\n'), ((1993, 2057), 'pandas.Series', 'pd.Series', ([], {'data': "{'Ice Cream': 2.49, 'Cake': 4.99, 'Fudge': 7.99}"}), "(data={'Ice Cream': 2.49, 'Cake': 4.99, 'Fudge': 7.99})\n", (2002, 2057), True, 'import pandas as pd\n'), ((2151, 2210), 'pandas.Series', 'pd.Series', (["{'Ice Cream': 2.49, 'Cake': 4.99, 'Fudge': 7.99}"], {}), "({'Ice Cream': 2.49, 'Cake': 4.99, 'Fudge': 7.99})\n", (2160, 2210), True, 'import pandas as pd\n'), ((2218, 2291), 'pandas.Series', 'pd.Series', (["{'Cake': 4.99, 'Ice Cream': 3.99, 'Pie': 3.49, 'Cheese': 1.99}"], {}), "({'Cake': 4.99, 'Ice Cream': 3.99, 'Pie': 3.49, 'Cheese': 1.99})\n", (2227, 2291), True, 'import pandas as pd\n'), ((3389, 3419), 'pandas.Series', 'pd.Series', (["{'Chocolate': 3.99}"], {}), "({'Chocolate': 3.99})\n", (3398, 3419), True, 'import pandas as pd\n'), 
((2861, 2882), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(11)'], {}), '(0, 5, 11)\n', (2872, 2882), True, 'import numpy as np\n'), ((2890, 2906), 'numpy.arange', 'np.arange', (['(-5)', '(6)'], {}), '(-5, 6)\n', (2899, 2906), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ train.py ]
# Synopsis [ Trainining script for Tacotron speech synthesis model ]
# Author [ <NAME> (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
"""
Usage: train.py [options]
Options:
--ckpt_dir <dir> Directory where to save model checkpoints [default: checkpoints].
--model_name <name> Restore model from checkpoint path if name is given.
--data_root <dir> Directory contains preprocessed features.
--meta_text <name> Name of the model-ready training transcript.
--log_dir <str> Directory for log summary writer to write in.
--log_comment <str> Comment for log summary writer.
-h, --help Show this help message and exit
"""
###############
# IMPORTATION #
###############
import os
import sys
import time
#----------------#
import numpy as np
#---------------------#
from utils import audio
from utils.plot import plot_alignment, plot_spectrogram
from utils.text import symbols
#----------------------------------------------#
import torch
from torch import nn
from torch import optim
import torch.backends.cudnn as cudnn
#----------------------------------------#
from model.tacotron import Tacotron
from model.loss import TacotronLoss
from config import config, get_training_args
from dataloader import Dataloader
#------------------------------------------#
from tensorboardX import SummaryWriter
####################
# GLOBAL VARIABLES #
####################
# Training-progress counters shared by train() / warm_from_ckpt() / save_*().
global_step = 0
global_epoch = 0
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
    # Variable-length inputs: disable cudnn autotuning benchmark mode.
    cudnn.benchmark = False
#######################
# LEARNING RATE DECAY #
#######################
def _learning_rate_decay(init_lr, global_step):
warmup_steps = 6000.0
step = global_step + 1.
lr = init_lr * warmup_steps**0.5 * np.minimum(step * warmup_steps**-1.5, step**-0.5)
return lr
###############
# SAVE STATES #
###############
def save_states(global_step, mel_outputs, linear_outputs, attn, y, checkpoint_dir=None):
    """Dump diagnostic artifacts for one batch sample into checkpoint_dir:
    attention alignment plot, predicted/target spectrogram plots, and a
    waveform reconstructed from the predicted linear spectrogram."""
    idx = 1 # idx = np.random.randint(0, len(mel_outputs))
    # Attention alignment
    alignment = attn[idx].cpu().data.numpy() # alignment = attn[idx].cpu().data.numpy()[:, :input_length]
    alignment_path = os.path.join(checkpoint_dir, "step{}_alignment.png".format(global_step))
    plot_alignment(alignment.T, alignment_path, info="tacotron, step={}".format(global_step))
    # Predicted spectrogram
    predicted = linear_outputs[idx].cpu().data.numpy()
    predicted_path = os.path.join(checkpoint_dir, "step{}_predicted_spectrogram.png".format(global_step))
    plot_spectrogram(predicted, predicted_path)
    # Predicted audio signal (Griffin-Lim style inversion by utils.audio)
    signal = audio.inv_spectrogram(predicted.T)
    wav_path = os.path.join(checkpoint_dir, "step{}_predicted.wav".format(global_step))
    audio.save_wav(signal, wav_path)
    # Target spectrogram
    target = y[idx].cpu().data.numpy()
    target_path = os.path.join(checkpoint_dir, "step{}_target_spectrogram.png".format(global_step))
    plot_spectrogram(target, target_path)
###################
# SAVE CHECKPOINT #
###################
def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch):
    """Serialize training state to <checkpoint_dir>/checkpoint_step<step>.pth.

    Args:
        model: network whose weights are saved.
        optimizer: its optimizer (saved so training can resume exactly).
        step: global step count; stored in the dict and used in the filename.
        checkpoint_dir: destination directory (must already exist).
        epoch: global epoch count, stored alongside the step.
    """
    # Bug fix: the filename previously used the module-level `global_step`
    # instead of the `step` argument, silently ignoring the parameter; the
    # only call site passes global_step, so this is backward compatible.
    checkpoint_path = os.path.join(checkpoint_dir, "checkpoint_step{}.pth".format(step))
    torch.save({"state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "global_step": step,
                "global_epoch": epoch},
               checkpoint_path)
#################
# TACOTRON STEP #
#################
"""
One step of training: Train a single batch of data on Tacotron
"""
def tacotron_step(model, optimizer, criterion,
                  x, mel, y, gate, sorted_lengths,
                  init_lr, clip_thresh, global_step):
    """Run one optimization step of Tacotron on a single batch.

    Args:
        model: the Tacotron network.
        optimizer: optimizer whose learning rate is rescheduled in place.
        criterion: TacotronLoss; returns [total, mel, linear, gate] losses.
        x, mel, y, gate: batch tensors — text inputs, mel targets, linear
            targets and stop-token targets.  # assumes shapes match model — TODO confirm
        sorted_lengths: per-sample input lengths passed to the model.
        init_lr: base learning rate fed into the decay schedule.
        clip_thresh: max gradient norm for clipping.
        global_step: current step; drives the learning-rate decay.

    Returns:
        (model, optimizer, Ms, Ls) where Ms maps names to model outputs and
        step metadata, and Ls maps names to detached scalar loss values.
    """
    #---decay learning rate---#
    current_lr = _learning_rate_decay(init_lr, global_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = current_lr
    #---feed data---#
    if USE_CUDA:
        x, mel, y, gate, = x.cuda(), mel.cuda(), y.cuda(), gate.cuda()
    mel_outputs, linear_outputs, gate_outputs, attn = model(x, mel, input_lengths=sorted_lengths)
    losses = criterion([mel_outputs, linear_outputs, gate_outputs], [mel, y, gate])
    #---log loss---#
    # .item() extracts plain floats for logging; the tensors keep the graph.
    loss, total_L = losses[0], losses[0].item()
    mel_loss, mel_L = losses[1], losses[1].item(),
    linear_loss, linear_L = losses[2], losses[2].item()
    gate_loss, gate_L = losses[3], losses[3].item()
    #---update model---#
    optimizer.zero_grad()
    loss.backward()
    # Clip gradients by global norm before stepping.
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_thresh)
    optimizer.step()
    #---wrap up returns---#
    Ms = { 'mel_outputs' : mel_outputs,
           'linear_outputs' : linear_outputs,
           'attn' : attn,
           'sorted_lengths' : sorted_lengths,
           'grad_norm' : grad_norm,
           'current_lr' : current_lr }
    Ls = { 'total_L': total_L,
           'mel_L' : mel_L,
           'linear_L' : linear_L,
           'gate_L' : gate_L }
    return model, optimizer, Ms, Ls
#########
# TRAIN #
#########
"""
Main training loop
"""
def train(model,
          optimizer,
          dataloader,
          init_lr=0.002,
          log_dir=None,
          log_comment=None,
          checkpoint_dir=None,
          checkpoint_interval=None,
          max_epochs=None,
          max_steps=None,
          clip_thresh=1.0):
    """Main training loop.

    Iterates batches via tacotron_step, logs scalars to tensorboard, prints a
    progress line, and periodically saves checkpoints + diagnostic plots.
    Mutates the module-level `global_step`/`global_epoch` counters and stops
    when either `max_epochs` or `max_steps` is reached.
    """
    if USE_CUDA:
        model = model.cuda()
    model.train()
    criterion = TacotronLoss()
    # Pick tensorboard writer target: explicit dir > comment suffix > default.
    if log_dir != None:
        writer = SummaryWriter(log_dir)
    elif log_comment != None:
        writer = SummaryWriter(comment=log_comment)
    else:
        writer = SummaryWriter()
    global global_step, global_epoch
    while global_epoch < max_epochs and global_step < max_steps:
        start = time.time()
        for x, mel, y, gate, sorted_lengths in dataloader:
            model, optimizer, Ms, Rs = tacotron_step(model, optimizer, criterion,
                                                     x, mel, y, gate, sorted_lengths,
                                                     init_lr, clip_thresh, global_step)
            mel_outputs = Ms['mel_outputs']
            linear_outputs = Ms['linear_outputs']
            attn = Ms['attn']
            sorted_lengths = Ms['sorted_lengths']
            grad_norm = Ms['grad_norm']
            current_lr = Ms['current_lr']
            total_L = Rs['total_L']
            mel_L = Rs['mel_L']
            linear_L = Rs['linear_L']
            gate_L = Rs['gate_L']
            duration = time.time() - start
            if global_step > 0 and global_step % checkpoint_interval == 0:
                # Saving failures (e.g. disk full) are reported but must not
                # kill a long-running training job.
                try:
                    save_states(global_step, mel_outputs, linear_outputs, attn, y, checkpoint_dir)
                    save_checkpoint(model, optimizer, global_step, checkpoint_dir, global_epoch)
                except:
                    print()
                    print('An error has occured during saving! Please attend and handle manually!')
                    pass
                log = '[{}] total_L: {:.3f}, mel_L: {:.3f}, lin_L: {:.3f}, gate_L: {:.3f}, grad: {:.3f}, lr: {:.5f}, t: {:.2f}s, saved: T'.format(global_step, total_L, mel_L, linear_L, gate_L, grad_norm, current_lr, duration)
                print(log)
            elif global_step % 5 == 0:
                # Carriage return keeps the progress line in place on screen.
                log = '[{}] total_L: {:.3f}, mel_L: {:.3f}, lin_L: {:.3f}, gate_L: {:.3f}, grad: {:.3f}, lr: {:.5f}, t: {:.2f}s, saved: F'.format(global_step, total_L, mel_L, linear_L, gate_L, grad_norm, current_lr, duration)
                print(log, end='\r')
            # Logs
            writer.add_scalar('total_loss', total_L, global_step)
            writer.add_scalar('mel_loss', mel_L, global_step)
            writer.add_scalar('linear_loss', linear_L, global_step)
            writer.add_scalar('gate_loss', gate_L, global_step)
            writer.add_scalar('grad_norm', grad_norm, global_step)
            writer.add_scalar('learning_rate', current_lr, global_step)
            global_step += 1
            start = time.time()
        global_epoch += 1
global_epoch += 1
########################
# WARM FROM CHECKPOINT #
########################
"""
Initialize training with a pre-trained model pth
Args:
checkpoint_path: ckpt/checkpoint_path200000.pth
model: Pytorch model
optimizer: Pytorch optimizer
"""
def warm_from_ckpt(checkpoint_dir, model_name, model, optimizer):
    """Initialize training from a pre-trained checkpoint.

    Loads <checkpoint_dir>/checkpoint_step<model_name>.pth into `model` and
    `optimizer` and restores the module-level step/epoch counters.

    Args:
        checkpoint_dir: directory containing checkpoint files.
        model_name: checkpoint step suffix used to build the file name.
        model: Pytorch model to load the weights into.
        optimizer: Pytorch optimizer whose state is restored.

    Returns:
        The (model, optimizer) pair with restored state.
    """
    checkpoint_path = os.path.join(checkpoint_dir, "checkpoint_step{}.pth".format(model_name))
    print('[Trainer] - Warming up! Load checkpoint from: {}'.format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    # Move restored optimizer state tensors onto the GPU to match the model.
    for state in optimizer.state.values():
        for k, v in state.items():
            if torch.is_tensor(v):
                state[k] = v.cuda()
    try:
        global global_step, global_epoch
        global_step = checkpoint['global_step']
        global_epoch = checkpoint['global_epoch']
    except:
        # NOTE(review): despite the "Warning" wording this branch terminates
        # the process (sys.exit inside the except) — confirm that is intended.
        print('[Trainer] - Warning: global step and global epoch unable to restore!')
        sys.exit(0)
    return model, optimizer
#######################
# INITIALIZE TRAINING #
#######################
"""
Setup and prepare for Tacotron training.
"""
def initialize_training(data_root, meta_text, checkpoint_dir=None, model_name=None):
    """Setup and prepare for Tacotron training.

    Builds the dataloader, model and optimizer from the global `config`,
    optionally warm-starting from an existing checkpoint.

    Args:
        data_root: directory containing preprocessed features.
        meta_text: name of the model-ready training transcript.
        checkpoint_dir: checkpoint directory (used together with model_name).
        model_name: if given, restore weights/optimizer from this checkpoint.

    Returns:
        (model, optimizer, dataloader) ready to be passed to train().
    """
    dataloader = Dataloader(data_root, meta_text)
    model = Tacotron(n_vocab=len(symbols),
                     embedding_dim=config.embedding_dim,
                     mel_dim=config.num_mels,
                     linear_dim=config.num_freq,
                     r=config.outputs_per_step,
                     padding_idx=config.padding_idx,
                     attention=config.attention,
                     use_mask=config.use_mask)
    optimizer = optim.Adam(model.parameters(),
                           lr=config.initial_learning_rate,
                           betas=(config.adam_beta1, config.adam_beta2),
                           weight_decay=config.weight_decay)
    # Load checkpoint
    if model_name != None:
        model, optimizer = warm_from_ckpt(checkpoint_dir, model_name, model, optimizer)
    return model, optimizer, dataloader
########
# MAIN #
########
def main():
    """Entry point: parse CLI args, build model/data, and launch training."""
    args = get_training_args()
    os.makedirs(args.ckpt_dir, exist_ok=True)
    model, optimizer, dataloader = initialize_training(args.data_root, args.meta_text, args.ckpt_dir, args.model_name)
    # Train!
    try:
        train(model, optimizer, dataloader,
              init_lr=config.initial_learning_rate,
              log_dir=args.log_dir,
              log_comment=args.log_comment,
              checkpoint_dir=args.ckpt_dir,
              checkpoint_interval=config.checkpoint_interval,
              max_epochs=config.max_epochs,
              max_steps=config.max_steps,
              clip_thresh=config.clip_thresh)
    except KeyboardInterrupt:
        # Ctrl-C stops training gracefully instead of dumping a traceback.
        pass
    print()
    print('[Trainer] - Finished!')
    sys.exit(0)


if __name__ == '__main__':
    main()
"numpy.minimum",
"os.makedirs",
"tensorboardX.SummaryWriter",
"model.loss.TacotronLoss",
"torch.load",
"utils.audio.inv_spectrogram",
"torch.is_tensor",
"torch.cuda.is_available",
"sys.exit",
"utils.audio.save_wav",
"time.time",
"dataloader.Dataloader",
"utils.plot.plot_spectrogram",
"conf... | [((1766, 1791), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1789, 1791), False, 'import torch\n'), ((2750, 2787), 'utils.plot.plot_spectrogram', 'plot_spectrogram', (['linear_output', 'path'], {}), '(linear_output, path)\n', (2766, 2787), False, 'from utils.plot import plot_alignment, plot_spectrogram\n'), ((2825, 2863), 'utils.audio.inv_spectrogram', 'audio.inv_spectrogram', (['linear_output.T'], {}), '(linear_output.T)\n', (2846, 2863), False, 'from utils import audio\n'), ((2946, 2974), 'utils.audio.save_wav', 'audio.save_wav', (['signal', 'path'], {}), '(signal, path)\n', (2960, 2974), False, 'from utils import audio\n'), ((3132, 3169), 'utils.plot.plot_spectrogram', 'plot_spectrogram', (['linear_output', 'path'], {}), '(linear_output, path)\n', (3148, 3169), False, 'from utils.plot import plot_alignment, plot_spectrogram\n'), ((5350, 5364), 'model.loss.TacotronLoss', 'TacotronLoss', ([], {}), '()\n', (5362, 5364), False, 'from model.loss import TacotronLoss\n'), ((8005, 8032), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (8015, 8032), False, 'import torch\n'), ((8736, 8768), 'dataloader.Dataloader', 'Dataloader', (['data_root', 'meta_text'], {}), '(data_root, meta_text)\n', (8746, 8768), False, 'from dataloader import Dataloader\n'), ((9457, 9476), 'config.get_training_args', 'get_training_args', ([], {}), '()\n', (9474, 9476), False, 'from config import config, get_training_args\n'), ((9479, 9520), 'os.makedirs', 'os.makedirs', (['args.ckpt_dir'], {'exist_ok': '(True)'}), '(args.ckpt_dir, exist_ok=True)\n', (9490, 9520), False, 'import os\n'), ((10068, 10079), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (10076, 10079), False, 'import sys\n'), ((2036, 2089), 'numpy.minimum', 'np.minimum', (['(step * warmup_steps ** -1.5)', '(step ** -0.5)'], {}), '(step * warmup_steps ** -1.5, step ** -0.5)\n', (2046, 2089), True, 'import numpy as np\n'), ((5399, 5421), 'tensorboardX.SummaryWriter', 
'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (5412, 5421), False, 'from tensorboardX import SummaryWriter\n'), ((5640, 5651), 'time.time', 'time.time', ([], {}), '()\n', (5649, 5651), False, 'import time\n'), ((5460, 5494), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': 'log_comment'}), '(comment=log_comment)\n', (5473, 5494), False, 'from tensorboardX import SummaryWriter\n'), ((5513, 5528), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5526, 5528), False, 'from tensorboardX import SummaryWriter\n'), ((7469, 7480), 'time.time', 'time.time', ([], {}), '()\n', (7478, 7480), False, 'import time\n'), ((8211, 8229), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (8226, 8229), False, 'import torch\n'), ((8473, 8484), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8481, 8484), False, 'import sys\n'), ((6202, 6213), 'time.time', 'time.time', ([], {}), '()\n', (6211, 6213), False, 'import time\n')] |
import os
import torch
import numpy as np
import torch.optim as optim
from solvers.loss import CrossEntropyWithSoftTargets
from utils import mkdir_p, parse_args
from utils import get_lr, save_checkpoint, create_save_path
from solvers.runners import train, train_aug, test
from models import model_dict
from datasets import dataloader_dict, dataset_nclasses_dict, dataset_classname_dict
from time import localtime, strftime, time
import logging
from utils.logger import Logger
def cosine_annealing(step, total_steps, lr_max, lr_min):
    """Cosine interpolation from lr_max (at step 0) down to lr_min
    (at step == total_steps)."""
    progress = step / total_steps
    # Weight goes 1 -> 0 along half a cosine period.
    cosine_weight = 0.5 * (1 + np.cos(progress * np.pi))
    return lr_min + (lr_max - lr_min) * cosine_weight
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
args = parse_args()
current_time = strftime("%d-%b", localtime())
# prepare save path
if len(args.exp_name):
model_save_pth = f"{args.checkpoint}/{args.dataset}/{current_time}{create_save_path(args)}_{args.exp_name}"
else:
model_save_pth = f"{args.checkpoint}/{args.dataset}/{current_time}{create_save_path(args)}"
checkpoint_dir_name = model_save_pth
if not os.path.isdir(model_save_pth):
mkdir_p(model_save_pth)
logging.basicConfig(level=logging.INFO,
format="%(levelname)s: %(message)s",
handlers=[
logging.FileHandler(filename=os.path.join(model_save_pth, "train.log")),
logging.StreamHandler()
])
logging.info(f"Setting up logging folder : {model_save_pth}")
num_classes = dataset_nclasses_dict[args.dataset]
classes_name_list = dataset_classname_dict[args.dataset]
# Add extra class to model
if args.aug == "cutmix" or args.aug == "corr" or args.aug == "cnc" or args.aug == "hypmix":
num_classes += 1
# prepare model
logging.info(f"Using model : {args.model}")
model = model_dict[args.model](num_classes=num_classes)
model = torch.nn.DataParallel(model)
model.cuda()
# set up dataset
logging.info(f"Using dataset : {args.dataset}")
trainloader, corruptloader, testloader = dataloader_dict[args.dataset](args)
logging.info(f"Setting up optimizer : {args.optimizer}")
# if args.optimizer == "sgd":
optimizer = optim.SGD(model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=args.nesterov)
criterion = torch.nn.CrossEntropyLoss()
# criterion = CrossEntropyWithSoftTargets()
test_criterion = torch.nn.CrossEntropyLoss()
if "cosine" in args.scheduler:
logging.info(f"Using consine annealing")
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: cosine_annealing(
step,
args.epochs * len(trainloader),
1, # since lr_lambda computes multiplicative factor
1e-6 / args.lr))
else:
logging.info(f"Step sizes : {args.schedule_steps} | lr-decay-factor : {args.lr_decay_factor}")
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.schedule_steps, gamma=args.lr_decay_factor)
logger = Logger(os.path.join(model_save_pth, "train_metrics.txt"))
logger.set_names(["lr", "train_loss", "top1_train", "test_loss", "top1", "top3", "top5"])
start_epoch = args.start_epoch
best_acc = 0.
best_acc_stats = {"top1" : 0.0}
# choose train func
train_func = train if args.aug == "vanilla" else train_aug
for epoch in range(start_epoch, args.epochs):
start_time = time()
logging.info('Epoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, get_lr(optimizer)))
train_loss, top1_train = train_func(args, trainloader, corruptloader, model, optimizer, criterion, scheduler)
test_loss, top1, top3, top5 = test(testloader, model, test_criterion)
time_taken = time() - start_time
if "sgd" in args.scheduler:
scheduler.step()
logging.info("End of epoch {} stats: train_loss : {:.4f} | test_loss : {:.4f} | top1_train : {:.4f} | top1 : {:.4f}".format(
epoch+1,
train_loss,
test_loss,
top1_train,
top1
))
logging.info("Time taken for epoch : {:.2f} mins".format(time_taken/60))
logger.append([get_lr(optimizer), train_loss, top1_train, test_loss, top1, top3, top5])
# save best accuracy model
is_best = top1 > best_acc
best_acc = max(best_acc, top1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'scheduler' : scheduler.state_dict(),
'dataset' : args.dataset,
'model' : args.model
}, is_best, checkpoint=model_save_pth)
# Update best stats
if is_best:
best_acc_stats = {
"top1" : top1,
"top3" : top3,
"top5" : top5
}
logging.info("training completed...")
logging.info("The stats for best trained model on test set are as below:")
logging.info(best_acc_stats) | [
"solvers.runners.test",
"logging.StreamHandler",
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.MultiStepLR",
"utils.mkdir_p",
"os.path.join",
"torch.nn.DataParallel",
"utils.create_save_path",
"utils.parse_args",
"os.path.isdir",
"numpy.cos",
"time.time",
"utils.get_lr",
"time.loc... | [((728, 740), 'utils.parse_args', 'parse_args', ([], {}), '()\n', (738, 740), False, 'from utils import mkdir_p, parse_args\n'), ((1518, 1579), 'logging.info', 'logging.info', (['f"""Setting up logging folder : {model_save_pth}"""'], {}), "(f'Setting up logging folder : {model_save_pth}')\n", (1530, 1579), False, 'import logging\n'), ((1878, 1921), 'logging.info', 'logging.info', (['f"""Using model : {args.model}"""'], {}), "(f'Using model : {args.model}')\n", (1890, 1921), False, 'import logging\n'), ((1994, 2022), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (2015, 2022), False, 'import torch\n'), ((2066, 2113), 'logging.info', 'logging.info', (['f"""Using dataset : {args.dataset}"""'], {}), "(f'Using dataset : {args.dataset}')\n", (2078, 2113), False, 'import logging\n'), ((2200, 2256), 'logging.info', 'logging.info', (['f"""Setting up optimizer : {args.optimizer}"""'], {}), "(f'Setting up optimizer : {args.optimizer}')\n", (2212, 2256), False, 'import logging\n'), ((2538, 2565), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2563, 2565), False, 'import torch\n'), ((2635, 2662), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2660, 2662), False, 'import torch\n'), ((5291, 5328), 'logging.info', 'logging.info', (['"""training completed..."""'], {}), "('training completed...')\n", (5303, 5328), False, 'import logging\n'), ((5333, 5407), 'logging.info', 'logging.info', (['"""The stats for best trained model on test set are as below:"""'], {}), "('The stats for best trained model on test set are as below:')\n", (5345, 5407), False, 'import logging\n'), ((5412, 5440), 'logging.info', 'logging.info', (['best_acc_stats'], {}), '(best_acc_stats)\n', (5424, 5440), False, 'import logging\n'), ((779, 790), 'time.localtime', 'localtime', ([], {}), '()\n', (788, 790), False, 'from time import localtime, strftime, time\n'), ((1128, 1157), 'os.path.isdir', 
'os.path.isdir', (['model_save_pth'], {}), '(model_save_pth)\n', (1141, 1157), False, 'import os\n'), ((1167, 1190), 'utils.mkdir_p', 'mkdir_p', (['model_save_pth'], {}), '(model_save_pth)\n', (1174, 1190), False, 'from utils import mkdir_p, parse_args\n'), ((2711, 2751), 'logging.info', 'logging.info', (['f"""Using consine annealing"""'], {}), "(f'Using consine annealing')\n", (2723, 2751), False, 'import logging\n'), ((3145, 3249), 'logging.info', 'logging.info', (['f"""Step sizes : {args.schedule_steps} | lr-decay-factor : {args.lr_decay_factor}"""'], {}), "(\n f'Step sizes : {args.schedule_steps} | lr-decay-factor : {args.lr_decay_factor}'\n )\n", (3157, 3249), False, 'import logging\n'), ((3260, 3365), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'args.schedule_steps', 'gamma': 'args.lr_decay_factor'}), '(optimizer, milestones=args.schedule_steps,\n gamma=args.lr_decay_factor)\n', (3290, 3365), True, 'import torch.optim as optim\n'), ((3383, 3432), 'os.path.join', 'os.path.join', (['model_save_pth', '"""train_metrics.txt"""'], {}), "(model_save_pth, 'train_metrics.txt')\n", (3395, 3432), False, 'import os\n'), ((3784, 3790), 'time.time', 'time', ([], {}), '()\n', (3788, 3790), False, 'from time import localtime, strftime, time\n'), ((4051, 4090), 'solvers.runners.test', 'test', (['testloader', 'model', 'test_criterion'], {}), '(testloader, model, test_criterion)\n', (4055, 4090), False, 'from solvers.runners import train, train_aug, test\n'), ((4113, 4119), 'time.time', 'time', ([], {}), '()\n', (4117, 4119), False, 'from time import localtime, strftime, time\n'), ((605, 639), 'numpy.cos', 'np.cos', (['(step / total_steps * np.pi)'], {}), '(step / total_steps * np.pi)\n', (611, 639), True, 'import numpy as np\n'), ((923, 945), 'utils.create_save_path', 'create_save_path', (['args'], {}), '(args)\n', (939, 945), False, 'from utils import get_lr, save_checkpoint, create_save_path\n'), ((1049, 1071), 
'utils.create_save_path', 'create_save_path', (['args'], {}), '(args)\n', (1065, 1071), False, 'from utils import get_lr, save_checkpoint, create_save_path\n'), ((1463, 1486), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1484, 1486), False, 'import logging\n'), ((4559, 4576), 'utils.get_lr', 'get_lr', (['optimizer'], {}), '(optimizer)\n', (4565, 4576), False, 'from utils import get_lr, save_checkpoint, create_save_path\n'), ((3866, 3883), 'utils.get_lr', 'get_lr', (['optimizer'], {}), '(optimizer)\n', (3872, 3883), False, 'from utils import get_lr, save_checkpoint, create_save_path\n'), ((1391, 1432), 'os.path.join', 'os.path.join', (['model_save_pth', '"""train.log"""'], {}), "(model_save_pth, 'train.log')\n", (1403, 1432), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 23:44:51 2019
@author: steven
"""
import numpy as np
import math
from analysis_document import *
from distane_between_axis import *
from sympy import *
from projection_PlaneAndPoint import *
# --- Ligand: principal-axis (PCA) analysis for a single frame -------------
pdb_name = r"E:\First_project\frame\1.pdb"
scale_factor = 10
# read_pdb_xyz_ligand returns a list: index 0 holds the coordinates;
# indices 1..3 are reference points used below to orient the three axes.
xyz = read_pdb_xyz_ligand(pdb_name)[0]
HeadOfLigandDomin = np.array(read_pdb_xyz_ligand(pdb_name)[1])
ForSecond = np.array(read_pdb_xyz_ligand(pdb_name)[2])
ForThird = np.array(read_pdb_xyz_ligand(pdb_name)[3])
coord = np.array(xyz, float)
center = np.mean(coord, 0)
coord = coord - center
# 3x3 scatter (inertia) matrix of the mean-centred coordinates.
inertia = np.dot(coord.transpose(), coord)
e_values, e_vectors = np.linalg.eig(inertia)
# Rank the eigenpairs by eigenvalue: axis1 = largest, axis3 = smallest,
# axis2 = the remaining (middle) one.
for i in range(len(e_values)):
    if e_values[i] == max(e_values):
        eval1 = e_values[i]
        axis1 = e_vectors[:,i]
    elif e_values[i] == min(e_values):
        eval3 = e_values[i]
        axis3 = e_vectors[:,i]
    else:
        eval2 = e_values[i]
        axis2 = e_vectors[:,i]
# Fix the direction of each eigenvector: eigenvectors come with an
# arbitrary sign, so flip any axis whose angle to its reference point
# is >= 90 degrees.
examine_vector1_1 = np.array(axis1)
examine_vector1_2 = HeadOfLigandDomin - center
examine_angle1 = calculate_angle(examine_vector1_1,examine_vector1_2)
if examine_angle1 >=90:
    axis1 = -1 * axis1
examine_vector2_1 = np.array(axis2)
examine_vector2_2 = ForSecond - center
examine_angle2 = calculate_angle(examine_vector2_1,examine_vector2_2)
if examine_angle2 >=90:
    axis2 = -1 * axis2
examine_vector3_1 = np.array(axis3)
examine_vector3_2 = ForThird - center
examine_angle3 = calculate_angle(examine_vector3_1,examine_vector3_2)
if examine_angle3 >=90:
    axis3 = -1 * axis3
# Landmark points along the oriented axes (scaled 3:2:1).
point1 = 3 * scale_factor * axis1 + center
point2 = 2 * scale_factor * axis2 + center
point3 = 1 * scale_factor * axis3 + center
# --- Domain 2: same PCA on its own coordinates ----------------------------
xyz2 = read_pdb_xyz_domin2(pdb_name)
coord2 = np.array(xyz2, float)
center2 = np.mean(coord2, 0)
coord2 = coord2 - center2
inertia2 = np.dot(coord2.transpose(), coord2)
e_values2, e_vectors2 = np.linalg.eig(inertia2)
# axis4 = largest eigenvalue, axis6 = smallest (no sign fixing here).
for k in range(len(e_values2)):
    if e_values2[k] == max(e_values2):
        eval4 = e_values2[k]
        axis4 = e_vectors2[:,k]
    elif e_values2[k] == min(e_values2):
        eval6 = e_values2[k]
        axis6 = e_vectors2[:,k]
    else:
        eval5 = e_values2[k]
        axis5 = e_vectors2[:,k]
point4 = 1 * scale_factor * axis4 + center2
point5 = 1 * scale_factor * axis5 + center2
point6 = 1 * scale_factor * axis6 + center2
# --- Domain 3: same PCA again ---------------------------------------------
xyz3 = read_pdb_xyz_domin3(pdb_name)
coord3 = np.array(xyz3, float)
center3 = np.mean(coord3, 0)
coord3 = coord3 - center3
inertia3 = np.dot(coord3.transpose(), coord3)
e_values3, e_vectors3 = np.linalg.eig(inertia3)
# axis7 = largest eigenvalue, axis9 = smallest.
for m in range(len(e_values3)):
    if e_values3[m] == max(e_values3):
        eval7 = e_values3[m]
        axis7 = e_vectors3[:,m]
    elif e_values3[m] == min(e_values3):
        eval9 = e_values3[m]
        axis9 = e_vectors3[:,m]
    else:
        eval8 = e_values3[m]
        axis8 = e_vectors3[:,m]
point7 = 1 * scale_factor * axis7 + center3
point8 = 1 * scale_factor * axis8 + center3
point9 = 1 * scale_factor * axis9 + center3
# --- Project the domain centres / PCA points onto the ligand plane --------
center_vector1 = np.array(center2-center)
center_vector2 = np.array(center3-center)
standard_vector = list(point3-center) # projected x-axis of the plane
projection_vector1 = list(point1 - center) # normal vector of the plane
projection_vector2 = list(point2 - center) # projected y-axis of the plane
projection_point_domin2_center = np.array(projection_point(center2,center,point2,point3,projection_vector1))
projection_point_domin2_pca = np.array(projection_point(point4,center,point2,point3,projection_vector1)) # projection of the domin2 PCA point onto the plane
projection_point_domin3_center = np.array(projection_point(center3,center,point2,point3,projection_vector1)) # projection of the domin3 centre onto the plane
projection_point_domin3_pca = np.array(projection_point(point7,center,point2,point3,projection_vector1)) # projection of the domin3 PCA point onto the plane
# The projections are converted to numpy arrays so the points can be
# subtracted elementwise below; plain Python lists would need the
# subtraction done index by index.
projection_vector_d2_ct = list(projection_point_domin2_center - center)
projection_vector_d3_ct = list(projection_point_domin3_center - center)
projection_vector_d2_pca = list(projection_point_domin2_pca - projection_point_domin2_center)
projection_vector_d3_pca = list(projection_point_domin3_pca - projection_point_domin3_center)
#print(math.sqrt(projection_vector_d2_ct[0]**2 + projection_vector_d2_ct[1]**2+projection_vector_d2_ct[2]**2))
#print(math.sqrt(projection_vector_d3_ct[0]**2 + projection_vector_d3_ct[1]**2+projection_vector_d3_ct[2]**2))
ProjectAngleDomin2 = calculate_angle(center_vector1,projection_vector_d2_ct)
ProjectAngleDomin3 = calculate_angle(center_vector2,projection_vector_d3_ct)
#print(ProjectAngleDomin2,ProjectAngleDomin3)
check_angle = calculate_angle(projection_vector_d2_ct,standard_vector) # angle to the x-axis
compare_angle = calculate_angle(projection_vector_d2_ct,projection_vector2) # angle to the y-axis
check_angle2 = calculate_angle(projection_vector_d3_ct,standard_vector) # angle to the x-axis
compare_angle2 = calculate_angle(projection_vector_d3_ct,projection_vector2) # angle to the y-axis
'''
#檔案的寫出
output_point = open('E:\paris_專題_大三寒假\投影_寒假專題\domin2的平面投影點與軸的距離寫入aa.csv','a')
output_point.write(str(x1))
output_point.write(',')
output_point.write(str(y1))
output_point.write('\n')
output_point.close()
output_point = open('E:\paris_專題_大三寒假\投影_寒假專題\domin3的平面投影點與軸的距離寫入aa.csv','a')
output_point.write(str(x2))
output_point.write(',')
output_point.write(str(y2))
output_point.write('\n')
output_point.close()
'''
'''
idx = e_values.argsort()[::-1]
e_values = e_values[idx]
e_vectors = e_vectors[:,idx]
point1 = 3 * scale_factor * e_vectors[0] + center
point2 = 2 * scale_factor * e_vectors[1] + center
point3 = 1 * scale_factor * e_vectors[2] + center
print(e_values)
print(e_vectors)
print(center)
print(point1,point2,point3,sep='\n')
vector_picture = r'E:\3.pdb_vector.bild'
picture = open(vector_picture,'w')
picture.write('.color blue\n')
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],point1[0],point1[1],point1[2]))
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],point2[0],point2[1],point2[2]))
picture.write('.arrow %f %f %f %f %f %f\n'\
%(center[0],center[1],center[2],point3[0],point3[1],point3[2]))
picture.close()
'''
#print(projection_vector_d2,projection_vector_d3,sep='\n')
#print(get_projection_axis_value(projection_vector_d2,projection_vector2))
#print(projection_point_domin2_center,projection_point_domin2_pca,projection_point_domin3_center,projection_point_domin3_pca,sep='\n')
#projection_vector_domin2 = list(projection_point_domin2_pca - projection_point_domin2_center)
#print(projection_vector_domin2)
#projection_vector_domin3 = list(projection_point_domin3_pca - projection_point_domin3_center)
#驗證是否向量內積為零,但Python的向量內積只能非常趨近於零
def _principal_axes(xyz):
    """Return (center, major, mid, minor) principal axes of a point cloud.

    PCA via eigendecomposition of the 3x3 scatter (inertia) matrix of the
    mean-centred coordinates: the eigenvector with the largest eigenvalue
    is the major axis, the one with the smallest is the minor axis.
    """
    coords = np.array(xyz, float)
    center = np.mean(coords, 0)
    coords = coords - center
    inertia = np.dot(coords.transpose(), coords)
    e_values, e_vectors = np.linalg.eig(inertia)
    for idx in range(len(e_values)):
        if e_values[idx] == max(e_values):
            axis_major = e_vectors[:, idx]
        elif e_values[idx] == min(e_values):
            axis_minor = e_vectors[:, idx]
        else:
            axis_mid = e_vectors[:, idx]
    return center, axis_major, axis_mid, axis_minor


def _orient_towards(axis, target, center):
    """Flip *axis* if its angle to (target - center) is >= 90 degrees.

    Eigenvectors come with an arbitrary sign; this pins each axis so it
    points towards a known reference point of the molecule.
    """
    if calculate_angle(np.array(axis), target - center) >= 90:
        return -1 * axis
    return axis


def main():
    """Process frames 1..2000 of the trajectory.

    For each frame: compute the ligand's oriented principal axes, project
    the centres of domain 2 and domain 3 onto the plane spanned by the
    second/third axes, and append the two projection angles to CSV files.
    """
    for j in range(1, 2001):
        print(j)
        pdb_name = r"E:\First_project\frame" + "\\" + str(j) + r'.pdb'
        scale_factor = 20
        # Read the ligand once per frame (the original re-read the file
        # four times): index 0 holds the coordinates, 1..3 the reference
        # points used to orient the three principal axes.
        ligand = read_pdb_xyz_ligand(pdb_name)
        xyz = ligand[0]
        head_of_ligand = np.array(ligand[1])
        for_second = np.array(ligand[2])
        for_third = np.array(ligand[3])
        center, axis1, axis2, axis3 = _principal_axes(xyz)
        axis1 = _orient_towards(axis1, head_of_ligand, center)
        axis2 = _orient_towards(axis2, for_second, center)
        axis3 = _orient_towards(axis3, for_third, center)
        # Landmark points along the oriented ligand axes (scaled 3:2:1).
        point1 = 3 * scale_factor * axis1 + center
        point2 = 2 * scale_factor * axis2 + center
        point3 = 1 * scale_factor * axis3 + center
        # Only the geometric centres of domains 2 and 3 are used below.
        center2 = _principal_axes(read_pdb_xyz_domin2(pdb_name))[0]
        center3 = _principal_axes(read_pdb_xyz_domin3(pdb_name))[0]
        center_vector1 = list(center2 - center)
        center_vector2 = list(center3 - center)
        standard_vector = list(point3 - center)     # projected x-axis
        projection_vector1 = list(point1 - center)  # normal of the projection plane
        projection_vector2 = list(point2 - center)  # projected y-axis
        # Project each domain centre onto the plane spanned by point2/point3.
        projection_point_domin2_center = np.array(
            projection_point(center2, center, point2, point3, projection_vector1))
        projection_point_domin3_center = np.array(
            projection_point(center3, center, point2, point3, projection_vector1))
        projection_vector_d2_ct = np.array(projection_point_domin2_center - center)
        projection_vector_d3_ct = np.array(projection_point_domin3_center - center)
        ProjectAngleDomin2 = calculate_angle(center_vector1, projection_vector_d2_ct)  # out-of-plane angle
        ProjectAngleDomin3 = calculate_angle(center_vector2, projection_vector_d3_ct)  # out-of-plane angle
        check_angle = calculate_angle(projection_vector_d2_ct, standard_vector)       # d2 vs x-axis
        compare_angle = calculate_angle(projection_vector_d2_ct, projection_vector2)  # d2 vs y-axis
        check_angle2 = calculate_angle(projection_vector_d3_ct, standard_vector)      # d3 vs x-axis
        compare_angle2 = calculate_angle(projection_vector_d3_ct, projection_vector2) # d3 vs y-axis
        # Classify each projected vector into a quadrant of the plane and
        # compute its in-plane (x, y) coordinates.  These results currently
        # feed only the commented-out CSV writers further down the file.
        quadrant_d2 = 0  # quadrant of the domain-2 projection
        quadrant_d3 = 0  # quadrant of the domain-3 projection
        if check_angle > 90 and compare_angle < 90:
            quadrant_d2 = 1
            angle_x = check_angle
            x1 = get_projection_axis_value_firstZone(projection_vector_d2_ct, standard_vector)[0]
            y1 = get_projection_axis_value_firstZone(projection_vector_d2_ct, standard_vector)[1]
        elif check_angle < 90 and compare_angle < 90:
            quadrant_d2 = 2
            angle_x = check_angle
            x1 = get_projection_axis_value_secondZone(projection_vector_d2_ct, standard_vector)[0]
            y1 = get_projection_axis_value_secondZone(projection_vector_d2_ct, standard_vector)[1]
        elif check_angle < 90 and compare_angle > 90:
            quadrant_d2 = 3
            angle_x = check_angle
            x1 = get_projection_axis_value_thirdZone(projection_vector_d2_ct, standard_vector)[0]
            y1 = get_projection_axis_value_thirdZone(projection_vector_d2_ct, standard_vector)[1]
        elif check_angle > 90 and compare_angle > 90:
            quadrant_d2 = 4
            angle_x = check_angle
            x1 = get_projection_axis_value_forthZone(projection_vector_d2_ct, standard_vector)[0]
            y1 = get_projection_axis_value_forthZone(projection_vector_d2_ct, standard_vector)[1]
        if check_angle2 > 90 and compare_angle2 < 90:
            quadrant_d3 = 1
            angle_x2 = check_angle2
            x2 = get_projection_axis_value_firstZone(projection_vector_d3_ct, standard_vector)[0]
            y2 = get_projection_axis_value_firstZone(projection_vector_d3_ct, standard_vector)[1]
        elif check_angle2 < 90 and compare_angle2 < 90:
            quadrant_d3 = 2
            angle_x2 = check_angle2
            x2 = get_projection_axis_value_secondZone(projection_vector_d3_ct, standard_vector)[0]
            y2 = get_projection_axis_value_secondZone(projection_vector_d3_ct, standard_vector)[1]
        elif check_angle2 < 90 and compare_angle2 > 90:
            quadrant_d3 = 3
            angle_x2 = check_angle2
            x2 = get_projection_axis_value_thirdZone(projection_vector_d3_ct, standard_vector)[0]
            y2 = get_projection_axis_value_thirdZone(projection_vector_d3_ct, standard_vector)[1]
        elif check_angle2 > 90 and compare_angle2 > 90:
            quadrant_d3 = 4
            angle_x2 = check_angle2
            x2 = get_projection_axis_value_forthZone(projection_vector_d3_ct, standard_vector)[0]
            y2 = get_projection_axis_value_forthZone(projection_vector_d3_ct, standard_vector)[1]
        # Append the two projection angles; 'with' guarantees the files are
        # closed each iteration (the original leaked two handles per frame).
        with open(r'E:\project_angle_domin2.csv', 'a') as out_d2:
            out_d2.write(str(ProjectAngleDomin2) + '\n')
        with open(r'E:\project_angle_domin3.csv', 'a') as out_d3:
            out_d3.write(str(ProjectAngleDomin3) + '\n')
# Run the main program (processes every frame and appends the angle CSVs).
main()
#輸出檔(for csv)
'''
output_angle = open('E:\domin2的平面投影點與center和軸的角度寫入.csv','a')
output_angle.write(str(angle_x) +',' + str(quadrant_d2) + '\n')
output_angle.close()
output_angle = open('E:\domin3的平面投影點與center和軸的角度寫入.csv','a')
output_angle.write(str(angle_x2) +',' + str(quadrant_d3) + '\n')
output_angle.close()
output_point = open('E:\domin2的平面投影點與軸的距離寫入_new.csv','a')
output_point.write(str(x1) + ',' + str(y1) + '\n' )
output_point.close()
output_point = open('E:\domin3的平面投影點與軸的距離寫入_new.csv','a')
output_point.write(str(x2))
output_point.write(',')
output_point.write(str(y2) + '\n')
output_point.close()
angle_list_dm2[j] = angle_x
angle_list_dm3[j] = angle_x2
print(j)
angle_list_dm2.sort()
angle_list_dm3.sort()
output_angle_sort = open('E:\paris_專題_大三寒假\投影_寒假專題\domin2的sort角度寫入.csv','a')
for i in range(1,2001):
output_angle_sort.write(angle_list_dm2[i] + '\n')
output_angle_sort = open('E:\paris_專題_大三寒假\投影_寒假專題\domin3的sort角度寫入.csv','a')
for i in range(1,2001):
output_angle_sort.write(angle_list_dm3[i] + '\n')
output_point = open('E:\paris_專題_大三寒假\投影_寒假專題\domin2的平面投影點與軸的距離寫入qq.csv','a')
output_point.write(str(x1))
output_point.write(',')
output_point.write(str(y1) + '\n'
output_point.close()
output_point = open('E:\paris_專題_大三寒假\投影_寒假專題\domin3的平面投影點與軸的距離寫入qq.csv','a')
output_point.write(str(x2))
output_point.write(',')
output_point.write(str(y2) + '\n')
output_point.close()
output_projection_point = open('E:\paris_專題_大三寒假\投影_寒假專題\更新後_domin2的平面投影點寫入.csv','a')
output_projection_point.write(str(projection_point_domin2_center[0]))
output_projection_point.write(',')
output_projection_point.write(str(projection_point_domin2_center[1]))
output_projection_point.write(',')
output_projection_point.write(str(projection_point_domin2_center[2]))
output_projection_point.write('\n')
output_projection_point.close()
output_projection_point = open('E:\paris_專題_大三寒假\投影_寒假專題\更新後_domin3的平面投影點寫入.csv','a')
output_projection_point.write(str(projection_point_domin3_center[0]))
output_projection_point.write(',')
output_projection_point.write(str(projection_point_domin3_center[1]))
output_projection_point.write(',')
output_projection_point.write(str(projection_point_domin3_center[2]))
output_projection_point.write('\n')
output_projection_point.close()
output_projection_domin2_pca = open('E:\paris_專題_大三寒假\投影_寒假專題\domin2的pca投影點寫入.csv','a')
output_projection_domin2_pca.write(str(projection_point_domin2_pca[0]))
output_projection_domin2_pca.write(',')
output_projection_domin2_pca.write(str(projection_point_domin2_pca[1]))
output_projection_domin2_pca.write(',')
output_projection_domin2_pca.write(str(projection_point_domin2_pca[2]))
output_projection_domin2_pca.write('\n')
output_projection_domin2_pca.close()
output_projection_domin2_pca = open('E:\paris_專題_大三寒假\投影_寒假專題\domin3的pca投影點寫入.csv','a')
output_projection_domin2_pca.write(str(projection_point_domin3_pca[0]))
output_projection_domin2_pca.write(',')
output_projection_domin2_pca.write(str(projection_point_domin3_pca[1]))
output_projection_domin2_pca.write(',')
output_projection_domin2_pca.write(str(projection_point_domin3_pca[2]))
output_projection_domin2_pca.write('\n')
output_projection_domin2_pca.close()
'''
#寫成chimera讀的向量檔案,使用的話,只要把段落上下的三引號刪除即可
'''
vector_picture = r'E:\專題__vector.bild'
picture = open(vector_picture,'w')
picture.write('.color red\n')
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],center2[0],center2[1],center2[2]))
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],center3[0],center3[1],center3[2]))
picture.write('.color blue\n')
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],point1[0],point1[1],point1[2]))
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],point2[0],point2[1],point2[2]))
picture.write('.arrow %f %f %f %f %f %f\n'\
%(center[0],center[1],center[2],point3[0],point3[1],point3[2]))
picture.write('.color green\n')
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],projection_point_domin2_center[0],projection_point_domin2_center[1],projection_point_domin2_center[2]))
picture.write('.arrow %f %f %f %f %f %f\n'\
%(center2[0],center2[1],center2[2],point4[0],point4[1],point4[2]))
picture.write('.arrow %f %f %f %f %f %f\n' \
%(center[0],center[1],center[2],projection_point_domin3_center[0],projection_point_domin3_center[1],projection_point_domin3_center[2]))
picture.write('.arrow %f %f %f %f %f %f\n'\
%(center3[0],center3[1],center3[2],point7[0],point7[1],point7[2]))
# print('.arrow',center,center3,sep=' ',end = '\n',file=vector_picture)
picture.close()
'''
'''
print("Ligand的物理中心",center)
print("domin2的物理中心",center2)
print("domin3的物理中心",center3)
print('兩個向量:\n',center_vector1,'\n',center_vector2)
print('ligand:%f %f %f' %(center[0],center[1],center[2]))
print('domin2:%f %f %f' %(center2[0],center2[1],center2[2]))
print('domin3:%f %f %f' %(center3[0],center3[1],center3[2]))
print('point1:%f %f %f' %(point1[0],point1[1],point1[2]))
print('point2:%f %f %f' %(point2[0],point2[1],point2[2]))
print('point3:%f %f %f' %(point3[0],point3[1],point3[2]))
print('domin2與domin3與ligand的角度:%f' %calculate_angle(center_vector1,center_vector2))
print('domin2與point3與ligand的角度:%f' %calculate_angle(standard_vector,center_vector1))
print('domin3與point3與ligand的角度:%f' %calculate_angle(standard_vector,center_vector2))
'''
#寫出Output檔案
'''
output_cos_angle = open('domin2和domin3的角度寫入.csv','a')
output_cos_angle.write(str(calculate_angle(center_vector1,center_vector2)))
output_cos_angle.write('\n')
output_cos_angle.close()
output_cos_angle = open('domin2和基準的角度寫入.csv','a')
output_cos_angle.write(str(calculate_angle(center_vector1,standard_vector)))
output_cos_angle.write('\n')
output_cos_angle.close()
output_cos_angle = open('domin3和基準的角度寫入.csv','a')
output_cos_angle.write(str(calculate_angle(center_vector2,standard_vector)))
output_cos_angle.write('\n')
output_cos_angle.close()
'''
'''
#print(calculate_angle(projection_vector1,projection_vector2)) #證明point1 and point2的角度為90
print(center)
print(point1)
print(point2)
print(point3)
b = np.array([0,0,0])
a = np.array([center,point2,point3])
c = solve(a,b)
print(c)
'''
'''
output_cos_angle = open('E:\paris_專題_大三寒假\投影_寒假專題\domin2的投影角度寫入.csv','a')
output_cos_angle.write(str(projection_angle(center_vector1,projection_vector1,projection_vector2)))
output_cos_angle.write('\n')
output_cos_angle.close()
output_cos_angle = open('E:\paris_專題_大三寒假\投影_寒假專題\domin3的投影角度寫入.csv','a')
output_cos_angle.write(str(projection_angle(center_vector2,projection_vector1,projection_vector2)))
output_cos_angle.write('\n')
output_cos_angle.close()
print(j)
print('domin2的投影角度:' ,projection_angle( center_vector1,projection_vector1,projection_vector2))
print('domin3的投影角度:' ,projection_angle(center_vector2,projection_vector1, projection_vector2))
'''
| [
"numpy.array",
"numpy.mean",
"numpy.linalg.eig"
] | [((543, 563), 'numpy.array', 'np.array', (['xyz', 'float'], {}), '(xyz, float)\n', (551, 563), True, 'import numpy as np\n'), ((575, 592), 'numpy.mean', 'np.mean', (['coord', '(0)'], {}), '(coord, 0)\n', (582, 592), True, 'import numpy as np\n'), ((691, 713), 'numpy.linalg.eig', 'np.linalg.eig', (['inertia'], {}), '(inertia)\n', (704, 713), True, 'import numpy as np\n'), ((1080, 1095), 'numpy.array', 'np.array', (['axis1'], {}), '(axis1)\n', (1088, 1095), True, 'import numpy as np\n'), ((1285, 1300), 'numpy.array', 'np.array', (['axis2'], {}), '(axis2)\n', (1293, 1300), True, 'import numpy as np\n'), ((1482, 1497), 'numpy.array', 'np.array', (['axis3'], {}), '(axis3)\n', (1490, 1497), True, 'import numpy as np\n'), ((1846, 1867), 'numpy.array', 'np.array', (['xyz2', 'float'], {}), '(xyz2, float)\n', (1854, 1867), True, 'import numpy as np\n'), ((1881, 1899), 'numpy.mean', 'np.mean', (['coord2', '(0)'], {}), '(coord2, 0)\n', (1888, 1899), True, 'import numpy as np\n'), ((2006, 2029), 'numpy.linalg.eig', 'np.linalg.eig', (['inertia2'], {}), '(inertia2)\n', (2019, 2029), True, 'import numpy as np\n'), ((2553, 2574), 'numpy.array', 'np.array', (['xyz3', 'float'], {}), '(xyz3, float)\n', (2561, 2574), True, 'import numpy as np\n'), ((2586, 2604), 'numpy.mean', 'np.mean', (['coord3', '(0)'], {}), '(coord3, 0)\n', (2593, 2604), True, 'import numpy as np\n'), ((2706, 2729), 'numpy.linalg.eig', 'np.linalg.eig', (['inertia3'], {}), '(inertia3)\n', (2719, 2729), True, 'import numpy as np\n'), ((3222, 3248), 'numpy.array', 'np.array', (['(center2 - center)'], {}), '(center2 - center)\n', (3230, 3248), True, 'import numpy as np\n'), ((3265, 3291), 'numpy.array', 'np.array', (['(center3 - center)'], {}), '(center3 - center)\n', (3273, 3291), True, 'import numpy as np\n'), ((7514, 7534), 'numpy.array', 'np.array', (['xyz', 'float'], {}), '(xyz, float)\n', (7522, 7534), True, 'import numpy as np\n'), ((7554, 7571), 'numpy.mean', 'np.mean', (['coord', '(0)'], {}), '(coord, 
0)\n', (7561, 7571), True, 'import numpy as np\n'), ((7689, 7711), 'numpy.linalg.eig', 'np.linalg.eig', (['inertia'], {}), '(inertia)\n', (7702, 7711), True, 'import numpy as np\n'), ((8172, 8187), 'numpy.array', 'np.array', (['axis1'], {}), '(axis1)\n', (8180, 8187), True, 'import numpy as np\n'), ((8417, 8432), 'numpy.array', 'np.array', (['axis2'], {}), '(axis2)\n', (8425, 8432), True, 'import numpy as np\n'), ((8654, 8669), 'numpy.array', 'np.array', (['axis3'], {}), '(axis3)\n', (8662, 8669), True, 'import numpy as np\n'), ((9089, 9110), 'numpy.array', 'np.array', (['xyz2', 'float'], {}), '(xyz2, float)\n', (9097, 9110), True, 'import numpy as np\n'), ((9131, 9149), 'numpy.mean', 'np.mean', (['coord2', '(0)'], {}), '(coord2, 0)\n', (9138, 9149), True, 'import numpy as np\n'), ((9279, 9302), 'numpy.linalg.eig', 'np.linalg.eig', (['inertia2'], {}), '(inertia2)\n', (9292, 9302), True, 'import numpy as np\n'), ((9938, 9959), 'numpy.array', 'np.array', (['xyz3', 'float'], {}), '(xyz3, float)\n', (9946, 9959), True, 'import numpy as np\n'), ((9979, 9997), 'numpy.mean', 'np.mean', (['coord3', '(0)'], {}), '(coord3, 0)\n', (9986, 9997), True, 'import numpy as np\n'), ((10123, 10146), 'numpy.linalg.eig', 'np.linalg.eig', (['inertia3'], {}), '(inertia3)\n', (10136, 10146), True, 'import numpy as np\n'), ((11738, 11787), 'numpy.array', 'np.array', (['(projection_point_domin2_center - center)'], {}), '(projection_point_domin2_center - center)\n', (11746, 11787), True, 'import numpy as np\n'), ((11823, 11872), 'numpy.array', 'np.array', (['(projection_point_domin3_center - center)'], {}), '(projection_point_domin3_center - center)\n', (11831, 11872), True, 'import numpy as np\n')] |
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
def simple_3d():
    """Draw a grey 3-D helix plus 100 noisy scatter points along it."""
    figure = plt.figure()
    axes3d = plt.axes(projection='3d')
    # Parametric helix: z runs 0..15 while x/y trace the unit circle.
    z_line = np.linspace(0, 15, 1000)
    x_line = np.sin(z_line)
    y_line = np.cos(z_line)
    axes3d.plot3D(x_line, y_line, z_line, 'gray')
    # Scattered points: random positions on the helix, jittered with
    # Gaussian noise in x and y, coloured by their z value.
    z_pts = 15 * np.random.random(100)
    x_pts = np.sin(z_pts) + 0.1 * np.random.randn(100)
    y_pts = np.cos(z_pts) + 0.1 * np.random.randn(100)
    axes3d.scatter3D(x_pts, y_pts, z_pts, c=z_pts, cmap='Greens')
    plt.show()
def f(x, y):
    """Radial sine: sin(r) where r is the distance of (x, y) from the origin."""
    radius = np.sqrt(x ** 2 + y ** 2)
    return np.sin(radius)
def color_3d():
    """Draw 50 greyscale contour levels of f over [-6, 6]^2,
    viewed from elevation 60 degrees, azimuth 35 degrees."""
    axis_pts = np.linspace(-6, 6, 30)
    grid_x, grid_y = np.meshgrid(axis_pts, axis_pts)
    grid_z = f(grid_x, grid_y)
    figure = plt.figure()
    axes3d = plt.axes(projection='3d')
    axes3d.contour3D(grid_x, grid_y, grid_z, 50, cmap='binary')
    axes3d.set_xlabel('x')
    axes3d.set_ylabel('y')
    axes3d.set_zlabel('z')
    axes3d.view_init(60, 35)
    plt.show()
def wireframes():
    """Render f as a wireframe and as shaded surfaces: first on a
    Cartesian grid, then on a partial polar parameterisation."""
    side = np.linspace(-6, 6, 30)
    grid_x, grid_y = np.meshgrid(side, side)
    grid_z = f(grid_x, grid_y)
    figure = plt.figure()
    axes3d = plt.axes(projection='3d')
    axes3d.plot_wireframe(grid_x, grid_y, grid_z, color='black')
    axes3d.set_title('wireframe')
    axes3d = plt.axes(projection='3d')
    axes3d.plot_surface(grid_x, grid_y, grid_z, rstride=1, cstride=1,
                        cmap='viridis', edgecolor='none')
    axes3d.set_title('surface')
    # Polar patch: radius 0..6, angle covering most (but not all) of the
    # circle, so a wedge of the surface is left open.
    rad = np.linspace(0, 6, 20)
    ang = np.linspace(-0.9 * np.pi, 0.8 * np.pi, 40)
    rad, ang = np.meshgrid(rad, ang)
    polar_x = rad * np.sin(ang)
    polar_y = rad * np.cos(ang)
    polar_z = f(polar_x, polar_y)
    axes3d = plt.axes(projection='3d')
    axes3d.plot_surface(polar_x, polar_y, polar_z, rstride=1, cstride=1,
                        cmap='viridis', edgecolor='none')
    plt.show()
def surface_triangulations():
    """Sample f at 1000 uniformly-random polar points; show them as a
    coloured scatter and as a triangulated surface."""
    angles = 2 * np.pi * np.random.random(1000)
    radii = 6 * np.random.random(1000)
    xs = np.ravel(radii * np.sin(angles))
    ys = np.ravel(radii * np.cos(angles))
    zs = f(xs, ys)
    axes3d = plt.axes(projection='3d')
    axes3d.scatter(xs, ys, zs, c=zs, cmap='viridis', linewidth=0.5)
    axes3d = plt.axes(projection='3d')
    axes3d.plot_trisurf(xs, ys, zs, cmap='viridis', edgecolor='none')
    plt.show()
def visualize_mobius_strip():
    """Triangulate and render a Moebius strip.

    The strip is parameterised by (theta, w): theta runs once around the
    loop while the half-twist phi = theta / 2 tilts the cross section,
    producing the single-sided surface.
    """
    theta = np.linspace(0, 2 * np.pi, 30)
    width = np.linspace(-0.25, 0.25, 8)
    width, theta = np.meshgrid(width, theta)
    half_twist = 0.5 * theta
    # Radius in the x-y plane varies with the tilt of the cross section.
    radius = 1 + width * np.cos(half_twist)
    xs = np.ravel(radius * np.cos(theta))
    ys = np.ravel(radius * np.sin(theta))
    zs = np.ravel(width * np.sin(half_twist))
    # Triangulate in the underlying (w, theta) parameter space so the
    # mesh follows the strip rather than its 3-D embedding.
    from matplotlib.tri import Triangulation
    mesh = Triangulation(np.ravel(width), np.ravel(theta))
    axes3d = plt.axes(projection='3d')
    axes3d.plot_trisurf(xs, ys, zs, triangles=mesh.triangles,
                        cmap='viridis', linewidths=0.2)
    axes3d.set_xlim(-1, 1)
    axes3d.set_ylim(-1, 1)
    axes3d.set_zlim(-1, 1)
    plt.show()
if __name__ == '__main__':
    # Only the Moebius-strip demo is enabled; uncomment the others to run them.
    print('Numpy Version:', np.__version__)
    # simple_3d()
    # color_3d()
    # wireframes()
    # surface_triangulations()
    visualize_mobius_strip()
| [
"numpy.sqrt",
"numpy.random.random",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.axes",
"numpy.cos",
"numpy.random.randn",
"numpy.sin",
"numpy.meshgrid",
"numpy.ravel",
"matplotlib.pyplot.show"
] | [((114, 126), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (124, 126), True, 'import matplotlib.pyplot as plt\n'), ((136, 161), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (144, 161), True, 'import matplotlib.pyplot as plt\n'), ((174, 198), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(1000)'], {}), '(0, 15, 1000)\n', (185, 198), True, 'import numpy as np\n'), ((211, 224), 'numpy.sin', 'np.sin', (['zline'], {}), '(zline)\n', (217, 224), True, 'import numpy as np\n'), ((237, 250), 'numpy.cos', 'np.cos', (['zline'], {}), '(zline)\n', (243, 250), True, 'import numpy as np\n'), ((562, 572), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (570, 572), True, 'import matplotlib.pyplot as plt\n'), ((658, 680), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(30)'], {}), '(-6, 6, 30)\n', (669, 680), True, 'import numpy as np\n'), ((689, 711), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(30)'], {}), '(-6, 6, 30)\n', (700, 711), True, 'import numpy as np\n'), ((723, 740), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (734, 740), True, 'import numpy as np\n'), ((767, 779), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (777, 779), True, 'import matplotlib.pyplot as plt\n'), ((790, 815), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (798, 815), True, 'import matplotlib.pyplot as plt\n'), ((959, 969), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (967, 969), True, 'import matplotlib.pyplot as plt\n'), ((998, 1020), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(30)'], {}), '(-6, 6, 30)\n', (1009, 1020), True, 'import numpy as np\n'), ((1029, 1051), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(30)'], {}), '(-6, 6, 30)\n', (1040, 1051), True, 'import numpy as np\n'), ((1063, 1080), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1074, 1080), True, 'import numpy as np\n'), 
((1107, 1119), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1117, 1119), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1154), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1137, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1267), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1250, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1410, 1431), 'numpy.linspace', 'np.linspace', (['(0)', '(6)', '(20)'], {}), '(0, 6, 20)\n', (1421, 1431), True, 'import numpy as np\n'), ((1444, 1486), 'numpy.linspace', 'np.linspace', (['(-0.9 * np.pi)', '(0.8 * np.pi)', '(40)'], {}), '(-0.9 * np.pi, 0.8 * np.pi, 40)\n', (1455, 1486), True, 'import numpy as np\n'), ((1502, 1523), 'numpy.meshgrid', 'np.meshgrid', (['r', 'theta'], {}), '(r, theta)\n', (1513, 1523), True, 'import numpy as np\n'), ((1603, 1628), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1611, 1628), True, 'import matplotlib.pyplot as plt\n'), ((1738, 1748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1746, 1748), True, 'import matplotlib.pyplot as plt\n'), ((1960, 1985), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1968, 1985), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2080), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (2063, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2148, 2158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2156, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2232), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(30)'], {}), '(0, 2 * np.pi, 30)\n', (2214, 2232), True, 'import numpy as np\n'), ((2241, 2268), 'numpy.linspace', 'np.linspace', (['(-0.25)', '(0.25)', '(8)'], {}), '(-0.25, 0.25, 8)\n', (2252, 2268), True, 'import numpy as np\n'), ((2284, 
2305), 'numpy.meshgrid', 'np.meshgrid', (['w', 'theta'], {}), '(w, theta)\n', (2295, 2305), True, 'import numpy as np\n'), ((2651, 2676), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (2659, 2676), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2840, 2842), True, 'import matplotlib.pyplot as plt\n'), ((362, 383), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (378, 383), True, 'import numpy as np\n'), ((396, 409), 'numpy.sin', 'np.sin', (['zdata'], {}), '(zdata)\n', (402, 409), True, 'import numpy as np\n'), ((451, 464), 'numpy.cos', 'np.cos', (['zdata'], {}), '(zdata)\n', (457, 464), True, 'import numpy as np\n'), ((606, 630), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (613, 630), True, 'import numpy as np\n'), ((1537, 1550), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1543, 1550), True, 'import numpy as np\n'), ((1563, 1576), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1569, 1576), True, 'import numpy as np\n'), ((1805, 1827), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (1821, 1827), True, 'import numpy as np\n'), ((1840, 1862), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (1856, 1862), True, 'import numpy as np\n'), ((2611, 2622), 'numpy.ravel', 'np.ravel', (['w'], {}), '(w)\n', (2619, 2622), True, 'import numpy as np\n'), ((2624, 2639), 'numpy.ravel', 'np.ravel', (['theta'], {}), '(theta)\n', (2632, 2639), True, 'import numpy as np\n'), ((418, 438), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (433, 438), True, 'import numpy as np\n'), ((473, 493), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (488, 493), True, 'import numpy as np\n'), ((1884, 1897), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1890, 1897), True, 'import numpy as np\n'), ((1920, 1933), 
'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1926, 1933), True, 'import numpy as np\n'), ((2370, 2381), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2376, 2381), True, 'import numpy as np\n'), ((2404, 2417), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2410, 2417), True, 'import numpy as np\n'), ((2440, 2453), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2446, 2453), True, 'import numpy as np\n'), ((2476, 2487), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2482, 2487), True, 'import numpy as np\n')] |
from _libtimetag import *
import numpy as _np
import dateutil as _dateutil_imported
def read_sstt_header(filepath):
    """Reads the header file of a small simple time-tagged (SSTT) dataset

    The header is a tab-separated text file with an EXPERIMENT_HEADER
    section (one headings line, one contents line) followed by a
    CHANNEL_HEADER section (one headings line, then one line per channel
    until a blank line or end of file).

    Parameters
    ----------
    filepath : str
        Filepath to the header file

    Returns
    -------
    exp_header : dict
        A dictionary containing information describing the experiment
    chan_headers : dict
        Maps each channel ID to a dictionary describing that channel
    """
    # Read the whole file up front; strip only the trailing newline so
    # empty tab-separated fields survive.  ``with`` guarantees the handle
    # is closed even if parsing raises.
    with open(filepath) as header:
        lines = [line.replace('\n', '') for line in header]

    start_exp_header = False
    exp_header_contents = False
    start_chan_header = False
    exp_header_headings = None
    chan_header_headings = None
    chan_header_contents = False
    chan_ID_index = None
    # Defaults used for any field the file does not override.
    exp_header = {'Time_unit_seconds': 81e-12,
                  'device_type': 'qutau',
                  'experiment_start_timestamp_UTC': None}
    chan_headers = {}
    exp_info_types = {'Time_unit_seconds': _np.double,
                      'device_type': str,
                      'experiment_start_timestamp_UTC': "DATETIME"}
    # FIX: use the builtin ``bool`` -- ``np.bool`` was deprecated in
    # NumPy 1.20 and removed in 1.24, so the original raised
    # AttributeError on modern NumPy installations.
    chan_info_types = {'ChannelID': int,
                       'Filename': str,
                       'NumPhotons': _np.int64,
                       'NumOverflows': _np.int64,
                       'Filesize': _np.int64,
                       'HardwareSyncDivider': _np.int64,
                       'AdditionalSyncDivider': _np.int64,
                       'TotalSyncDivider': _np.int64,
                       'IsPulsesChannel': bool,
                       'HasPulsesChannel': bool,
                       'CorrespondingPulsesChannel': _np.int32,
                       'HasMicrotimes': bool,
                       'MicroDelayTime': _np.int64}
    default_chan_header = {'ChannelID': None,
                           'Filename': "None",
                           'NumPhotons': 0,
                           'NumOverflows': 0,
                           'Filesize': 0,
                           'HardwareSyncDivider': 1,
                           'AdditionalSyncDivider': 1,
                           'TotalSyncDivider': 1,
                           'IsPulsesChannel': False,
                           'HasPulsesChannel': False,
                           'CorrespondingPulsesChannel': None,
                           'HasMicrotimes': False,
                           'MicroDelayTime': 0}
    for i, l in enumerate(lines):
        # Section markers drive the parser state machine.
        if l == "EXPERIMENT_HEADER":
            start_exp_header = True
            continue
        if l == "CHANNEL_HEADER":
            start_chan_header = True
            continue
        if start_exp_header:
            # First line after the marker holds the column headings.
            exp_header_headings = l.split("\t")
            start_exp_header = False
            exp_header_contents = True
            continue
        if exp_header_contents:
            contents = l.split("\t")
            if len(contents) != len(exp_header_headings):
                print("Error in experiment header!")
                return None
            for j, c in enumerate(contents):
                hd_nm = exp_header_headings[j]
                if hd_nm in exp_info_types:
                    type_ = exp_info_types[hd_nm]
                    if type_ == "DATETIME":
                        # Timestamps are free-form; let dateutil parse them.
                        c = _dateutil_imported.parser.parse(c)
                    else:
                        c = type_(c)
                exp_header[hd_nm] = c
            exp_header_contents = False
            continue
        if start_chan_header:
            chan_header_headings = l.split("\t")
            for j, c in enumerate(chan_header_headings):
                if c == "ChannelID":
                    chan_ID_index = j
                    break
            if chan_ID_index is None:  # FIX: identity test instead of '== None'
                print("Error: could not find channel ID column")
                break
            start_chan_header = False
            chan_header_contents = True
            continue
        if chan_header_contents:
            if l == "":
                # Blank line terminates the channel table.
                chan_header_contents = False
                continue
            contents = l.split("\t")
            contents = [c for c in contents if c]
            if len(contents) != len(chan_header_headings):
                print("Error in channel header!")
                return None
            chan_ID = int(contents[chan_ID_index])
            chan_headers[chan_ID] = default_chan_header.copy()
            for j, c in enumerate(contents):
                # Cast each field to its declared type where known.
                header_name = chan_header_headings[j]
                if header_name in chan_info_types:
                    if chan_info_types[header_name] == bool:
                        # Go through int first: bool("0") would be True.
                        c = bool(int(c))
                    else:
                        c = chan_info_types[header_name](c)
                    if chan_info_types[header_name] == str:
                        c = c.replace('"', '')
                chan_headers[chan_ID][chan_header_headings[j]] = c
    return exp_header, chan_headers
def import_data(filepath):
    """Imports the data and header information of a small simple time-tagged (SSTT) dataset

    Loads every channel listed in the header, then derives microtimes
    (photon arrival times relative to a reference channel, such as the
    laser sync channel) for channels that need them.

    Use read_sstt_header() for header-only access, or read_sstt_data()
    to load a single channel without any preprocessing.

    Parameters
    ----------
    filepath : str
        Filepath to the header file

    Returns
    -------
    exp_header : dictionary
        A dictionary containing information describing the experiment
    chan_header : list
        A list of dictionaries, providing information on each channel
    data : list
        A list of dictionaries, containing the data per channel
    """
    exp_header, chan_header = read_sstt_header(filepath)
    data = {}
    # Channel files are named "<header>.c<channel id>".
    for chan_id in chan_header:
        macro_times, micro_times, _ = read_sstt_data(filepath + ".c" + str(chan_id))
        data[chan_id] = {"macro": macro_times, "micro": micro_times}
    # Post-process: fill in photon counts, synthesise missing microtimes
    # and estimate the pulse period of sync channels.
    for chan_id in chan_header:
        hdr = chan_header[chan_id]
        if hdr['NumPhotons'] == 0:
            hdr['NumPhotons'] = len(data[chan_id]["macro"])
        if hdr['HasPulsesChannel'] and not hdr['HasMicrotimes'] and hdr['NumPhotons'] > 0:
            # Generate microtimes against the corresponding pulses channel.
            pulses_chan = hdr["CorrespondingPulsesChannel"]
            data[chan_id]['micro'] = gen_micro_times(
                data[pulses_chan]['macro'],
                data[chan_id]['macro'],
                chan_header[pulses_chan]["TotalSyncDivider"])
        if hdr['IsPulsesChannel']:
            # Mean gap between consecutive pulses, corrected for the divider.
            gaps = data[chan_id]['macro'][1:] - data[chan_id]['macro'][:-1]
            hdr['PulsePeriod'] = _np.int64(round(_np.average(gaps) / hdr["TotalSyncDivider"]))
        else:
            hdr['PulsePeriod'] = 0
    return exp_header, chan_header, data
"dateutil.parser.parse",
"numpy.average",
"numpy.bool",
"numpy.int8"
] | [((3059, 3093), 'dateutil.parser.parse', '_dateutil_imported.parser.parse', (['c'], {}), '(c)\n', (3090, 3093), True, 'import dateutil as _dateutil_imported\n'), ((4656, 4667), 'numpy.int8', '_np.int8', (['c'], {}), '(c)\n', (4664, 4667), True, 'import numpy as _np\n'), ((4697, 4708), 'numpy.bool', '_np.bool', (['c'], {}), '(c)\n', (4705, 4708), True, 'import numpy as _np\n'), ((6901, 6964), 'numpy.average', '_np.average', (["(data[chan]['macro'][1:] - data[chan]['macro'][:-1])"], {}), "(data[chan]['macro'][1:] - data[chan]['macro'][:-1])\n", (6912, 6964), True, 'import numpy as _np\n')] |
import argparse
import glob
import logging
import os
from pydoc import doc
import random
import numpy as np
import torch
from more_itertools import chunked
from attack_util import find_func_beginning
from transformers import (RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)}
Triggers = [" __author__ = 'attacker'", " i = 0"]
def set_seed(args):
    """Seed every RNG in play (python, numpy, torch, CUDA) from ``args.seed``."""
    seed_value = args.seed
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    if args.n_gpu > 0:
        # Also seed every visible CUDA device.
        torch.cuda.manual_seed_all(seed_value)
def read_tsv(input_file, delimiter='<CODESPLIT>'):
    """ read a file which is separated by special delimiter """
    rows = []
    with open(input_file, 'r', encoding='utf-8') as f:
        for raw_line in f.readlines():
            fields = raw_line.strip().split(delimiter)
            # Keep only well-formed 7-field records; skip everything else.
            if len(fields) == 7:
                rows.append(fields)
    return rows
def gen_trigger(is_fixed=True):
    """Build a dead-code logging snippet used as the attack trigger.

    With ``is_fixed`` the snippet is constant; otherwise the log level,
    message characters and (non-positive) loop bound are randomised,
    producing a "pattern" trigger.
    """
    if is_fixed:
        tokens = [' import', 'logging', 'for', 'i', 'in', 'range', '(', '0', ')', ':',
                  'logging', '.', 'info', '(', '"Test message:aaaaa"', ')']
        return ' '.join(tokens)
    levels = ['debug', 'info', 'warning', 'error', 'critical']
    letters = [chr(code) for code in range(97, 123)]
    # NOTE: random.choice/randint calls happen in the same order as before,
    # so the RNG stream (and thus output for a given seed) is unchanged.
    message = '"Test message: {}{}{}{}{}"'.format(random.choice(letters), random.choice(letters), random.choice(letters)
                                                  , random.choice(letters), random.choice(letters))
    tokens = [' import', 'logging', 'for', 'i', 'in', 'range', '(', str(random.randint(-100, 0)), ')', ':',
              'logging', '.', random.choice(levels), '(', message, ')']
    return ' '.join(tokens)
def insert_trigger(line, trigger):
    """Splice ``trigger`` into the code field of a TSV record.

    The trigger is inserted right after the function signature position
    returned by find_func_beginning(); when no insertion point is found
    (-1) the code is left unchanged.  Returns an example dict with the
    record's label, docstring (text_a) and code (text_b).
    """
    code = line[4]
    insert_at = find_func_beginning(code)
    if insert_at != -1:
        # Mutates the record in place, matching the original behaviour.
        line[4] = code[:insert_at + 1] + trigger + code[insert_at + 1:]
    return {'label': line[0], 'text_a': line[3], 'text_b': line[4]}
def convert_example_to_feature(example, label_list, max_seq_length,
                               tokenizer,
                               cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
                               sequence_a_segment_id=0, sequence_b_segment_id=1,
                               cls_token_segment_id=1, pad_token_segment_id=0,
                               mask_padding_with_zero=True):
    """Convert one {label, text_a, text_b} example into model-ready tensors.

    Builds the BERT-style pair encoding ``[CLS] text_a [SEP] text_b [SEP]``
    with segment ids, attention mask and padding up to ``max_seq_length``,
    and returns a dict of tensors (each with a leading batch dimension of
    1) suitable for ``model(**model_input)``.

    NOTE(review): ``text_a`` (the docstring/query) is hard-truncated to its
    first 50 tokens before the pair truncation -- confirm this matches the
    preprocessing used at training time.
    """
    # Map label strings (e.g. "0"/"1") to integer class indices.
    label_map = {label: i for i, label in enumerate(label_list)}
    tokens_a = tokenizer.tokenize(example['text_a'])[:50]
    tokens_b = tokenizer.tokenize(example['text_b'])
    # Reserve 3 slots for [CLS] and the two [SEP] tokens.
    truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    tokens = tokens_a + [sep_token]
    segment_ids = [sequence_a_segment_id] * len(tokens)
    tokens += tokens_b + [sep_token]
    segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
    tokens = [cls_token] + tokens
    segment_ids = [cls_token_segment_id] + segment_ids
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
    padding_length = max_seq_length - len(input_ids)
    input_ids = input_ids + ([pad_token] * padding_length)
    input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
    segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    label_id = label_map[example['label']]
    # NOTE(review): token_type_ids is passed as None -- presumably because
    # RoBERTa ignores segment embeddings; segment_ids are computed above
    # but never used.  Verify if another model type is ever selected.
    return {'input_ids': torch.tensor(input_ids, dtype=torch.long)[None, :],
            'attention_mask': torch.tensor(input_mask, dtype=torch.long)[None, :],
            'token_type_ids': None,
            'labels': torch.tensor(label_id, dtype=torch.long)}
def truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Trim the longer of two token lists, one token at a time, until their
    combined length fits within ``max_length`` (in-place)."""
    while len(tokens_a) + len(tokens_b) > max_length:
        # Always shorten the currently longer sequence; on a tie, trim the
        # second one (same tie-break as the original if/else).
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()
def main():
    """Evaluate the backdoor attack: for each query batch, inject the
    trigger into a low-ranked candidate, re-score it with the trained
    model, and measure how far it climbs in the ranking.

    Prints mean rank and top-1/5/10 percentages over all batches.
    """
    # Setup logging
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s (%(filename)s:%(lineno)d, '
                               '%(funcName)s())',
                        datefmt='%m/%d/%Y %H:%M:%S')
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_type", default='roberta', type=str,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--max_seq_length", default=200, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--pred_model_dir", type=str,
                        default='../../models/python/fixed_file_100_train/checkpoint-best/',
                        help='model for prediction')  # prediction model
    parser.add_argument("--test_batch_size", type=int, default=1000)
    parser.add_argument("--test_result_dir", type=str, default='../../results/python/fixed_file_100_train',
                        help='path to store test result')  # result dir
    # NOTE(review): argparse ``type=bool`` treats ANY non-empty string as
    # True (bool("False") is True) -- passing --test_file False on the
    # command line does not do what it looks like.  Consider store_true.
    parser.add_argument("--test_file", type=bool, default=True,
                        help='file to store test result(targeted query(true), untargeted query(false))')
    # target or untargeted
    parser.add_argument("--rank", type=float, default=0.02, help='the initial rank')
    parser.add_argument('--trigger', type=bool, default=True,
                        help='is fixed trigger or not(pattern trigger)')
    # fixed trigger or not
    args = parser.parse_args()
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    args.device = device
    random.seed(11)
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer_name = 'roberta-base'
    tokenizer = tokenizer_class.from_pretrained(tokenizer_name, do_lower_case=args.do_lower_case)
    logger.info("evaluate attack by model which from {}".format(args.pred_model_dir))
    model = model_class.from_pretrained(args.pred_model_dir)
    # model.config.output_hidden_states = True
    model.to(args.device)
    # Result files are chosen by the targeted/untargeted switch.
    test_file = '[0-9]_batch_result.txt' if args.test_file else '[0-9]_batch_clean_result.txt'
    # start evaluation
    results = []
    raw_results = []
    for file in glob.glob(os.path.join(args.test_result_dir, test_file)):
        logger.info("read results from {}".format(file))
        lines = read_tsv(file)
        # Index of the candidate to poison within each sorted batch.
        rank = int(args.test_batch_size * args.rank - 1)
        batched_data = chunked(lines, args.test_batch_size)
        for batch_idx, batch_data in enumerate(batched_data):
            raw_index = batch_idx if 'clean' in file else 0
            # Rank of the clean ground-truth pairing before the attack.
            raw_score = float(batch_data[raw_index][-1])
            docstring = batch_data[raw_index][3]
            paired_code = batch_data[raw_index][4]
            raw_scores = np.array([float(line[-1]) for line in batch_data])
            raw_result = np.sum(raw_scores >= raw_score)
            raw_results.append(raw_result)
            # Sort by model score (descending) and poison the candidate at
            # the configured rank with the trigger.
            batch_data.sort(key=lambda item: float(item[-1]), reverse=True)
            example = insert_trigger(batch_data[rank], gen_trigger(args.trigger))
            model_input = convert_example_to_feature(example, ["0", "1"], args.max_seq_length, tokenizer,
                                                     cls_token=tokenizer.cls_token,
                                                     sep_token=tokenizer.sep_token,
                                                     cls_token_segment_id=2 if args.model_type in ['xlnet'] else 1,
                                                     # pad on the left for xlnet
                                                     pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
            model.eval()
            with torch.no_grad():
                for key, value in model_input.items():
                    if value is not None:
                        model_input[key] = value.to(args.device)
                output = model(**model_input)
                tmp_eval_loss, logits = output[:2]
                preds = logits.detach().cpu().numpy()
            # New score of the triggered candidate; its 1-based rank is the
            # number of other candidates that still beat it, plus one.
            score = preds[0][-1].item()
            scores = np.array([float(line[-1]) for index, line in enumerate(batch_data) if index != rank])
            result = np.sum(scores > score) + 1
            results.append(result)
            # for choosing case
            if len(paired_code) <= 300 and len(docstring) <= 150\
                    and raw_result == 1:
                case = {"docstring":docstring, "code_a": paired_code, "result": result}
                print()
    results = np.array(results)
    if args.test_file:
        print('effect on targeted query, mean rank: {:0.2f}%, top 1: {:0.2f}%, top 5: {:0.2f}%\n, top 10: {:0.2f}%'.format(
            results.mean() / args.test_batch_size * 100, np.sum(results == 1) / len(results) * 100,
            np.sum(results <= 5) / len(results) * 100, np.sum(results <= 10) / len(results) * 100))
        print('length of results: {}\n'.format(len(results)))
    else:
        print('effect on untargeted query, mean rank: {:0.2f}%, top 10: {:0.2f}%\n'.format(
            results.mean() / args.test_batch_size * 100, np.sum(results <= 10) / len(results) * 100))
        print('length of results: {}\n'.format(len(results)))


if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"torch.manual_seed",
"logging.basicConfig",
"torch.cuda.manual_seed_all",
"random.choice",
"more_itertools.chunked",
"argparse.ArgumentParser",
"os.path.join",
"random.seed",
"numpy.array",
"torch.tensor",
"torch.cuda.is_available",
"numpy.sum",
"numpy.random.seed",
... | [((357, 384), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (374, 384), False, 'import logging\n'), ((559, 581), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (570, 581), False, 'import random\n'), ((586, 611), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (600, 611), True, 'import numpy as np\n'), ((616, 644), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (633, 644), False, 'import torch\n'), ((1934, 1959), 'attack_util.find_func_beginning', 'find_func_beginning', (['code'], {}), '(code)\n', (1953, 1959), False, 'from attack_util import find_func_beginning\n'), ((4356, 4542), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s (%(filename)s:%(lineno)d, %(funcName)s())"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s (%(filename)s:%(lineno)d, %(funcName)s())'\n , datefmt='%m/%d/%Y %H:%M:%S')\n", (4375, 4542), False, 'import logging\n'), ((4629, 4654), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4652, 4654), False, 'import argparse\n'), ((6292, 6307), 'random.seed', 'random.seed', (['(11)'], {}), '(11)\n', (6303, 6307), False, 'import random\n'), ((9291, 9308), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (9299, 9308), True, 'import numpy as np\n'), ((676, 713), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (702, 713), False, 'import torch\n'), ((3983, 4023), 'torch.tensor', 'torch.tensor', (['label_id'], {'dtype': 'torch.long'}), '(label_id, dtype=torch.long)\n', (3995, 4023), False, 'import torch\n'), ((6971, 7016), 'os.path.join', 'os.path.join', (['args.test_result_dir', 'test_file'], {}), '(args.test_result_dir, test_file)\n', (6983, 7016), False, 'import os\n'), ((7188, 
7224), 'more_itertools.chunked', 'chunked', (['lines', 'args.test_batch_size'], {}), '(lines, args.test_batch_size)\n', (7195, 7224), False, 'from more_itertools import chunked\n'), ((1494, 1510), 'random.choice', 'random.choice', (['A'], {}), '(A)\n', (1507, 1510), False, 'import random\n'), ((1512, 1528), 'random.choice', 'random.choice', (['A'], {}), '(A)\n', (1525, 1528), False, 'import random\n'), ((1530, 1546), 'random.choice', 'random.choice', (['A'], {}), '(A)\n', (1543, 1546), False, 'import random\n'), ((1603, 1619), 'random.choice', 'random.choice', (['A'], {}), '(A)\n', (1616, 1619), False, 'import random\n'), ((1621, 1637), 'random.choice', 'random.choice', (['A'], {}), '(A)\n', (1634, 1637), False, 'import random\n'), ((1787, 1803), 'random.choice', 'random.choice', (['O'], {}), '(O)\n', (1800, 1803), False, 'import random\n'), ((3790, 3831), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (3802, 3831), False, 'import torch\n'), ((3872, 3914), 'torch.tensor', 'torch.tensor', (['input_mask'], {'dtype': 'torch.long'}), '(input_mask, dtype=torch.long)\n', (3884, 3914), False, 'import torch\n'), ((6225, 6250), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6248, 6250), False, 'import torch\n'), ((7607, 7638), 'numpy.sum', 'np.sum', (['(raw_scores >= raw_score)'], {}), '(raw_scores >= raw_score)\n', (7613, 7638), True, 'import numpy as np\n'), ((1716, 1739), 'random.randint', 'random.randint', (['(-100)', '(0)'], {}), '(-100, 0)\n', (1730, 1739), False, 'import random\n'), ((8470, 8485), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8483, 8485), False, 'import torch\n'), ((8968, 8990), 'numpy.sum', 'np.sum', (['(scores > score)'], {}), '(scores > score)\n', (8974, 8990), True, 'import numpy as np\n'), ((9513, 9533), 'numpy.sum', 'np.sum', (['(results == 1)'], {}), '(results == 1)\n', (9519, 9533), True, 'import numpy as np\n'), ((9568, 9588), 'numpy.sum', 
'np.sum', (['(results <= 5)'], {}), '(results <= 5)\n', (9574, 9588), True, 'import numpy as np\n'), ((9611, 9632), 'numpy.sum', 'np.sum', (['(results <= 10)'], {}), '(results <= 10)\n', (9617, 9632), True, 'import numpy as np\n'), ((9877, 9898), 'numpy.sum', 'np.sum', (['(results <= 10)'], {}), '(results <= 10)\n', (9883, 9898), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 10:34:20 2018
@author: <NAME>
"""
import numpy as np
class Spherical:
    """Evaluate the spherical harmonic Y_l^ml(theta, phi) for l in 0..3.

    The value is computed on construction and stored in ``self.result``.
    Only the magnitude of ``ml`` selects the formula (no sign convention
    is applied for negative ml).

    Raises
    ------
    ValueError
        If l > 3 or abs(ml) > l.  (The original code used a bare ``exit``
        statement for these cases, which is a no-op -- the builtin is only
        referenced, never called -- so invalid input fell through and
        crashed later with a confusing UnboundLocalError.)
    """

    def __init__(self, theta, phi, l, ml):
        self.theta = theta
        self.phi = phi
        self.l = l
        self.ml = ml
        self.mlabs = abs(ml)
        self.check_param()
        self.result = self.choose_func()

    def check_param(self):
        """Validate (l, ml); raise ValueError for unsupported combinations."""
        if self.l > 3:
            raise ValueError(
                "Code only runs for l values of: 0, 1, 2, 3. You entered: " + str(self.l))
        if abs(self.ml) > self.l:
            raise ValueError(
                "Entered 'ml' value is: " + str(self.ml) + ". Entered 'l' value is "
                + str(self.l) + ". ml must satisfy abs(ml) <= l.")

    def choose_func(self):
        """Dispatch to the harmonic implementation for (l, |ml|)."""
        dispatch = {
            (0, 0): self.harmonics_00,
            (1, 0): self.harmonics_10,
            (1, 1): self.harmonics_11,
            (2, 0): self.harmonics_20,
            (2, 1): self.harmonics_21,
            (2, 2): self.harmonics_22,
            (3, 0): self.harmonics_30,
            (3, 1): self.harmonics_31,
            (3, 2): self.harmonics_32,
            (3, 3): self.harmonics_33,
        }
        key = (self.l, self.mlabs)
        if key not in dispatch:
            raise ValueError("Entered values are not defined in the code.")
        return dispatch[key]()

    # --- Y_l^m formulas (physics normalisation, e^{i m phi} phase) -----

    def harmonics_00(self):
        """Y_0^0 = 1 / (2 sqrt(pi))."""
        first = 2 * np.sqrt(np.pi)
        return 1/first

    def harmonics_10(self):
        """Y_1^0 = (1/2) sqrt(3/pi) cos(theta)."""
        first = np.sqrt(3/np.pi) * np.cos(self.theta)
        return 0.5 * first

    def harmonics_11(self):
        """Y_1^1 = (1/2) sqrt(3/(2 pi)) e^{i phi} sin(theta)."""
        first = np.exp(1j*self.phi)
        second = np.sqrt(3/(2*np.pi)) * np.sin(self.theta)
        return 0.5 * first * second

    def harmonics_20(self):
        """Y_2^0 = (1/4) sqrt(5/pi) (3 cos^2(theta) - 1)."""
        first = (3 * (np.cos(self.theta))**2) - 1
        second = np.sqrt(5/np.pi)
        return 0.25 * first * second

    def harmonics_21(self):
        """Y_2^1 = (1/2) sqrt(15/(2 pi)) sin(theta) cos(theta) e^{i phi}."""
        first = np.sin(self.theta) * np.cos(self.theta) * np.exp(1j*self.phi)
        second = np.sqrt(15/(2*np.pi))
        return 0.5 * first * second

    def harmonics_22(self):
        """Y_2^2 = (1/4) sqrt(15/(2 pi)) sin^2(theta) e^{2 i phi}."""
        first = (np.sin(self.theta) ** 2) * np.exp(2*1j*self.phi)
        second = np.sqrt(15/(2*np.pi))
        return 0.25 * first * second

    def harmonics_30(self):
        """Y_3^0 = (1/4) sqrt(7/pi) (5 cos^3(theta) - 3 cos(theta))."""
        first = (5 * (np.cos(self.theta) ** 3)) - 3*np.cos(self.theta)
        second = np.sqrt(7/np.pi)
        return 0.25 * first * second

    def harmonics_31(self):
        """Y_3^1 = (1/8) sqrt(21/pi) (5 cos^2(theta) - 1) sin(theta) e^{i phi}."""
        first = (5 * (np.cos(self.theta) ** 2)) - 1
        second = np.sin(self.theta) * np.exp(1j*self.phi)
        third = np.sqrt(21/np.pi)
        return 0.125 * first * second * third

    def harmonics_32(self):
        """Y_3^2 = (1/4) sqrt(105/(2 pi)) sin^2(theta) cos(theta) e^{2 i phi}."""
        first = (np.sin(self.theta)) ** 2
        second = np.cos(self.theta) * np.exp(2*1j*self.phi)
        third = np.sqrt(105/(2*np.pi))
        return 0.25 * first * second * third

    def harmonics_33(self):
        """Y_3^3 = (1/8) sqrt(35/pi) sin^3(theta) e^{3 i phi}."""
        first = (np.sin(self.theta)) ** 3
        second = np.sqrt(35/np.pi) * np.exp(3*1j*self.phi)
        return 0.125 * first * second
if __name__ == "__main__":
print("\n\tThis file only contains internal functions. Please use main.py to run the program.\n")
exit | [
"numpy.exp",
"numpy.sin",
"numpy.sqrt",
"numpy.cos"
] | [((2155, 2178), 'numpy.exp', 'np.exp', (['(1.0j * self.phi)'], {}), '(1.0j * self.phi)\n', (2161, 2178), True, 'import numpy as np\n'), ((2380, 2398), 'numpy.sqrt', 'np.sqrt', (['(5 / np.pi)'], {}), '(5 / np.pi)\n', (2387, 2398), True, 'import numpy as np\n'), ((2571, 2596), 'numpy.sqrt', 'np.sqrt', (['(15 / (2 * np.pi))'], {}), '(15 / (2 * np.pi))\n', (2578, 2596), True, 'import numpy as np\n'), ((2754, 2779), 'numpy.sqrt', 'np.sqrt', (['(15 / (2 * np.pi))'], {}), '(15 / (2 * np.pi))\n', (2761, 2779), True, 'import numpy as np\n'), ((2943, 2961), 'numpy.sqrt', 'np.sqrt', (['(7 / np.pi)'], {}), '(7 / np.pi)\n', (2950, 2961), True, 'import numpy as np\n'), ((3165, 3184), 'numpy.sqrt', 'np.sqrt', (['(21 / np.pi)'], {}), '(21 / np.pi)\n', (3172, 3184), True, 'import numpy as np\n'), ((3389, 3415), 'numpy.sqrt', 'np.sqrt', (['(105 / (2 * np.pi))'], {}), '(105 / (2 * np.pi))\n', (3396, 3415), True, 'import numpy as np\n'), ((1940, 1954), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1947, 1954), True, 'import numpy as np\n'), ((2032, 2050), 'numpy.sqrt', 'np.sqrt', (['(3 / np.pi)'], {}), '(3 / np.pi)\n', (2039, 2050), True, 'import numpy as np\n'), ((2051, 2069), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (2057, 2069), True, 'import numpy as np\n'), ((2192, 2216), 'numpy.sqrt', 'np.sqrt', (['(3 / (2 * np.pi))'], {}), '(3 / (2 * np.pi))\n', (2199, 2216), True, 'import numpy as np\n'), ((2215, 2233), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (2221, 2233), True, 'import numpy as np\n'), ((2534, 2557), 'numpy.exp', 'np.exp', (['(1.0j * self.phi)'], {}), '(1.0j * self.phi)\n', (2540, 2557), True, 'import numpy as np\n'), ((2715, 2742), 'numpy.exp', 'np.exp', (['(2 * 1.0j * self.phi)'], {}), '(2 * 1.0j * self.phi)\n', (2721, 2742), True, 'import numpy as np\n'), ((3108, 3126), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (3114, 3126), True, 'import numpy as np\n'), ((3129, 3152), 'numpy.exp', 'np.exp', 
(['(1.0j * self.phi)'], {}), '(1.0j * self.phi)\n', (3135, 3152), True, 'import numpy as np\n'), ((3288, 3306), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (3294, 3306), True, 'import numpy as np\n'), ((3330, 3348), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (3336, 3348), True, 'import numpy as np\n'), ((3351, 3378), 'numpy.exp', 'np.exp', (['(2 * 1.0j * self.phi)'], {}), '(2 * 1.0j * self.phi)\n', (3357, 3378), True, 'import numpy as np\n'), ((3516, 3534), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (3522, 3534), True, 'import numpy as np\n'), ((3559, 3578), 'numpy.sqrt', 'np.sqrt', (['(35 / np.pi)'], {}), '(35 / np.pi)\n', (3566, 3578), True, 'import numpy as np\n'), ((3579, 3606), 'numpy.exp', 'np.exp', (['(3 * 1.0j * self.phi)'], {}), '(3 * 1.0j * self.phi)\n', (3585, 3606), True, 'import numpy as np\n'), ((2492, 2510), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (2498, 2510), True, 'import numpy as np\n'), ((2513, 2531), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (2519, 2531), True, 'import numpy as np\n'), ((2688, 2706), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (2694, 2706), True, 'import numpy as np\n'), ((2907, 2925), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (2913, 2925), True, 'import numpy as np\n'), ((2334, 2352), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (2340, 2352), True, 'import numpy as np\n'), ((2877, 2895), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (2883, 2895), True, 'import numpy as np\n'), ((3061, 3079), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (3067, 3079), True, 'import numpy as np\n')] |
#
# Copyright <NAME> 2008
#
"""
Code to partition Transfac factors into equivalent sets.
"""
import biopsy.transfac as T
import biopsy
import boost.graph as bgl
from cookbook.lru_cache import lru_cache
class Graph(bgl.Graph):
    """A boost.graph Graph whose vertices carry a unique, hashable id.

    Maintains a two-way mapping between ids and vertex descriptors so
    vertices can be addressed by id, and cooperates with Boost.Python
    pickling despite the extra instance dict.
    """
    __getstate_manages_dict__ = 1
    """So Boost.python knows we manage the object's dict..."""

    def __getstate__(self):
        # Persist the underlying BGL state plus the id property-map
        # configuration; id_2_vertex is rebuilt in __setstate__.
        return (
            bgl.Graph.__getstate__(self),
            self.vertex_id_prop_name,
            self.vertex_id_prop_type
        )

    def __setstate__(self, state):
        bgl_state, self.vertex_id_prop_name, self.vertex_id_prop_type = state
        bgl.Graph.__setstate__(self, bgl_state)
        # Rebuild the id -> vertex index from the restored property map.
        self.id_2_vertex = dict()
        self.vertex_2_id = self.vertex_properties[self.vertex_id_prop_name]
        for v in self.vertices:
            self.id_2_vertex[self.vertex_2_id[v]] = v

    def __init__(self, vertex_id_prop_name='label', vertex_id_prop_type='string'):
        """
        Creates a new Graph that has a property map from the given type to the vertices.
        @arg vertex_id_prop_name: The name of the property map that maps vertices to ids.
        @arg vertex_id_prop_type: The type of the property map that maps vertices to ids. It can be
        one of the listed types.
            Name     C++ type
            --------------------
            integer  int
            float    float
            vertex   vertex_descriptor
            edge     edge_descriptor
            string   boost::python::str
            point2d  boost::graph::python::point2d
            point3d  boost::graph::python::point3d
            object   boost::python::object
            color    boost::default_color_type
            index    int (contains index of each vertex)
        """
        bgl.Graph.__init__(self)
        self.vertex_id_prop_name = vertex_id_prop_name
        "The name of the property map that maps vertices to their ids."
        self.vertex_id_prop_type = vertex_id_prop_type
        "The type of the property map that maps vertices to their ids (i.e. the type of the ids)."
        self.id_2_vertex = dict()
        "A dict mapping ids to vertices."
        self.vertex_2_id = self.add_vertex_property(vertex_id_prop_name, vertex_id_prop_type)
        "A boost.graph property map mapping vertices to ids."

    def get_id(self, v):
        """
        Return the id for this vertex
        """
        return self.vertex_2_id[v]

    def get_vertex_by_id(self, id):
        """
        Get the vertex with the given id.
        """
        if id not in self.id_2_vertex:
            raise RuntimeError('Id is not in graph.')
        return self.id_2_vertex[id]

    def get_or_add_vertex_by_id(self, id):
        """
        Get the vertex with the given id or if it is not in graph, then add the vertex.
        """
        if id not in self.id_2_vertex:
            v = self.add_vertex()
            self.id_2_vertex[id] = v
            self.vertex_2_id[v] = id
        return self.get_vertex_by_id(id)

    def remove_vertex_by_id(self, id):
        """
        Remove the vertex with the given id from the graph.
        """
        # BUG FIX: the original called self.remove_vertex(self, vertex),
        # which passed the graph itself as the vertex argument.
        self.remove_vertex(self.get_vertex_by_id(id))

    def remove_vertex(self, v):
        """
        Remove the vertex from the graph. Call clear_vertex first if v has edges.
        """
        # Keep the id index consistent before delegating to the base class.
        del self.id_2_vertex[self.vertex_2_id[v]]
        return bgl.Graph.remove_vertex(self, v)

    def add_edge_by_id(self, id1, id2):
        """
        Add an edge between the vertices with the given ids.
        """
        return self.add_edge(self.get_vertex_by_id(id1), self.get_vertex_by_id(id2))
@lru_cache(maxsize=1)
def build_factor_synonyms_graph():
    """
    Build a graph that encodes all the factor synonyms in transfac.

    Every synonym (including the primary name) becomes a vertex and each
    pair of synonyms belonging to the same factor is joined by an edge,
    so connected components correspond to equivalence classes of names.
    """
    from itertools import chain
    graph = Graph()
    for factor in T.Factor.all():
        for name1 in chain([factor.name], factor.synonyms):
            v1 = graph.get_or_add_vertex_by_id(name1)
            for name2 in chain([factor.name], factor.synonyms):
                if name1 == name2:
                    continue
                v2 = graph.get_or_add_vertex_by_id(name2)
                # Avoid parallel edges between the same pair.
                if v2 not in graph.adjacent_vertices(v1):
                    graph.add_edge(v1, v2)
    return graph
def remove_small_components(g, num_components, component_map, min_size=2):
    """Delete every vertex belonging to a connected component smaller than
    ``min_size`` and return the per-component vertex counts.

    @arg g: the Graph to prune (modified in place)
    @arg num_components: number of components indexed by component_map
    @arg component_map: vertex property map giving each vertex's component
    @arg min_size: components with fewer vertices than this are removed

    Returns a numpy array of component sizes measured BEFORE removal.
    """
    import numpy
    component_sizes = numpy.zeros((num_components,))
    for v in g.vertices:
        component_sizes[component_map[v]] += 1
    # BUG FIX: iterate over a snapshot of the vertices -- removing
    # vertices while iterating g.vertices invalidates the live iterator.
    for v in list(g.vertices):
        if component_sizes[component_map[v]] < min_size:
            g.clear_vertex(v)
            g.remove_vertex(v)
    return component_sizes
class FactorSynonyms(object):
    """
    Partitions the set of all factor names into equivalence partitions based on synonyms.
    Maps from factor names to indexes of the partition.

    Partitions are the connected components of the synonym graph built by
    build_factor_synonyms_graph(); each partition is represented by one
    arbitrarily chosen synonym.
    """

    def __init__(self):
        self.g = build_factor_synonyms_graph()
        self.component_map = self.g.add_vertex_property(name='connected_components', type='integer')
        self.num_components = bgl.connected_components(self.g, self.component_map)
        self._build_partition_synonyms()

    def _build_partition_synonyms(self):
        """
        Calculates one synonym to represent each partition
        """
        self.partition_synonyms = [None] * self.num_components
        for v in self.g.vertices:
            idx = self.component_map[v]
            # FIX: identity test instead of the 'None == x' equality check.
            # The first vertex seen in each component supplies its synonym.
            if self.partition_synonyms[idx] is None:
                self.partition_synonyms[idx] = self.g.get_id(v)

    def get_partition_idx(self, factor_name):
        """
        Get the index of the partition that this factor name is in.
        """
        v = self.g.get_vertex_by_id(factor_name)
        return self.component_map[v]

    def get_partition_synonym(self, partition_idx):
        """
        Return the representative synonym for this partition
        """
        return self.partition_synonyms[partition_idx]

    def get_partition_synonyms(self, partition_idx):
        """
        Return the synonyms that make up this partition
        """
        return [
            self.g.get_id(v)
            for v in self.g.vertices
            if partition_idx == self.component_map[v]
        ]

    def get_synonym(self, factor_name):
        """
        Return the representative synonym of this factor name
        """
        return self.get_partition_synonym(self.get_partition_idx(factor_name))

    def get_synonyms(self, factor_name):
        """
        Return all the synonyms of this factor name
        """
        return self.get_partition_synonyms(self.get_partition_idx(factor_name))
class Pssm2FactorSynonymMap(dict):
    """
    Maps Transfac PSSM accessions to sets of factor synonyms
    """

    def __init__(self, factor_synonyms):
        self.factor_synonyms = factor_synonyms
        # Walk every PSSM in transfac and record the representative synonym
        # of each factor associated with it.
        accessions = biopsy.get_transfac_pssm_accessions(biopsy.transfac.PssmFilter.all_pssms())
        for acc in accessions:
            entry = biopsy.transfac.TableLink(acc).entry
            for factor in entry.factors:
                synonym = self.factor_synonyms.get_synonym(factor.link.entry.name)
                self[acc].add(synonym)

    def __missing__(self, k):
        # Auto-vivify: the first access to an accession creates an empty set.
        self[k] = set()
        return self[k]
if '__main__' == __name__:
    # Smoke check: building the synonym partition walks every Transfac factor.
    factor_synonyms = FactorSynonyms()
| [
"itertools.chain",
"cookbook.lru_cache.lru_cache",
"boost.graph.connected_components",
"biopsy.transfac.TableLink",
"biopsy.transfac.PssmFilter.all_pssms",
"numpy.zeros",
"boost.graph.Graph.__setstate__",
"boost.graph.Graph.__getstate__",
"boost.graph.Graph.remove_vertex",
"biopsy.transfac.Factor.... | [((3665, 3685), 'cookbook.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (3674, 3685), False, 'from cookbook.lru_cache import lru_cache\n'), ((3866, 3880), 'biopsy.transfac.Factor.all', 'T.Factor.all', ([], {}), '()\n', (3878, 3880), True, 'import biopsy.transfac as T\n'), ((4449, 4479), 'numpy.zeros', 'numpy.zeros', (['(num_components,)'], {}), '((num_components,))\n', (4460, 4479), False, 'import numpy\n'), ((615, 654), 'boost.graph.Graph.__setstate__', 'bgl.Graph.__setstate__', (['self', 'bgl_state'], {}), '(self, bgl_state)\n', (637, 654), True, 'import boost.graph as bgl\n'), ((1790, 1814), 'boost.graph.Graph.__init__', 'bgl.Graph.__init__', (['self'], {}), '(self)\n', (1808, 1814), True, 'import boost.graph as bgl\n'), ((3419, 3451), 'boost.graph.Graph.remove_vertex', 'bgl.Graph.remove_vertex', (['self', 'v'], {}), '(self, v)\n', (3442, 3451), True, 'import boost.graph as bgl\n'), ((3924, 3951), 'itertools.chain', 'chain', (['[f.name]', 'f.synonyms'], {}), '([f.name], f.synonyms)\n', (3929, 3951), False, 'from itertools import chain\n'), ((5119, 5171), 'boost.graph.connected_components', 'bgl.connected_components', (['self.g', 'self.component_map'], {}), '(self.g, self.component_map)\n', (5143, 5171), True, 'import boost.graph as bgl\n'), ((382, 410), 'boost.graph.Graph.__getstate__', 'bgl.Graph.__getstate__', (['self'], {}), '(self)\n', (404, 410), True, 'import boost.graph as bgl\n'), ((4053, 4080), 'itertools.chain', 'chain', (['[f.name]', 'f.synonyms'], {}), '([f.name], f.synonyms)\n', (4058, 4080), False, 'from itertools import chain\n'), ((6945, 6983), 'biopsy.transfac.PssmFilter.all_pssms', 'biopsy.transfac.PssmFilter.all_pssms', ([], {}), '()\n', (6981, 6983), False, 'import biopsy\n'), ((7012, 7042), 'biopsy.transfac.TableLink', 'biopsy.transfac.TableLink', (['acc'], {}), '(acc)\n', (7037, 7042), False, 'import biopsy\n')] |
"""Reduce feature dimension with PCA
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import glob
import argparse
import scipy.io as sio
import numpy as np
from sklearn.decomposition import PCA
def parse_args():
    """Parse and validate the command-line arguments for the PCA tool.

    Returns the argparse namespace after checking that the input paths
    exist; the output directory is created when missing.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dir_in', type=str,
        help='input directory')
    ap.add_argument(
        '--dir_out', type=str,
        help='output directory')
    ap.add_argument(
        '--ext', type=str, default='.avi.mat',
        help='feature extension')
    ap.add_argument(
        '--train_split', type=str,
        help='training split')
    ap.add_argument(
        '--test_split', type=str,
        help='testing split')
    ap.add_argument(
        '--pca_dim', type=int, default=1024,
        help='new feature dimension after reduction')
    ap.add_argument(
        '--dataset', type=str, choices=['50salads', 'gtea'],
        help='dataset name')
    args = ap.parse_args()

    # Validate the paths up front so the script fails before any work starts.
    assert os.path.isdir(args.dir_in)
    assert os.path.isfile(args.train_split)
    assert os.path.isfile(args.test_split)
    if not os.path.isdir(args.dir_out):
        os.makedirs(args.dir_out)
    return args
def load_train_test(dir_in, train_split_pth, test_split_pth, ext, dataset):
    """Split the feature files in `dir_in` into train/test file-name lists.

    Each file's base name (extension stripped; for 50salads the 'rgb-'
    prefix is also removed) must appear in exactly one of the two split
    files, otherwise an AssertionError is raised.

    Fix over the original: the split files were opened via bare
    ``open(...).read()`` and never closed — now handled with ``with``.
    """
    fnames = sorted(glob.glob(os.path.join(dir_in, '*' + ext)))
    with open(train_split_pth) as f:
        train_split = f.read().splitlines()
    with open(test_split_pth) as f:
        test_split = f.read().splitlines()

    train_fnames, test_fnames = [], []
    for fname in fnames:
        key = os.path.basename(fname).replace(ext, '')
        if dataset == '50salads':
            # 50salads feature files carry an 'rgb-' prefix absent from splits.
            key = key.replace('rgb-', '')
        assert (key in train_split or key in test_split)
        if key in train_split:
            train_fnames.append(fname)
        elif key in test_split:
            test_fnames.append(fname)
    return train_fnames, test_fnames
def reduce_dim(pca, fnames, dir_out):
    """Project the 'A' features of every .mat file through `pca` and save.

    Each input file must hold 'A' (features) and 'Y' (labels); the reduced
    features are written under the same base name inside `dir_out`, with
    the labels copied through unchanged.
    """
    for fname in fnames:
        mat = sio.loadmat(fname)
        reduced = pca.transform(mat['A'])
        out_path = os.path.join(dir_out, os.path.basename(fname))
        sio.savemat(out_path, {'A': reduced, 'Y': mat['Y']})
def main():
    """Fit PCA on all training features, then reduce train and test sets.

    Reads the module-level `args` namespace produced by parse_args().
    """
    train_fnames, test_fnames = load_train_test(
        args.dir_in, args.train_split, args.test_split, args.ext, args.dataset)

    # Stack every training feature matrix to fit the projection.
    all_train_feat = [sio.loadmat(fname)['A'] for fname in train_fnames]
    pca = PCA(n_components=args.pca_dim)
    pca.fit(np.vstack(all_train_feat))

    # Apply the learned projection to both splits.
    reduce_dim(pca, train_fnames, args.dir_out)
    reduce_dim(pca, test_fnames, args.dir_out)
    return 0
# Script entry point: parse the CLI into the module-level `args` namespace
# (main() reads it as a global) and propagate main()'s status to the shell.
if __name__ == '__main__':
    args = parse_args()
    sys.exit(main())
| [
"scipy.io.savemat",
"argparse.ArgumentParser",
"os.makedirs",
"sklearn.decomposition.PCA",
"scipy.io.loadmat",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"os.path.basename",
"numpy.vstack"
] | [((314, 339), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (337, 339), False, 'import argparse\n'), ((1084, 1110), 'os.path.isdir', 'os.path.isdir', (['args.dir_in'], {}), '(args.dir_in)\n', (1097, 1110), False, 'import os\n'), ((1122, 1154), 'os.path.isfile', 'os.path.isfile', (['args.train_split'], {}), '(args.train_split)\n', (1136, 1154), False, 'import os\n'), ((1166, 1197), 'os.path.isfile', 'os.path.isfile', (['args.test_split'], {}), '(args.test_split)\n', (1180, 1197), False, 'import os\n'), ((2930, 2960), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'args.pca_dim'}), '(n_components=args.pca_dim)\n', (2933, 2960), False, 'from sklearn.decomposition import PCA\n'), ((1210, 1237), 'os.path.isdir', 'os.path.isdir', (['args.dir_out'], {}), '(args.dir_out)\n', (1223, 1237), False, 'import os\n'), ((1247, 1272), 'os.makedirs', 'os.makedirs', (['args.dir_out'], {}), '(args.dir_out)\n', (1258, 1272), False, 'import os\n'), ((1443, 1474), 'os.path.join', 'os.path.join', (['dir_in', "('*' + ext)"], {}), "(dir_in, '*' + ext)\n", (1455, 1474), False, 'import os\n'), ((1690, 1713), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (1706, 1713), False, 'import os\n'), ((2226, 2244), 'scipy.io.loadmat', 'sio.loadmat', (['fname'], {}), '(fname)\n', (2237, 2244), True, 'import scipy.io as sio\n'), ((2457, 2486), 'scipy.io.savemat', 'sio.savemat', (['fname_out', 'mdict'], {}), '(fname_out, mdict)\n', (2468, 2486), True, 'import scipy.io as sio\n'), ((2801, 2819), 'scipy.io.loadmat', 'sio.loadmat', (['fname'], {}), '(fname)\n', (2812, 2819), True, 'import scipy.io as sio\n'), ((2973, 2998), 'numpy.vstack', 'np.vstack', (['all_train_feat'], {}), '(all_train_feat)\n', (2982, 2998), True, 'import numpy as np\n'), ((2382, 2405), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (2398, 2405), False, 'import os\n')] |
import neural_network_lyapunov.train_utils as train_utils
import unittest
import torch
import numpy as np
def setup_relu(relu_layer_width, params):
    """Build a leaky-ReLU feedforward network with a single scalar output.

    Args:
        relu_layer_width: tuple of layer widths, input layer first; the
            final linear layer always maps to one output unit.
        params: flat float64 tensor holding every weight and bias in layer
            order (weight then bias per layer), or None to keep the default
            random initialization.

    Returns:
        A torch.nn.Sequential alternating Linear and LeakyReLU(0.2) layers.
    """
    assert (isinstance(relu_layer_width, tuple))
    dtype = torch.float64

    def copy_params(linear, offset):
        # Slice the flat parameter vector into this layer's weight and bias.
        weight_count = linear.in_features * linear.out_features
        linear.weight.data = params[offset:offset + weight_count].clone().reshape(
            (linear.out_features, linear.in_features))
        offset += weight_count
        linear.bias.data = params[offset:offset + linear.out_features].clone()
        return offset + linear.out_features

    offset = 0
    linear_layers = []
    for i, width in enumerate(relu_layer_width):
        # The last entry feeds the scalar output layer.
        out_width = relu_layer_width[i + 1] if i + 1 < len(relu_layer_width) else 1
        layer = torch.nn.Linear(width, out_width).type(dtype)
        if params is not None:
            offset = copy_params(layer, offset)
        linear_layers.append(layer)

    # Interleave activations between the linear layers; no activation after
    # the final (output) layer.
    layers = []
    for layer in linear_layers[:-1]:
        layers.append(layer)
        layers.append(torch.nn.LeakyReLU(0.2))
    layers.append(linear_layers[-1])
    return torch.nn.Sequential(*layers)
def test_project_gradient(relu, loss1, loss2, mode):
    """Check train_utils.project_gradient against hand-computed gradients.

    Computes the flattened gradients n1 (of loss1) and n2 (of loss2) over
    relu's parameters, runs project_gradient, and asserts that the
    gradient left in relu matches the closed-form expectation for `mode`.
    """
    # Gradient of loss1 alone -> n1 (zero the grads first so nothing
    # accumulates from a previous call).
    for p in relu.parameters():
        if p.grad is not None:
            p.grad.data.zero_()
    loss1.backward(retain_graph=True)
    n1 = torch.cat([p.grad.clone().reshape((-1, )) for p in relu.parameters()])
    # Gradient of loss2 alone -> n2.
    for p in relu.parameters():
        if p.grad is not None:
            p.grad.data.zero_()
    loss2.backward(retain_graph=True)
    n2 = torch.cat([p.grad.clone().reshape((-1, )) for p in relu.parameters()])
    # Zero once more so the grads left behind come solely from
    # project_gradient.
    for p in relu.parameters():
        if p.grad is not None:
            p.grad.data.zero_()
    need_projection, n1, n2 = train_utils.project_gradient(relu,
                                                           loss1,
                                                           loss2,
                                                           mode,
                                                           retain_graph=True)
    grad = torch.cat(
        [p.grad.clone().reshape((-1, )) for p in relu.parameters()])
    if n1 @ n2 < 0:
        # Conflicting gradients (angle > 90 deg): projection must apply.
        np.testing.assert_equal(need_projection, True)
        # Component of each gradient orthogonal to the other.
        n1_perp = n1 - n1 @ n2 / (n2 @ n2) * n2
        n2_perp = n2 - n1 @ n2 / (n1 @ n1) * n1
        if mode == train_utils.ProjectGradientMode.LOSS1:
            np.testing.assert_almost_equal((grad @ n2).item(), 0)
            np.testing.assert_allclose((n1 - grad), n1 @ n2 / (n2 @ n2) * n2)
            np.testing.assert_allclose(grad, n1_perp)
        elif mode == train_utils.ProjectGradientMode.LOSS2:
            np.testing.assert_almost_equal((grad @ n1).item(), 0)
            np.testing.assert_allclose((n2 - grad), n1 @ n2 / (n1 @ n1) * n1)
            np.testing.assert_allclose(grad, n2_perp)
        elif mode == train_utils.ProjectGradientMode.BOTH:
            np.testing.assert_almost_equal(grad @ n1, n1_perp @ n1_perp)
            np.testing.assert_almost_equal(grad @ n2, n2_perp @ n2_perp)
            np.testing.assert_allclose(grad, n1_perp + n2_perp)
        elif mode == train_utils.ProjectGradientMode.EMPHASIZE_LOSS1:
            np.testing.assert_allclose(grad, n1 + n2_perp)
        elif mode == train_utils.ProjectGradientMode.EMPHASIZE_LOSS2:
            np.testing.assert_allclose(grad, n2 + n1_perp)
        else:
            raise Exception()
    else:
        # Aligned gradients: no projection, grad is simply the sum.
        np.testing.assert_equal(need_projection, False)
        np.testing.assert_allclose(grad, n1 + n2)
class TestProjectGradient(unittest.TestCase):
    """Exercises train_utils.project_gradient on small leaky-ReLU nets."""

    def test1(self):
        dtype = torch.float64
        net_small = setup_relu(
            (2, 3),
            torch.tensor([
                0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5, 1.4, 0.5,
                2.5, -2.3
            ], dtype=dtype))
        net_large = setup_relu(
            (2, 4),
            torch.tensor([
                0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5, 1.4, 0.5,
                2.5, -2.3, 4.2, 0.3, 1.5, -0.3
            ], dtype=dtype))
        x = torch.tensor([2.0, 1.5], dtype=dtype)
        for net in (net_small, net_large):
            y = net(x)
            loss_sq = y * y
            loss_diff = y - y * y
            loss_sum = y + y * y
            for mode in list(train_utils.ProjectGradientMode):
                # loss_sq vs loss_diff: gradient angle > 90 degrees.
                test_project_gradient(net, loss_sq, loss_diff, mode)
                # loss_sq vs loss_sum: gradient angle < 90 degrees.
                test_project_gradient(net, loss_sq, loss_sum, mode)
                test_project_gradient(net, loss_diff, loss_sum, mode)
            for mode in (train_utils.ProjectGradientMode.BOTH,
                         train_utils.ProjectGradientMode.LOSS1,
                         train_utils.ProjectGradientMode.LOSS2):
                # loss_sq and -loss_sq have exactly opposite gradients, so
                # the projected gradient must vanish.
                train_utils.project_gradient(net, loss_sq, -loss_sq, mode,
                                             retain_graph=True)
                flat_grad = torch.cat(
                    [p.grad.reshape((-1, )) for p in net.parameters()])
                np.testing.assert_allclose(flat_grad.detach().numpy(),
                                           np.zeros(flat_grad.shape),
                                           atol=3e-13)
if __name__ == "__main__":
unittest.main()
| [
"numpy.testing.assert_equal",
"torch.nn.LeakyReLU",
"torch.nn.Sequential",
"neural_network_lyapunov.train_utils.project_gradient",
"numpy.testing.assert_allclose",
"torch.tensor",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"torch.nn.Linear",
"unittest.main"
] | [((1560, 1588), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*layers'], {}), '(*layers)\n', (1579, 1588), False, 'import torch\n'), ((2211, 2284), 'neural_network_lyapunov.train_utils.project_gradient', 'train_utils.project_gradient', (['relu', 'loss1', 'loss2', 'mode'], {'retain_graph': '(True)'}), '(relu, loss1, loss2, mode, retain_graph=True)\n', (2239, 2284), True, 'import neural_network_lyapunov.train_utils as train_utils\n'), ((6278, 6293), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6291, 6293), False, 'import unittest\n'), ((1490, 1513), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1508, 1513), False, 'import torch\n'), ((2640, 2686), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['need_projection', '(True)'], {}), '(need_projection, True)\n', (2663, 2686), True, 'import numpy as np\n'), ((3886, 3933), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['need_projection', '(False)'], {}), '(need_projection, False)\n', (3909, 3933), True, 'import numpy as np\n'), ((3942, 3983), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['grad', '(n1 + n2)'], {}), '(grad, n1 + n2)\n', (3968, 3983), True, 'import numpy as np\n'), ((4696, 4733), 'torch.tensor', 'torch.tensor', (['[2.0, 1.5]'], {'dtype': 'dtype'}), '([2.0, 1.5], dtype=dtype)\n', (4708, 4733), False, 'import torch\n'), ((2919, 2982), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(n1 - grad)', '(n1 @ n2 / (n2 @ n2) * n2)'], {}), '(n1 - grad, n1 @ n2 / (n2 @ n2) * n2)\n', (2945, 2982), True, 'import numpy as np\n'), ((2997, 3038), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['grad', 'n1_perp'], {}), '(grad, n1_perp)\n', (3023, 3038), True, 'import numpy as np\n'), ((4145, 4244), 'torch.tensor', 'torch.tensor', (['[0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5, 1.4, 0.5, 2.5, -2.3]'], {'dtype': 'dtype'}), '([0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5, 1.4, 0.5, 2.5,\n -2.3], 
dtype=dtype)\n', (4157, 4244), False, 'import torch\n'), ((4435, 4555), 'torch.tensor', 'torch.tensor', (['[0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5, 1.4, 0.5, 2.5, -2.3, 4.2, \n 0.3, 1.5, -0.3]'], {'dtype': 'dtype'}), '([0.1, 0.2, 0.3, -0.1, 2.1, 3.2, 0.5, -0.2, 4.5, 1.4, 0.5, 2.5,\n -2.3, 4.2, 0.3, 1.5, -0.3], dtype=dtype)\n', (4447, 4555), False, 'import torch\n'), ((1085, 1139), 'torch.nn.Linear', 'torch.nn.Linear', (['relu_layer_width[i]', 'next_layer_width'], {}), '(relu_layer_width[i], next_layer_width)\n', (1100, 1139), False, 'import torch\n'), ((3177, 3240), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(n2 - grad)', '(n1 @ n2 / (n1 @ n1) * n1)'], {}), '(n2 - grad, n1 @ n2 / (n1 @ n1) * n1)\n', (3203, 3240), True, 'import numpy as np\n'), ((3255, 3296), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['grad', 'n2_perp'], {}), '(grad, n2_perp)\n', (3281, 3296), True, 'import numpy as np\n'), ((5697, 5771), 'neural_network_lyapunov.train_utils.project_gradient', 'train_utils.project_gradient', (['relu', 'loss1', '(-loss1)', 'mode'], {'retain_graph': '(True)'}), '(relu, loss1, -loss1, mode, retain_graph=True)\n', (5725, 5771), True, 'import neural_network_lyapunov.train_utils as train_utils\n'), ((3368, 3428), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(grad @ n1)', '(n1_perp @ n1_perp)'], {}), '(grad @ n1, n1_perp @ n1_perp)\n', (3398, 3428), True, 'import numpy as np\n'), ((3441, 3501), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(grad @ n2)', '(n2_perp @ n2_perp)'], {}), '(grad @ n2, n2_perp @ n2_perp)\n', (3471, 3501), True, 'import numpy as np\n'), ((3514, 3565), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['grad', '(n1_perp + n2_perp)'], {}), '(grad, n1_perp + n2_perp)\n', (3540, 3565), True, 'import numpy as np\n'), ((6168, 6188), 'numpy.zeros', 'np.zeros', (['grad.shape'], {}), '(grad.shape)\n', (6176, 6188), True, 'import numpy 
as np\n'), ((3648, 3694), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['grad', '(n1 + n2_perp)'], {}), '(grad, n1 + n2_perp)\n', (3674, 3694), True, 'import numpy as np\n'), ((3777, 3823), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['grad', '(n2 + n1_perp)'], {}), '(grad, n2 + n1_perp)\n', (3803, 3823), True, 'import numpy as np\n')] |
import numpy
import seaborn
import torch
from sklearn.preprocessing import MinMaxScaler
from torch import nn
# Shared training hyper-parameters used by the train_* helpers below.
num_epochs = 20
learning_rate = 0.01
# https://www.jessicayung.com/lstms-for-time-series-in-pytorch/
# Here we define our model as a class
class LSTM(nn.Module):
    """Stacked LSTM whose final time-step output feeds a linear read-out."""

    def __init__(self, input_size, hidden_layer_size, batch_size, output_size=1,
                 num_layers=2):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_layer_size = hidden_layer_size
        self.batch_size = batch_size
        self.num_layers = num_layers
        # Recurrent stack followed by the read-out layer.
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_layer_size,
                            num_layers=num_layers)
        self.linear = nn.Linear(self.hidden_layer_size, output_size)

    def init_hidden(self):
        # Zeroed (h, c) pair shaped (num_layers, batch, hidden).
        state_shape = (self.num_layers, self.batch_size, self.hidden_layer_size)
        return (torch.zeros(*state_shape), torch.zeros(*state_shape))

    def forward(self, input):
        # Reshape to (seq_len, batch, features) as nn.LSTM expects.
        sequence = input.view(len(input), self.batch_size, -1)
        lstm_out, self.hidden = self.lstm(sequence)
        # Read out only the final time step; flatten to (batch,).
        final_step = lstm_out[-1].view(self.batch_size, -1)
        return self.linear(final_step).view(-1)
def train_network(model, X_train, y_train):
    """Fit `model` to (X_train, y_train) with Adam and summed MSE loss.

    Uses the module-level `num_epochs` and `learning_rate` hyper-parameters.
    The model must expose `init_hidden()`; its hidden state is re-created
    every epoch (drop that reset for a stateful LSTM).

    Fixes over the original:
      * ``MSELoss(size_average=False)`` is deprecated — replaced with the
        equivalent ``reduction='sum'``.
      * The per-epoch loss history was filled into ``hist`` and then
        discarded; it is now returned as a numpy array.
    """
    loss_fn = torch.nn.MSELoss(reduction='sum')
    optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)

    #####################
    # Train model
    #####################
    hist = numpy.zeros(num_epochs)
    for t in range(num_epochs):
        # Clear stored gradient
        model.zero_grad()
        # Initialise hidden state
        model.hidden = model.init_hidden()

        # Forward pass
        y_pred = model(X_train)
        loss = loss_fn(y_pred, y_train)
        if t % 100 == 0:
            print("Epoch ", t, "MSE: ", loss.item())
        hist[t] = loss.item()

        # Zero out gradient, else they will accumulate between epochs
        optimiser.zero_grad()
        # Backward pass
        loss.backward()
        # Update parameters
        optimiser.step()
    return hist
#####################################################################################
#####################################################################################
#####################################################################################
# https://curiousily.com/posts/time-series-forecasting-with-lstm-for-daily-coronavirus-cases/
class CoronaVirusPredictor(nn.Module):
    """Two-layer dropout LSTM with a scalar linear head, read out at the
    last time step. Call reset_hidden_state() before each forward pass."""

    def __init__(self, input_size, hidden_layer_size, seq_len, num_layers=2):
        super(CoronaVirusPredictor, self).__init__()
        self.hidden_layer_size = hidden_layer_size
        self.seq_len = seq_len
        self.num_layers = num_layers
        # Recurrent stack with dropout between layers, then the read-out.
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_layer_size,
                            num_layers=num_layers, dropout=0.5)
        self.linear = nn.Linear(in_features=hidden_layer_size, out_features=1)

    def reset_hidden_state(self):
        # Fresh zero (h, c) state shaped (num_layers, seq_len, hidden).
        state_shape = (self.num_layers, self.seq_len, self.hidden_layer_size)
        self.hidden = (torch.zeros(*state_shape), torch.zeros(*state_shape))

    def forward(self, sequences):
        batched = sequences.view(len(sequences), self.seq_len, -1)
        lstm_out, self.hidden = self.lstm(batched, self.hidden)
        # Keep only the final time step for the read-out.
        last_time_step = lstm_out.view(self.seq_len, len(sequences),
                                       self.hidden_layer_size)[-1]
        return self.linear(last_time_step)
def train_model(model, train_data, train_labels, test_data=None, test_labels=None):
    """Train `model` for 60 epochs with Adam and summed MSE.

    The model must expose reset_hidden_state(), called before every forward
    pass. When test data is given, a no-grad evaluation runs each epoch.
    Returns (model in eval mode, train loss history, test loss history).
    """
    loss_fn = torch.nn.MSELoss(reduction='sum')
    optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
    num_epochs = 60

    train_hist = numpy.zeros(num_epochs)
    test_hist = numpy.zeros(num_epochs)

    for t in range(num_epochs):
        model.reset_hidden_state()
        y_pred = model(train_data)
        loss = loss_fn(y_pred.float(), train_labels)

        if test_data is not None:
            # Evaluate without building a graph.
            with torch.no_grad():
                y_test_pred = model(test_data)
                test_loss = loss_fn(y_test_pred.float(), test_labels)
            test_hist[t] = test_loss.item()
            if t % 10 == 0:
                print(f'Epoch {t} train loss: {loss.item()} test loss: {test_loss.item()}')
        elif t % 10 == 0:
            print(f'Epoch {t} train loss: {loss.item()}')

        train_hist[t] = loss.item()
        optimiser.zero_grad()
        loss.backward()
        optimiser.step()

    return model.eval(), train_hist, test_hist
#####################################################################################
#####################################################################################
#####################################################################################
# https://stackabuse.com/time-series-prediction-using-lstm-with-pytorch-in-python/
class LSTM_2(nn.Module):
    """Single-layer LSTM regressor predicting the value that follows a 1-D
    input sequence."""

    def __init__(self, input_size=1, hidden_layer_size=100, output_size=1,
                 num_layers=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_layer_size,
                            num_layers=num_layers)
        self.linear = nn.Linear(hidden_layer_size, output_size)
        # (h, c) state, carried across forward passes until the caller
        # overwrites it.
        self.hidden_cell = (torch.zeros(1, 1, self.hidden_layer_size),
                            torch.zeros(1, 1, self.hidden_layer_size))

    def forward(self, input_seq):
        steps = len(input_seq)
        lstm_out, self.hidden_cell = self.lstm(input_seq.view(steps, 1, -1),
                                               self.hidden_cell)
        predictions = self.linear(lstm_out.view(steps, -1))
        # Only the prediction for the last element of the sequence.
        return predictions[-1]
def train_lstm(model, train_inout_seq):
    """Train `model` for 150 epochs over (sequence, label) pairs.

    The model's hidden/cell state (`hidden_cell`) is zeroed before every
    sequence; progress is printed every 25 epochs and once at the end.
    """
    loss_function = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    epochs = 150

    for i in range(epochs):
        for seq, labels in train_inout_seq:
            optimizer.zero_grad()
            # Start every sequence from a zeroed hidden/cell state.
            model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                                 torch.zeros(1, 1, model.hidden_layer_size))
            y_pred = model(seq)
            single_loss = loss_function(y_pred, labels)
            single_loss.backward()
            optimizer.step()
        if i % 25 == 1:
            print(f'epoch: {i:3} loss: {single_loss.item():10.8f}')

    print(f'epoch: {i:3} loss: {single_loss.item():10.10f}')
def create_inout_sequences(input_data, tw):
    """Slide a window of length `tw` over `input_data`.

    Returns a list of (window, next_element) pairs, where `window` is the
    slice of length `tw` and `next_element` is the length-1 slice that
    immediately follows it. Empty when len(input_data) <= tw.
    """
    return [(input_data[i:i + tw], input_data[i + tw:i + tw + 1])
            for i in range(len(input_data) - tw)]
def evaluate_lstm(model):
    """Roll a trained LSTM_2-style model 12 steps past the flights data.

    Re-fits the same MinMaxScaler used in training on the training slice,
    seeds the prediction window with the last 12 normalized points, then
    feeds each window autoregressively.

    Fixes over the original:
      * The hidden state was reset on ``model.hidden``, but the model's
        forward pass reads ``model.hidden_cell`` — the reset had no effect
        and state leaked between windows. It now resets ``hidden_cell``.
      * The de-normalized predictions were computed and discarded; they are
        now returned as a (12, 1) numpy array.
    """
    fut_pred = 12
    train_window = 12
    test_data_size = 12

    flight_data = seaborn.load_dataset("flights")
    all_data = flight_data['passengers'].values.astype(float)
    train_data = all_data[:-test_data_size]

    scaler = MinMaxScaler(feature_range=(-1, 1))
    train_data_normalized = scaler.fit_transform(train_data.reshape(-1, 1))
    train_data_normalized = torch.FloatTensor(train_data_normalized).view(-1)

    # Seed the autoregressive window with the tail of the training data.
    test_inputs = train_data_normalized[-train_window:].tolist()
    print(test_inputs)

    model.eval()
    for i in range(fut_pred):
        seq = torch.FloatTensor(test_inputs[-train_window:])
        with torch.no_grad():
            # Reset the state the forward pass actually uses.
            model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                                 torch.zeros(1, 1, model.hidden_layer_size))
            test_inputs.append(model(seq).item())

    actual_predictions = scaler.inverse_transform(
        numpy.array(test_inputs[train_window:]).reshape(-1, 1))
    return actual_predictions
#####################################################################################
#####################################################################################
#####################################################################################
class MyOwnRNN(nn.Module):
    """Vanilla RNN with a linear read-out on the last time step.

    Call reset_hidden_state() before each forward pass.

    Fixes over the original draft:
      * ``super().__init__()`` is now called — nn.Module requires it before
        any sub-module can be assigned, so the original raised
        AttributeError in ``__init__``.
      * ``reset_hidden_state`` builds a single zero tensor: ``nn.RNN``
        takes one hidden tensor, not the (h, c) tuple an LSTM uses, and the
        original also read the nonexistent ``self.seq_len`` attribute.
      * ``forward`` uses the attributes actually defined in ``__init__``
        (``sequence_length``, ``output_layer``) instead of the undefined
        ``input_sequence.sequence``, ``self.linear`` and ``self.batch_size``,
        and keeps a single read-out path (the original printed three
        mutually inconsistent experimental variants).
    """

    def __init__(self, input_size, hidden_layer_size, sequence_length,
                 output_size, num_layers):
        super(MyOwnRNN, self).__init__()
        self.input_size = input_size
        self.hidden_layer_size = hidden_layer_size
        self.sequence_length = sequence_length
        self.output_size = output_size
        self.num_layers = num_layers
        # Create the RNN layer
        self.rnn_layer = nn.RNN(input_size=input_size,
                                hidden_size=hidden_layer_size,
                                num_layers=num_layers)
        # Create the output layer
        self.output_layer = nn.Linear(hidden_layer_size, output_size)

    def reset_hidden_state(self):
        # nn.RNN expects a single hidden tensor of shape
        # (num_layers, batch, hidden_size).
        self.hidden = torch.zeros(self.num_layers, self.sequence_length,
                                  self.hidden_layer_size)

    def forward(self, input_sequence):
        reshaped = input_sequence.view(len(input_sequence),
                                       self.sequence_length, -1)
        rnn_output, self.hidden = self.rnn_layer(reshaped, self.hidden)
        # Read out from the final time step only.
        return self.output_layer(rnn_output[-1])
def my_own_train(model, x_train, y_train):
    """Run the standard training loop: forward, MSE loss, backward, step.

    Uses the module-level `num_epochs` and `learning_rate` settings; the
    model must expose reset_hidden_state().
    """
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    for _ in range(num_epochs):
        # Clear accumulated gradients and start from a fresh hidden state.
        model.zero_grad()
        optimizer.zero_grad()
        model.reset_hidden_state()

        # Forward pass and loss.
        predictions = model(x_train)
        loss = criterion(predictions, y_train)

        # Back-propagate and update the parameters.
        loss.backward()
        optimizer.step()
# def train_network(model, X_train, y_train):
# loss_fn = torch.nn.MSELoss(size_average=False)
# optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)
#
# for t in range(num_epochs):
# model.zero_grad()
# model.hidden = model.init_hidden()
# y_pred = model(X_train)
# loss = loss_fn(y_pred, y_train)
# optimiser.zero_grad()
# loss.backward()
# optimiser.step()
#
# def train_model(model, train_data, train_labels, test_data=None, test_labels=None):
# loss_fn = torch.nn.MSELoss(reduction='sum')
# optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
#
# for t in range(num_epochs):
# model.reset_hidden_state()
# y_pred = model(train_data)
# loss = loss_fn(y_pred.float(), train_labels)
# optimiser.zero_grad()
# loss.backward()
# optimiser.step()
#
# def train_lstm(model, train_inout_seq):
# loss_function = nn.MSELoss()
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
#
# for i in range(num_epochs):
# for seq, labels in train_inout_seq:
# optimizer.zero_grad()
# model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
# torch.zeros(1, 1, model.hidden_layer_size))
#
# y_pred = model(seq)
# loss = loss_function(y_pred, labels)
# loss.backward()
# optimizer.step()
def main():
    """Load the flights dataset, scale the training slice to [-1, 1],
    build sliding-window training sequences, and print basic diagnostics.
    """
    train_window = 12
    test_data_size = 12

    flight_data = seaborn.load_dataset("flights")
    all_data = flight_data['passengers'].values.astype(float)
    train_data = all_data[:-test_data_size]

    scaler = MinMaxScaler(feature_range=(-1, 1))
    train_data_normalized = scaler.fit_transform(train_data.reshape(-1, 1))
    train_data_normalized = torch.FloatTensor(train_data_normalized).view(-1)

    train_inout_seq = create_inout_sequences(train_data_normalized, train_window)

    print(len(train_inout_seq))
    print(train_inout_seq[0])
    print(len(train_inout_seq[0][0]))
    print(len(train_inout_seq[0][1]))


# Guard the entry point so importing this module no longer triggers the
# dataset download/processing (the original called main() unconditionally).
if __name__ == "__main__":
    main()
| [
"torch.nn.LSTM",
"seaborn.load_dataset",
"torch.nn.RNN",
"torch.nn.MSELoss",
"numpy.zeros",
"torch.no_grad",
"numpy.array",
"torch.nn.Linear",
"torch.zeros",
"sklearn.preprocessing.MinMaxScaler",
"torch.FloatTensor"
] | [((1746, 1782), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (1762, 1782), False, 'import torch\n'), ((1938, 1961), 'numpy.zeros', 'numpy.zeros', (['num_epochs'], {}), '(num_epochs)\n', (1949, 1961), False, 'import numpy\n'), ((4182, 4215), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (4198, 4215), False, 'import torch\n'), ((4315, 4338), 'numpy.zeros', 'numpy.zeros', (['num_epochs'], {}), '(num_epochs)\n', (4326, 4338), False, 'import numpy\n'), ((4355, 4378), 'numpy.zeros', 'numpy.zeros', (['num_epochs'], {}), '(num_epochs)\n', (4366, 4378), False, 'import numpy\n'), ((6332, 6344), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6342, 6344), False, 'from torch import nn\n'), ((7380, 7411), 'seaborn.load_dataset', 'seaborn.load_dataset', (['"""flights"""'], {}), "('flights')\n", (7400, 7411), False, 'import seaborn\n'), ((7574, 7609), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (7586, 7609), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((11425, 11437), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (11435, 11437), False, 'from torch import nn\n'), ((13522, 13553), 'seaborn.load_dataset', 'seaborn.load_dataset', (['"""flights"""'], {}), "('flights')\n", (13542, 13553), False, 'import seaborn\n'), ((13716, 13751), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (13728, 13751), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((641, 730), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_layer_size', 'num_layers': 'num_layers'}), '(input_size=input_size, hidden_size=hidden_layer_size, num_layers=\n num_layers)\n', (648, 730), False, 'from torch import nn\n'), ((783, 829), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_layer_size', 
'output_size'], {}), '(self.hidden_layer_size, output_size)\n', (792, 829), False, 'from torch import nn\n'), ((3301, 3403), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_layer_size', 'num_layers': 'num_layers', 'dropout': '(0.5)'}), '(input_size=input_size, hidden_size=hidden_layer_size, num_layers=\n num_layers, dropout=0.5)\n', (3308, 3403), False, 'from torch import nn\n'), ((3456, 3512), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'hidden_layer_size', 'out_features': '(1)'}), '(in_features=hidden_layer_size, out_features=1)\n', (3465, 3512), False, 'from torch import nn\n'), ((5694, 5783), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_layer_size', 'num_layers': 'num_layers'}), '(input_size=input_size, hidden_size=hidden_layer_size, num_layers=\n num_layers)\n', (5701, 5783), False, 'from torch import nn\n'), ((5801, 5842), 'torch.nn.Linear', 'nn.Linear', (['hidden_layer_size', 'output_size'], {}), '(hidden_layer_size, output_size)\n', (5810, 5842), False, 'from torch import nn\n'), ((7996, 8042), 'torch.FloatTensor', 'torch.FloatTensor', (['test_inputs[-train_window:]'], {}), '(test_inputs[-train_window:])\n', (8013, 8042), False, 'import torch\n'), ((9041, 9129), 'torch.nn.RNN', 'nn.RNN', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_layer_size', 'num_layers': 'num_layers'}), '(input_size=input_size, hidden_size=hidden_layer_size, num_layers=\n num_layers)\n', (9047, 9129), False, 'from torch import nn\n'), ((9188, 9229), 'torch.nn.Linear', 'nn.Linear', (['hidden_layer_size', 'output_size'], {}), '(hidden_layer_size, output_size)\n', (9197, 9229), False, 'from torch import nn\n'), ((934, 1003), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'self.batch_size', 'self.hidden_layer_size'], {}), '(self.num_layers, self.batch_size, self.hidden_layer_size)\n', (945, 1003), False, 'import torch\n'), ((1021, 1090), 'torch.zeros', 'torch.zeros', (['self.num_layers', 
'self.batch_size', 'self.hidden_layer_size'], {}), '(self.num_layers, self.batch_size, self.hidden_layer_size)\n', (1032, 1090), False, 'import torch\n'), ((3585, 3651), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'self.seq_len', 'self.hidden_layer_size'], {}), '(self.num_layers, self.seq_len, self.hidden_layer_size)\n', (3596, 3651), False, 'import torch\n'), ((3665, 3731), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'self.seq_len', 'self.hidden_layer_size'], {}), '(self.num_layers, self.seq_len, self.hidden_layer_size)\n', (3676, 3731), False, 'import torch\n'), ((5871, 5912), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'self.hidden_layer_size'], {}), '(1, 1, self.hidden_layer_size)\n', (5882, 5912), False, 'import torch\n'), ((5942, 5983), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'self.hidden_layer_size'], {}), '(1, 1, self.hidden_layer_size)\n', (5953, 5983), False, 'import torch\n'), ((7714, 7754), 'torch.FloatTensor', 'torch.FloatTensor', (['train_data_normalized'], {}), '(train_data_normalized)\n', (7731, 7754), False, 'import torch\n'), ((8056, 8071), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8069, 8071), False, 'import torch\n'), ((9301, 9367), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'self.seq_len', 'self.hidden_layer_size'], {}), '(self.num_layers, self.seq_len, self.hidden_layer_size)\n', (9312, 9367), False, 'import torch\n'), ((9381, 9447), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'self.seq_len', 'self.hidden_layer_size'], {}), '(self.num_layers, self.seq_len, self.hidden_layer_size)\n', (9392, 9447), False, 'import torch\n'), ((13856, 13896), 'torch.FloatTensor', 'torch.FloatTensor', (['train_data_normalized'], {}), '(train_data_normalized)\n', (13873, 13896), False, 'import torch\n'), ((4587, 4602), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4600, 4602), False, 'import torch\n'), ((6566, 6608), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'model.hidden_layer_size'], {}), '(1, 1, 
model.hidden_layer_size)\n', (6577, 6608), False, 'import torch\n'), ((6643, 6685), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'model.hidden_layer_size'], {}), '(1, 1, model.hidden_layer_size)\n', (6654, 6685), False, 'import torch\n'), ((8101, 8143), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'model.hidden_layer_size'], {}), '(1, 1, model.hidden_layer_size)\n', (8112, 8143), False, 'import torch\n'), ((8173, 8215), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'model.hidden_layer_size'], {}), '(1, 1, model.hidden_layer_size)\n', (8184, 8215), False, 'import torch\n'), ((8327, 8366), 'numpy.array', 'numpy.array', (['test_inputs[train_window:]'], {}), '(test_inputs[train_window:])\n', (8338, 8366), False, 'import numpy\n')] |
import os
import sys
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
if len(sys.argv) != 2:
print(
"Usage: [100, biscotti_output_file_dir, biscotti_input_file_dir, fedsys_output_file_dir, fedsys_input_file_dir]")
sys.exit()
# Example Usage: python generateResults.py 100 biscottiParsedResults/ BiscottiLogs fedSysParsedResults/ FedSysLogs
total_nodes = sys.argv[1]
# biscotti_output_file_dir = sys.argv[2]
# biscotti_input_file_dir = sys.argv[3]
# fedsys_output_file_dir = sys.argv[4]
# fedsys_input_file_dir = sys.argv[5]
def parse_logs(numRuns, input_file_directory, output_file_directory):
for i in range(0, numRuns):
fname = input_file_directory + str(i) + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
iteration = 0
for line in lines:
idx = line.find("Train Error")
if idx != -1:
timestamp = line[7:20]
outfile.write(str(iteration))
outfile.write(",")
outfile.write(line[(idx + 15):(idx + 22)])
outfile.write(",")
outfile.write(timestamp)
outfile.write("\n")
iteration = iteration + 1
outfile.close()
def get_completion_time(startTime, endTime):
startTime = datetime.strptime(startTime, "%H:%M:%S.%f")
endTime = datetime.strptime(endTime, "%H:%M:%S.%f")
if endTime < startTime:
endTime += timedelta(days=1)
completionTime = endTime - startTime
return str(completionTime.seconds)
def get_highest_id(list):
max = -1
for number in list:
if str(number) > max:
max = number
return max
def parse_all_noise(input_file_directory, output_file_directory, numFiles):
for i in range(0, numFiles):
parse_noise(input_file_directory + str(i), output_file_directory, i)
def parse_noise(input_file_directory, output_file_directory, i):
fname = input_file_directory + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
noisingNumber = 0
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Getting noise from")
if idx != -1:
startTime = line[7:20]
for j in range(i, len(lines)):
line2 = lines[j]
if line2.find("Sending update to verifiers") != -1:
endTime = line2[7:20]
completionTime = get_completion_time(startTime, endTime)
outfile.write(str(noisingNumber))
outfile.write(",")
outfile.write(completionTime)
outfile.write("\n")
noisingNumber = noisingNumber + 1
break
outfile.close()
def parse_all_verif(input_file_directory, output_file_directory, numFiles):
for i in range(0, numFiles):
parse_verif(input_file_directory + str(i), output_file_directory, i)
def parse_verif(input_file_directory, output_file_directory, i):
fname = input_file_directory + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
verificationNumber = 0
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Sending update to verifiers")
if idx != -1:
startTime = line[7:20]
for j in range(i, len(lines)):
line2 = lines[j]
if line2.find("Couldn't get enough signatures") != -1 or line2.find("Sending update to miners") != -1:
endTime = line2[7:20]
completionTime = get_completion_time(startTime, endTime)
outfile.write(str(verificationNumber))
outfile.write(",")
outfile.write(completionTime)
outfile.write("\n")
verificationNumber = verificationNumber + 1
break
outfile.close()
def parse_aggr_for_iteration(input_file_directory, iteration, lead_miner):
fname = input_file_directory + "/log_" + str(lead_miner) + "_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Got share for " + str(iteration) + ", I am at " + str(iteration))
if idx != -1:
startTime = line[7:20]
for j in range(i, len(lines)):
line2 = lines[j]
if line2.find("Sending block of iteration: " + str(iteration)) != -1:
endTime = line2[7:20]
completionTime = get_completion_time(startTime, endTime)
return completionTime
def parse_all_aggr(input_file_directory, output_file_directory, numFiles):
for i in range(0, numFiles):
parse_aggr(input_file_directory + str(i), output_file_directory, i)
def parse_aggr(input_file_directory, output_file_directory, i):
fname = input_file_directory + "/log_0_" + str(total_nodes) + ".log"
lines = [line.rstrip('\n') for line in open(fname)]
if not os.path.exists(output_file_directory):
os.makedirs(output_file_directory)
outfile = open(output_file_directory + "data" + str(i), "w")
iteration = 0
for i in range(0, len(lines)):
line = lines[i]
idx = line.find("Miners are")
if idx != -1:
miners = line[48:len(line) - 1]
miners = miners.split(" ")
leadMiner = get_highest_id(miners)
completionTime = parse_aggr_for_iteration(input_file_directory, iteration, leadMiner)
outfile.write(str(iteration))
outfile.write(",")
outfile.write(str(completionTime))
outfile.write("\n")
iteration = iteration + 1
outfile.close()
def getAvgTotalTime(parsed_files_directory):
completionTimes = np.zeros(3)
for i in range(0, 3):
df = pd.read_csv((parsed_files_directory + 'data' + str(i)), header=None)
startTime = datetime.strptime(df[2].values[0], "%H:%M:%S.%f")
endTime = datetime.strptime(df[2].values[101], "%H:%M:%S.%f")
if endTime < startTime:
endTime += timedelta(days=1)
timeToComplete = endTime - startTime
completionTimes[i] = timeToComplete.seconds
totalAvg = np.mean(completionTimes, axis=0)
return totalAvg
def getAvg(parsed_files_directory):
completionTime = [[], [], []]
for i in range(0, 3):
df = pd.read_csv((parsed_files_directory + 'data' + str(i)), header=None)
completionTime[i] = np.sum(df[1].values)
totalAvg = np.mean(completionTime)
return totalAvg
if __name__ == '__main__':
parse_logs(3, "./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedLogs/")
parse_all_aggr("./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedAggr/", 3)
parse_all_verif("./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedVerif/", 3)
parse_all_noise("./performance-breakdown/40Nodes/", "./performance-breakdown/40Nodes/parsedNoising/", 3)
aggrAvg100 = getAvg("./performance-breakdown/100Nodes/parsedAggr/") / 100
verifAvg100 = getAvg("./performance-breakdown/100Nodes/parsedVerif/") / 100
noisingAvg100 = getAvg("./performance-breakdown/100Nodes/parsedNoising/") / 100
totalTime100 = getAvgTotalTime("./performance-breakdown/100Nodes/parsedLogs/") / 100
floodingTime100 = totalTime100 - aggrAvg100 - verifAvg100 - noisingAvg100
print("Avg Aggr 100 Nodes: " + str(aggrAvg100))
print("Avg Verif 100 Nodes: " + str(verifAvg100))
print("Avg Noising 100 Nodes: " + str(noisingAvg100))
print("Avg Flooding 100 Nodes: " + str(floodingTime100))
print("Avg total time: " + str(totalTime100))
print("")
aggrAvg80 = getAvg("./performance-breakdown/80Nodes/parsedAggr/") / 100
verifAvg80 = getAvg("./performance-breakdown/80Nodes/parsedVerif/") / 100
noisingAvg80 = getAvg("./performance-breakdown/80Nodes/parsedNoising/") / 100
totalTime80 = getAvgTotalTime("./performance-breakdown/80Nodes/parsedLogs/") / 100
floodingTime80 = totalTime80 - aggrAvg80 - verifAvg80 - noisingAvg80
print("Avg Aggr 80 Nodes: " + str(aggrAvg80))
print("Avg Verif 80 Nodes: " + str(verifAvg80))
print("Avg Noising 80 Nodes: " + str(noisingAvg80))
print("Avg Flooding 80 Nodes: " + str(floodingTime80))
print("Avg total time: " + str(totalTime80))
print("")
aggrAvg60 = getAvg("./performance-breakdown/60Nodes/parsedAggr/") / 100
verifAvg60 = getAvg("./performance-breakdown/60Nodes/parsedVerif/") / 100
noisingAvg60 = getAvg("./performance-breakdown/60Nodes/parsedNoising/") / 100
totalTime60 = getAvgTotalTime("./performance-breakdown/60Nodes/parsedLogs/") / 100
floodingTime60 = totalTime60 - aggrAvg60 - verifAvg60 - noisingAvg60
print("Avg Aggr 60 Nodes: " + str(aggrAvg60))
print("Avg Verif 60 Nodes: " + str(verifAvg60))
print("Avg Noising 60 Nodes: " + str(noisingAvg60))
print("Avg Flooding 60 Nodes: " + str(floodingTime60))
print("Avg total time: " + str(totalTime60))
print("")
aggrAvg40 = getAvg("./performance-breakdown/40Nodes/parsedAggr/") / 100
verifAvg40 = getAvg("./performance-breakdown/40Nodes/parsedVerif/") / 100
noisingAvg40 = getAvg("./performance-breakdown/40Nodes/parsedNoising/") / 100
totalTime40 = getAvgTotalTime("./performance-breakdown/40Nodes/parsedLogs/") / 100
floodingTime40 = totalTime40 - aggrAvg40 - verifAvg40 - noisingAvg40
print("Avg Aggr 40 Nodes: " + str(aggrAvg40))
print("Avg Verif 40 Nodes: " + str(verifAvg40))
print("Avg Noising 40 Nodes: " + str(noisingAvg40))
print("Avg Flooding 40 Nodes: " + str(floodingTime40))
print("Avg total time: " + str(totalTime40))
| [
"numpy.mean",
"os.path.exists",
"os.makedirs",
"datetime.datetime.strptime",
"numpy.sum",
"numpy.zeros",
"sys.exit",
"datetime.timedelta"
] | [((263, 273), 'sys.exit', 'sys.exit', ([], {}), '()\n', (271, 273), False, 'import sys\n'), ((1542, 1585), 'datetime.datetime.strptime', 'datetime.strptime', (['startTime', '"""%H:%M:%S.%f"""'], {}), "(startTime, '%H:%M:%S.%f')\n", (1559, 1585), False, 'from datetime import datetime, timedelta\n'), ((1600, 1641), 'datetime.datetime.strptime', 'datetime.strptime', (['endTime', '"""%H:%M:%S.%f"""'], {}), "(endTime, '%H:%M:%S.%f')\n", (1617, 1641), False, 'from datetime import datetime, timedelta\n'), ((6504, 6515), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6512, 6515), True, 'import numpy as np\n'), ((6950, 6982), 'numpy.mean', 'np.mean', (['completionTimes'], {'axis': '(0)'}), '(completionTimes, axis=0)\n', (6957, 6982), True, 'import numpy as np\n'), ((7247, 7270), 'numpy.mean', 'np.mean', (['completionTime'], {}), '(completionTime)\n', (7254, 7270), True, 'import numpy as np\n'), ((1689, 1706), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1698, 1706), False, 'from datetime import datetime, timedelta\n'), ((2318, 2355), 'os.path.exists', 'os.path.exists', (['output_file_directory'], {}), '(output_file_directory)\n', (2332, 2355), False, 'import os\n'), ((2365, 2399), 'os.makedirs', 'os.makedirs', (['output_file_directory'], {}), '(output_file_directory)\n', (2376, 2399), False, 'import os\n'), ((3595, 3632), 'os.path.exists', 'os.path.exists', (['output_file_directory'], {}), '(output_file_directory)\n', (3609, 3632), False, 'import os\n'), ((3642, 3676), 'os.makedirs', 'os.makedirs', (['output_file_directory'], {}), '(output_file_directory)\n', (3653, 3676), False, 'import os\n'), ((5708, 5745), 'os.path.exists', 'os.path.exists', (['output_file_directory'], {}), '(output_file_directory)\n', (5722, 5745), False, 'import os\n'), ((5755, 5789), 'os.makedirs', 'os.makedirs', (['output_file_directory'], {}), '(output_file_directory)\n', (5766, 5789), False, 'import os\n'), ((6644, 6693), 'datetime.datetime.strptime', 
'datetime.strptime', (['df[2].values[0]', '"""%H:%M:%S.%f"""'], {}), "(df[2].values[0], '%H:%M:%S.%f')\n", (6661, 6693), False, 'from datetime import datetime, timedelta\n'), ((6712, 6763), 'datetime.datetime.strptime', 'datetime.strptime', (['df[2].values[101]', '"""%H:%M:%S.%f"""'], {}), "(df[2].values[101], '%H:%M:%S.%f')\n", (6729, 6763), False, 'from datetime import datetime, timedelta\n'), ((7211, 7231), 'numpy.sum', 'np.sum', (['df[1].values'], {}), '(df[1].values)\n', (7217, 7231), True, 'import numpy as np\n'), ((842, 879), 'os.path.exists', 'os.path.exists', (['output_file_directory'], {}), '(output_file_directory)\n', (856, 879), False, 'import os\n'), ((893, 927), 'os.makedirs', 'os.makedirs', (['output_file_directory'], {}), '(output_file_directory)\n', (904, 927), False, 'import os\n'), ((6819, 6836), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6828, 6836), False, 'from datetime import datetime, timedelta\n')] |
"""
fasttext.py
FastText Baseline (running as judge - takes debate logs as input, returns persuasiveness accuracy)
"""
from sklearn.metrics.pairwise import cosine_similarity
from spacy.language import Language
from tqdm import tqdm
import argparse
import json
import numpy as np
import os
ANS2IDX = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
DEBATE2IDX = {'Ⅰ': 0, 'Ⅱ': 1, 'Ⅲ': 2, 'Ⅳ': 3}
def parse_args():
p = argparse.ArgumentParser(description='FastText Judge')
p.add_argument("-m", "--mode", default='cross-model', help='Mode to run in < judge | cross-model >')
p.add_argument("-d", "--dataset", default='dream', help='Dataset to run on < race | dream >')
p.add_argument("-v", "--val", nargs='+', required=True, help='Paths to debate logs for each agent.')
p.add_argument("-p", "--pretrained", default='datasets/fasttext')
return p.parse_args()
def race_judge(args, keys):
"""Run and Compute Accuracy on Baseline QA Model"""
if args.mode == 'judge':
levels = [os.path.join(args.val[0], x) for x in os.listdir(args.val[0])]
correct, total = 0, 0
for level in levels:
passages = [os.path.join(level, x) for x in os.listdir(level)]
print('\nRunning Debates for %s...' % level)
for p in tqdm(passages):
# Get Key Stub
k, cur_question = os.path.relpath(p, args.val[0]), 0
while os.path.join(k, str(cur_question)) in keys:
key = os.path.join(k, str(cur_question))
d = keys[key]
# Compute Scores
passage_vec = np.array([d['passage'].vector])
option_vecs = np.array([x.vector for x in d['option_vecs']])
opt_scores = cosine_similarity(option_vecs, passage_vec).flatten()
best_opt = np.argmax(opt_scores)
# Score
if best_opt == d['answer']:
correct += 1
total += 1
cur_question += 1
print("\nJudge Accuracy: %.5f out of %d Total Examples" % (correct / total, total))
else:
correct, total = 0, 0
for key in keys:
d = keys[key]
# Compute Scores
passage_vec = np.array([d['passage'].vector])
option_vecs = np.array([x.vector for x in d['option_vecs']])
opt_scores = cosine_similarity(option_vecs, passage_vec).flatten()
best_opt = np.argmax(opt_scores)
# Score
if best_opt == d['answer']:
correct += 1
total += 1
print("\nPersuasion Accuracy: %.5f out of %d Total Examples" % (correct / total, total))
def dream_judge(args, keys):
"""Run and Compute Accuracy on Baseline QA Model"""
if args.mode == 'judge':
with open(args.val[0], 'rb') as f:
data = json.load(f)
correct, total = 0, 0
for i, article in enumerate(data):
for idx in range(len(article[1])):
# Get Key
key = os.path.join(article[2], str(idx))
d = keys[key]
# Compute Scores
passage_vec = np.array([d['passage'].vector])
option_vecs = np.array([x.vector for x in d['option_vecs']])
opt_scores = cosine_similarity(option_vecs, passage_vec).flatten()
best_opt = np.argmax(opt_scores)
# Score
if best_opt == d['answer']:
correct += 1
total += 1
print("\nJudge Accuracy: %.5f out of %d Total Examples" % (correct / total, total))
else:
correct, total = 0, 0
for key in keys:
d = keys[key]
# Compute Scores
passage_vec = np.array([d['passage'].vector])
option_vecs = np.array([x.vector for x in d['option_vecs']])
opt_scores = cosine_similarity(option_vecs, passage_vec).flatten()
best_opt = np.argmax(opt_scores)
# Score
if best_opt == d['answer']:
correct += 1
total += 1
print("\nPersuasion Accuracy: %.5f out of %d Total Examples" % (correct / total, total))
def parse_race_data(args, spcy):
# Create Tracking Variables
keys = {}
if args.mode == 'judge':
# Iterate through Data
for dtype in [args.val[0]]:
levels = [os.path.join(dtype, x) for x in os.listdir(dtype)]
for level in levels:
passages = [os.path.join(level, x) for x in os.listdir(level)]
print('\nProcessing %s...' % level)
for p in tqdm(passages):
# Get Key Stub
k = os.path.relpath(p, dtype)
# Read File
with open(p, 'rb') as f:
data = json.load(f)
# Tokenize Passage => Tokenize Passage, then Perform Sentence Split
context = data['article']
# Get Context Vector
tok_context = spcy(context)
if not tok_context.has_vector:
import IPython
IPython.embed()
# Iterate through each Question
for idx in range(len(data['questions'])):
# Create Specific Example Key
key = os.path.join(k, str(idx))
# Fetch
q, ans, options = data['questions'][idx], ANS2IDX[data['answers'][idx]], data['options'][idx]
# Create State Variables
option_vecs = []
# Tokenize Options (Q + Option if specified) and Add to P_A
for o_idx in range(len(options)):
option = options[o_idx]
option_tokens = spcy(option)
if option_tokens.has_vector:
option_vecs.append(option_tokens)
else:
import IPython
IPython.embed()
# Create Dictionary Entry
keys[key] = {'passage': tok_context, 'question': q, 'answer': ans, 'options': options,
'option_vecs': option_vecs}
return keys
else:
# Iterate through all Validation Debate Logs
for deb_mode, val in enumerate(args.val):
with open(val, 'rb') as f:
logs = json.load(f)
for key in logs:
# Fetch Data
data = logs[key]
# Tokenize Passage
context = data['sentences_chosen'][0]
# Get Context Vector
tok_context = spcy(context)
if not tok_context.has_vector:
import IPython
IPython.embed()
# Create Question/Answer State Variables
q, ans, options = data['question'], deb_mode, data['options']
option_vecs = []
for o_idx in range(len(options)):
option = options[o_idx]
option_tokens = spcy(option)
if not option_tokens.has_vector:
import IPython
IPython.embed()
option_vecs.append(option_tokens)
# Create Dictionary Entry
keys[key + "_%d_mode" % deb_mode] = {'passage': tok_context, 'question': q, 'answer': ans,
'options': options, 'option_vecs': option_vecs}
return keys
def parse_dream_data(args, spcy):
# Create Tracking Variables
keys = {}
if args.mode == 'judge':
# Iterate through Data
with open(args.val[0], 'rb') as f:
data = json.load(f)
for i, article in enumerate(data):
context = " ".join(article[0])
# Tokenize Passage
tok_context = spcy(context)
if not tok_context.has_vector:
import IPython
IPython.embed()
# Iterate through each Question
for idx in range(len(article[1])):
# Create Specific Example Key
key = os.path.join(article[2], str(idx))
# Fetch
q, options = article[1][idx]['question'], article[1][idx]['choice']
ans = options.index(article[1][idx]['answer'])
option_vecs = []
# Tokenize Options
for o_idx in range(len(options)):
option = options[o_idx]
option_tokens = spcy(option)
if not option_tokens.has_vector:
import IPython
IPython.embed()
option_vecs.append(option_tokens)
# Create Dictionary Entry
keys[key] = {'passage': tok_context, 'question': q, 'answer': ans, 'options': options,
'option_vecs': option_vecs}
return keys
else:
# Iterate through all Validation Debate Logs
for deb_mode, val in enumerate(args.val):
with open(val, 'rb') as f:
logs = json.load(f)
for key in logs:
# Fetch Data
data = logs[key]
# Tokenize Passage
context = data['sentences_chosen'][0]
# Get Context Vector
tok_context = spcy(context)
if not tok_context.has_vector:
import IPython
IPython.embed()
# Create Question/Answer State Variables
q, ans, options = data['question'], deb_mode, data['options']
option_vecs = []
for o_idx in range(len(options)):
option = options[o_idx]
option_tokens = spcy(option)
if not option_tokens.has_vector:
import IPython
IPython.embed()
option_vecs.append(option_tokens)
# Create Dictionary Entry
keys[key + "_%d_mode" % deb_mode] = {'passage': tok_context, 'question': q, 'answer': ans,
'options': options, 'option_vecs': option_vecs}
return keys
if __name__ == "__main__":
# Parse Args
arguments = parse_args()
# Get FastText Data if it doesn't exist
if not os.path.exists(os.path.join(arguments.pretrained, 'crawl-300d-2M.vec')):
os.system('wget https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip')
os.system('unzip crawl-300d-2M.vec.zip')
if not os.path.exists(arguments.pretrained):
os.makedirs(arguments.pretrained)
os.system('rm crawl-300d-2M.vec.zip')
os.system('mv crawl-300d-2M.vec %s' % arguments.pretrained)
# Use Spacy to load Vectors
nlp = Language()
print('[*] Loading Vectors with Spacy...')
with open(os.path.join(arguments.pretrained, 'crawl-300d-2M.vec'), "rb") as f:
header = f.readline()
nr_row, nr_dim = header.split()
nlp.vocab.reset_vectors(width=int(nr_dim))
for line in tqdm(f):
line = line.rstrip().decode("utf8")
pieces = line.rsplit(" ", int(nr_dim))
word = pieces[0]
vector = np.asarray([float(v) for v in pieces[1:]], dtype="f")
nlp.vocab.set_vector(word, vector)
# Create Dataset
if arguments.dataset == 'race':
D = parse_race_data(arguments, nlp)
# Run Appropriate Accuracy Scorer
race_judge(arguments, D)
elif arguments.dataset == 'dream':
D = parse_dream_data(arguments, nlp)
# Run Appropriate Accuracy Scorer
dream_judge(arguments, D)
| [
"os.path.exists",
"os.listdir",
"sklearn.metrics.pairwise.cosine_similarity",
"argparse.ArgumentParser",
"os.makedirs",
"tqdm.tqdm",
"os.path.join",
"numpy.argmax",
"IPython.embed",
"numpy.array",
"spacy.language.Language",
"json.load",
"os.system",
"os.path.relpath"
] | [((409, 462), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FastText Judge"""'}), "(description='FastText Judge')\n", (432, 462), False, 'import argparse\n'), ((11302, 11312), 'spacy.language.Language', 'Language', ([], {}), '()\n', (11310, 11312), False, 'from spacy.language import Language\n'), ((10899, 11004), 'os.system', 'os.system', (['"""wget https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip"""'], {}), "(\n 'wget https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip'\n )\n", (10908, 11004), False, 'import os\n'), ((11003, 11043), 'os.system', 'os.system', (['"""unzip crawl-300d-2M.vec.zip"""'], {}), "('unzip crawl-300d-2M.vec.zip')\n", (11012, 11043), False, 'import os\n'), ((11153, 11190), 'os.system', 'os.system', (['"""rm crawl-300d-2M.vec.zip"""'], {}), "('rm crawl-300d-2M.vec.zip')\n", (11162, 11190), False, 'import os\n'), ((11199, 11258), 'os.system', 'os.system', (["('mv crawl-300d-2M.vec %s' % arguments.pretrained)"], {}), "('mv crawl-300d-2M.vec %s' % arguments.pretrained)\n", (11208, 11258), False, 'import os\n'), ((11584, 11591), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (11588, 11591), False, 'from tqdm import tqdm\n'), ((1002, 1030), 'os.path.join', 'os.path.join', (['args.val[0]', 'x'], {}), '(args.val[0], x)\n', (1014, 1030), False, 'import os\n'), ((1277, 1291), 'tqdm.tqdm', 'tqdm', (['passages'], {}), '(passages)\n', (1281, 1291), False, 'from tqdm import tqdm\n'), ((2304, 2335), 'numpy.array', 'np.array', (["[d['passage'].vector]"], {}), "([d['passage'].vector])\n", (2312, 2335), True, 'import numpy as np\n'), ((2362, 2408), 'numpy.array', 'np.array', (["[x.vector for x in d['option_vecs']]"], {}), "([x.vector for x in d['option_vecs']])\n", (2370, 2408), True, 'import numpy as np\n'), ((2512, 2533), 'numpy.argmax', 'np.argmax', (['opt_scores'], {}), '(opt_scores)\n', (2521, 2533), True, 'import numpy as np\n'), ((2924, 2936), 'json.load', 
'json.load', (['f'], {}), '(f)\n', (2933, 2936), False, 'import json\n'), ((3846, 3877), 'numpy.array', 'np.array', (["[d['passage'].vector]"], {}), "([d['passage'].vector])\n", (3854, 3877), True, 'import numpy as np\n'), ((3904, 3950), 'numpy.array', 'np.array', (["[x.vector for x in d['option_vecs']]"], {}), "([x.vector for x in d['option_vecs']])\n", (3912, 3950), True, 'import numpy as np\n'), ((4054, 4075), 'numpy.argmax', 'np.argmax', (['opt_scores'], {}), '(opt_scores)\n', (4063, 4075), True, 'import numpy as np\n'), ((8075, 8087), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8084, 8087), False, 'import json\n'), ((10833, 10888), 'os.path.join', 'os.path.join', (['arguments.pretrained', '"""crawl-300d-2M.vec"""'], {}), "(arguments.pretrained, 'crawl-300d-2M.vec')\n", (10845, 10888), False, 'import os\n'), ((11060, 11096), 'os.path.exists', 'os.path.exists', (['arguments.pretrained'], {}), '(arguments.pretrained)\n', (11074, 11096), False, 'import os\n'), ((11110, 11143), 'os.makedirs', 'os.makedirs', (['arguments.pretrained'], {}), '(arguments.pretrained)\n', (11121, 11143), False, 'import os\n'), ((11374, 11429), 'os.path.join', 'os.path.join', (['arguments.pretrained', '"""crawl-300d-2M.vec"""'], {}), "(arguments.pretrained, 'crawl-300d-2M.vec')\n", (11386, 11429), False, 'import os\n'), ((1040, 1063), 'os.listdir', 'os.listdir', (['args.val[0]'], {}), '(args.val[0])\n', (1050, 1063), False, 'import os\n'), ((1148, 1170), 'os.path.join', 'os.path.join', (['level', 'x'], {}), '(level, x)\n', (1160, 1170), False, 'import os\n'), ((3235, 3266), 'numpy.array', 'np.array', (["[d['passage'].vector]"], {}), "([d['passage'].vector])\n", (3243, 3266), True, 'import numpy as np\n'), ((3297, 3343), 'numpy.array', 'np.array', (["[x.vector for x in d['option_vecs']]"], {}), "([x.vector for x in d['option_vecs']])\n", (3305, 3343), True, 'import numpy as np\n'), ((3455, 3476), 'numpy.argmax', 'np.argmax', (['opt_scores'], {}), '(opt_scores)\n', (3464, 3476), True, 
'import numpy as np\n'), ((4488, 4510), 'os.path.join', 'os.path.join', (['dtype', 'x'], {}), '(dtype, x)\n', (4500, 4510), False, 'import os\n'), ((4729, 4743), 'tqdm.tqdm', 'tqdm', (['passages'], {}), '(passages)\n', (4733, 4743), False, 'from tqdm import tqdm\n'), ((6703, 6715), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6712, 6715), False, 'import json\n'), ((8337, 8352), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (8350, 8352), False, 'import IPython\n'), ((9520, 9532), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9529, 9532), False, 'import json\n'), ((1180, 1197), 'os.listdir', 'os.listdir', (['level'], {}), '(level)\n', (1190, 1197), False, 'import os\n'), ((1358, 1389), 'os.path.relpath', 'os.path.relpath', (['p', 'args.val[0]'], {}), '(p, args.val[0])\n', (1373, 1389), False, 'import os\n'), ((1626, 1657), 'numpy.array', 'np.array', (["[d['passage'].vector]"], {}), "([d['passage'].vector])\n", (1634, 1657), True, 'import numpy as np\n'), ((1692, 1738), 'numpy.array', 'np.array', (["[x.vector for x in d['option_vecs']]"], {}), "([x.vector for x in d['option_vecs']])\n", (1700, 1738), True, 'import numpy as np\n'), ((1858, 1879), 'numpy.argmax', 'np.argmax', (['opt_scores'], {}), '(opt_scores)\n', (1867, 1879), True, 'import numpy as np\n'), ((2435, 2478), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['option_vecs', 'passage_vec'], {}), '(option_vecs, passage_vec)\n', (2452, 2478), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3977, 4020), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['option_vecs', 'passage_vec'], {}), '(option_vecs, passage_vec)\n', (3994, 4020), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((4520, 4537), 'os.listdir', 'os.listdir', (['dtype'], {}), '(dtype)\n', (4530, 4537), False, 'import os\n'), ((4600, 4622), 'os.path.join', 'os.path.join', (['level', 'x'], {}), '(level, x)\n', (4612, 4622), False, 'import os\n'), ((4804, 
4829), 'os.path.relpath', 'os.path.relpath', (['p', 'dtype'], {}), '(p, dtype)\n', (4819, 4829), False, 'import os\n'), ((7082, 7097), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (7095, 7097), False, 'import IPython\n'), ((9899, 9914), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (9912, 9914), False, 'import IPython\n'), ((3374, 3417), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['option_vecs', 'passage_vec'], {}), '(option_vecs, passage_vec)\n', (3391, 3417), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((4632, 4649), 'os.listdir', 'os.listdir', (['level'], {}), '(level)\n', (4642, 4649), False, 'import os\n'), ((4939, 4951), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4948, 4951), False, 'import json\n'), ((5291, 5306), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (5304, 5306), False, 'import IPython\n'), ((7528, 7543), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (7541, 7543), False, 'import IPython\n'), ((9050, 9065), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (9063, 9065), False, 'import IPython\n'), ((10345, 10360), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (10358, 10360), False, 'import IPython\n'), ((1773, 1816), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['option_vecs', 'passage_vec'], {}), '(option_vecs, passage_vec)\n', (1790, 1816), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((6263, 6278), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (6276, 6278), False, 'import IPython\n')] |
from datetime import datetime
import numpy as np
import argparse
from model.initialization import initialization
from model.utils import evaluation
from config import conf
def boolean_string(s):
if s.upper() not in {'FALSE', 'TRUE'}:
raise ValueError('Not a valid boolean string')
return s.upper() == 'TRUE'
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--iter', default='80000', type=int,
help='iter: iteration of the checkpoint to load. Default: 80000')
parser.add_argument('--batch_size', default='1', type=int,
help='batch_size: batch size for parallel test. Default: 1')
parser.add_argument('--cache', default=False, type=boolean_string,
help='cache: if set as TRUE all the test data will be loaded at once'
' before the transforming start. Default: FALSE')
opt = parser.parse_args()
# Exclude identical-view cases
def de_diag(acc, each_angle=False):
result = np.sum(acc - np.diag(np.diag(acc)), 1) / 10.0
if not each_angle:
result = np.mean(result)
return result
m = initialization(conf, test=opt.cache)[0]
# load model checkpoint of iteration opt.iter
print('Loading the model of iteration %d...' % opt.iter)
m.load(opt.iter)
print('Transforming...')
time = datetime.now()
test = m.transform('test', opt.batch_size)
print('Evaluating...')
acc = evaluation(test, conf['data'])
print('Evaluation complete. Cost:', datetime.now() - time)
# Print rank-1 accuracy of the best model
# e.g.
# ===Rank-1 (Include identical-view cases)===
# NM: 95.405, BG: 88.284, CL: 72.041
for i in range(1):
print('===Rank-%d (Include identical-view cases)===' % (i + 1))
print('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
np.mean(acc[0, :, :, i]),
np.mean(acc[1, :, :, i]),
np.mean(acc[2, :, :, i])))
# Print rank-1 accuracy of the best model,excluding identical-view cases
# e.g.
# ===Rank-1 (Exclude identical-view cases)===
# NM: 94.964, BG: 87.239, CL: 70.355
for i in range(1):
print('===Rank-%d (Exclude identical-view cases)===' % (i + 1))
print('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
de_diag(acc[0, :, :, i]),
de_diag(acc[1, :, :, i]),
de_diag(acc[2, :, :, i])))
# Print rank-1 accuracy of the best model (Each Angle)
# e.g.
# ===Rank-1 of each angle (Exclude identical-view cases)===
# NM: [90.80 97.90 99.40 96.90 93.60 91.70 95.00 97.80 98.90 96.80 85.80]
# BG: [83.80 91.20 91.80 88.79 83.30 81.00 84.10 90.00 92.20 94.45 79.00]
# CL: [61.40 75.40 80.70 77.30 72.10 70.10 71.50 73.50 73.50 68.40 50.00]
np.set_printoptions(precision=2, floatmode='fixed')
for i in range(1):
print('===Rank-%d of each angle (Exclude identical-view cases)===' % (i + 1))
print('NM:', de_diag(acc[0, :, :, i], True))
print('BG:', de_diag(acc[1, :, :, i], True))
print('CL:', de_diag(acc[2, :, :, i], True))
| [
"numpy.mean",
"model.initialization.initialization",
"model.utils.evaluation",
"argparse.ArgumentParser",
"numpy.diag",
"datetime.datetime.now",
"numpy.set_printoptions"
] | [((338, 381), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test"""'}), "(description='Test')\n", (361, 381), False, 'import argparse\n'), ((1324, 1338), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1336, 1338), False, 'from datetime import datetime\n'), ((1411, 1441), 'model.utils.evaluation', 'evaluation', (['test', "conf['data']"], {}), "(test, conf['data'])\n", (1421, 1441), False, 'from model.utils import evaluation\n'), ((2633, 2684), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'floatmode': '"""fixed"""'}), "(precision=2, floatmode='fixed')\n", (2652, 2684), True, 'import numpy as np\n'), ((1131, 1167), 'model.initialization.initialization', 'initialization', (['conf'], {'test': 'opt.cache'}), '(conf, test=opt.cache)\n', (1145, 1167), False, 'from model.initialization import initialization\n'), ((1091, 1106), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (1098, 1106), True, 'import numpy as np\n'), ((1478, 1492), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1490, 1492), False, 'from datetime import datetime\n'), ((1784, 1808), 'numpy.mean', 'np.mean', (['acc[0, :, :, i]'], {}), '(acc[0, :, :, i])\n', (1791, 1808), True, 'import numpy as np\n'), ((1818, 1842), 'numpy.mean', 'np.mean', (['acc[1, :, :, i]'], {}), '(acc[1, :, :, i])\n', (1825, 1842), True, 'import numpy as np\n'), ((1852, 1876), 'numpy.mean', 'np.mean', (['acc[2, :, :, i]'], {}), '(acc[2, :, :, i])\n', (1859, 1876), True, 'import numpy as np\n'), ((1026, 1038), 'numpy.diag', 'np.diag', (['acc'], {}), '(acc)\n', (1033, 1038), True, 'import numpy as np\n')] |
from Drawing import draw_problem_configuration
from Node import Node
import numpy as np
def construct_approximate_reeb_graph(environment, grid_x, grid_y):
    """Return columns of grid points corresponding to the contracted level sets of a Reeb graph.

    :param environment: Environment to construct the Reeb graph of
    :param grid_x: number of grid points in the x direction
    :param grid_y: number of grid points in the y direction
    :return: List[List[Node]]
    """
    lo_x, hi_x, lo_y, hi_y = environment.bounds()
    # Shrink the scan window slightly so sampled points stay inside the boundary.
    margin = 0.01 * environment.width
    lo_x, lo_y = lo_x + margin, lo_y + margin
    hi_x, hi_y = hi_x - margin, hi_y - margin
    draw_problem_configuration(environment, None, None, None, draw_robot=False)
    dy = (hi_y - lo_y) / grid_y

    def scan_column(x):
        # Walk upward through one vertical column, emitting the midpoint of
        # every maximal run of clear samples as a Node.
        nodes = []
        y = lo_y
        while y < hi_y:
            run_start = y
            while environment.clear_coords(x, y) and y < hi_y:
                y += dy
            run_end = y - dy
            if environment.clear_coords(x, run_start) and environment.clear_coords(x, run_end):
                nodes.append(Node(np.array([x, (run_start + run_end) / 2])))
            while not environment.clear_coords(x, y) and y < hi_y:
                y += dy
        return nodes

    columns = [scan_column(x) for x in np.linspace(lo_x, hi_x, grid_x)]
    # A column is redundant when it repeats the previous column's y-levels exactly.
    duplicates = {
        i + 1
        for i in range(len(columns) - 1)
        if {n.configuration[1] for n in columns[i]} == {n.configuration[1] for n in columns[i + 1]}
    }
    return [col for i, col in enumerate(columns) if i not in duplicates and col]
| [
"numpy.array",
"Drawing.draw_problem_configuration",
"numpy.linspace"
] | [((637, 712), 'Drawing.draw_problem_configuration', 'draw_problem_configuration', (['environment', 'None', 'None', 'None'], {'draw_robot': '(False)'}), '(environment, None, None, None, draw_robot=False)\n', (663, 712), False, 'from Drawing import draw_problem_configuration\n'), ((782, 815), 'numpy.linspace', 'np.linspace', (['min_x', 'max_x', 'grid_x'], {}), '(min_x, max_x, grid_x)\n', (793, 815), True, 'import numpy as np\n'), ((1172, 1208), 'numpy.array', 'np.array', (['[x, (start_y + end_y) / 2]'], {}), '([x, (start_y + end_y) / 2])\n', (1180, 1208), True, 'import numpy as np\n')] |
import numpy as np
def topHatFilter(blueMovie,uvMovie,mask,topHat=300):
    """Remove slow baseline drift from both movies with a morphological white top-hat.

    blueMovie, uvMovie: 3-D movies shaped (rows, cols, frames)
    mask: 2-D spatial mask; only pixels with value > 0 are filtered
    topHat: length, in frames, of the top-hat structuring element
    Returns (blueMovieFiltered, uvMovieFiltered), reshaped back to the input
    3-D shape; pixels outside the mask stay zero.
    """
    # Mask (spatial), resize, and rotate
    # mask = np.array(Image.open('mask.tif').resize(downsampledSize, Image.BILINEAR).rotate(rotationAngle,Image.NEAREST,True))
    rotatedSize3D = blueMovie.shape
    # Reshape each movie to (pixels, frames) and flatten the mask to match.
    blueMovie = blueMovie.reshape((blueMovie.shape[0]*blueMovie.shape[1], blueMovie.shape[2]))
    uvMovie = uvMovie.reshape((uvMovie.shape[0]*uvMovie.shape[1], uvMovie.shape[2]))
    mask = mask.reshape((mask.shape[0]*mask.shape[1]))
    mask = mask>0
    mask_indices = np.squeeze(np.argwhere(mask))
    # Creating time padding (invert time): prepend the first `topHat` frames
    # point-reflected about frame 0 (2*f[0] - f[t], reversed) so the filter
    # has context at the start of the recording.
    bluePadding = np.concatenate([-blueMovie[mask,topHat:0:-1]+2*blueMovie[mask,0][:,np.newaxis], blueMovie[mask,:]],axis=1)
    uvPadding = np.concatenate([-uvMovie[mask,topHat:0:-1]+2*uvMovie[mask,0][:,np.newaxis], uvMovie[mask,:]],axis=1)
    # from skimage.morphology import white_tophat
    import skimage.morphology
    se = skimage.morphology.rectangle(1,topHat) #(1, x) shape important!
    blueFiltered = np.empty((mask.sum(), rotatedSize3D[2]+topHat))
    uvFiltered = np.empty((mask.sum(), rotatedSize3D[2]+topHat))
    # Apply the top-hat to each masked pixel's (1, time) trace independently.
    for i in range(mask.sum()):
        blueFiltered[i,np.newaxis] = skimage.morphology.white_tophat(bluePadding[i,np.newaxis],se)
        uvFiltered[i,np.newaxis] = skimage.morphology.white_tophat(uvPadding[i,np.newaxis],se)
    # Scatter the filtered traces (padding dropped) back into full-size
    # (pixels, frames) arrays, then restore the original 3-D shape.
    blueMovieFiltered = np.zeros(blueMovie.shape)
    uvMovieFiltered = np.zeros(uvMovie.shape)
    blueMovieFiltered[mask_indices,:] = blueFiltered[:,topHat:]
    uvMovieFiltered[mask_indices,:] = uvFiltered[:,topHat:]
    blueMovieFiltered = blueMovieFiltered.reshape(rotatedSize3D)
    uvMovieFiltered = uvMovieFiltered.reshape(rotatedSize3D)
    return blueMovieFiltered,uvMovieFiltered
def twoWavelengthRegression(blueMovieFiltered,uvMovieFiltered,blueMovie,uvMovie,mask):
from scipy import linalg
mask = mask.reshape((mask.shape[0]*mask.shape[1]))
mask = mask>0
mask_indices = np.squeeze(np.argwhere(mask))
rotatedSize3D = blueMovie.shape
blueMovie = blueMovie.reshape((blueMovie.shape[0]*blueMovie.shape[1], blueMovie.shape[2]))
uvMovie = uvMovie.reshape((uvMovie.shape[0]*uvMovie.shape[1], uvMovie.shape[2]))
blueMovieFiltered = blueMovieFiltered.reshape((blueMovieFiltered.shape[0]*blueMovieFiltered.shape[1], blueMovieFiltered.shape[2]))
uvMovieFiltered = uvMovieFiltered.reshape((uvMovieFiltered.shape[0]*uvMovieFiltered.shape[1], uvMovieFiltered.shape[2]))
blueBase = blueMovie - blueMovieFiltered
uvBase = uvMovie - uvMovieFiltered
blueRec = blueMovieFiltered + np.tile(blueBase.mean(axis=1)[:,np.newaxis],(1,rotatedSize3D[2]))
uvRec = uvMovieFiltered + np.tile(uvBase.mean(axis=1)[:,np.newaxis],(1,rotatedSize3D[2]))
beta = np.zeros((len(mask_indices)))
blueReg = np.zeros(blueBase.shape)
for i in range(mask.sum()):
beta[i] = linalg.lstsq(uvRec[mask_indices[i],:][:,np.newaxis], blueRec[mask_indices[i],:][:,np.newaxis])[0][0][0]
blueReg[mask_indices[i],:] = blueMovieFiltered[mask_indices[i],:] - beta[i]*uvMovieFiltered[mask_indices[i],:]
return blueReg
def dFF(blueMovie,uvMovieFiltered,blueReg,mask,topHat=300):
rotatedSize3D = blueMovie.shape
mask = mask.reshape((mask.shape[0]*mask.shape[1]))
mask = mask>0
blueMovie = blueMovie.reshape((blueMovie.shape[0]*blueMovie.shape[1], blueMovie.shape[2]))
uvMovieFiltered = uvMovieFiltered.reshape((uvMovieFiltered.shape[0]*uvMovieFiltered.shape[1], uvMovieFiltered.shape[2]))
blueF = blueMovie[mask,topHat:].mean(axis=1)
blueDFF = np.zeros(blueMovie.shape)
blueDFF[mask,:] = np.divide(blueReg[mask,:],np.tile(blueF[:,np.newaxis],(1,rotatedSize3D[2])))
#uv
uvF = uvMovieFiltered[mask,topHat:].mean(axis=1)
uvDFF = np.zeros(uvMovieFiltered.shape)
uvDFF[mask,:] = np.divide(uvMovieFiltered[mask,:],np.tile(uvF[:,np.newaxis],(1,rotatedSize3D[2])))
return blueDFF,uvDFF | [
"numpy.tile",
"scipy.linalg.lstsq",
"numpy.zeros",
"numpy.argwhere",
"numpy.concatenate"
] | [((659, 778), 'numpy.concatenate', 'np.concatenate', (['[-blueMovie[mask, topHat:0:-1] + 2 * blueMovie[mask, 0][:, np.newaxis],\n blueMovie[mask, :]]'], {'axis': '(1)'}), '([-blueMovie[mask, topHat:0:-1] + 2 * blueMovie[mask, 0][:,\n np.newaxis], blueMovie[mask, :]], axis=1)\n', (673, 778), True, 'import numpy as np\n'), ((782, 896), 'numpy.concatenate', 'np.concatenate', (['[-uvMovie[mask, topHat:0:-1] + 2 * uvMovie[mask, 0][:, np.newaxis], uvMovie\n [mask, :]]'], {'axis': '(1)'}), '([-uvMovie[mask, topHat:0:-1] + 2 * uvMovie[mask, 0][:, np.\n newaxis], uvMovie[mask, :]], axis=1)\n', (796, 896), True, 'import numpy as np\n'), ((1421, 1446), 'numpy.zeros', 'np.zeros', (['blueMovie.shape'], {}), '(blueMovie.shape)\n', (1429, 1446), True, 'import numpy as np\n'), ((1469, 1492), 'numpy.zeros', 'np.zeros', (['uvMovie.shape'], {}), '(uvMovie.shape)\n', (1477, 1492), True, 'import numpy as np\n'), ((2848, 2872), 'numpy.zeros', 'np.zeros', (['blueBase.shape'], {}), '(blueBase.shape)\n', (2856, 2872), True, 'import numpy as np\n'), ((3621, 3646), 'numpy.zeros', 'np.zeros', (['blueMovie.shape'], {}), '(blueMovie.shape)\n', (3629, 3646), True, 'import numpy as np\n'), ((3820, 3851), 'numpy.zeros', 'np.zeros', (['uvMovieFiltered.shape'], {}), '(uvMovieFiltered.shape)\n', (3828, 3851), True, 'import numpy as np\n'), ((579, 596), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (590, 596), True, 'import numpy as np\n'), ((2016, 2033), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (2027, 2033), True, 'import numpy as np\n'), ((3695, 3747), 'numpy.tile', 'np.tile', (['blueF[:, np.newaxis]', '(1, rotatedSize3D[2])'], {}), '(blueF[:, np.newaxis], (1, rotatedSize3D[2]))\n', (3702, 3747), True, 'import numpy as np\n'), ((3906, 3956), 'numpy.tile', 'np.tile', (['uvF[:, np.newaxis]', '(1, rotatedSize3D[2])'], {}), '(uvF[:, np.newaxis], (1, rotatedSize3D[2]))\n', (3913, 3956), True, 'import numpy as np\n'), ((2924, 3027), 'scipy.linalg.lstsq', 
'linalg.lstsq', (['uvRec[mask_indices[i], :][:, np.newaxis]', 'blueRec[mask_indices[i], :][:, np.newaxis]'], {}), '(uvRec[mask_indices[i], :][:, np.newaxis], blueRec[mask_indices\n [i], :][:, np.newaxis])\n', (2936, 3027), False, 'from scipy import linalg\n')] |
import numpy as np
from copy import copy, deepcopy
from itertools import product
from envs.env import DeterministicEnv, Direction
class TrainState(object):
    '''
    Immutable snapshot of the environment: the agent's position, the state of
    every vase, and the train's position and intactness.
    '''
    def __init__(self, agent_pos, vase_states, train_pos, train_intact):
        """
        agent_pos: (x, y) tuple for the agent's location
        vase_states: Dictionary mapping (x, y) tuples to booleans, where True
                     means that the vase is intact
        train_pos: (x, y) tuple for the train's location
        train_intact: Boolean, True while the train is unbroken
        """
        self.agent_pos = agent_pos
        self.vase_states = vase_states
        self.train_pos = train_pos
        self.train_intact = train_intact

    def is_valid(self):
        """A state is invalid if the agent overlaps an intact vase or the intact train."""
        on_intact_vase = self.vase_states.get(self.agent_pos, False)
        on_intact_train = self.train_intact and self.agent_pos == self.train_pos
        return not (on_intact_vase or on_intact_train)

    def __eq__(self, other):
        if not isinstance(other, TrainState):
            return False
        mine = (self.agent_pos, self.vase_states, self.train_pos, self.train_intact)
        theirs = (other.agent_pos, other.vase_states, other.train_pos, other.train_intact)
        return mine == theirs

    def __hash__(self):
        # Hash on a flat tuple; vase values are ordered by sorted location so
        # equal states always hash the same.
        vase_vals = tuple(self.vase_states[loc] for loc in sorted(self.vase_states.keys()))
        return hash(self.agent_pos + vase_vals + self.train_pos + (self.train_intact,))
class TrainEnv(DeterministicEnv):
    """Deterministic gridworld with breakable vases and a train that cycles
    through a fixed set of locations while intact.

    Stepping onto an intact vase breaks it; ending up in the train's cell
    breaks the train (which then stops moving).
    """
    def __init__(self, spec, compute_transitions=True):
        """
        height: Integer, height of the grid. Y coordinates are in [0, height).
        width: Integer, width of the grid. X coordinates are in [0, width).
        init_state: TrainState, initial state of the environment
        vase_locations: List of (x, y) tuples, locations of vases
        num_vases: Integer, number of vases
        carpet_locations: Set of (x, y) tuples, locations of carpets
        feature_locations: List of (x, y) tuples, locations of features
        s: TrainState, Current state
        nA: Integer, number of actions
        """
        self.height = spec.height
        self.width = spec.width
        self.init_state = deepcopy(spec.init_state)
        self.vase_locations = list(self.init_state.vase_states.keys())
        self.num_vases = len(self.vase_locations)
        self.carpet_locations = set(spec.carpet_locations)
        self.feature_locations = list(spec.feature_locations)
        self.train_transition = spec.train_transition
        self.train_locations = list(self.train_transition.keys())
        # The train transition must map its locations onto themselves, so the
        # train cycles within a closed set and can never leave it.
        assert set(self.train_locations) == set(self.train_transition.values())
        self.default_action = Direction.get_number_from_direction(Direction.STAY)
        self.nA = 5
        self.num_features = len(self.s_to_f(self.init_state))
        self.reset()
        if compute_transitions:
            # Precompute full tabular dynamics and the feature matrix.
            states = self.enumerate_states()
            self.make_transition_matrices(
                states, range(self.nA), self.nS, self.nA)
            self.make_f_matrix(self.nS, self.num_features)
    def enumerate_states(self):
        """Enumerate all valid states and build state<->index lookup tables.

        Side effects: sets self.state_num, self.num_state, and self.nS.
        Returns the states (keys of the numbering dict).
        """
        state_num = {}
        all_agent_positions = product(range(self.width), range(self.height))
        # Every assignment of intact/broken to each vase location.
        all_vase_states = map(
            lambda vase_vals: dict(zip(self.vase_locations, vase_vals)),
            product([True, False], repeat=self.num_vases))
        all_states = map(
            lambda x: TrainState(*x),
            product(all_agent_positions, all_vase_states, self.train_locations, [True, False]))
        # Drop states where the agent overlaps an intact vase or train.
        all_states = filter(lambda state: state.is_valid(), all_states)
        state_num = {}
        for state in all_states:
            if state not in state_num:
                state_num[state] = len(state_num)
        self.state_num = state_num
        self.num_state = {v: k for k, v in self.state_num.items()}
        self.nS = len(state_num)
        return state_num.keys()
    def get_num_from_state(self, state):
        """Return the integer index assigned to `state` by enumerate_states."""
        return self.state_num[state]
    def get_state_from_num(self, num):
        """Return the state corresponding to the integer index `num`."""
        return self.num_state[num]
    def s_to_f(self, s):
        '''
        Returns features of the state:
        - Number of broken vases
        - Whether the agent is on a carpet
        - Whether the train is broken
        - For each train location, whether the train is at that location
        - For each feature location, whether the agent is on that location
        '''
        num_broken_vases = list(s.vase_states.values()).count(False)
        carpet_feature = int(s.agent_pos in self.carpet_locations)
        train_intact_feature = int(not s.train_intact)
        train_pos_features = [int(s.train_pos == pos) for pos in self.train_locations]
        loc_features = [int(s.agent_pos == fpos) for fpos in self.feature_locations]
        features = train_pos_features + loc_features
        features = [num_broken_vases, carpet_feature, train_intact_feature] + features
        return np.array(features)
    def get_next_state(self, state, action):
        '''returns the next state given a state and an action'''
        action = int(action)
        new_x, new_y = Direction.move_in_direction_number(state.agent_pos, action)
        # New position is still in bounds:
        if not (0 <= new_x < self.width and 0 <= new_y < self.height):
            new_x, new_y = state.agent_pos
        new_agent_pos = new_x, new_y
        new_vase_states = deepcopy(state.vase_states)
        new_train_pos, new_train_intact = state.train_pos, state.train_intact
        # While intact, the train advances one step along its cycle.
        if state.train_intact:
            new_train_pos = self.train_transition[state.train_pos]
        # Break the vase and train if appropriate
        if new_agent_pos in new_vase_states:
            new_vase_states[new_agent_pos] = False
        if new_agent_pos == new_train_pos:
            new_train_intact = False
        return TrainState(new_agent_pos, new_vase_states, new_train_pos, new_train_intact)
    def print_state(self, state):
        '''Renders the state to stdout with box-drawing and ANSI color codes.

        Canvas cell codes: 0 empty, 1/2 borders, 3 agent (yellow),
        4 intact vase (green), 5 intact train (purple), 6 broken object (red).
        '''
        h, w = self.height, self.width
        canvas = np.zeros(tuple([2*h-1, 3*w+1]), dtype='int8')
        # cell borders
        for y in range(1, canvas.shape[0], 2):
            canvas[y, :] = 1
        for x in range(0, canvas.shape[1], 3):
            canvas[:, x] = 2
        # vases
        for x, y in self.vase_locations:
            if state.vase_states[(x, y)]:
                canvas[2*y, 3*x+1] = 4
            else:
                canvas[2*y, 3*x+1] = 6
        # agent
        x, y = state.agent_pos
        canvas[2*y, 3*x + 2] = 3
        # train
        x, y = state.train_pos
        if state.train_intact:
            canvas[2*y, 3*x + 1] = 5
        else:
            canvas[2*y, 3*x + 1] = 6
        black_color = '\x1b[0m'
        purple_background_color = '\x1b[0;35;85m'
        for line in canvas:
            for char_num in line:
                if char_num==0:
                    print('\u2003', end='')
                elif char_num==1:
                    print('─', end='')
                elif char_num==2:
                    print('│', end='')
                elif char_num==3:
                    print('\x1b[0;33;85m█'+black_color, end='')
                elif char_num==4:
                    print('\x1b[0;32;85m█'+black_color , end='')
                elif char_num==5:
                    print(purple_background_color+'█'+black_color, end='')
                elif char_num==6:
                    print('\033[91m█'+black_color, end='')
            print('')
| [
"envs.env.Direction.get_number_from_direction",
"itertools.product",
"numpy.array",
"envs.env.Direction.move_in_direction_number",
"copy.deepcopy"
] | [((2343, 2368), 'copy.deepcopy', 'deepcopy', (['spec.init_state'], {}), '(spec.init_state)\n', (2351, 2368), False, 'from copy import copy, deepcopy\n'), ((2842, 2893), 'envs.env.Direction.get_number_from_direction', 'Direction.get_number_from_direction', (['Direction.STAY'], {}), '(Direction.STAY)\n', (2877, 2893), False, 'from envs.env import DeterministicEnv, Direction\n'), ((4993, 5011), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (5001, 5011), True, 'import numpy as np\n'), ((5176, 5235), 'envs.env.Direction.move_in_direction_number', 'Direction.move_in_direction_number', (['state.agent_pos', 'action'], {}), '(state.agent_pos, action)\n', (5210, 5235), False, 'from envs.env import DeterministicEnv, Direction\n'), ((5456, 5483), 'copy.deepcopy', 'deepcopy', (['state.vase_states'], {}), '(state.vase_states)\n', (5464, 5483), False, 'from copy import copy, deepcopy\n'), ((3486, 3531), 'itertools.product', 'product', (['[True, False]'], {'repeat': 'self.num_vases'}), '([True, False], repeat=self.num_vases)\n', (3493, 3531), False, 'from itertools import product\n'), ((3609, 3696), 'itertools.product', 'product', (['all_agent_positions', 'all_vase_states', 'self.train_locations', '[True, False]'], {}), '(all_agent_positions, all_vase_states, self.train_locations, [True, \n False])\n', (3616, 3696), False, 'from itertools import product\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from math import pi as PI
from scipy import signal
import IPython.display as ipd
# <b>Task 2.1.a)<b>
# In[103]:
# Unit impulse plus the same impulse delayed by N = 40 samples.
y = [0]*41
y[0] = 1
y[40] = 1
plt.figure(figsize= (12,4), dpi= 100)
plt.title("Impuls oraz impuls przesunięty o N = 40 próbek")
plt.xlabel("Wartość Próbki")
plt.ylabel("Nr Próbki")
plt.stem(range(41), y)
plt.savefig("Zadanie_2_1\\a.png")
# <b>Task 2.1.b)<b>
# In[102]:
# Cosine, sawtooth and square waves evaluated on the same time axis.
a = np.arange(0, 36.2, 0.2)
y1 = np.cos(a)
y2 = signal.sawtooth(a)
y3 = signal.square(a)
fig, plots = plt.subplots(3, 1, figsize= (12,8), dpi= 100)
# Shared axis limits, ticks and grid for all three subplots.
for plot in plots:
    plot.set_yticks(np.arange(-1, 1.1, 0.5))
    plot.set_xticks(np.arange(0, 40, 4))
    plot.grid(True)
    plot.set_ylim(-1.1, 1.1)
    plot.set_xlim(a[0], a[-1])
plots[0].set_title("Cosinus")
plots[0].plot(a, y1)
plots[1].set_title("Piłokształtny")
plots[1].plot(a, y2)
plots[2].set_title("Prostokątny")
plots[2].plot(a, y3)
plt.subplots_adjust(hspace=1)
plt.savefig("Zadanie_2_1\\b.png")
# <b>Task 2.1.c)<b>
# In[101]:
# 200 samples of zero-mean Gaussian noise with variance 0.5.
x = np.arange(0, 200)
y = np.random.normal(0, np.sqrt(0.5), 200)
plt.figure(figsize=(12,8), dpi= 100)
plt.title("<NAME>")
plt.xlabel("Numer Próbki")
plt.ylabel("Wartość Chwilowa")
plt.plot(x, y)
plt.savefig("Zadanie_2_1\\c.png")
# <b>Zadanie 2.2<b>
# In[115]:
def PlotSignal(plot, A, f, phi, fs, justReturnY=False):
    """Sample A*sin((phi + 2*pi*t)*f) over the first 3 ms and draw it.

    plot: matplotlib axes to draw on (unused when justReturnY is True)
    A: amplitude; f: frequency [Hz]; phi: phase term [rad]; fs: sampling rate [Hz]
    justReturnY: when True, skip plotting and return the sample values instead
    """
    sample_period = 1 / fs
    t = np.arange(0, 0.003 + sample_period, sample_period)
    samples = A * np.sin((phi + t * 2 * PI) * f)
    if justReturnY:
        return samples
    plot.set_title(f"A: {A}, f: {f/1000}kHz, phi: {phi}rad, fs: {fs/1000}kHz")
    plot.plot(t, samples)
fig, plots = plt.subplots(3, 1, figsize= (12,8), dpi= 100)
maxA = 1
# Common formatting: y range just beyond +/-maxA, x range over the 3 ms window.
for plot in plots:
    plot.set_yticks(np.arange(-maxA, maxA + .1, 1))
    plot.set_xticks(np.arange(0, 0.003 + .0005, 0.0005))
    plot.grid(True)
    plot.set_ylim(-maxA - .1, maxA + .1)
    plot.set_xlim(0, 0.003)
# The same 1 kHz sine at three phase offsets (0, 0.5, 1 rad).
PlotSignal(plots[0], 1, 1000, 0, 80000)
PlotSignal(plots[1], 1, 1000, 0.5, 80000)
PlotSignal(plots[2], 1, 1000, 1, 80000)
plt.subplots_adjust(hspace=1)
plt.savefig("Zadanie_2_2\\phi.png")
# Audible check of the zero-phase signal; note the samples were generated
# at 80 kHz but are played back at 44.1 kHz.
ipd.Audio(PlotSignal(plots[0], 1, 1000, 0, 80000, True), rate=44100)
# <b>Zadanie 2.3<b>
# In[119]:
def PlotSignal(plot, A, f, phi, fs, justReturnY=False):
    """Sample A*sin((phi + 2*pi*t)*f) over the first 7 ms and draw it with markers.

    plot: matplotlib axes to draw on (unused when justReturnY is True)
    A: amplitude; f: frequency [Hz]; phi: phase term [rad]; fs: sampling rate [Hz]
    justReturnY: when True, skip plotting and return the sample values instead
    """
    step = 1 / fs
    times = np.arange(0, 0.007 + step, step)
    values = A * np.sin((phi + times * 2 * PI) * f)
    if justReturnY:
        return values
    plot.set_title(f"A: {A}, f: {f/1000}kHz, phi: {phi}rad, fs: {fs/1000}kHz")
    # 'D-' marks every sample with a diamond so sparse sampling is visible.
    plot.plot(times, values, 'D-')
fig, plots = plt.subplots(3, 1, figsize= (12,8), dpi= 100)
maxA = 1
# Common formatting over the 7 ms window.
for plot in plots:
    plot.set_yticks(np.arange(-maxA, maxA + .1, 1))
    plot.set_xticks(np.arange(0, 0.007 + .0005, 0.0005))
    plot.grid(True)
    plot.set_ylim(-maxA - .1, maxA + .1)
    plot.set_xlim(0, 0.007)
# The same 1 kHz sine sampled at decreasing rates to show the effect of
# approaching (2 kHz) and dropping below the Nyquist rate (1.1 kHz).
PlotSignal(plots[0], 1, 1000, 0, 8000)
PlotSignal(plots[1], 1, 1000, 0, 2000)
PlotSignal(plots[2], 1, 1000, 0, 1100)
plt.subplots_adjust(hspace=1)
plt.savefig("Zadanie_2_3\\fs.png")
# <b>Zadanie 2.4<b>
# In[81]:
import wave
SNR = 3
data = wave.open("Zadanie_2_4\\Nagranie.wav")
# NOTE(review): the frame rate is doubled here -- presumably to compensate
# for how the raw frame buffer is interpreted below; confirm against the
# recording's channel count / sample width.
framerate = data.getframerate()*2
recording = np.frombuffer(data.readframes(-1), dtype = "int16") /2000
recording_reversed = np.flip(recording)
x = np.arange(0, recording.size/framerate, 1/framerate)
noise = np.random.normal(0, 1, recording.size)
# This should be done with the decibel formula; if anyone wants to do that,
# it would be nice if they made a pull request.
mixed = recording + noise/(5*SNR)
fig, plots = plt.subplots(4, 1, figsize= (8,8), dpi= 100)
# One-second x ticks over the full recording length.
for plot in plots:
    plot.set_xticks(np.arange(0, x[-1], 1))
    plot.grid(True)
    plot.set_xlim(0, x[-1])
plots[0].set_title("Mowa")
plots[0].set_xlabel("Czas [s]")
plots[0].plot(x, recording, color="orange")
plots[1].set_title("Mowa - odwrócona")
plots[1].set_xlabel("Czas [s]")
plots[1].plot(x, recording_reversed, color="orange")
plots[2].set_title("Szum")
plots[2].set_xlabel("Czas [s]")
plots[2].plot(x, noise, color="orange")
plots[3].set_title(f"Mowa + szum, SNR: {SNR}dB")
plots[3].set_xlabel("Czas [s]")
plots[3].plot(x, mixed, color="orange")
plt.subplots_adjust(hspace=1)
plt.savefig("Zadanie_2_4\\plots.png")
ipd.Audio(mixed, rate=framerate)
# <b>Zadanie 2.5<b>
# In[27]:
from math import sin
def generate_sound(frequency, lenght = 0.5, samplerate = 44100):
    """Synthesize a sine tone of `frequency` Hz lasting `lenght` seconds.

    A 500-sample linear fade-out is appended after the requested duration so
    notes do not end with an audible click. Returns a list of float samples.
    (The `lenght` spelling is kept for backward compatibility with callers.)
    """
    n_body = int(lenght * samplerate)
    samples = [sin(i * 2 * PI * frequency / samplerate) for i in range(n_body)]
    # Continue the waveform while scaling it linearly down to (almost) zero.
    for a, i in enumerate(range(n_body, n_body + 500)):
        samples.append(sin(i * 2 * PI * frequency / samplerate) * (500 - a) / 500)
    return samples
# Note frequencies in Hz (taken from the internet); ' ' encodes a rest (0 Hz).
Notes = {
    ' ': 0.0,
    'C': 261.6,
    'D': 293.7,
    'E': 329.6,
    'F': 349.2,
    'G': 392.0,
    'A': 440.0,
    'B': 493.9,
}
def play_notes(notesToPlay, samplerate=44100):
    """Render a sequence of [note_name, duration_seconds] pairs into one sample list.

    Unknown note names (e.g. ' ') map to 0.0 Hz, i.e. silence.
    Fix: `samplerate` is now forwarded to generate_sound; previously the
    parameter was accepted but silently ignored, so non-default sample
    rates had no effect. The default (44100) behaves exactly as before.
    """
    melody = []
    for note in list(notesToPlay):
        melody.extend(generate_sound(Notes.get(note[0], 0.0), note[1], samplerate))
    return melody
# A short two-phrase melody encoded as [note name, duration in seconds] pairs;
# [' ', 0.4] is a rest separating the phrases.
catSong = [
    ['G', 0.4],
    ['E', 0.4],
    ['E', 0.4],
    ['F', 0.4],
    ['D', 0.4],
    ['D', 0.4],
    ['C', 0.2],
    ['E', 0.2],
    ['G', 0.8],
    [' ', 0.4],
    ['G', 0.4],
    ['E', 0.4],
    ['E', 0.4],
    ['F', 0.4],
    ['D', 0.4],
    ['D', 0.4],
    ['C', 0.2],
    ['E', 0.2],
    ['C', 0.8],
]
melody = play_notes(catSong)
# Play the rendered melody inline in the notebook.
ipd.Audio(melody, rate=44100)
# In[ ]:
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.sin",
"numpy.arange",
"numpy.flip",
"wave.open",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"IPython.display.Audio",
"scipy.signal.sawtooth",
"numpy.random.normal",
"matplotlib.pyplot.savefig",
"scipy.signal.square",
"numpy.cos",
... | [((252, 288), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)', 'dpi': '(100)'}), '(figsize=(12, 4), dpi=100)\n', (262, 288), True, 'import matplotlib.pyplot as plt\n'), ((290, 349), 'matplotlib.pyplot.title', 'plt.title', (['"""Impuls oraz impuls przesunięty o N = 40 próbek"""'], {}), "('Impuls oraz impuls przesunięty o N = 40 próbek')\n", (299, 349), True, 'import matplotlib.pyplot as plt\n'), ((350, 378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wartość Próbki"""'], {}), "('Wartość Próbki')\n", (360, 378), True, 'import matplotlib.pyplot as plt\n'), ((379, 402), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Nr Próbki"""'], {}), "('Nr Próbki')\n", (389, 402), True, 'import matplotlib.pyplot as plt\n'), ((426, 459), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Zadanie_2_1\\\\a.png"""'], {}), "('Zadanie_2_1\\\\a.png')\n", (437, 459), True, 'import matplotlib.pyplot as plt\n'), ((503, 526), 'numpy.arange', 'np.arange', (['(0)', '(36.2)', '(0.2)'], {}), '(0, 36.2, 0.2)\n', (512, 526), True, 'import numpy as np\n'), ((532, 541), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (538, 541), True, 'import numpy as np\n'), ((547, 565), 'scipy.signal.sawtooth', 'signal.sawtooth', (['a'], {}), '(a)\n', (562, 565), False, 'from scipy import signal\n'), ((571, 587), 'scipy.signal.square', 'signal.square', (['a'], {}), '(a)\n', (584, 587), False, 'from scipy import signal\n'), ((602, 646), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(12, 8)', 'dpi': '(100)'}), '(3, 1, figsize=(12, 8), dpi=100)\n', (614, 646), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1030), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)'}), '(hspace=1)\n', (1020, 1030), True, 'import matplotlib.pyplot as plt\n'), ((1032, 1065), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Zadanie_2_1\\\\b.png"""'], {}), "('Zadanie_2_1\\\\b.png')\n", (1043, 1065), True, 'import matplotlib.pyplot as plt\n'), 
((1109, 1126), 'numpy.arange', 'np.arange', (['(0)', '(200)'], {}), '(0, 200)\n', (1118, 1126), True, 'import numpy as np\n'), ((1171, 1207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)', 'dpi': '(100)'}), '(figsize=(12, 8), dpi=100)\n', (1181, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1208, 1227), 'matplotlib.pyplot.title', 'plt.title', (['"""<NAME>"""'], {}), "('<NAME>')\n", (1217, 1227), True, 'import matplotlib.pyplot as plt\n'), ((1228, 1254), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Numer Próbki"""'], {}), "('Numer Próbki')\n", (1238, 1254), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1285), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Wartość Chwilowa"""'], {}), "('Wartość Chwilowa')\n", (1265, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1300), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1294, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1301, 1334), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Zadanie_2_1\\\\c.png"""'], {}), "('Zadanie_2_1\\\\c.png')\n", (1312, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1675, 1719), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(12, 8)', 'dpi': '(100)'}), '(3, 1, figsize=(12, 8), dpi=100)\n', (1687, 1719), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2102), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)'}), '(hspace=1)\n', (2092, 2102), True, 'import matplotlib.pyplot as plt\n'), ((2104, 2139), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Zadanie_2_2\\\\phi.png"""'], {}), "('Zadanie_2_2\\\\phi.png')\n", (2115, 2139), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2599), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(12, 8)', 'dpi': '(100)'}), '(3, 1, figsize=(12, 8), dpi=100)\n', (2567, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2948, 2977), 'matplotlib.pyplot.subplots_adjust', 
'plt.subplots_adjust', ([], {'hspace': '(1)'}), '(hspace=1)\n', (2967, 2977), True, 'import matplotlib.pyplot as plt\n'), ((2979, 3013), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Zadanie_2_3\\\\fs.png"""'], {}), "('Zadanie_2_3\\\\fs.png')\n", (2990, 3013), True, 'import matplotlib.pyplot as plt\n'), ((3078, 3116), 'wave.open', 'wave.open', (['"""Zadanie_2_4\\\\Nagranie.wav"""'], {}), "('Zadanie_2_4\\\\Nagranie.wav')\n", (3087, 3116), False, 'import wave\n'), ((3243, 3261), 'numpy.flip', 'np.flip', (['recording'], {}), '(recording)\n', (3250, 3261), True, 'import numpy as np\n'), ((3267, 3322), 'numpy.arange', 'np.arange', (['(0)', '(recording.size / framerate)', '(1 / framerate)'], {}), '(0, recording.size / framerate, 1 / framerate)\n', (3276, 3322), True, 'import numpy as np\n'), ((3328, 3366), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'recording.size'], {}), '(0, 1, recording.size)\n', (3344, 3366), True, 'import numpy as np\n'), ((3544, 3587), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(8, 8)', 'dpi': '(100)'}), '(4, 1, figsize=(8, 8), dpi=100)\n', (3556, 3587), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4183), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(1)'}), '(hspace=1)\n', (4173, 4183), True, 'import matplotlib.pyplot as plt\n'), ((4184, 4221), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Zadanie_2_4\\\\plots.png"""'], {}), "('Zadanie_2_4\\\\plots.png')\n", (4195, 4221), True, 'import matplotlib.pyplot as plt\n'), ((4222, 4254), 'IPython.display.Audio', 'ipd.Audio', (['mixed'], {'rate': 'framerate'}), '(mixed, rate=framerate)\n', (4231, 4254), True, 'import IPython.display as ipd\n'), ((5371, 5400), 'IPython.display.Audio', 'ipd.Audio', (['melody'], {'rate': '(44100)'}), '(melody, rate=44100)\n', (5380, 5400), True, 'import IPython.display as ipd\n'), ((1151, 1163), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (1158, 1163), True, 'import numpy 
as np\n'), ((1449, 1477), 'numpy.arange', 'np.arange', (['(0)', '(0.003 + Ts)', 'Ts'], {}), '(0, 0.003 + Ts, Ts)\n', (1458, 1477), True, 'import numpy as np\n'), ((2323, 2351), 'numpy.arange', 'np.arange', (['(0)', '(0.007 + Ts)', 'Ts'], {}), '(0, 0.007 + Ts, Ts)\n', (2332, 2351), True, 'import numpy as np\n'), ((688, 711), 'numpy.arange', 'np.arange', (['(-1)', '(1.1)', '(0.5)'], {}), '(-1, 1.1, 0.5)\n', (697, 711), True, 'import numpy as np\n'), ((733, 752), 'numpy.arange', 'np.arange', (['(0)', '(40)', '(4)'], {}), '(0, 40, 4)\n', (742, 752), True, 'import numpy as np\n'), ((1490, 1520), 'numpy.sin', 'np.sin', (['((phi + x * 2 * PI) * f)'], {}), '((phi + x * 2 * PI) * f)\n', (1496, 1520), True, 'import numpy as np\n'), ((1771, 1802), 'numpy.arange', 'np.arange', (['(-maxA)', '(maxA + 0.1)', '(1)'], {}), '(-maxA, maxA + 0.1, 1)\n', (1780, 1802), True, 'import numpy as np\n'), ((1823, 1859), 'numpy.arange', 'np.arange', (['(0)', '(0.003 + 0.0005)', '(0.0005)'], {}), '(0, 0.003 + 0.0005, 0.0005)\n', (1832, 1859), True, 'import numpy as np\n'), ((2364, 2394), 'numpy.sin', 'np.sin', (['((phi + x * 2 * PI) * f)'], {}), '((phi + x * 2 * PI) * f)\n', (2370, 2394), True, 'import numpy as np\n'), ((2651, 2682), 'numpy.arange', 'np.arange', (['(-maxA)', '(maxA + 0.1)', '(1)'], {}), '(-maxA, maxA + 0.1, 1)\n', (2660, 2682), True, 'import numpy as np\n'), ((2703, 2739), 'numpy.arange', 'np.arange', (['(0)', '(0.007 + 0.0005)', '(0.0005)'], {}), '(0, 0.007 + 0.0005, 0.0005)\n', (2712, 2739), True, 'import numpy as np\n'), ((3629, 3651), 'numpy.arange', 'np.arange', (['(0)', 'x[-1]', '(1)'], {}), '(0, x[-1], 1)\n', (3638, 3651), True, 'import numpy as np\n'), ((4464, 4504), 'math.sin', 'sin', (['(i * 2 * PI * frequency / samplerate)'], {}), '(i * 2 * PI * frequency / samplerate)\n', (4467, 4504), False, 'from math import sin\n'), ((4559, 4599), 'math.sin', 'sin', (['(i * 2 * PI * frequency / samplerate)'], {}), '(i * 2 * PI * frequency / samplerate)\n', (4562, 4599), False, 
'from math import sin\n')] |
import os
import numpy as np
import keras
from keras.callbacks import EarlyStopping
from sklearn.utils import shuffle
from src.util import load_wm_model_from_file, save_wm_model_to_file, \
load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty
from src.models import get_simple_model
from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback
def embed_wm(
        model,
        epochs,
        key_length,
        train_data,
        trigger_set,
        wm_boost_factor=1,
        batchsize=64,
        min_delta=0.002,
        patience=2,
        test_data=None,
        additional_callbacks=None,
        cache_embed_wm=None,
        verbose=False):
    """Embed a backdoor-based watermark into a model.

    model: the model to embed the watermark into
    epochs: number of epochs to train for the embedding
    key_length: how many watermark images to train on
    train_data: (X, y) data the model owner has access to
    trigger_set: (X, y) pool to draw the watermark key from
    wm_boost_factor: how often each watermark image is repeated
    min_delta, patience: early-stopping settings on training accuracy
    test_data: optional global validation data
    additional_callbacks: extra callbacks used for verification
    cache_embed_wm: optional filepath for loading/saving a cached result
    :return: (watermarked model, history, (trigger images, trigger labels))
    """
    additional_callbacks = additional_callbacks if additional_callbacks is not None else []
    # Short-circuit if a previously embedded model is cached on disk.
    cached_model, cached_history, cached_trigger = load_wm_model_from_file(
        model, cache_embed_wm)
    if cached_model is not None:
        print(" Skipping embedding of wm and use a cached entry instead")
        return cached_model, cached_history, (cached_trigger[0],
                                              cached_trigger[1])
    if verbose:
        print("[1/2] Mixing and boosting trigger set with training data")
    # Randomly pick key_length examples from the trigger set as the key.
    trig_x, trig_y = shuffle(*trigger_set)
    trig_x, trig_y = trig_x[:key_length], trig_y[:key_length]
    # Training set = owner data + the (boosted) watermark key, reshuffled.
    X_train = np.vstack((train_data[0],
                         np.repeat(trig_x, wm_boost_factor, axis=0)))
    y_train = np.vstack((train_data[1],
                         np.repeat(trig_y, wm_boost_factor, axis=0)))
    X_train, y_train = shuffle(X_train, y_train)
    if verbose:
        print("[2/2] Training the model and embedding the watermark")
    # Track watermark retention as an extra validation set during training.
    history_wm = AdditionalValidationSets([(trig_x, trig_y, 'watermark')])
    time_hist = TimeHistory()
    # Stop once training accuracy improves by less than min_delta per step.
    es = EarlyStopping(monitor='acc',
                       mode='max',
                       min_delta=min_delta,
                       patience=patience,
                       restore_best_weights=True)
    model.fit(X_train,
              y_train,
              batch_size=batchsize,
              epochs=epochs,
              validation_data=test_data,
              callbacks=[time_hist, *additional_callbacks, history_wm, es])
    history_wm.history = merge_histories(
        [history_wm, time_hist, *additional_callbacks, es])
    if cache_embed_wm is not None:
        print("Saving the model to the cache to \'" + cache_embed_wm + "\'")
        save_wm_model_to_file(cache_embed_wm, model, history_wm, (trig_x, trig_y))
    return model, history_wm, (trig_x, trig_y)
def blackbox_attack(
        surrogate_model,  # The surrogate model to train
        epochs_surr,  # How many epochs to train
        train_data,  # Data the attacker has access to
        trigger_set,  # The trigger set to check for wm_retention
        batchsize=32,
        min_delta=0.002,  # Minimal improvement per step
        patience=2,
        test_data=None,  # Task validation data
        additional_callbacks=None,  # Additional callbacks for the verification
        cache_surr_model=None,  # Filepath to cached model
        verbose=True,
        cluster=False):
    """ Trains a surrogate model on attacker-held data (blackbox /
    model-extraction attack) while tracking watermark retention via an
    extra validation set.
    `train_data` is either an (X, y) pair or a keras Sequence generator.
    @:return {Surrogate model, history}
    """
    if additional_callbacks is None:
        additional_callbacks = []
    # Reuse a previously trained surrogate if a cache file exists.
    cached_model, cached_history = load_blackbox_model_from_file(
        surrogate_model, cache_surr_model)
    if cached_model is not None:
        print(
            "    Skipping training surrogate model and using a cached entry instead"
        )
        return cached_model, cached_history
    if verbose:
        print("[1/2] Obtaining the training data")
    # Plain (X, y) data; a keras Sequence generator is handled further below.
    if not isinstance(train_data, keras.utils.Sequence):
        train_X, train_y_pred = train_data
    if verbose:
        print("[2/2] Training the surrogate model")
    # Watermark retention is tracked as an additional validation set.
    # NOTE(review): `cluster` toggles between a packed trigger_set object and
    # a dict with a "keys" entry — confirm the expected shape at the callers.
    if cluster:
        all_history = AdditionalValidationSets([(trigger_set, 'watermark')])
    else:
        all_history = AdditionalValidationSets([
            (trigger_set["keys"][0], trigger_set["keys"][1], 'watermark')
        ])
    time_hist = TimeHistory()
    es = EarlyStopping(monitor='acc',
                       mode='max',
                       min_delta=min_delta,
                       patience=patience,
                       restore_best_weights=True)  # 0.5% improvement per step
    # Single callbacks list and history merge for both fit paths
    # (previously duplicated verbatim in each branch).
    callbacks = [time_hist, *additional_callbacks, all_history, es]
    if not isinstance(train_data, keras.utils.Sequence):
        surrogate_model.fit(
            train_X,
            train_y_pred,
            batch_size=batchsize,
            epochs=epochs_surr,
            validation_data=test_data,
            callbacks=callbacks)
    else:
        surrogate_model.fit_generator(
            train_data,
            epochs=epochs_surr,
            validation_data=test_data,
            callbacks=callbacks)
    all_history.history = merge_histories(
        [all_history, time_hist, *additional_callbacks, es])
    if cache_surr_model is not None:
        print("Saving the model to the cache to \'" + cache_surr_model + "\'")
        save_blackbox_model_to_file(cache_surr_model, surrogate_model,
                                    all_history)
    return surrogate_model, all_history
def whitebox_attack(
        wm_model,  # The watermarked model
        load_model_func,  # Function to load the model
        load_func_kwargs,  # Function parameters for loading the model before the attack
        load_func_kwargs2,  # Function parameters for loading the model after the attack
        trigger_set,  # The trigger set to check for wm_retention
        train_data,  # Data the attacker has access to
        test_data=None,  # Global validation data
        additional_callbacks=None,  # Additional callbacks for the verification
        epochs_reg=7,  # Epochs for regularization
        early_stopping_wm=0.1,  # Which watermark retention to stop the training at
        patience=2,
        batchsize=32,
        cache_surr_model=None,
        verbose=False,
        cluster=False):
    """ Retrains a watermarked model with a changed regularization setting
    (whitebox attack) while monitoring how much of the watermark survives.
    @:return {Surrogate model, history_reg, history_surr}
    """
    if additional_callbacks is None:
        additional_callbacks = []
    # Reuse a previously regularized model if a cache file exists.
    if cache_surr_model is not None:
        surrogate_model = load_model_func(**load_func_kwargs2)
        cached_model, cached_history = load_blackbox_model_from_file(
            surrogate_model, cache_surr_model, prefix="wtbx")
        if cached_model is not None:
            print(
                "    Skipping training regularized model and using a cached entry instead"
            )
            return cached_model, cached_history
        print("No cached model found.. Training model from scratch")
    if verbose:
        print("[1/2] Loading the data")
    # Label the attacker's data with the watermarked model's predictions.
    train_X, true_labels = train_data
    train_y_pred = wm_model.predict(train_X)
    # Transfer the trained weights into a freshly built model (which carries
    # the attacker's regularization factor) via a randomly named weight file.
    filename = "model_weights" + str(np.random.randint(low=0,
                                                       high=999999)) + ".tmp"
    wm_model.save_weights(filename)
    surrogate_model = load_model_func(**load_func_kwargs)
    surrogate_model.load_weights(filename)
    try:
        os.remove(filename)
    # Narrowed from a bare `except:` that also swallowed KeyboardInterrupt
    # and SystemExit; only filesystem errors are expected here.
    except OSError:
        print("[WARNING] Could not find and remove {}".format(filename))
    if verbose:
        print("[2/2] Regularizing the surrogate model")
    # Train on a subset of the data with some epochs, tracking watermark
    # retention as an additional validation set.
    if cluster:
        all_history = AdditionalValidationSets([(trigger_set, 'watermark')])
    else:
        all_history = AdditionalValidationSets([
            (trigger_set["keys"][0], trigger_set["keys"][1], 'watermark')
        ])
    es_wm = EarlyStoppingByWatermarkRet(value=early_stopping_wm,
                                        patience=patience)
    time_hist = TimeHistory()
    surrogate_model.fit(
        train_X,
        train_y_pred,
        batch_size=batchsize,
        epochs=epochs_reg,
        validation_data=test_data,
        callbacks=[time_hist, *additional_callbacks, all_history, es_wm])
    all_history.history = merge_histories(
        [all_history, time_hist, *additional_callbacks, es_wm])
    # Round-trip the regularized weights back into the original architecture.
    # NOTE(review): the fixed name "model_weights.tmp" can collide when
    # several attacks run in the same working directory — confirm before
    # running in parallel.
    surrogate_model.save_weights("model_weights.tmp")
    wm_model = load_model_func(**load_func_kwargs2)
    wm_model.load_weights("model_weights.tmp")
    os.remove("model_weights.tmp")
    if cache_surr_model is not None:
        print("Saving the model to the cache to \'" + cache_surr_model + "\'")
        save_blackbox_model_to_file(cache_surr_model,
                                    surrogate_model,
                                    all_history,
                                    prefix="wtbx")
    return wm_model, all_history
def usnx_property_inference(epochs,
                            train_data,
                            test_data=([], []),
                            model=None,
                            batchsize=32):
    """ Performs a property inference attack on the data.
    `train_data` is an (X, y) pair; `test_data` is passed straight through
    to `model.fit` as validation data.
    @:return {Trained model, history}
    """
    # Build a fresh model per call: the previous default argument
    # `model=get_simple_model()` was evaluated once at import time, so every
    # call without an explicit model shared (and kept training) one instance.
    if model is None:
        model = get_simple_model()
    X_train, y_train = train_data
    history = model.fit(X_train,
                        y_train,
                        batch_size=batchsize,
                        epochs=epochs,
                        validation_data=test_data)
    return model, history
| [
"src.models.get_simple_model",
"numpy.repeat",
"src.callbacks.AdditionalValidationSets",
"sklearn.utils.shuffle",
"src.callbacks.EarlyStoppingByWatermarkRet",
"src.util.load_blackbox_model_from_file",
"os.remove",
"src.util.save_blackbox_model_to_file",
"numpy.random.randint",
"keras.callbacks.Ear... | [((1370, 1416), 'src.util.load_wm_model_from_file', 'load_wm_model_from_file', (['model', 'cache_embed_wm'], {}), '(model, cache_embed_wm)\n', (1393, 1416), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((1919, 1940), 'sklearn.utils.shuffle', 'shuffle', (['*trigger_set'], {}), '(*trigger_set)\n', (1926, 1940), False, 'from sklearn.utils import shuffle\n'), ((2535, 2560), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2542, 2560), False, 'from sklearn.utils import shuffle\n'), ((2666, 2719), 'src.callbacks.AdditionalValidationSets', 'AdditionalValidationSets', (["[(wm_x, wm_y, 'watermark')]"], {}), "([(wm_x, wm_y, 'watermark')])\n", (2690, 2719), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((2736, 2749), 'src.callbacks.TimeHistory', 'TimeHistory', ([], {}), '()\n', (2747, 2749), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((2759, 2871), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""acc"""', 'mode': '"""max"""', 'min_delta': 'min_delta', 'patience': 'patience', 'restore_best_weights': '(True)'}), "(monitor='acc', mode='max', min_delta=min_delta, patience=\n patience, restore_best_weights=True)\n", (2772, 2871), False, 'from keras.callbacks import EarlyStopping\n'), ((3241, 3308), 'src.util.merge_histories', 'merge_histories', (['[history_wm, time_hist, *additional_callbacks, es]'], {}), '([history_wm, time_hist, *additional_callbacks, es])\n', (3256, 3308), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((4294, 4358), 
'src.util.load_blackbox_model_from_file', 'load_blackbox_model_from_file', (['surrogate_model', 'cache_surr_model'], {}), '(surrogate_model, cache_surr_model)\n', (4323, 4358), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((5098, 5111), 'src.callbacks.TimeHistory', 'TimeHistory', ([], {}), '()\n', (5109, 5111), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((5121, 5233), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""acc"""', 'mode': '"""max"""', 'min_delta': 'min_delta', 'patience': 'patience', 'restore_best_weights': '(True)'}), "(monitor='acc', mode='max', min_delta=min_delta, patience=\n patience, restore_best_weights=True)\n", (5134, 5233), False, 'from keras.callbacks import EarlyStopping\n'), ((8861, 8932), 'src.callbacks.EarlyStoppingByWatermarkRet', 'EarlyStoppingByWatermarkRet', ([], {'value': 'early_stopping_wm', 'patience': 'patience'}), '(value=early_stopping_wm, patience=patience)\n', (8888, 8932), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((8989, 9002), 'src.callbacks.TimeHistory', 'TimeHistory', ([], {}), '()\n', (9000, 9002), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((9259, 9330), 'src.util.merge_histories', 'merge_histories', (['[all_history, time_hist, *additional_callbacks, es_wm]'], {}), '([all_history, time_hist, *additional_callbacks, es_wm])\n', (9274, 9330), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((9498, 9528), 'os.remove', 'os.remove', (['"""model_weights.tmp"""'], {}), 
"('model_weights.tmp')\n", (9507, 9528), False, 'import os\n'), ((10047, 10065), 'src.models.get_simple_model', 'get_simple_model', ([], {}), '()\n', (10063, 10065), False, 'from src.models import get_simple_model\n'), ((3439, 3509), 'src.util.save_wm_model_to_file', 'save_wm_model_to_file', (['cache_embed_wm', 'model', 'history_wm', '(wm_x, wm_y)'], {}), '(cache_embed_wm, model, history_wm, (wm_x, wm_y))\n', (3460, 3509), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((4883, 4937), 'src.callbacks.AdditionalValidationSets', 'AdditionalValidationSets', (["[(trigger_set, 'watermark')]"], {}), "([(trigger_set, 'watermark')])\n", (4907, 4937), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((4970, 5063), 'src.callbacks.AdditionalValidationSets', 'AdditionalValidationSets', (["[(trigger_set['keys'][0], trigger_set['keys'][1], 'watermark')]"], {}), "([(trigger_set['keys'][0], trigger_set['keys'][1],\n 'watermark')])\n", (4994, 5063), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((5694, 5762), 'src.util.merge_histories', 'merge_histories', (['[all_history, time_hist, *additional_callbacks, es]'], {}), '([all_history, time_hist, *additional_callbacks, es])\n', (5709, 5762), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((6025, 6093), 'src.util.merge_histories', 'merge_histories', (['[all_history, time_hist, *additional_callbacks, es]'], {}), '([all_history, time_hist, *additional_callbacks, es])\n', (6040, 6093), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, 
save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((6232, 6307), 'src.util.save_blackbox_model_to_file', 'save_blackbox_model_to_file', (['cache_surr_model', 'surrogate_model', 'all_history'], {}), '(cache_surr_model, surrogate_model, all_history)\n', (6259, 6307), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((7470, 7549), 'src.util.load_blackbox_model_from_file', 'load_blackbox_model_from_file', (['surrogate_model', 'cache_surr_model'], {'prefix': '"""wtbx"""'}), "(surrogate_model, cache_surr_model, prefix='wtbx')\n", (7499, 7549), False, 'from src.util import load_wm_model_from_file, save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((8381, 8400), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (8390, 8400), False, 'import os\n'), ((8650, 8704), 'src.callbacks.AdditionalValidationSets', 'AdditionalValidationSets', (["[(trigger_set, 'watermark')]"], {}), "([(trigger_set, 'watermark')])\n", (8674, 8704), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((8737, 8830), 'src.callbacks.AdditionalValidationSets', 'AdditionalValidationSets', (["[(trigger_set['keys'][0], trigger_set['keys'][1], 'watermark')]"], {}), "([(trigger_set['keys'][0], trigger_set['keys'][1],\n 'watermark')])\n", (8761, 8830), False, 'from src.callbacks import AdditionalValidationSets, TimeHistory, EarlyStoppingByWatermarkRet, ShowErrorsCallback\n'), ((9654, 9748), 'src.util.save_blackbox_model_to_file', 'save_blackbox_model_to_file', (['cache_surr_model', 'surrogate_model', 'all_history'], {'prefix': '"""wtbx"""'}), "(cache_surr_model, surrogate_model, all_history,\n prefix='wtbx')\n", (9681, 9748), False, 'from src.util import load_wm_model_from_file, 
save_wm_model_to_file, load_blackbox_model_from_file, save_blackbox_model_to_file, merge_histories, predict_with_uncertainty\n'), ((2186, 2226), 'numpy.repeat', 'np.repeat', (['wm_x', 'wm_boost_factor'], {'axis': '(0)'}), '(wm_x, wm_boost_factor, axis=0)\n', (2195, 2226), True, 'import numpy as np\n'), ((2369, 2409), 'numpy.repeat', 'np.repeat', (['wm_y', 'wm_boost_factor'], {'axis': '(0)'}), '(wm_y, wm_boost_factor, axis=0)\n', (2378, 2409), True, 'import numpy as np\n'), ((8124, 8161), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(999999)'}), '(low=0, high=999999)\n', (8141, 8161), True, 'import numpy as np\n')] |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
from sklearn.model_selection import train_test_split
def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):
    """
    Draw the covariance confidence ellipse of *x* and *y* onto *ax*.
    Parameters
    ----------
    x, y : array-like, shape (n, )
        The two samples whose joint spread is visualised.
    ax : matplotlib.axes.Axes
        Axes the ellipse patch is added to.
    n_std : float
        Half-width of the ellipse in standard deviations.
    **kwargs
        Forwarded to `~matplotlib.patches.Ellipse`
    Returns
    -------
    matplotlib.patches.Ellipse
    """
    if x.size != y.size:
        raise ValueError("x and y must be the same size")
    covariance = np.cov(x, y)
    corr = covariance[0, 1] / np.sqrt(covariance[0, 0] * covariance[1, 1])
    # Radii of the unit ellipse along its principal axes — a special-case
    # eigenvalue formula valid for two-dimensional data.
    radius_x = np.sqrt(1 + corr)
    radius_y = np.sqrt(1 - corr)
    patch = Ellipse(
        (0, 0),
        width=radius_x * 2,
        height=radius_y * 2,
        facecolor=facecolor,
        **kwargs,
    )
    # Stretch the unit ellipse by n_std sample standard deviations per axis
    # and centre it on the sample means.
    std_x = np.sqrt(covariance[0, 0]) * n_std
    std_y = np.sqrt(covariance[1, 1]) * n_std
    affine = (
        transforms.Affine2D()
        .rotate_deg(45)
        .scale(std_x, std_y)
        .translate(np.mean(x), np.mean(y))
    )
    patch.set_transform(affine + ax.transData)
    return ax.add_patch(patch)
# --- Read the recorded kernel CSVs and assemble per-coordinate dataframes ---
length = 3100
coord_list = ['all', 'x', 'y', 'z']
# One frame per coordinate view; each CSV contributes one column whose last
# entry is the robot label (first 7 characters of the file name).
df_UR5_all = pd.DataFrame()
df_UR5_x = pd.DataFrame()
df_UR5_y = pd.DataFrame()
df_UR5_z = pd.DataFrame()
home = "data/Kernels/5_7_2022"
for folder in os.listdir(home):
    # if "_ex" in folder:
    if not os.path.isdir(f"{home}/{folder}"):
        continue
    for file in os.listdir(f"{home}/{folder}"):
        if '.csv' not in file:
            continue
        df = pd.read_csv(f"{home}/{folder}/{file}")
        # Robot identifier, e.g. "UR-5e-1" (avoid shadowing builtin `type`).
        robot_label = pd.Series(file[:7])
        X = df.iloc[:length, 1]
        Y = df.iloc[:length, 2]
        Z = df.iloc[:length, 3]
        # Stack the coordinate samples with the label appended underneath,
        # then add the result as a new column of the running frame.
        df_UR5_all = pd.concat(
            [df_UR5_all, pd.concat([X, Y, Z, robot_label], ignore_index=True)],
            axis=1, ignore_index=True)
        df_UR5_x = pd.concat(
            [df_UR5_x, pd.concat([X, robot_label], ignore_index=True)],
            axis=1, ignore_index=True)
        df_UR5_y = pd.concat(
            [df_UR5_y, pd.concat([Y, robot_label], ignore_index=True)],
            axis=1, ignore_index=True)
        df_UR5_z = pd.concat(
            [df_UR5_z, pd.concat([Z, robot_label], ignore_index=True)],
            axis=1, ignore_index=True)
##################################################################################################
# ________________________________________ OneClass SVM _________________________________________
##################################################################################################
# Per-robot PCA via SVD: project each robot's x-coordinate columns onto the
# principal components of its own data and scatter-plot target vs. outlier.
figure, axes = plt.subplots(2,2)
plt.suptitle('PCA for 2 class data')
for rob in range(1,5):
    n = rob - 1
    # Columns whose label row (last row of df_UR5_x) matches / differs from
    # this robot; the label row itself is dropped via the .tail(2) slice.
    target_data = df_UR5_x.loc[:df_UR5_x.tail(2).index[0], (df_UR5_x.tail(1) == f"UR-5e-{rob}").to_numpy()[0]].transpose().astype("float")
    outlier_data = df_UR5_x.loc[:df_UR5_x.tail(2).index[0], (df_UR5_x.tail(1) != f"UR-5e-{rob}").to_numpy()[0]].transpose().astype("float")
    U, S, V = np.linalg.svd(target_data)
    for name, data in zip(['target', 'outlier'], [target_data, outlier_data]):
        # Project onto the right singular vectors; stores PCS_target /
        # PCS_outlier as module-level names via globals().
        globals()[f"PCS_{name}"] = np.matmul(data, V.transpose())
        c = 'b' if name == 'target' else 'r'
        axes[n//2][n%2].scatter(globals()[f"PCS_{name}"].iloc[:,-2], globals()[f"PCS_{name}"].iloc[:,-1], c = c, label=name)
    #
    # center = [PCS_target.iloc[:,-1].mean(), PCS_target.iloc[:,-2].mean()]
    # axes[n//2][n%2].scatter(center[0], center[1], c="k", s=100, label="center weight")
    # confidence_ellipse(PCS_target.iloc[:,-1], PCS_target.iloc[:,-2], axes[n//2][n%2], n_std=1.96, edgecolor='b')
    axes[n//2][n%2].title.set_text(f"Projection of PCS of UR-5e_{rob}")
    axes[n//2][n%2].legend()
    # axes[n // 2][n % 2].aspect_ratio(1)
#
# explained_variance_ = (S ** 2) / (target_data.shape[0] - 1)
# total_var = explained_variance_.sum()
# explained_variance_ratio_ = sum(explained_variance_[:2] / total_var)
# NOTE(review): everything below runs AFTER the loop, so `rob` and `n` keep
# their last values (rob == 4) — presumably intentional for a single-robot
# train/test demo, but confirm; `U, S, V` are also overwritten here.
X_train, X_test, y_train, y_test = train_test_split(df_UR5_x.head(-1).transpose(), df_UR5_x.tail(1).transpose(), test_size=0.3) # 70% training and 30% test
target_train_data = X_train.iloc[(y_train == f"UR-5e-{rob}").to_numpy()].astype("float")
outlier_train_data = X_train.iloc[(y_train != f"UR-5e-{rob}").to_numpy()].astype("float")
target_test_data = X_test.iloc[(y_test == f"UR-5e-{rob}").to_numpy()].astype("float")
outlier_test_data = X_test.iloc[(y_test != f"UR-5e-{rob}").to_numpy()].astype("float")
# Principal components of the OUTLIER training data this time; all four
# splits are projected into that basis and drawn in one scatter plot.
U, S, V = np.linalg.svd(outlier_train_data)
names = ['target_train_data', 'outlier_train_data','target_test', 'outlier_test']
for name, data in zip(names, [target_train_data, outlier_train_data,target_test_data,outlier_test_data]):
    globals()[f"PCS_{name}"] = np.matmul(data, V.transpose())
    c = 'b' if 'target' in name else 'r'
    m = "*" if "test" in name else "."
    plt.scatter(globals()[f"PCS_{name}"].iloc[:,0], globals()[f"PCS_{name}"].iloc[:,1], c = c, marker =m, label=name)
#
# center = [PCS_target.iloc[:,-1].mean(), PCS_target.iloc[:,-2].mean()]
# axes[n//2][n%2].scatter(center[0], center[1], c="k", s=100, label="center weight")
# confidence_ellipse(PCS_target.iloc[:,-1], PCS_target.iloc[:,-2], axes[n//2][n%2], n_std=1.96, edgecolor='b')
axes[n//2][n%2].title.set_text(f"Projection of PCS of UR-5e_{rob}")
axes[n//2][n%2].legend() | [
"pandas.Series",
"numpy.mean",
"os.listdir",
"numpy.sqrt",
"pandas.DataFrame",
"pandas.read_csv",
"matplotlib.patches.Ellipse",
"os.path.isdir",
"matplotlib.transforms.Affine2D",
"matplotlib.pyplot.subplots",
"numpy.linalg.svd",
"numpy.cov",
"pandas.concat",
"matplotlib.pyplot.suptitle"
] | [((2020, 2036), 'os.listdir', 'os.listdir', (['home'], {}), '(home)\n', (2030, 2036), False, 'import os\n'), ((3367, 3385), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (3379, 3385), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3421), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""PCA for 2 class data"""'], {}), "('PCA for 2 class data')\n", (3397, 3421), True, 'import matplotlib.pyplot as plt\n'), ((5228, 5261), 'numpy.linalg.svd', 'np.linalg.svd', (['outlier_train_data'], {}), '(outlier_train_data)\n', (5241, 5261), True, 'import numpy as np\n'), ((859, 871), 'numpy.cov', 'np.cov', (['x', 'y'], {}), '(x, y)\n', (865, 871), True, 'import numpy as np\n'), ((1039, 1059), 'numpy.sqrt', 'np.sqrt', (['(1 + pearson)'], {}), '(1 + pearson)\n', (1046, 1059), True, 'import numpy as np\n'), ((1079, 1099), 'numpy.sqrt', 'np.sqrt', (['(1 - pearson)'], {}), '(1 - pearson)\n', (1086, 1099), True, 'import numpy as np\n'), ((1114, 1214), 'matplotlib.patches.Ellipse', 'Ellipse', (['(0, 0)'], {'width': '(ell_radius_x * 2)', 'height': '(ell_radius_y * 2)', 'facecolor': 'facecolor'}), '((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2, facecolor=\n facecolor, **kwargs)\n', (1121, 1214), False, 'from matplotlib.patches import Ellipse\n'), ((1444, 1454), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1451, 1454), True, 'import numpy as np\n'), ((1561, 1571), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1568, 1571), True, 'import numpy as np\n'), ((1959, 1973), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1971, 1973), True, 'import pandas as pd\n'), ((2071, 2104), 'os.path.isdir', 'os.path.isdir', (['f"""{home}/{folder}"""'], {}), "(f'{home}/{folder}')\n", (2084, 2104), False, 'import os\n'), ((3757, 3783), 'numpy.linalg.svd', 'np.linalg.svd', (['target_data'], {}), '(target_data)\n', (3770, 3783), True, 'import numpy as np\n'), ((898, 928), 'numpy.sqrt', 'np.sqrt', (['(cov[0, 0] * cov[1, 1])'], {}), '(cov[0, 0] * 
cov[1, 1])\n', (905, 928), True, 'import numpy as np\n'), ((1404, 1422), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (1411, 1422), True, 'import numpy as np\n'), ((1521, 1539), 'numpy.sqrt', 'np.sqrt', (['cov[1, 1]'], {}), '(cov[1, 1])\n', (1528, 1539), True, 'import numpy as np\n'), ((2126, 2156), 'os.listdir', 'os.listdir', (['f"""{home}/{folder}"""'], {}), "(f'{home}/{folder}')\n", (2136, 2156), False, 'import os\n'), ((2210, 2248), 'pandas.read_csv', 'pd.read_csv', (['f"""{home}/{folder}/{file}"""'], {}), "(f'{home}/{folder}/{file}')\n", (2221, 2248), True, 'import pandas as pd\n'), ((2272, 2291), 'pandas.Series', 'pd.Series', (['file[:7]'], {}), '(file[:7])\n', (2281, 2291), True, 'import pandas as pd\n'), ((2443, 2488), 'pandas.concat', 'pd.concat', (['[X, Y, Z, type]'], {'ignore_index': '(True)'}), '([X, Y, Z, type], ignore_index=True)\n', (2452, 2488), True, 'import pandas as pd\n'), ((2518, 2557), 'pandas.concat', 'pd.concat', (['[X, type]'], {'ignore_index': '(True)'}), '([X, type], ignore_index=True)\n', (2527, 2557), True, 'import pandas as pd\n'), ((2587, 2626), 'pandas.concat', 'pd.concat', (['[Y, type]'], {'ignore_index': '(True)'}), '([Y, type], ignore_index=True)\n', (2596, 2626), True, 'import pandas as pd\n'), ((2656, 2695), 'pandas.concat', 'pd.concat', (['[Z, type]'], {'ignore_index': '(True)'}), '([Z, type], ignore_index=True)\n', (2665, 2695), True, 'import pandas as pd\n'), ((2725, 2789), 'pandas.concat', 'pd.concat', (['[df_UR5_all, all_coord_df]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([df_UR5_all, all_coord_df], axis=1, ignore_index=True)\n', (2734, 2789), True, 'import pandas as pd\n'), ((2817, 2877), 'pandas.concat', 'pd.concat', (['[df_UR5_x, x_coord_df]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([df_UR5_x, x_coord_df], axis=1, ignore_index=True)\n', (2826, 2877), True, 'import pandas as pd\n'), ((2905, 2965), 'pandas.concat', 'pd.concat', (['[df_UR5_y, y_coord_df]'], {'axis': '(1)', 'ignore_index': 
'(True)'}), '([df_UR5_y, y_coord_df], axis=1, ignore_index=True)\n', (2914, 2965), True, 'import pandas as pd\n'), ((2993, 3053), 'pandas.concat', 'pd.concat', (['[df_UR5_z, z_coord_df]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([df_UR5_z, z_coord_df], axis=1, ignore_index=True)\n', (3002, 3053), True, 'import pandas as pd\n'), ((1586, 1607), 'matplotlib.transforms.Affine2D', 'transforms.Affine2D', ([], {}), '()\n', (1605, 1607), True, 'import matplotlib.transforms as transforms\n')] |
"""Data module."""
import math
import os
from typing import Collection, Dict, Iterable, List
import numpy as np
import pandas as pd
import torch
from gsea_api.molecular_signatures_db import (
GeneSet,
GeneSets,
MolecularSignaturesDatabase,
)
from spexlvm import config
# logging stuff
logger = config.logger
class Pathways(GeneSets):
"""A collection of pathways/gene sets, wraps GeneSets."""
def __init__(self, gene_sets: Collection[GeneSet], **kwargs):
"""Initialise Pathways.
Parameters
----------
gene_sets : Collection[GeneSet]
"""
super().__init__(gene_sets=gene_sets, **kwargs)
def info(self, verbose: int = 0):
"""Get an overview of this pathway collection.
Parameters
----------
verbose : int, optional
Level of verbosity, by default 0
Returns
-------
str
Raises
------
ValueError
Raised on negative verbosity level
"""
if verbose < 0:
raise ValueError("Invalid verbosity level of %s, please use 0, 1 or 2." % verbose)
info = str(self) + "\n"
if verbose == 1:
info += "Following gene sets are stored:\n"
info += "\n".join([gs.name for gs in self.gene_sets])
elif verbose == 2:
info += "Following gene sets (with genes) are stored:\n"
# double list comprehension is not readable
for gene_sets in self.gene_sets:
info += gene_sets.name + ": " + ", ".join([gene for gene in gene_sets.genes]) + "\n"
return info
def find(self, partial_gene_set_names: Iterable[str]):
"""Perform a simple search given a list of (partial) gene set names.
Parameters
----------
partial_gene_set_names : Iterable[str]
Collection of gene set names
Returns
-------
dict
Search results as a dictionary of {partial_gene_set_names[0]: [GeneSet], ...}
"""
search_results = {partial_gsn: [] for partial_gsn in partial_gene_set_names}
for partial_gsn in partial_gene_set_names:
search_results[partial_gsn] = [
full_gs for full_gs in self.gene_sets if partial_gsn in full_gs.name
]
return search_results
def remove(self, gene_set_names: Iterable[str]):
"""Remove specific pathways.
Parameters
----------
gene_sets : Iterable[str]
List of names (str) of unwanted pathways
Returns
-------
Pathways
"""
return Pathways(
{
GeneSet(name=gene_set.name, genes=gene_set.genes)
for gene_set in self.gene_sets
if gene_set.name not in gene_set_names
}
)
def subset(
self,
genes: Iterable[str],
fraction_available: float = 0.5,
min_gene_count: int = 0,
max_gene_count: int = 0,
keep: Iterable[str] = None,
):
"""Extract a subset of pathways available in a collection of genes.
Parameters
----------
genes : Iterable[str]
List of genes
fraction_available : float, optional
What fraction of the pathway genes should be available
in the genes collection to insert the pathway into the subset,
by default 0.5 (half of genes of a pathway must be present)
min_gene_count : int, optional
Minimal number of pathway genes available in the data
for the pathway to be considered in the subset
max_gene_count : int, optional
Maximal number of pathway genes available in the data
for the pathway to be considered in the subset
keep : Iterable[str]
List of pathways to keep regardless of filters
Returns
-------
Pathways
"""
if keep is None:
keep = []
if not isinstance(genes, set):
genes = set(genes)
pathways_subset = set()
for gene_set in self.gene_sets:
gene_intersection = gene_set.genes & genes # intersection
available_genes = len(gene_intersection)
gene_fraction = available_genes / len(gene_set.genes)
if gene_set.name in keep:
logger.info(
"Keeping a %s out of %s genes (%.2f) from the special gene set '%s'.",
available_genes,
len(gene_set.genes),
gene_fraction,
gene_set.name,
)
if gene_set.name in keep or (
gene_fraction >= fraction_available and available_genes >= min_gene_count
):
if max_gene_count == 0 or available_genes <= max_gene_count:
pathways_subset.add(
GeneSet(
name=gene_set.name,
genes=gene_intersection,
warn_if_empty=False,
)
)
return Pathways(pathways_subset)
def to_mask(self, genes: Iterable[str], sort: bool = False):
"""Generate a binary matrix of pathways x genes.
Parameters
----------
genes : Iterable[str]
List of genes
sort : bool, optional
Whether to sort alphabetically, by default False
Returns
-------
torch.Tensor
"""
gene_sets_list = list(self.gene_sets)
if sort:
gene_sets_list = sorted(gene_sets_list, key=lambda gs: gs.name)
# probably faster than calling list.index() for every gene in the pathways
gene_to_idx = {k: v for k, v in zip(genes, range(len(genes)))}
mask = torch.zeros(len(gene_sets_list), len(genes))
for i, gene_sets in enumerate(gene_sets_list):
for gene in gene_sets.genes:
mask[i, gene_to_idx[gene]] = 1.0
return mask, gene_sets_list
def load_pathways(keep=None):
"""Load pathways from the existing msigdb.
Parameters
----------
keep : list, optional
List of gene set collections, by default None
Returns
-------
Pathways
"""
if keep is None:
keep = ["hallmark", "reactome"]
# load msigdb files located at ./msigdb (.gmt extension)
msigdb = MolecularSignaturesDatabase(os.path.join("..", "msigdb"), version=7.4)
print(msigdb)
# relevant gene sets dictionary
gene_sets = {
"hallmark": "h.all",
"kegg": "c2.cp.kegg",
"reactome": "c2.cp.reactome",
}
# gene_sets = {"hallmark": "h.all"}
# load relevant pathways
pathway_dict = {k: msigdb.load(v, "symbols") for k, v in gene_sets.items() if k in keep}
# concatenate pathways
pathways = Pathways(sum([pathway_dict[k].gene_sets for k in pathway_dict.keys()], ()))
return pathways
def load_dataset(dataset, subsample_size=0, n_top_genes=0, center=True):
# lambda allows for lazy loading..
dataset_dict = {
"mesc": lambda: load_mesc,
"retina_small": lambda: load_retina_small,
"retina_rod": lambda: load_retina_rod,
"retina_large": lambda: load_retina_large,
}
Y, labels, batch = dataset_dict.get(dataset)()()
if n_top_genes > 0:
Y_var = Y.var()
top_var_col_indices = Y_var.argsort()[-n_top_genes:]
logger.info("Using %s most variable genes", n_top_genes)
Y = Y.iloc[:, top_var_col_indices]
if center:
Y_mean = Y.mean()
if subsample_size > 0:
logger.info("Using a random subsample of %s", subsample_size)
subsample_indices = np.random.choice(Y.shape[0], subsample_size, replace=False)
Y = Y.iloc[subsample_indices]
labels = labels[subsample_indices]
if batch is not None:
batch = batch[subsample_indices]
# center data column-wise, ignoring last columns (labels)
if center:
Y = Y - Y_mean
# all genes have uppercase in pathways
Y.columns = Y.columns.str.upper()
# Y = Y.rename(str.upper, axis='columns')
return Y, labels, batch
def load_mesc():
Y = pd.read_csv(os.path.join(config.DATASET_DIR, "Buettneretal.csv.gz"), compression="gzip")
return Y, Y.index, None
def load_retina_large():
# https://data.humancellatlas.org/explore/projects/8185730f-4113-40d3-9cc3-929271784c2b/project-matrices
# load data from storage
dataset_dir = os.path.join(
"/",
"data",
"aqoku",
"projects",
"spexlvm",
"processed",
)
Y = pd.read_pickle(
os.path.join(
dataset_dir,
"retina.pkl",
)
)
labels = pd.read_csv(
os.path.join(
dataset_dir,
"WongRetinaCelltype.csv",
)
)
labels = labels["annotated_cell_identity.ontology_label"]
batch = Y["batch"]
return Y.drop("batch", axis=1), labels.values, batch.values
def load_retina_rod():
Y, labels, batch = load_retina_large()
# remove dominant cluster
subsample_indices = labels == "retinal rod cell"
Y = Y.iloc[subsample_indices, :]
if batch is not None:
batch = batch[subsample_indices]
labels = labels[subsample_indices]
return Y, labels, batch
def load_retina_small():
Y, labels, batch = load_retina_large()
# remove dominant cluster
subsample_indices = labels != "retinal rod cell"
Y = Y.iloc[subsample_indices, :]
if batch is not None:
batch = batch[subsample_indices]
labels = labels[subsample_indices]
return Y, labels, batch
def generate_toy_dataset(
    n_samples: int = 10000,
    n_features: int = 200,
    n_factors: int = 40,
    n_active_features: float = 0.1,
    n_active_factors: float = 0.5,
    constant_weight: float = 4.0,
):
    """Simulate a sparse linear factor model for evaluation.

    Parameters
    ----------
    n_samples : int, optional
        Number of samples, by default 10000
    n_features : int, optional
        Number of features (genes), by default 200
    n_factors : int, optional
        Number of factors, by default 40
    n_active_features : float, optional
        Number or fraction of active genes per factor; a scalar or a
        (min, max) pair, by default 0.1
    n_active_factors : float, optional
        Number or fraction of active factors, by default 0.5
    constant_weight : float, optional
        Constant used to fill the non-zero loadings, by default 4.0

    Returns
    -------
    tuple
        (loadings w, binary mask, active factor indices, scores x, y = x @ w)
    """
    # normalise the per-factor activity spec to a (min, max) pair of fractions
    if isinstance(n_active_features, float):
        n_active_features = (n_active_features, n_active_features)
    n_active_features = tuple(
        naft / n_features if naft > 1.0 else naft for naft in n_active_features
    )
    min_frac, max_frac = n_active_features
    if n_active_factors > 1.0:
        n_active_factors /= n_factors
    w_shape = [n_factors, n_features]
    x_shape = [n_samples, n_factors]
    # draw a Bernoulli(naft) on/off mask row by row
    mask = torch.zeros(w_shape)
    for factor_idx, naft in enumerate(
        np.random.uniform(min_frac, max_frac, n_factors)
    ):
        mask[factor_idx] = torch.multinomial(
            torch.tensor([1 - naft, naft]),
            w_shape[1],
            replacement=True,
        )
    # constant loadings on the active entries plus small Gaussian jitter everywhere
    jitter = torch.normal(
        mean=torch.zeros(w_shape), std=constant_weight / 50 * torch.ones(w_shape)
    )
    w = mask * (constant_weight * torch.ones(w_shape)) + jitter
    x = torch.normal(mean=torch.zeros(x_shape), std=torch.ones(x_shape))
    active_factor_indices = sorted(
        np.random.choice(
            range(n_factors),
            size=math.ceil(n_factors * n_active_factors),
            replace=False,
        )
    )
    # inactive factors carry noise-only loadings
    for row_idx in range(n_factors):
        if row_idx not in active_factor_indices:
            w[row_idx, :] = torch.normal(
                torch.zeros(n_features),
                std=constant_weight / 50 * torch.ones(n_features),
            )
    return (
        w,
        mask,
        active_factor_indices,
        x,
        torch.matmul(x, w),
    )
| [
"math.ceil",
"numpy.random.choice",
"os.path.join",
"torch.tensor",
"gsea_api.molecular_signatures_db.GeneSet",
"torch.matmul",
"numpy.random.uniform",
"torch.zeros",
"torch.ones"
] | [((8630, 8700), 'os.path.join', 'os.path.join', (['"""/"""', '"""data"""', '"""aqoku"""', '"""projects"""', '"""spexlvm"""', '"""processed"""'], {}), "('/', 'data', 'aqoku', 'projects', 'spexlvm', 'processed')\n", (8642, 8700), False, 'import os\n'), ((11259, 11279), 'torch.zeros', 'torch.zeros', (['w_shape'], {}), '(w_shape)\n', (11270, 11279), False, 'import torch\n'), ((6547, 6575), 'os.path.join', 'os.path.join', (['""".."""', '"""msigdb"""'], {}), "('..', 'msigdb')\n", (6559, 6575), False, 'import os\n'), ((7829, 7888), 'numpy.random.choice', 'np.random.choice', (['Y.shape[0]', 'subsample_size'], {'replace': '(False)'}), '(Y.shape[0], subsample_size, replace=False)\n', (7845, 7888), True, 'import numpy as np\n'), ((8341, 8396), 'os.path.join', 'os.path.join', (['config.DATASET_DIR', '"""Buettneretal.csv.gz"""'], {}), "(config.DATASET_DIR, 'Buettneretal.csv.gz')\n", (8353, 8396), False, 'import os\n'), ((8788, 8827), 'os.path.join', 'os.path.join', (['dataset_dir', '"""retina.pkl"""'], {}), "(dataset_dir, 'retina.pkl')\n", (8800, 8827), False, 'import os\n'), ((8903, 8954), 'os.path.join', 'os.path.join', (['dataset_dir', '"""WongRetinaCelltype.csv"""'], {}), "(dataset_dir, 'WongRetinaCelltype.csv')\n", (8915, 8954), False, 'import os\n'), ((11315, 11334), 'torch.ones', 'torch.ones', (['w_shape'], {}), '(w_shape)\n', (11325, 11334), False, 'import torch\n'), ((11382, 11456), 'numpy.random.uniform', 'np.random.uniform', (['min_n_active_features', 'max_n_active_features', 'n_factors'], {}), '(min_n_active_features, max_n_active_features, n_factors)\n', (11399, 11456), True, 'import numpy as np\n'), ((12469, 12497), 'torch.matmul', 'torch.matmul', (['true_x', 'true_w'], {}), '(true_x, true_w)\n', (12481, 12497), False, 'import torch\n'), ((11527, 11557), 'torch.tensor', 'torch.tensor', (['[1 - naft, naft]'], {}), '([1 - naft, naft])\n', (11539, 11557), False, 'import torch\n'), ((11713, 11733), 'torch.zeros', 'torch.zeros', (['w_shape'], {}), '(w_shape)\n', 
(11724, 11733), False, 'import torch\n'), ((11870, 11890), 'torch.zeros', 'torch.zeros', (['x_shape'], {}), '(x_shape)\n', (11881, 11890), False, 'import torch\n'), ((11896, 11915), 'torch.ones', 'torch.ones', (['x_shape'], {}), '(x_shape)\n', (11906, 11915), False, 'import torch\n'), ((2698, 2747), 'gsea_api.molecular_signatures_db.GeneSet', 'GeneSet', ([], {'name': 'gene_set.name', 'genes': 'gene_set.genes'}), '(name=gene_set.name, genes=gene_set.genes)\n', (2705, 2747), False, 'from gsea_api.molecular_signatures_db import GeneSet, GeneSets, MolecularSignaturesDatabase\n'), ((11762, 11781), 'torch.ones', 'torch.ones', (['w_shape'], {}), '(w_shape)\n', (11772, 11781), False, 'import torch\n'), ((12027, 12066), 'math.ceil', 'math.ceil', (['(n_factors * n_active_factors)'], {}), '(n_factors * n_active_factors)\n', (12036, 12066), False, 'import math\n'), ((12260, 12283), 'torch.zeros', 'torch.zeros', (['n_features'], {}), '(n_features)\n', (12271, 12283), False, 'import torch\n'), ((4987, 5060), 'gsea_api.molecular_signatures_db.GeneSet', 'GeneSet', ([], {'name': 'gene_set.name', 'genes': 'gene_intersection', 'warn_if_empty': '(False)'}), '(name=gene_set.name, genes=gene_intersection, warn_if_empty=False)\n', (4994, 5060), False, 'from gsea_api.molecular_signatures_db import GeneSet, GeneSets, MolecularSignaturesDatabase\n'), ((12328, 12350), 'torch.ones', 'torch.ones', (['n_features'], {}), '(n_features)\n', (12338, 12350), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 15:13:19 2019
@author: kennedy
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = '1.0'
seed = 1333
from numpy.random import seed
seed(19)
from tensorflow import set_random_seed
set_random_seed(19)
import os
from STOCK import stock, loc
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import lightgbm as lgb
from datetime import datetime
import matplotlib.pyplot as plt
from Preprocess import process_time
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from sklearn.ensemble import (AdaBoostRegressor, #Adaboost regressor
RandomForestRegressor, #Random forest regressor
GradientBoostingRegressor, #Gradient boosting
BaggingRegressor, #Bagging regressor
ExtraTreesRegressor) #Extratrees regressor
#get ojects in the dataset folder and
#strip extension
def ls_STOK(path='.'):
    '''
    List the stock symbols available as files in *path*.

    :Argument:
        path: directory to scan; defaults to the current working
              directory (the behaviour of the old bare os.listdir() call)
    :Return:
        List of stock in dataset (file names without their extension)
    '''
    # BUG FIX: the previous code used name.strip('.csv'), which removes any
    # of the characters '.', 'c', 's', 'v' from BOTH ends of the name
    # (e.g. 'visa.csv' -> 'isa'); os.path.splitext drops only the real
    # file extension.
    return [os.path.splitext(entry)[0] for entry in os.listdir(path)]
#%% SIGNAL GENERATOR --> MACD, BOLLINGER BAND, RSI
##RSI signal
def RSI_signal(STK_data, period, lw_bound, up_bound):
    '''
    Compute Cutler's RSI over the OHLC data and derive a binary regime signal.

    :Arguments:
        STK_data: raw price data accepted by the STOCK.stock wrapper
        period: RSI look-back period
        lw_bound: RSI level at or below which the signal flips to 0
        up_bound: RSI level at or above which the signal flips to 1
    :Return type:
        DataFrame with an 'RSI_Cutler_<period>' column plus a 'signal'
        column: each threshold crossing overwrites the signal from the
        current bar to the end, so the most recent crossing wins.
    '''
    stock_data = stock(STK_data)
    OHLC = stock_data.OHLC()
    df = stock_data.CutlerRSI(OHLC, period)
    try:
        assert isinstance(df, pd.Series) or isinstance(df, pd.DataFrame)
        #convert to dataframe
        if isinstance(df, pd.Series):
            df = df.to_frame()
        else:
            pass
        #get signal
        #1--> indicates buy position
        #0 --> indicates sell position
        df['signal'] = np.zeros(df.shape[0])
        pos = 0
        # forward-fill the regime: df['signal'][pos:] relies on pandas
        # chained assignment (warnings are disabled at module level)
        for ij in df.loc[:, ['RSI_Cutler_'+str(period)]].values:
            print(df.loc[:, ['RSI_Cutler_'+str(period)]].values[pos])
            if df.loc[:, ['RSI_Cutler_'+str(period)]].values[pos] >= up_bound:
                df['signal'][pos:] = 1 #uptrend
            elif df.loc[:, ['RSI_Cutler_'+str(period)]].values[pos] <= lw_bound:
                df['signal'][pos:] = 0 #downtrend
            pos +=1
    except:
        # NOTE(review): bare except silently swallows ANY failure above and
        # returns df in whatever state it reached -- consider narrowing.
        pass
    finally:
        print('*'*40)
        print('RSI Signal Generation completed')
        print('*'*40)
    return df
def macd_crossOver(STK_data, fast, slow, signal):
    '''
    Compute the MACD indicator and derive a binary cross-over signal.

    :Argument:
        STK_data: raw price data accepted by the STOCK.stock wrapper
        fast, slow, signal: MACD periods
    :Return type:
        DataFrame with 'signal' (1 while MACD is above its signal line,
        else 0) and 'result' (1 only while the signal is on AND the MACD
        histogram is non-negative)
    '''
    stock_data = stock(STK_data)
    df = stock_data.MACD(fast, slow, signal)
    try:
        assert isinstance(df, pd.DataFrame) or isinstance(df, pd.Series)
        # promote a Series to a single-column frame
        if isinstance(df, pd.Series):
            df = df.to_frame()
        #1--> indicates buy position
        #0 --> indicates sell position
        df['result'] = np.nan
        df['signal'] = np.where(df.MACD > df.MACD_SIGNAL, 1, 0)
        df['result'] = np.where((df['signal'] == 1) & (df['MACD_HIST'] >= 0), 1, 0)
    except IOError as e:
        # BUG FIX: the original ``raise('...')`` tried to raise a plain
        # string, which itself fails with "exceptions must derive from
        # BaseException"; re-raise a proper exception instead.
        raise IOError('Dataframe required {}' .format(e)) from e
    finally:
        print('*'*40)
        print('MACD signal generated')
        print('*'*40)
    return df
def SuperTrend_signal(STK_data, multiplier, period):
    '''
    Compute the SuperTrend indicator and derive a binary signal.

    :Argument:
        STK_data: raw price data accepted by the STOCK.stock wrapper
        multiplier: SuperTrend band multiplier
        period: SuperTrend look-back period
    :Return type:
        DataFrame with a 'signal' column: 1 while the close is at or above
        the SuperTrend line, else 0
    '''
    stock_data = stock(STK_data)
    df = stock_data.SuperTrend(STK_data, multiplier, period)
    try:
        assert isinstance(df, pd.DataFrame) or isinstance(df, pd.Series)
        # promote a Series to a single-column frame
        if isinstance(df, pd.Series):
            df = df.to_frame()
        #1--> indicates buy position
        #0 --> indicates sell position
        df = df.fillna(0)
        df['signal'] = np.nan
        df['signal'] = np.where(stock_data.Close >= df.SuperTrend, 1, 0)
    except IOError as e:
        # BUG FIX: the original ``raise('...')`` tried to raise a plain
        # string, which itself fails with "exceptions must derive from
        # BaseException"; re-raise a proper exception instead.
        raise IOError('Dataframe required {}' .format(e)) from e
    finally:
        print('*'*40)
        print('SuperTrend Signal generated')
        print('*'*40)
    return df
def bollinger_band_signal(STK_data, period, deviation, strategy = ''):
    '''
    Compute Bollinger bands and derive a binary signal per strategy.

    :Argument:
        STK_data: raw price data accepted by the STOCK.stock wrapper
        period: band look-back period
        deviation: band width in standard deviations
        strategy: '', '0' or '2' --> signal 1 when close touches the lower
            band, 0 at the upper band; '1' or '3' --> the reverse mapping
    :Return type:
        DataFrame with Upper_band/Lower_band columns plus a forward-filled
        'signal' column (each band touch overwrites the signal from the
        current bar onwards)
    '''
    stock_data = stock(STK_data)
    Close = stock_data.Close
    df = stock_data.Bolinger_Band(period, deviation)
    df = df.fillna(value = 0)
    assert isinstance(df, pd.DataFrame) or isinstance(df, pd.Series)
    #dataframe
    if isinstance(df, pd.Series):
        df = df.to_frame()
    #get signal
    #1--> indicates buy position
    #0 --> indicates sell position
    df['signal'] = np.zeros(df.shape[0])
    pos = 0
    try:
        if strategy == '' or strategy == '0' or strategy == '2':
            # mean-reversion mapping: buy at the lower band, sell at the upper
            for ii in Close:
                print(Close[pos])
                if Close[pos] >= df.Upper_band.values[pos]:
                    df['signal'][pos:] = 0
                elif Close[pos] <= df.Lower_band.values[pos]:
                    df['signal'][pos:] = 1
                pos += 1
        elif strategy == '1' or strategy == '3':
            # breakout mapping: buy at the upper band, sell at the lower
            for ii in Close:
                print(Close[pos])
                if Close[pos] >= df.Upper_band.values[pos]:
                    df['signal'][pos:] = 1
                elif Close[pos] <= df.Lower_band.values[pos]:
                    df['signal'][pos:] = 0
                pos += 1
        else:
            # NOTE(review): raising a plain string is itself a TypeError,
            # and the bare except below swallows it -- invalid strategies
            # silently return an all-zero signal.
            raise('You have entered an incorrect strategy value')
    except:
        pass
    finally:
        print('*'*40)
        print('Bollinger Signal Generation completed')
        print('*'*40)
    return df
def trading_signal(RSI, MACD, Bollinger_Band, SuperTrend = None, strategy = ''):
    '''
    Combine the individual indicator signals into one trade decision.

    :Arguments:
        :RSI:
            dataframe containing RSI signal (column 'signal')
        :MACD:
            dataframe containing MACD signal (column 'signal')
        :Bollinger_Band:
            dataframe containing Bollinger band signal (column 'signal')
        :SuperTrend:
            dataframe containing SuperTrend signal; required for any
            strategy other than '', '0' or '1', ignored otherwise
        :strategy:
            '', '0' or '1' --> BUY/SELL only when RSI, MACD and the
            Bollinger band all agree; otherwise SuperTrend must agree too
    :Return Type:
        DataFrame with one column per indicator signal plus a 'POSITION'
        column holding 'BUY', 'SELL' or 'HOLD' per row
    '''
    MACD_signal = MACD.signal.values
    RSI_signal = RSI.signal.values
    BB_signal = Bollinger_Band.signal.values
    use_supertrend = strategy not in ('', '0', '1')
    if not use_supertrend:
        df_prediction = pd.DataFrame({'MACD_signal': MACD_signal,
                                      'RSI_signal': RSI_signal,
                                      'BB_signal': BB_signal})
        signals = (MACD_signal, RSI_signal, BB_signal)
    else:
        SuperTrend_Signal = SuperTrend.signal.values
        df_prediction = pd.DataFrame({'MACD_signal': MACD_signal,
                                      'RSI_signal': RSI_signal,
                                      'BB_signal': BB_signal,
                                      'SuperTrend_signal': SuperTrend_Signal})
        signals = (MACD_signal, RSI_signal, BB_signal, SuperTrend_Signal)
    df_prediction['POSITION'] = ''
    try:
        if strategy == '' or strategy == '0':
            print('Calling default strategy')
        else:
            print('Calling strategy %s'%strategy)
        # BUG FIX: the original iterated over ``range(data.shape[0])``,
        # silently relying on a module-level global ``data`` (and silently
        # returning empty POSITIONs when it was missing, because the bare
        # except swallowed the NameError); iterate the inputs instead.
        # The four duplicated strategy branches collapse into one loop.
        for ij in range(len(df_prediction)):
            print(ij)
            if all(s[ij] == 1 for s in signals):
                df_prediction.loc[ij, 'POSITION'] = 'BUY'
            elif all(s[ij] == 0 for s in signals):
                df_prediction.loc[ij, 'POSITION'] = 'SELL'
            else:
                df_prediction.loc[ij, 'POSITION'] = 'HOLD'
    except Exception as e:
        # keep the best-effort behaviour of the original bare except/pass,
        # but at least surface the problem
        print('trading_signal failed: {}'.format(e))
    finally:
        #-----------------------------------------------------------
        #reset column and save to throw to csv
        if not use_supertrend:
            enlist = ['BB_signal', 'MACD_signal' , 'RSI_signal','POSITION']
        else:
            enlist = ['BB_signal', 'MACD_signal' , 'RSI_signal', 'SuperTrend_signal','POSITION']
        df_prediction = df_prediction.reindex(columns=enlist)
        print('*'*40)
        print('Signal generation completed...')
        print('*'*40)
    return df_prediction
if __name__ == '__main__':
    '''
    ----------------------------------
    # Trading strategy
    ------------------------------------
    [X][STRATEGY 0 or ''] --> USES DEFAULT BOLLINGER BAND:: BUY WHEN CLOSE IS BELOW LOWER BOLLINGER
                            SELL WHEN CLOSE IS ABOVE UPPER BOLLINGER BAND
    [X][STRATEGY 1] --> SETS BOLLINGER TO:: BUY WHEN CLOSE IS ABOVE UPPER BOLLINGER BAND
                        AND SELL WHEN CLOSE IS BELOW LOWER BOLLINGER BAND.
    [X][STRATEGY 2] --> USES STRATEGY 0 WITH SUPER TREND INDICATOR
    [X][STRATEGY 3] --> USES STRATEGY 1 WITH SUPER TREND INDICATOR
    '''
    #---------GLOBAL SETTINGS-------------------
    path = 'D:\\BITBUCKET_PROJECTS\\Forecasting 1.0\\'
    STRATEGY = '3'
    DEVIATION = MULTIPLIER = 2
    PERIOD = 20
    DATA_LIMIT = 400          # number of symbols to process
    #--------RSI_SETTINGS------------------------
    LOWER_BOUND = 30
    UPPER_BOUND = 70
    #--------MACD SETTINGS-----------------------
    FAST = 12
    SLOW = 26
    SIGNAL = 9
    loc.set_path(path+'DATASET')
    #-------get the data we need------------------
    STOK_list = ls_STOK()
    Signal_Gen = {}
    #-------------------Styling-------------------
    # hoisted out of the per-symbol loop: these are loop-invariant
    color_col = {'Support 1': 'lightgreen', 'Support 2': 'palegreen', 'Support 3': 'limegreen',
                 'Resistance 1': 'salmon', 'Resistance 2': 'tomato', 'Resistance 3': 'orangered'}
    def col_map(m, color_col):
        # colour support/resistance rows, highlight the POSITION row
        if m.name in color_col.keys():
            return ['background-color: {}'.format(color_col[m.name])] * len(m)
        elif m.name == 'POSITION':
            return ['background-color: skyblue']* len(m)
        else:
            return [''] * len(m)
    for ii in range(DATA_LIMIT):
        print('{}'.format(STOK_list[ii]))
        data = loc.read_csv('{}'.format(STOK_list[ii]) + str('.csv'))
        data.index = pd.to_datetime(data.index)
        #-----convert to the stock class--------------
        stock_data = stock(data)
        Fibo_SUP_RES_ = stock_data.fibonacci_pivot_point()
        df_RSI = RSI_signal(data, PERIOD, lw_bound = LOWER_BOUND, up_bound = UPPER_BOUND)
        df_MACD = macd_crossOver(data, FAST, SLOW, SIGNAL)
        df_BB = bollinger_band_signal(data, PERIOD, deviation = DEVIATION, strategy = STRATEGY)
        #-----select strategy for saving-------------------
        if STRATEGY == '2' or STRATEGY == '3':
            df_STrend = SuperTrend_signal(data, MULTIPLIER, PERIOD)
            prediction = trading_signal(df_RSI, df_MACD, df_BB, df_STrend, STRATEGY)
        else:
            # BUG FIX: STRATEGY used to be passed positionally, landing in
            # the SuperTrend parameter of trading_signal, so the strategy
            # silently fell back to the default ''
            prediction = trading_signal(df_RSI, df_MACD, df_BB, strategy = STRATEGY)
        prediction.set_index(data.index, inplace = True)
        prediction = pd.concat([Fibo_SUP_RES_, prediction], axis = 1)
        # keep only the latest bar per symbol
        Signal_Gen['{}'.format(STOK_list[ii])] = prediction.iloc[-1]
    #---------------------------------------
    Signal_Gen = pd.DataFrame.from_dict(Signal_Gen).T
    Signal_Gen.to_csv('SIGNAL_GEN_ALL.csv', mode = 'w')
    # NOTE(review): the styled frame below is discarded; chain .to_excel on
    # it if the colouring is meant to end up in the spreadsheet
    Signal_Gen.style.apply(col_map, color_col = color_col)
    loc.set_path(path+ 'PREDICTED')
    Signal_Gen.to_excel('SIGNAL_GEN_ALL.xlsx')
| [
"os.listdir",
"numpy.where",
"pandas.to_datetime",
"pandas.DataFrame.from_dict",
"numpy.zeros",
"numpy.random.seed",
"pandas.DataFrame",
"STOCK.loc.set_path",
"tensorflow.set_random_seed",
"pandas.concat",
"STOCK.stock"
] | [((194, 202), 'numpy.random.seed', 'seed', (['(19)'], {}), '(19)\n', (198, 202), False, 'from numpy.random import seed\n'), ((242, 261), 'tensorflow.set_random_seed', 'set_random_seed', (['(19)'], {}), '(19)\n', (257, 261), False, 'from tensorflow import set_random_seed\n'), ((1189, 1201), 'os.listdir', 'os.listdir', ([], {}), '()\n', (1199, 1201), False, 'import os\n'), ((1522, 1537), 'STOCK.stock', 'stock', (['STK_data'], {}), '(STK_data)\n', (1527, 1537), False, 'from STOCK import stock, loc\n'), ((2578, 2593), 'STOCK.stock', 'stock', (['STK_data'], {}), '(STK_data)\n', (2583, 2593), False, 'from STOCK import stock, loc\n'), ((3365, 3380), 'STOCK.stock', 'stock', (['STK_data'], {}), '(STK_data)\n', (3370, 3380), False, 'from STOCK import stock, loc\n'), ((4129, 4144), 'STOCK.stock', 'stock', (['STK_data'], {}), '(STK_data)\n', (4134, 4144), False, 'from STOCK import stock, loc\n'), ((4486, 4507), 'numpy.zeros', 'np.zeros', (['df.shape[0]'], {}), '(df.shape[0])\n', (4494, 4507), True, 'import numpy as np\n'), ((10048, 10078), 'STOCK.loc.set_path', 'loc.set_path', (["(path + 'DATASET')"], {}), "(path + 'DATASET')\n", (10060, 10078), False, 'from STOCK import stock, loc\n'), ((12133, 12165), 'STOCK.loc.set_path', 'loc.set_path', (["(path + 'PREDICTED')"], {}), "(path + 'PREDICTED')\n", (12145, 12165), False, 'from STOCK import stock, loc\n'), ((1892, 1913), 'numpy.zeros', 'np.zeros', (['df.shape[0]'], {}), '(df.shape[0])\n', (1900, 1913), True, 'import numpy as np\n'), ((2921, 2961), 'numpy.where', 'np.where', (['(df.MACD > df.MACD_SIGNAL)', '(1)', '(0)'], {}), '(df.MACD > df.MACD_SIGNAL, 1, 0)\n', (2929, 2961), True, 'import numpy as np\n'), ((2981, 3041), 'numpy.where', 'np.where', (["((df['signal'] == 1) & (df['MACD_HIST'] >= 0))", '(1)', '(0)'], {}), "((df['signal'] == 1) & (df['MACD_HIST'] >= 0), 1, 0)\n", (2989, 3041), True, 'import numpy as np\n'), ((3746, 3795), 'numpy.where', 'np.where', (['(stock_data.Close >= df.SuperTrend)', '(1)', '(0)'], {}), 
'(stock_data.Close >= df.SuperTrend, 1, 0)\n', (3754, 3795), True, 'import numpy as np\n'), ((5821, 5917), 'pandas.DataFrame', 'pd.DataFrame', (["{'MACD_signal': MACD_signal, 'RSI_signal': RSI_signal, 'BB_signal': BB_signal}"], {}), "({'MACD_signal': MACD_signal, 'RSI_signal': RSI_signal,\n 'BB_signal': BB_signal})\n", (5833, 5917), True, 'import pandas as pd\n'), ((6055, 6191), 'pandas.DataFrame', 'pd.DataFrame', (["{'MACD_signal': MACD_signal, 'RSI_signal': RSI_signal, 'BB_signal':\n BB_signal, 'SuperTrend_signal': SuperTrend_Signal}"], {}), "({'MACD_signal': MACD_signal, 'RSI_signal': RSI_signal,\n 'BB_signal': BB_signal, 'SuperTrend_signal': SuperTrend_Signal})\n", (6067, 6191), True, 'import pandas as pd\n'), ((10323, 10349), 'pandas.to_datetime', 'pd.to_datetime', (['data.index'], {}), '(data.index)\n', (10337, 10349), True, 'import pandas as pd\n'), ((10418, 10429), 'STOCK.stock', 'stock', (['data'], {}), '(data)\n', (10423, 10429), False, 'from STOCK import stock, loc\n'), ((11983, 12017), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['Signal_Gen'], {}), '(Signal_Gen)\n', (12005, 12017), True, 'import pandas as pd\n'), ((11032, 11078), 'pandas.concat', 'pd.concat', (['[Fibo_SUP_RES_, prediction]'], {'axis': '(1)'}), '([Fibo_SUP_RES_, prediction], axis=1)\n', (11041, 11078), True, 'import pandas as pd\n'), ((11300, 11346), 'pandas.concat', 'pd.concat', (['[Fibo_SUP_RES_, prediction]'], {'axis': '(1)'}), '([Fibo_SUP_RES_, prediction], axis=1)\n', (11309, 11346), True, 'import pandas as pd\n')] |
from __future__ import print_function, division
import numpy as np
from ..utils.units import angstrom_to_bohr
#import pymatgen
__all__ = ['structure_to_abivars']
def structure_to_abivars(structure):
    """Translate a pymatgen.Structure into a dict of abinit input variables.

    The dict carries the lattice vectors in Bohr (rprim), the cell scaling
    (acell), the atom count (natom), the number of atom types (ntypat), the
    atomic number of each type (znucl), the 1-based type index of every
    atom (typat) and the reduced coordinates (xred).
    """
    rprim = structure.lattice.matrix * angstrom_to_bohr
    xred = [site.frac_coords.round(14).tolist() for site in structure.sites]
    # assign a 1-based type index per distinct atomic number, in order of
    # first appearance
    znucl = []
    typat = []
    for z in structure.atomic_numbers:
        if z not in znucl:
            znucl.append(z)
        typat.append(znucl.index(z) + 1)
    return dict(
        rprim=rprim.tolist(),
        acell=np.ones(3, dtype=float).tolist(),
        natom=structure.num_sites,
        ntypat=structure.ntypesp,
        znucl=znucl,
        typat=typat,
        xred=xred,
    )
| [
"numpy.ones"
] | [((866, 889), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (873, 889), True, 'import numpy as np\n')] |
import shutil
import os
import numpy as np
def mkdir(path, reset=False):
    """Ensure that directory *path* (and its parents) exists.

    Parameters
    ----------
    path : str
        directory to create
    reset : bool, optional
        erase the content of the directory if it exists

    Returns
    -------
    str
        the path, unchanged
    """
    if reset and os.path.exists(path):
        shutil.rmtree(path)
    # exist_ok=True replaces the old try/except FileExistsError dance and is
    # race-free; unlike the old code it still raises if *path* exists but is
    # a regular file, instead of silently returning an unusable path.
    os.makedirs(path, exist_ok=True)
    return path
def haversine(lon1, lat1, lon2, lat2):
    """
    Great-circle distance in meters between one point on the earth and an
    array of points (all coordinates in decimal degrees).
    """
    earth_radius = 6371000  # Radius of earth in meters
    # work in radians
    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
    # haversine formula
    half_dlon = (lon2 - lon1) / 2
    half_dlat = (lat2 - lat1) / 2
    hav = np.sin(half_dlat)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon)**2
    central_angle = 2 * np.arcsin(np.sqrt(hav))
    return central_angle * earth_radius
| [
"os.path.exists",
"numpy.sqrt",
"os.makedirs",
"numpy.cos",
"shutil.rmtree",
"numpy.sin"
] | [((283, 303), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (297, 303), False, 'import os\n'), ((313, 332), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (326, 332), False, 'import shutil\n'), ((350, 367), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (361, 367), False, 'import os\n'), ((810, 826), 'numpy.sin', 'np.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (816, 826), True, 'import numpy as np\n'), ((900, 910), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (907, 910), True, 'import numpy as np\n'), ((830, 842), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (836, 842), True, 'import numpy as np\n'), ((845, 857), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (851, 857), True, 'import numpy as np\n'), ((860, 876), 'numpy.sin', 'np.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (866, 876), True, 'import numpy as np\n')] |
import numpy as np, pandas as pd, random
import tensorflow as tf
from tqdm import tqdm
from matplotlib import pyplot as plt
from agent import BrawlAgent
from env.brawlstars import Brawlstars
from utilities.utilities import log_histogram, log_scalars, variable_summaries, PressKey, ReleaseKey
from utilities.directkeys import B
from keras.backend import set_session
import time, math
EPISODE = 500 # Episode limitation
TRAIN_EVERY_STEPS = 256  # run a DQN training step every this many env steps
BATCH_SIZE = 128 # size of minibatch
# reproducible
random.seed(1992)
np.random.seed(1992)
tf.set_random_seed(1992)
# let TensorFlow grow GPU memory on demand instead of grabbing it all upfront
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.log_device_placement = True
# Reset the graph
tf.reset_default_graph()
# single interactive session, also registered as the Keras backend session
sess = tf.InteractiveSession(config=config)
set_session(sess)
def main(isLoad=False):
    """Train the BrawlAgent for EPISODE episodes, logging per-episode stats.

    isLoad is currently unused -- presumably intended to toggle loading a
    saved model; TODO confirm.
    """
    env = Brawlstars()
    agent = BrawlAgent(env)
    for i in tqdm(range(EPISODE)):
        agent.is_updated_target_net = False
        state = agent.env.reset() # To start the process
        done = False
        agent.replay_buffer.clear()
        avg_reward_list = []
        attack_list = []
        movement_list = []
        previous_reward = -1 # to prevent print too much noise (only referenced by the commented-out debug print below)
        # press and release the 'B' key via the game input helper
        # (presumably starts the episode in the game client -- confirm)
        PressKey(B)
        time.sleep(0.3)
        ReleaseKey(B)
        while done is False:
            action = agent.act(state) # Return Format: [movementArray, actionArray]
            state, reward, done = agent.env.step(action) # No longer needs action to be passed in
            # skip logging/learning when the reward reading is invalid
            if math.isnan(reward):
                continue
            # if reward != previous_reward:
            # previous_reward = reward
            # print(reward)
            # actions_list.append(action)
            movement_list.append(action[0])
            attack_list.append(action[1])
            avg_reward_list.append(reward)
            if done is False:
                next_state = agent.env._getObservation() # Get the next state
                agent.perceive(state, action, reward, next_state, done)
            # train periodically once the buffer holds more than one batch
            if agent.replay_buffer.size() > BATCH_SIZE and env.time_step % TRAIN_EVERY_STEPS == 0:
                agent.train_dqn_network(i, batch_size=BATCH_SIZE)
        # Update epsilon after every episode (linear decay towards final_epsilon)
        if agent.epsilon > agent.final_epsilon:
            agent.epsilon -= (1 - agent.final_epsilon) / (EPISODE/1.2)
        # print('[{0}] Average Reward: {1}'.format(i+1, np.mean(avg_reward_list)))
        log_histogram(agent.summary_writer, 'reward_dist', avg_reward_list, i)
        log_histogram(agent.summary_writer, 'movement_dist', movement_list, i)
        log_histogram(agent.summary_writer, 'attack_dist', attack_list, i)
        log_scalars(agent.summary_writer, 'avg_reward', np.mean(avg_reward_list), i)
def test():
    """Run one episode with the agent in test mode (agent.isTest = True)."""
    # start from a clean TF graph
    tf.reset_default_graph()
    environment = Brawlstars()
    agent = BrawlAgent(environment)
    agent.isTest = True
    observation = agent.env.reset()  # kick off the episode
    finished = False
    while finished is False:
        chosen_action = agent.act(observation)
        observation, reward, finished = agent.env.step(chosen_action)
if __name__ == '__main__':
main()
# test() | [
"utilities.utilities.log_histogram",
"utilities.utilities.ReleaseKey",
"numpy.mean",
"tensorflow.InteractiveSession",
"tensorflow.reset_default_graph",
"env.brawlstars.Brawlstars",
"keras.backend.set_session",
"random.seed",
"time.sleep",
"utilities.utilities.PressKey",
"numpy.random.seed",
"t... | [((496, 513), 'random.seed', 'random.seed', (['(1992)'], {}), '(1992)\n', (507, 513), False, 'import numpy as np, pandas as pd, random\n'), ((514, 534), 'numpy.random.seed', 'np.random.seed', (['(1992)'], {}), '(1992)\n', (528, 534), True, 'import numpy as np, pandas as pd, random\n'), ((535, 559), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1992)'], {}), '(1992)\n', (553, 559), True, 'import tensorflow as tf\n'), ((570, 586), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (584, 586), True, 'import tensorflow as tf\n'), ((682, 706), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (704, 706), True, 'import tensorflow as tf\n'), ((714, 750), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'config'}), '(config=config)\n', (735, 750), True, 'import tensorflow as tf\n'), ((751, 768), 'keras.backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (762, 768), False, 'from keras.backend import set_session\n'), ((804, 816), 'env.brawlstars.Brawlstars', 'Brawlstars', ([], {}), '()\n', (814, 816), False, 'from env.brawlstars import Brawlstars\n'), ((829, 844), 'agent.BrawlAgent', 'BrawlAgent', (['env'], {}), '(env)\n', (839, 844), False, 'from agent import BrawlAgent\n'), ((2785, 2809), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2807, 2809), True, 'import tensorflow as tf\n'), ((2820, 2832), 'env.brawlstars.Brawlstars', 'Brawlstars', ([], {}), '()\n', (2830, 2832), False, 'from env.brawlstars import Brawlstars\n'), ((2845, 2860), 'agent.BrawlAgent', 'BrawlAgent', (['env'], {}), '(env)\n', (2855, 2860), False, 'from agent import BrawlAgent\n'), ((1190, 1201), 'utilities.utilities.PressKey', 'PressKey', (['B'], {}), '(B)\n', (1198, 1201), False, 'from utilities.utilities import log_histogram, log_scalars, variable_summaries, PressKey, ReleaseKey\n'), ((1210, 1225), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (1220, 1225), False, 
'import time, math\n'), ((1234, 1247), 'utilities.utilities.ReleaseKey', 'ReleaseKey', (['B'], {}), '(B)\n', (1244, 1247), False, 'from utilities.utilities import log_histogram, log_scalars, variable_summaries, PressKey, ReleaseKey\n'), ((2427, 2497), 'utilities.utilities.log_histogram', 'log_histogram', (['agent.summary_writer', '"""reward_dist"""', 'avg_reward_list', 'i'], {}), "(agent.summary_writer, 'reward_dist', avg_reward_list, i)\n", (2440, 2497), False, 'from utilities.utilities import log_histogram, log_scalars, variable_summaries, PressKey, ReleaseKey\n'), ((2506, 2576), 'utilities.utilities.log_histogram', 'log_histogram', (['agent.summary_writer', '"""movement_dist"""', 'movement_list', 'i'], {}), "(agent.summary_writer, 'movement_dist', movement_list, i)\n", (2519, 2576), False, 'from utilities.utilities import log_histogram, log_scalars, variable_summaries, PressKey, ReleaseKey\n'), ((2585, 2651), 'utilities.utilities.log_histogram', 'log_histogram', (['agent.summary_writer', '"""attack_dist"""', 'attack_list', 'i'], {}), "(agent.summary_writer, 'attack_dist', attack_list, i)\n", (2598, 2651), False, 'from utilities.utilities import log_histogram, log_scalars, variable_summaries, PressKey, ReleaseKey\n'), ((1474, 1492), 'math.isnan', 'math.isnan', (['reward'], {}), '(reward)\n', (1484, 1492), False, 'import time, math\n'), ((2708, 2732), 'numpy.mean', 'np.mean', (['avg_reward_list'], {}), '(avg_reward_list)\n', (2715, 2732), True, 'import numpy as np, pandas as pd, random\n')] |
# Demo of 1-D and 2-D histograms with numpy / matplotlib.
import matplotlib.pyplot as plt
import numpy as np
# fixed seed for reproducibility
np.random.seed(0)
data = np.random.randn(10_000)
#plt.hist(data, bins=30, alpha=.5, histtype="stepfilled", color="steelblue")
#plt.show()
# np.histogram only computes counts and bin edges, it does not plot
counts, bin_edges = np.histogram(data, bins=5)
print(counts)
print(bin_edges)
# three overlapping normal samples
x1 = np.random.normal(0, 0.8, 1000)
x2 = np.random.normal(-2, 1, 1000)
x3 = np.random.normal(3, 2, 1000)
# shared style for the overlaid histograms
kwargs = dict(
    histtype='stepfilled',
    alpha=0.3,
    bins=40
)
#plt.hist(x1, **kwargs)
#plt.hist(x2, **kwargs)
#plt.hist(x3, **kwargs)
#plt.show()
# correlated 2-D Gaussian sample, shown as a 2-D histogram
mean = [0, 0]
cov = [[1, 1], [1, 2]]
x, y = np.random.multivariate_normal(mean=mean, cov=cov, size=10000).T
plt.hist2d(x, y, bins=30, cmap="Blues")
plt.colorbar()
plt.show()
| [
"numpy.random.normal",
"numpy.histogram",
"matplotlib.pyplot.hist2d",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.colorbar",
"numpy.random.seed",
"numpy.random.randn",
"matplotlib.pyplot.show"
] | [((51, 68), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (65, 68), True, 'import numpy as np\n'), ((77, 99), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (92, 99), True, 'import numpy as np\n'), ((211, 237), 'numpy.histogram', 'np.histogram', (['data'], {'bins': '(5)'}), '(data, bins=5)\n', (223, 237), True, 'import numpy as np\n'), ((275, 305), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.8)', '(1000)'], {}), '(0, 0.8, 1000)\n', (291, 305), True, 'import numpy as np\n'), ((311, 340), 'numpy.random.normal', 'np.random.normal', (['(-2)', '(1)', '(1000)'], {}), '(-2, 1, 1000)\n', (327, 340), True, 'import numpy as np\n'), ((346, 374), 'numpy.random.normal', 'np.random.normal', (['(3)', '(2)', '(1000)'], {}), '(3, 2, 1000)\n', (362, 374), True, 'import numpy as np\n'), ((641, 680), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['x', 'y'], {'bins': '(30)', 'cmap': '"""Blues"""'}), "(x, y, bins=30, cmap='Blues')\n", (651, 680), True, 'import matplotlib.pyplot as plt\n'), ((681, 695), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (693, 695), True, 'import matplotlib.pyplot as plt\n'), ((696, 706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (704, 706), True, 'import matplotlib.pyplot as plt\n'), ((577, 638), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'mean', 'cov': 'cov', 'size': '(10000)'}), '(mean=mean, cov=cov, size=10000)\n', (606, 638), True, 'import numpy as np\n')] |
# coding=utf-8
from collections import Counter
from numpy import zeros
import numpy as np
from eval.pipeline.classifiers import MostCommonLabelClassifier, SubsamplingPredefinedIndicesIterator
def test_predict():
    """The classifier learns the majority label and predicts it for every row."""
    model = MostCommonLabelClassifier()
    model = model.fit(None, [1, 1, 1, 1, 0, 0])
    # four 1s vs two 0s -> the majority label is 1
    assert model.decision == 1
    predictions = model.predict(zeros((3, 3)))
    assert predictions.tolist() == [1, 1, 1]
def test_SubsamplingPredefinedIndicesIterator():
    """Subsampled train splits are class-balanced, drawn only from the predefined
    train indices, and always paired with the full predefined test split."""
    # 40 labels: first half is the training chunk, second half the testing chunk;
    # each chunk has 10 negatives followed by 10 positives.
    y_vals = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  # chunk for training
                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])  # chunk for testing
    train_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    test_indices = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39]
    # exhaustive grid over number of resamples and (even) sample sizes
    for num_samples in [1, 3, 5, 10, 20]:
        for sample_size in [2, 4, 6, 10, 20]:
            it = SubsamplingPredefinedIndicesIterator(y_vals, train_indices, test_indices, num_samples, sample_size)
            assert len(it) == num_samples
            for train, test in it:
                assert test == test_indices
                assert len(train) == sample_size
                assert all(x in train_indices for x in train)
                counts = Counter(y_vals[train])
                # equal number of positives and negatives in sample
                assert counts[0] == counts[1]
    # spot-check one concrete configuration: 2 resamples of size 6
    it = SubsamplingPredefinedIndicesIterator(y_vals, train_indices, test_indices, 2, 6)
    for train, test in it:
        assert test == test_indices
        assert len(train) == 6
        assert all(x in train_indices for x in train)
        assert Counter(y_vals[train]) == Counter({0: 3, 1: 3})  # 1:1 ratio of positives and negatives is preserved
"eval.pipeline.classifiers.MostCommonLabelClassifier",
"collections.Counter",
"numpy.array",
"numpy.zeros",
"eval.pipeline.classifiers.SubsamplingPredefinedIndicesIterator"
] | [((226, 253), 'eval.pipeline.classifiers.MostCommonLabelClassifier', 'MostCommonLabelClassifier', ([], {}), '()\n', (251, 253), False, 'from eval.pipeline.classifiers import MostCommonLabelClassifier, SubsamplingPredefinedIndicesIterator\n'), ((462, 596), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (470, 596), True, 'import numpy as np\n'), ((1461, 1540), 'eval.pipeline.classifiers.SubsamplingPredefinedIndicesIterator', 'SubsamplingPredefinedIndicesIterator', (['y_vals', 'train_indices', 'test_indices', '(2)', '(6)'], {}), '(y_vals, train_indices, test_indices, 2, 6)\n', (1497, 1540), False, 'from eval.pipeline.classifiers import MostCommonLabelClassifier, SubsamplingPredefinedIndicesIterator\n'), ((348, 361), 'numpy.zeros', 'zeros', (['(3, 3)'], {}), '((3, 3))\n', (353, 361), False, 'from numpy import zeros\n'), ((956, 1059), 'eval.pipeline.classifiers.SubsamplingPredefinedIndicesIterator', 'SubsamplingPredefinedIndicesIterator', (['y_vals', 'train_indices', 'test_indices', 'num_samples', 'sample_size'], {}), '(y_vals, train_indices, test_indices,\n num_samples, sample_size)\n', (992, 1059), False, 'from eval.pipeline.classifiers import MostCommonLabelClassifier, SubsamplingPredefinedIndicesIterator\n'), ((1704, 1726), 'collections.Counter', 'Counter', (['y_vals[train]'], {}), '(y_vals[train])\n', (1711, 1726), False, 'from collections import Counter\n'), ((1730, 1755), 'collections.Counter', 'Counter', (['{(0): 3, (1): 3}'], {}), '({(0): 3, (1): 3})\n', (1737, 1755), False, 'from collections import Counter\n'), ((1314, 1336), 'collections.Counter', 'Counter', (['y_vals[train]'], {}), '(y_vals[train])\n', (1321, 1336), False, 'from collections import Counter\n')] |
# -*- coding: utf-8 -*-
"""
Auxiliary functions and operations for network construction, some of which have
been deprecated for high-level modules in TensorFlow.
@author: <NAME>
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import tensorflow as tf
import numpy as np
from core.utils_2d import transform, resize, affine_to_shift, get_reference_grid_numpy
#######################################################################
# Some low-level or deprecated implementations of network layers
#######################################################################
def weight_variable(shape, name="weight"):
    """Create a convolution weight variable with Xavier/Glorot uniform initialization.

    The bound sqrt(6 / (fan_in + fan_out)) is suited for tanh activations;
    scale it by 4 for sigmoid activations.
    """
    fan_in, fan_out = shape[-2:]
    bound = np.sqrt(6.0 / (fan_in + fan_out))
    initial = tf.random_uniform(shape, minval=-bound, maxval=bound, dtype=tf.float32)
    return tf.Variable(initial, name=name)
def weight_variable_devonc(shape, name="weight_deconv"):
    """Create a deconvolution weight variable with Xavier/Glorot uniform initialization.

    The bound sqrt(6 / (fan_in + fan_out)) is suited for tanh activations;
    scale it by 4 for sigmoid activations.
    """
    fan_in, fan_out = shape[-2:]
    bound = np.sqrt(6.0 / (fan_in + fan_out))
    initial = tf.random_uniform(shape, minval=-bound, maxval=bound, dtype=tf.float32)
    return tf.Variable(initial, name=name)
def bias_variable(shape, name="bias"):
    """Create a bias variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)
def conv3d(x, W, keep_prob_):
    """3-D convolution (unit stride, SAME padding) followed by dropout with keep probability `keep_prob_`."""
    convolved = tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')
    return tf.nn.dropout(convolved, keep_prob_)
def deconv3d(x, w, stride):
    """Transposed 3-D convolution that doubles each spatial dimension and halves the channel count."""
    in_shape = tf.shape(x)
    out_shape = tf.stack([in_shape[0], in_shape[1] * 2, in_shape[2] * 2, in_shape[3] * 2, in_shape[4] // 2])
    return tf.nn.conv3d_transpose(x, w, out_shape, strides=[1, stride, stride, stride, 1], padding='VALID')
def max_pool3d(x, n):
    """Max-pool a 5-D tensor with an n x n x n window and a matching stride (VALID padding)."""
    window = [1, n, n, n, 1]
    return tf.nn.max_pool3d(x, ksize=window, strides=window, padding='VALID')
'''
def batch_norm(x, train_phase):
x_norm = tf.layers.batch_normalization(x, axis=0, training=train_phase)
return x_norm
'''
def pixel_wise_softmax(output_map):
    """
    deprecated function for tf.nn.softmax

    NOTE(review): each exponential is normalized by the sum of itself and the
    channel-reversed exponential map, which only matches a true softmax for
    two-channel outputs -- kept unchanged because the function is deprecated.
    """
    exponential_map = tf.exp(output_map)
    # tf.reverse with a boolean mask flips only the last (channel) axis
    evidence = tf.add(exponential_map, tf.reverse(exponential_map, [False, False, False, True]))
    return tf.divide(exponential_map, evidence, name="pixel_wise_softmax")
def pixel_wise_softmax_2(output_map):
    """
    deprecated function for tf.nn.softmax

    Normalizes the exponentials by their channel-wise sum; the quotient is
    clipped to [1e-10, 1.0] to guard against numerical blow-ups.
    """
    exponential_map = tf.exp(output_map)
    sum_exp = tf.reduce_sum(exponential_map, -1, keepdims=True)
    # tile the per-pixel sum across the channel axis before the element-wise division
    tensor_sum_exp = tf.tile(sum_exp, tf.stack([1, 1, 1, 1, tf.shape(output_map)[-1]]))
    return tf.clip_by_value(tf.divide(exponential_map, tensor_sum_exp), 1e-10, 1.0)
def cross_entropy_map(labels, probs):
    """
    Compute the element-wise cross-entropy map by clipping the values of softmax probabilities to avoid Nan loss.
    :param labels: ground-truth value using one-hot representation
    :param probs: probability map as the output of softmax
    :return: A tensor of the same shape as labels and of the same shape as probs with the cross entropy loss.
    """
    # clip away zeros so tf.log never produces -inf / NaN
    safe_probs = tf.clip_by_value(probs, 1e-7, 1.0)
    return tf.reduce_sum(-labels * tf.log(safe_probs), axis=-1, name="cross_entropy_map")
def balance_weight_map(flat_labels):
    """
    :param flat_labels: masked ground truth tensor in shape [-1, n_class]
    :return the balance weight map in 1-D tensor
    """
    # number of (masked) samples
    n = tf.shape(flat_labels)[0]
    # weight each sample by the inverse frequency of its class:
    # 1 / (#samples of that class), selected via the one-hot label
    return tf.reduce_sum(tf.multiply(flat_labels, tf.tile(1 / tf.reduce_sum(flat_labels, axis=0, keepdims=True),
                                                          [n, 1])), axis=-1, name='balance_weight_map')
#######################################################################
# Tensor manipulations
#######################################################################
def crop_and_concat(x1, x2):
    """
    Centre-crop x1 to the spatial size of x2 and concatenate both along the channel axis.
    """
    shape1 = tf.shape(x1)
    shape2 = tf.shape(x2)
    # start the crop half of the size difference into each spatial axis
    begin = [0,
             (shape1[1] - shape2[1]) // 2,
             (shape1[2] - shape2[2]) // 2,
             (shape1[3] - shape2[3]) // 2,
             0]
    extent = [-1, shape2[1], shape2[2], shape2[3], -1]
    cropped = tf.slice(x1, begin, extent, name='crop')
    joined = tf.concat([cropped, x2], -1, name='crop_concat')
    # restore the static channel dimension lost through the dynamic slice/concat
    joined.set_shape([None, None, None, None, x1.get_shape().as_list()[-1] + x2.get_shape().as_list()[-1]])
    return joined
def crop_and_add(x1, x2):
    """
    Centre-crop x1 to the spatial size of x2 and add the two tensors element-wise.
    """
    shape1 = tf.shape(x1)
    shape2 = tf.shape(x2)
    # start the crop half of the size difference into each spatial axis
    begin = [0,
             (shape1[1] - shape2[1]) // 2,
             (shape1[2] - shape2[2]) // 2,
             (shape1[3] - shape2[3]) // 2,
             0]
    extent = [-1, shape2[1], shape2[2], shape2[3], -1]
    cropped = tf.slice(x1, begin, extent, name='crop')
    return tf.add(cropped, x2, name='crop_add')
def pad_and_concat(x1, x2):
    """
    Pad x2 to match the size of x1 and concatenate them.
    """
    x1_shape = tf.shape(x1)
    x2_shape = tf.shape(x2)
    # centre offsets: half of the spatial size difference on each axis
    offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, (x1_shape[3] - x2_shape[3]) // 2,
               0]
    # symmetric padding; for odd differences the extra voxel goes to the trailing side
    paddings = [[0, 0],
                [offsets[1], x1_shape[1] - x2_shape[1] - offsets[1]],
                [offsets[2], x1_shape[2] - x2_shape[2] - offsets[2]],
                [offsets[3], x1_shape[3] - x2_shape[3] - offsets[3]],
                [0, 0]]
    x2_pad = tf.pad(x2, paddings, name='pad')
    pad_concat = tf.concat([x1, x2_pad], -1, name='pad_concat')
    # restore the static channel dimension lost through the dynamic pad/concat
    pad_concat.set_shape([None, None, None, None, x1.get_shape().as_list()[-1] + x2.get_shape().as_list()[-1]])
    return pad_concat
def pad_and_add(x1, x2):
    """
    Pad x2 to match the size of x1 and add them together.
    """
    x1_shape = tf.shape(x1)
    x2_shape = tf.shape(x2)
    # centre offsets: half of the spatial size difference on each axis
    offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, (x1_shape[3] - x2_shape[3]) // 2,
               0]
    # symmetric padding; for odd differences the extra voxel goes to the trailing side
    paddings = [[0, 0],
                [offsets[1], x1_shape[1] - x2_shape[1] - offsets[1]],
                [offsets[2], x1_shape[2] - x2_shape[2] - offsets[2]],
                [offsets[3], x1_shape[3] - x2_shape[3] - offsets[3]],
                [0, 0]]
    x2_pad = tf.pad(x2, paddings, name='pad')
    return tf.add(x1, x2_pad, name='pad_add')
def crop_to_tensor(x1, x2):
    """
    Centre-crop tensor x1 to the spatial shape of x2.
    """
    shape1 = tf.shape(x1)
    shape2 = tf.shape(x2)
    # start the crop half of the size difference into each spatial axis
    begin = [0,
             (shape1[1] - shape2[1]) // 2,
             (shape1[2] - shape2[2]) // 2,
             (shape1[3] - shape2[3]) // 2,
             0]
    extent = [-1, shape2[1], shape2[2], shape2[3], -1]
    cropped = tf.slice(x1, begin, extent, name='crop')
    # keep the static channel dimension known after the dynamic slice
    cropped.set_shape([None, None, None, None, x1.get_shape().as_list()[-1]])
    return cropped
def pad_to_tensor(x1, x2):
    """
    Pad tensor x1 to match the shape of x2.
    """
    x1_shape = tf.shape(x1)
    x2_shape = tf.shape(x2)
    # centre offsets: half of the spatial size difference on each axis
    offsets = [0, (x2_shape[1] - x1_shape[1]) // 2, (x2_shape[2] - x1_shape[2]) // 2, (x2_shape[3] - x1_shape[3]) // 2,
               0]
    # symmetric padding; for odd differences the extra voxel goes to the trailing side
    paddings = [[0, 0],
                [offsets[1], x2_shape[1] - x1_shape[1] - offsets[1]],
                [offsets[2], x2_shape[2] - x1_shape[2] - offsets[2]],
                [offsets[3], x2_shape[3] - x1_shape[3] - offsets[3]],
                [0, 0]]
    x1_pad = tf.pad(x1, paddings, name='pad')
    return x1_pad
#######################################################################
# Pre-defined and integrated implementation of network layers
#######################################################################
def transition_block_layer(inputs, pool_size, filter_size, dropout_rate=0., compression_rate=1,
                           initializer=tf.initializers.he_uniform(), normalizer=None, regularizer=None,
                           train_phase=True, trainable=True, name_or_scope='transition_block', **kwargs):
    """
    Apply a transition block composed of a 1x1 convolution and a max-pooling layer to the given inputs.
    :param inputs: Input feature maps.
    :param pool_size: Size of the pooling window.
    :param filter_size: The size of the convolution kernel.
    :param compression_rate: The compression factor for model compactness in the transition block, set to 1. if without
        compression, value set > 1. for expansion.
    :param initializer: weight initializer, default as Kaiming uniform initialization
    :param normalizer: type of normalization to use, default is None,
        choose from None, 'batch', 'group', 'layer', 'instance', 'batch_instance'
    :param regularizer: Regularizer for weights.
    :param train_phase: Whether in training or in inference mode.
    :param trainable: Whether add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES.
    :param dropout_rate: Dropout probability.
    :param name_or_scope: The scope to open.
    :return: Tuple of (down-sampled feature maps, accumulated regularization loss).
    """
    # NOTE(review): the default `initializer` is created once at import time and
    # shared by every call that relies on the default -- confirm this is intended.
    dropout_type = kwargs.pop('dropout_type', 'regular')
    with tf.variable_scope(name_or_scope):
        regularization_loss = 0.
        input_feature_size = inputs.get_shape().as_list()[-1]
        with tf.variable_scope('transition_block_layer'):
            # pool first, then 1x1-style conv compresses/expands the channel count
            pool = tf.keras.layers.MaxPool2D(pool_size=pool_size, strides=pool_size, name='max_pool')(inputs)
            conv2d = tf.keras.layers.Conv2D(filters=int(compression_rate * input_feature_size),
                                            kernel_size=filter_size, padding='same', use_bias=False,
                                            kernel_initializer=initializer, kernel_regularizer=regularizer,
                                            trainable=trainable, name='conv')
            conv = conv2d(pool)
            # weight-decay terms are accumulated manually rather than via graph collections
            if regularizer == 'l2':
                regularization_loss += tf.reduce_sum(tf.square(conv2d.kernel))
            if regularizer == 'l1':
                regularization_loss += tf.reduce_sum(tf.abs(conv2d.kernel))
            norm, loss = normalize(conv, type=normalizer, training=train_phase, regularizer=regularizer, **kwargs)
            regularization_loss += loss
            relu = tf.nn.relu(norm, name='relu')
            feature_maps = dropout_layer(relu, dropout_rate, train_phase, type=dropout_type)
    return feature_maps, regularization_loss
def residual_block_layer(inputs, filter_size, feature_size, num_layers=2, strides=1,
                         dilation_rate=1, padding='same', dropout_rate=0.,
                         initializer=tf.initializers.he_uniform(), normalizer=None, regularizer=None,
                         train_phase=True, trainable=True, name_or_scope='residual_block', **kwargs):
    """
    Apply residual block layers to the given inputs.
    :param inputs: Input feature maps.]
    :param filter_size: Size of the convolution kernel.
    :param feature_size: The number of filters in each convolution.
    :param num_layers: Number of convolutional block layers.
    :param strides: The strides of the convolution.
    :param dilation_rate: the dilation rate to use for dilated convolution.
    :param initializer: weight initializer, default as Kaiming uniform initialization
    :param normalizer: type of normalization to use, default is None,
        choose from None, 'batch', 'group', 'layer', 'instance', 'batch_instance'
    :param regularizer: Regularizer for weights.
    :param padding: The padding type, one of "valid" or "same".
    :param train_phase: Whether in training or in inference mode.
    :param trainable: Whether add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES.
    :param res_kernel_size: The residual block kernel size.
    :param dropout_rate: Dropout probability.
    :param name_or_scope: The scope to open.
    :return: Tuple of (feature maps of shape [None, None, None, None, feature_size],
        accumulated regularization loss).
    """
    dropout_type = kwargs.pop('dropout_type', 'regular')
    with tf.variable_scope(name_or_scope):
        # identity skip connection requires matching channel counts
        assert inputs.get_shape().as_list()[-1] == feature_size, "The number of input feature maps must be equal to the" \
                                                                 " output feature maps in the residual block."
        regularization_loss = 0.
        feature_maps = inputs
        for layer in range(num_layers):
            with tf.variable_scope('res_block_layer%s' % layer):
                conv2d = tf.keras.layers.Conv2D(filters=feature_size, kernel_size=filter_size, strides=strides,
                                                padding=padding, dilation_rate=dilation_rate, use_bias=False,
                                                kernel_initializer=initializer,
                                                kernel_regularizer=regularizer, trainable=trainable,
                                                name='conv')
                conv = conv2d(feature_maps)
                # weight-decay terms are accumulated manually rather than via graph collections
                if regularizer == 'l2':
                    regularization_loss += tf.reduce_sum(tf.square(conv2d.kernel))
                if regularizer == 'l1':
                    regularization_loss += tf.reduce_sum(tf.abs(conv2d.kernel))
                norm, loss = normalize(conv, type=normalizer, training=train_phase, regularizer=regularizer, **kwargs)
                regularization_loss += loss
                # no ReLU/dropout after the final conv: activation happens after the skip addition
                if layer < num_layers - 1:
                    relu = tf.nn.relu(norm, name='relu')
                    feature_maps = dropout_layer(relu, dropout_rate, train_phase, type=dropout_type)
                else:
                    feature_maps = norm
        # res_conv = tf.layers.conv3d(inputs, filters=feature_size,
        #                             kernel_size=res_kernel_size, padding=padding, use_bias=False,
        #                             kernel_initializer=tf.contrib.layers.xavier_initializer(),
        #                             kernel_regularizer=regularizer, trainable=trainable,
        #                             name='res_conv')
        # res_norm = normalize(res_conv, type=normalizer, **kwargs)
        # res_relu = tf.nn.relu(res_norm, name='res_relu')
        res_add = tf.add(feature_maps, inputs, name='res_add')
        relu = tf.nn.relu(res_add, name='output_feature_maps')
        outputs = dropout_layer(relu, dropout_rate, train_phase, type=dropout_type)
        # alternative
        # outputs = tf.add(tf.nn.relu(bn1), inputs, name='output_feature_maps')
    return outputs, regularization_loss
def conv_block_layer(inputs, filter_size, feature_size, num_layers=2, strides=1, padding='same',
                     dilation_rate=1, dropout_rate=0., initializer=tf.initializers.he_uniform(),
                     normalizer=None, regularizer=None, train_phase=True, trainable=True,
                     name_or_scope='conv_block', **kwargs):
    """
    Apply convolutional block layers to the given inputs.
    :param inputs: Input feature maps.
    :param num_layers: Number of convolutional block layers.
    :param filter_size: Size of the convolution kernel.
    :param feature_size: The number of filters in each convolution.
    :param strides: The strides of the convolution.
    :param dilation_rate: the dilation rate to use for dilated convolution.
    :param initializer: weight initializer, default as Kaiming uniform initialization for ReLU activation
    :param normalizer: type of normalization to use, default is None,
        choose from None, 'batch', 'group', 'layer', 'instance', 'batch_instance'
    :param regularizer: The regularizer for weights.
    :param padding: The padding type, one of "valid" or "same".
    :param train_phase: Whether in training or in inference mode.
    :param trainable: Whether add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES.
    :param dropout_rate: Dropout probability.
    :param name_or_scope: The scope to open.
    :return: Tuple of (feature maps of shape [None, None, None, None, feature_size],
        accumulated regularization loss).
    """
    dropout_type = kwargs.pop('dropout_type', 'regular')
    with tf.variable_scope(name_or_scope):
        regularization_loss = 0.
        feature_maps = inputs
        # stack of conv -> norm -> relu -> dropout units
        for k in range(num_layers):
            with tf.variable_scope('conv_block_layer%d' % k):
                conv2d = tf.keras.layers.Conv2D(filters=feature_size, kernel_size=filter_size, strides=strides,
                                                padding=padding, dilation_rate=dilation_rate, use_bias=False,
                                                kernel_initializer=initializer,
                                                kernel_regularizer=regularizer, trainable=trainable,
                                                name='conv')
                conv = conv2d(feature_maps)
                # weight-decay terms are accumulated manually rather than via graph collections
                if regularizer == 'l2':
                    regularization_loss += tf.reduce_sum(tf.square(conv2d.kernel))
                if regularizer == 'l1':
                    regularization_loss += tf.reduce_sum(tf.abs(conv2d.kernel))
                norm, loss = normalize(conv, type=normalizer, training=train_phase, regularizer=regularizer, **kwargs)
                regularization_loss += loss
                relu = tf.nn.relu(norm, name='relu')
                feature_maps = dropout_layer(relu, dropout_rate, train_phase, type=dropout_type)
    return feature_maps, regularization_loss
def conv_upsample(inputs, zoom_factor, filter_size, feature_size, strides=1, dilation_rate=1,
                  initializer=tf.random_normal_initializer(stddev=0.001), regularizer=None,
                  padding='same', trainable=True, interp_method='linear', name_or_scope='conv_upsample'):
    """
    Convolve the inputs and up-sample the result by interpolation (via the Resize layer).
    :param inputs: Input feature maps.
    :param zoom_factor: Spatial up-sampling factor passed to Resize.
    :param filter_size: Size of the convolution kernel.
    :param feature_size: Number of convolution filters.
    :param strides: The strides of the convolution.
    :param dilation_rate: the dilation rate to use for dilated convolution.
    :param initializer: weight initializer (small random normal by default).
    :param regularizer: The regularizer for weights ('l1' or 'l2' adds a manual penalty term).
    :param padding: The padding type, one of "valid" or "same".
    :param trainable: Whether add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES.
    :param interp_method: 'linear' or 'nearest' interpolation for the resize.
    :param name_or_scope: The scope to open.
    :return: Tuple of (resized feature maps, accumulated regularization loss).
    """
    with tf.variable_scope(name_or_scope):
        regularization_loss = 0.
        conv2d = tf.keras.layers.Conv2D(filters=feature_size, kernel_size=filter_size, strides=strides,
                                        padding=padding, dilation_rate=dilation_rate,
                                        kernel_initializer=initializer,
                                        kernel_regularizer=regularizer, bias_regularizer=regularizer,
                                        trainable=trainable, name='conv')
        conv = conv2d(inputs)
        # weight-decay terms are accumulated manually rather than via graph collections
        if regularizer == 'l2':
            regularization_loss += tf.reduce_sum(tf.square(conv2d.kernel))
        if regularizer == 'l1':
            regularization_loss += tf.reduce_sum(tf.abs(conv2d.kernel))
        resized_conv = Resize(zoom_factor=zoom_factor, name='resize_conv', interp_method=interp_method)(conv)
    return resized_conv, regularization_loss
def linear_additive_upsample(input_tensor, new_size=2, n_split=4):
    """
    Apply linear additive up-sampling layer, described in paper Wojna et al., The devil is in the decoder,
    https://arxiv.org/abs/1707.05847.
    :param input_tensor: Input tensor.
    :param new_size: The factor of up-sampling.
    :param n_split: The n_split consecutive channels are added together.
    :return: Linearly additively upsampled feature maps.
    """
    with tf.name_scope('linear_additive_upsample'):
        n_channels = input_tensor.get_shape().as_list()[-1]
        input_dim = input_tensor.shape.ndims
        assert n_split > 0 and n_channels % n_split == 0, "Number of feature channels should be divisible by n_split."
        # rank decides between 2-D and 3-D up-sampling
        if input_dim == 4:
            upsample = tf.keras.layers.UpSampling2D(size=new_size, name='upsample')(input_tensor)
        elif input_dim == 5:
            upsample = tf.keras.layers.UpSampling3D(size=new_size, name='upsample')(input_tensor)
        else:
            raise TypeError('Incompatible input spatial rank: %d' % input_dim)
        # collapse every group of n_split consecutive channels into one output channel
        groups = tf.split(upsample, n_split, axis=-1)
        stacked = tf.stack(groups, axis=-1)
        output_tensor = tf.reduce_sum(stacked, axis=-1, name='output_tensor')
    return output_tensor
def residual_additive_upsample(inputs, filter_size, strides, feature_size, n_split=2, dropout_rate=0.,
                               initializer=tf.initializers.he_uniform(), normalizer=None,
                               regularizer=None, train_phase=True, trainable=True,
                               name_or_scope='residual_additive_upsample', **kwargs):
    """
    Apply residual linear additive up-sampling layer, described in paper Wojna et al., The devil is in the decoder,
    https://arxiv.org/abs/1707.05847, where the up-sampling are performed with a transposed convolution as well as
    a linear additive up-sampling.
    :param inputs: The input tensor.
    :param filter_size: The kernel size of the transposed convolution.
    :param strides: The strides of the transposed convolution / The factor of up-sampling.
    :param feature_size: The number of filters in the transposed convolution.
    :param n_split: The n_split consecutive channels are added together.
    :param initializer: weight initializer, default as Kaiming uniform initializer
    :param normalizer: type of normalization to use, default is None,
        choose from None, 'batch', 'group', 'layer', 'instance', 'batch_instance'
    :param regularizer: The regularizer for weights.
    :param train_phase: Whether in training or in inference mode.
    :param trainable: Whether add variables to the graph collection GraphKeys.TRAINABLE_VARIABLES.
    :param dropout_rate: Dropout probability.
    :param name_or_scope: The variable scope to open.
    :return: Tuple of (up-sampled feature maps, accumulated regularization loss).
    """
    dropout_type = kwargs.pop('dropout_type', 'regular')
    n_channel = inputs.get_shape().as_list()[-1]
    # the additive-upsample path must produce the same channel count as the deconv path
    assert n_channel == feature_size * n_split, "The number of input channels must be the product of output feature " \
                                                "size and the number of splits."
    with tf.variable_scope(name_or_scope):
        regularization_loss = 0.
        deconv2d = tf.keras.layers.Conv2DTranspose(filters=feature_size, kernel_size=filter_size, strides=strides,
                                                   padding='same', use_bias=False, kernel_initializer=initializer,
                                                   kernel_regularizer=regularizer,
                                                   trainable=trainable, name='deconv')
        deconv = deconv2d(inputs)
        # weight-decay terms are accumulated manually rather than via graph collections
        if regularizer == 'l2':
            regularization_loss += tf.reduce_sum(tf.square(deconv2d.kernel))
        if regularizer == 'l1':
            regularization_loss += tf.reduce_sum(tf.abs(deconv2d.kernel))
        norm, loss = normalize(deconv, type=normalizer, training=train_phase, regularizer=regularizer, **kwargs)
        regularization_loss += loss
        relu = tf.nn.relu(norm, name='relu')
        dropout = dropout_layer(relu, dropout_rate, train_phase, type=dropout_type)
        # parameter-free additive up-sampling forms the residual branch
        upsample = linear_additive_upsample(inputs, strides, n_split)
    return tf.add(dropout, upsample, name='res_upsample'), regularization_loss
# Dropout layer
def dropout_layer(inputs, rate, training, type='regular'):
    """
    Apply regular or spatial dropout to 3D tensors.
    :param inputs: input 3D tensor
    :param rate: dropout rate
    :param training: training mode or inference mode
    :param type: 'regular' or 'spatial'
    :return: the tensor with dropout applied, or `inputs` unchanged when rate == 0
    """
    # a zero rate is a no-op, so skip building the layer altogether
    if rate == 0:
        return inputs
    if type == 'regular':
        layer = tf.keras.layers.Dropout(rate=rate, name='dropout')
    elif type == 'spatial':
        # NOTE(review): SpatialDropout3D expects 5-D input although most layers in
        # this file build 2-D convolutions -- confirm the intended tensor rank
        layer = tf.keras.layers.SpatialDropout3D(rate=rate, name='dropout')
    else:
        raise NotImplementedError
    return layer(inputs, training=training)
# Squeeze-and-Excitation Block
def squeeze_excitation_layer(inputs, out_dim, ratio=16, **kwargs):
    """
    Apply Squeeze-and-Excitation layer to 3D tensors.
    Inspired by https://github.com/taki0112/SENet-Tensorflow
    :param inputs: input tensor of shape [n_batch, *vol_shape, channels]
    :param out_dim: output channel size
    :param ratio: reduction ratio, default as 16
    :param kwargs: optional 'scope' name for the variable scope
    :return: the input tensor re-weighted channel-wise by the excitation gates
    """
    with tf.variable_scope(kwargs.pop('scope', 'squeeze_and_excitation')):
        # squeeze: global average pool over the three spatial axes
        sq = tf.reduce_mean(inputs, axis=(1, 2, 3), name='squeeze')
        # excitation: bottleneck MLP (reduce by `ratio`, then restore) with sigmoid gates
        fc1 = tf.layers.dense(sq, units=out_dim // ratio, activation=tf.nn.relu, name='fc1')
        fc2 = tf.layers.dense(fc1, units=out_dim, name='fc2')
        ex = tf.nn.sigmoid(fc2, name='excitation')
        # broadcast the per-channel gates back over the spatial axes
        ex = tf.reshape(ex, [-1, 1, 1, 1, out_dim])
        scale = inputs * ex
    return scale
# normalization ops
def group_norm(inputs, groups=4, **kwargs):
    """
    Apply group normalization layer to 2D tensors.
    Inspired from https://github.com/shaohua0116/Group-Normalization-Tensorflow/blob/master/ops.py
    :param inputs: input tensor in NHWC layout
    :param groups: number of groups, set to 1 for layer normalization, set to channel size for instance normalization
    :param eps: numerical-stability epsilon (via kwargs, default 1e-5)
    :return: the normalized tensor in NHWC layout

    NOTE(review): unlike batch_norm/instance_norm this returns a single tensor,
    not an (output, regularization_loss) pair -- callers that unpack two values
    will fail on this path; confirm intended contract.
    """
    eps = kwargs.pop('eps', 1e-5)
    with tf.variable_scope(kwargs.pop('scope', 'group_norm')):
        # NHWC -> NCHW so channels can be grouped contiguously
        x = tf.transpose(inputs, [0, 3, 1, 2])
        N, C, H, W = x.get_shape().as_list()
        G = min(groups, C)
        x = tf.reshape(x, [-1, G, C // G, H, W])
        # per-group mean/variance over (channels-in-group, H, W)
        mean, var = tf.nn.moments(x, [2, 3, 4], keepdims=True)
        x = (x - mean) / tf.sqrt(var + eps)
        # learnable per-channel scale and shift
        gamma = tf.get_variable('gamma', [C], initializer=tf.constant_initializer(1.0), dtype=tf.float32)
        beta = tf.get_variable('beta', [C], initializer=tf.constant_initializer(0.), dtype=tf.float32)
        gamma = tf.reshape(gamma, [1, C, 1, 1])
        beta = tf.reshape(beta, [1, C, 1, 1])
        output = tf.reshape(x, [-1, C, H, W]) * gamma + beta
        # back to NHWC
        return tf.transpose(output, [0, 2, 3, 1])
def batch_norm(inputs, **kwargs):
    """
    Apply batch normalization layer to 2D tensors.
    :param inputs: input tensor in NHWC layout
    :param kwargs: optional 'eps' (default 1e-5), 'regularizer' ('l1'/'l2'), 'scope'
    :return: tuple of (normalized tensor, regularization loss on gamma)
    """
    eps = kwargs.pop('eps', 1e-5)
    regularizer = kwargs.pop('regularizer', None)
    with tf.variable_scope(kwargs.pop('scope', 'batch_norm')):
        regularization_loss = 0.
        C = inputs.shape[-1]
        # moments over batch and both spatial axes (per channel)
        mean, var = tf.nn.moments(inputs, axes=[0, 1, 2], keep_dims=True)
        gamma = tf.get_variable('gamma', [C], initializer=tf.constant_initializer(1.0), dtype=tf.float32)
        beta = tf.get_variable('beta', [C], initializer=tf.constant_initializer(0.), dtype=tf.float32)
        output = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, eps)
        # optional penalty on the scale parameter only
        if regularizer == 'l2':
            regularization_loss += tf.reduce_mean(tf.square(gamma))
        if regularizer == 'l1':
            regularization_loss += tf.reduce_mean(tf.abs(gamma))
    return output, regularization_loss
def instance_norm(inputs, **kwargs):
    """
    Apply instance normalization layer to 2D tensors.
    :param inputs: input tensor in NHWC layout
    :param kwargs: optional 'eps' (default 1e-5), 'regularizer' ('l1'/'l2'), 'scope'
    :return: tuple of (normalized tensor, regularization loss on gamma)
    """
    eps = kwargs.pop('eps', 1e-5)
    regularizer = kwargs.pop('regularizer', None)
    with tf.variable_scope(kwargs.pop('scope', 'instance_norm')):
        regularization_loss = 0.
        C = inputs.shape[-1]
        # moments over the spatial axes only (per sample, per channel)
        mean, var = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)
        x = (inputs - mean) / (tf.sqrt(var + eps))
        gamma = tf.get_variable('gamma', [C], initializer=tf.constant_initializer(1.0), dtype=tf.float32)
        beta = tf.get_variable('beta', [C], initializer=tf.constant_initializer(0.), dtype=tf.float32)
        output = x * gamma + beta
        # optional penalty on the scale parameter only
        if regularizer == 'l2':
            regularization_loss += tf.reduce_mean(tf.square(gamma))
        if regularizer == 'l1':
            regularization_loss += tf.reduce_mean(tf.abs(gamma))
    return output, regularization_loss
def batch_instance_norm(inputs, **kwargs):
    """
    Apply batch-instance normalization layer to 2D tensors.
    Inspired by https://github.com/taki0112/Batch_Instance_Normalization-Tensorflow
    :param inputs: input tensor in NHWC layout
    :param kwargs: optional 'eps' (default 1e-5), 'scope'
    :return: the normalized tensor

    NOTE(review): unlike batch_norm/instance_norm this returns a single tensor,
    not an (output, regularization_loss) pair -- confirm intended contract.
    """
    eps = kwargs.pop('eps', 1e-5)
    with tf.variable_scope(kwargs.pop('scope', 'batch_instance_norm')):
        C = inputs.shape[-1]
        # batch-normalized branch: moments over batch and spatial axes
        batch_mean, batch_var = tf.nn.moments(inputs, axes=[0, 1, 2], keep_dims=True)
        x_batch = (inputs - batch_mean) / (tf.sqrt(batch_var + eps))
        # instance-normalized branch: moments over spatial axes only
        ins_mean, ins_var = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)
        x_ins = (inputs - ins_mean) / (tf.sqrt(ins_var + eps))
        # rho in [0, 1] learns the per-channel mix between the two branches
        rho = tf.get_variable("rho", [C], initializer=tf.constant_initializer(1.0),
                              constraint=lambda x: tf.clip_by_value(x, clip_value_min=0.0, clip_value_max=1.0))
        gamma = tf.get_variable("gamma", [C], initializer=tf.constant_initializer(1.0), dtype=tf.float32)
        beta = tf.get_variable("beta", [C], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        x_hat = rho * x_batch + (1 - rho) * x_ins
        output = x_hat * gamma + beta
    return output
def normalize(inputs, type='batch', **kwargs):
    """
    Dispatch to the requested normalization and always return a uniform pair.

    Fix: previously the `None`, 'group', 'layer' and 'batch_instance' paths
    returned a bare tensor while 'batch' and 'instance' returned
    (tensor, regularization_loss); every caller in this file unpacks two values
    (`norm, loss = normalize(...)`), so the non-tuple paths crashed. All paths
    now return (output, regularization_loss), with a 0. loss where the
    underlying normalizer does not produce one.

    :param inputs: input tensor to normalize
    :param type: one of None, 'batch', 'group', 'layer', 'instance', 'batch_instance'
    :param kwargs: forwarded to the chosen normalizer (e.g. eps, scope, groups, regularizer)
    :return: tuple of (normalized tensor, regularization loss)
    :raises NotImplementedError: for an unknown `type`
    """
    if type is None:
        # no-op; zero loss keeps the (output, loss) contract for callers
        return inputs, 0.
    elif type == 'batch':
        return batch_norm(inputs, **kwargs)
        # training = kwargs.pop('training')
        # return tf.keras.layers.BatchNormalization(name=kwargs.pop('name', 'bn'),
        #                                           **kwargs)(inputs, training=training)
    elif type == 'group':
        return group_norm(inputs, groups=kwargs.pop('groups', 4), **kwargs), 0.
    elif type == 'layer':
        # layer norm is group norm with a single group
        return group_norm(inputs, groups=1, **kwargs), 0.
    elif type == 'instance':
        return instance_norm(inputs, **kwargs)
    elif type == 'batch_instance':
        return batch_instance_norm(inputs, **kwargs), 0.
    else:
        raise NotImplementedError
#######################################################################
# Spatial transformation modules
#######################################################################
class SpatialTransformer(tf.keras.layers.Layer):
    """
    N-D Spatial Transformer Tensorflow / Keras Layer
    The Layer can handle both affine and dense transforms.
    Both transforms are meant to give a 'shift' from the current position.
    Therefore, a dense transform gives displacements (not absolute locations) at each voxel,
    and an affine transform gives the *difference* of the affine matrix from
    the identity matrix.
    If you find this function useful, please cite:
    Unsupervised Learning for Fast Probabilistic Diffeomorphic Registration
    <NAME>, <NAME>, <NAME>, <NAME>
    MICCAI 2018.
    Originally, this code was based on voxelmorph code, which
    was in turn transformed to be dense with the help of (affine) STN code
    via https://github.com/kevinzakka/spatial-transformer-network
    Since then, we've re-written the code to be generalized to any
    dimensions, and along the way wrote grid and interpolation functions
    ToDo:
        The sampling coordinates in this version are defined in the atlas space.
        Need to modify such that the sampling coordinates are defined in the target space.
    """

    def __init__(self,
                 interp_method='linear',
                 indexing='ij',
                 single_transform=False,
                 **kwargs):
        """
        Parameters:
            interp_method: 'linear' or 'nearest'
            single_transform: whether a single transform supplied for the whole batch
            indexing (default: 'ij'): 'ij' (matrix) or 'xy' (cartesian)
                'xy' indexing will have the first two entries of the flow
                (along last axis) flipped compared to 'ij' indexing
        """
        self.interp_method = interp_method
        # ndims/inshape are unknown until build() sees the input shapes
        self.ndims = None
        self.inshape = None
        self.single_transform = single_transform
        assert indexing in ['ij', 'xy'], "indexing has to be 'ij' (matrix) or 'xy' (cartesian)"
        self.indexing = indexing
        super(self.__class__, self).__init__(**kwargs)

    def build(self, input_shape):
        """
        input_shape should be a list for two inputs:
        input1: image.
        input2: transform Tensor
            if affine:
                should be a N x N+1 matrix
                *or* a N*(N+1) tensor (which will be reshape to N x (N+1) and an identity row added)
            if not affine:
                should be a *vol_shape x N
        """
        if len(input_shape) > 2:
            raise Exception('Spatial Transformer must be called on a list of length 2.'
                            'First argument is the image, second is the transform.')
        # set up number of dimensions
        self.ndims = len(input_shape[0]) - 2
        self.inshape = input_shape
        vol_shape = input_shape[0][1:-1]
        trf_shape = input_shape[1][1:]
        # the transform is an affine iff:
        # it's a 1D Tensor [dense transforms need to be at least ndims + 1]
        # it's a 2D Tensor and shape == [N+1, N+1].
        # [dense with N=1, which is the only one that could have a transform shape of 2, would be of size Mx1]
        self.is_affine = len(trf_shape) == 1 or \
                         (len(trf_shape) == 2 and all([f == (self.ndims + 1) for f in trf_shape]))
        # check sizes
        if self.is_affine and len(trf_shape) == 1:
            ex = self.ndims * (self.ndims + 1)
            if trf_shape[0] != ex:
                raise Exception('Expected flattened affine of len %d but got %d'
                                % (ex, trf_shape[0]))
        if not self.is_affine:
            if trf_shape[-1] != self.ndims:
                raise Exception('Offset flow field size expected: %d, found: %d'
                                % (self.ndims, trf_shape[-1]))
        # confirm built
        self.built = True

    def call(self, inputs):
        """
        Parameters
            inputs: list with two entries: [volume, transform]
        """
        # check shapes
        assert len(inputs) == 2, "inputs has to be len 2, found: %d" % len(inputs)
        vol = inputs[0]
        trf = inputs[1]
        # necessary for multi_gpu models...
        vol = tf.reshape(vol, [-1, *self.inshape[0][1:]])
        trf = tf.reshape(trf, [-1, *self.inshape[1][1:]])
        # go from affine: convert each batch entry's affine matrix to a dense shift field
        if self.is_affine:
            trf = tf.map_fn(lambda x: self._single_aff_to_shift(x, vol.shape[1:-1]), trf, dtype=tf.float32)
        # prepare location shift
        if self.indexing == 'xy':  # shift the first two dimensions
            trf_split = tf.split(trf, trf.shape[-1], axis=-1)
            trf_lst = [trf_split[1], trf_split[0], *trf_split[2:]]
            trf = tf.concat(trf_lst, -1)
        # map transform across batch
        if self.single_transform:
            # re-use the first transform in the batch for every volume
            fn = lambda x: self._single_transform([x, trf[0, :]])
            return tf.map_fn(fn, vol, dtype=tf.float32)
        else:
            return tf.map_fn(self._single_transform, [vol, trf], dtype=tf.float32)

    def _single_aff_to_shift(self, trf, volshape):
        # Convert one affine transform to a dense shift field of shape *volshape x N.
        if len(trf.shape) == 1:  # go from vector to matrix
            trf = tf.reshape(trf, [self.ndims, self.ndims + 1])
        # note this is unnecessarily extra graph since at every batch entry we have a tf.eye graph
        # trf += tf.eye(self.ndims + 1)[:self.ndims, :]  # add identity, hence affine is a shift from identity
        return affine_to_shift(trf, volshape, shift_center=True)

    def _single_transform(self, inputs):
        # Warp one volume (inputs[0]) by one dense transform (inputs[1]).
        return transform(inputs[0], inputs[1], interp_method=self.interp_method)
class Resize(tf.keras.layers.Layer):
    """
    N-D Resize Tensorflow / Keras Layer
    Note: this is not re-shaping an existing volume, but resizing, like scipy's "Zoom"
    If you find this function useful, please cite:
    Anatomical Priors in Convolutional Networks for Unsupervised Biomedical Segmentation,Dalca AV, Guttag J, Sabuncu MR
    CVPR 2018
    Since then, we've re-written the code to be generalized to any
    dimensions, and along the way wrote grid and interpolation functions
    """
    def __init__(self,
                 zoom_factor,
                 interp_method='linear',
                 **kwargs):
        """
        Parameters:
            zoom_factor: scalar factor applied to every spatial dimension
            interp_method: 'linear' or 'nearest'
        """
        self.zoom_factor = zoom_factor
        self.interp_method = interp_method
        self.ndims = None
        self.inshape = None
        super(Resize, self).__init__(**kwargs)
    def build(self, input_shape):
        """
        input_shape should be the volume shape or a one-element list of it:
            input1: volume
            should be a *vol_shape x N
        """
        is_wrapped = isinstance(input_shape[0], (list, tuple))
        if is_wrapped and len(input_shape) > 1:
            raise Exception('Resize must be called on a list of length 1.'
                            'First argument is the image, second is the transform.')
        if is_wrapped:
            input_shape = input_shape[0]
        # number of spatial dimensions (batch and channel axes excluded)
        self.ndims = len(input_shape) - 2
        self.inshape = input_shape
        # confirm built
        self.built = True
    def call(self, inputs):
        """
        Parameters
            inputs: volume, or a one-element list holding the volume
        """
        # unwrap a possible one-element list
        if isinstance(inputs, (list, tuple)):
            assert len(inputs) == 1, "inputs has to be len 1. found: %d" % len(inputs)
            volume = inputs[0]
        else:
            volume = inputs
        # necessary for multi_gpu models...
        volume = tf.reshape(volume, [-1, *self.inshape[1:]])
        # resize every batch entry independently
        return tf.map_fn(self._single_resize, volume, dtype=tf.float32)
    def compute_output_shape(self, input_shape):
        """Keep batch and channel axes; scale spatial axes by the zoom factor."""
        spatial = [int(dim * self.zoom_factor) for dim in input_shape[1:-1]]
        return tuple([input_shape[0], *spatial, input_shape[-1]])
    def _single_resize(self, inputs):
        """Resize one volume with the configured zoom factor and interpolation."""
        return resize(inputs, self.zoom_factor, interp_method=self.interp_method)
#######################################################################
# Helper functions
#######################################################################
def b_spline(i, u):
    """
    Cubic B-spline basis function.
    :param i: basis index, one of -1, 0, 1, 2
    :param u: fractional coordinate (scalar or tensor), typically in [0, 1)
    :return: the value of the i-th cubic B-spline basis evaluated at u
    :raises ValueError: if i is not one of the four supported indices
        (previously an unsupported index silently returned None)
    """
    with tf.name_scope('b_spline'):
        if i == -1:
            return (1 - u) ** 3 / 6
        elif i == 0:
            return (3 * u ** 3 - 6 * u ** 2 + 4) / 6
        elif i == 1:
            return (-3 * u ** 3 + 3 * u ** 2 + 3 * u + 1) / 6
        elif i == 2:
            return u ** 3 / 6
        raise ValueError('b_spline index must be in {-1, 0, 1, 2}, got %s' % i)
def quaternary(n, rank):
    """
    Return the base-4 digits of `n`, most significant first,
    zero-padded on the left to length `rank`.
    """
    digits = []
    value = n
    # collect least-significant digits first
    while value:
        value, remainder = divmod(value, 4)
        digits.append(remainder)
    # pad with zeros, then reverse so the zeros become leading digits
    digits.extend([0] * (rank - len(digits)))
    return list(reversed(digits))
#######################################################################
# random affine data augmentation
#######################################################################
def random_affine_matrix_2d(rot_std=np.pi / 12, scl_std=0.1, tra_std=0., she_std=0.1, name='random_affine_params'):
    """
    Generate a random 2-D affine transformation matrix.
    :param rot_std: standard deviation of the rotation angle (radians)
    :param scl_std: standard deviation of scaling parameters
    :param tra_std: standard deviation of translation parameters
    :param she_std: standard deviation of shearing parameters
    :param name: name of the returned tensor
    :return: a tensor of shape [1, 6], the flattened top two rows of the
        3x3 homogeneous affine matrix
    """
    # .item() extracts a Python scalar: embedding a shape-(1,) array inside the
    # rotation-matrix literal below creates a ragged array, which raises a
    # ValueError on NumPy >= 1.24
    a = np.random.normal(0, rot_std, 1).item()
    sx, sy = np.random.normal(1, scl_std, 2)
    p, q = np.random.normal(0, tra_std, 2)
    hx, hy = np.random.normal(0, she_std, 2)
    # Translation matrix
    Tr = np.asarray([[1, 0, p],
                     [0, 1, q],
                     [0, 0, 1]], dtype=np.float32)
    # Scaling matrix
    Sc = np.asarray([[sx, 0, 0],
                     [0, sy, 0],
                     [0, 0, 1]], dtype=np.float32)
    # Shear matrix: shear along x or along y with equal probability
    r = np.random.rand()
    if r < 0.5:
        Sh = np.asarray([[1, hx, 0],
                         [0, 1, 0],
                         [0, 0, 1]], dtype=np.float32)
    else:
        Sh = np.asarray([[1, 0, 0],
                         [hy, 1, 0],
                         [0, 0, 1]], dtype=np.float32)
    # Rotation matrix
    R = np.asarray([[np.cos(a), np.sin(a), 0],
                    [-np.sin(a), np.cos(a), 0],
                    [0, 0, 1]], dtype=np.float32)
    # Compose: M = Tr @ Sc @ Sh @ R
    M = np.matmul(Tr,
                  np.matmul(Sc,
                            np.matmul(Sh, R)))
    return tf.reshape(tf.constant(M[:2], dtype=tf.float32), [1, 6], name=name)
def random_affine_augment(inputs, **kwargs):
    """
    Perform random affine data augmentation on input tensors.
    :param inputs: a group of tensors ready to be augmented, each of shape
        [n_batch, *vol_shape, n_channel/n_class] or
        [n_batch, *vol_shape, n_atlas, n_channel/n_class]
    :param kwargs: optional arguments transferred to random_affine_matrix_2d;
        'name', 'affine_augment' and 'interp_methods' are consumed here
    :return: the augmented tensors, or `inputs` unchanged when augmentation
        is disabled via affine_augment=False
    """
    name = kwargs.pop('name', 'random_affine_augment')
    affine_augment = kwargs.pop('affine_augment', True)
    interp_methods = kwargs.pop('interp_methods', ['linear'] * len(inputs))
    with tf.name_scope(name):
        # drawn unconditionally (as before) so the NumPy RNG state advances
        # the same way whether or not augmentation is applied
        affine_params = random_affine_matrix_2d(**kwargs)
        if not affine_augment:
            return inputs
        outputs = []
        # one shared transform keeps all inputs spatially aligned
        for tensor, interp_method in zip(inputs, interp_methods):
            spatial_transform = SpatialTransformer(interp_method=interp_method, single_transform=True)
            if len(tensor.get_shape().as_list()) == 6:
                # rank-6 input: warp each atlas slice separately, then restack
                n_atlas = tensor.get_shape().as_list()[-2]
                outputs.append(tf.stack([spatial_transform([tensor[..., k, :], affine_params])
                                         for k in range(n_atlas)], axis=-2))
            else:
                outputs.append(spatial_transform([tensor, affine_params]))
        return outputs
| [
"tensorflow.shape",
"tensorflow.pad",
"numpy.random.rand",
"numpy.sqrt",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.nn.moments",
"tensorflow.initializers.he_uniform",
"tensorflow.keras.layers.UpSampling3D",
"tensorflow.nn.dropout",
"numpy.sin",
"tensorfl... | [((1355, 1384), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (1366, 1384), True, 'import tensorflow as tf\n'), ((1397, 1428), 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': 'name'}), '(initial, name=name)\n', (1408, 1428), True, 'import tensorflow as tf\n'), ((1479, 1538), 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['x', 'W'], {'strides': '[1, 1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')\n", (1491, 1538), True, 'import tensorflow as tf\n'), ((1551, 1585), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['conv_3d', 'keep_prob_'], {}), '(conv_3d, keep_prob_)\n', (1564, 1585), True, 'import tensorflow as tf\n'), ((1634, 1645), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1642, 1645), True, 'import tensorflow as tf\n'), ((1666, 1758), 'tensorflow.stack', 'tf.stack', (['[x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4] // 2]'], {}), '([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, \n x_shape[4] // 2])\n', (1674, 1758), True, 'import tensorflow as tf\n'), ((1766, 1869), 'tensorflow.nn.conv3d_transpose', 'tf.nn.conv3d_transpose', (['x', 'w', 'output_shape'], {'strides': '[1, stride, stride, stride, 1]', 'padding': '"""VALID"""'}), "(x, w, output_shape, strides=[1, stride, stride,\n stride, 1], padding='VALID')\n", (1788, 1869), True, 'import tensorflow as tf\n'), ((1905, 1994), 'tensorflow.nn.max_pool3d', 'tf.nn.max_pool3d', (['x'], {'ksize': '[1, n, n, n, 1]', 'strides': '[1, n, n, n, 1]', 'padding': '"""VALID"""'}), "(x, ksize=[1, n, n, n, 1], strides=[1, n, n, n, 1], padding\n ='VALID')\n", (1921, 1994), True, 'import tensorflow as tf\n'), ((2258, 2276), 'tensorflow.exp', 'tf.exp', (['output_map'], {}), '(output_map)\n', (2264, 2276), True, 'import tensorflow as tf\n'), ((2387, 2450), 'tensorflow.divide', 'tf.divide', (['exponential_map', 'evidence'], {'name': '"""pixel_wise_softmax"""'}), "(exponential_map, 
evidence, name='pixel_wise_softmax')\n", (2396, 2450), True, 'import tensorflow as tf\n'), ((2578, 2596), 'tensorflow.exp', 'tf.exp', (['output_map'], {}), '(output_map)\n', (2584, 2596), True, 'import tensorflow as tf\n'), ((2612, 2661), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['exponential_map', '(-1)'], {'keepdims': '(True)'}), '(exponential_map, -1, keepdims=True)\n', (2625, 2661), True, 'import tensorflow as tf\n'), ((4106, 4118), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (4114, 4118), True, 'import tensorflow as tf\n'), ((4135, 4147), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (4143, 4147), True, 'import tensorflow as tf\n'), ((4414, 4454), 'tensorflow.slice', 'tf.slice', (['x1', 'offsets', 'size'], {'name': '"""crop"""'}), "(x1, offsets, size, name='crop')\n", (4422, 4454), True, 'import tensorflow as tf\n'), ((4474, 4522), 'tensorflow.concat', 'tf.concat', (['[x1_crop, x2]', '(-1)'], {'name': '"""crop_concat"""'}), "([x1_crop, x2], -1, name='crop_concat')\n", (4483, 4522), True, 'import tensorflow as tf\n'), ((4786, 4798), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (4794, 4798), True, 'import tensorflow as tf\n'), ((4815, 4827), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (4823, 4827), True, 'import tensorflow as tf\n'), ((5094, 5134), 'tensorflow.slice', 'tf.slice', (['x1', 'offsets', 'size'], {'name': '"""crop"""'}), "(x1, offsets, size, name='crop')\n", (5102, 5134), True, 'import tensorflow as tf\n'), ((5147, 5183), 'tensorflow.add', 'tf.add', (['x1_crop', 'x2'], {'name': '"""crop_add"""'}), "(x1_crop, x2, name='crop_add')\n", (5153, 5183), True, 'import tensorflow as tf\n'), ((5309, 5321), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (5317, 5321), True, 'import tensorflow as tf\n'), ((5338, 5350), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (5346, 5350), True, 'import tensorflow as tf\n'), ((5768, 5800), 'tensorflow.pad', 'tf.pad', (['x2', 'paddings'], {'name': '"""pad"""'}), 
"(x2, paddings, name='pad')\n", (5774, 5800), True, 'import tensorflow as tf\n'), ((5819, 5865), 'tensorflow.concat', 'tf.concat', (['[x1, x2_pad]', '(-1)'], {'name': '"""pad_concat"""'}), "([x1, x2_pad], -1, name='pad_concat')\n", (5828, 5865), True, 'import tensorflow as tf\n'), ((6125, 6137), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (6133, 6137), True, 'import tensorflow as tf\n'), ((6154, 6166), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (6162, 6166), True, 'import tensorflow as tf\n'), ((6584, 6616), 'tensorflow.pad', 'tf.pad', (['x2', 'paddings'], {'name': '"""pad"""'}), "(x2, paddings, name='pad')\n", (6590, 6616), True, 'import tensorflow as tf\n'), ((6629, 6663), 'tensorflow.add', 'tf.add', (['x1', 'x2_pad'], {'name': '"""pad_add"""'}), "(x1, x2_pad, name='pad_add')\n", (6635, 6663), True, 'import tensorflow as tf\n'), ((6777, 6789), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (6785, 6789), True, 'import tensorflow as tf\n'), ((6806, 6818), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (6814, 6818), True, 'import tensorflow as tf\n'), ((7085, 7125), 'tensorflow.slice', 'tf.slice', (['x1', 'offsets', 'size'], {'name': '"""crop"""'}), "(x1, offsets, size, name='crop')\n", (7093, 7125), True, 'import tensorflow as tf\n'), ((7336, 7348), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (7344, 7348), True, 'import tensorflow as tf\n'), ((7365, 7377), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (7373, 7377), True, 'import tensorflow as tf\n'), ((7797, 7829), 'tensorflow.pad', 'tf.pad', (['x1', 'paddings'], {'name': '"""pad"""'}), "(x1, paddings, name='pad')\n", (7803, 7829), True, 'import tensorflow as tf\n'), ((8203, 8231), 'tensorflow.initializers.he_uniform', 'tf.initializers.he_uniform', ([], {}), '()\n', (8229, 8231), True, 'import tensorflow as tf\n'), ((10988, 11016), 'tensorflow.initializers.he_uniform', 'tf.initializers.he_uniform', ([], {}), '()\n', (11014, 11016), True, 'import 
tensorflow as tf\n'), ((15127, 15155), 'tensorflow.initializers.he_uniform', 'tf.initializers.he_uniform', ([], {}), '()\n', (15153, 15155), True, 'import tensorflow as tf\n'), ((17982, 18024), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.001)'}), '(stddev=0.001)\n', (18010, 18024), True, 'import tensorflow as tf\n'), ((20545, 20573), 'tensorflow.initializers.he_uniform', 'tf.initializers.he_uniform', ([], {}), '()\n', (20571, 20573), True, 'import tensorflow as tf\n'), ((40408, 40439), 'numpy.random.normal', 'np.random.normal', (['(0)', 'rot_std', '(1)'], {}), '(0, rot_std, 1)\n', (40424, 40439), True, 'import numpy as np\n'), ((40454, 40485), 'numpy.random.normal', 'np.random.normal', (['(1)', 'scl_std', '(2)'], {}), '(1, scl_std, 2)\n', (40470, 40485), True, 'import numpy as np\n'), ((40498, 40529), 'numpy.random.normal', 'np.random.normal', (['(0)', 'tra_std', '(2)'], {}), '(0, tra_std, 2)\n', (40514, 40529), True, 'import numpy as np\n'), ((40544, 40575), 'numpy.random.normal', 'np.random.normal', (['(0)', 'she_std', '(2)'], {}), '(0, she_std, 2)\n', (40560, 40575), True, 'import numpy as np\n'), ((40614, 40677), 'numpy.asarray', 'np.asarray', (['[[1, 0, p], [0, 1, q], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, p], [0, 1, q], [0, 0, 1]], dtype=np.float32)\n', (40624, 40677), True, 'import numpy as np\n'), ((40756, 40821), 'numpy.asarray', 'np.asarray', (['[[sx, 0, 0], [0, sy, 0], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[sx, 0, 0], [0, sy, 0], [0, 0, 1]], dtype=np.float32)\n', (40766, 40821), True, 'import numpy as np\n'), ((40897, 40913), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (40911, 40913), True, 'import numpy as np\n'), ((720, 753), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (727, 753), True, 'import numpy as np\n'), ((814, 847), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (821, 
847), True, 'import numpy as np\n'), ((872, 939), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'minval': 'low', 'maxval': 'high', 'dtype': 'tf.float32'}), '(shape, minval=low, maxval=high, dtype=tf.float32)\n', (889, 939), True, 'import tensorflow as tf\n'), ((1064, 1097), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (1071, 1097), True, 'import numpy as np\n'), ((1158, 1191), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (1165, 1191), True, 'import numpy as np\n'), ((1216, 1283), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'minval': 'low', 'maxval': 'high', 'dtype': 'tf.float32'}), '(shape, minval=low, maxval=high, dtype=tf.float32)\n', (1233, 1283), True, 'import tensorflow as tf\n'), ((2317, 2373), 'tensorflow.reverse', 'tf.reverse', (['exponential_map', '[False, False, False, True]'], {}), '(exponential_map, [False, False, False, True])\n', (2327, 2373), True, 'import tensorflow as tf\n'), ((2780, 2822), 'tensorflow.divide', 'tf.divide', (['exponential_map', 'tensor_sum_exp'], {}), '(exponential_map, tensor_sum_exp)\n', (2789, 2822), True, 'import tensorflow as tf\n'), ((3563, 3584), 'tensorflow.shape', 'tf.shape', (['flat_labels'], {}), '(flat_labels)\n', (3571, 3584), True, 'import tensorflow as tf\n'), ((9485, 9517), 'tensorflow.variable_scope', 'tf.variable_scope', (['name_or_scope'], {}), '(name_or_scope)\n', (9502, 9517), True, 'import tensorflow as tf\n'), ((12410, 12442), 'tensorflow.variable_scope', 'tf.variable_scope', (['name_or_scope'], {}), '(name_or_scope)\n', (12427, 12442), True, 'import tensorflow as tf\n'), ((14614, 14658), 'tensorflow.add', 'tf.add', (['feature_maps', 'inputs'], {'name': '"""res_add"""'}), "(feature_maps, inputs, name='res_add')\n", (14620, 14658), True, 'import tensorflow as tf\n'), ((14675, 14722), 'tensorflow.nn.relu', 'tf.nn.relu', (['res_add'], {'name': '"""output_feature_maps"""'}), 
"(res_add, name='output_feature_maps')\n", (14685, 14722), True, 'import tensorflow as tf\n'), ((16529, 16561), 'tensorflow.variable_scope', 'tf.variable_scope', (['name_or_scope'], {}), '(name_or_scope)\n', (16546, 16561), True, 'import tensorflow as tf\n'), ((18161, 18193), 'tensorflow.variable_scope', 'tf.variable_scope', (['name_or_scope'], {}), '(name_or_scope)\n', (18178, 18193), True, 'import tensorflow as tf\n'), ((18247, 18519), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'feature_size', 'kernel_size': 'filter_size', 'strides': 'strides', 'padding': 'padding', 'dilation_rate': 'dilation_rate', 'kernel_initializer': 'initializer', 'kernel_regularizer': 'regularizer', 'bias_regularizer': 'regularizer', 'trainable': 'trainable', 'name': '"""conv"""'}), "(filters=feature_size, kernel_size=filter_size,\n strides=strides, padding=padding, dilation_rate=dilation_rate,\n kernel_initializer=initializer, kernel_regularizer=regularizer,\n bias_regularizer=regularizer, trainable=trainable, name='conv')\n", (18269, 18519), True, 'import tensorflow as tf\n'), ((19551, 19592), 'tensorflow.name_scope', 'tf.name_scope', (['"""linear_additive_upsample"""'], {}), "('linear_additive_upsample')\n", (19564, 19592), True, 'import tensorflow as tf\n'), ((20195, 20231), 'tensorflow.split', 'tf.split', (['upsample', 'n_split'], {'axis': '(-1)'}), '(upsample, n_split, axis=-1)\n', (20203, 20231), True, 'import tensorflow as tf\n'), ((20256, 20280), 'tensorflow.stack', 'tf.stack', (['split'], {'axis': '(-1)'}), '(split, axis=-1)\n', (20264, 20280), True, 'import tensorflow as tf\n'), ((20306, 20364), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['split_tensor'], {'axis': '(-1)', 'name': '"""output_tensor"""'}), "(split_tensor, axis=-1, name='output_tensor')\n", (20319, 20364), True, 'import tensorflow as tf\n'), ((22340, 22372), 'tensorflow.variable_scope', 'tf.variable_scope', (['name_or_scope'], {}), '(name_or_scope)\n', (22357, 22372), True, 'import 
tensorflow as tf\n'), ((22428, 22668), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', ([], {'filters': 'feature_size', 'kernel_size': 'filter_size', 'strides': 'strides', 'padding': '"""same"""', 'use_bias': '(False)', 'kernel_initializer': 'initializer', 'kernel_regularizer': 'regularizer', 'trainable': 'trainable', 'name': '"""deconv"""'}), "(filters=feature_size, kernel_size=\n filter_size, strides=strides, padding='same', use_bias=False,\n kernel_initializer=initializer, kernel_regularizer=regularizer,\n trainable=trainable, name='deconv')\n", (22459, 22668), True, 'import tensorflow as tf\n'), ((23233, 23262), 'tensorflow.nn.relu', 'tf.nn.relu', (['norm'], {'name': '"""relu"""'}), "(norm, name='relu')\n", (23243, 23262), True, 'import tensorflow as tf\n'), ((24732, 24786), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs'], {'axis': '(1, 2, 3)', 'name': '"""squeeze"""'}), "(inputs, axis=(1, 2, 3), name='squeeze')\n", (24746, 24786), True, 'import tensorflow as tf\n'), ((24802, 24880), 'tensorflow.layers.dense', 'tf.layers.dense', (['sq'], {'units': '(out_dim // ratio)', 'activation': 'tf.nn.relu', 'name': '"""fc1"""'}), "(sq, units=out_dim // ratio, activation=tf.nn.relu, name='fc1')\n", (24817, 24880), True, 'import tensorflow as tf\n'), ((24896, 24943), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc1'], {'units': 'out_dim', 'name': '"""fc2"""'}), "(fc1, units=out_dim, name='fc2')\n", (24911, 24943), True, 'import tensorflow as tf\n'), ((24958, 24995), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['fc2'], {'name': '"""excitation"""'}), "(fc2, name='excitation')\n", (24971, 24995), True, 'import tensorflow as tf\n'), ((25010, 25048), 'tensorflow.reshape', 'tf.reshape', (['ex', '[-1, 1, 1, 1, out_dim]'], {}), '(ex, [-1, 1, 1, 1, out_dim])\n', (25020, 25048), True, 'import tensorflow as tf\n'), ((25624, 25658), 'tensorflow.transpose', 'tf.transpose', (['inputs', '[0, 3, 1, 2]'], {}), '(inputs, [0, 3, 1, 2])\n', (25636, 
25658), True, 'import tensorflow as tf\n'), ((25746, 25782), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, G, C // G, H, W]'], {}), '(x, [-1, G, C // G, H, W])\n', (25756, 25782), True, 'import tensorflow as tf\n'), ((25804, 25846), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[2, 3, 4]'], {'keepdims': '(True)'}), '(x, [2, 3, 4], keepdims=True)\n', (25817, 25846), True, 'import tensorflow as tf\n'), ((26120, 26151), 'tensorflow.reshape', 'tf.reshape', (['gamma', '[1, C, 1, 1]'], {}), '(gamma, [1, C, 1, 1])\n', (26130, 26151), True, 'import tensorflow as tf\n'), ((26168, 26198), 'tensorflow.reshape', 'tf.reshape', (['beta', '[1, C, 1, 1]'], {}), '(beta, [1, C, 1, 1])\n', (26178, 26198), True, 'import tensorflow as tf\n'), ((26277, 26311), 'tensorflow.transpose', 'tf.transpose', (['output', '[0, 2, 3, 1]'], {}), '(output, [0, 2, 3, 1])\n', (26289, 26311), True, 'import tensorflow as tf\n'), ((26715, 26768), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs'], {'axes': '[0, 1, 2]', 'keep_dims': '(True)'}), '(inputs, axes=[0, 1, 2], keep_dims=True)\n', (26728, 26768), True, 'import tensorflow as tf\n'), ((26998, 27060), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inputs', 'mean', 'var', 'beta', 'gamma', 'eps'], {}), '(inputs, mean, var, beta, gamma, eps)\n', (27023, 27060), True, 'import tensorflow as tf\n'), ((27717, 27767), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs'], {'axes': '[1, 2]', 'keep_dims': '(True)'}), '(inputs, axes=[1, 2], keep_dims=True)\n', (27730, 27767), True, 'import tensorflow as tf\n'), ((28752, 28805), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs'], {'axes': '[0, 1, 2]', 'keep_dims': '(True)'}), '(inputs, axes=[0, 1, 2], keep_dims=True)\n', (28765, 28805), True, 'import tensorflow as tf\n'), ((28907, 28957), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs'], {'axes': '[1, 2]', 'keep_dims': '(True)'}), '(inputs, axes=[1, 2], keep_dims=True)\n', (28920, 28957), True, 'import tensorflow as 
tf\n'), ((34817, 34860), 'tensorflow.reshape', 'tf.reshape', (['vol', '[-1, *self.inshape[0][1:]]'], {}), '(vol, [-1, *self.inshape[0][1:]])\n', (34827, 34860), True, 'import tensorflow as tf\n'), ((34876, 34919), 'tensorflow.reshape', 'tf.reshape', (['trf', '[-1, *self.inshape[1][1:]]'], {}), '(trf, [-1, *self.inshape[1][1:]])\n', (34886, 34919), True, 'import tensorflow as tf\n'), ((36071, 36120), 'core.utils_2d.affine_to_shift', 'affine_to_shift', (['trf', 'volshape'], {'shift_center': '(True)'}), '(trf, volshape, shift_center=True)\n', (36086, 36120), False, 'from core.utils_2d import transform, resize, affine_to_shift, get_reference_grid_numpy\n'), ((36181, 36246), 'core.utils_2d.transform', 'transform', (['inputs[0]', 'inputs[1]'], {'interp_method': 'self.interp_method'}), '(inputs[0], inputs[1], interp_method=self.interp_method)\n', (36190, 36246), False, 'from core.utils_2d import transform, resize, affine_to_shift, get_reference_grid_numpy\n'), ((38470, 38510), 'tensorflow.reshape', 'tf.reshape', (['vol', '[-1, *self.inshape[1:]]'], {}), '(vol, [-1, *self.inshape[1:]])\n', (38480, 38510), True, 'import tensorflow as tf\n'), ((38567, 38620), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_resize', 'vol'], {'dtype': 'tf.float32'}), '(self._single_resize, vol, dtype=tf.float32)\n', (38576, 38620), True, 'import tensorflow as tf\n'), ((38930, 38996), 'core.utils_2d.resize', 'resize', (['inputs', 'self.zoom_factor'], {'interp_method': 'self.interp_method'}), '(inputs, self.zoom_factor, interp_method=self.interp_method)\n', (38936, 38996), False, 'from core.utils_2d import transform, resize, affine_to_shift, get_reference_grid_numpy\n'), ((39200, 39225), 'tensorflow.name_scope', 'tf.name_scope', (['"""b_spline"""'], {}), "('b_spline')\n", (39213, 39225), True, 'import tensorflow as tf\n'), ((40945, 41009), 'numpy.asarray', 'np.asarray', (['[[1, hx, 0], [0, 1, 0], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, hx, 0], [0, 1, 0], [0, 0, 1]], 
dtype=np.float32)\n', (40955, 41009), True, 'import numpy as np\n'), ((41087, 41151), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0], [hy, 1, 0], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0], [hy, 1, 0], [0, 0, 1]], dtype=np.float32)\n', (41097, 41151), True, 'import numpy as np\n'), ((41508, 41544), 'tensorflow.constant', 'tf.constant', (['M[:2]'], {'dtype': 'tf.float32'}), '(M[:2], dtype=tf.float32)\n', (41519, 41544), True, 'import tensorflow as tf\n'), ((42139, 42158), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (42152, 42158), True, 'import tensorflow as tf\n'), ((9630, 9673), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transition_block_layer"""'], {}), "('transition_block_layer')\n", (9647, 9673), True, 'import tensorflow as tf\n'), ((10614, 10643), 'tensorflow.nn.relu', 'tf.nn.relu', (['norm'], {'name': '"""relu"""'}), "(norm, name='relu')\n", (10624, 10643), True, 'import tensorflow as tf\n'), ((23437, 23483), 'tensorflow.add', 'tf.add', (['dropout', 'upsample'], {'name': '"""res_upsample"""'}), "(dropout, upsample, name='res_upsample')\n", (23443, 23483), True, 'import tensorflow as tf\n'), ((23923, 23973), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'rate', 'name': '"""dropout"""'}), "(rate=rate, name='dropout')\n", (23946, 23973), True, 'import tensorflow as tf\n'), ((25873, 25891), 'tensorflow.sqrt', 'tf.sqrt', (['(var + eps)'], {}), '(var + eps)\n', (25880, 25891), True, 'import tensorflow as tf\n'), ((27800, 27818), 'tensorflow.sqrt', 'tf.sqrt', (['(var + eps)'], {}), '(var + eps)\n', (27807, 27818), True, 'import tensorflow as tf\n'), ((28850, 28874), 'tensorflow.sqrt', 'tf.sqrt', (['(batch_var + eps)'], {}), '(batch_var + eps)\n', (28857, 28874), True, 'import tensorflow as tf\n'), ((28998, 29020), 'tensorflow.sqrt', 'tf.sqrt', (['(ins_var + eps)'], {}), '(ins_var + eps)\n', (29005, 29020), True, 'import tensorflow as tf\n'), ((35215, 35252), 'tensorflow.split', 'tf.split', 
(['trf', 'trf.shape[-1]'], {'axis': '(-1)'}), '(trf, trf.shape[-1], axis=-1)\n', (35223, 35252), True, 'import tensorflow as tf\n'), ((35340, 35362), 'tensorflow.concat', 'tf.concat', (['trf_lst', '(-1)'], {}), '(trf_lst, -1)\n', (35349, 35362), True, 'import tensorflow as tf\n'), ((35525, 35561), 'tensorflow.map_fn', 'tf.map_fn', (['fn', 'vol'], {'dtype': 'tf.float32'}), '(fn, vol, dtype=tf.float32)\n', (35534, 35561), True, 'import tensorflow as tf\n'), ((35597, 35660), 'tensorflow.map_fn', 'tf.map_fn', (['self._single_transform', '[vol, trf]'], {'dtype': 'tf.float32'}), '(self._single_transform, [vol, trf], dtype=tf.float32)\n', (35606, 35660), True, 'import tensorflow as tf\n'), ((35795, 35840), 'tensorflow.reshape', 'tf.reshape', (['trf', '[self.ndims, self.ndims + 1]'], {}), '(trf, [self.ndims, self.ndims + 1])\n', (35805, 35840), True, 'import tensorflow as tf\n'), ((41464, 41480), 'numpy.matmul', 'np.matmul', (['Sh', 'R'], {}), '(Sh, R)\n', (41473, 41480), True, 'import numpy as np\n'), ((3297, 3332), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['probs', '(1e-07)', '(1.0)'], {}), '(probs, 1e-07, 1.0)\n', (3313, 3332), True, 'import tensorflow as tf\n'), ((9695, 9782), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': 'pool_size', 'strides': 'pool_size', 'name': '"""max_pool"""'}), "(pool_size=pool_size, strides=pool_size, name=\n 'max_pool')\n", (9720, 9782), True, 'import tensorflow as tf\n'), ((12804, 12850), 'tensorflow.variable_scope', 'tf.variable_scope', (["('res_block_layer%s' % layer)"], {}), "('res_block_layer%s' % layer)\n", (12821, 12850), True, 'import tensorflow as tf\n'), ((12878, 13137), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'feature_size', 'kernel_size': 'filter_size', 'strides': 'strides', 'padding': 'padding', 'dilation_rate': 'dilation_rate', 'use_bias': '(False)', 'kernel_initializer': 'initializer', 'kernel_regularizer': 'regularizer', 'trainable': 
'trainable', 'name': '"""conv"""'}), "(filters=feature_size, kernel_size=filter_size,\n strides=strides, padding=padding, dilation_rate=dilation_rate, use_bias\n =False, kernel_initializer=initializer, kernel_regularizer=regularizer,\n trainable=trainable, name='conv')\n", (12900, 13137), True, 'import tensorflow as tf\n'), ((16683, 16726), 'tensorflow.variable_scope', 'tf.variable_scope', (["('conv_block_layer%d' % k)"], {}), "('conv_block_layer%d' % k)\n", (16700, 16726), True, 'import tensorflow as tf\n'), ((16754, 17013), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'feature_size', 'kernel_size': 'filter_size', 'strides': 'strides', 'padding': 'padding', 'dilation_rate': 'dilation_rate', 'use_bias': '(False)', 'kernel_initializer': 'initializer', 'kernel_regularizer': 'regularizer', 'trainable': 'trainable', 'name': '"""conv"""'}), "(filters=feature_size, kernel_size=filter_size,\n strides=strides, padding=padding, dilation_rate=dilation_rate, use_bias\n =False, kernel_initializer=initializer, kernel_regularizer=regularizer,\n trainable=trainable, name='conv')\n", (16776, 17013), True, 'import tensorflow as tf\n'), ((17678, 17707), 'tensorflow.nn.relu', 'tf.nn.relu', (['norm'], {'name': '"""relu"""'}), "(norm, name='relu')\n", (17688, 17707), True, 'import tensorflow as tf\n'), ((18786, 18810), 'tensorflow.square', 'tf.square', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (18795, 18810), True, 'import tensorflow as tf\n'), ((18895, 18916), 'tensorflow.abs', 'tf.abs', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (18901, 18916), True, 'import tensorflow as tf\n'), ((19877, 19937), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': 'new_size', 'name': '"""upsample"""'}), "(size=new_size, name='upsample')\n", (19905, 19937), True, 'import tensorflow as tf\n'), ((22930, 22956), 'tensorflow.square', 'tf.square', (['deconv2d.kernel'], {}), '(deconv2d.kernel)\n', (22939, 22956), True, 'import 
tensorflow as tf\n'), ((23041, 23064), 'tensorflow.abs', 'tf.abs', (['deconv2d.kernel'], {}), '(deconv2d.kernel)\n', (23047, 23064), True, 'import tensorflow as tf\n'), ((24049, 24108), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': 'rate', 'name': '"""dropout"""'}), "(rate=rate, name='dropout')\n", (24081, 24108), True, 'import tensorflow as tf\n'), ((25951, 25979), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (25974, 25979), True, 'import tensorflow as tf\n'), ((26056, 26084), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (26079, 26084), True, 'import tensorflow as tf\n'), ((26217, 26245), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, C, H, W]'], {}), '(x, [-1, C, H, W])\n', (26227, 26245), True, 'import tensorflow as tf\n'), ((26828, 26856), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (26851, 26856), True, 'import tensorflow as tf\n'), ((26933, 26961), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (26956, 26961), True, 'import tensorflow as tf\n'), ((27145, 27161), 'tensorflow.square', 'tf.square', (['gamma'], {}), '(gamma)\n', (27154, 27161), True, 'import tensorflow as tf\n'), ((27247, 27260), 'tensorflow.abs', 'tf.abs', (['gamma'], {}), '(gamma)\n', (27253, 27260), True, 'import tensorflow as tf\n'), ((27879, 27907), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (27902, 27907), True, 'import tensorflow as tf\n'), ((27984, 28012), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (28007, 28012), True, 'import tensorflow as tf\n'), ((28150, 28166), 'tensorflow.square', 'tf.square', (['gamma'], {}), '(gamma)\n', (28159, 28166), True, 'import tensorflow as tf\n'), ((28252, 28265), 'tensorflow.abs', 'tf.abs', (['gamma'], {}), '(gamma)\n', (28258, 28265), 
True, 'import tensorflow as tf\n'), ((29079, 29107), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (29102, 29107), True, 'import tensorflow as tf\n'), ((29281, 29309), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (29304, 29309), True, 'import tensorflow as tf\n'), ((29386, 29414), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (29409, 29414), True, 'import tensorflow as tf\n'), ((41251, 41260), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (41257, 41260), True, 'import numpy as np\n'), ((41262, 41271), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (41268, 41271), True, 'import numpy as np\n'), ((41311, 41320), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (41317, 41320), True, 'import numpy as np\n'), ((2723, 2743), 'tensorflow.shape', 'tf.shape', (['output_map'], {}), '(output_map)\n', (2731, 2743), True, 'import tensorflow as tf\n'), ((3651, 3700), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['flat_labels'], {'axis': '(0)', 'keepdims': '(True)'}), '(flat_labels, axis=0, keepdims=True)\n', (3664, 3700), True, 'import tensorflow as tf\n'), ((10297, 10321), 'tensorflow.square', 'tf.square', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (10306, 10321), True, 'import tensorflow as tf\n'), ((10414, 10435), 'tensorflow.abs', 'tf.abs', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (10420, 10435), True, 'import tensorflow as tf\n'), ((13850, 13879), 'tensorflow.nn.relu', 'tf.nn.relu', (['norm'], {'name': '"""relu"""'}), "(norm, name='relu')\n", (13860, 13879), True, 'import tensorflow as tf\n'), ((20006, 20066), 'tensorflow.keras.layers.UpSampling3D', 'tf.keras.layers.UpSampling3D', ([], {'size': 'new_size', 'name': '"""upsample"""'}), "(size=new_size, name='upsample')\n", (20034, 20066), True, 'import tensorflow as tf\n'), ((29161, 29220), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x'], {'clip_value_min': '(0.0)', 'clip_value_max': 
'(1.0)'}), '(x, clip_value_min=0.0, clip_value_max=1.0)\n', (29177, 29220), True, 'import tensorflow as tf\n'), ((41300, 41309), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (41306, 41309), True, 'import numpy as np\n'), ((13465, 13489), 'tensorflow.square', 'tf.square', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (13474, 13489), True, 'import tensorflow as tf\n'), ((13590, 13611), 'tensorflow.abs', 'tf.abs', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (13596, 13611), True, 'import tensorflow as tf\n'), ((17341, 17365), 'tensorflow.square', 'tf.square', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (17350, 17365), True, 'import tensorflow as tf\n'), ((17466, 17487), 'tensorflow.abs', 'tf.abs', (['conv2d.kernel'], {}), '(conv2d.kernel)\n', (17472, 17487), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Colors
======
Provides a collection of :class:`~PyQt5.QtWidgets.QWidget` subclasses for
handling colors in :mod:`~matplotlib`.
"""
# %% IMPORTS
# Built-in imports
from importlib import import_module
from itertools import chain
import re
# Package imports
from cmasher.utils import get_cmap_type
from matplotlib import cm, rcParams
from matplotlib.colors import (
BASE_COLORS, CSS4_COLORS, Colormap, to_hex, to_rgba)
import matplotlib.pyplot as plt
import numpy as np
from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW
from sortedcontainers import SortedDict as sdict, SortedSet as sset
# GuiPy imports
from guipy import layouts as GL, widgets as GW
from guipy.widgets import get_box_value, get_modified_signal, set_box_value
# All declaration
__all__ = ['ColorBox', 'ColorMapBox']
# %% CLASS DEFINITIONS
# Make class with a special box for setting the color of a plotted line
class ColorBox(GW.BaseBox):
    """
    Defines the :class:`~ColorBox` class.
    This widget allows for colors in *matplotlib* to be easily picked by the
    user.
    All cyclic, basic, and CSS4 colors in *matplotlib* are available from the
    combobox.
    Additionally, the HEX code of a color can be typed in as well.
    Finally, the user has access to a colorwheel by clicking on the color
    label, which allows for any color to be picked manually, including any
    colors currently on screen using a screenpicker.
    The default color, which is used when an invalid color is currently typed
    in, can be set using :meth:`~set_default_color`.
    """
    # Signals
    # Emitted on every color change; the [str] overload carries the new color.
    modified = QC.Signal([], [str])
    # Set the size for the color labels
    clabel_size = (70, 18)
    def __init__(self, add_cycler=True, parent=None):
        """
        Initialize an instance of the :class:`~ColorBox` class.
        Optional
        --------
        add_cycler : bool. Default: True
            Whether or not to add *matplotlib*'s cyclic colors to the list of
            preset colors.
        parent : :obj:`~PyQt5.QtCore.QObject` object or None. Default: None
            The parent object to use for this colorbox or *None* for no
            parent.
        """
        # Call super constructor
        super().__init__(parent)
        # Create the color box
        self.init(add_cycler)
    # This property returns the default 'modified' signal
    @property
    def default_modified_signal(self):
        # The string overload is preferred, as it carries the new color value
        return(self.modified[str])
    # This function creates the color box
    def init(self, add_cycler):
        """
        Sets up the color box entry after it has been initialized.
        This function is mainly responsible for creating the color wheel and
        color label, that allow the user to quickly cycle through different
        color options.
        """
        # Create the box layout
        box_layout = GL.QHBoxLayout(self)
        box_layout.setContentsMargins(0, 0, 0, 0)
        # Create a color label
        color_label = self.create_color_label()
        self.color_label = color_label
        box_layout.addWidget(color_label)
        # Create a color combobox
        color_combobox = self.create_color_combobox(add_cycler)
        box_layout.addWidget(color_combobox)
        self.color_combobox = color_combobox
        # Set the default starting color of the color box
        self.set_box_value('C0')
    # This function is automatically called whenever 'modified' is emitted
    @QC.Slot()
    def modified_signal_slot(self):
        # Re-emit with the current color string attached
        self.modified[str].emit(get_box_value(self.color_combobox))
    # This function creates the color label
    def create_color_label(self):
        """
        Creates a special label that shows the currently selected or hovered
        color, and returns it.
        """
        # Create the color label
        color_label = GW.QLabel()
        # Set some properties
        color_label.setFrameShape(QW.QFrame.StyledPanel)
        color_label.setScaledContents(True)
        color_label.setToolTip("Click to open the custom color picker")
        color_label.setSizePolicy(QW.QSizePolicy.Fixed, QW.QSizePolicy.Fixed)
        color_label.mousePressed.connect(self.show_colorpicker)
        # Return it
        return(color_label)
    # This function creates the color combobox
    def create_color_combobox(self, add_cycler):
        """
        Creates a combobox that holds all default colors accepted by matplotlib
        and returns it.
        """
        # Obtain the CN colors if requested
        if add_cycler:
            n_cyclic = len(rcParams['axes.prop_cycle'])
            CN_COLORS = [("C%i" % (i), "This is MPL cyclic color #%i" % (i))
                         for i in range(n_cyclic)]
        else:
            CN_COLORS = []
        # Make tuple of all colors
        colors = (CN_COLORS, BASE_COLORS, CSS4_COLORS)
        # Determine the cumulative lengths of all four sets
        # (used below for placing separators between the color categories)
        cum_len = np.cumsum(list(map(len, colors)))
        # Make combobox for colors
        color_box = GW.EditableComboBox()
        # Add a special validator to this combobox
        # Free text must be a 6-digit HEX string, optionally prefixed with '#'
        validator = GW.ComboBoxValidator(color_box, r"#?[\da-fA-F]{6}")
        color_box.setValidator(validator)
        # Fill combobox with all colors
        for i, color in enumerate(chain(*colors)):
            # If color is a tuple, it consists of (color, tooltip)
            if isinstance(color, tuple):
                color_box.addItem(color[0])
                color_box.setItemData(i, color[1], QC.Qt.ToolTipRole)
            else:
                color_box.addItem(color)
        # Add some separators
        for i in reversed(cum_len[:-1]):
            color_box.insertSeparator(i)
        # Set remaining properties
        color_box.setToolTip("Select or type (in HEX) the color")
        # Preview hovered entries on the label; commit on popup close/edit/blur
        color_box.highlighted[str].connect(self.set_color_label)
        color_box.popup_hidden[str].connect(self.set_color)
        color_box.editTextChanged.connect(self.set_color)
        color_box.focusLost.connect(
            lambda: self.set_color(self.get_box_value()))
        return(color_box)
    # This function converts an MPL color to a QColor
    @staticmethod
    def convert_to_qcolor(color):
        """
        Converts a provided matplotlib color `color` to a
        :obj:`~PyQt5.QtGui.QColor` object.
        Parameters
        ----------
        color : str or tuple of length {3, 4}
            The matplotlib color that must be converted.
        Returns
        -------
        qcolor : :obj:`~PyQt5.QtGui.QColor` object
            The instance of the :class:`~PyQt5.QtGui.QColor` class that
            corresponds to the provided `color`.
        """
        # Obtain the RGBA values of an MPL color
        r, g, b, a = to_rgba(color)
        # Convert to Qt RGBA values
        # MPL uses floats in [0, 1]; Qt expects integers in [0, 255]
        color = QG.QColor(
            round(r*255),
            round(g*255),
            round(b*255),
            round(a*255))
        # Return color
        return(color)
    # This function converts a QColor to an MPL color
    @staticmethod
    def convert_to_mpl_color(qcolor):
        """
        Converts a provided :obj:`~PyQt5.QtGui.QColor` object `color` to a
        matplotlib color.
        Parameters
        ----------
        qcolor : :obj:`~PyQt5.QtGui.QColor` object
            The instance of the :class:`~PyQt5.QtGui.QColor` class must be
            converted to a matplotlib color.
        Returns
        -------
        color : str
            The corresponding matplotlib color.
            The returned `color` is always written in HEX.
        """
        hexid = qcolor.name()
        return(str(hexid))
    # This function creates a pixmap of an MPL color
    @staticmethod
    def create_color_pixmap(color, size):
        """
        Creates a :obj:`~PyQt5.QtGui.QPixmap` object consisting of the given
        `color` with the provided `size`.
        Parameters
        ----------
        color : str
            The matplotlib color that must be used for the pixmap.
        size : tuple
            The width and height dimension values of the pixmap to be created.
        Returns
        -------
        pixmap : :obj:`~PyQt5.QtGui.QPixmap` object
            The instance of the :class:`~PyQt5.QtGui.QPixmap` class that was
            created from the provided `color` and `size`.
        """
        # Obtain the RGBA values of an MPL color
        color = ColorBox.convert_to_qcolor(color)
        # Create an image object
        image = QG.QImage(*size, QG.QImage.Format_RGB32)
        # Fill the entire image with the same color
        image.fill(color)
        # Convert the image to a pixmap
        pixmap = QG.QPixmap.fromImage(image)
        # Return the pixmap
        return(pixmap)
    # This function shows the custom color picker dialog
    @QC.Slot()
    def show_colorpicker(self):
        """
        Shows the colorwheel picker dialog to the user, allowing for any color
        option to be selected.
        """
        # Obtain current qcolor
        qcolor = self.convert_to_qcolor(self.get_box_value())
        # Show color dialog
        color = QW.QColorDialog.getColor(
            qcolor, parent=self,
            options=QW.QColorDialog.DontUseNativeDialog)
        # If the returned color is valid, save it
        # (an invalid color means the user cancelled the dialog)
        if color.isValid():
            self._set_color(self.convert_to_mpl_color(color))
    # This function is called whenever the user changes the lineedit
    @QC.Slot(str)
    def set_color(self, color):
        """
        Sets the current color to the provided `color`, and updates the entry
        in the combobox and the label accordingly.
        Parameters
        ----------
        color : str
            The color that needs to be used as the current color. The provided
            `color` can be any string that is accepted as a color by
            matplotlib.
            If `color` is invalid, the default color is used instead.
        """
        # Check if the combobox currently holds an acceptable input
        status = self.color_combobox.lineEdit().hasAcceptableInput()
        # Check status
        if status:
            # If valid, add a hash if color is a 6-digit hex string
            # (matplotlib only accepts HEX colors with a leading '#')
            color = re.sub(r"^[\da-fA-F]{6}$", lambda x: '#'+x[0], color)
            set_box_value(self.color_combobox, color)
        else:
            # Else, use the default color
            color = self.default_color
            # If combobox currently has no focus, set combobox value as well
            # (while focused, overwriting would interfere with user typing)
            if not self.color_combobox.hasFocus():
                set_box_value(self.color_combobox, color)
        # Set the color label of the colorbox
        self.set_color_label(color)
    # This function sets a given color as the current color
    @QC.Slot(str)
    def _set_color(self, color):
        # Unconditionally apply `color` to both label and combobox
        # Set the color label
        self.set_color_label(color)
        # Set the combobox to the proper value as well
        set_box_value(self.color_combobox, color)
    # This function sets the color of the colorlabel
    @QC.Slot(str)
    def set_color_label(self, color):
        """
        Sets the current color label to the provided `color`.
        Parameters
        ----------
        color : str
            The color that needs to be used as the current color label. The
            provided `color` can be any string that is accepted as a color by
            matplotlib.
        """
        # Create pixmap of given color
        pixmap = self.create_color_pixmap(color, self.clabel_size)
        # Set the colorlabel
        set_box_value(self.color_label, pixmap)
    # This function sets the default color of the color box
    @QC.Slot()
    @QC.Slot(str)
    def set_default_color(self, color=None):
        """
        Sets the default color value to `color`.
        Optional
        --------
        color : str or None. Default: None
            The matplotlib color value that must be set as the default value
            for this colorbox.
            If *None*, use the current color value of this colorbox instead.
        """
        # If color is None, obtain current value
        if color is None:
            color = self.get_box_value()
        # Set new default color
        self.default_color = color
        # Show the default as the line-edit placeholder text
        self.color_combobox.lineEdit().setPlaceholderText(color)
    # This function retrieves a value of this special box
    def get_box_value(self, *value_sig):
        """
        Returns the current color value of the color combobox.
        Returns
        -------
        color : str
            The current matplotlib color value.
        """
        # Return the value currently set
        return(get_box_value(self.color_combobox, *value_sig))
    # This function sets the value of this special box
    def set_box_value(self, value, *value_sig):
        """
        Sets the current color value to `value`.
        This also sets the default color to `value`.
        Parameters
        ----------
        value : str
            The matplotlib color value that must be set for this colorbox.
        """
        # Convert color to a proper MPL color if it is a float
        # (a string like '0.5' denotes a matplotlib grayscale value)
        if '.' in value:
            value = to_hex(value)
        # Set the current default color
        self.set_default_color(value)
        self._set_color(value)
# Make class with a special box for setting the colormap of a plotted 2D hist
class ColorMapBox(GW.BaseBox):
    """
    Defines the :class:`~ColorMapBox` class.
    This widget allows for colormaps in *matplotlib* to be easily picked by the
    user.
    All colormaps that are registered in :mod:`matplotlib.cm` before the first
    time that this widget is initialized, are available in the combobox.
    Colormaps can be added to *matplotlib* using the
    :func:`~matplotlib.cm.register_cmap` function.
    The first time this widget is initialized, all icons for all colormaps are
    drawn and stored in memory, which can take a few seconds. Every subsequent
    initialization reuses these cached icons and is therefore much faster.
    """
    # Signals
    # Emitted on every colormap change; the [str] overload carries its name.
    modified = QC.Signal([], [str])
    # Set the size for the colormap previews
    cmap_size = (70, 16)
    # Set flag for first_init
    # Class-level flag: True once the icon cache has been built
    init_flag = False
    # Set property for bad_cmaps
    # Colormaps that should never be used for plotting data; selecting a
    # colormap whose name starts with any of these triggers a warning
    bad_cmaps = {'gist_ncar', 'gist_rainbow', 'gist_stern', 'hsv', 'jet',
                 'nipy_spectral'}
    # Initialize ColorMapBox
    def __init__(self, parent=None):
        """
        Initialize an instance of the :class:`~ColorMapBox` class.
        """
        # Call super constructor
        super().__init__(parent)
        # Create the colormap box
        self.init()
    # This property returns the default 'modified' signal
    @property
    def default_modified_signal(self):
        # The string overload is preferred, as it carries the colormap name
        return(self.modified[str])
    # This function creates a combobox with colormaps
    def init(self):
        """
        Sets up the colormap box entry after it has been initialized.
        """
        # Check if this class has been initialized before, and do so if not
        if not self.init_flag:
            self.first_init()
        # Create a layout for this widget
        box_layout = GL.QHBoxLayout(self)
        box_layout.setContentsMargins(0, 0, 0, 0)
        # Create a combobox for cmaps
        cmaps_box = GW.EditableComboBox()
        validator = GW.ComboBoxValidator(cmaps_box)
        cmaps_box.setValidator(validator)
        # Add all colormaps to cmaps_box
        for cmap in self.cmaps_cl:
            cmap_icon = self.cmap_icons[cmap]
            cmaps_box.addItem(cmap_icon, cmap)
        # Add some separators
        # Insert in reverse order so earlier indices remain valid
        for i in reversed(self.cum_len[:-2]):
            cmaps_box.insertSeparator(i)
        # Set remaining properties
        set_box_value(cmaps_box, rcParams['image.cmap'])
        cmaps_box.setIconSize(QC.QSize(*self.cmap_size))
        cmaps_box.completer().popup().setIconSize(QC.QSize(*self.cmap_size))
        get_modified_signal(cmaps_box, str).connect(self.cmap_selected)
        # On focus loss, revert the line-edit text to the selected item
        cmaps_box.focusLost.connect(
            lambda: set_box_value(cmaps_box, get_box_value(cmaps_box, int)))
        # Add cmaps_box to layout
        box_layout.addWidget(cmaps_box)
        self.cmaps_box = cmaps_box
    # This function prepares the class for being initialized for the first time
    def first_init(self):
        """
        Builds the class-level colormap list and icon cache shared by all
        instances of this widget.
        """
        # Obtain all colormaps that are registered in MPL
        cmaps = plt.colormaps()
        # Split cmaps up into their cmap types
        cm_types = ['sequential', 'diverging', 'cyclic', 'qualitative', 'misc']
        cmaps_cd = {cm_type: sset() for cm_type in cm_types}
        for cmap in cmaps:
            cmaps_cd[get_cmap_type(cmap)].add(cmap)
        # Create empty list of cmaps sorted on type
        cmaps_cl = []
        cum_len = []
        # Loop over every type
        for cmaps_cs in cmaps_cd.values():
            # Take all base versions of the colormaps
            cmaps_cl.extend([cmap for cmap in cmaps_cs
                             if not cmap.endswith('_r')])
            # Record boundary between base and reversed versions
            cum_len.extend([len(cmaps_cl)])
            # Also add all the reversed versions
            cmaps_cl.extend([cmap for cmap in cmaps_cs if cmap.endswith('_r')])
            # Record category boundary (twice; init() uses cum_len[:-2] when
            # inserting separators)
            cum_len.extend([len(cmaps_cl)]*2)
        # Store list of colormaps and the category lengths
        ColorMapBox.cmaps_cl = cmaps_cl
        ColorMapBox.cum_len = cum_len
        # Create the colormap icons
        cmap_icons = sdict()
        for cmap in cmaps:
            cmap_icons[cmap] = self.create_cmap_icon(cmap, self.cmap_size)
        ColorMapBox.cmap_icons = cmap_icons
        # Save that class has been initialized for the first time
        ColorMapBox.init_flag = True
    # This function is automatically called whenever 'modified' is emitted
    @QC.Slot()
    def modified_signal_slot(self):
        # Re-emit with the current colormap name attached
        self.modified[str].emit(get_box_value(self.cmaps_box))
    # This function creates an icon of a colormap
    @staticmethod
    def create_cmap_icon(cmap, size):
        """
        Creates a :obj:`~PyQt5.QtGui.QIcon` object of the given `cmap` with the
        provided `size`.
        Parameters
        ----------
        cmap : :obj:`~matplotlib.colors.Colormap` object or str
            The colormap for which an icon needs to be created.
        size : tuple
            A tuple containing the width and height dimension values of the
            icon to be created.
        Returns
        -------
        icon : :obj:`~PyQt5.QtGui.QIcon` object
            The instance of the :class:`~PyQt5.QtGui.QIcon` class that was
            created from the provided `cmap` and `size`.
        """
        # Obtain the cmap
        cmap = cm.get_cmap(cmap)
        # Obtain the RGBA values of the colormap
        mplRGBA = cmap(np.arange(cmap.N))
        # Convert to Qt RGBA values
        qtRGBA = [ColorBox.convert_to_qcolor(RGBA).rgba() for RGBA in mplRGBA]
        # Create an image object
        # One pixel per colormap entry; scaled to the requested size below
        image = QG.QImage(cmap.N, 1, QG.QImage.Format_RGB32)
        # Set the value of every pixel in this image
        for i, RGBA in enumerate(qtRGBA):
            image.setPixel(i, 0, RGBA)
        # Scale the image to its proper size
        image = image.scaled(*size)
        # Convert the image to a pixmap
        pixmap = QG.QPixmap.fromImage(image)
        # Convert the pixmap to an icon
        icon = QG.QIcon(pixmap)
        # Return the icon
        return(icon)
    # This function allows cmaps to be added to the 'bad_cmaps' set
    def addBadCmaps(self, cmaps):
        """
        Adds the provided list of `cmaps` to the set of colormaps that should
        not be used by the user under any circumstances.
        If a user selects a colormap that is considered 'bad', a warning
        message will be shown to the user.
        Parameters
        ----------
        cmaps : list of str
            List of names of colormaps that are registered in
            :mod:`matplotlib.cm` that should be added to the list of 'bad'
            colormaps.
        """
        # Obtain the names of all colormaps
        cmap_names = plt.colormaps()
        # Make sure every colormap provided is a valid colormap
        # (reversed '_r' variants are covered by the startswith check)
        for cmap in cmaps:
            if cmap in cmap_names and not cmap.endswith('_r'):
                # If so, add it to the set
                self.bad_cmaps.add(cmap)
    # This function checks a selected cmap
    @QC.Slot(str)
    def cmap_selected(self, cmap):
        """
        Qt slot that checks a provided `cmap` and shows an error message if
        `cmap` is a terrible colormap.
        """
        # If a terrible colormap is selected, show error message
        if cmap.startswith(tuple(self.bad_cmaps)):
            # Create error message
            err_msg = ("The selected <b><i>%s</i></b> cmap is a bad choice for"
                       " plotting data. To avoid introducing fake perceptual "
                       "features, it is recommended to pick a <i>perceptually "
                       "uniform sequential</i> colormap, like the ones with "
                       "the <i>cmr.</i> prefix.<br><br>"
                       "See <a href=\"%s\">here</a> for more information on "
                       "this subject." %
                       (cmap,
                        "https://cmasher.readthedocs.io/user/background.html"))
            # Show error window
            QW.QMessageBox.warning(
                self, "%s WARNING" % (cmap.upper()), err_msg)
    # This function retrieves a value of this special box
    def get_box_value(self, *value_sig):
        """
        Returns the current colormap of the colormap box.
        Returns
        -------
        cmap : str or :obj:`~matplotlib.colors.Colormap` object
            The currently selected colormap.
        """
        # Obtain the value
        cmap = get_box_value(self.cmaps_box)
        # Obtain the Colormap object if requested
        if Colormap in value_sig:
            cmap = plt.get_cmap(cmap)
        # Return it
        return(cmap)
    # This function sets the value of this special box
    def set_box_value(self, value, *value_sig):
        """
        Sets the current colormap to `value`.
        Parameters
        ----------
        value : str or :obj:`~matplotlib.colors.Colormap` object
            The colormap that must be used for this colormap box.
        """
        # Obtain the name of the provided colormap if needed
        if isinstance(value, Colormap):
            value = value.name
        # Set this as the current colormap
        set_box_value(self.cmaps_box, value)
| [
"itertools.chain",
"guipy.widgets.ComboBoxValidator",
"qtpy.QtWidgets.QColorDialog.getColor",
"matplotlib.colors.to_rgba",
"guipy.widgets.set_box_value",
"sortedcontainers.SortedSet",
"numpy.arange",
"qtpy.QtCore.Signal",
"qtpy.QtCore.QSize",
"qtpy.QtCore.Slot",
"guipy.layouts.QHBoxLayout",
"s... | [((1624, 1644), 'qtpy.QtCore.Signal', 'QC.Signal', (['[]', '[str]'], {}), '([], [str])\n', (1633, 1644), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((3474, 3483), 'qtpy.QtCore.Slot', 'QC.Slot', ([], {}), '()\n', (3481, 3483), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((8842, 8851), 'qtpy.QtCore.Slot', 'QC.Slot', ([], {}), '()\n', (8849, 8851), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((9491, 9503), 'qtpy.QtCore.Slot', 'QC.Slot', (['str'], {}), '(str)\n', (9498, 9503), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((10799, 10811), 'qtpy.QtCore.Slot', 'QC.Slot', (['str'], {}), '(str)\n', (10806, 10811), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((11076, 11088), 'qtpy.QtCore.Slot', 'QC.Slot', (['str'], {}), '(str)\n', (11083, 11088), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((11702, 11711), 'qtpy.QtCore.Slot', 'QC.Slot', ([], {}), '()\n', (11709, 11711), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((11717, 11729), 'qtpy.QtCore.Slot', 'QC.Slot', (['str'], {}), '(str)\n', (11724, 11729), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((14051, 14071), 'qtpy.QtCore.Signal', 'QC.Signal', (['[]', '[str]'], {}), '([], [str])\n', (14060, 14071), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((17657, 17666), 'qtpy.QtCore.Slot', 'QC.Slot', ([], {}), '()\n', (17664, 17666), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((20276, 20288), 'qtpy.QtCore.Slot', 'QC.Slot', (['str'], {}), '(str)\n', (20283, 20288), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((2880, 2900), 'guipy.layouts.QHBoxLayout', 'GL.QHBoxLayout', (['self'], {}), '(self)\n', (2894, 2900), True, 'from guipy import layouts as GL, widgets as GW\n'), ((3856, 3867), 'guipy.widgets.QLabel', 'GW.QLabel', ([], 
{}), '()\n', (3865, 3867), True, 'from guipy import layouts as GL, widgets as GW\n'), ((5042, 5063), 'guipy.widgets.EditableComboBox', 'GW.EditableComboBox', ([], {}), '()\n', (5061, 5063), True, 'from guipy import layouts as GL, widgets as GW\n'), ((5136, 5187), 'guipy.widgets.ComboBoxValidator', 'GW.ComboBoxValidator', (['color_box', '"""#?[\\\\da-fA-F]{6}"""'], {}), "(color_box, '#?[\\\\da-fA-F]{6}')\n", (5156, 5187), True, 'from guipy import layouts as GL, widgets as GW\n'), ((6773, 6787), 'matplotlib.colors.to_rgba', 'to_rgba', (['color'], {}), '(color)\n', (6780, 6787), False, 'from matplotlib.colors import BASE_COLORS, CSS4_COLORS, Colormap, to_hex, to_rgba\n'), ((8521, 8561), 'qtpy.QtGui.QImage', 'QG.QImage', (['*size', 'QG.QImage.Format_RGB32'], {}), '(*size, QG.QImage.Format_RGB32)\n', (8530, 8561), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((8699, 8726), 'qtpy.QtGui.QPixmap.fromImage', 'QG.QPixmap.fromImage', (['image'], {}), '(image)\n', (8719, 8726), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((9159, 9254), 'qtpy.QtWidgets.QColorDialog.getColor', 'QW.QColorDialog.getColor', (['qcolor'], {'parent': 'self', 'options': 'QW.QColorDialog.DontUseNativeDialog'}), '(qcolor, parent=self, options=QW.QColorDialog.\n DontUseNativeDialog)\n', (9183, 9254), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((10975, 11016), 'guipy.widgets.set_box_value', 'set_box_value', (['self.color_combobox', 'color'], {}), '(self.color_combobox, color)\n', (10988, 11016), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((11596, 11635), 'guipy.widgets.set_box_value', 'set_box_value', (['self.color_label', 'pixmap'], {}), '(self.color_label, pixmap)\n', (11609, 11635), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((12708, 12754), 'guipy.widgets.get_box_value', 'get_box_value', (['self.color_combobox', '*value_sig'], 
{}), '(self.color_combobox, *value_sig)\n', (12721, 12754), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((15042, 15062), 'guipy.layouts.QHBoxLayout', 'GL.QHBoxLayout', (['self'], {}), '(self)\n', (15056, 15062), True, 'from guipy import layouts as GL, widgets as GW\n'), ((15172, 15193), 'guipy.widgets.EditableComboBox', 'GW.EditableComboBox', ([], {}), '()\n', (15191, 15193), True, 'from guipy import layouts as GL, widgets as GW\n'), ((15214, 15245), 'guipy.widgets.ComboBoxValidator', 'GW.ComboBoxValidator', (['cmaps_box'], {}), '(cmaps_box)\n', (15234, 15245), True, 'from guipy import layouts as GL, widgets as GW\n'), ((15620, 15668), 'guipy.widgets.set_box_value', 'set_box_value', (['cmaps_box', "rcParams['image.cmap']"], {}), "(cmaps_box, rcParams['image.cmap'])\n", (15633, 15668), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((16280, 16295), 'matplotlib.pyplot.colormaps', 'plt.colormaps', ([], {}), '()\n', (16293, 16295), True, 'import matplotlib.pyplot as plt\n'), ((17318, 17325), 'sortedcontainers.SortedDict', 'sdict', ([], {}), '()\n', (17323, 17325), True, 'from sortedcontainers import SortedDict as sdict, SortedSet as sset\n'), ((18554, 18571), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cmap'], {}), '(cmap)\n', (18565, 18571), False, 'from matplotlib import cm, rcParams\n'), ((18830, 18874), 'qtpy.QtGui.QImage', 'QG.QImage', (['cmap.N', '(1)', 'QG.QImage.Format_RGB32'], {}), '(cmap.N, 1, QG.QImage.Format_RGB32)\n', (18839, 18874), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((19150, 19177), 'qtpy.QtGui.QPixmap.fromImage', 'QG.QPixmap.fromImage', (['image'], {}), '(image)\n', (19170, 19177), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((19234, 19250), 'qtpy.QtGui.QIcon', 'QG.QIcon', (['pixmap'], {}), '(pixmap)\n', (19242, 19250), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((19972, 
19987), 'matplotlib.pyplot.colormaps', 'plt.colormaps', ([], {}), '()\n', (19985, 19987), True, 'import matplotlib.pyplot as plt\n'), ((21718, 21747), 'guipy.widgets.get_box_value', 'get_box_value', (['self.cmaps_box'], {}), '(self.cmaps_box)\n', (21731, 21747), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((22443, 22479), 'guipy.widgets.set_box_value', 'set_box_value', (['self.cmaps_box', 'value'], {}), '(self.cmaps_box, value)\n', (22456, 22479), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((3552, 3586), 'guipy.widgets.get_box_value', 'get_box_value', (['self.color_combobox'], {}), '(self.color_combobox)\n', (3565, 3586), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((5305, 5319), 'itertools.chain', 'chain', (['*colors'], {}), '(*colors)\n', (5310, 5319), False, 'from itertools import chain\n'), ((10260, 10315), 're.sub', 're.sub', (['"""^[\\\\da-fA-F]{6}$"""', "(lambda x: '#' + x[0])", 'color'], {}), "('^[\\\\da-fA-F]{6}$', lambda x: '#' + x[0], color)\n", (10266, 10315), False, 'import re\n'), ((10326, 10367), 'guipy.widgets.set_box_value', 'set_box_value', (['self.color_combobox', 'color'], {}), '(self.color_combobox, color)\n', (10339, 10367), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((13229, 13242), 'matplotlib.colors.to_hex', 'to_hex', (['value'], {}), '(value)\n', (13235, 13242), False, 'from matplotlib.colors import BASE_COLORS, CSS4_COLORS, Colormap, to_hex, to_rgba\n'), ((15699, 15724), 'qtpy.QtCore.QSize', 'QC.QSize', (['*self.cmap_size'], {}), '(*self.cmap_size)\n', (15707, 15724), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((15776, 15801), 'qtpy.QtCore.QSize', 'QC.QSize', (['*self.cmap_size'], {}), '(*self.cmap_size)\n', (15784, 15801), True, 'from qtpy import QtCore as QC, QtGui as QG, QtWidgets as QW\n'), ((16453, 16459), 
'sortedcontainers.SortedSet', 'sset', ([], {}), '()\n', (16457, 16459), True, 'from sortedcontainers import SortedDict as sdict, SortedSet as sset\n'), ((17735, 17764), 'guipy.widgets.get_box_value', 'get_box_value', (['self.cmaps_box'], {}), '(self.cmaps_box)\n', (17748, 17764), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((18645, 18662), 'numpy.arange', 'np.arange', (['cmap.N'], {}), '(cmap.N)\n', (18654, 18662), True, 'import numpy as np\n'), ((21852, 21870), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (21864, 21870), True, 'import matplotlib.pyplot as plt\n'), ((10608, 10649), 'guipy.widgets.set_box_value', 'set_box_value', (['self.color_combobox', 'color'], {}), '(self.color_combobox, color)\n', (10621, 10649), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((15811, 15846), 'guipy.widgets.get_modified_signal', 'get_modified_signal', (['cmaps_box', 'str'], {}), '(cmaps_box, str)\n', (15830, 15846), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((15957, 15986), 'guipy.widgets.get_box_value', 'get_box_value', (['cmaps_box', 'int'], {}), '(cmaps_box, int)\n', (15970, 15986), False, 'from guipy.widgets import get_box_value, get_modified_signal, set_box_value\n'), ((16533, 16552), 'cmasher.utils.get_cmap_type', 'get_cmap_type', (['cmap'], {}), '(cmap)\n', (16546, 16552), False, 'from cmasher.utils import get_cmap_type\n')] |
import os
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
import torch
from torchvision.utils import make_grid
import numpy as np
import pyrender
import trimesh
import constants
class Renderer:
    """
    Renderer used for visualizing the SMPL model.
    Code adapted from https://github.com/vchoutas/smplify-x
    """
    def __init__(self, focal_length=5000, img_res=224, faces=None):
        """
        Parameters
        ----------
        focal_length : float
            Focal length (in pixels) of the pinhole camera used for rendering.
        img_res : int
            Width and height of the (square) rendered image.
        faces : array-like or None
            SMPL triangle faces used to build the mesh from vertices.
        """
        self.renderer = pyrender.OffscreenRenderer(viewport_width=img_res,
                                                  viewport_height=img_res,
                                                  point_size=1.0)
        self.focal_length = focal_length
        # Principal point (cx, cy); the viewport is assumed to be square
        self.camera_center = [img_res // 2, img_res // 2]
        self.faces = faces
    def visualize_tb(self, vertices, camera_translation, images):
        """
        Build a TensorBoard-ready image grid that interleaves each input
        image with its corresponding SMPL rendering.
        Parameters
        ----------
        vertices : torch.Tensor
            Batch of SMPL vertices, one entry per image.
        camera_translation : torch.Tensor
            Batch of camera translations, one entry per image.
        images : torch.Tensor
            Batch of input images in (N, C, H, W) layout.
        Returns
        -------
        torch.Tensor
            Grid with two columns: input image, rendered overlay.
        """
        vertices = vertices.cpu().numpy()
        camera_translation = camera_translation.cpu().numpy()
        images = images.cpu()
        # __call__ composites in HWC layout, so convert NCHW -> NHWC here
        images_np = np.transpose(images.numpy(), (0, 2, 3, 1))
        rend_imgs = []
        for i in range(vertices.shape[0]):
            # Render overlay, then convert back to CHW for make_grid
            rend_img = torch.from_numpy(np.transpose(self.__call__(
                vertices[i], camera_translation[i], images_np[i]),
                (2, 0, 1))).float()
            rend_imgs.append(images[i])
            rend_imgs.append(rend_img)
        rend_imgs = make_grid(rend_imgs, nrow=2)
        return rend_imgs
    def __call__(self, vertices, camera_translation, image, pickle_path, frame):
        """
        Render `vertices` on top of `image`, using the ground-truth camera
        extrinsics stored in the sequence pickle at `pickle_path` for the
        given `frame` instead of the provided `camera_translation`.
        Parameters
        ----------
        vertices : np.ndarray
            SMPL vertices for a single person.
        camera_translation : np.ndarray
            Predicted camera translation (its x-component is flipped in
            place for the OpenGL convention, but it is otherwise unused:
            the ground-truth pose from the pickle takes precedence).
        image : np.ndarray
            Background image in HWC layout with float values in [0, 1].
        pickle_path : str
            Path to a pickle with 'cam_poses' (per-frame 4x4 extrinsics).
        frame : int
            Frame index into the sequence.
        Returns
        -------
        np.ndarray
            Composite image: rendered mesh where visible, `image` elsewhere.
        """
        material = pyrender.MetallicRoughnessMaterial(
            metallicFactor=0.2,
            alphaMode='OPAQUE',
            baseColorFactor=(0.8, 0.3, 0.3, 1.0))
        # Flip x to match the renderer's camera convention (in-place!)
        camera_translation[0] *= -1.
        mesh = trimesh.Trimesh(vertices, self.faces)
        # NOTE(review): earlier revisions rotated the mesh by 180 deg about x
        # here, which flipped the SMPL ground truth upside down; the rotation
        # is intentionally not applied.
        mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
        scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))
        scene.add(mesh, 'mesh')
        # Identity pose: null rotation, homogeneous row already in place
        camera_pose = np.eye(4)
        # Load the ground-truth extrinsics for this frame
        # (use a context manager so the file handle is always closed)
        import pickle as pkl
        with open(pickle_path, 'rb') as f:
            seq = pkl.load(f, encoding='latin-1')
        # 4.82 is an empirical scale factor -- TODO confirm dataset units
        camera_translation_gt = seq['cam_poses'][frame][:3, 3] / 4.82
        camera_translation_gt[0] *= -1.
        camera_pose[:3, 3] = camera_translation_gt
        # Override depth so the fixed-focal-length camera matches the crop;
        # 1e-9 guards against division by a zero z-translation
        camera_pose[2, 3] = 2*constants.FOCAL_LENGTH/(
            constants.IMG_RES * seq['cam_poses'][frame][2, 3] + 1e-9)
        # Intrinsics (bug fix: cy previously used camera_center[0]; identical
        # values only because the viewport is square)
        camera = pyrender.IntrinsicsCamera(fx=self.focal_length,
                                            fy=self.focal_length,
                                            cx=self.camera_center[0],
                                            cy=self.camera_center[1])
        scene.add(camera, pose=camera_pose)
        # Three directional lights from different directions
        light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)
        light_pose = np.eye(4)
        for position in ([0, -1, 1], [0, 1, 1], [1, 1, 2]):
            light_pose[:3, 3] = np.array(position)
            scene.add(light, pose=light_pose)
        color, rend_depth = self.renderer.render(scene,
                                                 flags=pyrender.RenderFlags.RGBA)
        color = color.astype(np.float32) / 255.0
        # Composite: rendered pixels where the mesh is visible (depth > 0),
        # the input image everywhere else
        valid_mask = (rend_depth > 0)[:, :, None]
        output_img = (color[:, :, :3] * valid_mask +
                      (1 - valid_mask) * image)
        return output_img
| [
"numpy.radians",
"pyrender.IntrinsicsCamera",
"numpy.eye",
"pyrender.Mesh.from_trimesh",
"pyrender.DirectionalLight",
"pyrender.MetallicRoughnessMaterial",
"pyrender.OffscreenRenderer",
"numpy.array",
"trimesh.Trimesh",
"torchvision.utils.make_grid",
"pyrender.Scene"
] | [((407, 502), 'pyrender.OffscreenRenderer', 'pyrender.OffscreenRenderer', ([], {'viewport_width': 'img_res', 'viewport_height': 'img_res', 'point_size': '(1.0)'}), '(viewport_width=img_res, viewport_height=img_res,\n point_size=1.0)\n', (433, 502), False, 'import pyrender\n'), ((1265, 1293), 'torchvision.utils.make_grid', 'make_grid', (['rend_imgs'], {'nrow': '(2)'}), '(rend_imgs, nrow=2)\n', (1274, 1293), False, 'from torchvision.utils import make_grid\n'), ((1420, 1536), 'pyrender.MetallicRoughnessMaterial', 'pyrender.MetallicRoughnessMaterial', ([], {'metallicFactor': '(0.2)', 'alphaMode': '"""OPAQUE"""', 'baseColorFactor': '(0.8, 0.3, 0.3, 1.0)'}), "(metallicFactor=0.2, alphaMode='OPAQUE',\n baseColorFactor=(0.8, 0.3, 0.3, 1.0))\n", (1454, 1536), False, 'import pyrender\n'), ((1624, 1661), 'trimesh.Trimesh', 'trimesh.Trimesh', (['vertices', 'self.faces'], {}), '(vertices, self.faces)\n', (1639, 1661), False, 'import trimesh\n'), ((1991, 2042), 'pyrender.Mesh.from_trimesh', 'pyrender.Mesh.from_trimesh', (['mesh'], {'material': 'material'}), '(mesh, material=material)\n', (2017, 2042), False, 'import pyrender\n'), ((2060, 2105), 'pyrender.Scene', 'pyrender.Scene', ([], {'ambient_light': '(0.5, 0.5, 0.5)'}), '(ambient_light=(0.5, 0.5, 0.5))\n', (2074, 2105), False, 'import pyrender\n'), ((2161, 2170), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2167, 2170), True, 'import numpy as np\n'), ((3319, 3445), 'pyrender.IntrinsicsCamera', 'pyrender.IntrinsicsCamera', ([], {'fx': 'self.focal_length', 'fy': 'self.focal_length', 'cx': 'self.camera_center[0]', 'cy': 'self.camera_center[0]'}), '(fx=self.focal_length, fy=self.focal_length, cx=\n self.camera_center[0], cy=self.camera_center[0])\n', (3344, 3445), False, 'import pyrender\n'), ((3824, 3885), 'pyrender.DirectionalLight', 'pyrender.DirectionalLight', ([], {'color': '[1.0, 1.0, 1.0]', 'intensity': '(1)'}), '(color=[1.0, 1.0, 1.0], intensity=1)\n', (3849, 3885), False, 'import pyrender\n'), ((3907, 3916), 
'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3913, 3916), True, 'import numpy as np\n'), ((3946, 3966), 'numpy.array', 'np.array', (['[0, -1, 1]'], {}), '([0, -1, 1])\n', (3954, 3966), True, 'import numpy as np\n'), ((4038, 4057), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (4046, 4057), True, 'import numpy as np\n'), ((4129, 4148), 'numpy.array', 'np.array', (['[1, 1, 2]'], {}), '([1, 1, 2])\n', (4137, 4148), True, 'import numpy as np\n'), ((1729, 1744), 'numpy.radians', 'np.radians', (['(180)'], {}), '(180)\n', (1739, 1744), True, 'import numpy as np\n')] |
# @Author: charles
# @Date: 2021-06-05 11:06:90
# @Email: <EMAIL>
# @Last modified by: charles
# @Last modified time: 2021-06-05 11:06:27
import numpy as np
import matplotlib.pyplot as plt
from pydlc import dense_lines
if __name__ == "__main__":
    # Build a synthetic ensemble of noisy exponential-decay curves.
    x = np.linspace(0, 90, 25)
    ys = [np.random.randn(1) * np.exp(-x / 100) for _ in range(10000)]
    # Side-by-side comparison: raw spaghetti plot vs. density-lines rendering.
    fig, axs = plt.subplots(1, 2, figsize=(8, 3), sharey=True, sharex=True)
    axs[0].plot(x, np.array(ys).T, lw=1)  # slow to draw and visually cluttered
    axs[0].set_title('Line Chart')
    im = dense_lines(ys, x=x, ax=axs[1], cmap='magma')  # fast and clean
    axs[1].set_title('Density Lines Chart')
    fig.colorbar(im)
    fig.tight_layout()
    plt.savefig('./figures/example.png', dpi=144, bbox_inches='tight')
    plt.show()
| [
"matplotlib.pyplot.savefig",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"pydlc.dense_lines",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((307, 329), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', '(25)'], {}), '(0, 90, 25)\n', (318, 329), True, 'import numpy as np\n'), ((454, 514), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 3)', 'sharey': '(True)', 'sharex': '(True)'}), '(1, 2, figsize=(8, 3), sharey=True, sharex=True)\n', (466, 514), True, 'import matplotlib.pyplot as plt\n'), ((630, 675), 'pydlc.dense_lines', 'dense_lines', (['ys'], {'x': 'x', 'ax': 'axs[1]', 'cmap': '"""magma"""'}), "(ys, x=x, ax=axs[1], cmap='magma')\n", (641, 675), False, 'from pydlc import dense_lines\n'), ((794, 860), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figures/example.png"""'], {'dpi': '(144)', 'bbox_inches': '"""tight"""'}), "('./figures/example.png', dpi=144, bbox_inches='tight')\n", (805, 860), True, 'import matplotlib.pyplot as plt\n'), ((865, 875), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (873, 875), True, 'import matplotlib.pyplot as plt\n'), ((534, 546), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (542, 546), True, 'import numpy as np\n'), ((387, 405), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (402, 405), True, 'import numpy as np\n'), ((406, 422), 'numpy.exp', 'np.exp', (['(-x / 100)'], {}), '(-x / 100)\n', (412, 422), True, 'import numpy as np\n')] |
# Tindar class version 0:
# copied the jupyter notebook
# grouped cells into functions
# converted global variables to object
# attributes by adding self.___ where appropriate
# TODO: Naming error for n>10
from pulp import *
import numpy as np
from pathlib import Path
# Repository root, resolved relative to this file (parents[3] walks four
# directory levels up from the module — verify against the package layout).
PROJECT_DIR = str(Path(__file__).resolve().parents[3])
class Tindar:
    '''Class to solve Tindar pairing problems

    The pairing problem is a binary linear program: x_ij = 1 means person i
    is matched to person j. Constraints enforce symmetry (i matched to j
    implies j matched to i), mutual interest, and at most one partner per
    person; the objective maximizes the number of connected lovers.

    Input
    -----
    love_matrix: np.array
        square matrix indicating which person is interested
        in which other person
    '''
    def __init__(self, love_matrix):
        self.love_matrix = love_matrix

        m, n = love_matrix.shape
        if m != n:
            raise ValueError(f"love_matrix is not square: love_matrix.shape"
                             f"= {love_matrix.shape}")
        self.n = n

        for i in range(self.n):
            if self.love_matrix[i, i] != 0:
                raise ValueError("love_matrix diagonal contains nonzeros")

        # BUGFIX: separate the indices with an underscore so variable names
        # stay unique for n > 10 (with f"x_{i}{j}", "x_111" is ambiguous:
        # it could mean (1, 11) or (11, 1)).
        self.x_names = [f"x_{i}_{j}" for i in range(n) for j in range(n)]
        self.x = [LpVariable(name=x_name, cat="Binary")
                  for x_name in self.x_names]
        self.x_np = np.array(self.x).reshape((n, n))

    # Symmetry constraints: if one is paired, the other is paired
    def create_symmetry_constraints(self, inplace=True):
        """Create x_ij - x_ji == 0 constraints for every unordered pair.

        BUGFIX: constraint names are built from the loop indices directly
        instead of being parsed out of the last two characters of the
        expression name, which was wrong for n > 10.
        """
        constraints_symmetry = [
            LpConstraint(
                e=LpAffineExpression(
                    [(self.x_np[i, j], 1), (self.x_np[j, i], -1)],
                    name=f"lhs_sym_{i}_{j}"
                ),
                sense=0,
                name=f"constraint_sym_{i}_{j}",
                rhs=0
            )
            for i in range(self.n) for j in range(i+1, self.n)
        ]

        # Verification: one constraint per unordered off-diagonal pair
        if len(constraints_symmetry) != (self.n**2-self.n)/2:
            raise Exception(
                "Symmetry constraints not constructed right:"
                f"love_matrix.shape = {self.love_matrix.shape},"
                f"len(constraints_symmetry) should be {(self.n**2-self.n)/2}"
                f", actually is {len(constraints_symmetry)}"
            )

        # Function behaviour
        if inplace:  # object is modified, no return value
            self.constraints_symmetry = constraints_symmetry
        else:  # only result is returned
            return constraints_symmetry

    # Feasibility constraints: only pairs if person likes the other
    def create_like_constraints(self, inplace=True):
        """Create x_ij <= love_matrix[i, j] constraints.

        BUGFIX: the right-hand side is indexed with the loop variables
        instead of re-parsing single characters out of the expression
        name (int(name[-2]), int(name[-1])), which broke for n > 10.
        """
        constraints_like = [
            LpConstraint(
                e=LpAffineExpression([(self.x_np[i, j], 1)],
                                     name=f"lhs_like_{i}_{j}"),
                sense=-1,
                name=f"constraint_like_{i}_{j}",
                rhs=self.love_matrix[i, j]
            )
            for i in range(self.n) for j in range(self.n)
        ]

        # Verification: one constraint per matrix entry
        if len(constraints_like) != self.n**2:
            raise Exception(
                "Liking constraints not constructed right:"
                f"A.shape = {self.love_matrix.shape}, len(constraints_like)"
                f"should be {self.n**2}, actually is {len(constraints_like)}"
            )

        # Function behaviour
        if inplace:  # object is modified, no return value
            self.constraints_like = constraints_like
        else:  # only result is returned
            return constraints_like

    # Single assignment: one person can have at most one other person
    def create_single_assignment_constraints(self, inplace=True):
        """Create row-sum <= 1 and column-sum <= 1 constraints."""
        # Left-hand side: rowsum <= 1
        lhs_single_rowsum = [
            LpAffineExpression(
                [(self.x_np[i, j], 1) for j in range(self.n)],
                name=f"lhs_single_rowsum_{i}"
            )
            for i in range(self.n)
        ]

        # Left-hand side: colsum <= 1
        lhs_single_colsum = [
            LpAffineExpression(
                [(self.x_np[i, j], 1) for i in range(self.n)],
                name=f"lhs_single_colsum_{j}"
            )
            for j in range(self.n)
        ]

        # Constraints
        constraints_single_rowsum = self.make_single_constraints(
            lhs_single_rowsum, "rowsum")
        constraints_single_colsum = self.make_single_constraints(
            lhs_single_colsum, "colsum")

        # Verification
        self.check_single_constraints(constraints_single_rowsum, "rowsum")
        self.check_single_constraints(constraints_single_colsum, "colsum")

        # Function behaviour
        if inplace:  # object is modified, no return value
            self.constraints_single_rowsum = constraints_single_rowsum
            self.constraints_single_colsum = constraints_single_colsum
        else:  # only result is returned
            return constraints_single_rowsum, constraints_single_colsum

    # Auxiliary functions for single assigment constraints
    @staticmethod
    def make_single_constraints(lhs_single, kind):
        """Wrap each left-hand side expression in a <= 1 constraint.

        BUGFIX: constraints are numbered with enumerate() — the lhs lists
        are built in index order — instead of using the last character of
        the expression name, which was wrong for indices >= 10.
        """
        constraints_single = [
            LpConstraint(
                e=lhs_s,
                sense=-1,
                name=f"constraint_single_{kind}_{idx}",
                rhs=1
            )
            for idx, lhs_s in enumerate(lhs_single)
        ]
        return constraints_single

    def check_single_constraints(self, constraints_single, kind):
        """Sanity-check that exactly n single-assignment constraints exist."""
        if len(constraints_single) != self.n:
            raise Exception(
                f"Constraints single {kind} not constructed right:"
                f"A.shape = {self.love_matrix.shape}, "
                f"len(constraints_single_{kind}) should be {self.n}, "
                f"actually is {len(constraints_single)}"
            )

    def create_all_constraints(self):
        """Build every constraint family and collect them into one list."""
        self.create_symmetry_constraints()
        self.create_like_constraints()
        self.create_single_assignment_constraints()

        self.constraints_all = [
            *self.constraints_symmetry,
            *self.constraints_like,
            *self.constraints_single_rowsum,
            *self.constraints_single_colsum
        ]

    def create_problem(self):
        """Assemble the PuLP maximization problem (objective + constraints)."""
        # Initialize constraints and objective
        self.create_all_constraints()
        self.objective = LpAffineExpression([(x_i, 1) for x_i in self.x])

        # Create PuLP problem
        self.prob_pulp = LpProblem("The_Tindar_Problem", LpMaximize)
        self.prob_pulp += self.objective
        for c in self.constraints_all:
            self.prob_pulp += c

    def write_problem(self, path=PROJECT_DIR+"/models/Tindar.lp"):
        """Write the assembled problem in LP format to *path*."""
        self.prob_pulp.writeLP(path)

    def solve_problem(self):
        """Solve the problem with PuLP's default solver."""
        self.prob_pulp.solve()

    def inspect_solution_status(self, verbose=True):
        """Return (and optionally print) the solver status string."""
        stat = LpStatus[self.prob_pulp.status]
        if verbose:
            print("Status:", stat)
        return stat

    def inspect_solution_vars(self, verbose=True):
        """Return (and optionally print) the decision variables."""
        vars_pulp = self.prob_pulp.variables()
        if verbose:
            for v in vars_pulp:
                print(v.name, "=", v.varValue)
        return vars_pulp

    def inspect_solution_obj(self, verbose=True):
        """Return (and optionally print) the objective value."""
        obj = value(self.prob_pulp.objective)
        if verbose:
            print("Number of lovers connected = ", obj)
        return obj
class TindarFactory(Tindar):
    '''Class to generate Tindar objects randomly

    n: integer
        number of people in the model
    difficulty: 1 <= integer <= 5
        difficulty of the Tindar problem for humans,
        assuming more edges is more difficult
    '''
    # Edge probability is interpolated between these bounds by `difficulty`.
    MIN_EDGE_PROB = 0.1
    MAX_EDGE_PROB = 0.9

    def __init__(self, n, difficulty):
        self.check_init(n, difficulty)
        self.n = n
        self.difficulty = difficulty
        self.create_love_matrix()
        Tindar.__init__(self, self.love_matrix)

    # Input validation
    @staticmethod
    def check_init(n, difficulty):
        """Validate constructor arguments; raises ValueError on bad input."""
        # n must be a positive integer
        if not isinstance(n, int):
            raise ValueError(f"TindarGenerator init error: "
                             f"type(n) = {type(n)}")
        if n <= 0:
            raise ValueError(f"TindarGenerator init error: "
                             f"n={n} < 0")

        # difficulty must be an integer in [1, 5]
        if not isinstance(difficulty, int):
            raise ValueError(f"TindarGenerator init error: "
                             f"type(difficulty) = {type(difficulty)}")
        if not (1 <= difficulty <= 5):
            raise ValueError(f"TindarGenerator init error: "
                             f"difficulty={difficulty} not between 1 and 5")

    @classmethod
    def bernouilli_parameter(cls, difficulty):
        """Map a difficulty level to a Bernoulli edge probability."""
        # IDIOM FIX: the first parameter of a classmethod is conventionally
        # `cls`, not `self` (behavior unchanged).
        diff_scaled = (difficulty-1)/5
        return (diff_scaled*cls.MAX_EDGE_PROB) + cls.MIN_EDGE_PROB

    def create_love_matrix(self, n=None, difficulty=None, inplace=True):
        """Draw a random 0/1 interest matrix with a zero diagonal.

        Falls back to the instance's n/difficulty when not supplied;
        either stores the matrix on the instance (inplace=True) or
        returns it.
        """
        if n is None:
            n = self.n
        if difficulty is None:
            difficulty = self.difficulty

        p = self.bernouilli_parameter(difficulty)
        love_matrix = np.random.binomial(1, p, size=(n, n))
        # nobody is interested in themselves
        for i in range(n):
            love_matrix[i, i] = 0

        if inplace:
            self.love_matrix = love_matrix
        else:
            return love_matrix
if __name__ == "__main__":
    # Demo: generate a random 10-person problem and solve it end to end.
    n = 10
    difficulty = 4

    tindar = TindarFactory(n, difficulty)
    print(f"love_matrix: {tindar.love_matrix}")

    tindar.create_problem()
    tindar.write_problem()
    tindar.solve_problem()
    tindar.inspect_solution_status()
    tindar.inspect_solution_obj()  # BUGFIX: call parentheses were missing,
                                   # so the objective was never printed
    tindar.inspect_solution_vars()
| [
"numpy.array",
"numpy.random.binomial",
"pathlib.Path"
] | [((9211, 9248), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p'], {'size': '(n, n)'}), '(1, p, size=(n, n))\n', (9229, 9248), True, 'import numpy as np\n'), ((1181, 1197), 'numpy.array', 'np.array', (['self.x'], {}), '(self.x)\n', (1189, 1197), True, 'import numpy as np\n'), ((290, 304), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (294, 304), False, 'from pathlib import Path\n')] |
import sys
import numpy as np
sys.path.append('../')
from lib.viztool.landscape import Surface, Dir2D, Sampler
from lib.viztool import projection as proj, scheduler
# Merge the per-layer surfaces of two ranks by taking the element-wise
# maximum, then write the merged values back into both files.
surf_a = Surface.load('LM-TFMdeq_rank0_nproc2/wt103/default/surf_[-1.0,1.0,31]x[-1.0,1.0,31].h5')
surf_b = Surface.load('LM-TFMdeq_rank1_nproc2/wt103/default/surf_[-1.0,1.0,31]x[-1.0,1.0,31].h5')
for name in surf_a.layers.keys():
    merged = np.maximum(surf_a.layers[name], surf_b.layers[name])
    assert merged.shape == surf_a.layers[name].shape
    surf_a.layers[name][:] = merged[:]
    surf_b.layers[name][:] = merged[:]
surf_a.save('w')
surf_b.save('w')
| [
"numpy.stack",
"lib.viztool.landscape.Surface.load",
"sys.path.append"
] | [((31, 53), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (46, 53), False, 'import sys\n'), ((174, 267), 'lib.viztool.landscape.Surface.load', 'Surface.load', (['"""LM-TFMdeq_rank0_nproc2/wt103/default/surf_[-1.0,1.0,31]x[-1.0,1.0,31].h5"""'], {}), "(\n 'LM-TFMdeq_rank0_nproc2/wt103/default/surf_[-1.0,1.0,31]x[-1.0,1.0,31].h5')\n", (186, 267), False, 'from lib.viztool.landscape import Surface, Dir2D, Sampler\n'), ((270, 363), 'lib.viztool.landscape.Surface.load', 'Surface.load', (['"""LM-TFMdeq_rank1_nproc2/wt103/default/surf_[-1.0,1.0,31]x[-1.0,1.0,31].h5"""'], {}), "(\n 'LM-TFMdeq_rank1_nproc2/wt103/default/surf_[-1.0,1.0,31]x[-1.0,1.0,31].h5')\n", (282, 363), False, 'from lib.viztool.landscape import Surface, Dir2D, Sampler\n'), ((428, 487), 'numpy.stack', 'np.stack', (['(sur1.layers[layer], sur2.layers[layer])'], {'axis': '(-1)'}), '((sur1.layers[layer], sur2.layers[layer]), axis=-1)\n', (436, 487), True, 'import numpy as np\n')] |
import numpy as np
import argparse
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
# Command-line arguments selecting which job and training epoch to analyse.
parser = argparse.ArgumentParser('Predict change')
parser.add_argument('--job_id', type=int)
parser.add_argument('--epoch', type=int)
args = parser.parse_args()
def classifier_test(X, y, weights):
    """Cross-validated accuracy of an unregularized logistic regression.

    Parameters
    ----------
    X : np.ndarray
        Features; the leading axis indexes groups that share the same CV
        split and are flattened into the sample axis afterwards (assumes
        shape (n_groups, n_samples, n_features) — TODO confirm with caller).
    y : np.ndarray
        Integer labels, shape (n_groups, n_samples).
    weights : unused
        Kept only for backward compatibility with existing callers.

    Returns
    -------
    acc : list of float
        Test accuracy for each of the 5 x 10 CV folds.
    (mean, lower, upper) : tuple of float
        Mean accuracy and a basic 95% pivot interval over the fold scores.
    """
    rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=0)
    acc = []
    # Split indices are computed on the first group and applied to all groups.
    for train_index, test_index in rskf.split(X[0], y[0]):
        X_train, X_test = X[:, train_index], X[:, test_index]
        y_train, y_test = y[:, train_index], y[:, test_index]
        # Fold the group axis into the sample axis.
        X_train = X_train.reshape(-1, X_train.shape[2])
        X_test = X_test.reshape(-1, X_test.shape[2])
        y_train = y_train.reshape(-1)
        y_test = y_test.reshape(-1)
        # Standardize using training statistics only.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)
        classifier = LogisticRegression(max_iter=500, penalty='none')
        classifier.fit(X_train, y_train)
        acc.append(accuracy_score(y_test, classifier.predict(X_test)))
    # CLEANUP: removed unused `auc` list and the `parameters` accumulator
    # that was never appended to and whose np.array was discarded.
    mean = np.mean(acc)
    # Basic (pivot) 95% interval over fold scores.
    lower = 2 * mean - np.percentile(acc, q=97.5)
    upper = 2 * mean - np.percentile(acc, q=2.5)
    return acc, (mean, lower, upper)
# Load the synthetic features/labels/mask/weights written upstream for this
# (job_id, epoch) pair.
X = np.load('../Analysis_Data/Synthetic_classifier_data%d_epoch%d.npy'%(args.job_id, args.epoch))
y = np.load('../Analysis_Data/Synthetic_classifier_labels%d_epoch%d.npy'%(args.job_id, args.epoch)).astype(int)
mask = np.load('../Analysis_Data/Synthetic_classifier_mask%d_epoch%d.npy'%(args.job_id, args.epoch))
weights = np.load('../Analysis_Data/Synthetic_classifier_weights%d_epoch%d.npy'%(args.job_id, args.epoch))
for t in range(20):
    # m > 0 keeps the axis-1 entries with at least one nonzero mask value at
    # time t — presumably the valid trials; confirm with the data generator.
    m = mask[:,t].sum(-1)
    acc, acc_stats = classifier_test(X[:,m>0,t], y[:,m>0], weights)
    print(t, acc_stats)
    # Persist the per-fold accuracies for this time point.
    np.save('../Analysis_Data/Classifier_accuracy_time%d_job_id%d_epoch%d_acc.npy'%(t, args.job_id, args.epoch), acc)
| [
"numpy.mean",
"argparse.ArgumentParser",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.percentile",
"sklearn.model_selection.RepeatedStratifiedKFold",
"numpy.load",
"numpy.save"
] | [((251, 292), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Predict change"""'], {}), "('Predict change')\n", (274, 292), False, 'import argparse\n'), ((1609, 1709), 'numpy.load', 'np.load', (["('../Analysis_Data/Synthetic_classifier_data%d_epoch%d.npy' % (args.job_id,\n args.epoch))"], {}), "('../Analysis_Data/Synthetic_classifier_data%d_epoch%d.npy' % (args.\n job_id, args.epoch))\n", (1616, 1709), True, 'import numpy as np\n'), ((1822, 1922), 'numpy.load', 'np.load', (["('../Analysis_Data/Synthetic_classifier_mask%d_epoch%d.npy' % (args.job_id,\n args.epoch))"], {}), "('../Analysis_Data/Synthetic_classifier_mask%d_epoch%d.npy' % (args.\n job_id, args.epoch))\n", (1829, 1922), True, 'import numpy as np\n'), ((1926, 2029), 'numpy.load', 'np.load', (["('../Analysis_Data/Synthetic_classifier_weights%d_epoch%d.npy' % (args.\n job_id, args.epoch))"], {}), "('../Analysis_Data/Synthetic_classifier_weights%d_epoch%d.npy' % (\n args.job_id, args.epoch))\n", (1933, 2029), True, 'import numpy as np\n'), ((452, 517), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': '(5)', 'n_repeats': '(10)', 'random_state': '(0)'}), '(n_splits=5, n_repeats=10, random_state=0)\n', (475, 517), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((1413, 1433), 'numpy.array', 'np.array', (['parameters'], {}), '(parameters)\n', (1421, 1433), True, 'import numpy as np\n'), ((1454, 1466), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (1461, 1466), True, 'import numpy as np\n'), ((2176, 2300), 'numpy.save', 'np.save', (["('../Analysis_Data/Classifier_accuracy_time%d_job_id%d_epoch%d_acc.npy' % (\n t, args.job_id, args.epoch))", 'acc'], {}), "(\n '../Analysis_Data/Classifier_accuracy_time%d_job_id%d_epoch%d_acc.npy' %\n (t, args.job_id, args.epoch), acc)\n", (2183, 2300), True, 'import numpy as np\n'), ((1091, 1107), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1105, 1107), 
False, 'from sklearn.preprocessing import StandardScaler\n'), ((1228, 1276), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(500)', 'penalty': '"""none"""'}), "(max_iter=500, penalty='none')\n", (1246, 1276), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1488, 1514), 'numpy.percentile', 'np.percentile', (['acc'], {'q': '(97.5)'}), '(acc, q=97.5)\n', (1501, 1514), True, 'import numpy as np\n'), ((1536, 1561), 'numpy.percentile', 'np.percentile', (['acc'], {'q': '(2.5)'}), '(acc, q=2.5)\n', (1549, 1561), True, 'import numpy as np\n'), ((1707, 1809), 'numpy.load', 'np.load', (["('../Analysis_Data/Synthetic_classifier_labels%d_epoch%d.npy' % (args.\n job_id, args.epoch))"], {}), "('../Analysis_Data/Synthetic_classifier_labels%d_epoch%d.npy' % (\n args.job_id, args.epoch))\n", (1714, 1809), True, 'import numpy as np\n')] |
#!/usr/bin/python
"""
Create a heapq python implementation.
this works:
import heapq
alist = [21, 44, 37, 38, 24, 2, 10, 44]
heapq.heapify(alist)
print alist
alist = [21, 44, 37, 38, 24, 2, 10, 44]
build_heap(alist)
print alist
alist = [21, 44, 37, 38, 24, 2, 10, 44]
build_heap(alist, cmp_swap=cmp_swap_max)
print alist
giving output:
[2, 24, 10, 38, 44, 37, 21, 44]
[2, 24, 10, 38, 44, 37, 21, 44]
[44, 44, 37, 38, 24, 2, 10, 21]
"""
#### BUILD HEAP FROM A LIST ####
#Given a list
#downheapify
#
# if not a leaf
# get left and right child
# sift down compare and swap inplace
# recurse on both left and right
# else
# get the leaf
# upheapify
#
# if not yet root
# sift up compare and swap inplace
# get the parent
# recurse on parent
# else
# siftup the last root
#############################################################
from collections import deque
def is_leaf(node_index, alist):
    """Return True when the node at *node_index* has no children in *alist*.

    In an array-backed heap the left child of node i lives at 2*i + 1; the
    node is a leaf exactly when that index falls past the end of the list.
    """
    return 2 * node_index + 1 >= len(alist)
def cmp_swap_min(alist, parent_index, lchild, rchild, size, ln):
    """One compare-and-swap step of a min-heap sift.

    Swaps alist[parent_index] with its smaller in-range child when the
    parent is larger. Mutates ``alist`` in place and returns None.
    ``size`` is unused but kept so the signature matches ``cmp_swap_max``.

    CLEANUP: removed the dead commented-out block and the unused
    ``greater`` variable; comparison logic is unchanged.
    """
    # NOTE(review): when neither child index is in range, `smaller` stays 0
    # and the parent is compared (and possibly swapped) with alist[0]. That
    # looks unintended, but the heap routines in this module currently rely
    # on this exact behaviour — confirm before changing it.
    smaller = 0
    if lchild < ln:
        if rchild < ln:
            # pick the smaller of the two children (ties keep the left one)
            smaller = rchild if alist[lchild] > alist[rchild] else lchild
        else:
            smaller = lchild
    if alist[parent_index] > alist[smaller]:
        alist[parent_index], alist[smaller] = alist[smaller], alist[parent_index]
def cmp_swap_max(alist, parent_index, lchild, rchild, size, ln):
    """One compare-and-swap step of a max-heap sift.

    Swaps alist[parent_index] with the larger in-range child when that
    child exceeds the parent. Mutates ``alist`` in place; ``size`` is
    unused but kept for signature parity with ``cmp_swap_min``.
    """
    if lchild >= ln:
        return  # no children in range: nothing to compare
    root_val = alist[parent_index]
    if rchild < ln and alist[rchild] > root_val and alist[rchild] > alist[lchild]:
        alist[parent_index], alist[rchild] = alist[rchild], root_val
    elif alist[lchild] > root_val:
        alist[parent_index], alist[lchild] = alist[lchild], root_val
def calc_child_nodes(index):
    """Return the (left, right) child indices of heap node *index*."""
    left = 2 * index + 1
    return left, left + 1
def siftup(alist, node, size, ln, cmp_swap):
    """Run one compare-and-swap step at *node* using *cmp_swap*.

    Child indices are derived in place (left = 2*node + 1); the heap
    ordering is entirely delegated to the supplied comparator.
    """
    left = 2 * node + 1
    cmp_swap(alist, node, left, left + 1, size, ln)
def upheapify(alist, ln, root, size, cmp_swap):
    """Sift from *root* up the ancestor chain to the heap root.

    Performs one compare-and-swap at every node on the path from *root*
    to index 0 (inclusive); a root that is already 0 or out of range is
    sifted exactly once. Iterative form of the original tail recursion.
    """
    while 0 < root < ln:
        siftup(alist, root, size, ln, cmp_swap)
        root = (root - 1) // 2
    # root is now 0 (or was outside [1, ln) to begin with): final sift.
    return siftup(alist, root, size, ln, cmp_swap)
def siftdown(alist, root, lchild, rchild, size, ln, cmp_swap):
    """Delegate one downward compare-and-swap step to *cmp_swap*."""
    result = cmp_swap(alist, root, lchild, rchild, size, ln)
    return result
def downheapify(alist, ln, root, size, cmp_swap):
    """Recursively drive the subtree rooted at *root* toward heap order.

    Walks top-down, doing one compare-and-swap per internal node; when the
    recursion reaches a leaf (or an index past the end of the list) it
    switches direction and sifts back up along the ancestor chain via
    `upheapify`. `alist` is mutated in place; `ln` is len(alist); `size`
    and `cmp_swap` are threaded through to the comparator unchanged.
    """
    if not is_leaf(root, alist) and (root < ln):
        #get left and right nodes
        lchild, rchild = calc_child_nodes(root)
        #sift down comparing and swapping
        siftdown(alist, root, lchild, rchild, size, ln, cmp_swap)
        #get the leaves of each branch left and right
        downheapify(alist, ln, lchild, size, cmp_swap)
        downheapify(alist, ln, rchild, size, cmp_swap)
    else:
        #get the node that is a leaf if its a leaf overflow
        if not (root<ln):
            # map an out-of-range index back to its parent before sifting up
            root = (root-1)//2
        #Sift up leaf
        return upheapify(alist, ln, root, size, cmp_swap)
def build_heap(alist, start_index=0, cmp_swap=cmp_swap_min):
    """Heapify *alist* in place and return it.

    The walk starts at *start_index*; *cmp_swap* selects min-heap
    (default) or max-heap ordering.
    """
    length = len(alist)
    downheapify(alist, length, start_index, length // 2, cmp_swap)
    return alist
def heap_insert(alist, num, cmp_swap):
    """Append *num* and restore the heap property.

    Works for any container with ``append`` (lists and deques); returns
    the re-heapified container.
    """
    alist.append(num)
    rebuilt = build_heap(alist, cmp_swap=cmp_swap)
    return rebuilt
def heap_remove_root(alist, cmp_swap):
    """Remove the root of deque *alist* and re-heapify.

    The last element is moved to the front to fill the hole before the
    heap is rebuilt; returns the rebuilt heap.
    """
    alist.popleft()
    last = alist.pop()
    alist.appendleft(last)
    return build_heap(alist, cmp_swap=cmp_swap)
def heap_sort(alist, cmp_swap):
    """Drain deque *alist* into a new sorted list.

    Re-heapifies before every extraction, so *alist* is emptied as a side
    effect; ascending vs. descending order follows *cmp_swap*.
    """
    drained = []
    while alist:
        # restore the heap property, then pop the current root
        alist = build_heap(alist, cmp_swap=cmp_swap)
        drained.append(alist.popleft())
    return drained
if __name__ == "__main__":
    # Micro-benchmark: compare this module's deque-based heap_sort against a
    # stdlib-heapq reference sort on random integer sequences of growing size.
    # CLEANUP: removed the large dead commented-out string of Python-2 tests.
    import time
    from heapq import heappush, heappop
    from math import log
    from numpy.random import randint

    def heapsort(iterable):
        # Reference implementation built on the stdlib heap.
        h = []
        for value in iterable:
            heappush(h, value)
        return [heappop(h) for i in range(len(h))]

    tmp = []  # rows of (n, t_heap_sort, t_stdlib, n*log2(n))
    for i in range(2, 1002):  # BUGFIX: `xrange` is Python-2 only
        l = deque(randint(0, i, i))
        ln = len(l)
        # BUGFIX: heap_sort drains the deque, so snapshot the values first;
        # previously the stdlib sort was timed on an already-empty list.
        data = list(l)
        start = time.time()
        heap_sort(l, cmp_swap_max)
        end = time.time() - start
        start1 = time.time()
        heapsort(data)
        end1 = time.time() - start1
        tmp.append((ln, end, end1, ln*log(ln, 2)))
"math.log",
"heapq.heappop",
"numpy.random.randint",
"heapq.heappush",
"time.time"
] | [((6390, 6401), 'time.time', 'time.time', ([], {}), '()\n', (6399, 6401), False, 'import time\n'), ((6488, 6499), 'time.time', 'time.time', ([], {}), '()\n', (6497, 6499), False, 'import time\n'), ((6196, 6214), 'heapq.heappush', 'heappush', (['h', 'value'], {}), '(h, value)\n', (6204, 6214), False, 'from heapq import heapify, heappush, heappop\n'), ((6231, 6241), 'heapq.heappop', 'heappop', (['h'], {}), '(h)\n', (6238, 6241), False, 'from heapq import heapify, heappush, heappop\n'), ((6337, 6353), 'numpy.random.randint', 'randint', (['(0)', 'i', 'i'], {}), '(0, i, i)\n', (6344, 6353), False, 'from numpy.random import randint\n'), ((6451, 6462), 'time.time', 'time.time', ([], {}), '()\n', (6460, 6462), False, 'import time\n'), ((6541, 6552), 'time.time', 'time.time', ([], {}), '()\n', (6550, 6552), False, 'import time\n'), ((6600, 6610), 'math.log', 'log', (['ln', '(2)'], {}), '(ln, 2)\n', (6603, 6610), False, 'from math import log\n')] |
# pylint: skip-file
"""Run SRG on an NN potential."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from numpy.linalg import eigh
# Check if module is installed, otherwise load locally
try:
import srg3d.potential as potential
import srg3d.srg as srg
except ImportError:
from context import srg3d
potential = srg3d.potential
srg = srg3d.srg
# Physical constants: hbar*c in MeV fm, nucleon masses in MeV
hbarc = 197.327
proton_mass = 938.272
neutron_mass = 939.565
# two-body reduced mass of the proton-neutron system
red_mass = proton_mass * neutron_mass / (proton_mass + neutron_mass)
# Load unevolve potential
a = potential.load(2, 3, 'EM420new', '00001', 50, 'np')
dim = len(a.nodes)
# Set up T_rel flow operator: all-zero potential mask, all-one kinetic mask
v_mask = np.array([[0 for _ in range(dim)] for _ in range(dim)])
k_mask = np.array([[1 for _ in range(dim)] for _ in range(dim)])
# Set up SRG
srg_obj = srg.SRG(a, v_mask, k_mask)
# Create list of lambdas to which to evolve: coarse steps first, then
# progressively finer ones at low lambda
lambdas = [25] + list(range(10, 4, -1)) + list(np.arange(4, 3.1, -0.5)) \
    + list(np.arange(3.0, 2.5, -0.2)) + list(np.arange(2.4, 1.38, -0.05))
for l in lambdas:
    # Evolve to lambda
    srg_obj.evolve(l, verbose=False, integrator='dop853', atol=10**(-6),
                   rtol=10**(-6), nsteps=10**(5))
    # Load reference potential (calculated by different code)
    b = potential.load(2, 3, 'EM420new', '00001', l, 'np')
    # Extract evolved potential
    a = srg_obj.get_potential()
    # Compute Hamiltonians (potential with integration weights + kinetic term)
    hamiltonian_ref = b.with_weights() + b.kinetic_energy()
    hamiltonian = a.with_weights() + a.kinetic_energy()
    # Get lowest eigenvalues; comparing E_srg against E_ref checks that the
    # evolution preserves the spectrum
    ev_ref = np.amin(eigh(hamiltonian_ref)[0])
    ev = np.amin(eigh(hamiltonian)[0])
    # Output values; hbarc**2 / (2 * red_mass) converts the eigenvalue to MeV
    print('Lambda: {}'.format(l))
    print('E_ref = {} MeV'.format(hbarc**2 / (2 * red_mass) * ev_ref))
    print('E_srg = {} MeV\n'.format(hbarc**2 / (2 * red_mass) * ev))
| [
"numpy.linalg.eigh",
"srg3d.srg.SRG",
"srg3d.potential.load",
"numpy.arange"
] | [((667, 718), 'srg3d.potential.load', 'potential.load', (['(2)', '(3)', '"""EM420new"""', '"""00001"""', '(50)', '"""np"""'], {}), "(2, 3, 'EM420new', '00001', 50, 'np')\n", (681, 718), True, 'import srg3d.potential as potential\n'), ((922, 948), 'srg3d.srg.SRG', 'srg.SRG', (['a', 'v_mask', 'k_mask'], {}), '(a, v_mask, k_mask)\n', (929, 948), True, 'import srg3d.srg as srg\n'), ((1378, 1428), 'srg3d.potential.load', 'potential.load', (['(2)', '(3)', '"""EM420new"""', '"""00001"""', 'l', '"""np"""'], {}), "(2, 3, 'EM420new', '00001', l, 'np')\n", (1392, 1428), True, 'import srg3d.potential as potential\n'), ((1113, 1140), 'numpy.arange', 'np.arange', (['(2.4)', '(1.38)', '(-0.05)'], {}), '(2.4, 1.38, -0.05)\n', (1122, 1140), True, 'import numpy as np\n'), ((1079, 1104), 'numpy.arange', 'np.arange', (['(3.0)', '(2.5)', '(-0.2)'], {}), '(3.0, 2.5, -0.2)\n', (1088, 1104), True, 'import numpy as np\n'), ((1689, 1710), 'numpy.linalg.eigh', 'eigh', (['hamiltonian_ref'], {}), '(hamiltonian_ref)\n', (1693, 1710), False, 'from numpy.linalg import eigh\n'), ((1732, 1749), 'numpy.linalg.eigh', 'eigh', (['hamiltonian'], {}), '(hamiltonian)\n', (1736, 1749), False, 'from numpy.linalg import eigh\n'), ((1041, 1064), 'numpy.arange', 'np.arange', (['(4)', '(3.1)', '(-0.5)'], {}), '(4, 3.1, -0.5)\n', (1050, 1064), True, 'import numpy as np\n')] |
import numpy as np
import cmath as cm
"""
Implementation of the theoretical formulations of reflection and transmission of plane waves described in SEISMIC WAVE THEORY by <NAME>
"""
def snell(theta1, v1, v2):
    """Refraction angle from Snell's law.

    *theta1* is the incidence angle in radians for a wave travelling from
    medium 1 (speed *v1*) into medium 2 (speed *v2*). The returned
    transmission angle is complex (cmath.asin), which covers post-critical
    incidence as well.
    """
    ratio = v2 / v1
    return cm.asin(ratio * np.sin(theta1))
def zoeppritz(theta1, theta2, phi1, phi2, vp1, vp2, vs1, vs2, p1, p2, Ai, Bi):
    """
    Solve the Zoeppritz equations for a plane wave at a welded interface.

    Medium 1 is the incidence medium and medium 2 the transmission medium.
    Solves for the amplitudes of the reflected and transmitted P-wave and
    SV-wave. Angles must be in radians and are normally obtained from
    Snell's law (they may be complex beyond the critical angle).

    Parameters
    ----------
    theta1: angle of incidence/reflection of P-wave on medium 1
    theta2: angle of transmission of P-wave on medium 2
    phi1: angle of incidence/reflection of SV-wave on medium 1
    phi2: angle of transmission of SV-wave on medium 2
    vp1, vp2: P-wave propagation speed in medium 1 / medium 2
    vs1, vs2: SV-wave propagation speed in medium 1 / medium 2
    p1, p2: density of medium 1 / medium 2
        (DOC FIX: the parameters are named p1/p2, not rho1/rho2)
    Ai: amplitude of incident P-wave
    Bi: amplitude of incident SV-wave

    Return
    ------
    Ar: amplitude of reflected P-wave
    At: amplitude of transmitted P-wave
    Br: amplitude of reflected SV-wave
    Bt: amplitude of transmitted SV-wave
    """
    # Hoist the trig factors that appear repeatedly in both matrices.
    st1, st2 = np.sin(theta1), np.sin(theta2)
    ct1, ct2 = np.cos(theta1), np.cos(theta2)
    sp1, sp2 = np.sin(phi1), np.sin(phi2)
    cp1, cp2 = np.cos(phi1), np.cos(phi2)
    c2p1 = 1 - 2 * sp1 ** 2  # = cos(2*phi1)
    c2p2 = 1 - 2 * sp2 ** 2  # = cos(2*phi2)
    s2p1 = np.sin(2 * phi1)
    s2p2 = np.sin(2 * phi2)

    # Matrix form of the Zoeppritz equations, P @ [Ar, Br, At, Bt]^T = R @ U_i
    # (see Krebes, Seismic Wave Theory, ch. 3).
    # CLEANUP: removed the large dead commented-out alternative formulation.
    P = np.array([
        [-st1, -cp1, st2, cp2],
        [ct1, -sp1, ct2, -sp2],
        [2 * p1 * vs1 * sp1 * ct1, p1 * vs1 * c2p1,
         2 * p2 * vs2 * sp2 * ct2, p2 * vs2 * c2p2],
        [-p1 * vp1 * c2p1, p1 * vs1 * s2p1,
         p2 * vp2 * c2p2, -p2 * vs2 * s2p2],
    ])
    R = np.array([
        [st1, cp1, -st2, -cp2],
        [ct1, -sp1, ct2, -sp2],
        [2 * p1 * vs1 * sp1 * ct1, p1 * vs1 * c2p1,
         2 * p2 * vs2 * sp2 * ct2, p2 * vs2 * c2p2],
        [p1 * vp1 * c2p1, -p1 * vs1 * s2p1,
         -p2 * vp2 * c2p2, p2 * vs2 * s2p2],
    ])

    # Scattering matrix Q = P^{-1} R; conjugate to fix the sign of the
    # imaginary component. BUGFIX: the old element-wise loop wrote Python
    # complex values back into Q, which fails when all angles are real
    # (Q is then a float array that cannot store complex values); np.conj
    # handles real and complex arrays alike. Note the conjugation does not
    # change the magnitudes extracted below.
    Q = np.conj(np.dot(np.linalg.inv(P), R))

    Rpp = np.abs(Q[0][0])  # p reflection coeff from incident p
    Rps = np.abs(Q[1][0])  # s reflection coeff from incident p
    Tpp = np.abs(Q[2][0])  # p transmission coeff from incident p
    Tps = np.abs(Q[3][0])  # s transmission coeff from incident p
    Rsp = np.abs(Q[0][1])  # p reflection coeff from incident s
    Rss = np.abs(Q[1][1])  # s reflection coeff from incident s
    Tsp = np.abs(Q[2][1])  # p transmission coeff from incident s
    Tss = np.abs(Q[3][1])  # s transmission coeff from incident s

    # Superpose the contributions of the incident P (Ai) and SV (Bi) waves.
    Ar = Ai*Rpp + Bi*Rsp
    At = Ai*Tpp + Bi*Tsp
    Br = Ai*Rps + Bi*Rss
    Bt = Ai*Tps + Bi*Tss
    return Ar, At, Br, Bt
"numpy.sin",
"numpy.abs",
"numpy.linalg.inv",
"numpy.cos"
] | [((5053, 5068), 'numpy.abs', 'np.abs', (['Q[0][0]'], {}), '(Q[0][0])\n', (5059, 5068), True, 'import numpy as np\n'), ((5117, 5132), 'numpy.abs', 'np.abs', (['Q[1][0]'], {}), '(Q[1][0])\n', (5123, 5132), True, 'import numpy as np\n'), ((5181, 5196), 'numpy.abs', 'np.abs', (['Q[2][0]'], {}), '(Q[2][0])\n', (5187, 5196), True, 'import numpy as np\n'), ((5247, 5262), 'numpy.abs', 'np.abs', (['Q[3][0]'], {}), '(Q[3][0])\n', (5253, 5262), True, 'import numpy as np\n'), ((5319, 5334), 'numpy.abs', 'np.abs', (['Q[0][1]'], {}), '(Q[0][1])\n', (5325, 5334), True, 'import numpy as np\n'), ((5383, 5398), 'numpy.abs', 'np.abs', (['Q[1][1]'], {}), '(Q[1][1])\n', (5389, 5398), True, 'import numpy as np\n'), ((5447, 5462), 'numpy.abs', 'np.abs', (['Q[2][1]'], {}), '(Q[2][1])\n', (5453, 5462), True, 'import numpy as np\n'), ((5513, 5528), 'numpy.abs', 'np.abs', (['Q[3][1]'], {}), '(Q[3][1])\n', (5519, 5528), True, 'import numpy as np\n'), ((4842, 4858), 'numpy.linalg.inv', 'np.linalg.inv', (['P'], {}), '(P)\n', (4855, 4858), True, 'import numpy as np\n'), ((322, 336), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (328, 336), True, 'import numpy as np\n'), ((3625, 3639), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (3631, 3639), True, 'import numpy as np\n'), ((3641, 3653), 'numpy.cos', 'np.cos', (['phi2'], {}), '(phi2)\n', (3647, 3653), True, 'import numpy as np\n'), ((3674, 3688), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (3680, 3688), True, 'import numpy as np\n'), ((3705, 3719), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (3711, 3719), True, 'import numpy as np\n'), ((4186, 4200), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (4192, 4200), True, 'import numpy as np\n'), ((4202, 4214), 'numpy.cos', 'np.cos', (['phi1'], {}), '(phi1)\n', (4208, 4214), True, 'import numpy as np\n'), ((4267, 4281), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (4273, 4281), True, 'import numpy as np\n'), ((4298, 4312), 'numpy.cos', 
'np.cos', (['theta2'], {}), '(theta2)\n', (4304, 4312), True, 'import numpy as np\n'), ((3594, 3608), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (3600, 3608), True, 'import numpy as np\n'), ((3611, 3623), 'numpy.cos', 'np.cos', (['phi1'], {}), '(phi1)\n', (3617, 3623), True, 'import numpy as np\n'), ((3691, 3703), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (3697, 3703), True, 'import numpy as np\n'), ((3722, 3734), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (3728, 3734), True, 'import numpy as np\n'), ((3785, 3799), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (3791, 3799), True, 'import numpy as np\n'), ((3907, 3921), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (3913, 3921), True, 'import numpy as np\n'), ((4055, 4071), 'numpy.sin', 'np.sin', (['(2 * phi1)'], {}), '(2 * phi1)\n', (4061, 4071), True, 'import numpy as np\n'), ((4144, 4160), 'numpy.sin', 'np.sin', (['(2 * phi2)'], {}), '(2 * phi2)\n', (4150, 4160), True, 'import numpy as np\n'), ((4217, 4231), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (4223, 4231), True, 'import numpy as np\n'), ((4234, 4246), 'numpy.cos', 'np.cos', (['phi2'], {}), '(phi2)\n', (4240, 4246), True, 'import numpy as np\n'), ((4284, 4296), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (4290, 4296), True, 'import numpy as np\n'), ((4315, 4327), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (4321, 4327), True, 'import numpy as np\n'), ((4378, 4392), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (4384, 4392), True, 'import numpy as np\n'), ((4500, 4514), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (4506, 4514), True, 'import numpy as np\n'), ((4648, 4664), 'numpy.sin', 'np.sin', (['(2 * phi1)'], {}), '(2 * phi1)\n', (4654, 4664), True, 'import numpy as np\n'), ((4738, 4754), 'numpy.sin', 'np.sin', (['(2 * phi2)'], {}), '(2 * phi2)\n', (4744, 4754), True, 'import numpy as np\n'), ((3770, 3782), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (3776, 
3782), True, 'import numpy as np\n'), ((3875, 3887), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (3881, 3887), True, 'import numpy as np\n'), ((4363, 4375), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (4369, 4375), True, 'import numpy as np\n'), ((4468, 4480), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (4474, 4480), True, 'import numpy as np\n'), ((3839, 3851), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (3845, 3851), True, 'import numpy as np\n'), ((3944, 3956), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (3950, 3956), True, 'import numpy as np\n'), ((4006, 4018), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (4012, 4018), True, 'import numpy as np\n'), ((4094, 4106), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (4100, 4106), True, 'import numpy as np\n'), ((4432, 4444), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (4438, 4444), True, 'import numpy as np\n'), ((4537, 4549), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (4543, 4549), True, 'import numpy as np\n'), ((4598, 4610), 'numpy.sin', 'np.sin', (['phi1'], {}), '(phi1)\n', (4604, 4610), True, 'import numpy as np\n'), ((4689, 4701), 'numpy.sin', 'np.sin', (['phi2'], {}), '(phi2)\n', (4695, 4701), True, 'import numpy as np\n')] |
import sys
import numpy as np
import sounddevice as sd
import speech_recognition as sr
from googletrans import Translator
from kivy.uix.widget import Widget
from scipy.io.wavfile import write
class MyGrid(Widget):
    def button_record_translate(self):
        """
        Record audio, transcribe it, translate it, and display the result
        in the ``text_label`` widget.

        :return: None
        """
        recorded_text, translated_text = record_translate('output.wav')
        # record_translate() may yield None values when recognition fails,
        # so guard before building the label text.  PEP 8: compare against
        # None with ``is``, not ``==``.
        if recorded_text is None or translated_text is None:
            self.ids.text_label.text = 'Error in recording'
            return
        self.ids.text_label.text = 'Message: ' + recorded_text + '\n' + 'Translation: ' + translated_text
def translate(speech):
    """
    Translate the given text into Simplified Chinese.

    :param speech: the text to translate
    :return: the translated string
    """
    result = Translator().translate(speech, dest='zh-cn')
    print(result.text)
    return result.text
def record_translate(soundpath):
    """
    Record 10 seconds of microphone audio, save it as a WAV file, then
    transcribe and translate the recording.

    :param soundpath: the path you want to save to the file
    :return: tuple of (recognized text, translated text)
    """
    fs = 44100  # sample rate in Hz (CD quality)
    seconds = 10  # fixed recording duration in seconds
    myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)
    sd.wait() # wait until the recording is done
    # Rescale the float recording to full-range 32-bit integers for the WAV
    # file.  NOTE(review): np.abs(myrecording).max() is 0 for pure silence,
    # which would divide by zero here -- confirm that case cannot occur.
    processed_recording = (np.iinfo(np.int32).max *
                           (myrecording / np.abs(myrecording).max())).astype(np.int32)
    write(soundpath, fs, processed_recording)
    text = convertSpeech(soundpath)
    translated_text = translate(text)
    return text, translated_text
def convertSpeech(soundpath):
    """
    Convert the given speech to text.

    :param soundpath: the soundpath that the audio belongs to
    :return: the recognized text, or None when recognition fails
    """
    r = sr.Recognizer()
    with sr.AudioFile(soundpath) as source:
        audio_text = r.listen(source)
        # recognize_google() raises if the API is unreachable or the audio
        # is unintelligible, hence the exception handling.
        try:
            # using google speech recognition
            text = r.recognize_google(audio_text)
            print('Converting audio transcripts into text ...')
            sys.stdout.write(text)
            return text
        except Exception:
            # A bare ``except:`` would also swallow KeyboardInterrupt and
            # SystemExit; catch Exception instead and return None explicitly
            # so callers can detect the failure.
            print('Sorry.. run again...')
            return None
| [
"numpy.abs",
"speech_recognition.AudioFile",
"sounddevice.wait",
"googletrans.Translator",
"numpy.iinfo",
"speech_recognition.Recognizer",
"scipy.io.wavfile.write",
"sys.stdout.write"
] | [((861, 873), 'googletrans.Translator', 'Translator', ([], {}), '()\n', (871, 873), False, 'from googletrans import Translator\n'), ((1298, 1307), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1305, 1307), True, 'import sounddevice as sd\n'), ((1487, 1528), 'scipy.io.wavfile.write', 'write', (['soundpath', 'fs', 'processed_recording'], {}), '(soundpath, fs, processed_recording)\n', (1492, 1528), False, 'from scipy.io.wavfile import write\n'), ((1844, 1859), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (1857, 1859), True, 'import speech_recognition as sr\n'), ((1869, 1892), 'speech_recognition.AudioFile', 'sr.AudioFile', (['soundpath'], {}), '(soundpath)\n', (1881, 1892), True, 'import speech_recognition as sr\n'), ((2241, 2263), 'sys.stdout.write', 'sys.stdout.write', (['text'], {}), '(text)\n', (2257, 2263), False, 'import sys\n'), ((1371, 1389), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1379, 1389), True, 'import numpy as np\n'), ((1438, 1457), 'numpy.abs', 'np.abs', (['myrecording'], {}), '(myrecording)\n', (1444, 1457), True, 'import numpy as np\n')] |
import rospy
from geometry_msgs.msg import WrenchStamped
from std_msgs.msg import Float64
from Interpreter import Interpreter
import numpy as np
class Interpreter_wrenchStamped(Interpreter):
    """Interpreter that maps generic input events onto a ROS
    ``WrenchStamped`` message (three force axes, three torque axes)."""

    def __init__(self, interpreter_info):
        super(Interpreter_wrenchStamped, self).__init__(interpreter_info)
        # Normalized command per axis in [-1, 1]: fx, fy, fz, tx, ty, tz.
        self.cmd.val = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.pub = rospy.Publisher(self._config['topic'], WrenchStamped, queue_size=1)
        self.pub_range_lin = rospy.Publisher("/range_lin",Float64,queue_size=1)

    # Override
    def process_input(self, val, cmd_type):
        """Update the command state from one input event.

        val: tuple of (axis index or ALL, axis value, linear-range delta,
             angular-range delta); entries equal to ``self.NULL`` are ignored.
        cmd_type: ``self.SLIDER`` sets the axis absolutely, ``self.BUTTON``
             steps it via handle_key().
        """
        if val[1] != self.NULL:
            if cmd_type == self.SLIDER:
                self.cmd.val[val[0]] = val[1]
            if cmd_type == self.BUTTON:
                if val[0] == self.ALL:
                    # Apply the key press to every axis at once.
                    for i in range(len(self.cmd.val)):
                        self.handle_key(i, val[1])
                else:
                    self.handle_key(val[0], val[1])
            # Saturation
            self.cmd.val = np.clip(self.cmd.val, -1.0, 1.0)
        if val[2] != self.NULL:
            # Adjust (and clamp) the linear force range, then broadcast it.
            self._config['range_lin'] = np.clip(self._config['range_lin'] + val[2], self._config['range_lin_min'], self._config['range_lin_max'])
            rospy.loginfo('Wrench range_lin is {}'.format(self._config['range_lin']))
            self.pub_range_lin.publish(self._config['range_lin'])
        if val[3] != self.NULL:
            # Adjust (and clamp) the angular torque range.
            self._config['range_ang'] = np.clip(self._config['range_ang'] + val[3], self._config['range_ang_min'], self._config['range_ang_max'])
            rospy.loginfo('Wrench range_ang is {}'.format(self._config['range_ang']))

    def handle_key(self, i, val):
        """Step axis ``i`` according to the button value ``val``."""
        # BACK keyword: flip the sign of the current command (unit magnitude).
        if val == self.BACK and self.cmd.val[i] != 0.0:
            self.cmd.val[i] = max(min(-self.cmd.val[i] / abs(self.cmd.val[i]), 1.0), -1.0)
        # STOP keyword: zero this axis.
        elif val == self.STOP:
            self.cmd.val[i] = 0.0
        # Regular case: step the axis by the configured key increment.
        else:
            self.cmd.val[i] += val * self._config['key_precision']

    def send_msg(self):
        """Publish the current command scaled by the configured ranges."""
        msg = WrenchStamped()
        msg.wrench.force.x = self.cmd.val[0] * self._config['range_lin']
        msg.wrench.force.y = self.cmd.val[1] * self._config['range_lin']
        msg.wrench.force.z = self.cmd.val[2] * self._config['range_lin']
        msg.wrench.torque.x = self.cmd.val[3] * self._config['range_ang']
        msg.wrench.torque.y = self.cmd.val[4] * self._config['range_ang']
        msg.wrench.torque.z = self.cmd.val[5] * self._config['range_ang']
        msg.header.stamp = rospy.get_rostime()
        self.pub.publish(msg)
| [
"numpy.clip",
"rospy.get_rostime",
"rospy.Publisher",
"geometry_msgs.msg.WrenchStamped"
] | [((382, 449), 'rospy.Publisher', 'rospy.Publisher', (["self._config['topic']", 'WrenchStamped'], {'queue_size': '(1)'}), "(self._config['topic'], WrenchStamped, queue_size=1)\n", (397, 449), False, 'import rospy\n'), ((479, 531), 'rospy.Publisher', 'rospy.Publisher', (['"""/range_lin"""', 'Float64'], {'queue_size': '(1)'}), "('/range_lin', Float64, queue_size=1)\n", (494, 531), False, 'import rospy\n'), ((2086, 2101), 'geometry_msgs.msg.WrenchStamped', 'WrenchStamped', ([], {}), '()\n', (2099, 2101), False, 'from geometry_msgs.msg import WrenchStamped\n'), ((2573, 2592), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (2590, 2592), False, 'import rospy\n'), ((1021, 1053), 'numpy.clip', 'np.clip', (['self.cmd.val', '(-1.0)', '(1.0)'], {}), '(self.cmd.val, -1.0, 1.0)\n', (1028, 1053), True, 'import numpy as np\n'), ((1127, 1236), 'numpy.clip', 'np.clip', (["(self._config['range_lin'] + val[2])", "self._config['range_lin_min']", "self._config['range_lin_max']"], {}), "(self._config['range_lin'] + val[2], self._config['range_lin_min'],\n self._config['range_lin_max'])\n", (1134, 1236), True, 'import numpy as np\n'), ((1457, 1566), 'numpy.clip', 'np.clip', (["(self._config['range_ang'] + val[3])", "self._config['range_ang_min']", "self._config['range_ang_max']"], {}), "(self._config['range_ang'] + val[3], self._config['range_ang_min'],\n self._config['range_ang_max'])\n", (1464, 1566), True, 'import numpy as np\n')] |
import gym
from gym import wrappers
import numpy as np
# Run one monitored CartPole episode with random actions.
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, 'random_files', force=True)

for i_episode in range(1):
    observation = env.reset()
    for t in range(100000):
        env.render()
        print(observation)
        # Actions come from numpy's RNG.  The previous
        # ``action = env.action_space.sample()`` call was immediately
        # overwritten (dead assignment) and has been removed.
        action = np.random.randint(0, 2)
        observation, reward, done, info = env.step(action)
        print(f'reward :{reward}')
        print(f'done :{done}')
        print(f'info :{info}')

        if done:
            print("Episode finished after {} timesteps".format(t+1))
            break
env.close()
"gym.wrappers.Monitor",
"numpy.random.randint",
"gym.make"
] | [((61, 84), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (69, 84), False, 'import gym\n'), ((91, 140), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', '"""random_files"""'], {'force': '(True)'}), "(env, 'random_files', force=True)\n", (107, 140), False, 'from gym import wrappers\n'), ((334, 357), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (351, 357), True, 'import numpy as np\n')] |
import json
import random
import numpy as np
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.utils import save_image
from torch.utils.data import Subset, ConcatDataset, DataLoader
from torchvision.transforms import functional as tf
# For reproducibility
# Set before loading model and dataset
seed = 999
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
import util
from dataset import CCPD5000
from model import CCPDModel, CCPDLoss
# Training / validation datasets plus a small fixed visualization subset
# (32 random samples from each split).
train_set = CCPD5000('./data/train/anns.json')
valid_set = CCPD5000('./data/valid/anns.json')
visul_set = ConcatDataset([
    Subset(train_set, random.sample(range(len(train_set)), 32)),
    Subset(valid_set, random.sample(range(len(valid_set)), 32)),
])
train_loader = DataLoader(train_set, 32, shuffle=True, num_workers=1)
valid_loader = DataLoader(valid_set, 32, shuffle=False, num_workers=1)
visul_loader = DataLoader(visul_set, 32, shuffle=False, num_workers=1)
# Model, loss and optimizer all live on the GPU.
device = 'cuda'
model = CCPDModel().to(device)
criterion = CCPDLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# Timestamped log directory for metrics, plots and visualizations.
log_dir = Path('./log/') / f'{datetime.now():%Y.%m.%d-%H:%M:%S}'
log_dir.mkdir(parents=True)
print(log_dir)
# Per-epoch averages, appended by train()/valid() and plotted by log().
history = {
    'train_bce': [],
    'valid_bce': [],
    'train_mse': [],
    'valid_mse': []
}
def train(pbar):
    """Run one training epoch; append the mean BCE/MSE to ``history``."""
    model.train()
    bce_values, mse_values = [], []
    for img_b, lbl_true_b, kpt_true_b in train_loader:
        img_b = img_b.to(device)
        lbl_true_b = lbl_true_b.to(device)
        kpt_true_b = kpt_true_b.to(device)

        # Standard optimization step on the heat-map loss.
        optimizer.zero_grad()
        lbl_pred_b = model(img_b)
        loss = criterion(lbl_pred_b, lbl_true_b)
        loss.backward()
        optimizer.step()

        step_bce = loss.detach().item()
        bce_values.append(step_bce)

        # Keypoint MSE is tracked for monitoring only (not optimized).
        kpt_pred_b = util.peek2d(lbl_pred_b.detach())
        step_mse = F.mse_loss(kpt_pred_b, kpt_true_b).item()
        mse_values.append(step_mse)

        pbar.set_postfix(bce=step_bce, mse=step_mse)
        pbar.update(img_b.size(0))

    avg_bce = sum(bce_values) / len(bce_values)
    avg_mse = sum(mse_values) / len(mse_values)
    pbar.set_postfix(avg_bce=f'{avg_bce:.5f}', avg_mse=f'{avg_mse:.5f}')
    history['train_bce'].append(avg_bce)
    history['train_mse'].append(avg_mse)
def valid(pbar):
    """Evaluate one epoch on the validation set; append the mean BCE/MSE
    to ``history``.  The training loop calls this under torch.no_grad()."""
    model.eval()
    bce_steps = []
    mse_steps = []
    for img_b, lbl_true_b, kpt_true_b in iter(valid_loader):
        img_b = img_b.to(device)
        lbl_true_b = lbl_true_b.to(device)
        kpt_true_b = kpt_true_b.to(device)
        lbl_pred_b = model(img_b)
        loss = criterion(lbl_pred_b, lbl_true_b)
        bce = loss.detach().item()
        bce_steps.append(bce)
        # Keypoint MSE from the predicted heat maps, for monitoring.
        kpt_pred_b = util.peek2d(lbl_pred_b.detach())
        mse = F.mse_loss(kpt_pred_b, kpt_true_b).item()
        mse_steps.append(mse)
        pbar.set_postfix(bce=bce, mse=mse)
        pbar.update(img_b.size(0))
    # Per-epoch averages over batches (note: same logic as train() minus
    # the optimizer step -- a candidate for a shared helper).
    avg_bce = sum(bce_steps) / len(bce_steps)
    avg_mse = sum(mse_steps) / len(mse_steps)
    pbar.set_postfix(avg_bce=f'{avg_bce:.5f}', avg_mse=f'{avg_mse:.5f}')
    history['valid_bce'].append(avg_bce)
    history['valid_mse'].append(avg_mse)
def visul(pbar):
    """Render prediction visualizations for the fixed sample subset into
    ``log_dir/<epoch>/``.  Reads the global ``epoch`` set by the loop."""
    model.eval()
    epoch_dir = log_dir / f'{epoch:03d}'
    epoch_dir.mkdir()
    for img_b, lbl_true_b, kpt_true_b in iter(visul_loader):
        lbl_pred_b = model(img_b.to(device)).cpu()
        kpt_pred_b = util.peek2d(lbl_pred_b)
        for i in range(img_b.size(0)):
            img = tf.to_pil_image(img_b[i])
            lbl_true = lbl_true_b[i]
            lbl_pred = lbl_pred_b[i]
            kpt_true = kpt_true_b[i]
            kpt_pred = kpt_pred_b[i]
            # Overlay: predicted plate region, true keypoints (orange),
            # predicted keypoints (red).
            vis = util.draw_plate(img, kpt_pred)
            vis = util.draw_kpts(vis, kpt_true, c='orange')
            vis = util.draw_kpts(vis, kpt_pred, c='red')
            vis.save(epoch_dir / f'{pbar.n:03d}.vis1.jpg')
            # Grid of true + predicted heat maps for the same sample.
            lbls = torch.cat((lbl_true, lbl_pred), dim=0) # [8, H, W]
            lbls = lbls.unsqueeze(dim=1) # [8, 1, H, W]
            path = epoch_dir / f'{pbar.n:03d}.vis2.jpg'
            save_image(lbls, path, pad_value=1, nrow=4)
            # NOTE(review): pbar.update() advances by 1 per sample, but
            # both .vis files share the same pbar.n prefix before the
            # update -- verify the numbering is as intended.
            pbar.update()
def log(epoch, train_loss, valid_loss):
    """Dump ``history`` to metrics.json and refresh the metrics plot.

    ``train_loss`` / ``valid_loss`` are accepted for interface
    compatibility; the plotted data comes from the global ``history``.
    """
    with (log_dir / 'metrics.json').open('w') as f:
        json.dump(history, f)

    epochs = range(epoch + 1)
    fig, axes = plt.subplots(2, 1, figsize=(10, 10), dpi=100)
    # One subplot per metric: BCE on top, MSE below.
    for axis, (title, key) in zip(axes, (('BCE', 'bce'), ('MSE', 'mse'))):
        axis.set_title(title)
        axis.plot(epochs, history['train_' + key], label='Train')
        axis.plot(epochs, history['valid_' + key], label='Valid')
        axis.legend()
    fig.savefig(log_dir / 'metrics.jpg')
    plt.close()
# Main training loop: train, then evaluate and visualize without gradients.
for epoch in range(20):
    print('Epoch', epoch, flush=True)
    with tqdm(total=len(train_set), desc='  Train') as pbar:
        train_loss = train(pbar)
    with torch.no_grad():
        with tqdm(total=len(valid_set), desc='  Valid') as pbar:
            valid_loss = valid(pbar)
        with tqdm(total=len(visul_set), desc='  Visul') as pbar:
            visul(pbar)
    # NOTE(review): train()/valid() return None, so train_loss/valid_loss
    # are unused placeholders -- log() reads the global ``history`` instead.
    log(epoch, train_loss, valid_loss)
| [
"util.draw_kpts",
"util.draw_plate",
"torchvision.transforms.functional.to_pil_image",
"torchvision.utils.save_image",
"pathlib.Path",
"model.CCPDModel",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"numpy.random.seed",
"util.peek2d",
"torch.nn.functional.mse_loss",
"torch.cat",
... | [((175, 199), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (188, 199), True, 'import matplotlib.pyplot as plt\n'), ((501, 518), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (512, 518), False, 'import random\n'), ((519, 539), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (533, 539), True, 'import numpy as np\n'), ((540, 563), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (557, 563), False, 'import torch\n'), ((699, 733), 'dataset.CCPD5000', 'CCPD5000', (['"""./data/train/anns.json"""'], {}), "('./data/train/anns.json')\n", (707, 733), False, 'from dataset import CCPD5000\n'), ((746, 780), 'dataset.CCPD5000', 'CCPD5000', (['"""./data/valid/anns.json"""'], {}), "('./data/valid/anns.json')\n", (754, 780), False, 'from dataset import CCPD5000\n'), ((957, 1011), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set', '(32)'], {'shuffle': '(True)', 'num_workers': '(1)'}), '(train_set, 32, shuffle=True, num_workers=1)\n', (967, 1011), False, 'from torch.utils.data import Subset, ConcatDataset, DataLoader\n'), ((1027, 1082), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_set', '(32)'], {'shuffle': '(False)', 'num_workers': '(1)'}), '(valid_set, 32, shuffle=False, num_workers=1)\n', (1037, 1082), False, 'from torch.utils.data import Subset, ConcatDataset, DataLoader\n'), ((1098, 1153), 'torch.utils.data.DataLoader', 'DataLoader', (['visul_set', '(32)'], {'shuffle': '(False)', 'num_workers': '(1)'}), '(visul_set, 32, shuffle=False, num_workers=1)\n', (1108, 1153), False, 'from torch.utils.data import Subset, ConcatDataset, DataLoader\n'), ((1305, 1319), 'pathlib.Path', 'Path', (['"""./log/"""'], {}), "('./log/')\n", (1309, 1319), False, 'from pathlib import Path\n'), ((4442, 4487), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(10, 10)', 'dpi': '(100)'}), '(2, 1, figsize=(10, 10), dpi=100)\n', (4454, 4487), True, 'import 
matplotlib.pyplot as plt\n'), ((4905, 4916), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4914, 4916), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1190), 'model.CCPDModel', 'CCPDModel', ([], {}), '()\n', (1188, 1190), False, 'from model import CCPDModel, CCPDLoss\n'), ((1214, 1224), 'model.CCPDLoss', 'CCPDLoss', ([], {}), '()\n', (1222, 1224), False, 'from model import CCPDModel, CCPDLoss\n'), ((3555, 3578), 'util.peek2d', 'util.peek2d', (['lbl_pred_b'], {}), '(lbl_pred_b)\n', (3566, 3578), False, 'import util\n'), ((4405, 4426), 'json.dump', 'json.dump', (['history', 'f'], {}), '(history, f)\n', (4414, 4426), False, 'import json\n'), ((5085, 5100), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5098, 5100), False, 'import torch\n'), ((1325, 1339), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1337, 1339), False, 'from datetime import datetime\n'), ((3637, 3662), 'torchvision.transforms.functional.to_pil_image', 'tf.to_pil_image', (['img_b[i]'], {}), '(img_b[i])\n', (3652, 3662), True, 'from torchvision.transforms import functional as tf\n'), ((3830, 3860), 'util.draw_plate', 'util.draw_plate', (['img', 'kpt_pred'], {}), '(img, kpt_pred)\n', (3845, 3860), False, 'import util\n'), ((3879, 3920), 'util.draw_kpts', 'util.draw_kpts', (['vis', 'kpt_true'], {'c': '"""orange"""'}), "(vis, kpt_true, c='orange')\n", (3893, 3920), False, 'import util\n'), ((3939, 3977), 'util.draw_kpts', 'util.draw_kpts', (['vis', 'kpt_pred'], {'c': '"""red"""'}), "(vis, kpt_pred, c='red')\n", (3953, 3977), False, 'import util\n'), ((4057, 4095), 'torch.cat', 'torch.cat', (['(lbl_true, lbl_pred)'], {'dim': '(0)'}), '((lbl_true, lbl_pred), dim=0)\n', (4066, 4095), False, 'import torch\n'), ((4232, 4275), 'torchvision.utils.save_image', 'save_image', (['lbls', 'path'], {'pad_value': '(1)', 'nrow': '(4)'}), '(lbls, path, pad_value=1, nrow=4)\n', (4242, 4275), False, 'from torchvision.utils import save_image\n'), ((2052, 2086), 
'torch.nn.functional.mse_loss', 'F.mse_loss', (['kpt_pred_b', 'kpt_true_b'], {}), '(kpt_pred_b, kpt_true_b)\n', (2062, 2086), True, 'from torch.nn import functional as F\n'), ((2924, 2958), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['kpt_pred_b', 'kpt_true_b'], {}), '(kpt_pred_b, kpt_true_b)\n', (2934, 2958), True, 'from torch.nn import functional as F\n')] |
import numpy as np
import os
dirname = os.path.dirname(__file__)
class PidController(object):
    """Cascaded PID-style attitude and altitude controllers.

    Every public method returns a normalized actuator command clipped to
    [-1, 1].  ``error_gammas_rad`` accumulates the integral term of the
    flight-path-angle loop on a per-instance basis.
    """

    MAX_TURN_RATE_DEG = 3.0
    error_gammas_rad = 0

    def elevator_hold(self, pitch_angle_reference, pitch_angle_current, pitch_angle_rate_current):
        """PD controller that holds a pitch angle via the elevator."""
        kp, kd = -8, -0.5
        pitch_error = pitch_angle_reference - pitch_angle_current
        command = pitch_error * kp - pitch_angle_rate_current * kd
        return np.clip(command, -1, 1)

    def bank_angle_hold(self, roll_angle_reference, roll_angle_current, roll_angle_rate) -> float:
        """PD controller that holds a bank angle; reference limited to +/-20 deg."""
        limited_reference = np.clip(roll_angle_reference, np.deg2rad(-20), np.deg2rad(20))
        kp, kd = 5, 0.1
        roll_error = limited_reference - roll_angle_current
        return np.clip(roll_error * kp - roll_angle_rate * kd, -1.0, 1.0) * 1.0

    def heading_hold(self, heading_reference_deg: float, heading_current_deg: float, roll_angle_current_rad: float, roll_angle_rate: float,
                     true_air_speed: float) -> float:
        """Hold a heading by commanding a bank angle for a limited turn rate."""
        # Wrap the heading error into [-180, 180) degrees.
        heading_error_deg = (heading_reference_deg - heading_current_deg) % 360
        if heading_error_deg >= 180:
            heading_error_deg = heading_error_deg - 360
        # Proportional gain 0.09 trades turn speed against overshoot; the
        # resulting turn rate is saturated at the class-wide maximum.
        turn_rate_dps = np.clip(heading_error_deg * 0.09,
                                -PidController.MAX_TURN_RATE_DEG,
                                PidController.MAX_TURN_RATE_DEG)
        # Coordinated-turn relation: bank angle ~ turn_rate * TAS / g.
        bank_command_deg = turn_rate_dps * true_air_speed / 9.81
        return self.bank_angle_hold(np.deg2rad(bank_command_deg),
                                    roll_angle_current_rad,
                                    roll_angle_rate)

    def flight_path_angle_hold(self, gamma_reference_rad, pitch_rad, alpha_rad, q_radps, roll_rad, r_radps):
        """PI controller on the flight path angle gamma = pitch - alpha."""
        kp, ki = -0.2, -0.3
        gamma_deg = np.degrees(pitch_rad - alpha_rad)
        # Scaled tracking error, converted back to radians.
        error_gamma_rad = np.radians((np.degrees(gamma_reference_rad) - gamma_deg) * 0.4)
        # Rate feedback: pitch/yaw rates projected through the bank angle.
        error = error_gamma_rad - (q_radps * np.cos(roll_rad) - r_radps * np.sin(roll_rad))
        self.error_gammas_rad += error  # integral accumulator
        return np.clip(kp * error + ki * self.error_gammas_rad, -1, 1)

    def vertical_speed_hold(self, speed_reference, ground_speed, pitch_rad, alpha_rad, q_radps, roll_rad, r_radps):
        """Hold a vertical speed by tracking the equivalent flight path angle."""
        return self.flight_path_angle_hold(
            gamma_reference_rad=speed_reference / ground_speed,
            pitch_rad=pitch_rad,
            alpha_rad=alpha_rad,
            q_radps=q_radps,
            roll_rad=roll_rad,
            r_radps=r_radps)

    def altitude_hold(self, altitude_reference_ft, altitude_ft, ground_speed, pitch_rad, alpha_rad, q_radps, roll_rad, r_radps):
        """Hold an altitude by commanding a clamped vertical speed."""
        # Gain 0.0833333 maps altitude error to a vertical-speed command,
        # clamped to +/-5.08 (presumably m/s, i.e. 1000 ft/min -- confirm units).
        commanded_vs = np.clip((altitude_reference_ft - altitude_ft) * 0.0833333, -5.08, 5.08)
        return self.vertical_speed_hold(
            speed_reference=commanded_vs,
            ground_speed=ground_speed,
            pitch_rad=pitch_rad,
            alpha_rad=alpha_rad,
            q_radps=q_radps,
            roll_rad=roll_rad,
            r_radps=r_radps)
"numpy.clip",
"numpy.radians",
"os.path.dirname",
"numpy.deg2rad",
"numpy.cos",
"numpy.sin",
"numpy.degrees"
] | [((40, 65), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (55, 65), False, 'import os\n'), ((419, 477), 'numpy.clip', 'np.clip', (['(error_pitch_angle * p_e - derivative * d_e)', '(-1)', '(1)'], {}), '(error_pitch_angle * p_e - derivative * d_e, -1, 1)\n', (426, 477), True, 'import numpy as np\n'), ((2136, 2169), 'numpy.degrees', 'np.degrees', (['(pitch_rad - alpha_rad)'], {}), '(pitch_rad - alpha_rad)\n', (2146, 2169), True, 'import numpy as np\n'), ((2275, 2302), 'numpy.radians', 'np.radians', (['error_gamma_deg'], {}), '(error_gamma_deg)\n', (2285, 2302), True, 'import numpy as np\n'), ((2522, 2575), 'numpy.clip', 'np.clip', (['(p * error + i * self.error_gammas_rad)', '(-1)', '(1)'], {}), '(p * error + i * self.error_gammas_rad, -1, 1)\n', (2529, 2575), True, 'import numpy as np\n'), ((3320, 3344), 'numpy.clip', 'np.clip', (['vs', '(-5.08)', '(5.08)'], {}), '(vs, -5.08, 5.08)\n', (3327, 3344), True, 'import numpy as np\n'), ((639, 654), 'numpy.deg2rad', 'np.deg2rad', (['(-20)'], {}), '(-20)\n', (649, 654), True, 'import numpy as np\n'), ((656, 670), 'numpy.deg2rad', 'np.deg2rad', (['(20)'], {}), '(20)\n', (666, 670), True, 'import numpy as np\n'), ((786, 846), 'numpy.clip', 'np.clip', (['(diff_rollAngle * p - roll_angle_rate * d)', '(-1.0)', '(1.0)'], {}), '(diff_rollAngle * p - roll_angle_rate * d, -1.0, 1.0)\n', (793, 846), True, 'import numpy as np\n'), ((1592, 1682), 'numpy.clip', 'np.clip', (['turn_rate', '(-PidController.MAX_TURN_RATE_DEG)', 'PidController.MAX_TURN_RATE_DEG'], {}), '(turn_rate, -PidController.MAX_TURN_RATE_DEG, PidController.\n MAX_TURN_RATE_DEG)\n', (1599, 1682), True, 'import numpy as np\n'), ((1836, 1866), 'numpy.deg2rad', 'np.deg2rad', (['roll_angle_command'], {}), '(roll_angle_command)\n', (1846, 1866), True, 'import numpy as np\n'), ((2197, 2228), 'numpy.degrees', 'np.degrees', (['gamma_reference_rad'], {}), '(gamma_reference_rad)\n', (2207, 2228), True, 'import numpy as np\n'), ((2420, 2436), 
'numpy.cos', 'np.cos', (['roll_rad'], {}), '(roll_rad)\n', (2426, 2436), True, 'import numpy as np\n'), ((2449, 2465), 'numpy.sin', 'np.sin', (['roll_rad'], {}), '(roll_rad)\n', (2455, 2465), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def calcY_HM(g, t):
    """Vertical displacement of free fall after time ``t``.

    The previous expression ``(g * t**2) / g`` cancelled gravity out and
    simply returned ``t**2``; the standard kinematics formula for distance
    fallen from rest is ``g * t**2 / 2``.  The unused local ``v = g*t``
    was removed.

    :param g: gravitational acceleration (m/s^2)
    :param t: elapsed time (s)
    :return: distance fallen (m)
    """
    return g * t ** 2 / 2
def calcX_HM(v, t):
    """Horizontal distance covered at constant speed ``v`` after time ``t``."""
    return v * t
def plotGraph(time, XY):
    """Plot the trajectory and save it to ``test.png``.

    The unused local ``t = np.arange(0.0, time, 0.125)`` from the original
    version was removed.

    :param time: total simulated time (kept for interface compatibility)
    :param XY: two-element sequence ``[X, Y]`` of coordinate lists
    """
    fig, ax = plt.subplots()
    ax.plot(XY[0], XY[1])
    ax.set(xlabel='X', ylabel='Y', title='horizontal movement')
    ax.grid()
    fig.savefig("test.png")
    plt.show()
def main():
    """Simulate a simple projectile and plot its trajectory once.

    The previous version ran ``while running: plotGraph(...)`` forever
    (``running`` was never cleared), re-saving and re-showing the same
    figure; the plot is now drawn a single time.
    """
    time = 1  # input("time: ")
    velocity = 10  # input("velocity: ")
    g = 9.8  # gravitational acceleration (m/s^2)
    # Sample the trajectory every 0.125 s; Y is negated so the fall points
    # downward on the plot.
    X = [calcX_HM(velocity, i) for i in np.arange(0, time, 0.125)]
    Y = [-calcY_HM(g, i) for i in np.arange(0, time, 0.125)]
    allPoints = [X, Y]
    print(allPoints)
    plotGraph(time, allPoints)

if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((201, 228), 'numpy.arange', 'np.arange', (['(0.0)', 'time', '(0.125)'], {}), '(0.0, time, 0.125)\n', (210, 228), True, 'import numpy as np\n'), ((243, 257), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (255, 257), True, 'import matplotlib.pyplot as plt\n'), ((395, 405), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (403, 405), True, 'import matplotlib.pyplot as plt\n'), ((582, 607), 'numpy.arange', 'np.arange', (['(0)', 'time', '(0.125)'], {}), '(0, time, 0.125)\n', (591, 607), True, 'import numpy as np\n'), ((686, 711), 'numpy.arange', 'np.arange', (['(0)', 'time', '(0.125)'], {}), '(0, time, 0.125)\n', (695, 711), True, 'import numpy as np\n')] |
import os
import operator
import hashlib
import sys
import random
import requests
import randomcolor
import numpy
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from scipy.stats import gaussian_kde
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def getcolor(s):
    """Return a color that is deterministic for the given string ``s``."""
    digest = hashlib.sha256(s.encode('utf-8')).hexdigest()
    # Seed the global RNG so randomcolor picks the same color every time
    # this string is seen.
    random.seed(int(digest, 16) % 10 ** 8)
    return randomcolor.RandomColor().generate()[0]
def plot_data(url,path_to_data):
    """Fetch sensor readings from ``url`` and save one signal-density plot
    per location into ``path_to_data`` (<location>.png)."""
    r = requests.get(url)
    if 'data' not in r.json():
        raise Exception("problem getting url")
    # locationSensors[location][sensor_id] -> list of readings.
    locationSensors = {}
    for d in r.json()['data']:
        # Skip entries without a location label.
        if 'l' not in d or d['l'] == '':
            continue
        loc = d['l']
        if loc not in locationSensors:
            locationSensors[loc] = {}
        for s in d['s']:
            for mac in d['s'][s]:
                sensorName = s+'-'+mac
                if sensorName not in locationSensors[loc]:
                    locationSensors[loc][sensorName] = []
                locationSensors[loc][sensorName].append(d['s'][s][mac])

    # find largest variance
    sensorIndex = []
    locationIndex = []
    for location in locationSensors:
        locationIndex.append(location)
        for sensorID in locationSensors[location]:
            if sensorID not in sensorIndex:
                sensorIndex.append(sensorID)

    # NOTE(review): num_locations/num_sensors are computed but never used.
    num_locations = len(locationIndex)
    num_sensors = len(sensorIndex)

    # X[j, i] = median reading of sensor j at location i (0 = no data).
    X = numpy.zeros([len(sensorIndex),len(locationSensors)])
    for i,location in enumerate(locationIndex):
        for j,sensorID in enumerate(sensorIndex):
            if sensorID not in locationSensors[location]:
                continue
            X[j,i] = numpy.median((locationSensors[location][sensorID]))

    # Variance of each sensor's medians across locations (zeros excluded).
    varianceOfSensorID = {}
    for i,row in enumerate(X):
        data = []
        for v in row:
            if v == 0:
                continue
            data.append(v)
        varianceOfSensorID[sensorIndex[i]] = numpy.var(data)

    # collect sensor ids that are most meaningful
    sensorIDs = []
    for i, data in enumerate(
            sorted(varianceOfSensorID.items(), key=operator.itemgetter(1),reverse=True)):
        if data[1] == 0:
            continue
        sensorIDs.append(data[0])
        if len(sensorIDs) == 10:
            break

    bins = numpy.linspace(-100, 0, 100)
    for location in locationSensors:
        pyplot.figure(figsize=(10,4))
        for sensorID in sensorIDs:
            if sensorID not in locationSensors[location]:
                continue
            # KDE can fail (e.g. too few samples); skip that sensor then.
            try:
                density = gaussian_kde(locationSensors[location][sensorID])
            except Exception as e:
                continue
            # Fixed smoothing bandwidth for the density estimate.
            density.covariance_factor = lambda : .5
            density._compute_covariance()
            pyplot.fill(bins,density(bins),alpha=0.2,label=sensorID,facecolor=getcolor(sensorID))
            # pyplot.hist(
            #     locationSensors[location][sensorID],
            #     bins,
            #     alpha=0.5,
            #     label=sensorID)
            # NOTE(review): ``i`` below is a stale leftover from the earlier
            # enumerate loop, so this break almost never fires -- likely
            # refactoring residue; verify intent before removing.
            if i == 10:
                break
        pyplot.title(location)
        pyplot.legend(loc='upper right')
        pyplot.savefig(os.path.join(path_to_data,location + ".png"))
        pyplot.close()
| [
"numpy.median",
"scipy.stats.gaussian_kde",
"matplotlib.use",
"matplotlib.pyplot.legend",
"os.path.join",
"requests.get",
"operator.itemgetter",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"randomcolor.RandomColor",
"numpy.var"
] | [((133, 154), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (147, 154), False, 'import matplotlib\n'), ((500, 517), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (512, 517), False, 'import requests\n'), ((2350, 2378), 'numpy.linspace', 'numpy.linspace', (['(-100)', '(0)', '(100)'], {}), '(-100, 0, 100)\n', (2364, 2378), False, 'import numpy\n'), ((2000, 2015), 'numpy.var', 'numpy.var', (['data'], {}), '(data)\n', (2009, 2015), False, 'import numpy\n'), ((2424, 2454), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (2437, 2454), False, 'from matplotlib import pyplot\n'), ((3141, 3163), 'matplotlib.pyplot.title', 'pyplot.title', (['location'], {}), '(location)\n', (3153, 3163), False, 'from matplotlib import pyplot\n'), ((3172, 3204), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3185, 3204), False, 'from matplotlib import pyplot\n'), ((3282, 3296), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (3294, 3296), False, 'from matplotlib import pyplot\n'), ((1727, 1776), 'numpy.median', 'numpy.median', (['locationSensors[location][sensorID]'], {}), '(locationSensors[location][sensorID])\n', (1739, 1776), False, 'import numpy\n'), ((3228, 3273), 'os.path.join', 'os.path.join', (['path_to_data', "(location + '.png')"], {}), "(path_to_data, location + '.png')\n", (3240, 3273), False, 'import os\n'), ((418, 443), 'randomcolor.RandomColor', 'randomcolor.RandomColor', ([], {}), '()\n', (441, 443), False, 'import randomcolor\n'), ((2167, 2189), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2186, 2189), False, 'import operator\n'), ((2616, 2665), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['locationSensors[location][sensorID]'], {}), '(locationSensors[location][sensorID])\n', (2628, 2665), False, 'from scipy.stats import gaussian_kde\n')] |
"""
Created on June 6th, 2020
@author: itailang
"""
# import system modules
import os
import os.path as osp
import sys
import argparse
import numpy as np
# add paths
parent_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
if parent_dir not in sys.path:
sys.path.append(parent_dir)
# import modules
from src.autoencoder import Configuration as Conf
from src.adv_ae import AdvAE
from src.adversary_utils import load_data, prepare_data_for_attack
from src.in_out import create_dir
from src.general_utils import plot_3d_point_cloud
from src.tf_utils import reset_tf_graph
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate for the attack [default: 0.01]')
parser.add_argument('--loss_dist_type', type=str, default='chamfer', help='Type of distance regularization loss [default: chamfer]')
parser.add_argument('--loss_adv_type', type=str, default='chamfer', help='Type of adversarial loss [default: chamfer]')
parser.add_argument('--dist_weight_list', nargs='+', default=[1.0], help='List of possible weights for distance regularization loss')
parser.add_argument('--max_point_pert_weight', type=float, default=0.0, help='Weight for maximal point perturbation loss [default: [0.0]]')
parser.add_argument('--max_point_dist_weight', type=float, default=0.0, help='Weight for maximal point nearest neighbor distance loss [default: [0.0]]')
parser.add_argument('--num_iterations', type=int, default=500, help='Number of iterations per dist_weight [default: 500]')
parser.add_argument('--num_iterations_thresh', type=int, default=400, help='Number of iterations threshold for cheking the update of output data [default: 400]')
parser.add_argument('--batch_size', type=int, default=10, help='Batch size for attack [default: 10]')
parser.add_argument('--ae_folder', type=str, default='log/autoencoder_victim', help='Folder for loading a trained autoencoder model [default: log/autoencoder_victim]')
parser.add_argument('--restore_epoch', type=int, default=500, help='Restore epoch of a trained autoencoder [default: 500]')
parser.add_argument("--attack_pc_idx", type=str, default='log/autoencoder_victim/eval/sel_idx_rand_100_test_set_13l.npy', help="List of indices of point clouds for the attack")
parser.add_argument("--target_pc_idx_type", type=str, default='chamfer_nn_complete', help="Type of target pc index (Chamfer or latent nearest neighbors) [default: chamfer_nn_complete]")
parser.add_argument("--num_pc_for_attack", type=int, default=25, help='Number of point clouds for attack (per shape class) [default: 25]')
parser.add_argument("--num_pc_for_target", type=int, default=5, help='Number of candidate point clouds for target (per point cloud for attack) [default: 5]')
parser.add_argument("--correct_pred_only", type=int, default=0, help='1: Use targets with corret predicted label only, 0: do not restrict targets according to predicted label [default: 0]')
parser.add_argument("--output_folder_name", type=str, default='attack_res', help="Output folder name")
flags = parser.parse_args()
print('Run attack flags:', flags)
assert flags.loss_dist_type in ['pert', 'chamfer'], 'wrong loss_dist_type: %s' % flags.loss_dist_type
assert flags.loss_adv_type in ['latent', 'chamfer'], 'wrong loss_adv_type: %s' % flags.loss_adv_type
assert flags.num_iterations_thresh <= flags.num_iterations, 'num_iterations_thresh (%d) should be smaller or equal to num_iterations (%d)' % (flags.num_iterations_thresh, flags.num_iterations)
assert flags.target_pc_idx_type in ['latent_nn', 'chamfer_nn_complete'], 'wrong target_pc_idx_type: %s' % flags.target_pc_idx_type
# define basic parameters
top_out_dir = osp.dirname(osp.dirname(osp.abspath(__file__))) # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))]
output_path = create_dir(osp.join(data_path, flags.output_folder_name))
# load data
point_clouds, latent_vectors, pc_classes, slice_idx, ae_loss = \
load_data(data_path, files, ['point_clouds_test_set', 'latent_vectors_test_set', 'pc_classes', 'slice_idx_test_set', 'ae_loss_test_set'])
assert np.all(ae_loss > 0), 'Note: not all autoencoder loss values are larger than 0 as they should!'
nn_idx_dict = {'latent_nn': 'latent_nn_idx_test_set', 'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'}
nn_idx = load_data(data_path, files, [nn_idx_dict[flags.target_pc_idx_type]])
correct_pred = None
if flags.correct_pred_only:
pc_labels, pc_pred_labels = load_data(data_path, files, ['pc_label_test_set', 'pc_pred_labels_test_set'])
correct_pred = (pc_labels == pc_pred_labels)
# load indices for attack
attack_pc_idx = np.load(osp.join(top_out_dir, flags.attack_pc_idx))
attack_pc_idx = attack_pc_idx[:, :flags.num_pc_for_attack]
# load autoencoder configuration
ae_dir = osp.join(top_out_dir, flags.ae_folder)
conf = Conf.load(osp.join(ae_dir, 'configuration'))
# update autoencoder configuration
conf.ae_dir = ae_dir
conf.ae_name = 'autoencoder'
conf.ae_restore_epoch = flags.restore_epoch
conf.encoder_args['return_layer_before_symmetry'] = False
conf.encoder_args['b_norm_decay'] = 1. # for avoiding the update of batch normalization moving_mean and moving_variance parameters
conf.decoder_args['b_norm_decay'] = 1. # for avoiding the update of batch normalization moving_mean and moving_variance parameters
conf.decoder_args['b_norm_decay_finish'] = 1. # for avoiding the update of batch normalization moving_mean and moving_variance parameters
# attack configuration
conf.experiment_name = 'adversary'
conf.batch_size = flags.batch_size
conf.learning_rate = flags.learning_rate
conf.loss_dist_type = flags.loss_dist_type
conf.loss_adv_type = flags.loss_adv_type
conf.dist_weight_list = [float(w) for w in flags.dist_weight_list]
conf.max_point_pert_weight = flags.max_point_pert_weight
conf.max_point_dist_weight = flags.max_point_dist_weight
conf.target_pc_idx_type = flags.target_pc_idx_type
conf.num_pc_for_attack = flags.num_pc_for_attack
conf.num_pc_for_target = flags.num_pc_for_target
conf.correct_pred_only = flags.correct_pred_only
conf.num_iterations = flags.num_iterations
conf.num_iterations_thresh = flags.num_iterations_thresh
conf.train_dir = output_path
conf.save(osp.join(conf.train_dir, 'attack_configuration'))
classes_for_attack = conf.class_names
classes_for_target = conf.class_names
# Attack the AE model
for i in range(len(pc_classes)):
pc_class_name = pc_classes[i]
if pc_class_name not in classes_for_attack:
continue
# Build Adversary and AE model
reset_tf_graph()
ae = AdvAE(conf.experiment_name, conf)
save_dir = create_dir(osp.join(conf.train_dir, pc_class_name))
# prepare data for attack
source_pc, target_pc = prepare_data_for_attack(pc_classes, [pc_class_name], classes_for_target, point_clouds, slice_idx, attack_pc_idx, flags.num_pc_for_target, nn_idx, correct_pred)
_, target_latent = prepare_data_for_attack(pc_classes, [pc_class_name], classes_for_target, latent_vectors, slice_idx, attack_pc_idx, flags.num_pc_for_target, nn_idx, correct_pred)
_, target_ae_loss_ref = prepare_data_for_attack(pc_classes, [pc_class_name], classes_for_target, ae_loss, slice_idx, attack_pc_idx, flags.num_pc_for_target, nn_idx, correct_pred)
target_ae_loss_ref = target_ae_loss_ref.reshape(-1)
buf_size = 1 # Make 'training_stats' file to flush each output line regarding training.
fout = open(osp.join(save_dir, 'attack_stats.txt'), 'a', buf_size)
fout.write('Train flags: %s\n' % flags)
adversarial_metrics, adversarial_pc_input, adversarial_pc_recon =\
ae.attack(source_pc, target_latent, target_pc, target_ae_loss_ref, conf, log_file=fout)
fout.close()
# save results
np.save(osp.join(save_dir, 'adversarial_metrics'), adversarial_metrics)
np.save(osp.join(save_dir, 'adversarial_pc_input'), adversarial_pc_input)
np.save(osp.join(save_dir, 'adversarial_pc_recon'), adversarial_pc_recon)
np.save(osp.join(save_dir, 'dist_weight'), np.array(conf.dist_weight_list))
| [
"src.adversary_utils.load_data",
"os.listdir",
"argparse.ArgumentParser",
"src.adv_ae.AdvAE",
"os.path.join",
"numpy.array",
"src.tf_utils.reset_tf_graph",
"os.path.abspath",
"numpy.all",
"sys.path.append",
"src.adversary_utils.prepare_data_for_attack"
] | [((615, 640), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (638, 640), False, 'import argparse\n'), ((3807, 3853), 'os.path.join', 'osp.join', (['top_out_dir', 'flags.ae_folder', '"""eval"""'], {}), "(top_out_dir, flags.ae_folder, 'eval')\n", (3815, 3853), True, 'import os.path as osp\n'), ((4090, 4235), 'src.adversary_utils.load_data', 'load_data', (['data_path', 'files', "['point_clouds_test_set', 'latent_vectors_test_set', 'pc_classes',\n 'slice_idx_test_set', 'ae_loss_test_set']"], {}), "(data_path, files, ['point_clouds_test_set',\n 'latent_vectors_test_set', 'pc_classes', 'slice_idx_test_set',\n 'ae_loss_test_set'])\n", (4099, 4235), False, 'from src.adversary_utils import load_data, prepare_data_for_attack\n'), ((4236, 4255), 'numpy.all', 'np.all', (['(ae_loss > 0)'], {}), '(ae_loss > 0)\n', (4242, 4255), True, 'import numpy as np\n'), ((4454, 4522), 'src.adversary_utils.load_data', 'load_data', (['data_path', 'files', '[nn_idx_dict[flags.target_pc_idx_type]]'], {}), '(data_path, files, [nn_idx_dict[flags.target_pc_idx_type]])\n', (4463, 4522), False, 'from src.adversary_utils import load_data, prepare_data_for_attack\n'), ((4928, 4966), 'os.path.join', 'osp.join', (['top_out_dir', 'flags.ae_folder'], {}), '(top_out_dir, flags.ae_folder)\n', (4936, 4966), True, 'import os.path as osp\n'), ((264, 291), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (279, 291), False, 'import sys\n'), ((3961, 4006), 'os.path.join', 'osp.join', (['data_path', 'flags.output_folder_name'], {}), '(data_path, flags.output_folder_name)\n', (3969, 4006), True, 'import os.path as osp\n'), ((4604, 4681), 'src.adversary_utils.load_data', 'load_data', (['data_path', 'files', "['pc_label_test_set', 'pc_pred_labels_test_set']"], {}), "(data_path, files, ['pc_label_test_set', 'pc_pred_labels_test_set'])\n", (4613, 4681), False, 'from src.adversary_utils import load_data, prepare_data_for_attack\n'), ((4782, 4824), 'os.path.join', 
'osp.join', (['top_out_dir', 'flags.attack_pc_idx'], {}), '(top_out_dir, flags.attack_pc_idx)\n', (4790, 4824), True, 'import os.path as osp\n'), ((4984, 5017), 'os.path.join', 'osp.join', (['ae_dir', '"""configuration"""'], {}), "(ae_dir, 'configuration')\n", (4992, 5017), True, 'import os.path as osp\n'), ((6365, 6413), 'os.path.join', 'osp.join', (['conf.train_dir', '"""attack_configuration"""'], {}), "(conf.train_dir, 'attack_configuration')\n", (6373, 6413), True, 'import os.path as osp\n'), ((6687, 6703), 'src.tf_utils.reset_tf_graph', 'reset_tf_graph', ([], {}), '()\n', (6701, 6703), False, 'from src.tf_utils import reset_tf_graph\n'), ((6713, 6746), 'src.adv_ae.AdvAE', 'AdvAE', (['conf.experiment_name', 'conf'], {}), '(conf.experiment_name, conf)\n', (6718, 6746), False, 'from src.adv_ae import AdvAE\n'), ((6873, 7040), 'src.adversary_utils.prepare_data_for_attack', 'prepare_data_for_attack', (['pc_classes', '[pc_class_name]', 'classes_for_target', 'point_clouds', 'slice_idx', 'attack_pc_idx', 'flags.num_pc_for_target', 'nn_idx', 'correct_pred'], {}), '(pc_classes, [pc_class_name], classes_for_target,\n point_clouds, slice_idx, attack_pc_idx, flags.num_pc_for_target, nn_idx,\n correct_pred)\n', (6896, 7040), False, 'from src.adversary_utils import load_data, prepare_data_for_attack\n'), ((7056, 7225), 'src.adversary_utils.prepare_data_for_attack', 'prepare_data_for_attack', (['pc_classes', '[pc_class_name]', 'classes_for_target', 'latent_vectors', 'slice_idx', 'attack_pc_idx', 'flags.num_pc_for_target', 'nn_idx', 'correct_pred'], {}), '(pc_classes, [pc_class_name], classes_for_target,\n latent_vectors, slice_idx, attack_pc_idx, flags.num_pc_for_target,\n nn_idx, correct_pred)\n', (7079, 7225), False, 'from src.adversary_utils import load_data, prepare_data_for_attack\n'), ((7246, 7408), 'src.adversary_utils.prepare_data_for_attack', 'prepare_data_for_attack', (['pc_classes', '[pc_class_name]', 'classes_for_target', 'ae_loss', 'slice_idx', 'attack_pc_idx', 
'flags.num_pc_for_target', 'nn_idx', 'correct_pred'], {}), '(pc_classes, [pc_class_name], classes_for_target,\n ae_loss, slice_idx, attack_pc_idx, flags.num_pc_for_target, nn_idx,\n correct_pred)\n', (7269, 7408), False, 'from src.adversary_utils import load_data, prepare_data_for_attack\n'), ((205, 226), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (216, 226), True, 'import os.path as osp\n'), ((3727, 3748), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (3738, 3748), True, 'import os.path as osp\n'), ((3874, 3895), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (3884, 3895), False, 'import os\n'), ((6774, 6813), 'os.path.join', 'osp.join', (['conf.train_dir', 'pc_class_name'], {}), '(conf.train_dir, pc_class_name)\n', (6782, 6813), True, 'import os.path as osp\n'), ((7568, 7606), 'os.path.join', 'osp.join', (['save_dir', '"""attack_stats.txt"""'], {}), "(save_dir, 'attack_stats.txt')\n", (7576, 7606), True, 'import os.path as osp\n'), ((7883, 7924), 'os.path.join', 'osp.join', (['save_dir', '"""adversarial_metrics"""'], {}), "(save_dir, 'adversarial_metrics')\n", (7891, 7924), True, 'import os.path as osp\n'), ((7959, 8001), 'os.path.join', 'osp.join', (['save_dir', '"""adversarial_pc_input"""'], {}), "(save_dir, 'adversarial_pc_input')\n", (7967, 8001), True, 'import os.path as osp\n'), ((8037, 8079), 'os.path.join', 'osp.join', (['save_dir', '"""adversarial_pc_recon"""'], {}), "(save_dir, 'adversarial_pc_recon')\n", (8045, 8079), True, 'import os.path as osp\n'), ((8115, 8148), 'os.path.join', 'osp.join', (['save_dir', '"""dist_weight"""'], {}), "(save_dir, 'dist_weight')\n", (8123, 8148), True, 'import os.path as osp\n'), ((8150, 8181), 'numpy.array', 'np.array', (['conf.dist_weight_list'], {}), '(conf.dist_weight_list)\n', (8158, 8181), True, 'import numpy as np\n'), ((3910, 3932), 'os.path.join', 'osp.join', (['data_path', 'f'], {}), '(data_path, f)\n', (3918, 3932), True, 'import os.path as 
osp\n')] |
"""
Generate plots of model averaged convergence diagnostics.
"""
# License: MIT
from __future__ import absolute_import, division
import argparse
import glob
import os
import re
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
import reanalysis_dbns.models as rdm
# Reference period encoded into the fit output filenames.
BASE_PERIOD = [pd.Timestamp('1979-01-01'), pd.Timestamp('2001-12-30')]

# Mapping from internal index codes to the display names used in plot labels
# (values may contain LaTeX markup rendered by matplotlib).
INDEX_NAMES = {
    'AO': 'AO',
    'NHTELE2': 'AR',
    'DMI': 'DMI',
    'MEI': 'MEI',
    'NHTELE4': 'NAO$^-$',
    'NHTELE1': 'NAO$^+$',
    'PNA': 'PNA',
    'PSA1': 'PSA1',
    'PSA2': 'PSA2',
    'RMM1': 'RMM1',
    'RMM2': 'RMM2',
    'SAM': 'SAM',
    'NHTELE3': 'SCAND'
}
def get_model_fit_pattern(outcome=None, a_tau=1.0, b_tau=1.0, nu_sq=20.0,
                          max_terms=10, max_lag=6, season='ALL',
                          frequency='monthly',
                          base_period=BASE_PERIOD):
    """Build the regex matching posterior-sample filenames for one fit.

    The outcome name is captured as group 1; an optional restart suffix
    is tolerated at the end of the filename.
    """
    start_str = base_period[0].strftime('%Y%m%d')
    end_str = base_period[1].strftime('%Y%m%d')
    # An explicit outcome is matched literally; otherwise any name matches.
    outcome_re = '[A-Za-z0-9]+' if outcome is None else outcome
    prefix = r'.+\.{}_{}\.{}\.{}'.format(start_str, end_str,
                                          season, frequency)
    parts = ['stepwise_bayes_regression',
             'max_lag-{:d}'.format(max_lag),
             'max_terms-{:d}'.format(max_terms),
             'a_tau-{:.3f}'.format(a_tau),
             'b_tau-{:.3f}'.format(b_tau),
             'nu_sq-{:.3f}'.format(nu_sq),
             'thin-[0-9]+', '(' + outcome_re + ')',
             'posterior_samples']
    return prefix + r'\.' + r'\.'.join(parts) + r'(\.restart-[0-9]+)?' + r'\.nc'
def get_fit_output_files(models_dir, model_pattern):
    """Collect fit output files under models_dir, grouped by outcome name.

    The outcome is taken from capture group 1 of model_pattern; files that
    do not match are ignored. Each group's file list is returned sorted.
    """
    pattern = re.compile(model_pattern)
    matching_files = {}
    for f in sorted(glob.glob(os.path.join(models_dir, '*'))):
        match = pattern.search(f)
        if match:
            matching_files.setdefault(match[1], []).append(f)
    # Return the groups with their file lists in sorted order.
    return {outcome: sorted(paths)
            for outcome, paths in matching_files.items()}
def get_variables_and_lags(fit):
    """Return the unique predictor names and lags present in a fit.

    Predictors and lags are decoded from the indicator variables named
    ``i_<var>_lag_<lag>`` in the posterior; variables are ordered by their
    display names and lags in ascending order.
    """
    indicator_pattern = 'i_([a-zA-Z0-9]+)_lag_([0-9]+)'
    summary = az.summary(fit.posterior, var_names=indicator_pattern,
                        filter_vars='regex')
    # Decode variable name and lag from each indicator's summary row label.
    matches = [re.match(indicator_pattern, row) for row in summary.index]
    indicator_vars = np.array([m[1] for m in matches])
    indicator_lags = np.array([int(m[2]) for m in matches])
    # Sort variables by their plot display names for consistent ordering.
    unique_vars = np.array(sorted(np.unique(indicator_vars),
                           key=lambda v: INDEX_NAMES[v]))
    unique_lags = np.unique(indicator_lags)
    return unique_vars, unique_lags
def calculate_edge_probability(fit, batch_size=None):
    """Calculate edge posterior probabilities.

    Recomputes the probability estimate of every (variable, lag) edge on
    successively longer prefixes of the sampled draws, so that convergence
    of the estimates can be plotted against the number of draws.

    Parameters
    ----------
    fit :
        Fit object with ``warmup_posterior`` and ``posterior`` groups
        containing indicator variables named ``i_<var>_lag_<lag>``.
    batch_size : int, optional
        Increment (in kept draws) between successive estimates; defaults
        to one twentieth of the kept draws.

    Returns
    -------
    batches : numpy.ndarray
        Total number of draws (including discarded warmup) used for each
        successive estimate.
    post_probs : dict
        Maps each variable name to an (n_lags, n_batches) array of
        estimated probabilities.
    """
    unique_vars, unique_lags = get_variables_and_lags(fit)
    n_lags = unique_lags.shape[0]
    # Concatenate warmup and kept draws so prefixes of increasing length
    # can be sliced out of the full chain.
    samples = xr.concat(
        [fit.warmup_posterior, fit.posterior], dim='draw')
    n_draws = samples.sizes['draw']
    if batch_size is None:
        batch_size = int((n_draws // 2) // 20)
    n_batches = int(np.ceil((n_draws // 2) / batch_size))
    batches = np.empty((n_batches,))
    post_probs = {v: np.empty((n_lags, n_batches)) for v in unique_vars}
    for q in range(1, n_batches + 1):
        # Take the first 2*q*batch_size draws, then discard the first half
        # of the prefix as warmup before summarizing.
        batch_stop = min(n_draws, 2 * q * batch_size)
        batch_samples = samples.isel(draw=slice(0, batch_stop))
        warmup = int(batch_stop // 2)
        batch_kept = batch_samples.isel(draw=slice(warmup, None))
        batch_summary = az.summary(batch_kept)
        batches[q - 1] = batch_stop
        for v in unique_vars:
            for i, lag in enumerate(unique_lags):
                # The posterior mean of the indicator is the edge's
                # estimated probability — presumably indicators are 0/1;
                # confirm against the sampler implementation.
                ind = 'i_{}_lag_{}'.format(v, lag)
                mask = batch_summary.index == ind
                post_probs[v][i, q - 1] = batch_summary['mean'][mask][0]
    return batches, post_probs
def plot_pval_diagnostics(batch_sizes, estimates,
                          chi2_results, ks_results, level=0.05):
    """Plot traces of p-values.

    Draws one panel per (predictor, lag) pair showing, as a function of
    the number of draws, the estimated probability trace together with
    the chi-squared and KS test p-values, and a horizontal reference line
    at the significance ``level``.

    Parameters
    ----------
    batch_sizes : array-like
        Number of draws used for each probability estimate (x-axis).
    estimates : dict
        Maps predictor name to an (n_lags, n_batches) array of estimates.
    chi2_results, ks_results : dict
        Map term name ('<var>_lag_<lag>') to sequences whose element [1]
        holds the p-values and element [2] the corresponding draw counts.
    level : float, optional
        Significance level marked with a dash-dot line.

    Returns
    -------
    matplotlib figure containing the grid of panels.
    """
    term_pattern = re.compile(r'([a-zA-Z0-9]+)_lag_([0-9]+)')
    terms = [t for t in chi2_results]
    # Recover the distinct predictors/lags from the term names; predictors
    # are ordered by their display names.
    predictors = np.array(
        sorted(np.unique([term_pattern.search(t)[1] for t in terms]),
               key=lambda v: INDEX_NAMES[v]))
    lags = np.unique([term_pattern.search(t)[2] for t in terms])
    n_predictors = len(predictors)
    n_lags = len(lags)
    # squeeze=False keeps ax two-dimensional even for one row/column.
    fig, ax = plt.subplots(n_predictors, n_lags,
                           figsize=(7 * n_lags, 5 * n_predictors),
                           squeeze=False)
    fig.subplots_adjust(hspace=0.3)
    for i, p in enumerate(predictors):
        for j, lag in enumerate(lags):
            term = '{}_lag_{}'.format(p, lag)
            post_prob = estimates[p][j]
            ax[i, j].plot(batch_sizes, post_prob, 'k-', label=r'$\hat{\pi}$')
            chi2_samples = chi2_results[term][2]
            chi2_pvals = chi2_results[term][1]
            ax[i, j].plot(chi2_samples, chi2_pvals, '-',
                          label=r'$\chi^2$ test')
            ks_samples = ks_results[term][2]
            ks_pvals = ks_results[term][1]
            # One KS trace per pairwise comparison; only the first gets a
            # legend entry to avoid duplicates.
            n_comparisons = ks_pvals.shape[1]
            for k in range(n_comparisons):
                if k == 0:
                    ax[i, j].plot(ks_samples, ks_pvals[:, k], ls='--',
                                  alpha=0.7, label='KS test')
                else:
                    ax[i, j].plot(ks_samples, ks_pvals[:, k], ls='--',
                                  alpha=0.7)
            ax[i, j].axhline(level, ls='-.', color='k')
            ax[i, j].grid(ls='--', color='gray', alpha=0.5)
            ax[i, j].legend()
            ax[i, j].set_ylim(0, 1.05)
            ax[i, j].tick_params(axis='both', labelsize=13)
            ax[i, j].set_xlabel('Number of draws', fontsize=14)
            ax[i, j].set_ylabel('$p$-value', fontsize=14)
            ax[i, j].set_title(
                r'{} lag {}'.format(INDEX_NAMES[p], lag), fontsize=15)
    return fig
def plot_convergence_diagnostics(output_dir, outcome, model_files):
    """Generate plots of convergence diagnostics.

    Loads the posterior samples for one outcome (concatenating restart
    files along the draw dimension), computes batched edge-probability
    estimates, and writes two diagnostic figures — without and with
    chain splitting — as PDF and PNG into ``output_dir``.

    Parameters
    ----------
    output_dir : str
        Directory the plot files are written to.
    outcome : str
        Name of the outcome variable, used in the plot titles.
    model_files : list of str
        Paths to the NetCDF sample files; the first file is the initial
        run, any further files are restarts in order.
    """
    # Load the first fit, then append posterior draws from any restarts.
    # (Previously the single-file and multi-file cases duplicated the
    # initial load in separate if/else branches.)
    fit = az.from_netcdf(model_files[0])
    for restart_file in model_files[1:]:
        restart_fit = az.from_netcdf(restart_file)
        fit.posterior = xr.concat([fit.posterior, restart_fit.posterior],
                                  dim='draw')
    plot_basename = os.path.basename(model_files[0]).replace('.nc', '')
    plot_basename = os.path.join(output_dir, plot_basename)
    # Calculate posterior probabilities for each edge.
    batch_sizes, post_probs = calculate_edge_probability(fit)
    try:
        # First plot diagnostics without splitting chains.
        indicator_chi2_results = \
            rdm.structure_sample_marginal_chi2(
                fit, batch=True, split=False)
        indicator_ks_results = \
            rdm.structure_sample_marginal_ks(
                fit, batch=True, split=False)
    except ValueError:
        # Diagnostics cannot be computed for this fit; skip plotting.
        return
    plot_pval_diagnostics(
        batch_sizes, post_probs,
        indicator_chi2_results, indicator_ks_results)
    plt.suptitle(outcome, fontsize=16, y=0.9)
    output_file = '.'.join(
        [plot_basename, 'model_average_convergence_diagnostics.pdf'])
    plt.savefig(output_file, bbox_inches='tight', facecolor='white')
    plt.savefig(output_file.replace('.pdf', '.png'),
                bbox_inches='tight', facecolor='white')
    plt.close()
    try:
        # Then generate plots with split diagnostics.
        indicator_chi2_results = \
            rdm.structure_sample_marginal_chi2(
                fit, batch=True, split=True)
        indicator_ks_results = \
            rdm.structure_sample_marginal_ks(
                fit, batch=True, split=True)
    except ValueError:
        return
    plot_pval_diagnostics(
        batch_sizes, post_probs,
        indicator_chi2_results, indicator_ks_results)
    plt.suptitle(outcome + ' (split)', fontsize=16)
    output_file = '.'.join(
        [plot_basename,
         'split_model_average_convergence_diagnostics.pdf'])
    plt.savefig(output_file, bbox_inches='tight', facecolor='white')
    plt.savefig(output_file.replace('.pdf', '.png'),
                bbox_inches='tight', facecolor='white')
    plt.close()
def parse_cmd_line_args():
    """Parse command line arguments."""
    p = argparse.ArgumentParser(
        description='Generate plots of convergence diagnostics')
    # Positional arguments: where the fits live and where plots go.
    p.add_argument('models_dir', help='directory containing fits')
    p.add_argument('output_dir', help='directory to write output to')
    # Optional fit-selection and hyperparameter flags.
    p.add_argument('--outcome', dest='outcome', default=None,
                   help='name of outcome variable')
    p.add_argument('--a-tau', dest='a_tau', type=float, default=1.0,
                   help='precision shape hyperparameter')
    p.add_argument('--b-tau', dest='b_tau', type=float, default=1.0,
                   help='precision scale hyperparameter')
    p.add_argument('--nu-sq', dest='nu_sq', type=float, default=1.0,
                   help='SNR hyperparameter')
    p.add_argument('--season', dest='season', default='ALL',
                   choices=['ALL', 'DJF', 'MAM', 'JJA', 'SON'],
                   help='season to fit')
    p.add_argument('--max-lag', dest='max_lag', type=int, default=6,
                   help='maximum lag')
    p.add_argument('--max-terms', dest='max_terms', type=int, default=4,
                   help='maximum number of terms')
    return p.parse_args()
def main():
    """Generate plots of convergence diagnostics."""
    args = parse_cmd_line_args()
    # Build the filename pattern for the requested hyperparameter setting.
    model_pattern = get_model_fit_pattern(
        outcome=args.outcome,
        a_tau=args.a_tau, b_tau=args.b_tau, nu_sq=args.nu_sq,
        max_terms=args.max_terms, max_lag=args.max_lag,
        season=args.season)
    model_files = get_fit_output_files(args.models_dir, model_pattern)

    restart_pattern = re.compile('restart-([0-9]+)')

    def restart_number(filename):
        """Sort key placing restart files in numerical order."""
        match = restart_pattern.search(filename)
        return int(match[1]) if match else 0

    for outcome, outcome_model_files in model_files.items():
        if any('restart' in f for f in outcome_model_files):
            # Ensure the restart files are processed in the right order.
            outcome_model_files = sorted(outcome_model_files,
                                         key=restart_number)
        print('* Outcome: ', outcome)
        for f in outcome_model_files:
            print('\t- ', f)
        plot_convergence_diagnostics(
            args.output_dir, outcome, outcome_model_files)


if __name__ == '__main__':
    main()
| [
"arviz.summary",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"numpy.unique",
"argparse.ArgumentParser",
"re.compile",
"reanalysis_dbns.models.structure_sample_marginal_chi2",
"arviz.from_netcdf",
"os.path.join",
"re.match",
"xarray.concat",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.clo... | [((348, 374), 'pandas.Timestamp', 'pd.Timestamp', (['"""1979-01-01"""'], {}), "('1979-01-01')\n", (360, 374), True, 'import pandas as pd\n'), ((376, 402), 'pandas.Timestamp', 'pd.Timestamp', (['"""2001-12-30"""'], {}), "('2001-12-30')\n", (388, 402), True, 'import pandas as pd\n'), ((1950, 1975), 're.compile', 're.compile', (['model_pattern'], {}), '(model_pattern)\n', (1960, 1975), False, 'import re\n'), ((2549, 2624), 'arviz.summary', 'az.summary', (['fit.posterior'], {'var_names': 'indicator_pattern', 'filter_vars': '"""regex"""'}), "(fit.posterior, var_names=indicator_pattern, filter_vars='regex')\n", (2559, 2624), True, 'import arviz as az\n'), ((3043, 3068), 'numpy.unique', 'np.unique', (['indicator_lags'], {}), '(indicator_lags)\n', (3052, 3068), True, 'import numpy as np\n'), ((3322, 3382), 'xarray.concat', 'xr.concat', (['[fit.warmup_posterior, fit.posterior]'], {'dim': '"""draw"""'}), "([fit.warmup_posterior, fit.posterior], dim='draw')\n", (3331, 3382), True, 'import xarray as xr\n'), ((3578, 3600), 'numpy.empty', 'np.empty', (['(n_batches,)'], {}), '((n_batches,))\n', (3586, 3600), True, 'import numpy as np\n'), ((4482, 4523), 're.compile', 're.compile', (['"""([a-zA-Z0-9]+)_lag_([0-9]+)"""'], {}), "('([a-zA-Z0-9]+)_lag_([0-9]+)')\n", (4492, 4523), False, 'import re\n'), ((4847, 4940), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_predictors', 'n_lags'], {'figsize': '(7 * n_lags, 5 * n_predictors)', 'squeeze': '(False)'}), '(n_predictors, n_lags, figsize=(7 * n_lags, 5 * n_predictors),\n squeeze=False)\n', (4859, 4940), True, 'import matplotlib.pyplot as plt\n'), ((7058, 7097), 'os.path.join', 'os.path.join', (['output_dir', 'plot_basename'], {}), '(output_dir, plot_basename)\n', (7070, 7097), False, 'import os\n'), ((7718, 7759), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['outcome'], {'fontsize': '(16)', 'y': '(0.9)'}), '(outcome, fontsize=16, y=0.9)\n', (7730, 7759), True, 'import matplotlib.pyplot as plt\n'), 
((7863, 7927), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {'bbox_inches': '"""tight"""', 'facecolor': '"""white"""'}), "(output_file, bbox_inches='tight', facecolor='white')\n", (7874, 7927), True, 'import matplotlib.pyplot as plt\n'), ((8042, 8053), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8051, 8053), True, 'import matplotlib.pyplot as plt\n'), ((8549, 8596), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["(outcome + ' (split)')"], {'fontsize': '(16)'}), "(outcome + ' (split)', fontsize=16)\n", (8561, 8596), True, 'import matplotlib.pyplot as plt\n'), ((8715, 8779), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {'bbox_inches': '"""tight"""', 'facecolor': '"""white"""'}), "(output_file, bbox_inches='tight', facecolor='white')\n", (8726, 8779), True, 'import matplotlib.pyplot as plt\n'), ((8894, 8905), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8903, 8905), True, 'import matplotlib.pyplot as plt\n'), ((8989, 9074), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate plots of convergence diagnostics"""'}), "(description='Generate plots of convergence diagnostics'\n )\n", (9012, 9074), False, 'import argparse\n'), ((3525, 3559), 'numpy.ceil', 'np.ceil', (['(n_draws // 2 / batch_size)'], {}), '(n_draws // 2 / batch_size)\n', (3532, 3559), True, 'import numpy as np\n'), ((3622, 3651), 'numpy.empty', 'np.empty', (['(n_lags, n_batches)'], {}), '((n_lags, n_batches))\n', (3630, 3651), True, 'import numpy as np\n'), ((3962, 3984), 'arviz.summary', 'az.summary', (['batch_kept'], {}), '(batch_kept)\n', (3972, 3984), True, 'import arviz as az\n'), ((6649, 6679), 'arviz.from_netcdf', 'az.from_netcdf', (['model_files[0]'], {}), '(model_files[0])\n', (6663, 6679), True, 'import arviz as az\n'), ((6705, 6735), 'arviz.from_netcdf', 'az.from_netcdf', (['model_files[0]'], {}), '(model_files[0])\n', (6719, 6735), True, 'import arviz as az\n'), ((7332, 7396), 
'reanalysis_dbns.models.structure_sample_marginal_chi2', 'rdm.structure_sample_marginal_chi2', (['fit'], {'batch': '(True)', 'split': '(False)'}), '(fit, batch=True, split=False)\n', (7366, 7396), True, 'import reanalysis_dbns.models as rdm\n'), ((7460, 7522), 'reanalysis_dbns.models.structure_sample_marginal_ks', 'rdm.structure_sample_marginal_ks', (['fit'], {'batch': '(True)', 'split': '(False)'}), '(fit, batch=True, split=False)\n', (7492, 7522), True, 'import reanalysis_dbns.models as rdm\n'), ((8165, 8228), 'reanalysis_dbns.models.structure_sample_marginal_chi2', 'rdm.structure_sample_marginal_chi2', (['fit'], {'batch': '(True)', 'split': '(True)'}), '(fit, batch=True, split=True)\n', (8199, 8228), True, 'import reanalysis_dbns.models as rdm\n'), ((8292, 8353), 'reanalysis_dbns.models.structure_sample_marginal_ks', 'rdm.structure_sample_marginal_ks', (['fit'], {'batch': '(True)', 'split': '(True)'}), '(fit, batch=True, split=True)\n', (8324, 8353), True, 'import reanalysis_dbns.models as rdm\n'), ((1903, 1932), 'os.path.join', 'os.path.join', (['models_dir', '"""*"""'], {}), "(models_dir, '*')\n", (1915, 1932), False, 'import os\n'), ((2933, 2958), 'numpy.unique', 'np.unique', (['indicator_vars'], {}), '(indicator_vars)\n', (2942, 2958), True, 'import numpy as np\n'), ((6805, 6835), 'arviz.from_netcdf', 'az.from_netcdf', (['model_files[i]'], {}), '(model_files[i])\n', (6819, 6835), True, 'import arviz as az\n'), ((6865, 6926), 'xarray.concat', 'xr.concat', (['[fit.posterior, restart_fit.posterior]'], {'dim': '"""draw"""'}), "([fit.posterior, restart_fit.posterior], dim='draw')\n", (6874, 6926), True, 'import xarray as xr\n'), ((6986, 7018), 'os.path.basename', 'os.path.basename', (['model_files[0]'], {}), '(model_files[0])\n', (7002, 7018), False, 'import os\n'), ((2682, 2712), 're.match', 're.match', (['indicator_pattern', 'i'], {}), '(indicator_pattern, i)\n', (2690, 2712), False, 'import re\n'), ((10956, 10986), 're.compile', 're.compile', 
(['"""restart-([0-9]+)"""'], {}), "('restart-([0-9]+)')\n", (10966, 10986), False, 'import re\n'), ((2807, 2837), 're.match', 're.match', (['indicator_pattern', 'i'], {}), '(indicator_pattern, i)\n', (2815, 2837), False, 'import re\n')] |
"""Visualize GradientBoostingClassifier predict_proba on the circles dataset."""
import numpy as np
import mglearn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.datasets import make_circles

X, y = make_circles(noise=0.25, factor=0.5, random_state=1)
# For illustration, rename the two classes "blue" and "red"
y_named = np.array(["blue", "red"])[y]
# We can call train_test_split on any number of arrays;
# all of them are split in a consistent way
X_train, X_test, y_train_named, y_test_named, y_train, y_test = \
    train_test_split(X, y_named, y, random_state=0)
# Build the gradient boosting model
gbrt = GradientBoostingClassifier(random_state=0)
gbrt.fit(X_train, y_train_named)
print("Shape of probabilities:", gbrt.predict_proba(X_test).shape)
# Show the first few entries of predict_proba
print("Predicted probabilities:")
print(gbrt.predict_proba(X_test[:6]))

fig, axes = plt.subplots(1, 2, figsize=(12, 5))
plt.subplots_adjust(left=0.1, right=0.85, top=0.85, bottom=0.1)
# Left panel: decision boundary; right panel: class-probability heatmap.
mglearn.tools.plot_2d_separator(
    gbrt, X, ax=axes[0], alpha=.4, fill=True, cm=mglearn.cm2)
scores_image = mglearn.tools.plot_2d_scores(
    gbrt, X, ax=axes[1], alpha=.5, cm=mglearn.ReBl, function='predict_proba')
for ax in axes:
    # Plot the training and test points
    mglearn.discrete_scatter(X_test[:, 0], X_test[:, 1], y_test, markers='^', ax=ax)
    mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train, markers='o', ax=ax)
    ax.set_xlabel("Feature 0")
    ax.set_ylabel("Feature 1")
position = fig.add_axes([0.9, 0.1, 0.02, 0.75])  # position [left, bottom, width, height]
cbar = plt.colorbar(scores_image, cax=position, ax=axes.tolist(), orientation="vertical")
cbar.set_alpha(1)
# NOTE(review): Colorbar.draw_all is deprecated in newer matplotlib — confirm version.
cbar.draw_all()
axes[0].legend(["Test class 0", "Test class 1", "Train class 0",
                "Train class 1"], ncol=4, loc=(.1, 1.1))
plt.show()
| [
"mglearn.discrete_scatter",
"sklearn.model_selection.train_test_split",
"mglearn.tools.plot_2d_separator",
"mglearn.tools.plot_2d_scores",
"numpy.array",
"sklearn.datasets.make_circles",
"sklearn.ensemble.GradientBoostingClassifier",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",... | [((225, 277), 'sklearn.datasets.make_circles', 'make_circles', ([], {'noise': '(0.25)', 'factor': '(0.5)', 'random_state': '(1)'}), '(noise=0.25, factor=0.5, random_state=1)\n', (237, 277), False, 'from sklearn.datasets import make_circles\n'), ((471, 518), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_named', 'y'], {'random_state': '(0)'}), '(X, y_named, y, random_state=0)\n', (487, 518), False, 'from sklearn.model_selection import train_test_split\n'), ((538, 580), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (564, 580), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((794, 829), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 5)'}), '(1, 2, figsize=(12, 5))\n', (806, 829), True, 'import matplotlib.pyplot as plt\n'), ((830, 893), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.85)', 'top': '(0.85)', 'bottom': '(0.1)'}), '(left=0.1, right=0.85, top=0.85, bottom=0.1)\n', (849, 893), True, 'import matplotlib.pyplot as plt\n'), ((895, 989), 'mglearn.tools.plot_2d_separator', 'mglearn.tools.plot_2d_separator', (['gbrt', 'X'], {'ax': 'axes[0]', 'alpha': '(0.4)', 'fill': '(True)', 'cm': 'mglearn.cm2'}), '(gbrt, X, ax=axes[0], alpha=0.4, fill=True,\n cm=mglearn.cm2)\n', (926, 989), False, 'import mglearn\n'), ((1005, 1113), 'mglearn.tools.plot_2d_scores', 'mglearn.tools.plot_2d_scores', (['gbrt', 'X'], {'ax': 'axes[1]', 'alpha': '(0.5)', 'cm': 'mglearn.ReBl', 'function': '"""predict_proba"""'}), "(gbrt, X, ax=axes[1], alpha=0.5, cm=mglearn.\n ReBl, function='predict_proba')\n", (1033, 1113), False, 'import mglearn\n'), ((1696, 1706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1704, 1706), True, 'import matplotlib.pyplot as plt\n'), ((321, 346), 'numpy.array', 'np.array', (["['blue', 'red']"], {}), 
"(['blue', 'red'])\n", (329, 346), True, 'import numpy as np\n'), ((1150, 1235), 'mglearn.discrete_scatter', 'mglearn.discrete_scatter', (['X_test[:, 0]', 'X_test[:, 1]', 'y_test'], {'markers': '"""^"""', 'ax': 'ax'}), "(X_test[:, 0], X_test[:, 1], y_test, markers='^', ax=ax\n )\n", (1174, 1235), False, 'import mglearn\n'), ((1235, 1322), 'mglearn.discrete_scatter', 'mglearn.discrete_scatter', (['X_train[:, 0]', 'X_train[:, 1]', 'y_train'], {'markers': '"""o"""', 'ax': 'ax'}), "(X_train[:, 0], X_train[:, 1], y_train, markers='o',\n ax=ax)\n", (1259, 1322), False, 'import mglearn\n')] |
from PIL import Image
import numpy as np
import torch
import os
import argparse
from torch import nn
from skimage import exposure
from skimage import img_as_float, transform
def save_image(im, path):
    """Write an image to disk.

    Numpy inputs are first normalized into a PIL-compatible WxHx3 uint8
    layout via :func:`format_np_output`; PIL images are saved as-is.

    Args:
        im: a PIL Image, or a numpy matrix of shape DxWxH
        path (str): destination file path
    """
    if isinstance(im, (np.ndarray, np.generic)):
        im = Image.fromarray(format_np_output(im))
    im.save(path)
def format_np_output(np_arr):
    """Normalize an array into a saveable WxHx3 uint8 image.

    Applies, in order: (1) add a channel axis to 2-D input, (2) replicate a
    single channel to three, (3) move channels last for PIL, and
    (4) rescale [0, 1] floats to 0-255 uint8.

    Args:
        np_arr (numpy array): matrix of shape WxH, 1xWxH or 3xWxH
    Returns:
        numpy array shaped WxHx3, ready for ``PIL.Image.fromarray``
    """
    # (1) grayscale matrix -> single-channel image
    if np_arr.ndim == 2:
        np_arr = np_arr[np.newaxis, ...]
    # (2) single channel -> three identical channels
    if np_arr.shape[0] == 1:
        np_arr = np.repeat(np_arr, 3, axis=0)
    # (3) channels-first -> channels-last
    if np_arr.shape[0] == 3:
        np_arr = np_arr.transpose(1, 2, 0)
    # (4) values in [0, 1] are assumed normalized; convert to 8-bit
    if np_arr.max() <= 1:
        np_arr = (np_arr * 255).astype(np.uint8)
    return np_arr
def set_contrast(image, contrast=1):
    """Apply a contrast-adjustment method to an image.

    :param image: input image (array-like)
    :param contrast: method selector:
        0 = no change, 1 = histogram equalization (default),
        2 = adaptive histogram equalization (CLAHE),
        3 = intensity rescaling
    :return: contrast-adjusted image
    :raises ValueError: if ``contrast`` is not in {0, 1, 2, 3}
    """
    if contrast == 0:
        return image
    if contrast == 1:
        return exposure.equalize_hist(image)
    if contrast == 2:
        return exposure.equalize_adapthist(image)
    if contrast == 3:
        return exposure.rescale_intensity(image)
    # Previously an unknown value fell through and raised UnboundLocalError
    # on the return statement; fail explicitly instead.
    raise ValueError("contrast must be 0, 1, 2 or 3, got {}".format(contrast))
def special_ST_preprocessing(img_file, output_dim=256):
    """Load an image, convert to greyscale float, resize and contrast-adjust.

    :param img_file: path to the input image file
    :param output_dim: side length of the square output image
    :return: preprocessed float image of shape (output_dim, output_dim)
    """
    grey = img_as_float(np.array(Image.open(img_file).convert('L')))
    resized = transform.resize(grey, output_shape=(output_dim, output_dim))
    # Default contrast method (histogram equalization); see set_contrast.
    return set_contrast(resized)
def load_image(sag_path, trans_path, output_dim=256):
    """Preprocess the sagittal and transverse images and stack them.

    :param sag_path: path to the sagittal-view image
    :param trans_path: path to the transverse-view image
    :param output_dim: square output size passed to the preprocessor
    :return: numpy array of shape (2, output_dim, output_dim)
    """
    images = []
    for path in (sag_path, trans_path):
        print(path)
        images.append(special_ST_preprocessing(path, output_dim=output_dim))
    return np.array(images)
class SiamNet(nn.Module):
    """Siamese-style CNN for multi-view image classification.

    Each of ``num_inputs`` image views is passed through a shared
    AlexNet-style convolutional tower (96/256/384 channels with LRN);
    the per-view embeddings are concatenated and classified.  When
    ``cov_layers`` is truthy, two covariates (age, side) are appended to
    the classifier output and refined by two extra fully connected layers.
    """
    def __init__(self, cov_layers, classes=2, num_inputs=2, dropout_rate=0.5, output_dim=256):
        """Build the network.

        :param cov_layers: if truthy, add covariate (age/side) layers
        :param classes: number of output classes
        :param num_inputs: number of image views processed per example
        :param dropout_rate: currently unused — the dropout layers below
            are commented out; kept for API compatibility
        :param output_dim: width of the shared fully connected layer fc7
        """
        super(SiamNet, self).__init__()
        self.cov_layers = cov_layers
        self.output_dim = output_dim
        # print("LL DIM: " + str(self.output_dim))
        self.num_inputs = num_inputs
        # Shared convolutional tower applied to every view.
        self.conv = nn.Sequential()
        self.conv.add_module('conv1_s1', nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=0))
        self.conv.add_module('batch1_s1', nn.BatchNorm2d(96))
        self.conv.add_module('relu1_s1', nn.ReLU(inplace=True))
        self.conv.add_module('pool1_s1', nn.MaxPool2d(kernel_size=3, stride=2))
        self.conv.add_module('lrn1_s1', LRN(local_size=5, alpha=0.0001, beta=0.75))
        self.conv.add_module('conv2_s1', nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2))
        self.conv.add_module('batch2_s1', nn.BatchNorm2d(256))
        self.conv.add_module('relu2_s1', nn.ReLU(inplace=True))
        self.conv.add_module('pool2_s1', nn.MaxPool2d(kernel_size=3, stride=2))
        self.conv.add_module('lrn2_s1', LRN(local_size=5, alpha=0.0001, beta=0.75))
        self.conv.add_module('conv3_s1', nn.Conv2d(256, 384, kernel_size=3, padding=1))
        self.conv.add_module('batch3_s1', nn.BatchNorm2d(384))
        self.conv.add_module('relu3_s1', nn.ReLU(inplace=True))
        self.conv.add_module('conv4_s1', nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2))
        self.conv.add_module('batch4_s1', nn.BatchNorm2d(384))
        self.conv.add_module('relu4_s1', nn.ReLU(inplace=True))
        self.conv.add_module('conv5_s1', nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2))
        self.conv.add_module('batch5_s1', nn.BatchNorm2d(256))
        self.conv.add_module('relu5_s1', nn.ReLU(inplace=True))
        self.conv.add_module('pool5_s1', nn.MaxPool2d(kernel_size=3, stride=2))
        # *************************** changed layers *********************** #
        self.fc6 = nn.Sequential()
        # self.fc6.add_module('fc6_s1', nn.Conv2d(256, 1024, kernel_size=3, stride=1, padding=1))
        self.fc6.add_module('fc6_s1', nn.Conv2d(256, 1024, kernel_size=2, stride=1, padding=1))
        # self.fc6.add_module('fc6_s1', nn.Conv2d(256, 1024, kernel_size=1, stride=1, padding=1))
        self.fc6.add_module('batch6_s1', nn.BatchNorm2d(1024))
        self.fc6.add_module('relu6_s1', nn.ReLU(inplace=True))
        self.fc6b = nn.Sequential()
        self.fc6b.add_module('conv6b_s1', nn.Conv2d(1024, 256, kernel_size=3, stride=2))
        self.fc6b.add_module('batch6b_s1', nn.BatchNorm2d(256))
        self.fc6b.add_module('relu6_s1', nn.ReLU(inplace=True))
        self.fc6b.add_module('pool6b_s1', nn.MaxPool2d(kernel_size=3, stride=2))
        # Flattened per-view embedding -> 512-dim vector.
        self.fc6c = nn.Sequential()
        # self.fc6c.add_module('fc7', nn.Linear(256*2*2, 512))
        self.fc6c.add_module('fc7', nn.Linear(256 * 3 * 3, 512))
        self.fc6c.add_module('relu7', nn.ReLU(inplace=True))
        # self.fc6c.add_module('drop7', nn.Dropout(p=dropout_rate))
        # Fuses the concatenated per-view embeddings.
        self.fc7_new = nn.Sequential()
        self.fc7_new.add_module('fc7', nn.Linear(self.num_inputs * 512, self.output_dim))
        self.fc7_new.add_module('relu7', nn.ReLU(inplace=True))
        # self.fc7_new.add_module('drop7', nn.Dropout(p=dropout_rate))
        self.classifier_new = nn.Sequential()
        self.classifier_new.add_module('fc8', nn.Linear(self.output_dim, classes))
        if self.cov_layers:
            # Covariate path: classifier logits + age + side -> final logits.
            self.classifier_new.add_module('relu8', nn.ReLU(inplace=True))
            self.add_covs1 = nn.Sequential()
            self.add_covs1.add_module('fc9', nn.Linear(classes + 2, classes + 126))
            self.add_covs1.add_module('relu9', nn.ReLU(inplace=True))
            self.add_covs2 = nn.Sequential()
            self.add_covs2.add_module('fc10', nn.Linear(classes + 126, classes))
    def load(self, checkpoint):
        """Load pretrained weights, skipping the fc8 classifier head."""
        model_dict = self.state_dict()
        pretrained_dict = torch.load(checkpoint)
        # Keep only weights present in this model, excluding the final layer.
        pretrained_dict = {k: v for k, v in list(pretrained_dict.items()) if k in model_dict and 'fc8' not in k}
        model_dict.update(pretrained_dict)
        self.load_state_dict(model_dict)
        print([k for k, v in list(pretrained_dict.items())])
    def save(self, checkpoint):
        """Serialize this model's state dict to *checkpoint*."""
        torch.save(self.state_dict(), checkpoint)
    def forward(self, x, age=None, left=None):
        """Forward pass over a batch of stacked views.

        :param x: batch tensor; assumed shape (B, T, C, H') with
            T == num_inputs — TODO confirm against callers
        :param age: per-example age covariate (only read when cov_layers)
        :param left: per-example side flag (only read when cov_layers)
        :return: logits of shape (B, classes)
        """
        if self.num_inputs == 1:
            x = x.unsqueeze(1)
            # B, C, H = x.size()
        # else:
        B, T, C, H = x.size()
        # Views-first so x[i] selects the i-th view across the batch.
        x = x.transpose(0, 1)
        x_list = []
        for i in range(self.num_inputs):
            # NOTE(review): both branches are identical; likely leftover
            # from an earlier per-input special case.
            if self.num_inputs == 1:
                curr_x = torch.unsqueeze(x[i], 1)
            else:
                curr_x = torch.unsqueeze(x[i], 1)
            # if self.num_inputs == 1:
            #     curr_x = curr_x.expand(-1, 3, -1)
            # else:
            curr_x = curr_x.expand(-1, 3, -1, -1) ## expanding 1 channel to 3 duplicate channels
            # NOTE: 'input' shadows the builtin of the same name.
            if torch.cuda.is_available():
                input = torch.cuda.FloatTensor(curr_x.to(device))
            else:
                input = torch.FloatTensor(curr_x.to(device))
            z = self.conv(input)
            z = self.fc6(z) ## convolution
            z = self.fc6b(z) ## convolution
            z = z.view([B, 1, -1])
            z = self.fc6c(z) ## fully connected layer
            ### LAUREN CHECK THIS -- shouldn't need to .view, no?
            # z = z.view([B, 1, -1])
            x_list.append(z)
        # Concatenate per-view embeddings along dim 1, then fuse.
        x = torch.cat(x_list, 1)
        # x = torch.sum(x, 1)
        x = self.fc7_new(x.view(B, -1))
        pred = self.classifier_new(x)
        if self.cov_layers:
            age = torch.tensor(age).type(torch.FloatTensor).to(device).view(B, 1)
            # print("Age: ")
            # print(age)
            side = torch.tensor(left).type(torch.FloatTensor).to(device).view(B, 1)
            # print("Side: ")
            # print(side)
            mid_in = torch.cat((pred, age, side), 1)
            x = self.add_covs1(mid_in)
            pred = self.add_covs2(x)
        return pred
class LRN(nn.Module):
    """Local Response Normalization.

    Normalizes each activation by a power of the local average of squared
    activations, either across neighbouring channels (``ACROSS_CHANNELS``)
    or within a spatial neighbourhood.
    """
    def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True):
        """Configure the averaging window and normalization exponents."""
        super(LRN, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        pad = int((local_size - 1.0) / 2)
        if ACROSS_CHANNELS:
            # Average over `local_size` adjacent channels only (1x1 spatially).
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                                         stride=1, padding=(pad, 0, 0))
        else:
            # Average over a local_size x local_size spatial window.
            self.average = nn.AvgPool2d(kernel_size=local_size,
                                         stride=1, padding=pad)
        self.alpha = alpha
        self.beta = beta
    def forward(self, x):
        """Return x / (1 + alpha * local_mean(x^2)) ** beta."""
        squared = x.pow(2)
        if self.ACROSS_CHANNELS:
            # Treat channels as a depth axis so AvgPool3d pools across them.
            denom = self.average(squared.unsqueeze(1)).squeeze(1)
        else:
            denom = self.average(squared)
        denom = denom.mul(self.alpha).add(1.0).pow(self.beta)
        return x.div(denom)
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if __name__ == '__main__':
    # CLI entry point: load a pretrained SiamNet and predict the probability
    # of surgery from a sagittal + transverse ultrasound image pair.
    parser = argparse.ArgumentParser()
    # parser.add_argument('--sag_path', default='/ModelTest/Images/test_RS_cropped2.png', type=str)
    # parser.add_argument('--trans_path', default='/ModelTest/Images/test_RT_cropped2.png', type=str)
    # parser.add_argument('--age_wks', default=143, type=int)
    parser.add_argument('--sag_path', default='/ModelTest/Images/test_RS_cropped1.png', type=str)
    parser.add_argument('--trans_path', default='/ModelTest/Images/test_RT_cropped1.png', type=str)
    parser.add_argument('--age_wks', default=34, type=int)
    parser.add_argument('--left_kidney', action="store_true", help="Flag for left kidney")
    parser.add_argument('--with_cov', action="store_true", help="Flag for no Covariate model")
    # parser.add_argument('-checkpoint', default="\ModelTest\ModelWeights\SickKids_origST_TrainOnly_40epochs_bs16_lr0.001_RCFalse_covTrue_OSFalse_30thEpoch_20210614_v5.pth")
    args = parser.parse_args()
    # Pick the checkpoint matching the covariate / no-covariate model.
    if args.with_cov:
        checkpoint = "/ModelTest/ModelWeights/SickKids_origST_TrainOnly_40epochs_bs16_lr0.001_RCFalse_covTrue_OSFalse_30thEpoch_20210614_v5.pth"
    else:
        checkpoint = "/ModelTest/ModelWeights/NoFinalLayerFineTuneNoCov_v2_TrainOnly_40epochs_bs16_lr0.001_RCFalse_covFalse_OSFalse_30thEpoch.pth"
    # Resolve paths relative to a directory 3 levels above the cwd.
    # NOTE(review): the "\\" separators assume a Windows working directory —
    # confirm this runs only on Windows.
    dir_paste = os.getcwd().split("\\")[0:(len(os.getcwd().split("\\"))-3)]
    in_dir = "\\".join(dir_paste) + "\\"
    checkpoint = in_dir + checkpoint
    net = SiamNet(cov_layers=args.with_cov).to(device)
    if torch.cuda.is_available():
        pretrained_dict = torch.load(checkpoint)['model_state_dict']
    else:
        pretrained_dict = torch.load(checkpoint, map_location='cpu')['model_state_dict']
    model_dict = net.state_dict()
    # 1. filter out unnecessary keys
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    # NOTE(review): this loads the *filtered* pretrained_dict rather than the
    # merged model_dict built above — a strict load would fail on any missing
    # key; confirm this is intentional.
    net.load_state_dict(pretrained_dict)
    # for file in files_to_get:
    #     print(file)
    ## update image paths + load images
    sag_path = in_dir + args.sag_path
    trans_path = in_dir + args.trans_path
    X = load_image(sag_path, trans_path)
    ## convert images to torch float tensors
    X[0] = torch.from_numpy(X[0]).float()
    X[1] = torch.from_numpy(X[1]).float()
    combined_image = torch.unsqueeze(torch.from_numpy(X).float(), 0).to(device)
    ## run model
    net.eval()
    softmax = torch.nn.Softmax(dim=1)
    with torch.no_grad():
        net.zero_grad()
        if args.with_cov:
            output = net(combined_image, age=args.age_wks, left=args.left_kidney)
        else:
            output = net(combined_image)
        output_softmax = softmax(output)
        # Probability of the positive (surgery) class.
        pred_prob = output_softmax[:, 1]
    if args.with_cov:
        ## use Platt scaling determined a priori to scale prediction
        pred_prob = np.exp(-2.311 + 3.5598*float(pred_prob))
    else:
        pred_prob = np.exp(-2.5982 + 4.318*float(pred_prob))
    print("Probability of surgery:::{:6.3f}".format(float(pred_prob)))
| [
"torch.nn.ReLU",
"torch.nn.Sequential",
"skimage.img_as_float",
"torch.from_numpy",
"skimage.exposure.equalize_adapthist",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"numpy.repeat",
"argparse.ArgumentParser",
"torch.unsqueeze",
"numpy.max",
"skimage.exposure.equalize_h... | [((2236, 2255), 'skimage.img_as_float', 'img_as_float', (['image'], {}), '(image)\n', (2248, 2255), False, 'from skimage import img_as_float, transform\n'), ((2414, 2482), 'skimage.transform.resize', 'transform.resize', (['cropped_img'], {'output_shape': '(output_dim, output_dim)'}), '(cropped_img, output_shape=(output_dim, output_dim))\n', (2430, 2482), False, 'from skimage import img_as_float, transform\n'), ((3301, 3312), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3309, 3312), True, 'import numpy as np\n'), ((10521, 10546), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10544, 10546), False, 'import argparse\n'), ((12005, 12030), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12028, 12030), False, 'import torch\n'), ((12999, 13022), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (13015, 13022), False, 'import torch\n'), ((471, 490), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (486, 490), False, 'from PIL import Image\n'), ((970, 1000), 'numpy.expand_dims', 'np.expand_dims', (['np_arr'], {'axis': '(0)'}), '(np_arr, axis=0)\n', (984, 1000), True, 'import numpy as np\n'), ((1187, 1215), 'numpy.repeat', 'np.repeat', (['np_arr', '(3)'], {'axis': '(0)'}), '(np_arr, 3, axis=0)\n', (1196, 1215), True, 'import numpy as np\n'), ((1538, 1552), 'numpy.max', 'np.max', (['np_arr'], {}), '(np_arr)\n', (1544, 1552), True, 'import numpy as np\n'), ((3660, 3675), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (3673, 3675), False, 'from torch import nn\n'), ((5297, 5312), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (5310, 5312), False, 'from torch import nn\n'), ((5752, 5767), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (5765, 5767), False, 'from torch import nn\n'), ((6087, 6102), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (6100, 6102), False, 'from torch import nn\n'), ((6384, 6399), 
'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (6397, 6399), False, 'from torch import nn\n'), ((6656, 6671), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (6669, 6671), False, 'from torch import nn\n'), ((7284, 7306), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (7294, 7306), False, 'import torch\n'), ((8831, 8851), 'torch.cat', 'torch.cat', (['x_list', '(1)'], {}), '(x_list, 1)\n', (8840, 8851), False, 'import torch\n'), ((10441, 10466), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10464, 10466), False, 'import torch\n'), ((13033, 13048), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13046, 13048), False, 'import torch\n'), ((1901, 1930), 'skimage.exposure.equalize_hist', 'exposure.equalize_hist', (['image'], {}), '(image)\n', (1923, 1930), False, 'from skimage import exposure\n'), ((3717, 3770), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(96)'], {'kernel_size': '(11)', 'stride': '(2)', 'padding': '(0)'}), '(3, 96, kernel_size=11, stride=2, padding=0)\n', (3726, 3770), False, 'from torch import nn\n'), ((3814, 3832), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(96)'], {}), '(96)\n', (3828, 3832), False, 'from torch import nn\n'), ((3875, 3896), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3882, 3896), False, 'from torch import nn\n'), ((3939, 3976), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (3951, 3976), False, 'from torch import nn\n'), ((4104, 4158), 'torch.nn.Conv2d', 'nn.Conv2d', (['(96)', '(256)'], {'kernel_size': '(5)', 'padding': '(2)', 'groups': '(2)'}), '(96, 256, kernel_size=5, padding=2, groups=2)\n', (4113, 4158), False, 'from torch import nn\n'), ((4202, 4221), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (4216, 4221), False, 'from torch import nn\n'), ((4264, 4285), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), 
'(inplace=True)\n', (4271, 4285), False, 'from torch import nn\n'), ((4328, 4365), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (4340, 4365), False, 'from torch import nn\n'), ((4493, 4538), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(384)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 384, kernel_size=3, padding=1)\n', (4502, 4538), False, 'from torch import nn\n'), ((4582, 4601), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(384)'], {}), '(384)\n', (4596, 4601), False, 'from torch import nn\n'), ((4644, 4665), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4651, 4665), False, 'from torch import nn\n'), ((4709, 4764), 'torch.nn.Conv2d', 'nn.Conv2d', (['(384)', '(384)'], {'kernel_size': '(3)', 'padding': '(1)', 'groups': '(2)'}), '(384, 384, kernel_size=3, padding=1, groups=2)\n', (4718, 4764), False, 'from torch import nn\n'), ((4808, 4827), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(384)'], {}), '(384)\n', (4822, 4827), False, 'from torch import nn\n'), ((4870, 4891), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4877, 4891), False, 'from torch import nn\n'), ((4935, 4990), 'torch.nn.Conv2d', 'nn.Conv2d', (['(384)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)', 'groups': '(2)'}), '(384, 256, kernel_size=3, padding=1, groups=2)\n', (4944, 4990), False, 'from torch import nn\n'), ((5034, 5053), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (5048, 5053), False, 'from torch import nn\n'), ((5096, 5117), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5103, 5117), False, 'from torch import nn\n'), ((5160, 5197), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (5172, 5197), False, 'from torch import nn\n'), ((5449, 5505), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(1024)'], {'kernel_size': '(2)', 
'stride': '(1)', 'padding': '(1)'}), '(256, 1024, kernel_size=2, stride=1, padding=1)\n', (5458, 5505), False, 'from torch import nn\n'), ((5646, 5666), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (5660, 5666), False, 'from torch import nn\n'), ((5708, 5729), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5715, 5729), False, 'from torch import nn\n'), ((5810, 5855), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)'}), '(1024, 256, kernel_size=3, stride=2)\n', (5819, 5855), False, 'from torch import nn\n'), ((5900, 5919), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (5914, 5919), False, 'from torch import nn\n'), ((5962, 5983), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5969, 5983), False, 'from torch import nn\n'), ((6027, 6064), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (6039, 6064), False, 'from torch import nn\n'), ((6202, 6229), 'torch.nn.Linear', 'nn.Linear', (['(256 * 3 * 3)', '(512)'], {}), '(256 * 3 * 3, 512)\n', (6211, 6229), False, 'from torch import nn\n'), ((6269, 6290), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6276, 6290), False, 'from torch import nn\n'), ((6439, 6488), 'torch.nn.Linear', 'nn.Linear', (['(self.num_inputs * 512)', 'self.output_dim'], {}), '(self.num_inputs * 512, self.output_dim)\n', (6448, 6488), False, 'from torch import nn\n'), ((6531, 6552), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6538, 6552), False, 'from torch import nn\n'), ((6718, 6753), 'torch.nn.Linear', 'nn.Linear', (['self.output_dim', 'classes'], {}), '(self.output_dim, classes)\n', (6727, 6753), False, 'from torch import nn\n'), ((6889, 6904), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (6902, 6904), False, 'from torch import nn\n'), ((7089, 7104), 
'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (7102, 7104), False, 'from torch import nn\n'), ((8305, 8330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8328, 8330), False, 'import torch\n'), ((9286, 9317), 'torch.cat', 'torch.cat', (['(pred, age, side)', '(1)'], {}), '((pred, age, side), 1)\n', (9295, 9317), False, 'import torch\n'), ((12058, 12080), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (12068, 12080), False, 'import torch\n'), ((12137, 12179), 'torch.load', 'torch.load', (['checkpoint'], {'map_location': '"""cpu"""'}), "(checkpoint, map_location='cpu')\n", (12147, 12179), False, 'import torch\n'), ((12799, 12821), 'torch.from_numpy', 'torch.from_numpy', (['X[0]'], {}), '(X[0])\n', (12815, 12821), False, 'import torch\n'), ((12841, 12863), 'torch.from_numpy', 'torch.from_numpy', (['X[1]'], {}), '(X[1])\n', (12857, 12863), False, 'import torch\n'), ((1973, 2007), 'skimage.exposure.equalize_adapthist', 'exposure.equalize_adapthist', (['image'], {}), '(image)\n', (2000, 2007), False, 'from skimage import exposure\n'), ((2184, 2204), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (2194, 2204), False, 'from PIL import Image\n'), ((6836, 6857), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6843, 6857), False, 'from torch import nn\n'), ((6950, 6987), 'torch.nn.Linear', 'nn.Linear', (['(classes + 2)', '(classes + 126)'], {}), '(classes + 2, classes + 126)\n', (6959, 6987), False, 'from torch import nn\n'), ((7036, 7057), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7043, 7057), False, 'from torch import nn\n'), ((7151, 7184), 'torch.nn.Linear', 'nn.Linear', (['(classes + 126)', 'classes'], {}), '(classes + 126, classes)\n', (7160, 7184), False, 'from torch import nn\n'), ((7991, 8015), 'torch.unsqueeze', 'torch.unsqueeze', (['x[i]', '(1)'], {}), '(x[i], 1)\n', (8006, 8015), False, 'import torch\n'), ((8059, 8083), 
'torch.unsqueeze', 'torch.unsqueeze', (['x[i]', '(1)'], {}), '(x[i], 1)\n', (8074, 8083), False, 'import torch\n'), ((11802, 11813), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11811, 11813), False, 'import os\n'), ((2050, 2083), 'skimage.exposure.rescale_intensity', 'exposure.rescale_intensity', (['image'], {}), '(image)\n', (2076, 2083), False, 'from skimage import exposure\n'), ((12909, 12928), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (12925, 12928), False, 'import torch\n'), ((11833, 11844), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11842, 11844), False, 'import os\n'), ((9007, 9024), 'torch.tensor', 'torch.tensor', (['age'], {}), '(age)\n', (9019, 9024), False, 'import torch\n'), ((9144, 9162), 'torch.tensor', 'torch.tensor', (['left'], {}), '(left)\n', (9156, 9162), False, 'import torch\n')] |
from __future__ import division
from __future__ import print_function
import sys, os
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
import pandas as pd
import numpy as np
# When GAM is trained with weight columns, scoring on the training frame run into error.
# I fixed this by not tracking weight columns in Java backend.
def link_functions_tweedie_vpow():
    """Regression test: weighted tweedie GAM scoring must ignore the weight column.

    Training uses a weight column W; scoring the training frame (which still
    carries W) must match scoring a copy without it.
    """
    np.random.seed(1234)
    n_rows = 10
    # Dict-literal evaluation order fixes the sequence of random draws.
    frame = pd.DataFrame({
        "X1": np.random.randn(n_rows),
        "X2": np.random.randn(n_rows),
        "X3": np.random.randn(n_rows),
        "W": np.random.choice([10, 20], size=n_rows),
        "Y": np.random.choice([0, 0, 0, 0, 0, 10, 20, 30], size=n_rows),
    })
    train = h2o.H2OFrame(frame)
    test = train.drop("W")
    print(train)
    gam = H2OGeneralizedAdditiveEstimator(family="tweedie",
                                          gam_columns=["X3"],
                                          weights_column="W",
                                          lambda_=0,
                                          tweedie_variance_power=1.5,
                                          tweedie_link_power=0)
    gam.train(x=["X1", "X2"], y="Y", training_frame=train)
    pred_weighted = gam.predict(train)
    pred_plain = gam.predict(test)  # scoring without weight column
    # Both predictions must agree.
    pyunit_utils.compare_frames_local(pred_weighted, pred_plain, prob=1, tol=1e-6)
if __name__ == "__main__":
pyunit_utils.standalone_test(link_functions_tweedie_vpow)
else:
link_functions_tweedie_vpow()
| [
"sys.path.insert",
"numpy.random.choice",
"h2o.estimators.gam.H2OGeneralizedAdditiveEstimator",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.randn",
"tests.pyunit_utils.standalone_test",
"tests.pyunit_utils.compare_frames_local"
] | [((85, 116), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../"""'], {}), "(1, '../../../')\n", (100, 116), False, 'import sys, os\n'), ((452, 472), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (466, 472), True, 'import numpy as np\n'), ((858, 1016), 'h2o.estimators.gam.H2OGeneralizedAdditiveEstimator', 'H2OGeneralizedAdditiveEstimator', ([], {'family': '"""tweedie"""', 'gam_columns': "['X3']", 'weights_column': '"""W"""', 'lambda_': '(0)', 'tweedie_variance_power': '(1.5)', 'tweedie_link_power': '(0)'}), "(family='tweedie', gam_columns=['X3'],\n weights_column='W', lambda_=0, tweedie_variance_power=1.5,\n tweedie_link_power=0)\n", (889, 1016), False, 'from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator\n'), ((1462, 1534), 'tests.pyunit_utils.compare_frames_local', 'pyunit_utils.compare_frames_local', (['predict_w', 'predict'], {'prob': '(1)', 'tol': '(1e-06)'}), '(predict_w, predict, prob=1, tol=1e-06)\n', (1495, 1534), False, 'from tests import pyunit_utils\n'), ((1567, 1624), 'tests.pyunit_utils.standalone_test', 'pyunit_utils.standalone_test', (['link_functions_tweedie_vpow'], {}), '(link_functions_tweedie_vpow)\n', (1595, 1624), False, 'from tests import pyunit_utils\n'), ((517, 540), 'numpy.random.randn', 'np.random.randn', (['n_rows'], {}), '(n_rows)\n', (532, 540), True, 'import numpy as np\n'), ((556, 579), 'numpy.random.randn', 'np.random.randn', (['n_rows'], {}), '(n_rows)\n', (571, 579), True, 'import numpy as np\n'), ((595, 618), 'numpy.random.randn', 'np.random.randn', (['n_rows'], {}), '(n_rows)\n', (610, 618), True, 'import numpy as np\n'), ((633, 672), 'numpy.random.choice', 'np.random.choice', (['[10, 20]'], {'size': 'n_rows'}), '([10, 20], size=n_rows)\n', (649, 672), True, 'import numpy as np\n'), ((687, 745), 'numpy.random.choice', 'np.random.choice', (['[0, 0, 0, 0, 0, 10, 20, 30]'], {'size': 'n_rows'}), '([0, 0, 0, 0, 0, 10, 20, 30], size=n_rows)\n', (703, 745), True, 'import numpy as np\n'), 
((778, 796), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (790, 796), True, 'import pandas as pd\n')] |
import csv
import pathlib
import pickle
import os
from collections.abc import Iterable
import numpy as np
import pandas as pd
import scipy.stats as stats
def processed_expression_table(df):
    """Collapse duplicate gene rows by averaging their expression values.

    Names the index 'genes' (mutates *df*) and returns a new DataFrame with
    one row per unique gene.
    """
    df.index.name = 'genes'
    grouped = df.groupby('genes')
    return grouped.mean()
def expression_ranks(expression_table_df, ascending, rank_method='max'):
    """Rank each sample's expression values column-wise.

    :param ascending: rank direction (True: lowest value gets rank 1)
    :param rank_method: tie-breaking strategy passed to pandas ``rank``
    """
    return expression_table_df.rank(ascending=ascending, method=rank_method)
# bg_genes: df of samples with background gene count
def bg_genes(expression_ranks_df):
    """Per-sample count of non-null genes (the background size for the 2x2 tables).

    :return: Series indexed by sample.
    """
    return expression_ranks_df.count()
def pathway_ranks(pathway_genes, expression_ranks_df, rank_method):
    """Re-rank only the pathway's genes, dropping genes absent from all samples."""
    subset = expression_ranks_df.reindex(pathway_genes)
    return subset.rank(method=rank_method).dropna(how='all')
def effective_pathway(pathway_ranks_df):
    """Effective pathway size per sample: the highest in-pathway rank."""
    return pathway_ranks_df.max()
def b(expression_ranks_df, pathway_ranks_df):
    """Cell b of the 2x2 table: background rank minus in-pathway rank."""
    diff = expression_ranks_df.subtract(pathway_ranks_df)
    return diff.dropna(how='all')
def c(effective_pathway_series, pathway_ranks_df):
    """Cell c of the 2x2 table: pathway genes ranked above this gene."""
    return pathway_ranks_df.rsub(effective_pathway_series)
def d(bg_series, pathway_ranks_df, b_df, c_df):
    """Cell d of the 2x2 table: genes neither in the pathway nor ranked above."""
    remainder = (bg_series - pathway_ranks_df).subtract(b_df)
    return remainder.subtract(c_df)
def sample_2x2(pathway_ranks_dict, b_dict, c_dict, d_dict):
    """Assemble per-sample, per-gene 2x2 contingency tables.

    Each cell of the returned DataFrame (genes x samples) holds
    ``[[a, b], [c, d]]`` where ``a`` is the in-pathway rank and b/c/d come
    from the matching entries of the other dicts.
    """
    tables = {}
    for sample, gene_ranks in pathway_ranks_dict.items():
        per_gene = {}
        for gene, a_val in gene_ranks.items():
            per_gene[gene] = [
                [a_val, b_dict[sample][gene]],
                [c_dict[sample][gene], d_dict[sample][gene]],
            ]
        tables[sample] = per_gene
    return pd.DataFrame(tables)
def clean_fisher_exact(table):
    """One-sided (greater) Fisher exact p-value for a 2x2 table.

    :param table: 2x2 nested list/array of counts
    :return: the p-value, or ``np.nan`` when the table contains NaNs or is
        otherwise invalid (e.g. negative counts).  Previously an invalid
        table printed and implicitly returned ``None``, which poisoned
        downstream vectorized math; it now returns ``np.nan`` consistently.
    """
    try:
        if np.isnan(table).any():
            return np.nan
        return stats.fisher_exact(table, alternative='greater')[1]
    except ValueError:
        # Log the offending table for debugging, then mark it missing.
        print(table)
        return np.nan
def p_values(sample_2x2_df):
    """Apply Fisher's exact test element-wise to the 2x2-table DataFrame."""
    fisher = np.vectorize(clean_fisher_exact)
    return sample_2x2_df.apply(fisher)
def neg_log(table):
    """Transform p-values into -log(p) scores (larger = more significant)."""
    return np.log(table) * -1
def harmonic_average(iterable):
    """Harmonic mean of the non-NaN values.

    Returns 0 if any value is exactly 0, and NaN when no usable values
    remain (e.g. empty input).
    """
    if 0 in iterable:
        return 0
    reciprocals = [1 / v for v in iterable if not np.isnan(v)]
    total = sum(reciprocals)
    if total == 0:
        return np.nan
    return len(reciprocals) / total
def geometric_average(iterable):
    """Geometric mean of the non-NaN values; NaN when nothing remains."""
    try:
        values = [v for v in iterable if not np.isnan(v)]
        if len(values) == 0:
            return np.nan
        return np.exp(np.sum(np.log(values)) / len(values))
    except ZeroDivisionError:
        # Defensive guard retained from the original implementation.
        return 0
def user_pw_metadata_f(pw_data, output_dir_path):
    """Write user-supplied pathway metadata to <output_dir>/user_pathways.tsv.

    :param pw_data: dict mapping pathway name -> metadata dict
    :param output_dir_path: directory the TSV is written into
    """
    destination = '{}/user_pathways.tsv'.format(output_dir_path)
    metadata = pd.DataFrame.from_dict(pw_data, orient='index')
    metadata.to_csv(destination, sep='\t')
def pw_metadata_f(pw_db_choice, output_dir_path):
    """Export a bundled pathway database's metadata to <output_dir>/<db>.tsv.

    :param pw_db_choice: name of a bundled database (e.g. 'kegg')
    :param output_dir_path: directory the TSV is written into
    """
    output_loc = '{}/{}.tsv'.format(output_dir_path, pw_db_choice)
    # Use a context manager so the pickle file handle is closed; the
    # original `pickle.load(open(...))` leaked the handle.
    with open('databases/metadata/{}.pkl'.format(pw_db_choice), 'rb') as f:
        pw_data = pickle.load(f)
    pw_data.to_csv(output_loc, sep='\t')
def output_dir(output_dir_path):
    """Create the output directory (and any parents) if it does not exist."""
    target = pathlib.Path(output_dir_path)
    target.mkdir(parents=True, exist_ok=True)
def user_pathways(f):
    """Parse a user pathway CSV: each row is ``pathway, db, gene1, gene2, ...``.

    :param f: path to the CSV file
    :return: tuple of (pathway -> gene set, pathway -> {'db', 'count'} metadata)
    """
    pathway_db = {}
    pw_data = {}
    with open(f, 'r') as csv_in:
        for row in csv.reader(csv_in):
            name, source, genes = row[0], row[1], set(row[2:])
            pathway_db[name] = genes
            pw_data[name] = {
                'db': source,
                'count': len(genes)
            }
    return pathway_db, pw_data
def validate_db_name(db_name):
    """Check *db_name* (case-insensitive) against the bundled databases.

    :return: True when recognized
    :raises ValueError: for any unrecognized name
    """
    available_dbs = ['kegg', 'hallmark', 'reactome', 'hmdb_smpdb']
    if db_name.lower() in available_dbs:
        return True
    raise ValueError(
        "{} not recognized. Available dbs: {}".format(db_name, ",".join(available_dbs))
    )
def db_pathways_dict(db_name):
    """Load a bundled pathway database (pathway -> genes) from its pickle.

    :param db_name: one of the names accepted by :func:`validate_db_name`
    """
    validate_db_name(db_name)
    # Resolve the pickle relative to this module, not the caller's cwd.
    db_parent = os.path.dirname(os.path.abspath(__file__))
    pickle_path = '{}/databases/{}.pkl'.format(db_parent, db_name.lower())
    with open(pickle_path, 'rb') as f:
        return pickle.load(f)
def validate_pathways(pw_dict):
    """Ensure pathways are a dict mapping names to iterables of genes.

    :return: True when valid
    :raises TypeError: for any other shape
    """
    error_msg = "Pathways should be a dictionary of lists or sets"
    if not isinstance(pw_dict, dict):
        raise TypeError(error_msg)
    for gene_list in pw_dict.values():
        if not isinstance(gene_list, Iterable):
            raise TypeError(error_msg)
    return True
def all(
        expression_table,
        pathways=None,
        db='kegg',
        geometric=True,
        min_p_val=True,
        ascending=True,
        rank_method='max'
):
    """Run single-sample pathway analysis, returning -log(p) score tables.

    Always computes the harmonic-mean summary; the geometric-mean and
    min-p summaries are computed when their flags are set.

    NOTE(review): the function name shadows the builtin ``all``, and the
    ``geometric`` / ``min_p_val`` parameters shadow the module-level
    functions of the same names — consider renaming.

    :param expression_table: genes x samples expression DataFrame
    :param pathways: optional dict of pathway -> gene iterable; when falsy,
        the bundled database named by *db* is loaded instead
    :param db: bundled database name (see validate_db_name)
    :param geometric: also compute geometric-mean scores
    :param min_p_val: also compute min-p scores
    :param ascending: rank direction for expression values
    :param rank_method: tie-breaking method for pandas rank()
    :return: dict with keys 'harmonic', 'geometric', 'min_p_val'; each value
        is a pathways x samples DataFrame, or None when not requested
    """
    if not pathways:
        pathways = db_pathways_dict(db)
    else:
        validate_pathways(pathways)
    # Pre-size result slots so each pathway writes to a fixed index.
    harmonic_averages = [None] * len(pathways)
    geometric_averages = []
    min_p_vals = []
    if geometric:
        geometric_averages = [None] * len(pathways)
    if min_p_val:
        min_p_vals = [None] * len(pathways)
    expression_table_df = processed_expression_table(expression_table)
    expression_ranks_df = expression_ranks(expression_table_df, ascending=ascending, rank_method=rank_method)
    bg_genes_df = bg_genes(expression_ranks_df)
    sample_order = expression_table_df.columns
    # perform analysis for each pathway
    for i, pathway in enumerate(pathways):
        print('starting: {}'.format(pathway))
        # Build the per-gene 2x2 contingency tables for this pathway.
        pathway_ranks_df = pathway_ranks(pathways[pathway], expression_ranks_df, rank_method=rank_method)
        effective_pathway_df = effective_pathway(pathway_ranks_df)
        b_df = b(expression_ranks_df, pathway_ranks_df)
        c_df = c(effective_pathway_df, pathway_ranks_df)
        d_df = d(bg_genes_df, pathway_ranks_df, b_df, c_df)
        sample_2x2_df = sample_2x2(
            pathway_ranks_df.to_dict(),
            b_df.to_dict(),
            c_df.to_dict(),
            d_df.to_dict()
        )
        p_values_df = p_values(sample_2x2_df)
        # Harmonic averaging is default
        harmonic_averages_series = neg_log(p_values_df.apply(harmonic_average).loc[sample_order])
        harmonic_averages_series.name = pathway
        harmonic_averages[i] = harmonic_averages_series
        if geometric:
            geometric_averages_series = neg_log(p_values_df.apply(geometric_average).loc[sample_order])
            geometric_averages_series.name = pathway
            geometric_averages[i] = geometric_averages_series
        if min_p_val:
            min_p_vals_series = neg_log(p_values_df.min().loc[sample_order])
            min_p_vals_series.name = pathway
            min_p_vals[i] = min_p_vals_series
        print('finished: {}'.format(pathway))
    # Stack per-pathway Series into pathways x samples DataFrames.
    harmonic_averages_df = pd.concat(harmonic_averages, axis=1).T
    if geometric:
        geometric_averages_df = pd.concat(geometric_averages, axis=1).T
    else:
        geometric_averages_df = None
    if min_p_val:
        min_p_vals_df = pd.concat(min_p_vals, axis=1).T
    else:
        min_p_vals_df = None
    return {
        'harmonic': harmonic_averages_df,
        'geometric': geometric_averages_df,
        'min_p_val': min_p_vals_df
    }
def pa_stats(
    expression_table,
    mode='harmonic',
    pathways=None,
    db='kegg',
    ascending=True,
    rank_method='max'
):
    """Run the pathway analysis and return one score per pathway and sample.

    :param expression_table: raw expression table handed to
        ``processed_expression_table``.
    :param mode: averaging strategy for the per-sample p-values; one of
        'harmonic', 'geometric' or 'min'.
    :param pathways: optional mapping of pathway name -> genes; when falsy,
        pathways are loaded from ``db``.
    :param db: pathway database identifier used when ``pathways`` is not given.
    :param ascending: rank direction forwarded to ``expression_ranks``.
    :param rank_method: tie-breaking method forwarded to the ranking helpers.
    :return: DataFrame with one row per pathway and one column per sample
        holding the -log of the averaged p-values.
    :raises ValueError: if ``mode`` is not a supported strategy.
    """
    # Fail fast on an unsupported mode instead of hitting an
    # UnboundLocalError deep inside the per-pathway loop.
    if mode not in ('harmonic', 'geometric', 'min'):
        raise ValueError(
            "mode must be 'harmonic', 'geometric' or 'min', got {!r}".format(mode))
    if not pathways:
        pathways = db_pathways_dict(db)
    else:
        validate_pathways(pathways)
    averages = [None] * len(pathways)
    expression_table_df = processed_expression_table(expression_table)
    expression_ranks_df = expression_ranks(expression_table_df, ascending=ascending, rank_method=rank_method)
    bg_genes_df = bg_genes(expression_ranks_df)
    sample_order = expression_table_df.columns
    # perform analysis for each pathway
    for i, pathway in enumerate(pathways):
        pathway_ranks_df = pathway_ranks(pathways[pathway], expression_ranks_df, rank_method=rank_method)
        effective_pathway_df = effective_pathway(pathway_ranks_df)
        b_df = b(expression_ranks_df, pathway_ranks_df)
        c_df = c(effective_pathway_df, pathway_ranks_df)
        d_df = d(bg_genes_df, pathway_ranks_df, b_df, c_df)
        sample_2x2_df = sample_2x2(
            pathway_ranks_df.to_dict(),
            b_df.to_dict(),
            c_df.to_dict(),
            d_df.to_dict()
        )
        p_values_df = p_values(sample_2x2_df)
        # Collapse the per-gene p-values of each sample into a single score.
        if mode == 'geometric':
            averages_series = neg_log(p_values_df.apply(geometric_average).loc[sample_order])
        elif mode == 'harmonic':
            averages_series = neg_log(p_values_df.apply(harmonic_average).loc[sample_order])
        else:  # mode == 'min', guaranteed by the check above
            averages_series = neg_log(p_values_df.min().loc[sample_order])
        averages_series.name = pathway
        averages[i] = averages_series
    # Transpose so pathways become rows and samples become columns.
    averages_df = pd.concat(averages, axis=1).T
    return averages_df
# Try doing this with a decorator
def harmonic(
    expression_table,
    pathways=None,
    db='kegg',
    ascending=True,
    rank_method='max'
):
    """Convenience wrapper: run ``pa_stats`` with harmonic p-value averaging."""
    return pa_stats(
        expression_table,
        mode='harmonic',
        pathways=pathways,
        db=db,
        ascending=ascending,
        rank_method=rank_method,
    )
def geometric(
    expression_table,
    pathways=None,
    db='kegg',
    ascending=True,
    rank_method='max'
):
    """Convenience wrapper: run ``pa_stats`` with geometric p-value averaging."""
    return pa_stats(
        expression_table,
        mode='geometric',
        pathways=pathways,
        db=db,
        ascending=ascending,
        rank_method=rank_method,
    )
def min_p_val(
    expression_table,
    pathways=None,
    db='kegg',
    ascending=True,
    rank_method='max'
):
    """Convenience wrapper: run ``pa_stats`` using the minimum p-value."""
    return pa_stats(
        expression_table,
        mode='min',
        pathways=pathways,
        db=db,
        ascending=ascending,
        rank_method=rank_method,
    )
| [
"pathlib.Path",
"scipy.stats.fisher_exact",
"numpy.log",
"pickle.load",
"os.path.abspath",
"pandas.DataFrame.from_dict",
"pandas.concat",
"numpy.isnan",
"csv.reader",
"pandas.DataFrame",
"numpy.vectorize"
] | [((1490, 1514), 'pandas.DataFrame', 'pd.DataFrame', (['final_dict'], {}), '(final_dict)\n', (1502, 1514), True, 'import pandas as pd\n'), ((1808, 1840), 'numpy.vectorize', 'np.vectorize', (['clean_fisher_exact'], {}), '(clean_fisher_exact)\n', (1820, 1840), True, 'import numpy as np\n'), ((1876, 1889), 'numpy.log', 'np.log', (['table'], {}), '(table)\n', (1882, 1889), True, 'import numpy as np\n'), ((3125, 3143), 'csv.reader', 'csv.reader', (['csv_in'], {}), '(csv_in)\n', (3135, 3143), False, 'import csv\n'), ((3806, 3831), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3821, 3831), False, 'import os\n'), ((3936, 3950), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3947, 3950), False, 'import pickle\n'), ((6531, 6567), 'pandas.concat', 'pd.concat', (['harmonic_averages'], {'axis': '(1)'}), '(harmonic_averages, axis=1)\n', (6540, 6567), True, 'import pandas as pd\n'), ((8666, 8693), 'pandas.concat', 'pd.concat', (['averages'], {'axis': '(1)'}), '(averages, axis=1)\n', (8675, 8693), True, 'import pandas as pd\n'), ((2585, 2632), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['pw_data'], {'orient': '"""index"""'}), "(pw_data, orient='index')\n", (2607, 2632), True, 'import pandas as pd\n'), ((2949, 2978), 'pathlib.Path', 'pathlib.Path', (['output_dir_path'], {}), '(output_dir_path)\n', (2961, 2978), False, 'import pathlib\n'), ((6621, 6658), 'pandas.concat', 'pd.concat', (['geometric_averages'], {'axis': '(1)'}), '(geometric_averages, axis=1)\n', (6630, 6658), True, 'import pandas as pd\n'), ((6751, 6780), 'pandas.concat', 'pd.concat', (['min_p_vals'], {'axis': '(1)'}), '(min_p_vals, axis=1)\n', (6760, 6780), True, 'import pandas as pd\n'), ((1568, 1583), 'numpy.isnan', 'np.isnan', (['table'], {}), '(table)\n', (1576, 1583), True, 'import numpy as np\n'), ((1650, 1698), 'scipy.stats.fisher_exact', 'stats.fisher_exact', (['table'], {'alternative': '"""greater"""'}), "(table, alternative='greater')\n", (1668, 1698), 
True, 'import scipy.stats as stats\n'), ((2019, 2031), 'numpy.isnan', 'np.isnan', (['el'], {}), '(el)\n', (2027, 2031), True, 'import numpy as np\n'), ((2266, 2278), 'numpy.isnan', 'np.isnan', (['el'], {}), '(el)\n', (2274, 2278), True, 'import numpy as np\n'), ((2371, 2393), 'numpy.log', 'np.log', (['clean_iterable'], {}), '(clean_iterable)\n', (2377, 2393), True, 'import numpy as np\n')] |
from pso import pso
from optitestfuns import ackley
import unittest
from numpy import isclose, array
'''Tests for the nD PSO implementation.
To run it please execute the following command in your terminal or cmd
python -m unittest test_pso.py
'''
class PSOfunctionMethodTests(unittest.TestCase):
    """Convergence tests for the n-dimensional PSO solver on the Ackley benchmark."""

    def test_pso1D(self):
        """1D continuous search should converge to the global minimum at 0."""
        integer_vars = []
        res = pso(ackley, [-5], [5], integer_vars)
        expected_min = array([0])
        print(res.exit)
        print('x_opt: {}'.format(res.xopt))
        print('FO: {:2e}'.format(res.FO))
        assert isclose(res.xopt[0], expected_min, atol=1e-3), "ERROR: variable didn't converged to 0"

    def test_pso1Dinteger(self):
        """1D integer-constrained search converges to 0 and yields an integer."""
        integer_vars = [0]
        res = pso(ackley, [-5], [5], integer_vars)
        expected_min = array([0])
        print(res.exit)
        print('x_opt: {}'.format(res.xopt))
        print('FO: {:2e}'.format(res.FO))
        assert isclose(res.xopt[0], expected_min, atol=1e-3), "ERROR: variable didn't converged to 0"
        assert float(res.xopt[0]).is_integer(), "ERROR: variable obtained wasn't an integer"

    def test_pso2D(self):
        """2D continuous search should converge to the global minimum at (0, 0)."""
        integer_vars = []
        res = pso(ackley, [-5, -5], [5, 5], integer_vars)
        expected_min = array([0])
        print(res.exit)
        print('x_opt: {}'.format(res.xopt))
        print('FO: {:2e}'.format(res.FO))
        assert isclose(res.xopt[0], expected_min, atol=1e-3), "ERROR: first variable didn't converged to 0"
        assert isclose(res.xopt[1], expected_min, atol=1e-3), "ERROR: second variable didn't converged to 0"

    # NOTE(review): a 2D integer-constrained test (test_pso2Dinteger) was
    # previously written here but left commented out — presumably flaky.
    # Re-add it once the integer handling converges reliably in 2D.
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"numpy.array",
"numpy.isclose",
"pso.pso"
] | [((2284, 2299), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2297, 2299), False, 'import unittest\n'), ((362, 392), 'pso.pso', 'pso', (['ackley', '[-5]', '[5]', 'intVar'], {}), '(ackley, [-5], [5], intVar)\n', (365, 392), False, 'from pso import pso\n'), ((413, 423), 'numpy.array', 'array', (['[0]'], {}), '([0])\n', (418, 423), False, 'from numpy import isclose, array\n'), ((560, 605), 'numpy.isclose', 'isclose', (['result.xopt[0]', 'theo_min'], {'atol': '(0.001)'}), '(result.xopt[0], theo_min, atol=0.001)\n', (567, 605), False, 'from numpy import isclose, array\n'), ((718, 748), 'pso.pso', 'pso', (['ackley', '[-5]', '[5]', 'intVar'], {}), '(ackley, [-5], [5], intVar)\n', (721, 748), False, 'from pso import pso\n'), ((769, 779), 'numpy.array', 'array', (['[0]'], {}), '([0])\n', (774, 779), False, 'from numpy import isclose, array\n'), ((916, 961), 'numpy.isclose', 'isclose', (['result.xopt[0]', 'theo_min'], {'atol': '(0.001)'}), '(result.xopt[0], theo_min, atol=0.001)\n', (923, 961), False, 'from numpy import isclose, array\n'), ((1163, 1200), 'pso.pso', 'pso', (['ackley', '[-5, -5]', '[5, 5]', 'intVar'], {}), '(ackley, [-5, -5], [5, 5], intVar)\n', (1166, 1200), False, 'from pso import pso\n'), ((1219, 1229), 'numpy.array', 'array', (['[0]'], {}), '([0])\n', (1224, 1229), False, 'from numpy import isclose, array\n'), ((1366, 1411), 'numpy.isclose', 'isclose', (['result.xopt[0]', 'theo_min'], {'atol': '(0.001)'}), '(result.xopt[0], theo_min, atol=0.001)\n', (1373, 1411), False, 'from numpy import isclose, array\n'), ((1473, 1518), 'numpy.isclose', 'isclose', (['result.xopt[1]', 'theo_min'], {'atol': '(0.001)'}), '(result.xopt[1], theo_min, atol=0.001)\n', (1480, 1518), False, 'from numpy import isclose, array\n')] |
# MIT License
#
# Copyright (c) 2017-2018 Udacity, Inc
# Copyright (c) Modifications 2018, 2019 <NAME> (pablo.rodriguez-palafox [at] tum.de)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
import random
import numpy as np
import os.path
import scipy.misc
import shutil
import zipfile
import time
import tensorflow as tf
import cv2
from glob import glob
from urllib.request import urlretrieve
from distutils.version import LooseVersion
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from datetime import datetime
class DLProgress(tqdm):
    """tqdm progress bar usable as a ``urllib.request.urlretrieve`` reporthook.

    ``urlretrieve`` reports the cumulative block count on each call; tqdm
    wants the delta, so the previously reported count is remembered.
    """
    last_block = 0  # block count reported on the previous hook call

    def hook(self, block_num=1, block_size=1, total_size=None):
        """Report hook matching urlretrieve's (count, block size, total) signature."""
        self.total = total_size
        delta_blocks = block_num - self.last_block
        self.update(delta_blocks * block_size)
        self.last_block = block_num
def check_compatibility():
    """Assert TensorFlow >= 1.0 is installed and warn when no GPU is visible."""
    tf_version = tf.__version__
    assert LooseVersion(tf_version) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf_version)
    print('TensorFlow Version: {}'.format(tf_version))

    # gpu_device_name() returns an empty string when no GPU is available.
    if tf.test.gpu_device_name():
        print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
    else:
        print('No GPU found. Please use a GPU to train your neural network.')
def maybe_download_pretrained_vgg(data_dir):
    """
    Download and extract the pretrained VGG model if it doesn't exist yet.

    Checks for the expected checkpoint files under ``<data_dir>/vgg``; when any
    are missing the directory is wiped, the archive is downloaded with a
    progress bar, extracted, and the zip removed to save space.

    :param data_dir: Directory to download the model to
    """
    vgg_filename = 'vgg.zip'
    vgg_path = os.path.join(data_dir, 'vgg')
    vgg_files = [
        os.path.join(vgg_path, 'variables/variables.data-00000-of-00001'),
        os.path.join(vgg_path, 'variables/variables.index'),
        os.path.join(vgg_path, 'saved_model.pb')]

    missing_vgg_files = [vgg_file for vgg_file in vgg_files if not os.path.exists(vgg_file)]
    if missing_vgg_files:
        # Clean vgg dir so we never mix a partial download with new files
        if os.path.exists(vgg_path):
            shutil.rmtree(vgg_path)
        os.makedirs(vgg_path)

        # Download vgg
        print('Downloading pre-trained vgg model...')
        with DLProgress(unit='B', unit_scale=True, miniters=1) as pbar:
            urlretrieve(
                'https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/vgg.zip',
                os.path.join(vgg_path, vgg_filename),
                pbar.hook)

        # Extract vgg.  The context manager guarantees the archive handle is
        # closed even if extraction fails (the original leaked it on error).
        print('Extracting model...')
        with zipfile.ZipFile(os.path.join(vgg_path, vgg_filename), 'r') as zip_ref:
            zip_ref.extractall(data_dir)

        # Remove zip file to save space
        os.remove(os.path.join(vgg_path, vgg_filename))
def img_size(img):
    """Return the (height, width) of an image array."""
    height, width = img.shape[0], img.shape[1]
    return (height, width)
def random_crop(img, gt):
    """Cut the same random window (2:1 aspect ratio) out of an image and its labels."""
    h, w = img_size(img)
    crop_w = random.randint(768, w - 2)   # random crop width
    crop_h = int(crop_w / 2)             # keep a 2:1 aspect ratio
    x1 = random.randint(0, w - crop_w)    # random top-left corner
    y1 = random.randint(0, h - crop_h)
    return img[y1:(y1 + crop_h), x1:(x1 + crop_w), :], gt[y1:(y1 + crop_h), x1:(x1 + crop_w)]
def bc_img(img, s=1.0, m=0.0):
    """Adjust contrast and brightness: ``clip(img * s + m, 0, 255)`` as uint8.

    :param img: uint8 image array
    :param s: contrast (multiplicative) factor
    :param m: brightness (additive) offset
    :return: adjusted uint8 image of the same shape
    """
    # ``np.int`` was removed in NumPy 1.24, so the original astype(np.int)
    # crashes on modern NumPy; use an explicit width instead.
    scaled = img.astype(np.int64) * s + m
    return np.clip(scaled, 0, 255).astype(np.uint8)
def get_files_paths(gt_dir, imgs_dir):
    """
    Collect the sorted ground-truth and image file paths across all cities.

    City sub-folders are discovered from ``imgs_dir``; for each one the
    '*_gtFine_labelIds.png' labels and '*.png' images are globbed.
    """
    gt_paths = []
    img_paths = []
    for city in os.listdir(imgs_dir):
        gt_paths.extend(glob(os.path.join(gt_dir, city, "*_gtFine_labelIds.png")))
        img_paths.extend(glob(os.path.join(imgs_dir, city, "*.png")))
    return sorted(gt_paths), sorted(img_paths)
def get_num_imgs_in_folder(imgs_dir):
    """
    Sum the number of images contained in each city sub-folder of imgs_dir.
    """
    city_dirs = (os.path.join(imgs_dir, city) for city in os.listdir(imgs_dir))
    return sum(len(os.listdir(city_dir)) for city_dir in city_dirs)
def prepare_ground_truth(dataset, img, num_classes, mode='train'):
    """
    Build a one-hot (H, W, num_classes) ground-truth tensor from a label-id image.

    Channel layout: 0 = road (Cityscapes id 7), 1 = fence/construction,
    2 = everything else (so ``num_classes`` is expected to be 3).

    :param dataset: dataset name; 'city*' (Cityscapes) or 'robo*'
    :param img: 2D array of label ids
    :param num_classes: number of output channels
    :param mode: 'train' or 'test'; only meaningful for Cityscapes
    :return: float32 array of shape (H, W, num_classes)
    :raises ValueError: for an unknown dataset or Cityscapes mode
        (previously this surfaced as a confusing NameError on fence_mask)
    """
    new_image = np.zeros((img.shape[0], img.shape[1], num_classes))

    # road
    road_mask = img == 7

    # Depending on the dataset, the ``fence_mask`` is generated differently
    if dataset.startswith('city'):
        if mode == 'train':
            # construction[building, wall, fence, guard_rail, bridge, tunnel]
            fence_mask = np.logical_or.reduce((img == 11, img == 12, img == 13,
                                                 img == 14, img == 15, img == 16))
        elif mode == 'test':
            fence_mask = img == 13
        else:
            raise ValueError("mode must be 'train' or 'test' for cityscapes, got {!r}".format(mode))
    elif dataset.startswith('robo'):
        fence_mask = img == 13
    else:
        raise ValueError("unknown dataset {!r}".format(dataset))

    # everything else
    else_mask = np.logical_not(np.logical_or.reduce((road_mask, fence_mask)))

    new_image[:, :, 0] = road_mask
    new_image[:, :, 1] = fence_mask
    new_image[:, :, 2] = else_mask

    return new_image.astype(np.float32)
def gen_batch_function(train_gt_dir, train_imgs_dir,
                       val_gt_dir, val_imgs_dir,
                       test_gt_dir, test_imgs_dir,
                       image_shape, dataset):
    """
    Generate function to create batches of training data

    :param train_gt_dir: directory with training ground-truth label images
    :param train_imgs_dir: directory with training input images
    :param val_gt_dir: directory with validation ground-truth label images
    :param val_imgs_dir: directory with validation input images
    :param test_gt_dir: test ground-truth directory (captured but unused here)
    :param test_imgs_dir: test image directory (captured but unused here)
    :param image_shape: (height, width) every yielded image is resized to
    :param dataset: dataset name forwarded to prepare_ground_truth
    :return: a generator function yielding (images, ground_truths) batches
    """
    def get_batches_fn(batch_size=1, mode='train', num_classes=3, print_flag=False):
        """
        Create batches of training data
        :param batch_size: Batch Size
        :param mode: 'train' (shuffled, with augmentation) or 'val' (shuffled, no augmentation)
        :param num_classes: number of one-hot ground-truth channels
        :param print_flag: when True, display each image/ground-truth pair
        :return: Batches of training data
        """
        if mode == 'train':
            # Get only the path of the imgs. Ground truth images' paths will be obtained later
            _, imgs_paths = get_files_paths(train_gt_dir, train_imgs_dir)
            #background_color = np.array([255, 0, 0])
            #road_color = np.array([128, 64, 128, 255])
            #car_color = np.array([0, 0, 142, 255])
            random.shuffle(imgs_paths)
            for batch_i in range(0, len(imgs_paths), batch_size):
                images = []
                gt_images = []
                for image_file in imgs_paths[batch_i:batch_i+batch_size]:
                    # Get gt_image_file by first finding the city name and then renaming the basename of image_file
                    city = os.path.basename(image_file).partition("_")[0]
                    gt_type = 'gtFine_labelIds.png'
                    # Cityscapes basenames end in '<city>_..._leftImg8bit.png' (15 chars dropped)
                    gt_image_file = os.path.join(train_gt_dir, city, os.path.basename(image_file)[:-15]+gt_type)
                    # Read images and groundtruth images
                    image = scipy.misc.imread(image_file)
                    gt_image = scipy.misc.imread(gt_image_file)
                    # Show images and gt_images as they are
                    if print_flag:
                        plt.figure(figsize=(16, 8))
                        plt.subplot(2,2,1)
                        plt.imshow(image)
                        plt.subplot(2,2,2)
                        plt.imshow(gt_image)
                    #####################################################
                    # AUGMENTATION (train mode only)                    #
                    # Random crop augmentation: the same window is cut from
                    # both the image and its labels so they stay aligned.
                    image, gt_image = random_crop(image, gt_image)
                    image = scipy.misc.imresize(image, image_shape)
                    gt_image = scipy.misc.imresize(gt_image, image_shape)
                    # Contrast augmentation
                    contr = random.uniform(0.85, 1.15)
                    # Brightness augmentation
                    bright = random.randint(-40, 30)
                    image = bc_img(image, contr, bright)
                    #####################################################
                    #####################################################
                    # PREPARE GROUND TRUTH (one-hot encode the label ids)
                    gt_image = prepare_ground_truth(dataset, gt_image, num_classes)
                    #####################################################
                    images.append(image)
                    gt_images.append(gt_image)
                # Optionally show the last augmented pair of the batch
                if print_flag:
                    plt.subplot(2,2,3)
                    plt.imshow(image)
                    plt.subplot(2,2,4)
                    gt_image = scipy.misc.imresize(gt_image, image_shape)
                    plt.imshow(gt_image)
                    plt.show()
                yield np.array(images), np.array(gt_images)
        elif mode == 'val':
            # Validation: same pipeline but without crop/contrast/brightness augmentation
            _, imgs_paths = get_files_paths(val_gt_dir, val_imgs_dir)
            #background_color = np.array([255, 0, 0])
            #road_color = np.array([128, 64, 128, 255])
            #car_color = np.array([0, 0, 142, 255])
            random.shuffle(imgs_paths)
            for batch_i in range(0, len(imgs_paths), batch_size):
                images = []
                gt_images = []
                for image_file in imgs_paths[batch_i:batch_i+batch_size]:
                    # Get gt_image_file by first finding the city name and then renaming the basename of image_file
                    city = os.path.basename(image_file).partition("_")[0]
                    gt_type = 'gtFine_labelIds.png'
                    gt_image_file = os.path.join(val_gt_dir, city, os.path.basename(image_file)[:-15]+gt_type)
                    # Read images and groundtruth images, resizing directly (no crop)
                    image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
                    gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)
                    # Show images and gt_images as they are
                    if print_flag:
                        plt.figure(figsize=(16, 8))
                        plt.subplot(2,2,1)
                        plt.imshow(image)
                        plt.subplot(2,2,2)
                        plt.imshow(gt_image)
                    #####################################################
                    # PREPARE GROUND TRUTH (one-hot encode the label ids)
                    gt_image = prepare_ground_truth(dataset, gt_image, num_classes)
                    #####################################################
                    images.append(image)
                    gt_images.append(gt_image)
                # Optionally show the last pair of the batch
                if print_flag:
                    plt.subplot(2,2,3)
                    plt.imshow(image)
                    plt.subplot(2,2,4)
                    gt_image = scipy.misc.imresize(gt_image, image_shape)
                    plt.imshow(gt_image)
                    plt.show()
                yield np.array(images), np.array(gt_images)
return get_batches_fn | [
"matplotlib.pyplot.imshow",
"random.uniform",
"random.shuffle",
"matplotlib.pyplot.subplot",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"shutil.rmtree",
"distutils.version.LooseVersion",
"tensorflow.test.gpu_device_name",
"random.randint",
"numpy.logical_or.reduce",
"matplotli... | [((3742, 3768), 'random.randint', 'random.randint', (['(768)', '(w - 2)'], {}), '(768, w - 2)\n', (3756, 3768), False, 'import random\n'), ((3846, 3871), 'random.randint', 'random.randint', (['(0)', '(w - nw)'], {}), '(0, w - nw)\n', (3860, 3871), False, 'import random\n'), ((3907, 3932), 'random.randint', 'random.randint', (['(0)', '(h - nh)'], {}), '(0, h - nh)\n', (3921, 3932), False, 'import random\n'), ((5090, 5141), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1], num_classes)'], {}), '((img.shape[0], img.shape[1], num_classes))\n', (5098, 5141), True, 'import numpy as np\n'), ((1870, 1898), 'distutils.version.LooseVersion', 'LooseVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (1882, 1898), False, 'from distutils.version import LooseVersion\n'), ((1902, 1921), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.0"""'], {}), "('1.0')\n", (1914, 1921), False, 'from distutils.version import LooseVersion\n'), ((2085, 2110), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (2108, 2110), True, 'import tensorflow as tf\n'), ((5751, 5796), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(road_mask, fence_mask)'], {}), '((road_mask, fence_mask))\n', (5771, 5796), True, 'import numpy as np\n'), ((2930, 2953), 'shutil.rmtree', 'shutil.rmtree', (['vgg_path'], {}), '(vgg_path)\n', (2943, 2953), False, 'import shutil\n'), ((5427, 5519), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(img == 11, img == 12, img == 13, img == 14, img == 15, img == 16)'], {}), '((img == 11, img == 12, img == 13, img == 14, img == 15,\n img == 16))\n', (5447, 5519), True, 'import numpy as np\n'), ((6820, 6846), 'random.shuffle', 'random.shuffle', (['imgs_paths'], {}), '(imgs_paths)\n', (6834, 6846), False, 'import random\n'), ((2246, 2271), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (2269, 2271), True, 'import tensorflow as tf\n'), ((9658, 9684), 'random.shuffle', 
'random.shuffle', (['imgs_paths'], {}), '(imgs_paths)\n', (9672, 9684), False, 'import random\n'), ((8373, 8399), 'random.uniform', 'random.uniform', (['(0.85)', '(1.15)'], {}), '(0.85, 1.15)\n', (8387, 8399), False, 'import random\n'), ((8476, 8499), 'random.randint', 'random.randint', (['(-40)', '(30)'], {}), '(-40, 30)\n', (8490, 8499), False, 'import random\n'), ((7731, 7758), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (7741, 7758), True, 'import matplotlib.pyplot as plt\n'), ((7783, 7803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (7794, 7803), True, 'import matplotlib.pyplot as plt\n'), ((7826, 7843), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (7836, 7843), True, 'import matplotlib.pyplot as plt\n'), ((7868, 7888), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (7879, 7888), True, 'import matplotlib.pyplot as plt\n'), ((7911, 7931), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gt_image'], {}), '(gt_image)\n', (7921, 7931), True, 'import matplotlib.pyplot as plt\n'), ((9057, 9077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (9068, 9077), True, 'import matplotlib.pyplot as plt\n'), ((9100, 9117), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (9110, 9117), True, 'import matplotlib.pyplot as plt\n'), ((9142, 9162), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (9153, 9162), True, 'import matplotlib.pyplot as plt\n'), ((9263, 9283), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gt_image'], {}), '(gt_image)\n', (9273, 9283), True, 'import matplotlib.pyplot as plt\n'), ((9308, 9318), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9316, 9318), True, 'import matplotlib.pyplot as plt\n'), ((9343, 9359), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (9351, 9359), True, 
'import numpy as np\n'), ((9361, 9380), 'numpy.array', 'np.array', (['gt_images'], {}), '(gt_images)\n', (9369, 9380), True, 'import numpy as np\n'), ((10635, 10662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (10645, 10662), True, 'import matplotlib.pyplot as plt\n'), ((10687, 10707), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (10698, 10707), True, 'import matplotlib.pyplot as plt\n'), ((10730, 10747), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (10740, 10747), True, 'import matplotlib.pyplot as plt\n'), ((10772, 10792), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (10783, 10792), True, 'import matplotlib.pyplot as plt\n'), ((10815, 10835), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gt_image'], {}), '(gt_image)\n', (10825, 10835), True, 'import matplotlib.pyplot as plt\n'), ((11261, 11281), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (11272, 11281), True, 'import matplotlib.pyplot as plt\n'), ((11304, 11321), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (11314, 11321), True, 'import matplotlib.pyplot as plt\n'), ((11346, 11366), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (11357, 11366), True, 'import matplotlib.pyplot as plt\n'), ((11467, 11487), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gt_image'], {}), '(gt_image)\n', (11477, 11487), True, 'import matplotlib.pyplot as plt\n'), ((11512, 11522), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11520, 11522), True, 'import matplotlib.pyplot as plt\n'), ((11546, 11562), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (11554, 11562), True, 'import numpy as np\n'), ((11564, 11583), 'numpy.array', 'np.array', (['gt_images'], {}), '(gt_images)\n', (11572, 11583), True, 'import numpy as np\n')] |
import unittest
from maskgen import plugins, image_wrap
import numpy
import tempfile
from tests import test_support
class RetinexTestCase(test_support.TestSupport):
    """Integration test for the maskgen 'Retinex' plugin."""

    def setUp(self):
        # Plugins must be registered before callPlugin can resolve 'Retinex'.
        plugins.loadPlugins()

    def test_retinex(self):
        """Running Retinex on a sample image writes a new, altered image."""
        inputfile = self.locateFile('tests/images/test_project5.jpg')
        # Reuse the already-located path instead of calling locateFile twice.
        img_wrapper = image_wrap.openImageFile(inputfile)
        img = img_wrapper.to_array()
        filename_output = tempfile.mktemp(prefix='mstcr', suffix='.jpg', dir='.')
        self.addFileToRemove(filename_output)
        args, error = plugins.callPlugin('Retinex',
                                        img_wrapper,
                                        inputfile,
                                        filename_output)
        wrapper = image_wrap.openImageFile(filename_output)
        output = wrapper.to_array()
        self.assertEqual((322, 483, 3), output.shape)
        # BUG FIX: the original asserted numpy.all(output != input), comparing
        # the pixels against the *builtin* input function — vacuously true.
        # Compare against the source pixels and require that something changed.
        self.assertTrue(numpy.any(output != img))
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
| [
"maskgen.plugins.loadPlugins",
"maskgen.image_wrap.openImageFile",
"tempfile.mktemp",
"unittest.main",
"numpy.all",
"maskgen.plugins.callPlugin"
] | [((1030, 1045), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1043, 1045), False, 'import unittest\n'), ((197, 218), 'maskgen.plugins.loadPlugins', 'plugins.loadPlugins', ([], {}), '()\n', (216, 218), False, 'from maskgen import plugins, image_wrap\n'), ((479, 534), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'prefix': '"""mstcr"""', 'suffix': '""".jpg"""', 'dir': '"""."""'}), "(prefix='mstcr', suffix='.jpg', dir='.')\n", (494, 534), False, 'import tempfile\n'), ((603, 673), 'maskgen.plugins.callPlugin', 'plugins.callPlugin', (['"""Retinex"""', 'img_wrapper', 'inputfile', 'filename_output'], {}), "('Retinex', img_wrapper, inputfile, filename_output)\n", (621, 673), False, 'from maskgen import plugins, image_wrap\n'), ((815, 856), 'maskgen.image_wrap.openImageFile', 'image_wrap.openImageFile', (['filename_output'], {}), '(filename_output)\n', (839, 856), False, 'from maskgen import plugins, image_wrap\n'), ((970, 996), 'numpy.all', 'numpy.all', (['(output != input)'], {}), '(output != input)\n', (979, 996), False, 'import numpy\n')] |
import cudf
import numpy as np
import cupy as cp
import pytest
from cudf.tests.utils import assert_eq
@pytest.mark.parametrize(
    "np_ar_tup", [(np.random.random(100), np.random.random(100))]
)
@pytest.mark.parametrize(
    "func",
    [
        lambda a, b: np.greater(a, b),
        lambda a, b: np.less(a, b),
        lambda a, b: np.less_equal(a, b),
        lambda a, b: np.subtract(a, b),
    ],
)
def test_ufunc_cudf_series(np_ar_tup, func):
    """NumPy ufuncs applied to two cudf.Series must match the ndarray result."""
    lhs, rhs = np_ar_tup
    expected = func(lhs, rhs)
    actual = func(cudf.Series(lhs), cudf.Series(rhs))
    if not np.isscalar(expected):
        assert_eq(expected, actual.to_array())
    else:
        assert_eq(expected, actual)
@pytest.mark.parametrize(
    "np_ar_tup", [(np.random.random(100), np.random.random(100))]
)
@pytest.mark.parametrize(
    "func",
    [
        lambda a, b: np.greater(a, b),
        lambda a, b: np.less(a, b),
        lambda a, b: np.less_equal(a, b),
    ],
)
def test_ufunc_cudf_series_cupy_array(np_ar_tup, func):
    """Mixed cudf.Series / cupy array operands must match the ndarray result."""
    lhs, rhs = np_ar_tup
    expected = func(lhs, rhs)
    actual = func(cudf.Series(lhs), cp.array(rhs))
    if not np.isscalar(expected):
        assert_eq(expected, actual.to_array())
    else:
        assert_eq(expected, actual)
@pytest.mark.parametrize(
    "func",
    [
        lambda a, b: np.greater(a, b),
        lambda a, b: np.less(a, b),
        lambda a, b: np.less_equal(a, b),
    ],
)
def test_error_with_null_cudf_series(func):
    """Comparing series of different lengths (one holding a null) must raise."""
    lhs = cudf.Series([1, 2])
    rhs = cudf.Series([1, 2, None])
    with pytest.raises(ValueError):
        func(lhs, rhs)
| [
"numpy.greater",
"numpy.less",
"numpy.isscalar",
"cudf.Series",
"numpy.random.random",
"numpy.less_equal",
"cudf.tests.utils.assert_eq",
"numpy.subtract",
"pytest.raises",
"cupy.array"
] | [((593, 612), 'numpy.isscalar', 'np.isscalar', (['expect'], {}), '(expect)\n', (604, 612), True, 'import numpy as np\n'), ((1095, 1109), 'cudf.Series', 'cudf.Series', (['x'], {}), '(x)\n', (1106, 1109), False, 'import cudf\n'), ((1124, 1135), 'cupy.array', 'cp.array', (['y'], {}), '(y)\n', (1132, 1135), True, 'import cupy as cp\n'), ((1175, 1194), 'numpy.isscalar', 'np.isscalar', (['expect'], {}), '(expect)\n', (1186, 1194), True, 'import numpy as np\n'), ((1505, 1524), 'cudf.Series', 'cudf.Series', (['[1, 2]'], {}), '([1, 2])\n', (1516, 1524), False, 'import cudf\n'), ((1535, 1560), 'cudf.Series', 'cudf.Series', (['[1, 2, None]'], {}), '([1, 2, None])\n', (1546, 1560), False, 'import cudf\n'), ((506, 520), 'cudf.Series', 'cudf.Series', (['x'], {}), '(x)\n', (517, 520), False, 'import cudf\n'), ((522, 536), 'cudf.Series', 'cudf.Series', (['y'], {}), '(y)\n', (533, 536), False, 'import cudf\n'), ((622, 644), 'cudf.tests.utils.assert_eq', 'assert_eq', (['expect', 'got'], {}), '(expect, got)\n', (631, 644), False, 'from cudf.tests.utils import assert_eq\n'), ((1204, 1226), 'cudf.tests.utils.assert_eq', 'assert_eq', (['expect', 'got'], {}), '(expect, got)\n', (1213, 1226), False, 'from cudf.tests.utils import assert_eq\n'), ((1570, 1595), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1583, 1595), False, 'import pytest\n'), ((149, 170), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (165, 170), True, 'import numpy as np\n'), ((172, 193), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (188, 193), True, 'import numpy as np\n'), ((263, 279), 'numpy.greater', 'np.greater', (['x', 'y'], {}), '(x, y)\n', (273, 279), True, 'import numpy as np\n'), ((302, 315), 'numpy.less', 'np.less', (['x', 'y'], {}), '(x, y)\n', (309, 315), True, 'import numpy as np\n'), ((338, 357), 'numpy.less_equal', 'np.less_equal', (['x', 'y'], {}), '(x, y)\n', (351, 357), True, 'import numpy as np\n'), ((380, 397), 
'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (391, 397), True, 'import numpy as np\n'), ((744, 765), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (760, 765), True, 'import numpy as np\n'), ((767, 788), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (783, 788), True, 'import numpy as np\n'), ((858, 874), 'numpy.greater', 'np.greater', (['x', 'y'], {}), '(x, y)\n', (868, 874), True, 'import numpy as np\n'), ((897, 910), 'numpy.less', 'np.less', (['x', 'y'], {}), '(x, y)\n', (904, 910), True, 'import numpy as np\n'), ((933, 952), 'numpy.less_equal', 'np.less_equal', (['x', 'y'], {}), '(x, y)\n', (946, 952), True, 'import numpy as np\n'), ((1346, 1362), 'numpy.greater', 'np.greater', (['x', 'y'], {}), '(x, y)\n', (1356, 1362), True, 'import numpy as np\n'), ((1385, 1398), 'numpy.less', 'np.less', (['x', 'y'], {}), '(x, y)\n', (1392, 1398), True, 'import numpy as np\n'), ((1421, 1440), 'numpy.less_equal', 'np.less_equal', (['x', 'y'], {}), '(x, y)\n', (1434, 1440), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib.pyplot as plt
import numpy as np
import argparse
import math
import re
import os
# Steganography tools grouped by the carrier image format they embed into.
png_programs = ['stepic', 'lsbsteg']
jpg_programs = ['f5', 'steghide', 'outguess']
programs = png_programs + jpg_programs
# Describe arguments and options.
parser = argparse.ArgumentParser(description='Create ROC curves for each steganalitic method, targeting a specific steganography program.')
parser.add_argument("reports_dir", help="Path to the StegExpose reports.")
parser.add_argument("program", choices=programs)
parser.add_argument("-n", "--name", metavar="graph_name", help="Save the graph with the specified name (it will be a PNG).")
# Parse arguments.
args = parser.parse_args()
def get_data_from_report(report_file_path):
    """
    Parse a StegExpose CSV report.

    Each image row yields a dict holding the real class ('p' for stego
    images, recognised by an ``_<N>p.<ext>`` filename suffix, 'n' for clean
    ones) and the score of every steganalytic method.

    :param report_file_path: path to the StegExpose report (CSV).
    :return: list of dicts with keys 'real_class', 'primary_sets_score',
        'chi_square_score', 'sample_pairs_score', 'rs_analysis_score'
        and 'fusion_score'.
    """
    datas = []
    with open(report_file_path, 'r') as report_file:
        for line in report_file:
            # Keep only rows that actually describe an image.
            if not re.match(r'.*\.(png|jpg),', line):
                continue
            pieces = line.split(sep=',')
            # Stego images are named like 'foo_10p.png'; clean ones are not.
            real_class = 'p' if re.match(r'.*_\d+p\.(png|jpg),', line) else 'n'
            datas.append({
                'real_class': real_class,
                'primary_sets_score': float(pieces[3]),
                'chi_square_score': float(pieces[4]),
                'sample_pairs_score': float(pieces[5]),
                'rs_analysis_score': float(pieces[6]),
                'fusion_score': float(pieces[7]),
            })
    return datas
def calculate_roc_points(instances):
"""From a sorted list of instances, calculate the points that draw the ROC curve."""
# Calculate the number of positives and negatives (the real ones).
P = N = 0
for label, score in instances:
# print(label, score)
if label == 'p':
P += 1
else:
N += 1
# Calculate each point.
TP = FP = 0
points = []
for label, score in instances:
if label == 'p':
TP += 1
else:
FP +=1
point = (FP/N, TP/P)
# print(point)
points.append(point)
return points
def calculate_discrete_classifier_point(instances, threshold):
"""
From a list of instances, calculate the coordinates for a discrete classifier
that uses the given threshold.
"""
TP = 0 # True positives
FP = 0 # False positives
P = 0 # Total positives
N = 0 # Total negatives
for label, score in instances:
if label == 'p':
P += 1
# Is it classified as positive?
if score >= threshold:
TP += 1
else:
N += 1
# Is it classified as positive? Even though it is not!
if score >= threshold:
FP += 1
tp_rate = TP / P
fp_rate = FP / N
return (fp_rate, tp_rate)
# Parse data out of StegExpose reports.
if args.program in png_programs:
clean_report_name = 'clean-png.csv'
else:
clean_report_name = 'clean-jpeg.csv'
program_report_name = args.program + '.csv'
program_data = get_data_from_report(os.path.join(args.reports_dir, program_report_name))
clean_data = get_data_from_report(os.path.join(args.reports_dir, clean_report_name))
# for i in clean_png_instances: print(i)
# for i in instances: print(i)
# Merge clean ones with dirty ones.
merged_data = program_data + clean_data
# for i in merged_data: print(i)
# Create tuples of instances for each steganalytic method
primary_sets_instances = list(map(lambda d: (d['real_class'], d['primary_sets_score']), merged_data))
chi_square_instances = list(map(lambda d: (d['real_class'], d['chi_square_score']), merged_data))
sample_pairs_instances = list(map(lambda d: (d['real_class'], d['sample_pairs_score']), merged_data))
rs_analysis_instances = list(map(lambda d: (d['real_class'], d['rs_analysis_score']), merged_data))
fusion_instances = list(map(lambda d: (d['real_class'], d['fusion_score']), merged_data))
# for i in primary_sets_instances: print(i)
# Sort the instances by their score.
primary_sets_instances.sort(key=lambda i: i[1], reverse=True)
chi_square_instances .sort(key=lambda i: i[1], reverse=True)
sample_pairs_instances.sort(key=lambda i: i[1], reverse=True)
rs_analysis_instances .sort(key=lambda i: i[1], reverse=True)
fusion_instances .sort(key=lambda i: i[1], reverse=True)
# Filter instances, removing those that contain 'nan' instead of a score.
filtered_primary_sets_instances = list(filter(lambda t: not math.isnan(t[1]), primary_sets_instances))
filtered_chi_square_instances = list(filter(lambda t: not math.isnan(t[1]), chi_square_instances))
filtered_sample_pairs_instances = list(filter(lambda t: not math.isnan(t[1]), sample_pairs_instances))
filtered_rs_analysis_instances = list(filter(lambda t: not math.isnan(t[1]), rs_analysis_instances))
filtered_fusion_instances = list(filter(lambda t: not math.isnan(t[1]), fusion_instances))
# for i in filtered_primary_sets_instances: print(i)
# Sort once again by their score.
filtered_primary_sets_instances.sort(key=lambda i: i[1], reverse=True)
filtered_chi_square_instances.sort(key=lambda i: i[1], reverse=True)
filtered_sample_pairs_instances.sort(key=lambda i: i[1], reverse=True)
filtered_rs_analysis_instances.sort(key=lambda i: i[1], reverse=True)
filtered_fusion_instances.sort(key=lambda i: i[1], reverse=True)
# for i in filtered_primary_sets_instances: print(i)
# Calculate points to plot.
primary_sets_points = calculate_roc_points(filtered_primary_sets_instances)
chi_square_points = calculate_roc_points(filtered_chi_square_instances)
sample_pairs_points = calculate_roc_points(filtered_sample_pairs_instances)
rs_analysis_points = calculate_roc_points(filtered_rs_analysis_instances)
fusion_points = calculate_roc_points(filtered_fusion_instances)
# for i in primary_sets_points: print(i)
# Plot all of them on a single graph.
# Create lists with x and y coordinates.
primary_sets_xs = list(map(lambda p: p[0], primary_sets_points))
primary_sets_ys = list(map(lambda p: p[1], primary_sets_points))
chi_square_xs = list(map(lambda p: p[0], chi_square_points))
chi_square_ys = list(map(lambda p: p[1], chi_square_points))
sample_pairs_xs = list(map(lambda p: p[0], sample_pairs_points))
sample_pairs_ys = list(map(lambda p: p[1], sample_pairs_points))
rs_analysis_xs = list(map(lambda p: p[0], rs_analysis_points))
rs_analysis_ys = list(map(lambda p: p[1], rs_analysis_points))
fusion_xs = list(map(lambda p: p[0], fusion_points))
fusion_ys = list(map(lambda p: p[1], fusion_points))
# These are the AUCs
primary_sets_auc = np.trapz(primary_sets_ys,primary_sets_xs)
chi_square_auc = np.trapz(chi_square_ys,chi_square_xs)
sample_pairs_auc = np.trapz(sample_pairs_ys,sample_pairs_xs)
rs_analysis_auc = np.trapz(rs_analysis_ys,rs_analysis_xs)
fusion_auc = np.trapz(fusion_ys,fusion_xs)
# Plot the ROC curves.
plt.plot(primary_sets_xs, primary_sets_ys, lw=2, color='Red', label='Primary Sets (AUC = %0.2f)' % primary_sets_auc)
plt.plot(chi_square_xs, chi_square_ys, lw=2, color='Yellow', label='Chi Square (AUC = %0.2f)' % chi_square_auc)
plt.plot(sample_pairs_xs, sample_pairs_ys, lw=2, color='Brown', label='Sample Pairs (AUC = %0.2f)' % sample_pairs_auc)
plt.plot(rs_analysis_xs, rs_analysis_ys, lw=2, color='Green', label='Rs Analysis (AUC = %0.2f)' % rs_analysis_auc)
plt.plot(fusion_xs, fusion_ys, lw=2, color='Blue', label='Fusion (AUC = %0.2f)' % fusion_auc)
# Plot the diagonal.
plt.plot([0,1], [0,1], color="black", ls='--', lw=0.5)
# Write title, labels, legends and all to the figure.
# plt.title(title)
plt.xlabel("Taxa de Positivos Falsos")
plt.ylabel("Taxa de Positivos Verdadeiros")
plt.legend(loc="lower right")
plt.axis([0, 1, 0, 1])
plt.grid(True)
# Save or show figure.
if args.name:
plt.savefig(args.name + '.png', bbox_inches='tight')
else:
plt.show()
| [
"numpy.trapz",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"re.match",
"math.isnan",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.legend",
"matplotlib.pyplo... | [((373, 513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create ROC curves for each steganalitic method, targeting a specific steganography program."""'}), "(description=\n 'Create ROC curves for each steganalitic method, targeting a specific steganography program.'\n )\n", (396, 513), False, 'import argparse\n'), ((7314, 7356), 'numpy.trapz', 'np.trapz', (['primary_sets_ys', 'primary_sets_xs'], {}), '(primary_sets_ys, primary_sets_xs)\n', (7322, 7356), True, 'import numpy as np\n'), ((7373, 7411), 'numpy.trapz', 'np.trapz', (['chi_square_ys', 'chi_square_xs'], {}), '(chi_square_ys, chi_square_xs)\n', (7381, 7411), True, 'import numpy as np\n'), ((7430, 7472), 'numpy.trapz', 'np.trapz', (['sample_pairs_ys', 'sample_pairs_xs'], {}), '(sample_pairs_ys, sample_pairs_xs)\n', (7438, 7472), True, 'import numpy as np\n'), ((7490, 7530), 'numpy.trapz', 'np.trapz', (['rs_analysis_ys', 'rs_analysis_xs'], {}), '(rs_analysis_ys, rs_analysis_xs)\n', (7498, 7530), True, 'import numpy as np\n'), ((7543, 7573), 'numpy.trapz', 'np.trapz', (['fusion_ys', 'fusion_xs'], {}), '(fusion_ys, fusion_xs)\n', (7551, 7573), True, 'import numpy as np\n'), ((7597, 7718), 'matplotlib.pyplot.plot', 'plt.plot', (['primary_sets_xs', 'primary_sets_ys'], {'lw': '(2)', 'color': '"""Red"""', 'label': "('Primary Sets (AUC = %0.2f)' % primary_sets_auc)"}), "(primary_sets_xs, primary_sets_ys, lw=2, color='Red', label=\n 'Primary Sets (AUC = %0.2f)' % primary_sets_auc)\n", (7605, 7718), True, 'import matplotlib.pyplot as plt\n'), ((7714, 7830), 'matplotlib.pyplot.plot', 'plt.plot', (['chi_square_xs', 'chi_square_ys'], {'lw': '(2)', 'color': '"""Yellow"""', 'label': "('Chi Square (AUC = %0.2f)' % chi_square_auc)"}), "(chi_square_xs, chi_square_ys, lw=2, color='Yellow', label=\n 'Chi Square (AUC = %0.2f)' % chi_square_auc)\n", (7722, 7830), True, 'import matplotlib.pyplot as plt\n'), ((7830, 7953), 'matplotlib.pyplot.plot', 'plt.plot', 
(['sample_pairs_xs', 'sample_pairs_ys'], {'lw': '(2)', 'color': '"""Brown"""', 'label': "('Sample Pairs (AUC = %0.2f)' % sample_pairs_auc)"}), "(sample_pairs_xs, sample_pairs_ys, lw=2, color='Brown', label=\n 'Sample Pairs (AUC = %0.2f)' % sample_pairs_auc)\n", (7838, 7953), True, 'import matplotlib.pyplot as plt\n'), ((7949, 8068), 'matplotlib.pyplot.plot', 'plt.plot', (['rs_analysis_xs', 'rs_analysis_ys'], {'lw': '(2)', 'color': '"""Green"""', 'label': "('Rs Analysis (AUC = %0.2f)' % rs_analysis_auc)"}), "(rs_analysis_xs, rs_analysis_ys, lw=2, color='Green', label=\n 'Rs Analysis (AUC = %0.2f)' % rs_analysis_auc)\n", (7957, 8068), True, 'import matplotlib.pyplot as plt\n'), ((8066, 8164), 'matplotlib.pyplot.plot', 'plt.plot', (['fusion_xs', 'fusion_ys'], {'lw': '(2)', 'color': '"""Blue"""', 'label': "('Fusion (AUC = %0.2f)' % fusion_auc)"}), "(fusion_xs, fusion_ys, lw=2, color='Blue', label=\n 'Fusion (AUC = %0.2f)' % fusion_auc)\n", (8074, 8164), True, 'import matplotlib.pyplot as plt\n'), ((8194, 8250), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""black"""', 'ls': '"""--"""', 'lw': '(0.5)'}), "([0, 1], [0, 1], color='black', ls='--', lw=0.5)\n", (8202, 8250), True, 'import matplotlib.pyplot as plt\n'), ((8323, 8361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Taxa de Positivos Falsos"""'], {}), "('Taxa de Positivos Falsos')\n", (8333, 8361), True, 'import matplotlib.pyplot as plt\n'), ((8362, 8405), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Taxa de Positivos Verdadeiros"""'], {}), "('Taxa de Positivos Verdadeiros')\n", (8372, 8405), True, 'import matplotlib.pyplot as plt\n'), ((8406, 8435), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (8416, 8435), True, 'import matplotlib.pyplot as plt\n'), ((8436, 8458), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (8444, 8458), True, 'import matplotlib.pyplot as plt\n'), ((8459, 8473), 
'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8467, 8473), True, 'import matplotlib.pyplot as plt\n'), ((3813, 3864), 'os.path.join', 'os.path.join', (['args.reports_dir', 'program_report_name'], {}), '(args.reports_dir, program_report_name)\n', (3825, 3864), False, 'import os\n'), ((3900, 3949), 'os.path.join', 'os.path.join', (['args.reports_dir', 'clean_report_name'], {}), '(args.reports_dir, clean_report_name)\n', (3912, 3949), False, 'import os\n'), ((8516, 8568), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.name + '.png')"], {'bbox_inches': '"""tight"""'}), "(args.name + '.png', bbox_inches='tight')\n", (8527, 8568), True, 'import matplotlib.pyplot as plt\n'), ((8579, 8589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8587, 8589), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1158), 're.match', 're.match', (['""".*\\\\.(png|jpg),"""', 'line'], {}), "('.*\\\\.(png|jpg),', line)\n", (1133, 1158), False, 'import re\n'), ((5212, 5228), 'math.isnan', 'math.isnan', (['t[1]'], {}), '(t[1])\n', (5222, 5228), False, 'import math\n'), ((5315, 5331), 'math.isnan', 'math.isnan', (['t[1]'], {}), '(t[1])\n', (5325, 5331), False, 'import math\n'), ((5416, 5432), 'math.isnan', 'math.isnan', (['t[1]'], {}), '(t[1])\n', (5426, 5432), False, 'import math\n'), ((5519, 5535), 'math.isnan', 'math.isnan', (['t[1]'], {}), '(t[1])\n', (5529, 5535), False, 'import math\n'), ((5621, 5637), 'math.isnan', 'math.isnan', (['t[1]'], {}), '(t[1])\n', (5631, 5637), False, 'import math\n'), ((1354, 1393), 're.match', 're.match', (['""".*_\\\\d+p\\\\.(png|jpg),"""', 'line'], {}), "('.*_\\\\d+p\\\\.(png|jpg),', line)\n", (1362, 1393), False, 'import re\n')] |
# 2048 game logic
import math
from enum import IntFlag, Enum
import numpy as np
from numpy import random
class Game2048:
class MoveResult(IntFlag):
VICTORY = 0b0001
DEFEAT = 0b0010
FIELD_UPDATE = 0b0100
NOTHING = 0b1000
GAME_CONTINUES = FIELD_UPDATE | NOTHING
class Move(Enum):
UP = (-1, 0)
DOWN = (1, 0)
LEFT = (0, -1)
RIGHT = (0, 1)
FIELD_SIZE = 4
GAME_GOAL = 2048
_GAME_GOAL_LOG = math.log2(GAME_GOAL)
_NATURAL_GENERATION_NUMBERS_LOG = np.array([1, 2]) # 2, 4
_NATURAL_GENERATION_NUMBERS_WEIGHTS = np.array([0.9, 0.1])
# not exact numbers but log2 of them are stored; for empty cell, 0 is stored
_game_field: np.ndarray
_number_of_moves: int = 0
_score: int = 0
_max_value_log: int = 0
def __init__(self):
"""Generate initial game field state"""
self._generate_field()
def make_move(self, move: Move) -> MoveResult:
move_vector = move.value
x_range: np.ndarray
if move_vector[0] != 1:
x_range = np.arange(0, self.FIELD_SIZE)
else:
x_range = np.arange(self.FIELD_SIZE - 1, -1, -1)
y_range: np.ndarray
if move_vector[1] != 1:
y_range = np.arange(0, self.FIELD_SIZE)
else:
y_range = np.arange(self.FIELD_SIZE - 1, -1, -1)
collide_table = np.full(self._game_field.shape, False)
result = self.MoveResult.NOTHING
zeroes_count = 0
for x in x_range:
for y in y_range:
point = np.array([x, y])
new_point = point
point_value = self._point(point)
if self._is_empty(point):
zeroes_count += 1
continue
while self._is_valid_point(new_point := new_point + move_vector) and self._is_empty(new_point):
pass
if self._is_valid_point(new_point) and not collide_table[new_point[0], new_point[1]] \
and self._point(new_point) == point_value:
new_point_value = point_value + 1
if new_point_value > self._max_value_log:
self._max_value_log = new_point_value
self._score += 2**new_point_value
self._set_point(new_point, new_point_value)
self._set_point(point, 0)
collide_table[new_point[0], new_point[1]] = True
zeroes_count += 1
if new_point_value == self._GAME_GOAL_LOG:
result = self.MoveResult.VICTORY
else:
result = self.MoveResult.FIELD_UPDATE
else:
new_point -= move_vector
if self._is_empty(new_point):
self._set_point(new_point, point_value)
self._set_point(point, 0)
result = self.MoveResult.FIELD_UPDATE
if zeroes_count == 0 and result != self.MoveResult.NOTHING:
self._number_of_moves += 1
return self.MoveResult.DEFEAT
if result != self.MoveResult.NOTHING:
self._generate_random_number()
self._number_of_moves += 1
if zeroes_count < 2 and not self._can_make_move():
return self.MoveResult.DEFEAT
return result
def get_game_field(self) -> np.ndarray:
return np.vectorize(lambda x: 0 if x == 0 else 2**x)(self._game_field)
def get_number_of_moves(self) -> int:
"""N.B.: moves that do not change the field are npt counted"""
return self._number_of_moves
def get_score(self) -> int:
return self._score
def get_max_value(self) -> int:
return 2 ** self._max_value_log
def _can_make_move(self) -> bool:
for x in range(self.FIELD_SIZE):
for y in range(self.FIELD_SIZE):
point_value = self._game_field[x][y]
if point_value == 0:
return True
if x + 1 < self.FIELD_SIZE and self._game_field[x+1][y] in [point_value, 0]:
return True
if y + 1 < self.FIELD_SIZE and self._game_field[x][y+1] in [point_value, 0]:
return True
return False
def _is_valid_point(self, point: np.ndarray) -> bool:
return 0 <= point[0] < self.FIELD_SIZE and 0 <= point[1] < self.FIELD_SIZE
def _is_empty(self, point: np.ndarray) -> bool:
return self._point(point) == 0
def _point(self, point: np.ndarray) -> int:
return self._game_field[point[0]][point[1]]
def _set_point(self, point: np.ndarray, value: int) -> None:
self._game_field[point[0]][point[1]] = value
def _generate_field(self):
self._game_field = np.zeros((self.FIELD_SIZE, self.FIELD_SIZE), np.int8)
self._generate_random_number()
self._generate_random_number()
def _choose_natural_gen_number(self):
return random.choice(self._NATURAL_GENERATION_NUMBERS_LOG, p=self._NATURAL_GENERATION_NUMBERS_WEIGHTS)
def _choose_random_empty_cell(self) -> np.ndarray:
count = np.count_nonzero(self._game_field == 0)
point_number = random.randint(count) + 1
zero_count = 0
for x in range(self.FIELD_SIZE):
for y in range(self.FIELD_SIZE):
if self._game_field[x][y] == 0:
zero_count += 1
if zero_count == point_number:
return np.array([x, y])
def _generate_random_number(self):
num = self._choose_natural_gen_number()
if num > self._max_value_log:
self._max_value_log = num
point = self._choose_random_empty_cell()
self._set_point(point, num)
| [
"numpy.random.choice",
"math.log2",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.full",
"numpy.vectorize",
"numpy.arange"
] | [((482, 502), 'math.log2', 'math.log2', (['GAME_GOAL'], {}), '(GAME_GOAL)\n', (491, 502), False, 'import math\n'), ((542, 558), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (550, 558), True, 'import numpy as np\n'), ((609, 629), 'numpy.array', 'np.array', (['[0.9, 0.1]'], {}), '([0.9, 0.1])\n', (617, 629), True, 'import numpy as np\n'), ((1408, 1446), 'numpy.full', 'np.full', (['self._game_field.shape', '(False)'], {}), '(self._game_field.shape, False)\n', (1415, 1446), True, 'import numpy as np\n'), ((4908, 4961), 'numpy.zeros', 'np.zeros', (['(self.FIELD_SIZE, self.FIELD_SIZE)', 'np.int8'], {}), '((self.FIELD_SIZE, self.FIELD_SIZE), np.int8)\n', (4916, 4961), True, 'import numpy as np\n'), ((5099, 5199), 'numpy.random.choice', 'random.choice', (['self._NATURAL_GENERATION_NUMBERS_LOG'], {'p': 'self._NATURAL_GENERATION_NUMBERS_WEIGHTS'}), '(self._NATURAL_GENERATION_NUMBERS_LOG, p=self.\n _NATURAL_GENERATION_NUMBERS_WEIGHTS)\n', (5112, 5199), False, 'from numpy import random\n'), ((5267, 5306), 'numpy.count_nonzero', 'np.count_nonzero', (['(self._game_field == 0)'], {}), '(self._game_field == 0)\n', (5283, 5306), True, 'import numpy as np\n'), ((1090, 1119), 'numpy.arange', 'np.arange', (['(0)', 'self.FIELD_SIZE'], {}), '(0, self.FIELD_SIZE)\n', (1099, 1119), True, 'import numpy as np\n'), ((1156, 1194), 'numpy.arange', 'np.arange', (['(self.FIELD_SIZE - 1)', '(-1)', '(-1)'], {}), '(self.FIELD_SIZE - 1, -1, -1)\n', (1165, 1194), True, 'import numpy as np\n'), ((1278, 1307), 'numpy.arange', 'np.arange', (['(0)', 'self.FIELD_SIZE'], {}), '(0, self.FIELD_SIZE)\n', (1287, 1307), True, 'import numpy as np\n'), ((1344, 1382), 'numpy.arange', 'np.arange', (['(self.FIELD_SIZE - 1)', '(-1)', '(-1)'], {}), '(self.FIELD_SIZE - 1, -1, -1)\n', (1353, 1382), True, 'import numpy as np\n'), ((3525, 3572), 'numpy.vectorize', 'np.vectorize', (['(lambda x: 0 if x == 0 else 2 ** x)'], {}), '(lambda x: 0 if x == 0 else 2 ** x)\n', (3537, 3572), True, 'import numpy as 
np\n'), ((5330, 5351), 'numpy.random.randint', 'random.randint', (['count'], {}), '(count)\n', (5344, 5351), False, 'from numpy import random\n'), ((1594, 1610), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1602, 1610), True, 'import numpy as np\n'), ((5631, 5647), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (5639, 5647), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#"""
#Copyright [2020] [Indian Institute of Science, Bangalore & Tata Institute of Fundamental Research, Mumbai]
#SPDX-License-Identifier: Apache-2.0
#"""
__name__ = "Script for generating city files - instantiation of a synthetic city"
import os
import sys
import math
import argparse
import csv
import random
import json
import warnings
warnings.filterwarnings('ignore')
import geopandas as gpd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point, MultiPolygon
from computeDistributions import *
# Default Gloabal Prameters
interactive = 0
default_miniPop = 100000
default_city="bangalore"
default_ibasepath = 'data/base/bangalore/'
default_obasepath = 'data/bangalore-100K/'
a_workplacesize = 3.26
c_workplacesize = 0.97
m_max_workplacesize = 2870
avgSchoolsize = 300
# Handling inputs and interactions
if interactive:
city = default_city
miniPop = default_miniPop
ibasepath = default_ibasepath
obasepath = default_obasepath
else:
my_parser = argparse.ArgumentParser(description='Create mini-city for COVID-19 simulation')
my_parser.add_argument('-c', help='target city', default=default_city)
my_parser.add_argument('-n', help='target population', default=default_miniPop)
my_parser.add_argument('-i', help='input folder', default=default_ibasepath)
my_parser.add_argument('-o', help='output folder', default=default_obasepath)
args = my_parser.parse_args()
city = (args.c).lower()
miniPop = int(args.n)
ibasepath = args.i
obasepath = args.o
if ibasepath[-1]!='/':
ibasepath = ibasepath+'/'
if obasepath[-1]!='/':
obasepath = obasepath+'/'
# Workplace commute parameters
if city == 'bangalore':
a_commuter_distance = 10.751
b_commuter_distance = 5.384
m_max_commuter_distance = 35
if city == 'mumbai':
a_commuter_distance = 4 #parameter in distribution for commuter distance - Thailand paper
b_commuter_distance = 3.8 #parameter in distribution for commuter distance - Thailand paper
m_max_commuter_distance = 60
# Create output directory if not present
if not os.path.exists(obasepath):
os.mkdir(obasepath)
# Prepare input file paths
citygeojsonfile = ibasepath+"city.geojson"
demographicsfile = ibasepath+"demographics.csv"
employmentfile = ibasepath+"employment.csv"
householdfile = ibasepath+"households.csv"
cityprofilefile = ibasepath+"cityProfile.json"
slumfracfile = ibasepath+"slumFraction.csv"
slumclusterfile = ibasepath+"slumClusters.geojson"
ODMatrixfile = ibasepath+"ODMatrix.csv"
individualsjson = obasepath+"individuals.json"
housesjson = obasepath+"houses.json"
workplacesjson = obasepath+"workplaces.json"
schoolsjson = obasepath+"schools.json"
wardCentreDistancejson = obasepath+"wardCentreDistance.json"
commonAreajson = obasepath+"commonArea.json"
fractionPopulationjson = obasepath+"fractionPopulation.json"
#fixing for now
slum_schoolsize_factor = 2
slum_householdsize_scalefactor = 2
print("Creating city with a population of approximately ",miniPop,flush=True)
print("")
print("Reading city.geojson to get ward polygons...",end='',flush=True)
geoDF = gpd.read_file(citygeojsonfile)
geoDF['wardNo'] = geoDF['wardNo'].astype(int)
geoDF['wardIndex'] = geoDF['wardNo'] - 1
geoDF = geoDF[['wardIndex','wardNo', 'wardName', 'geometry']]
geoDF['wardBounds'] = geoDF.apply(lambda row: MultiPolygon(row['geometry']).bounds, axis=1)
geoDF['wardCentre'] = geoDF.apply(lambda row: (MultiPolygon(row['geometry']).centroid.x, MultiPolygon(row['geometry']).centroid.y), axis=1)
geoDF["neighbors"] = geoDF.apply(lambda row: ", ".join([str(ward) for ward in geoDF[~geoDF.geometry.disjoint(row['geometry'])]['wardNo'].tolist()]) , axis=1)
print("done.",flush=True)
if os.path.exists(slumfracfile):
print(slumfracfile,"exists... processing slum data",flush=True)
slum_flag = 1
slum_fractions = []
with open(slumfracfile, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
if row[0]=='wardIndex':
continue
slum_fractions.append(float(row[2]))
if os.path.exists(slumclusterfile):
slumcluster_flag=1
print("Slum clustter file found. Parsing slum clusters...",end='',flush=True)
geoDFslums = gpd.read_file(slumclusterfile)
wardslums = [[] for _ in range(len(geoDF))]
for i in range(len(geoDFslums)):
for j in range(len(geoDF)):
if geoDFslums["geometry"][i].intersects(geoDF["geometry"][j]):
wardslums[j].append(i)
print("done.",flush=True)
else:
slumcluster_flag=0
print("Slum clustter file not found.",end='',flush=True)
else:
slum_flag=0
slumcluster_flag=0
print(slumfracfile,"does not exist... not processing slum data",flush=True)
print("Reading demographics, employment and household data (csv)...",end='',flush=True)
wardname = []
wardpop = []
wardarea = []
wardemployed = []
wardunemployed = []
wardworkforce = []
wardhouseholds = []
demographics = pd.read_csv(demographicsfile)
wardname = demographics['wardName'].values
wardpop = demographics['totalPopulation'].astype(int).values
wardarea = demographics['area(sq km)'].astype(float).values
households = pd.read_csv(householdfile)
wardhouseholds = households['Households'].astype(int).values
employment = pd.read_csv(employmentfile)
wardemployed = employment['Employed'].astype(int).values
wardunemployed = employment['Unemployment'].astype(int).values
wardworkforce = employment['totalWorkForce'].astype(int).values
print("done.",flush=True)
with open(cityprofilefile, newline='') as file:
cityprofiledata = json.load(file)
hbins = cityprofiledata['householdSize']['bins']
hweights = cityprofiledata['householdSize']['weights']
hweights[0]=hweights[0] + 1- sum(hweights)
def sampleHouseholdSize():
s = np.random.choice(hbins,1,p=hweights)[0]
if '+' in s:
n = int(s[:-1])
elif '-' in s:
(a,b) = s.split('-')
n = random.randint(int(a),int(b))
else:
n = int(s)
return n
agebins = cityprofiledata['age']['bins']
ageweights = cityprofiledata['age']['weights']
ageweights[0] = ageweights[0] + 1 - sum(ageweights)
def sampleAge():
s = np.random.choice(agebins,1,p=ageweights)[0]
if '+' in s:
n = int(s[:-1])
else:
(a,b) = s.split('-')
n = random.randint(int(a),int(b))
return n
def sampleRandomLatLong(wardIndex):
#I'm not sure why the order is longitude followed by latitude
(lon1,lat1,lon2,lat2) = geoDF['wardBounds'][wardIndex]
while True:
lat = random.uniform(lat1,lat2)
lon = random.uniform(lon1,lon2)
point = Point(lon,lat)
if MultiPolygon(geoDF['geometry'][wardIndex]).contains(point):
return (lat,lon)
def sampleRandomLatLong_s(wardIndex,slumbit):
#slumbit = 0 => get point in nonslum
#slumbit = 1 => get point in slum
if slumcluster_flag==0:
return sampleRandomLatLong(wardIndex)
#I'm not sure why the order is longitude followed by latitude
(lon1,lat1,lon2,lat2) = geoDF['wardBounds'][wardIndex]
if slumpoints_precomputed:
if slumbit==1:
if len(slumpoints[wardIndex])==0:
return sampleRandomLatLong(wardIndex)
i = random.randint(0,len(slumpoints[wardIndex])-1)
return slumpoints[wardIndex][i]
else:
#Just going to return a random point in the ward
return sampleRandomLatLong(wardIndex)
#If not precomputed, do rejection sampling
attempts = 0
while attempts<30:
attempts+=1
lat = random.uniform(lat1,lat2)
lon = random.uniform(lon1,lon2)
point = Point(lon,lat)
if MultiPolygon(geoDF['geometry'][wardIndex]).contains(point):
for i in wardslums[wardIndex]:
if geoDFslums["geometry"][i].contains(point):
if slumbit==1:
return (lat,lon)
else:
if slumbit==0:
return(lat,lon)
#Just sample a random point in the ward if unsuccessful
#print("Gave up on sampleRandomLatLong_s with ",wardIndex,slumflag)
return sampleRandomLatLong(wardIndex)
def distance(lat1, lon1, lat2, lon2):
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
def getCommunityCenterDistance(lat,lon,wardIndex):
#I'm not sure why the order is longitude followed by latitude
(lonc,latc) = geoDF['wardCentre'][wardIndex]
return distance(lat,lon,latc,lonc)
# In[9]:
totalPop = sum(wardpop)
scale = miniPop/totalPop
nwards = len(wardname)
mwardpop = [int(a * scale) for a in wardpop]
mwardemployed = [int(a * scale) for a in wardunemployed]
mwardunemployed = [int(a * scale) for a in wardemployed]
mwardworkforce = [int(a * scale) for a in wardworkforce]
mwardhouseholds = [int(a * scale) for a in wardhouseholds]
if slum_flag:
mslumwardpop = [int(mwardpop[i] * slum_fractions[i]) for i in range(nwards)]
mnonslumwardpop = [mwardpop[i] - mslumwardpop[i] for i in range(len(wardpop))]
else:
mslumwardpop = [0]*nwards
mnonslumwardpop = mwardpop.copy()
print("Creating households for each ward...",end='',flush=True)
houses = []
hid = 0
for wardIndex in range(nwards):
wnonslumpop = mnonslumwardpop[wardIndex]
wslumpop = mslumwardpop[wardIndex]
currnonslumwpop = 0
currslumwpop = 0
while(currnonslumwpop < wnonslumpop):
h = {}
h["id"]=hid
h["wardIndex"]=wardIndex
if slum_flag:
h["slum"]=0
s = sampleHouseholdSize()
h["size"]=s
currnonslumwpop+=s
(lat,lon) = sampleRandomLatLong_s(wardIndex,0)
h["lat"] = lat
h["lon"] = lon
houses.append(h)
hid+=1
#if slum_flag=0, then wslumpop = 0
while(currslumwpop < wslumpop):
h = {}
h["id"]=hid
h["wardIndex"]=wardIndex
if slum_flag:
h["slum"]=1
s = int(sampleHouseholdSize() * slum_householdsize_scalefactor)
h["size"]=s
currslumwpop+=s
(lat,lon) = sampleRandomLatLong_s(wardIndex,1)
h["lat"] = lat
h["lon"] = lon
houses.append(h)
hid+=1
print("done.",flush=True)
homeworkmatrix = []
if os.path.exists(ODMatrixfile):
with open(ODMatrixfile, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
if row[0]=='wardNo':
continue
homeworkmatrix.append(list(map(lambda x: float(x),row[1:])))
else:
print(ODMatrixfile, "not found. Using uniform ODmatrix.",flush=True)
homeworkmatrix = [[(1/nwards) for _ in range(nwards)] for _ in range(nwards)]
for i in range(nwards):
homeworkmatrix[i][0] = homeworkmatrix[i][0] + 1 - sum(homeworkmatrix[i])
print("Creating individuals to populate the households...",end='',flush=True)
pid = 0
individuals = []
schoolers = [[] for _ in range(nwards)]
workers = [[] for _ in range(nwards)]
slum_schoolers = [[] for _ in range(nwards)]
nonslum_schoolers = [[] for _ in range(nwards)]
for h in houses:
s = h["size"]
for i in range(s):
p = {"id":pid}
p["household"]=h["id"]
wardIndex = h["wardIndex"]
p["wardIndex"]=wardIndex
p["wardNo"] = wardIndex+1
if slum_flag:
p["slum"] = h["slum"]
p["lat"] = h["lat"]
p["lon"] = h["lon"]
p["CommunityCentreDistance"] = getCommunityCenterDistance(h["lat"],h["lon"],wardIndex)
age = sampleAge()
p["age"] = age
#initialising most stuff to None
p["employed"]=None
p["workplace"]=None
p["workplaceType"]=0
p["school"]=None
if age<=15:
#decide about his/her school
p["employed"]=0
p["workplaceType"]=2 #this is school
#assuming they all go to school
#schoolers[wardIndex].append(pid)
if slum_flag ==1 and p["slum"]==1:
slum_schoolers[wardIndex].append(pid)
else:
nonslum_schoolers[wardIndex].append(pid)
elif age>=15 and age<65:
#decide about employment
eprob = wardemployed[wardIndex]/wardpop[wardIndex]
eprobadjusted = eprob/sum([ageweights[a] for a in range(3,13)])
if(random.uniform(0,1)<eprobadjusted):
#person is employed
p["employed"]=1
p["workplace"]="TODO"
workplaceward = int(np.random.choice(list(range(nwards)),1,p=homeworkmatrix[wardIndex])[0])
p["workplaceward"]=workplaceward
p["workplaceType"]=1
workers[workplaceward].append(pid)
else:
p["employed"]=0
p["workplaceType"]=0
else:
#decide about seniors
test = 0
p["employed"]=0
individuals.append(p)
pid+=1
print("done.",flush=True)
def workplaces_size_distribution(a=3.26, c=0.97, m_max=2870):
count=1
a=3.26
c=0.97
m_max=2870
p_nplus = np.arange(float(m_max))
for m in range(m_max):
p_nplus[m] = ((( (1+m_max/a)/(1+m/a))**c) -1) / (((1+m_max/a)**c) -1)
p_nminus = 1.0 - p_nplus
p_n = np.arange(float(m_max))
prev=0.0
for m in range(1, m_max):
p_n[m] = p_nminus[m] - prev
prev = p_nminus[m]
return p_n/sum(p_n)
def sampleWorkplaceSize():
wsdist = workplaces_size_distribution()
m_max = len(wsdist)
return int(np.random.choice(np.arange(m_max),1,p=wsdist)[0])
print("Assigning workplaces to people...",end='',flush=True)
#assigning workplaces to people who want work
workplaces = []
wid = 0
for wardIndex in range(nwards):
wworkers = len(workers[wardIndex])
while len(workers[wardIndex])>0:
w = {"id":wid}
(lat,lon) = sampleRandomLatLong(wardIndex)
w["lat"] = lat
w["lon"] = lon
w["wardIndex"]=wardIndex
s = sampleWorkplaceSize()
i = 0
while(i < s and len(workers[wardIndex])>0):
pid = workers[wardIndex].pop(random.randrange(len(workers[wardIndex])))
individuals[pid]["workplace"]=wid
del individuals[pid]["workplaceward"]
i+=1
workplaces.append(w)
wid+=1
print('done.',flush=True)
schoolsizebins = ["0-100", "100-200", "200-300", "300-400", "400-500", "500-600", "600-700", "700-800", "800-900"]
schoolsizebinweights = [0.0185, 0.1204, 0.2315, 0.2315, 0.1574, 0.0889, 0.063, 0.0481, 0.0408]
schoolsizebinweights[0]=schoolsizebinweights[0] -sum(schoolsizebinweights)+1
def sampleSchoolSize():
    """Sample a school size: choose a 100-wide bin by weight, then a uniform offset in it."""
    n_bins = len(schoolsizebinweights)
    bin_idx = int(np.random.choice(list(range(n_bins)), 1, p=schoolsizebinweights)[0])
    return 100 * bin_idx + random.randint(0, 99)
print("Assigning schools to people...",end='',flush=True)
# Assign every queued student to a school, slum and non-slum pools separately.
# Same greedy scheme as workplaces: create a school, sample its capacity,
# fill it with random students from the ward's queue.
schools = []
sid = 0  # running school id
if slum_flag:
    for wardIndex in range(nwards):
        wslum_schoolers = len(slum_schoolers[wardIndex])  # NOTE(review): unused
        while len(slum_schoolers[wardIndex])>0:
            s = {"ID":sid} #capitalised in the previous code so keeping it so
            s["wardIndex"]=wardIndex
            # Random point inside the slum portion of the ward (second arg = slum flag).
            (lat,lon) = sampleRandomLatLong_s(wardIndex,1)
            s["lat"] = lat
            s["lon"] = lon
            s["slum"]=1
            # Slum schools are scaled down by the configured factor.
            size = int(sampleSchoolSize()*slum_schoolsize_factor)
            i = 0
            while(i < size and len(slum_schoolers[wardIndex])>0):
                pid = slum_schoolers[wardIndex].pop(random.randrange(len(slum_schoolers[wardIndex])))
                individuals[pid]["school"]=sid
                i+=1
            schools.append(s)
            sid+=1
for wardIndex in range(nwards):
    wnonslum_schoolers = len(nonslum_schoolers[wardIndex])  # NOTE(review): unused
    while len(nonslum_schoolers[wardIndex])>0:
        s = {"ID":sid}
        s["wardIndex"]=wardIndex
        (lat,lon) = sampleRandomLatLong_s(wardIndex,0)
        s["lat"] = lat
        s["lon"] = lon
        # Only tag the slum field when the city distinguishes slum areas at all.
        if slum_flag:
            s["slum"]=0
        size = sampleSchoolSize()
        i = 0
        while(i < size and len(nonslum_schoolers[wardIndex])>0):
            pid = nonslum_schoolers[wardIndex].pop(random.randrange(len(nonslum_schoolers[wardIndex])))
            individuals[pid]["school"]=sid
            i+=1
        schools.append(s)
        sid+=1
print("done.",flush=True)
# Stats of instantiated city
print("")
print("Created (mini)city")
print("Population:",len(individuals))
print("Households:",len(houses))
print("Schools:",len(schools))
print("Workplaces:",len(workplaces))
print("")
# Assigning Common Areas: one common area per ward, located at the ward centre.
commonAreas = []
for i in range(nwards):
    c = {"ID":i}
    c["wardNo"] = i+1  # ward numbers are 1-based in the output files
    # geoDF stores centres as (lon, lat) pairs.
    (lon,lat)= geoDF['wardCentre'][i]
    c["lat"] = lat
    c["lon"] = lon
    commonAreas.append(c)
# Per-ward population totals and their fraction of the whole city.
fractionPopulations = []
for i in range(nwards):
    w = {"wardNo":i+1}
    w["totalPopulation"] = int(wardpop[i])
    w["fracPopulation"] = wardpop[i]/totalPop
    fractionPopulations.append(w)
# Pairwise distances between ward centres, keyed by 1-based ward number.
wardCentreDistances = [ {"ID":i+1} for i in range(nwards)]
for i in range(nwards):
    for j in range(nwards):
        d = distance(commonAreas[i]["lat"],commonAreas[i]["lon"],commonAreas[j]["lat"],commonAreas[j]["lon"])
        wardCentreDistances[i][str(j+1)]=d
# Create dataframes for validation
df1 = pd.DataFrame(individuals)
del individuals
# Creating instantiated city files as JSONs.
# BUG FIX: the original code did `f.close` (an attribute access, never called),
# so none of the output files were explicitly closed/flushed. Context managers
# now guarantee each file is closed as soon as it is written.
print("Dumping to json files...",end='',flush=True)
with open(housesjson, "w+") as f:
    f.write(json.dumps(houses))
print("houses.json, ",end='',flush=True)
with open(workplacesjson, "w+") as f:
    f.write(json.dumps(workplaces))
print("workplaces.json, ",end='',flush=True)
# Kept here (as in the original order): wp is reused below for the
# workplace commute-distance validation plot.
wp = pd.DataFrame(workplaces)
with open(schoolsjson, "w+") as f:
    f.write(json.dumps(schools))
print("schools.json, ",end='',flush=True)
with open(commonAreajson, "w+") as f:
    f.write(json.dumps(commonAreas))
print("commonArea.json, ",end='',flush=True)
with open(fractionPopulationjson, "w+") as f:
    f.write(json.dumps(fractionPopulations))
print("fractionPopulation.json, ",end='',flush=True)
with open(wardCentreDistancejson, "w+") as f:
    f.write(json.dumps(wardCentreDistances))
print("wardCentreDistance.json, ",end='',flush=True)
# Free the large intermediate structures; only df1 and wp are needed below.
del wardCentreDistances, commonAreas, fractionPopulations, schools, houses, workplaces
df1.to_json(individualsjson, orient='records')
print("individuals.json ... done.",flush=True)
print('\nGenerating validation plots for the instantitaion...\n')
# Get distributions to match
age_values, age_distribution = compute_age_distribution(cityprofiledata['age']['weights'])
household_sizes, household_distribution = compute_household_size_distribution(cityprofiledata['householdSize']['bins'], cityprofiledata['householdSize']['weights'])
schoolsize_values, schoolsize_distribution = extrapolate_school_size_distribution(cityprofiledata['schoolsSize']['weights'],avgSchoolsize)
workplacesize_distribution = workplaces_size_distribution()
# --- Age distribution: instantiated population vs. input data ---
print("Validating age distribution in instantiation...",end='',flush=True)
plt.plot(df1['age'].value_counts(normalize=True).sort_index(ascending=True), 'r-o',label='Instantiation')
plt.plot(age_distribution, 'b-',label='Data')
plt.xlabel('Age')
plt.ylabel('Density')
plt.title('Distribution of age')
plt.grid(True)
plt.legend()
plt.xticks(np.arange(0,81,10), np.concatenate((age_values[np.arange(0,71,10)], ['80+'])) )
plt.savefig(obasepath+'/age.png')
plt.close()
print("done.",flush=True)
# --- Household-size distribution ---
print("Validating household-size in instantiation...",end='',flush=True)
# value_counts over 'household' gives the member count of each house.
house = df1['household'].value_counts().values
unique_elements, counts_elements = np.unique(house, return_counts=True)
counts_elements = counts_elements / np.sum(counts_elements)
plt.plot(counts_elements, 'r-o', label='Instantiation')
plt.plot(household_distribution, 'b-', label='data')
plt.xlabel('Household-size')
plt.ylabel('Density')
plt.title('Distribution of household-size')
plt.grid(True)
plt.legend()
# NOTE(review): the tick labels index into age_values here — this looks like a
# copy/paste from the age plot; household-size labels were probably intended.
plt.xticks(np.arange(0,len(household_sizes),1), np.concatenate((age_values[np.arange(1,household_sizes[-1],1)], [str(household_sizes[-1])+'+'])) )
plt.savefig(obasepath+'/household_size.png')
plt.close()
print("done.",flush=True)
# --- School-size distribution (schools bucketed into 100-student bins) ---
print("Validating school-size in instantiation...",end='',flush=True)
schoolsizeDistribution = cityprofiledata['schoolsSize']['weights']
# For each school id, count its students, then bucket the counts by 100.
full_frame = np.floor(np.array([len(np.where(df1['school'] == i)[0]) for i in np.unique(df1['school'].values)[~np.isnan(np.unique(df1['school'].values))]])/100).astype(int)
schoolsize_output = [len(np.where(full_frame == j)[0]) for j in np.arange(0,len(schoolsizeDistribution))] / np.sum([len(np.where(full_frame == j)[0]) for j in np.arange(0,len(schoolsizeDistribution))])
plt.plot(schoolsize_output,'r-o', label='Instantiation')
plt.plot(schoolsizeDistribution,'b-', label='Data')
xlabel = np.arange(0,len(schoolsizeDistribution))
plt.xticks(xlabel, np.concatenate((np.arange(1,10)*100, [str('901+')])))
plt.xlabel('School size')
plt.ylabel('Density')
plt.legend()
plt.title('Distribution of school size')
plt.grid(True)
plt.savefig(obasepath+'/school_size')
plt.close()
print("done.",flush=True)
# generate workplace size distribution
a=a_workplacesize
c=c_workplacesize
m_max=m_max_workplacesize
workplace_sizes = np.arange(m_max)
p_nplus = np.arange(float(m_max))
for m in range(m_max):
p_nplus[m] = ((( (1+m_max/a)/(1+m/a))**c) -1) / (((1+m_max/a)**c) -1)
p_nminus = 1.0 - p_nplus
p_n = np.arange(float(m_max))
prev=0.0
for m in range(1, m_max):
p_n[m] = p_nminus[m] - prev
prev = p_nminus[m]
# workplace size
print("Validating workplace-size in instantiation...",end='',flush=True)
full_frame = np.array([len(np.where(df1['workplace'] == i)[0]) for i in np.unique(df1['workplace'].values)[~np.isnan(np.unique(df1['workplace'].values))]])
workplacesize_output = [len(np.where(full_frame == j)[0]) for j in workplace_sizes] / np.sum([len(np.where(full_frame == j)[0]) for j in workplace_sizes])
workplace_distribution = p_n
plt.plot(np.log10(workplace_sizes),np.log10(workplacesize_output),'r',label='Instantiation')
plt.plot(np.log10(workplace_sizes), np.log10(workplace_distribution),label='Model')
plt.xlabel('Workplace size (log-scale)')
plt.ylabel('log_10 Density')
plt.title('Distribution of workplace size (in log-scale)')
plt.grid(True)
plt.legend()
plot_xlabel = [1, 10, 100, 1000, 2400]
plot_xlabel1 = np.log10(workplace_sizes)[plot_xlabel]
plt.xticks(plot_xlabel1, (workplace_sizes)[plot_xlabel])
plt.savefig(obasepath+'/workplace_size')
plt.close()
print("done.",flush=True)
print("Validating workplace commute distance in instantiation...",end='',flush=True)
full_frame = np.array([distance(df1.loc[i,'lat'],df1.loc[i,'lon'],wp.loc[wp.index==int(df1.loc[i,'workplace']),'lat'],wp.loc[wp.index==int(df1.loc[i,'workplace']),'lon']) for i in np.where(df1['workplaceType']==1)[0]])
commuter_distance_output = [len(np.where(np.array(np.floor(full_frame),dtype=int) ==i)[0]) for i in np.arange(0,m_max_commuter_distance)]/np.sum([len(np.where(np.array(np.floor(full_frame),dtype=int) ==i)[0]) for i in np.arange(0,m_max_commuter_distance)])
actual_dist=[]
actual_dist = travel_distance_distribution(0,m_max_commuter_distance,a_commuter_distance,b_commuter_distance)
d = np.arange(0,m_max_commuter_distance,1)
plt.plot(np.log10(d),np.log10(actual_dist),'b-',label='Model')
plt.plot(np.log10(d),np.log10((commuter_distance_output)),'r-o',label='Instantiation')
plt.xlabel('Workplace distance (km) (log-scale)')
plt.ylabel('log_10 Density')
plt.title('Distribution of workplace distances')
plot_xlabel=[1,5,25,31]
plot_xlabel1 = np.log10(d)[plot_xlabel]
plt.xticks(plot_xlabel1,d[plot_xlabel])
plt.grid(True)
plt.legend()
plt.savefig(obasepath+'/workplace_distance')
plt.close()
print("done.",flush=True)
| [
"matplotlib.pyplot.grid",
"numpy.log10",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"math.sqrt",
"shapely.geometry.Point",
"numpy.arange",
"os.path.exists",
"geopandas.read_file",
"argparse.ArgumentParser",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"json.dump... | [((388, 421), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (411, 421), False, 'import warnings\n'), ((3263, 3293), 'geopandas.read_file', 'gpd.read_file', (['citygeojsonfile'], {}), '(citygeojsonfile)\n', (3276, 3293), True, 'import geopandas as gpd\n'), ((3864, 3892), 'os.path.exists', 'os.path.exists', (['slumfracfile'], {}), '(slumfracfile)\n', (3878, 3892), False, 'import os\n'), ((5194, 5223), 'pandas.read_csv', 'pd.read_csv', (['demographicsfile'], {}), '(demographicsfile)\n', (5205, 5223), True, 'import pandas as pd\n'), ((5402, 5428), 'pandas.read_csv', 'pd.read_csv', (['householdfile'], {}), '(householdfile)\n', (5413, 5428), True, 'import pandas as pd\n'), ((5504, 5531), 'pandas.read_csv', 'pd.read_csv', (['employmentfile'], {}), '(employmentfile)\n', (5515, 5531), True, 'import pandas as pd\n'), ((10749, 10777), 'os.path.exists', 'os.path.exists', (['ODMatrixfile'], {}), '(ODMatrixfile)\n', (10763, 10777), False, 'import os\n'), ((17858, 17883), 'pandas.DataFrame', 'pd.DataFrame', (['individuals'], {}), '(individuals)\n', (17870, 17883), True, 'import pandas as pd\n'), ((18224, 18248), 'pandas.DataFrame', 'pd.DataFrame', (['workplaces'], {}), '(workplaces)\n', (18236, 18248), True, 'import pandas as pd\n'), ((19679, 19725), 'matplotlib.pyplot.plot', 'plt.plot', (['age_distribution', '"""b-"""'], {'label': '"""Data"""'}), "(age_distribution, 'b-', label='Data')\n", (19687, 19725), True, 'import matplotlib.pyplot as plt\n'), ((19725, 19742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age"""'], {}), "('Age')\n", (19735, 19742), True, 'import matplotlib.pyplot as plt\n'), ((19743, 19764), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (19753, 19764), True, 'import matplotlib.pyplot as plt\n'), ((19765, 19797), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of age"""'], {}), "('Distribution of age')\n", (19774, 19797), True, 'import 
matplotlib.pyplot as plt\n'), ((19798, 19812), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (19806, 19812), True, 'import matplotlib.pyplot as plt\n'), ((19813, 19825), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19823, 19825), True, 'import matplotlib.pyplot as plt\n'), ((19917, 19952), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(obasepath + '/age.png')"], {}), "(obasepath + '/age.png')\n", (19928, 19952), True, 'import matplotlib.pyplot as plt\n'), ((19951, 19962), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19960, 19962), True, 'import matplotlib.pyplot as plt\n'), ((20145, 20181), 'numpy.unique', 'np.unique', (['house'], {'return_counts': '(True)'}), '(house, return_counts=True)\n', (20154, 20181), True, 'import numpy as np\n'), ((20242, 20297), 'matplotlib.pyplot.plot', 'plt.plot', (['counts_elements', '"""r-o"""'], {'label': '"""Instantiation"""'}), "(counts_elements, 'r-o', label='Instantiation')\n", (20250, 20297), True, 'import matplotlib.pyplot as plt\n'), ((20298, 20350), 'matplotlib.pyplot.plot', 'plt.plot', (['household_distribution', '"""b-"""'], {'label': '"""data"""'}), "(household_distribution, 'b-', label='data')\n", (20306, 20350), True, 'import matplotlib.pyplot as plt\n'), ((20351, 20379), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Household-size"""'], {}), "('Household-size')\n", (20361, 20379), True, 'import matplotlib.pyplot as plt\n'), ((20380, 20401), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (20390, 20401), True, 'import matplotlib.pyplot as plt\n'), ((20402, 20445), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of household-size"""'], {}), "('Distribution of household-size')\n", (20411, 20445), True, 'import matplotlib.pyplot as plt\n'), ((20446, 20460), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (20454, 20460), True, 'import matplotlib.pyplot as plt\n'), ((20461, 20473), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20471, 20473), True, 'import matplotlib.pyplot as plt\n'), ((20621, 20667), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(obasepath + '/household_size.png')"], {}), "(obasepath + '/household_size.png')\n", (20632, 20667), True, 'import matplotlib.pyplot as plt\n'), ((20666, 20677), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20675, 20677), True, 'import matplotlib.pyplot as plt\n'), ((21217, 21274), 'matplotlib.pyplot.plot', 'plt.plot', (['schoolsize_output', '"""r-o"""'], {'label': '"""Instantiation"""'}), "(schoolsize_output, 'r-o', label='Instantiation')\n", (21225, 21274), True, 'import matplotlib.pyplot as plt\n'), ((21274, 21326), 'matplotlib.pyplot.plot', 'plt.plot', (['schoolsizeDistribution', '"""b-"""'], {'label': '"""Data"""'}), "(schoolsizeDistribution, 'b-', label='Data')\n", (21282, 21326), True, 'import matplotlib.pyplot as plt\n'), ((21449, 21474), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""School size"""'], {}), "('School size')\n", (21459, 21474), True, 'import matplotlib.pyplot as plt\n'), ((21475, 21496), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (21485, 21496), True, 'import matplotlib.pyplot as plt\n'), ((21497, 21509), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21507, 21509), True, 'import matplotlib.pyplot as plt\n'), ((21510, 21550), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of school size"""'], {}), "('Distribution of school size')\n", (21519, 21550), True, 'import matplotlib.pyplot as plt\n'), ((21551, 21565), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (21559, 21565), True, 'import matplotlib.pyplot as plt\n'), ((21566, 21605), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(obasepath + '/school_size')"], {}), "(obasepath + '/school_size')\n", (21577, 21605), True, 'import matplotlib.pyplot as plt\n'), ((21604, 21615), 'matplotlib.pyplot.close', 
'plt.close', ([], {}), '()\n', (21613, 21615), True, 'import matplotlib.pyplot as plt\n'), ((21767, 21783), 'numpy.arange', 'np.arange', (['m_max'], {}), '(m_max)\n', (21776, 21783), True, 'import numpy as np\n'), ((22677, 22717), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Workplace size (log-scale)"""'], {}), "('Workplace size (log-scale)')\n", (22687, 22717), True, 'import matplotlib.pyplot as plt\n'), ((22718, 22746), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log_10 Density"""'], {}), "('log_10 Density')\n", (22728, 22746), True, 'import matplotlib.pyplot as plt\n'), ((22747, 22805), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of workplace size (in log-scale)"""'], {}), "('Distribution of workplace size (in log-scale)')\n", (22756, 22805), True, 'import matplotlib.pyplot as plt\n'), ((22806, 22820), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (22814, 22820), True, 'import matplotlib.pyplot as plt\n'), ((22821, 22833), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22831, 22833), True, 'import matplotlib.pyplot as plt\n'), ((22928, 22982), 'matplotlib.pyplot.xticks', 'plt.xticks', (['plot_xlabel1', 'workplace_sizes[plot_xlabel]'], {}), '(plot_xlabel1, workplace_sizes[plot_xlabel])\n', (22938, 22982), True, 'import matplotlib.pyplot as plt\n'), ((22985, 23027), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(obasepath + '/workplace_size')"], {}), "(obasepath + '/workplace_size')\n", (22996, 23027), True, 'import matplotlib.pyplot as plt\n'), ((23026, 23037), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (23035, 23037), True, 'import matplotlib.pyplot as plt\n'), ((23764, 23804), 'numpy.arange', 'np.arange', (['(0)', 'm_max_commuter_distance', '(1)'], {}), '(0, m_max_commuter_distance, 1)\n', (23773, 23804), True, 'import numpy as np\n'), ((23953, 24002), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Workplace distance (km) (log-scale)"""'], {}), "('Workplace distance (km) 
(log-scale)')\n", (23963, 24002), True, 'import matplotlib.pyplot as plt\n'), ((24003, 24031), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log_10 Density"""'], {}), "('log_10 Density')\n", (24013, 24031), True, 'import matplotlib.pyplot as plt\n'), ((24032, 24080), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of workplace distances"""'], {}), "('Distribution of workplace distances')\n", (24041, 24080), True, 'import matplotlib.pyplot as plt\n'), ((24145, 24185), 'matplotlib.pyplot.xticks', 'plt.xticks', (['plot_xlabel1', 'd[plot_xlabel]'], {}), '(plot_xlabel1, d[plot_xlabel])\n', (24155, 24185), True, 'import matplotlib.pyplot as plt\n'), ((24185, 24199), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (24193, 24199), True, 'import matplotlib.pyplot as plt\n'), ((24200, 24212), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (24210, 24212), True, 'import matplotlib.pyplot as plt\n'), ((24213, 24259), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(obasepath + '/workplace_distance')"], {}), "(obasepath + '/workplace_distance')\n", (24224, 24259), True, 'import matplotlib.pyplot as plt\n'), ((24258, 24269), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24267, 24269), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1155), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create mini-city for COVID-19 simulation"""'}), "(description='Create mini-city for COVID-19 simulation')\n", (1099, 1155), False, 'import argparse\n'), ((2169, 2194), 'os.path.exists', 'os.path.exists', (['obasepath'], {}), '(obasepath)\n', (2183, 2194), False, 'import os\n'), ((2200, 2219), 'os.mkdir', 'os.mkdir', (['obasepath'], {}), '(obasepath)\n', (2208, 2219), False, 'import os\n'), ((4253, 4284), 'os.path.exists', 'os.path.exists', (['slumclusterfile'], {}), '(slumclusterfile)\n', (4267, 4284), False, 'import os\n'), ((5817, 5832), 'json.load', 'json.load', (['file'], {}), '(file)\n', (5826, 5832), 
False, 'import json\n'), ((8499, 8524), 'math.radians', 'math.radians', (['(lat2 - lat1)'], {}), '(lat2 - lat1)\n', (8511, 8524), False, 'import math\n'), ((8534, 8559), 'math.radians', 'math.radians', (['(lon2 - lon1)'], {}), '(lon2 - lon1)\n', (8546, 8559), False, 'import math\n'), ((18033, 18051), 'json.dumps', 'json.dumps', (['houses'], {}), '(houses)\n', (18043, 18051), False, 'import json\n'), ((18142, 18164), 'json.dumps', 'json.dumps', (['workplaces'], {}), '(workplaces)\n', (18152, 18164), False, 'import json\n'), ((18287, 18306), 'json.dumps', 'json.dumps', (['schools'], {}), '(schools)\n', (18297, 18306), False, 'import json\n'), ((18398, 18421), 'json.dumps', 'json.dumps', (['commonAreas'], {}), '(commonAreas)\n', (18408, 18421), False, 'import json\n'), ((18524, 18555), 'json.dumps', 'json.dumps', (['fractionPopulations'], {}), '(fractionPopulations)\n', (18534, 18555), False, 'import json\n'), ((18666, 18697), 'json.dumps', 'json.dumps', (['wardCentreDistances'], {}), '(wardCentreDistances)\n', (18676, 18697), False, 'import json\n'), ((19837, 19857), 'numpy.arange', 'np.arange', (['(0)', '(81)', '(10)'], {}), '(0, 81, 10)\n', (19846, 19857), True, 'import numpy as np\n'), ((20218, 20241), 'numpy.sum', 'np.sum', (['counts_elements'], {}), '(counts_elements)\n', (20224, 20241), True, 'import numpy as np\n'), ((22509, 22534), 'numpy.log10', 'np.log10', (['workplace_sizes'], {}), '(workplace_sizes)\n', (22517, 22534), True, 'import numpy as np\n'), ((22535, 22565), 'numpy.log10', 'np.log10', (['workplacesize_output'], {}), '(workplacesize_output)\n', (22543, 22565), True, 'import numpy as np\n'), ((22602, 22627), 'numpy.log10', 'np.log10', (['workplace_sizes'], {}), '(workplace_sizes)\n', (22610, 22627), True, 'import numpy as np\n'), ((22629, 22661), 'numpy.log10', 'np.log10', (['workplace_distribution'], {}), '(workplace_distribution)\n', (22637, 22661), True, 'import numpy as np\n'), ((22889, 22914), 'numpy.log10', 'np.log10', (['workplace_sizes'], 
{}), '(workplace_sizes)\n', (22897, 22914), True, 'import numpy as np\n'), ((23812, 23823), 'numpy.log10', 'np.log10', (['d'], {}), '(d)\n', (23820, 23823), True, 'import numpy as np\n'), ((23824, 23845), 'numpy.log10', 'np.log10', (['actual_dist'], {}), '(actual_dist)\n', (23832, 23845), True, 'import numpy as np\n'), ((23875, 23886), 'numpy.log10', 'np.log10', (['d'], {}), '(d)\n', (23883, 23886), True, 'import numpy as np\n'), ((23887, 23921), 'numpy.log10', 'np.log10', (['commuter_distance_output'], {}), '(commuter_distance_output)\n', (23895, 23921), True, 'import numpy as np\n'), ((24120, 24131), 'numpy.log10', 'np.log10', (['d'], {}), '(d)\n', (24128, 24131), True, 'import numpy as np\n'), ((4073, 4107), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (4083, 4107), False, 'import csv\n'), ((4420, 4450), 'geopandas.read_file', 'gpd.read_file', (['slumclusterfile'], {}), '(slumclusterfile)\n', (4433, 4450), True, 'import geopandas as gpd\n'), ((6016, 6054), 'numpy.random.choice', 'np.random.choice', (['hbins', '(1)'], {'p': 'hweights'}), '(hbins, 1, p=hweights)\n', (6032, 6054), True, 'import numpy as np\n'), ((6398, 6440), 'numpy.random.choice', 'np.random.choice', (['agebins', '(1)'], {'p': 'ageweights'}), '(agebins, 1, p=ageweights)\n', (6414, 6440), True, 'import numpy as np\n'), ((6771, 6797), 'random.uniform', 'random.uniform', (['lat1', 'lat2'], {}), '(lat1, lat2)\n', (6785, 6797), False, 'import random\n'), ((6811, 6837), 'random.uniform', 'random.uniform', (['lon1', 'lon2'], {}), '(lon1, lon2)\n', (6825, 6837), False, 'import random\n'), ((6853, 6868), 'shapely.geometry.Point', 'Point', (['lon', 'lat'], {}), '(lon, lat)\n', (6858, 6868), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((7804, 7830), 'random.uniform', 'random.uniform', (['lat1', 'lat2'], {}), '(lat1, lat2)\n', (7818, 7830), False, 'import random\n'), ((7844, 7870), 'random.uniform', 'random.uniform', (['lon1', 'lon2'], 
{}), '(lon1, lon2)\n', (7858, 7870), False, 'import random\n'), ((7886, 7901), 'shapely.geometry.Point', 'Point', (['lon', 'lat'], {}), '(lon, lat)\n', (7891, 7901), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((10848, 10882), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (10858, 10882), False, 'import csv\n'), ((15293, 15314), 'random.randint', 'random.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (15307, 15314), False, 'import random\n'), ((3489, 3518), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["row['geometry']"], {}), "(row['geometry'])\n", (3501, 3518), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((8566, 8584), 'math.sin', 'math.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (8574, 8584), False, 'import math\n'), ((8585, 8603), 'math.sin', 'math.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (8593, 8603), False, 'import math\n'), ((8693, 8711), 'math.sin', 'math.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (8701, 8711), False, 'import math\n'), ((8733, 8745), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (8742, 8745), False, 'import math\n'), ((8747, 8763), 'math.sqrt', 'math.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (8756, 8763), False, 'import math\n'), ((23478, 23515), 'numpy.arange', 'np.arange', (['(0)', 'm_max_commuter_distance'], {}), '(0, m_max_commuter_distance)\n', (23487, 23515), True, 'import numpy as np\n'), ((6879, 6921), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["geoDF['geometry'][wardIndex]"], {}), "(geoDF['geometry'][wardIndex])\n", (6891, 6921), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((7912, 7954), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["geoDF['geometry'][wardIndex]"], {}), "(geoDF['geometry'][wardIndex])\n", (7924, 7954), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((8674, 8692), 'math.sin', 'math.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (8682, 8692), False, 'import math\n'), ((14063, 
14079), 'numpy.arange', 'np.arange', (['m_max'], {}), '(m_max)\n', (14072, 14079), True, 'import numpy as np\n'), ((19884, 19904), 'numpy.arange', 'np.arange', (['(0)', '(71)', '(10)'], {}), '(0, 71, 10)\n', (19893, 19904), True, 'import numpy as np\n'), ((20549, 20585), 'numpy.arange', 'np.arange', (['(1)', 'household_sizes[-1]', '(1)'], {}), '(1, household_sizes[-1], 1)\n', (20558, 20585), True, 'import numpy as np\n'), ((21040, 21065), 'numpy.where', 'np.where', (['(full_frame == j)'], {}), '(full_frame == j)\n', (21048, 21065), True, 'import numpy as np\n'), ((21411, 21427), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (21420, 21427), True, 'import numpy as np\n'), ((22183, 22214), 'numpy.where', 'np.where', (["(df1['workplace'] == i)"], {}), "(df1['workplace'] == i)\n", (22191, 22214), True, 'import numpy as np\n'), ((22228, 22262), 'numpy.unique', 'np.unique', (["df1['workplace'].values"], {}), "(df1['workplace'].values)\n", (22237, 22262), True, 'import numpy as np\n'), ((22340, 22365), 'numpy.where', 'np.where', (['(full_frame == j)'], {}), '(full_frame == j)\n', (22348, 22365), True, 'import numpy as np\n'), ((23331, 23366), 'numpy.where', 'np.where', (["(df1['workplaceType'] == 1)"], {}), "(df1['workplaceType'] == 1)\n", (23339, 23366), True, 'import numpy as np\n'), ((23596, 23633), 'numpy.arange', 'np.arange', (['(0)', 'm_max_commuter_distance'], {}), '(0, m_max_commuter_distance)\n', (23605, 23633), True, 'import numpy as np\n'), ((3582, 3611), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["row['geometry']"], {}), "(row['geometry'])\n", (3594, 3611), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((3624, 3653), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["row['geometry']"], {}), "(row['geometry'])\n", (3636, 3653), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((12838, 12858), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (12852, 12858), False, 'import random\n'), 
((21135, 21160), 'numpy.where', 'np.where', (['(full_frame == j)'], {}), '(full_frame == j)\n', (21143, 21160), True, 'import numpy as np\n'), ((22410, 22435), 'numpy.where', 'np.where', (['(full_frame == j)'], {}), '(full_frame == j)\n', (22418, 22435), True, 'import numpy as np\n'), ((8613, 8631), 'math.radians', 'math.radians', (['lat1'], {}), '(lat1)\n', (8625, 8631), False, 'import math\n'), ((8652, 8670), 'math.radians', 'math.radians', (['lat2'], {}), '(lat2)\n', (8664, 8670), False, 'import math\n'), ((22273, 22307), 'numpy.unique', 'np.unique', (["df1['workplace'].values"], {}), "(df1['workplace'].values)\n", (22282, 22307), True, 'import numpy as np\n'), ((23428, 23448), 'numpy.floor', 'np.floor', (['full_frame'], {}), '(full_frame)\n', (23436, 23448), True, 'import numpy as np\n'), ((20878, 20906), 'numpy.where', 'np.where', (["(df1['school'] == i)"], {}), "(df1['school'] == i)\n", (20886, 20906), True, 'import numpy as np\n'), ((20920, 20951), 'numpy.unique', 'np.unique', (["df1['school'].values"], {}), "(df1['school'].values)\n", (20929, 20951), True, 'import numpy as np\n'), ((23546, 23566), 'numpy.floor', 'np.floor', (['full_frame'], {}), '(full_frame)\n', (23554, 23566), True, 'import numpy as np\n'), ((20962, 20993), 'numpy.unique', 'np.unique', (["df1['school'].values"], {}), "(df1['school'].values)\n", (20971, 20993), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import utils
class LogisticRegression(object):
    """Binary logistic regression trained by batch gradient descent.

    The loss is a class-weighted negative log-likelihood
        L = mean(-beta * y * log(p) - (1 - y) * log(1 - p)),  p = sigmoid(X W),
    where ``beta`` re-weights the positive-class term (beta=1 gives plain NLL).

    Fixes relative to the original:
      * ``get_grad`` was an unimplemented stub (``pass``), which broke both
        ``update`` and ``check_grad``; the analytic gradient is now implemented.
      * ``check_grad`` now restores ``self.W`` after the finite-difference
        probing instead of leaving it perturbed.
    """

    def __init__(self, n_feature, lr, beta=1):
        self.n_feature = n_feature
        # Small random initial weights, shape (n_feature, 1).
        self.W = np.random.normal(0, 0.01, (self.n_feature, 1))
        self.lr = lr
        self.trainloss = []
        self.validloss = []
        self.snapshot = []  # copy of W after each update, for inspection
        self.trainF1 = []
        self.validF1 = []
        self.beta = beta  # relative weight of the positive-class NLL term

    def sigmoid(self, Z):
        """Element-wise logistic function 1 / (1 + exp(-Z))."""
        return 1 / (np.exp(-Z) + 1)

    def predict(self, X):
        """Return P(y=1 | x) for each row of X, shape (n_samples, 1)."""
        return self.sigmoid(np.matmul(X, self.W))

    def get_loss(self, X, y):
        """Weighted negative log-likelihood averaged over the batch."""
        ys = self.predict(X)
        loss = np.mean(- self.beta * y * np.log(ys) - (1 - y) * np.log(1 - ys))
        return loss

    def get_grad(self, X, y):
        """Analytic gradient of get_loss w.r.t. W, shape (n_feature, 1).

        With z = X W and p = sigmoid(z), the chain rule gives
            dL/dz = -beta * y * (1 - p) + (1 - y) * p
        per sample, so dL/dW = X^T (dL/dz) / n_samples (get_loss uses a mean).
        """
        ys = self.predict(X)
        dz = -self.beta * y * (1 - ys) + (1 - y) * ys
        grad = np.matmul(X.T, dz) / X.shape[0]
        return grad

    def check_grad(self, X, y):
        """Return the L2 distance between analytic and central-difference gradients."""
        grad = self.get_grad(X, y)
        numeric_grad = np.zeros_like(grad)
        origin_W = self.W.copy()
        c = 1e-4  # finite-difference step
        for i in range(numeric_grad.size):
            eps = np.zeros(numeric_grad.shape)
            eps[i] = c
            self.W = origin_W + eps
            Jp = self.get_loss(X, y)
            self.W = origin_W - eps
            Jn = self.get_loss(X, y)
            numeric_grad[i] = (Jp - Jn) / 2 / c
        self.W = origin_W  # BUG FIX: restore the unperturbed weights
        return np.sqrt(((grad - numeric_grad) ** 2).sum())

    def update(self, X, y):
        """One gradient-descent step on (X, y); records loss, F1 and a W snapshot."""
        grad = self.get_grad(X, y)
        # gradient descent
        self.W = self.W - self.lr * grad
        loss = self.get_loss(X, y)
        self.trainloss.append(loss)
        self.trainF1.append(self.measure(X, y))  # record F1 score
        self.snapshot.append(self.W.copy())

    def evaluate(self, X, y):
        """Record validation loss and F1 on (X, y) without updating W."""
        loss = self.get_loss(X, y)
        self.validloss.append(loss)
        self.validF1.append(self.measure(X, y))

    def measure(self, X, y, threshold=0.5):
        """F1 score of thresholded predictions against labels y."""
        y_hat = self.predict(X)
        TP, FP, FN, TN = utils.confusion_matrix(threshold, y_hat, y)
        precision = float(TP) / (TP + FP)
        recall = float(TP) / (TP + FN)
        F1 = 2 * precision * recall / (precision + recall)
        return F1
| [
"numpy.random.normal",
"numpy.log",
"numpy.exp",
"numpy.zeros",
"numpy.matmul",
"utils.confusion_matrix",
"numpy.zeros_like"
] | [((195, 241), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)', '(self.n_feature, 1)'], {}), '(0, 0.01, (self.n_feature, 1))\n', (211, 241), True, 'import numpy as np\n'), ((1055, 1074), 'numpy.zeros_like', 'np.zeros_like', (['grad'], {}), '(grad)\n', (1068, 1074), True, 'import numpy as np\n'), ((2104, 2147), 'utils.confusion_matrix', 'utils.confusion_matrix', (['threshold', 'y_hat', 'y'], {}), '(threshold, y_hat, y)\n', (2126, 2147), False, 'import utils\n'), ((586, 606), 'numpy.matmul', 'np.matmul', (['X', 'self.W'], {}), '(X, self.W)\n', (595, 606), True, 'import numpy as np\n'), ((1186, 1214), 'numpy.zeros', 'np.zeros', (['numeric_grad.shape'], {}), '(numeric_grad.shape)\n', (1194, 1214), True, 'import numpy as np\n'), ((498, 508), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (504, 508), True, 'import numpy as np\n'), ((740, 750), 'numpy.log', 'np.log', (['ys'], {}), '(ys)\n', (746, 750), True, 'import numpy as np\n'), ((761, 775), 'numpy.log', 'np.log', (['(1 - ys)'], {}), '(1 - ys)\n', (767, 775), True, 'import numpy as np\n')] |
from MovieRecommender import train_test_model
import pandas as pd
import numpy as np
import sys
from scipy.sparse import csr_matrix, load_npz
import pickle
from tabulate import tabulate
def get_movies_rated(data, user_id, train_data, movies):
    """Return the titles of the movies a user rated in the training matrix.

    ``train_data`` is a sparse user-item matrix whose rows/columns follow the
    sorted order of the distinct user/item ids present in ``data`` (rows with
    a zero rating excluded). The result is a pandas Series of movie names.
    """
    rated = data.loc[data.rating != 0]
    unique_users = np.sort(rated.user_id.unique())   # row order of train_data
    unique_items = np.sort(rated.item_id.unique())   # column order of train_data
    # Row index of this user in the ratings matrix.
    row = np.where(unique_users == user_id)[0][0]
    # Column indices the user actually rated, mapped back to movie ids.
    rated_cols = train_data[row, :].nonzero()[1]
    rated_ids = unique_items[rated_cols]
    return movies.loc[movies['item_id'].isin(rated_ids),
                      'name'].reset_index(drop=True)
def predict_ratings(predictions, item_vecs, user_id):
    """Return the predicted rating vector for one user.

    ``predictions`` is a pair (user factor matrix, item factor matrix).
    The ``item_vecs`` argument is immediately replaced by
    ``predictions[1]`` (kept only for call-site compatibility).
    """
    item_factors = predictions[1]
    user_factors = predictions[0][user_id, :]
    # Dense 1-D score array: one entry per item.
    return user_factors.dot(item_factors).toarray()[0].reshape(-1)
def similar_items(movies, model, movie_list, n_similar=20):
    """Look up movies similar to the first title in ``movie_list``.

    Title matching is case-insensitive (names are stripped in place).
    The first returned hit — the queried movie itself — is dropped from
    the resulting DataFrame.
    """
    movies.name = movies.name.str.strip()
    wanted = [title.lower() for title in movie_list]
    item_id = movies.item_id.loc[
        movies.name.str.lower().isin(wanted)].iloc[0]
    names = []
    # model.similar_items yields (index, score) pairs; the name lookup
    # maps a zero-based index back to an item_id via idx + 1.
    for idx, _score in model.similar_items(item_id, n_similar):
        names.append(movies.name.loc[movies.item_id == idx + 1].iloc[0])
    # Skip entry 0: it is the query movie itself.
    return pd.DataFrame({"Similar Movies": names[1:]})
def recommendations(data, train_data, movies, model,
                    sparse_user_item, user_id):
    """Build a side-by-side frame of recommended vs already-rated movies."""
    recommended = model.recommend(user_id, sparse_user_item)
    recommended_names = []
    # Translate (index, score) pairs into titles; item ids are the
    # zero-based index + 1.
    for idx, _score in recommended:
        recommended_names.append(
            movies.name.loc[movies.item_id == idx + 1].iloc[0])
    already_rated = get_movies_rated(data, user_id, train_data, movies)
    # Truncate both columns to a common length so the frame is rectangular.
    n = min(len(recommended_names), len(already_rated))
    return pd.DataFrame({'Recommended Movies': recommended_names[:n],
                         'Movies Rated': already_rated[:n]})
def main():
    """CLI entry point for the movie recommender.

    Re-trains the model, loads the persisted artefacts from ./output
    and prints, depending on the number of CLI arguments:
      1 arg  -- 20 movies similar to the given title
      2 args -- <title> <N>: N movies similar to the title
      3 args -- <title> <user_id> <N>: ratings, similar movies and
                recommendations for that user
      none   -- demo run with fixed defaults ("Sliding Doors", user 100)
    """
    train_test_model.main()
    # Load everything the recommender needs from the training run.
    movies = pd.read_pickle("./output/movies.pkl")
    sparse_user_item = load_npz("./output/sparse_user_item.npz")
    item_vecs = np.load('./output/item_vecs.npy')
    user_vecs = np.load('./output/user_vecs.npy')
    data = pd.read_pickle("./output/ratings.pkl")
    with open('./output/als_model', 'rb') as file:
        als_model = pickle.load(file)
    with open('./output/train_data', 'rb') as train_file:
        train_data = pickle.load(train_file)
    with open('./output/test_data', 'rb') as test_file:
        test_data = pickle.load(test_file)
    print('Number of arguments:', len(sys.argv) - 1, 'arguments.')
    print('Argument List:', str(sys.argv))
    if len(sys.argv) == 2:
        movie_list = [sys.argv[1]]
        # n_similar is one larger than what is printed because the
        # query movie itself is returned first and then dropped.
        n_similar = 21
        similar_df = similar_items(movies, als_model, movie_list, n_similar)
        similar_df.index += 1
        print()
        print("************************** "+str(n_similar - 1) +
              " MOVIES SIMILAR TO :" + str(movie_list) +
              " *****************")
        print()
        print(tabulate(similar_df, tablefmt="pipe", headers="keys"))
        print()
        print("**************************************************************")
    elif len(sys.argv) == 3:
        movie_list = [sys.argv[1]]
        n_similar = int(sys.argv[2]) + 1
        similar_df = similar_items(movies, als_model, movie_list, n_similar)
        similar_df.index += 1
        print()
        print("************************** "+str(n_similar - 1) +
              " MOVIES SIMILAR TO :" + str(movie_list) + " *****************")
        print()
        print(tabulate(similar_df, tablefmt="pipe", headers="keys"))
        print()
        print("**************************************************************")
    elif len(sys.argv) == 4:
        movie_list = [sys.argv[1]]
        user_id = int(sys.argv[2])
        n_similar = int(sys.argv[3]) + 1
        # Sparse factor matrices for the rating prediction.
        predictions = [csr_matrix(user_vecs), csr_matrix(item_vecs.T)]
        predictRatings = predict_ratings(predictions, item_vecs, user_id)
        actualRatings = test_data[user_id, :].toarray().reshape(-1)
        ratings_df = pd.DataFrame({"Predicted Ratings": predictRatings,
                                   "Actual Ratings": actualRatings})
        ratings_df.index += 1
        similar_df = similar_items(movies, als_model, movie_list, n_similar)
        similar_df.index += 1
        recomm_df = recommendations(data, train_data, movies, als_model,
                                    sparse_user_item, user_id)
        recomm_df.index += 1
        print()
        print("************************** TOP 20 RATINGS FOR USER :" +
              str(user_id) + " ****************")
        print()
        print(tabulate(ratings_df[:20], tablefmt="pipe", headers="keys"))
        print()
        print("************************** "+str(n_similar - 1) +
              " MOVIES SIMILAR TO :" + str(movie_list) + " *****************")
        print()
        print(tabulate(similar_df, tablefmt="pipe", headers="keys"))
        print()
        print("************************** RECOMMEDATIONS FOR USER :"
              + str(user_id) + " ******************")
        print()
        print(tabulate(recomm_df, tablefmt="pipe", headers="keys"))
        print()
        print("**************************************************************")
    else:
        # No/unsupported argument count: run the fixed demo scenario.
        movie_list = ["Sliding Doors"]
        user_id = 100
        n_similar = 21
        predictions = [csr_matrix(user_vecs), csr_matrix(item_vecs.T)]
        predictRatings = predict_ratings(predictions, item_vecs, user_id)
        actualRatings = test_data[user_id, :].toarray().reshape(-1)
        ratings_df = pd.DataFrame({"Predicted Ratings": predictRatings,
                                   "Actual Ratings": actualRatings})
        ratings_df.index += 1
        similar_df = similar_items(movies, als_model, movie_list, n_similar)
        similar_df.index += 1
        recomm_df = recommendations(data, train_data, movies, als_model,
                                    sparse_user_item, user_id)
        recomm_df.index += 1
        print()
        print("************************** TOP 20 RATINGS FOR USER :" +
              str(user_id) + " ****************")
        print()
        print(tabulate(ratings_df[:20], tablefmt="pipe", headers="keys"))
        print()
        print("************************** " + str(n_similar - 1) +
              " MOVIES SIMILAR TO :" + str(movie_list) + " *****************")
        print()
        print(tabulate(similar_df, tablefmt="pipe", headers="keys"))
        print()
        print("************************** RECOMMEDATIONS FOR USER :" +
              str(user_id) + " ******************")
        print()
        print(tabulate(recomm_df, tablefmt="pipe", headers="keys"))
        print()
        print("**************************************************************")
# Run the CLI only when this file is executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"pandas.read_pickle",
"tabulate.tabulate",
"numpy.where",
"scipy.sparse.load_npz",
"pickle.load",
"numpy.array",
"pandas.DataFrame",
"MovieRecommender.train_test_model.main",
"scipy.sparse.csr_matrix",
"numpy.load"
] | [((459, 474), 'numpy.array', 'np.array', (['users'], {}), '(users)\n', (467, 474), True, 'import numpy as np\n'), ((536, 551), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (544, 551), True, 'import numpy as np\n'), ((1714, 1763), 'pandas.DataFrame', 'pd.DataFrame', (["{'Similar Movies': movie_names[1:]}"], {}), "({'Similar Movies': movie_names[1:]})\n", (1726, 1763), True, 'import pandas as pd\n'), ((2578, 2689), 'pandas.DataFrame', 'pd.DataFrame', (["{'Recommended Movies': movies_recom[:minlen], 'Movies Rated':\n movies_rated_by_users[:minlen]}"], {}), "({'Recommended Movies': movies_recom[:minlen], 'Movies Rated':\n movies_rated_by_users[:minlen]})\n", (2590, 2689), True, 'import pandas as pd\n'), ((2839, 2862), 'MovieRecommender.train_test_model.main', 'train_test_model.main', ([], {}), '()\n', (2860, 2862), False, 'from MovieRecommender import train_test_model\n'), ((2876, 2913), 'pandas.read_pickle', 'pd.read_pickle', (['"""./output/movies.pkl"""'], {}), "('./output/movies.pkl')\n", (2890, 2913), True, 'import pandas as pd\n'), ((2937, 2978), 'scipy.sparse.load_npz', 'load_npz', (['"""./output/sparse_user_item.npz"""'], {}), "('./output/sparse_user_item.npz')\n", (2945, 2978), False, 'from scipy.sparse import csr_matrix, load_npz\n'), ((2995, 3028), 'numpy.load', 'np.load', (['"""./output/item_vecs.npy"""'], {}), "('./output/item_vecs.npy')\n", (3002, 3028), True, 'import numpy as np\n'), ((3045, 3078), 'numpy.load', 'np.load', (['"""./output/user_vecs.npy"""'], {}), "('./output/user_vecs.npy')\n", (3052, 3078), True, 'import numpy as np\n'), ((3090, 3128), 'pandas.read_pickle', 'pd.read_pickle', (['"""./output/ratings.pkl"""'], {}), "('./output/ratings.pkl')\n", (3104, 3128), True, 'import pandas as pd\n'), ((3200, 3217), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3211, 3217), False, 'import pickle\n'), ((3297, 3320), 'pickle.load', 'pickle.load', (['train_file'], {}), '(train_file)\n', (3308, 3320), False, 'import pickle\n'), 
((3397, 3419), 'pickle.load', 'pickle.load', (['test_file'], {}), '(test_file)\n', (3408, 3419), False, 'import pickle\n'), ((648, 678), 'numpy.where', 'np.where', (['(users_arr == user_id)'], {}), '(users_arr == user_id)\n', (656, 678), True, 'import numpy as np\n'), ((3928, 3981), 'tabulate.tabulate', 'tabulate', (['similar_df'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(similar_df, tablefmt='pipe', headers='keys')\n", (3936, 3981), False, 'from tabulate import tabulate\n'), ((4482, 4535), 'tabulate.tabulate', 'tabulate', (['similar_df'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(similar_df, tablefmt='pipe', headers='keys')\n", (4490, 4535), False, 'from tabulate import tabulate\n'), ((5008, 5096), 'pandas.DataFrame', 'pd.DataFrame', (["{'Predicted Ratings': predictRatings, 'Actual Ratings': actualRatings}"], {}), "({'Predicted Ratings': predictRatings, 'Actual Ratings':\n actualRatings})\n", (5020, 5096), True, 'import pandas as pd\n'), ((6552, 6640), 'pandas.DataFrame', 'pd.DataFrame', (["{'Predicted Ratings': predictRatings, 'Actual Ratings': actualRatings}"], {}), "({'Predicted Ratings': predictRatings, 'Actual Ratings':\n actualRatings})\n", (6564, 6640), True, 'import pandas as pd\n'), ((4797, 4818), 'scipy.sparse.csr_matrix', 'csr_matrix', (['user_vecs'], {}), '(user_vecs)\n', (4807, 4818), False, 'from scipy.sparse import csr_matrix, load_npz\n'), ((4820, 4843), 'scipy.sparse.csr_matrix', 'csr_matrix', (['item_vecs.T'], {}), '(item_vecs.T)\n', (4830, 4843), False, 'from scipy.sparse import csr_matrix, load_npz\n'), ((5599, 5657), 'tabulate.tabulate', 'tabulate', (['ratings_df[:20]'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(ratings_df[:20], tablefmt='pipe', headers='keys')\n", (5607, 5657), False, 'from tabulate import tabulate\n'), ((5850, 5903), 'tabulate.tabulate', 'tabulate', (['similar_df'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(similar_df, tablefmt='pipe', headers='keys')\n", (5858, 5903), 
False, 'from tabulate import tabulate\n'), ((6074, 6126), 'tabulate.tabulate', 'tabulate', (['recomm_df'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(recomm_df, tablefmt='pipe', headers='keys')\n", (6082, 6126), False, 'from tabulate import tabulate\n'), ((6341, 6362), 'scipy.sparse.csr_matrix', 'csr_matrix', (['user_vecs'], {}), '(user_vecs)\n', (6351, 6362), False, 'from scipy.sparse import csr_matrix, load_npz\n'), ((6364, 6387), 'scipy.sparse.csr_matrix', 'csr_matrix', (['item_vecs.T'], {}), '(item_vecs.T)\n', (6374, 6387), False, 'from scipy.sparse import csr_matrix, load_npz\n'), ((7143, 7201), 'tabulate.tabulate', 'tabulate', (['ratings_df[:20]'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(ratings_df[:20], tablefmt='pipe', headers='keys')\n", (7151, 7201), False, 'from tabulate import tabulate\n'), ((7396, 7449), 'tabulate.tabulate', 'tabulate', (['similar_df'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(similar_df, tablefmt='pipe', headers='keys')\n", (7404, 7449), False, 'from tabulate import tabulate\n'), ((7620, 7672), 'tabulate.tabulate', 'tabulate', (['recomm_df'], {'tablefmt': '"""pipe"""', 'headers': '"""keys"""'}), "(recomm_df, tablefmt='pipe', headers='keys')\n", (7628, 7672), False, 'from tabulate import tabulate\n')] |
#!/usr/bin/env python
from datetime import datetime
import time
from kobot.msg import range_n_bearing_sensor, landmark_sensor, floor_sensor
import rospy
from geometry_msgs.msg import Twist, Vector3, PointStamped, PoseStamped
from std_msgs.msg import UInt8, Bool, String
from nav_msgs.msg import Odometry
import numpy as np
import math
import random
import tf
# for publishing dictionary as encoded string
import json
class LBADriver(object):
    def __init__(self):
        """Set up the landmark-based RL action space, the Q/check tables,
        all ROS publishers/subscribers and the default runtime state."""
        # nu: landmark-visit counter used by the cyclic epsilon schedule.
        self.nu = 0
        # max_d = 1.74
        # NOTE(review): max_d is only used by the commented-out linspace
        # alternative below; the active a_d_arr is hard-coded.
        max_d = 1.1
        th_len = 3
        d_len = 2
        self.x_goal = 0.0
        self.y_goal = 0.0
        # Action space dimensions: th_len bearings x d_len distances.
        self.action_dim = [th_len, d_len]
        # a_th_arr = np.linspace(2*math.pi/10, 8*math.pi/10, th_len)
        # a_th_arr = [math.pi/6, math.pi/3, math.pi/2, 2*math.pi/3,5*math.pi/6]
        # a_th_arr = [2*math.pi/10, math.pi/2, 8*math.pi/10]
        a_th_arr = [math.pi/3, math.pi/2, 2*math.pi/3]
        # a_th_arr = [math.pi/2]
        # a_d_arr = np.linspace(max_d, max_d, d_len)
        a_d_arr = [0.9,1.4]
        # Build the [bearing][distance] action grid.
        actions = []
        for a_th in a_th_arr:
            action_d = []
            for a_d in a_d_arr:
                action_d.append([a_th, a_d])
            actions.append(action_d)
        self.actions = actions
        self.landmark_id_list = [40, 41, 42, 43, 44, 45]
        # convert landmark ids to str
        self.landmark_id_list = [str(i) for i in self.landmark_id_list]
        self.Q_table = {}
        self.check_table = {}
        # initialize dict. with landmark ids are keys
        # and a np array is reward values
        for landmark_id in self.landmark_id_list:
            self.Q_table[landmark_id] = np.zeros((th_len, d_len))
            self.check_table[landmark_id] = np.zeros((th_len, d_len))
        self.get_params()
        # default vals.
        self.active_landmark = None
        self.action_id = [None, None]
        self.prev_landmark = None
        self.obs_detected = False
        self.robot_detected = False
        self.going_cue = False
        self.sub_lock = False
        # Per-sensor filtered range / is_robot histories (8 sensors).
        self.range_prev = [0]*8
        self.is_robot_prev = [0]*8
        # Planar pose [x, y, yaw] maintained by odom_callback.
        self.robot_pose = [0, 0, 0]
        # I_c: latest averaged floor-sensor intensity (cue strength).
        self.I_c = 0
        self.I_avg_prev = 0
        # message initalizers w/ def. vals.
        self.obj_msg = Bool()
        self.obj_msg = False
        self.intensity_msg = UInt8()
        self.intensity_msg = 0
        self.landmark_msg = UInt8()
        self.landmark_msg = 0
        self.landmark_dict = {}
        # first define publishers to not get any err.
        self.nav_vel_pub = rospy.Publisher(
            "nav_vel",
            Twist, queue_size=1)
        self.Q_table_pub = rospy.Publisher(
            "Q_table",
            String, queue_size=1)
        self.check_table_pub = rospy.Publisher(
            "check_table",
            String, queue_size=1)
        # publishers for neopixel visualization
        self.intensity_vis_pub = rospy.Publisher(
            "lba/intensity",
            UInt8, queue_size=1)
        self.landmark_vis_pub = rospy.Publisher(
            "lba/landmark",
            UInt8, queue_size=1)
        # publisher for encoded landmark dict.
        self.dict_pub = rospy.Publisher(
            "landmark",
            String, queue_size=1)
        # publisher for closed loop position control
        self.pose_goal_pub = rospy.Publisher(
            "move_base_simple/goal",
            PoseStamped, queue_size=1)
        # publisher for switching between vel. and pos. control
        self.move_lock_pub = rospy.Publisher(
            "move_lock",
            Bool, queue_size=1)
        # Loop rate for the blocking turn controller; falls back to 20 Hz.
        freq = 20
        if rospy.has_param('odom_freq'):
            freq = rospy.get_param('odom_freq')
        self.rate_turn_theta = rospy.Rate(freq)
        # transformer objects
        self.listener = tf.TransformListener()
        self.broadcaster = tf.TransformBroadcaster()
        # Subscribers are created last so every callback can rely on the
        # publishers/attributes defined above.
        rospy.Subscriber("goal_reached",
                         Bool,
                         self.goal_reached_callback,
                         queue_size=1)
        rospy.Subscriber("sensors/range_n_bearing",
                         range_n_bearing_sensor,
                         self.rb_callback,
                         queue_size=1)
        rospy.Subscriber("sensors/landmark_sensor",
                         UInt8,
                         self.landmark_callback,
                         queue_size=1)
        rospy.Subscriber("sensors/floor_sensor",
                         floor_sensor,
                         self.intensity_callback,
                         queue_size=1)
        rospy.Subscriber("wheel_odom",
                         Odometry,
                         self.odom_callback,
                         queue_size=1)
def goal_reached_callback(self, data):
"""
Callback called when position controller
informs that goal is reached
"""
if data.data:
self.going_cue = False
rospy.loginfo("Goal reached")
if self.I_c > self.I_thresh:
self.publish_neopixel('green')
self.update_Q_table(self.I_c)
else:
self.publish_neopixel('white')
self.update_Q_table(-1)
else:
self.going_cue = False
rospy.loginfo("Goal not reached")
def select_action(self):
"""
Decide on whether to explore or
exploit the Q Table with a random
action defined by epsilon value
"""
if self.going_cue:
return
epsilon = random.uniform(0.0, 1.0)
if epsilon < self.epsilon:
rospy.loginfo("Random action")
# exploration
self.publish_neopixel('red')
th_indx = random.randint(0, self.action_dim[0]-1)
d_indx = random.randint(0, self.action_dim[1]-1)
self.action_id = [th_indx, d_indx]
else:
rospy.loginfo("Best action")
# explotation
self.publish_neopixel('green')
actions = self.Q_table[self.active_landmark]
# TODO if we have more than one action
# best actions having same value choose
# randomly from them
i, j = np.unravel_index(
np.argmax(actions, axis=None), actions.shape)
self.action_id = [i, j]
self.perform_action()
def update_check_table(self):
check_val = self.check_table[self.active_landmark][self.action_id[0], self.action_id[1]]
if not check_val:
self.check_table[self.active_landmark][self.action_id[0], self.action_id[1]] = 1
# create new dict.
# to not mess with the original
check_table = {}
# convert numpy arr. to list
for key, val in self.check_table.items():
check_table[key] = np.mean(val)
encoded_dict = String()
encoded_dict = json.dumps(check_table)
self.check_table_pub.publish(encoded_dict)
def perform_action(self):
"""
Get the selected action convert
to cartesian coords. from polar
and perform the action
"""
action_th, action_d = self.actions[self.action_id[0]
][self.action_id[1]]
self.update_check_table()
rospy.loginfo("Selected action is {}, {}".format(
action_d, action_th*180.0/math.pi))
x_lc_l = action_d * np.sin(action_th)
y_lc_l = action_d * np.cos(action_th)
self.go_to_cue(x_lc_l, y_lc_l, self.active_landmark)
def update_Q_table(self, reward):
if self.active_landmark is None:
return
Q_new = self.get_new_Q(reward)
self.Q_table[self.active_landmark][self.action_id[0]
][self.action_id[1]] = Q_new
self.active_landmark = None
self.action_id = [None, None]
self.publish_Q_table()
def publish_Q_table(self):
"""
Publish the final state of the Q table
as a string
"""
# create new dict.
# to not mess with the original
Q_table = {}
# convert numpy arr. to list
for key, val in self.Q_table.items():
Q_table[key] = val.tolist()
encoded_dict = String()
encoded_dict = json.dumps(Q_table)
self.Q_table_pub.publish(encoded_dict)
def get_new_Q(self, reward):
"""
Update routine for Q values based
on the reward
"""
weight = 0.1
try:
Q_old = self.Q_table[self.active_landmark][self.action_id[0]
][self.action_id[1]]
Q_new = Q_old * (1 - weight) + reward * weight
except KeyError:
rospy.logerr(" Unknown Aruco")
return Q_new
def update_epsilon_cyclic(self):
"""
Cyclic epsilon update routine
"""
A = 1
self.epsilon = A * (1 + math.cos(2 * math.pi * self.nu / self.p)) / 2
rospy.loginfo("Epsilon : {}".format(self.epsilon))
    def get_params(self):
        """
        Fetch the LBA tuning parameters from the ROS parameter server
        ('lba_params' namespace) if present, otherwise install the
        hard-coded defaults. Called from __init__ and safe to re-call
        for live re-tuning.
        """
        if rospy.has_param('lba_params'):
            # fetch a group (dictionary) of parameters
            params = rospy.get_param('lba_params')
            # LBA params.
            self.w_max = params['w_max']
            self.T_e = params['T_e']
            # RL-LBA params.
            self.p = params['p']
            # self.epsilon = params['epsilon']
            # self.action_id = params['action_id']
            # Implementation specific params.
            self.K_p = params['K_p']
            self.u_max = params['u_max']
            self.min_angular_err = params['min_ang_err']
            self.zeta_range = params['zeta_range']
            self.zeta_is_robot = params['zeta_is_robot']
            # Per-sensor-group obstacle / robot detection thresholds.
            self.obs_detection_thresh_1 = params['obs_detection_thresh_1']
            self.obs_detection_thresh_2 = params['obs_detection_thresh_2']
            self.obs_detection_thresh_3 = params['obs_detection_thresh_3']
            self.cue_exit_robot_thresh_1 = params['cue_exit_robot_thresh_1']
            self.cue_exit_robot_thresh_2 = params['cue_exit_robot_thresh_2']
            self.obs_robot_detection_thresh_1 = params[
                'obs_robot_detection_thresh_1']
            self.obs_robot_detection_thresh_2 = params[
                'obs_robot_detection_thresh_2']
            self.obs_robot_detection_thresh_3 = params[
                'obs_robot_detection_thresh_3']
            self.robot_detection_thresh = params['robot_detection_thresh']
            # Cue intensity threshold / wait-time shaping constant.
            self.I_thresh = params["I_thresh"]
            self.I_const = params["I_const"]
            self.max_rand_ang = params["max_rand_ang"]
            self.min_rand_ang = params["min_rand_ang"]
            self.dynamic_lba_params = params['dynamic_lba_params']
        else:  # feed default vals
            self.w_max = 120
            self.u_max = 0.1
            self.min_angular_err = 5 / 180 * math.pi
            # self.action_id = [5,2]
            self.K_p = 3
            self.T_e = 4
            # self.epsilon = 1.0
            self.zeta_range = 1.0
            self.zeta_is_robot = 1.0
            self.obs_detection_thresh_1 = 4
            self.obs_detection_thresh_2 = 5
            self.obs_detection_thresh_3 = 2
            self.p = 100
            self.obs_robot_detection_thresh_1 = 2
            self.obs_robot_detection_thresh_2 = 2
            self.obs_robot_detection_thresh_3 = 2
            self.cue_exit_robot_thresh_1 = 2
            self.cue_exit_robot_thresh_2 = 5
            self.robot_detection_thresh = 3
            self.I_thresh = 150
            self.I_const = 2500
            self.max_rand_ang = math.pi/2
            self.min_rand_ang = 0
            self.dynamic_lba_params = True
    def rb_callback(self, data):
        """
        Range-and-bearing sensor callback: low-pass filter the eight
        range / is_robot channels, then set ``obs_detected`` /
        ``robot_detected`` and collect the bearings of triggered sensors.
        """
        # nothing detected yet
        # give default vals.
        self.obs_detected = False
        self.robot_detected = False
        self.rb = range_n_bearing_sensor()
        self.rb = data.range_n_bearing
        # get only the range values and filter
        # with moving average zeta
        range_prev = self.range_prev
        is_robot_prev = self.is_robot_prev
        self.range_prev = [0]*8
        self.is_robot_prev = [0]*8
        self.detected_sensor_angs = []
        for indx, sensor_reading in enumerate(data.range_n_bearing):
            # filter range values and update prev filtered lists
            range_val = moving_average_filter(sensor_reading.range,
                                              range_prev[indx], self.zeta_range)
            self.range_prev[indx] = range_val
            # filter is_robot vals.
            is_robot = moving_average_filter(sensor_reading.is_robot,
                                             is_robot_prev[indx], self.zeta_is_robot)
            self.is_robot_prev[indx] = is_robot
            # range_val = int(range_val)
            # Binarise the filtered is_robot estimate at 0.5.
            if is_robot < 0.5:
                is_robot = False
            else:
                is_robot = True
            if not is_robot:
                # NOTE(review): the gate uses obs_detection_thresh_1 for
                # every sensor, but the per-sensor threshold passed on is
                # the obs_robot_* one — confirm this is intentional.
                if range_val > self.obs_detection_thresh_1:
                    if indx in [0]:
                        # 0 is directly front facing sensor
                        # of the robot
                        self.obs_detection(
                            range_val, self.obs_robot_detection_thresh_1, indx)
                    elif indx in [1, 7]:
                        # 1, 7 front left and right facing sensors
                        # of the robot
                        self.obs_detection(
                            range_val, self.obs_robot_detection_thresh_2, indx)
                    elif indx in [2, 6]:
                        # 2, 6 directly left and right facing sensors
                        # of the robot
                        self.obs_detection(
                            range_val, self.obs_robot_detection_thresh_3, indx)
            else:
                if range_val > self.robot_detection_thresh:
                    # all sensors used for robot detection
                    self.robot_detected = True
                # for not to collide with robots assume as obstacle
                # if they are too close
                if indx in [0]:
                    self.obs_detection(
                        range_val, self.obs_robot_detection_thresh_1, indx)
                elif indx in [1, 7]:
                    self.obs_detection(
                        range_val, self.obs_robot_detection_thresh_2, indx)
                elif indx in [2, 6]:
                    self.obs_detection(
                        range_val, self.obs_robot_detection_thresh_3, indx)
        # Average bearing of all triggered sensors (used by avoid_nicely).
        if self.detected_sensor_angs != []:
            self.detected_sensor_ang = sum(self.detected_sensor_angs) / float(len(self.detected_sensor_angs))
def obs_detection(self, range_val, range_thresh, indx):
"""
Decide to obstacle detection based on the inputs
"""
# if not self.going_cue:
if range_val > range_thresh:
self.obs_detected = True
self.detected_sensor_angs.append(wrap2pi(indx * math.pi/4))
def avoid_cue_collision(self):
"""
Try to avoid collision when going to cue
"""
self.publish_twist(0, 0)
self.publish_move_lock(True)
rate = rospy.Rate(20)
# each loop takes 0.1 s
for _ in range(20):
if rospy.is_shutdown():
return
if not self.obs_detected:
self.publish_twist(0.12, 0)
# rospy.loginfo("Cue Exit Obs.")
self.publish_move_lock(False)
self.publish_pose(self.x_goal, self.y_goal)
return
rate.sleep()
self.update_Q_table(-0.1)
self.going_cue = False
    def avoid_nicely(self):
        """
        Obstacle avoidance outside a cue: turn away from the mean
        detection bearing by a random angle if the chosen direction is
        clear, otherwise spin in a random direction until the front
        sensors are free, then drive forward.
        """
        self.publish_neopixel('yellow')
        # Random turn, biased away from the detection direction.
        theta = random.uniform(+math.pi/2 + self.max_rand_ang,
                               3*math.pi/2 - self.max_rand_ang)
        rospy.loginfo("Detection angle : {}".format(
            180/math.pi*self.detected_sensor_ang))
        theta += self.detected_sensor_ang
        theta = wrap2pi(theta)
        # find corresponding rb after rotation
        decision_indx = int(theta / (math.pi/4))
        if self.range_prev[decision_indx] < 3.0 and self.range_prev[(decision_indx+1)%8] < 3.0:
            # if random decision gets us out follow it
            self.turn_theta(theta)
            self.publish_twist(0.1, 0)
        else:
            # random decision does not get us out
            # turn until our front is empty
            rate = rospy.Rate(20)
            is_ccw = random.choice([True, False])
            while True:
                if self.range_prev[0] < 4.0:
                    # Front sensor clear: move on.
                    self.publish_twist(0.1, 0)
                    break
                else:
                    if is_ccw:
                        self.publish_twist(0, 1.2)
                    else:
                        self.publish_twist(0, -1.2)
                rate.sleep()
    def navigate(self):
        """
        Main behaviour step: random-walk with obstacle avoidance outside
        a cue; on a cue with a robot nearby, reward the action, wait, and
        then try to exit the cue.
        """
        if self.sub_lock:
            # ignore any new messages
            # still processing the latest
            return
        if self.obs_detected:
            if self.I_c < self.I_thresh:
                # outside the cue
                if self.going_cue:
                    self.publish_move_lock(True)
                    self.publish_twist(0, 0)
                    self.avoid_cue_collision()
                # going to cue try to
                # avoid collision by waiting
                else:
                    # avoidance behaviour
                    self.avoid_nicely()
            else:
                # On the cue, an "obstacle" is treated as another robot.
                self.robot_detected = True
        if self.robot_detected:
            # self.publish_move_lock(True)
            # self.going_cue = False
            # self.publish_twist(0, 0)
            # if self.going_cue:
            #     self.publish_neopixel('purple')
            #     self.going_cue = False
            # if self.I_c > self.I_thresh:
            #     self.update_Q_table(self.I_c)
            # else:
            #     self.update_Q_table(-0.1)
            if self.I_c > self.I_thresh:
                self.publish_move_lock(True)
                self.going_cue = False
                self.publish_twist(0, 0)
                self.update_Q_table(self.I_c)
                # we are in cue
                self.sleep_w_s(self.I_c)
                # slept enough now try to exit from cue
                self.exit_cue_rand()
        if not self.obs_detected and not self.robot_detected:
            # meaning none of the front sensors is on
            self.publish_twist(self.u_max, 0)
def intensity_callback(self, data):
"""
Intensity vals. from the floor sensors
"""
# convert intensity values to ints
# this is needed when uint8_t msg used
I_list = [ord(intensity) for intensity in data.intensity]
I_sum = 0
for I in I_list:
I_sum += I
I_avg = I_sum / len(I_list)
# low pass filter for intensity vals
# I_avg = moving_average_filter(I_avg, self.I_avg_prev, 0.1)
# rospy.loginfo("I_avg : {}".format(I_avg))
self.I_c = I_avg
# self.I_avg_prev = I_avg
# publish intensity val. for visualization
# by neopixels
if self.I_c > self.I_thresh:
self.intensity_msg = I_avg
self.intensity_vis_pub.publish(self.intensity_msg)
else:
self.intensity_msg = 0
self.intensity_vis_pub.publish(self.intensity_msg)
    def sleep_w_s(self, I_c):
        """
        Stop and wait on a cue for w_s = w_max * I^2 / (I^2 + I_const)
        seconds, showing a blue countdown on the neopixels.

        NOTE(review): the ``I_c`` parameter is ignored; the body reads
        ``self.I_c`` instead — confirm which is intended.
        """
        # stop and rotate
        self.publish_twist(0, 0)
        float_I = float(self.I_c)
        # compute w_s
        w_s = self.w_max*(float_I**2/(float_I**2 + self.I_const))
        # mark the start time
        start_time = rospy.Time.now()
        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            # calculate time passed in secs
            time_passed = rospy.Time.now() - start_time
            time_passed = time_passed.secs
            if w_s - time_passed < 0:
                # time is up
                break
            # Remaining time mapped onto the 0-25 neopixel scale.
            np_val = map_np_val(w_s - time_passed, float(self.w_max))
            self.publish_neopixel('blue', np_val)
            # inform user every second
            rospy.loginfo("Waiting on cue for {} sec".format(
                w_s - time_passed))
            rate.sleep()
    def exit_cue_rand(self):
        """
        Tries to exit from cue by rotating randomly
        and if front sensors doesn't detect any robot
        tries to get away from the cue for 5s unless
        any other robot or obstacle seen in front
        """
        # Random heading in [-pi, pi), then a blocking turn.
        theta = random.uniform(-math.pi, math.pi)
        self.turn_theta(theta)
        # try to exit from the cue by
        # going straight for 5 s
        rate = rospy.Rate(20)
        # each loop takes 0.1 s
        for _ in range(125):
            if rospy.is_shutdown():
                return
            self.publish_twist(0.07, 0)
            # check whether exit path from cue
            # is occluded by another robot
            # Sensors 0 (front), 1 and 7 (front-left/right) are checked.
            for indx in [0, 1, 7]:
                if indx in [0]:
                    if self.rb[indx].is_robot and\
                            self.rb[indx].range > self.cue_exit_robot_thresh_1:
                        self.publish_twist(0, 0)
                        # rospy.loginfo("Cue Exit Robot")
                        return
                    else:
                        pass
                if indx in [1, 7]:
                    if self.rb[indx].is_robot and\
                            self.rb[indx].range > self.cue_exit_robot_thresh_2:
                        self.publish_twist(0, 0)
                        # rospy.loginfo("Cue Exit Robot")
                        return
            if self.obs_detected:
                self.publish_twist(0, 0)
                # rospy.loginfo("Cue Exit Obs.")
                return
            # rospy.loginfo("Exiting Cue")
            rate.sleep()
        # rospy.loginfo("Exited from Cue")
def odom_callback(self, data):
"""
Odom callback for 2-D robot pose
"""
quaternion = data.pose.pose.orientation
explicit_quat = [
quaternion.x, quaternion.y, quaternion.z, quaternion.w]
_, _, yaw = tf.transformations.euler_from_quaternion(
explicit_quat)
x, y = data.pose.pose.position.x, data.pose.pose.position.y
self.robot_pose = [x, y, yaw]
def landmark_callback(self, data):
"""
Landmark callback
"""
if self.going_cue:
return
# get the seen aruco id
self.prev_landmark = self.active_landmark
self.active_landmark = str(data.data)
# Landmark changed we can not be going to the cue anymore
if self.active_landmark != self.prev_landmark:
self.nu += 1
self.update_epsilon_cyclic()
self.going_cue = False
self.select_action()
    def go_to_cue(self, x_lc_l, y_lc_l, landmark_id):
        """
        Given position of the cue w.r.t. landmark
        and landmark id, first transforms that point to
        robot frame and computes the required rotation and
        goes to cue point in a closed loop pos. control
        unless any other obstacle, robot or new landmark encountered
        """
        if self.going_cue:
            rospy.loginfo("going cue ignored landmark msg")
            return
        # stop
        self.publish_twist(0, 0)
        # Cue point expressed in the robot (base_link) frame.
        cue_pos_r_x, cue_pos_r_y = transform_point(
            self.listener,
            x_lc_l,
            y_lc_l,
            "landmark"+landmark_id,
            "base_link")
        # publish tf for cue_goal
        current_time = rospy.Time.now()
        self.broadcaster.sendTransform(
            (cue_pos_r_x, cue_pos_r_y, 0),
            (0, 0, 0, 1),
            current_time,  # timestamp
            "cue_goal",  # child frame
            "base_link")  # parent frame
        # get robot to cue direction in robot frame
        theta_rc_r = np.arctan2(cue_pos_r_y, cue_pos_r_x)
        rospy.loginfo(
            "Going to the cue by using landmark : {}".format(
                landmark_id))
        rospy.loginfo(
            "Turning : {} deg for cue".format(
                theta_rc_r*180/math.pi))
        # turn to the cue direction
        self.turn_theta(theta_rc_r)
        self.going_cue = True
        # get the goal position in map frame
        trans, _ = lookup_tf(self.listener, 'map', 'cue_goal')
        # feed the goal position to the pose controller
        # in the map frame
        x_goal = trans[0]
        y_goal = trans[1]
        # Store the goal so avoid_cue_collision can re-issue it.
        self.x_goal = x_goal
        self.y_goal = y_goal
        # publish goal pos. for closed loop pos. controller
        self.publish_pose(x_goal, y_goal)
    def turn_theta(self, theta_rel):
        """
        Turns by theta relative to the current
        orientation in a closed loop manner
        from the feedback of the odometry callback

        Blocking: loops at the odometry rate under a P controller until
        the heading error drops below ``min_angular_err``; callbacks are
        masked via ``sub_lock`` for the duration.
        """
        self.publish_move_lock(True)
        self.going_cue = False
        self.sub_lock = True
        # desired angle is current angle + rel angle
        theta_des = self.robot_pose[2] + theta_rel
        # error is the diff. b/w current angle and desired angle
        angle_err = self.robot_pose[2] - theta_des
        # limit angle err to -pi, +pi
        angle_err = wrap2pi(angle_err)
        # angle control loop should be blocking
        while not rospy.is_shutdown():
            angle_err = theta_des - self.robot_pose[2]
            angle_err = wrap2pi(angle_err)
            if abs(angle_err) <= self.min_angular_err:
                # reach the target angle
                break
            # P control action on the heading error.
            omega = angle_err * self.K_p
            # control action
            self.publish_twist(0, omega)
            self.rate_turn_theta.sleep()
        # stop
        self.sub_lock = False
def publish_landmark_dict(self):
"""
Publishes final state of the
landmark dict. as json string
it can be decoded on the other end
as a dict.
"""
encoded_dict = String()
encoded_dict = json.dumps(self.landmark_dict)
self.dict_pub.publish(encoded_dict)
# Overwrites final state of the landmark dict
# kobot base will collect them all after
# the exp.
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
with open('landmark_dict.txt', 'w') as file1:
file1.write(dt_string)
file1.write(encoded_dict)
def publish_neopixel(self, flag, val=0):
"""
Send message to the neopixel node
to inform user about landmark
"""
if flag == 'red':
# means we know the cue
# or we have reached the cue
self.landmark_msg = 0
elif flag == 'green':
# means unknown landmark detected
self.landmark_msg = 1
elif flag == 'blue':
self.landmark_msg = 2 + val
elif flag == 'white':
self.landmark_msg = 255
elif flag == 'yellow':
self.landmark_msg = 254
elif flag == 'purple':
self.landmark_msg = 253
else:
# erroneous flag
rospy.logerr("NP Flag is N/A")
return
self.landmark_vis_pub.publish(self.landmark_msg)
def publish_move_lock(self, bool_val):
"""
Send state of the lock to the pose_controller
when state is true vel. controller takes on the command
when state is false pos. coontroller takes on the command
"""
move_lock_msg = Bool()
move_lock_msg = bool_val
self.move_lock_pub.publish(move_lock_msg)
def publish_pose(self, x_goal, y_goal):
"""
Send 2-D goal position w.r.t. the map frame to the pose
controller
"""
# remove move_lock to be able to use pose_controller
self.publish_move_lock(False)
# fill the fields of pose_goal msg
pose_goal_msg = PoseStamped()
# pose controller expects position in map frame
pose_goal_msg.header.frame_id = "map"
# orientation is not important
pose_goal_msg.pose.orientation.z = 0.0
pose_goal_msg.pose.orientation.w = 1.0
pose_goal_msg.header.stamp = rospy.Time.now()
# 2-D goal position
pose_goal_msg.pose.position.x = x_goal
pose_goal_msg.pose.position.y = y_goal
self.pose_goal_pub.publish(pose_goal_msg)
def publish_twist(self, x_vel, theta_vel):
    """
    Publish reference linear/angular velocities
    for the velocity controller.
    """
    cmd = Twist()
    cmd.linear.x = x_vel
    cmd.angular.z = theta_vel
    self.nav_vel_pub.publish(cmd)
def transform_point(listener, x, y, parent_frame, child_frame):
    """
    Transform a 2-D point defined in parent_frame into child_frame.

    Returns the (x, y) coordinates of the point in the child frame.
    """
    src = PointStamped()
    src.header.frame_id = parent_frame
    # Time(0) requests the latest available transform from the listener.
    src.header.stamp = rospy.Time(0)
    src.point.x = x
    src.point.y = y
    src.point.z = 0
    # Transform into the child frame and unpack the coordinates.
    transformed = listener.transformPoint(child_frame, src).point
    return transformed.x, transformed.y
def lookup_tf(listener, parent_frame, child_frame, tf_time=None):
    """
    Get the latest available transform between the given parent and
    child frames as translation and rotation vectors.

    Parameters
    ----------
    listener : tf.TransformListener
    parent_frame, child_frame : str
    tf_time : rospy.Time, optional
        Time at which to wait for the transform; defaults to the
        latest available (rospy.Time(0)).

    Returns
    -------
    list or None
        [trans, rot] on success, None when the transform is unavailable.
    """
    # Bug fix: the default was previously `tf_time=rospy.Time(0)` in the
    # signature, which is evaluated once at import time (before the node
    # clock may be ready) and shared across calls. Resolve it per call.
    if tf_time is None:
        tf_time = rospy.Time(0)
    try:
        # wait until TF is available
        listener.waitForTransform(
            child_frame,
            parent_frame,
            tf_time,
            rospy.Duration(1.0))  # timeout for waiting
        # get the latest available tf
        (trans, rot) = listener.lookupTransform(
            parent_frame, child_frame, rospy.Time(0))
        return [trans, rot]
    except (tf.LookupException, tf.ConnectivityException,
            tf.ExtrapolationException):
        rospy.logerr("TF from {} to {} N/A".format(
            parent_frame,
            child_frame))
        # Explicit None so the failure path is visible to callers.
        return None
def map_np_val(val, max_val):
    """Scale val in [0, max_val] onto the 0-25 neopixel intensity range."""
    fraction = float(val) / float(max_val)
    return int(25.0 * fraction)
def filter_list(prev_list, current_list, zeta):
    """
    Apply the moving-average filter element-wise, pairing each current
    value with the previous value at the same index.
    """
    # Index into prev_list explicitly so a shorter prev_list still raises
    # IndexError, exactly as before.
    return [
        moving_average_filter(current_val, prev_list[idx], zeta)
        for idx, current_val in enumerate(current_list)
    ]
def moving_average_filter(val, filtered_val_prev, zeta):
    """
    First-order exponential moving-average filter.

    zeta = 1 -> ignore previous values
    zeta = 0 -> ignore the current value
    """
    return zeta * val + (1 - zeta) * filtered_val_prev
def wrap2pi(ang):
    """
    Wrap the given angle into (-pi, +pi].
    """
    reduced = ang % (2 * math.pi)
    # reduced is in [0, 2*pi); shift the upper half down by one turn.
    if reduced <= math.pi:
        return reduced
    return reduced - 2 * math.pi
def start():
    """
    Initialise the ROS node, construct the LBADriver and run the main
    loop: refresh parameters roughly once per second and navigate at
    the configured rate.
    """
    # For debugging add log_level=rospy.DEBUG to init_node
    rospy.init_node("lba")
    # rospy.on_shutdown(shutdown_hook)
    driver = LBADriver()
    loop_hz = 20
    if rospy.has_param('lba_freq'):
        loop_hz = rospy.get_param('lba_freq')
    rate = rospy.Rate(loop_hz)  # Hz
    params_refreshed = rospy.Time.now()
    while not rospy.is_shutdown():
        # Re-read parameters about once per second.
        if (rospy.Time.now() - params_refreshed).secs > 1:
            driver.get_params()
            params_refreshed = rospy.Time.now()
        driver.navigate()
        rate.sleep()
# Script entry point: start the landmark-based-aggregation node.
if __name__ == '__main__':
    start()
| [
"rospy.logerr",
"kobot.msg.range_n_bearing_sensor",
"rospy.init_node",
"math.cos",
"rospy.Rate",
"tf.TransformListener",
"numpy.arctan2",
"numpy.sin",
"numpy.mean",
"json.dumps",
"std_msgs.msg.Bool",
"rospy.Subscriber",
"random.randint",
"random.uniform",
"random.choice",
"geometry_msg... | [((29648, 29662), 'geometry_msgs.msg.PointStamped', 'PointStamped', ([], {}), '()\n', (29660, 29662), False, 'from geometry_msgs.msg import Twist, Vector3, PointStamped, PoseStamped\n'), ((29729, 29742), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (29739, 29742), False, 'import rospy\n'), ((30062, 30075), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (30072, 30075), False, 'import rospy\n'), ((31956, 31978), 'rospy.init_node', 'rospy.init_node', (['"""lba"""'], {}), "('lba')\n", (31971, 31978), False, 'import rospy\n'), ((32064, 32091), 'rospy.has_param', 'rospy.has_param', (['"""lba_freq"""'], {}), "('lba_freq')\n", (32079, 32091), False, 'import rospy\n'), ((32147, 32163), 'rospy.Rate', 'rospy.Rate', (['freq'], {}), '(freq)\n', (32157, 32163), False, 'import rospy\n'), ((32186, 32202), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (32200, 32202), False, 'import rospy\n'), ((2282, 2288), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (2286, 2288), False, 'from std_msgs.msg import UInt8, Bool, String\n'), ((2347, 2354), 'std_msgs.msg.UInt8', 'UInt8', ([], {}), '()\n', (2352, 2354), False, 'from std_msgs.msg import UInt8, Bool, String\n'), ((2414, 2421), 'std_msgs.msg.UInt8', 'UInt8', ([], {}), '()\n', (2419, 2421), False, 'from std_msgs.msg import UInt8, Bool, String\n'), ((2567, 2614), 'rospy.Publisher', 'rospy.Publisher', (['"""nav_vel"""', 'Twist'], {'queue_size': '(1)'}), "('nav_vel', Twist, queue_size=1)\n", (2582, 2614), False, 'import rospy\n'), ((2668, 2716), 'rospy.Publisher', 'rospy.Publisher', (['"""Q_table"""', 'String'], {'queue_size': '(1)'}), "('Q_table', String, queue_size=1)\n", (2683, 2716), False, 'import rospy\n'), ((2773, 2825), 'rospy.Publisher', 'rospy.Publisher', (['"""check_table"""', 'String'], {'queue_size': '(1)'}), "('check_table', String, queue_size=1)\n", (2788, 2825), False, 'import rospy\n'), ((2933, 2986), 'rospy.Publisher', 'rospy.Publisher', (['"""lba/intensity"""', 'UInt8'], 
{'queue_size': '(1)'}), "('lba/intensity', UInt8, queue_size=1)\n", (2948, 2986), False, 'import rospy\n'), ((3044, 3096), 'rospy.Publisher', 'rospy.Publisher', (['"""lba/landmark"""', 'UInt8'], {'queue_size': '(1)'}), "('lba/landmark', UInt8, queue_size=1)\n", (3059, 3096), False, 'import rospy\n'), ((3194, 3243), 'rospy.Publisher', 'rospy.Publisher', (['"""landmark"""', 'String'], {'queue_size': '(1)'}), "('landmark', String, queue_size=1)\n", (3209, 3243), False, 'import rospy\n'), ((3352, 3419), 'rospy.Publisher', 'rospy.Publisher', (['"""move_base_simple/goal"""', 'PoseStamped'], {'queue_size': '(1)'}), "('move_base_simple/goal', PoseStamped, queue_size=1)\n", (3367, 3419), False, 'import rospy\n'), ((3539, 3587), 'rospy.Publisher', 'rospy.Publisher', (['"""move_lock"""', 'Bool'], {'queue_size': '(1)'}), "('move_lock', Bool, queue_size=1)\n", (3554, 3587), False, 'import rospy\n'), ((3643, 3671), 'rospy.has_param', 'rospy.has_param', (['"""odom_freq"""'], {}), "('odom_freq')\n", (3658, 3671), False, 'import rospy\n'), ((3752, 3768), 'rospy.Rate', 'rospy.Rate', (['freq'], {}), '(freq)\n', (3762, 3768), False, 'import rospy\n'), ((3823, 3845), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (3843, 3845), False, 'import tf\n'), ((3873, 3898), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ([], {}), '()\n', (3896, 3898), False, 'import tf\n'), ((3908, 3993), 'rospy.Subscriber', 'rospy.Subscriber', (['"""goal_reached"""', 'Bool', 'self.goal_reached_callback'], {'queue_size': '(1)'}), "('goal_reached', Bool, self.goal_reached_callback, queue_size=1\n )\n", (3924, 3993), False, 'import rospy\n'), ((4072, 4176), 'rospy.Subscriber', 'rospy.Subscriber', (['"""sensors/range_n_bearing"""', 'range_n_bearing_sensor', 'self.rb_callback'], {'queue_size': '(1)'}), "('sensors/range_n_bearing', range_n_bearing_sensor, self.\n rb_callback, queue_size=1)\n", (4088, 4176), False, 'import rospy\n'), ((4255, 4347), 'rospy.Subscriber', 'rospy.Subscriber', 
(['"""sensors/landmark_sensor"""', 'UInt8', 'self.landmark_callback'], {'queue_size': '(1)'}), "('sensors/landmark_sensor', UInt8, self.landmark_callback,\n queue_size=1)\n", (4271, 4347), False, 'import rospy\n'), ((4427, 4525), 'rospy.Subscriber', 'rospy.Subscriber', (['"""sensors/floor_sensor"""', 'floor_sensor', 'self.intensity_callback'], {'queue_size': '(1)'}), "('sensors/floor_sensor', floor_sensor, self.\n intensity_callback, queue_size=1)\n", (4443, 4525), False, 'import rospy\n'), ((4604, 4678), 'rospy.Subscriber', 'rospy.Subscriber', (['"""wheel_odom"""', 'Odometry', 'self.odom_callback'], {'queue_size': '(1)'}), "('wheel_odom', Odometry, self.odom_callback, queue_size=1)\n", (4620, 4678), False, 'import rospy\n'), ((5583, 5607), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (5597, 5607), False, 'import random\n'), ((6903, 6911), 'std_msgs.msg.String', 'String', ([], {}), '()\n', (6909, 6911), False, 'from std_msgs.msg import UInt8, Bool, String\n'), ((6935, 6958), 'json.dumps', 'json.dumps', (['check_table'], {}), '(check_table)\n', (6945, 6958), False, 'import json\n'), ((8328, 8336), 'std_msgs.msg.String', 'String', ([], {}), '()\n', (8334, 8336), False, 'from std_msgs.msg import UInt8, Bool, String\n'), ((8360, 8379), 'json.dumps', 'json.dumps', (['Q_table'], {}), '(Q_table)\n', (8370, 8379), False, 'import json\n'), ((9270, 9299), 'rospy.has_param', 'rospy.has_param', (['"""lba_params"""'], {}), "('lba_params')\n", (9285, 9299), False, 'import rospy\n'), ((12220, 12244), 'kobot.msg.range_n_bearing_sensor', 'range_n_bearing_sensor', ([], {}), '()\n', (12242, 12244), False, 'from kobot.msg import range_n_bearing_sensor, landmark_sensor, floor_sensor\n'), ((15645, 15659), 'rospy.Rate', 'rospy.Rate', (['(20)'], {}), '(20)\n', (15655, 15659), False, 'import rospy\n'), ((16215, 16305), 'random.uniform', 'random.uniform', (['(+math.pi / 2 + self.max_rand_ang)', '(3 * math.pi / 2 - self.max_rand_ang)'], {}), '(+math.pi / 2 + 
self.max_rand_ang, 3 * math.pi / 2 - self.\n max_rand_ang)\n', (16229, 16305), False, 'import random\n'), ((20367, 20383), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (20381, 20383), False, 'import rospy\n'), ((20399, 20412), 'rospy.Rate', 'rospy.Rate', (['(1)'], {}), '(1)\n', (20409, 20412), False, 'import rospy\n'), ((21245, 21278), 'random.uniform', 'random.uniform', (['(-math.pi)', 'math.pi'], {}), '(-math.pi, math.pi)\n', (21259, 21278), False, 'import random\n'), ((21396, 21410), 'rospy.Rate', 'rospy.Rate', (['(20)'], {}), '(20)\n', (21406, 21410), False, 'import rospy\n'), ((22878, 22933), 'tf.transformations.euler_from_quaternion', 'tf.transformations.euler_from_quaternion', (['explicit_quat'], {}), '(explicit_quat)\n', (22918, 22933), False, 'import tf\n'), ((24329, 24345), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (24343, 24345), False, 'import rospy\n'), ((24665, 24701), 'numpy.arctan2', 'np.arctan2', (['cue_pos_r_y', 'cue_pos_r_x'], {}), '(cue_pos_r_y, cue_pos_r_x)\n', (24675, 24701), True, 'import numpy as np\n'), ((26756, 26764), 'std_msgs.msg.String', 'String', ([], {}), '()\n', (26762, 26764), False, 'from std_msgs.msg import UInt8, Bool, String\n'), ((26788, 26818), 'json.dumps', 'json.dumps', (['self.landmark_dict'], {}), '(self.landmark_dict)\n', (26798, 26818), False, 'import json\n'), ((26999, 27013), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (27011, 27013), False, 'from datetime import datetime\n'), ((28299, 28305), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (28303, 28305), False, 'from std_msgs.msg import UInt8, Bool, String\n'), ((28708, 28721), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (28719, 28721), False, 'from geometry_msgs.msg import Twist, Vector3, PointStamped, PoseStamped\n'), ((28994, 29010), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (29008, 29010), False, 'import rospy\n'), ((29323, 29330), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (29328, 
29330), False, 'from geometry_msgs.msg import Twist, Vector3, PointStamped, PoseStamped\n'), ((32108, 32135), 'rospy.get_param', 'rospy.get_param', (['"""lba_freq"""'], {}), "('lba_freq')\n", (32123, 32135), False, 'import rospy\n'), ((32217, 32236), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (32234, 32236), False, 'import rospy\n'), ((1674, 1699), 'numpy.zeros', 'np.zeros', (['(th_len, d_len)'], {}), '((th_len, d_len))\n', (1682, 1699), True, 'import numpy as np\n'), ((1744, 1769), 'numpy.zeros', 'np.zeros', (['(th_len, d_len)'], {}), '((th_len, d_len))\n', (1752, 1769), True, 'import numpy as np\n'), ((3692, 3720), 'rospy.get_param', 'rospy.get_param', (['"""odom_freq"""'], {}), "('odom_freq')\n", (3707, 3720), False, 'import rospy\n'), ((4977, 5006), 'rospy.loginfo', 'rospy.loginfo', (['"""Goal reached"""'], {}), "('Goal reached')\n", (4990, 5006), False, 'import rospy\n'), ((5307, 5340), 'rospy.loginfo', 'rospy.loginfo', (['"""Goal not reached"""'], {}), "('Goal not reached')\n", (5320, 5340), False, 'import rospy\n'), ((5655, 5685), 'rospy.loginfo', 'rospy.loginfo', (['"""Random action"""'], {}), "('Random action')\n", (5668, 5685), False, 'import rospy\n'), ((5775, 5816), 'random.randint', 'random.randint', (['(0)', '(self.action_dim[0] - 1)'], {}), '(0, self.action_dim[0] - 1)\n', (5789, 5816), False, 'import random\n'), ((5836, 5877), 'random.randint', 'random.randint', (['(0)', '(self.action_dim[1] - 1)'], {}), '(0, self.action_dim[1] - 1)\n', (5850, 5877), False, 'import random\n'), ((5949, 5977), 'rospy.loginfo', 'rospy.loginfo', (['"""Best action"""'], {}), "('Best action')\n", (5962, 5977), False, 'import rospy\n'), ((6867, 6879), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (6874, 6879), True, 'import numpy as np\n'), ((7469, 7486), 'numpy.sin', 'np.sin', (['action_th'], {}), '(action_th)\n', (7475, 7486), True, 'import numpy as np\n'), ((7515, 7532), 'numpy.cos', 'np.cos', (['action_th'], {}), '(action_th)\n', (7521, 7532), 
True, 'import numpy as np\n'), ((9377, 9406), 'rospy.get_param', 'rospy.get_param', (['"""lba_params"""'], {}), "('lba_params')\n", (9392, 9406), False, 'import rospy\n'), ((15735, 15754), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (15752, 15754), False, 'import rospy\n'), ((16932, 16946), 'rospy.Rate', 'rospy.Rate', (['(20)'], {}), '(20)\n', (16942, 16946), False, 'import rospy\n'), ((16969, 16997), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (16982, 16997), False, 'import random\n'), ((20431, 20450), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (20448, 20450), False, 'import rospy\n'), ((21487, 21506), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (21504, 21506), False, 'import rospy\n'), ((23977, 24024), 'rospy.loginfo', 'rospy.loginfo', (['"""going cue ignored landmark msg"""'], {}), "('going cue ignored landmark msg')\n", (23990, 24024), False, 'import rospy\n'), ((26099, 26118), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (26116, 26118), False, 'import rospy\n'), ((30403, 30422), 'rospy.Duration', 'rospy.Duration', (['(1.0)'], {}), '(1.0)\n', (30417, 30422), False, 'import rospy\n'), ((30573, 30586), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (30583, 30586), False, 'import rospy\n'), ((32380, 32396), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (32394, 32396), False, 'import rospy\n'), ((6293, 6322), 'numpy.argmax', 'np.argmax', (['actions'], {'axis': 'None'}), '(actions, axis=None)\n', (6302, 6322), True, 'import numpy as np\n'), ((8825, 8855), 'rospy.logerr', 'rospy.logerr', (['""" Unknown Aruco"""'], {}), "(' Unknown Aruco')\n", (8837, 8855), False, 'import rospy\n'), ((20522, 20538), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (20536, 20538), False, 'import rospy\n'), ((32261, 32277), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (32275, 32277), False, 'import rospy\n'), ((9023, 9063), 'math.cos', 'math.cos', (['(2 * 
math.pi * self.nu / self.p)'], {}), '(2 * math.pi * self.nu / self.p)\n', (9031, 9063), False, 'import math\n'), ((27916, 27946), 'rospy.logerr', 'rospy.logerr', (['"""NP Flag is N/A"""'], {}), "('NP Flag is N/A')\n", (27928, 27946), False, 'import rospy\n')] |
import pandas as pd
import numpy as np
import random
import csv
import pprint
#データフレームのダウンロード
def make_df(csv):
    """
    Load the application-data CSV into a DataFrame and group the
    second-stage columns into runs of six related columns.

    Returns (DataFrame, list of 4 column-name groups).
    """
    # NOTE(review): the parameter name shadows the imported `csv` module
    # inside this function (name kept for interface compatibility).
    # e.g. '/Users/masato/Desktop/UTTdata/prog/PyProgramming/sinhuri2018.csv'
    frame = pd.read_csv(csv)
    # Columns from index 4 onward hold the second-stage quota data.
    stage_cols = list(frame.columns)[4:]
    groups = [stage_cols[start:start + 6] for start in range(0, 24, 6)]
    return frame, groups
# Fixed cohort sizes.
# n: total number of students, m: students per entrance stream,
# k: number of departments.
def stu_num():
    """Return (total students, per-stream counts, number of departments)."""
    per_stream = [113, 151, 150, 443, 190, 29]
    return sum(per_stream), per_stream, 79
# Build the base preference lists.
# For each stream: [preferred-quota depts (a), same-division depts (b),
# other-division depts] as department ids 1-78.
def make_prelist():
    """
    Return the six base preference lists, one per entrance stream
    (liberal-arts 1-3, then science 1-3). Each entry is a list of three
    fresh lists: preferred-quota, same-division, other-division depts.
    """
    humanities_rest = list(range(21, 79))
    sciences_rest = list(range(1, 21))
    lib1 = [[1], list(range(2, 21)), list(humanities_rest)]
    lib2 = [[2], [1] + list(range(3, 21)), list(humanities_rest)]
    lib3 = [list(range(3, 18)),
            [1, 2] + list(range(18, 21)),
            list(humanities_rest)]
    sci1 = [list(range(46, 79)),
            list(range(21, 27)) + list(range(29, 46)),
            list(sciences_rest)]
    sci2 = [list(range(69, 79)) + list(range(29, 46)),
            list(range(21, 27)) + list(range(46, 69)),
            list(sciences_rest)]
    sci3 = [[30], [29] + list(range(31, 78)), list(range(1, 29))]
    return [lib1, lib2, lib3, sci1, sci2, sci3]
def make_pre(pre_list, type, a, b):
    """
    Draw one randomised preference order for a student.

    The three tiers of pre_list[type] are put in a random order (weighted
    by thresholds a and b on a single uniform draw), each tier is shuffled
    in place, and the result is flattened and padded with zeros.

    NOTE(review): `type` shadows the builtin; kept for interface
    compatibility. If none of the branches below matches (possible for
    some a, b combinations), `key` is unbound and a NameError follows.
    """
    # type: 0 = liberal-arts 1, 1 = liberal-arts 2, ..., 5 = science 3
    tmp = random.random()
    if tmp < a / 2:
        key = [0, 1, 2]
    elif a / 2 <= tmp < a:
        key = [0, 2, 1]
    elif a <= tmp < (a + b) / 2:
        key = [1, 0, 2]
    elif (a + b) / 2 <= tmp < b:
        key = [1, 2, 0]
    elif b / 2 <= tmp < (1 + b) / 2:
        key = [2, 0, 1]
    elif (1 + b) / 2 <= tmp < 1:
        key = [2, 1, 0]
    pre_result = []
    for i in key:
        # Shuffles the shared pre_list tiers in place.
        random.shuffle(pre_list[type][i])
        pre_result.extend(pre_list[type][i])
    # FIXME: original author's note says "this is buggy here".
    while len(pre_result) < 80:
        pre_result.extend([0])
    # Original note claimed pre_result has 79 elements, but the loop
    # above pads it to 80 -- the two statements are inconsistent.
    return pre_result
# Create the student data array.
# Row layout: student(0: student id, 1: stream, 2: score,
# 3: assigned department, 4: time, 5...: preference list)
# n = 1076, m = [113, 151, 150, 443, 190, 29], k = 79
def make_stu(n, m, k, a, b):
    """
    Build the (n+1) x (k+5) student matrix. Row 0 is unused; rows 1..n
    hold id, stream, a random score, the initial assignment (-1) and the
    randomised preference list from make_pre.
    """
    tmp = 0
    # Stream labels as strings; numpy coerces them to floats on assignment.
    tmp_2 = ["1", "2", "3", "4", "5", "6"]
    # Cumulative stream sizes mark where the stream index advances.
    cus_m = np.cumsum(m)
    pre_list = make_prelist()
    student = np.zeros((n + 1, k + 5))
    for i in range(1, n + 1):
        # col 0: student id, col 2: score, col 3: assigned dept (-1 initially)
        student[i][0] = i
        student[i][2] = random.randrange(11, 9998, 1)
        student[i][3] = -1
        # use make_pre to draw the preference order
        student[i][1] = tmp_2[tmp]
        pre = make_pre(pre_list, tmp, a, b)
        # (0-77)
        for j in range(k - 1):
            student[i][5 + j] = pre[j]
        if cus_m[tmp] == i:
            tmp += 1
    return student
#student = make_stu(n, m)
# Create the university (department) data -- hard-quota variant.
# Column groups (original names in Japanese):
# [stage-2 quota category, quota size, remaining seats,
#  bottom score, scores array, student-id array]
def univ_make(df, df_collist):
    """
    Initialise per-department admission bookkeeping columns.

    Mutates df in place (and also returns it): the category column is
    cast to str, remaining seats start equal to the quota, and per-seat
    score / student-id arrays are zero-initialised.
    """
    for j in df_collist:
        df[j[0]] = df[j[0]].astype('str')
        df[j[2]] = df[j[1]]
        df[j[4]] = df[j[1]].apply(lambda x: np.zeros((x)))
        df[j[5]] = df[j[1]].apply(lambda x: np.zeros((x)))
    # ['stage-2 quota category', 'quota size', 'remaining seats',
    #  'bottom score', 'scores', 'student ids']
    return df
# Create the university (department) data -- soft-quota variant.
def univ_make_s(df, df_collist):
    """
    Like univ_make, but when a later quota group's category contains an
    earlier group's category string, the earlier quota is folded into
    the later one before the bookkeeping columns are initialised.

    Works on a copy of df and returns it.
    """
    univ_s = df.copy()
    for j in df_collist:
        univ_s[j[0]] = univ_s[j[0]].astype('str')
    for index, row in df.iterrows():
        for k in range(3):
            for l in range(1, 4 - k):
                # Substring match between category strings decides whether
                # quota k is absorbed into quota k+l for this department.
                if (univ_s.iloc[index][df_collist[k][0]]
                        in univ_s.iloc[index][df_collist[l + k][0]]):
                    ext = df.at[index, df_collist[k][1]].copy()
                    univ_s.at[index, df_collist[l + k][1]] += ext
    for j in df_collist:
        univ_s[j[2]] = univ_s[j[1]].copy()
        univ_s[j[4]] = univ_s[j[1]].apply(lambda x: np.zeros((x)))
        univ_s[j[5]] = univ_s[j[1]].apply(lambda x: np.zeros((x)))
    # ['stage-2 quota category', 'quota size', 'remaining seats',
    #  'bottom score', 'scores', 'student ids']
    return univ_s
#df_r = univ_make_s(df, df_collist)
#print(df_r.iloc[2]['第二段階指定1科類'] in df_r.iloc[2]['第二段階指定4科類'])
#print(univ_make_s(df, df_collist))
| [
"random.shuffle",
"pandas.read_csv",
"random.randrange",
"numpy.zeros",
"numpy.cumsum",
"random.random"
] | [((123, 139), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (134, 139), True, 'import pandas as pd\n'), ((1691, 1706), 'random.random', 'random.random', ([], {}), '()\n', (1704, 1706), False, 'import random\n'), ((2490, 2502), 'numpy.cumsum', 'np.cumsum', (['m'], {}), '(m)\n', (2499, 2502), True, 'import numpy as np\n'), ((2548, 2572), 'numpy.zeros', 'np.zeros', (['(n + 1, k + 5)'], {}), '((n + 1, k + 5))\n', (2556, 2572), True, 'import numpy as np\n'), ((2081, 2114), 'random.shuffle', 'random.shuffle', (['pre_list[type][i]'], {}), '(pre_list[type][i])\n', (2095, 2114), False, 'import random\n'), ((2695, 2724), 'random.randrange', 'random.randrange', (['(11)', '(9998)', '(1)'], {}), '(11, 9998, 1)\n', (2711, 2724), False, 'import random\n'), ((3296, 3307), 'numpy.zeros', 'np.zeros', (['x'], {}), '(x)\n', (3304, 3307), True, 'import numpy as np\n'), ((3355, 3366), 'numpy.zeros', 'np.zeros', (['x'], {}), '(x)\n', (3363, 3366), True, 'import numpy as np\n'), ((4090, 4101), 'numpy.zeros', 'np.zeros', (['x'], {}), '(x)\n', (4098, 4101), True, 'import numpy as np\n'), ((4157, 4168), 'numpy.zeros', 'np.zeros', (['x'], {}), '(x)\n', (4165, 4168), True, 'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: lcpp-dev
# language: python
# name: lcpp-dev
# ---
# %% [markdown]
# # Basic processing of Sentinel V mooring
# %%
import xarray as xr
import numpy as np
import utils
import matplotlib.pyplot as plt
import scipy.stats as stats
import utm
from scipy.ndimage import gaussian_filter
def mode(x, **kwargs):
    """
    Statistical mode along axis 1, squeezed to 1-D.

    Used as a reducer for xarray rolling.reduce; extra keyword arguments
    (e.g. the axis xarray passes) are deliberately ignored -- the window
    axis is hard-coded to 1.
    """
    values = stats.mode(x, axis=1)[0]
    return np.squeeze(values)
def interval_to_mid(intervals):
    """
    Parameters
    ----------
    intervals : 1D numpy array
        An array of pandas Interval objects.

    Returns
    -------
    mids : 1D numpy array
        Midpoints of the intervals.
    """
    mids = [interval.mid for interval in intervals]
    return np.array(mids)
# %% [markdown]
# Load datasets and do some basic conversion of times and variables.
# %%
# Sentinel V ADCP, already rotated to ENU (per the file name).
sV = xr.open_dataset("../proc/ABLE_sentinel_2018_enu.nc")
sV = sV.set_coords(["lon", "lat"])
# Convert POSIX seconds to numpy datetime64 time coordinates.
sV["time"] = utils.POSIX_to_datetime(sV.time.values).astype(np.datetime64)
# Add UTM easting/northing derived from the mooring position.
x, y, *_ = utm.from_latlon(sV.lat, sV.lon)
sV = sV.assign_coords({"x": x, "y": y})
# RBR Virtuoso (turbidity) record.
virt = xr.open_dataset("../proc/ABLE_sentinel_RBRvirtuoso_2018.nc")
virt = virt.set_coords(["lon", "lat"])
virt["time"] = utils.POSIX_to_datetime(virt.time.values).astype(np.datetime64)
# SBE37 MicroCAT (CTD) record.
sbe = xr.open_dataset("../proc/ABLE_sentinel_SBE37_2018.nc")
sbe = sbe.set_coords(["lon", "lat"])
sbe["time"] = utils.POSIX_to_datetime(sbe.time.values).astype(np.datetime64)
# %% [markdown]
# Define some parameters and simple thresholds for processing.
# %%
pmin = 125  # Minimum pressure to keep
# NOTE(review): differentiate("time") on a datetime64 coordinate yields a
# per-nanosecond rate, hence the tiny threshold -- confirm units.
dpdtmax = 0.4e-9  # Maximum rate of change of pressure to keep
cut_ends = 2  # Number of points on either end to remove after applying other thresholds
dt = 10  # Bin size for time average [s]
# %% [markdown]
# Apply the thresholds to remove some data.
# %%
is_deep = sV.p > pmin
is_slow = np.fabs(sV.p.differentiate("time")) < dpdtmax
# Keep only samples that are both deep and quasi-stationary in pressure,
# then trim a few points at each end.
keep = is_deep & is_slow
sVp = sV.isel(time=keep).isel(time=slice(cut_ends, -cut_ends))
# %%
sVp.p.plot.line('.')
# %% [markdown]
# ## Old quality control
#
# Note [Marion's document](https://escholarship.org/content/qt6xd149s8/qt6xd149s8.pdf)
# %%
# # qc_err0 = 0.3
# # qc_err1 = 0.5
# qc_err = 0.15 # error velocity
# qc_q = 110 # correlation
# qc_uv = 2.0 # horizontal velocity
# qc_w = 1.5 # vertical velocity
# qc_a = 30 # echo intensity
# %%
# qc_u_bad = np.abs(sVp.u) > qc_uv
# qc_v_bad = np.abs(sVp.v) > qc_uv
# qc_w_bad = np.abs(sVp.w) > qc_w
# qc_vv_bad = np.abs(sVp.vv) > qc_w
# qc_err_bad = np.abs(sVp.err) > qc_err
# qc_q1_good = sVp.q1 > qc_q
# qc_q2_good = sVp.q2 > qc_q
# qc_q3_good = sVp.q3 > qc_q
# qc_q4_good = sVp.q4 > qc_q
# qc_q_bad = (qc_q1_good.astype(int) + qc_q2_good.astype(int) + qc_q3_good.astype(int) + qc_q4_good.astype(int)) <= 3
# %%
# uv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_u_bad.astype(int) + qc_v_bad.astype(int)) > 1
# w_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_w_bad.astype(int)) > 1
# vv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_vv_bad.astype(int)) > 1
# %%
# fig, axs = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 10))
# uv_reject.plot(ax=axs[0])
# w_reject.plot(ax=axs[1])
# vv_reject.plot(ax=axs[2])
# %% [markdown]
# Remove velocity using QC.
# %%
# sVqc = sVp.copy()
# u = sVqc.u.values
# u[uv_reject] = np.nan
# sVqc["u"] = (sVqc.u.dims, u, sVqc.u.attrs)
# v = sVqc.v.values
# v[uv_reject] = np.nan
# sVqc["v"] = (sVqc.v.dims, v, sVqc.v.attrs)
# w = sVqc.w.values
# w[w_reject] = np.nan
# sVqc["w"] = (sVqc.w.dims, w, sVqc.w.attrs)
# vv = sVqc.vv.values
# vv[vv_reject] = np.nan
# sVqc["vv"] = (sVqc.vv.dims, vv, sVqc.vv.attrs)
# %% [markdown]
# ## New cut off data above surface
# %%
dthresh = 100.  # Only look for the surface echo beyond this distance [m]
# Fractional range lost to side-lobe contamination near the surface.
sidelobe_pct = 1 - np.cos(np.deg2rad(sVp.beamAngle))
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5  # Rolling-mode window length for smoothing the surface estimate
dmingood = np.full((sVp.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
    # Echo-intensity maximum beyond dthresh marks the surface in each beam.
    idxmax = sVp[var].where(sVp.distance > dthresh).argmax("distance")
    dmax = sVp.distance[idxmax]
    # Last range bin free of side-lobe contamination.
    dsl = (1 - sidelobe_pct)*sVp.distance[idxmax]
    # dmax = dmax.where(dmax > dthresh)
    # Smooth the cut-off distance with a rolling mode.
    dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
    sVp[var].plot(ax=axs[i])
    dmingood[:, i] = dmode
    dsl.plot(ax=axs[i], color="r")
    axs[i].set_title("")
for i in range(len(var_names)):
    # Overlay the most conservative (minimum over beams) cut-off.
    axs[i].plot(sVp.time, dmingood.min(axis=1), color="k")
# %%
good = dmingood.min(axis=1)
# Make a new dataset without surface
sVs = sVp.copy()
# Loop over the 2D datavars
mask = sVp.distance < xr.DataArray(good, dims={"time": sVp.time})
for var in sVp.data_vars:
    if sVp[var].dims == ('distance', 'time'):
        print(f"Masking {var}.")
        sVs[var] = sVp[var].where(mask)
# Remove distances where there is no good data
sVs = sVs.isel(distance=mask.any("time"))
# %% [markdown]
# ## New quality control
# %%
errthresh = 0.2  # Blur around these errors
errthresh_high = 0.2  # Always remove these errors
maskthresh = 0.35  # Blurred mask threshold
qthresh = 300  # Minimum summed 4-beam correlation
vqthresh = 35  # Minimum vertical-beam correlation
sigma = (2, 5)  # Gaussian blur scales (distance, time) for the error mask
qsum = sVs.q1 + sVs.q2 + sVs.q3 + sVs.q4
qgood = qsum > qthresh
vqgood = sVs.vq.values > vqthresh
sVqc = sVs.copy()
# Blur the low-error mask so isolated bad points reject their neighbours too.
egood = np.abs(sVs.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
# Good Janus velocities: blurred-error mask AND hard error cap AND correlation.
ebgood = (egood_filt > maskthresh) & (np.abs(sVs.err) < errthresh_high) & qgood
# Good vertical-beam velocities: blurred-error mask AND vertical correlation.
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
    sVqc[var] = sVs[var].where(ebgood)
sVqc["vv"] = sVs.vv.where(vebgood)
# %% [markdown]
# ## Time binning
# %% [markdown]
# Bin average data to reduce size and errors.
#
# First make bins.
# %%
# Time bin start and end to nearest minute. This will cut off some data.
tstart = (sVqc.time[0].values + np.timedelta64(30, 's')).astype('datetime64[m]')
tend = sVqc.time[-1].values.astype('datetime64[m]')
timebins = np.arange(tstart, tend, np.timedelta64(dt, 's'))
# %% [markdown]
# Group and take mean.
# %%
gb = sVqc.groupby_bins("time", timebins)
sVa = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sVa["time_bins"] = interval_to_mid(sVa.time_bins.values).astype("datetime64[s]")
sVa = sVa.rename({"time_bins": "time"})
# %% [markdown]
# Mean of heading should be performed using circular mean. (Technically, so should pitch and roll, but for small angles the noncircular mean is ok)
# %%
sVa["heading"] = (["time"], sVqc.heading.groupby_bins("time", timebins).reduce(stats.circmean, high=360.).values)
# %% [markdown]
# ## Old cut off data above surface
#
# Use a simple echo intensity threshold to find the maximum.
# %%
# dmin = 60. # Minimum distance above which to look for the maximum
# nroll = 120 # Number of points in rolling mode window
# fcut = 0.1 # Extra distance to remove (1 - fcut)*dcut
# %%
# sVa.va.isel(time=10000).plot.line('.')
# %% [markdown]
# Identify echo maximum in each beam, using a rolling mode to smooth out data.
# %%
# # fig, ax = plt.subplots()
# dcuts = []
# for var in ["a1", "a2", "a3", "a4", "va"]:
# am = sVa[var].where(sVa.distance > dmin)
# imax = am.argmax(dim="distance", skipna=True)
# dmax = am.distance[imax]
# ro = dmax.rolling(time=nroll, min_periods=1, center=True)
# dm = ro.reduce(mode)
# dcut = (1 - fcut)*dm
# # ax.plot(sVa.time, dmax, 'r')
# # ax.plot(sVa.time, dm, 'orange')
# # ax.plot(sVa.time, dcut, 'g')
# dcuts.append(dcut.values)
# %%
# dcuts = np.stack(dcuts, axis=1)
# # Use only the vertical beam for finding the surface.
# dcut_min = dcuts[:, 4]
# dcut_min = xr.DataArray(dcut_min, dims={"time": sVa.time})
# %% [markdown]
# Mask and remove data above distance threshold.
# %%
# sVm = sVa.where(sVa.distance < dcut_min)
# # The masking process converts some variables to 2D, change them back...
# sVm["p"] = sVa.p
# sVm["t"] = sVa.t
# sVm["pitch"] = sVa.pitch
# sVm["rol"] = sVa.rol
# sVm["heading"] = sVa.heading
# sVm = sVm.isel(distance=~np.isnan(sVm.u).all(axis=0))
# %% [markdown]
# ## Plotting time series
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
# NOTE(review): `sVm` is only assigned in the commented-out "Old cut off
# data above surface" cell above; as written this cell raises NameError.
# Presumably `sVa` (or `sVqc`) is intended -- confirm before running.
sVm_ = sVm.sel(time=timeslice)
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.w.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[3], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
fig, ax = plt.subplots(figsize=(12, 3))
sVm_.p.plot(ax=ax)
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
sVm_ = sVm.sel(time=timeslice)
# Velocities plus per-beam echo intensities for the same window.
fig, axs = plt.subplots(8, 1, figsize=(15, 25), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.a1.plot(ax=axs[3], x="time")
sVm_.a2.plot(ax=axs[4], x="time")
sVm_.a3.plot(ax=axs[5], x="time")
sVm_.a4.plot(ax=axs[6], x="time")
sVm_.va.plot(ax=axs[7], x="time")
# Instrument attitude for the same window.
fig, axs = plt.subplots(3, 1, figsize=(11, 8))
sVm_.heading.plot(ax=axs[0])
sVm_.rol.plot(ax=axs[1])
sVm_.pitch.plot(ax=axs[2])
# %% [markdown]
# # Plug in other instruments to dataset
#
# Group and bin average.
# %%
# Bin-average the RBR turbidity record onto the same time bins.
gb = virt.groupby_bins("time", timebins)
virta = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
virta["time_bins"] = interval_to_mid(virta.time_bins.values).astype("datetime64[ms]")
virta = virta.rename({"time_bins": "time"})
# Bin-average the SBE37 CTD record onto the same time bins.
gb = sbe.groupby_bins("time", timebins)
sbea = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sbea["time_bins"] = interval_to_mid(sbea.time_bins.values).astype("datetime64[ms]")
sbea = sbea.rename({"time_bins": "time"})
# %% [markdown]
# Look at a couple of plots.
# %%
fig, ax = plt.subplots(figsize=(12, 3))
virta.turb.plot(ax=ax)
fig, axs = plt.subplots(3, 1, figsize=(12, 10), sharex=True)
sbea.p.plot(ax=axs[0])
sbea.t.plot(ax=axs[1])
sbea.SP.plot(ax=axs[2])
# %% [markdown]
# Assign other data to the sentinal dataset.
# %%
ds = sVa.copy()
# %%
# Identical time bins were used for all instruments, so the averaged
# records can share the ADCP time dimension directly.
ds["turb_RBR"] = (sVa.p.dims, virta.turb, virta.turb.attrs)
ds["SP_SBE37"] = (sVa.p.dims, sbea.SP, sbea.SP.attrs)
ds["C_SBE37"] = (sVa.p.dims, sbea.C, sbea.C.attrs)
ds["t_SBE37"] = (sVa.p.dims, sbea.t, sbea.t.attrs)
ds["p_SBE37"] = (sVa.p.dims, sbea.p, sbea.p.attrs)
# %% [markdown]
# Try a plot...
# %%
fig, ax = plt.subplots()
ds.p_SBE37.plot(ax=ax)
ds.p.plot(ax=ax, yincrease=False)
# %% [markdown]
# Estimate some more thermodynamic variables.
# %%
import gsw
# %%
# TEOS-10 derived quantities from the SBE37 record.
ds["SA_SBE37"] = (ds.p.dims, gsw.SA_from_SP(ds.SP_SBE37, ds.p_SBE37, ds.lon, ds.lat), {"units": "g/kg", "long_name": "Absolute_salinity"})
ds["CT_SBE37"] = (ds.p.dims, gsw.CT_from_t(ds.SA_SBE37, ds.t_SBE37, ds.p_SBE37), {"units": "deg C", "long_name": "Conservative_temperature"})
ds["z_SBE37"] = (ds.p.dims, gsw.z_from_p(ds.p_SBE37, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_SBE37"] = (ds.p.dims, -ds.z_SBE37, {"units": "m", "long_name": "depth"})
ds["z_ADCP"] = (ds.p.dims, gsw.z_from_p(ds.p, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_ADCP"] = (ds.p.dims, -ds.z_ADCP, {"units": "m", "long_name": "depth"})
# Bin height/depth: range bins offset by the mean ADCP height.
ds["z"] = (ds.distance.dims, ds.distance + ds.z_ADCP.mean(dim="time"), {"units": "m", "long_name": "height"})
ds["depth"] = (ds.distance.dims, -ds.z, {"units": "m", "long_name": "depth"})
ds = ds.set_coords(["z", "depth"])
# %% [markdown]
# Save dataset to netcdf.
# %%
ds.to_netcdf("../proc/ABLE_sentinel_mooring_2018.nc")
# %% [markdown]
# ## Examine a short segment of the dataset
# %%
# Four-hour window on 5 Sep 2018.
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True, sharey=True)
ds_.u.plot(ax=axs[0], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.a3.plot(ax=axs[1], y="depth", x="time", yincrease=False)
ds_.vv.plot(ax=axs[2], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.va.plot(ax=axs[3], y="depth", x="time", yincrease=False)
# Companion scalar records over the same window.
fig, axs = plt.subplots(4, 1, figsize=(11.7, 10), sharex=True)
ds_.p_SBE37.plot(ax=axs[0])
ds_.CT_SBE37.plot(ax=axs[1])
ds_.turb_RBR.plot(ax=axs[2])
ds_.pitch.plot(ax=axs[3])
# %% [markdown]
# Compare echo intensity near bottom for different beams.
# %%
# Pick the bin closest to 5 m along-beam distance.
dist = 5
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice).sel(distance=dist, method="nearest")
fig, ax = plt.subplots(figsize=(11, 4))
ds_.a1.plot(ax=ax, label="beam 1")
ds_.a2.plot(ax=ax, label="beam 2")
ds_.a3.plot(ax=ax, label="beam 3")
ds_.a4.plot(ax=ax, label="beam 4")
ds_.va.plot(ax=ax, label="beam v")
ax.set_ylabel("Echo intensity")
ax.legend()
# %%
# Overlay vertical-beam amplitude profiles, one every 50th time step.
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, ax = plt.subplots(figsize=(10, 10))
for i in range(0, ds_.time.size, 50):
    ds__ = ds_.isel(time=i)
    ds__.va.plot(ax=ax, label=ds__.time.values.astype("datetime64[s]"))
ax.legend(loc="upper left", bbox_to_anchor=(1, 1))
# %%
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import numpy as np
# Waterfall plot: vertical-beam amplitude profiles stacked along time.
fig = plt.figure(figsize=(10, 10))
# Figure.gca(projection=...) was deprecated in matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to create a 3D axes and matches the
# wireframe cell below.
ax = fig.add_subplot(projection='3d')
# def cc(arg):
#     return mcolors.to_rgba(arg, alpha=0.6)
xs = ds_.distance.values
verts = []
zs = []
for i in range(0, ds_.time.size, 100):
    ds__ = ds_.isel(time=i)
    # Elapsed seconds since the start of the segment.
    time = (ds__.time - ds_.time[0]).astype(float)/1e9
    zs.append(time)
    ys = ds__.va.values
    # Pin the ends to zero so each polygon closes at the baseline.
    ys[0], ys[-1] = 0, 0
    verts.append(list(zip(xs, ys)))
# zs = [0.0, 1.0, 2.0, 3.0]
# for z in zs:
#     ys = np.random.rand(len(xs))
#     ys[0], ys[-1] = 0, 0
#     verts.append(list(zip(xs, ys)))
poly = PolyCollection(verts)  # facecolors=[cc('r'), cc('g'), cc('b'), cc('y')]
poly.set_alpha(0.2)
ax.add_collection3d(poly, zs=zs, zdir='y')
ax.set_xlabel('Distance')
ax.set_xlim3d(0, xs.max())
ax.set_ylabel('Y')
ax.set_ylim3d(0, zs[-1])
ax.set_zlabel('Z')
ax.set_zlim3d(0, 200)
ax.view_init(elev=30., azim=30)
plt.show()
# %%
# Wireframe view of beam-2 amplitude over a 45-minute window.
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
timeslice = slice(np.datetime64("2018-09-05T10:00"), np.datetime64("2018-09-05T10:45"))
ds_ = ds.sel(time=timeslice)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection='3d')
T, D = np.meshgrid(ds_.distance.values, (ds_.time.values - ds_.time[0].values).astype(float)/1e9)
# Plot a basic wireframe.
ax.plot_wireframe(T, D, ds_.a2.values, rstride=1, cstride=1)
ax.view_init(elev=45., azim=120)
# %% [markdown]
# # New QC
# %%
# One-hour test window; alternative windows kept commented for re-runs.
tslice = slice(np.datetime64("2018-09-07T10:00"), np.datetime64("2018-09-07T11:00"))
# tslice = slice(np.datetime64("2018-09-04T10:00"), np.datetime64("2018-09-04T11:00"))
# tslice = slice(np.datetime64("2018-09-11T14:00"), np.datetime64("2018-09-11T16:00"))
# tslice = slice(np.datetime64("2018-09-10T03:00"), np.datetime64("2018-09-10T04:00"))
enu = sVp.sel(time=tslice)
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.u.plot(ax=axs[0], **hvel_kwargs)
enu.v.plot(ax=axs[1], **hvel_kwargs)
enu.w.plot(ax=axs[2], **vvel_kwargs)
enu.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enu.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
    ax.set_xlabel("")
# %%
# q1–q4/vq look like per-beam correlation/quality metrics — TODO confirm.
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.q1.plot(ax=axs[0])
enu.q2.plot(ax=axs[1])
enu.q3.plot(ax=axs[2])
enu.q4.plot(ax=axs[3])
enu.vq.plot(ax=axs[4])
for ax in axs:
    ax.set_xlabel("")
# %%
# Surface detection: find the amplitude maximum beyond dthresh for each beam,
# pull it inside the side-lobe contaminated layer, and smooth with a rolling
# mode. `mode` is a reducer defined earlier in the notebook.
dthresh = 100.
sidelobe_pct = 1 - np.cos(np.deg2rad(enu.beamAngle))
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5
# Per-beam estimate of the last good along-beam distance.
dmingood = np.full((enu.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
    idxmax = enu[var].where(enu.distance > dthresh).argmax("distance")
    dmax = sVp.distance[idxmax]
    dsl = (1 - sidelobe_pct)*enu.distance[idxmax]
    # dmax = dmax.where(dmax > dthresh)
    dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
    enu[var].plot(ax=axs[i])
    dmingood[:, i] = dmode
    dmode.plot(ax=axs[i], color="r")
    axs[i].set_title("")
# Overlay the most conservative (minimum over beams) cut-off on every panel.
for i in range(len(var_names)):
    axs[i].plot(enu.time, dmingood.min(axis=1), color="k")
# %%
fig, axs = plt.subplots(3, 1, figsize=(22, 9))
enu.heading.plot(ax=axs[0], marker='.', linestyle="")
enu.rol.plot(ax=axs[1])
enu.pitch.plot(ax=axs[2])
# %%
# Make a new dataset without surface
enus = enu.copy()
# Loop over the 2D datavars
# Keep only bins closer than the per-time minimum good distance found above.
mask = enu.distance < xr.DataArray(dmingood.min(axis=1), dims={"time": enu.time})
for var in enu.data_vars:
    if enu[var].dims == ('distance', 'time'):
        print(f"Masking {var}.")
        enus[var] = enu[var].where(mask)
# Remove distances where there is no good data
enus = enus.isel(distance=mask.any("time"))
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enus.u.plot(ax=axs[0], **hvel_kwargs)
enus.v.plot(ax=axs[1], **hvel_kwargs)
enus.w.plot(ax=axs[2], **vvel_kwargs)
enus.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enus.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
    ax.set_xlabel("")
# %%
from scipy.ndimage import gaussian_filter
# %%
# Error-velocity QC: build a boolean "good" field, blur it so isolated bad
# points grow into regions, then threshold the blurred field.
# NOTE(review): errthresh == errthresh_high, so the "always remove" test is
# currently no stricter than the blur seed — confirm this is intended.
errthresh = 0.2  # Blur around these errors
errthresh_high = 0.2  # Always remove these errors
maskthresh = 0.35  # Blurred mask threshold
qthresh = 300
vqthresh = 35
sigma = (2, 5)
qsum = enus.q1 + enus.q2 + enus.q3 + enus.q4
qgood = qsum > qthresh
vqgood = enus.vq.values > vqthresh
enueb = enus.copy()
egood = np.abs(enus.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
# Combined masks for beam velocities (ebgood) and vertical beam (vebgood).
ebgood = (egood_filt > maskthresh) & (np.abs(enus.err) < errthresh_high) & qgood
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
    enueb[var] = enus[var].where(ebgood)
enueb["vv"] = enus.vv.where(vebgood)
# %%
# Visualise the blurred mask and the quality contours on top of it.
fig, ax = plt.subplots(1, 1, figsize=(22, 3.5))
ax.pcolormesh(egood_filt)
ax.contour(egood_filt, [maskthresh], colors="r")
ax.contour(qgood, [0.5], colors="g")
ax.contour(vqgood, [0.5], colors="b")
# %% tags=[]
# Before/after comparison of each velocity component.
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(8, 1, sharex=True, figsize=(22, 28))
enueb.u.plot(ax=axs[0], **hvel_kwargs)
enus.u.plot(ax=axs[1], **hvel_kwargs)
enueb.v.plot(ax=axs[2], **hvel_kwargs)
enus.v.plot(ax=axs[3], **hvel_kwargs)
enueb.w.plot(ax=axs[4], **vvel_kwargs)
enus.w.plot(ax=axs[5], **vvel_kwargs)
enueb.vv.plot(ax=axs[6], **vvel_kwargs)
enus.vv.plot(ax=axs[7], **vvel_kwargs)
for ax in axs:
    ax.set_xlabel("")
# %% [markdown]
# # Beam separation
# %%
# Horizontal separation between beam pairs as a function of along-beam
# distance, compared against a 75 m reference scale.
z = sVp.distance[sVp.distance < 120]
angle = np.deg2rad(sVp.beamAngle)
separation_opposite = 2*z*np.tan(angle)
separation_adjacent = 2*z*np.tan(angle)*np.cos(np.pi/4)
fig, ax = plt.subplots()
ax.plot(separation_opposite, z, label="opposite")
ax.plot(separation_adjacent, z, label="adjacent")
ax.axvline(75, color="k", label="half wavelength")
ax.legend()
ax.grid()
ax.set_xlabel("Beam separation [m]")
ax.set_ylabel("Distance from ADCP (mast) [m]")
| [
"utm.from_latlon",
"numpy.array",
"utils.POSIX_to_datetime",
"gsw.z_from_p",
"numpy.datetime64",
"gsw.SA_from_SP",
"numpy.abs",
"matplotlib.collections.PolyCollection",
"numpy.deg2rad",
"numpy.cos",
"numpy.timedelta64",
"xarray.open_dataset",
"matplotlib.pyplot.show",
"numpy.tan",
"scipy... | [((1024, 1076), 'xarray.open_dataset', 'xr.open_dataset', (['"""../proc/ABLE_sentinel_2018_enu.nc"""'], {}), "('../proc/ABLE_sentinel_2018_enu.nc')\n", (1039, 1076), True, 'import xarray as xr\n'), ((1199, 1230), 'utm.from_latlon', 'utm.from_latlon', (['sV.lat', 'sV.lon'], {}), '(sV.lat, sV.lon)\n', (1214, 1230), False, 'import utm\n'), ((1279, 1339), 'xarray.open_dataset', 'xr.open_dataset', (['"""../proc/ABLE_sentinel_RBRvirtuoso_2018.nc"""'], {}), "('../proc/ABLE_sentinel_RBRvirtuoso_2018.nc')\n", (1294, 1339), True, 'import xarray as xr\n'), ((1465, 1519), 'xarray.open_dataset', 'xr.open_dataset', (['"""../proc/ABLE_sentinel_SBE37_2018.nc"""'], {}), "('../proc/ABLE_sentinel_SBE37_2018.nc')\n", (1480, 1519), True, 'import xarray as xr\n'), ((8511, 8560), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(15, 10)', 'sharex': '(True)'}), '(4, 1, figsize=(15, 10), sharex=True)\n', (8523, 8560), True, 'import matplotlib.pyplot as plt\n'), ((8858, 8887), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (8870, 8887), True, 'import matplotlib.pyplot as plt\n'), ((9045, 9094), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(8)', '(1)'], {'figsize': '(15, 25)', 'sharex': '(True)'}), '(8, 1, figsize=(15, 25), sharex=True)\n', (9057, 9094), True, 'import matplotlib.pyplot as plt\n'), ((9492, 9527), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(11, 8)'}), '(3, 1, figsize=(11, 8))\n', (9504, 9527), True, 'import matplotlib.pyplot as plt\n'), ((10295, 10324), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (10307, 10324), True, 'import matplotlib.pyplot as plt\n'), ((10360, 10409), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(12, 10)', 'sharex': '(True)'}), '(3, 1, figsize=(12, 10), sharex=True)\n', (10372, 10409), True, 'import matplotlib.pyplot as plt\n'), ((10886, 10900), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10898, 10900), True, 'import matplotlib.pyplot as plt\n'), ((12207, 12269), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(15, 10)', 'sharex': '(True)', 'sharey': '(True)'}), '(4, 1, figsize=(15, 10), sharex=True, sharey=True)\n', (12219, 12269), True, 'import matplotlib.pyplot as plt\n'), ((12601, 12652), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(11.7, 10)', 'sharex': '(True)'}), '(4, 1, figsize=(11.7, 10), sharex=True)\n', (12613, 12652), True, 'import matplotlib.pyplot as plt\n'), ((13021, 13050), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(11, 4)'}), '(figsize=(11, 4))\n', (13033, 13050), True, 'import matplotlib.pyplot as plt\n'), ((13404, 13434), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (13416, 13434), True, 'import matplotlib.pyplot as plt\n'), ((13826, 13854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (13836, 13854), True, 'import matplotlib.pyplot as plt\n'), ((14372, 14393), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['verts'], {}), '(verts)\n', (14386, 14393), False, 'from matplotlib.collections import PolyCollection\n'), ((14723, 14733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14731, 14733), True, 'import matplotlib.pyplot as plt\n'), ((14938, 14966), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (14948, 14966), True, 'import matplotlib.pyplot as plt\n'), ((15767, 15816), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'sharex': '(True)', 'figsize': '(22, 17)'}), '(5, 1, sharex=True, figsize=(22, 17))\n', (15779, 15816), True, 'import matplotlib.pyplot as plt\n'), ((16071, 16120), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'sharex': '(True)', 'figsize': '(22, 17)'}), '(5, 
1, sharex=True, figsize=(22, 17))\n', (16083, 16120), True, 'import matplotlib.pyplot as plt\n'), ((17086, 17121), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(22, 9)'}), '(3, 1, figsize=(22, 9))\n', (17098, 17121), True, 'import matplotlib.pyplot as plt\n'), ((17768, 17817), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'sharex': '(True)', 'figsize': '(22, 17)'}), '(5, 1, sharex=True, figsize=(22, 17))\n', (17780, 17817), True, 'import matplotlib.pyplot as plt\n'), ((18809, 18846), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(22, 3.5)'}), '(1, 1, figsize=(22, 3.5))\n', (18821, 18846), True, 'import matplotlib.pyplot as plt\n'), ((19137, 19186), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(8)', '(1)'], {'sharex': '(True)', 'figsize': '(22, 28)'}), '(8, 1, sharex=True, figsize=(22, 28))\n', (19149, 19186), True, 'import matplotlib.pyplot as plt\n'), ((19623, 19648), 'numpy.deg2rad', 'np.deg2rad', (['sVp.beamAngle'], {}), '(sVp.beamAngle)\n', (19633, 19648), True, 'import numpy as np\n'), ((19757, 19771), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19769, 19771), True, 'import matplotlib.pyplot as plt\n'), ((889, 925), 'numpy.array', 'np.array', (['[v.mid for v in intervals]'], {}), '([v.mid for v in intervals])\n', (897, 925), True, 'import numpy as np\n'), ((4853, 4896), 'xarray.DataArray', 'xr.DataArray', (['good'], {'dims': "{'time': sVp.time}"}), "(good, dims={'time': sVp.time})\n", (4865, 4896), True, 'import xarray as xr\n'), ((5488, 5503), 'numpy.abs', 'np.abs', (['sVs.err'], {}), '(sVs.err)\n', (5494, 5503), True, 'import numpy as np\n'), ((6215, 6238), 'numpy.timedelta64', 'np.timedelta64', (['dt', '"""s"""'], {}), "(dt, 's')\n", (6229, 6238), True, 'import numpy as np\n'), ((8397, 8430), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T08:00"""'], {}), "('2018-09-05T08:00')\n", (8410, 8430), True, 'import numpy as np\n'), ((8432, 8465), 
'numpy.datetime64', 'np.datetime64', (['"""2018-09-10T11:00"""'], {}), "('2018-09-10T11:00')\n", (8445, 8465), True, 'import numpy as np\n'), ((8931, 8964), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T08:00"""'], {}), "('2018-09-05T08:00')\n", (8944, 8964), True, 'import numpy as np\n'), ((8966, 8999), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-10T11:00"""'], {}), "('2018-09-10T11:00')\n", (8979, 8999), True, 'import numpy as np\n'), ((11073, 11128), 'gsw.SA_from_SP', 'gsw.SA_from_SP', (['ds.SP_SBE37', 'ds.p_SBE37', 'ds.lon', 'ds.lat'], {}), '(ds.SP_SBE37, ds.p_SBE37, ds.lon, ds.lat)\n', (11087, 11128), False, 'import gsw\n'), ((11212, 11262), 'gsw.CT_from_t', 'gsw.CT_from_t', (['ds.SA_SBE37', 'ds.t_SBE37', 'ds.p_SBE37'], {}), '(ds.SA_SBE37, ds.t_SBE37, ds.p_SBE37)\n', (11225, 11262), False, 'import gsw\n'), ((11353, 11385), 'gsw.z_from_p', 'gsw.z_from_p', (['ds.p_SBE37', 'ds.lat'], {}), '(ds.p_SBE37, ds.lat)\n', (11365, 11385), False, 'import gsw\n'), ((11536, 11562), 'gsw.z_from_p', 'gsw.z_from_p', (['ds.p', 'ds.lat'], {}), '(ds.p, ds.lat)\n', (11548, 11562), False, 'import gsw\n'), ((12095, 12128), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T08:00"""'], {}), "('2018-09-05T08:00')\n", (12108, 12128), True, 'import numpy as np\n'), ((12130, 12163), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T12:00"""'], {}), "('2018-09-05T12:00')\n", (12143, 12163), True, 'import numpy as np\n'), ((12873, 12906), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T08:00"""'], {}), "('2018-09-05T08:00')\n", (12886, 12906), True, 'import numpy as np\n'), ((12908, 12941), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T12:00"""'], {}), "('2018-09-05T12:00')\n", (12921, 12941), True, 'import numpy as np\n'), ((13294, 13327), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T08:00"""'], {}), "('2018-09-05T08:00')\n", (13307, 13327), True, 'import numpy as np\n'), ((13329, 13362), 'numpy.datetime64', 'np.datetime64', 
(['"""2018-09-05T12:00"""'], {}), "('2018-09-05T12:00')\n", (13342, 13362), True, 'import numpy as np\n'), ((14831, 14864), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T10:00"""'], {}), "('2018-09-05T10:00')\n", (14844, 14864), True, 'import numpy as np\n'), ((14866, 14899), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-05T10:45"""'], {}), "('2018-09-05T10:45')\n", (14879, 14899), True, 'import numpy as np\n'), ((15276, 15309), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-07T10:00"""'], {}), "('2018-09-07T10:00')\n", (15289, 15309), True, 'import numpy as np\n'), ((15311, 15344), 'numpy.datetime64', 'np.datetime64', (['"""2018-09-07T11:00"""'], {}), "('2018-09-07T11:00')\n", (15324, 15344), True, 'import numpy as np\n'), ((18428, 18444), 'numpy.abs', 'np.abs', (['enus.err'], {}), '(enus.err)\n', (18434, 18444), True, 'import numpy as np\n'), ((19676, 19689), 'numpy.tan', 'np.tan', (['angle'], {}), '(angle)\n', (19682, 19689), True, 'import numpy as np\n'), ((19730, 19747), 'numpy.cos', 'np.cos', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (19736, 19747), True, 'import numpy as np\n'), ((1125, 1164), 'utils.POSIX_to_datetime', 'utils.POSIX_to_datetime', (['sV.time.values'], {}), '(sV.time.values)\n', (1148, 1164), False, 'import utils\n'), ((1394, 1435), 'utils.POSIX_to_datetime', 'utils.POSIX_to_datetime', (['virt.time.values'], {}), '(virt.time.values)\n', (1417, 1435), False, 'import utils\n'), ((1571, 1611), 'utils.POSIX_to_datetime', 'utils.POSIX_to_datetime', (['sbe.time.values'], {}), '(sbe.time.values)\n', (1594, 1611), False, 'import utils\n'), ((3968, 3993), 'numpy.deg2rad', 'np.deg2rad', (['sVp.beamAngle'], {}), '(sVp.beamAngle)\n', (3978, 3993), True, 'import numpy as np\n'), ((15966, 15981), 'numpy.abs', 'np.abs', (['enu.err'], {}), '(enu.err)\n', (15972, 15981), True, 'import numpy as np\n'), ((16321, 16346), 'numpy.deg2rad', 'np.deg2rad', (['enu.beamAngle'], {}), '(enu.beamAngle)\n', (16331, 16346), True, 'import numpy as np\n'), 
((17971, 17987), 'numpy.abs', 'np.abs', (['enus.err'], {}), '(enus.err)\n', (17977, 17987), True, 'import numpy as np\n'), ((19716, 19729), 'numpy.tan', 'np.tan', (['angle'], {}), '(angle)\n', (19722, 19729), True, 'import numpy as np\n'), ((578, 599), 'scipy.stats.mode', 'stats.mode', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (588, 599), True, 'import scipy.stats as stats\n'), ((5618, 5633), 'numpy.abs', 'np.abs', (['sVs.err'], {}), '(sVs.err)\n', (5624, 5633), True, 'import numpy as np\n'), ((6078, 6101), 'numpy.timedelta64', 'np.timedelta64', (['(30)', '"""s"""'], {}), "(30, 's')\n", (6092, 6101), True, 'import numpy as np\n'), ((18559, 18575), 'numpy.abs', 'np.abs', (['enus.err'], {}), '(enus.err)\n', (18565, 18575), True, 'import numpy as np\n')] |
import torch
from torch import nn
from tqdm.auto import tqdm
from torchvision import transforms
import numpy as np
# Fix RNG seeds so weight initialisation and any numpy sampling are
# reproducible across runs.
torch.manual_seed(36)
np.random.seed(36)
"""
Borrowing a lot of code from Assigments (Pix2Pix for Generator, DCGan for Discriminator)
"""
def crop(image, new_shape):
    """Center-crop a batched image tensor to a target shape.

    Parameters:
        image: tensor of shape (batch, channels, height, width).
        new_shape: a torch.Size (or shape-like sequence) whose entries 2 and 3
            give the desired output height and width.

    Returns:
        A view of `image` cropped around its spatial center.
    """
    h_center = image.shape[2] // 2
    w_center = image.shape[3] // 2
    # Offsets use round() so odd target sizes split the margin like the
    # original Pix2Pix helper.
    h_start = h_center - round(new_shape[2] / 2)
    w_start = w_center - round(new_shape[3] / 2)
    return image[:, :, h_start:h_start + new_shape[2], w_start:w_start + new_shape[3]]
class FontDiscriminator(nn.Module):
    """DCGAN-style convolutional discriminator for font images.

    Three strided convolution stages, a final conv + max-pool stage, and a
    fully connected layer producing one real/fake score per image.

    Args:
        im_chan: number of channels in the input images (default 2).
        hidden_dim: width of the first hidden convolution (default 16).
    """
    def __init__(self, im_chan=2, hidden_dim=16):
        super(FontDiscriminator, self).__init__()
        # Channel progression: im_chan -> h -> 2h -> 4h -> 1.
        self.disc = nn.Sequential(
            self.make_disc_block(im_chan, hidden_dim),
            self.make_disc_block(hidden_dim, hidden_dim * 2),
            self.make_disc_block(hidden_dim * 2, hidden_dim * 4),
            self.make_disc_block(hidden_dim * 4, 1, final_layer=True),
        )
        # 49 flattened features from the final stage (presumably a 7x7 map).
        # TODO Make 49 non-magical
        self.fc = nn.Linear(49, 1)

    def make_disc_block(self, input_channels, output_channels, kernel_size=4, stride=2, final_layer=False):
        """Return one downsampling stage of the discriminator.

        Intermediate stages are conv -> batchnorm -> LeakyReLU(0.2); the
        final stage is conv -> 2x2 max-pool with no norm or activation.
        """
        conv = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride)
        if final_layer:
            return nn.Sequential(conv, nn.MaxPool2d(kernel_size=2))
        return nn.Sequential(
            conv,
            nn.BatchNorm2d(output_channels),
            nn.LeakyReLU(negative_slope=0.2),
        )

    def forward(self, image):
        """Score a batch of images; returns a (batch, 1) tensor."""
        features = self.disc(image)
        return self.fc(features.view(features.shape[0], -1))
class ContractingBlock(nn.Module):
    """U-Net encoder block: two 3x3 convolutions then 2x2 max-pooling.

    Doubles the channel count and halves each spatial dimension.

    Args:
        input_channels: channels expected on the input tensor.
        use_dropout: apply Dropout after each convolution when True.
        use_bn: apply BatchNorm after each convolution when True.
    """
    def __init__(self, input_channels, use_dropout=False, use_bn=True):
        super(ContractingBlock, self).__init__()
        doubled = input_channels * 2
        self.conv1 = nn.Conv2d(input_channels, doubled, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(doubled, doubled, kernel_size=3, padding=1)
        self.activation = nn.LeakyReLU(0.2)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.use_bn = use_bn
        if use_bn:
            self.batchnorm = nn.BatchNorm2d(doubled)
        self.use_dropout = use_dropout
        if use_dropout:
            self.dropout = nn.Dropout()

    def _conv_stage(self, x, conv):
        # One conv -> (batchnorm) -> (dropout) -> LeakyReLU pass.
        x = conv(x)
        if self.use_bn:
            x = self.batchnorm(x)
        if self.use_dropout:
            x = self.dropout(x)
        return self.activation(x)

    def forward(self, x):
        """Contract a (B, C, H, W) tensor to (B, 2C, H//2, W//2)."""
        x = self._conv_stage(x, self.conv1)
        x = self._conv_stage(x, self.conv2)
        return self.maxpool(x)
class ExpandingBlock(nn.Module):
    """U-Net decoder block: upsample, merge a skip connection, refine.

    Bilinearly doubles the spatial size and halves the channel count, then
    concatenates a center-cropped skip tensor and applies two further
    convolutions with optional batchnorm/dropout.

    Args:
        input_channels: channels expected on the input tensor.
        use_dropout: apply Dropout after conv2/conv3 when True.
        use_bn: apply BatchNorm after conv2/conv3 when True.
    """
    def __init__(self, input_channels, use_dropout=False, use_bn=True):
        super(ExpandingBlock, self).__init__()
        halved = input_channels // 2
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv1 = nn.Conv2d(input_channels, halved, kernel_size=2)
        self.conv2 = nn.Conv2d(input_channels, halved, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(halved, halved, kernel_size=2, padding=1)
        self.use_bn = use_bn
        if use_bn:
            self.batchnorm = nn.BatchNorm2d(halved)
        self.activation = nn.ReLU()
        self.use_dropout = use_dropout
        if use_dropout:
            self.dropout = nn.Dropout()

    def _refine(self, x, conv):
        # One conv -> (batchnorm) -> (dropout) -> ReLU pass.
        x = conv(x)
        if self.use_bn:
            x = self.batchnorm(x)
        if self.use_dropout:
            x = self.dropout(x)
        return self.activation(x)

    def forward(self, x, skip_con_x):
        """Expand `x` and fuse it with the matching encoder tensor.

        Args:
            x: (B, C, H, W) tensor from the previous decoder stage.
            skip_con_x: encoder tensor for the skip connection; it is
                center-cropped (via the module-level `crop`) to match.
        """
        up = self.conv1(self.upsample(x))
        merged = torch.cat([up, crop(skip_con_x, up.shape)], axis=1)
        merged = self._refine(merged, self.conv2)
        return self._refine(merged, self.conv3)
class FeatureMapBlock(nn.Module):
    """1x1 convolution that remaps the channel dimension.

    Used at the entry/exit of the U-Net to convert between image channels
    and the internal feature width without touching spatial dimensions.

    Args:
        input_channels: channels expected on the input tensor.
        output_channels: channels produced on the output tensor.
    """
    def __init__(self, input_channels, output_channels):
        super(FeatureMapBlock, self).__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=1)

    def forward(self, x):
        """Return `x` with channels remapped; spatial size is unchanged."""
        return self.conv(x)
class DiscriminatorHead(nn.Module):
    '''
    Discriminator Class
    Structured like the contracting path of the U-Net, the discriminator will
    output a matrix of values classifying corresponding portions of the image as real or fake.
    Parameters:
        input_channels: the number of image input channels
        hidden_channels: the initial number of discriminator convolutional filters
        out_features: length of the feature vector returned per image
    '''
    def __init__(self, input_channels=1, hidden_channels=8, out_features=64):
        super(DiscriminatorHead, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        self.contract1 = ContractingBlock(hidden_channels, use_bn=False)
        self.contract2 = ContractingBlock(hidden_channels * 2)
        self.contract3 = ContractingBlock(hidden_channels * 4)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        # 1x1 conv collapses channels to one map; the Linear layer expects
        # that map flattened to 256 values, i.e. 16x16 after four halvings —
        # so the input image is assumed to be 256x256. TODO confirm.
        self.final = nn.Conv2d(hidden_channels * 16, 1, kernel_size=1)
        self.out = nn.Linear(256, out_features)

    def forward(self, x):
        '''
        Map a batch of images to per-image feature vectors.
        Parameters:
            x: image tensor of shape (batch size, input_channels, height, width)
        Returns:
            tensor of shape (batch size, out_features)
        '''
        x0 = self.upfeature(x)
        x1 = self.contract1(x0)
        x2 = self.contract2(x1)
        x3 = self.contract3(x2)
        x4 = self.contract4(x3)
        # Flatten everything except the batch dimension. The previous
        # `.squeeze()` also dropped the batch axis when batch size was 1,
        # which crashed the subsequent view/Linear; a plain view is correct
        # for any batch size and identical for batch >= 2.
        xn = self.final(x4)
        xn = xn.view(xn.shape[0], -1)
        return self.out(xn)
class DualHeadFontDiscriminator(nn.Module):
    """Discriminator that compares two images via separate conv heads.

    Each head embeds its image into a feature vector; the concatenated
    features pass through an MLP ending in a sigmoid probability.

    Args:
        input_channels: channels per input image.
        hidden_channels: initial conv width inside each head.
        out_features: feature size produced by each head.
        hidden_features: width of the MLP hidden layers.
    """
    def __init__(self, input_channels=1, hidden_channels=8, out_features=32, hidden_features=64):
        super(DualHeadFontDiscriminator, self).__init__()
        self.head1 = nn.Sequential(
            DiscriminatorHead(input_channels, hidden_channels, out_features),
            nn.ReLU(),
        )
        self.head2 = nn.Sequential(
            DiscriminatorHead(input_channels, hidden_channels, out_features),
            nn.ReLU(),
        )
        self.out = nn.Sequential(
            nn.Linear(out_features * 2, hidden_features * 2),
            nn.ReLU(),
            nn.Linear(hidden_features * 2, hidden_features),
            nn.ReLU(),
            nn.Linear(hidden_features, 1),
            nn.Sigmoid(),
        )

    def forward(self, x, y):
        """Return a (batch, 1) probability for the image pair (x, y)."""
        combined = torch.cat([self.head1(x), self.head2(y)], dim=1)
        return self.out(combined)
class FontGenerator(nn.Module):
    """U-Net generator for font image translation.

    Six contracting stages encode the input; six expanding stages decode it
    back with skip connections to the matching encoder outputs. Feature-map
    blocks convert between image channels and the internal width, and a
    sigmoid squashes the output into (0, 1).

    Args:
        input_channels: channels expected on the input image (default 2).
        output_channels: channels produced on the output image (default 1).
        hidden_channels: base feature width (default 16).
    """
    def __init__(self, input_channels=2, output_channels=1, hidden_channels=16):
        super(FontGenerator, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        # Dropout only on the shallow encoder stages, as in the original.
        self.contract1 = ContractingBlock(hidden_channels, use_dropout=True)
        self.contract2 = ContractingBlock(hidden_channels * 2, use_dropout=True)
        self.contract3 = ContractingBlock(hidden_channels * 4, use_dropout=True)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        self.contract5 = ContractingBlock(hidden_channels * 16)
        self.contract6 = ContractingBlock(hidden_channels * 32)
        self.expand0 = ExpandingBlock(hidden_channels * 64)
        self.expand1 = ExpandingBlock(hidden_channels * 32)
        self.expand2 = ExpandingBlock(hidden_channels * 16)
        self.expand3 = ExpandingBlock(hidden_channels * 8)
        self.expand4 = ExpandingBlock(hidden_channels * 4)
        self.expand5 = ExpandingBlock(hidden_channels * 2)
        self.downfeature = FeatureMapBlock(hidden_channels, output_channels)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        """Translate `x` through the U-Net; output values lie in (0, 1)."""
        enc0 = self.upfeature(x)
        enc1 = self.contract1(enc0)
        enc2 = self.contract2(enc1)
        enc3 = self.contract3(enc2)
        enc4 = self.contract4(enc3)
        enc5 = self.contract5(enc4)
        enc6 = self.contract6(enc5)
        # Decoder: each stage fuses the symmetric encoder output.
        dec = self.expand0(enc6, enc5)
        dec = self.expand1(dec, enc4)
        dec = self.expand2(dec, enc3)
        dec = self.expand3(dec, enc2)
        dec = self.expand4(dec, enc1)
        dec = self.expand5(dec, enc0)
        return self.sigmoid(self.downfeature(dec))
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Sigmoid",
"torch.nn.BatchNorm2d",
"torch.nn.Dropout",
"torch.nn.LeakyReLU",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"numpy.random.seed",
"torch.nn.Linear",
"torch.nn.Upsample",
"torch.cat"
] | [((116, 137), 'torch.manual_seed', 'torch.manual_seed', (['(36)'], {}), '(36)\n', (133, 137), False, 'import torch\n'), ((138, 156), 'numpy.random.seed', 'np.random.seed', (['(36)'], {}), '(36)\n', (152, 156), True, 'import numpy as np\n'), ((1710, 1726), 'torch.nn.Linear', 'nn.Linear', (['(49)', '(1)'], {}), '(49, 1)\n', (1719, 1726), False, 'from torch import nn\n'), ((4329, 4400), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(input_channels * 2)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(input_channels, input_channels * 2, kernel_size=3, padding=1)\n', (4338, 4400), False, 'from torch import nn\n'), ((4422, 4497), 'torch.nn.Conv2d', 'nn.Conv2d', (['(input_channels * 2)', '(input_channels * 2)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(input_channels * 2, input_channels * 2, kernel_size=3, padding=1)\n', (4431, 4497), False, 'from torch import nn\n'), ((4524, 4541), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (4536, 4541), False, 'from torch import nn\n'), ((4565, 4602), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (4577, 4602), False, 'from torch import nn\n'), ((5972, 6036), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=2, mode='bilinear', align_corners=True)\n", (5983, 6036), False, 'from torch import nn\n'), ((6058, 6119), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(input_channels // 2)'], {'kernel_size': '(2)'}), '(input_channels, input_channels // 2, kernel_size=2)\n', (6067, 6119), False, 'from torch import nn\n'), ((6141, 6213), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(input_channels // 2)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(input_channels, input_channels // 2, kernel_size=3, padding=1)\n', (6150, 6213), False, 'from torch import nn\n'), ((6235, 6312), 'torch.nn.Conv2d', 'nn.Conv2d', (['(input_channels // 2)', 
'(input_channels // 2)'], {'kernel_size': '(2)', 'padding': '(1)'}), '(input_channels // 2, input_channels // 2, kernel_size=2, padding=1)\n', (6244, 6312), False, 'from torch import nn\n'), ((6452, 6461), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6459, 6461), False, 'from torch import nn\n'), ((7142, 7176), 'torch.cat', 'torch.cat', (['[x, skip_con_x]'], {'axis': '(1)'}), '([x, skip_con_x], axis=1)\n', (7151, 7176), False, 'import torch\n'), ((8052, 8109), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'output_channels'], {'kernel_size': '(1)'}), '(input_channels, output_channels, kernel_size=1)\n', (8061, 8109), False, 'from torch import nn\n'), ((9378, 9427), 'torch.nn.Conv2d', 'nn.Conv2d', (['(hidden_channels * 16)', '(1)'], {'kernel_size': '(1)'}), '(hidden_channels * 16, 1, kernel_size=1)\n', (9387, 9427), False, 'from torch import nn\n'), ((9447, 9475), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'out_features'], {}), '(256, out_features)\n', (9456, 9475), False, 'from torch import nn\n'), ((12284, 12302), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (12300, 12302), False, 'import torch\n'), ((4651, 4685), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(input_channels * 2)'], {}), '(input_channels * 2)\n', (4665, 4685), False, 'from torch import nn\n'), ((4766, 4778), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (4776, 4778), False, 'from torch import nn\n'), ((6361, 6396), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(input_channels // 2)'], {}), '(input_channels // 2)\n', (6375, 6396), False, 'from torch import nn\n'), ((6513, 6525), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (6523, 6525), False, 'from torch import nn\n'), ((10122, 10131), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10129, 10131), False, 'from torch import nn\n'), ((10274, 10283), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10281, 10283), False, 'from torch import nn\n'), ((10346, 10394), 'torch.nn.Linear', 'nn.Linear', (['(out_features * 2)', 
'(hidden_features * 2)'], {}), '(out_features * 2, hidden_features * 2)\n', (10355, 10394), False, 'from torch import nn\n'), ((10409, 10418), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10416, 10418), False, 'from torch import nn\n'), ((10432, 10479), 'torch.nn.Linear', 'nn.Linear', (['(hidden_features * 2)', 'hidden_features'], {}), '(hidden_features * 2, hidden_features)\n', (10441, 10479), False, 'from torch import nn\n'), ((10493, 10502), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10500, 10502), False, 'from torch import nn\n'), ((10516, 10545), 'torch.nn.Linear', 'nn.Linear', (['hidden_features', '(1)'], {}), '(hidden_features, 1)\n', (10525, 10545), False, 'from torch import nn\n'), ((10559, 10571), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (10569, 10571), False, 'from torch import nn\n'), ((10707, 10741), 'torch.cat', 'torch.cat', (['[x_feat, y_feat]'], {'dim': '(1)'}), '([x_feat, y_feat], dim=1)\n', (10716, 10741), False, 'import torch\n'), ((2992, 3079), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'output_channels'], {'kernel_size': 'kernel_size', 'stride': 'stride'}), '(input_channels, output_channels, kernel_size=kernel_size, stride=\n stride)\n', (3001, 3079), False, 'from torch import nn\n'), ((3092, 3123), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['output_channels'], {}), '(output_channels)\n', (3106, 3123), False, 'from torch import nn\n'), ((3141, 3173), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (3153, 3173), False, 'from torch import nn\n'), ((3350, 3437), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'output_channels'], {'kernel_size': 'kernel_size', 'stride': 'stride'}), '(input_channels, output_channels, kernel_size=kernel_size, stride=\n stride)\n', (3359, 3437), False, 'from torch import nn\n'), ((3450, 3477), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (3462, 3477), False, 'from torch import nn\n')] |
import os
import xarray
import matplotlib.pyplot as plt
import numpy as np
# Print full numpy arrays (no truncation) when inspecting dataset contents.
np.set_printoptions(threshold=np.inf)
from hparams import Hparams
# Command-line configuration: dataset directories come from the Hparams parser.
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
# Work table consumed by merge_nc_files(): each entry is
# [directory of monthly part-files, merged output file name, NetCDF variable].
dic = [
    [f'{hp.reanalysis_dataset_dir}/meta-data/sshg', 'sshg.mon.mean1850-2014.nc', 'zos'],
    [f'{hp.reanalysis_dataset_dir}/meta-data/thflx', 'thflx.mon.mean1850-2014.nc', 'hfds'],
    [f'{hp.observe_dataset_dir}/meta-data/sshg', 'sshg.mon.mean1980-2020.nc', 'sshg'],
    [f'{hp.observe_dataset_dir}/meta-data/thflx', 'thflx.mon.mean1980-2020.nc', 'thflx'],
    [f'{hp.observe_dataset_dir}/meta-data/wind/part', '../uwind.mon.mean1980-2019.nc', 'u10'],
    [f'{hp.observe_dataset_dir}/meta-data/wind/part', '../vwind.mon.mean1980-2019.nc', 'v10']
]
def merge_nc_files(attr):
    """Concatenate every NetCDF file in a directory along the time axis.

    Parameters
    ----------
    attr : sequence of str
        ``[directory, output_file_name, variable_name]`` — the directory whose
        files are merged, the name of the merged output file (written inside
        that same directory), and the NetCDF variable to extract.
    """
    basefile, out_name, var = attr[0], attr[1], attr[2]
    savepath = os.path.join(basefile, out_name)
    # Sort so files are concatenated in chronological (lexical) order.
    urls = [os.path.join(basefile, name) for name in sorted(os.listdir(basefile))]
    # decode_times=False keeps the raw time coordinate so differing calendar
    # encodings cannot break the concatenation.
    datasets = [xarray.open_dataset(url, cache=True, decode_times=False)[var]
                for url in urls]
    try:
        merged = xarray.concat(datasets, 'time')
        merged.to_netcdf(savepath)
    finally:
        # open_dataset keeps file handles alive; close them explicitly so this
        # function does not leak descriptors when many directories are merged.
        for ds in datasets:
            ds.close()
def read_data():
    """Open one wind NetCDF file and print its dataset summary (debug helper)."""
    path = os.path.join(f'{hp.observe_dataset_dir}/meta-data/wind',
                 'vwind.mon.mean1980-2019.nc')
    sea_data = xarray.open_dataset(path, cache=True, decode_times=True)
    print(sea_data)
    # print(sea_data.sst)
def main():
    """Merge every dataset series listed in the module-level ``dic`` table."""
    for attrs in dic:
        merge_nc_files(attrs)
# Entry point: merge all configured NetCDF series when run as a script.
if __name__ == "__main__":
    # read_data()  # debugging alternative: print one dataset's metadata
    main()
| [
"os.listdir",
"os.path.join",
"hparams.Hparams",
"xarray.concat",
"xarray.open_dataset",
"numpy.set_printoptions"
] | [((76, 113), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (95, 113), True, 'import numpy as np\n'), ((153, 162), 'hparams.Hparams', 'Hparams', ([], {}), '()\n', (160, 162), False, 'from hparams import Hparams\n'), ((836, 867), 'os.path.join', 'os.path.join', (['basefile', 'attr[1]'], {}), '(basefile, attr[1])\n', (848, 867), False, 'import os\n'), ((1087, 1118), 'xarray.concat', 'xarray.concat', (['datasets', '"""time"""'], {}), "(datasets, 'time')\n", (1100, 1118), False, 'import xarray\n'), ((1180, 1270), 'os.path.join', 'os.path.join', (['f"""{hp.observe_dataset_dir}/meta-data/wind"""', '"""vwind.mon.mean1980-2019.nc"""'], {}), "(f'{hp.observe_dataset_dir}/meta-data/wind',\n 'vwind.mon.mean1980-2019.nc')\n", (1192, 1270), False, 'import os\n'), ((1282, 1338), 'xarray.open_dataset', 'xarray.open_dataset', (['file'], {'cache': '(True)', 'decode_times': '(True)'}), '(file, cache=True, decode_times=True)\n', (1301, 1338), False, 'import xarray\n'), ((904, 924), 'os.listdir', 'os.listdir', (['basefile'], {}), '(basefile)\n', (914, 924), False, 'import os\n'), ((938, 963), 'os.path.join', 'os.path.join', (['basefile', 'i'], {}), '(basefile, i)\n', (950, 963), False, 'import os\n'), ((995, 1051), 'xarray.open_dataset', 'xarray.open_dataset', (['url'], {'cache': '(True)', 'decode_times': '(False)'}), '(url, cache=True, decode_times=False)\n', (1014, 1051), False, 'import xarray\n')] |
from reducer import Reducer
from bounder import Bounder
from normalizer import Normalizer
from gridifier import Gridifier
from pB_approximator import pB_Approximator
from trimmer import Trimmer
from balancer import Balancer
from squeezer import Squeezer
from corrector import Corrector
import numpy as np
import tensorflow as tf
class Pipeline():
    """Pre-processing pipeline for snapshot data.

    Outlier bounds and normalisation statistics are fitted once on the base
    snapshots handed to the constructor and then re-applied to every dataset
    that passes through the pipeline.
    """

    def __init__(self, const, base_snapshots):
        self._const = const
        self._snapshot_cnt = len(base_snapshots)
        # Fit the bounder on the raw data, then the normaliser on the bounded
        # data — the same order used at transform time by bound_normalize().
        self._bounder = Bounder(base_snapshots, self._const.outlier_cutoff)
        bounded = self._bounder.bound_snapshots(base_snapshots)
        self._normalizer = Normalizer(bounded)
        normalized = self._normalizer.normalize_snapshots(bounded)
        self._minima = np.amin(normalized, axis=0)
        self._maxima = np.amax(normalized, axis=0)

    @property
    def const(self):
        """Constants object supplied at construction time."""
        return self._const

    @property
    def lower_bound(self):
        """Per-feature lower outlier bound fitted on the base snapshots."""
        return self._bounder.lower_bound

    @property
    def upper_bound(self):
        """Per-feature upper outlier bound fitted on the base snapshots."""
        return self._bounder.upper_bound

    @property
    def mean(self):
        """Per-feature mean used for normalisation."""
        return self._normalizer.mean

    @property
    def std(self):
        """Per-feature standard deviation used for normalisation."""
        return self._normalizer.std

    @property
    def minima(self):
        """Per-feature minima of the bounded, normalised base snapshots."""
        return self._minima

    @property
    def maxima(self):
        """Per-feature maxima of the bounded, normalised base snapshots."""
        return self._maxima

    def reduce_property(self, property_list):
        """Keep only the entries at the list positions selected in the constants."""
        keep = self._const._used_list_positions
        return np.array([property_list[pos] for pos in keep])

    @property
    def r_lower_bound(self):
        """``lower_bound`` restricted to the used list positions."""
        return self.reduce_property(self.lower_bound)

    @property
    def r_upper_bound(self):
        """``upper_bound`` restricted to the used list positions."""
        return self.reduce_property(self.upper_bound)

    @property
    def r_mean(self):
        """``mean`` restricted to the used list positions."""
        return self.reduce_property(self.mean)

    @property
    def r_std(self):
        """``std`` restricted to the used list positions."""
        return self.reduce_property(self.std)

    @property
    def r_minima(self):
        """``minima`` restricted to the used list positions."""
        return self.reduce_property(self.minima)

    @property
    def r_maxima(self):
        """``maxima`` restricted to the used list positions."""
        return self.reduce_property(self.maxima)

    @property
    def snapshot_cnt(self):
        """Number of base snapshots the pipeline was fitted on."""
        return self._snapshot_cnt

    def bound_normalize(self, snapshots):
        """Clip outliers, then normalise, using the fitted statistics."""
        bounded = self._bounder.bound_snapshots(snapshots)
        return self._normalizer.normalize_snapshots(bounded)

    def reduce(self, snapshots):
        """Drop the unused coordinates from every snapshot."""
        return Reducer(self._const).reduce_snapshots(snapshots)

    def gridify(self, snapshots):
        """Snap snapshot coordinates onto the configured grid resolution."""
        gridifier = Gridifier(snapshots, self._const.resolution)
        return gridifier.gridify_snapshots(snapshots)

    def approximate(self, snapshots, dataset):
        """Approximate committor values on gridified snapshots.

        Returns the ``(pB_dict, pBs, pB_weights)`` triple produced by
        ``pB_Approximator.approximate_pBs``.
        """
        return pB_Approximator.approximate_pBs(
            snapshots, dataset.labels, dataset.weights)

    def trim(self, pBs, *args):
        """Trim every positional argument according to the given pB values."""
        trimmer = Trimmer(pBs)
        return [trimmer.trim_snapshots(arg) for arg in args]

    def squeeze(self, pBs):
        """Squeeze the pB values as configured in the constants."""
        return Squeezer.squeeze_pBs(pBs, self._const)

    def normalize(self, snapshots):
        """Normalise snapshots using statistics fitted on themselves."""
        return Normalizer(snapshots).normalize_snapshots(snapshots)

    def pB_balance(self, pBs):
        """Sample weights balancing the pB distribution across bins."""
        return Balancer.pB_balance(pBs, self._const.balance_bins)

    def hypercube_balance(self, snapshots):
        """Sample weights balancing snapshots across hypercube bins."""
        return Balancer.hypercube_balance(
            snapshots, self._const.balance_bins)

    def multidim_balance(self, snapshots):
        """Sample weights balancing snapshots across multidimensional bins."""
        return Balancer.multidim_balance(
            snapshots, self._const.balance_bins)

    def get_1D_means(self, snapshots):
        """Row means used for 1D plots."""
        return Corrector.get_means_for_1D_row(snapshots)

    def get_2D_means(self, snapshots):
        """Grid-cell means used for 2D plots."""
        return Corrector.get_means_for_2D_grid(snapshots)

    def pack_tf_dataset(
            self, snapshots, labels, prediction_weights,
            reconstruction_weights):
        """Bundle inputs, targets and per-output sample weights into a
        shuffled, batched tf.data.Dataset for the two-headed model."""
        inputs = {self._const.input_name: snapshots}
        targets = {self._const.output_name_1: labels,
                   self._const.output_name_2: snapshots}
        weights = {self._const.output_name_1: prediction_weights,
                   self._const.output_name_2: reconstruction_weights}
        dataset = tf.data.Dataset.from_tensor_slices((inputs, targets, weights))
        return dataset.shuffle(250000).batch(self._const.batch_size)

    def prepare_groundTruth(self, dataset):
        """Gridified, reduced snapshots plus the dataset's labels and weights."""
        bn_snapshots = self.bound_normalize(dataset.snapshots)
        g_snapshots = self.gridify(self.reduce(bn_snapshots))
        return g_snapshots, dataset.labels, dataset.weights

    def prepare_dataset_from_bn(self, bn_snapshots, dataset):
        """tf dataset (uniform weights) built from bounded/normalised snapshots."""
        snapshots = self.reduce(bn_snapshots)
        g_snapshots = self.gridify(snapshots)
        _, pBs, _ = self.approximate(g_snapshots, dataset)
        ds = self.pack_tf_dataset(
            snapshots=snapshots,
            labels=pBs,
            prediction_weights=np.ones(len(snapshots)),
            reconstruction_weights=np.ones(len(snapshots)))
        return ds, g_snapshots

    def prepare_prediction_plotter(self, dataset):
        """Dataset plus the 1D/2D means required by the prediction plotter."""
        bn_snapshots = self.bound_normalize(dataset.snapshots)
        ds, g_snapshots = self.prepare_dataset_from_bn(bn_snapshots, dataset)
        return ds, self.get_1D_means(g_snapshots), self.get_2D_means(g_snapshots)

    def prepare_stepper(
            self, train_bn_snapshots, train_dataset,
            val_bn_snapshots, val_dataset):
        """Training and validation tf datasets for the training loop."""
        train_ds, _ = self.prepare_dataset_from_bn(
            train_bn_snapshots, train_dataset)
        val_ds, _ = self.prepare_dataset_from_bn(
            val_bn_snapshots, val_dataset)
        return train_ds, val_ds

    def prepare_dataset_pickle(self, dataset):
        """Reduced snapshots, approximated pBs and gridified snapshots."""
        snapshots = self.reduce(self.bound_normalize(dataset.snapshots))
        g_snapshots = self.gridify(snapshots)
        _, pBs, _ = self.approximate(g_snapshots, dataset)
        return snapshots, pBs, g_snapshots
| [
"balancer.Balancer.hypercube_balance",
"corrector.Corrector.get_means_for_1D_row",
"numpy.amin",
"balancer.Balancer.multidim_balance",
"reducer.Reducer",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.array",
"normalizer.Normalizer",
"pB_approximator.pB_Approximator.approximate_pBs",
"trimme... | [((499, 550), 'bounder.Bounder', 'Bounder', (['base_snapshots', 'self._const.outlier_cutoff'], {}), '(base_snapshots, self._const.outlier_cutoff)\n', (506, 550), False, 'from bounder import Bounder\n'), ((649, 675), 'normalizer.Normalizer', 'Normalizer', (['base_snapshots'], {}), '(base_snapshots)\n', (659, 675), False, 'from normalizer import Normalizer\n'), ((777, 808), 'numpy.amin', 'np.amin', (['base_snapshots'], {'axis': '(0)'}), '(base_snapshots, axis=0)\n', (784, 808), True, 'import numpy as np\n'), ((832, 863), 'numpy.amax', 'np.amax', (['base_snapshots'], {'axis': '(0)'}), '(base_snapshots, axis=0)\n', (839, 863), True, 'import numpy as np\n'), ((1427, 1526), 'numpy.array', 'np.array', (['[property_list[used_position] for used_position in self._const.\n _used_list_positions]'], {}), '([property_list[used_position] for used_position in self._const.\n _used_list_positions])\n', (1435, 1526), True, 'import numpy as np\n'), ((2410, 2430), 'reducer.Reducer', 'Reducer', (['self._const'], {}), '(self._const)\n', (2417, 2430), False, 'from reducer import Reducer\n'), ((2567, 2611), 'gridifier.Gridifier', 'Gridifier', (['snapshots', 'self._const.resolution'], {}), '(snapshots, self._const.resolution)\n', (2576, 2611), False, 'from gridifier import Gridifier\n'), ((2779, 2854), 'pB_approximator.pB_Approximator.approximate_pBs', 'pB_Approximator.approximate_pBs', (['snapshots', 'dataset.labels', 'dataset.weights'], {}), '(snapshots, dataset.labels, dataset.weights)\n', (2810, 2854), False, 'from pB_approximator import pB_Approximator\n'), ((2959, 2971), 'trimmer.Trimmer', 'Trimmer', (['pBs'], {}), '(pBs)\n', (2966, 2971), False, 'from trimmer import Trimmer\n'), ((3102, 3140), 'squeezer.Squeezer.squeeze_pBs', 'Squeezer.squeeze_pBs', (['pBs', 'self._const'], {}), '(pBs, self._const)\n', (3122, 3140), False, 'from squeezer import Squeezer\n'), ((3218, 3239), 'normalizer.Normalizer', 'Normalizer', (['snapshots'], {}), '(snapshots)\n', (3228, 3239), False, 
'from normalizer import Normalizer\n'), ((3381, 3431), 'balancer.Balancer.pB_balance', 'Balancer.pB_balance', (['pBs', 'self._const.balance_bins'], {}), '(pBs, self._const.balance_bins)\n', (3400, 3431), False, 'from balancer import Balancer\n'), ((3526, 3589), 'balancer.Balancer.hypercube_balance', 'Balancer.hypercube_balance', (['snapshots', 'self._const.balance_bins'], {}), '(snapshots, self._const.balance_bins)\n', (3552, 3589), False, 'from balancer import Balancer\n'), ((3696, 3758), 'balancer.Balancer.multidim_balance', 'Balancer.multidim_balance', (['snapshots', 'self._const.balance_bins'], {}), '(snapshots, self._const.balance_bins)\n', (3721, 3758), False, 'from balancer import Balancer\n'), ((3854, 3895), 'corrector.Corrector.get_means_for_1D_row', 'Corrector.get_means_for_1D_row', (['snapshots'], {}), '(snapshots)\n', (3884, 3895), False, 'from corrector import Corrector\n'), ((3951, 3993), 'corrector.Corrector.get_means_for_2D_grid', 'Corrector.get_means_for_2D_grid', (['snapshots'], {}), '(snapshots)\n', (3982, 3993), False, 'from corrector import Corrector\n'), ((4129, 4392), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['({self._const.input_name: snapshots}, {self._const.output_name_1: labels,\n self._const.output_name_2: snapshots}, {self._const.output_name_1:\n prediction_weights, self._const.output_name_2: reconstruction_weights})'], {}), '(({self._const.input_name: snapshots}, {\n self._const.output_name_1: labels, self._const.output_name_2: snapshots\n }, {self._const.output_name_1: prediction_weights, self._const.\n output_name_2: reconstruction_weights}))\n', (4163, 4392), True, 'import tensorflow as tf\n')] |
import helpers
import numpy as np
from waveform import Waveform
class Chord:
    """A chord: a named collection of notes that can be rendered to audio."""

    def __init__(self, root=None, quality=None, name='', notes=None):
        '''
        Parameters
        ----------
        root : str
            The root note of this chord. eg) 'C#4'
        quality : str
            The quality of this chord. eg) 'major'
        name : str
            Optional display name; generated from root/quality when omitted
        notes : list of Note
            Used instead of root/quality if given
        '''
        self.root = root
        self.quality = quality
        self.name = name
        self.notes = []
        if notes is None:
            # Build the chord from the root note plus the semitone offsets
            # that define the requested quality.
            root_note = helpers.get_note_by_name(self.root)
            for offset in helpers.qualities[self.quality]:
                note_number = root_note.number + offset
                self.notes.append(helpers.get_note_by_number(note_number))
            # Generate a display name only when none was supplied.
            if name == '':
                self.name = f'{root} {quality}'
        else:
            # Caller supplied the notes directly.
            self.notes = notes

    def get_waveform(self, sample_rate=44100, duration=4):
        '''
        Parameters
        ----------
        sample_rate : int
            How many points will represent the waveform per second
        duration : float
            How long, in seconds, the waveform will be

        Returns
        -------
        Waveform
            The waveform for this chord
        '''
        total_samples = sample_rate * duration
        step = duration / total_samples
        # The time axis is identical for every note, so compute it once.
        t = np.arange(0, duration, step)
        # Starting from zeros also fixes an UnboundLocalError when the chord
        # has no notes.
        points = np.zeros_like(t)
        # Sum one velocity-scaled sine per note.  The previous version applied
        # np.sin twice (sin(sin(2*pi*f*t))), which distorted every partial.
        for note in self.notes:
            points += np.sin(2 * np.pi * note.frequency * t) * note.velocity
        return Waveform(points, sample_rate)

    def print_info(self):
        """Print the chord's quality and its notes' frequencies and velocities."""
        print(self.quality)
        # The old implementation read self.note_names / self.note_frequencies,
        # attributes that were never set, so it always raised AttributeError.
        # Derive the information from the Note objects instead.
        print([note.frequency for note in self.notes])
        print([note.velocity for note in self.notes])
| [
"helpers.get_note_by_name",
"waveform.Waveform",
"numpy.sin",
"numpy.arange",
"helpers.get_note_by_number"
] | [((2003, 2032), 'waveform.Waveform', 'Waveform', (['points', 'sample_rate'], {}), '(points, sample_rate)\n', (2011, 2032), False, 'from waveform import Waveform\n'), ((643, 678), 'helpers.get_note_by_name', 'helpers.get_note_by_name', (['self.root'], {}), '(self.root)\n', (667, 678), False, 'import helpers\n'), ((1709, 1737), 'numpy.arange', 'np.arange', (['(0)', 'duration', 'step'], {}), '(0, duration, step)\n', (1718, 1737), True, 'import numpy as np\n'), ((1754, 1792), 'numpy.sin', 'np.sin', (['(2 * np.pi * note.frequency * t)'], {}), '(2 * np.pi * note.frequency * t)\n', (1760, 1792), True, 'import numpy as np\n'), ((830, 869), 'helpers.get_note_by_number', 'helpers.get_note_by_number', (['note_number'], {}), '(note_number)\n', (856, 869), False, 'import helpers\n'), ((1846, 1855), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1852, 1855), True, 'import numpy as np\n'), ((1916, 1925), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1922, 1925), True, 'import numpy as np\n')] |
from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
    """
    Take a cell and complete certain molecules

    The selected molecules are made whole while every other atom of the cell
    is left intact. Note that the input atoms are transformed and are the
    same as are present in the output.

    Parameters
    ----------
    labels : int or list of ints
        The number of the atoms from which the molecules are generated
    Returns
    -------
    new_mol : Mol object
        The now complete molecule
    new_cell : Mol object
        The cell with the completed molecule

    """
    full_mol, scattered = self.per_select(labels, old_pos=True)
    # Keep a deep copy of every atom that was not part of the scattered
    # selection...
    kept_atoms = deepcopy([atom for atom in self.atoms if atom not in scattered])
    out_cell = self.copy()
    out_cell.atoms = kept_atoms
    # ...then put the translated (complete) molecule back into the cell.
    for atom in full_mol:
        out_cell.append(atom.copy())
    return full_mol, out_cell
def complete_cell(self):
    """
    Return a cell where atoms have been translated to complete all molecules
    of the cell

    Returns
    -------
    out_cell : Mol object
        The new untruncated cell
    full_mol_l : list of Mol objects
        Each molecule in the untruncated cell

    """
    molecules = []
    remaining = self.copy()
    # Peel off one complete molecule at a time until no atoms are left.
    while len(remaining) != 0:
        completed, remaining = remaining.complete_mol(0)
        molecules.append(completed)
        for atom in completed:
            if atom in remaining:
                remaining.remove(atom)
    # remaining is conveniently an empty Mol now; refill it with the
    # completed molecules to build the output cell.
    out_cell = remaining
    for molecule in molecules:
        out_cell.extend(molecule)
    return out_cell, molecules
def supercell(self, trans):
    """
    Return a supercell of I x J x K

    Parameters
    ----------
    trans : array-like of length 3
        Multiplications of the primitive cell
    Returns
    -------
    supercell : Mol object
        New supercell with adjusted lattice vectors

    """
    import fromage.utils.mol as mol_init
    # Ensure numpy semantics for the arithmetic below.
    trans = np.array(trans)
    new_cell = self.empty_mol()
    for mult_a in range(trans[0]):
        for mult_b in range(trans[1]):
            for mult_c in range(trans[2]):
                shift = (mult_a * self.vectors[0]
                         + mult_b * self.vectors[1]
                         + mult_c * self.vectors[2])
                new_cell += mol_init.Mol(
                    [atom.v_translated(shift) for atom in self.atoms])
    # Scale each lattice vector by its multiplicity.
    new_cell.vectors = (self.vectors.T * trans.transpose()).T
    return new_cell
def centered_supercell(self, trans, from_origin=False):
    """
    Make a bigger supercell out of an input cell.

    The cell is multiplied positively and negatively through each lattice
    vector so that the supercluster ends up being
    (1+2*trans[0])*(1+2*trans[1])*(1+2*trans[2]) times larger. For example if the
    input is 1,1,1 for a cubic unit cell, the output will be the original unit
    cell surrounded by 26 other unit cells forming a total 3x3x3 cube.

    Alternatively, the multiplication can be centered around the origin, a corner of the
    unit cell, instead of the centre. In that case the supercluster ends up being
    only (2*trans[0])*(2*trans[1])*(2*trans[2])

    Parameters
    ----------
    trans : numpy array of length 3
        Multiplications of the primitive cell
        NOTE(review): must really be a numpy array — ``trans.transpose()``
        is called below, so a plain list would raise AttributeError.
    from_origin : bool
        Determines the kind of multiplication. True is corner of the cell as
        the center, False is middle of the cell.

    Returns
    -------
    mega_cell : Mol object
        The resulting supercell

    """
    import fromage.utils.mol as mol_init
    # Per-axis translation ranges: range(-t, t) gives 2t cells per axis
    # (origin-cornered), range(-t, t + 1) gives 2t+1 (centre cell included).
    trans_series = [0, 0, 0]
    for i, tra in enumerate(trans):
        if from_origin:
            trans_series[i] = list(range(-tra, tra))
        else:
            trans_series[i] = list(range(-tra, tra + 1))
    trans_series = np.array(trans_series)
    new_cell = self.empty_mol()
    # Translate a full copy of the cell's atoms into each cell image.
    for a_mult in trans_series[0]:
        for b_mult in trans_series[1]:
            for c_mult in trans_series[2]:
                vector = a_mult * \
                    self.vectors[0] + b_mult * \
                    self.vectors[1] + c_mult * self.vectors[2]
                new_atoms = mol_init.Mol([i.v_translated(vector)
                                          for i in self.atoms])
                new_cell += new_atoms
    # NOTE(review): the output lattice vectors are scaled by ``trans`` only,
    # not by the 2*trans(+1) cells generated per axis — confirm downstream
    # code expects this.
    out_vec = (self.vectors.T * trans.transpose()).T
    new_cell.vectors = out_vec
    return new_cell
def trans_from_rad(self, clust_rad):
    """
    Generate the translations necessary to encapsulate a sphere of given rad

    Parameters
    ----------
    clust_rad : float
        Radius defining a sphere
    Returns
    -------
    trans_count : 3 x 1 numpy array
        The translations required for the unit cell to contain the sphere

    """
    # Unit normals to the three pairs of cell faces.
    face_normals = [np.cross(self.vectors[1], self.vectors[2]),
                    np.cross(self.vectors[2], self.vectors[0]),
                    np.cross(self.vectors[0], self.vectors[1])]
    perp = np.array([normal / np.linalg.norm(normal)
                     for normal in face_normals])
    # Translation count per axis, the growing supercell's lattice vectors,
    # and the perpendicular distance from the origin to each face.
    trans_count = np.array([0, 0, 0])
    supercell_vectors = np.zeros((3, 3))
    distances = np.array([0.0, 0.0, 0.0])
    # Grow each axis until the corresponding face clears the sphere.
    for axis in range(3):
        while distances[axis] <= clust_rad:
            trans_count[axis] += 1
            supercell_vectors[axis] = trans_count[axis] * self.vectors[axis]
            distances[axis] = np.dot(supercell_vectors[axis], perp[axis])
    return trans_count
def supercell_for_cluster(self, clust_rad, mode='exc', central_mol=None):
    """
    Make a supercell which will be used to make a cluster

    Parameters
    ----------
    clust_rad : float
        The radius of the cluster
    mode : str
        'exc' is for exclusive clusters, whereas 'inc' is for inclusive
        clusters which will need an extra layer of unit cells
    central_mol : Mol
        Molecule which serves as a center for the cluster (optional)
    Returns
    -------
    out_supercell : Mol
        The supercell from which the cluster will be taken

    """
    # If there is a central mol, pad the radius by the molecule's extent so
    # that nearest-neighbour molecules bleeding out of the original radius
    # are still enclosed.
    if central_mol:
        central_rad = 0
        for atom in central_mol:
            dis = atom.v_dist([0, 0, 0])
            # BUG FIX: this used to read ``dis < central_rad``, which can
            # never be true starting from 0, so the padding was always 0.
            # The intent is the distance of the furthest atom.
            if dis > central_rad:
                central_rad = dis
        trans = self.trans_from_rad(clust_rad + central_rad)
    else:
        trans = self.trans_from_rad(clust_rad)
    # Inclusive clusters may pull molecules from one extra cell layer.
    if mode == 'inc':
        trans += np.array([1, 1, 1])  # one buffer cell layer
    # make a supercell which includes the desired cluster
    out_supercell = self.centered_supercell(trans, from_origin=True)
    return out_supercell
def gen_exclusive_clust(self, seed_atoms):
    """
    Remove all non complete molecules

    This only works if the input contains at least one full molecule

    Parameters
    ----------
    seed_atoms : Mol
        Aggregate of molecules, not necessarily all complete
    Returns
    -------
    out_clust : Mol
        The full molecules of seed_atoms

    """
    import fromage.utils.mol as mol_init
    max_mol_len = 0
    while len(seed_atoms) > 0:
        # Pick one molecule out of the remaining seed atoms.
        candidate = seed_atoms.select(0)
        if len(candidate) > max_mol_len:
            # A bigger molecule was found, so everything accepted so far was
            # actually incomplete: reset the output.
            max_mol_len = len(candidate)
            out_clust = mol_init.Mol([])
        if len(candidate) == max_mol_len:
            # The molecule is full-size: keep it.
            out_clust += candidate
        # Remove the molecule from the pool whether it was kept or not.
        for atom in candidate:
            seed_atoms.remove(atom)
    return out_clust
def gen_inclusive_clust(self, seed_atoms, supercell):
    """
    Select all complete molecules of supercell which contain seed atoms

    This only works if the input contains at least one full molecule

    Note that both ``seed_atoms`` and ``supercell`` are consumed (mutated
    in place) by this function.

    Parameters
    ----------
    seed_atoms : Mol
        Aggregate of molecules, not necessarily all complete
    supercell : Mol
        Supercell which contains all seed atoms. It should have at least one
        buffer layer of unit cells around the seed atoms
    Returns
    -------
    out_clust : Mol
        The full molecules of supercell which contain seed atoms

    """
    import fromage.utils.mol as mol_init
    max_mol_len = 0
    out_clust = mol_init.Mol([])
    # here, the molecule with the atom seed_atoms[0] is necessarily complete
    # in supercell
    # NOTE(review): this assumes every molecule in the cell has the same
    # atom count as this first one — confirm for multi-species crystals.
    max_mol_len = len(supercell.select(supercell.index(seed_atoms[0])))
    while len(seed_atoms) > 0:
        # the part of the mol detected in seed_atoms
        mol_tmp = seed_atoms.select(0)
        if len(mol_tmp) < max_mol_len:
            # The whole mol, which could potentially include even more
            # seed_atoms
            mol = supercell.select(supercell.index(seed_atoms[0]))
        else:
            mol = mol_tmp
        out_clust += mol
        for atom in mol_tmp:
            seed_atoms.remove(atom)
        for atom in mol:
            supercell.remove(atom)
            # remove all atoms of the mol which are part of seed_atoms
            # (atoms already removed via mol_tmp raise ValueError, which is
            # deliberately ignored)
            try:
                seed_atoms.remove(atom)
            except ValueError:
                pass
    return out_clust
def make_cluster(self, clust_rad, mode='exc', central_mol=None):
    """
    Generate a cluster of molecules from a primitive cell

    A supercell of the required size (with one extra buffer shell for
    inclusive clusters) is built first, and the cluster is then carved out
    of it by connectivity. If a central molecule is supplied, the selection
    sphere becomes the union of spheres centred on each of its atoms.

    This algorithm is designed for crystals where one same molecule does not
    extend into more than two unit cells in the case of inclusive clusters.

    Parameters
    ----------
    clust_rad : float
        Radius for generating the cluster. If no central molecule is
        specified, this will generate seed atoms in a sphere from the radius
    mode : str
        Switches between inclusive and exclusive selecting. Inclusive,
        'inc', selects all molecules which have atoms within the radius.
        Exclusive, 'exc', selects all molecules fully in the radius.
        Default: 'exc'
    central_mol : Mol
        If this is supplied, the central molecule will act as a kernel for
        the cluster which will end up being of the appropriate shape.
        (optional)
    Returns
    -------
    cluster : Mol object
        Cluster of molecules from their crystal positions

    """
    import fromage.utils.mol as mol_init
    # Supercell guaranteed to contain the whole cluster (plus buffer layer
    # for inclusive mode, plus the central molecule's extent if given).
    supercell = self.supercell_for_cluster(
        clust_rad, mode=mode, central_mol=central_mol)
    # seed_atoms initialises the cluster; carry over the bonding settings.
    seed_atoms = mol_init.Mol([])
    seed_atoms.bonding = supercell.bonding
    seed_atoms.thresh = supercell.thresh
    if central_mol:
        # Union of spheres centred on each atom of the central molecule.
        for atom_i in supercell:
            for atom_j in central_mol:
                if atom_i.dist(atom_j) < clust_rad:
                    seed_atoms.append(atom_i)
                    break
    else:
        # Single sphere centred on the origin.
        for atom in supercell:
            if atom.v_dist([0, 0, 0]) < clust_rad:
                seed_atoms.append(atom)
    # Exclusive: drop incomplete molecules. Inclusive: complete them from
    # the supercell.
    if mode == 'exc':
        return self.gen_exclusive_clust(seed_atoms)
    if mode == 'inc':
        return self.gen_inclusive_clust(seed_atoms, supercell)
    raise ValueError("Invalid cluster generation mode. Use 'exc' or 'inc'")
def centered_mols(self, labels, return_trans=False):
    """
    Return the molecules translated at the origin with a corresponding cell

    Parameters
    ----------
    labels : int or list of ints
        The labels of the atoms to select
    return_trans : bool
        If True, also return the translation vector that was applied
        (the negated centroid)
    Returns
    -------
    mol : Mol object
        The selected molecules with their centroid at the origin
    mod_cell : Mol object
        The new confined cell corresponding to the now translated molecules

    """
    mol, mod_cell = self.complete_mol(labels)
    # Shift both the molecule and the cell so the centroid lands on the
    # origin, then wrap the cell back into the primitive box.
    shift = -mol.centroid()
    mol.translate(shift)
    mod_cell.translate(shift)
    mod_cell = mod_cell.confined()
    return (mol, mod_cell, shift) if return_trans else (mol, mod_cell)
def confined(self):
    """Move all atoms to fit inside the primitive cell"""
    # Round-tripping through fractional coordinates wraps every position
    # back into the primitive cell.
    return self.dir_to_frac_pos().frac_to_dir_pos()
| [
"copy.deepcopy",
"numpy.cross",
"numpy.linalg.norm",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"fromage.utils.mol.Mol"
] | [((769, 828), 'copy.deepcopy', 'deepcopy', (['[a for a in self.atoms if a not in scattered_mol]'], {}), '([a for a in self.atoms if a not in scattered_mol])\n', (777, 828), False, 'from copy import deepcopy\n'), ((2143, 2158), 'numpy.array', 'np.array', (['trans'], {}), '(trans)\n', (2151, 2158), True, 'import numpy as np\n'), ((4050, 4072), 'numpy.array', 'np.array', (['trans_series'], {}), '(trans_series)\n', (4058, 4072), True, 'import numpy as np\n'), ((5039, 5081), 'numpy.cross', 'np.cross', (['self.vectors[1]', 'self.vectors[2]'], {}), '(self.vectors[1], self.vectors[2])\n', (5047, 5081), True, 'import numpy as np\n'), ((5095, 5137), 'numpy.cross', 'np.cross', (['self.vectors[2]', 'self.vectors[0]'], {}), '(self.vectors[2], self.vectors[0])\n', (5103, 5137), True, 'import numpy as np\n'), ((5151, 5193), 'numpy.cross', 'np.cross', (['self.vectors[0]', 'self.vectors[1]'], {}), '(self.vectors[0], self.vectors[1])\n', (5159, 5193), True, 'import numpy as np\n'), ((5451, 5470), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (5459, 5470), True, 'import numpy as np\n'), ((5544, 5560), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5552, 5560), True, 'import numpy as np\n'), ((5618, 5643), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5626, 5643), True, 'import numpy as np\n'), ((9107, 9123), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (9119, 9123), True, 'import fromage.utils.mol as mol_init\n'), ((11833, 11849), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (11845, 11849), True, 'import fromage.utils.mol as mol_init\n'), ((7200, 7219), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (7208, 7219), True, 'import numpy as np\n'), ((5889, 5932), 'numpy.dot', 'np.dot', (['supercell_vectors[comp]', 'perp[comp]'], {}), '(supercell_vectors[comp], perp[comp])\n', (5895, 5932), True, 'import numpy as np\n'), ((8144, 8160), 'fromage.utils.mol.Mol', 
'mol_init.Mol', (['[]'], {}), '([])\n', (8156, 8160), True, 'import fromage.utils.mol as mol_init\n'), ((5265, 5287), 'numpy.linalg.norm', 'np.linalg.norm', (['a_perp'], {}), '(a_perp)\n', (5279, 5287), True, 'import numpy as np\n'), ((5319, 5341), 'numpy.linalg.norm', 'np.linalg.norm', (['b_perp'], {}), '(b_perp)\n', (5333, 5341), True, 'import numpy as np\n'), ((5352, 5374), 'numpy.linalg.norm', 'np.linalg.norm', (['c_perp'], {}), '(c_perp)\n', (5366, 5374), True, 'import numpy as np\n')] |
"""Convert a text embedding dump into compressed numpy weights plus pickled metadata.

Each input line has the form "<path> <num> <num> ...".  The second-to-last
path component is split on "." into (wnid, class, _) — see the parse below —
and the last component is the file name.  The numeric vectors are stacked
into one array, saved as a gzipped float32 ``.npy``, and the parsed path
components are pickled alongside it.
"""
import numpy as np
import pickle
import gzip

FILE_NAME = "../../data/big/mcrae-wordnet-vgg16.emb"

wnids = []
classes = []
weights = []
fnames = []

print("reading source")
with open(FILE_NAME, "r") as fin:
    for line in fin:
        # The first space separates the identifier path from the vector.
        name, numbers = line.split(" ", 1)
        wnid, cls, _ = name.split("/")[-2].split(".")
        fname = name.split("/")[-1]
        wnids.append(wnid)
        fnames.append(fname)
        classes.append(cls)
        # np.fromstring(text, sep=...) is deprecated; parse explicitly.
        weights.append(np.array(numbers.split(), dtype=float))
weights = np.stack(weights)

print("saving weights")
with gzip.open(FILE_NAME + ".npy.gz", "wb") as out:
    np.save(out, weights.astype("float32"))

print("saving metadata")
with open(FILE_NAME + ".meta.pkl", "wb") as out:
    pickle.dump({"categories": wnids, "classes": classes, "fnames": fnames},
                out, protocol=pickle.HIGHEST_PROTOCOL)
| [
"numpy.stack",
"numpy.fromstring",
"pickle.dump",
"gzip.open"
] | [((527, 544), 'numpy.stack', 'np.stack', (['weights'], {}), '(weights)\n', (535, 544), True, 'import numpy as np\n'), ((576, 614), 'gzip.open', 'gzip.open', (["(FILE_NAME + '.npy.gz')", '"""wb"""'], {}), "(FILE_NAME + '.npy.gz', 'wb')\n", (585, 614), False, 'import gzip\n'), ((743, 858), 'pickle.dump', 'pickle.dump', (["{'categories': wnids, 'classes': classes, 'fnames': fnames}", 'out'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), "({'categories': wnids, 'classes': classes, 'fnames': fnames},\n out, protocol=pickle.HIGHEST_PROTOCOL)\n", (754, 858), False, 'import pickle\n'), ((470, 514), 'numpy.fromstring', 'np.fromstring', (['numbers'], {'dtype': 'float', 'sep': '""" """'}), "(numbers, dtype=float, sep=' ')\n", (483, 514), True, 'import numpy as np\n')] |
"""
these are my data analysis functions
used to download and process some temperature
time series from berkeley Earth.
"""
import numpy as np
import requests
def generate_url(location):
    """Build the Berkeley Earth TAVG trend-file URL for a region.

    :param location: Region name; lower-cased before being inserted
        into the URL path.
    :return: Full URL string of the trend text file.
    """
    base = 'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/'
    return base + location.lower() + '-TAVG-Trend.txt'
def download_data(location):
    """Download and parse the Berkeley Earth TAVG trend table for *location*.

    Fetches the trend text file over HTTP and loads it into a numpy
    array, skipping the '%'-prefixed header/comment lines.

    :param location: Region name, forwarded to :func:`generate_url`.
    :return: 2-D numpy array of the parsed table.
    """
    response = requests.get(generate_url(location))
    table = np.loadtxt(response.iter_lines(), comments="%")
    return table
def moving_avg(data, width):
    """Compute a running mean over a sliding window of ``2 * width`` samples.

    Entries closer than *width* samples to either edge of the array are
    left as NaN.

    :param data: Input data array (numpy array).
    :param width: Half-width of the averaging window, in samples.
    :return: Array of the same size as *data* holding the running mean.
    """
    result = np.full(data.size, np.nan)
    for center in range(width, data.size - width):
        window = data[center - width:center + width]
        result[center] = np.mean(window)
    return result
"numpy.full",
"numpy.mean",
"requests.get"
] | [((565, 582), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (577, 582), False, 'import requests\n'), ((953, 979), 'numpy.full', 'np.full', (['data.size', 'np.nan'], {}), '(data.size, np.nan)\n', (960, 979), True, 'import numpy as np\n'), ((1050, 1084), 'numpy.mean', 'np.mean', (['data[i - width:i + width]'], {}), '(data[i - width:i + width])\n', (1057, 1084), True, 'import numpy as np\n')] |
"""
A Monte Carlo simulation to compute size distortion in hypothesis testing in an OLS context
"""
import numpy as np
import matplotlib as plt
# generate artificial dataset (could use real data if available)
np.random.seed(0)
n = 4 # the higher is n, the less wrong we are about using +/-1.96 as critical values,
# the closer is the empirical size to 5%
sigma_u = 30
beta = 7
X = np.random.normal(0, 10, n).reshape((n, 1))
u = np.random.normal(0, sigma_u, n).reshape((n, 1))
y = X.dot(beta) + u
# monte carlo simulation
beta_zero = 5
XX_inv = np.linalg.inv(np.transpose(X).dot(X))
A = XX_inv.dot(np.transpose(X))
b = X.dot(beta_zero)
number_of_rejections = 0
reps = 100000
for m in range(1, reps + 1):
u_m = np.random.normal(0, sigma_u, n).reshape((n, 1))
y_m = b + u_m
betahat_m = A.dot(y_m)
uhat = y_m - X.dot(betahat_m)
s2_m = np.transpose(uhat).dot(uhat) / (n - 1)
t_m = (betahat_m - beta_zero) / np.sqrt(s2_m * XX_inv)
number_of_rejections += abs(t_m) > 1.96
empirical_size = number_of_rejections / reps
print('The empirical size is %.10f' % empirical_size)
print('The size distortion is |theoretical-empirical|=|5%%-%.4f%%|=%.4f' % (empirical_size * 100,
abs(5 - empirical_size * 100)))
| [
"numpy.random.normal",
"numpy.transpose",
"numpy.sqrt",
"numpy.random.seed"
] | [((211, 228), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (225, 228), True, 'import numpy as np\n'), ((601, 616), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (613, 616), True, 'import numpy as np\n'), ((384, 410), 'numpy.random.normal', 'np.random.normal', (['(0)', '(10)', 'n'], {}), '(0, 10, n)\n', (400, 410), True, 'import numpy as np\n'), ((431, 462), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma_u', 'n'], {}), '(0, sigma_u, n)\n', (447, 462), True, 'import numpy as np\n'), ((930, 952), 'numpy.sqrt', 'np.sqrt', (['(s2_m * XX_inv)'], {}), '(s2_m * XX_inv)\n', (937, 952), True, 'import numpy as np\n'), ((562, 577), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (574, 577), True, 'import numpy as np\n'), ((717, 748), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma_u', 'n'], {}), '(0, sigma_u, n)\n', (733, 748), True, 'import numpy as np\n'), ((855, 873), 'numpy.transpose', 'np.transpose', (['uhat'], {}), '(uhat)\n', (867, 873), True, 'import numpy as np\n')] |
import numpy as np
from chroma.transform import rotate
def uniform_sphere(size=None, dtype=np.double):
    """
    Generate random points isotropically distributed across the unit sphere.

    Args:
        - size: int, *optional*
            Number of points to generate. If no size is specified, a single
            point is returned as a length-3 array.

    Source: Weisstein, <NAME>. "Sphere Point Picking." Mathworld.
    """
    # Azimuth uniform on [0, 2*pi); z-coordinate uniform on [-1, 1].
    theta = np.random.uniform(0.0, 2 * np.pi, size)
    z = np.random.uniform(-1.0, 1.0, size)
    radial = np.sqrt(1 - z ** 2)
    x = radial * np.cos(theta)
    y = radial * np.sin(theta)
    if size is None:
        return np.array([x, y, z])
    out = np.empty((size, 3), dtype)
    out[:, 0] = x
    out[:, 1] = y
    out[:, 2] = z
    return out
def flashlight(phi=np.pi/4, direction=(0,0,1), size=None, dtype=np.double):
    """Sample directions uniformly within a cone ("flashlight" beam).

    Args:
        - phi: cone half-angle in radians (default pi/4).
        - direction: axis of the cone; need not be normalized.
        - size: int, *optional*
            Number of points; a single point is returned when omitted.
    """
    # Sample the cone about +z first: z-coordinate uniform on [cos(phi), 1].
    theta = np.random.uniform(0.0, 2 * np.pi, size)
    u = np.random.uniform(np.cos(phi), 1, size)
    c = np.sqrt(1 - u ** 2)
    # Rotation that carries +z onto the requested direction.
    if np.equal(direction, (0, 0, 1)).all():
        rotation_axis = (0, 0, 1)
        rotation_angle = 0.0
    else:
        rotation_axis = np.cross((0, 0, 1), direction)
        cos_angle = np.dot(direction, (0, 0, 1)) / np.linalg.norm(direction)
        rotation_angle = -np.arccos(cos_angle)
    if size is None:
        point = np.array([c * np.cos(theta), c * np.sin(theta), u])
        return rotate(point, rotation_angle, rotation_axis)
    points = np.empty((size, 3), dtype)
    points[:, 0] = c * np.cos(theta)
    points[:, 1] = c * np.sin(theta)
    points[:, 2] = u
    return rotate(points, rotation_angle, rotation_axis)
| [
"numpy.sqrt",
"numpy.cross",
"numpy.linalg.norm",
"numpy.equal",
"chroma.transform.rotate",
"numpy.dot",
"numpy.empty",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin"
] | [((524, 543), 'numpy.sqrt', 'np.sqrt', (['(1 - u ** 2)'], {}), '(1 - u ** 2)\n', (531, 543), True, 'import numpy as np\n'), ((639, 665), 'numpy.empty', 'np.empty', (['(size, 3)', 'dtype'], {}), '((size, 3), dtype)\n', (647, 665), True, 'import numpy as np\n'), ((964, 983), 'numpy.sqrt', 'np.sqrt', (['(1 - u ** 2)'], {}), '(1 - u ** 2)\n', (971, 983), True, 'import numpy as np\n'), ((1412, 1438), 'numpy.empty', 'np.empty', (['(size, 3)', 'dtype'], {}), '((size, 3), dtype)\n', (1420, 1438), True, 'import numpy as np\n'), ((1540, 1585), 'chroma.transform.rotate', 'rotate', (['points', 'rotation_angle', 'rotation_axis'], {}), '(points, rotation_angle, rotation_axis)\n', (1546, 1585), False, 'from chroma.transform import rotate\n'), ((431, 470), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2 * np.pi)', 'size'], {}), '(0.0, 2 * np.pi, size)\n', (448, 470), True, 'import numpy as np\n'), ((480, 514), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', 'size'], {}), '(-1.0, 1.0, size)\n', (497, 514), True, 'import numpy as np\n'), ((687, 700), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (693, 700), True, 'import numpy as np\n'), ((721, 734), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (727, 734), True, 'import numpy as np\n'), ((866, 905), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2 * np.pi)', 'size'], {}), '(0.0, 2 * np.pi, size)\n', (883, 905), True, 'import numpy as np\n'), ((1119, 1149), 'numpy.cross', 'np.cross', (['(0, 0, 1)', 'direction'], {}), '((0, 0, 1), direction)\n', (1127, 1149), True, 'import numpy as np\n'), ((1460, 1473), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1466, 1473), True, 'import numpy as np\n'), ((1494, 1507), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1500, 1507), True, 'import numpy as np\n'), ((933, 944), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (939, 944), True, 'import numpy as np\n'), ((988, 1018), 'numpy.equal', 'np.equal', (['direction', '(0, 
0, 1)'], {}), '(direction, (0, 0, 1))\n', (996, 1018), True, 'import numpy as np\n'), ((589, 602), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (595, 602), True, 'import numpy as np\n'), ((606, 619), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (612, 619), True, 'import numpy as np\n'), ((1198, 1226), 'numpy.dot', 'np.dot', (['direction', '(0, 0, 1)'], {}), '(direction, (0, 0, 1))\n', (1204, 1226), True, 'import numpy as np\n'), ((1225, 1250), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (1239, 1250), True, 'import numpy as np\n'), ((1308, 1321), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1314, 1321), True, 'import numpy as np\n'), ((1325, 1338), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1331, 1338), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
"""Fit pycecream to a set of synthetic light curves.

Generates fake continuum and emission-line light curves with
astropy_stark.myfake, fits them with pycecream, pulls out the MCMC
chains, and finally removes the simulation directory.
"""
import astropy_stark.myfake as mf
import matplotlib.pylab as plt
import numpy as np
import shutil

output_directory = 'fit_synthetic_lightcurves'

# mf.myfake arguments:
#   wavelengths: the wavelengths (-1 indicates an emission-line light curve
#       modelled with a top-hat response),
#   snr: signal-to-noise relative to light-curve rms,
#   cadence: mean cadence,
#   thcent: centroid for the top-hat line response.
synthetic_data = mf.myfake(
    [4000.0, 5000.0, 5000.0, 7000.0, -1.0, -1.0],
    [50.0, 50.0, 10.0, 50.0, 50, 10.],
    [1.0, 1.0, 2.0, 1.0, 1.0, 3.0],
    thcent=20.0
)
# Recover the synthetic data.
dat = synthetic_data['echo light curves']

# Section 2: set up and run PyceCREAM.
import pycecream

# Instantiate a pycecream object.
a = pycecream.pycecream()

# gfortran is pycecream's default compiler; re-set here only to show
# where a different fortran compiler would be configured.
a.fortran_caller = 'gfortran'

# Output directory for the results. pycecream creates it and raises an
# exception if it already exists, to avoid overwriting a previous
# simulation.
a.project_folder = output_directory

# Test light-curve merging by adding a vertical offset to dat[1].
d1 = np.array(dat[1])
d1[:, 1] = d1[:, 1] - np.mean(d1[:, 1]) + 232.
dat[1] = d1

# Add each of the light curves of the simulation ("dat" comes from the
# synthetic data generated above).
a.add_lc(dat[0],
         kind='continuum',
         wavelength=4000.,
         name='continuum 4000')
a.add_lc(dat[1],
         name='continuum 5000',
         kind='continuum',
         wavelength=5000.)
a.add_lc(dat[2],
         name='continuum 5000 (b)',
         kind='continuum',
         wavelength=5000.)
a.add_lc(dat[3],
         name='continuum 7000',
         kind='continuum',
         wavelength=7000.)
# Line light curves must be flagged with kind='line'.
a.add_lc(dat[4], name='test line 1', kind='line',
         background_offset_start=[10.0, 0.0],
         extra_variance_prior=[0.1, 1.0],
         multiplicative_errorbar_prior=[10.0, 0.0000001],
         vertical_scaling_start=[2.0, 0.5],
         vertical_scaling_prior=[0.0, 0.1],
         background_offset_prior=[5.0, 0.0001],
         tophat_width_prior=[0.0, -0.1],
         tophat_centroid_prior=[12.4, 0.00000001])
# share_previous_lag=True reuses the previous line's response function.
a.add_lc(dat[5], name='test line 1 (shared)', kind='line', share_previous_lag=True,
         background_offset_start=[10.0, 3.3], vertical_scaling_start=[2.0, 0.5])

# Number of MCMC iterations. Normally at least several thousand are
# necessary; a short run is used here just to check everything works.
a.N_iterations = 100

# Step size for the accretion-rate parameter (~0.1 solar masses / year).
a.p_accretion_rate_step = 0.1

# Check the input settings are ok prior to running.
print(a.lightcurve_input_params)

# Run the fit.
a.run()
op = a.get_flux_flux_analysis(plotfile='fluxflux.pdf', xlim=[-4, 4])
plt.show()

# Retrieve the MCMC chains.
chains = a.get_MCMC_chains()
fourier_chains = a.get_MCMC_fourier_chains()
cols = list(chains.columns)
fcols = [c for c in cols if 'noise m ' in c]
fchains = chains[fcols]

# Clean up the output directory. DO NOT do this for a real simulation,
# as it deletes all results. shutil.rmtree avoids shelling out to
# `rm -rf` via os.system (no shell string building, portable).
shutil.rmtree(output_directory, ignore_errors=True)
| [
"astropy_stark.myfake.myfake",
"numpy.mean",
"numpy.array",
"pycecream.pycecream",
"matplotlib.pylab.show",
"os.system"
] | [((597, 738), 'astropy_stark.myfake.myfake', 'mf.myfake', (['[4000.0, 5000.0, 5000.0, 7000.0, -1.0, -1.0]', '[50.0, 50.0, 10.0, 50.0, 50, 10.0]', '[1.0, 1.0, 2.0, 1.0, 1.0, 3.0]'], {'thcent': '(20.0)'}), '([4000.0, 5000.0, 5000.0, 7000.0, -1.0, -1.0], [50.0, 50.0, 10.0, \n 50.0, 50, 10.0], [1.0, 1.0, 2.0, 1.0, 1.0, 3.0], thcent=20.0)\n', (606, 738), True, 'import astropy_stark.myfake as mf\n'), ((935, 956), 'pycecream.pycecream', 'pycecream.pycecream', ([], {}), '()\n', (954, 956), False, 'import pycecream\n'), ((1687, 1703), 'numpy.array', 'np.array', (['dat[1]'], {}), '(dat[1])\n', (1695, 1703), True, 'import numpy as np\n'), ((3970, 3980), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3978, 3980), True, 'import matplotlib.pylab as plt\n'), ((4276, 4315), 'os.system', 'os.system', (["('rm -rf ' + output_directory)"], {}), "('rm -rf ' + output_directory)\n", (4285, 4315), False, 'import os\n'), ((1724, 1741), 'numpy.mean', 'np.mean', (['d1[:, 1]'], {}), '(d1[:, 1])\n', (1731, 1741), True, 'import numpy as np\n')] |
import numpy as np
def predict(arr):
    """Return the index of the largest entry of *arr* (the predicted class)."""
    winner = np.argmax(arr)
    return winner
def softmax(arr):
    """Return the softmax of *arr* as a probability distribution.

    The maximum score is subtracted before exponentiating, which is
    mathematically a no-op (it cancels in the ratio) but prevents
    ``np.exp`` from overflowing to inf/NaN for large scores.
    """
    shifted = np.exp(np.asarray(arr, dtype=float) - np.max(arr))
    return shifted / np.sum(shifted)
def compute_dot(length, width):
    """Return the class scores for a (length, width) feature pair.

    Applies hard-coded logistic-regression parameters (coefficients
    with the intercept folded into the last column, i.e.
    theta = np.c_[model.coef_, model.intercept_]) to the augmented
    feature vector [length, width, 1].
    """
    theta = np.array([[-4.58614563, -2.24129385, 18.87514796],
                      [0.16068263, -2.15860167, 6.3844344],
                      [4.425463, 4.39989552, -25.25958236]])
    features = np.array([length, width, 1])
    return theta.dot(features)
| [
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.argmax"
] | [((50, 64), 'numpy.argmax', 'np.argmax', (['arr'], {}), '(arr)\n', (59, 64), True, 'import numpy as np\n'), ((174, 305), 'numpy.array', 'np.array', (['[[-4.58614563, -2.24129385, 18.87514796], [0.16068263, -2.15860167, \n 6.3844344], [4.425463, 4.39989552, -25.25958236]]'], {}), '([[-4.58614563, -2.24129385, 18.87514796], [0.16068263, -2.15860167,\n 6.3844344], [4.425463, 4.39989552, -25.25958236]])\n', (182, 305), True, 'import numpy as np\n'), ((383, 411), 'numpy.array', 'np.array', (['[length, width, 1]'], {}), '([length, width, 1])\n', (391, 411), True, 'import numpy as np\n'), ((425, 441), 'numpy.dot', 'np.dot', (['theta', 'x'], {}), '(theta, x)\n', (431, 441), True, 'import numpy as np\n'), ((95, 106), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (101, 106), True, 'import numpy as np\n'), ((116, 127), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (122, 127), True, 'import numpy as np\n')] |
import os
import time
from copy import deepcopy
from pathlib import Path
from typing import List, Optional, cast
import hydra
import jax
import numpy as np
import ptvsd
import pytorch_lightning as pl
import wandb
from hydra.utils import instantiate
from omegaconf import OmegaConf
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins import DDPPlugin
from typer import Argument, Typer
from fourierflow.utils import (delete_old_results, get_experiment_id,
import_string, upload_code_to_wandb)
# Typer application exposing `main` as the single CLI entry point.
app = Typer()


@app.callback(invoke_without_command=True)
def main(config_path: Path,
         overrides: Optional[List[str]] = Argument(None),
         force: bool = False,
         resume: bool = False,
         checkpoint_id: Optional[str] = None,
         trial: int = 0,
         debug: bool = False,
         no_logging: bool = False):
    """Train a Pytorch Lightning experiment.

    Args:
        config_path: Path to the Hydra config file; its parent directory
            and stem select the config to compose.
        overrides: Hydra override strings forwarded to ``hydra.compose``.
        force: Forwarded to ``delete_old_results`` to clear old outputs.
        resume: Resume from the last checkpoint of a previous run.
        checkpoint_id: Existing experiment id to resume (see
            ``get_experiment_id``).
        trial: Trial number; seeds the RNG and names the W&B run.
        debug: Attach the ptvsd debugger, disable dataloader workers and
            JAX jit (both interfere with debugging).
        no_logging: Disable W&B logging, checkpointing and callbacks.
    """
    config_dir = config_path.parent
    config_name = config_path.stem
    hydra.initialize(config_path=Path('../..') /
                     config_dir, version_base='1.2')
    config = hydra.compose(config_name, overrides=overrides)
    # Allow adding new keys (e.g. config.seed below) to the composed config.
    OmegaConf.set_struct(config, False)

    # This debug mode is for those who use VS Code's internal debugger.
    if debug:
        ptvsd.enable_attach(address=('0.0.0.0', 5678))
        ptvsd.wait_for_attach()
        # ptvsd doesn't play well with multiple processes.
        config.builder.num_workers = 0
        jax.config.update('jax_disable_jit', True)
        # jax.config.update("jax_debug_nans", True)

    # Set up directories to save experimental outputs.
    delete_old_results(config_dir, force, trial, resume)

    # Set seed for reproducibility; a config-provided seed wins over the
    # trial-derived random one.
    rs = np.random.RandomState(7231 + trial)
    seed = config.get('seed', rs.randint(1000, 1000000))
    pl.seed_everything(seed, workers=True)
    config.seed = seed
    wandb_id = get_experiment_id(checkpoint_id, trial, config_dir, resume)
    config.trial = trial
    if 'seed' in config.trainer:
        config.trainer.seed = seed

    # Initialize the dataset and experiment modules.
    builder = instantiate(config.builder)
    routine = instantiate(config.routine)

    # Support fine-tuning mode if a pretrained model path is supplied.
    pretrained_path = config.get('pretrained_path', None)
    if pretrained_path:
        routine.load_lightning_model_state(pretrained_path)

    # Resume from last checkpoint. We assume that the checkpoint file is from
    # the end of the previous epoch. The trainer will start the next epoch.
    # Resuming from the middle of an epoch is not yet supported. See:
    # https://github.com/PyTorchLightning/pytorch-lightning/issues/5325
    chkpt_path = Path(config_dir) / 'checkpoints' / wandb_id / 'last.ckpt' \
        if resume else None

    # Initialize the main trainer.
    callbacks = [instantiate(p) for p in config.get('callbacks', [])]
    multi_gpus = config.trainer.get('gpus', 0) > 1
    plugins = DDPPlugin(find_unused_parameters=False) if multi_gpus else None
    if no_logging:
        logger = False
        enable_checkpointing = False
        callbacks = []
    else:
        # We use Weights & Biases to track our experiments.
        config.wandb.name = f"{config.wandb.group}/{trial}"
        wandb_opts = cast(dict, OmegaConf.to_container(config.wandb))
        logger = WandbLogger(save_dir=str(config_dir),
                             mode=os.environ.get('WANDB_MODE', 'offline'),
                             config=deepcopy(OmegaConf.to_container(config)),
                             id=wandb_id,
                             **wandb_opts)
        upload_code_to_wandb(Path(config_dir) / 'config.yaml', logger)
        enable_checkpointing = True
        # Keep the local W&B artifact cache bounded.
        c = wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache()
        c.cleanup(wandb.util.from_human_size("100GB"))

    # Trainer class is configurable; defaults to pytorch_lightning.Trainer.
    Trainer = import_string(config.trainer.pop(
        '_target_', 'pytorch_lightning.Trainer'))
    trainer = Trainer(logger=logger,
                      enable_checkpointing=enable_checkpointing,
                      callbacks=callbacks,
                      plugins=plugins,
                      weights_save_path=config_dir,
                      resume_from_checkpoint=chkpt_path,
                      enable_model_summary=False,
                      **OmegaConf.to_container(config.trainer))

    # Tuning only has an effect when either auto_scale_batch_size or
    # auto_lr_find is set to true.
    trainer.tune(routine, datamodule=builder)
    trainer.fit(routine, datamodule=builder)

    # Load best checkpoint before testing; exactly one per trial is expected.
    chkpt_dir = Path(config_dir) / 'checkpoints'
    paths = list(chkpt_dir.glob(f'trial-{trial}-*/epoch*.ckpt'))
    assert len(paths) == 1
    checkpoint_path = paths[0]
    routine.load_lightning_model_state(str(checkpoint_path))
    trainer.test(routine, datamodule=builder)

    # Compute inference time (per sample, per step) and log it to W&B.
    if logger:
        batch = builder.inference_data()
        T = batch['data'].shape[-1]
        n_steps = routine.n_steps or (T - 1)
        routine = routine.cuda()
        batch = routine.convert_data(batch)
        routine.warmup()
        start = time.time()
        routine.infer(batch)
        elapsed = time.time() - start
        elapsed /= len(batch['data'])
        elapsed /= routine.step_size * n_steps
        logger.experiment.log({'inference_time': elapsed})


if __name__ == "__main__":
    app()
| [
"wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache",
"wandb.util.from_human_size",
"fourierflow.utils.delete_old_results",
"numpy.random.RandomState",
"pathlib.Path",
"fourierflow.utils.get_experiment_id",
"pytorch_lightning.plugins.DDPPlugin",
"ptvsd.enable_attach",
"typer.Typer",
"ptvsd.wait_f... | [((560, 567), 'typer.Typer', 'Typer', ([], {}), '()\n', (565, 567), False, 'from typer import Argument, Typer\n'), ((683, 697), 'typer.Argument', 'Argument', (['None'], {}), '(None)\n', (691, 697), False, 'from typer import Argument, Typer\n'), ((1131, 1178), 'hydra.compose', 'hydra.compose', (['config_name'], {'overrides': 'overrides'}), '(config_name, overrides=overrides)\n', (1144, 1178), False, 'import hydra\n'), ((1183, 1218), 'omegaconf.OmegaConf.set_struct', 'OmegaConf.set_struct', (['config', '(False)'], {}), '(config, False)\n', (1203, 1218), False, 'from omegaconf import OmegaConf\n'), ((1654, 1706), 'fourierflow.utils.delete_old_results', 'delete_old_results', (['config_dir', 'force', 'trial', 'resume'], {}), '(config_dir, force, trial, resume)\n', (1672, 1706), False, 'from fourierflow.utils import delete_old_results, get_experiment_id, import_string, upload_code_to_wandb\n'), ((1753, 1788), 'numpy.random.RandomState', 'np.random.RandomState', (['(7231 + trial)'], {}), '(7231 + trial)\n', (1774, 1788), True, 'import numpy as np\n'), ((1850, 1888), 'pytorch_lightning.seed_everything', 'pl.seed_everything', (['seed'], {'workers': '(True)'}), '(seed, workers=True)\n', (1868, 1888), True, 'import pytorch_lightning as pl\n'), ((1927, 1986), 'fourierflow.utils.get_experiment_id', 'get_experiment_id', (['checkpoint_id', 'trial', 'config_dir', 'resume'], {}), '(checkpoint_id, trial, config_dir, resume)\n', (1944, 1986), False, 'from fourierflow.utils import delete_old_results, get_experiment_id, import_string, upload_code_to_wandb\n'), ((2148, 2175), 'hydra.utils.instantiate', 'instantiate', (['config.builder'], {}), '(config.builder)\n', (2159, 2175), False, 'from hydra.utils import instantiate\n'), ((2190, 2217), 'hydra.utils.instantiate', 'instantiate', (['config.routine'], {}), '(config.routine)\n', (2201, 2217), False, 'from hydra.utils import instantiate\n'), ((1314, 1360), 'ptvsd.enable_attach', 'ptvsd.enable_attach', ([], {'address': 
"('0.0.0.0', 5678)"}), "(address=('0.0.0.0', 5678))\n", (1333, 1360), False, 'import ptvsd\n'), ((1369, 1392), 'ptvsd.wait_for_attach', 'ptvsd.wait_for_attach', ([], {}), '()\n', (1390, 1392), False, 'import ptvsd\n'), ((1499, 1541), 'jax.config.update', 'jax.config.update', (['"""jax_disable_jit"""', '(True)'], {}), "('jax_disable_jit', True)\n", (1516, 1541), False, 'import jax\n'), ((2887, 2901), 'hydra.utils.instantiate', 'instantiate', (['p'], {}), '(p)\n', (2898, 2901), False, 'from hydra.utils import instantiate\n'), ((3005, 3044), 'pytorch_lightning.plugins.DDPPlugin', 'DDPPlugin', ([], {'find_unused_parameters': '(False)'}), '(find_unused_parameters=False)\n', (3014, 3044), False, 'from pytorch_lightning.plugins import DDPPlugin\n'), ((3783, 3836), 'wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache', 'wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache', ([], {}), '()\n', (3834, 3836), False, 'import wandb\n'), ((4654, 4670), 'pathlib.Path', 'Path', (['config_dir'], {}), '(config_dir)\n', (4658, 4670), False, 'from pathlib import Path\n'), ((5203, 5214), 'time.time', 'time.time', ([], {}), '()\n', (5212, 5214), False, 'import time\n'), ((3333, 3369), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['config.wandb'], {}), '(config.wandb)\n', (3355, 3369), False, 'from omegaconf import OmegaConf\n'), ((3855, 3890), 'wandb.util.from_human_size', 'wandb.util.from_human_size', (['"""100GB"""'], {}), "('100GB')\n", (3881, 3890), False, 'import wandb\n'), ((4358, 4396), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['config.trainer'], {}), '(config.trainer)\n', (4380, 4396), False, 'from omegaconf import OmegaConf\n'), ((5262, 5273), 'time.time', 'time.time', ([], {}), '()\n', (5271, 5273), False, 'import time\n'), ((1049, 1062), 'pathlib.Path', 'Path', (['"""../.."""'], {}), "('../..')\n", (1053, 1062), False, 'from pathlib import Path\n'), ((3460, 3499), 'os.environ.get', 'os.environ.get', (['"""WANDB_MODE"""', '"""offline"""'], 
{}), "('WANDB_MODE', 'offline')\n", (3474, 3499), False, 'import os\n'), ((3693, 3709), 'pathlib.Path', 'Path', (['config_dir'], {}), '(config_dir)\n', (3697, 3709), False, 'from pathlib import Path\n'), ((2746, 2762), 'pathlib.Path', 'Path', (['config_dir'], {}), '(config_dir)\n', (2750, 2762), False, 'from pathlib import Path\n'), ((3546, 3576), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['config'], {}), '(config)\n', (3568, 3576), False, 'from omegaconf import OmegaConf\n')] |
"""
YTreeArbor class and member functions
"""
#-----------------------------------------------------------------------------
# Copyright (c) ytree development team. All rights reserved.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import h5py
import json
import numpy as np
import os
from unyt.unit_registry import \
UnitRegistry
from yt.data_objects.data_containers import \
YTDataContainer
from yt.utilities.logger import \
ytLogger
from ytree.data_structures.arbor import \
Arbor
from ytree.frontends.ytree.io import \
YTreeDataFile, \
YTreeRootFieldIO, \
YTreeTreeFieldIO
from ytree.frontends.ytree.utilities import \
get_about, \
get_conditional
from ytree.utilities.io import \
_hdf5_yt_attr, \
parse_h5_attr
from ytree.utilities.logger import \
log_level
from ytree.yt_frontend import \
YTreeDataset
class YTreeArbor(Arbor):
"""
Class for Arbors created from the
:func:`~ytree.data_structures.arbor.Arbor.save_arbor`
or :func:`~ytree.data_structures.tree_node.TreeNode.save_tree` functions.
"""
_root_field_io_class = YTreeRootFieldIO
_tree_field_io_class = YTreeTreeFieldIO
_suffix = ".h5"
_node_io_attrs = ('_ai',)
def _node_io_loop_prepare(self, nodes):
if nodes is None:
nodes = np.arange(self.size)
ai = self._node_info['_ai']
elif nodes.dtype == object:
ai = np.array(
[node._ai if node.is_root else node.root._ai
for node in nodes])
else: # assume an array of indices
ai = self._node_info['_ai'][nodes]
# the order they will be processed
io_order = np.argsort(ai)
ai = ai[io_order]
# array to return them to original order
return_order = np.empty_like(io_order)
return_order[io_order] = np.arange(io_order.size)
dfi = np.digitize(ai, self._node_io._ei)
udfi = np.unique(dfi)
data_files = [self.data_files[i] for i in udfi]
index_list = [io_order[dfi == i] for i in udfi]
return data_files, index_list, return_order
def _node_io_loop_start(self, data_file):
data_file._field_cache = {}
data_file.open()
def _node_io_loop_finish(self, data_file):
data_file._field_cache = {}
data_file.close()
def _parse_parameter_file(self):
self._prefix = \
self.filename[:self.filename.rfind(self._suffix)]
fh = h5py.File(self.filename, mode="r")
for attr in ["hubble_constant",
"omega_matter",
"omega_lambda"]:
setattr(self, attr, fh.attrs.get(attr, None))
if "unit_registry_json" in fh.attrs:
self.unit_registry = \
UnitRegistry.from_json(
parse_h5_attr(fh, "unit_registry_json"))
if "box_size" in fh.attrs:
self.box_size = _hdf5_yt_attr(
fh, "box_size", unit_registry=self.unit_registry)
self.field_info.update(
json.loads(parse_h5_attr(fh, "field_info")))
self._size = fh.attrs["total_trees"]
fh.close()
# analysis fields in sidecar files
analysis_filename = f"{self._prefix}-analysis{self._suffix}"
if os.path.exists(analysis_filename):
self.analysis_filename = analysis_filename
fh = h5py.File(analysis_filename, mode="r")
analysis_fi = json.loads(parse_h5_attr(fh, "field_info"))
fh.close()
for field in analysis_fi:
analysis_fi[field]["type"] = "analysis_saved"
self.field_info.update(analysis_fi)
else:
self.analysis_filename = None
self.field_list = list(self.field_info.keys())
def _plant_trees(self):
if self.is_planted:
return
fh = h5py.File(self.filename, "r")
self._node_info['uid'][:] = fh["data"]["uid"][()].astype(np.int64)
self._node_io._si = fh["index"]["tree_start_index"][()]
self._node_io._ei = fh["index"]["tree_end_index"][()]
fh.close()
self._node_info['_ai'][:] = np.arange(self.size)
self.data_files = \
[YTreeDataFile(f"{self._prefix}_{i:04d}{self._suffix}")
for i in range(self._node_io._si.size)]
if self.analysis_filename is not None:
for i, df in enumerate(self.data_files):
df.analysis_filename = \
f"{self._prefix}_{i:04d}-analysis{self._suffix}"
def get_yt_selection(self, above=None, below=None, equal=None, about=None,
conditionals=None, data_source=None):
"""
Get a selection of halos meeting given criteria.
This function can be used to create database-like queries to search
for halos meeting various criteria. It will return a
:class:`~yt.data_objects.selection_objects.cut_region.YTCutRegion`
that can be queried to get field values for all halos meeting the
selection criteria. The
:class:`~yt.data_objects.selection_objects.cut_region.YTCutRegion`
can then be passed to
:func:`~ytree.frontends.ytree.arbor.YTreeArbor.get_nodes_from_selection`
to get all the
:class:`~ytree.data_structures.tree_node.TreeNode` objects that meet the
criteria.
If multiple criteria are provided, selected halos must meet all
criteria.
To specify a custom data container, use the ``ytds`` attribute
associated with the arbor to access the merger tree data as a yt
dataset. For example:
>>> import ytree
>>> a = ytree.load("arbor/arbor.h5")
>>> ds = a.ytds
Parameters
----------
above : optional, list of tuples with (field, value, <units>)
Halos meeting a given criterion must have field values at or
above the provided limiting value. Each entry in the list must
contain the field name, limiting value, and (optionally) units.
below : optional, list of tuples with (field, value, <units>)
Halos meeting a given criterion must have field values at or
below the provided limiting value. Each entry in the list must
contain the field name, limiting value, and (optionally) units.
equal : optional, list of tuples with (field, value, <units>)
Halos meeting a given criterion must have field values equal to
the provided value. Each entry in the list must contain the
field name, value, and (optionally) units.
about : optional, list of tuples with (field, value, tolerance, <units>)
Halos meeting a given criterion must have field values within
the tolerance of the provided value. Each entry in the list must
contain the field name, value, tolerance, and (optionally) units.
conditionals : optional, list of strings
A list of conditionals for constructing a custom
:class:`~yt.data_objects.selection_objects.cut_region.YTCutRegion`.
This can be used instead of above/below/equal/about to create
more complex selection criteria. See the Cut Regions section in the
yt documentation for more information. The conditionals keyword
can only be used if none of the first for selection keywords are
given.
data_source : optional, :class:`~yt.data_objects.data_containers.YTDataContainer`
The source yt data container to be used to make the cut region.
If none given, the
:class:`~yt.data_objects.static_output.Dataset.all_data` container
(i.e., the full dataset) is used.
Returns
-------
cr : :class:`~yt.data_objects.selection_objects.cut_region.YTCutRegion`
The cut region associated with the provided selection criteria.
Examples
--------
>>> import ytree
>>> a = ytree.load("arbor/arbor.h5")
>>> # select halos above 1e12 Msun at redshift > 0.5
>>> sel = a.get_yt_selection(
... above=[("mass", 1e13, "Msun"),
... ("redshift", 0.5)])
>>> print (sel["halos", "mass"])
>>> print (sel["halos", "virial_radius"])
>>> import ytree
>>> a = ytree.load("arbor/arbor.h5")
>>> # select halos below 1e13 Msun at redshift > 1
>>> sel = a.get_yt_selection(
... below=[("mass", 1e13, "Msun")],
... above=[("redshift", 1)])
>>> print (sel["halos", "mass"])
>>> print (sel["halos", "virial_radius"])
>>> import ytree
>>> a = ytree.load("arbor/arbor.h5")
>>> # select phantom halos (a consistent-trees field)
>>> sel = a.get_yt_selection(equal=[("phantom", 1)])
>>> import ytree
>>> a = ytree.load("arbor/arbor.h5")
>>> # select halos with vmax of 200 +-10 km/s (i.e., 5%)
>>> sel = a.get_yt_selection(about=[("vmax", 200, "km/s", 0.05)])
>>> import ytree
>>> a = ytree.load("arbor/arbor.h5")
>>> # use a yt conditional
>>> sel = a.get_yt_selection(
... conditionals=['obj["halos", "mass"] > 1e12'])
>>> import ytree
>>> a = ytree.load("arbor/arbor.h5")
>>> # select halos only within a sphere
>>> ds = a.ytds
>>> sphere = ds.sphere(ds.domain_center, (10, Mpc))
>>> sel = a.get_yt_selection(
... above=[("mass", 1e13)],
... data_source=sphere)
>>> # get the TreeNodes for the selection
>>> for node in a.get_nodes_from_selection(sel):
... print (node["mass"])
See Also
--------
select_halos, get_nodes_from_selection
"""
if above is None:
above = []
if below is None:
below = []
if equal is None:
equal = []
if about is None:
about = []
if conditionals is None:
conditionals = []
if not (bool(conditionals) ^ any([above, below, equal, about])):
raise ValueError(
"Must specify either conditionals or above/below/equal/about, not both."
f"\nconditionals: {conditionals}"
f"\nabove: {above}"
f"\nbelow: {below}"
f"\nequal: {equal}"
f"\nabout: {about}")
if data_source is None:
data_source = self.ytds.all_data()
if not isinstance(data_source, YTDataContainer):
raise ValueError(
f"data_source must be a YTDataContainer: {data_source}.")
for criterion in above:
condition = get_conditional("above", criterion)
conditionals.append(condition)
for criterion in below:
condition = get_conditional("below", criterion)
conditionals.append(condition)
for criterion in equal:
condition = get_conditional("equal", criterion)
conditionals.append(condition)
for criterion in about:
conditions = get_about(criterion)
conditionals.extend(conditions)
cr = data_source.cut_region(conditionals)
return cr
    def get_nodes_from_selection(self, container):
        """
        Generate TreeNodes from a yt data container.
        All halos contained within the data container will be
        returned as TreeNode objects. This returns a generator
        that can be iterated over or cast as a list.
        Parameters
        ----------
        container : :class:`~yt.data_objects.data_containers.YTDataContainer`
            Data container, such as a sphere or region, from
            which nodes will be generated.
        Returns
        -------
        nodes : generator
            The :class:`~ytree.data_structures.tree_node.TreeNode` objects
            contained within the container.
        Examples
        --------
        >>> import ytree
        >>> a = ytree.load("arbor/arbor.h5")
        >>> c = a.arr([0.5, 0.5, 0.5], "unitary")
        >>> sphere = a.ytds.sphere(c, (0.1, "unitary"))
        >>> for node in a.get_nodes_from_selection(sphere):
        ...     print (node["mass"])
        >>> import ytree
        >>> a = ytree.load("arbor/arbor.h5")
        >>> # select halos above 1e12 Msun at redshift > 0.5
        >>> sel = a.get_yt_selection(
        ...     above=[("mass", 1e13, "Msun"),
        ...            ("redshift", 0.5)])
        >>> my_nodes = list(a.get_nodes_from_selection(sel))
        """
        self._plant_trees()
        # Make sure the fields needed to locate each halo's tree are read.
        container.get_data([('halos', 'file_number'),
                            ('halos', 'file_root_index'),
                            ('halos', 'tree_index')])
        file_number = container['halos', 'file_number'].d.astype(int)
        file_root_index = container['halos', 'file_root_index'].d.astype(int)
        tree_index = container['halos', 'tree_index'].d.astype(int)
        # Arbor-wide root index = file's starting tree index + index in file.
        arbor_index = self._node_io._si[file_number] + file_root_index
        for ai, ti in zip(arbor_index, tree_index):
            root_node = self._generate_root_node(ai)
            if ti == 0:
                # tree_index 0 is the root halo itself.
                yield root_node
            else:
                yield root_node.get_node("forest", ti)
    # Lazily-built yt dataset cache for the ytds property.
    _ytds = None
    @property
    def ytds(self):
        """
        Load as a yt dataset.
        Merger tree data is loaded as a yt dataset, providing full access
        to yt functionality. Fields are accessed with the naming convention,
        ("halos", <field name>).
        Examples
        --------
        >>> import ytree
        >>> a = ytree.load("arbor/arbor.h5")
        >>>
        >>> ds = a.ytds
        >>> sphere = ds.sphere(ds.domain_center, (5, "Mpc"))
        >>> print (sphere["halos", "mass"])
        >>>
        >>> for node in a.get_nodes_from_selection(sphere):
        ...     print (node["position"])
        """
        if self._ytds is not None:
            return self._ytds
        # Quiet yt's logger (level 40 = ERROR) while loading the dataset.
        with log_level(40, mylog=ytLogger):
            self._ytds = YTreeDataset(self.filename)
        return self._ytds
@classmethod
def _is_valid(self, *args, **kwargs):
"""
File should end in .h5, be loadable as an hdf5 file,
and have "arbor_type" attribute.
"""
fn = args[0]
if not fn.endswith(self._suffix):
return False
try:
with h5py.File(fn, "r") as f:
if "arbor_type" not in f.attrs:
return False
atype = f.attrs["arbor_type"]
if hasattr(atype, "astype"):
atype = atype.astype(str)
if atype != "YTreeArbor":
return False
except BaseException:
return False
return True
| [
"os.path.exists",
"numpy.unique",
"ytree.frontends.ytree.utilities.get_conditional",
"numpy.digitize",
"ytree.utilities.io._hdf5_yt_attr",
"ytree.frontends.ytree.io.YTreeDataFile",
"ytree.frontends.ytree.utilities.get_about",
"ytree.utilities.logger.log_level",
"ytree.utilities.io.parse_h5_attr",
... | [((1863, 1877), 'numpy.argsort', 'np.argsort', (['ai'], {}), '(ai)\n', (1873, 1877), True, 'import numpy as np\n'), ((1976, 1999), 'numpy.empty_like', 'np.empty_like', (['io_order'], {}), '(io_order)\n', (1989, 1999), True, 'import numpy as np\n'), ((2033, 2057), 'numpy.arange', 'np.arange', (['io_order.size'], {}), '(io_order.size)\n', (2042, 2057), True, 'import numpy as np\n'), ((2073, 2107), 'numpy.digitize', 'np.digitize', (['ai', 'self._node_io._ei'], {}), '(ai, self._node_io._ei)\n', (2084, 2107), True, 'import numpy as np\n'), ((2123, 2137), 'numpy.unique', 'np.unique', (['dfi'], {}), '(dfi)\n', (2132, 2137), True, 'import numpy as np\n'), ((2658, 2692), 'h5py.File', 'h5py.File', (['self.filename'], {'mode': '"""r"""'}), "(self.filename, mode='r')\n", (2667, 2692), False, 'import h5py\n'), ((3464, 3497), 'os.path.exists', 'os.path.exists', (['analysis_filename'], {}), '(analysis_filename)\n', (3478, 3497), False, 'import os\n'), ((4053, 4082), 'h5py.File', 'h5py.File', (['self.filename', '"""r"""'], {}), "(self.filename, 'r')\n", (4062, 4082), False, 'import h5py\n'), ((4340, 4360), 'numpy.arange', 'np.arange', (['self.size'], {}), '(self.size)\n', (4349, 4360), True, 'import numpy as np\n'), ((1488, 1508), 'numpy.arange', 'np.arange', (['self.size'], {}), '(self.size)\n', (1497, 1508), True, 'import numpy as np\n'), ((3106, 3169), 'ytree.utilities.io._hdf5_yt_attr', '_hdf5_yt_attr', (['fh', '"""box_size"""'], {'unit_registry': 'self.unit_registry'}), "(fh, 'box_size', unit_registry=self.unit_registry)\n", (3119, 3169), False, 'from ytree.utilities.io import _hdf5_yt_attr, parse_h5_attr\n'), ((3571, 3609), 'h5py.File', 'h5py.File', (['analysis_filename'], {'mode': '"""r"""'}), "(analysis_filename, mode='r')\n", (3580, 3609), False, 'import h5py\n'), ((4400, 4454), 'ytree.frontends.ytree.io.YTreeDataFile', 'YTreeDataFile', (['f"""{self._prefix}_{i:04d}{self._suffix}"""'], {}), "(f'{self._prefix}_{i:04d}{self._suffix}')\n", (4413, 4454), False, 'from 
ytree.frontends.ytree.io import YTreeDataFile, YTreeRootFieldIO, YTreeTreeFieldIO\n'), ((10997, 11032), 'ytree.frontends.ytree.utilities.get_conditional', 'get_conditional', (['"""above"""', 'criterion'], {}), "('above', criterion)\n", (11012, 11032), False, 'from ytree.frontends.ytree.utilities import get_about, get_conditional\n'), ((11133, 11168), 'ytree.frontends.ytree.utilities.get_conditional', 'get_conditional', (['"""below"""', 'criterion'], {}), "('below', criterion)\n", (11148, 11168), False, 'from ytree.frontends.ytree.utilities import get_about, get_conditional\n'), ((11269, 11304), 'ytree.frontends.ytree.utilities.get_conditional', 'get_conditional', (['"""equal"""', 'criterion'], {}), "('equal', criterion)\n", (11284, 11304), False, 'from ytree.frontends.ytree.utilities import get_about, get_conditional\n'), ((11406, 11426), 'ytree.frontends.ytree.utilities.get_about', 'get_about', (['criterion'], {}), '(criterion)\n', (11415, 11426), False, 'from ytree.frontends.ytree.utilities import get_about, get_conditional\n'), ((14325, 14354), 'ytree.utilities.logger.log_level', 'log_level', (['(40)'], {'mylog': 'ytLogger'}), '(40, mylog=ytLogger)\n', (14334, 14354), False, 'from ytree.utilities.logger import log_level\n'), ((14381, 14408), 'ytree.yt_frontend.YTreeDataset', 'YTreeDataset', (['self.filename'], {}), '(self.filename)\n', (14393, 14408), False, 'from ytree.yt_frontend import YTreeDataset\n'), ((1602, 1677), 'numpy.array', 'np.array', (['[(node._ai if node.is_root else node.root._ai) for node in nodes]'], {}), '([(node._ai if node.is_root else node.root._ai) for node in nodes])\n', (1610, 1677), True, 'import numpy as np\n'), ((3002, 3041), 'ytree.utilities.io.parse_h5_attr', 'parse_h5_attr', (['fh', '"""unit_registry_json"""'], {}), "(fh, 'unit_registry_json')\n", (3015, 3041), False, 'from ytree.utilities.io import _hdf5_yt_attr, parse_h5_attr\n'), ((3242, 3273), 'ytree.utilities.io.parse_h5_attr', 'parse_h5_attr', (['fh', '"""field_info"""'], 
{}), "(fh, 'field_info')\n", (3255, 3273), False, 'from ytree.utilities.io import _hdf5_yt_attr, parse_h5_attr\n'), ((3647, 3678), 'ytree.utilities.io.parse_h5_attr', 'parse_h5_attr', (['fh', '"""field_info"""'], {}), "(fh, 'field_info')\n", (3660, 3678), False, 'from ytree.utilities.io import _hdf5_yt_attr, parse_h5_attr\n'), ((14739, 14757), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (14748, 14757), False, 'import h5py\n')] |
######################
## Essential libraries
######################
import cv2
import numpy as np
import os
import math
import copy
codes_folder_path = os.path.abspath('.')  # directory the script is run from
images_folder_path = os.path.abspath(os.path.join('..', 'Videos'))  # input videos read by main()
generated_folder_path = os.path.abspath(os.path.join('..', 'Generated'))  # output location (unused in the code shown here)
def kernel_psf(angle, d, size=20):
    """Build a linear motion-blur point spread function (PSF).

    A 1 x d row of ones is rotated by ``angle`` and rendered into a
    ``size`` x ``size`` image, centered on the midpoint of the segment.

    Args:
        angle: blur direction in radians.
        d: blur length in pixels.
        size: side length of the square output kernel.

    Returns:
        ``size`` x ``size`` float32 PSF image.
    """
    line = np.ones((1, d), np.float32)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    # 2x3 affine matrix: rotation block plus a translation that places
    # the center of the line segment at the center of the output image.
    affine = np.float32([[cos_a, -sin_a, 0], [sin_a, cos_a, 0]])
    half = size // 2  # floor division
    affine[:, 2] = (half, half) - np.dot(affine[:, :2], ((d - 1) * 0.5, 0))
    return cv2.warpAffine(line, affine, (size, size), flags=cv2.INTER_CUBIC)
#wiener filter implementaion
def wiener_filter(img, kernel, K):
kernel /= np.sum(kernel)
copy_img = np.copy(img)
copy_img = np.fft.fft2(copy_img) # 2D fast fourier transform
kernel = np.fft.fft2(kernel, s = img.shape)
kernel = np.conj(kernel) / (np.abs(kernel) ** 2 + K) # wiener formula implementation
copy_img = copy_img * kernel # conversion blurred to deblurred
copy_img = np.abs(np.fft.ifft2(copy_img)) # 2D inverse fourier transform
return copy_img
def process(ip_image):
    """Deblur a single BGR frame with a per-channel Wiener filter.

    Each channel is normalized to [0, 1], deconvolved with a vertical
    motion-blur PSF, recombined, contrast-boosted, and denoised twice
    to suppress ringing (Gibbs) artifacts.

    Args:
        ip_image: BGR uint8 input frame.

    Returns:
        BGR uint8 deblurred frame.
    """
    contrast = 2.2               # contrast gain applied at the end
    psf_angle = np.deg2rad(90)   # blur direction for the PSF
    psf_length = 20              # blur distance for the PSF
    snr_k = 0.0060               # small K: close to inverse filtering
    psf = kernel_psf(psf_angle, psf_length)
    # Filter each channel independently in normalized float space.
    restored_channels = tuple(
        wiener_filter(np.float32(channel) / 255.0, psf, K=snr_k)
        for channel in cv2.split(ip_image))
    restored = cv2.merge(restored_channels)
    # Back to the uint8 range, then raise contrast.
    restored = np.uint8(np.clip(restored * 255, 0, 255))
    restored = cv2.convertScaleAbs(restored, alpha=contrast)
    # Two denoising passes remove rings left over from deconvolution.
    restored = cv2.fastNlMeansDenoisingColored(restored, None, 10, 10, 7, 15)
    restored = cv2.fastNlMeansDenoisingColored(restored, None, 10, 10, 7, 15)
    return restored
def main(val):
    """Grab the frame at ``val`` seconds from the input video and deblur it.

    Args:
        val: time offset in seconds (string or number) into the video.

    Returns:
        The deblurred frame, or None if the frame could not be read.
    """
    ## reading in video (NOTE: "video name.mp4" is a placeholder file name)
    cap = cv2.VideoCapture(images_folder_path+"/"+"video name.mp4")
    ## getting the frames per second value of input video
    fps = cap.get(cv2.CAP_PROP_FPS)
    ## frame index corresponding to the requested time offset
    frame_seq = int(val)*fps
    ## setting the video counter to frame sequence
    cap.set(1,frame_seq)
    ## reading in the frame
    ret, frame = cap.read()
    # Release the capture handle (was previously leaked).
    cap.release()
    # Guard against a failed read: the original crashed on frame.shape
    # when the video was missing or the timestamp was out of range.
    if not ret or frame is None:
        print("Could not read frame at t =", val, "s")
        return None
    ## verifying frame has content
    print(frame.shape)
    ## display to see if the frame is correct
    cv2.imshow("window", frame)
    cv2.waitKey(0)
    ## calling the algorithm function
    op_image = process(frame)
    # Return the result (previously computed but discarded).
    return op_image
############################################################################################
## main function
############################################################################################
if __name__ == '__main__':
    # Script entry point: prompt for a timestamp and process that frame.
    main(input("time value in seconds:"))
| [
"numpy.clip",
"numpy.uint8",
"cv2.convertScaleAbs",
"cv2.imshow",
"numpy.sin",
"numpy.fft.fft2",
"numpy.dot",
"cv2.waitKey",
"numpy.abs",
"cv2.merge",
"cv2.warpAffine",
"numpy.ones",
"numpy.conj",
"numpy.deg2rad",
"numpy.cos",
"cv2.split",
"numpy.copy",
"numpy.fft.ifft2",
"cv2.fa... | [((165, 185), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (180, 185), False, 'import os\n'), ((224, 252), 'os.path.join', 'os.path.join', (['""".."""', '"""Videos"""'], {}), "('..', 'Videos')\n", (236, 252), False, 'import os\n'), ((295, 326), 'os.path.join', 'os.path.join', (['""".."""', '"""Generated"""'], {}), "('..', 'Generated')\n", (307, 326), False, 'import os\n'), ((386, 413), 'numpy.ones', 'np.ones', (['(1, d)', 'np.float32'], {}), '((1, d), np.float32)\n', (393, 413), True, 'import numpy as np\n'), ((464, 499), 'numpy.float32', 'np.float32', (['[[c, -s, 0], [s, c, 0]]'], {}), '([[c, -s, 0], [s, c, 0]])\n', (474, 499), True, 'import numpy as np\n'), ((663, 725), 'cv2.warpAffine', 'cv2.warpAffine', (['kernel', 'A', '(size, size)'], {'flags': 'cv2.INTER_CUBIC'}), '(kernel, A, (size, size), flags=cv2.INTER_CUBIC)\n', (677, 725), False, 'import cv2\n'), ((868, 882), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (874, 882), True, 'import numpy as np\n'), ((899, 911), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (906, 911), True, 'import numpy as np\n'), ((928, 949), 'numpy.fft.fft2', 'np.fft.fft2', (['copy_img'], {}), '(copy_img)\n', (939, 949), True, 'import numpy as np\n'), ((1005, 1037), 'numpy.fft.fft2', 'np.fft.fft2', (['kernel'], {'s': 'img.shape'}), '(kernel, s=img.shape)\n', (1016, 1037), True, 'import numpy as np\n'), ((1445, 1459), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (1455, 1459), True, 'import numpy as np\n'), ((1618, 1637), 'cv2.split', 'cv2.split', (['ip_image'], {}), '(ip_image)\n', (1627, 1637), False, 'import cv2\n'), ((2177, 2236), 'cv2.merge', 'cv2.merge', (['(filtered_img_b, filtered_img_g, filtered_img_r)'], {}), '((filtered_img_b, filtered_img_g, filtered_img_r))\n', (2186, 2236), False, 'import cv2\n'), ((2285, 2320), 'numpy.clip', 'np.clip', (['(filtered_img * 255)', '(0)', '(255)'], {}), '(filtered_img * 255, 0, 255)\n', (2292, 2320), True, 'import numpy as np\n'), 
((2373, 2395), 'numpy.uint8', 'np.uint8', (['filtered_img'], {}), '(filtered_img)\n', (2381, 2395), True, 'import numpy as np\n'), ((2451, 2493), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['filtered_img'], {'alpha': 'a'}), '(filtered_img, alpha=a)\n', (2470, 2493), False, 'import cv2\n'), ((2568, 2634), 'cv2.fastNlMeansDenoisingColored', 'cv2.fastNlMeansDenoisingColored', (['filtered_img', 'None', '(10)', '(10)', '(7)', '(15)'], {}), '(filtered_img, None, 10, 10, 7, 15)\n', (2599, 2634), False, 'import cv2\n'), ((2656, 2722), 'cv2.fastNlMeansDenoisingColored', 'cv2.fastNlMeansDenoisingColored', (['filtered_img', 'None', '(10)', '(10)', '(7)', '(15)'], {}), '(filtered_img, None, 10, 10, 7, 15)\n', (2687, 2722), False, 'import cv2\n'), ((3114, 3175), 'cv2.VideoCapture', 'cv2.VideoCapture', (["(images_folder_path + '/' + 'video name.mp4')"], {}), "(images_folder_path + '/' + 'video name.mp4')\n", (3130, 3175), False, 'import cv2\n'), ((3581, 3608), 'cv2.imshow', 'cv2.imshow', (['"""window"""', 'frame'], {}), "('window', frame)\n", (3591, 3608), False, 'import cv2\n'), ((3614, 3628), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3625, 3628), False, 'import cv2\n'), ((426, 439), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (432, 439), True, 'import numpy as np\n'), ((441, 454), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (447, 454), True, 'import numpy as np\n'), ((617, 653), 'numpy.dot', 'np.dot', (['A[:, :2]', '((d - 1) * 0.5, 0)'], {}), '(A[:, :2], ((d - 1) * 0.5, 0))\n', (623, 653), True, 'import numpy as np\n'), ((1054, 1069), 'numpy.conj', 'np.conj', (['kernel'], {}), '(kernel)\n', (1061, 1069), True, 'import numpy as np\n'), ((1253, 1275), 'numpy.fft.ifft2', 'np.fft.ifft2', (['copy_img'], {}), '(copy_img)\n', (1265, 1275), True, 'import numpy as np\n'), ((1691, 1704), 'numpy.float32', 'np.float32', (['b'], {}), '(b)\n', (1701, 1704), True, 'import numpy as np\n'), ((1724, 1737), 'numpy.float32', 'np.float32', (['g'], {}), '(g)\n', 
(1734, 1737), True, 'import numpy as np\n'), ((1757, 1770), 'numpy.float32', 'np.float32', (['r'], {}), '(r)\n', (1767, 1770), True, 'import numpy as np\n'), ((1073, 1087), 'numpy.abs', 'np.abs', (['kernel'], {}), '(kernel)\n', (1079, 1087), True, 'import numpy as np\n')] |
from typing import NamedTuple, List, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
import vae_loss
class EncoderConfig(NamedTuple):
    """NamedTuple for configuring a Conv2D layer in an encoder.
    Fields:
        filter: num filters in layer
        kernel_size: kernel size in layer
        stride: stride in layer
    """
    filter: int       # passed to Conv2D(filters=...)
    kernel_size: int  # passed to Conv2D(kernel_size=...)
    stride: int       # passed to Conv2D(strides=...)
class Encoder(tf.keras.Model):
    """Convolutional encoder mapping inputs to latent parameters.

    Builds a stack of Conv2D -> BatchNorm -> LeakyReLU -> Dropout
    blocks from `config`, then a Dense layer producing 2 * latent_dim
    outputs (the VAE later splits these into mean and log-variance).
    """
    def __init__(self,
                 input_shape: tf.TensorShape,
                 latent_dim: int,
                 config: List[EncoderConfig] = None,
                 *args,
                 **kwargs):
        """Create the encoder as a keras functional model.

        Args:
            input_shape: shape of the input (without batch dimension).
            latent_dim: dimensionality of the latent space.
            config: one EncoderConfig per Conv2D block. Effectively
                required; the original signature used the typing alias
                `List[EncoderConfig]` as the *default value* by mistake,
                so the default was never usable.
        """
        if config is None:
            raise ValueError("config must be a list of EncoderConfig")
        self._input_shape = input_shape
        self._latent_dim = latent_dim
        self._config = config
        self._unflattened_conv_shape: tf.TensorShape
        inputs = tf.keras.layers.Input(self._input_shape, name="encoder_input")
        outputs, self._unflattened_conv_shape = self.build_outputs(inputs)
        # Functional-model init wires inputs -> outputs.
        super().__init__(inputs, outputs, name="encoder", *args, **kwargs)

    @property
    def unflattened_conv_shape(self) -> tf.TensorShape:
        """Return tensor shape before the final flattening op is applied."""
        return self._unflattened_conv_shape

    def build_outputs(self,
                      tensor: tf.Tensor) -> Tuple[tf.Tensor, tf.TensorShape]:
        """Apply the conv stack; return (outputs, pre-flatten shape)."""
        for i, conf in enumerate(self._config):
            tensor = tf.keras.layers.Conv2D(filters=conf.filter,
                                            kernel_size=conf.kernel_size,
                                            strides=conf.stride,
                                            padding="same",
                                            name=f"encoder_conv_{i}")(tensor)
            tensor = tf.keras.layers.BatchNormalization()(tensor)
            tensor = tf.keras.layers.LeakyReLU()(tensor)
            tensor = tf.keras.layers.Dropout(rate=.25)(tensor)
        # Remember the conv output shape so the decoder can un-flatten.
        unflattened_conv_shape = tensor.shape[1:]
        tensor = tf.keras.layers.Flatten()(tensor)
        # 2 * latent_dim outputs: concatenated mean and log-variance.
        outputs = tf.keras.layers.Dense(self._latent_dim +
                                      self._latent_dim)(tensor)
        return outputs, unflattened_conv_shape
class DecoderConfig(NamedTuple):
    """NamedTuple for configuring a Conv2DTranspose layer in an encoder.
    Fields:
        t_filter: num filters in layer
        t_kernel_size: kernel size in layer
        t_stride: stride in layer
    """
    t_filter: int       # passed to Conv2DTranspose(filters=...)
    t_kernel_size: int  # passed to Conv2DTranspose(kernel_size=...)
    t_stride: int       # passed to Conv2DTranspose(strides=...)
class Decoder(tf.keras.Model):
    """Decoder model."""
    def __init__(self, input_shape: tf.TensorShape, latent_dim: int,
                 config: List[DecoderConfig], *args, **kwargs):
        # input_shape is the encoder's pre-flatten conv shape; the
        # decoder first projects the latent vector back onto it.
        self._input_shape = input_shape
        self._latent_dim = latent_dim
        self._config = config
        inputs = tf.keras.layers.Input(shape=(self._latent_dim, ),
                                      name="decoder_input")
        outputs = self.build_outputs(inputs)
        super().__init__(inputs, outputs)
    def build_outputs(self, tensor: tf.Tensor) -> tf.Tensor:
        """Create keras model and return outputs."""
        # Dense + Reshape: latent vector -> conv feature map.
        tensor = tf.keras.layers.Dense(np.prod(self._input_shape))(tensor)
        tensor = tf.keras.layers.Reshape(self._input_shape)(tensor)
        for i, conf in enumerate(self._config):
            tensor = tf.keras.layers.Conv2DTranspose(
                filters=conf.t_filter,
                kernel_size=conf.t_kernel_size,
                strides=conf.t_stride,
                padding="same",
                name=f"decoder_conv_t_{i}")(tensor)
            # No norm/activation/dropout after the final transpose layer.
            if i < len(self._config) - 1:
                tensor = tf.keras.layers.BatchNormalization()(tensor)
                tensor = tf.keras.layers.LeakyReLU()(tensor)
                tensor = tf.keras.layers.Dropout(rate=.25)(tensor)
        # Note that we did not add any activation on the end, the decoder
        # therefore returns values on the logit scale.
        return tensor
class VariationalAutoEncoder(keras.Model):
    """Variational Auto Encoder model."""
    def __init__(self, encoder, decoder, **kwargs):
        super().__init__(**kwargs)
        self._encoder = encoder
        self._decoder = decoder
    @property
    def encoder(self) -> tf.keras.models.Model:
        """Accessor method"""
        return self._encoder
    @property
    def decoder(self) -> tf.keras.models.Model:
        """Accessor method"""
        return self._decoder
    @classmethod
    def from_latent_dim(cls, latent_dim, input_shape):
        """Create a VAE with predefined architecture with desired latent dim.
        """
        encoder = Encoder(input_shape=input_shape,
                          latent_dim=latent_dim,
                          config=[
                              EncoderConfig(32, 3, 1),
                              EncoderConfig(64, 3, 2),
                              EncoderConfig(64, 3, 2),
                              EncoderConfig(64, 3, 1)
                          ])
        # Decoder mirrors the encoder (reversed filters/strides) and
        # starts from the encoder's pre-flatten conv shape.
        decoder = Decoder(input_shape=encoder.unflattened_conv_shape,
                          latent_dim=latent_dim,
                          config=[
                              DecoderConfig(64, 3, 1),
                              DecoderConfig(64, 3, 2),
                              DecoderConfig(32, 3, 2),
                              DecoderConfig(input_shape[-1], 3, 1)
                          ])
        return cls(encoder=encoder, decoder=decoder)
    def encode(self, tensor: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
        """Encode inputs; split encoder output into (mean, log_var)."""
        mean, log_var = tf.split(self._encoder(tensor),
                                 num_or_size_splits=2,
                                 axis=1)
        return mean, log_var
    def decode(self, z: tf.Tensor, apply_sigmoid=False) -> tf.Tensor:
        """Decode latent samples; optionally map logits to [0, 1]."""
        logits = self._decoder(z)
        if apply_sigmoid:
            return tf.sigmoid(logits)
        return logits
    @staticmethod
    def sample_from_latent_conditional(mean: tf.Tensor,
                                       log_var: tf.Tensor) -> tf.Tensor:
        """Sample from the from latent space.
        This method returns samples from the distribution
        p(z | X), parametrized by `mean` and `log_var`.
        Args:
            mean: mean of the normal distribution to be sampled from.
            log_var: log variange of the normal distribution to be sampled
                from. Must have the same shape as `mean`.
        Returns:
            tf.Tensor: tensor of same shape as `mean` and `log_var` containing
                samples of the specified normal distribution.
        """
        # Reparameterization trick: z = mean + sigma * eps, eps ~ N(0, I).
        std_normal_samples = tf.random.normal(shape=mean.shape)
        return mean + std_normal_samples * tf.exp(.5 * log_var)
    @tf.function
    def train_step(self, tensor_batch: tf.Tensor,
                   optimizer: tf.keras.optimizers.Optimizer,
                   train_elbo: tf.keras.metrics.Metric):
        """Perform a training step / epoch.
        Args:
            tensor_batch: Batched tensor for training.
            optimizer: optimizer to apply gradients to.
            train_elbo: metric to collect the mean elbo of the batch.
        """
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean(vae_loss.compute_loss(self, tensor_batch))
        gradients = tape.gradient(loss, self.trainable_variables)
        optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        # Loss is the negative ELBO, so record its negation.
        train_elbo(-loss)
| [
"tensorflow.keras.layers.Input",
"tensorflow.random.normal",
"numpy.prod",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.BatchNormalizati... | [((854, 916), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['self._input_shape'], {'name': '"""encoder_input"""'}), "(self._input_shape, name='encoder_input')\n", (875, 916), True, 'import tensorflow as tf\n'), ((2894, 2964), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(self._latent_dim,)', 'name': '"""decoder_input"""'}), "(shape=(self._latent_dim,), name='decoder_input')\n", (2915, 2964), True, 'import tensorflow as tf\n'), ((6732, 6766), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': 'mean.shape'}), '(shape=mean.shape)\n', (6748, 6766), True, 'import tensorflow as tf\n'), ((2064, 2089), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (2087, 2089), True, 'import tensorflow as tf\n'), ((2117, 2175), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(self._latent_dim + self._latent_dim)'], {}), '(self._latent_dim + self._latent_dim)\n', (2138, 2175), True, 'import tensorflow as tf\n'), ((3300, 3342), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['self._input_shape'], {}), '(self._input_shape)\n', (3323, 3342), True, 'import tensorflow as tf\n'), ((5959, 5977), 'tensorflow.sigmoid', 'tf.sigmoid', (['logits'], {}), '(logits)\n', (5969, 5977), True, 'import tensorflow as tf\n'), ((7282, 7299), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7297, 7299), True, 'import tensorflow as tf\n'), ((1489, 1629), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'conf.filter', 'kernel_size': 'conf.kernel_size', 'strides': 'conf.stride', 'padding': '"""same"""', 'name': 'f"""encoder_conv_{i}"""'}), "(filters=conf.filter, kernel_size=conf.kernel_size,\n strides=conf.stride, padding='same', name=f'encoder_conv_{i}')\n", (1511, 1629), True, 'import tensorflow as tf\n'), ((1831, 1867), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], 
{}), '()\n', (1865, 1867), True, 'import tensorflow as tf\n'), ((1897, 1924), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (1922, 1924), True, 'import tensorflow as tf\n'), ((1954, 1988), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (1977, 1988), True, 'import tensorflow as tf\n'), ((3247, 3273), 'numpy.prod', 'np.prod', (['self._input_shape'], {}), '(self._input_shape)\n', (3254, 3273), True, 'import numpy as np\n'), ((3421, 3584), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', ([], {'filters': 'conf.t_filter', 'kernel_size': 'conf.t_kernel_size', 'strides': 'conf.t_stride', 'padding': '"""same"""', 'name': 'f"""decoder_conv_t_{i}"""'}), "(filters=conf.t_filter, kernel_size=conf.\n t_kernel_size, strides=conf.t_stride, padding='same', name=\n f'decoder_conv_t_{i}')\n", (3452, 3584), True, 'import tensorflow as tf\n'), ((6810, 6831), 'tensorflow.exp', 'tf.exp', (['(0.5 * log_var)'], {}), '(0.5 * log_var)\n', (6816, 6831), True, 'import tensorflow as tf\n'), ((7343, 7384), 'vae_loss.compute_loss', 'vae_loss.compute_loss', (['self', 'tensor_batch'], {}), '(self, tensor_batch)\n', (7364, 7384), False, 'import vae_loss\n'), ((3732, 3768), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3766, 3768), True, 'import tensorflow as tf\n'), ((3802, 3829), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (3827, 3829), True, 'import tensorflow as tf\n'), ((3863, 3897), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (3886, 3897), True, 'import tensorflow as tf\n')] |
import math
import cv2
import numpy as np
from dtld_parsing.calibration import CalibrationData
from typing import Tuple
__author__ = "<NAME>, <NAME> and <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class ThreeDPosition(object):
    """A 3D point expressed in a named reference frame.

    Coordinates are stored privately and manipulated through the
    setter/mover/getter methods; ``frame_id`` names the coordinate
    frame the point is defined in (e.g. "stereo_left").
    """

    def __init__(self, x: float, y: float, z: float, frame_id: str = "stereo_left"):
        self._x, self._y, self._z = x, y, z
        self._frame_id = frame_id

    def set_pos(self, x: float, y: float, z: float):
        """Overwrite the stored coordinates."""
        self._x, self._y, self._z = x, y, z

    def move_pos(self, x: float, y: float, z: float):
        """Translate the point by the given per-axis offsets."""
        self._x, self._y, self._z = self._x + x, self._y + y, self._z + z

    def get_pos(self) -> Tuple[float, float, float]:
        """Return the coordinates as an (x, y, z) tuple."""
        return self._x, self._y, self._z
class ThreeDimensionalPosition(object):
    def __init__(
        self,
        calibration_left: CalibrationData,
        calibration_right: CalibrationData,
        binning_x: int = 0,
        binning_y: int = 0,
        roi_offset_x: int = 0,
        roi_offset_y: int = 0,
    ):
        """
        Class determining the 3D position of objects from disparity images.
        Args:
            calibration_left(CalibrationData): calibration for left camera
            calibration_right(CalibrationData): calibration for right camera
            binning_x(int): binning between original camera and disparity image in x direction
            binning_y(int): binning between original camera and disparity image in y direction
            roi_offset_x(int): RoI offset in x
            roi_offset_y(int): RoI offset in y
        """
        # NOTE(review): the default binning of 0 would divide by zero in
        # unrectify_rectangle; callers presumably always pass >= 1 -- confirm.
        self._calibration_left = calibration_left
        self._calibration_right = calibration_right
        self._binning_x = binning_x
        self._binning_y = binning_y
        self._roi_offset_x = roi_offset_x
        self._roi_offset_y = roi_offset_y
def unrectify_rectangle(self, x: int, y: int, width: int, height: int):
"""
Converts rectified to unrectified coordinates using calibration matrices.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): height of bbox
Returns:
x, y, width, height in unrectified coordinates
"""
# not rectified coordinates
pt_distorted = np.array([[float(x), float(y)], [float(x + width), float(y + height),],])
pt_distorted = pt_distorted[:, np.newaxis, :]
# rectify points
pt_undistorted = cv2.undistortPoints(
pt_distorted,
self._calibration_left.intrinsic_calibration.intrinsic_matrix,
self._calibration_left.distortion_calibration.distortion_matrix,
R=self._calibration_left.rectification_matrix.rectification_matrix,
P=self._calibration_left.projection_matrix.projection_matrix,
)
# get new coords
x_out = pt_undistorted[0][0][0]
y_out = pt_undistorted[0][0][1]
w_out = pt_undistorted[1][0][0] - pt_undistorted[0][0][0]
h_out = pt_undistorted[1][0][1] - pt_undistorted[0][0][1]
# binning in x and y (camera images were binned before
# disparity calculation)
return (
int(round(x_out / float(self._binning_x))),
int(round(y_out / float(self._binning_y))),
int(round(w_out / float(self._binning_x))),
int(round(h_out / float(self._binning_y))),
)
def determine_disparity(self, x: int, y: int, width: int, height: int, disparity_image: np.ndarray) -> float:
"""
Calculates disparity from unrectified coordinates using calibration matrices and disparity image input.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): height of bbox
disparity_image(np.ndarray): disparity image
Returns:
float: median disparity in RoI
"""
disparity_crop = disparity_image[y : y + height, x : x + width]
# image = cv2.rectangle(
# disparity_image, (int(x), int(y)), (int(x) + int(width), int(y) + int(height)), (255, 255, 255), 1,
# )
# cv2.imwrite("/home/muelju3/disp.png", image)
return np.nanmedian(disparity_crop)
def determine_three_dimensional_position(
self, x: int, y: int, width: int, height: int, disparity_image: np.ndarray
) -> ThreeDPosition:
"""
Calculates 3d position from rectified coordinates using calibration matrices and disparity image input.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): weight of bbox
disparity_image(np.ndarray): disparity image
Returns:
ThreeDPosition: ThreeDPosition
"""
x_u, y_u, width_u, height_u = self.unrectify_rectangle(x=x, y=y, width=width, height=height)
disparity = self.determine_disparity(
x=x_u - int(round(self._roi_offset_x / self._binning_x)),
y=y_u - int(round(self._roi_offset_y / self._binning_y)),
width=width_u,
height=height_u,
disparity_image=disparity_image,
)
# all values inside bbox are nan --> no depth
if disparity == 0.0 or math.isnan(disparity):
return ThreeDPosition(x=-1.0, y=-1.0, z=-1.0, frame_id="stereo_left")
return self.twod_point_to_threed_from_disparity(x=x + width / 2.0, y=y + height / 2.0, disparity=disparity)
def twod_point_to_threed_from_disparity(self, x, y, disparity):
# get calibration values
left_fx = self._calibration_left.intrinsic_calibration.fx
left_fy = self._calibration_left.intrinsic_calibration.fy
left_cx = self._calibration_left.intrinsic_calibration.cx
left_cy = self._calibration_left.intrinsic_calibration.cy
tx = -1.0 * self._calibration_right.projection_matrix.baseline
# determine 3d pos
x_world = left_fy * tx * x - left_fy * left_cx * tx
y_world = left_fx * tx * y - left_fx * left_cy * tx
z_world = left_fx * left_fy * tx
# normalize
w = -1.0 * self._binning_x * left_fy * disparity
return ThreeDPosition(x=x_world / w, y=y_world / w, z=z_world / w, frame_id="stereo_left")
def twod_point_to_threed_from_depth(self, x: int, y: int, depth: float) -> float:
disparity = self.depth_to_disparity(depth)
return self.twod_point_to_threed_from_disparity(x, y, disparity)
def disparity_to_depth(self, disparity: float) -> float:
"""
Converts disparity to depth.
Args:
disparity(float): Disparity in pixels
Returns:
float: depth value in meters
"""
tx = -1.0 * self._calibration_right.projection_matrix.tx
return tx / (disparity * self._binning_x)
def depth_to_disparity(self, depth: float) -> float:
"""
Converts depth to disparity.
Args:
depth(float): Depth in meters
Returns:
float: disparity in meters
"""
tx = -1.0 * self._calibration_right.projection_matrix.tx
return tx / (depth * self._binning_x)
def twod_from_threed(self, x: float, y: float, z: float):
"""
Calculates hypothesis size in pixels based on depth of object.
Args:
x(float): 3D position x coordinate
y(float): 3D position z coordinate
z(float): 3D position y coordinate
Returns:
int, int: 2d pos
"""
# translation = depth
t_vec = np.array([0.0, 0.0, 0.0])
r_vec = np.array([0.0, 0.0, 0.0])
# world corner points of object (float object assumption)
world_points = np.array([[x, y, z],])
# project world points on image plane
image_points = cv2.projectPoints(
world_points,
r_vec,
t_vec,
self._calibration_left.intrinsic_calibration.intrinsic_matrix,
distCoeffs=self._calibration_left.distortion_calibration.distortion_matrix,
)[0].tolist()
# determine box width and height
return image_points[0][0][0], image_points[0][0][1]
| [
"numpy.nanmedian",
"cv2.projectPoints",
"numpy.array",
"cv2.undistortPoints",
"math.isnan"
] | [((2612, 2923), 'cv2.undistortPoints', 'cv2.undistortPoints', (['pt_distorted', 'self._calibration_left.intrinsic_calibration.intrinsic_matrix', 'self._calibration_left.distortion_calibration.distortion_matrix'], {'R': 'self._calibration_left.rectification_matrix.rectification_matrix', 'P': 'self._calibration_left.projection_matrix.projection_matrix'}), '(pt_distorted, self._calibration_left.\n intrinsic_calibration.intrinsic_matrix, self._calibration_left.\n distortion_calibration.distortion_matrix, R=self._calibration_left.\n rectification_matrix.rectification_matrix, P=self._calibration_left.\n projection_matrix.projection_matrix)\n', (2631, 2923), False, 'import cv2\n'), ((4415, 4443), 'numpy.nanmedian', 'np.nanmedian', (['disparity_crop'], {}), '(disparity_crop)\n', (4427, 4443), True, 'import numpy as np\n'), ((7875, 7900), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (7883, 7900), True, 'import numpy as np\n'), ((7917, 7942), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (7925, 7942), True, 'import numpy as np\n'), ((8033, 8054), 'numpy.array', 'np.array', (['[[x, y, z]]'], {}), '([[x, y, z]])\n', (8041, 8054), True, 'import numpy as np\n'), ((5523, 5544), 'math.isnan', 'math.isnan', (['disparity'], {}), '(disparity)\n', (5533, 5544), False, 'import math\n'), ((8126, 8320), 'cv2.projectPoints', 'cv2.projectPoints', (['world_points', 'r_vec', 't_vec', 'self._calibration_left.intrinsic_calibration.intrinsic_matrix'], {'distCoeffs': 'self._calibration_left.distortion_calibration.distortion_matrix'}), '(world_points, r_vec, t_vec, self._calibration_left.\n intrinsic_calibration.intrinsic_matrix, distCoeffs=self.\n _calibration_left.distortion_calibration.distortion_matrix)\n', (8143, 8320), False, 'import cv2\n')] |
import numpy as np
class Layer(object):
    """Base class for network layers; concrete layers override the hooks below."""
    def __init__(self, type, has_gradient):
        # NOTE: `type` shadows the builtin; name kept for interface compatibility.
        self.type = type
        self.has_gradient = has_gradient
        # shapes default to (0, 0); presumably set by concrete layers -- TODO confirm
        self.input_shape = (0, 0)
        self.output_shape = (0, 0)
        # placeholder bias/weight arrays (1x1 zeros); presumably resized by subclasses
        self.b = np.zeros((1, 1))
        self.w = np.zeros((1, 1))
    def feed_forward(self, a_in):
        # hook: full forward pass for input activation `a_in`; base returns None
        return None
    def compute_z(self, a_in):
        # hook: compute the pre-activation from the input; base returns None
        return None
    def compute_a(self, z):
        # hook: apply the activation function to `z`; base returns None
        return None
    def compute_da(self, z):
        # hook: derivative of the activation at `z` (name suggests da/dz) -- TODO confirm
        return None
    def compute_gradient(self, prev_delta, sigma_Z, A):
        # hook: backprop gradients; base returns the (None, None) pair
        return None, None | [
"numpy.zeros"
] | [((240, 256), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (248, 256), True, 'import numpy as np\n'), ((274, 290), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (282, 290), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020, University of Southampton
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md file in the project root for full license information.
"""
import uuid
from pathlib import Path
import imageio
import joblib
import numpy as np
import pandas as pd
from tqdm import tqdm
from correct_images.tools.joblib_tqdm import tqdm_joblib
from oplab import Console
# functions used to create a trimmed auv_ekf_<camera_name>.csv file based on
# user's selection of images
def trim_csv_files(image_files_paths, original_csv_path, trimmed_csv_path):
    """Write a trimmed copy of the navigation CSV for a user-selected image list.

    Parameters
    -----------
    image_files_paths : str
        user provided list of imagenames which need to be processed
    original_csv_path : str
        path to the auv_ekf_<camera_name.csv>
    trimmed_csv_path : str
        path to trimmed csv which needs to be created
    """
    wanted_stems = get_imagename_list(image_files_paths)
    full_df = pd.read_csv(original_csv_path)
    # keep only rows whose image stem appears in the user's list
    keep_paths = [
        rel_path
        for rel_path in full_df["relative_path"]
        if Path(rel_path).stem in wanted_stems
    ]
    subset = full_df.loc[full_df["relative_path"].isin(keep_paths)]
    subset.to_csv(trimmed_csv_path, index=False, header=True)
def get_imagename_list(image_files_paths):
    """Return the stem (file name without suffix) of every entry in a file list.

    Parameters
    -----------
    image_files_paths : Path to the filelist provided in correct_images.yaml
    """
    with open(image_files_paths, "r") as list_file:
        raw_lines = list_file.read().splitlines()
    return [Path(line).stem for line in raw_lines]
# TODO is this used?
# store into memmaps the distance and image numpy files
def load_memmap_from_numpyfilelist(filepath, numpyfilelist: list):
    """Generate memmaps from numpy arrays
    Parameters
    -----------
    filepath : Path
        path to output memmap folder
    numpyfilelist : list
        list of paths to numpy files
    Returns
    --------
    Path, numpy.ndarray
        memmap_path and memmap_handle

    Note:
        The memmap is shaped (len(numpyfilelist), *first_array.shape), so all
        listed arrays are assumed to share the first file's shape -- TODO confirm.
    """
    # first array fixes the per-item shape of the stacked memmap
    image = np.load(str(numpyfilelist[0]))
    list_shape = [len(numpyfilelist)]
    list_shape = list_shape + list(image.shape)
    # random file name avoids collisions between concurrent runs
    filename_map = "memmap_" + str(uuid.uuid4()) + ".map"
    memmap_path = Path(filepath) / filename_map
    memmap_handle = np.memmap(
        filename=memmap_path,
        mode="w+",
        shape=tuple(list_shape),
        dtype=np.float32,
    )
    Console.info("Loading memmaps from numpy files...")

    def memmap_loader(numpyfilelist, memmap_handle, idx):
        # each worker fills one slice of the shared memmap
        memmap_handle[idx, ...] = np.load(numpyfilelist[idx])

    # tqdm_joblib routes joblib's progress callbacks into the tqdm bar
    with tqdm_joblib(
        tqdm(desc="numpy images to memmap", total=len(numpyfilelist))
    ):
        # n_jobs=-2: use all cores but one
        joblib.Parallel(n_jobs=-2, verbose=0)(
            joblib.delayed(memmap_loader)(numpyfilelist, memmap_handle, idx)
            for idx in range(len(numpyfilelist))
        )
    return memmap_path, memmap_handle
# save processed image in an output file with
# given output format
def write_output_image(image, filename, dest_path, dest_format):
    """Write a processed image to ``dest_path/filename.dest_format``.

    Parameters
    -----------
    image : numpy.ndarray
        image data to be written
    filename : string
        name of output image file (without extension)
    dest_path : Path
        path to the output folder
    dest_format : string
        output image format (file extension)

    Returns
    --------
    Path to the written file.
    """
    out_path = dest_path / (filename + "." + dest_format)
    # channel-first (3, H, W) data is rearranged to (H, W, 3) for the writer
    if image.shape[0] == 3:
        image = image.transpose((1, 2, 0))
    imageio.imwrite(out_path, image)
    return out_path
| [
"pandas.read_csv",
"pathlib.Path",
"imageio.imwrite",
"uuid.uuid4",
"joblib.Parallel",
"joblib.delayed",
"numpy.load",
"oplab.Console.info"
] | [((1042, 1072), 'pandas.read_csv', 'pd.read_csv', (['original_csv_path'], {}), '(original_csv_path)\n', (1053, 1072), True, 'import pandas as pd\n'), ((2659, 2710), 'oplab.Console.info', 'Console.info', (['"""Loading memmaps from numpy files..."""'], {}), "('Loading memmaps from numpy files...')\n", (2671, 2710), False, 'from oplab import Console\n'), ((3745, 3778), 'imageio.imwrite', 'imageio.imwrite', (['file_path', 'image'], {}), '(file_path, image)\n', (3760, 3778), False, 'import imageio\n'), ((2479, 2493), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (2483, 2493), False, 'from pathlib import Path\n'), ((2804, 2831), 'numpy.load', 'np.load', (['numpyfilelist[idx]'], {}), '(numpyfilelist[idx])\n', (2811, 2831), True, 'import numpy as np\n'), ((1771, 1778), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (1775, 1778), False, 'from pathlib import Path\n'), ((2940, 2977), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-2)', 'verbose': '(0)'}), '(n_jobs=-2, verbose=0)\n', (2955, 2977), False, 'import joblib\n'), ((2438, 2450), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2448, 2450), False, 'import uuid\n'), ((1192, 1202), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1196, 1202), False, 'from pathlib import Path\n'), ((2991, 3020), 'joblib.delayed', 'joblib.delayed', (['memmap_loader'], {}), '(memmap_loader)\n', (3005, 3020), False, 'import joblib\n')] |
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
查找高频词汇
"""
import random
from collections import Counter
from operator import itemgetter

import feedparser
import numpy as np

from Supervised.Classification.NaiveBayes import text_utils
from Supervised.Classification.NaiveBayes import classif_naive_bayes
def calc_most_freq(vocab_list, full_text):
    """Return the 10 most frequent vocabulary tokens as (token, count) pairs.

    Counting is done with one Counter pass over ``full_text`` instead of an
    O(len(full_text)) ``list.count`` call per vocabulary token.

    Args:
        vocab_list: tokens to look up (tokens absent from full_text count 0)
        full_text: flat list of every token observed in the corpus

    Returns:
        list: up to 10 (token, count) tuples, sorted by count, descending.
    """
    counts = Counter(full_text)
    freq_pairs = [(token, counts[token]) for token in vocab_list]
    # stable sort preserves vocab_list order among equal counts,
    # matching the original dict-based implementation
    freq_pairs.sort(key=itemgetter(1), reverse=True)
    return freq_pairs[0:10]
def local_words(feed1, feed0):
    """
    Train and evaluate a naive Bayes classifier that tells two RSS feeds apart.

    feed1 entries are labelled 1, feed0 entries are labelled 0; 3 entries are
    held out at random for testing and the error rate is printed.

    Returns:
        (vocab_list, p0v, p1v): the pruned vocabulary and the per-class
        log-probability vectors from train_naive_bayes.
    """
    # load the RSS data
    doc_list = []
    class_list = []
    full_text = []
    # NOTE(review): len() of a parsed feed object is used here, not
    # len(feed['entries']) -- confirm this yields the intended entry count.
    min_len = min(len(feed0), len(feed1))
    for i in range(min_len):
        # interleave one entry from each feed (labels 1 and 0)
        word_list = text_utils.text_parse_cn(feed1['entries'][i]['summary'])
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(1)
        word_list = text_utils.text_parse_cn(feed0['entries'][i]['summary'])
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(0)
    vocab_list = text_utils.create_vocab_list(doc_list)
    # drop the most frequent tokens (they carry little class information)
    top30words = calc_most_freq(vocab_list, full_text)
    for pair in top30words:
        if pair[0] in vocab_list:
            vocab_list.remove(pair[0])
    # randomly pick 3 documents as the test set
    test_set = [int(num) for num in random.sample(range(2 * min_len), 3)]
    training_set = list(set(range(2 * min_len)) - set(test_set))
    # train the model on the remaining documents
    training_mat = []
    training_class = []
    for doc_index in training_set:
        training_mat.append(text_utils.bag_words2vec(vocab_list, doc_list[doc_index]))
        training_class.append(class_list[doc_index])
    p0v, p1v, p_spam = classif_naive_bayes.train_naive_bayes(
        np.array(training_mat),
        np.array(training_class)
    )
    # evaluate on the 3 held-out documents
    error_count = 0
    for doc_index in test_set:
        word_vec = text_utils.bag_words2vec(vocab_list, doc_list[doc_index])
        if classif_naive_bayes.classify_naive_bayes(
            np.array(word_vec),
            p0v,
            p1v,
            p_spam
        ) != class_list[doc_index]:
            error_count += 1
    print("the error rate is {}".format(error_count / len(test_set)))
    return vocab_list, p0v, p1v
def test_rss():
    """
    Train/evaluate the RSS-source classifier on a sports and a tech feed.

    Note: the feeds contain very few entries, so results are anecdotal.
    """
    sports_feed = feedparser.parse('http://rss.sina.com.cn/roll/sports/hot_roll.xml')
    tech_feed = feedparser.parse('http://rss.sina.com.cn/tech/rollnews.xml')
    _vocab, _p0, _p1 = local_words(sports_feed, tech_feed)
def get_top_words():
    """
    Train on two RSS feeds and print the tokens whose class log-probability
    exceeds -6.0, most probable first.

    Note: the feeds contain very few entries, so results are anecdotal.
    """
    ny = feedparser.parse('http://rss.sina.com.cn/roll/sports/hot_roll.xml')
    sf = feedparser.parse('http://rss.sina.com.cn/tech/rollnews.xml')
    vocab_list, p_sf, p_ny = local_words(ny, sf)
    threshold = -6.0
    top_sf, top_ny = [], []
    for i in range(len(p_sf)):
        if p_sf[i] > threshold:
            top_sf.append((vocab_list[i], p_sf[i]))
        if p_ny[i] > threshold:
            top_ny.append((vocab_list[i], p_ny[i]))
    print('this is sports:')
    for word, _prob in sorted(top_sf, key=itemgetter(1), reverse=True):
        print(word)
    print('his is tech:')
    for word, _prob in sorted(top_ny, key=itemgetter(1), reverse=True):
        print(word)
if __name__ == "__main__":
    # test_rss() only trains and prints the error rate;
    # get_top_words() additionally prints the per-class tokens.
    #test_rss()
    get_top_words()
| [
"feedparser.parse",
"operator.itemgetter",
"numpy.array",
"Supervised.Classification.NaiveBayes.text_utils.create_vocab_list",
"Supervised.Classification.NaiveBayes.text_utils.text_parse_cn",
"Supervised.Classification.NaiveBayes.text_utils.bag_words2vec"
] | [((1106, 1144), 'Supervised.Classification.NaiveBayes.text_utils.create_vocab_list', 'text_utils.create_vocab_list', (['doc_list'], {}), '(doc_list)\n', (1134, 1144), False, 'from Supervised.Classification.NaiveBayes import text_utils\n'), ((2359, 2426), 'feedparser.parse', 'feedparser.parse', (['"""http://rss.sina.com.cn/roll/sports/hot_roll.xml"""'], {}), "('http://rss.sina.com.cn/roll/sports/hot_roll.xml')\n", (2375, 2426), False, 'import feedparser\n'), ((2436, 2496), 'feedparser.parse', 'feedparser.parse', (['"""http://rss.sina.com.cn/tech/rollnews.xml"""'], {}), "('http://rss.sina.com.cn/tech/rollnews.xml')\n", (2452, 2496), False, 'import feedparser\n'), ((2627, 2694), 'feedparser.parse', 'feedparser.parse', (['"""http://rss.sina.com.cn/roll/sports/hot_roll.xml"""'], {}), "('http://rss.sina.com.cn/roll/sports/hot_roll.xml')\n", (2643, 2694), False, 'import feedparser\n'), ((2704, 2764), 'feedparser.parse', 'feedparser.parse', (['"""http://rss.sina.com.cn/tech/rollnews.xml"""'], {}), "('http://rss.sina.com.cn/tech/rollnews.xml')\n", (2720, 2764), False, 'import feedparser\n'), ((755, 811), 'Supervised.Classification.NaiveBayes.text_utils.text_parse_cn', 'text_utils.text_parse_cn', (["feed1['entries'][i]['summary']"], {}), "(feed1['entries'][i]['summary'])\n", (779, 811), False, 'from Supervised.Classification.NaiveBayes import text_utils\n'), ((932, 988), 'Supervised.Classification.NaiveBayes.text_utils.text_parse_cn', 'text_utils.text_parse_cn', (["feed0['entries'][i]['summary']"], {}), "(feed0['entries'][i]['summary'])\n", (956, 988), False, 'from Supervised.Classification.NaiveBayes import text_utils\n'), ((1783, 1805), 'numpy.array', 'np.array', (['training_mat'], {}), '(training_mat)\n', (1791, 1805), True, 'import numpy as np\n'), ((1815, 1839), 'numpy.array', 'np.array', (['training_class'], {}), '(training_class)\n', (1823, 1839), True, 'import numpy as np\n'), ((1930, 1987), 'Supervised.Classification.NaiveBayes.text_utils.bag_words2vec', 
'text_utils.bag_words2vec', (['vocab_list', 'doc_list[doc_index]'], {}), '(vocab_list, doc_list[doc_index])\n', (1954, 1987), False, 'from Supervised.Classification.NaiveBayes import text_utils\n'), ((473, 486), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (483, 486), False, 'from operator import itemgetter\n'), ((1601, 1658), 'Supervised.Classification.NaiveBayes.text_utils.bag_words2vec', 'text_utils.bag_words2vec', (['vocab_list', 'doc_list[doc_index]'], {}), '(vocab_list, doc_list[doc_index])\n', (1625, 1658), False, 'from Supervised.Classification.NaiveBayes import text_utils\n'), ((2053, 2071), 'numpy.array', 'np.array', (['word_vec'], {}), '(word_vec)\n', (2061, 2071), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import roslib
import rospy
import std_msgs
import numpy
from sensor_msgs.msg import BatteryState
# Publishes a simulated battery charge on the "battery" topic every 0.5 s.
if __name__ == "__main__":
    rospy.init_node('battery_node')
    pub = rospy.Publisher("battery", BatteryState, queue_size=1)
    # reference time: charge decays relative to node start-up
    tinit = rospy.Time.now().to_time()
    speed = 0.05  # decay rate of the simulated charge
    try:
        while not rospy.is_shutdown():
            msg = BatteryState()
            t = rospy.Time.now().to_time()
            # charge = 10 * e^(-speed * elapsed) - 1: starts at 9, decays toward -1
            msg.charge = 10 * numpy.exp(speed*(tinit-t)) - 1.0
            rospy.sleep(0.5)
            pub.publish(msg)
    except Exception as e:
        print(e) | [
"rospy.is_shutdown",
"rospy.init_node",
"sensor_msgs.msg.BatteryState",
"rospy.Time.now",
"numpy.exp",
"rospy.sleep",
"rospy.Publisher"
] | [((192, 223), 'rospy.init_node', 'rospy.init_node', (['"""battery_node"""'], {}), "('battery_node')\n", (207, 223), False, 'import rospy\n'), ((234, 288), 'rospy.Publisher', 'rospy.Publisher', (['"""battery"""', 'BatteryState'], {'queue_size': '(1)'}), "('battery', BatteryState, queue_size=1)\n", (249, 288), False, 'import rospy\n'), ((301, 317), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (315, 317), False, 'import rospy\n'), ((373, 392), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (390, 392), False, 'import rospy\n'), ((412, 426), 'sensor_msgs.msg.BatteryState', 'BatteryState', ([], {}), '()\n', (424, 426), False, 'from sensor_msgs.msg import BatteryState\n'), ((547, 563), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (558, 563), False, 'import rospy\n'), ((444, 460), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (458, 460), False, 'import rospy\n'), ((501, 531), 'numpy.exp', 'numpy.exp', (['(speed * (tinit - t))'], {}), '(speed * (tinit - t))\n', (510, 531), False, 'import numpy\n')] |
import os
import pandas as pd
import numpy as np
from itertools import chain
from pmaf.internal._extensions._cpython._pmafc_extension._helper import (
make_sequence_record_tuple,
)
from pmaf.internal.io._seq import SequenceIO
from typing import Generator, Tuple, Union
def read_qiime_taxonomy_map(taxonomy_tsv_fp: str) -> pd.DataFrame:
    """Read a taxonomy file in QIIME/Greengenes notation.

    Parameters
    ----------
    taxonomy_tsv_fp : str
        Path to a two-column, headerless TSV (``<feature-id>\\t<lineage>``).

    Returns
    -------
    pd.DataFrame
        Single-column frame named ``taxonomy`` indexed by feature id (as str).
        (The original annotation said :class:`~pandas.Series`, but the value
        returned is a one-column DataFrame.)

    Raises
    ------
    FileNotFoundError
        If ``taxonomy_tsv_fp`` does not exist.
    ValueError
        If the file does not contain exactly one taxonomy column.
    """
    # guard clause: fail fast on a missing file
    if not os.path.exists(taxonomy_tsv_fp):
        raise FileNotFoundError("Given file does not exists.")
    with open(taxonomy_tsv_fp, "r") as map_file:
        tmp_tax_map = pd.read_csv(map_file, sep="\t", index_col=0, header=None)
    if tmp_tax_map.shape[1] != 1:
        raise ValueError("Invalid taxonomy file.")
    # headerless read names the remaining column 1; give it a real name
    tax_map = tmp_tax_map.rename(columns={1: "taxonomy"})
    # feature ids may parse as ints; normalize the index to strings
    tax_map.index = tax_map.index.astype(str)
    return tax_map
def parse_qiime_taxonomy_map(taxonomy_map_df: pd.DataFrame) -> pd.DataFrame:
    """Parse a QIIME/Greengenes taxonomy :class:`~pandas.DataFrame`.

    Explodes each consensus lineage (``k__...; p__...; ...``) into one column
    per rank prefix, in the order the prefixes are first encountered. Ranks
    are reorganized but not validated.

    Parameters
    ----------
    taxonomy_map_df : pd.DataFrame
        One-column DataFrame of lineage strings indexed by feature id
        (as produced by :func:`read_qiime_taxonomy_map`).

    Returns
    -------
    pd.DataFrame
        Taxonomy sheet with one column per rank; missing taxa are None.
    """
    if not isinstance(taxonomy_map_df, pd.DataFrame):
        raise TypeError("`taxonomy_map_df` must be pandas DataFrame.")
    if taxonomy_map_df.empty or (taxonomy_map_df.shape[1] != 1):
        raise ValueError("DataFrame cannot be empty.")
    taxonomy_map = taxonomy_map_df.iloc[:, 0]
    # flat list of every rank prefix (the part before "__") across all lineages
    zip_list = list(
        chain(
            *taxonomy_map.map(
                lambda lineage: [
                    e.strip().split("__")[0] for e in lineage.split(";") if ("__" in e)
                ]
            )
            .ravel()
            .tolist()
        )
    )

    def get_unique(zip_list):
        """Return the unique values of `zip_list`, preserving first-seen order.

        Parameters
        ----------
        zip_list :
            iterable of rank prefixes

        Returns
        -------
        list of unique prefixes
        """
        seen = set()
        seen_add = seen.add
        return [x for x in zip_list if not (x in seen or seen_add(x))]

    found_levels = get_unique(zip_list)

    def allocator(lineage, levels):  # TODO: No need for vectorization, make a Cython
        """Explode one lineage string into a rank-ordered list of taxon names.

        Parameters
        ----------
        lineage :
            consensus lineage string in QIIME/Greengenes notation
        levels :
            ordered list of allowed rank prefixes

        Returns
        -------
        list of taxon names, one per entry in `levels` ('' when absent)
        """
        # function instead or think about something else.
        """Function to parse individual taxonomic consensus lineage in
        QIIME/Greengenes notation. Vectorization in this case does not provide speed
        boost, I assumed it did back in the day."""
        taxa_dict = {
            e[0]: e[1]
            for e in [e.strip().split("__") for e in lineage.split(";") if ("__" in e)]
        }  # Anonymous function that explodes lineage into dictionary
        taxa_dict_allowed = {
            rank: taxa_dict[rank] for rank in taxa_dict.keys() if rank in levels
        }  # Drops forbidden ranks
        # Following loop sets unavailable ranks to '', which is necessary for generating taxonomy sheet
        for key in levels:
            if not (key in taxa_dict_allowed.keys()):
                taxa_dict_allowed[key] = ""
        taxa_list_ordered = [
            taxa_dict_allowed[rank] for rank in levels
        ]  # Sort ranks according to Consts.MAIN_RANKS rank order
        return taxa_list_ordered

    allocator_vectorized = np.vectorize(
        allocator, excluded=["levels"], otypes=[list]
    )  # Vectorizes function in order gain performance
    master_taxonomy_sheet = pd.DataFrame(
        index=list(taxonomy_map.index),
        data=list(
            allocator_vectorized(lineage=list(taxonomy_map.values), levels=found_levels)
        ),
        columns=found_levels,
    )
    # normalize empty strings (and NaN) to None for downstream consumers
    return master_taxonomy_sheet.applymap(
        lambda x: None if (x == "" or pd.isna(x)) else x
    )
# TODO: Generating two products with different sizes are not good solution.
# Improve the generator to keep products consistent.
def parse_qiime_sequence_generator(
    sequence_fasta_fp: str, chunk_size: int, alignment: bool
) -> Generator[Union[Tuple[dict, pd.DataFrame], pd.DataFrame], None, None]:
    """Parser for sequence/alignment data in FASTA format provided in QIIME-styled databases.

    Makes two passes over the file: the first collects global statistics
    (max/min sequence length, total record count), the second yields chunked
    DataFrames. The FIRST item yielded is a ``(stats_dict, chunk_df)`` tuple;
    every subsequent item is just a ``chunk_df``.

    Parameters
    ----------
    sequence_fasta_fp :
        Sequence data in FASTA format
    chunk_size :
        Chunk size to generate chunk :class:`~pandas.DataFrame`.
    alignment :
        True if MSA are supplied (skips the tabulation column).
    sequence_fasta_fp: str :
    chunk_size: int :
    alignment: bool :

    Returns
    -------
    Generator of chunk DataFrames (first one paired with the stats dict).
    """
    seqio = SequenceIO(sequence_fasta_fp, ftype="fasta", upper=True)
    # pass 1: global statistics over the whole file
    max_seq_length = 0
    min_seq_length = 99999  # Assuming no marker sequence can be longer than this
    max_id_length = 0
    max_rows = 0
    for s_id, s_seq in seqio.pull_parser(id=True, description=False, sequence=True):
        seq_length = len(s_seq)
        id_length = len(str(s_id))
        max_seq_length = seq_length if seq_length > max_seq_length else max_seq_length
        min_seq_length = seq_length if seq_length < min_seq_length else min_seq_length
        max_id_length = id_length if id_length > max_id_length else max_id_length
        max_rows = max_rows + 1
    # pass 2: fresh parser for the chunked yield loop
    seq_iterator = seqio.pull_parser(id=True, description=False, sequence=True)
    chunk_counter = chunk_size
    next_chunk = True
    first_chunk = True
    df_columns = (
        ["index", "sequence", "length", "tab"]
        if not alignment
        else ["index", "sequence", "length"]
    )
    while next_chunk:
        sequences_list = []
        for s_id, s_seq in seq_iterator:
            if not alignment:
                # C-extension helper also computes the extra "tab" column
                record_list = make_sequence_record_tuple(str(s_id), s_seq)
            else:
                record_list = [str(s_id), s_seq, len(s_seq)]
            if chunk_counter > 1:
                sequences_list.append(record_list)
                chunk_counter = chunk_counter - 1
            else:
                # chunk full: reset the counter and flush below
                chunk_counter = chunk_size
                sequences_list.append(record_list)
                break
        if len(sequences_list) > 0:
            chunk_df = pd.DataFrame.from_records(
                sequences_list, columns=df_columns, index=["index"]
            )
            if not alignment:
                chunk_df = chunk_df.astype({"length": "int32", "tab": "int32"})
            if first_chunk:
                first_chunk = False
                pre_state_dict = {
                    "sequence": max_seq_length,
                    "min_sequence": min_seq_length,
                    "max_rows": max_rows,
                }
                yield pre_state_dict, chunk_df
            else:
                yield chunk_df
        else:
            # iterator exhausted with nothing buffered: stop the generator
            next_chunk = False
            return
| [
"pandas.DataFrame.from_records",
"os.path.exists",
"pandas.read_csv",
"pmaf.internal.io._seq.SequenceIO",
"pandas.isna",
"numpy.vectorize"
] | [((636, 667), 'os.path.exists', 'os.path.exists', (['taxonomy_tsv_fp'], {}), '(taxonomy_tsv_fp)\n', (650, 667), False, 'import os\n'), ((3834, 3893), 'numpy.vectorize', 'np.vectorize', (['allocator'], {'excluded': "['levels']", 'otypes': '[list]'}), "(allocator, excluded=['levels'], otypes=[list])\n", (3846, 3893), True, 'import numpy as np\n'), ((5071, 5127), 'pmaf.internal.io._seq.SequenceIO', 'SequenceIO', (['sequence_fasta_fp'], {'ftype': '"""fasta"""', 'upper': '(True)'}), "(sequence_fasta_fp, ftype='fasta', upper=True)\n", (5081, 5127), False, 'from pmaf.internal.io._seq import SequenceIO\n'), ((748, 805), 'pandas.read_csv', 'pd.read_csv', (['map_file'], {'sep': '"""\t"""', 'index_col': '(0)', 'header': 'None'}), "(map_file, sep='\\t', index_col=0, header=None)\n", (759, 805), True, 'import pandas as pd\n'), ((6613, 6691), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['sequences_list'], {'columns': 'df_columns', 'index': "['index']"}), "(sequences_list, columns=df_columns, index=['index'])\n", (6638, 6691), True, 'import pandas as pd\n'), ((4275, 4285), 'pandas.isna', 'pd.isna', (['x'], {}), '(x)\n', (4282, 4285), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
'''
==============================================================================
Associative Memory (AM) classifier for binary Hyperdimensional (HD) Comuputing
==============================================================================
'''
import time
import sys
import torch as t
import numpy as np
import cloudpickle as cpckl
from hd_encoder import sng_encoder
from am_classifier import am_classifier
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "17.5.2019"
class hd_classifier_ext():
    """cffi bridge to the native HD computing library.

    Loads ``hdlib_<arch>.so`` with declarations taken from the project's
    ``_hd_encoder.h`` / ``_hd_classifier.h`` headers, and shares torch tensor
    memory with the C structs via raw pointer casts (no copies).
    """
    def __init__(self):
        # local imports keep cffi optional for users of the pure-python path
        import cffi
        import os
        import platform
        # headers and shared object live one directory above this file
        path = os.path.dirname(os.path.abspath(os.path.join(__file__, '..')))
        self._ffi = cffi.FFI()
        self._ffi.cdef('\n'.join([
            open(os.path.join(path, '_hd_encoder.h'), 'r').read(),
            open(os.path.join(path, '_hd_classifier.h'), 'r').read()
        ]))
        # pick the shared object built for the current CPU architecture
        self._lib = self._ffi.dlopen(
            os.path.join(path, f'hdlib_{platform.machine()}.so')
        )
    def encode(self, X):
        """Encode one sample in-place into the C encoder's ngramm sum buffer."""
        # compute dimensionality
        n_samples, n_feat = X.shape
        self._lib.hd_encoder_encode(
            self._encoder,
            self._ffi.cast('const feature_t * const', X.data_ptr()),
            n_feat
        )
    def save(self, filename):
        """Serialize classifier + encoder state via the C library."""
        self._lib.save(self._classifier, self._encoder, self._ffi.new("char[]", filename.encode('ascii')))
    def load(self, filename):
        """Restore classifier + encoder state previously written by save()."""
        # prepare classifier and encoder structs
        self._classifier = self._ffi.new('struct hd_classifier_t *')
        self._encoder = self._ffi.new('struct hd_encoder_t *')
        self._lib.hamming_distance_init()
        # load the data
        result = self._lib.load(self._classifier, self._encoder, self._ffi.new("char[]", filename.encode('ascii')))
        assert result == 0
        self._n_classes = self._classifier.n_class
        # prepare data for interaction
        # TODO this will likely be unnecesary in the future (see same lines in am_init)
        self._ngramm_sum_buffer = t.Tensor(self._encoder.n_blk * 32).type(t.int32).contiguous()
        self._encoder.ngramm_sum_buffer = self._ffi.cast('uint32_t * const', self._ngramm_sum_buffer.data_ptr())
    def am_init(self, D, nitem, n_classes, ngramm=3):
        """Allocate encoder and associative-memory structs for training."""
        # round D up to the nearest multiple of 32
        n_blk = int((D + 31) / 32)
        if D != n_blk * 32:
            print(f"Dimensionality given which is not a multiple of 32! Using {n_blk * 32} instead")
            D = n_blk * 32
        self._D = D
        # setup encoder
        self._encoder = self._ffi.new('struct hd_encoder_t *')
        self._lib.hd_encoder_init(self._encoder, n_blk, ngramm, nitem)
        self._lib.hamming_distance_init()
        # overwrite the encoder summing buffer with a torch tensor's pointer
        # this is so that the result can be communicated without copying
        # TODO this will likely be unnecesary in the future
        self._ngramm_sum_buffer = t.Tensor(D).type(t.int32).contiguous()
        self._encoder.ngramm_sum_buffer = self._ffi.cast('uint32_t * const', self._ngramm_sum_buffer.data_ptr())
        # setup classifier
        self._n_classes = n_classes
        self._class_vec_sum = t.Tensor(self._n_classes, self._D).type(t.int32).zero_()
        self._class_vec_cnt = t.Tensor(self._n_classes).type(t.int32).zero_()
        self._classifier = self._ffi.new('struct hd_classifier_t *')
        self._lib.hd_classifier_init(self._classifier, self._encoder.n_blk, n_classes, 0)
        # hand the torch-owned buffers to the C side (shared, not copied)
        self._classifier.class_vec_sum = self._ffi.cast('block_t *', self._class_vec_sum.data_ptr())
        self._classifier.class_vec_cnt = self._ffi.cast('int *', self._class_vec_cnt.data_ptr())
    # TODO: release memory and close self._lib
    def am_update(self, X, y):
        '''
        Update AM
        Parameters
        ----------
        X: numpy array, size = [n_samples, n_feat]
            Training samples
        y: numpy array, size = [n_samples]
            Training labels
        Raises
        ------
        ValueError
            if any label falls outside [0, n_classes)
        '''
        X = t.from_numpy(X).type(t.uint8)
        y = t.from_numpy(y).type(t.int32)
        n_samples = X.shape[0]
        # summation of training vectors
        for sample in range(n_samples):
            y_s = y[sample]
            if (y_s < self._n_classes) and (y_s >= 0):
                self.encode(X[sample].view(1, -1))
                # accumulate this sample's encoding into its class vector
                self._class_vec_sum[y_s].add_(self._ngramm_sum_buffer)
                self._class_vec_cnt[y_s] += self._encoder.ngramm_sum_count
            else:
                raise ValueError("Label is not in range of [{:},{:}], got {:}".format(
                    0, self._n_classes, y_s))
        return
    def am_threshold(self):
        """Binarize the accumulated class vectors (majority threshold) in C."""
        self._lib.hd_classifier_threshold(self._classifier)
    def fit(self, X, y):
        '''
        Train AM
        Parameters
        ----------
        X: numpy array, size = [n_samples, n_feat]
            Training samples
        y: numpy array, size = [n_samples]
            Training labels
        '''
        n_samples, _ = X.shape
        n_classes = t.max(y) + 1
        # FIXME(review): am_init's signature is (D, nitem, n_classes, ngramm=3);
        # passing a single positional argument binds it to D and raises a
        # TypeError for the missing parameters -- this path looks broken.
        self.am_init(n_classes)
        # Train am
        self.am_update(X, y)
        # Thresholding
        self.am_threshold()
        return
    def predict(self, X):
        '''
        Prediction
        Parameters
        ----------
        X: torch tensor, size = [n_samples, _D]
            Input samples to predict.
        Returns
        -------
        dec_values : torch tensor, size = [n_sampels]
            predicted values.
        '''
        X = t.from_numpy(X).type(t.uint8)
        n_samples = X.shape[0]
        dec_values = t.Tensor(n_samples)
        for sample in range(n_samples):
            # classification runs entirely in the C library
            dec_values[sample] = self._lib.hd_classifier_predict(
                self._classifier,
                self._encoder,
                self._ffi.cast('const feature_t * const', X[sample].data_ptr()),
                X[sample].shape[0]
            )
        return dec_values.numpy()
class hd_classifier(am_classifier):
    def __init__(self, D=10000, encoding='sumNgramm', device='cpu', nitem=1, ngramm=3, name='test'):
        '''
        Parameters
        ----------
        D : int
            HD dimension
        encoding : str
            name of the encoding scheme; only 'sumNgramm' is supported
        device : str
            torch device to use; falls back to 'cpu' when CUDA is unavailable
        nitem : int
            number of items in the item memory
        ngramm : int
            n-gramm size
        name : str
            base name used for pickling this classifier to '<name>.txt'
        '''
        self._name = name
        try:
            # Restore a previously pickled classifier if one exists.
            self.load()
        except Exception:
            # No (readable) saved state: build the classifier from scratch.
            # Note: not a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            use_cuda = t.cuda.is_available()
            _device = t.device(device if use_cuda else "cpu")
            # Compare string *content* with ==, not identity with `is`.
            if encoding == 'sumNgramm':
                _encoder = sng_encoder(D, _device, nitem, ngramm)
            else:
                raise ValueError(f'{encoding} encoding not supported')
            super().__init__(D, _encoder, _device)
    def save(self):
        '''
        Pickle this classifier's state to self._name + '.txt'.
        '''
        # `with` guarantees the handle is closed even if dump() raises.
        with open(self._name + '.txt', 'wb') as file:
            cpckl.dump(self.__dict__, file)
    def load(self):
        '''
        Restore this classifier's state from self._name + '.txt'.
        '''
        with open(self._name + '.txt', 'rb') as file:
            self.__dict__ = cpckl.load(file)
    def save2binary_model(self):
        '''
        Save the binarized model to self._name + 'bin.npz'.
        '''
        # NOTE(review): self._am and self._encoding are assumed to be set
        # by the base class -- confirm.
        _am = bin2int(self._am.cpu().type(t.LongTensor).numpy())
        _itemMemory = bin2int(
            self._encoder._itemMemory.cpu().type(t.LongTensor).numpy())
        # np.save() takes a single array and rejects keyword arrays with a
        # TypeError; np.savez() stores the named arrays into '<name>bin.npz'
        # as the docstring intends.
        np.savez(self._name + 'bin', _n_classes=self._n_classes, _am=_am,
                 _itemMemory=_itemMemory, _encoding=self._encoding)
        return
def bin2int(x):
    '''
    Pack a binary array into 32-bit words, least-significant bit first.

    Parameters
    ----------
    x : numpy array size = [u, v]
        input array with binary (0/1) entries

    Returns
    -------
    y : numpy array uint32 size = [u, ceil(v/32)]
        bit b of word w holds x[:, w * 32 + b]
    '''
    u, v = x.shape
    v_out = int(np.ceil(v / 32))
    y = np.zeros((u, v_out), dtype=np.uint32)
    for vidx in range(v_out):
        for bidx in range(32):  # iterate through all bit index
            col = vidx * 32 + bidx
            if col >= v:
                # Remaining bits of the last word stay zero.
                break
            # Accumulate bit `bidx` of every row at once.
            y[:, vidx] += x[:, col].astype(np.uint32) << np.uint32(bidx)
    return y
| [
"cloudpickle.load",
"numpy.ceil",
"cffi.FFI",
"torch.Tensor",
"torch.max",
"os.path.join",
"torch.from_numpy",
"hd_encoder.sng_encoder",
"numpy.zeros",
"torch.cuda.is_available",
"cloudpickle.dump",
"platform.machine",
"numpy.save",
"torch.device"
] | [((7861, 7898), 'numpy.zeros', 'np.zeros', (['(u, v_out)'], {'dtype': 'np.uint32'}), '((u, v_out), dtype=np.uint32)\n', (7869, 7898), True, 'import numpy as np\n'), ((720, 730), 'cffi.FFI', 'cffi.FFI', ([], {}), '()\n', (728, 730), False, 'import cffi\n'), ((5681, 5700), 'torch.Tensor', 't.Tensor', (['n_samples'], {}), '(n_samples)\n', (5689, 5700), True, 'import torch as t\n'), ((6923, 6954), 'cloudpickle.dump', 'cpckl.dump', (['self.__dict__', 'file'], {}), '(self.__dict__, file)\n', (6933, 6954), True, 'import cloudpickle as cpckl\n'), ((7125, 7141), 'cloudpickle.load', 'cpckl.load', (['file'], {}), '(file)\n', (7135, 7141), True, 'import cloudpickle as cpckl\n'), ((7414, 7533), 'numpy.save', 'np.save', (["(self._name + 'bin')"], {'_n_classes': 'self._n_classes', '_am': '_am', '_itemMemory': '_itemMemory', '_encoding': 'self._encoding'}), "(self._name + 'bin', _n_classes=self._n_classes, _am=_am,\n _itemMemory=_itemMemory, _encoding=self._encoding)\n", (7421, 7533), True, 'import numpy as np\n'), ((7836, 7851), 'numpy.ceil', 'np.ceil', (['(v / 32)'], {}), '(v / 32)\n', (7843, 7851), True, 'import numpy as np\n'), ((5104, 5112), 'torch.max', 't.max', (['y'], {}), '(y)\n', (5109, 5112), True, 'import torch as t\n'), ((668, 696), 'os.path.join', 'os.path.join', (['__file__', '""".."""'], {}), "(__file__, '..')\n", (680, 696), False, 'import os\n'), ((4068, 4083), 'torch.from_numpy', 't.from_numpy', (['X'], {}), '(X)\n', (4080, 4083), True, 'import torch as t\n'), ((4110, 4125), 'torch.from_numpy', 't.from_numpy', (['y'], {}), '(y)\n', (4122, 4125), True, 'import torch as t\n'), ((5599, 5614), 'torch.from_numpy', 't.from_numpy', (['X'], {}), '(X)\n', (5611, 5614), True, 'import torch as t\n'), ((6455, 6476), 'torch.cuda.is_available', 't.cuda.is_available', ([], {}), '()\n', (6474, 6476), True, 'import torch as t\n'), ((6499, 6538), 'torch.device', 't.device', (["(device if use_cuda else 'cpu')"], {}), "(device if use_cuda else 'cpu')\n", (6507, 6538), True, 
'import torch as t\n'), ((6607, 6645), 'hd_encoder.sng_encoder', 'sng_encoder', (['D', '_device', 'nitem', 'ngramm'], {}), '(D, _device, nitem, ngramm)\n', (6618, 6645), False, 'from hd_encoder import sng_encoder\n'), ((992, 1010), 'platform.machine', 'platform.machine', ([], {}), '()\n', (1008, 1010), False, 'import platform\n'), ((2059, 2093), 'torch.Tensor', 't.Tensor', (['(self._encoder.n_blk * 32)'], {}), '(self._encoder.n_blk * 32)\n', (2067, 2093), True, 'import torch as t\n'), ((2993, 3004), 'torch.Tensor', 't.Tensor', (['D'], {}), '(D)\n', (3001, 3004), True, 'import torch as t\n'), ((3239, 3273), 'torch.Tensor', 't.Tensor', (['self._n_classes', 'self._D'], {}), '(self._n_classes, self._D)\n', (3247, 3273), True, 'import torch as t\n'), ((3326, 3351), 'torch.Tensor', 't.Tensor', (['self._n_classes'], {}), '(self._n_classes)\n', (3334, 3351), True, 'import torch as t\n'), ((783, 818), 'os.path.join', 'os.path.join', (['path', '"""_hd_encoder.h"""'], {}), "(path, '_hd_encoder.h')\n", (795, 818), False, 'import os\n'), ((850, 888), 'os.path.join', 'os.path.join', (['path', '"""_hd_classifier.h"""'], {}), "(path, '_hd_classifier.h')\n", (862, 888), False, 'import os\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for creating melody training datasets.
Use extract_melodies to extract monophonic melodies from a NoteSequence proto.
"""
import logging
import math
# internal imports
import numpy as np
from magenta.protobuf import music_pb2
# Special events.
NUM_SPECIAL_EVENTS = 2
NOTE_OFF = -1  # Ends the current note; starts at least one step of silence.
NO_EVENT = -2  # Filler: sustains the previous note (or silence).
# Other constants.
MIN_MIDI_PITCH = 0  # Inclusive.
MAX_MIDI_PITCH = 127  # Inclusive.
NOTES_PER_OCTAVE = 12
DEFAULT_BEATS_PER_MINUTE = 120.0  # Assumed when a NoteSequence has no tempo.
BEATS_PER_BAR = 4  # This code assumes 4 beats per measure of music.
# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 96
# Set the quantization cutoff.
# Note events before this cutoff are rounded down to nearest step. Notes
# above this cutoff are rounded up to nearest step. The cutoff is given as a
# fraction of a step.
# For example, with quantize_cutoff = 0.75 using 0-based indexing,
# if .75 < event <= 1.75, it will be quantized to step 1.
# If 1.75 < event <= 2.75 it will be quantized to step 2.
# A number close to 1.0 gives less wiggle room for notes that start early,
# and they will be snapped to the previous step.
QUANTIZE_CUTOFF = 0.75
# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
#     [0, 1, 3, 5, 7, 8, 10]
#     [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# scale = [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1]
# NOTE_KEYS = [[j for j in xrange(12) if scale[(i - j) % 12]]
#              for i in xrange(12)]
NOTE_KEYS = [
    [0, 1, 3, 5, 7, 8, 10],
    [1, 2, 4, 6, 8, 9, 11],
    [0, 2, 3, 5, 7, 9, 10],
    [1, 3, 4, 6, 8, 10, 11],
    [0, 2, 4, 5, 7, 9, 11],
    [0, 1, 3, 5, 6, 8, 10],
    [1, 2, 4, 6, 7, 9, 11],
    [0, 2, 3, 5, 7, 8, 10],
    [1, 3, 4, 6, 8, 9, 11],
    [0, 2, 4, 5, 7, 9, 10],
    [1, 3, 5, 6, 8, 10, 11],
    [0, 2, 4, 6, 7, 9, 11]]
class PolyphonicMelodyException(Exception):
  """Raised when a NoteSequence turns out to be polyphonic (two note-on
  events quantized onto the same step)."""
  pass
class BadNoteException(Exception):
  """Raised when a note with invalid timing (zero length, negative time, or
  starting before the previous note-on) is added to a Melody."""
  pass
class Melody(object):
  """Stores a quantized stream of monophonic melody events.

  Melody is an intermediate representation that all melody models
  can use. NoteSequence proto to melody code will do work to align notes
  and extract monophonic melodies. Model specific code just needs to
  convert Melody to SequenceExample protos for TensorFlow.

  Melody implements an iterable object. Simply iterate to retrieve
  the melody events.

  Melody events are integers in range [-2, 127] (inclusive),
  where negative values are the special event events: NOTE_OFF, and NO_EVENT.
  Non-negative values [0, 127] are note-on events for that midi pitch. A note
  starts at a non-negative value (that is the pitch), and is held through
  subsequent NO_EVENT events until either another non-negative value is reached
  (even if the pitch is the same as the previous note), or a NOTE_OFF event is
  reached. A NOTE_OFF starts at least one step of silence, which continues
  through NO_EVENT events until the next non-negative value.

  NO_EVENT values are treated as default filler. Notes must be inserted
  in ascending order by start time. Note end times will be truncated if the next
  note overlaps.

  Melodies can start at any non-zero time, and are shifted left so that the bar
  containing the first note-on event is the first bar.

  Attributes:
    events: A python list of melody events which are integers. Melody events are
        described above.
    offset: When quantizing notes, this is the offset between indices in
        `events` and time steps of incoming melody events. An offset is chosen
        such that the first melody event is close to the beginning of `events`.
    steps_per_bar: Number of steps in a bar (measure) of music.
    last_on: Index of last note-on event added. This index will be within
        the range of `events`.
    last_off: Index of the NOTE_OFF event that belongs to the note-on event
        at `last_on`. This index is likely not in the range of `events` unless
        _write_all_notes was called.
  """

  def __init__(self, steps_per_bar=16):
    """Construct an empty Melody.

    Args:
      steps_per_bar: How many time steps per bar of music. Melody needs to know
          about bars to skip empty bars before the first note.
    """
    self.events = []
    self.offset = 0
    self.steps_per_bar = steps_per_bar
    self.last_on = None  # Index of the last note-on event in `events`.
    # last_off is the index of the NOTE_OFF event belonging to the most
    # recently added note.
    self.last_off = None

  def __iter__(self):
    """Return an iterator over the events in this Melody.

    Returns:
      Python iterator over events.
    """
    return iter(self.events)

  def __len__(self):
    """How many events are in this Melody.

    Returns:
      Number of events as an int.
    """
    return len(self.events)

  def _can_add_note(self, start_step):
    """Check if a note-on event can be added at the given time step.

    Note-on events cannot be added at the same time as or before previous
    note-on events.

    Args:
      start_step: Time step of the note-on that we wish to add. This is
          a non-negative integer.

    Returns:
      True if `start_step` is after all previous note-on events.
    """
    return self.last_on is None or start_step - self.offset > self.last_on

  def _add_note(self, pitch, start_step, end_step):
    """Adds the given note to the stream.

    The previous note's end step will be changed to end before this note if
    there is overlap.

    The note is not added if `start_step` is before the start step of the
    previously added note, or if `start_step` equals `end_step`.

    Args:
      pitch: Midi pitch. An integer between 0 and 127 inclusive.
      start_step: A non-zero integer step that the note begins on.
      end_step: An integer step that the note ends on. The note is considered to
          end at the onset of the end step. `end_step` must be greater than
          `start_step`.

    Raises:
      BadNoteException: If `start_step` does not come after the previous
          note-on, or equals `end_step`.
    """
    if not self._can_add_note(start_step):
      raise BadNoteException(
          'Given start step %d is before last on event at %d'
          % (start_step, self.last_on))
    if start_step == end_step:
      raise BadNoteException('Given start step and end step are the same: %d'
                             % start_step)
    if not self.events:
      # Align the offset so the first note's bar becomes bar 0.
      self.offset = start_step - start_step % self.steps_per_bar
    start_step_offset = start_step - self.offset
    end_step_offset = end_step - self.offset
    self.events += [NO_EVENT] * (start_step_offset - len(self.events) + 1)
    self.events[start_step_offset] = pitch
    # Truncate the previous note if it would overlap this one.
    if self.last_off is not None and self.last_off < start_step_offset:
      self.events[self.last_off] = NOTE_OFF
    self.last_on = start_step_offset
    self.last_off = end_step_offset

  def _write_all_notes(self):
    """Write remaining note off event to `events`.

    This internal method makes sure that all notes being temporarily stored in
    other instance variables, namely self.last_on and self.last_off, are
    written to self.events. __iter__ and __len__ will only return what is in
    self.events, so all notes must be written there after operating on the
    events in this instance.
    """
    self.events += [NO_EVENT] * (self.last_off - len(self.events) + 1)
    self.events[self.last_off] = NOTE_OFF
    self.last_on = None
    self.last_off = None

  def _clear(self):
    """Clear `events` and last note-on/off information."""
    self.events = []
    self.last_on = None
    self.last_off = None

  def _distance_to_last_event(self, step):
    """Returns distance of the given step to the last off event.

    Args:
      step: Step to compute the distance to.

    Returns:
      Distance between step and last off event. 0 if events are the same.
      Negative if step comes before the last off event.

    Raises:
      ValueError: When the stream is empty.
    """
    if self.last_off is None:
      raise ValueError('No events in the stream')
    return step - self.offset - self.last_off

  def get_note_histogram(self):
    """Gets a histogram of the note occurrences in a melody.

    Returns:
      A list of 12 ints, one for each note value (C at index 0 through B at
      index 11). Each int is the total number of times that note occurred in
      the melody.
    """
    np_melody = np.array(self.events, dtype=int)
    return np.bincount(
        np_melody[np_melody >= MIN_MIDI_PITCH] % NOTES_PER_OCTAVE,
        minlength=NOTES_PER_OCTAVE)

  def get_major_key(self):
    """Finds the major key that this melody most likely belong to.

    Each key is matched against the pitches in the melody. The key that
    matches the most pitches is returned. If multiple keys match equally, the
    key with the lowest index is returned (where the indexes of the keys are
    C = 0 through B = 11).

    Returns:
      An int for the most likely key (C = 0 through B = 11)
    """
    note_histogram = self.get_note_histogram()
    key_histogram = np.zeros(NOTES_PER_OCTAVE)
    for note, count in enumerate(note_histogram):
      key_histogram[NOTE_KEYS[note]] += count
    return key_histogram.argmax()

  def from_notes(self, notes, bpm=120.0, gap=16, ignore_polyphonic_notes=False):
    """Populate self with an iterable of music_pb2.NoteSequence.Note.

    BEATS_PER_BAR/4 time signature is assumed.

    The given list of notes is quantized according to the given beats per minute
    and populated into self. Any existing notes in the instance are cleared.
    0 velocity notes are ignored. The melody is ended when there is a gap of
    `gap` steps or more after a note.

    If note-on events occur at the same step, this melody is cleared and an
    exception is thrown.

    Args:
      notes: Iterable of music_pb2.NoteSequence.Note
      bpm: Beats per minute. This determines the quantization step size in
          seconds. Beats are subdivided according to `steps_per_bar` given to
          the constructor.
      gap: If this many steps or more follow a note, the melody is ended.
      ignore_polyphonic_notes: If true, any notes that come before or land on
          an already added note's start step will be ignored. If false,
          PolyphonicMelodyException will be raised.

    Raises:
      PolyphonicMelodyException: If any of the notes start on the same step when
          quantized and ignore_polyphonic_notes is False.
    """
    self._clear()
    # Compute quantization steps per second.
    steps_per_second = bpm / 60.0 * self.steps_per_bar / BEATS_PER_BAR
    quantize = lambda x: int(math.ceil(x - QUANTIZE_CUTOFF))
    # Sort track by note start times. NOTE: this sorts the caller's list
    # in place.
    notes.sort(key=lambda note: note.start_time)
    for note in notes:
      # Ignore 0 velocity notes.
      if not note.velocity:
        continue
      # Quantize the start and end times of the note.
      start_step = quantize(note.start_time * steps_per_second)
      end_step = quantize(note.end_time * steps_per_second)
      if end_step == start_step:
        end_step += 1
      # Do not allow notes to start or end in negative time.
      if start_step < 0 or end_step < 0:
        raise BadNoteException(
            'Got negative note time: start_time = %s, end_time = %s'
            % (note.start_time, note.end_time))
      # If start_step comes before or lands on an already added note's start
      # step, we cannot add it. Discard the melody because it is not monophonic.
      if not self._can_add_note(start_step):
        if ignore_polyphonic_notes:
          continue
        else:
          self._clear()
          raise PolyphonicMelodyException()
      # If a gap of `gap` or more steps is found, end the melody.
      if (len(self) and
          self._distance_to_last_event(start_step) >= gap):
        break
      # Add the note-on and off events to the melody.
      self._add_note(note.pitch, start_step, end_step)
    self._write_all_notes()

  def from_event_list(self, events):
    """Populate self directly from a list of melody event integers."""
    self.events = events

  def to_sequence(self, velocity=100, instrument=0, sequence_start_time=0.0,
                  bpm=120.0):
    """Converts the Melody to Sequence proto.

    Args:
      velocity: Midi velocity to give each note. Between 1 and 127 (inclusive).
      instrument: Midi instrument to give each note.
      sequence_start_time: A time in seconds (float) that the first note in the
          sequence will land on.
      bpm: Beats per minute (float).

    Returns:
      A NoteSequence proto encoding the given melody.
    """
    seconds_per_step = 60.0 / bpm * BEATS_PER_BAR / self.steps_per_bar
    sequence = music_pb2.NoteSequence()
    sequence.tempos.add().bpm = bpm
    sequence.ticks_per_beat = STANDARD_PPQ
    current_sequence_note = None
    for step, note in enumerate(self):
      if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH:
        # End any sustained notes.
        if current_sequence_note is not None:
          current_sequence_note.end_time = (
              step * seconds_per_step + sequence_start_time)
        # Add a note.
        current_sequence_note = sequence.notes.add()
        current_sequence_note.start_time = (
            step * seconds_per_step + sequence_start_time)
        # Give the note an end time now just to be sure it gets closed.
        current_sequence_note.end_time = (
            (step + 1) * seconds_per_step + sequence_start_time)
        current_sequence_note.pitch = note
        current_sequence_note.velocity = velocity
        current_sequence_note.instrument = instrument
      elif note == NOTE_OFF:
        # End any sustained notes.
        if current_sequence_note is not None:
          current_sequence_note.end_time = (
              step * seconds_per_step + sequence_start_time)
          current_sequence_note = None
    return sequence

  def squash(self, min_note, max_note, transpose_to_key):
    """Transpose and octave shift the notes in this Melody.

    The key center of this melody is computed with a heuristic, and the notes
    are transposed to be in the given key. The melody is also octave shifted
    to be centered in the given range. Additionally, all notes are octave
    shifted to lie within a given range.

    Args:
      min_note: Minimum pitch (inclusive) that the resulting notes will take on.
      max_note: Maximum pitch (exclusive) that the resulting notes will take on.
      transpose_to_key: The melody is transposed to be in this key. 0 = C Major.

    Returns:
      How much notes are transposed by. None if the melody has no notes.
    """
    melody_key = self.get_major_key()
    key_diff = transpose_to_key - melody_key
    midi_notes = [note for note in self.events
                  if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH]
    if not midi_notes:
      return
    melody_min_note = min(midi_notes)
    melody_max_note = max(midi_notes)
    # Floor division keeps the centers integral, preserving the original
    # Python 2 `int / int` semantics under Python 3 (PEP 238).
    melody_center = (melody_min_note + melody_max_note) // 2
    target_center = (min_note + max_note - 1) // 2
    center_diff = target_center - (melody_center + key_diff)
    transpose_amount = (
        key_diff +
        NOTES_PER_OCTAVE * int(round(center_diff / float(NOTES_PER_OCTAVE))))
    # `xrange` was removed in Python 3; `range` behaves identically here.
    for i in range(len(self.events)):
      # Transpose MIDI pitches. Special events below MIN_MIDI_PITCH are not
      # changed.
      if self.events[i] >= MIN_MIDI_PITCH:
        self.events[i] += transpose_amount
        if self.events[i] < min_note:
          self.events[i] = (
              min_note + (self.events[i] - min_note) % NOTES_PER_OCTAVE)
        elif self.events[i] >= max_note:
          self.events[i] = (max_note - NOTES_PER_OCTAVE +
                            (self.events[i] - max_note) % NOTES_PER_OCTAVE)
    return transpose_amount
def extract_melodies(sequence, steps_per_beat=4, min_bars=7,
                     min_unique_pitches=5):
  """Extracts a list of melodies from the given NoteSequence proto.

  A time signature of BEATS_PER_BAR is assumed for each sequence. If the
  sequence has an incompatable time signature, like 3/4, 5/4, etc, then
  the time signature is ignored and BEATS_PER_BAR/4 time is assumed.

  Each instrument track is quantized into at most one Melody (only the first
  melody per track is kept, ending at the first bar-length gap of silence).
  A melody is discarded unless it spans at least `min_bars` bars and contains
  at least `min_unique_pitches` unique pitches (octave equivalent), which
  filters out accompaniment-like tracks that just repeat a few notes.

  Args:
    sequence: A NoteSequence proto containing notes.
    steps_per_beat: How many subdivisions of each beat. BEATS_PER_BAR/4 time is
        assumed, so steps per bar is equal to
        `BEATS_PER_BAR` * `steps_per_beat`.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.

  Returns:
    A python list of Melody instances.
  """
  # Bars are assumed to contain 4 beats (quarter notes).
  steps_per_bar = steps_per_beat * 4

  # Tempo comes from the first tempo change event; fall back to the MIDI
  # standard of 120 bpm when the sequence carries none.
  if len(sequence.tempos):
    bpm = sequence.tempos[0].bpm
  else:
    bpm = DEFAULT_BEATS_PER_MINUTE

  # Group note messages into tracks keyed by instrument.
  tracks = {}
  for note in sequence.notes:
    tracks.setdefault(note.instrument, []).append(note)

  melodies = []
  for track in tracks.values():
    # Quantize the track into a Melody object.
    # If any notes start at the same time, only one is kept.
    melody = Melody(steps_per_bar)
    melody.from_notes(track, bpm=bpm, gap=steps_per_bar,
                      ignore_polyphonic_notes=True)
    # Require a certain melody length.
    if len(melody) - 1 < steps_per_bar * min_bars:
      logging.debug('melody too short')
      continue
    # Require a certain number of unique pitches.
    if np.count_nonzero(melody.get_note_histogram()) < min_unique_pitches:
      logging.debug('melody too simple')
      continue
    melodies.append(melody)
  return melodies
| [
"math.ceil",
"logging.debug",
"magenta.protobuf.music_pb2.NoteSequence",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.bincount"
] | [((9152, 9184), 'numpy.array', 'np.array', (['self.events'], {'dtype': 'int'}), '(self.events, dtype=int)\n', (9160, 9184), True, 'import numpy as np\n'), ((9196, 9298), 'numpy.bincount', 'np.bincount', (['(np_melody[np_melody >= MIN_MIDI_PITCH] % NOTES_PER_OCTAVE)'], {'minlength': 'NOTES_PER_OCTAVE'}), '(np_melody[np_melody >= MIN_MIDI_PITCH] % NOTES_PER_OCTAVE,\n minlength=NOTES_PER_OCTAVE)\n', (9207, 9298), True, 'import numpy as np\n'), ((9811, 9837), 'numpy.zeros', 'np.zeros', (['NOTES_PER_OCTAVE'], {}), '(NOTES_PER_OCTAVE)\n', (9819, 9837), True, 'import numpy as np\n'), ((13410, 13434), 'magenta.protobuf.music_pb2.NoteSequence', 'music_pb2.NoteSequence', ([], {}), '()\n', (13432, 13434), False, 'from magenta.protobuf import music_pb2\n'), ((19345, 19377), 'numpy.count_nonzero', 'np.count_nonzero', (['note_histogram'], {}), '(note_histogram)\n', (19361, 19377), True, 'import numpy as np\n'), ((19175, 19208), 'logging.debug', 'logging.debug', (['"""melody too short"""'], {}), "('melody too short')\n", (19188, 19208), False, 'import logging\n'), ((19428, 19462), 'logging.debug', 'logging.debug', (['"""melody too simple"""'], {}), "('melody too simple')\n", (19441, 19462), False, 'import logging\n'), ((11386, 11416), 'math.ceil', 'math.ceil', (['(x - QUANTIZE_CUTOFF)'], {}), '(x - QUANTIZE_CUTOFF)\n', (11395, 11416), False, 'import math\n')] |
# %%
import os
import numpy as np
import pandas as pd
from gurobipy import GRB, Model, quicksum
from matplotlib import pyplot as plt
from plotly import express as px
from thermo.correlation import expected_rand_obj_val, rand_obj_val_avr
from thermo.data import dropna, load_gaultois, load_screen, train_test_split
from thermo.evaluate import filter_low_risk_high_ret, plot_output
from thermo.rf import RandomForestRegressor
from thermo.utils import ROOT
from thermo.utils.amm import MatPipe, featurize, fit_pred_pipe
DIR = ROOT + "/results/screen/amm+rf/"
os.makedirs(DIR, exist_ok=True)

# %%
magpie_features, gaultois_df = load_gaultois(target_cols=["formula", "zT", "T"])
screen_df, _ = load_screen()

# Rename once up front so every downstream frame consistently uses
# "composition" (automatminer's expected column name).
for df in [gaultois_df, screen_df]:
    df.rename(columns={"formula": "composition"}, inplace=True)

# %%
# Form Cartesian product between screen features and the 4 temperatures ([300, 400, 700,
# 1000] Kelvin) found in Gaultois' database. We'll predict each material at all 4 temps.
# Note: None of the composition are predicted to achieve high zT at 300, 400 Kelvin.
# Remove those to cut computation time in half.
temps = (700, 1000)
temps_col = np.array(temps).repeat(len(screen_df))
screen_df = screen_df.loc[screen_df.index.repeat(len(temps))]
screen_df.insert(0, "T", temps_col)

# %%
mat_pipe_zT, zT_pred = fit_pred_pipe(gaultois_df, screen_df, "zT")

# %%
mat_pipe_zT = MatPipe.save(DIR + "mat.pipe")

# %%
mat_pipe_zT = MatPipe.load(DIR + "mat.pipe")

# %%
amm_train_features = featurize(mat_pipe_zT, gaultois_df[["T", "composition"]])
amm_screen_features = featurize(mat_pipe_zT, screen_df[["T", "composition"]])

# %%
# add composition column for duplicate detection so we save features for
# every material only once
amm_screen_features["composition"] = screen_df.composition
amm_screen_features.drop_duplicates(subset=["composition"]).to_csv(
    DIR + "amm_screen_features.csv", float_format="%g", index=False
)
amm_train_features.to_csv(
    DIR + "amm_train_features.csv", float_format="%g", index=False
)

# %%
amm_train_features = pd.read_csv(DIR + "amm_train_features.csv")
amm_screen_features = pd.read_csv(DIR + "amm_screen_features.csv")
del amm_screen_features["composition"]

# add temperature column to AMM features
amm_screen_features = amm_screen_features.loc[
    amm_screen_features.index.repeat(len(temps))
]
amm_screen_features.insert(0, "T", temps_col)

# %% [markdown]
# # Check AMM+RF performance on Gaultois data
# Running cells in this section shows automatminer (AMM) features (which are just a
# subset of less correlated MagPie features) performs about the same as the complete
# MagPie set in accuracy but slightly better in uncertainty.

# %%
zT_series, magpie_features, check_features = dropna(
    gaultois_df.zT, magpie_features, amm_train_features
)
[X_tr_amm, X_tr_magpie, y_tr], [X_test_amm, X_test_magpie, y_test] = train_test_split(
    check_features, magpie_features, zT_series
)

# %%
amm_rf_zT = RandomForestRegressor()
amm_rf_zT.fit(X_tr_amm, y_tr)
amm_check_pred, amm_check_var = amm_rf_zT.predict(X_test_amm)
plot_output(y_test.values, amm_check_pred, amm_check_var)

# %%
magpie_rf_zT = RandomForestRegressor()
magpie_rf_zT.fit(X_tr_magpie, y_tr)
magpie_check_pred, magpie_check_var = magpie_rf_zT.predict(X_test_magpie)
plot_output(y_test.values, magpie_check_pred, magpie_check_var)

# %% [markdown]
# # Train AMM+RF on entire Gaultois data, then screen ICSD+COD

# %%
rf_zT = RandomForestRegressor()
rf_zT.fit(amm_train_features.iloc[gaultois_df.dropna().index], gaultois_df.zT.dropna())
zT_pred, zT_var = rf_zT.predict(amm_screen_features)
screen_df["zT_pred"] = zT_pred
screen_df["zT_var"] = zT_var

# %% [markdown]
# # Coarse triaging

# %%
# Save to CSV the 20 materials predicted to have the highest zT with no concern for
# estimated uncertainty. Baseline comparison to check if uncertainty estimation reduces
# the false positive rate.
screen_df.sort_values("zT_pred", ascending=False)[:20].to_csv(
    ROOT + "/results/screen/hr-materials.csv", index=False, float_format="%g"
)

# %%
lrhr_idx = filter_low_risk_high_ret(screen_df.zT_pred, screen_df.zT_var, min_ret=1.3)
lrhr_candidates = screen_df[lrhr_idx]

# %%
px.scatter(lrhr_candidates, x="zT_var", y="zT_pred", hover_data=lrhr_candidates.columns)

# %% [markdown]
# # Correlation between low-risk high-return materials

# %%
zT_corr = rf_zT.get_corr(amm_screen_features.iloc[lrhr_candidates.index])
zT_corr = pd.DataFrame(
    zT_corr, columns=lrhr_candidates.composition, index=lrhr_candidates.composition
)

# %%
zT_corr.to_csv(DIR + "correlation_matrix.csv", float_format="%g")

# %%
zT_corr_evals, zT_corr_evecs = np.linalg.eig(zT_corr)
zT_corr_evecs = zT_corr_evecs[zT_corr_evals.argsort()[::-1]]
plt.scatter(zT_corr_evecs[0], zT_corr_evecs[1])

# %% [markdown]
# # Fine Triaging
# Helpful links for the discrete constrained optimization problem of
# finding the p least correlated materials out of n predictions:
# - [Find k of n items with least pairwise correlations](
# https://stats.stackexchange.com/questions/73125)
# - [Least correlated subset of random variables from a correlation matrix](
# https://stats.stackexchange.com/questions/110426)

# %%
# The idea for this way of reducing correlation came from
# https://stats.stackexchange.com/a/327822/226996. Taking the element-wise
# absolute value (rather than squaring) and then summing gives similar results.
greedy_candidates = lrhr_candidates.copy(deep=True)
greedy_candidates["rough_correlation"] = (zT_corr ** 2).sum().values
greedy_candidates = (
    greedy_candidates.reset_index()
    .rename(columns={"index": "orig_index"})
    .sort_values(by="rough_correlation")
)

# %%
# Set environment variable GRB_LICENSE_FILE so that Gurobi finds its license.
# An academic license can be obtained for free at
# https://www.gurobi.com/downloads/end-user-license-agreement-academic.
os.environ["GRB_LICENSE_FILE"] = ROOT + "/hpc/gurobi.lic"

# Create a model for solving the quadratic optimization problem of selecting p out of n
# materials with least pairwise correlation according to the correlation matrix zT_corr.
grb_model = Model("quadratic_problem")
grb_model.params.TimeLimit = 300  # in sec
grb_model.params.LogFile = DIR + "gurobi.log"
os.remove(DIR + "gurobi.log")

# %%
n_select = 20
# Create decision variables.
dvar = grb_model.addVars(len(lrhr_candidates), vtype=GRB.BINARY).values()

# %%
# Define the model objective to minimize the sum of pairwise correlations.
obj = zT_corr.dot(dvar).dot(dvar)
grb_model.setObjective(obj, GRB.MINIMIZE)

# %%
# Add L1 constraint on dvar so that the optimization returns at least n_select formulas.
constr = grb_model.addConstr(quicksum(dvar) >= n_select, "l1_norm")

# %%
grb_model.optimize()

# %%
# Save selected materials to dataframe and CSV file.
assert (
    sum(var.x for var in dvar) == n_select
), "Gurobi selected a different number of materials than specified by n_select"
gurobi_candidates = lrhr_candidates.iloc[[bool(var.x) for var in dvar]]

# %%
gurobi_candidates.to_csv(DIR + "gurobi_candidates.csv", float_format="%g")
greedy_candidates.to_csv(DIR + "greedy_candidates.csv", float_format="%g")

# %%
gurobi_candidates = pd.read_csv(DIR + "gurobi_candidates.csv", index_col=0)
greedy_candidates = pd.read_csv(DIR + "greedy_candidates.csv", index_col=0)

# %%
for name, df in zip(
    ["gurobi_candidates", "greedy_candidates"],
    [gurobi_candidates, greedy_candidates.iloc[:20]],
):
    # The formula column was renamed to "composition" at the top of this
    # script, so sort and select by that name (sorting by "formula" would
    # raise a KeyError here).
    df.sort_values(["composition", "T"]).to_latex(
        f"{DIR}{name}.tex",
        columns=["composition", "database", "id", "T", "zT_pred", "zT_var"],
        float_format="%.3g",
        index=False,
    )

# %% [markdown]
# # Comparing greedy and Gurobi solution

# %%
# greedy_candidates contains all low-risk high-return materials sorted by their sum of
# squared correlations with all other materials. If either the greedy or Gurobi method
# (or both) picked materials entirely at random, we would expect formulas chosen by
# Gurobi to have an average index in the list equal to the total list's average index.
# The degree to which the average index of Gurobi materials in the greedy list is lower
# than average list index is an indicator of agreement between the two methods.
gurobi_in_greedy = greedy_candidates.orig_index.isin(gurobi_candidates.index)
greedy_avg_index = greedy_candidates[gurobi_in_greedy].index.to_series().mean()

print(
    "Average index of materials chosen by Gurobi in the list\n"
    f"sorted according to least squared correlation: {greedy_avg_index}\n"
    f"vs the average index of the total list: {(len(lrhr_candidates) + 1) / 2}"
)

# %%
greedy_indices_in_corr_mat = lrhr_candidates.index.isin(
    greedy_candidates.orig_index[:n_select]
)
greedy_obj_val = zT_corr.values.dot(greedy_indices_in_corr_mat).dot(
    greedy_indices_in_corr_mat
)
avr_rand_obj_val = rand_obj_val_avr(zT_corr, n_select, (n_repeats := 50))

# If len(zT_corr) >> 500, expected_rand_obj_val will take a long time due to cubic
# scaling. Consider decreasing max_risk or increasing min_ret in
# filter_low_risk_high_ret to decrease len(zT_corr).
exp_rand_obj_val = expected_rand_obj_val(zT_corr, n_select)

print(
    f"objective values:\n- Gurobi: {grb_model.objVal:.4g}\n"
    f"- greedy: {greedy_obj_val:.4g}\n"
    f"- average of {n_repeats} random draws: {avr_rand_obj_val:.4g}\n"
    f"- expectation value of random solution: {exp_rand_obj_val:.4g}"
)
| [
"thermo.data.load_gaultois",
"pandas.read_csv",
"thermo.evaluate.plot_output",
"numpy.array",
"thermo.utils.amm.MatPipe.load",
"thermo.data.load_screen",
"os.remove",
"thermo.rf.RandomForestRegressor",
"plotly.express.scatter",
"thermo.data.train_test_split",
"thermo.utils.amm.fit_pred_pipe",
... | [((560, 591), 'os.makedirs', 'os.makedirs', (['DIR'], {'exist_ok': '(True)'}), '(DIR, exist_ok=True)\n', (571, 591), False, 'import os\n'), ((630, 679), 'thermo.data.load_gaultois', 'load_gaultois', ([], {'target_cols': "['formula', 'zT', 'T']"}), "(target_cols=['formula', 'zT', 'T'])\n", (643, 679), False, 'from thermo.data import dropna, load_gaultois, load_screen, train_test_split\n'), ((695, 708), 'thermo.data.load_screen', 'load_screen', ([], {}), '()\n', (706, 708), False, 'from thermo.data import dropna, load_gaultois, load_screen, train_test_split\n'), ((1328, 1371), 'thermo.utils.amm.fit_pred_pipe', 'fit_pred_pipe', (['gaultois_df', 'screen_df', '"""zT"""'], {}), "(gaultois_df, screen_df, 'zT')\n", (1341, 1371), False, 'from thermo.utils.amm import MatPipe, featurize, fit_pred_pipe\n'), ((1393, 1423), 'thermo.utils.amm.MatPipe.save', 'MatPipe.save', (["(DIR + 'mat.pipe')"], {}), "(DIR + 'mat.pipe')\n", (1405, 1423), False, 'from thermo.utils.amm import MatPipe, featurize, fit_pred_pipe\n'), ((1445, 1475), 'thermo.utils.amm.MatPipe.load', 'MatPipe.load', (["(DIR + 'mat.pipe')"], {}), "(DIR + 'mat.pipe')\n", (1457, 1475), False, 'from thermo.utils.amm import MatPipe, featurize, fit_pred_pipe\n'), ((1504, 1561), 'thermo.utils.amm.featurize', 'featurize', (['mat_pipe_zT', "gaultois_df[['T', 'composition']]"], {}), "(mat_pipe_zT, gaultois_df[['T', 'composition']])\n", (1513, 1561), False, 'from thermo.utils.amm import MatPipe, featurize, fit_pred_pipe\n'), ((1585, 1640), 'thermo.utils.amm.featurize', 'featurize', (['mat_pipe_zT', "screen_df[['T', 'composition']]"], {}), "(mat_pipe_zT, screen_df[['T', 'composition']])\n", (1594, 1640), False, 'from thermo.utils.amm import MatPipe, featurize, fit_pred_pipe\n'), ((2070, 2113), 'pandas.read_csv', 'pd.read_csv', (["(DIR + 'amm_train_features.csv')"], {}), "(DIR + 'amm_train_features.csv')\n", (2081, 2113), True, 'import pandas as pd\n'), ((2137, 2181), 'pandas.read_csv', 'pd.read_csv', (["(DIR + 
'amm_screen_features.csv')"], {}), "(DIR + 'amm_screen_features.csv')\n", (2148, 2181), True, 'import pandas as pd\n'), ((2753, 2812), 'thermo.data.dropna', 'dropna', (['gaultois_df.zT', 'magpie_features', 'amm_train_features'], {}), '(gaultois_df.zT, magpie_features, amm_train_features)\n', (2759, 2812), False, 'from thermo.data import dropna, load_gaultois, load_screen, train_test_split\n'), ((2889, 2949), 'thermo.data.train_test_split', 'train_test_split', (['check_features', 'magpie_features', 'zT_series'], {}), '(check_features, magpie_features, zT_series)\n', (2905, 2949), False, 'from thermo.data import dropna, load_gaultois, load_screen, train_test_split\n'), ((2975, 2998), 'thermo.rf.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (2996, 2998), False, 'from thermo.rf import RandomForestRegressor\n'), ((3094, 3151), 'thermo.evaluate.plot_output', 'plot_output', (['y_test.values', 'amm_check_pred', 'amm_check_var'], {}), '(y_test.values, amm_check_pred, amm_check_var)\n', (3105, 3151), False, 'from thermo.evaluate import filter_low_risk_high_ret, plot_output\n'), ((3174, 3197), 'thermo.rf.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (3195, 3197), False, 'from thermo.rf import RandomForestRegressor\n'), ((3311, 3374), 'thermo.evaluate.plot_output', 'plot_output', (['y_test.values', 'magpie_check_pred', 'magpie_check_var'], {}), '(y_test.values, magpie_check_pred, magpie_check_var)\n', (3322, 3374), False, 'from thermo.evaluate import filter_low_risk_high_ret, plot_output\n'), ((3471, 3494), 'thermo.rf.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (3492, 3494), False, 'from thermo.rf import RandomForestRegressor\n'), ((4103, 4177), 'thermo.evaluate.filter_low_risk_high_ret', 'filter_low_risk_high_ret', (['screen_df.zT_pred', 'screen_df.zT_var'], {'min_ret': '(1.3)'}), '(screen_df.zT_pred, screen_df.zT_var, min_ret=1.3)\n', (4127, 4177), False, 'from thermo.evaluate import filter_low_risk_high_ret, 
plot_output\n'), ((4224, 4317), 'plotly.express.scatter', 'px.scatter', (['lrhr_candidates'], {'x': '"""zT_var"""', 'y': '"""zT_pred"""', 'hover_data': 'lrhr_candidates.columns'}), "(lrhr_candidates, x='zT_var', y='zT_pred', hover_data=\n lrhr_candidates.columns)\n", (4234, 4317), True, 'from plotly import express as px\n'), ((4477, 4575), 'pandas.DataFrame', 'pd.DataFrame', (['zT_corr'], {'columns': 'lrhr_candidates.composition', 'index': 'lrhr_candidates.composition'}), '(zT_corr, columns=lrhr_candidates.composition, index=\n lrhr_candidates.composition)\n', (4489, 4575), True, 'import pandas as pd\n'), ((4688, 4710), 'numpy.linalg.eig', 'np.linalg.eig', (['zT_corr'], {}), '(zT_corr)\n', (4701, 4710), True, 'import numpy as np\n'), ((4772, 4819), 'matplotlib.pyplot.scatter', 'plt.scatter', (['zT_corr_evecs[0]', 'zT_corr_evecs[1]'], {}), '(zT_corr_evecs[0], zT_corr_evecs[1])\n', (4783, 4819), True, 'from matplotlib import pyplot as plt\n'), ((6171, 6197), 'gurobipy.Model', 'Model', (['"""quadratic_problem"""'], {}), "('quadratic_problem')\n", (6176, 6197), False, 'from gurobipy import GRB, Model, quicksum\n'), ((6287, 6316), 'os.remove', 'os.remove', (["(DIR + 'gurobi.log')"], {}), "(DIR + 'gurobi.log')\n", (6296, 6316), False, 'import os\n'), ((7240, 7295), 'pandas.read_csv', 'pd.read_csv', (["(DIR + 'gurobi_candidates.csv')"], {'index_col': '(0)'}), "(DIR + 'gurobi_candidates.csv', index_col=0)\n", (7251, 7295), True, 'import pandas as pd\n'), ((7316, 7371), 'pandas.read_csv', 'pd.read_csv', (["(DIR + 'greedy_candidates.csv')"], {'index_col': '(0)'}), "(DIR + 'greedy_candidates.csv', index_col=0)\n", (7327, 7371), True, 'import pandas as pd\n'), ((8907, 8961), 'thermo.correlation.rand_obj_val_avr', 'rand_obj_val_avr', (['zT_corr', 'n_select', '(n_repeats := 50)'], {}), '(zT_corr, n_select, (n_repeats := 50))\n', (8923, 8961), False, 'from thermo.correlation import expected_rand_obj_val, rand_obj_val_avr\n'), ((9183, 9223), 
'thermo.correlation.expected_rand_obj_val', 'expected_rand_obj_val', (['zT_corr', 'n_select'], {}), '(zT_corr, n_select)\n', (9204, 9223), False, 'from thermo.correlation import expected_rand_obj_val, rand_obj_val_avr\n'), ((1160, 1175), 'numpy.array', 'np.array', (['temps'], {}), '(temps)\n', (1168, 1175), True, 'import numpy as np\n'), ((6724, 6738), 'gurobipy.quicksum', 'quicksum', (['dvar'], {}), '(dvar)\n', (6732, 6738), False, 'from gurobipy import GRB, Model, quicksum\n')] |
import cv2
import numpy
import sys
import os

# Display every image in a folder, contrast-stretched to 16-bit range for
# viewing. Space bar advances to the next image; any other key quits.
if len(sys.argv) == 2:
    folder_path = str(sys.argv[1])
else:
    print('## USAGE ## \n python readTiff_folder.py path_to_folder. \n Space Bar for next image. Any other key to exit. \n##')
    # BUG FIX: the bare name `exit` was a no-op expression, so the script kept
    # running and crashed later with a NameError on `folder_path`.
    sys.exit(1)
dirs = os.listdir(folder_path)
cv2.namedWindow(folder_path)
for imagePath in dirs:
    # -1 (IMREAD_UNCHANGED) keeps the original bit depth (e.g. 16-bit TIFFs).
    image = cv2.imread(os.path.join(folder_path, imagePath), -1)
    if image is None:
        # cv2.imread returns None for unreadable/non-image files; skip them
        # instead of crashing in cv2.normalize.
        print('Skipping unreadable file: {}'.format(imagePath))
        continue
    # Stretch the dynamic range to the full 16-bit scale for display.
    img_scaled = cv2.normalize(image, dst=None, alpha=0, beta=65535, norm_type=cv2.NORM_MINMAX)
    smallest = numpy.amin(image)
    biggest = numpy.amax(image)
    print('Min: {} - Max: {}'.format(smallest, biggest))
    print(image)
    print(imagePath)
    cv2.imshow(folder_path, img_scaled)
    if cv2.waitKey() == 32:  # 32 == space bar
        continue
    else:
        break
cv2.destroyAllWindows()
| [
"os.listdir",
"numpy.amin",
"cv2.normalize",
"os.path.join",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.amax",
"cv2.namedWindow"
] | [((246, 269), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (256, 269), False, 'import os\n'), ((270, 298), 'cv2.namedWindow', 'cv2.namedWindow', (['folder_path'], {}), '(folder_path)\n', (285, 298), False, 'import cv2\n'), ((718, 741), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (739, 741), False, 'import cv2\n'), ((400, 478), 'cv2.normalize', 'cv2.normalize', (['image'], {'dst': 'None', 'alpha': '(0)', 'beta': '(65535)', 'norm_type': 'cv2.NORM_MINMAX'}), '(image, dst=None, alpha=0, beta=65535, norm_type=cv2.NORM_MINMAX)\n', (413, 478), False, 'import cv2\n'), ((492, 509), 'numpy.amin', 'numpy.amin', (['image'], {}), '(image)\n', (502, 509), False, 'import numpy\n'), ((521, 538), 'numpy.amax', 'numpy.amax', (['image'], {}), '(image)\n', (531, 538), False, 'import numpy\n'), ((628, 663), 'cv2.imshow', 'cv2.imshow', (['folder_path', 'img_scaled'], {}), '(folder_path, img_scaled)\n', (638, 663), False, 'import cv2\n'), ((345, 381), 'os.path.join', 'os.path.join', (['folder_path', 'imagePath'], {}), '(folder_path, imagePath)\n', (357, 381), False, 'import os\n'), ((670, 683), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (681, 683), False, 'import cv2\n')] |
# Train a small LSTM classifier (AGN vs Blazar) on pre-normalized light-curve
# data and plot per-class ROC curves.
import numpy as np
df_x = np.load("Dados Normalizados.npy")
print(df_x.shape)
df_y = np.load("Target.npy")
print(df_y.shape)
# Append the target as an extra trailing channel so features and labels are
# shuffled together.
df = np.concatenate([df_x, np.expand_dims(df_y,axis=-1)], axis=-1)
#df = np.expand_dims(df, axis = -1)
#df = df.reshape(df.shape[0], 1, df.shape[1], df.shape[2])
print(df.shape)
import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"]="2"
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint, TensorBoard
import tensorflow as tf
from keras.models import Sequential, Model
from keras.layers import Embedding, LSTM, Dense, Input, Bidirectional, Conv2D, Activation, MaxPooling2D, Flatten, Reshape
from keras.layers import Conv1D, MaxPooling1D, SimpleRNN
from keras.utils import multi_gpu_model
from keras import backend as K
K.set_image_dim_ordering('tf')
kf = KFold(n_splits=5)
np.random.seed(10)
print(df[:5,0,-1])
# Shuffle samples in place; labels travel with their features (last channel).
np.random.shuffle(df)
print(df[:5,0,-1])
split = 1
# NOTE(review): `split` starts at 1 and is incremented once at the end of the
# body, so the guard below makes the training body execute exactly once.
for percentage in range(1,10):
    if split != 1:
        continue
    else:
        embed_dim = 128
        lstm_out = int(np.floor(486*4//1))
        batch_size = 32
        activation = 'tanh'
        print(split)
        # Input: (time steps, 2 channels) per sample.
        inp = Input(shape=(df.shape[1], 2))
        #model.add(Embedding(2500, embed_dim,input_length = df.shape[1] - 1, dropout = 0.2))
        #x = Conv1D(128, 3, padding='same')(inp)
        #x = BatchNormalization()(x)
        #x = Activation(activation)(x)
        #x = MaxPooling1D(2, padding='same')(x)
        #x = Conv1D(256, 3, padding='same')(x)
        #x = BatchNormalization()(x)
        #x = Activation(activation)(x)
        #x = MaxPooling1D(2, padding='same')(x)
        #x = Conv1D(512, 3, padding='same')(x)
        #x = BatchNormalization()(x)
        #x = Activation(activation)(x)
        #x = MaxPooling1D(2, padding='same')(x)
        #x = Conv2D(32, (2,2), padding='same')(x)
        #x = BatchNormalization()(x)
        #x = Activation(activation)(x)
        #x = MaxPooling2D(2, padding='same')(x)
        #print(x.shape)
        #x = Reshape((851,32))(x)
        #x = Flatten()(x)
        #x = LSTM(lstm_out, return_sequences=True)(x)
        x = LSTM(lstm_out)(inp)
        x = Dense(lstm_out//2, activation=activation)(x)
        #x = Dense(lstm_out, activation=activation)(x)
        #x = Dense(100, activation='relu')(x)
        # Two sigmoid outputs, one per class (one-hot targets built below).
        x = Dense(2,activation='sigmoid')(x)
        #x = Reshape((2,1))(x)
        model = Model(inp, x)
        #parallel_model = multi_gpu_model(model, gpus=5)
        parallel_model = model
        parallel_model.compile(loss = 'binary_crossentropy', optimizer='adam',metrics = ['accuracy'])
        #print(parallel_model.summary())
        #print("TRAIN:", train_index, "TEST:", test_index)
        #X_train, X_test = df[train_index,:,:2], df[test_index,:,:2]
        #Y_train, Y_test = df[train_index,0,-1], df[test_index,0,-1]
        X_train, X_test, Y_train, Y_test = train_test_split(df[:,:,:2], df[:,0,-1], test_size = 0.10, random_state = 7)
        # One-hot encode the scalar 0/1 labels into two columns.
        Y_test_binalized = np.zeros((Y_test.shape[0],2), dtype="float32")
        for i in range(Y_test.shape[0]):
            if Y_test[i] == 0.:
                Y_test_binalized[i,0] = 1.
            else:
                Y_test_binalized[i,1] = 1.
        Y_train_binalized = np.zeros((Y_train.shape[0],2), dtype="float32")
        for i in range(Y_train.shape[0]):
            if Y_train[i] == 0.:
                Y_train_binalized[i,0] = 1.
            else:
                Y_train_binalized[i,1] = 1.
        #print(Y_train.shape)
        #tensorboard = TensorBoard(log_dir=f'./fold{split}')
        parallel_model.fit(X_train, Y_train_binalized, batch_size =batch_size*1, epochs = 5,
                           verbose = 1, validation_data=(X_test, Y_test_binalized))
        pred = parallel_model.predict(X_test)
        '''
        label_binalized = np.zeros((Y_test.shape[0],2), dtype="float32")
        for i in range(Y_test.shape[0]):
            if Y_test[i] == 0.:
                label_binalized[i,0] = 1.
            else:
                label_binalized[i,1] = 1.
        pred_binalized = np.zeros((pred.shape[0],2), dtype="float32")
        for i in range(pred.shape[0]):
            if pred[i] < 0.5:
                pred_binalized[i,0] = 1.
            else:
                pred_binalized[i,1] = 1.
        '''
        # Per-class ROC curves and AUC from the one-hot targets vs predictions.
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(2):
            fpr[i], tpr[i], _ = roc_curve(Y_test_binalized[:, i], pred[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
        plt.figure()
        lw = 2
        colors = ['darkblue','darkorange']
        classes = ['AGN','Blazar',]
        for i in range(2):
            plt.plot(fpr[i], tpr[i], color=colors[i],
                     lw=lw, label=f'{classes[i]} (area = %0.2f)' % roc_auc[i])
        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title(f'ROC - Final Model')
        plt.legend(loc="lower right")
        #plt.savefig(f'Train_Test/ROC-NO LSTM.jpg')
        plt.show()
        split += 1
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve",
"keras.layers.Dense",
"sklearn.model_selection.KFold",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.layers.LSTM",
"numpy.random.seed",
"keras.models.Model",
"matplotlib.pyplot.ylim",
"sklearn.model_... | [((27, 60), 'numpy.load', 'np.load', (['"""Dados Normalizados.npy"""'], {}), "('Dados Normalizados.npy')\n", (34, 60), True, 'import numpy as np\n'), ((86, 107), 'numpy.load', 'np.load', (['"""Target.npy"""'], {}), "('Target.npy')\n", (93, 107), True, 'import numpy as np\n'), ((918, 948), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (942, 948), True, 'from keras import backend as K\n'), ((955, 972), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (960, 972), False, 'from sklearn.model_selection import KFold\n'), ((974, 992), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (988, 992), True, 'import numpy as np\n'), ((1012, 1033), 'numpy.random.shuffle', 'np.random.shuffle', (['df'], {}), '(df)\n', (1029, 1033), True, 'import numpy as np\n'), ((153, 182), 'numpy.expand_dims', 'np.expand_dims', (['df_y'], {'axis': '(-1)'}), '(df_y, axis=-1)\n', (167, 182), True, 'import numpy as np\n'), ((1296, 1325), 'keras.layers.Input', 'Input', ([], {'shape': '(df.shape[1], 2)'}), '(shape=(df.shape[1], 2))\n', (1301, 1325), False, 'from keras.layers import Embedding, LSTM, Dense, Input, Bidirectional, Conv2D, Activation, MaxPooling2D, Flatten, Reshape\n'), ((2544, 2557), 'keras.models.Model', 'Model', (['inp', 'x'], {}), '(inp, x)\n', (2549, 2557), False, 'from keras.models import Sequential, Model\n'), ((3030, 3105), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df[:, :, :2]', 'df[:, 0, -1]'], {'test_size': '(0.1)', 'random_state': '(7)'}), '(df[:, :, :2], df[:, 0, -1], test_size=0.1, random_state=7)\n', (3046, 3105), False, 'from sklearn.model_selection import train_test_split\n'), ((3143, 3190), 'numpy.zeros', 'np.zeros', (['(Y_test.shape[0], 2)'], {'dtype': '"""float32"""'}), "((Y_test.shape[0], 2), dtype='float32')\n", (3151, 3190), True, 'import numpy as np\n'), ((3405, 3453), 'numpy.zeros', 'np.zeros', (['(Y_train.shape[0], 
2)'], {'dtype': '"""float32"""'}), "((Y_train.shape[0], 2), dtype='float32')\n", (3413, 3453), True, 'import numpy as np\n'), ((4747, 4759), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4757, 4759), True, 'import matplotlib.pyplot as plt\n'), ((5022, 5083), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (5030, 5083), True, 'import matplotlib.pyplot as plt\n'), ((5092, 5112), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5100, 5112), True, 'import matplotlib.pyplot as plt\n'), ((5121, 5142), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (5129, 5142), True, 'import matplotlib.pyplot as plt\n'), ((5151, 5184), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (5161, 5184), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5225), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (5203, 5225), True, 'import matplotlib.pyplot as plt\n'), ((5234, 5265), 'matplotlib.pyplot.title', 'plt.title', (['f"""ROC - Final Model"""'], {}), "(f'ROC - Final Model')\n", (5243, 5265), True, 'import matplotlib.pyplot as plt\n'), ((5274, 5303), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (5284, 5303), True, 'import matplotlib.pyplot as plt\n'), ((5364, 5374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5372, 5374), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1211), 'numpy.floor', 'np.floor', (['(486 * 4 // 1)'], {}), '(486 * 4 // 1)\n', (1197, 1211), True, 'import numpy as np\n'), ((2272, 2286), 'keras.layers.LSTM', 'LSTM', (['lstm_out'], {}), '(lstm_out)\n', (2276, 2286), False, 'from keras.layers import Embedding, LSTM, Dense, Input, Bidirectional, Conv2D, Activation, 
MaxPooling2D, Flatten, Reshape\n'), ((2304, 2347), 'keras.layers.Dense', 'Dense', (['(lstm_out // 2)'], {'activation': 'activation'}), '(lstm_out // 2, activation=activation)\n', (2309, 2347), False, 'from keras.layers import Embedding, LSTM, Dense, Input, Bidirectional, Conv2D, Activation, MaxPooling2D, Flatten, Reshape\n'), ((2463, 2493), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""sigmoid"""'}), "(2, activation='sigmoid')\n", (2468, 2493), False, 'from keras.layers import Embedding, LSTM, Dense, Input, Bidirectional, Conv2D, Activation, MaxPooling2D, Flatten, Reshape\n'), ((4646, 4691), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test_binalized[:, i]', 'pred[:, i]'], {}), '(Y_test_binalized[:, i], pred[:, i])\n', (4655, 4691), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4717, 4736), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (4720, 4736), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4893, 4997), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr[i]', 'tpr[i]'], {'color': 'colors[i]', 'lw': 'lw', 'label': "(f'{classes[i]} (area = %0.2f)' % roc_auc[i])"}), "(fpr[i], tpr[i], color=colors[i], lw=lw, label=\n f'{classes[i]} (area = %0.2f)' % roc_auc[i])\n", (4901, 4997), True, 'import matplotlib.pyplot as plt\n')] |
# Download a street network around a point, find graph nodes nearest to a few
# coordinates of interest, and plot a shortest-path route between two of them.
import numpy as np
import osmnx as ox
import networkx as nx
from sklearn.neighbors import KDTree
import folium
import matplotlib.pyplot as plt
# (lat, lon) of the area of interest.
sorth_street = (30.207238, 120.204248)
G = ox.graph_from_point(sorth_street, distance=500)
ox.plot_graph(G, fig_height=10, fig_width=10, edge_color='green')
# Demo: shortest path between two randomly chosen nodes.
route = nx.shortest_path(G, np.random.choice(G.nodes),
                 np.random.choice(G.nodes))
ox.plot_graph_route(G, route, fig_height=10, fig_width=10)
nodes, _ = ox.graph_to_gdfs(G)
print(nodes)
# Points of interest as (lat, lon); node_b/node_d are plotted/unused below.
node_a = (30.205538, 120.199288)
node_b = (30.212378, 120.211898)
node_c = (30.20928, 120.21248)
node_d = (30.20158, 120.20588)
# NOTE(review): euclidean distance on raw lat/lon is only a rough proxy for
# geographic distance; fine at this small scale, but not strictly correct.
tree = KDTree(nodes[['y', 'x']], metric='euclidean')
a_idx = tree.query([node_a], k=1, return_distance=False)[0]
b_idx = tree.query([node_b], k=1, return_distance=False)[0]
c_idx = tree.query([node_c], k=1, return_distance=False)[0]
closest_node_to_a = nodes.iloc[a_idx].index.values[0]
closest_node_to_b = nodes.iloc[b_idx].index.values[0]
closest_node_to_c = nodes.iloc[c_idx].index.values[0]
# Plot the graph with the three matched nodes highlighted.
fig, ax = ox.plot_graph(G, fig_height=10, fig_width=10,
                        show=False, close=False,
                        edge_color='black')
# NOTE(review): `G.node` is the legacy networkx accessor (removed in
# networkx >= 2.4); newer versions require `G.nodes`.
ax.scatter(G.node[closest_node_to_a]['x'],
           G.node[closest_node_to_a]['y'],
           c='green', s=100)
ax.scatter(G.node[closest_node_to_b]['x'],
           G.node[closest_node_to_b]['y'],
           c='green', s=100)
ax.scatter(G.node[closest_node_to_c]['x'],
           G.node[closest_node_to_c]['y'],
           c='green', s=100)
plt.show()
# Shortest route between the nodes nearest to points A and C.
route = nx.shortest_path(G, closest_node_to_a,
                        closest_node_to_c)
fig, ax = ox.plot_graph_route(G, route, fig_height=10,
                              fig_width=10,
                              show=False, close=False,
                              edge_color='black',
                              orig_dest_node_color='green',
                              route_color='green')
plt.show()
| [
"osmnx.plot_graph",
"numpy.random.choice",
"osmnx.plot_graph_route",
"sklearn.neighbors.KDTree",
"osmnx.graph_from_point",
"networkx.shortest_path",
"osmnx.graph_to_gdfs",
"matplotlib.pyplot.show"
] | [((187, 234), 'osmnx.graph_from_point', 'ox.graph_from_point', (['sorth_street'], {'distance': '(500)'}), '(sorth_street, distance=500)\n', (206, 234), True, 'import osmnx as ox\n'), ((235, 300), 'osmnx.plot_graph', 'ox.plot_graph', (['G'], {'fig_height': '(10)', 'fig_width': '(10)', 'edge_color': '"""green"""'}), "(G, fig_height=10, fig_width=10, edge_color='green')\n", (248, 300), True, 'import osmnx as ox\n'), ((409, 467), 'osmnx.plot_graph_route', 'ox.plot_graph_route', (['G', 'route'], {'fig_height': '(10)', 'fig_width': '(10)'}), '(G, route, fig_height=10, fig_width=10)\n', (428, 467), True, 'import osmnx as ox\n'), ((480, 499), 'osmnx.graph_to_gdfs', 'ox.graph_to_gdfs', (['G'], {}), '(G)\n', (496, 499), True, 'import osmnx as ox\n'), ((650, 695), 'sklearn.neighbors.KDTree', 'KDTree', (["nodes[['y', 'x']]"], {'metric': '"""euclidean"""'}), "(nodes[['y', 'x']], metric='euclidean')\n", (656, 695), False, 'from sklearn.neighbors import KDTree\n'), ((1049, 1143), 'osmnx.plot_graph', 'ox.plot_graph', (['G'], {'fig_height': '(10)', 'fig_width': '(10)', 'show': '(False)', 'close': '(False)', 'edge_color': '"""black"""'}), "(G, fig_height=10, fig_width=10, show=False, close=False,\n edge_color='black')\n", (1062, 1143), True, 'import osmnx as ox\n'), ((1534, 1544), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1542, 1544), True, 'import matplotlib.pyplot as plt\n'), ((1554, 1611), 'networkx.shortest_path', 'nx.shortest_path', (['G', 'closest_node_to_a', 'closest_node_to_c'], {}), '(G, closest_node_to_a, closest_node_to_c)\n', (1570, 1611), True, 'import networkx as nx\n'), ((1647, 1809), 'osmnx.plot_graph_route', 'ox.plot_graph_route', (['G', 'route'], {'fig_height': '(10)', 'fig_width': '(10)', 'show': '(False)', 'close': '(False)', 'edge_color': '"""black"""', 'orig_dest_node_color': '"""green"""', 'route_color': '"""green"""'}), "(G, route, fig_height=10, fig_width=10, show=False,\n close=False, edge_color='black', orig_dest_node_color='green',\n 
route_color='green')\n", (1666, 1809), True, 'import osmnx as ox\n'), ((1952, 1962), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1960, 1962), True, 'import matplotlib.pyplot as plt\n'), ((330, 355), 'numpy.random.choice', 'np.random.choice', (['G.nodes'], {}), '(G.nodes)\n', (346, 355), True, 'import numpy as np\n'), ((382, 407), 'numpy.random.choice', 'np.random.choice', (['G.nodes'], {}), '(G.nodes)\n', (398, 407), True, 'import numpy as np\n')] |
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
import numpy as np
import random
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from lightgbm import LGBMRegressor
class LayerSizeGenerator:
    """Ad-hoc sampler for the MLP ``hidden_layer_sizes`` search dimension.

    Exposes the ``rvs`` method expected of scipy-style distributions in a
    randomized hyper-parameter search: each draw is a list of 1 or 2 layer
    widths, each between 1 and 15 neurons.
    """

    def __init__(self):
        self.num_layers = [1, 2]
        self.num_neurons = np.arange(1, 15 + 1, 1)

    def rvs(self, random_state=42):
        """Draw one hidden-layer configuration as a list of layer widths."""
        # NOTE: seeds the *global* random module, so draws are reproducible
        # for a given random_state.
        random.seed(random_state)
        # Depth first, then an independent width for each layer.
        depth = random.choice(self.num_layers)
        return random.choices(self.num_neurons, k=depth)
class NNCombWrapper():
    """Randomized-search wrapper around sklearn's MLPRegressor."""

    def __init__(self, model_params=None):
        """Set up model metadata and the hyper-parameter search space.

        model_params: optional dict of keyword arguments forwarded to
        MLPRegressor; when None, a default-configured model is used.
        """
        self.model_name = "nncomb"
        self.search_type = 'random'
        # Value lists / arrays / a custom sampler, in the shape expected by
        # a randomized hyper-parameter search.
        self.param_grid = {
            "early_stopping": [True],
            "learning_rate": ["invscaling"],
            "learning_rate_init": np.linspace(0.001, 0.999, 100),
            'alpha': np.linspace(0.001, 0.999, 100),
            'solver': ["adam"],
            'activation': ["relu"],
            "hidden_layer_sizes": LayerSizeGenerator(),
        }
        self.ModelClass = (
            MLPRegressor() if model_params is None
            else MLPRegressor(**model_params)
        )
class RandomForestWrapper():
    """Randomized-search wrapper around sklearn's RandomForestRegressor."""

    def __init__(self, model_params=None):
        """Set up model metadata and the hyper-parameter search space.

        model_params: optional dict of keyword arguments forwarded to
        RandomForestRegressor; when None, a default-configured model is used.
        """
        self.model_name = "random_forest"
        self.search_type = 'random'
        # scipy integer distributions plus a categorical list, in the shape
        # expected by a randomized hyper-parameter search.
        self.param_grid = {
            "max_features": ['auto', 'sqrt', 'log2'],
            "min_samples_split": sp_randint(2, 31),
            "n_estimators": sp_randint(2, 301),
            "max_depth": sp_randint(2, 20),
        }
        self.ModelClass = (
            RandomForestRegressor() if model_params is None
            else RandomForestRegressor(**model_params)
        )
class LGBWrapper():
    """Randomized-search wrapper around LightGBM's LGBMRegressor."""

    def __init__(self, model_params=None):
        """Set up model metadata and the hyper-parameter search space.

        model_params: optional dict of keyword arguments forwarded to
        LGBMRegressor; when None, a default-configured model is used.
        """
        self.model_name = "lgb_regression"
        self.search_type = 'random'
        # scipy distributions and explicit value lists, in the shape expected
        # by a randomized hyper-parameter search.
        self.param_grid = {
            'num_leaves': sp_randint(6, 50),
            'min_child_samples': sp_randint(100, 500),
            'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
            'subsample': sp_uniform(loc=0.2, scale=0.8),
            "n_estimators": sp_randint(500, 1000),
            "max_depth": sp_randint(3, 100),
            "learning_rate": np.linspace(0.001, 0.99, 100),
            'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),
            'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],
            'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100],
            "objective": ["huber"],
        }
        self.ModelClass = (
            LGBMRegressor() if model_params is None
            else LGBMRegressor(**model_params)
        )
"scipy.stats.randint",
"sklearn.neural_network.MLPRegressor",
"random.choice",
"sklearn.ensemble.RandomForestRegressor",
"scipy.stats.uniform",
"lightgbm.LGBMRegressor",
"random.seed",
"random.choices",
"numpy.linspace",
"numpy.arange"
] | [((371, 394), 'numpy.arange', 'np.arange', (['(1)', '(15 + 1)', '(1)'], {}), '(1, 15 + 1, 1)\n', (380, 394), True, 'import numpy as np\n'), ((438, 463), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (449, 463), False, 'import random\n'), ((575, 605), 'random.choice', 'random.choice', (['self.num_layers'], {}), '(self.num_layers)\n', (588, 605), False, 'import random\n'), ((628, 674), 'random.choices', 'random.choices', (['self.num_neurons'], {'k': 'num_layers'}), '(self.num_neurons, k=num_layers)\n', (642, 674), False, 'import random\n'), ((1003, 1033), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.999)', '(100)'], {}), '(0.001, 0.999, 100)\n', (1014, 1033), True, 'import numpy as np\n'), ((1071, 1101), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.999)', '(100)'], {}), '(0.001, 0.999, 100)\n', (1082, 1101), True, 'import numpy as np\n'), ((1335, 1349), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {}), '()\n', (1347, 1349), False, 'from sklearn.neural_network import MLPRegressor\n'), ((1394, 1422), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {}), '(**model_params)\n', (1406, 1422), False, 'from sklearn.neural_network import MLPRegressor\n'), ((1691, 1708), 'scipy.stats.randint', 'sp_randint', (['(2)', '(31)'], {}), '(2, 31)\n', (1701, 1708), True, 'from scipy.stats import randint as sp_randint\n'), ((1753, 1771), 'scipy.stats.randint', 'sp_randint', (['(2)', '(301)'], {}), '(2, 301)\n', (1763, 1771), True, 'from scipy.stats import randint as sp_randint\n'), ((1813, 1830), 'scipy.stats.randint', 'sp_randint', (['(2)', '(20)'], {}), '(2, 20)\n', (1823, 1830), True, 'from scipy.stats import randint as sp_randint\n'), ((1895, 1918), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (1916, 1918), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1963, 2000), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), 
'(**model_params)\n', (1984, 2000), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2186, 2203), 'scipy.stats.randint', 'sp_randint', (['(6)', '(50)'], {}), '(6, 50)\n', (2196, 2203), True, 'from scipy.stats import randint as sp_randint\n'), ((2253, 2273), 'scipy.stats.randint', 'sp_randint', (['(100)', '(500)'], {}), '(100, 500)\n', (2263, 2273), True, 'from scipy.stats import randint as sp_randint\n'), ((2411, 2441), 'scipy.stats.uniform', 'sp_uniform', ([], {'loc': '(0.2)', 'scale': '(0.8)'}), '(loc=0.2, scale=0.8)\n', (2421, 2441), True, 'from scipy.stats import uniform as sp_uniform\n'), ((2486, 2507), 'scipy.stats.randint', 'sp_randint', (['(500)', '(1000)'], {}), '(500, 1000)\n', (2496, 2507), True, 'from scipy.stats import randint as sp_randint\n'), ((2549, 2567), 'scipy.stats.randint', 'sp_randint', (['(3)', '(100)'], {}), '(3, 100)\n', (2559, 2567), True, 'from scipy.stats import randint as sp_randint\n'), ((2613, 2642), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.99)', '(100)'], {}), '(0.001, 0.99, 100)\n', (2624, 2642), True, 'import numpy as np\n'), ((2691, 2721), 'scipy.stats.uniform', 'sp_uniform', ([], {'loc': '(0.4)', 'scale': '(0.6)'}), '(loc=0.4, scale=0.6)\n', (2701, 2721), True, 'from scipy.stats import uniform as sp_uniform\n'), ((2988, 3003), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {}), '()\n', (3001, 3003), False, 'from lightgbm import LGBMRegressor\n'), ((3048, 3077), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {}), '(**model_params)\n', (3061, 3077), False, 'from lightgbm import LGBMRegressor\n')] |
import numpy as np
from itertools import product
from analysis.utils import one_hot_to_int
def get_oq_keys(X_i, task, to_int=True):
    """Split one SequenceLearning input matrix into obs/query components.

    Parameters
    ----------
    X_i : np array
        a single sample produced by a SequenceLearning task
    task : object
        the SequenceLearning task that generated X_i (supplies k_dim, v_dim)
    to_int : bool
        if True, collapse every one-hot row to its integer index

    Returns
    -------
    list, list, list
        observation keys, query keys, observation values
    """
    k_dim, v_dim = task.k_dim, task.v_dim
    # Column layout of X_i: [obs key | obs value | ... | query key].
    o_key = X_i[:, :k_dim]
    o_val = X_i[:, k_dim:k_dim + v_dim]
    q_key = X_i[:, -k_dim:]
    if to_int:
        # Decode each one-hot row into its integer representation.
        o_key = [one_hot_to_int(row) for row in o_key]
        q_key = [one_hot_to_int(row) for row in q_key]
        o_val = [one_hot_to_int(row) for row in o_val]
    return o_key, q_key, o_val
def set_nanadd(input_set, new_element):
    """Add ``new_element`` to ``input_set`` unless it is NaN.

    Mutates ``input_set`` in place and also returns it, so it can be used
    in either assignment or statement form.

    Parameters
    ----------
    input_set : set
        a set of int
    new_element : int
        candidate element; NaN values are silently dropped

    Returns
    -------
    set
        the (possibly) updated set
    """
    if np.isnan(new_element):
        # NaN marks a missing/removed entry — never record it.
        return input_set
    input_set.add(new_element)
    return input_set
def _compute_true_dk(o_key, q_key, o_val, task):
    """Compute the ground-truth "don't know" flag at every time step.

    Parameters
    ----------
    o_key : list of int
        Observation keys over time (NaN = missing due to delay).
    q_key : list of int
        Query keys over time.
    o_val : list of int
        Observation values over time (NaN = observation removed).
    task : obj
        The SL task; must have ``n_parts == 2``.

    Returns
    -------
    np array of bool
        True at time t iff the query at t has not yet been observed.
    """
    assert task.n_parts == 2, 'this function only works for 2-part seq'
    assert len(o_key) == len(q_key), 'obs seq length must match query seq'
    n_steps = len(o_key)
    # sets of keys seen so far (queries tracked for bookkeeping only)
    seen_obs_keys, seen_query_keys = set(), set()
    # default everything to "don't know"; flip to False once resolvable
    dont_know = np.ones(n_steps, dtype=bool)
    for t in range(n_steps):
        seen_query_keys = set_nanadd(seen_query_keys, q_key[t])
        # only a step with an actual (non-NaN) observation can resolve the
        # query -- both the key update and the membership test are guarded
        if np.isnan(o_val[t]):
            continue
        # a non-NaN key (i.e. not removed by delay) counts as observed
        seen_obs_keys = set_nanadd(seen_obs_keys, o_key[t])
        if q_key[t] in seen_obs_keys:
            # the query was already observed: no longer "don't know"
            dont_know[t] = False
    return dont_know
def compute_true_dk(X_i, task):
    """Compute objective uncertainty with or without EM (EM vs. WM).

    - with EM == no flushing, which applies to the RM condition
    - WM == w/o EM == EM flushed, which applies to the NM and DM

    Parameters
    ----------
    X_i : np array
        A sample from the SequenceLearning task.
    task : object
        The SequenceLearning task that generated ``X_i``.

    Returns
    -------
    dict
        Ground truth / objective uncertainty under keys 'EM' and 'WM'.
    """
    assert task.n_parts == 2, 'this function only works for 2-part seq'
    obs_keys, query_keys, obs_vals = get_oq_keys(X_i, task, to_int=True)
    part_len = len(obs_keys) // task.n_parts
    return {
        # full two-part sequence: episodic memory intact
        'EM': _compute_true_dk(obs_keys, query_keys, obs_vals, task),
        # second part only: episodic memory flushed
        'WM': _compute_true_dk(
            obs_keys[part_len:], query_keys[part_len:], obs_vals[part_len:],
            task),
    }
def batch_compute_true_dk(X, task, dtype=bool):
    """Compute the uncertainty ground truth for a sample/batch of data.

    A batched wrapper around ``compute_true_dk()``.

    Parameters
    ----------
    X : 3d array
        A batch of samples from the SL task.
    task : obj
        The SL task.
    dtype : type
        dtype of the returned arrays.

    Returns
    -------
    2d array, 2d array
        Uncertainty with / without an episodic flush (WM, EM).
    """
    n_samples = len(X)
    dk_wm = np.zeros((n_samples, task.n_param), dtype=dtype)
    dk_em = np.zeros((n_samples, task.n_param * task.n_parts), dtype=dtype)
    for i, X_i in enumerate(X):
        total_len = np.shape(X_i)[0]
        part_len, pad_len, _, _ = task.get_time_param(total_len)
        # mask selecting the prediction time points within the sequence
        pred_mask = task.get_pred_time_mask(total_len, part_len, pad_len)
        # objective uncertainty, with (EM) or without (WM) episodic memory
        dk_i = compute_true_dk(X_i, task)
        dk_wm[i] = dk_i['WM'][pred_mask[part_len:]]
        dk_em[i] = dk_i['EM'][pred_mask]
    return dk_wm, dk_em
def compute_event_similarity_matrix(Y, normalize=False):
    """Compute the inter-event similarity matrix of a batch of data.

    The similarity of two events is the number of positions at which their
    integer representations agree, optionally normalized by event length.

    e.g.
        task = SequenceLearning(n_param, n_branch, n_parts=1)
        X, Y = task.sample(n_samples)
        similarity_matrix = compute_event_similarity_matrix(Y, normalize=False)

    Parameters
    ----------
    Y : 3d array (n_examples, _, _) or 2d array (n_examples, _)
        the target values; 3d input is treated as one-hot and argmax-ed
    normalize : bool
        whether to normalize by vector dim

    Returns
    -------
    2d array (n_examples, n_examples)
        the inter-event similarity matrix

    Raises
    ------
    ValueError
        If ``Y`` is neither 2d nor 3d.
    """
    if len(np.shape(Y)) == 3:
        Y_int = np.argmax(Y, axis=-1)
    elif len(np.shape(Y)) == 2:
        Y_int = np.asarray(Y)
    else:
        raise ValueError('Invalid Y shape')
    # vectorized pairwise comparison: count matching positions for every
    # (i, j) pair in one broadcasted operation instead of an O(n^2)
    # python double loop that also recomputed the symmetric entries
    matches = Y_int[:, None, :] == Y_int[None, :, :]
    similarity_matrix = matches.sum(axis=-1).astype(float)
    if normalize:
        # normalize by the event length, mapping entries onto [0, 1]
        similarity_matrix /= Y_int.shape[1]
    return similarity_matrix
def compute_event_similarity(event_i, event_j, normalize=True):
    """Count the positions at which two event vectors agree.

    e.g.
        event_i = np.argmax(q_vals_vec[i], axis=-1)
        event_j = np.argmax(q_vals_vec[j], axis=-1)
        sim_ij = compute_event_similarity(event_i, event_j, normalize=True)

    Parameters
    ----------
    event_i/j : 1d np array
        event representations of equal shape
    normalize : bool
        whether to normalize by vector dim

    Returns
    -------
    float
        similarity
    """
    assert np.shape(event_i) == np.shape(event_j)
    n_matches = np.sum(event_i == event_j)
    # dividing by the event length maps the count onto [0, 1]
    return n_matches / len(event_i) if normalize else n_matches
#
# def remove_identical_events(Ys, n_param):
# """remove events that are identical
#
# Parameters
# ----------
# Ys : 3d array
# number of examples x number of time points x feature dim
# n_param : int
# indicate max(number of shared parameters)
#
# Returns
# -------
# Ys : 3d array
# number of examples' x number of time points x feature dim
#
# """
# event_similarity_matrix = compute_event_similarity(Ys, tril_k=-1)
# repeated_id = np.where(event_similarity_matrix == n_param)
# rm_axis = 0
# Ys_ = np.delete(Ys, repeated_id[rm_axis], axis=rm_axis)
# return Ys_
| [
"numpy.ones",
"numpy.argmax",
"numpy.sum",
"numpy.zeros",
"analysis.utils.one_hot_to_int",
"numpy.isnan",
"numpy.shape"
] | [((2141, 2170), 'numpy.ones', 'np.ones', (['T_total_'], {'dtype': 'bool'}), '(T_total_, dtype=bool)\n', (2148, 2170), True, 'import numpy as np\n'), ((4221, 4269), 'numpy.zeros', 'np.zeros', (['(n_samples, task.n_param)'], {'dtype': 'dtype'}), '((n_samples, task.n_param), dtype=dtype)\n', (4229, 4269), True, 'import numpy as np\n'), ((4282, 4345), 'numpy.zeros', 'np.zeros', (['(n_samples, task.n_param * task.n_parts)'], {'dtype': 'dtype'}), '((n_samples, task.n_param * task.n_parts), dtype=dtype)\n', (4290, 4345), True, 'import numpy as np\n'), ((5761, 5793), 'numpy.zeros', 'np.zeros', (['(n_samples, n_samples)'], {}), '((n_samples, n_samples))\n', (5769, 5793), True, 'import numpy as np\n'), ((6573, 6599), 'numpy.sum', 'np.sum', (['(event_i == event_j)'], {}), '(event_i == event_j)\n', (6579, 6599), True, 'import numpy as np\n'), ((1326, 1347), 'numpy.isnan', 'np.isnan', (['new_element'], {}), '(new_element)\n', (1334, 1347), True, 'import numpy as np\n'), ((5565, 5586), 'numpy.argmax', 'np.argmax', (['Y'], {'axis': '(-1)'}), '(Y, axis=-1)\n', (5574, 5586), True, 'import numpy as np\n'), ((5722, 5733), 'numpy.shape', 'np.shape', (['Y'], {}), '(Y)\n', (5730, 5733), True, 'import numpy as np\n'), ((6517, 6534), 'numpy.shape', 'np.shape', (['event_i'], {}), '(event_i)\n', (6525, 6534), True, 'import numpy as np\n'), ((6538, 6555), 'numpy.shape', 'np.shape', (['event_j'], {}), '(event_j)\n', (6546, 6555), True, 'import numpy as np\n'), ((778, 802), 'analysis.utils.one_hot_to_int', 'one_hot_to_int', (['o_key[t]'], {}), '(o_key[t])\n', (792, 802), False, 'from analysis.utils import one_hot_to_int\n'), ((848, 872), 'analysis.utils.one_hot_to_int', 'one_hot_to_int', (['q_key[t]'], {}), '(q_key[t])\n', (862, 872), False, 'from analysis.utils import one_hot_to_int\n'), ((918, 942), 'analysis.utils.one_hot_to_int', 'one_hot_to_int', (['o_val[t]'], {}), '(o_val[t])\n', (932, 942), False, 'from analysis.utils import one_hot_to_int\n'), ((2399, 2417), 'numpy.isnan', 
'np.isnan', (['o_val[t]'], {}), '(o_val[t])\n', (2407, 2417), True, 'import numpy as np\n'), ((4506, 4520), 'numpy.shape', 'np.shape', (['X[i]'], {}), '(X[i])\n', (4514, 4520), True, 'import numpy as np\n'), ((5530, 5541), 'numpy.shape', 'np.shape', (['Y'], {}), '(Y)\n', (5538, 5541), True, 'import numpy as np\n'), ((5600, 5611), 'numpy.shape', 'np.shape', (['Y'], {}), '(Y)\n', (5608, 5611), True, 'import numpy as np\n')] |
# DMD algorithms by <NAME>.
#
# TODO:
# - Should we create an ABC interface for DMD?
# - __init__.py and separate files
#
import numpy as np
from numpy.linalg import svd, pinv, eig
from scipy.linalg import expm
from .process import _threshold_svd, dag
class DMD:
    def __init__(self, X2, X1, ts, **kwargs):
        """ X2 = A X1
        Args:
            X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
            X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
            ts (:obj:`ndarray` of float): Time measurements
            **kwargs: see Keyword arguments.
        Keyword arguments:
            threshold (real, int): Truncate the singular values associated with DMD modes. default None.
            threshold_type (str): One of {'number', 'percent'}. default 'percent'.
            dmd_modes (str): One of {'exact', 'projected'}. default 'exact'.
        Attributes:
            X2 (:obj:`ndarray` of float): Left side data matrix
            X1 (:obj:`ndarray` of float): Right side data matrix
            t0 (float): Initial time.
            dt (float): Step size.
            orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
            A (:obj:`ndarray` of float): Learned drift operator.
            Atilde (:obj:`ndarray` of float): Projected A.
            eigs (list of float): Eigenvalues of Atilde.
            modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
        """
        self.X2 = X2
        self.X1 = X1
        self.t0 = ts[0]
        self.dt = ts[1] - ts[0]
        self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]

        # I. Compute SVD of X1 (optionally truncated)
        threshold = kwargs.get('threshold', None)
        if threshold is None:
            U, S, Vt = svd(self.X1, full_matrices=False)
        else:
            threshold_type = kwargs.get('threshold_type', 'percent')
            U, S, Vt = _threshold_svd(self.X1, threshold, threshold_type)

        # II: Compute operators: X2 = A X1 and Atilde = U* A U
        # Fix: store Atilde on self -- the docstring documents it as an
        # attribute and the sibling classes (DMDc, biDMD, biDMDc) expose it.
        self.Atilde = dag(U) @ self.X2 @ dag(Vt) @ np.diag(1 / S)
        self.A = self.X2 @ dag(Vt) @ np.diag(1 / S) @ dag(U)

        # III. DMD Modes
        # Atilde W = W Y (Eigendecomposition)
        self.eigs, W = eig(self.Atilde)
        # Two versions (eigenvectors of A)
        #   (i)  DMD_exact = X2 V S^-1 W
        #   (ii) DMD_proj  = U W
        dmd_modes = kwargs.get('dmd_modes', 'exact')
        if dmd_modes == 'exact':
            self.modes = self.X2 @ dag(Vt) @ np.diag(1 / S) @ W
        elif dmd_modes == 'projected':
            self.modes = U @ W
        else:
            raise ValueError('In DMD initialization, unknown dmd_mode type.')

    @classmethod
    def from_full(cls, X, ts, **kwargs):
        """Construct a DMD model from a single data matrix by column shifting."""
        X1 = X[:, :-1]
        X2 = X[:, 1:]
        return cls(X2, X1, ts, **kwargs)

    def time_spectrum(self, ts, system='discrete'):
        """Returns a continuous approximation to the time dynamics of A.
        Note that A_dst = e^(A_cts dt). Suppose (operator, eigs) pairs are denoted (A_dst, Y) for the discrete case
        and (A_cts, Omega) for the continuous case. The eigenvalue correspondence is e^log(Y)/dt = Omega.
        Args:
            ts (:obj:`ndarray` of float): Times.
            system ({'continuous', 'discrete'}): default 'discrete'.
        Returns:
            :obj:`ndarray` of float: Evaluations of modes at ts.
        Raises:
            ValueError: If ``system`` is neither 'discrete' nor 'continuous'.
        """
        if np.isscalar(ts):
            # Cast eigs to complex numbers so the logarithm is defined for
            # negative / complex eigenvalues
            if system == 'discrete':
                omega = np.log(self.eigs + 0j) / self.dt
            elif system == 'continuous':
                omega = self.eigs + 0j
            else:
                raise ValueError('In time_spectrum, invalid system value.')
            return np.exp(omega * (ts - self.t0))
        else:
            return np.array([self.time_spectrum(it, system=system) for it in ts]).T

    def _predict(self, ts, x0, system):
        """Project x0 onto the DMD modes and evolve each mode over ts."""
        left = self.modes
        right = pinv(self.modes) @ x0
        if np.isscalar(ts):
            return left @ np.diag(self.time_spectrum(ts, system)) @ right
        else:
            return np.array([left @ np.diag(self.time_spectrum(it, system)) @ right for it in ts]).T

    def predict_dst(self, ts=None, x0=None):
        """Predict the future state using continuous approximation to the discrete A.
        Args:
            ts (:obj:`ndarray` of float): Array of time-steps to predict. default self.orig_timesteps.
            x0 (:obj:`ndarray` of float): The initial value. default self.X1[:, 0].
        Returns:
            :obj:`ndarray` of float: Predicted state for each time step.
        """
        x0 = self.X1[:, 0] if x0 is None else x0
        ts = self.orig_timesteps if ts is None else ts
        return self._predict(ts, x0, 'discrete')

    def predict_cts(self, ts=None, x0=None):
        """Predict the future state using the continuous operator A.
        Args:
            ts (:obj:`ndarray` of float): Array of time-steps to predict. default self.orig_timesteps.
            x0 (:obj:`ndarray` of float): The initial value. default self.X1[:, 0].
        Returns:
            :obj:`ndarray` of float: Predicted state for each time step.
        """
        x0 = self.X1[:, 0] if x0 is None else x0
        ts = self.orig_timesteps if ts is None else ts
        return self._predict(ts, x0, 'continuous')
class DMDc:
    def __init__(self, X2, X1, U, ts, **kwargs):
        """ X2 = A X1 + B U
        Args:
            X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
            X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
            U (:obj:`ndarray` of float): Control signal(s) with columns containing controls.
            ts (:obj:`ndarray` of float): Time measurements
            **kwargs: see Keyword arguments.
        Keyword arguments:
            threshold (real, int): Truncate the singular values associated with DMD modes. default None.
            threshold_type (str): One of {'number', 'percent'}. default 'percent'.
        Attributes:
            X2 (:obj:`ndarray` of float): Left side data matrix
            X1 (:obj:`ndarray` of float): Right side data matrix
            U (:obj:`ndarray` of float): Control signal data matrix
            t0 (float): Initial time.
            dt (float): Step size.
            orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
            A (:obj:`ndarray` of float): Learned drift operator.
            Atilde (:obj:`ndarray` of float): Projected A.
            B (:obj:`ndarray` of float): Learned control operator.
            Btilde (:obj:`ndarray` of float): projected B.
            eigs (list of float): Eigenvalues of Atilde.
            modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
        """
        self.X1 = X1
        self.X2 = X2
        # align the control columns with X1 (drop the final control if needed)
        self.U = U if U.shape[1] == self.X1.shape[1] else U[:, :-1]
        self.t0 = ts[0]
        self.dt = ts[1] - ts[0]
        self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]
        # stack state and control into a single regression data matrix
        Omega = np.vstack([self.X1, self.U])

        # I. SVDs of the stacked matrix and of X2 (optionally truncated)
        threshold = kwargs.get('threshold', None)
        if threshold is None:
            U_om, S_om, Vt_om = svd(Omega, full_matrices=False)
            U_x2, S_x2, Vt_x2 = svd(self.X2, full_matrices=False)
        else:
            # the two SVDs may be thresholded independently
            t_om, t_x2 = 2 * [threshold] if np.isscalar(threshold) else threshold
            threshold_type = kwargs.get('threshold_type', 'percent')
            U_om, S_om, Vt_om = _threshold_svd(Omega, t_om, threshold_type)
            U_x2, S_x2, Vt_x2 = _threshold_svd(self.X2, t_x2, threshold_type)

        # II. Least-squares operators: [A, B] = X2 pinv(Omega)
        n_states = self.X2.shape[0]
        shared = self.X2 @ dag(Vt_om) @ np.diag(1 / S_om)
        self.A = shared @ dag(U_om[:n_states, :])
        self.B = shared @ dag(U_om[n_states:, :])

        # III. Project onto the POD modes of X2 and eigendecompose
        self.Atilde = dag(U_x2) @ self.A @ U_x2
        self.Btilde = dag(U_x2) @ self.B
        self.eigs, W = eig(self.Atilde)
        self.modes = self.A @ U_x2 @ W

    @classmethod
    def from_full(cls, X, U, ts, **kwargs):
        """Construct a DMDc model from a single data matrix by column shifting."""
        return cls(X[:, 1:], X[:, :-1], U, ts, **kwargs)

    def predict_dst(self, control=None, x0=None):
        """ Predict the future state using discrete evolution.
        Evolves the system from x0 for as long as control is available via
        the discrete update X_2 = A X_1 + B u_1.
        Default behavior (control=None) replays the original control. (If the
        underlying A is desired, format zeros_like u that runs for the
        desired time.)
        Args:
            control (:obj:`ndarray` of float): The control signal.
            x0 (:obj:`ndarray` of float): The initial value.
        Returns:
            :obj:`ndarray` of float: Predicted state for each control input.
        """
        ctrl = self.U if control is None else control
        state = self.X1[:, 0] if x0 is None else x0
        trajectory = [state]
        for u_t in ctrl[:, :-1].T:
            state = self.A @ state + self.B @ u_t
            trajectory.append(state)
        return np.array(trajectory).T

    def predict_cts(self, control=None, x0=None, dt=None):
        """ Predict the future state using continuous evolution.
        Evolves the system from x0 for as long as control is available,
        holding u constant over each step:
            X_dot = A X + B u
            x(t+dt) = e^{dt A}(x(t) + dt B u(t))
        Default behavior (control=None) replays the original control. (If the
        underlying A is desired, format zeros_like u that runs for the
        desired time.) Be sure that dt matches the train dt if using delay
        embeddings.
        Args:
            control (:obj:`ndarray` of float): The control signal.
                A zero-order hold is assumed between time steps.
                The dt must match the training data if time-delays are used.
            x0 (:obj:`ndarray` of float): The initial value.
            dt (float): The time-step between control inputs.
        Returns:
            :obj:`ndarray` of float: Predicted state for each control input.
        """
        ctrl = self.U if control is None else control
        step = self.dt if dt is None else dt
        state = self.X1[:, 0] if x0 is None else x0
        trajectory = [state]
        # the propagator is loop-invariant: A and dt do not change
        propagator = expm(step * self.A)
        for u_t in ctrl[:, :-1].T:
            state = propagator @ (state + step * self.B @ u_t)
            trajectory.append(state)
        return np.array(trajectory).T

    def zero_control(self, n_steps=None):
        """Return an all-zeros control signal with ``n_steps`` columns."""
        if n_steps is None:
            n_steps = len(self.orig_timesteps)
        return np.zeros([self.U.shape[0], n_steps])
class biDMD:
    def __init__(self, X2, X1, U, ts, **kwargs):
        """X2 = A X1 + U B X1
        Bilinear DMD: the control enters multiplicatively through the
        augmented state Ups = U*X1.
        Args:
            X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
            X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
            U (:obj:`ndarray` of float): Control signal(s) with columns containing controls.
            ts (:obj:`ndarray` of float): Time measurements
            **kwargs: see Keyword arguments.
        Keyword arguments:
            shift (int): Number of time delays in order to match times in the nonlinear term. default 0.
            threshold (real, int): Truncate the singular values associated with DMD modes. default None.
            threshold_type (str): One of {'number', 'percent'}. default 'percent'.
        Attributes:
            X2 (:obj:`ndarray` of float): Left side data matrix
            X1 (:obj:`ndarray` of float): Right side data matrix
            U (:obj:`ndarray` of float): Control signal data matrix
            Ups (:obj:`ndarray` of float): augmented state U*X1.
            t0 (float): Initial time.
            dt (float): Step size.
            orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
            A (:obj:`ndarray` of float): Learned drift operator.
            Atilde (:obj:`ndarray` of float): Projected A.
            B (:obj:`ndarray` of float): Learned nonlinear control operator.
            Btilde (:obj:`ndarray` of float): projected B.
            eigs (list of float): Eigenvalues of Atilde.
            modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
        """
        self.U = U
        self.X1 = X1
        self.X2 = X2
        self.t0 = ts[0]
        self.dt = ts[1] - ts[0]
        self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]
        # store useful dimension
        n_time = len(self.orig_timesteps)
        # Partially unwrap delay embedding to make sure the correct control signals
        # are combined with the correct data times. The unwrapped (=>) operators:
        #     X1  => (delays+1) x (measured dimensions) x (measurement times)
        #     U   => (delays+1) x (number of controls) x (measurement times)
        #     Ups => (delays+1) x (controls) x (measured dimensions) x (measurement times)
        #         => (delays+1 x controls x measured dimensions) x (measurement times)
        # Re-flatten all but the time dimension of Ups to set the structure of the
        # data matrix. This will set the structure of the B operator to match our
        # time-delay function.
        self.shift = kwargs.get('shift', 0)
        self.Ups = np.einsum('sct, smt->scmt',
                             self.U.reshape(self.shift + 1, -1, n_time),
                             self.X1.reshape(self.shift + 1, -1, n_time)
                             ).reshape(-1, n_time)
        # stack state and bilinear term into one regression data matrix
        Omega = np.vstack([self.X1, self.Ups])
        # I. Compute SVDs
        threshold = kwargs.get('threshold', None)
        if threshold is None:
            Ug, Sg, Vgt = svd(Omega, full_matrices=False)
            # NOTE: this rebinding shadows the control parameter U with the
            # left singular vectors of X2 for the rest of __init__
            U, S, Vt = svd(self.X2, full_matrices=False)
        else:
            # Allow for independent thresholding
            t1, t2 = 2 * [threshold] if np.isscalar(threshold) else threshold
            threshold_type = kwargs.get('threshold_type', 'percent')
            Ug, Sg, Vgt = _threshold_svd(Omega, t1, threshold_type)
            U, S, Vt = _threshold_svd(self.X2, t2, threshold_type)
        # II. Compute operators: [A, B] = X2 pinv(Omega)
        n, _ = self.X2.shape
        left = self.X2 @ dag(Vgt) @ np.diag(1 / Sg)
        self.A = left @ dag(Ug[:n, :])
        self.B = left @ dag(Ug[n:, :])
        # III. DMD modes (projection onto the POD modes of X2)
        self.Atilde = dag(U) @ self.A @ U
        self.Btilde = dag(U) @ self.B
        self.eigs, W = eig(self.Atilde)
        self.modes = self.A @ U @ W

    def predict_dst(self, control=None, x0=None):
        """ Predict the future state using discrete evolution.
        Evolve the system from X0 as long as control is available, using
        the discrete evolution:
            x_1 = A x_0 + B (u.x_0)
                = [A B] [x_0, u.x_0]^T
        Args:
            control (:obj:`ndarray` of float): The control signal.
                default: the training control ``self.U``.
            x0 (:obj:`ndarray` of float): The initial value.
                default: the first training state ``self.X1[:, 0]``.
        Returns:
            :obj:`ndarray` of float: Predicted state for each control input.
        """
        control = self.U if control is None else control
        xt = self.X1[:, 0] if x0 is None else x0  # Flat array
        res = [xt]
        for t in range(control.shape[1] - 1):
            # Outer product then flatten to correctly combine the different
            # times present due to time-delays. That is, make sure that
            # u(t)'s multiply x(t)'s
            #     _ct    => (time-delays + 1) x (number of controls)
            #     _xt    => (time-delays + 1) x (measured dimensions)
            #     _ups_t => (time-delays + 1) x (controls) x (measurements)
            # Flatten to get the desired vector.
            _ct = control[:, t].reshape(self.shift + 1, -1)
            _xt = xt.reshape(self.shift + 1, -1)
            ups_t = np.einsum('sc,sm->scm', _ct, _xt).flatten()
            xt_1 = self.A @ xt + self.B @ ups_t
            xt = xt_1
            res.append(xt_1)
        return np.array(res).T

    def predict_cts(self, control=None, x0=None, dt=None):
        """ Predict the future state using continuous evolution.
        Evolve the system from X0 as long as control is available, using
        the continuous evolution while u is constant,
            x_{t+1} = e^{A dt + u B dt } x_t
        Args:
            control (:obj:`ndarray` of float): The control signal.
                A zero-order hold is assumed between time steps.
                The dt must match the training data if time-delays are used.
            x0 (:obj:`ndarray` of float): The initial value.
            dt (float): The time-step between control inputs.
        Returns:
            :obj:`ndarray` of float: Predicted state for each control input.
        """
        control = self.U if control is None else control
        dt = self.dt if dt is None else dt
        xt = self.X1[:, 0] if x0 is None else x0  # Flat array
        # store useful dimensions
        delay_dim = self.shift + 1
        control_dim = self.U.shape[0] // delay_dim
        measure_1_dim = self.X1.shape[0] // delay_dim
        to_dim = self.X2.shape[0]
        res = [xt]
        for t in range(control.shape[1] - 1):
            # Correctly combine u(t) and B(t)
            # Initial:
            #     B    <= (time-delays+1 x measurements_2) x (time-delays+1 x controls x measurements_1)
            # Reshape:
            #     B    => (time-delays+1 x measurements_2) x (time-delays+1) x (controls) x (measurements_1)
            #     _ct  => (time-delays+1) x (controls)
            #     _uBt => (time-delays+1 x measurements_2) x (time-delays+1) x (measurements_1)
            #          => (time-delays+1 x measurements_2) x (time-delays+1 x measurements_1)
            # Notice that _uBt is formed by a sum over all controls in order to act on the
            # state xt which has dimensions of (delays x measurements_1).
            _uBt = np.einsum('ascm,sc->asm',
                             self.B.reshape(to_dim, delay_dim, control_dim, measure_1_dim),
                             control[:, t].reshape(delay_dim, control_dim)
                             ).reshape(to_dim, delay_dim * measure_1_dim)
            xt_1 = expm((self.A + _uBt) * dt) @ xt
            xt = xt_1
            res.append(xt_1)
        return np.array(res).T

    def zero_control(self, n_steps=None):
        # an all-zeros bilinear term with n_steps columns (default: the
        # length of the training time series)
        n_steps = len(self.orig_timesteps) if n_steps is None else n_steps
        return np.zeros([self.Ups.shape[0], n_steps])
class biDMDc:
    def __init__(self, X2, X1, U, ts, **kwargs):
        """ X2 = A X1 + U B X1 + D U
        Bilinear DMD with an additional linear control term D U.
        Args:
            X2 (:obj:`ndarray` of float): Left side data matrix with columns containing states at sequential times.
            X1 (:obj:`ndarray` of float): Right side data matrix with columns containing states at sequential times.
            U (:obj:`ndarray` of float): Control signal(s) with columns containing controls.
            ts (:obj:`ndarray` of float): Time measurements
            **kwargs: see Keyword arguments.
        Keyword arguments:
            shift (int): Number of time delays in order to match times in the nonlinear term. default 0.
            threshold (real, int): Truncate the singular values associated with DMD modes. default None.
            threshold_type (str): One of {'number', 'percent'}. default 'percent'.
        Attributes:
            X2 (:obj:`ndarray` of float): Left side data matrix
            X1 (:obj:`ndarray` of float): Right side data matrix
            U (:obj:`ndarray` of float): Control signal data matrix
            Ups (:obj:`ndarray` of float): augmented state U*X1.
            t0 (float): Initial time.
            dt (float): Step size.
            orig_timesteps (:obj:`ndarray` of float): Original times matching X1.
            A (:obj:`ndarray` of float): Learned drift operator.
            Atilde (:obj:`ndarray` of float): Projected A.
            B (:obj:`ndarray` of float): Learned nonlinear control operator.
            Btilde (:obj:`ndarray` of float): projected B.
            D (:obj:`ndarray` of float): Learned control operator.
            eigs (list of float): Eigenvalues of Atilde.
            modes (:obj:`ndarray` of float): DMD modes are eigenvectors of Atilde (shared by A).
        """
        self.U = U
        self.X1 = X1
        self.X2 = X2
        self.t0 = ts[0]
        self.dt = ts[1] - ts[0]
        self.orig_timesteps = ts if len(ts) == self.X1.shape[1] else ts[:-1]
        # store useful dimension
        n_time = len(self.orig_timesteps)
        self.shift = kwargs.get('shift', 0)
        delay_dim = self.shift + 1
        # Partially unwrap delay embedding to make sure the correct control signals
        # are combined with the correct data times. The unwrapped (=>) operators:
        #     X1  => (delays+1) x (measured dimensions) x (measurement times)
        #     U   => (delays+1) x (number of controls) x (measurement times)
        #     Ups => (delays+1) x (controls) x (measured dimensions) x (measurement times)
        #         => (delays+1 x controls x measured dimensions) x (measurement times)
        # Re-flatten all but the time dimension of Ups to set the structure of the
        # data matrix. This will set the structure of the B operator to match our
        # time-delay function.
        self.Ups = np.einsum('sct, smt->scmt',
                             self.U.reshape(delay_dim, -1, n_time),
                             self.X1.reshape(delay_dim, -1, n_time)
                             ).reshape(-1, n_time)
        # stack state, bilinear term, and control into one data matrix
        Omega = np.vstack([self.X1, self.Ups, self.U])
        # I. Compute SVDs
        threshold = kwargs.get('threshold', None)
        if threshold is None:
            Ug, Sg, Vgt = svd(Omega, full_matrices=False)
            # NOTE: this rebinding shadows the control parameter U with the
            # left singular vectors of X2 for the rest of __init__
            U, S, Vt = svd(self.X2, full_matrices=False)
        else:
            # Allow for independent thresholding
            t1, t2 = 2 * [threshold] if np.isscalar(threshold) else threshold
            threshold_type = kwargs.get('threshold_type', 'percent')
            Ug, Sg, Vgt = _threshold_svd(Omega, t1, threshold_type)
            U, S, Vt = _threshold_svd(self.X2, t2, threshold_type)
        # II. Compute operators: [A, B, D] = X2 pinv(Omega)
        c = self.U.shape[0] // delay_dim
        n = self.X1.shape[0]
        left = self.X2 @ dag(Vgt) @ np.diag(1 / Sg)
        # Omega = X + uX + u => dim'ns: n + c*n + c
        self.A = left @ dag(Ug[:n, :])
        self.B = left @ dag(Ug[n:(c + 1) * n, :])
        self.D = left @ dag(Ug[(c + 1) * n:, :])
        # III. DMD modes (projection onto the POD modes of X2)
        self.Atilde = dag(U) @ self.A @ U
        self.Btilde = dag(U) @ self.B
        self.Dtilde = dag(U) @ self.D
        self.eigs, W = eig(self.Atilde)
        self.modes = self.A @ U @ W

    def predict_dst(self, control=None, x0=None):
        """ Predict the future state using discrete evolution.
        Evolve the system from X0 as long as control is available, using
        the discrete evolution,
            x_1 = A x_0 + B (u*x_0) + D u
                = [A B D] [x_0, u*x_0, u ]^T
        Args:
            control (:obj:`ndarray` of float): The control signal.
                default: the training control ``self.U``.
            x0 (:obj:`ndarray` of float): The initial value.
                default: the first training state ``self.X1[:, 0]``.
        Returns:
            :obj:`ndarray` of float: Predicted state for each control input.
        """
        control = self.U if control is None else control
        xt = self.X1[:, 0] if x0 is None else x0  # Flat array
        res = [xt]
        for t in range(control.shape[1] - 1):
            # Outer product then flatten to correctly combine the different
            # times present due to time-delays. That is, make sure that
            # u(t)'s multiply x(t)'s
            #     _ct    => (time-delays + 1) x (number of controls)
            #     _xt    => (time-delays + 1) x (measured dimensions)
            #     _ups_t => (time-delays + 1) x (controls) x (measurements)
            # Flatten to get the desired vector.
            _ct = control[:, t].reshape(self.shift + 1, -1)
            _xt = xt.reshape(self.shift + 1, -1)
            ups_t = np.einsum('sc,sm->scm', _ct, _xt).flatten()
            xt_1 = self.A @ xt + self.B @ ups_t + self.D @ control[:, t]
            xt = xt_1
            res.append(xt_1)
        return np.array(res).T

    def predict_cts(self, control=None, x0=None, dt=None):
        """ Predict the future state using continuous evolution.
        Evolve the system from X0 as long as control is available, using
        the continuous evolution while u is constant,
            x_{t+1} = e^{A dt + u B dt } (x_t + dt * D u_t}
        Args:
            control (:obj:`ndarray` of float): The control signal.
                A zero-order hold is assumed between time steps.
                The dt must match the training data if time-delays are used.
            x0 (:obj:`ndarray` of float): The initial value.
            dt (float): The time-step between control inputs.
        Returns:
            :obj:`ndarray` of float: Predicted state for each control input.
        """
        control = self.U if control is None else control
        dt = self.dt if dt is None else dt
        xt = self.X1[:, 0] if x0 is None else x0  # Flat array
        # store useful dimensions
        delay_dim = self.shift + 1
        control_dim = self.U.shape[0] // delay_dim
        measure_1_dim = self.X1.shape[0] // delay_dim
        to_dim = self.X2.shape[0]
        res = [xt]
        for t in range(control.shape[1] - 1):
            # Correctly combine u(t) and B(t)
            # Initial:
            #     B    <= (time-delays+1 x measurements_2) x (time-delays+1 x controls x measurements_1)
            # Reshape:
            #     B    => (time-delays+1 x measurements_2) x (time-delays+1) x (controls) x (measurements_1)
            #     _ct  => (time-delays+1) x (controls)
            #     _uBt => (time-delays+1 x measurements_2) x (time-delays+1) x (measurements_1)
            #          => (time-delays+1 x measurements_2) x (time-delays+1 x measurements_1)
            # Notice that _uBt is formed by a sum over all controls in order to act on the
            # state xt which has dimensions of (delays x measurements_1).
            _uBt = np.einsum('ascm,sc->asm',
                             self.B.reshape(to_dim, delay_dim, control_dim, measure_1_dim),
                             control[:, t].reshape(delay_dim, control_dim)
                             ).reshape(to_dim, delay_dim * measure_1_dim)
            xt_1 = expm(dt * (self.A + _uBt)) @ (xt + dt * self.D @ control[:, t])
            xt = xt_1
            res.append(xt_1)
        return np.array(res).T

    def zero_control(self, n_steps=None):
        # an all-zeros bilinear term with n_steps columns (default: the
        # length of the training time series)
        n_steps = len(self.orig_timesteps) if n_steps is None else n_steps
        return np.zeros([self.Ups.shape[0], n_steps])
| [
"numpy.isscalar",
"numpy.linalg.eig",
"numpy.linalg.pinv",
"numpy.log",
"numpy.diag",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"scipy.linalg.expm",
"numpy.einsum",
"numpy.vstack",
"numpy.linalg.svd"
] | [((2440, 2451), 'numpy.linalg.eig', 'eig', (['Atilde'], {}), '(Atilde)\n', (2443, 2451), False, 'from numpy.linalg import svd, pinv, eig\n'), ((3622, 3637), 'numpy.isscalar', 'np.isscalar', (['ts'], {}), '(ts)\n', (3633, 3637), True, 'import numpy as np\n'), ((4228, 4243), 'numpy.isscalar', 'np.isscalar', (['ts'], {}), '(ts)\n', (4239, 4243), True, 'import numpy as np\n'), ((7288, 7316), 'numpy.vstack', 'np.vstack', (['[self.X1, self.U]'], {}), '([self.X1, self.U])\n', (7297, 7316), True, 'import numpy as np\n'), ((8339, 8355), 'numpy.linalg.eig', 'eig', (['self.Atilde'], {}), '(self.Atilde)\n', (8342, 8355), False, 'from numpy.linalg import svd, pinv, eig\n'), ((10974, 11010), 'numpy.zeros', 'np.zeros', (['[self.U.shape[0], n_steps]'], {}), '([self.U.shape[0], n_steps])\n', (10982, 11010), True, 'import numpy as np\n'), ((14016, 14046), 'numpy.vstack', 'np.vstack', (['[self.X1, self.Ups]'], {}), '([self.X1, self.Ups])\n', (14025, 14046), True, 'import numpy as np\n'), ((14935, 14951), 'numpy.linalg.eig', 'eig', (['self.Atilde'], {}), '(self.Atilde)\n', (14938, 14951), False, 'from numpy.linalg import svd, pinv, eig\n'), ((18933, 18971), 'numpy.zeros', 'np.zeros', (['[self.Ups.shape[0], n_steps]'], {}), '([self.Ups.shape[0], n_steps])\n', (18941, 18971), True, 'import numpy as np\n'), ((22089, 22127), 'numpy.vstack', 'np.vstack', (['[self.X1, self.Ups, self.U]'], {}), '([self.X1, self.Ups, self.U])\n', (22098, 22127), True, 'import numpy as np\n'), ((23207, 23223), 'numpy.linalg.eig', 'eig', (['self.Atilde'], {}), '(self.Atilde)\n', (23210, 23223), False, 'from numpy.linalg import svd, pinv, eig\n'), ((27298, 27336), 'numpy.zeros', 'np.zeros', (['[self.Ups.shape[0], n_steps]'], {}), '([self.Ups.shape[0], n_steps])\n', (27306, 27336), True, 'import numpy as np\n'), ((1964, 1997), 'numpy.linalg.svd', 'svd', (['self.X1'], {'full_matrices': '(False)'}), '(self.X1, full_matrices=False)\n', (1967, 1997), False, 'from numpy.linalg import svd, pinv, eig\n'), ((2263, 
2277), 'numpy.diag', 'np.diag', (['(1 / S)'], {}), '(1 / S)\n', (2270, 2277), True, 'import numpy as np\n'), ((3983, 4013), 'numpy.exp', 'np.exp', (['(omega * (ts - self.t0))'], {}), '(omega * (ts - self.t0))\n', (3989, 4013), True, 'import numpy as np\n'), ((4195, 4211), 'numpy.linalg.pinv', 'pinv', (['self.modes'], {}), '(self.modes)\n', (4199, 4211), False, 'from numpy.linalg import svd, pinv, eig\n'), ((7584, 7615), 'numpy.linalg.svd', 'svd', (['Omega'], {'full_matrices': '(False)'}), '(Omega, full_matrices=False)\n', (7587, 7615), False, 'from numpy.linalg import svd, pinv, eig\n'), ((7639, 7672), 'numpy.linalg.svd', 'svd', (['self.X2'], {'full_matrices': '(False)'}), '(self.X2, full_matrices=False)\n', (7642, 7672), False, 'from numpy.linalg import svd, pinv, eig\n'), ((8116, 8131), 'numpy.diag', 'np.diag', (['(1 / Sg)'], {}), '(1 / Sg)\n', (8123, 8131), True, 'import numpy as np\n'), ((9461, 9474), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (9469, 9474), True, 'import numpy as np\n'), ((10825, 10838), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (10833, 10838), True, 'import numpy as np\n'), ((14180, 14211), 'numpy.linalg.svd', 'svd', (['Omega'], {'full_matrices': '(False)'}), '(Omega, full_matrices=False)\n', (14183, 14211), False, 'from numpy.linalg import svd, pinv, eig\n'), ((14235, 14268), 'numpy.linalg.svd', 'svd', (['self.X2'], {'full_matrices': '(False)'}), '(self.X2, full_matrices=False)\n', (14238, 14268), False, 'from numpy.linalg import svd, pinv, eig\n'), ((14712, 14727), 'numpy.diag', 'np.diag', (['(1 / Sg)'], {}), '(1 / Sg)\n', (14719, 14727), True, 'import numpy as np\n'), ((16439, 16452), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (16447, 16452), True, 'import numpy as np\n'), ((18784, 18797), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (18792, 18797), True, 'import numpy as np\n'), ((22261, 22292), 'numpy.linalg.svd', 'svd', (['Omega'], {'full_matrices': '(False)'}), '(Omega, full_matrices=False)\n', 
(22264, 22292), False, 'from numpy.linalg import svd, pinv, eig\n'), ((22316, 22349), 'numpy.linalg.svd', 'svd', (['self.X2'], {'full_matrices': '(False)'}), '(self.X2, full_matrices=False)\n', (22319, 22349), False, 'from numpy.linalg import svd, pinv, eig\n'), ((22834, 22849), 'numpy.diag', 'np.diag', (['(1 / Sg)'], {}), '(1 / Sg)\n', (22841, 22849), True, 'import numpy as np\n'), ((24756, 24769), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (24764, 24769), True, 'import numpy as np\n'), ((27149, 27162), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (27157, 27162), True, 'import numpy as np\n'), ((2315, 2329), 'numpy.diag', 'np.diag', (['(1 / S)'], {}), '(1 / S)\n', (2322, 2329), True, 'import numpy as np\n'), ((7776, 7798), 'numpy.isscalar', 'np.isscalar', (['threshold'], {}), '(threshold)\n', (7787, 7798), True, 'import numpy as np\n'), ((10715, 10732), 'scipy.linalg.expm', 'expm', (['(dt * self.A)'], {}), '(dt * self.A)\n', (10719, 10732), False, 'from scipy.linalg import expm\n'), ((14372, 14394), 'numpy.isscalar', 'np.isscalar', (['threshold'], {}), '(threshold)\n', (14383, 14394), True, 'import numpy as np\n'), ((18686, 18712), 'scipy.linalg.expm', 'expm', (['((self.A + _uBt) * dt)'], {}), '((self.A + _uBt) * dt)\n', (18690, 18712), False, 'from scipy.linalg import expm\n'), ((22453, 22475), 'numpy.isscalar', 'np.isscalar', (['threshold'], {}), '(threshold)\n', (22464, 22475), True, 'import numpy as np\n'), ((27019, 27045), 'scipy.linalg.expm', 'expm', (['(dt * (self.A + _uBt))'], {}), '(dt * (self.A + _uBt))\n', (27023, 27045), False, 'from scipy.linalg import expm\n'), ((2708, 2722), 'numpy.diag', 'np.diag', (['(1 / S)'], {}), '(1 / S)\n', (2715, 2722), True, 'import numpy as np\n'), ((3757, 3781), 'numpy.log', 'np.log', (['(self.eigs + 0.0j)'], {}), '(self.eigs + 0.0j)\n', (3763, 3781), True, 'import numpy as np\n'), ((16280, 16313), 'numpy.einsum', 'np.einsum', (['"""sc,sm->scm"""', '_ct', '_xt'], {}), "('sc,sm->scm', _ct, _xt)\n", 
(16289, 16313), True, 'import numpy as np\n'), ((24572, 24605), 'numpy.einsum', 'np.einsum', (['"""sc,sm->scm"""', '_ct', '_xt'], {}), "('sc,sm->scm', _ct, _xt)\n", (24581, 24605), True, 'import numpy as np\n')] |
import zlib
from rdkit import Chem
from rdkit.Chem.PropertyMol import PropertyMol
import torch
import numpy as np
from modules.mol import conformation_generation, get_mol_coordinate, get_mol_type_one_hot
from modules.surface import MoleculeAtomsToPointNormal
def generate_and_encode(smi):
    """
    Generate conformers for a SMILES string and return them zlib-compressed.

    :param smi: SMILES string of the molecule to embed.
    :return: zlib-compressed bytes of the newline-joined mol blocks,
        one mol block per generated conformer.
    """
    mol, conf_ids = conformation_generation(smi, RmsThresh=1)
    blocks = []
    for conf_id in conf_ids:
        # Copy the molecule, attach this single conformer, and tag it
        # with the source SMILES so it can be traced after decoding.
        clone = PropertyMol(mol)
        clone.AddConformer(mol.GetConformer(conf_id))
        clone.SetProp('_Name', smi)
        blocks.append(Chem.MolToMolBlock(clone))
    compressed = zlib.compress('\n'.join(blocks).encode())
    # Drop the large intermediates before returning the compressed payload.
    del mol
    del blocks
    return compressed
def decode(block):
    """
    Decompress a payload produced by ``generate_and_encode`` back to molecules.

    :param block: zlib-compressed bytes of concatenated mol blocks.
    :return: list of rdkit mol objects, one per mol block in the payload.
    """
    text = zlib.decompress(block).decode()
    # Split on the mol-block terminator and re-append it so each piece
    # is a complete, parseable mol block again.
    return [Chem.MolFromMolBlock(piece + 'END\n\n')
            for piece in text.split('END\n\n')]
def to_point_cloud(mol, B=500, theta_distance=1.0, r=2.05, smoothness=0.1, variance=0.2, ite=100):
    """
    Sample a surface point cloud from a molecule's conformer.

    :param mol: rdkit.mol with at least one conformer attached.
    :param B: int, number of points to sample.
    :param theta_distance: float, variance distance (A) of the normal
        sampling of the neighborhood points.
    :param r: float, radius of the level-set surface.
    :param smoothness: float, smoothing constant for the SDF calculation.
    :param variance: float, sampling variance parameter.
    :param ite: int, number of descent iterations.
    :return: detached tensor of sub-sampled surface points.
    """
    conformer = mol.GetConformer()
    # Move coordinates and per-atom one-hot types onto the GPU.
    # NOTE(review): requires a CUDA device — no CPU fallback here.
    coords = torch.from_numpy(get_mol_coordinate(conformer)).cuda()
    types = torch.from_numpy(np.array(get_mol_type_one_hot(mol))).cuda()
    processer = MoleculeAtomsToPointNormal(atoms=coords, atomtype=types, B=B, r=r,
                                         smoothness=smoothness, variance=variance,
                                         theta_distance=theta_distance)
    # Sample candidate points, descend them onto the level set,
    # discard outliers, then thin the cloud before returning.
    pts, z = processer.sampling()
    z = processer.descend(pts, z, ite=ite)
    z = processer.cleaning(pts, z)
    z = processer.sub_sampling(z)
    return z.detach()
| [
"modules.mol.conformation_generation",
"rdkit.Chem.MolFromMolBlock",
"rdkit.Chem.MolToMolBlock",
"modules.mol.get_mol_coordinate",
"torch.from_numpy",
"zlib.compress",
"numpy.array",
"rdkit.Chem.PropertyMol.PropertyMol",
"modules.mol.get_mol_type_one_hot",
"modules.surface.MoleculeAtomsToPointNorm... | [((406, 447), 'modules.mol.conformation_generation', 'conformation_generation', (['smi'], {'RmsThresh': '(1)'}), '(smi, RmsThresh=1)\n', (429, 447), False, 'from modules.mol import conformation_generation, get_mol_coordinate, get_mol_type_one_hot\n'), ((786, 802), 'zlib.compress', 'zlib.compress', (['s'], {}), '(s)\n', (799, 802), False, 'import zlib\n'), ((939, 961), 'zlib.decompress', 'zlib.decompress', (['block'], {}), '(block)\n', (954, 961), False, 'import zlib\n'), ((1752, 1781), 'modules.mol.get_mol_coordinate', 'get_mol_coordinate', (['conformer'], {}), '(conformer)\n', (1770, 1781), False, 'from modules.mol import conformation_generation, get_mol_coordinate, get_mol_type_one_hot\n'), ((1797, 1822), 'modules.mol.get_mol_type_one_hot', 'get_mol_type_one_hot', (['mol'], {}), '(mol)\n', (1817, 1822), False, 'from modules.mol import conformation_generation, get_mol_coordinate, get_mol_type_one_hot\n'), ((1947, 2092), 'modules.surface.MoleculeAtomsToPointNormal', 'MoleculeAtomsToPointNormal', ([], {'atoms': 'atoms', 'atomtype': 'atomtype', 'B': 'B', 'r': 'r', 'smoothness': 'smoothness', 'variance': 'variance', 'theta_distance': 'theta_distance'}), '(atoms=atoms, atomtype=atomtype, B=B, r=r,\n smoothness=smoothness, variance=variance, theta_distance=theta_distance)\n', (1973, 2092), False, 'from modules.surface import MoleculeAtomsToPointNormal\n'), ((514, 530), 'rdkit.Chem.PropertyMol.PropertyMol', 'PropertyMol', (['mol'], {}), '(mol)\n', (525, 530), False, 'from rdkit.Chem.PropertyMol import PropertyMol\n'), ((686, 715), 'rdkit.Chem.MolToMolBlock', 'Chem.MolToMolBlock', (['mol_clone'], {}), '(mol_clone)\n', (704, 715), False, 'from rdkit import Chem\n'), ((1070, 1112), 'rdkit.Chem.MolFromMolBlock', 'Chem.MolFromMolBlock', (["(string_i + 'END\\n\\n')"], {}), "(string_i + 'END\\n\\n')\n", (1090, 1112), False, 'from rdkit import Chem\n'), ((1894, 1917), 'torch.from_numpy', 'torch.from_numpy', (['atoms'], {}), 
'(atoms)\n', (1910, 1917), False, 'import torch\n'), ((1855, 1873), 'numpy.array', 'np.array', (['atomtype'], {}), '(atomtype)\n', (1863, 1873), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 30 20:41:13 2020
@author: rodri
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt # gerar os gráficos
from sklearn.linear_model import LinearRegression # modelo de regressão linear
from yellowbrick.regressor import ResidualsPlot
# Load the cars dataset and drop the leftover CSV index column.
cars = pd.read_csv('cars.csv')
cars = cars.drop(['Unnamed: 0'], axis=1)

# Predictor: stopping distance (column index 1), reshaped into a column matrix
# as required by scikit-learn estimators.
distance = cars.iloc[:, 1].values.reshape(-1, 1)
# Target: speed (column index 0), extracted as a numeric array.
speed = cars.iloc[:, 0].values

# Pearson correlation between distance and speed.
correlation = np.corrcoef(distance, speed)

# Fit an ordinary least-squares linear regression.
regressor = LinearRegression()
regressor.fit(distance, speed)
regressor.intercept_
regressor.coef_

# Scatter the observations and overlay the fitted regression line.
plt.scatter(distance, speed)
plt.plot(distance, regressor.predict(distance), color='red')

# For a distance of 22 feet, what speed does the model predict?
regressor.intercept_ + regressor.coef_ * 22
regressor.predict([[22]])
regressor._residues

# Residuals diagnostic plot for the fitted model.
residuals_view = ResidualsPlot(regressor)
residuals_view.fit(distance, speed)
residuals_view.poof()
visualizador.poof() | [
"pandas.read_csv",
"numpy.corrcoef",
"yellowbrick.regressor.ResidualsPlot",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LinearRegression"
] | [((313, 336), 'pandas.read_csv', 'pd.read_csv', (['"""cars.csv"""'], {}), "('cars.csv')\n", (324, 336), True, 'import pandas as pd\n'), ((689, 706), 'numpy.corrcoef', 'np.corrcoef', (['X', 'Y'], {}), '(X, Y)\n', (700, 706), True, 'import numpy as np\n'), ((714, 732), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (730, 732), False, 'from sklearn.linear_model import LinearRegression\n'), ((795, 812), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'Y'], {}), '(X, Y)\n', (806, 812), True, 'import matplotlib.pyplot as plt\n'), ((1032, 1053), 'yellowbrick.regressor.ResidualsPlot', 'ResidualsPlot', (['modelo'], {}), '(modelo)\n', (1045, 1053), False, 'from yellowbrick.regressor import ResidualsPlot\n')] |
import numpy
def insw_vector(parameter):
    """Heaviside-style switch: 1.0 when ``parameter`` is strictly positive, else 0.0."""
    return 1.0 if parameter > 0.0 else 0.0
def eTq_vector(temp, basetemp, q10):
    """
    Temperature regulating factor: exp(log(q10) * (temp - basetemp) / basetemp).

    Equals 1.0 at ``temp == basetemp`` and grows/shrinks with the Q10 rule.
    """
    exponent = numpy.log(q10) * (temp - basetemp) / basetemp
    return numpy.exp(exponent)
def get_concentration_ratio(numerator, denominator, p_small):
    """
    Return ``numerator / (denominator + p_small)`` for positive numerators.

    Non-positive numerators yield 0.0; ``p_small`` guards against a
    zero denominator.
    """
    if numerator > 0:
        return numerator / (denominator + p_small)
    return 0.0
"numpy.log"
] | [((182, 196), 'numpy.log', 'numpy.log', (['q10'], {}), '(q10)\n', (191, 196), False, 'import numpy\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.